NEX-18318 qlt needs to disable MSI-X if running passthrough under ESXi
Reviewed by: Joyce McIntosh <joyce.mcintosh@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
NEX-14413 Bad trap in module "apix" due to a NULL pointer dereference
Reviewed by: Steve Peng <steve.peng@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
NEX-10267 BAD TRAP: type=d (#gp General protection) in qlt_msix_resp_handler()
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
NEX-5733 cleanup qlt/qlc
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
NEX-5717 import QLogic 16G FC drivers
Reviewed by: Steve Peng <steve.peng@nexenta.com>
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
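
NEX-18318 background: the change adds a hypervisor check at the top of qlt_setup_msix() so that, when the HBA is a PCI passthrough device under ESXi, the driver reports MSI-X as unavailable and qlt_setup_interrupts() falls back to MSI or fixed interrupts. Below is a minimal sketch of that guard using the illumos get_hwenv()/HW_VMWARE interface the patch itself relies on; qlt_msix_usable() is a hypothetical wrapper shown only for illustration, while in the diff that follows the check sits inline in qlt_setup_msix().

#ifdef __x86
#include <sys/x86_archext.h>	/* get_hwenv(), HW_VMWARE */
#endif

static int
qlt_msix_usable(qlt_state_t *qlt)
{
#ifdef __x86
	/* ESXi passthrough: report MSI-X as unusable so the caller falls back. */
	if (get_hwenv() == HW_VMWARE) {
		EL(qlt, "running under hypervisor, disabling MSI-X\n");
		return (0);
	}
#endif
	return (1);
}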
--- old/usr/src/uts/common/io/comstar/port/qlt/qlt.c
+++ new/usr/src/uts/common/io/comstar/port/qlt/qlt.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
------- 12 lines elided -------
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 - * Copyright 2009 QLogic Corporation. All rights reserved.
23 + * Copyright 2009-2015 QLogic Corporation. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 + * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
29 29 */
30 30
31 31 #include <sys/conf.h>
32 32 #include <sys/ddi.h>
33 33 #include <sys/stat.h>
34 34 #include <sys/pci.h>
35 35 #include <sys/sunddi.h>
36 36 #include <sys/modctl.h>
37 37 #include <sys/file.h>
38 38 #include <sys/cred.h>
39 39 #include <sys/byteorder.h>
40 40 #include <sys/atomic.h>
41 41 #include <sys/scsi/scsi.h>
42 +#include <sys/time.h>
43 +#ifdef __x86
44 +#include <sys/x86_archext.h>
45 +#endif
42 46
43 47 #include <sys/stmf_defines.h>
48 +#include <sys/stmf_ioctl.h>
44 49 #include <sys/fct_defines.h>
45 50 #include <sys/stmf.h>
46 -#include <sys/stmf_ioctl.h>
47 51 #include <sys/portif.h>
48 52 #include <sys/fct.h>
49 53
50 54 #include "qlt.h"
51 55 #include "qlt_dma.h"
52 56 #include "qlt_ioctl.h"
53 57 #include "qlt_open.h"
54 58
55 59 static int qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
56 60 static int qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
61 +static uint8_t *qlt_vpd_findtag(qlt_state_t *qlt, uint8_t *vpdbuf,
62 + int8_t *opcode);
63 +static int qlt_vpd_lookup(qlt_state_t *qlt, uint8_t *opcode, uint8_t *bp,
64 + int32_t bplen);
57 65 static void qlt_enable_intr(qlt_state_t *);
58 66 static void qlt_disable_intr(qlt_state_t *);
59 67 static fct_status_t qlt_reset_chip(qlt_state_t *qlt);
60 68 static fct_status_t qlt_download_fw(qlt_state_t *qlt);
61 69 static fct_status_t qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
62 70 uint32_t word_count, uint32_t risc_addr);
63 71 static fct_status_t qlt_raw_mailbox_command(qlt_state_t *qlt);
64 72 static mbox_cmd_t *qlt_alloc_mailbox_command(qlt_state_t *qlt,
65 - uint32_t dma_size);
73 + uint32_t dma_size);
66 74 void qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
67 75 static fct_status_t qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp);
68 76 static uint_t qlt_isr(caddr_t arg, caddr_t arg2);
77 +static uint_t qlt_msix_resp_handler(caddr_t arg, caddr_t arg2);
78 +static uint_t qlt_msix_default_handler(caddr_t arg, caddr_t arg2);
69 79 static fct_status_t qlt_firmware_dump(fct_local_port_t *port,
70 80 stmf_state_change_info_t *ssci);
71 81 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
72 82 static void qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp);
73 83 static void qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio);
74 -static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp);
84 +static void qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp,
85 + uint16_t qi);
75 86 static void qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp);
76 87 static void qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp);
77 88 static void qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
78 89 static void qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt,
79 90 uint8_t *rsp);
80 91 static void qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp);
81 -static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp);
82 -static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp);
92 +static void qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp, uint16_t qi);
93 +static void qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp,
94 + uint16_t qi);
83 95 static fct_status_t qlt_read_nvram(qlt_state_t *qlt);
96 +static fct_status_t qlt_read_vpd(qlt_state_t *qlt);
97 +static fct_status_t qlt_read_rom_image(qlt_state_t *qlt);
84 98 static void qlt_verify_fw(qlt_state_t *qlt);
85 99 static void qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp);
86 100 fct_status_t qlt_port_start(caddr_t arg);
87 101 fct_status_t qlt_port_stop(caddr_t arg);
88 102 fct_status_t qlt_port_online(qlt_state_t *qlt);
89 103 fct_status_t qlt_port_offline(qlt_state_t *qlt);
90 104 static fct_status_t qlt_get_link_info(fct_local_port_t *port,
91 105 fct_link_info_t *li);
92 106 static void qlt_ctl(struct fct_local_port *port, int cmd, void *arg);
93 107 static fct_status_t qlt_force_lip(qlt_state_t *);
94 108 static fct_status_t qlt_do_flogi(struct fct_local_port *port,
95 - fct_flogi_xchg_t *fx);
109 + fct_flogi_xchg_t *fx);
96 110 void qlt_handle_atio_queue_update(qlt_state_t *qlt);
97 -void qlt_handle_resp_queue_update(qlt_state_t *qlt);
111 +void qlt_handle_resp_queue_update(qlt_state_t *qlt, uint16_t qi);
98 112 fct_status_t qlt_register_remote_port(fct_local_port_t *port,
99 113 fct_remote_port_t *rp, fct_cmd_t *login);
100 114 fct_status_t qlt_deregister_remote_port(fct_local_port_t *port,
101 115 fct_remote_port_t *rp);
102 116 fct_status_t qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags);
103 117 fct_status_t qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd);
104 118 fct_status_t qlt_send_abts_response(qlt_state_t *qlt,
105 119 fct_cmd_t *cmd, int terminate);
106 120 static void qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot);
107 121 int qlt_set_uniq_flag(uint16_t *ptr, uint16_t setf, uint16_t abortf);
108 122 fct_status_t qlt_abort_cmd(struct fct_local_port *port,
109 123 fct_cmd_t *cmd, uint32_t flags);
110 124 fct_status_t qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
111 125 fct_status_t qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd);
112 126 fct_status_t qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd);
113 127 fct_status_t qlt_send_cmd(fct_cmd_t *cmd);
114 128 fct_status_t qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd);
115 129 fct_status_t qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd);
------- 8 lines elided -------
116 130 fct_status_t qlt_xfer_scsi_data(fct_cmd_t *cmd,
117 131 stmf_data_buf_t *dbuf, uint32_t ioflags);
118 132 fct_status_t qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd);
119 133 static void qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp);
120 134 static void qlt_release_intr(qlt_state_t *qlt);
121 135 static int qlt_setup_interrupts(qlt_state_t *qlt);
122 136 static void qlt_destroy_mutex(qlt_state_t *qlt);
123 137
124 138 static fct_status_t qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr,
125 139 uint32_t words);
140 +static fct_status_t qlt_mbx_mpi_ram(qlt_state_t *qlt, uint32_t addr,
141 + uint32_t words, uint16_t direction);
126 142 static int qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries,
127 143 caddr_t buf, uint_t size_left);
128 144 static int qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
129 145 caddr_t buf, uint_t size_left);
130 146 static int qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr,
131 147 int count, uint_t size_left);
132 148 static int qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
133 149 cred_t *credp, int *rval);
134 150 static int qlt_open(dev_t *devp, int flag, int otype, cred_t *credp);
135 151 static int qlt_close(dev_t dev, int flag, int otype, cred_t *credp);
136 152
137 153 static int qlt_setup_msi(qlt_state_t *qlt);
138 154 static int qlt_setup_msix(qlt_state_t *qlt);
139 155
140 156 static int qlt_el_trace_desc_ctor(qlt_state_t *qlt);
141 157 static int qlt_el_trace_desc_dtor(qlt_state_t *qlt);
142 -static int qlt_validate_trace_desc(qlt_state_t *qlt);
143 -static char *qlt_find_trace_start(qlt_state_t *qlt);
144 158
145 159 static int qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval);
146 160 static int qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val);
147 161 static int qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop,
148 162 char **prop_val);
149 163 static int qlt_read_int_instance_prop(qlt_state_t *, char *, int);
150 164 static int qlt_convert_string_to_ull(char *prop, int radix,
151 165 u_longlong_t *result);
152 166 static boolean_t qlt_wwn_overload_prop(qlt_state_t *qlt);
153 167 static int qlt_quiesce(dev_info_t *dip);
168 +static void qlt_disable_intr(qlt_state_t *qlt);
154 169 static fct_status_t qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t,
155 170 uint32_t);
156 171 static fct_status_t qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t,
157 172 uint32_t *);
158 173 static void qlt_mps_reset(qlt_state_t *qlt);
159 174 static void qlt_properties(qlt_state_t *qlt);
160 175
176 +static fct_status_t qlt_mq_create(qlt_state_t *qlt, int idx);
177 +static fct_status_t qlt_mq_destroy(qlt_state_t *qlt);
161 178
179 +static fct_status_t qlt_27xx_get_dmp_template(qlt_state_t *);
180 +static uint32_t qlt_27xx_dmp_parse_template(qlt_state_t *, qlt_dt_hdr_t *,
181 + uint8_t *, uint32_t);
182 +static int qlt_27xx_dump_ram(qlt_state_t *, uint16_t, uint32_t,
183 + uint32_t, uint8_t *);
184 +
162 185 #define SETELSBIT(bmp, els) (bmp)[((els) >> 3) & 0x1F] = \
163 186 (uint8_t)((bmp)[((els) >> 3) & 0x1F] | ((uint8_t)1) << ((els) & 7))
164 187
165 -int qlt_enable_msix = 0;
188 +int qlt_enable_msix = 1;
166 189 int qlt_enable_msi = 1;
167 190
168 191
169 192 string_table_t prop_status_tbl[] = DDI_PROP_STATUS();
170 193
171 194 /* Array to quickly calculate next free buf index to use */
172 195 #if 0
173 196 static int qlt_nfb[] = { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
174 197 #endif
175 198
176 199 static struct cb_ops qlt_cb_ops = {
177 200 qlt_open,
178 201 qlt_close,
179 202 nodev,
180 203 nodev,
181 204 nodev,
182 205 nodev,
183 206 nodev,
184 207 qlt_ioctl,
185 208 nodev,
186 209 nodev,
187 210 nodev,
188 211 nochpoll,
189 212 ddi_prop_op,
190 213 0,
191 214 D_MP | D_NEW
192 215 };
193 216
194 217 static struct dev_ops qlt_ops = {
195 218 DEVO_REV,
196 219 0,
197 220 nodev,
198 221 nulldev,
------- 23 lines elided -------
199 222 nulldev,
200 223 qlt_attach,
201 224 qlt_detach,
202 225 nodev,
203 226 &qlt_cb_ops,
204 227 NULL,
205 228 ddi_power,
206 229 qlt_quiesce
207 230 };
208 231
209 -#ifndef PORT_SPEED_10G
210 -#define PORT_SPEED_10G 16
232 +#ifndef PORT_SPEED_16G
233 +#define PORT_SPEED_16G 32
211 234 #endif
212 235
236 +#ifndef PORT_SPEED_32G
237 +#define PORT_SPEED_32G 64
238 +#endif
239 +
240 +#ifndef QL_NAME
241 +#define QL_NAME "qlt"
242 +#endif
243 +
213 244 static struct modldrv modldrv = {
214 245 &mod_driverops,
215 246 QLT_NAME" "QLT_VERSION,
216 247 &qlt_ops,
217 248 };
218 249
219 250 static struct modlinkage modlinkage = {
220 251 MODREV_1, &modldrv, NULL
221 252 };
222 253
223 254 void *qlt_state = NULL;
224 255 kmutex_t qlt_global_lock;
225 -static uint32_t qlt_loaded_counter = 0;
256 +static uint32_t qlt_loaded_counter = 0;
257 +uint8_t qlt_reprocess_attempt_cnt = 5;
258 +uint32_t qlt_reprocess_delay = 75; /* default 75 microseconds */
226 259
227 260 static char *pci_speeds[] = { " 33", "-X Mode 1 66", "-X Mode 1 100",
228 261 "-X Mode 1 133", "--Invalid--",
229 262 "-X Mode 2 66", "-X Mode 2 100",
230 263 "-X Mode 2 133", " 66" };
231 264
232 265 /* Always use 64 bit DMA. */
233 266 static ddi_dma_attr_t qlt_queue_dma_attr = {
234 267 DMA_ATTR_V0, /* dma_attr_version */
235 268 0, /* low DMA address range */
236 269 0xffffffffffffffff, /* high DMA address range */
237 270 0xffffffff, /* DMA counter register */
------- 2 lines elided -------
238 271 64, /* DMA address alignment */
239 272 0xff, /* DMA burstsizes */
240 273 1, /* min effective DMA size */
241 274 0xffffffff, /* max DMA xfer size */
242 275 0xffffffff, /* segment boundary */
243 276 1, /* s/g list length */
244 277 1, /* granularity of device */
245 278 0 /* DMA transfer flags */
246 279 };
247 280
281 +
282 +/* Always use 64 bit DMA. */
283 +static ddi_dma_attr_t qlt_queue_dma_attr_mq_req1 = {
284 + DMA_ATTR_V0, /* dma_attr_version */
285 + 0, /* low DMA address range */
286 + 0xffffffffffffffff, /* high DMA address range */
287 + 0xffffffff, /* DMA counter register */
288 + 64, /* DMA address alignment */
289 + 0xff, /* DMA burstsizes */
290 + 1, /* min effective DMA size */
291 + 0xffffffff, /* max DMA xfer size */
292 + 0xffffffff, /* segment boundary */
293 + 1, /* s/g list length */
294 + 1, /* granularity of device */
295 + 0 /* DMA transfer flags */
296 +};
297 +
298 +/* Always use 64 bit DMA. */
299 +static ddi_dma_attr_t qlt_queue_dma_attr_mq_rsp1 = {
300 + DMA_ATTR_V0, /* dma_attr_version */
301 + 0, /* low DMA address range */
302 + 0xffffffffffffffff, /* high DMA address range */
303 + 0xffffffff, /* DMA counter register */
304 + 64, /* DMA address alignment */
305 + 0xff, /* DMA burstsizes */
306 + 1, /* min effective DMA size */
307 + 0xffffffff, /* max DMA xfer size */
308 + 0xffffffff, /* segment boundary */
309 + 1, /* s/g list length */
310 + 1, /* granularity of device */
311 + 0 /* DMA transfer flags */
312 +};
313 +
314 +
248 315 /* qlogic logging */
249 316 int enable_extended_logging = 0;
250 -
251 317 static char qlt_provider_name[] = "qlt";
252 318 static struct stmf_port_provider *qlt_pp;
253 319
254 320 int
255 321 _init(void)
256 322 {
257 323 int ret;
258 324
259 325 ret = ddi_soft_state_init(&qlt_state, sizeof (qlt_state_t), 0);
260 326 if (ret == 0) {
261 327 mutex_init(&qlt_global_lock, 0, MUTEX_DRIVER, 0);
262 328 qlt_pp = (stmf_port_provider_t *)stmf_alloc(
263 329 STMF_STRUCT_PORT_PROVIDER, 0, 0);
264 330 qlt_pp->pp_portif_rev = PORTIF_REV_1;
265 331 qlt_pp->pp_name = qlt_provider_name;
266 332 if (stmf_register_port_provider(qlt_pp) != STMF_SUCCESS) {
267 333 stmf_free(qlt_pp);
268 334 mutex_destroy(&qlt_global_lock);
269 335 ddi_soft_state_fini(&qlt_state);
270 336 return (EIO);
271 337 }
272 338 ret = mod_install(&modlinkage);
273 339 if (ret != 0) {
274 340 (void) stmf_deregister_port_provider(qlt_pp);
275 341 stmf_free(qlt_pp);
276 342 mutex_destroy(&qlt_global_lock);
277 343 ddi_soft_state_fini(&qlt_state);
278 344 }
279 345 }
280 346 return (ret);
281 347 }
282 348
283 349 int
284 350 _fini(void)
285 351 {
286 352 int ret;
287 353
288 354 if (qlt_loaded_counter)
289 355 return (EBUSY);
290 356 ret = mod_remove(&modlinkage);
291 357 if (ret == 0) {
292 358 (void) stmf_deregister_port_provider(qlt_pp);
293 359 stmf_free(qlt_pp);
294 360 mutex_destroy(&qlt_global_lock);
295 361 ddi_soft_state_fini(&qlt_state);
------- 35 lines elided -------
296 362 }
297 363 return (ret);
298 364 }
299 365
300 366 int
301 367 _info(struct modinfo *modinfop)
302 368 {
303 369 return (mod_info(&modlinkage, modinfop));
304 370 }
305 371
306 -
307 372 static int
308 373 qlt_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
309 374 {
310 375 int instance;
311 376 qlt_state_t *qlt;
312 377 ddi_device_acc_attr_t dev_acc_attr;
313 378 uint16_t did;
314 379 uint16_t val;
315 380 uint16_t mr;
316 381 size_t discard;
317 382 uint_t ncookies;
318 383 int max_read_size;
319 384 int max_payload_size;
320 385 fct_status_t ret;
321 386
322 387 /* No support for suspend resume yet */
323 388 if (cmd != DDI_ATTACH)
324 389 return (DDI_FAILURE);
325 390 instance = ddi_get_instance(dip);
326 391
392 + cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
393 + QLT_NAME, instance, QLT_VERSION);
394 +
327 395 if (ddi_soft_state_zalloc(qlt_state, instance) != DDI_SUCCESS) {
396 + cmn_err(CE_WARN, "qlt(%d): soft state alloc failed", instance);
328 397 return (DDI_FAILURE);
329 398 }
330 399
331 400 if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
332 401 NULL) {
402 + cmn_err(CE_WARN, "qlt(%d): can't get soft state", instance);
333 403 goto attach_fail_1;
334 404 }
335 405
336 406 qlt->instance = instance;
337 407
338 408 qlt->nvram = (qlt_nvram_t *)kmem_zalloc(sizeof (qlt_nvram_t), KM_SLEEP);
409 + qlt->vpd = (uint32_t *)kmem_zalloc(QL_24XX_VPD_SIZE, KM_SLEEP);
339 410 qlt->dip = dip;
340 411
341 412 if (qlt_el_trace_desc_ctor(qlt) != DDI_SUCCESS) {
342 413 cmn_err(CE_WARN, "qlt(%d): can't setup el tracing", instance);
343 - goto attach_fail_1;
414 + goto attach_fail_2;
344 415 }
345 416
346 417 EL(qlt, "instance=%d, ptr=%p\n", instance, (void *)qlt);
347 418
348 419 if (pci_config_setup(dip, &qlt->pcicfg_acc_handle) != DDI_SUCCESS) {
349 - goto attach_fail_2;
420 + cmn_err(CE_WARN, "qlt(%d): pci_config_setup failed", instance);
421 + goto attach_fail_3;
350 422 }
423 +
351 424 did = PCICFG_RD16(qlt, PCI_CONF_DEVID);
352 425 if ((did != 0x2422) && (did != 0x2432) &&
353 426 (did != 0x8432) && (did != 0x2532) &&
354 - (did != 0x8001)) {
427 + (did != 0x8001) && (did != 0x2031) &&
428 + (did != 0x2071) && (did != 0x2261)) {
355 429 cmn_err(CE_WARN, "qlt(%d): unknown devid(%x), failing attach",
356 430 instance, did);
357 431 goto attach_fail_4;
358 432 }
359 433
360 - if ((did & 0xFF00) == 0x8000)
434 + if ((did & 0xFFFF) == 0x2071) {
435 + qlt->qlt_27xx_chip = 1;
436 + qlt->qlt_fcoe_enabled = 0;
437 + } else if ((did & 0xFFFF) == 0x2261) {
438 + qlt->qlt_27xx_chip = 1;
439 + qlt->qlt_fcoe_enabled = 0;
440 + } else if ((did & 0xFFFF) == 0x2031) {
441 + qlt->qlt_83xx_chip = 1;
442 + qlt->qlt_fcoe_enabled = 0;
443 + } else if ((did & 0xFFF0) == 0x8000) {
361 444 qlt->qlt_81xx_chip = 1;
362 - else if ((did & 0xFF00) == 0x2500)
445 + qlt->qlt_fcoe_enabled = 1;
446 + } else if ((did & 0xFF00) == 0x2500)
363 447 qlt->qlt_25xx_chip = 1;
364 448
365 449 dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
366 450 dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
367 451 dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
368 - if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
369 - &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
370 - goto attach_fail_4;
452 +
453 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
454 + int stat_1;
455 + off_t regsize_1;
456 +
457 + stat_1 = ddi_dev_regsize(dip, 1, ®size_1);
458 + if (stat_1 != DDI_SUCCESS) {
459 + stmf_trace(qlt->qlt_port_alias,
460 + "instance=%d, reg 1 regsize failed,"
461 + " stat %x", instance, stat_1);
462 + goto attach_fail_4;
463 + }
464 +
465 + if (ddi_regs_map_setup(dip, 1, &qlt->regs, 0, regsize_1,
466 + &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
467 + cmn_err(CE_NOTE, "qlt(%d) ddi_regs_map_setup failed\n",
468 + instance);
469 + goto attach_fail_4;
470 + }
471 + } else {
472 + /*
473 + * 24xx and 25xx: rnumber 0 is config space
474 + * rnumber 1 is for IO space
475 + * rnumber 2 is for MBAR0: ISP, MSIX, PBA
476 + */
477 + if (ddi_regs_map_setup(dip, 2, &qlt->regs, 0, 0x100,
478 + &dev_acc_attr, &qlt->regs_acc_handle) != DDI_SUCCESS) {
479 + goto attach_fail_4;
480 + }
371 481 }
482 +
483 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
484 +
485 + uint32_t w32h;
486 + uint32_t w32l;
487 + int stat;
488 + off_t regsize;
489 +
490 + w32l = PCICFG_RD32(qlt, PCI_CONF_BASE2);
491 + w32h = PCICFG_RD32(qlt, PCI_CONF_BASE3);
492 +
493 + if ((w32h > 0) || w32l > 0) {
494 + stat = ddi_dev_regsize(dip, 2, ®size);
495 + if (stat != DDI_SUCCESS) {
496 + stmf_trace(qlt->qlt_port_alias,
497 + "instance=%d, MSI-X regsize failed,"
498 + " stat %x", instance, stat);
499 + }
500 + stmf_trace(qlt->qlt_port_alias,
501 + "instance=%d, MSI-X MEM Bar size %x",
502 + instance, regsize);
503 +
504 + stat = ddi_regs_map_setup(dip, 2, &qlt->msix_base, 0,
505 + /* ((MQ_MAX_QUEUES * 2) +1) << 2, */
506 + regsize,
507 + &dev_acc_attr, &qlt->msix_acc_handle);
508 +
509 + if (stat != DDI_SUCCESS || qlt->msix_base == NULL ||
510 + qlt->msix_acc_handle == NULL) {
511 +
512 + cmn_err(CE_WARN,
513 + "qlt(%d): can't map MBar for MSI-X",
514 + instance);
515 + stmf_trace(qlt->qlt_port_alias,
516 + "instance=%d, MSI-X MEM Bar map fail",
517 + instance);
518 +
519 + if (qlt->msix_acc_handle != NULL) {
520 + ddi_regs_map_free(
521 + &qlt->msix_acc_handle);
522 + }
523 + goto attach_fail_5;
524 + }
525 + } else {
526 + cmn_err(CE_WARN, "qlt(%d): can't setup MBar for MSI-X",
527 + instance);
528 + stmf_trace(qlt->qlt_port_alias,
529 + "instance=%d, No MSI-X MEM Bar", instance);
530 + goto attach_fail_5;
531 + }
532 +
533 + w32l = PCICFG_RD32(qlt, PCI_CONF_BASE4);
534 + w32h = PCICFG_RD32(qlt, PCI_CONF_BASE5);
535 +
536 + if ((w32h > 0) || w32l > 0) {
537 + stat = ddi_dev_regsize(dip, 3, ®size);
538 + if (stat != DDI_SUCCESS) {
539 + stmf_trace(qlt->qlt_port_alias,
540 + "instance=%d, MQ regsize failed, stat %x",
541 + instance, stat);
542 + }
543 + stmf_trace(qlt->qlt_port_alias,
544 + "instance=%d, MQ MEM Bar size %x",
545 + instance, regsize);
546 +
547 + /* for 83xx the QP pointers are in the 3rd MBar */
548 + stat = ddi_regs_map_setup(dip, 3, &qlt->mq_reg_base, 0,
549 + (MQ_MAX_QUEUES * MQBAR_REG_OFFSET),
550 + &dev_acc_attr, &qlt->mq_reg_acc_handle);
551 +
552 + if (stat != DDI_SUCCESS || qlt->mq_reg_base == NULL ||
553 + qlt->mq_reg_acc_handle == NULL) {
554 +
555 + cmn_err(CE_WARN, "qlt(%d): can't map QP MBar",
556 + instance);
557 + stmf_trace(qlt->qlt_port_alias,
558 + "instance=%d, QP MEM Bar map fail st:%x",
559 + instance, stat);
560 +
561 + if (qlt->msix_acc_handle != NULL) {
562 + ddi_regs_map_free(
563 + &qlt->msix_acc_handle);
564 + }
565 + if (qlt->mq_reg_acc_handle != NULL) {
566 + ddi_regs_map_free(
567 + &qlt->mq_reg_acc_handle);
568 + }
569 + goto attach_fail_5;
570 + } else {
571 + qlt->qlt_mq_enabled = 1;
572 + }
573 + } else {
574 + cmn_err(CE_WARN, "qlt(%d): can't setup MBar for QPs",
575 + instance);
576 + stmf_trace(qlt->qlt_port_alias,
577 + "instance=%d, No QPs MEM Bar", instance);
578 +
579 + if (qlt->msix_acc_handle != NULL) {
580 + ddi_regs_map_free(
581 + &qlt->msix_acc_handle);
582 + }
583 + goto attach_fail_5;
584 + }
585 + } else if (qlt->qlt_81xx_chip) {
586 +
587 + uint32_t w32;
588 + int stat;
589 +
590 + w32 = PCICFG_RD32(qlt, PCI_CONF_BASE3);
591 + if (w32 == 0) {
592 +
593 + cmn_err(CE_WARN, "qlt(%d): can't setup MBar2",
594 + instance);
595 + stmf_trace(qlt->qlt_port_alias,
596 + "instance=%d, No MEM Bar2", instance);
597 + goto attach_fail_5;
598 + }
599 +
600 + stat = ddi_regs_map_setup(dip, 3, &qlt->mq_reg_base, 0,
601 + (MQ_MAX_QUEUES * MQBAR_REG_OFFSET),
602 + &dev_acc_attr, &qlt->mq_reg_acc_handle);
603 +
604 + if (stat != DDI_SUCCESS || qlt->mq_reg_base == NULL ||
605 + qlt->mq_reg_acc_handle == NULL) {
606 +
607 + cmn_err(CE_WARN, "qlt(%d): can't map MBar2",
608 + instance);
609 + stmf_trace(qlt->qlt_port_alias,
610 + "instance=%d, MEM Bar2 map fail", instance);
611 +
612 + if (qlt->mq_reg_acc_handle != NULL) {
613 + ddi_regs_map_free(&qlt->mq_reg_acc_handle);
614 + }
615 + goto attach_fail_5;
616 + } else {
617 + qlt->qlt_mq_enabled = 1;
618 + }
619 + } else if (qlt->qlt_25xx_chip) {
620 + uint32_t w32h;
621 + uint32_t w32l;
622 + int stat;
623 + off_t regsize;
624 +
625 + /* MBAR2 rnumber 3 */
626 + w32l = PCICFG_RD32(qlt, PCI_CONF_BASE3);
627 + w32h = PCICFG_RD32(qlt, PCI_CONF_BASE4);
628 +
629 + if ((w32h > 0) || (w32l > 0)) {
630 + stat = ddi_dev_regsize(dip, 3, ®size);
631 + if (stat != DDI_SUCCESS) {
632 + stmf_trace(qlt->qlt_port_alias,
633 + "ISP25xx inst=%d, MQ regsize failed, stat %x",
634 + instance, stat);
635 + EL(qlt, "ISP25xx MQ regsize failed, stat %x\n",
636 + stat);
637 +
638 + }
639 + stmf_trace(qlt->qlt_port_alias,
640 + "ISP25xx instance=%d, MQ MEM Bar size %lx",
641 + instance, regsize);
642 + EL(qlt, "ISP25xx MQ MEM Bar (MBAR2) size: %x\n",
643 + regsize);
644 +
645 + stat = ddi_regs_map_setup(dip, 3, &qlt->mq_reg_base, 0,
646 + (MQ_MAX_QUEUES * MQBAR_REG_OFFSET),
647 + &dev_acc_attr, &qlt->mq_reg_acc_handle);
648 + if (stat != DDI_SUCCESS || qlt->mq_reg_base == NULL ||
649 + qlt->mq_reg_acc_handle == NULL) {
650 + cmn_err(CE_WARN,
651 + "qlt(%d): ISP25xx can't map QP MBar",
652 + instance);
653 + stmf_trace(qlt->qlt_port_alias,
654 + "instance=%d, QP MEM Bar map fail st:%x",
655 + instance, stat);
656 + if (qlt->mq_reg_acc_handle != NULL) {
657 + ddi_regs_map_free(
658 + &qlt->mq_reg_acc_handle);
659 + }
660 + } else {
661 + qlt->qlt_mq_enabled = 1;
662 + }
663 + } else {
664 + stmf_trace(qlt->qlt_port_alias,
665 + "instance=%d, No QPs MEM Bar", instance);
666 + EL(qlt,
667 + "ISP25xx can't setup MBar QPs, use baseq\n");
668 + }
669 + }
670 +
671 + if (qlt->qlt_mq_enabled) {
672 + qlt->mq_req = kmem_zalloc(
673 + ((sizeof (qlt_mq_req_ptr_blk_t)) * MQ_MAX_QUEUES),
674 + KM_SLEEP);
675 + qlt->mq_resp = kmem_zalloc(
676 + ((sizeof (qlt_mq_rsp_ptr_blk_t)) * MQ_MAX_QUEUES),
677 + KM_SLEEP);
678 + } else {
679 + qlt->mq_req = kmem_zalloc(
680 + (sizeof (qlt_mq_req_ptr_blk_t)), KM_SLEEP);
681 + qlt->mq_resp = kmem_zalloc(
682 + (sizeof (qlt_mq_rsp_ptr_blk_t)), KM_SLEEP);
683 + }
684 +
372 685 if (did == 0x2422) {
373 686 uint32_t pci_bits = REG_RD32(qlt, REG_CTRL_STATUS);
374 687 uint32_t slot = pci_bits & PCI_64_BIT_SLOT;
375 688 pci_bits >>= 8;
376 689 pci_bits &= 0xf;
377 690 if ((pci_bits == 3) || (pci_bits == 7)) {
378 691 cmn_err(CE_NOTE,
379 692 "!qlt(%d): HBA running at PCI%sMHz (%d)",
380 693 instance, pci_speeds[pci_bits], pci_bits);
381 694 } else {
382 695 cmn_err(CE_WARN,
383 696 "qlt(%d): HBA running at PCI%sMHz %s(%d)",
384 697 instance, (pci_bits <= 8) ? pci_speeds[pci_bits] :
------- 3 lines elided -------
385 698 "(Invalid)", ((pci_bits == 0) ||
386 699 (pci_bits == 8)) ? (slot ? "64 bit slot " :
387 700 "32 bit slot ") : "", pci_bits);
388 701 }
389 702 }
390 703 if ((ret = qlt_read_nvram(qlt)) != QLT_SUCCESS) {
391 704 cmn_err(CE_WARN, "qlt(%d): read nvram failure %llx", instance,
392 705 (unsigned long long)ret);
393 706 goto attach_fail_5;
394 707 }
708 + if ((ret = qlt_read_vpd(qlt)) != QLT_SUCCESS) {
709 + cmn_err(CE_WARN, "qlt(%d): read vpd failure %llx", instance,
710 + (unsigned long long)ret);
711 + goto attach_fail_5;
712 + }
713 + if ((ret = qlt_read_rom_image(qlt)) != QLT_SUCCESS) {
714 + cmn_err(CE_WARN, "qlt(%d): read rom image failure %llx",
715 + instance, (unsigned long long)ret);
716 + goto attach_fail_5;
717 + }
395 718
396 719 qlt_properties(qlt);
397 720
398 721 if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr, DDI_DMA_SLEEP,
399 722 0, &qlt->queue_mem_dma_handle) != DDI_SUCCESS) {
400 723 goto attach_fail_5;
401 724 }
402 725 if (ddi_dma_mem_alloc(qlt->queue_mem_dma_handle, TOTAL_DMA_MEM_SIZE,
403 726 &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
404 727 &qlt->queue_mem_ptr, &discard, &qlt->queue_mem_acc_handle) !=
405 728 DDI_SUCCESS) {
------- 1 line elided -------
406 729 goto attach_fail_6;
407 730 }
408 731 if (ddi_dma_addr_bind_handle(qlt->queue_mem_dma_handle, NULL,
409 732 qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE,
410 733 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
411 734 &qlt->queue_mem_cookie, &ncookies) != DDI_SUCCESS) {
412 735 goto attach_fail_7;
413 736 }
414 737 if (ncookies != 1)
415 738 goto attach_fail_8;
416 - qlt->req_ptr = qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
417 - qlt->resp_ptr = qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
739 +
740 + /*
741 + * Base queue (0), always available
742 + */
743 + qlt->mq_req[0].queue_mem_mq_base_addr =
744 + qlt->mq_req[0].mq_ptr =
745 + qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET;
746 + qlt->mq_resp[0].queue_mem_mq_base_addr =
747 + qlt->mq_resp[0].mq_ptr =
748 + qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET;
749 +
418 750 qlt->preq_ptr = qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET;
419 751 qlt->atio_ptr = qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET;
420 752
421 753 /* mutex are inited in this function */
422 754 if (qlt_setup_interrupts(qlt) != DDI_SUCCESS)
423 755 goto attach_fail_8;
424 756
757 + qlt->qlt_queue_cnt = 1;
758 + if ((qlt->qlt_mq_enabled) && (qlt->intr_cnt > 1)) {
759 + int i;
760 +
761 + for (i = 1; i < qlt->intr_cnt; i++) {
762 + if (qlt_mq_create(qlt, i) != QLT_SUCCESS) {
763 + cmn_err(CE_WARN, "qlt(%d) mq create (%d) "
764 + "failed\n", qlt->instance, i);
765 + break;
766 + }
767 + qlt->qlt_queue_cnt++;
768 + if (qlt->qlt_queue_cnt >= MQ_MAX_QUEUES)
769 + break;
770 + }
771 + }
772 + EL(qlt, "Queue count = %d\n", qlt->qlt_queue_cnt);
773 +
425 774 (void) snprintf(qlt->qlt_minor_name, sizeof (qlt->qlt_minor_name),
426 775 "qlt%d", instance);
427 776 (void) snprintf(qlt->qlt_port_alias, sizeof (qlt->qlt_port_alias),
428 777 "%s,0", qlt->qlt_minor_name);
429 778
430 779 if (ddi_create_minor_node(dip, qlt->qlt_minor_name, S_IFCHR,
431 780 instance, DDI_NT_STMF_PP, 0) != DDI_SUCCESS) {
432 781 goto attach_fail_9;
433 782 }
434 783
435 784 cv_init(&qlt->rp_dereg_cv, NULL, CV_DRIVER, NULL);
436 785 cv_init(&qlt->mbox_cv, NULL, CV_DRIVER, NULL);
437 786 mutex_init(&qlt->qlt_ioctl_lock, NULL, MUTEX_DRIVER, NULL);
438 787
439 788 /* Setup PCI cfg space registers */
440 789 max_read_size = qlt_read_int_prop(qlt, "pci-max-read-request", 11);
441 790 if (max_read_size == 11)
442 791 goto over_max_read_xfer_setting;
443 792 if (did == 0x2422) {
444 793 if (max_read_size == 512)
445 794 val = 0;
446 795 else if (max_read_size == 1024)
447 796 val = 1;
448 797 else if (max_read_size == 2048)
449 798 val = 2;
450 799 else if (max_read_size == 4096)
451 800 val = 3;
452 801 else {
------- 18 lines elided -------
453 802 cmn_err(CE_WARN, "qlt(%d) malformed "
454 803 "pci-max-read-request in qlt.conf. Valid values "
455 804 "for this HBA are 512/1024/2048/4096", instance);
456 805 goto over_max_read_xfer_setting;
457 806 }
458 807 mr = (uint16_t)PCICFG_RD16(qlt, 0x4E);
459 808 mr = (uint16_t)(mr & 0xfff3);
460 809 mr = (uint16_t)(mr | (val << 2));
461 810 PCICFG_WR16(qlt, 0x4E, mr);
462 811 } else if ((did == 0x2432) || (did == 0x8432) ||
463 - (did == 0x2532) || (did == 0x8001)) {
812 + (did == 0x2532) || (did == 0x8001) ||
813 + (did == 0x2031) || (did == 0x2071) ||
814 + (did == 0x2261)) {
464 815 if (max_read_size == 128)
465 816 val = 0;
466 817 else if (max_read_size == 256)
467 818 val = 1;
468 819 else if (max_read_size == 512)
469 820 val = 2;
470 821 else if (max_read_size == 1024)
471 822 val = 3;
472 823 else if (max_read_size == 2048)
473 824 val = 4;
474 825 else if (max_read_size == 4096)
475 826 val = 5;
476 827 else {
477 828 cmn_err(CE_WARN, "qlt(%d) malformed "
478 829 "pci-max-read-request in qlt.conf. Valid values "
479 830 "for this HBA are 128/256/512/1024/2048/4096",
480 831 instance);
481 832 goto over_max_read_xfer_setting;
482 833 }
483 834 mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
484 835 mr = (uint16_t)(mr & 0x8fff);
485 836 mr = (uint16_t)(mr | (val << 12));
486 837 PCICFG_WR16(qlt, 0x54, mr);
487 838 } else {
------- 14 lines elided -------
488 839 cmn_err(CE_WARN, "qlt(%d): dont know how to set "
489 840 "pci-max-read-request for this device (%x)",
490 841 instance, did);
491 842 }
492 843 over_max_read_xfer_setting:;
493 844
494 845 max_payload_size = qlt_read_int_prop(qlt, "pcie-max-payload-size", 11);
495 846 if (max_payload_size == 11)
496 847 goto over_max_payload_setting;
497 848 if ((did == 0x2432) || (did == 0x8432) ||
498 - (did == 0x2532) || (did == 0x8001)) {
849 + (did == 0x2532) || (did == 0x8001) ||
850 + (did == 0x2031) || (did == 0x2071) ||
851 + (did == 0x2261)) {
499 852 if (max_payload_size == 128)
500 853 val = 0;
501 854 else if (max_payload_size == 256)
502 855 val = 1;
503 856 else if (max_payload_size == 512)
504 857 val = 2;
505 858 else if (max_payload_size == 1024)
506 859 val = 3;
507 860 else {
508 861 cmn_err(CE_WARN, "qlt(%d) malformed "
509 862 "pcie-max-payload-size in qlt.conf. Valid values "
510 863 "for this HBA are 128/256/512/1024",
511 864 instance);
512 865 goto over_max_payload_setting;
513 866 }
514 867 mr = (uint16_t)PCICFG_RD16(qlt, 0x54);
515 868 mr = (uint16_t)(mr & 0xff1f);
516 869 mr = (uint16_t)(mr | (val << 5));
517 870 PCICFG_WR16(qlt, 0x54, mr);
------- 9 lines elided -------
518 871 } else {
519 872 cmn_err(CE_WARN, "qlt(%d): dont know how to set "
520 873 "pcie-max-payload-size for this device (%x)",
521 874 instance, did);
522 875 }
523 876
524 877 over_max_payload_setting:;
525 878
526 879 qlt_enable_intr(qlt);
527 880
528 - if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS)
881 + if (qlt_port_start((caddr_t)qlt) != QLT_SUCCESS) {
882 + EL(qlt, "qlt_port_start failed, tear down\n");
883 + qlt_disable_intr(qlt);
529 884 goto attach_fail_10;
885 + }
530 886
531 887 ddi_report_dev(dip);
532 888 return (DDI_SUCCESS);
533 889
534 890 attach_fail_10:;
535 891 mutex_destroy(&qlt->qlt_ioctl_lock);
536 892 cv_destroy(&qlt->mbox_cv);
537 893 cv_destroy(&qlt->rp_dereg_cv);
538 894 ddi_remove_minor_node(dip, qlt->qlt_minor_name);
539 895 attach_fail_9:;
540 896 qlt_destroy_mutex(qlt);
541 897 qlt_release_intr(qlt);
898 + (void) qlt_mq_destroy(qlt);
899 +
542 900 attach_fail_8:;
543 901 (void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
544 902 attach_fail_7:;
545 903 ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
546 904 attach_fail_6:;
547 905 ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
548 906 attach_fail_5:;
907 + if (qlt->mq_resp) {
908 + kmem_free(qlt->mq_resp,
909 + (qlt->qlt_mq_enabled ?
910 + (sizeof (qlt_mq_rsp_ptr_blk_t) * MQ_MAX_QUEUES) :
911 + (sizeof (qlt_mq_rsp_ptr_blk_t))));
912 + }
913 + qlt->mq_resp = NULL;
914 + if (qlt->mq_req) {
915 + kmem_free(qlt->mq_req,
916 + (qlt->qlt_mq_enabled ?
917 + (sizeof (qlt_mq_req_ptr_blk_t) * MQ_MAX_QUEUES) :
918 + (sizeof (qlt_mq_req_ptr_blk_t))));
919 + }
920 + qlt->mq_req = NULL;
921 +
549 922 ddi_regs_map_free(&qlt->regs_acc_handle);
550 923 attach_fail_4:;
551 924 pci_config_teardown(&qlt->pcicfg_acc_handle);
552 - kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
925 +attach_fail_3:;
553 926 (void) qlt_el_trace_desc_dtor(qlt);
554 927 attach_fail_2:;
928 + kmem_free(qlt->vpd, QL_24XX_VPD_SIZE);
929 + kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
555 930 attach_fail_1:;
556 931 ddi_soft_state_free(qlt_state, instance);
557 932 return (DDI_FAILURE);
558 933 }
559 934
560 935 #define FCT_I_EVENT_BRING_PORT_OFFLINE 0x83
561 936
562 937 /* ARGSUSED */
563 938 static int
564 939 qlt_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
565 940 {
566 941 qlt_state_t *qlt;
567 942
568 943 int instance;
569 944
570 945 instance = ddi_get_instance(dip);
571 946 if ((qlt = (qlt_state_t *)ddi_get_soft_state(qlt_state, instance)) ==
572 947 NULL) {
573 948 return (DDI_FAILURE);
574 949 }
575 950
576 951 if (qlt->fw_code01) {
577 952 return (DDI_FAILURE);
578 953 }
579 954
------- 15 lines elided -------
580 955 if ((qlt->qlt_state != FCT_STATE_OFFLINE) ||
581 956 qlt->qlt_state_not_acked) {
582 957 return (DDI_FAILURE);
583 958 }
584 959 if (qlt_port_stop((caddr_t)qlt) != FCT_SUCCESS) {
585 960 return (DDI_FAILURE);
586 961 }
587 962
588 963 qlt_disable_intr(qlt);
589 964
965 + if (qlt->dmp_template_addr != NULL) {
966 + (void) ddi_dma_unbind_handle(qlt->dmp_template_dma_handle);
967 + ddi_dma_mem_free(&qlt->dmp_template_acc_handle);
968 + ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
969 + }
970 +
971 + if (qlt->fw_bin_dump_buf != NULL) {
972 + kmem_free(qlt->fw_bin_dump_buf, qlt->fw_bin_dump_size);
973 + qlt->fw_bin_dump_buf = NULL;
974 + qlt->fw_bin_dump_size = 0;
975 + qlt->fw_ascii_dump_size = 0;
976 + }
977 +
978 + if (qlt->qlt_fwdump_buf) {
979 + kmem_free(qlt->qlt_fwdump_buf, qlt->fw_dump_size);
980 + qlt->qlt_fwdump_buf = NULL;
981 + }
982 +
590 983 ddi_remove_minor_node(dip, qlt->qlt_minor_name);
591 984 qlt_destroy_mutex(qlt);
592 985 qlt_release_intr(qlt);
986 + if (qlt->qlt_mq_enabled == 1) {
987 + (void) qlt_mq_destroy(qlt);
988 + }
989 +
593 990 (void) ddi_dma_unbind_handle(qlt->queue_mem_dma_handle);
594 991 ddi_dma_mem_free(&qlt->queue_mem_acc_handle);
595 992 ddi_dma_free_handle(&qlt->queue_mem_dma_handle);
596 993 ddi_regs_map_free(&qlt->regs_acc_handle);
994 +
995 + if (qlt->mq_resp) {
996 + kmem_free(qlt->mq_resp,
997 + (qlt->qlt_mq_enabled ?
998 + (sizeof (qlt_mq_rsp_ptr_blk_t) * MQ_MAX_QUEUES) :
999 + (sizeof (qlt_mq_rsp_ptr_blk_t))));
1000 + }
1001 + qlt->mq_resp = NULL;
1002 + if (qlt->mq_req) {
1003 + kmem_free(qlt->mq_req,
1004 + (qlt->qlt_mq_enabled ?
1005 + (sizeof (qlt_mq_req_ptr_blk_t) * MQ_MAX_QUEUES) :
1006 + (sizeof (qlt_mq_req_ptr_blk_t))));
1007 + }
1008 + qlt->mq_req = NULL;
1009 +
1010 + if (qlt->qlt_mq_enabled == 1) {
1011 + if ((qlt->msix_acc_handle != NULL) &&
1012 + ((qlt->qlt_83xx_chip == 1) ||
1013 + (qlt->qlt_27xx_chip == 1))) {
1014 + ddi_regs_map_free(&qlt->msix_acc_handle);
1015 + }
1016 + ddi_regs_map_free(&qlt->mq_reg_acc_handle);
1017 + }
597 1018 pci_config_teardown(&qlt->pcicfg_acc_handle);
598 1019 kmem_free(qlt->nvram, sizeof (qlt_nvram_t));
599 1020 cv_destroy(&qlt->mbox_cv);
600 1021 cv_destroy(&qlt->rp_dereg_cv);
601 1022 (void) qlt_el_trace_desc_dtor(qlt);
602 1023 ddi_soft_state_free(qlt_state, instance);
603 1024
604 1025 return (DDI_SUCCESS);
605 1026 }
606 1027
607 1028 /*
608 1029 * qlt_quiesce quiesce a device attached to the system.
609 1030 */
610 1031 static int
611 1032 qlt_quiesce(dev_info_t *dip)
612 1033 {
613 1034 qlt_state_t *qlt;
614 1035 uint32_t timer;
------- 8 lines elided -------
615 1036 uint32_t stat;
616 1037
617 1038 qlt = ddi_get_soft_state(qlt_state, ddi_get_instance(dip));
618 1039 if (qlt == NULL) {
619 1040 /* Oh well.... */
620 1041 return (DDI_SUCCESS);
621 1042 }
622 1043
623 1044 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
624 1045 REG_WR16(qlt, REG_MBOX0, MBC_STOP_FIRMWARE);
1046 + REG_WR16(qlt, REG_MBOX(1), 0);
1047 + REG_WR16(qlt, REG_MBOX(2), 0);
1048 + REG_WR16(qlt, REG_MBOX(3), 0);
1049 + REG_WR16(qlt, REG_MBOX(4), 0);
1050 + REG_WR16(qlt, REG_MBOX(5), 0);
1051 + REG_WR16(qlt, REG_MBOX(6), 0);
1052 + REG_WR16(qlt, REG_MBOX(7), 0);
1053 + REG_WR16(qlt, REG_MBOX(8), 0);
625 1054 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
626 1055 for (timer = 0; timer < 30000; timer++) {
627 1056 stat = REG_RD32(qlt, REG_RISC_STATUS);
628 1057 if (stat & RISC_HOST_INTR_REQUEST) {
629 1058 if ((stat & FW_INTR_STATUS_MASK) < 0x12) {
630 1059 REG_WR32(qlt, REG_HCCR,
631 - HCCR_CMD(CLEAR_RISC_PAUSE));
1060 + HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
632 1061 break;
633 1062 }
634 1063 REG_WR32(qlt, REG_HCCR,
635 - HCCR_CMD(CLEAR_HOST_TO_RISC_INTR));
1064 + HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
636 1065 }
637 1066 drv_usecwait(100);
638 1067 }
1068 +
1069 +
1070 + /* need to ensure no one accesses the hw during the reset 100us */
1071 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
1072 + REG_WR32(qlt, REG_INTR_CTRL, 0);
1073 + mutex_enter(&qlt->mbox_lock);
1074 + if (qlt->qlt_mq_enabled == 1) {
1075 + int i;
1076 + for (i = 1; i < qlt->qlt_queue_cnt; i++) {
1077 + mutex_enter(&qlt->mq_req[i].mq_lock);
1078 + }
1079 + }
1080 + mutex_enter(&qlt->mq_req[0].mq_lock);
1081 + drv_usecwait(40);
1082 + }
1083 +
639 1084 /* Reset the chip. */
640 1085 REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET | DMA_SHUTDOWN_CTRL |
641 1086 PCI_X_XFER_CTRL);
642 1087 drv_usecwait(100);
643 1088
1089 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
1090 + mutex_exit(&qlt->mq_req[0].mq_lock);
1091 + if (qlt->qlt_mq_enabled == 1) {
1092 + int i;
1093 + for (i = 1; i < qlt->qlt_queue_cnt; i++) {
1094 + mutex_exit(&qlt->mq_req[i].mq_lock);
1095 + }
1096 + }
1097 + mutex_exit(&qlt->mbox_lock);
1098 + }
1099 +
644 1100 qlt_disable_intr(qlt);
645 1101
646 1102 return (DDI_SUCCESS);
647 1103 }
648 1104
649 1105 static void
650 1106 qlt_enable_intr(qlt_state_t *qlt)
651 1107 {
652 1108 if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
653 - (void) ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
1109 + int stat;
1110 +
1111 + stat = ddi_intr_block_enable(qlt->htable, qlt->intr_cnt);
1112 + if (stat != DDI_SUCCESS) {
1113 + stmf_trace(qlt->qlt_port_alias,
1114 + "qlt_enable_intr: ddi_intr_block_enable failed:%x",
1115 + stat);
1116 +
1117 + cmn_err(CE_WARN, "!qlt(%d): qlt_enable_intr: "
1118 + "ddi_intr_block_enable failed:%x",
1119 + qlt->instance, stat);
1120 + }
1121 +
1122 +#ifndef __sparc
1123 + else {
1124 + /* Please see CR6840537, MSI isn't re-enabled x86 */
1125 + off_t offset;
1126 + uint8_t val8;
1127 + ddi_intr_handle_impl_t *hdlp;
1128 +
1129 + if (qlt->qlt_81xx_chip || qlt->qlt_25xx_chip) {
1130 + offset = (off_t)0x8a;
1131 + } else {
1132 + offset = (off_t)0x66;
1133 + }
1134 +
1135 + hdlp = (ddi_intr_handle_impl_t *)qlt->htable[0];
1136 + if ((hdlp->ih_state == DDI_IHDL_STATE_ENABLE) &&
1137 + (hdlp->ih_type == DDI_INTR_TYPE_MSI)) {
1138 +
1139 + /* get MSI control */
1140 + val8 = pci_config_get8(qlt->pcicfg_acc_handle,
1141 + offset);
1142 +
1143 + if ((val8 & 1) == 0) {
1144 + stmf_trace(qlt->qlt_port_alias,
1145 + "qlt(%d): qlt_enable_intr: "
1146 + "MSI enable failed (%x)",
1147 + qlt->instance, val8);
1148 +
1149 + /* write enable to MSI control */
1150 + val8 = (uint8_t)(val8 | 1);
1151 + pci_config_put8(qlt->pcicfg_acc_handle,
1152 + offset, val8);
1153 +
1154 + /* read back to verify */
1155 + val8 = pci_config_get8
1156 + (qlt->pcicfg_acc_handle, offset);
1157 +
1158 + if (val8 & 1) {
1159 + stmf_trace(qlt->qlt_port_alias,
1160 + "qlt(%d): qlt_enable_intr: "
1161 + "MSI enabled kludge!(%x)",
1162 + qlt->instance, val8);
1163 + }
1164 + }
1165 + }
1166 + }
1167 +#endif /* x86 specific hack */
654 1168 } else {
655 1169 int i;
656 - for (i = 0; i < qlt->intr_cnt; i++)
657 - (void) ddi_intr_enable(qlt->htable[i]);
1170 + int stat = DDI_SUCCESS;
1171 +
1172 + for (i = 0;
1173 + ((i < qlt->intr_cnt) && (stat == DDI_SUCCESS)); i++) {
1174 + stat = ddi_intr_enable(qlt->htable[i]);
1175 + }
1176 + if (stat != DDI_SUCCESS) {
1177 + stmf_trace(qlt->qlt_port_alias,
1178 + "qlt_enable_intr: ddi_intr_enable failed:%x",
1179 + stat);
1180 +
1181 + cmn_err(CE_WARN, "!qlt(%d): qlt_enable_intr: "
1182 + "ddi_intr_enable failed:%x", qlt->instance, stat);
1183 + }
658 1184 }
659 - qlt->qlt_intr_enabled = 1;
660 1185 }
661 1186
662 1187 static void
663 1188 qlt_disable_intr(qlt_state_t *qlt)
664 1189 {
1190 + if (qlt->qlt_intr_enabled == 0) {
1191 + /* ---- If we've disabled it once, just return ---- */
1192 + return;
1193 + }
1194 +
665 1195 if (qlt->intr_cap & DDI_INTR_FLAG_BLOCK) {
666 1196 (void) ddi_intr_block_disable(qlt->htable, qlt->intr_cnt);
667 1197 } else {
668 1198 int i;
669 1199 for (i = 0; i < qlt->intr_cnt; i++)
670 1200 (void) ddi_intr_disable(qlt->htable[i]);
671 1201 }
672 1202 qlt->qlt_intr_enabled = 0;
673 1203 }
674 1204
675 1205 static void
676 1206 qlt_release_intr(qlt_state_t *qlt)
677 1207 {
678 1208 if (qlt->htable) {
679 1209 int i;
680 1210 for (i = 0; i < qlt->intr_cnt; i++) {
681 1211 (void) ddi_intr_remove_handler(qlt->htable[i]);
682 1212 (void) ddi_intr_free(qlt->htable[i]);
------- 8 lines elided -------
683 1213 }
684 1214 kmem_free(qlt->htable, (uint_t)qlt->intr_size);
685 1215 }
686 1216 qlt->htable = NULL;
687 1217 qlt->intr_pri = 0;
688 1218 qlt->intr_cnt = 0;
689 1219 qlt->intr_size = 0;
690 1220 qlt->intr_cap = 0;
691 1221 }
692 1222
693 -
694 1223 static void
695 1224 qlt_init_mutex(qlt_state_t *qlt)
696 1225 {
697 - mutex_init(&qlt->req_lock, 0, MUTEX_DRIVER,
1226 + if (qlt->qlt_mq_enabled == 1) {
1227 + int i;
1228 +
1229 + for (i = 1; i < MQ_MAX_QUEUES; i++) {
1230 + mutex_init(&qlt->mq_req[i].mq_lock, 0, MUTEX_DRIVER,
1231 + INT2PTR(qlt->intr_pri, void *));
1232 + mutex_init(&qlt->mq_resp[i].mq_lock, 0, MUTEX_DRIVER,
1233 + INT2PTR(qlt->intr_pri, void *));
1234 + }
1235 + }
1236 + mutex_init(&qlt->mq_req[0].mq_lock, 0, MUTEX_DRIVER,
698 1237 INT2PTR(qlt->intr_pri, void *));
699 1238 mutex_init(&qlt->preq_lock, 0, MUTEX_DRIVER,
700 1239 INT2PTR(qlt->intr_pri, void *));
701 1240 mutex_init(&qlt->mbox_lock, NULL, MUTEX_DRIVER,
702 1241 INT2PTR(qlt->intr_pri, void *));
703 1242 mutex_init(&qlt->intr_lock, NULL, MUTEX_DRIVER,
704 1243 INT2PTR(qlt->intr_pri, void *));
705 1244 }
706 1245
707 1246 static void
708 1247 qlt_destroy_mutex(qlt_state_t *qlt)
709 1248 {
710 - mutex_destroy(&qlt->req_lock);
1249 + if (qlt->qlt_mq_enabled == 1) {
1250 + int i;
1251 +
1252 + for (i = 1; i < MQ_MAX_QUEUES; i++) {
1253 + mutex_destroy(&qlt->mq_req[i].mq_lock);
1254 + mutex_destroy(&qlt->mq_resp[i].mq_lock);
1255 + }
1256 + }
1257 + mutex_destroy(&qlt->mq_req[0].mq_lock);
711 1258 mutex_destroy(&qlt->preq_lock);
712 1259 mutex_destroy(&qlt->mbox_lock);
713 1260 mutex_destroy(&qlt->intr_lock);
714 1261 }
715 1262
716 -
717 1263 static int
718 1264 qlt_setup_msix(qlt_state_t *qlt)
719 1265 {
720 1266 int count, avail, actual;
721 1267 int ret;
722 1268 int itype = DDI_INTR_TYPE_MSIX;
723 1269 int i;
724 1270
1271 +#ifdef __x86
1272 + if (get_hwenv() == HW_VMWARE) {
1273 + EL(qlt, "running under hypervisor, disabling MSI-X\n");
1274 + return (DDI_FAILURE);
1275 + }
1276 +#endif
1277 +
1278 + /* check 24xx revision */
1279 + if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip) &&
1280 + (!qlt->qlt_83xx_chip) && (!qlt->qlt_27xx_chip)) {
1281 + uint8_t rev_id;
1282 + rev_id = (uint8_t)
1283 + pci_config_get8(qlt->pcicfg_acc_handle, PCI_CONF_REVID);
1284 + if (rev_id < 3) {
1285 + return (DDI_FAILURE);
1286 + }
1287 + }
1288 +
725 1289 ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
726 1290 if (ret != DDI_SUCCESS || count == 0) {
727 1291 EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
728 1292 count);
729 1293 return (DDI_FAILURE);
730 1294 }
731 1295 ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
732 1296 if (ret != DDI_SUCCESS || avail == 0) {
733 1297 EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
734 1298 avail);
735 1299 return (DDI_FAILURE);
736 1300 }
737 1301 if (avail < count) {
738 1302 stmf_trace(qlt->qlt_port_alias,
739 1303 "qlt_setup_msix: nintrs=%d,avail=%d", count, avail);
740 1304 }
741 1305
1306 + if ((qlt->qlt_25xx_chip) && (qlt->qlt_mq_enabled == 0)) {
1307 + count = 2;
1308 + }
1309 +
742 1310 qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
743 1311 qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
744 1312 ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
745 1313 DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
1314 +
1315 + EL(qlt, "qlt_setup_msix: count=%d,avail=%d,actual=%d\n", count,
1316 + avail, actual);
1317 +
746 1318 /* we need at least 2 interrupt vectors */
747 - if (ret != DDI_SUCCESS || actual < 2) {
1319 + if (((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) &&
1320 + (ret != DDI_SUCCESS || actual < 2)) {
748 1321 EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
749 1322 actual);
750 1323 ret = DDI_FAILURE;
751 1324 goto release_intr;
1325 + } else if ((qlt->qlt_81xx_chip) && (ret != DDI_SUCCESS || actual < 3)) {
1326 + EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
1327 + actual);
1328 + ret = DDI_FAILURE;
1329 + goto release_intr;
1330 + } else if (ret != DDI_SUCCESS || actual < 2) {
1331 + EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
1332 + actual);
1333 + ret = DDI_FAILURE;
1334 + goto release_intr;
752 1335 }
753 1336 if (actual < count) {
754 1337 EL(qlt, "requested: %d, received: %d\n", count, actual);
755 1338 }
756 1339
757 1340 qlt->intr_cnt = actual;
758 1341 ret = ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
759 1342 if (ret != DDI_SUCCESS) {
760 1343 EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
761 1344 ret = DDI_FAILURE;
762 1345 goto release_intr;
763 1346 }
764 1347 qlt_init_mutex(qlt);
765 - for (i = 0; i < actual; i++) {
766 - ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
1348 + for (i = 0; i < qlt->intr_cnt; i++) {
1349 + ret = ddi_intr_add_handler(qlt->htable[i],
1350 + (i != 0) ? qlt_msix_resp_handler :
1351 + qlt_msix_default_handler,
767 1352 qlt, INT2PTR((uint_t)i, void *));
768 1353 if (ret != DDI_SUCCESS) {
769 1354 EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
770 1355 goto release_mutex;
771 1356 }
772 1357 }
773 1358
774 1359 (void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
775 1360 qlt->intr_flags |= QLT_INTR_MSIX;
776 1361 return (DDI_SUCCESS);
777 1362
778 1363 release_mutex:
779 1364 qlt_destroy_mutex(qlt);
780 1365 release_intr:
781 1366 for (i = 0; i < actual; i++)
782 1367 (void) ddi_intr_free(qlt->htable[i]);
783 -#if 0
784 -free_mem:
785 -#endif
1368 +
786 1369 kmem_free(qlt->htable, (uint_t)qlt->intr_size);
787 1370 qlt->htable = NULL;
788 1371 qlt_release_intr(qlt);
789 1372 return (ret);
790 1373 }
791 1374
792 -
793 1375 static int
794 1376 qlt_setup_msi(qlt_state_t *qlt)
795 1377 {
796 1378 int count, avail, actual;
797 1379 int itype = DDI_INTR_TYPE_MSI;
798 1380 int ret;
799 1381 int i;
800 1382
1383 + /* 83xx and 27xx don't do MSI - don't even bother? */
1384 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
1385 + return (DDI_FAILURE);
1386 + }
1387 +
801 1388 /* get the # of interrupts */
802 1389 ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
803 1390 if (ret != DDI_SUCCESS || count == 0) {
804 1391 EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
805 1392 count);
806 1393 return (DDI_FAILURE);
807 1394 }
808 1395 ret = ddi_intr_get_navail(qlt->dip, itype, &avail);
809 1396 if (ret != DDI_SUCCESS || avail == 0) {
810 1397 EL(qlt, "ddi_intr_get_navail status=%xh, avail=%d\n", ret,
811 1398 avail);
812 1399 return (DDI_FAILURE);
813 1400 }
814 1401 if (avail < count) {
815 1402 EL(qlt, "nintrs=%d, avail=%d\n", count, avail);
816 1403 }
817 1404 /* MSI requires only 1 interrupt. */
818 1405 count = 1;
819 1406
820 1407 /* allocate interrupt */
821 1408 qlt->intr_size = (int)(count * (int)sizeof (ddi_intr_handle_t));
822 1409 qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
823 1410 ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
824 1411 0, count, &actual, DDI_INTR_ALLOC_NORMAL);
825 1412 if (ret != DDI_SUCCESS || actual == 0) {
826 1413 EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
827 1414 actual);
828 1415 ret = DDI_FAILURE;
829 1416 goto free_mem;
830 1417 }
831 1418 if (actual < count) {
832 1419 EL(qlt, "requested: %d, received: %d\n", count, actual);
833 1420 }
834 1421 qlt->intr_cnt = actual;
835 1422
836 1423 /*
837 1424 * Get priority for first msi, assume remaining are all the same.
838 1425 */
839 1426 ret = ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
840 1427 if (ret != DDI_SUCCESS) {
841 1428 EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
842 1429 ret = DDI_FAILURE;
843 1430 goto release_intr;
844 1431 }
845 1432 qlt_init_mutex(qlt);
846 1433
847 1434 /* add handler */
848 1435 for (i = 0; i < actual; i++) {
849 1436 ret = ddi_intr_add_handler(qlt->htable[i], qlt_isr,
850 1437 qlt, INT2PTR((uint_t)i, void *));
851 1438 if (ret != DDI_SUCCESS) {
852 1439 EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
853 1440 goto release_mutex;
854 1441 }
855 1442 }
856 1443
857 1444 (void) ddi_intr_get_cap(qlt->htable[0], &qlt->intr_cap);
858 1445 qlt->intr_flags |= QLT_INTR_MSI;
859 1446 return (DDI_SUCCESS);
860 1447
861 1448 release_mutex:
862 1449 qlt_destroy_mutex(qlt);
863 1450 release_intr:
864 1451 for (i = 0; i < actual; i++)
865 1452 (void) ddi_intr_free(qlt->htable[i]);
866 1453 free_mem:
867 1454 kmem_free(qlt->htable, (uint_t)qlt->intr_size);
868 1455 qlt->htable = NULL;
869 1456 qlt_release_intr(qlt);
870 1457 return (ret);
871 1458 }
872 1459
873 1460 static int
874 1461 qlt_setup_fixed(qlt_state_t *qlt)
875 1462 {
876 1463 int count;
877 1464 int actual;
878 1465 int ret;
879 1466 int itype = DDI_INTR_TYPE_FIXED;
880 1467
881 1468 ret = ddi_intr_get_nintrs(qlt->dip, itype, &count);
882 1469 /* Fixed interrupts can only have one interrupt. */
883 1470 if (ret != DDI_SUCCESS || count != 1) {
884 1471 EL(qlt, "ddi_intr_get_nintrs status=%xh, count=%d\n", ret,
885 1472 count);
886 1473 return (DDI_FAILURE);
887 1474 }
888 1475
889 1476 qlt->intr_size = sizeof (ddi_intr_handle_t);
890 1477 qlt->htable = kmem_zalloc((uint_t)qlt->intr_size, KM_SLEEP);
891 1478 ret = ddi_intr_alloc(qlt->dip, qlt->htable, itype,
892 1479 DDI_INTR_ALLOC_NORMAL, count, &actual, 0);
893 1480 if (ret != DDI_SUCCESS || actual != 1) {
894 1481 EL(qlt, "ddi_intr_alloc status=%xh, actual=%d\n", ret,
895 1482 actual);
896 1483 ret = DDI_FAILURE;
897 1484 goto free_mem;
898 1485 }
899 1486
900 1487 qlt->intr_cnt = actual;
901 1488 ret = ddi_intr_get_pri(qlt->htable[0], &qlt->intr_pri);
902 1489 if (ret != DDI_SUCCESS) {
903 1490 EL(qlt, "ddi_intr_get_pri status=%xh\n", ret);
904 1491 ret = DDI_FAILURE;
905 1492 goto release_intr;
906 1493 }
907 1494 qlt_init_mutex(qlt);
908 1495 ret = ddi_intr_add_handler(qlt->htable[0], qlt_isr, qlt, 0);
909 1496 if (ret != DDI_SUCCESS) {
910 1497 EL(qlt, "ddi_intr_add_handler status=%xh\n", ret);
911 1498 goto release_mutex;
912 1499 }
913 1500
914 1501 qlt->intr_flags |= QLT_INTR_FIXED;
915 1502 return (DDI_SUCCESS);
916 1503
917 1504 release_mutex:
918 1505 qlt_destroy_mutex(qlt);
919 1506 release_intr:
920 1507 (void) ddi_intr_free(qlt->htable[0]);
921 1508 free_mem:
922 1509 kmem_free(qlt->htable, (uint_t)qlt->intr_size);
923 1510 qlt->htable = NULL;
924 1511 qlt_release_intr(qlt);
925 1512 return (ret);
926 1513 }
------- 116 lines elided -------
927 1514
928 1515 static int
929 1516 qlt_setup_interrupts(qlt_state_t *qlt)
930 1517 {
931 1518 int itypes = 0;
932 1519
933 1520 /*
934 1521 * x86 has a bug in the ddi_intr_block_enable/disable area (6562198).
935 1522 */
936 1523 #ifndef __sparc
937 - if (qlt_enable_msi != 0) {
1524 + if ((qlt_enable_msi != 0) || (qlt_enable_msix != 0)) {
938 1525 #endif
939 1526 if (ddi_intr_get_supported_types(qlt->dip, &itypes) != DDI_SUCCESS) {
940 1527 itypes = DDI_INTR_TYPE_FIXED;
941 1528 }
942 -
943 1529 if (qlt_enable_msix && (itypes & DDI_INTR_TYPE_MSIX)) {
944 1530 if (qlt_setup_msix(qlt) == DDI_SUCCESS)
945 1531 return (DDI_SUCCESS);
946 1532 }
947 -
948 - if (itypes & DDI_INTR_TYPE_MSI) {
1533 + if (qlt_enable_msi && (itypes & DDI_INTR_TYPE_MSI)) {
949 1534 if (qlt_setup_msi(qlt) == DDI_SUCCESS)
950 1535 return (DDI_SUCCESS);
951 1536 }
952 1537 #ifndef __sparc
953 1538 }
954 1539 #endif
955 1540 return (qlt_setup_fixed(qlt));
956 1541 }
957 1542
1543 +static uint8_t *
1544 +qlt_vpd_findtag(qlt_state_t *qlt, uint8_t *vpdbuf, int8_t *opcode)
1545 +{
1546 + uint8_t *vpd = vpdbuf;
1547 + uint8_t *end = vpdbuf + QL_24XX_VPD_SIZE;
1548 + uint32_t found = 0;
1549 +
1550 + if (vpdbuf == NULL || opcode == NULL) {
1551 + EL(qlt, "null parameter passed!\n");
1552 + return (NULL);
1553 + }
1554 +
1555 + while (vpd < end) {
1556 + if (vpd[0] == VPD_TAG_END) {
1557 + if (opcode[0] == VPD_TAG_END) {
1558 + found = 1;
1559 + } else {
1560 + found = 0;
1561 + }
1562 + break;
1563 + }
1564 +
1565 + if (bcmp(opcode, vpd, strlen(opcode)) == 0) {
1566 + found = 1;
1567 + break;
1568 + }
1569 +
1570 + if (!(strncmp((char *)vpd, (char *)VPD_TAG_PRODID, 1))) {
1571 + vpd += (vpd[2] << 8) + vpd[1] + 3;
1572 + } else if (*vpd == VPD_TAG_LRT || *vpd == VPD_TAG_LRTC) {
1573 + vpd += 3;
1574 + } else {
1575 + vpd += vpd[2] +3;
1576 + }
1577 + }
1578 + return (found == 1 ? vpd : NULL);
1579 +}
1580 +
958 1581 /*
1582 + * qlt_vpd_lookup
1583 + * Return the VPD data for the requested VPD tag
1584 + *
1585 + * Input:
1586 + * qlt = adapter state pointer.
1587 + * opcode = VPD opcode to find (must be NULL terminated).
1588 + * bp = Pointer to returned data buffer.
1589 + * bplen = Length of returned data buffer.
1590 + *
1591 + * Returns:
1592 + * Length of data copied into returned data buffer.
1593 + * >0 = VPD data field (NULL terminated)
1594 + * 0 = no data.
1595 + * -1 = Could not find opcode in vpd buffer / error.
1596 + *
1597 + * Context:
1598 + * Kernel context.
1599 + *
1600 + * NB: The opcode buffer and the bp buffer *could* be the same buffer!
1601 + *
1602 + */
1603 +static int
1604 +qlt_vpd_lookup(qlt_state_t *qlt, uint8_t *opcode, uint8_t *bp,
1605 + int32_t bplen)
1606 +{
1607 + uint8_t *vpd = NULL;
1608 + uint8_t *vpdbuf = NULL;
1609 + int32_t len = -1;
1610 +
1611 + if (opcode == NULL || bp == NULL || bplen < 1) {
1612 + EL(qlt, "invalid parameter passed: opcode=%ph, "
1613 + "bp=%ph, bplen=%xh\n", opcode, bp, bplen);
1614 + return (len);
1615 + }
1616 +
1617 + vpdbuf = (uint8_t *)qlt->vpd;
1618 + if ((vpd = qlt_vpd_findtag(qlt, vpdbuf, (int8_t *)opcode)) != NULL) {
1619 + /*
1620 + * Found the tag
1621 + */
1622 + if (*opcode == VPD_TAG_END || *opcode == VPD_TAG_LRT ||
1623 + *opcode == VPD_TAG_LRTC) {
1624 + /*
1625 + * We found it, but the tag doesn't have a data
1626 + * field.
1627 + */
1628 + len = 0;
1629 + } else if (!(strncmp((char *)vpd, (char *)
1630 + VPD_TAG_PRODID, 1))) {
1631 + len = vpd[2] << 8;
1632 + len += vpd[1];
1633 + } else {
1634 + len = vpd[2];
1635 + }
1636 +
1637 + /*
1638 + * Make sure that the vpd len does not exceed the
1639 + * vpd end
1640 + */
1641 + if (vpd+len > vpdbuf + QL_24XX_VPD_SIZE) {
1642 + EL(qlt, "vpd tag len (%xh) exceeds vpd buffer "
1643 + "length\n", len);
1644 + len = -1;
1645 + }
1646 + } else {
1647 +		EL(qlt, "Can't find vpd tag\n");
1648 + return (-1);
1649 + }
1650 +
1651 + if (len >= 0) {
1652 + /*
1653 +		 * make sure we don't exceed the caller's buffer space len
1654 + */
1655 + if (len > bplen) {
1656 + len = bplen - 1;
1657 + }
1658 + /* copy the data back */
1659 + (void) strncpy((int8_t *)bp, (int8_t *)(vpd+3), (int64_t)len);
1660 + bp[len] = NULL;
1661 + } else {
1662 + /* error -- couldn't find tag */
1663 + bp[0] = NULL;
1664 + if (opcode[1] != NULL) {
1665 + EL(qlt, "unable to find tag '%s'\n", opcode);
1666 + } else {
1667 + EL(qlt, "unable to find tag '%xh'\n", opcode[0]);
1668 + }
1669 + }
1670 + return (len);
1671 +}
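/*
 * Minimal caller sketch for qlt_vpd_lookup(), assuming only the contract
 * documented above (the returned data is NUL terminated and truncated to the
 * caller's buffer).  The local buffer name is illustrative; the in-tree user
 * is the serial-number lookup in qlt_populate_hba_fru_details() below.
 *
 *	char sn[FCHBA_SERIAL_NUMBER_LEN];
 *
 *	if (qlt_vpd_lookup(qlt, (uint8_t *)VPD_TAG_SN, (uint8_t *)sn,
 *	    FCHBA_SERIAL_NUMBER_LEN) > 0) {
 *		cmn_err(CE_NOTE, "serial number: %s", sn);
 *	}
 */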
1672 +
1673 +void
1674 +qlt_get_rom_version(qlt_state_t *qlt, caddr_t orv)
1675 +{
1676 + int i;
1677 + char bios0_str[32];
1678 + char fcode_str[32];
1679 + char efi_str[32];
1680 + char hppa_str[32];
1681 + char tmp[80];
1682 + uint32_t bios_cnt = 0;
1683 + uint32_t fcode_cnt = 0;
1684 + boolean_t last_image = FALSE;
1685 +
1686 + /* collect right rom_version from image[] */
1687 + i = 0;
1688 + do {
1689 + if (qlt->rimage[0].header.signature[0] != PCI_HEADER0) {
1690 + break;
1691 + }
1692 +
1693 + if (qlt->rimage[i].data.codetype == PCI_CODE_X86PC) {
1694 + /* BIOS */
1695 + if (bios_cnt == 0) {
1696 + (void) snprintf(bios0_str,
1697 + 32,
1698 + "%d.%02d",
1699 + qlt->rimage[i].data.
1700 + revisionlevel[1],
1701 + qlt->rimage[i].data.
1702 + revisionlevel[0]);
1703 + (void) snprintf(tmp, 80,
1704 + " BIOS: %s;", bios0_str);
1705 + (void) strcat(orv, tmp);
1706 + }
1707 + bios_cnt++;
1708 + } else if (qlt->rimage[i].data.codetype == PCI_CODE_FCODE) {
1709 + /* FCode */
1710 + if (fcode_cnt == 0) {
1711 + (void) snprintf(fcode_str,
1712 + 32,
1713 + "%d.%02d",
1714 + qlt->rimage[i].data.revisionlevel[1],
1715 + qlt->rimage[i].data.revisionlevel[0]);
1716 + (void) snprintf(tmp, 80,
1717 + " FCode: %s;", fcode_str);
1718 + (void) strcat(orv, tmp);
1719 + }
1720 + fcode_cnt++;
1721 + } else if (qlt->rimage[i].data.codetype == PCI_CODE_EFI) {
1722 + /* EFI */
1723 + (void) snprintf(efi_str,
1724 + 32,
1725 + "%d.%02d",
1726 + qlt->rimage[i].data.revisionlevel[1],
1727 + qlt->rimage[i].data.revisionlevel[0]);
1728 + (void) snprintf(tmp, 80, " EFI: %s;", efi_str);
1729 + (void) strcat(orv, tmp);
1730 + } else if (qlt->rimage[i].data.codetype == PCI_CODE_HPPA) {
1731 + /* HPPA */
1732 + (void) snprintf(hppa_str,
1733 + 32,
1734 + "%d.%02d",
1735 + qlt->rimage[i].data.revisionlevel[1],
1736 + qlt->rimage[i].data.revisionlevel[0]);
1737 +			(void) snprintf(tmp, 80, " HPPA: %s;", hppa_str);
1738 +			(void) strcat(orv, tmp);
1739 + } else if (qlt->rimage[i].data.codetype == PCI_CODE_FW) {
1740 +			EL(qlt, "fw info skip\n");
1741 + } else {
1742 + /* Unknown */
1743 + EL(qlt, "unknown image\n");
1744 + break;
1745 + }
1746 +
1747 + if (qlt->rimage[i].data.indicator == PCI_IND_LAST_IMAGE) {
1748 + last_image = TRUE;
1749 + break;
1750 + }
1751 +
1752 +		i++;
1753 + } while ((last_image != TRUE) && (i < 6));
1754 +
1755 + if (last_image != TRUE) {
1756 + /* No boot image detected */
1757 + (void) snprintf(orv, FCHBA_OPTION_ROM_VERSION_LEN, "%s",
1758 + "No boot image detected");
1759 + }
1760 +}
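/*
 * Formatting example for the "%d.%02d" strings built above, with hypothetical
 * image bytes: a BIOS image whose revisionlevel[1] is 3 and revisionlevel[0]
 * is 5 is reported as " BIOS: 3.05;" and appended to the caller's orv buffer.
 */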
1761 +
1762 +/*
959 1763 * Filling the hba attributes
960 1764 */
961 1765 void
962 1766 qlt_populate_hba_fru_details(struct fct_local_port *port,
963 1767 struct fct_port_attrs *port_attrs)
964 1768 {
965 - caddr_t bufp;
966 1769 int len;
967 1770 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
968 1771
969 1772 (void) snprintf(port_attrs->manufacturer, FCHBA_MANUFACTURER_LEN,
970 1773 "QLogic Corp.");
971 1774 (void) snprintf(port_attrs->driver_name, FCHBA_DRIVER_NAME_LEN,
972 1775 "%s", QLT_NAME);
973 1776 (void) snprintf(port_attrs->driver_version, FCHBA_DRIVER_VERSION_LEN,
974 1777 "%s", QLT_VERSION);
975 - port_attrs->serial_number[0] = '\0';
1778 + /* get serial_number from vpd data */
1779 + if (qlt_vpd_lookup(qlt, (uint8_t *)VPD_TAG_SN, (uint8_t *)
1780 + port_attrs->serial_number, FCHBA_SERIAL_NUMBER_LEN) == -1) {
1781 + port_attrs->serial_number[0] = '\0';
1782 + }
976 1783 port_attrs->hardware_version[0] = '\0';
977 1784
978 1785 (void) snprintf(port_attrs->firmware_version,
979 1786 FCHBA_FIRMWARE_VERSION_LEN, "%d.%d.%d", qlt->fw_major,
980 1787 qlt->fw_minor, qlt->fw_subminor);
981 1788
982 1789 /* Get FCode version */
983 - if (ddi_getlongprop(DDI_DEV_T_ANY, qlt->dip, PROP_LEN_AND_VAL_ALLOC |
984 - DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
985 - (int *)&len) == DDI_PROP_SUCCESS) {
986 - (void) snprintf(port_attrs->option_rom_version,
987 - FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
988 - kmem_free(bufp, (uint_t)len);
989 - bufp = NULL;
990 - } else {
991 -#ifdef __sparc
992 - (void) snprintf(port_attrs->option_rom_version,
993 - FCHBA_OPTION_ROM_VERSION_LEN, "No Fcode found");
994 -#else
995 - (void) snprintf(port_attrs->option_rom_version,
996 - FCHBA_OPTION_ROM_VERSION_LEN, "N/A");
997 -#endif
998 - }
1790 + qlt_get_rom_version(qlt, (caddr_t)&port_attrs->option_rom_version[0]);
1791 +
999 1792 port_attrs->vendor_specific_id = qlt->nvram->subsystem_vendor_id[0] |
1000 1793 qlt->nvram->subsystem_vendor_id[1] << 8;
1001 1794
1002 1795 port_attrs->max_frame_size = qlt->nvram->max_frame_length[1] << 8 |
1003 1796 qlt->nvram->max_frame_length[0];
1004 1797
1005 1798 port_attrs->supported_cos = 0x10000000;
1006 - port_attrs->supported_speed = PORT_SPEED_1G |
1007 - PORT_SPEED_2G | PORT_SPEED_4G;
1008 - if (qlt->qlt_25xx_chip)
1799 +
1800 + if (qlt->qlt_fcoe_enabled) {
1801 + port_attrs->supported_speed = PORT_SPEED_10G;
1802 + } else if (qlt->qlt_27xx_chip) {
1803 + if ((qlt->qlt_27xx_speed & MAX_SPEED_MASK) == MAX_SPEED_32G) {
1804 + port_attrs->supported_speed = PORT_SPEED_8G |
1805 + PORT_SPEED_16G | PORT_SPEED_32G;
1806 + } else {
1807 + port_attrs->supported_speed = PORT_SPEED_4G |
1808 + PORT_SPEED_8G | PORT_SPEED_16G;
1809 + }
1810 + } else if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
1811 + port_attrs->supported_speed = PORT_SPEED_4G |
1812 + PORT_SPEED_8G | PORT_SPEED_16G;
1813 + } else if (qlt->qlt_25xx_chip) {
1009 1814 port_attrs->supported_speed = PORT_SPEED_2G | PORT_SPEED_4G |
1010 1815 PORT_SPEED_8G;
1011 - if (qlt->qlt_81xx_chip)
1012 - port_attrs->supported_speed = PORT_SPEED_10G;
1816 + } else {
1817 + port_attrs->supported_speed = PORT_SPEED_1G |
1818 + PORT_SPEED_2G | PORT_SPEED_4G;
1819 + }
1013 1820
1014 1821 /* limit string length to nvr model_name length */
1015 - len = (qlt->qlt_81xx_chip) ? 16 : 8;
1822 + len = ((qlt->qlt_81xx_chip) || (qlt->qlt_83xx_chip) ||
1823 + (qlt->qlt_27xx_chip)) ? 16 : 8;
1016 1824 (void) snprintf(port_attrs->model,
1017 1825 (uint_t)(len < FCHBA_MODEL_LEN ? len : FCHBA_MODEL_LEN),
1018 1826 "%s", qlt->nvram->model_name);
1019 1827
1020 1828 (void) snprintf(port_attrs->model_description,
1021 1829 (uint_t)(len < FCHBA_MODEL_DESCRIPTION_LEN ? len :
1022 1830 FCHBA_MODEL_DESCRIPTION_LEN),
1023 1831 "%s", qlt->nvram->model_name);
1024 1832 }
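/*
 * Byte-assembly example for the little-endian NVRAM fields used above, with
 * hypothetical contents: subsystem_vendor_id[] = { 0x77, 0x10 } yields
 * vendor_specific_id 0x1077, and max_frame_length[] = { 0x00, 0x08 } yields
 * max_frame_size 0x800 (2048 bytes).
 */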
1025 1833
1026 1834 /* ARGSUSED */
1027 1835 fct_status_t
1028 1836 qlt_info(uint32_t cmd, fct_local_port_t *port,
1029 1837 void *arg, uint8_t *buf, uint32_t *bufsizep)
1030 1838 {
1031 1839 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1032 1840 mbox_cmd_t *mcp;
1033 1841 fct_status_t ret = FCT_SUCCESS;
1034 1842 uint8_t *p;
1035 1843 fct_port_link_status_t *link_status;
1036 1844
1037 1845 switch (cmd) {
1038 1846 case FC_TGT_PORT_RLS:
1847 + if (qlt->qlt_state != FCT_STATE_ONLINE) {
1848 + break;
1849 + }
1039 1850 if ((*bufsizep) < sizeof (fct_port_link_status_t)) {
1040 1851 EL(qlt, "FC_TGT_PORT_RLS bufsizep=%xh < "
1041 1852 "fct_port_link_status_t=%xh\n", *bufsizep,
1042 1853 sizeof (fct_port_link_status_t));
1043 1854 ret = FCT_FAILURE;
1044 1855 break;
1045 1856 }
1046 1857 /* send mailbox command to get link status */
1047 1858 mcp = qlt_alloc_mailbox_command(qlt, 156);
1048 1859 if (mcp == NULL) {
1049 1860 EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1050 1861 ret = FCT_ALLOC_FAILURE;
1051 1862 break;
1052 1863 }
1053 1864
1054 1865 /* GET LINK STATUS count */
1055 1866 mcp->to_fw[0] = MBC_GET_STATUS_COUNTS;
1056 1867 mcp->to_fw[8] = 156/4;
1057 1868 mcp->to_fw_mask |= BIT_1 | BIT_8;
1058 1869 mcp->from_fw_mask |= BIT_1 | BIT_2;
1059 1870
1060 1871 ret = qlt_mailbox_command(qlt, mcp);
1061 1872 if (ret != QLT_SUCCESS) {
1062 - EL(qlt, "qlt_mailbox_command=6dh status=%llxh\n", ret);
1873 + EL(qlt, "qlt_mbox_command=6dh status=%llxh\n", ret);
1063 1874 qlt_free_mailbox_command(qlt, mcp);
1064 1875 break;
1065 1876 }
1066 1877 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1067 1878
1068 1879 p = mcp->dbuf->db_sglist[0].seg_addr;
1069 1880 link_status = (fct_port_link_status_t *)buf;
1070 1881 link_status->LinkFailureCount = LE_32(*((uint32_t *)p));
1071 1882 link_status->LossOfSyncCount = LE_32(*((uint32_t *)(p + 4)));
1072 1883 link_status->LossOfSignalsCount = LE_32(*((uint32_t *)(p + 8)));
1073 1884 link_status->PrimitiveSeqProtocolErrorCount =
1074 1885 LE_32(*((uint32_t *)(p + 12)));
1075 1886 link_status->InvalidTransmissionWordCount =
1076 1887 LE_32(*((uint32_t *)(p + 16)));
1077 1888 link_status->InvalidCRCCount =
1078 1889 LE_32(*((uint32_t *)(p + 20)));
1079 1890
1080 1891 qlt_free_mailbox_command(qlt, mcp);
1081 1892 break;
1082 1893 default:
1083 1894 EL(qlt, "Unknown cmd=%xh\n", cmd);
1084 1895 ret = FCT_FAILURE;
1085 1896 break;
1086 1897 }
1087 1898 return (ret);
1088 1899 }
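/*
 * Layout of the 156-byte MBC_GET_STATUS_COUNTS buffer as consumed by the
 * FC_TGT_PORT_RLS case above (each field a 32-bit little-endian counter):
 * offset 0x00 link failures, 0x04 loss of sync, 0x08 loss of signal,
 * 0x0c primitive sequence protocol errors, 0x10 invalid transmission words,
 * 0x14 invalid CRCs.  Only these first six counters are copied into the
 * fct_port_link_status_t reply.
 */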
1089 1900
1090 1901 fct_status_t
1091 1902 qlt_port_start(caddr_t arg)
1092 1903 {
1093 1904 qlt_state_t *qlt = (qlt_state_t *)arg;
1094 1905 fct_local_port_t *port;
1095 1906 fct_dbuf_store_t *fds;
1096 1907 fct_status_t ret;
1097 1908
1098 1909 if (qlt_dmem_init(qlt) != QLT_SUCCESS) {
1099 1910 return (FCT_FAILURE);
1100 1911 }
1912 +
1101 1913 /* Initialize the ddi_dma_handle free pool */
1102 1914 qlt_dma_handle_pool_init(qlt);
1103 1915
1104 1916 port = (fct_local_port_t *)fct_alloc(FCT_STRUCT_LOCAL_PORT, 0, 0);
1105 1917 if (port == NULL) {
1106 1918 goto qlt_pstart_fail_1;
1107 1919 }
1108 1920 fds = (fct_dbuf_store_t *)fct_alloc(FCT_STRUCT_DBUF_STORE, 0, 0);
1109 1921 if (fds == NULL) {
1110 1922 goto qlt_pstart_fail_2;
1111 1923 }
1112 1924 qlt->qlt_port = port;
1113 1925 fds->fds_alloc_data_buf = qlt_dmem_alloc;
1114 1926 fds->fds_free_data_buf = qlt_dmem_free;
1115 1927 fds->fds_setup_dbuf = qlt_dma_setup_dbuf;
1116 1928 fds->fds_teardown_dbuf = qlt_dma_teardown_dbuf;
1117 1929 fds->fds_max_sgl_xfer_len = QLT_DMA_SG_LIST_LENGTH * MMU_PAGESIZE;
1118 - fds->fds_copy_threshold = MMU_PAGESIZE;
1930 + fds->fds_copy_threshold = (uint32_t)MMU_PAGESIZE;
1119 1931 fds->fds_fca_private = (void *)qlt;
1120 1932 /*
1121 1933 	 * Since we keep everything in the state struct and don't allocate any
1122 1934 * port private area, just use that pointer to point to the
1123 1935 * state struct.
1124 1936 */
1125 1937 port->port_fca_private = qlt;
1126 1938 port->port_fca_abort_timeout = 5 * 1000; /* 5 seconds */
1127 1939 bcopy(qlt->nvram->node_name, port->port_nwwn, 8);
1128 1940 bcopy(qlt->nvram->port_name, port->port_pwwn, 8);
1129 1941 fct_wwn_to_str(port->port_nwwn_str, port->port_nwwn);
1130 1942 fct_wwn_to_str(port->port_pwwn_str, port->port_pwwn);
1131 1943 port->port_default_alias = qlt->qlt_port_alias;
1132 1944 port->port_pp = qlt_pp;
1133 1945 port->port_fds = fds;
1134 1946 port->port_max_logins = QLT_MAX_LOGINS;
1135 1947 port->port_max_xchges = QLT_MAX_XCHGES;
1136 1948 port->port_fca_fcp_cmd_size = sizeof (qlt_cmd_t);
1137 1949 port->port_fca_rp_private_size = sizeof (qlt_remote_port_t);
1138 1950 port->port_fca_sol_els_private_size = sizeof (qlt_cmd_t);
1139 1951 port->port_fca_sol_ct_private_size = sizeof (qlt_cmd_t);
1140 1952 port->port_get_link_info = qlt_get_link_info;
1141 1953 port->port_register_remote_port = qlt_register_remote_port;
1142 1954 port->port_deregister_remote_port = qlt_deregister_remote_port;
1143 1955 port->port_send_cmd = qlt_send_cmd;
1144 1956 port->port_xfer_scsi_data = qlt_xfer_scsi_data;
1145 1957 port->port_send_cmd_response = qlt_send_cmd_response;
1146 1958 port->port_abort_cmd = qlt_abort_cmd;
1147 1959 port->port_ctl = qlt_ctl;
1148 1960 port->port_flogi_xchg = qlt_do_flogi;
1149 1961 port->port_populate_hba_details = qlt_populate_hba_fru_details;
1150 1962 port->port_info = qlt_info;
1151 1963 port->port_fca_version = FCT_FCA_MODREV_1;
1152 1964
1153 1965 if ((ret = fct_register_local_port(port)) != FCT_SUCCESS) {
1154 1966 EL(qlt, "fct_register_local_port status=%llxh\n", ret);
1155 1967 goto qlt_pstart_fail_2_5;
1156 1968 }
1157 1969
1970 + EL(qlt, "Qlogic qlt(%d) "
1971 + "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x:"
1972 + "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
1973 + qlt->instance,
1974 + qlt->nvram->port_name[0],
1975 + qlt->nvram->port_name[1],
1976 + qlt->nvram->port_name[2],
1977 + qlt->nvram->port_name[3],
1978 + qlt->nvram->port_name[4],
1979 + qlt->nvram->port_name[5],
1980 + qlt->nvram->port_name[6],
1981 + qlt->nvram->port_name[7],
1982 + qlt->nvram->node_name[0],
1983 + qlt->nvram->node_name[1],
1984 + qlt->nvram->node_name[2],
1985 + qlt->nvram->node_name[3],
1986 + qlt->nvram->node_name[4],
1987 + qlt->nvram->node_name[5],
1988 + qlt->nvram->node_name[6],
1989 + qlt->nvram->node_name[7]);
1990 +
1158 1991 return (QLT_SUCCESS);
1159 1992 #if 0
1160 1993 qlt_pstart_fail_3:
1161 1994 (void) fct_deregister_local_port(port);
1162 1995 #endif
1163 1996 qlt_pstart_fail_2_5:
1164 1997 fct_free(fds);
1165 1998 qlt_pstart_fail_2:
1166 1999 fct_free(port);
1167 2000 qlt->qlt_port = NULL;
1168 2001 qlt_pstart_fail_1:
1169 2002 qlt_dma_handle_pool_fini(qlt);
1170 2003 qlt_dmem_fini(qlt);
1171 2004 return (QLT_FAILURE);
1172 2005 }
1173 2006
1174 2007 fct_status_t
1175 2008 qlt_port_stop(caddr_t arg)
1176 2009 {
1177 2010 qlt_state_t *qlt = (qlt_state_t *)arg;
1178 2011 fct_status_t ret;
1179 2012
1180 2013 if ((ret = fct_deregister_local_port(qlt->qlt_port)) != FCT_SUCCESS) {
1181 2014 		EL(qlt, "fct_deregister_local_port status=%llxh\n", ret);
1182 2015 return (QLT_FAILURE);
1183 2016 }
1184 2017 fct_free(qlt->qlt_port->port_fds);
1185 2018 fct_free(qlt->qlt_port);
1186 - qlt->qlt_port = NULL;
1187 2019 qlt_dma_handle_pool_fini(qlt);
2020 + qlt->qlt_port = NULL;
1188 2021 qlt_dmem_fini(qlt);
1189 2022 return (QLT_SUCCESS);
1190 2023 }
1191 2024
1192 2025 /*
1193 2026 * Called by framework to init the HBA.
1194 2027 * Can be called in the middle of I/O. (Why ??)
1195 2028  * Should make sure the state is sane both before and after the initialization
1196 2029 */
1197 2030 fct_status_t
1198 2031 qlt_port_online(qlt_state_t *qlt)
1199 2032 {
1200 2033 uint64_t da;
1201 - int instance, i;
2034 + int instance, i, j;
1202 2035 fct_status_t ret;
1203 2036 uint16_t rcount;
1204 2037 caddr_t icb;
1205 2038 mbox_cmd_t *mcp;
1206 2039 uint8_t *elsbmp;
1207 2040
1208 2041 instance = ddi_get_instance(qlt->dip);
1209 2042
1210 2043 /* XXX Make sure a sane state */
1211 2044
1212 2045 if ((ret = qlt_download_fw(qlt)) != QLT_SUCCESS) {
1213 - cmn_err(CE_NOTE, "reset chip failed %llx", (long long)ret);
2046 + cmn_err(CE_NOTE, "qlt(%d): reset chip failed %llx",
2047 + qlt->instance, (long long)ret);
1214 2048 return (ret);
1215 2049 }
1216 2050
1217 2051 bzero(qlt->queue_mem_ptr, TOTAL_DMA_MEM_SIZE);
1218 2052
1219 2053 /* Get resource count */
1220 2054 REG_WR16(qlt, REG_MBOX(0), MBC_GET_RESOURCE_COUNTS);
1221 2055 ret = qlt_raw_mailbox_command(qlt);
1222 2056 rcount = REG_RD16(qlt, REG_MBOX(3));
1223 2057 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1224 2058 if (ret != QLT_SUCCESS) {
1225 2059 EL(qlt, "qlt_raw_mailbox_command=42h status=%llxh\n", ret);
1226 2060 return (ret);
1227 2061 }
1228 2062
1229 2063 /* Enable PUREX */
1230 2064 REG_WR16(qlt, REG_MBOX(0), MBC_SET_ADDITIONAL_FIRMWARE_OPT);
1231 2065 REG_WR16(qlt, REG_MBOX(1), OPT_PUREX_ENABLE);
1232 2066 REG_WR16(qlt, REG_MBOX(2), 0x0);
1233 2067 REG_WR16(qlt, REG_MBOX(3), 0x0);
1234 2068 ret = qlt_raw_mailbox_command(qlt);
1235 2069 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1236 2070 if (ret != QLT_SUCCESS) {
1237 2071 EL(qlt, "qlt_raw_mailbox_command=38h status=%llxh\n", ret);
1238 2072 cmn_err(CE_NOTE, "Enable PUREX failed");
1239 2073 return (ret);
1240 2074 }
1241 2075
1242 2076 /* Pass ELS bitmap to fw */
1243 2077 REG_WR16(qlt, REG_MBOX(0), MBC_SET_PARAMETERS);
1244 2078 REG_WR16(qlt, REG_MBOX(1), PARAM_TYPE(PUREX_ELS_CMDS));
1245 2079 elsbmp = (uint8_t *)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET;
1246 2080 bzero(elsbmp, 32);
1247 2081 da = qlt->queue_mem_cookie.dmac_laddress;
1248 2082 da += MBOX_DMA_MEM_OFFSET;
1249 2083 REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
1250 2084 REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
1251 2085 REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
1252 2086 REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
1253 2087 SETELSBIT(elsbmp, ELS_OP_PLOGI);
1254 2088 SETELSBIT(elsbmp, ELS_OP_LOGO);
1255 2089 SETELSBIT(elsbmp, ELS_OP_ABTX);
1256 - SETELSBIT(elsbmp, ELS_OP_ECHO);
2090 +/* SETELSBIT(elsbmp, ELS_OP_ECHO); till fct handles it */
1257 2091 SETELSBIT(elsbmp, ELS_OP_PRLI);
1258 2092 SETELSBIT(elsbmp, ELS_OP_PRLO);
1259 2093 SETELSBIT(elsbmp, ELS_OP_SCN);
1260 2094 SETELSBIT(elsbmp, ELS_OP_TPRLO);
1261 2095 SETELSBIT(elsbmp, ELS_OP_PDISC);
1262 2096 SETELSBIT(elsbmp, ELS_OP_ADISC);
1263 2097 SETELSBIT(elsbmp, ELS_OP_RSCN);
1264 2098 SETELSBIT(elsbmp, ELS_OP_RNID);
1265 2099 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, MBOX_DMA_MEM_OFFSET, 32,
1266 2100 DDI_DMA_SYNC_FORDEV);
1267 2101 ret = qlt_raw_mailbox_command(qlt);
1268 2102 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
1269 2103 if (ret != QLT_SUCCESS) {
1270 2104 		EL(qlt, "qlt_raw_mailbox_command=59h status=%llxh\n", ret);
1271 2105 		cmn_err(CE_NOTE, "Set ELS Bitmap failed ret=%llx, "
1272 2106 		    "elsbmp0=%x elsbmp1=%x", (long long)ret, elsbmp[0],
1273 2107 elsbmp[1]);
1274 2108 return (ret);
1275 2109 }
1276 2110
1277 2111 /* Init queue pointers */
1278 - REG_WR32(qlt, REG_REQ_IN_PTR, 0);
1279 - REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
1280 - REG_WR32(qlt, REG_RESP_IN_PTR, 0);
1281 - REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
1282 - REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
1283 - REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
1284 - REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
1285 - REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
1286 - qlt->req_ndx_to_fw = qlt->req_ndx_from_fw = 0;
1287 - qlt->req_available = REQUEST_QUEUE_ENTRIES - 1;
1288 - qlt->resp_ndx_to_fw = qlt->resp_ndx_from_fw = 0;
2112 + if (qlt->qlt_mq_enabled == 1) {
2113 + uint16_t qi;
2114 +
2115 + for (qi = 0; qi < MQ_MAX_QUEUES; qi++) {
2116 + MQBAR_WR32(qlt,
2117 + (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_IN, 0);
2118 + MQBAR_WR32(qlt,
2119 + (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT, 0);
2120 + MQBAR_WR32(qlt,
2121 + (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN, 0);
2122 + MQBAR_WR32(qlt,
2123 + (qi * MQBAR_REG_OFFSET) +
2124 + MQBAR_RESP_OUT, 0);
2125 + }
2126 + } else {
2127 + REG_WR32(qlt, REG_REQ_IN_PTR, 0);
2128 + REG_WR32(qlt, REG_REQ_OUT_PTR, 0);
2129 + REG_WR32(qlt, REG_RESP_IN_PTR, 0);
2130 + REG_WR32(qlt, REG_RESP_OUT_PTR, 0);
2131 + }
2132 +
2133 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
2134 + REG_WR32(qlt, REG_PREQ_IN_PTR, 0);
2135 + REG_WR32(qlt, REG_PREQ_OUT_PTR, 0);
2136 + REG_WR32(qlt, REG_ATIO_IN_PTR, 0);
2137 + REG_WR32(qlt, REG_ATIO_OUT_PTR, 0);
2138 + }
2139 + qlt->mq_req[0].mq_ndx_to_fw = qlt->mq_req[0].mq_ndx_from_fw = 0;
2140 + qlt->mq_req[0].mq_available = REQUEST_QUEUE_ENTRIES - 1;
2141 +
2142 + if (qlt->qlt_mq_enabled == 1) {
2143 + for (i = 1; i < qlt->qlt_queue_cnt; i++) {
2144 + qlt->mq_req[i].mq_ndx_to_fw = 0;
2145 + qlt->mq_req[i].mq_ndx_from_fw = 0;
2146 + qlt->mq_req[i].mq_available =
2147 + REQUEST_QUEUE_MQ_ENTRIES - 1;
2148 + }
2149 + }
2150 + qlt->mq_resp[0].mq_ndx_to_fw = qlt->mq_resp[0].mq_ndx_from_fw = 0;
2151 +
2152 + if (qlt->qlt_mq_enabled == 1) {
2153 + caddr_t resp;
2154 +
2155 + for (i = 1; i < qlt->qlt_queue_cnt; i++) {
2156 + qlt->mq_resp[i].mq_ndx_to_fw = 0;
2157 + qlt->mq_resp[i].mq_ndx_from_fw = 0;
2158 + for (j = 0; j < RESPONSE_QUEUE_MQ_ENTRIES; j++) {
2159 + resp = &qlt->mq_resp[i].mq_ptr[j << 6];
2160 + QMEM_WR32_RSPQ(qlt, i, resp+0x3c, 0xdeadbeef);
2161 + }
2162 + }
2163 + }
2164 +
2165 + for (i = 0; i < ATIO_QUEUE_ENTRIES; i++) {
2166 + caddr_t atio;
2167 +
2168 + atio = &qlt->atio_ptr[i << 6];
2169 + QMEM_WR32(qlt, atio+0x3c, 0xdeadbeef);
2170 + }
2171 +
1289 2172 qlt->preq_ndx_to_fw = qlt->preq_ndx_from_fw = 0;
1290 2173 qlt->atio_ndx_to_fw = qlt->atio_ndx_from_fw = 0;
1291 2174
1292 2175 /*
1293 2176 * XXX support for tunables. Also should we cache icb ?
1294 2177 */
1295 - if (qlt->qlt_81xx_chip) {
1296 - /* allocate extra 64 bytes for Extended init control block */
1297 - mcp = qlt_alloc_mailbox_command(qlt, 0xC0);
2178 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip) ||
2179 + (qlt->qlt_27xx_chip) || ((qlt->qlt_25xx_chip) &&
2180 + (qlt->qlt_mq_enabled))) {
2181 + /*
2182 + * allocate extra 64 bytes for Extended init control block,
2183 + * with separation to allow for a minimal MID section.
2184 + */
2185 + mcp = qlt_alloc_mailbox_command(qlt, 0xE0);
1298 2186 } else {
1299 2187 mcp = qlt_alloc_mailbox_command(qlt, 0x80);
1300 2188 }
1301 2189 if (mcp == NULL) {
1302 2190 EL(qlt, "qlt_alloc_mailbox_command mcp=null\n");
1303 2191 return (STMF_ALLOC_FAILURE);
1304 2192 }
1305 2193 icb = (caddr_t)mcp->dbuf->db_sglist[0].seg_addr;
1306 - if (qlt->qlt_81xx_chip) {
1307 - bzero(icb, 0xC0);
2194 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip) ||
2195 + (qlt->qlt_27xx_chip) || ((qlt->qlt_25xx_chip) &&
2196 + (qlt->qlt_mq_enabled))) {
2197 + bzero(icb, 0xE0);
1308 2198 } else {
1309 2199 bzero(icb, 0x80);
1310 2200 }
1311 2201 da = qlt->queue_mem_cookie.dmac_laddress;
1312 2202 DMEM_WR16(qlt, icb, 1); /* Version */
1313 2203 DMEM_WR16(qlt, icb+4, 2112); /* Max frame length */
1314 2204 DMEM_WR16(qlt, icb+6, 16); /* Execution throttle */
1315 2205 DMEM_WR16(qlt, icb+8, rcount); /* Xchg count */
1316 2206 DMEM_WR16(qlt, icb+0x0a, 0x00); /* Hard address (not used) */
1317 2207 bcopy(qlt->qlt_port->port_pwwn, icb+0x0c, 8);
1318 2208 bcopy(qlt->qlt_port->port_nwwn, icb+0x14, 8);
1319 2209 DMEM_WR16(qlt, icb+0x20, 3); /* Login retry count */
1320 2210 DMEM_WR16(qlt, icb+0x24, RESPONSE_QUEUE_ENTRIES);
1321 2211 DMEM_WR16(qlt, icb+0x26, REQUEST_QUEUE_ENTRIES);
1322 - if (!qlt->qlt_81xx_chip) {
2212 + if ((!qlt->qlt_83xx_chip) && (!qlt->qlt_81xx_chip) &&
2213 + (!qlt->qlt_27xx_chip)) {
1323 2214 DMEM_WR16(qlt, icb+0x28, 100); /* ms of NOS/OLS for Link down */
1324 2215 }
1325 - DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
2216 + if ((!qlt->qlt_83xx_chip) || (!qlt->qlt_27xx_chip)) {
2217 + DMEM_WR16(qlt, icb+0x2a, PRIORITY_QUEUE_ENTRIES);
2218 + }
1326 2219 DMEM_WR64(qlt, icb+0x2c, (da+REQUEST_QUEUE_OFFSET));
1327 2220 DMEM_WR64(qlt, icb+0x34, (da+RESPONSE_QUEUE_OFFSET));
1328 - DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
2221 + if ((!qlt->qlt_83xx_chip) || (!qlt->qlt_27xx_chip)) {
2222 + DMEM_WR64(qlt, icb+0x3c, (da+PRIORITY_QUEUE_OFFSET));
2223 + }
2224 +	/* XXX: all HBA models use vector 0 for ATIO/response queue 0 */
1329 2225 DMEM_WR16(qlt, icb+0x4e, ATIO_QUEUE_ENTRIES);
1330 2226 DMEM_WR64(qlt, icb+0x50, (da+ATIO_QUEUE_OFFSET));
1331 2227 DMEM_WR16(qlt, icb+0x58, 2); /* Interrupt delay Timer */
1332 2228 DMEM_WR16(qlt, icb+0x5a, 4); /* Login timeout (secs) */
1333 - if (qlt->qlt_81xx_chip) {
2229 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip) ||
2230 + (qlt->qlt_27xx_chip) || ((qlt->qlt_25xx_chip) &&
2231 + (qlt->qlt_mq_enabled))) {
1334 2232 qlt_nvram_81xx_t *qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1335 2233
1336 - DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4); /* fw options 1 */
1337 - DMEM_WR32(qlt, icb+0x64, BIT_20 | BIT_4); /* fw options 3 */
2234 + /* fw options 1 */
2235 + if (qlt->qlt_fcoe_enabled) {
2236 + DMEM_WR32(qlt, icb+0x5c, BIT_5 | BIT_4);
2237 + } else {
2238 + DMEM_WR32(qlt, icb+0x5c,
2239 + BIT_11 | BIT_5 | BIT_4 | BIT_2 | BIT_1 | BIT_0);
2240 + }
2241 + /* fw options 2 */
2242 + if (qlt->qlt_mq_enabled) {
2243 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
2244 + if (qlt->qlt_fcoe_enabled) {
2245 + DMEM_WR32(qlt, icb+0x60,
2246 + BIT_26 | BIT_23 | BIT_22);
2247 + } else {
2248 + DMEM_WR32(qlt, icb+0x60,
2249 + BIT_26 | BIT_23 | BIT_22 | BIT_5);
2250 + }
2251 + } else {
2252 + DMEM_WR32(qlt,
2253 + icb+0x60, BIT_26 | BIT_23 | BIT_22 | BIT_5);
2254 + }
2255 + }
2256 +
2257 + /* fw options 3 */
2258 + if (qlt->qlt_fcoe_enabled) {
2259 + DMEM_WR32(qlt, icb+0x64, BIT_4);
2260 + } else {
2261 + DMEM_WR32(qlt, icb+0x64,
2262 + BIT_14 | BIT_8 | BIT_7 | BIT_4);
2263 + }
2264 +
2265 + if (qlt->qlt_mq_enabled) {
2266 + DMEM_WR16(qlt, icb+0x68, 5); /* QoS priority = 5 */
2267 + }
2268 +
1338 2269 DMEM_WR32(qlt, icb+0x70,
1339 2270 qlt81nvr->enode_mac[0] |
1340 2271 (qlt81nvr->enode_mac[1] << 8) |
1341 2272 (qlt81nvr->enode_mac[2] << 16) |
1342 2273 (qlt81nvr->enode_mac[3] << 24));
1343 2274 DMEM_WR16(qlt, icb+0x74,
1344 2275 qlt81nvr->enode_mac[4] |
1345 2276 (qlt81nvr->enode_mac[5] << 8));
1346 - } else {
1347 - DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
1348 - BIT_2 | BIT_1 | BIT_0);
1349 - DMEM_WR32(qlt, icb+0x60, BIT_5);
1350 - DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
1351 - BIT_4);
1352 - }
2277 + } else {
2278 + DMEM_WR32(qlt, icb+0x5c, BIT_11 | BIT_5 | BIT_4 |
2279 + BIT_2 | BIT_1 | BIT_0);
2280 + DMEM_WR32(qlt, icb+0x60, BIT_5);
2281 + DMEM_WR32(qlt, icb+0x64, BIT_14 | BIT_8 | BIT_7 |
2282 + BIT_4);
1353 2283
1354 - if (qlt->qlt_81xx_chip) {
2284 +/* null MID setup */
2285 + DMEM_WR16(qlt, icb+0x80, 1); /* VP count 1 */
2286 + }
2287 +
2288 + if (qlt->qlt_fcoe_enabled) {
1355 2289 qlt_dmem_bctl_t *bctl;
1356 2290 uint32_t index;
1357 2291 caddr_t src;
1358 2292 caddr_t dst;
1359 2293 qlt_nvram_81xx_t *qlt81nvr;
1360 2294
1361 - dst = icb+0x80;
2295 + dst = icb+0xA0;
1362 2296 qlt81nvr = (qlt_nvram_81xx_t *)qlt->nvram;
1363 2297 src = (caddr_t)&qlt81nvr->ext_blk;
1364 2298 index = sizeof (qlt_ext_icb_81xx_t);
1365 2299
1366 2300 /* Use defaults for cases where we find nothing in NVR */
1367 - if (*src == 0) {
1368 - EL(qlt, "nvram eicb=null\n");
1369 - cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
1370 - instance);
2301 + if ((qlt->qlt_83xx_chip) || (*src == 0)) {
2302 + if (*src == 0) {
2303 + EL(qlt, "nvram eicb=null\n");
2304 + cmn_err(CE_NOTE, "qlt(%d) NVR eicb is zeroed",
2305 + instance);
2306 + }
1371 2307 qlt81nvr->ext_blk.version[0] = 1;
1372 2308 /*
1373 2309 * not yet, for !FIP firmware at least
1374 2310 *
1375 2311 * qlt81nvr->ext_blk.fcf_vlan_match = 0x81;
1376 2312 */
1377 2313 #ifdef _LITTLE_ENDIAN
1378 2314 qlt81nvr->ext_blk.fcf_vlan_id[0] = 0xEA;
1379 2315 qlt81nvr->ext_blk.fcf_vlan_id[1] = 0x03;
1380 2316 #else
1381 2317 qlt81nvr->ext_blk.fcf_vlan_id[1] = 0xEA;
1382 2318 qlt81nvr->ext_blk.fcf_vlan_id[0] = 0x03;
1383 2319 #endif
1384 2320 }
1385 2321
1386 2322 while (index--) {
1387 2323 *dst++ = *src++;
1388 2324 }
1389 2325
1390 2326 bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
1391 - da = bctl->bctl_dev_addr + 0x80; /* base addr of eicb (phys) */
2327 + da = bctl->bctl_dev_addr + 0xA0; /* base addr of eicb (phys) */
1392 2328
1393 2329 mcp->to_fw[11] = LSW(LSD(da));
1394 2330 mcp->to_fw[10] = MSW(LSD(da));
1395 2331 mcp->to_fw[13] = LSW(MSD(da));
1396 2332 mcp->to_fw[12] = MSW(MSD(da));
1397 2333 mcp->to_fw[14] = (uint16_t)(sizeof (qlt_ext_icb_81xx_t) &
1398 2334 0xffff);
1399 2335
1400 2336 /* eicb enable */
1401 2337 mcp->to_fw[1] = (uint16_t)(mcp->to_fw[1] | BIT_0);
1402 2338 mcp->to_fw_mask |= BIT_14 | BIT_13 | BIT_12 | BIT_11 | BIT_10 |
1403 2339 BIT_1;
1404 2340 }
1405 2341
1406 2342 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORDEV);
1407 - mcp->to_fw[0] = MBC_INITIALIZE_FIRMWARE;
2343 + if (((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip) ||
2344 + (qlt->qlt_27xx_chip) || ((qlt->qlt_25xx_chip) &&
2345 + (qlt->qlt_mq_enabled))) && (qlt->fw_attr & BIT_6)) {
2346 + mcp->to_fw[0] = MBC_INITIALIZE_MULTI_ID_FW;
2347 + } else {
2348 + mcp->to_fw[0] = MBC_INITIALIZE_FIRMWARE;
2349 + }
1408 2350
1409 2351 /*
1410 2352 * This is the 1st command after adapter initialize which will
1411 2353 * use interrupts and regular mailbox interface.
1412 2354 */
2355 + qlt->qlt_intr_enabled = 1;
1413 2356 qlt->mbox_io_state = MBOX_STATE_READY;
1414 2357 REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
1415 2358 /* Issue mailbox to firmware */
1416 2359 ret = qlt_mailbox_command(qlt, mcp);
1417 2360 if (ret != QLT_SUCCESS) {
1418 - EL(qlt, "qlt_mailbox_command=60h status=%llxh\n", ret);
2361 + EL(qlt, "qlt_mbox_command=48h/60h status=%llxh\n", ret);
1419 2362 cmn_err(CE_NOTE, "qlt(%d) init fw failed %llx, intr status %x",
1420 2363 instance, (long long)ret, REG_RD32(qlt, REG_INTR_STATUS));
2364 + qlt_free_mailbox_command(qlt, mcp);
2365 + return (ret);
1421 2366 }
1422 2367
1423 2368 mcp->to_fw_mask = BIT_0;
1424 2369 mcp->from_fw_mask = BIT_0 | BIT_1;
1425 2370 mcp->to_fw[0] = 0x28;
1426 2371 ret = qlt_mailbox_command(qlt, mcp);
1427 2372 if (ret != QLT_SUCCESS) {
1428 - EL(qlt, "qlt_mailbox_command=28h status=%llxh\n", ret);
2373 + EL(qlt, "qlt_mbox_command=28h status=%llxh\n", ret);
1429 2374 cmn_err(CE_NOTE, "qlt(%d) get_fw_options %llx", instance,
1430 2375 (long long)ret);
2376 + qlt_free_mailbox_command(qlt, mcp);
2377 + return (ret);
1431 2378 }
1432 2379
2380 + if (qlt->qlt_mq_enabled == 1) {
2381 +
2382 + for (i = 1; i < qlt->qlt_queue_cnt; i++) {
2383 + da = qlt->mq_resp[i].queue_mem_mq_cookie.dmac_laddress;
2384 +
2385 + mcp->to_fw_mask = BIT_14 | BIT_13 | BIT_12 | BIT_11 |
2386 + BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 |
2387 + BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0;
2388 + mcp->from_fw_mask = BIT_0 | BIT_1;
2389 +
2390 + /* msix vector setup */
2391 + mcp->to_fw[14] = (uint16_t)(i);
2392 +
2393 + mcp->to_fw[13] = 0;
2394 + mcp->to_fw[12] = 0;
2395 + mcp->to_fw[11] = 0;
2396 + mcp->to_fw[10] = 0;
2397 + mcp->to_fw[9] = 0;
2398 + mcp->to_fw[8] = 0;
2399 + mcp->to_fw[7] = LSW(MSD(da));
2400 + mcp->to_fw[6] = MSW(MSD(da));
2401 + mcp->to_fw[5] = RESPONSE_QUEUE_MQ_ENTRIES;
2402 + mcp->to_fw[4] = (uint16_t)(i);
2403 + mcp->to_fw[3] = LSW(LSD(da));
2404 + mcp->to_fw[2] = MSW(LSD(da));
2405 + mcp->to_fw[1] = BIT_6 | BIT_1;
2406 + mcp->to_fw[0] = 0x1F;
2407 + ret = qlt_mailbox_command(qlt, mcp);
2408 +
2409 + if (ret != QLT_SUCCESS) {
2410 + EL(qlt, "qlt_mbox_command=1fh status=%llxh\n",
2411 + ret);
2412 + cmn_err(CE_NOTE, "qlt(%d) queue manage %llx",
2413 + instance, (long long)ret);
2414 + qlt_free_mailbox_command(qlt, mcp);
2415 + return (ret);
2416 + }
2417 +
2418 + da = qlt->mq_req[i].queue_mem_mq_cookie.dmac_laddress;
2419 +
2420 + mcp->to_fw_mask = BIT_14 | BIT_13 | BIT_12 | BIT_11 |
2421 + BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 |
2422 + BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0;
2423 + mcp->from_fw_mask = BIT_0 | BIT_1;
2424 +
2425 + /*
2426 + * msix vector does not apply for request queue create
2427 + */
2428 + mcp->to_fw[14] = 2;
2429 + mcp->to_fw[13] = 0;
2430 + mcp->to_fw[12] = 4;
2431 + mcp->to_fw[11] = 0;
2432 + mcp->to_fw[10] = (uint16_t)(i);
2433 + mcp->to_fw[9] = 0;
2434 + mcp->to_fw[8] = 0;
2435 + mcp->to_fw[7] = LSW(MSD(da));
2436 + mcp->to_fw[6] = MSW(MSD(da));
2437 + mcp->to_fw[5] = REQUEST_QUEUE_MQ_ENTRIES;
2438 + mcp->to_fw[4] = (uint16_t)(i);
2439 + mcp->to_fw[3] = LSW(LSD(da));
2440 + mcp->to_fw[2] = MSW(LSD(da));
2441 + mcp->to_fw[1] = BIT_6;
2442 + mcp->to_fw[0] = 0x1F;
2443 + ret = qlt_mailbox_command(qlt, mcp);
2444 +
2445 + if (ret != QLT_SUCCESS) {
2446 + EL(qlt, "qlt_mbox_command=1fh status=%llxh\n",
2447 + ret);
2448 + cmn_err(CE_NOTE, "qlt(%d) queue manage %llx",
2449 + instance, (long long)ret);
2450 + qlt_free_mailbox_command(qlt, mcp);
2451 + return (ret);
2452 + }
2453 + }
2454 + }
2455 +
1433 2456 /*
1434 2457 * Report FW versions for 81xx - MPI rev is useful
1435 2458 */
1436 - if (qlt->qlt_81xx_chip) {
2459 + /* if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip)) { */
2460 + if (qlt->qlt_fcoe_enabled) {
1437 2461 mcp->to_fw_mask = BIT_0;
1438 - mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_3 | BIT_2 | BIT_1 |
1439 - BIT_0;
1440 - mcp->to_fw[0] = 0x8;
2462 + mcp->from_fw_mask = BIT_11 | BIT_10 | BIT_6 | BIT_3 | BIT_2 |
2463 + BIT_1 | BIT_0;
2464 +
2465 + mcp->to_fw[0] = MBC_ABOUT_FIRMWARE;
1441 2466 ret = qlt_mailbox_command(qlt, mcp);
1442 2467 if (ret != QLT_SUCCESS) {
1443 2468 EL(qlt, "about fw failed: %llx\n", (long long)ret);
1444 2469 } else {
1445 2470 EL(qlt, "Firmware version %d.%d.%d, MPI: %d.%d.%d\n",
1446 2471 mcp->from_fw[1], mcp->from_fw[2], mcp->from_fw[3],
1447 2472 mcp->from_fw[10] & 0xff, mcp->from_fw[11] >> 8,
1448 2473 mcp->from_fw[11] & 0xff);
2474 + EL(qlt, "Firmware Attributes %x[h]\n",
2475 + mcp->from_fw[6]);
1449 2476 }
1450 2477 }
1451 2478
1452 2479 qlt_free_mailbox_command(qlt, mcp);
1453 2480
1454 2481 for (i = 0; i < 5; i++) {
1455 2482 qlt->qlt_bufref[i] = 0;
1456 2483 }
1457 2484 qlt->qlt_bumpbucket = 0;
1458 2485 qlt->qlt_pmintry = 0;
1459 2486 qlt->qlt_pmin_ok = 0;
1460 2487
1461 2488 if (ret != QLT_SUCCESS)
1462 2489 return (ret);
2490 +
1463 2491 return (FCT_SUCCESS);
1464 2492 }
1465 2493
1466 2494 fct_status_t
1467 2495 qlt_port_offline(qlt_state_t *qlt)
1468 2496 {
1469 - int retries;
2497 + int retries;
2498 + int i;
1470 2499
1471 2500 mutex_enter(&qlt->mbox_lock);
1472 2501
1473 2502 if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
1474 2503 mutex_exit(&qlt->mbox_lock);
1475 2504 goto poff_mbox_done;
1476 2505 }
1477 2506
1478 2507 /* Wait to grab the mailboxes */
1479 2508 for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
1480 2509 retries++) {
1481 2510 cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
1482 2511 if ((retries > 5) ||
1483 2512 (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
1484 2513 qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1485 2514 mutex_exit(&qlt->mbox_lock);
1486 2515 goto poff_mbox_done;
1487 2516 }
1488 2517 }
1489 2518 qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
1490 2519 mutex_exit(&qlt->mbox_lock);
1491 2520 poff_mbox_done:;
1492 2521 qlt->intr_sneak_counter = 10;
1493 2522 mutex_enter(&qlt->intr_lock);
2523 + if (qlt->qlt_mq_enabled == 1) {
2524 + for (i = 1; i < qlt->qlt_queue_cnt; i++) {
2525 + mutex_enter(&qlt->mq_resp[i].mq_lock);
2526 + }
2527 + }
1494 2528 (void) qlt_reset_chip(qlt);
1495 2529 drv_usecwait(20);
1496 2530 qlt->intr_sneak_counter = 0;
2531 + if (qlt->qlt_mq_enabled == 1) {
2532 + for (i = 1; i < qlt->qlt_queue_cnt; i++) {
2533 + mutex_exit(&qlt->mq_resp[i].mq_lock);
2534 + }
2535 + }
1497 2536 mutex_exit(&qlt->intr_lock);
1498 2537
1499 2538 return (FCT_SUCCESS);
1500 2539 }
1501 2540
1502 2541 static fct_status_t
1503 2542 qlt_get_link_info(fct_local_port_t *port, fct_link_info_t *li)
1504 2543 {
1505 2544 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
1506 2545 mbox_cmd_t *mcp;
1507 2546 fct_status_t fc_ret;
1508 2547 fct_status_t ret;
1509 2548 clock_t et;
1510 2549
1511 2550 et = ddi_get_lbolt() + drv_usectohz(5000000);
1512 2551 mcp = qlt_alloc_mailbox_command(qlt, 0);
1513 2552 link_info_retry:
1514 2553 mcp->to_fw[0] = MBC_GET_ID;
1515 2554 mcp->to_fw[9] = 0;
1516 2555 mcp->to_fw_mask |= BIT_0 | BIT_9;
1517 2556 mcp->from_fw_mask |= BIT_0 | BIT_1 | BIT_2 | BIT_3 | BIT_6 | BIT_7;
1518 2557 /* Issue mailbox to firmware */
1519 2558 ret = qlt_mailbox_command(qlt, mcp);
1520 2559 if (ret != QLT_SUCCESS) {
1521 - EL(qlt, "qlt_mailbox_command=20h status=%llxh\n", ret);
1522 - if ((mcp->from_fw[0] == 0x4005) && (mcp->from_fw[1] == 7)) {
2560 + EL(qlt, "qlt_mbox_command=20h status=%llxh\n", ret);
2561 + if ((mcp->from_fw[0] == 0x4005) &&
2562 + ((mcp->from_fw[1] == 7) || (mcp->from_fw[1] == 0x1b))) {
1523 2563 /* Firmware is not ready */
1524 2564 if (ddi_get_lbolt() < et) {
1525 2565 delay(drv_usectohz(50000));
1526 2566 goto link_info_retry;
1527 2567 }
1528 2568 }
2569 + EL(qlt, "GET ID mbox failed, ret=%llx mb0=%x mb1=%x",
2570 + ret, mcp->from_fw[0], mcp->from_fw[1]);
1529 2571 stmf_trace(qlt->qlt_port_alias, "GET ID mbox failed, ret=%llx "
1530 2572 "mb0=%x mb1=%x", ret, mcp->from_fw[0], mcp->from_fw[1]);
1531 2573 fc_ret = FCT_FAILURE;
1532 2574 } else {
1533 2575 li->portid = ((uint32_t)(mcp->from_fw[2])) |
1534 2576 (((uint32_t)(mcp->from_fw[3])) << 16);
1535 2577
1536 2578 li->port_speed = qlt->link_speed;
1537 2579 switch (mcp->from_fw[6]) {
1538 2580 case 1:
1539 2581 li->port_topology = PORT_TOPOLOGY_PUBLIC_LOOP;
1540 2582 li->port_fca_flogi_done = 1;
1541 2583 break;
1542 2584 case 0:
1543 2585 li->port_topology = PORT_TOPOLOGY_PRIVATE_LOOP;
1544 2586 li->port_no_fct_flogi = 1;
1545 2587 break;
1546 2588 case 3:
1547 2589 li->port_topology = PORT_TOPOLOGY_FABRIC_PT_TO_PT;
1548 2590 li->port_fca_flogi_done = 1;
1549 2591 break;
1550 2592 case 2: /*FALLTHROUGH*/
1551 2593 case 4:
1552 2594 li->port_topology = PORT_TOPOLOGY_PT_TO_PT;
1553 2595 li->port_fca_flogi_done = 1;
1554 2596 break;
1555 2597 default:
1556 2598 li->port_topology = PORT_TOPOLOGY_UNKNOWN;
1557 2599 EL(qlt, "Unknown topology=%xh\n", mcp->from_fw[6]);
1558 2600 }
1559 2601 qlt->cur_topology = li->port_topology;
1560 2602 fc_ret = FCT_SUCCESS;
2603 +
2604 + EL(qlt, "MBC_GET_ID done, Topology=%x, portid=%xh, "
2605 + "port speed=%xh\n", li->port_topology, li->portid,
2606 + li->port_speed);
1561 2607 }
1562 2608 qlt_free_mailbox_command(qlt, mcp);
1563 2609
1564 2610 if ((fc_ret == FCT_SUCCESS) && (li->port_fca_flogi_done)) {
1565 2611 mcp = qlt_alloc_mailbox_command(qlt, 64);
1566 2612 mcp->to_fw[0] = MBC_GET_PORT_DATABASE;
1567 2613 mcp->to_fw[1] = 0x7FE;
1568 2614 mcp->to_fw[9] = 0;
1569 2615 mcp->to_fw[10] = 0;
1570 2616 mcp->to_fw_mask |= BIT_0 | BIT_1 | BIT_9 | BIT_10;
1571 2617 fc_ret = qlt_mailbox_command(qlt, mcp);
1572 2618 if (fc_ret != QLT_SUCCESS) {
1573 - EL(qlt, "qlt_mailbox_command=64h status=%llxh\n",
2619 + EL(qlt, "qlt_mbox_command=64h status=%llxh\n",
1574 2620 fc_ret);
1575 2621 stmf_trace(qlt->qlt_port_alias, "Attempt to get port "
1576 2622 "database for F_port failed, ret = %llx", fc_ret);
1577 2623 } else {
1578 2624 uint8_t *p;
1579 2625
1580 2626 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
1581 2627 p = mcp->dbuf->db_sglist[0].seg_addr;
1582 2628 bcopy(p + 0x18, li->port_rpwwn, 8);
1583 2629 bcopy(p + 0x20, li->port_rnwwn, 8);
2630 + EL(qlt, "qlt_mbox_command=64h, GET_PORT_DATABASE "
2631 + "complete\n");
1584 2632 }
1585 2633 qlt_free_mailbox_command(qlt, mcp);
1586 2634 }
1587 2635 return (fc_ret);
1588 2636 }
1589 2637
1590 2638 static int
1591 2639 qlt_open(dev_t *devp, int flag, int otype, cred_t *credp)
1592 2640 {
1593 2641 int instance;
1594 2642 qlt_state_t *qlt;
1595 2643
1596 2644 if (otype != OTYP_CHR) {
1597 2645 return (EINVAL);
1598 2646 }
1599 2647
1600 2648 /*
1601 2649 * Since this is for debugging only, only allow root to issue ioctl now
1602 2650 */
1603 2651 if (drv_priv(credp)) {
1604 2652 return (EPERM);
1605 2653 }
1606 2654
1607 2655 instance = (int)getminor(*devp);
1608 2656 qlt = ddi_get_soft_state(qlt_state, instance);
1609 2657 if (qlt == NULL) {
1610 2658 return (ENXIO);
1611 2659 }
1612 2660
1613 2661 mutex_enter(&qlt->qlt_ioctl_lock);
1614 2662 if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_EXCL) {
1615 2663 /*
1616 2664 * It is already open for exclusive access.
1617 2665 * So shut the door on this caller.
1618 2666 */
1619 2667 mutex_exit(&qlt->qlt_ioctl_lock);
1620 2668 return (EBUSY);
1621 2669 }
1622 2670
1623 2671 if (flag & FEXCL) {
1624 2672 if (qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) {
1625 2673 /*
1626 2674 * Exclusive operation not possible
1627 2675 * as it is already opened
1628 2676 */
1629 2677 mutex_exit(&qlt->qlt_ioctl_lock);
1630 2678 return (EBUSY);
1631 2679 }
1632 2680 qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_EXCL;
1633 2681 }
1634 2682 qlt->qlt_ioctl_flags |= QLT_IOCTL_FLAG_OPEN;
1635 2683 mutex_exit(&qlt->qlt_ioctl_lock);
1636 2684
1637 2685 return (0);
1638 2686 }
1639 2687
1640 2688 /* ARGSUSED */
1641 2689 static int
1642 2690 qlt_close(dev_t dev, int flag, int otype, cred_t *credp)
1643 2691 {
1644 2692 int instance;
1645 2693 qlt_state_t *qlt;
1646 2694
1647 2695 if (otype != OTYP_CHR) {
1648 2696 return (EINVAL);
1649 2697 }
1650 2698
1651 2699 instance = (int)getminor(dev);
1652 2700 qlt = ddi_get_soft_state(qlt_state, instance);
1653 2701 if (qlt == NULL) {
1654 2702 return (ENXIO);
1655 2703 }
1656 2704
1657 2705 mutex_enter(&qlt->qlt_ioctl_lock);
1658 2706 if ((qlt->qlt_ioctl_flags & QLT_IOCTL_FLAG_OPEN) == 0) {
1659 2707 mutex_exit(&qlt->qlt_ioctl_lock);
1660 2708 return (ENODEV);
1661 2709 }
1662 2710
1663 2711 /*
1664 2712 	 * It looks like there's one hole here; maybe there could be several
1665 2713 	 * concurrent shared open sessions, but we never check this case.
1666 2714 	 * It will not hurt too much, so disregard it for now.
1667 2715 */
1668 2716 qlt->qlt_ioctl_flags &= ~QLT_IOCTL_FLAG_MASK;
1669 2717 mutex_exit(&qlt->qlt_ioctl_lock);
1670 2718
1671 2719 return (0);
1672 2720 }
1673 2721
1674 2722 /*
1675 2723 * All of these ioctls are unstable interfaces which are meant to be used
1676 2724 * in a controlled lab env. No formal testing will be (or needs to be) done
1677 2725  * for these ioctls. Especially note that running with an additional
1678 2726 * uploaded firmware is not supported and is provided here for test
1679 2727 * purposes only.
1680 2728 */
1681 2729 /* ARGSUSED */
1682 2730 static int
1683 2731 qlt_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
1684 2732 cred_t *credp, int *rval)
1685 2733 {
1686 2734 qlt_state_t *qlt;
1687 2735 int ret = 0;
|
↓ open down ↓ |
94 lines elided |
↑ open up ↑ |
1688 2736 #ifdef _LITTLE_ENDIAN
1689 2737 int i;
1690 2738 #endif
1691 2739 stmf_iocdata_t *iocd;
1692 2740 void *ibuf = NULL;
1693 2741 void *obuf = NULL;
1694 2742 uint32_t *intp;
1695 2743 qlt_fw_info_t *fwi;
1696 2744 mbox_cmd_t *mcp;
1697 2745 fct_status_t st;
1698 - char info[QLT_INFO_LEN];
2746 + char info[80];
1699 2747 fct_status_t ret2;
1700 2748
1701 2749 if (drv_priv(credp) != 0)
1702 2750 return (EPERM);
1703 2751
1704 2752 qlt = ddi_get_soft_state(qlt_state, (int32_t)getminor(dev));
1705 2753 ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
1706 2754 if (ret)
1707 2755 return (ret);
1708 2756 iocd->stmf_error = 0;
1709 2757
1710 2758 switch (cmd) {
1711 2759 case QLT_IOCTL_FETCH_FWDUMP:
1712 2760 if (iocd->stmf_obuf_size < QLT_FWDUMP_BUFSIZE) {
1713 2761 EL(qlt, "FETCH_FWDUMP obuf_size=%d < %d\n",
1714 2762 iocd->stmf_obuf_size, QLT_FWDUMP_BUFSIZE);
1715 2763 ret = EINVAL;
1716 2764 break;
1717 2765 }
1718 2766 mutex_enter(&qlt->qlt_ioctl_lock);
1719 2767 if (!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID)) {
1720 2768 mutex_exit(&qlt->qlt_ioctl_lock);
1721 2769 ret = ENODATA;
1722 2770 EL(qlt, "no fwdump\n");
1723 2771 iocd->stmf_error = QLTIO_NO_DUMP;
1724 2772 break;
1725 2773 }
1726 2774 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
1727 2775 mutex_exit(&qlt->qlt_ioctl_lock);
1728 2776 ret = EBUSY;
1729 2777 EL(qlt, "fwdump inprogress\n");
1730 2778 iocd->stmf_error = QLTIO_DUMP_INPROGRESS;
1731 2779 break;
1732 2780 }
1733 2781 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER) {
1734 2782 mutex_exit(&qlt->qlt_ioctl_lock);
1735 2783 ret = EEXIST;
1736 2784 EL(qlt, "fwdump already fetched\n");
1737 2785 iocd->stmf_error = QLTIO_ALREADY_FETCHED;
1738 2786 break;
1739 2787 }
1740 2788 bcopy(qlt->qlt_fwdump_buf, obuf, QLT_FWDUMP_BUFSIZE);
1741 2789 qlt->qlt_ioctl_flags |= QLT_FWDUMP_FETCHED_BY_USER;
1742 2790 mutex_exit(&qlt->qlt_ioctl_lock);
1743 2791
1744 2792 break;
1745 2793
1746 2794 case QLT_IOCTL_TRIGGER_FWDUMP:
1747 2795 if (qlt->qlt_state != FCT_STATE_ONLINE) {
1748 2796 ret = EACCES;
1749 2797 iocd->stmf_error = QLTIO_NOT_ONLINE;
1750 2798 break;
1751 2799 }
1752 - (void) snprintf(info, sizeof (info), "qlt_ioctl: qlt-%p, "
2800 + (void) snprintf(info, 80, "qlt_ioctl: qlt-%p, "
1753 2801 "user triggered FWDUMP with RFLAG_RESET", (void *)qlt);
2802 + info[79] = 0;
1754 2803 if ((ret2 = fct_port_shutdown(qlt->qlt_port,
1755 2804 STMF_RFLAG_USER_REQUEST | STMF_RFLAG_RESET |
1756 2805 STMF_RFLAG_COLLECT_DEBUG_DUMP, info)) != FCT_SUCCESS) {
1757 2806 EL(qlt, "TRIGGER_FWDUMP fct_port_shutdown status="
1758 2807 "%llxh\n", ret2);
1759 2808 ret = EIO;
1760 2809 }
1761 2810 break;
1762 2811 case QLT_IOCTL_UPLOAD_FW:
1763 2812 if ((iocd->stmf_ibuf_size < 1024) ||
1764 2813 (iocd->stmf_ibuf_size & 3)) {
1765 2814 EL(qlt, "UPLOAD_FW ibuf_size=%d < 1024\n",
1766 2815 iocd->stmf_ibuf_size);
1767 2816 ret = EINVAL;
1768 2817 iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1769 2818 break;
1770 2819 }
1771 2820 intp = (uint32_t *)ibuf;
1772 2821 #ifdef _LITTLE_ENDIAN
1773 2822 for (i = 0; (i << 2) < iocd->stmf_ibuf_size; i++) {
1774 2823 intp[i] = BSWAP_32(intp[i]);
1775 2824 }
1776 2825 #endif
1777 2826 if (((intp[3] << 2) >= iocd->stmf_ibuf_size) ||
1778 2827 (((intp[intp[3] + 3] + intp[3]) << 2) !=
|
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
1779 2828 iocd->stmf_ibuf_size)) {
1780 2829 EL(qlt, "UPLOAD_FW fw_size=%d >= %d\n", intp[3] << 2,
1781 2830 iocd->stmf_ibuf_size);
1782 2831 ret = EINVAL;
1783 2832 iocd->stmf_error = QLTIO_INVALID_FW_SIZE;
1784 2833 break;
1785 2834 }
1786 2835 if ((qlt->qlt_81xx_chip && ((intp[8] & 8) == 0)) ||
1787 2836 (qlt->qlt_25xx_chip && ((intp[8] & 4) == 0)) ||
1788 2837 (!qlt->qlt_25xx_chip && !qlt->qlt_81xx_chip &&
2838 + !qlt->qlt_83xx_chip && !qlt->qlt_27xx_chip &&
1789 2839 ((intp[8] & 3) == 0))) {
1790 2840 EL(qlt, "UPLOAD_FW fw_type=%d\n", intp[8]);
1791 2841 ret = EACCES;
1792 2842 iocd->stmf_error = QLTIO_INVALID_FW_TYPE;
1793 2843 break;
1794 2844 }
1795 2845
1796 2846 /* Everything looks ok, lets copy this firmware */
1797 2847 if (qlt->fw_code01) {
1798 2848 kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1799 2849 qlt->fw_length02) << 2);
1800 2850 qlt->fw_code01 = NULL;
1801 2851 } else {
1802 2852 atomic_inc_32(&qlt_loaded_counter);
1803 2853 }
1804 2854 qlt->fw_length01 = intp[3];
1805 2855 qlt->fw_code01 = (uint32_t *)kmem_alloc(iocd->stmf_ibuf_size,
1806 2856 KM_SLEEP);
1807 2857 bcopy(intp, qlt->fw_code01, iocd->stmf_ibuf_size);
1808 2858 qlt->fw_addr01 = intp[2];
1809 2859 qlt->fw_code02 = &qlt->fw_code01[intp[3]];
1810 2860 qlt->fw_addr02 = qlt->fw_code02[2];
1811 2861 qlt->fw_length02 = qlt->fw_code02[3];
1812 2862 break;
1813 2863
1814 2864 case QLT_IOCTL_CLEAR_FW:
1815 2865 if (qlt->fw_code01) {
1816 2866 kmem_free(qlt->fw_code01, (qlt->fw_length01 +
1817 2867 qlt->fw_length02) << 2);
1818 2868 qlt->fw_code01 = NULL;
1819 2869 atomic_dec_32(&qlt_loaded_counter);
1820 2870 }
1821 2871 break;
1822 2872
1823 2873 case QLT_IOCTL_GET_FW_INFO:
1824 2874 if (iocd->stmf_obuf_size != sizeof (qlt_fw_info_t)) {
1825 2875 EL(qlt, "GET_FW_INFO obuf_size=%d != %d\n",
1826 2876 iocd->stmf_obuf_size, sizeof (qlt_fw_info_t));
1827 2877 ret = EINVAL;
1828 2878 break;
1829 2879 }
1830 2880 fwi = (qlt_fw_info_t *)obuf;
1831 2881 if (qlt->qlt_stay_offline) {
1832 2882 fwi->fwi_stay_offline = 1;
1833 2883 }
1834 2884 if (qlt->qlt_state == FCT_STATE_ONLINE) {
1835 2885 fwi->fwi_port_active = 1;
1836 2886 }
1837 2887 fwi->fwi_active_major = qlt->fw_major;
|
↓ open down ↓ |
39 lines elided |
↑ open up ↑ |
1838 2888 fwi->fwi_active_minor = qlt->fw_minor;
1839 2889 fwi->fwi_active_subminor = qlt->fw_subminor;
1840 2890 fwi->fwi_active_attr = qlt->fw_attr;
1841 2891 if (qlt->fw_code01) {
1842 2892 fwi->fwi_fw_uploaded = 1;
1843 2893 fwi->fwi_loaded_major = (uint16_t)qlt->fw_code01[4];
1844 2894 fwi->fwi_loaded_minor = (uint16_t)qlt->fw_code01[5];
1845 2895 fwi->fwi_loaded_subminor = (uint16_t)qlt->fw_code01[6];
1846 2896 fwi->fwi_loaded_attr = (uint16_t)qlt->fw_code01[7];
1847 2897 }
1848 - if (qlt->qlt_81xx_chip) {
2898 + if (qlt->qlt_27xx_chip) {
2899 + fwi->fwi_default_major = (uint16_t)fw2700_code01[4];
2900 + fwi->fwi_default_minor = (uint16_t)fw2700_code01[5];
2901 + fwi->fwi_default_subminor = (uint16_t)fw2700_code01[6];
2902 + fwi->fwi_default_attr = (uint16_t)fw2700_code01[7];
2903 + } else if (qlt->qlt_83xx_chip) {
2904 + fwi->fwi_default_major = (uint16_t)fw8300fc_code01[4];
2905 + fwi->fwi_default_minor = (uint16_t)fw8300fc_code01[5];
2906 + fwi->fwi_default_subminor =
2907 + (uint16_t)fw8300fc_code01[6];
2908 + fwi->fwi_default_attr = (uint16_t)fw8300fc_code01[7];
2909 + } else if (qlt->qlt_81xx_chip) {
1849 2910 fwi->fwi_default_major = (uint16_t)fw8100_code01[4];
1850 2911 fwi->fwi_default_minor = (uint16_t)fw8100_code01[5];
1851 2912 fwi->fwi_default_subminor = (uint16_t)fw8100_code01[6];
1852 2913 fwi->fwi_default_attr = (uint16_t)fw8100_code01[7];
1853 2914 } else if (qlt->qlt_25xx_chip) {
1854 2915 fwi->fwi_default_major = (uint16_t)fw2500_code01[4];
1855 2916 fwi->fwi_default_minor = (uint16_t)fw2500_code01[5];
1856 2917 fwi->fwi_default_subminor = (uint16_t)fw2500_code01[6];
1857 2918 fwi->fwi_default_attr = (uint16_t)fw2500_code01[7];
1858 2919 } else {
1859 2920 fwi->fwi_default_major = (uint16_t)fw2400_code01[4];
1860 2921 fwi->fwi_default_minor = (uint16_t)fw2400_code01[5];
1861 2922 fwi->fwi_default_subminor = (uint16_t)fw2400_code01[6];
1862 2923 fwi->fwi_default_attr = (uint16_t)fw2400_code01[7];
1863 2924 }
1864 2925 break;
1865 2926
1866 2927 case QLT_IOCTL_STAY_OFFLINE:
1867 2928 if (!iocd->stmf_ibuf_size) {
1868 2929 EL(qlt, "STAY_OFFLINE ibuf_size=%d\n",
1869 2930 iocd->stmf_ibuf_size);
1870 2931 ret = EINVAL;
1871 2932 break;
1872 2933 }
1873 2934 if (*((char *)ibuf)) {
1874 2935 qlt->qlt_stay_offline = 1;
1875 2936 } else {
1876 2937 qlt->qlt_stay_offline = 0;
1877 2938 }
1878 2939 break;
1879 2940
1880 2941 case QLT_IOCTL_MBOX:
1881 2942 if ((iocd->stmf_ibuf_size < sizeof (qlt_ioctl_mbox_t)) ||
1882 2943 (iocd->stmf_obuf_size < sizeof (qlt_ioctl_mbox_t))) {
1883 2944 EL(qlt, "IOCTL_MBOX ibuf_size=%d, obuf_size=%d\n",
1884 2945 iocd->stmf_ibuf_size, iocd->stmf_obuf_size);
1885 2946 ret = EINVAL;
1886 2947 break;
1887 2948 }
1888 2949 mcp = qlt_alloc_mailbox_command(qlt, 0);
1889 2950 if (mcp == NULL) {
1890 2951 EL(qlt, "IOCTL_MBOX mcp == NULL\n");
1891 2952 ret = ENOMEM;
1892 2953 break;
1893 2954 }
1894 2955 bcopy(ibuf, mcp, sizeof (qlt_ioctl_mbox_t));
1895 2956 st = qlt_mailbox_command(qlt, mcp);
1896 2957 bcopy(mcp, obuf, sizeof (qlt_ioctl_mbox_t));
1897 2958 qlt_free_mailbox_command(qlt, mcp);
1898 2959 if (st != QLT_SUCCESS) {
1899 2960 if ((st & (~((uint64_t)(0xFFFF)))) == QLT_MBOX_FAILED)
1900 2961 st = QLT_SUCCESS;
1901 2962 }
1902 2963 if (st != QLT_SUCCESS) {
1903 2964 EL(qlt, "IOCTL_MBOX status=%xh\n", st);
1904 2965 ret = EIO;
1905 2966 switch (st) {
1906 2967 case QLT_MBOX_NOT_INITIALIZED:
1907 2968 iocd->stmf_error = QLTIO_MBOX_NOT_INITIALIZED;
1908 2969 break;
1909 2970 case QLT_MBOX_BUSY:
1910 2971 iocd->stmf_error = QLTIO_CANT_GET_MBOXES;
1911 2972 break;
1912 2973 case QLT_MBOX_TIMEOUT:
|
↓ open down ↓ |
54 lines elided |
↑ open up ↑ |
1913 2974 iocd->stmf_error = QLTIO_MBOX_TIMED_OUT;
1914 2975 break;
1915 2976 case QLT_MBOX_ABORTED:
1916 2977 iocd->stmf_error = QLTIO_MBOX_ABORTED;
1917 2978 break;
1918 2979 }
1919 2980 }
1920 2981 break;
1921 2982
1922 2983 case QLT_IOCTL_ELOG:
1923 - qlt_dump_el_trace_buffer(qlt);
2984 +		EL(qlt, "Not supported yet, ioctl-%xh\n", cmd);
1924 2985 break;
1925 2986
1926 2987 default:
1927 2988 EL(qlt, "Unknown ioctl-%xh\n", cmd);
1928 2989 ret = ENOTTY;
1929 2990 }
1930 2991
1931 2992 if (ret == 0) {
1932 2993 ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1933 2994 } else if (iocd->stmf_error) {
1934 2995 (void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1935 2996 }
1936 2997 if (obuf) {
1937 2998 kmem_free(obuf, iocd->stmf_obuf_size);
1938 2999 obuf = NULL;
1939 3000 }
1940 3001 if (ibuf) {
1941 3002 kmem_free(ibuf, iocd->stmf_ibuf_size);
1942 3003 ibuf = NULL;
1943 3004 }
1944 3005 kmem_free(iocd, sizeof (stmf_iocdata_t));
1945 3006 return (ret);
1946 3007 }
1947 3008
1948 3009 static fct_status_t
1949 3010 qlt_force_lip(qlt_state_t *qlt)
1950 3011 {
1951 3012 mbox_cmd_t *mcp;
1952 3013 fct_status_t rval;
1953 3014
1954 3015 mcp = qlt_alloc_mailbox_command(qlt, 0);
1955 - mcp->to_fw[0] = 0x0072;
1956 - mcp->to_fw[1] = BIT_4;
1957 - mcp->to_fw[3] = 1;
1958 - mcp->to_fw_mask |= BIT_1 | BIT_3;
3016 + if (qlt->qlt_fcoe_enabled) {
3017 + mcp->to_fw[0] = MBC_PORT_RESET;
3018 + } else {
3019 + mcp->to_fw[0] = MBC_LIP_FULL_LOGIN;
3020 + mcp->to_fw[1] = BIT_4;
3021 + mcp->to_fw[3] = 1;
3022 + mcp->to_fw_mask |= BIT_1 | BIT_3;
3023 + }
1959 3024 rval = qlt_mailbox_command(qlt, mcp);
1960 3025 if (rval != FCT_SUCCESS) {
1961 - EL(qlt, "qlt force lip MB failed: rval=%x", rval);
3026 + EL(qlt, "qlt force lip MB failed: rval=%x\n", rval);
1962 3027 } else {
1963 - if (mcp->from_fw[0] != 0x4000) {
3028 + if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS) {
1964 3029 QLT_LOG(qlt->qlt_port_alias, "qlt FLIP: fw[0]=%x",
1965 3030 mcp->from_fw[0]);
1966 3031 rval = FCT_FAILURE;
1967 3032 }
1968 3033 }
1969 3034 qlt_free_mailbox_command(qlt, mcp);
1970 3035 return (rval);
1971 3036 }
1972 3037
1973 3038 static void
1974 3039 qlt_ctl(struct fct_local_port *port, int cmd, void *arg)
1975 3040 {
1976 3041 stmf_change_status_t st;
1977 3042 stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg;
1978 3043 qlt_state_t *qlt;
1979 3044 fct_status_t ret;
1980 3045
1981 3046 ASSERT((cmd == FCT_CMD_PORT_ONLINE) ||
1982 3047 (cmd == FCT_CMD_PORT_OFFLINE) ||
1983 3048 (cmd == FCT_CMD_FORCE_LIP) ||
1984 3049 (cmd == FCT_ACK_PORT_ONLINE_COMPLETE) ||
1985 3050 (cmd == FCT_ACK_PORT_OFFLINE_COMPLETE));
1986 3051
1987 3052 qlt = (qlt_state_t *)port->port_fca_private;
1988 3053 st.st_completion_status = FCT_SUCCESS;
1989 3054 st.st_additional_info = NULL;
1990 3055
3056 + EL(qlt, "port (%p) qlt_state (%xh) cmd (%xh) arg (%p)\n",
3057 + port, qlt->qlt_state, cmd, arg);
3058 +
1991 3059 switch (cmd) {
1992 3060 case FCT_CMD_PORT_ONLINE:
1993 3061 if (qlt->qlt_state == FCT_STATE_ONLINE)
1994 3062 st.st_completion_status = STMF_ALREADY;
1995 3063 else if (qlt->qlt_state != FCT_STATE_OFFLINE)
1996 3064 st.st_completion_status = FCT_FAILURE;
1997 3065 if (st.st_completion_status == FCT_SUCCESS) {
1998 3066 qlt->qlt_state = FCT_STATE_ONLINING;
1999 3067 qlt->qlt_state_not_acked = 1;
2000 3068 st.st_completion_status = qlt_port_online(qlt);
2001 3069 if (st.st_completion_status != STMF_SUCCESS) {
2002 3070 EL(qlt, "PORT_ONLINE status=%xh\n",
2003 3071 st.st_completion_status);
2004 3072 qlt->qlt_state = FCT_STATE_OFFLINE;
2005 3073 qlt->qlt_state_not_acked = 0;
2006 3074 } else {
2007 3075 qlt->qlt_state = FCT_STATE_ONLINE;
2008 3076 }
2009 3077 }
2010 3078 fct_ctl(port->port_lport, FCT_CMD_PORT_ONLINE_COMPLETE, &st);
2011 3079 qlt->qlt_change_state_flags = 0;
2012 3080 break;
2013 3081
2014 3082 case FCT_CMD_PORT_OFFLINE:
2015 3083 if (qlt->qlt_state == FCT_STATE_OFFLINE) {
2016 3084 st.st_completion_status = STMF_ALREADY;
2017 3085 } else if (qlt->qlt_state != FCT_STATE_ONLINE) {
2018 3086 st.st_completion_status = FCT_FAILURE;
2019 3087 }
2020 3088 if (st.st_completion_status == FCT_SUCCESS) {
2021 3089 qlt->qlt_state = FCT_STATE_OFFLINING;
2022 3090 qlt->qlt_state_not_acked = 1;
2023 3091
2024 3092 if (ssci->st_rflags & STMF_RFLAG_COLLECT_DEBUG_DUMP) {
2025 3093 (void) qlt_firmware_dump(port, ssci);
2026 3094 }
2027 3095 qlt->qlt_change_state_flags = (uint32_t)ssci->st_rflags;
2028 3096 st.st_completion_status = qlt_port_offline(qlt);
2029 3097 if (st.st_completion_status != STMF_SUCCESS) {
2030 3098 EL(qlt, "PORT_OFFLINE status=%xh\n",
2031 3099 st.st_completion_status);
2032 3100 qlt->qlt_state = FCT_STATE_ONLINE;
2033 3101 qlt->qlt_state_not_acked = 0;
2034 3102 } else {
2035 3103 qlt->qlt_state = FCT_STATE_OFFLINE;
2036 3104 }
2037 3105 }
2038 3106 fct_ctl(port->port_lport, FCT_CMD_PORT_OFFLINE_COMPLETE, &st);
2039 3107 break;
2040 3108
2041 3109 case FCT_ACK_PORT_ONLINE_COMPLETE:
2042 3110 qlt->qlt_state_not_acked = 0;
2043 3111 break;
2044 3112
2045 3113 case FCT_ACK_PORT_OFFLINE_COMPLETE:
2046 3114 qlt->qlt_state_not_acked = 0;
2047 3115 if ((qlt->qlt_change_state_flags & STMF_RFLAG_RESET) &&
2048 3116 (qlt->qlt_stay_offline == 0)) {
2049 3117 if ((ret = fct_port_initialize(port,
2050 3118 qlt->qlt_change_state_flags,
2051 3119 "qlt_ctl FCT_ACK_PORT_OFFLINE_COMPLETE "
2052 3120 "with RLFLAG_RESET")) != FCT_SUCCESS) {
2053 3121 EL(qlt, "fct_port_initialize status=%llxh\n",
[ 53 lines elided ]
2054 3122 ret);
2055 3123 cmn_err(CE_WARN, "qlt_ctl: "
2056 3124 "fct_port_initialize failed, please use "
2057 3125 "stmfstate to start the port-%s manualy",
2058 3126 qlt->qlt_port_alias);
2059 3127 }
2060 3128 }
2061 3129 break;
2062 3130
2063 3131 case FCT_CMD_FORCE_LIP:
2064 - if (qlt->qlt_81xx_chip) {
3132 + if (qlt->qlt_fcoe_enabled) {
2065 3133 EL(qlt, "force lip is an unsupported command "
2066 3134 "for this adapter type\n");
2067 3135 } else {
2068 - *((fct_status_t *)arg) = qlt_force_lip(qlt);
2069 - EL(qlt, "forcelip done\n");
3136 + if (qlt->qlt_state == FCT_STATE_ONLINE) {
3137 + *((fct_status_t *)arg) = qlt_force_lip(qlt);
3138 + EL(qlt, "forcelip done\n");
3139 + }
2070 3140 }
2071 3141 break;
2072 3142
2073 3143 default:
2074 - EL(qlt, "unsupport cmd - 0x%02X", cmd);
3144 + EL(qlt, "unsupport cmd - 0x%02X\n", cmd);
2075 3145 break;
2076 3146 }
2077 3147 }
2078 3148
2079 3149 /* ARGSUSED */
2080 3150 static fct_status_t
2081 3151 qlt_do_flogi(fct_local_port_t *port, fct_flogi_xchg_t *fx)
2082 3152 {
2083 3153 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
2084 3154
2085 3155 EL(qlt, "FLOGI requested not supported\n");
2086 3156 cmn_err(CE_WARN, "qlt: FLOGI requested (not supported)");
2087 3157 return (FCT_FAILURE);
[ 3 lines elided ]
2088 3158 }
2089 3159
2090 3160 /*
2091 3161 * Return a pointer to n entries in the request queue. Assumes that
2092 3162 * request queue lock is held. Does a very short busy wait if
2093 3163    * less/zero entries are available. Returns NULL if it still cannot
2094 3164    * fulfill the request.
2095 3165 * **CALL qlt_submit_req_entries() BEFORE DROPPING THE LOCK**
2096 3166 */
2097 3167 caddr_t
2098 -qlt_get_req_entries(qlt_state_t *qlt, uint32_t n)
3168 +qlt_get_req_entries(qlt_state_t *qlt, uint32_t n, uint16_t qi)
2099 3169 {
2100 3170 int try = 0;
2101 3171
2102 - while (qlt->req_available < n) {
3172 + while (qlt->mq_req[qi].mq_available < n) {
2103 3173 uint32_t val1, val2, val3;
2104 - val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2105 - val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
2106 - val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
3174 +
3175 + if (qlt->qlt_mq_enabled) {
3176 + /* debounce */
3177 + val1 = MQBAR_RD32(qlt,
3178 + (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT);
3179 + val2 = MQBAR_RD32(qlt,
3180 + (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT);
3181 + val3 = MQBAR_RD32(qlt,
3182 + (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT);
3183 + } else {
3184 + val1 = REG_RD32(qlt, REG_REQ_OUT_PTR);
3185 + val2 = REG_RD32(qlt, REG_REQ_OUT_PTR);
3186 + val3 = REG_RD32(qlt, REG_REQ_OUT_PTR);
3187 + }
2107 3188 if ((val1 != val2) || (val2 != val3))
2108 3189 continue;
2109 3190
2110 - qlt->req_ndx_from_fw = val1;
2111 - qlt->req_available = REQUEST_QUEUE_ENTRIES - 1 -
2112 - ((qlt->req_ndx_to_fw - qlt->req_ndx_from_fw) &
2113 - (REQUEST_QUEUE_ENTRIES - 1));
2114 - if (qlt->req_available < n) {
3191 + qlt->mq_req[qi].mq_ndx_from_fw = val1;
3192 + if (qi != 0) {
3193 + qlt->mq_req[qi].mq_available =
3194 + REQUEST_QUEUE_MQ_ENTRIES - 1 -
3195 + ((qlt->mq_req[qi].mq_ndx_to_fw -
3196 + qlt->mq_req[qi].mq_ndx_from_fw) &
3197 + (REQUEST_QUEUE_MQ_ENTRIES - 1));
3198 + } else {
3199 + qlt->mq_req[qi].mq_available =
3200 + REQUEST_QUEUE_ENTRIES - 1 -
3201 + ((qlt->mq_req[qi].mq_ndx_to_fw -
3202 + qlt->mq_req[qi].mq_ndx_from_fw) &
3203 + (REQUEST_QUEUE_ENTRIES - 1));
3204 + }
3205 + if (qlt->mq_req[qi].mq_available < n) {
2115 3206 if (try < 2) {
2116 3207 drv_usecwait(100);
2117 3208 try++;
2118 3209 continue;
2119 3210 } else {
2120 3211 stmf_trace(qlt->qlt_port_alias,
2121 - "Req Q is full");
3212 + "Req Q# %xh is full", qi);
3213 + EL(qlt, "Req %xh is full (%d,%d) (%d,%d)\n",
3214 + qi, qlt->mq_req[qi].mq_ndx_to_fw,
3215 + qlt->mq_req[qi].mq_ndx_from_fw,
3216 + n, qlt->mq_req[qi].mq_available);
2122 3217 return (NULL);
2123 3218 }
2124 3219 }
2125 3220 break;
2126 3221 }
2127 3222    	/* We don't change anything until the entries are submitted */
2128 - return (&qlt->req_ptr[qlt->req_ndx_to_fw << 6]);
3223 + return (&qlt->mq_req[qi].mq_ptr[qlt->mq_req[qi].mq_ndx_to_fw << 6]);
2129 3224 }
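Reviewer note: the free-entry arithmetic above works only because the queue depths are powers of two, and one slot is always sacrificed so that a full ring is distinguishable from an empty one. A self-contained sketch of the same computation (QUEUE_ENTRIES here is a hypothetical depth, not REQUEST_QUEUE_ENTRIES or REQUEST_QUEUE_MQ_ENTRIES):

#include <stdint.h>
#include <stdio.h>

#define QUEUE_ENTRIES   512u    /* hypothetical power-of-two depth */

/*
 * Free slots when the driver producer is at 'to_fw' and the firmware
 * consumer is at 'from_fw'; one slot stays unused so full and empty differ.
 */
static uint32_t
ring_available(uint32_t to_fw, uint32_t from_fw)
{
        return (QUEUE_ENTRIES - 1 -
            ((to_fw - from_fw) & (QUEUE_ENTRIES - 1)));
}

int
main(void)
{
        (void) printf("%u\n", ring_available(10, 5));   /* 506 */
        (void) printf("%u\n", ring_available(5, 10));   /* wrapped: 4 */
        (void) printf("%u\n", ring_available(4, 5));    /* full: 0 */
        return (0);
}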
2130 3225
2131 3226 /*
2132 3227 * updates the req in ptr to fw. Assumes that req lock is held.
2133 3228 */
2134 3229 void
2135 -qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n)
3230 +qlt_submit_req_entries(qlt_state_t *qlt, uint32_t n, uint16_t qi)
2136 3231 {
3232 +
2137 3233 ASSERT(n >= 1);
2138 - qlt->req_ndx_to_fw += n;
2139 - qlt->req_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
2140 - qlt->req_available -= n;
2141 - REG_WR32(qlt, REG_REQ_IN_PTR, qlt->req_ndx_to_fw);
2142 -}
2143 3234
3235 + qlt->mq_req[qi].mq_ndx_to_fw += n;
3236 + if (qi != 0) {
3237 + qlt->mq_req[qi].mq_ndx_to_fw &= REQUEST_QUEUE_MQ_ENTRIES - 1;
3238 + } else {
3239 + qlt->mq_req[qi].mq_ndx_to_fw &= REQUEST_QUEUE_ENTRIES - 1;
3240 + }
3241 + qlt->mq_req[qi].mq_available -= n;
2144 3242
3243 + if (qlt->qlt_mq_enabled) {
3244 + MQBAR_WR32(qlt, (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_IN,
3245 + qlt->mq_req[qi].mq_ndx_to_fw);
3246 + } else {
3247 + REG_WR32(qlt, REG_REQ_IN_PTR, qlt->mq_req[0].mq_ndx_to_fw);
3248 + }
3249 +}
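Reviewer note: with multiqueue enabled, every queue gets its own register window inside the MQ BAR, so the doorbell address is simply qi * MQBAR_REG_OFFSET plus the register offset. A sketch of that addressing with made-up offset values (the real MQBAR_* constants live in qlt.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative offsets only -- the real values come from qlt.h. */
#define MQBAR_REG_OFFSET        0x80u   /* one queue's register window */
#define MQBAR_REQ_IN            0x00u   /* request-queue in pointer */
#define MQBAR_RESP_IN           0x08u   /* response-queue in pointer */

/* Byte offset of register 'reg' for queue 'qi' inside the MQ BAR. */
static uint32_t
mqbar_offset(uint16_t qi, uint32_t reg)
{
        return ((uint32_t)qi * MQBAR_REG_OFFSET + reg);
}

int
main(void)
{
        (void) printf("q3 REQ_IN  @ 0x%x\n", mqbar_offset(3, MQBAR_REQ_IN));
        (void) printf("q3 RESP_IN @ 0x%x\n", mqbar_offset(3, MQBAR_RESP_IN));
        return (0);
}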
3250 +
2145 3251 /*
2146 3252 * Return a pointer to n entries in the priority request queue. Assumes that
2147 3253 * priority request queue lock is held. Does a very short busy wait if
2148 3254    * less/zero entries are available. Returns NULL if it still cannot
2149 3255    * fulfill the request.
2150 3256 * **CALL qlt_submit_preq_entries() BEFORE DROPPING THE LOCK**
2151 3257 */
2152 3258 caddr_t
2153 3259 qlt_get_preq_entries(qlt_state_t *qlt, uint32_t n)
2154 3260 {
2155 3261 int try = 0;
2156 3262 uint32_t req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2157 3263 ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2158 3264 (PRIORITY_QUEUE_ENTRIES - 1));
2159 3265
2160 3266 while (req_available < n) {
2161 3267 uint32_t val1, val2, val3;
2162 3268 val1 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2163 3269 val2 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2164 3270 val3 = REG_RD32(qlt, REG_PREQ_OUT_PTR);
2165 3271 if ((val1 != val2) || (val2 != val3))
2166 3272 continue;
2167 3273
2168 3274 qlt->preq_ndx_from_fw = val1;
2169 3275 req_available = PRIORITY_QUEUE_ENTRIES - 1 -
2170 3276 ((qlt->preq_ndx_to_fw - qlt->preq_ndx_from_fw) &
2171 3277 (PRIORITY_QUEUE_ENTRIES - 1));
2172 3278 if (req_available < n) {
2173 3279 if (try < 2) {
2174 3280 drv_usecwait(100);
2175 3281 try++;
2176 3282 continue;
2177 3283 } else {
2178 3284 return (NULL);
2179 3285 }
2180 3286 }
2181 3287 break;
2182 3288 }
2183 3289    	/* We don't change anything until the entries are submitted */
2184 3290 return (&qlt->preq_ptr[qlt->preq_ndx_to_fw << 6]);
2185 3291 }
2186 3292
2187 3293 /*
2188 3294 * updates the req in ptr to fw. Assumes that req lock is held.
2189 3295 */
2190 3296 void
2191 3297 qlt_submit_preq_entries(qlt_state_t *qlt, uint32_t n)
2192 3298 {
2193 3299 ASSERT(n >= 1);
2194 3300 qlt->preq_ndx_to_fw += n;
2195 3301 qlt->preq_ndx_to_fw &= PRIORITY_QUEUE_ENTRIES - 1;
2196 3302 REG_WR32(qlt, REG_PREQ_IN_PTR, qlt->preq_ndx_to_fw);
2197 3303 }
2198 3304
2199 3305 /*
2200 3306 * - Should not be called from Interrupt.
2201 3307 * - A very hardware specific function. Does not touch driver state.
2202 3308 * - Assumes that interrupts are disabled or not there.
2203 3309 * - Expects that the caller makes sure that all activity has stopped
2204 3310 * and its ok now to go ahead and reset the chip. Also the caller
2205 3311 * takes care of post reset damage control.
2206 3312 * - called by initialize adapter() and dump_fw(for reset only).
2207 3313 * - During attach() nothing much is happening and during initialize_adapter()
2208 3314 * the function (caller) does all the housekeeping so that this function
2209 3315 * can execute in peace.
2210 3316 * - Returns 0 on success.
[ 56 lines elided ]
2211 3317 */
2212 3318 static fct_status_t
2213 3319 qlt_reset_chip(qlt_state_t *qlt)
2214 3320 {
2215 3321 int cntr;
2216 3322
2217 3323 EL(qlt, "initiated\n");
2218 3324
2219 3325 /* XXX: Switch off LEDs */
2220 3326
3327 + qlt->qlt_intr_enabled = 0;
2221 3328 /* Disable Interrupts */
2222 3329 REG_WR32(qlt, REG_INTR_CTRL, 0);
2223 3330 (void) REG_RD32(qlt, REG_INTR_CTRL);
2224 3331 /* Stop DMA */
2225 3332 REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL);
2226 3333
2227 3334 /* Wait for DMA to be stopped */
2228 3335 cntr = 0;
2229 3336 while (REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS) {
2230 3337 delay(drv_usectohz(10000)); /* mostly 10ms is 1 tick */
2231 3338 cntr++;
2232 3339 /* 3 sec should be more than enough */
2233 3340 if (cntr == 300)
2234 3341 return (QLT_DMA_STUCK);
2235 3342 }
2236 3343
3344 + /* need to ensure no one accesses the hw during the reset 100us */
3345 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
3346 + mutex_enter(&qlt->mbox_lock);
3347 + if (qlt->qlt_mq_enabled == 1) {
3348 + int i;
3349 +
3350 + for (i = 1; i < qlt->qlt_queue_cnt; i++) {
3351 + mutex_enter(&qlt->mq_req[i].mq_lock);
3352 + }
3353 + }
3354 + mutex_enter(&qlt->mq_req[0].mq_lock);
3355 + /*
3356 +		 * We need to give time for other threads to finish their
3357 +		 * interrupts (or we need another lock).
3358 + */
3359 + drv_usecwait(40);
3360 + }
3361 +
2237 3362 /* Reset the Chip */
2238 3363 REG_WR32(qlt, REG_CTRL_STATUS,
2239 3364 DMA_SHUTDOWN_CTRL | PCI_X_XFER_CTRL | CHIP_SOFT_RESET);
2240 3365
2241 3366 qlt->qlt_link_up = 0;
2242 3367
2243 3368 drv_usecwait(100);
2244 3369
3370 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
3371 + mutex_exit(&qlt->mq_req[0].mq_lock);
3372 + if (qlt->qlt_mq_enabled == 1) {
3373 + int i;
3374 +
3375 + for (i = 1; i < qlt->qlt_queue_cnt; i++) {
3376 + mutex_exit(&qlt->mq_req[i].mq_lock);
3377 + }
3378 + }
3379 + mutex_exit(&qlt->mbox_lock);
3380 + }
3381 +
2245 3382 /* Wait for ROM firmware to initialize (0x0000) in mailbox 0 */
2246 3383 cntr = 0;
2247 3384 while (REG_RD16(qlt, REG_MBOX(0)) != 0) {
2248 3385 delay(drv_usectohz(10000));
2249 3386 cntr++;
2250 3387 /* 3 sec should be more than enough */
2251 3388 if (cntr == 300)
2252 3389 return (QLT_ROM_STUCK);
2253 3390 }
2254 3391 /* Disable Interrupts (Probably not needed) */
2255 3392 REG_WR32(qlt, REG_INTR_CTRL, 0);
2256 3393
2257 3394 return (QLT_SUCCESS);
2258 3395 }
3396 +
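Reviewer note: both the DMA shutdown and the ROM firmware handshake in qlt_reset_chip() use the same bounded-poll shape: check a status bit every 10 ms and give up after 300 tries (about 3 seconds). A self-contained sketch of that pattern, with a simulated register read standing in for REG_RD32():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define DMA_ACTIVE_STATUS       0x20u   /* illustrative bit value */

/* Simulated CTRL/STATUS register: DMA stays busy for the first 5 reads. */
static uint32_t
fake_read_ctrl_status(void)
{
        static int polls;

        return ((polls++ < 5) ? DMA_ACTIVE_STATUS : 0u);
}

/*
 * Bounded poll mirroring qlt_reset_chip(): re-check every 10 ms and give
 * up after 300 tries (~3 seconds).
 */
static bool
wait_dma_idle(void)
{
        int cntr;

        for (cntr = 0; cntr < 300; cntr++) {
                if ((fake_read_ctrl_status() & DMA_ACTIVE_STATUS) == 0)
                        return (true);
                (void) usleep(10000);
        }
        return (false);         /* the QLT_DMA_STUCK case */
}

int
main(void)
{
        (void) printf("DMA idle: %d\n", wait_dma_idle());
        return (0);
}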
2259 3397 /*
2260 3398 * - Should not be called from Interrupt.
2261 3399 * - A very hardware specific function. Does not touch driver state.
2262 3400 * - Assumes that interrupts are disabled or not there.
2263 3401 * - Expects that the caller makes sure that all activity has stopped
2264 3402 * and its ok now to go ahead and reset the chip. Also the caller
2265 3403 * takes care of post reset damage control.
2266 3404 * - called by initialize adapter() and dump_fw(for reset only).
2267 3405 * - During attach() nothing much is happening and during initialize_adapter()
2268 3406 * the function (caller) does all the housekeeping so that this function
2269 3407 * can execute in peace.
2270 3408 * - Returns 0 on success.
2271 3409 */
2272 3410 static fct_status_t
2273 3411 qlt_download_fw(qlt_state_t *qlt)
2274 3412 {
2275 3413 uint32_t start_addr;
2276 3414 fct_status_t ret;
2277 3415
2278 3416 EL(qlt, "initiated\n");
2279 3417
2280 3418 (void) qlt_reset_chip(qlt);
2281 3419
2282 3420 if (qlt->qlt_81xx_chip) {
2283 3421 qlt_mps_reset(qlt);
2284 3422 }
[ 16 lines elided ]
2285 3423
2286 3424 /* Load the two segments */
2287 3425 if (qlt->fw_code01 != NULL) {
2288 3426 ret = qlt_load_risc_ram(qlt, qlt->fw_code01, qlt->fw_length01,
2289 3427 qlt->fw_addr01);
2290 3428 if (ret == QLT_SUCCESS) {
2291 3429 ret = qlt_load_risc_ram(qlt, qlt->fw_code02,
2292 3430 qlt->fw_length02, qlt->fw_addr02);
2293 3431 }
2294 3432 start_addr = qlt->fw_addr01;
3433 + } else if (qlt->qlt_27xx_chip) {
3434 + (void) qlt_27xx_get_dmp_template(qlt);
3435 + ret = qlt_load_risc_ram(qlt, fw2700_code01,
3436 + fw2700_length01, fw2700_addr01);
3437 + if (ret == QLT_SUCCESS) {
3438 + ret = qlt_load_risc_ram(qlt, fw2700_code02,
3439 + fw2700_length02, fw2700_addr02);
3440 + }
3441 + start_addr = fw2700_addr01;
3442 + } else if (qlt->qlt_83xx_chip) {
3443 + ret = qlt_load_risc_ram(qlt, fw8300fc_code01,
3444 + fw8300fc_length01, fw8300fc_addr01);
3445 + if (ret == QLT_SUCCESS) {
3446 + ret = qlt_load_risc_ram(qlt, fw8300fc_code02,
3447 + fw8300fc_length02, fw8300fc_addr02);
3448 + }
3449 + start_addr = fw8300fc_addr01;
2295 3450 } else if (qlt->qlt_81xx_chip) {
2296 3451 ret = qlt_load_risc_ram(qlt, fw8100_code01, fw8100_length01,
2297 3452 fw8100_addr01);
2298 3453 if (ret == QLT_SUCCESS) {
2299 3454 ret = qlt_load_risc_ram(qlt, fw8100_code02,
2300 3455 fw8100_length02, fw8100_addr02);
2301 3456 }
2302 3457 start_addr = fw8100_addr01;
2303 3458 } else if (qlt->qlt_25xx_chip) {
2304 3459 ret = qlt_load_risc_ram(qlt, fw2500_code01, fw2500_length01,
2305 3460 fw2500_addr01);
2306 3461 if (ret == QLT_SUCCESS) {
2307 3462 ret = qlt_load_risc_ram(qlt, fw2500_code02,
2308 3463 fw2500_length02, fw2500_addr02);
2309 3464 }
2310 3465 start_addr = fw2500_addr01;
2311 3466 } else {
2312 3467 ret = qlt_load_risc_ram(qlt, fw2400_code01, fw2400_length01,
2313 3468 fw2400_addr01);
2314 3469 if (ret == QLT_SUCCESS) {
2315 3470 ret = qlt_load_risc_ram(qlt, fw2400_code02,
2316 3471 fw2400_length02, fw2400_addr02);
2317 3472 }
2318 3473 start_addr = fw2400_addr01;
2319 3474 }
2320 3475 if (ret != QLT_SUCCESS) {
2321 3476 EL(qlt, "qlt_load_risc_ram status=%llxh\n", ret);
2322 3477 return (ret);
2323 3478 }
2324 3479
2325 3480 /* Verify Checksum */
2326 3481 REG_WR16(qlt, REG_MBOX(0), MBC_VERIFY_CHECKSUM);
2327 3482 REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
2328 3483 REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
2329 3484 ret = qlt_raw_mailbox_command(qlt);
2330 3485 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
[ 26 lines elided ]
2331 3486 if (ret != QLT_SUCCESS) {
2332 3487 EL(qlt, "qlt_raw_mailbox_command=7h status=%llxh\n", ret);
2333 3488 return (ret);
2334 3489 }
2335 3490
2336 3491 /* Execute firmware */
2337 3492 REG_WR16(qlt, REG_MBOX(0), MBC_EXECUTE_FIRMWARE);
2338 3493 REG_WR16(qlt, REG_MBOX(1), MSW(start_addr));
2339 3494 REG_WR16(qlt, REG_MBOX(2), LSW(start_addr));
2340 3495 REG_WR16(qlt, REG_MBOX(3), 0);
2341 - REG_WR16(qlt, REG_MBOX(4), 1); /* 25xx enable additional credits */
3496 +#ifdef EXTRA_CREDIT
3497 + /* enable extra credits (reduces available buffers) */
3498 + if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
3499 + (qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
3500 + REG_WR16(qlt, REG_MBOX(4), 1);
3501 + } else {
3502 + REG_WR16(qlt, REG_MBOX(4), 0);
3503 + }
3504 +#else
3505 + REG_WR16(qlt, REG_MBOX(4), 0);
3506 +#endif
2342 3507 ret = qlt_raw_mailbox_command(qlt);
2343 3508 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2344 3509 if (ret != QLT_SUCCESS) {
2345 3510 EL(qlt, "qlt_raw_mailbox_command=2h status=%llxh\n", ret);
2346 3511 return (ret);
3512 + } else {
3513 + if (qlt->qlt_27xx_chip) {
3514 + qlt->qlt_27xx_speed = (uint32_t)
3515 + (REG_RD16(qlt, REG_MBOX(3)) << 16 |
3516 + REG_RD16(qlt, REG_MBOX(2)));
3517 +
3518 + }
2347 3519 }
2348 3520
2349 3521 /* Get revisions (About Firmware) */
2350 3522 REG_WR16(qlt, REG_MBOX(0), MBC_ABOUT_FIRMWARE);
2351 3523 ret = qlt_raw_mailbox_command(qlt);
2352 3524 qlt->fw_major = REG_RD16(qlt, REG_MBOX(1));
2353 3525 qlt->fw_minor = REG_RD16(qlt, REG_MBOX(2));
2354 3526 qlt->fw_subminor = REG_RD16(qlt, REG_MBOX(3));
2355 3527 qlt->fw_endaddrlo = REG_RD16(qlt, REG_MBOX(4));
2356 3528 qlt->fw_endaddrhi = REG_RD16(qlt, REG_MBOX(5));
2357 3529 qlt->fw_attr = REG_RD16(qlt, REG_MBOX(6));
2358 3530 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2359 3531 if (ret != QLT_SUCCESS) {
2360 3532 EL(qlt, "qlt_raw_mailbox_command=8h status=%llxh\n", ret);
2361 3533 return (ret);
2362 3534 }
2363 3535
3536 + if (qlt->qlt_27xx_chip) {
3537 + qlt->fw_ext_memory_end = SHORT_TO_LONG(
3538 + REG_RD16(qlt, REG_MBOX(4)),
3539 + REG_RD16(qlt, REG_MBOX(5)));
3540 + qlt->fw_shared_ram_start = SHORT_TO_LONG(
3541 + REG_RD16(qlt, REG_MBOX(18)),
3542 + REG_RD16(qlt, REG_MBOX(19)));
3543 + qlt->fw_shared_ram_end = SHORT_TO_LONG(
3544 + REG_RD16(qlt, REG_MBOX(20)),
3545 + REG_RD16(qlt, REG_MBOX(21)));
3546 + qlt->fw_ddr_ram_start = SHORT_TO_LONG(
3547 + REG_RD16(qlt, REG_MBOX(22)),
3548 + REG_RD16(qlt, REG_MBOX(23)));
3549 + qlt->fw_ddr_ram_end = SHORT_TO_LONG(
3550 + REG_RD16(qlt, REG_MBOX(24)),
3551 + REG_RD16(qlt, REG_MBOX(25)));
3552 + }
3553 +
3554 +
2364 3555 return (QLT_SUCCESS);
2365 3556 }
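Reviewer note: the 27xx additions reassemble 32-bit firmware memory boundaries from pairs of 16-bit mailbox registers via SHORT_TO_LONG(). A tiny sketch of that combining step; the low-word-first argument order shown here is an assumption, since the macro body is not part of this diff:

#include <stdint.h>
#include <stdio.h>

/* Assumed ordering: low 16 bits first, high 16 bits second. */
#define SHORT_TO_LONG(lo, hi)   ((uint32_t)(lo) | ((uint32_t)(hi) << 16))

int
main(void)
{
        uint16_t mbox4 = 0x1234, mbox5 = 0xabcd;

        /* e.g. fw_ext_memory_end rebuilt from mailboxes 4 and 5 */
        (void) printf("0x%08x\n", SHORT_TO_LONG(mbox4, mbox5));  /* 0xabcd1234 */
        return (0);
}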
2366 3557
2367 3558 /*
2368 3559 * Used only from qlt_download_fw().
2369 3560 */
2370 3561 static fct_status_t
2371 3562 qlt_load_risc_ram(qlt_state_t *qlt, uint32_t *host_addr,
2372 - uint32_t word_count, uint32_t risc_addr)
3563 + uint32_t word_count, uint32_t risc_addr)
2373 3564 {
2374 3565 uint32_t words_sent = 0;
2375 3566 uint32_t words_being_sent;
2376 3567 uint32_t *cur_host_addr;
2377 3568 uint32_t cur_risc_addr;
2378 3569 uint64_t da;
2379 3570 fct_status_t ret;
2380 3571
2381 3572 while (words_sent < word_count) {
2382 3573 cur_host_addr = &(host_addr[words_sent]);
2383 3574 cur_risc_addr = risc_addr + (words_sent << 2);
2384 3575 words_being_sent = min(word_count - words_sent,
2385 3576 TOTAL_DMA_MEM_SIZE >> 2);
2386 3577 ddi_rep_put32(qlt->queue_mem_acc_handle, cur_host_addr,
2387 3578 (uint32_t *)qlt->queue_mem_ptr, words_being_sent,
2388 3579 DDI_DEV_AUTOINCR);
2389 3580 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, 0,
2390 3581 words_being_sent << 2, DDI_DMA_SYNC_FORDEV);
2391 3582 da = qlt->queue_mem_cookie.dmac_laddress;
2392 3583 REG_WR16(qlt, REG_MBOX(0), MBC_LOAD_RAM_EXTENDED);
2393 3584 REG_WR16(qlt, REG_MBOX(1), LSW(risc_addr));
2394 3585 REG_WR16(qlt, REG_MBOX(8), MSW(cur_risc_addr));
2395 3586 REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
2396 3587 REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
2397 3588 REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
2398 3589 REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
2399 3590 REG_WR16(qlt, REG_MBOX(5), LSW(words_being_sent));
[ 17 lines elided ]
2400 3591 REG_WR16(qlt, REG_MBOX(4), MSW(words_being_sent));
2401 3592 ret = qlt_raw_mailbox_command(qlt);
2402 3593 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2403 3594 if (ret != QLT_SUCCESS) {
2404 3595 EL(qlt, "qlt_raw_mailbox_command=0Bh status=%llxh\n",
2405 3596 ret);
2406 3597 return (ret);
2407 3598 }
2408 3599 words_sent += words_being_sent;
2409 3600 }
3601 + EL(qlt, "qlt_raw_mailbox_command=0Bh, LOAD_RAM_EXTENDED complete\n");
2410 3602 return (QLT_SUCCESS);
2411 3603 }
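Reviewer note: qlt_load_risc_ram() streams the firmware image through the shared DMA buffer in chunks of at most TOTAL_DMA_MEM_SIZE >> 2 words, advancing the RISC address by words_sent << 2 bytes on each pass. A sketch of just that chunking arithmetic (CHUNK_WORDS is a hypothetical stand-in for the driver's limit):

#include <stdint.h>
#include <stdio.h>

#define CHUNK_WORDS     1024u   /* hypothetical stand-in for TOTAL_DMA_MEM_SIZE >> 2 */

static uint32_t
min_u32(uint32_t a, uint32_t b)
{
        return ((a < b) ? a : b);
}

/*
 * Walk a firmware image in DMA-buffer-sized chunks, the way
 * qlt_load_risc_ram() advances words_sent and cur_risc_addr.
 */
static void
plan_load(uint32_t word_count, uint32_t risc_addr)
{
        uint32_t words_sent = 0;

        while (words_sent < word_count) {
                uint32_t n = min_u32(word_count - words_sent, CHUNK_WORDS);

                (void) printf("copy %u words to RISC addr 0x%x\n",
                    n, risc_addr + (words_sent << 2));
                words_sent += n;
        }
}

int
main(void)
{
        plan_load(2500, 0x100000);      /* 1024 + 1024 + 452 words */
        return (0);
}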
2412 3604
2413 3605 /*
2414 3606 * Not used during normal operation. Only during driver init.
2415 3607 * Assumes that interrupts are disabled and mailboxes are loaded.
2416 3608    * Just triggers the mailbox command and waits for the completion.
2417 3609    * Also expects that there is nothing else going on and we will only
2418 3610 * get back a mailbox completion from firmware.
2419 3611 * ---DOES NOT CLEAR INTERRUPT---
2420 3612 * Used only from the code path originating from
2421 - * qlt_reset_chip_and_download_fw()
3613 + * qlt_reset_chip()
2422 3614 */
2423 3615 static fct_status_t
2424 3616 qlt_raw_mailbox_command(qlt_state_t *qlt)
2425 3617 {
2426 3618 int cntr = 0;
2427 3619 uint32_t status;
3620 + fct_local_port_t *port = qlt->qlt_port;
2428 3621
2429 3622 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
3623 +retry_raw:;
2430 3624 while ((REG_RD32(qlt, REG_INTR_STATUS) & RISC_PCI_INTR_REQUEST) == 0) {
2431 3625 cntr++;
2432 - if (cntr == 100) {
3626 + if (cntr == 3000) {
3627 + EL(qlt, "polling exhausted, dump fw now..\n");
3628 + (void) qlt_firmware_dump(port,
3629 + (stmf_state_change_info_t *)NULL);
2433 3630 return (QLT_MAILBOX_STUCK);
2434 3631 }
2435 3632 delay(drv_usectohz(10000));
2436 3633 }
2437 3634 status = (REG_RD32(qlt, REG_RISC_STATUS) & FW_INTR_STATUS_MASK);
2438 3635
2439 3636 if ((status == ROM_MBX_CMD_SUCCESSFUL) ||
2440 3637 (status == ROM_MBX_CMD_NOT_SUCCESSFUL) ||
2441 3638 (status == MBX_CMD_SUCCESSFUL) ||
2442 3639 (status == MBX_CMD_NOT_SUCCESSFUL)) {
2443 3640 uint16_t mbox0 = REG_RD16(qlt, REG_MBOX(0));
2444 3641 if (mbox0 == QLT_MBX_CMD_SUCCESS) {
2445 3642 return (QLT_SUCCESS);
2446 3643 } else {
3644 + EL(qlt, "mbx cmd failed, dump fw now..\n");
3645 + (void) qlt_firmware_dump(port,
3646 + (stmf_state_change_info_t *)NULL);
2447 3647 return (QLT_MBOX_FAILED | mbox0);
2448 3648 }
3649 + } else if (status == ASYNC_EVENT) {
3650 + uint16_t mbox0, mbox1, mbox2, mbox3;
3651 + uint16_t mbox4, mbox5, mbox6, mbox7;
3652 +
3653 + mbox0 = REG_RD16(qlt, REG_MBOX(0));
3654 + mbox1 = REG_RD16(qlt, REG_MBOX(1));
3655 + mbox2 = REG_RD16(qlt, REG_MBOX(2));
3656 + mbox3 = REG_RD16(qlt, REG_MBOX(3));
3657 + mbox4 = REG_RD16(qlt, REG_MBOX(4));
3658 + mbox5 = REG_RD16(qlt, REG_MBOX(5));
3659 + mbox6 = REG_RD16(qlt, REG_MBOX(6));
3660 + mbox7 = REG_RD16(qlt, REG_MBOX(7));
3661 +
3662 + cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x"
3663 + "mb3=%x mb4=%x mb5=%x mb6=%x mb7=%x",
3664 + qlt->instance, mbox0, mbox1, mbox2, mbox3,
3665 + mbox4, mbox5, mbox6, mbox7);
3666 + if (mbox0 == 0x8002) {
3667 + (void) qlt_firmware_dump(port,
3668 + (stmf_state_change_info_t *)NULL);
3669 + return (QLT_UNEXPECTED_RESPONSE);
3670 + } else {
3671 + REG_WR32(qlt,
3672 + REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3673 + cntr = 0;
3674 + goto retry_raw;
3675 + }
2449 3676 }
3677 +
2450 3678 /* This is unexpected, dump a message */
2451 3679 cmn_err(CE_WARN, "qlt(%d): Unexpect intr status %llx",
2452 3680 ddi_get_instance(qlt->dip), (unsigned long long)status);
2453 3681 return (QLT_UNEXPECTED_RESPONSE);
2454 3682 }
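Reviewer note: the retry_raw path added above distinguishes three outcomes of the polled interrupt: a mailbox completion judged by mailbox 0, a fatal async event (0x8002) that triggers a firmware dump, and any other async event, which is cleared and re-polled. A compact sketch of that decision with placeholder status constants (the real ones are in qlt.h):

#include <stdint.h>
#include <stdio.h>

/* Placeholder status codes; the real ones are defined in qlt.h. */
#define STATUS_MBX_COMPLETE     0x01u
#define STATUS_ASYNC_EVENT      0x02u
#define MBX_SUCCESS_CODE        0x4000u

enum raw_mbx_action { MBX_DONE_OK, MBX_DONE_FAILED, MBX_RETRY, MBX_FATAL };

/*
 * Same decision shape as the new retry_raw path: completions are judged
 * by mailbox 0, async event 0x8002 is fatal (dump firmware), any other
 * async event is cleared and the poll restarted.
 */
static enum raw_mbx_action
classify(uint32_t status, uint16_t mbox0)
{
        if (status == STATUS_MBX_COMPLETE)
                return ((mbox0 == MBX_SUCCESS_CODE) ?
                    MBX_DONE_OK : MBX_DONE_FAILED);
        if (status == STATUS_ASYNC_EVENT)
                return ((mbox0 == 0x8002) ? MBX_FATAL : MBX_RETRY);
        return (MBX_FATAL);     /* unexpected response */
}

int
main(void)
{
        (void) printf("%d\n", classify(STATUS_MBX_COMPLETE, 0x4000));   /* 0 */
        (void) printf("%d\n", classify(STATUS_ASYNC_EVENT, 0x8014));    /* 2 */
        return (0);
}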
2455 3683
2456 3684 static mbox_cmd_t *
2457 3685 qlt_alloc_mailbox_command(qlt_state_t *qlt, uint32_t dma_size)
2458 3686 {
2459 3687 mbox_cmd_t *mcp;
2460 3688
2461 3689 mcp = (mbox_cmd_t *)kmem_zalloc(sizeof (mbox_cmd_t), KM_SLEEP);
2462 3690 if (dma_size) {
2463 3691 qlt_dmem_bctl_t *bctl;
2464 3692 uint64_t da;
2465 3693
2466 3694 mcp->dbuf = qlt_i_dmem_alloc(qlt, dma_size, &dma_size, 0);
2467 3695 if (mcp->dbuf == NULL) {
2468 3696 kmem_free(mcp, sizeof (*mcp));
2469 3697 return (NULL);
2470 3698 }
2471 3699 mcp->dbuf->db_data_size = dma_size;
2472 3700 ASSERT(mcp->dbuf->db_sglist_length == 1);
2473 3701
2474 3702 bctl = (qlt_dmem_bctl_t *)mcp->dbuf->db_port_private;
2475 3703 da = bctl->bctl_dev_addr;
2476 3704 /* This is the most common initialization of dma ptrs */
2477 3705 mcp->to_fw[3] = LSW(LSD(da));
2478 3706 mcp->to_fw[2] = MSW(LSD(da));
2479 3707 mcp->to_fw[7] = LSW(MSD(da));
2480 3708 mcp->to_fw[6] = MSW(MSD(da));
2481 3709 mcp->to_fw_mask |= BIT_2 | BIT_3 | BIT_7 | BIT_6;
2482 3710 }
2483 3711 mcp->to_fw_mask |= BIT_0;
2484 3712 mcp->from_fw_mask |= BIT_0;
2485 3713 return (mcp);
2486 3714 }
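Reviewer note: the 64-bit DMA address of the mailbox buffer is scattered across four 16-bit mailboxes (2, 3, 6, 7). A self-contained sketch of that split; the LSW/MSW/LSD/MSD definitions below are assumed to match the driver's helper macros:

#include <stdint.h>
#include <stdio.h>

/* Assumed to match the driver's word/doubleword helper macros. */
#define LSW(x)  ((uint16_t)((x) & 0xffff))
#define MSW(x)  ((uint16_t)(((x) >> 16) & 0xffff))
#define LSD(x)  ((uint32_t)((x) & 0xffffffffULL))
#define MSD(x)  ((uint32_t)(((x) >> 32) & 0xffffffffULL))

int
main(void)
{
        uint64_t da = 0x0000001234abcdefULL;    /* sample DMA address */
        uint16_t to_fw[8] = { 0 };

        /* Same placement as qlt_alloc_mailbox_command(). */
        to_fw[3] = LSW(LSD(da));
        to_fw[2] = MSW(LSD(da));
        to_fw[7] = LSW(MSD(da));
        to_fw[6] = MSW(MSD(da));

        (void) printf("mb2=%04x mb3=%04x mb6=%04x mb7=%04x\n",
            to_fw[2], to_fw[3], to_fw[6], to_fw[7]);
        return (0);
}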
2487 3715
2488 3716 void
2489 3717 qlt_free_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2490 3718 {
2491 3719 if (mcp->dbuf)
2492 3720 qlt_i_dmem_free(qlt, mcp->dbuf);
2493 3721 kmem_free(mcp, sizeof (*mcp));
[ 34 lines elided ]
2494 3722 }
2495 3723
2496 3724 /*
2497 3725 * This can sleep. Should never be called from interrupt context.
2498 3726 */
2499 3727 static fct_status_t
2500 3728 qlt_mailbox_command(qlt_state_t *qlt, mbox_cmd_t *mcp)
2501 3729 {
2502 3730 int retries;
2503 3731 int i;
2504 - char info[QLT_INFO_LEN];
3732 + char info[80];
2505 3733
2506 3734 if (curthread->t_flag & T_INTR_THREAD) {
2507 3735 ASSERT(0);
2508 3736 return (QLT_MBOX_FAILED);
2509 3737 }
2510 3738
3739 + EL(qlt, "mailbox:[0]=%xh [1]=%xh\n",
3740 + mcp->to_fw[0], mcp->to_fw[1]);
3741 +
2511 3742 mutex_enter(&qlt->mbox_lock);
2512 3743 /* See if mailboxes are still uninitialized */
2513 3744 if (qlt->mbox_io_state == MBOX_STATE_UNKNOWN) {
2514 3745 mutex_exit(&qlt->mbox_lock);
2515 3746 return (QLT_MBOX_NOT_INITIALIZED);
2516 3747 }
2517 3748
2518 3749 /* Wait to grab the mailboxes */
2519 3750 for (retries = 0; qlt->mbox_io_state != MBOX_STATE_READY;
2520 3751 retries++) {
2521 3752 cv_wait(&qlt->mbox_cv, &qlt->mbox_lock);
2522 3753 if ((retries > 5) ||
2523 3754 (qlt->mbox_io_state == MBOX_STATE_UNKNOWN)) {
2524 3755 mutex_exit(&qlt->mbox_lock);
2525 3756 return (QLT_MBOX_BUSY);
2526 3757 }
2527 3758 }
2528 3759 /* Make sure we always ask for mailbox 0 */
2529 3760 mcp->from_fw_mask |= BIT_0;
2530 3761
2531 3762 /* Load mailboxes, set state and generate RISC interrupt */
2532 3763 qlt->mbox_io_state = MBOX_STATE_CMD_RUNNING;
2533 3764 qlt->mcp = mcp;
[ 13 lines elided ]
2534 3765 for (i = 0; i < MAX_MBOXES; i++) {
2535 3766 if (mcp->to_fw_mask & ((uint32_t)1 << i))
2536 3767 REG_WR16(qlt, REG_MBOX(i), mcp->to_fw[i]);
2537 3768 }
2538 3769 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_HOST_TO_RISC_INTR));
2539 3770
2540 3771 qlt_mbox_wait_loop:;
2541 3772 /* Wait for mailbox command completion */
2542 3773 if (cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock, ddi_get_lbolt()
2543 3774 + drv_usectohz(MBOX_TIMEOUT)) < 0) {
2544 - (void) snprintf(info, sizeof (info),
2545 - "qlt_mailbox_command: qlt-%p, "
3775 + (void) snprintf(info, 80, "qlt_mailbox_command: qlt-%p, "
2546 3776 "cmd-0x%02X timed out", (void *)qlt, qlt->mcp->to_fw[0]);
3777 + info[79] = 0;
2547 3778 qlt->mcp = NULL;
2548 3779 qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
2549 3780 mutex_exit(&qlt->mbox_lock);
2550 3781
2551 3782 /*
2552 3783 * XXX Throw HBA fatal error event
2553 3784 */
2554 3785 (void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
2555 3786 STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2556 3787 return (QLT_MBOX_TIMEOUT);
2557 3788 }
2558 3789 if (qlt->mbox_io_state == MBOX_STATE_CMD_RUNNING)
2559 3790 goto qlt_mbox_wait_loop;
2560 3791
2561 3792 qlt->mcp = NULL;
2562 3793
2563 3794 /* Make sure its a completion */
[ 7 lines elided ]
2564 3795 if (qlt->mbox_io_state != MBOX_STATE_CMD_DONE) {
2565 3796 ASSERT(qlt->mbox_io_state == MBOX_STATE_UNKNOWN);
2566 3797 mutex_exit(&qlt->mbox_lock);
2567 3798 return (QLT_MBOX_ABORTED);
2568 3799 }
2569 3800
2570 3801    	/* MBox command completed. Clear state, return based on mbox 0 */
2571 3802 /* Mailboxes are already loaded by interrupt routine */
2572 3803 qlt->mbox_io_state = MBOX_STATE_READY;
2573 3804 mutex_exit(&qlt->mbox_lock);
2574 - if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS)
3805 + if (mcp->from_fw[0] != QLT_MBX_CMD_SUCCESS) {
3806 + EL(qlt, "fw[0] = %xh\n", mcp->from_fw[0]);
3807 + if ((mcp->from_fw[0] != 0x4005) &&
3808 + (mcp->from_fw[1] != 0x7)) {
3809 + (void) qlt_firmware_dump(qlt->qlt_port,
3810 + (stmf_state_change_info_t *)NULL);
3811 + }
2575 3812 return (QLT_MBOX_FAILED | mcp->from_fw[0]);
3813 + }
2576 3814
2577 3815 return (QLT_SUCCESS);
2578 3816 }
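Reviewer note: the mailbox path hands the command to the interrupt handler and then sleeps on mbox_cv until the state leaves MBOX_STATE_CMD_RUNNING or the timeout fires. A userland analogue of that timed wait using POSIX condition variables; the enum values simply mirror the driver's MBOX_STATE_* names:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Mirrors the driver's MBOX_STATE_* values. */
enum mbox_state { MBOX_READY, MBOX_CMD_RUNNING, MBOX_CMD_DONE };

static pthread_mutex_t mbox_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t mbox_cv = PTHREAD_COND_INITIALIZER;
static enum mbox_state mbox_state = MBOX_CMD_RUNNING;

/*
 * Userland analogue of the cv_timedwait() loop in qlt_mailbox_command():
 * wait up to 'secs' seconds for the interrupt path to move the state to
 * MBOX_CMD_DONE. Returns 0 on completion, -1 on timeout.
 */
static int
wait_mbox_done(int secs)
{
        struct timespec ts;
        int rc = 0;

        (void) clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += secs;

        (void) pthread_mutex_lock(&mbox_lock);
        while (mbox_state == MBOX_CMD_RUNNING && rc == 0)
                rc = pthread_cond_timedwait(&mbox_cv, &mbox_lock, &ts);
        (void) pthread_mutex_unlock(&mbox_lock);
        return ((rc == 0) ? 0 : -1);
}

int
main(void)
{
        /* Nothing signals mbox_cv here, so this times out after 1 second. */
        (void) printf("%d\n", wait_mbox_done(1));
        return (0);
}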
2579 3817
3818 +clock_t qlt_next_invalid_msg = 0;
3819 +int qlt_invalid_idx_cnt = 0;
3820 +
2580 3821 /*
2581 3822 * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
2582 3823 */
2583 3824 /* ARGSUSED */
2584 3825 static uint_t
3826 +qlt_msix_resp_handler(caddr_t arg, caddr_t arg2)
3827 +{
3828 + qlt_state_t *qlt = (qlt_state_t *)arg;
3829 + uint32_t risc_status;
3830 + uint16_t qi = 0;
3831 + clock_t now;
3832 +
3833 + risc_status = REG_RD32(qlt, REG_RISC_STATUS);
3834 + if (qlt->qlt_mq_enabled) {
3835 + /* XXX: */
3836 + /* qi = (uint16_t)((unsigned long)arg2); */
3837 + qi = (uint16_t)(risc_status >> 16);
3838 + if (qi >= MQ_MAX_QUEUES) {
3839 + /*
3840 + * Two customers have reported panics in the call to
3841 + * mutex_enter below. Analysis showed the address passed
3842 + * in could only occur if 'qi' had a value of 0x4000.
3843 +			 * We'll ignore the upper bits and see if an index that is
3844 +			 * at least within the valid range produces some
3845 +			 * sane results.
3846 + */
3847 + now = ddi_get_lbolt();
3848 + if (now > qlt_next_invalid_msg) {
3849 + /*
3850 + * Since this issue has never been seen in the
3851 +				 * lab, it's unknown whether, once this bit gets set,
3852 +				 * it remains set until the next hardware reset.
3853 + * If so, we don't want to flood the message
3854 + * buffer or make it difficult to reboot the
3855 + * system.
3856 + */
3857 + qlt_next_invalid_msg = now +
3858 + drv_usectohz(MICROSEC * 10);
3859 + cmn_err(CE_NOTE, "QLT: hardware reporting "
3860 + "invalid index: 0x%x", qi);
3861 + }
3862 + qi &= MQ_MAX_QUEUES_MASK;
3863 + qlt_invalid_idx_cnt++;
3864 + }
3865 +
3866 + mutex_enter(&qlt->mq_resp[qi].mq_lock);
3867 + if (!qlt->qlt_intr_enabled) {
3868 + /*
3869 + * No further interrupt since intr disabled.
3870 + */
3871 + REG_WR32(qlt, REG_HCCR,
3872 + HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3873 + mutex_exit(&qlt->mq_resp[qi].mq_lock);
3874 + return (DDI_INTR_UNCLAIMED);
3875 + }
3876 +
3877 + qlt->mq_resp[qi].mq_ndx_from_fw =
3878 + (uint16_t)MQBAR_RD32(qlt,
3879 + (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
3880 +
3881 + qlt_handle_resp_queue_update(qlt, qi);
3882 + mutex_exit(&qlt->mq_resp[qi].mq_lock);
3883 + } else {
3884 + mutex_enter(&qlt->intr_lock);
3885 + if (!qlt->qlt_intr_enabled) {
3886 + /*
3887 + * No further interrupt since intr disabled.
3888 + */
3889 + REG_WR32(qlt, REG_HCCR,
3890 + HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3891 + mutex_exit(&qlt->intr_lock);
3892 + return (DDI_INTR_UNCLAIMED);
3893 + }
3894 +
3895 + qlt->atio_ndx_from_fw =
3896 + (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
3897 + qlt_handle_atio_queue_update(qlt);
3898 +
3899 + qlt->mq_resp[qi].mq_ndx_from_fw = risc_status >> 16;
3900 + qlt_handle_resp_queue_update(qlt, qi);
3901 + mutex_exit(&qlt->intr_lock);
3902 + }
3903 +
3904 + if (risc_status & BIT_15) {
3905 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3906 + }
3907 + return (DDI_INTR_CLAIMED);
3908 +}
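Reviewer note: the invalid-index warning above is throttled to at most one message every 10 seconds via qlt_next_invalid_msg and lbolt arithmetic. A self-contained sketch of the same throttling idea, using wall-clock time instead of lbolt ticks:

#include <stdio.h>
#include <time.h>

/*
 * Emit at most one warning every 10 seconds, the same throttling idea as
 * qlt_next_invalid_msg above, but based on time(2) rather than lbolt.
 */
static void
warn_invalid_index(unsigned int qi)
{
        static time_t next_msg;
        time_t now = time(NULL);

        if (now >= next_msg) {
                next_msg = now + 10;
                (void) fprintf(stderr,
                    "invalid response queue index: 0x%x\n", qi);
        }
}

int
main(void)
{
        warn_invalid_index(0x4000);
        warn_invalid_index(0x4000);     /* suppressed by the throttle */
        return (0);
}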
3909 +
3910 +
3911 +/*
3912 + * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
3913 + */
3914 +/* ARGSUSED */
3915 +static uint_t
3916 +qlt_msix_default_handler(caddr_t arg, caddr_t arg2)
3917 +{
3918 + qlt_state_t *qlt = (qlt_state_t *)arg;
3919 + uint32_t risc_status, intr_type;
3920 + int i;
3921 + char info[80];
3922 +
3923 + risc_status = REG_RD32(qlt, REG_RISC_STATUS);
3924 + if (!mutex_tryenter(&qlt->intr_lock)) {
3925 + /*
3926 + * Normally we will always get this lock. If tryenter is
3927 + * failing then it means that driver is trying to do
3928 + * some cleanup and is masking the intr but some intr
3929 + * has sneaked in between. See if our device has generated
3930 + * this intr. If so then wait a bit and return claimed.
3931 + * If not then return claimed if this is the 1st instance
3932 +		 * of an interrupt after driver has grabbed the lock.
3933 + */
3934 + if ((risc_status & BIT_15) == 0) {
3935 + return (DDI_INTR_UNCLAIMED);
3936 + } else {
3937 + /* try again */
3938 + drv_usecwait(10);
3939 + if (!mutex_tryenter(&qlt->intr_lock)) {
3940 + /* really bad! */
3941 + return (DDI_INTR_CLAIMED);
3942 + }
3943 + }
3944 + }
3945 + if (((risc_status & BIT_15) == 0) ||
3946 + (qlt->qlt_intr_enabled == 0)) {
3947 + /*
3948 +		 * This might be a pure coincidence that we are operating
3949 +		 * in an interrupt-disabled mode and another device
3950 + * sharing the interrupt line has generated an interrupt
3951 + * while an interrupt from our device might be pending. Just
3952 + * ignore it and let the code handling the interrupt
3953 + * disabled mode handle it.
3954 + */
3955 + mutex_exit(&qlt->intr_lock);
3956 + return (DDI_INTR_UNCLAIMED);
3957 + }
3958 +
3959 + /* REG_WR32(qlt, REG_INTR_CTRL, 0); */
3960 +
3961 + /* check for risc pause - unlikely */
3962 + if (risc_status & BIT_8) {
3963 + uint32_t hccsr;
3964 +
3965 + hccsr = REG_RD32(qlt, REG_HCCR);
3966 + EL(qlt, "Risc Pause status=%xh hccsr=%x\n",
3967 + risc_status, hccsr);
3968 + cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x hccsr:%x",
3969 + qlt->instance, risc_status, hccsr);
3970 + (void) snprintf(info, 80, "Risc Pause %08x hccsr:%x",
3971 + risc_status, hccsr);
3972 + info[79] = 0;
3973 + (void) fct_port_shutdown(qlt->qlt_port,
3974 + STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
3975 + STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
3976 + }
3977 +
3978 + /* check most likely types first */
3979 + intr_type = risc_status & 0xff;
3980 + if (intr_type == 0x1D) {
3981 + qlt->atio_ndx_from_fw =
3982 + (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
3983 + qlt_handle_atio_queue_update(qlt);
3984 + qlt->mq_resp[0].mq_ndx_from_fw = risc_status >> 16;
3985 + qlt_handle_resp_queue_update(qlt, 0);
3986 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3987 + } else if (intr_type == 0x1C) {
3988 + qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
3989 + qlt_handle_atio_queue_update(qlt);
3990 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3991 + } else if (intr_type == 0x1E) {
3992 + /* 83xx */
3993 + qlt->atio_ndx_from_fw =
3994 + (uint16_t)MQBAR_RD32(qlt, MQBAR_ATIO_IN);
3995 + qlt_handle_atio_queue_update(qlt);
3996 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
3997 + } else if (intr_type == 0x13) {
3998 + uint16_t qi;
3999 +
4000 + qlt->atio_ndx_from_fw =
4001 + (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
4002 + qlt_handle_atio_queue_update(qlt);
4003 +
4004 + if (qlt->qlt_mq_enabled) {
4005 + qi = (uint16_t)(risc_status >> 16);
4006 + qlt->mq_resp[qi].mq_ndx_from_fw =
4007 + (uint16_t)MQBAR_RD32(qlt,
4008 + (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
4009 + /* FIX THIS to be optional */
4010 + REG_WR32(qlt, REG_HCCR,
4011 + HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4012 + } else {
4013 + qi = 0;
4014 + REG_WR32(qlt, REG_HCCR,
4015 + HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4016 + qlt->mq_resp[qi].mq_ndx_from_fw = risc_status >> 16;
4017 + }
4018 + qlt_handle_resp_queue_update(qlt, qi);
4019 +
4020 + } else if (intr_type == 0x14) {
4021 + uint16_t qi = (uint16_t)(risc_status >> 16);
4022 +
4023 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
4024 + qlt->atio_ndx_from_fw =
4025 + (uint16_t)MQBAR_RD32(qlt, MQBAR_ATIO_IN);
4026 + } else {
4027 + qlt->atio_ndx_from_fw = (uint16_t)
4028 + REG_RD32(qlt, REG_ATIO_IN_PTR);
4029 + }
4030 + qlt_handle_atio_queue_update(qlt);
4031 +
4032 + qlt->mq_resp[qi].mq_ndx_from_fw =
4033 + (uint16_t)MQBAR_RD32(qlt,
4034 + (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
4035 + qlt_handle_resp_queue_update(qlt, qi);
4036 +
4037 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4038 +
4039 + } else if (intr_type == 0x12) {
4040 + uint16_t code, mbox1, mbox2, mbox3, mbox4, mbox5, mbox6;
4041 +
4042 + REG_WR32(qlt, REG_INTR_CTRL, 0);
4043 +
4044 + code = (uint16_t)(risc_status >> 16);
4045 + mbox1 = REG_RD16(qlt, REG_MBOX(1));
4046 + mbox2 = REG_RD16(qlt, REG_MBOX(2));
4047 + mbox3 = REG_RD16(qlt, REG_MBOX(3));
4048 + mbox4 = REG_RD16(qlt, REG_MBOX(4));
4049 + mbox5 = REG_RD16(qlt, REG_MBOX(5));
4050 + mbox6 = REG_RD16(qlt, REG_MBOX(6));
4051 +
4052 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4053 + EL(qlt, "Async event: %x mb1=%x mb2=%x,"
4054 + " mb3=%x, mb4=%x, mb5=%x, mb6=%x", code, mbox1, mbox2,
4055 + mbox3, mbox4, mbox5, mbox6);
4056 + stmf_trace(qlt->qlt_port_alias, "Async event: %x mb1=%x mb2=%x,"
4057 + " mb3=%x, mb4=%x, mb5=%x, mb6=%x", code, mbox1, mbox2,
4058 + mbox3, mbox4, mbox5, mbox6);
4059 + cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
4060 + " mb3=%x, mb4=%x, mb5=%x, mb6=%x", qlt->instance, code,
4061 + mbox1, mbox2, mbox3, mbox4, mbox5, mbox6);
4062 +
4063 + if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
4064 + if (qlt->qlt_link_up) {
4065 + fct_handle_event(qlt->qlt_port,
4066 + FCT_EVENT_LINK_RESET, 0, 0);
4067 + }
4068 + } else if (code == 0x8012) {
4069 + qlt->qlt_link_up = 0;
4070 + fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
4071 + 0, 0);
4072 + } else if (code == 0x8014) {
4073 + if (mbox1 == 0xffff) { /* global event */
4074 + uint8_t reason_code;
4075 +
4076 + reason_code = (uint8_t)(mbox3 >> 8);
4077 +
4078 + switch (reason_code) {
4079 + case 0x1d: /* FIP Clear Virtual Link received */
4080 + case 0x1a: /* received FLOGO */
4081 + case 0x1c: /* FCF configuration changed */
4082 + case 0x1e: /* FKA timeout */
4083 + if (mbox2 == 7) {
4084 + qlt->qlt_link_up = 0;
4085 + fct_handle_event(qlt->qlt_port,
4086 + FCT_EVENT_LINK_DOWN, 0, 0);
4087 + }
4088 + break;
4089 + case 0x12:
4090 + if (mbox2 == 4) {
4091 + qlt->qlt_link_up = 1;
4092 + fct_handle_event(qlt->qlt_port,
4093 + FCT_EVENT_LINK_UP, 0, 0);
4094 + stmf_trace(qlt->qlt_port_alias,
4095 + "SNS login and SCR done");
4096 + }
4097 + break;
4098 + case 0:
4099 + if ((mbox2 == 6) &&
4100 + (!qlt->qlt_link_up)) {
4101 + qlt->qlt_link_up = 1;
4102 + fct_handle_event(qlt->qlt_port,
4103 + FCT_EVENT_LINK_UP, 0, 0);
4104 + stmf_trace(qlt->qlt_port_alias,
4105 + "Link reinitialised");
4106 + }
4107 + break;
4108 + default:
4109 + stmf_trace(qlt->qlt_port_alias,
4110 + "AEN ignored");
4111 + break;
4112 + }
4113 + }
4114 + } else if (code == 0x8011) {
4115 + switch (mbox1) {
4116 + case 0: qlt->link_speed = PORT_SPEED_1G;
4117 + break;
4118 + case 1: qlt->link_speed = PORT_SPEED_2G;
4119 + break;
4120 + case 3: qlt->link_speed = PORT_SPEED_4G;
4121 + break;
4122 + case 4: qlt->link_speed = PORT_SPEED_8G;
4123 + break;
4124 + case 5: qlt->link_speed = PORT_SPEED_16G;
4125 + break;
4126 + case 0x13: qlt->link_speed = PORT_SPEED_10G;
4127 + break;
4128 + default:
4129 + qlt->link_speed = PORT_SPEED_UNKNOWN;
4130 + }
4131 + qlt->qlt_link_up = 1;
4132 + fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
4133 + 0, 0);
4134 + } else if ((code == 0x8002) || (code == 0x8003) ||
4135 + (code == 0x8004) || (code == 0x8005)) {
4136 + (void) snprintf(info, 80,
4137 + "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
4138 + code, mbox1, mbox2, mbox5, mbox6);
4139 + info[79] = 0;
4140 + (void) fct_port_shutdown(qlt->qlt_port,
4141 + STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4142 + STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4143 + } else if (code == 0x800F) {
4144 + (void) snprintf(info, 80,
4145 + "Got 800F, mb1=%x mb2=%x mb3=%x",
4146 + mbox1, mbox2, mbox3);
4147 +
4148 + if (mbox1 != 1) {
4149 + /* issue "verify fw" */
4150 + qlt_verify_fw(qlt);
4151 + }
4152 + } else if (code == 0x8101) {
4153 + (void) snprintf(info, 80,
4154 + "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
4155 + code, mbox1, mbox2, mbox3);
4156 + info[79] = 0;
4157 +
4158 + /* check if "ACK" is required (timeout != 0) */
4159 + if (mbox1 & 0x0f00) {
4160 + caddr_t req;
4161 +
4162 + /*
4163 + * Ack the request (queue work to do it?)
4164 + * using a mailbox iocb
4165 + * (Only Queue #0 allowed)
4166 + */
4167 + mutex_enter(&qlt->mq_req[0].mq_lock);
4168 + req = qlt_get_req_entries(qlt, 1, 0);
4169 + if (req) {
4170 + bzero(req, IOCB_SIZE);
4171 + req[0] = 0x39; req[1] = 1;
4172 + QMEM_WR16(qlt, req+8, 0x101);
4173 + QMEM_WR16(qlt, req+10, mbox1);
4174 + QMEM_WR16(qlt, req+12, mbox2);
4175 + QMEM_WR16(qlt, req+14, mbox3);
4176 + QMEM_WR16(qlt, req+16, mbox4);
4177 + QMEM_WR16(qlt, req+18, mbox5);
4178 + QMEM_WR16(qlt, req+20, mbox6);
4179 + qlt_submit_req_entries(qlt, 1, 0);
4180 + } else {
4181 + (void) snprintf(info, 80,
4182 + "IDC ACK failed");
4183 + info[79] = 0;
4184 + }
4185 + mutex_exit(&qlt->mq_req[0].mq_lock);
4186 + }
4187 + } else {
4188 + stmf_trace(qlt->qlt_port_alias,
4189 + "Async event: 0x%x ignored",
4190 + code);
4191 + }
4192 + REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
4193 + } else if ((intr_type == 0x10) || (intr_type == 0x11)) {
4194 + /* Handle mailbox completion */
4195 + mutex_enter(&qlt->mbox_lock);
4196 + if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
4197 + cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
4198 + " when driver wasn't waiting for it %d",
4199 + qlt->instance, qlt->mbox_io_state);
4200 + } else {
4201 + for (i = 0; i < MAX_MBOXES; i++) {
4202 + if (qlt->mcp->from_fw_mask &
4203 + (((uint32_t)1) << i)) {
4204 + qlt->mcp->from_fw[i] =
4205 + REG_RD16(qlt, REG_MBOX(i));
4206 + }
4207 + }
4208 + qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
4209 + }
4210 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4211 + cv_broadcast(&qlt->mbox_cv);
4212 + mutex_exit(&qlt->mbox_lock);
4213 + } else {
4214 + cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
4215 + qlt->instance, intr_type);
4216 + stmf_trace(qlt->qlt_port_alias,
4217 + "%s: Unknown intr type 0x%x [%x]",
4218 + __func__, intr_type, risc_status);
4219 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4220 + }
4221 +
4222 + /* REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR); */
4223 + mutex_exit(&qlt->intr_lock);
4224 +
4225 + return (DDI_INTR_CLAIMED);
4226 +}
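Reviewer note: every handler above starts from the same split of REG_RISC_STATUS: the low byte selects the interrupt type and the upper 16 bits carry a type-specific payload (response-queue index, ATIO in-pointer, or async event code). A tiny sketch of that decode; the sample status values are made up:

#include <stdint.h>
#include <stdio.h>

/*
 * Split a RISC status word the way the handlers above do: the low byte
 * is the interrupt type, the upper 16 bits are a type-specific payload.
 */
static void
decode_risc_status(uint32_t risc_status)
{
        uint32_t intr_type = risc_status & 0xff;
        uint16_t payload = (uint16_t)(risc_status >> 16);

        (void) printf("type=0x%02x payload=0x%04x\n",
            (unsigned int)intr_type, (unsigned int)payload);
}

int
main(void)
{
        decode_risc_status(0x00038013);  /* type 0x13, queue index 3 */
        decode_risc_status(0x80128012);  /* type 0x12, async code 0x8012 */
        return (0);
}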
4227 +
4228 +/*
4229 + * **SHOULD ONLY BE CALLED FROM INTERRUPT CONTEXT. DO NOT CALL ELSEWHERE**
4230 + */
4231 +/* ARGSUSED */
4232 +static uint_t
2585 4233 qlt_isr(caddr_t arg, caddr_t arg2)
2586 4234 {
2587 4235 qlt_state_t *qlt = (qlt_state_t *)arg;
2588 4236 uint32_t risc_status, intr_type;
2589 4237 int i;
2590 4238 int intr_loop_count;
2591 - char info[QLT_INFO_LEN];
4239 + char info[80];
2592 4240
2593 4241 risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2594 4242 if (!mutex_tryenter(&qlt->intr_lock)) {
2595 4243 /*
2596 4244 * Normally we will always get this lock. If tryenter is
2597 4245 * failing then it means that driver is trying to do
2598 4246 * some cleanup and is masking the intr but some intr
2599 4247 * has sneaked in between. See if our device has generated
2600 4248 * this intr. If so then wait a bit and return claimed.
2601 4249 * If not then return claimed if this is the 1st instance
2602 4250    	 * of an interrupt after driver has grabbed the lock.
2603 4251 */
2604 4252 if (risc_status & BIT_15) {
2605 4253 drv_usecwait(10);
2606 4254 return (DDI_INTR_CLAIMED);
2607 4255 } else if (qlt->intr_sneak_counter) {
2608 4256 qlt->intr_sneak_counter--;
2609 4257 return (DDI_INTR_CLAIMED);
2610 4258 } else {
2611 4259 return (DDI_INTR_UNCLAIMED);
2612 4260 }
2613 4261 }
2614 4262 if (((risc_status & BIT_15) == 0) ||
2615 4263 (qlt->qlt_intr_enabled == 0)) {
2616 4264 /*
2617 4265    	 * This might be a pure coincidence that we are operating
2618 4266    	 * in an interrupt-disabled mode and another device
[ 17 lines elided ]
2619 4267 * sharing the interrupt line has generated an interrupt
2620 4268 * while an interrupt from our device might be pending. Just
2621 4269 * ignore it and let the code handling the interrupt
2622 4270 * disabled mode handle it.
2623 4271 */
2624 4272 mutex_exit(&qlt->intr_lock);
2625 4273 return (DDI_INTR_UNCLAIMED);
2626 4274 }
2627 4275
2628 4276 /*
2629 - * XXX take care for MSI case. disable intrs
4277 + * XXX take care for MSI-X case. disable intrs
2630 4278 * Its gonna be complicated because of the max iterations.
2631 4279 * as hba will have posted the intr which did not go on PCI
2632 4280 * but we did not service it either because of max iterations.
2633 4281 * Maybe offload the intr on a different thread.
2634 4282 */
2635 4283 intr_loop_count = 0;
2636 4284
2637 4285 REG_WR32(qlt, REG_INTR_CTRL, 0);
2638 4286
2639 4287 intr_again:;
2640 4288
2641 4289 /* check for risc pause */
2642 4290 if (risc_status & BIT_8) {
2643 4291 EL(qlt, "Risc Pause status=%xh\n", risc_status);
2644 4292 cmn_err(CE_WARN, "qlt(%d): Risc Pause %08x",
2645 4293 qlt->instance, risc_status);
2646 - (void) snprintf(info, sizeof (info), "Risc Pause %08x",
2647 - risc_status);
4294 + (void) snprintf(info, 80, "Risc Pause %08x", risc_status);
4295 + info[79] = 0;
2648 4296 (void) fct_port_shutdown(qlt->qlt_port,
2649 4297 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2650 4298 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2651 4299 }
2652 4300
2653 4301 /* First check for high performance path */
2654 4302 intr_type = risc_status & 0xff;
2655 4303 if (intr_type == 0x1D) {
2656 - qlt->atio_ndx_from_fw = (uint16_t)
2657 - REG_RD32(qlt, REG_ATIO_IN_PTR);
2658 - REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2659 - qlt->resp_ndx_from_fw = risc_status >> 16;
4304 +
4305 + /* process the atio queue first */
4306 + qlt->atio_ndx_from_fw =
4307 + (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
2660 4308 qlt_handle_atio_queue_update(qlt);
2661 - qlt_handle_resp_queue_update(qlt);
4309 +
4310 + /* process the response queue next */
4311 + qlt->mq_resp[0].mq_ndx_from_fw =
4312 + (uint16_t)REG_RD32(qlt, REG_RESP_IN_PTR);
4313 + qlt_handle_resp_queue_update(qlt, 0);
4314 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4315 +
2662 4316 } else if (intr_type == 0x1C) {
2663 4317 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2664 4318 qlt->atio_ndx_from_fw = (uint16_t)(risc_status >> 16);
2665 4319 qlt_handle_atio_queue_update(qlt);
4320 + } else if (intr_type == 0x1E) {
4321 + /* 83xx Atio Queue update */
4322 + qlt->atio_ndx_from_fw =
4323 + (uint16_t)MQBAR_RD32(qlt, MQBAR_ATIO_IN);
4324 + qlt_handle_atio_queue_update(qlt);
4325 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2666 4326 } else if (intr_type == 0x13) {
4327 + uint16_t qi;
4328 +
4329 + qlt->atio_ndx_from_fw =
4330 + (uint16_t)REG_RD32(qlt, REG_ATIO_IN_PTR);
4331 + qlt_handle_atio_queue_update(qlt);
4332 +
4333 + if (qlt->qlt_mq_enabled) {
4334 + qi = (uint16_t)(risc_status >> 16);
4335 + qlt->mq_resp[0].mq_ndx_from_fw =
4336 + (uint16_t)MQBAR_RD32(qlt,
4337 + (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
4338 + /* FIX THIS to be optional */
4339 + REG_WR32(qlt, REG_HCCR,
4340 + HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4341 + } else {
4342 + qi = 0;
4343 + REG_WR32(qlt, REG_HCCR,
4344 + HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4345 + qlt->mq_resp[qi].mq_ndx_from_fw = risc_status >> 16;
4346 + REG_WR32(qlt, REG_HCCR,
4347 + HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
4348 + }
4349 + qlt_handle_resp_queue_update(qlt, qi);
4350 +
4351 + } else if (intr_type == 0x14) {
4352 + /* MQ */
4353 + uint16_t qi = 0;
4354 +
4355 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
4356 + qlt->atio_ndx_from_fw =
4357 + (uint16_t)MQBAR_RD32(qlt, MQBAR_ATIO_IN);
4358 + } else {
4359 + qi = (uint16_t)(risc_status >> 16);
4360 + qlt->atio_ndx_from_fw = (uint16_t)
4361 + REG_RD32(qlt, REG_ATIO_IN_PTR);
4362 + }
4363 + qlt_handle_atio_queue_update(qlt);
4364 +
4365 + qlt->mq_resp[qi].mq_ndx_from_fw =
4366 + (uint16_t)MQBAR_RD32(qlt,
4367 + (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN);
4368 + qlt_handle_resp_queue_update(qlt, qi);
2667 4369 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2668 - qlt->resp_ndx_from_fw = risc_status >> 16;
2669 - qlt_handle_resp_queue_update(qlt);
4370 +
2670 4371 } else if (intr_type == 0x12) {
2671 4372 uint16_t code = (uint16_t)(risc_status >> 16);
2672 4373 uint16_t mbox1 = REG_RD16(qlt, REG_MBOX(1));
2673 4374 uint16_t mbox2 = REG_RD16(qlt, REG_MBOX(2));
2674 4375 uint16_t mbox3 = REG_RD16(qlt, REG_MBOX(3));
2675 4376 uint16_t mbox4 = REG_RD16(qlt, REG_MBOX(4));
2676 4377 uint16_t mbox5 = REG_RD16(qlt, REG_MBOX(5));
2677 4378 uint16_t mbox6 = REG_RD16(qlt, REG_MBOX(6));
2678 4379
2679 4380 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2680 - stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
2681 - " mb3=%x, mb5=%x, mb6=%x", code, mbox1, mbox2, mbox3,
4381 + EL(qlt, "Async event %x mb1=%x, mb2=%x, mb3=%x, mb4=%x, "
4382 + "mb5=%x, mb6=%x\n", code, mbox1, mbox2, mbox3, mbox4,
2682 4383 mbox5, mbox6);
2683 - EL(qlt, "Async event %x mb1=%x mb2=%x, mb3=%x, mb5=%x, mb6=%x",
2684 - code, mbox1, mbox2, mbox3, mbox5, mbox6);
4384 + stmf_trace(qlt->qlt_port_alias, "Async event %x mb1=%x mb2=%x,"
4385 + " mb3=%x, mb4=%x, mb5=%x, mb6=%x", code, mbox1, mbox2,
4386 + mbox3, mbox4, mbox5, mbox6);
4387 + cmn_err(CE_NOTE, "!qlt(%d): Async event %x mb1=%x mb2=%x,"
4388 + " mb3=%x, mb4=%x, mb5=%x, mb6=%x", qlt->instance, code,
4389 + mbox1, mbox2, mbox3, mbox4, mbox5, mbox6);
2685 4390
2686 4391 if ((code == 0x8030) || (code == 0x8010) || (code == 0x8013)) {
2687 4392 if (qlt->qlt_link_up) {
2688 4393 fct_handle_event(qlt->qlt_port,
2689 4394 FCT_EVENT_LINK_RESET, 0, 0);
2690 4395 }
2691 4396 } else if (code == 0x8012) {
2692 4397 qlt->qlt_link_up = 0;
2693 4398 fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_DOWN,
2694 4399 0, 0);
4400 + } else if (code == 0x8014) {
4401 + if (mbox1 == 0xffff) { /* global event */
4402 + uint8_t reason_code;
4403 +
4404 + reason_code = (uint8_t)(mbox3 >> 8);
4405 +
4406 + switch (reason_code) {
4407 + case 0x1d: /* FIP Clear Virtual Link received */
4408 + case 0x1a: /* received FLOGO */
4409 + case 0x1c: /* FCF configuration changed */
4410 + case 0x1e: /* FKA timeout */
4411 + if (mbox2 == 7) {
4412 + qlt->qlt_link_up = 0;
4413 + fct_handle_event(qlt->qlt_port,
4414 + FCT_EVENT_LINK_DOWN, 0, 0);
4415 + }
4416 + break;
4417 + case 0x12:
4418 + if (mbox2 == 4) {
4419 + qlt->qlt_link_up = 1;
4420 + fct_handle_event(qlt->qlt_port,
4421 + FCT_EVENT_LINK_UP, 0, 0);
4422 + stmf_trace(qlt->qlt_port_alias,
4423 + "SNS login and SCR done");
4424 + }
4425 + break;
4426 + case 0:
4427 + if ((mbox2 == 6) &&
4428 + (!qlt->qlt_link_up)) {
4429 + qlt->qlt_link_up = 1;
4430 + fct_handle_event(qlt->qlt_port,
4431 + FCT_EVENT_LINK_UP, 0, 0);
4432 + stmf_trace(qlt->qlt_port_alias,
4433 + "Link reinitialised");
4434 + }
4435 + break;
4436 + default:
4437 + stmf_trace(qlt->qlt_port_alias,
4438 + "AEN ignored");
4439 + break;
4440 + }
4441 + }
2695 4442 } else if (code == 0x8011) {
2696 4443 switch (mbox1) {
2697 4444 case 0: qlt->link_speed = PORT_SPEED_1G;
2698 4445 break;
2699 4446 case 1: qlt->link_speed = PORT_SPEED_2G;
2700 4447 break;
2701 4448 case 3: qlt->link_speed = PORT_SPEED_4G;
2702 4449 break;
2703 4450 case 4: qlt->link_speed = PORT_SPEED_8G;
2704 4451 break;
4452 + case 5: qlt->link_speed = PORT_SPEED_16G;
4453 + break;
4454 + case 6: qlt->link_speed = PORT_SPEED_32G;
4455 + break;
2705 4456 case 0x13: qlt->link_speed = PORT_SPEED_10G;
2706 4457 break;
2707 4458 default:
2708 4459 qlt->link_speed = PORT_SPEED_UNKNOWN;
2709 4460 }
2710 4461 qlt->qlt_link_up = 1;
2711 4462 fct_handle_event(qlt->qlt_port, FCT_EVENT_LINK_UP,
2712 4463 0, 0);
2713 4464 } else if ((code == 0x8002) || (code == 0x8003) ||
2714 4465 (code == 0x8004) || (code == 0x8005)) {
2715 - (void) snprintf(info, sizeof (info),
4466 + (void) snprintf(info, 80,
2716 4467 "Got %04x, mb1=%x mb2=%x mb5=%x mb6=%x",
2717 4468 code, mbox1, mbox2, mbox5, mbox6);
4469 + info[79] = 0;
2718 4470 (void) fct_port_shutdown(qlt->qlt_port,
2719 4471 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
2720 4472 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
2721 4473 } else if (code == 0x800F) {
2722 - (void) snprintf(info, sizeof (info),
4474 + (void) snprintf(info, 80,
2723 4475 "Got 800F, mb1=%x mb2=%x mb3=%x",
2724 4476 mbox1, mbox2, mbox3);
2725 4477
2726 4478 if (mbox1 != 1) {
2727 4479 /* issue "verify fw" */
2728 4480 qlt_verify_fw(qlt);
2729 4481 }
2730 4482 } else if (code == 0x8101) {
2731 - (void) snprintf(info, sizeof (info),
4483 + (void) snprintf(info, 80,
2732 4484 "IDC Req Rcvd:%04x, mb1=%x mb2=%x mb3=%x",
2733 4485 code, mbox1, mbox2, mbox3);
4486 + info[79] = 0;
2734 4487
2735 4488 /* check if "ACK" is required (timeout != 0) */
2736 4489 if (mbox1 & 0x0f00) {
2737 4490 caddr_t req;
2738 4491
2739 4492 /*
2740 4493 * Ack the request (queue work to do it?)
2741 - * using a mailbox iocb
4494 + * using a mailbox iocb (only Queue 0 allowed)
2742 4495 */
2743 - mutex_enter(&qlt->req_lock);
2744 - req = qlt_get_req_entries(qlt, 1);
4496 + mutex_enter(&qlt->mq_req[0].mq_lock);
4497 + req = qlt_get_req_entries(qlt, 1, 0);
2745 4498 if (req) {
2746 4499 bzero(req, IOCB_SIZE);
2747 4500 req[0] = 0x39; req[1] = 1;
2748 4501 QMEM_WR16(qlt, req+8, 0x101);
2749 4502 QMEM_WR16(qlt, req+10, mbox1);
2750 4503 QMEM_WR16(qlt, req+12, mbox2);
2751 4504 QMEM_WR16(qlt, req+14, mbox3);
2752 4505 QMEM_WR16(qlt, req+16, mbox4);
2753 4506 QMEM_WR16(qlt, req+18, mbox5);
2754 4507 QMEM_WR16(qlt, req+20, mbox6);
2755 - qlt_submit_req_entries(qlt, 1);
4508 + qlt_submit_req_entries(qlt, 1, 0);
2756 4509 } else {
2757 - (void) snprintf(info, sizeof (info),
4510 + (void) snprintf(info, 80,
2758 4511 "IDC ACK failed");
4512 + info[79] = 0;
2759 4513 }
2760 - mutex_exit(&qlt->req_lock);
4514 + mutex_exit(&qlt->mq_req[0].mq_lock);
2761 4515 }
2762 4516 }
2763 4517 } else if ((intr_type == 0x10) || (intr_type == 0x11)) {
2764 4518 /* Handle mailbox completion */
2765 4519 mutex_enter(&qlt->mbox_lock);
2766 4520 if (qlt->mbox_io_state != MBOX_STATE_CMD_RUNNING) {
2767 4521 cmn_err(CE_WARN, "qlt(%d): mailbox completion received"
2768 4522 " when driver wasn't waiting for it %d",
2769 4523 qlt->instance, qlt->mbox_io_state);
2770 4524 } else {
2771 4525 for (i = 0; i < MAX_MBOXES; i++) {
2772 4526 if (qlt->mcp->from_fw_mask &
2773 4527 (((uint32_t)1) << i)) {
2774 4528 qlt->mcp->from_fw[i] =
2775 4529 REG_RD16(qlt, REG_MBOX(i));
[ 5 lines elided ]
2776 4530 }
2777 4531 }
2778 4532 qlt->mbox_io_state = MBOX_STATE_CMD_DONE;
2779 4533 }
2780 4534 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2781 4535 cv_broadcast(&qlt->mbox_cv);
2782 4536 mutex_exit(&qlt->mbox_lock);
2783 4537 } else {
2784 4538 cmn_err(CE_WARN, "qlt(%d): Unknown intr type 0x%x",
2785 4539 qlt->instance, intr_type);
4540 + stmf_trace(qlt->qlt_port_alias,
4541 + "%s: Unknown intr type 0x%x [%x]",
4542 + __func__, intr_type, risc_status);
2786 4543 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
2787 4544 }
2788 4545
2789 - (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting */
2790 - risc_status = REG_RD32(qlt, REG_RISC_STATUS);
2791 - if ((risc_status & BIT_15) &&
2792 - (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
2793 - goto intr_again;
4546 + (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting */
4547 +
4548 + if ((qlt->intr_flags & QLT_INTR_MSIX) == 0) {
4549 + risc_status = REG_RD32(qlt, REG_RISC_STATUS);
4550 + if ((risc_status & BIT_15) &&
4551 + (++intr_loop_count < QLT_MAX_ITERATIONS_PER_INTR)) {
4552 + goto intr_again;
4553 + }
4554 + REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
4555 + mutex_exit(&qlt->intr_lock);
4556 + } else {
4557 + mutex_exit(&qlt->intr_lock);
4558 + REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2794 4559 }
2795 4560
2796 - REG_WR32(qlt, REG_INTR_CTRL, ENABLE_RISC_INTR);
2797 -
2798 - mutex_exit(&qlt->intr_lock);
2799 4561 return (DDI_INTR_CLAIMED);
2800 4562 }
2801 4563
2802 4564 /* **************** NVRAM Functions ********************** */
2803 4565
2804 4566 fct_status_t
2805 4567 qlt_read_flash_word(qlt_state_t *qlt, uint32_t faddr, uint32_t *bp)
2806 4568 {
2807 4569 uint32_t timer;
2808 4570
2809 4571 /* Clear access error flag */
2810 4572 REG_WR32(qlt, REG_CTRL_STATUS,
2811 4573 REG_RD32(qlt, REG_CTRL_STATUS) | FLASH_ERROR);
2812 4574
2813 4575 REG_WR32(qlt, REG_FLASH_ADDR, faddr & ~BIT_31);
2814 4576
2815 4577 /* Wait for READ cycle to complete. */
2816 4578 for (timer = 3000; timer; timer--) {
2817 4579 if (REG_RD32(qlt, REG_FLASH_ADDR) & BIT_31) {
2818 4580 break;
2819 4581 }
2820 4582 drv_usecwait(10);
2821 4583 }
2822 4584 if (timer == 0) {
2823 4585 EL(qlt, "flash timeout\n");
2824 4586 return (QLT_FLASH_TIMEOUT);
2825 4587 } else if (REG_RD32(qlt, REG_CTRL_STATUS) & FLASH_ERROR) {
2826 4588 EL(qlt, "flash access error\n");
2827 4589 return (QLT_FLASH_ACCESS_ERROR);
[ 19 lines elided ]
2828 4590 }
2829 4591
2830 4592 *bp = REG_RD32(qlt, REG_FLASH_DATA);
2831 4593
2832 4594 return (QLT_SUCCESS);
2833 4595 }
2834 4596
2835 4597 fct_status_t
2836 4598 qlt_read_nvram(qlt_state_t *qlt)
2837 4599 {
2838 - uint32_t index, addr, chksum;
2839 - uint32_t val, *ptr;
2840 - fct_status_t ret;
2841 - qlt_nvram_t *nv;
2842 - uint64_t empty_node_name = 0;
4600 + uint32_t index, addr, chksum;
4601 + uint32_t val, *ptr;
4602 + fct_status_t ret;
4603 + qlt_nvram_t *nv;
4604 + uint64_t empty_node_name = 0;
2843 4605
2844 - if (qlt->qlt_81xx_chip) {
4606 + if (qlt->qlt_27xx_chip) {
4607 + int func;
4608 +
4609 + func = ((REG_RD32(qlt, REG_CTRL_STATUS) & 0x0000f000) >> 12);
4610 + switch (func) {
4611 + case 0: addr = QLT27_NVRAM_FUNC0_ADDR; break;
4612 + case 1: addr = QLT27_NVRAM_FUNC1_ADDR; break;
4613 + case 2: addr = QLT27_NVRAM_FUNC2_ADDR; break;
4614 + case 3: addr = QLT27_NVRAM_FUNC3_ADDR; break;
4615 + }
4616 + } else if (qlt->qlt_83xx_chip) {
4617 + if (qlt->qlt_fcoe_enabled) {
4618 + addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4619 + QLT83FCOE_NVRAM_FUNC1_ADDR :
4620 + QLT83FCOE_NVRAM_FUNC0_ADDR;
4621 + } else {
4622 + addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4623 + QLT83FC_NVRAM_FUNC1_ADDR :
4624 + QLT83FC_NVRAM_FUNC0_ADDR;
4625 + }
4626 + } else if (qlt->qlt_81xx_chip) {
2845 4627 addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
2846 4628 QLT81_NVRAM_FUNC1_ADDR : QLT81_NVRAM_FUNC0_ADDR;
2847 4629 } else if (qlt->qlt_25xx_chip) {
2848 4630 addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2849 4631 QLT25_NVRAM_FUNC1_ADDR : QLT25_NVRAM_FUNC0_ADDR;
2850 4632 } else {
2851 4633 addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
2852 4634 NVRAM_FUNC1_ADDR : NVRAM_FUNC0_ADDR;
2853 4635 }
2854 4636 mutex_enter(&qlt_global_lock);
2855 4637
2856 4638 /* Pause RISC. */
2857 4639 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
2858 4640 (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting. */
2859 4641
2860 4642 /* Get NVRAM data and calculate checksum. */
2861 4643 ptr = (uint32_t *)qlt->nvram;
2862 4644 chksum = 0;
2863 4645 for (index = 0; index < sizeof (qlt_nvram_t) / 4; index++) {
2864 4646 ret = qlt_read_flash_word(qlt, addr++, &val);
2865 4647 if (ret != QLT_SUCCESS) {
2866 4648 EL(qlt, "qlt_read_flash_word, status=%llxh\n", ret);
2867 4649 mutex_exit(&qlt_global_lock);
2868 4650 return (ret);
2869 4651 }
2870 4652 chksum += val;
2871 4653 *ptr = LE_32(val);
2872 4654 ptr++;
2873 4655 }
2874 4656
2875 4657 /* Release RISC Pause */
2876 4658 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_PAUSE));
2877 4659 (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting. */
2878 4660
2879 4661 mutex_exit(&qlt_global_lock);
2880 4662
2881 4663 /* Sanity check NVRAM Data */
2882 4664 nv = qlt->nvram;
2883 4665 if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
2884 4666 nv->id[2] != 'P' || nv->id[3] != ' ' ||
2885 4667 (nv->nvram_version[0] | nv->nvram_version[1]) == 0) {
2886 4668 EL(qlt, "chksum=%xh, id=%c%c%c%c, ver=%02d%02d\n", chksum,
2887 4669 nv->id[0], nv->id[1], nv->id[2], nv->id[3],
2888 4670 nv->nvram_version[1], nv->nvram_version[0]);
2889 4671 return (QLT_BAD_NVRAM_DATA);
2890 4672 }
2891 4673
( 37 lines elided )
2892 4674 /* If node name is zero, hand craft it from port name */
2893 4675 if (bcmp(nv->node_name, &empty_node_name, 8) == 0) {
2894 4676 bcopy(nv->port_name, nv->node_name, 8);
2895 4677 nv->node_name[0] = (uint8_t)(nv->node_name[0] & ~BIT_0);
2896 4678 nv->port_name[0] = (uint8_t)(nv->node_name[0] | BIT_0);
2897 4679 }
2898 4680
2899 4681 return (QLT_SUCCESS);
2900 4682 }
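
qlt_read_nvram() above accepts the image only if the running 32-bit word sum wraps to zero, the ID bytes spell "ISP ", and the NVRAM version is non-zero. A standalone sketch of that acceptance test follows; the helper name and the assumption that the two version bytes immediately follow the 4-byte ID are illustrative, not taken from qlt.h.

#include <stdint.h>
#include <string.h>

/*
 * Sketch of the NVRAM sanity check performed above.  The layout
 * assumption (4-byte "ISP " ID followed by a 2-byte version) and the
 * helper name are illustrative only.
 */
static int
nvram_valid(const uint8_t *buf, size_t nwords)
{
	uint32_t sum = 0;
	size_t i;

	/* Sum the image as little-endian 32-bit words; must wrap to zero. */
	for (i = 0; i < nwords; i++) {
		const uint8_t *p = buf + (i * 4);

		sum += (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		    ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}
	if (sum != 0)
		return (0);
	if (memcmp(buf, "ISP ", 4) != 0)
		return (0);
	if ((buf[4] | buf[5]) == 0)	/* both version bytes zero */
		return (0);
	return (1);
}
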
2901 4683
4684 +fct_status_t
4685 +qlt_read_vpd(qlt_state_t *qlt)
4686 +{
4687 + uint32_t index, addr, chksum;
4688 + uint32_t val, *ptr;
4689 + fct_status_t ret;
4690 +
4691 + if (qlt->qlt_27xx_chip) {
4692 + int func;
4693 +
4694 + func = ((REG_RD32(qlt, REG_CTRL_STATUS) & 0x0000f000) >> 12);
4695 + switch (func) {
4696 + case 0: addr = QLT27_VPD_FUNC0_ADDR; break;
4697 + case 1: addr = QLT27_VPD_FUNC1_ADDR; break;
4698 + case 2: addr = QLT27_VPD_FUNC2_ADDR; break;
4699 + case 3: addr = QLT27_VPD_FUNC3_ADDR; break;
4700 + }
4701 + } else if (qlt->qlt_83xx_chip) {
4702 + if (qlt->qlt_fcoe_enabled) {
4703 + addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4704 + QLT83FCOE_VPD_FUNC1_ADDR :
4705 + QLT83FCOE_VPD_FUNC0_ADDR;
4706 + } else {
4707 + addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4708 + QLT83FC_VPD_FUNC1_ADDR :
4709 + QLT83FC_VPD_FUNC0_ADDR;
4710 + }
4711 + } else if (qlt->qlt_81xx_chip) {
4712 + addr = REG_RD32(qlt, REG_CTRL_STATUS) & BIT_12 ?
4713 + QLT81_VPD_FUNC1_ADDR : QLT81_VPD_FUNC0_ADDR;
4714 + } else if (qlt->qlt_25xx_chip) {
4715 + addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
4716 + QLT25_VPD_FUNC1_ADDR : QLT25_VPD_FUNC0_ADDR;
4717 + } else {
4718 + addr = REG_RD32(qlt, REG_CTRL_STATUS) & FUNCTION_NUMBER ?
4719 + QLT24_VPD_FUNC1_ADDR : QLT24_VPD_FUNC0_ADDR;
4720 + }
4721 + mutex_enter(&qlt_global_lock);
4722 +
4723 + /* Pause RISC. */
4724 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
4725 + (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting. */
4726 +
4727 + /* Get VPD data and calculate checksum. */
4728 + ptr = (uint32_t *)qlt->vpd;
4729 + chksum = 0;
4730 + for (index = 0; index < QL_24XX_VPD_SIZE / 4; index++) {
4731 + ret = qlt_read_flash_word(qlt, addr++, &val);
4732 + if (ret != QLT_SUCCESS) {
4733 + EL(qlt, "qlt_read_flash_word, status=%llxh\n", ret);
4734 + mutex_exit(&qlt_global_lock);
4735 + return (ret);
4736 + }
4737 + chksum += val;
4738 + *ptr = LE_32(val);
4739 + ptr++;
4740 + }
4741 +
4742 + /* Release RISC Pause */
4743 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_PAUSE));
4744 + (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting. */
4745 +
4746 + mutex_exit(&qlt_global_lock);
4747 +
4748 + return (QLT_SUCCESS);
4749 +}
4750 +
4751 +fct_status_t
4752 +qlt_read_bfe(qlt_state_t *qlt, uint32_t in_addr, uint32_t *out_addr, uint32_t i)
4753 +{
4754 + uint32_t index;
4755 + uint32_t chksum;
4756 + fct_status_t ret = QLT_SUCCESS;
4757 + uint32_t val;
4758 + uint16_t dataoffset;
4759 + uint32_t *ptr;
4760 + uint32_t addr, addr0;
4761 + uint16_t length;
4762 +
4763 + val = chksum = 0;
4764 + ptr = (uint32_t *)&qlt->rimage[i].header;
4765 +
4766 + addr = in_addr;
4767 + addr0 = addr;
4768 +
4769 + /* read rom header first */
4770 + for (index = 0; index < sizeof (qlt_rom_header_t)/4;
4771 + index ++) {
4772 + ret = qlt_read_flash_word(qlt, addr++, &val);
4773 + if (ret != QLT_SUCCESS) {
4774 + EL(qlt, "read flash, status=%llxh\n", ret);
4775 + return (ret);
4776 + }
4777 + chksum += val;
4778 + *ptr = LE_32(val);
4779 + ptr++;
4780 + }
4781 +
4782 + /* check the signature */
4783 + if (qlt->rimage[i].header.signature[0] != PCI_HEADER0) {
4784 + EL(qlt, "hdr[%d] sig[1] [0] (%xh) (%xh) is wrong.\n",
4785 + i, qlt->rimage[i].header.signature[1],
4786 + qlt->rimage[i].header.signature[0]);
4787 + return (QLT_SUCCESS);
4788 + }
4789 +
4790 + if ((qlt->rimage[i].header.signature[0] == PCI_HEADER0) &&
4791 + (qlt->rimage[i].header.signature[1] == PCI_HEADER1)) {
4792 + /* get dataoffset */
4793 + dataoffset = (qlt->rimage[i].header.dataoffset[1] |
4794 + qlt->rimage[i].header.dataoffset[0]);
4795 + EL(qlt, "dataoffset[0] = %xh\n", dataoffset);
4796 +
4797 + ptr = (uint32_t *)&qlt->rimage[i].data;
4798 +
4799 + /* adjust addr */
4800 + addr = addr0 + (dataoffset/4);
4801 + for (index = 0; index < sizeof (qlt_rom_data_t)/4;
4802 + index ++) {
4803 + ret = qlt_read_flash_word(qlt, addr++, &val);
4804 + if (ret != QLT_SUCCESS) {
4805 + EL(qlt, "read flash, status=%llxh\n", ret);
4806 + return (ret);
4807 + }
4808 + chksum += val;
4809 + *ptr = LE_32(val);
4810 + ptr++;
4811 + }
4812 +
4813 + /* check signature */
4814 + if ((qlt->rimage[i].data.signature[0] != 0x50) &&
4815 + (qlt->rimage[i].data.signature[1] != 0x43) &&
4816 + (qlt->rimage[i].data.signature[2] != 0x49) &&
4817 + (qlt->rimage[i].data.signature[3] != 0x52)) {
4818 + EL(qlt,
4819 + "data sig[3] [2] [1] [0] (%xh)(%xh)(%xh)(%xh)\n",
4820 + qlt->rimage[i].data.signature[3],
4821 + qlt->rimage[i].data.signature[2],
4822 + qlt->rimage[i].data.signature[1],
4823 + qlt->rimage[i].data.signature[0]);
4824 + return (QLT_SUCCESS);
4825 + }
4826 +
4827 + EL(qlt, "codetype (%xh) revisionlevel[1][0] (%xh)(%xh)\n",
4828 + qlt->rimage[i].data.codetype,
4829 + qlt->rimage[i].data.revisionlevel[1],
4830 + qlt->rimage[i].data.revisionlevel[0]);
4831 +
4832 + /* check if this is the last image */
4833 + if (qlt->rimage[i].data.indicator == PCI_IND_LAST_IMAGE) {
4834 + EL(qlt, "last image (%xh)\n",
4835 + qlt->rimage[i].data.indicator);
4836 + return (QLT_SUCCESS);
4837 +
4838 + }
4839 +
4840 + /* Get the image length and adjust the addr according */
4841 + length = (qlt->rimage[i].data.imagelength[1] |
4842 + qlt->rimage[i].data.imagelength[0]);
4843 +
4844 + EL(qlt, "image[%d] length[1][0] (%xh) (%xh) in sectors\n",
4845 + i, length);
4846 +
4847 + /* the starting addr of the next image */
4848 + addr = addr0 + ((length * 512)/4);
4849 + *out_addr = addr;
4850 + }
4851 +
4852 + return (QLT_SUCCESS);
4853 +}
4854 +
4855 +fct_status_t
4856 +qlt_read_rom_image(qlt_state_t *qlt)
4857 +{
4858 + uint32_t addr;
4859 + uint32_t out_addr = 0;
4860 + uint32_t count = 0;
4861 + boolean_t last_image = FALSE;
4862 + fct_status_t ret;
4863 +
4864 + if (qlt->qlt_27xx_chip) {
4865 + addr = FLASH_2700_DATA_ADDR + FLASH_2700_BOOT_CODE_ADDR;
4866 + } else if (qlt->qlt_83xx_chip) {
4867 + addr = FLASH_8300_DATA_ADDR + FLASH_8300_BOOT_CODE_ADDR;
4868 + } else if (qlt->qlt_81xx_chip) {
4869 + addr = FLASH_8100_DATA_ADDR + FLASH_8100_BOOT_CODE_ADDR;
4870 + } else if (qlt->qlt_25xx_chip) {
4871 + addr = FLASH_2500_DATA_ADDR + FLASH_2500_BOOT_CODE_ADDR;
4872 + } else {
4873 + addr = FLASH_2400_DATA_ADDR + FLASH_2400_BOOT_CODE_ADDR;
4874 + }
4875 + mutex_enter(&qlt_global_lock);
4876 +
4877 + /* Pause RISC. */
4878 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
4879 + (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting. */
4880 +
4881 + do {
4882 + ret = qlt_read_bfe(qlt, addr, &out_addr, count);
4883 + if (ret != QLT_SUCCESS) {
4884 + EL(qlt, "qlt_read_bfe, status=%llxh\n", ret);
4885 + break;
4886 + }
4887 + if (qlt->rimage[count].data.indicator ==
4888 + PCI_IND_LAST_IMAGE) {
4889 + last_image = TRUE;
4890 + } else {
4891 + addr = out_addr;
4892 + }
4893 + count ++;
4894 + } while ((last_image != TRUE) && (count < 6));
4895 +
4896 + /* Release RISC Pause */
4897 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_PAUSE));
4898 + (void) REG_RD32(qlt, REG_HCCR); /* PCI Posting. */
4899 +
4900 + mutex_exit(&qlt_global_lock);
4901 +
4902 + return (QLT_SUCCESS);
4903 +}
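
The two routines above walk the flash-resident PCI expansion-ROM chain: each image begins with the 0x55/0xAA signature, its header points at a "PCIR" data structure, the image length is stored in 512-byte units, and bit 7 of the indicator byte marks the final image. A compact walk over an in-memory copy, with offsets taken from the PCI firmware specification rather than from this driver, might look like:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Walk PCI expansion-ROM images in a flat buffer (bounds checks simplified). */
static void
walk_rom_images(const uint8_t *rom, size_t size)
{
	size_t off = 0;
	int n = 0;

	while ((off + 0x1a) < size && n < 6) {
		const uint8_t *img = rom + off;
		const uint8_t *pcir;
		uint16_t pcir_off, sectors;

		if (img[0] != 0x55 || img[1] != 0xaa)
			break;				/* no ROM signature */
		pcir_off = (uint16_t)(img[0x18] | (img[0x19] << 8));
		pcir = img + pcir_off;
		if (memcmp(pcir, "PCIR", 4) != 0)
			break;				/* malformed data structure */

		sectors = (uint16_t)(pcir[0x10] | (pcir[0x11] << 8));
		(void) printf("image %d: code type %u, %u sectors\n",
		    n, (unsigned)pcir[0x14], (unsigned)sectors);

		if (pcir[0x15] & 0x80)			/* last-image indicator */
			break;
		off += (size_t)sectors * 512;
		n++;
	}
}
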
4904 +
2902 4905 uint32_t
2903 4906 qlt_sync_atio_queue(qlt_state_t *qlt)
2904 4907 {
2905 4908 uint32_t total_ent;
2906 4909
2907 4910 if (qlt->atio_ndx_from_fw > qlt->atio_ndx_to_fw) {
2908 4911 total_ent = qlt->atio_ndx_from_fw - qlt->atio_ndx_to_fw;
2909 4912 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2910 4913 + (qlt->atio_ndx_to_fw << 6), total_ent << 6,
2911 4914 DDI_DMA_SYNC_FORCPU);
2912 4915 } else {
2913 4916 total_ent = ATIO_QUEUE_ENTRIES - qlt->atio_ndx_to_fw +
2914 4917 qlt->atio_ndx_from_fw;
2915 4918 (void) ddi_dma_sync(qlt->queue_mem_dma_handle, ATIO_QUEUE_OFFSET
2916 4919 + (qlt->atio_ndx_to_fw << 6), (uint_t)(ATIO_QUEUE_ENTRIES -
2917 4920 qlt->atio_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2918 4921 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2919 4922 ATIO_QUEUE_OFFSET, (uint_t)(qlt->atio_ndx_from_fw << 6),
2920 4923 DDI_DMA_SYNC_FORCPU);
2921 4924 }
2922 4925 return (total_ent);
2923 4926 }
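
The entry count above is standard ring arithmetic: compare the driver's consumer index against the firmware's producer index and account for wrap at the queue size. In isolation (equal indexes treated as an empty ring, which the caller already guarantees):

/* Pending entries in a ring of 'qsize' slots; indexes are slot numbers. */
static inline unsigned int
ring_pending(unsigned int consumer, unsigned int producer, unsigned int qsize)
{
	if (producer >= consumer)
		return (producer - consumer);
	return (qsize - consumer + producer);	/* producer wrapped */
}

qlt_sync_atio_queue() then converts this count to bytes by shifting left by 6 (64-byte entries) before calling ddi_dma_sync().
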
2924 4927
2925 4928 void
2926 4929 qlt_handle_atio_queue_update(qlt_state_t *qlt)
2927 4930 {
2928 4931 uint32_t total_ent;
2929 4932
2930 4933 if (qlt->atio_ndx_to_fw == qlt->atio_ndx_from_fw)
2931 4934 return;
2932 4935
2933 4936 total_ent = qlt_sync_atio_queue(qlt);
2934 4937
( 23 lines elided )
2935 4938 do {
2936 4939 uint8_t *atio = (uint8_t *)&qlt->atio_ptr[
2937 4940 qlt->atio_ndx_to_fw << 6];
2938 4941 uint32_t ent_cnt;
2939 4942
2940 4943 ent_cnt = (uint32_t)(atio[1]);
2941 4944 if (ent_cnt > total_ent) {
2942 4945 break;
2943 4946 }
2944 4947 switch ((uint8_t)(atio[0])) {
4948 + case 0x06: /* ATIO, make performance case the 1st test */
4949 + qlt_handle_atio(qlt, atio);
4950 + break;
2945 4951 case 0x0d: /* INOT */
2946 4952 qlt_handle_inot(qlt, atio);
2947 4953 break;
2948 - case 0x06: /* ATIO */
2949 - qlt_handle_atio(qlt, atio);
2950 - break;
2951 4954 default:
2952 4955 EL(qlt, "atio_queue_update atio[0]=%xh\n", atio[0]);
2953 4956 cmn_err(CE_WARN, "qlt_handle_atio_queue_update: "
2954 4957 "atio[0] is %x, qlt-%p", atio[0], (void *)qlt);
2955 4958 break;
2956 4959 }
2957 4960 qlt->atio_ndx_to_fw = (uint16_t)(
2958 4961 (qlt->atio_ndx_to_fw + ent_cnt) & (ATIO_QUEUE_ENTRIES - 1));
2959 4962 total_ent -= ent_cnt;
2960 4963 } while (total_ent > 0);
2961 - REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
4964 +
4965 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
4966 + MQBAR_WR32(qlt, MQBAR_ATIO_OUT, qlt->atio_ndx_to_fw);
4967 + } else {
4968 + REG_WR32(qlt, REG_ATIO_OUT_PTR, qlt->atio_ndx_to_fw);
4969 + }
2962 4970 }
2963 4971
2964 4972 uint32_t
2965 -qlt_sync_resp_queue(qlt_state_t *qlt)
4973 +qlt_sync_resp_queue(qlt_state_t *qlt, uint16_t qi)
2966 4974 {
2967 4975 uint32_t total_ent;
2968 4976
2969 - if (qlt->resp_ndx_from_fw > qlt->resp_ndx_to_fw) {
2970 - total_ent = qlt->resp_ndx_from_fw - qlt->resp_ndx_to_fw;
2971 - (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2972 - RESPONSE_QUEUE_OFFSET
2973 - + (qlt->resp_ndx_to_fw << 6), total_ent << 6,
2974 - DDI_DMA_SYNC_FORCPU);
4977 + if (qlt->mq_resp[qi].mq_ndx_from_fw > qlt->mq_resp[qi].mq_ndx_to_fw) {
4978 + total_ent = qlt->mq_resp[qi].mq_ndx_from_fw -
4979 + qlt->mq_resp[qi].mq_ndx_to_fw;
4980 + if (qi) {
4981 + (void) ddi_dma_sync(
4982 + qlt->mq_resp[qi].queue_mem_mq_dma_handle,
4983 + (qlt->mq_resp[qi].mq_ndx_to_fw << 6),
4984 + total_ent << 6,
4985 + DDI_DMA_SYNC_FORCPU);
4986 + } else {
4987 + (void) ddi_dma_sync(
4988 + qlt->queue_mem_dma_handle,
4989 + RESPONSE_QUEUE_OFFSET +
4990 + (qlt->mq_resp[qi].mq_ndx_to_fw << 6),
4991 + total_ent << 6,
4992 + DDI_DMA_SYNC_FORCPU);
4993 + }
2975 4994 } else {
2976 - total_ent = RESPONSE_QUEUE_ENTRIES - qlt->resp_ndx_to_fw +
2977 - qlt->resp_ndx_from_fw;
2978 - (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2979 - RESPONSE_QUEUE_OFFSET
2980 - + (qlt->resp_ndx_to_fw << 6), (RESPONSE_QUEUE_ENTRIES -
2981 - qlt->resp_ndx_to_fw) << 6, DDI_DMA_SYNC_FORCPU);
2982 - (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
2983 - RESPONSE_QUEUE_OFFSET,
2984 - qlt->resp_ndx_from_fw << 6, DDI_DMA_SYNC_FORCPU);
4995 + total_ent =
4996 + (qi ? RESPONSE_QUEUE_MQ_ENTRIES : RESPONSE_QUEUE_ENTRIES) -
4997 + qlt->mq_resp[qi].mq_ndx_to_fw +
4998 + qlt->mq_resp[qi].mq_ndx_from_fw;
4999 +
5000 + if (qi) {
5001 +
5002 + (void) ddi_dma_sync(
5003 + qlt->mq_resp[qi].queue_mem_mq_dma_handle,
5004 + qlt->mq_resp[qi].mq_ndx_to_fw << 6,
5005 + (RESPONSE_QUEUE_MQ_ENTRIES -
5006 + qlt->mq_resp[qi].mq_ndx_to_fw) << 6,
5007 + DDI_DMA_SYNC_FORCPU);
5008 + (void) ddi_dma_sync(
5009 + qlt->mq_resp[qi].queue_mem_mq_dma_handle, 0,
5010 + qlt->mq_resp[qi].mq_ndx_from_fw << 6,
5011 + DDI_DMA_SYNC_FORCPU);
5012 + } else {
5013 + (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
5014 + RESPONSE_QUEUE_OFFSET +
5015 + (qlt->mq_resp[qi].mq_ndx_to_fw << 6),
5016 + (RESPONSE_QUEUE_ENTRIES -
5017 + qlt->mq_resp[qi].mq_ndx_to_fw) << 6,
5018 + DDI_DMA_SYNC_FORCPU);
5019 + (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
5020 + RESPONSE_QUEUE_OFFSET,
5021 + qlt->mq_resp[qi].mq_ndx_from_fw << 6,
5022 + DDI_DMA_SYNC_FORCPU);
5023 + }
2985 5024 }
5025 +
2986 5026 return (total_ent);
2987 5027 }
2988 5028
2989 5029 void
2990 -qlt_handle_resp_queue_update(qlt_state_t *qlt)
5030 +qlt_handle_resp_queue_update(qlt_state_t *qlt, uint16_t qi)
2991 5031 {
2992 5032 uint32_t total_ent;
2993 5033 uint8_t c;
2994 5034
2995 - if (qlt->resp_ndx_to_fw == qlt->resp_ndx_from_fw)
5035 + if (qlt->mq_resp[qi].mq_ndx_to_fw == qlt->mq_resp[qi].mq_ndx_from_fw)
2996 5036 return;
2997 5037
2998 - total_ent = qlt_sync_resp_queue(qlt);
5038 + total_ent = qlt_sync_resp_queue(qlt, qi);
2999 5039
3000 5040 do {
3001 - caddr_t resp = &qlt->resp_ptr[qlt->resp_ndx_to_fw << 6];
5041 + uint32_t qe = qlt->mq_resp[qi].mq_ndx_to_fw;
5042 + caddr_t resp = &qlt->mq_resp[qi].mq_ptr[qe << 6];
5043 +
3002 5044 uint32_t ent_cnt;
3003 5045
3004 5046 ent_cnt = (uint32_t)(resp[0] == 0x51 ? resp[1] : 1);
3005 5047 if (ent_cnt > total_ent) {
3006 5048 break;
3007 5049 }
3008 5050 switch ((uint8_t)(resp[0])) {
3009 5051 case 0x12: /* CTIO completion */
3010 - qlt_handle_ctio_completion(qlt, (uint8_t *)resp);
5052 + qlt_handle_ctio_completion(qlt, (uint8_t *)resp, qi);
3011 5053 break;
3012 5054 case 0x0e: /* NACK */
3013 5055 /* Do Nothing */
3014 5056 break;
3015 5057 case 0x1b: /* Verify FW */
3016 5058 qlt_handle_verify_fw_completion(qlt, (uint8_t *)resp);
3017 5059 break;
3018 5060 case 0x29: /* CT PassThrough */
3019 5061 qlt_handle_ct_completion(qlt, (uint8_t *)resp);
3020 5062 break;
5063 + case 0x32: /* Report ID */
5064 + EL(qlt, "report Id received [type %xh]\n", resp[0]);
5065 + break;
3021 5066 case 0x33: /* Abort IO IOCB completion */
3022 5067 qlt_handle_sol_abort_completion(qlt, (uint8_t *)resp);
3023 5068 break;
3024 5069 case 0x51: /* PUREX */
3025 5070 qlt_handle_purex(qlt, (uint8_t *)resp);
3026 5071 break;
3027 5072 case 0x52:
3028 5073 qlt_handle_dereg_completion(qlt, (uint8_t *)resp);
3029 5074 break;
3030 5075 case 0x53: /* ELS passthrough */
3031 5076 c = (uint8_t)(((uint8_t)resp[0x1f]) >> 5);
3032 5077 if (c == 0) {
3033 5078 qlt_handle_sol_els_completion(qlt,
( 3 lines elided )
3034 5079 (uint8_t *)resp);
3035 5080 } else if (c == 3) {
3036 5081 qlt_handle_unsol_els_abort_completion(qlt,
3037 5082 (uint8_t *)resp);
3038 5083 } else {
3039 5084 qlt_handle_unsol_els_completion(qlt,
3040 5085 (uint8_t *)resp);
3041 5086 }
3042 5087 break;
3043 5088 case 0x54: /* ABTS received */
3044 - qlt_handle_rcvd_abts(qlt, (uint8_t *)resp);
5089 + qlt_handle_rcvd_abts(qlt, (uint8_t *)resp, qi);
3045 5090 break;
3046 5091 case 0x55: /* ABTS completion */
3047 - qlt_handle_abts_completion(qlt, (uint8_t *)resp);
5092 + qlt_handle_abts_completion(qlt, (uint8_t *)resp, qi);
3048 5093 break;
3049 5094 default:
3050 5095 EL(qlt, "response entry=%xh\n", resp[0]);
3051 5096 break;
3052 5097 }
3053 - qlt->resp_ndx_to_fw = (qlt->resp_ndx_to_fw + ent_cnt) &
3054 - (RESPONSE_QUEUE_ENTRIES - 1);
5098 + if (qi != 0) {
5099 + qlt->mq_resp[qi].mq_ndx_to_fw =
5100 + (qlt->mq_resp[qi].mq_ndx_to_fw + ent_cnt) &
5101 + (RESPONSE_QUEUE_MQ_ENTRIES - 1);
5102 + } else {
5103 + qlt->mq_resp[qi].mq_ndx_to_fw =
5104 + (qlt->mq_resp[qi].mq_ndx_to_fw + ent_cnt) &
5105 + (RESPONSE_QUEUE_ENTRIES - 1);
5106 + }
3055 5107 total_ent -= ent_cnt;
3056 5108 } while (total_ent > 0);
3057 - REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->resp_ndx_to_fw);
5109 + if (qlt->qlt_mq_enabled) {
5110 + MQBAR_WR32(qlt, (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_OUT,
5111 + qlt->mq_resp[qi].mq_ndx_to_fw);
5112 + } else {
5113 + REG_WR32(qlt, REG_RESP_OUT_PTR, qlt->mq_resp[qi].mq_ndx_to_fw);
5114 + }
3058 5115 }
3059 5116
3060 5117 fct_status_t
3061 5118 qlt_portid_to_handle(qlt_state_t *qlt, uint32_t id, uint16_t cmd_handle,
3062 - uint16_t *ret_handle)
5119 + uint16_t *ret_handle)
3063 5120 {
3064 5121 fct_status_t ret;
3065 5122 mbox_cmd_t *mcp;
3066 5123 uint16_t n;
3067 5124 uint16_t h;
3068 5125 uint32_t ent_id;
3069 5126 uint8_t *p;
3070 5127 int found = 0;
3071 5128
3072 5129 mcp = qlt_alloc_mailbox_command(qlt, 2048 * 8);
3073 5130 if (mcp == NULL) {
( 1 line elided )
3074 5131 return (STMF_ALLOC_FAILURE);
3075 5132 }
3076 5133 mcp->to_fw[0] = MBC_GET_ID_LIST;
3077 5134 mcp->to_fw[8] = 2048 * 8;
3078 5135 mcp->to_fw[9] = 0;
3079 5136 mcp->to_fw_mask |= BIT_9 | BIT_8;
3080 5137 mcp->from_fw_mask |= BIT_1 | BIT_2;
3081 5138
3082 5139 ret = qlt_mailbox_command(qlt, mcp);
3083 5140 if (ret != QLT_SUCCESS) {
3084 - EL(qlt, "qlt_mailbox_command=7Ch status=%llxh\n", ret);
3085 - cmn_err(CE_WARN, "GET ID list failed, ret = %llx, mb0=%x, "
3086 - "mb1=%x, mb2=%x", (long long)ret, mcp->from_fw[0],
3087 - mcp->from_fw[1], mcp->from_fw[2]);
5141 + EL(qlt, "qlt_mbox_command=7Ch status=%llxh\n", ret);
5142 + cmn_err(CE_WARN, "qlt(%d) GET ID list failed, ret = %llx, "
5143 + "mb0=%x, mb1=%x, mb2=%x", qlt->instance, (long long)ret,
5144 + mcp->from_fw[0], mcp->from_fw[1], mcp->from_fw[2]);
3088 5145 qlt_free_mailbox_command(qlt, mcp);
3089 5146 return (ret);
3090 5147 }
5148 +
5149 + EL(qlt, "mbx cmd=7Ch, GET_ID_LIST id=%x fw[1]=%x\n",
5150 + id, mcp->from_fw[1]);
5151 +
3091 5152 qlt_dmem_dma_sync(mcp->dbuf, DDI_DMA_SYNC_FORCPU);
3092 5153 p = mcp->dbuf->db_sglist[0].seg_addr;
3093 5154 for (n = 0; n < mcp->from_fw[1]; n++) {
3094 5155 ent_id = LE_32(*((uint32_t *)p)) & 0xFFFFFF;
3095 5156 h = (uint16_t)((uint16_t)p[4] | (((uint16_t)p[5]) << 8));
3096 5157 if (ent_id == id) {
3097 5158 found = 1;
3098 5159 *ret_handle = h;
3099 5160 if ((cmd_handle != FCT_HANDLE_NONE) &&
3100 5161 (cmd_handle != h)) {
3101 - cmn_err(CE_WARN, "login for portid %x came in "
3102 - "with handle %x, while the portid was "
3103 - "already using a different handle %x",
3104 - id, cmd_handle, h);
5162 + cmn_err(CE_WARN, "qlt(%d) login for portid %x "
5163 + "came in with handle %x, while the portid "
5164 + "was already using a different handle %x",
5165 + qlt->instance, id, cmd_handle, h);
3105 5166 qlt_free_mailbox_command(qlt, mcp);
3106 5167 return (QLT_FAILURE);
3107 5168 }
3108 5169 break;
3109 5170 }
3110 5171 if ((cmd_handle != FCT_HANDLE_NONE) && (h == cmd_handle)) {
3111 - cmn_err(CE_WARN, "login for portid %x came in with "
3112 - "handle %x, while the handle was already in use "
3113 - "for portid %x", id, cmd_handle, ent_id);
5172 + cmn_err(CE_WARN, "qlt(%d) login for portid %x came in "
5173 + "with handle %x, while the handle was already in "
5174 + "use for portid %x",
5175 + qlt->instance, id, cmd_handle, ent_id);
3114 5176 qlt_free_mailbox_command(qlt, mcp);
3115 5177 return (QLT_FAILURE);
3116 5178 }
3117 5179 p += 8;
3118 5180 }
3119 5181 if (!found) {
3120 5182 *ret_handle = cmd_handle;
3121 5183 }
3122 5184 qlt_free_mailbox_command(qlt, mcp);
3123 5185 return (FCT_SUCCESS);
3124 5186 }
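
The GET ID LIST (mailbox 0x7C) buffer scanned above is a packed array of 8-byte records: a little-endian 24-bit port ID in bytes 0..2 and a 16-bit login handle in bytes 4..5. A freestanding decoder for one record (the structure and helper name are illustrative):

#include <stdint.h>

struct id_entry {
	uint32_t portid;	/* 24-bit N_Port ID */
	uint16_t handle;	/* firmware login handle */
};

/* Decode the n-th 8-byte record of a GET_ID_LIST buffer. */
static struct id_entry
decode_id_entry(const uint8_t *buf, unsigned int n)
{
	const uint8_t *p = buf + (n * 8);
	struct id_entry e;

	e.portid = ((uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	    ((uint32_t)p[2] << 16)) & 0xFFFFFF;
	e.handle = (uint16_t)(p[4] | (p[5] << 8));
	return (e);
}
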
3125 5187
3126 5188 /* ARGSUSED */
3127 5189 fct_status_t
3128 5190 qlt_fill_plogi_req(fct_local_port_t *port, fct_remote_port_t *rp,
3129 - fct_cmd_t *login)
5191 + fct_cmd_t *login)
3130 5192 {
3131 5193 uint8_t *p;
3132 5194
3133 5195 p = ((fct_els_t *)login->cmd_specific)->els_req_payload;
3134 5196 p[0] = ELS_OP_PLOGI;
3135 5197 *((uint16_t *)(&p[4])) = 0x2020;
3136 5198 p[7] = 3;
3137 5199 p[8] = 0x88;
3138 5200 p[10] = 8;
3139 5201 p[13] = 0xff; p[15] = 0x1f;
3140 5202 p[18] = 7; p[19] = 0xd0;
3141 5203
3142 5204 bcopy(port->port_pwwn, p + 20, 8);
3143 5205 bcopy(port->port_nwwn, p + 28, 8);
3144 5206
3145 5207 p[68] = 0x80;
( 6 lines elided )
3146 5208 p[74] = 8;
3147 5209 p[77] = 0xff;
3148 5210 p[81] = 1;
3149 5211
3150 5212 return (FCT_SUCCESS);
3151 5213 }
3152 5214
3153 5215 /* ARGSUSED */
3154 5216 fct_status_t
3155 5217 qlt_fill_plogi_resp(fct_local_port_t *port, fct_remote_port_t *rp,
3156 - fct_cmd_t *login)
5218 + fct_cmd_t *login)
3157 5219 {
3158 5220 return (FCT_SUCCESS);
3159 5221 }
3160 5222
3161 5223 fct_status_t
3162 5224 qlt_register_remote_port(fct_local_port_t *port, fct_remote_port_t *rp,
3163 5225 fct_cmd_t *login)
3164 5226 {
3165 5227 uint16_t h;
3166 5228 fct_status_t ret;
3167 5229 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
3168 5230
3169 5231 switch (rp->rp_id) {
3170 5232 case 0xFFFFFC: h = 0x7FC; break;
3171 5233 case 0xFFFFFD: h = 0x7FD; break;
3172 5234 case 0xFFFFFE: h = 0x7FE; break;
3173 5235 case 0xFFFFFF: h = 0x7FF; break;
3174 5236 default:
3175 5237 ret = qlt_portid_to_handle(qlt, rp->rp_id,
3176 5238 login->cmd_rp_handle, &h);
3177 5239 if (ret != FCT_SUCCESS) {
3178 5240 EL(qlt, "qlt_portid_to_handle, status=%llxh\n", ret);
3179 5241 return (ret);
3180 5242 }
3181 5243 }
3182 5244
3183 5245 if (login->cmd_type == FCT_CMD_SOL_ELS) {
( 17 lines elided )
3184 5246 ret = qlt_fill_plogi_req(port, rp, login);
3185 5247 } else {
3186 5248 ret = qlt_fill_plogi_resp(port, rp, login);
3187 5249 }
3188 5250
3189 5251 if (ret != FCT_SUCCESS) {
3190 5252 EL(qlt, "qlt_fill_plogi, status=%llxh\n", ret);
3191 5253 return (ret);
3192 5254 }
3193 5255
5256 + EL(qlt, "rport id=%xh cmd_type=%xh handle=%xh(%xh)\n",
5257 + rp->rp_id, login->cmd_type, h, rp->rp_handle);
5258 +
3194 5259 if (h == FCT_HANDLE_NONE)
3195 5260 return (FCT_SUCCESS);
3196 5261
3197 5262 if (rp->rp_handle == FCT_HANDLE_NONE) {
3198 5263 rp->rp_handle = h;
3199 5264 return (FCT_SUCCESS);
3200 5265 }
3201 5266
3202 5267 if (rp->rp_handle == h)
3203 5268 return (FCT_SUCCESS);
3204 5269
3205 - EL(qlt, "rp_handle=%xh != h=%xh\n", rp->rp_handle, h);
5270 + EL(qlt, "failed, rp_handle=%xh != h=%xh\n", rp->rp_handle, h);
3206 5271 return (FCT_FAILURE);
3207 5272 }
5273 +
3208 5274 /* invoked in single thread */
3209 5275 fct_status_t
3210 5276 qlt_deregister_remote_port(fct_local_port_t *port, fct_remote_port_t *rp)
3211 5277 {
3212 5278 uint8_t *req;
3213 5279 qlt_state_t *qlt;
3214 5280 clock_t dereg_req_timer;
3215 5281 fct_status_t ret;
3216 5282
3217 5283 qlt = (qlt_state_t *)port->port_fca_private;
3218 5284
3219 5285 if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
3220 5286 (qlt->qlt_state == FCT_STATE_OFFLINING))
3221 5287 return (FCT_SUCCESS);
3222 5288 ASSERT(qlt->rp_id_in_dereg == 0);
3223 5289
3224 - mutex_enter(&qlt->preq_lock);
3225 - req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
3226 - if (req == NULL) {
3227 - mutex_exit(&qlt->preq_lock);
3228 - return (FCT_BUSY);
5290 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
5291 + mutex_enter(&qlt->mq_req[0].mq_lock);
5292 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, 0);
5293 + if (req == NULL) {
5294 + EL(qlt, "req = NULL\n");
5295 + mutex_exit(&qlt->mq_req[0].mq_lock);
5296 + return (FCT_BUSY);
5297 + }
5298 + } else {
5299 + mutex_enter(&qlt->preq_lock);
5300 + req = (uint8_t *)qlt_get_preq_entries(qlt, 1);
5301 + if (req == NULL) {
5302 + EL(qlt, "req = NULL\n");
5303 + mutex_exit(&qlt->preq_lock);
5304 + return (FCT_BUSY);
5305 + }
3229 5306 }
3230 5307 bzero(req, IOCB_SIZE);
3231 5308 req[0] = 0x52; req[1] = 1;
3232 5309 /* QMEM_WR32(qlt, (&req[4]), 0xffffffff); */
3233 5310 QMEM_WR16(qlt, (&req[0xA]), rp->rp_handle);
3234 5311 QMEM_WR16(qlt, (&req[0xC]), 0x98); /* implicit logo */
3235 5312 QMEM_WR32(qlt, (&req[0x10]), rp->rp_id);
3236 5313 qlt->rp_id_in_dereg = rp->rp_id;
3237 - qlt_submit_preq_entries(qlt, 1);
5314 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
5315 + qlt_submit_req_entries(qlt, 1, 0);
5316 + } else {
5317 + qlt_submit_preq_entries(qlt, 1);
5318 + }
3238 5319
3239 5320 dereg_req_timer = ddi_get_lbolt() + drv_usectohz(DEREG_RP_TIMEOUT);
3240 5321 if (cv_timedwait(&qlt->rp_dereg_cv,
3241 - &qlt->preq_lock, dereg_req_timer) > 0) {
5322 + (((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) ?
5323 + &qlt->mq_req[0].mq_lock : &qlt->preq_lock),
5324 + dereg_req_timer) > 0) {
3242 5325 ret = qlt->rp_dereg_status;
3243 5326 } else {
3244 5327 ret = FCT_BUSY;
3245 5328 }
3246 5329 qlt->rp_dereg_status = 0;
3247 5330 qlt->rp_id_in_dereg = 0;
3248 - mutex_exit(&qlt->preq_lock);
5331 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
5332 + mutex_exit(&qlt->mq_req[0].mq_lock);
5333 + } else {
5334 + mutex_exit(&qlt->preq_lock);
5335 + }
5336 +
5337 + EL(qlt, "Dereg remote port(%Xh), ret=%llxh\n",
5338 + rp->rp_id, ret);
5339 +
3249 5340 return (ret);
3250 5341 }
3251 5342
3252 5343 /*
3253 5344 * Pass received ELS up to framework.
3254 5345 */
3255 5346 static void
3256 5347 qlt_handle_purex(qlt_state_t *qlt, uint8_t *resp)
3257 5348 {
3258 5349 fct_cmd_t *cmd;
3259 5350 fct_els_t *els;
3260 5351 qlt_cmd_t *qcmd;
3261 5352 uint32_t payload_size;
3262 5353 uint32_t remote_portid;
3263 5354 uint8_t *pldptr, *bndrptr;
3264 5355 int i, off;
3265 5356 uint16_t iocb_flags;
3266 - char info[QLT_INFO_LEN];
5357 + char info[160];
3267 5358
3268 5359 remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
3269 5360 ((uint32_t)(resp[0x1A])) << 16;
3270 5361 iocb_flags = QMEM_RD16(qlt, (&resp[8]));
3271 5362 if (iocb_flags & BIT_15) {
3272 5363 payload_size = (QMEM_RD16(qlt, (&resp[0x0e])) & 0xfff) - 24;
3273 5364 } else {
3274 5365 payload_size = QMEM_RD16(qlt, (&resp[0x0c])) - 24;
3275 5366 }
3276 5367
3277 5368 if (payload_size > ((uint32_t)resp[1] * IOCB_SIZE - 0x2C)) {
( 1 line elided )
3278 5369 EL(qlt, "payload is too large = %xh\n", payload_size);
3279 5370 cmn_err(CE_WARN, "handle_purex: payload is too large");
3280 5371 goto cmd_null;
3281 5372 }
3282 5373
3283 5374 cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS,
3284 5375 (int)(payload_size + GET_STRUCT_SIZE(qlt_cmd_t)), 0);
3285 5376 if (cmd == NULL) {
3286 5377 EL(qlt, "fct_alloc cmd==NULL\n");
3287 5378 cmd_null:;
3288 - (void) snprintf(info, sizeof (info),
3289 - "qlt_handle_purex: qlt-%p, "
3290 - "can't allocate space for fct_cmd", (void *)qlt);
5379 + (void) snprintf(info, 160, "qlt_handle_purex: qlt-%p, can't "
5380 + "allocate space for fct_cmd", (void *)qlt);
5381 + info[159] = 0;
3291 5382 (void) fct_port_shutdown(qlt->qlt_port,
3292 5383 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3293 5384 return;
3294 5385 }
3295 5386
3296 5387 cmd->cmd_port = qlt->qlt_port;
3297 5388 cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xa);
3298 5389 if (cmd->cmd_rp_handle == 0xFFFF) {
3299 5390 cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3300 5391 }
3301 5392
3302 5393 els = (fct_els_t *)cmd->cmd_specific;
3303 5394 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
( 3 lines elided )
3304 5395 els->els_req_size = (uint16_t)payload_size;
3305 5396 els->els_req_payload = GET_BYTE_OFFSET(qcmd,
3306 5397 GET_STRUCT_SIZE(qlt_cmd_t));
3307 5398 qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&resp[0x10]));
3308 5399 cmd->cmd_rportid = remote_portid;
3309 5400 cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
3310 5401 ((uint32_t)(resp[0x16])) << 16;
3311 5402 cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
3312 5403 cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
3313 5404 pldptr = &resp[0x2C];
3314 - bndrptr = (uint8_t *)(qlt->resp_ptr + (RESPONSE_QUEUE_ENTRIES << 6));
5405 + bndrptr = (uint8_t *)(qlt->mq_resp[0].mq_ptr
5406 + + (RESPONSE_QUEUE_ENTRIES << 6));
3315 5407 for (i = 0, off = 0x2c; i < payload_size; i += 4) {
3316 5408 /* Take care of fw's swapping of payload */
3317 5409 els->els_req_payload[i] = pldptr[3];
3318 5410 els->els_req_payload[i+1] = pldptr[2];
3319 5411 els->els_req_payload[i+2] = pldptr[1];
3320 5412 els->els_req_payload[i+3] = pldptr[0];
3321 5413 pldptr += 4;
3322 5414 if (pldptr == bndrptr)
3323 - pldptr = (uint8_t *)qlt->resp_ptr;
5415 + pldptr = (uint8_t *)qlt->mq_resp[0].mq_ptr;
3324 5416 off += 4;
3325 5417 if (off >= IOCB_SIZE) {
3326 5418 off = 4;
3327 5419 pldptr += 4;
3328 5420 }
3329 5421 }
5422 +
5423 + EL(qlt, "remote portid = %xh logi/o(%xh) to us revd rex1=%xh\n",
5424 + remote_portid, els->els_req_payload[0], qcmd->fw_xchg_addr);
5425 +
3330 5426 fct_post_rcvd_cmd(cmd, 0);
3331 5427 }
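
The copy loop above undoes the firmware's packing: the received ELS payload sits in the PUREX IOCB as little-endian 32-bit words, so every group of four bytes is reversed to recover the big-endian wire image, with extra handling for response-ring wrap and continuation-IOCB headers. The byte-reversal step on its own:

#include <stdint.h>
#include <stddef.h>

/*
 * Reverse each 32-bit word of 'src' into 'dst' (len rounded up to 4 by
 * the caller).  Only the byte-swap part of the copy above; ring wrap
 * and the 64-byte IOCB stride are omitted.
 */
static void
swap_els_payload(uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i += 4) {
		dst[i + 0] = src[i + 3];
		dst[i + 1] = src[i + 2];
		dst[i + 2] = src[i + 1];
		dst[i + 3] = src[i + 0];
	}
}
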
3332 5428
3333 5429 fct_status_t
3334 5430 qlt_send_cmd_response(fct_cmd_t *cmd, uint32_t ioflags)
3335 5431 {
3336 5432 qlt_state_t *qlt;
3337 - char info[QLT_INFO_LEN];
5433 + char info[160];
3338 5434
3339 5435 qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3340 5436
3341 5437 if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
3342 5438 if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3343 5439 EL(qlt, "ioflags = %xh\n", ioflags);
3344 5440 goto fatal_panic;
3345 5441 } else {
3346 5442 return (qlt_send_status(qlt, cmd));
3347 5443 }
3348 5444 }
3349 5445
3350 5446 if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
3351 5447 if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3352 5448 goto fatal_panic;
3353 5449 } else {
3354 5450 return (qlt_send_els_response(qlt, cmd));
3355 5451 }
3356 5452 }
3357 5453
3358 5454 if (ioflags & FCT_IOF_FORCE_FCA_DONE) {
3359 5455 cmd->cmd_handle = 0;
3360 5456 }
( 13 lines elided )
3361 5457
3362 5458 if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
3363 5459 return (qlt_send_abts_response(qlt, cmd, 0));
3364 5460 } else {
3365 5461 EL(qlt, "cmd->cmd_type=%xh\n", cmd->cmd_type);
3366 5462 ASSERT(0);
3367 5463 return (FCT_FAILURE);
3368 5464 }
3369 5465
3370 5466 fatal_panic:;
3371 - (void) snprintf(info, sizeof (info),
3372 - "qlt_send_cmd_response: can not handle "
5467 + (void) snprintf(info, 160, "qlt_send_cmd_response: can not handle "
3373 5468 "FCT_IOF_FORCE_FCA_DONE for cmd %p, ioflags-%x", (void *)cmd,
3374 5469 ioflags);
5470 + info[159] = 0;
3375 5471 (void) fct_port_shutdown(qlt->qlt_port,
3376 5472 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3377 5473 return (FCT_FAILURE);
3378 5474 }
3379 5475
3380 5476 /* ARGSUSED */
3381 5477 fct_status_t
3382 5478 qlt_xfer_scsi_data(fct_cmd_t *cmd, stmf_data_buf_t *dbuf, uint32_t ioflags)
3383 5479 {
3384 - qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
3385 - qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
3386 - qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3387 - uint8_t *req, rcnt;
3388 - uint16_t flags;
3389 - uint16_t cookie_count;
5480 + qlt_dmem_bctl_t *bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
5481 + qlt_state_t *qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
5482 + qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
5483 + uint8_t *req, rcnt;
5484 + uint16_t flags;
5485 + uint16_t cookie_count;
5486 + uint32_t ent_cnt;
5487 + uint16_t qi;
3390 5488
5489 + qi = qcmd->qid;
5490 +
3391 5491 if (dbuf->db_handle == 0)
3392 5492 qcmd->dbuf = dbuf;
3393 5493 flags = (uint16_t)(((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5);
3394 5494 if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
3395 5495 flags = (uint16_t)(flags | 2);
3396 5496 qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORDEV);
3397 5497 } else {
3398 5498 flags = (uint16_t)(flags | 1);
3399 5499 }
3400 5500
3401 5501 if (dbuf->db_flags & DB_SEND_STATUS_GOOD)
3402 5502 flags = (uint16_t)(flags | BIT_15);
3403 5503
( 3 lines elided )
3404 5504 if (dbuf->db_flags & DB_LU_DATA_BUF) {
3405 5505 /*
3406 5506 * Data bufs from LU are in scatter/gather list format.
3407 5507 */
3408 5508 cookie_count = qlt_get_cookie_count(dbuf);
3409 5509 rcnt = qlt_get_iocb_count(cookie_count);
3410 5510 } else {
3411 5511 cookie_count = 1;
3412 5512 rcnt = 1;
3413 5513 }
3414 - mutex_enter(&qlt->req_lock);
3415 - req = (uint8_t *)qlt_get_req_entries(qlt, rcnt);
5514 + mutex_enter(&qlt->mq_req[qi].mq_lock);
5515 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
3416 5516 if (req == NULL) {
3417 - mutex_exit(&qlt->req_lock);
5517 + mutex_exit(&qlt->mq_req[qi].mq_lock);
3418 5518 return (FCT_BUSY);
3419 5519 }
3420 - bzero(req, IOCB_SIZE); /* XXX needed ? */
5520 + bzero(req, IOCB_SIZE);
3421 5521 req[0] = 0x12;
3422 5522 req[1] = rcnt;
3423 5523 req[2] = dbuf->db_handle;
3424 - QMEM_WR32(qlt, req+4, cmd->cmd_handle);
3425 - QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
3426 - QMEM_WR16(qlt, req+10, 60); /* 60 seconds timeout */
3427 - QMEM_WR16(qlt, req+12, cookie_count);
3428 - QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
3429 - QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
3430 - QMEM_WR16(qlt, req+0x1A, flags);
3431 - QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
3432 - QMEM_WR32(qlt, req+0x24, dbuf->db_relative_offset);
3433 - QMEM_WR32(qlt, req+0x2C, dbuf->db_data_size);
5524 + QMEM_WR32_REQ(qlt, qi, req+4, cmd->cmd_handle);
5525 + QMEM_WR16_REQ(qlt, qi, req+8, cmd->cmd_rp->rp_handle);
5526 + QMEM_WR16_REQ(qlt, qi, req+10, 60); /* 60 seconds timeout */
5527 + QMEM_WR16_REQ(qlt, qi, req+12, cookie_count);
5528 + QMEM_WR32_REQ(qlt, qi, req+0x10, cmd->cmd_rportid);
5529 + QMEM_WR32_REQ(qlt, qi, req+0x14, qcmd->fw_xchg_addr);
5530 + QMEM_WR16_REQ(qlt, qi, req+0x1A, flags);
5531 + QMEM_WR16_REQ(qlt, qi, req+0x20, cmd->cmd_oxid);
5532 + QMEM_WR32_REQ(qlt, qi, req+0x24, dbuf->db_relative_offset);
5533 + QMEM_WR32_REQ(qlt, qi, req+0x2C, dbuf->db_data_size);
3434 5534 if (dbuf->db_flags & DB_LU_DATA_BUF) {
3435 5535 uint8_t *qptr; /* qlt continuation segs */
3436 5536 uint16_t cookie_resid;
3437 5537 uint16_t cont_segs;
3438 5538 ddi_dma_cookie_t cookie, *ckp;
3439 5539
3440 5540 /*
3441 5541 * See if the dma cookies are in simple array format.
3442 5542 */
3443 5543 ckp = qlt_get_cookie_array(dbuf);
3444 5544
3445 5545 /*
3446 5546 * Program the first segment into main record.
3447 5547 */
3448 5548 if (ckp) {
3449 5549 ASSERT(ckp->dmac_size);
3450 - QMEM_WR64(qlt, req+0x34, ckp->dmac_laddress);
3451 - QMEM_WR32(qlt, req+0x3c, ckp->dmac_size);
5550 + QMEM_WR64_REQ(qlt, qi, req+0x34, ckp->dmac_laddress);
5551 + QMEM_WR32_REQ(qlt, qi, req+0x3c, ckp->dmac_size);
3452 5552 } else {
3453 5553 qlt_ddi_dma_nextcookie(dbuf, &cookie);
3454 5554 ASSERT(cookie.dmac_size);
3455 - QMEM_WR64(qlt, req+0x34, cookie.dmac_laddress);
3456 - QMEM_WR32(qlt, req+0x3c, cookie.dmac_size);
5555 + QMEM_WR64_REQ(qlt, qi, req+0x34, cookie.dmac_laddress);
5556 + QMEM_WR32_REQ(qlt, qi, req+0x3c, cookie.dmac_size);
3457 5557 }
3458 5558 cookie_resid = cookie_count-1;
3459 5559
5560 + ent_cnt = (qi == 0) ? REQUEST_QUEUE_ENTRIES :
5561 + REQUEST_QUEUE_MQ_ENTRIES;
3460 5562 /*
3461 5563 * Program remaining segments into continuation records.
3462 5564 */
3463 5565 while (cookie_resid) {
3464 5566 req += IOCB_SIZE;
3465 - if (req >= (uint8_t *)qlt->resp_ptr) {
3466 - req = (uint8_t *)qlt->req_ptr;
5567 + if (req >= (uint8_t *)(qlt->mq_req[qi].mq_ptr +
5568 + (ent_cnt * IOCB_SIZE))) {
5569 + req = (uint8_t *)(qlt->mq_req[qi].mq_ptr);
3467 5570 }
5571 +
3468 5572 req[0] = 0x0a;
3469 5573 req[1] = 1;
3470 5574 req[2] = req[3] = 0; /* tidy */
3471 5575 qptr = &req[4];
3472 5576 for (cont_segs = CONT_A64_DATA_SEGMENTS;
3473 5577 cont_segs && cookie_resid; cont_segs--) {
3474 5578
3475 5579 if (ckp) {
3476 5580 ++ckp; /* next cookie */
3477 5581 ASSERT(ckp->dmac_size != 0);
3478 - QMEM_WR64(qlt, qptr,
5582 + QMEM_WR64_REQ(qlt, qi, qptr,
3479 5583 ckp->dmac_laddress);
3480 5584 qptr += 8; /* skip over laddress */
3481 - QMEM_WR32(qlt, qptr, ckp->dmac_size);
5585 + QMEM_WR32_REQ(qlt, qi, qptr,
5586 + ckp->dmac_size);
3482 5587 qptr += 4; /* skip over size */
3483 5588 } else {
3484 5589 qlt_ddi_dma_nextcookie(dbuf, &cookie);
3485 5590 ASSERT(cookie.dmac_size != 0);
3486 - QMEM_WR64(qlt, qptr,
5591 + QMEM_WR64_REQ(qlt, qi, qptr,
3487 5592 cookie.dmac_laddress);
3488 5593 qptr += 8; /* skip over laddress */
3489 - QMEM_WR32(qlt, qptr, cookie.dmac_size);
5594 + QMEM_WR32_REQ(qlt, qi, qptr,
5595 + cookie.dmac_size);
3490 5596 qptr += 4; /* skip over size */
3491 5597 }
3492 5598 cookie_resid--;
3493 5599 }
3494 5600 /*
3495 5601 * zero unused remainder of IOCB
3496 5602 */
3497 5603 if (cont_segs) {
3498 5604 size_t resid;
3499 5605 resid = (size_t)((uintptr_t)(req+IOCB_SIZE) -
3500 5606 (uintptr_t)qptr);
3501 5607 ASSERT(resid < IOCB_SIZE);
3502 5608 bzero(qptr, resid);
3503 5609 }
3504 5610 }
3505 5611 } else {
3506 5612 /* Single, contiguous buffer */
3507 - QMEM_WR64(qlt, req+0x34, bctl->bctl_dev_addr);
3508 - QMEM_WR32(qlt, req+0x34+8, dbuf->db_data_size);
5613 + QMEM_WR64_REQ(qlt, qi, req+0x34, bctl->bctl_dev_addr);
5614 + QMEM_WR32_REQ(qlt, qi, req+0x34+8, dbuf->db_data_size);
3509 5615 }
3510 5616
3511 - qlt_submit_req_entries(qlt, rcnt);
3512 - mutex_exit(&qlt->req_lock);
5617 + qlt_submit_req_entries(qlt, rcnt, qi);
5618 + mutex_exit(&qlt->mq_req[qi].mq_lock);
3513 5619
3514 5620 return (STMF_SUCCESS);
3515 5621 }
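
For scatter/gather buffers the routine above places the first DMA cookie in the CTIO itself and spills the rest into type 0x0A continuation entries, CONT_A64_DATA_SEGMENTS per 64-byte IOCB; rcnt from qlt_get_iocb_count() is the total number of IOCB slots reserved. A plausible form of that count is sketched below; qlt_get_iocb_count() is defined elsewhere, and the 5-segments-per-continuation figure is an assumption based on the 64-byte entry size, not taken from this diff.

/*
 * Assumed shape of the IOCB-count calculation: one segment fits in the
 * CTIO, the remainder are packed CONT_SEGS at a time into type-0x0A
 * continuation entries.  CONT_SEGS is an assumed stand-in for
 * CONT_A64_DATA_SEGMENTS.
 */
#define CONT_SEGS	5

static unsigned int
iocb_count(unsigned int cookie_count)
{
	unsigned int extra;

	if (cookie_count <= 1)
		return (1);
	extra = cookie_count - 1;
	return (1 + (extra + CONT_SEGS - 1) / CONT_SEGS);	/* ceiling */
}
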
3516 5622
3517 5623 /*
3518 5624 * We must construct proper FCP_RSP_IU now. Here we only focus on
3519 5625 * the handling of FCP_SNS_INFO. If there's protocol failures (FCP_RSP_INFO),
3520 - * we could have catched them before we enter here.
5626 + * we could have caught them before we enter here.
3521 5627 */
3522 5628 fct_status_t
3523 5629 qlt_send_status(qlt_state_t *qlt, fct_cmd_t *cmd)
3524 5630 {
3525 5631 qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3526 5632 scsi_task_t *task = (scsi_task_t *)cmd->cmd_specific;
3527 5633 qlt_dmem_bctl_t *bctl;
3528 5634 uint32_t size;
3529 5635 uint8_t *req, *fcp_rsp_iu;
3530 5636 uint8_t *psd, sensbuf[24]; /* sense data */
3531 5637 uint16_t flags;
3532 5638 uint16_t scsi_status;
3533 5639 int use_mode2;
3534 5640 int ndx;
5641 + uint16_t qi;
3535 5642
5643 + qi = qcmd->qid;
5644 +
3536 5645 /*
3537 5646 * Enter fast channel for non check condition
3538 5647 */
3539 5648 if (task->task_scsi_status != STATUS_CHECK) {
3540 5649 /*
3541 5650 * We will use mode1
3542 5651 */
3543 5652 flags = (uint16_t)(BIT_6 | BIT_15 |
3544 5653 (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3545 5654 scsi_status = (uint16_t)task->task_scsi_status;
3546 5655 if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3547 - scsi_status = (uint16_t)(scsi_status | BIT_10);
5656 + scsi_status = (uint16_t)(scsi_status | FCP_RESID_OVER);
3548 5657 } else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3549 - scsi_status = (uint16_t)(scsi_status | BIT_11);
5658 + scsi_status = (uint16_t)(scsi_status | FCP_RESID_UNDER);
3550 5659 }
3551 5660 qcmd->dbuf_rsp_iu = NULL;
3552 5661
3553 5662 /*
3554 5663 * Fillout CTIO type 7 IOCB
3555 5664 */
3556 - mutex_enter(&qlt->req_lock);
3557 - req = (uint8_t *)qlt_get_req_entries(qlt, 1);
5665 + mutex_enter(&qlt->mq_req[qi].mq_lock);
5666 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
3558 5667 if (req == NULL) {
3559 - mutex_exit(&qlt->req_lock);
5668 + mutex_exit(&qlt->mq_req[qi].mq_lock);
3560 5669 return (FCT_BUSY);
3561 5670 }
3562 5671
3563 5672 /*
3564 5673 * Common fields
3565 5674 */
3566 5675 bzero(req, IOCB_SIZE);
3567 5676 req[0x00] = 0x12;
3568 5677 req[0x01] = 0x1;
3569 5678 req[0x02] = BIT_7; /* indicate if it's a pure status req */
3570 - QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3571 - QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3572 - QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3573 - QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
5679 + QMEM_WR32_REQ(qlt, qi, req + 0x04, cmd->cmd_handle);
5680 + QMEM_WR16_REQ(qlt, qi, req + 0x08, cmd->cmd_rp->rp_handle);
5681 + QMEM_WR32_REQ(qlt, qi, req + 0x10, cmd->cmd_rportid);
5682 + QMEM_WR32_REQ(qlt, qi, req + 0x14, qcmd->fw_xchg_addr);
3574 5683
5684 + /* handle TMF completion - !!! Important FIX */
5685 + if (task->task_mgmt_function) {
5686 + scsi_status =
5687 + (uint16_t)(scsi_status | FCP_RESP_LEN_VALID);
5688 +
5689 + /* no sense length, 4 bytes of resp info */
5690 + QMEM_WR16_REQ(qlt, qi, req + 0x24, 4);
5691 + }
5692 +
3575 5693 /*
3576 5694 * Mode-specific fields
3577 5695 */
3578 - QMEM_WR16(qlt, req + 0x1A, flags);
3579 - QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3580 - QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
3581 - QMEM_WR16(qlt, req + 0x22, scsi_status);
5696 + QMEM_WR16_REQ(qlt, qi, req + 0x1A, flags);
5697 + QMEM_WR32_REQ(qlt, qi, req + 0x1C, task->task_resid);
5698 + QMEM_WR16_REQ(qlt, qi, req + 0x20, cmd->cmd_oxid);
5699 + QMEM_WR16_REQ(qlt, qi, req + 0x22, scsi_status);
3582 5700
3583 5701 /*
3584 5702 * Trigger FW to send SCSI status out
3585 5703 */
3586 - qlt_submit_req_entries(qlt, 1);
3587 - mutex_exit(&qlt->req_lock);
5704 + qlt_submit_req_entries(qlt, 1, qi);
5705 + mutex_exit(&qlt->mq_req[qi].mq_lock);
3588 5706 return (STMF_SUCCESS);
3589 5707 }
3590 5708
3591 5709 ASSERT(task->task_scsi_status == STATUS_CHECK);
3592 5710 /*
3593 5711 * Decide the SCSI status mode, that should be used
3594 5712 */
3595 5713 use_mode2 = (task->task_sense_length > 24);
3596 5714
3597 5715 /*
3598 5716 * Prepare required information per the SCSI status mode
3599 5717 */
3600 5718 flags = (uint16_t)(BIT_15 |
3601 5719 (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
3602 5720 if (use_mode2) {
3603 5721 flags = (uint16_t)(flags | BIT_7);
3604 5722
3605 5723 size = task->task_sense_length;
3606 5724 qcmd->dbuf_rsp_iu = qlt_i_dmem_alloc(qlt,
3607 5725 task->task_sense_length, &size, 0);
3608 5726 if (!qcmd->dbuf_rsp_iu) {
3609 5727 return (FCT_ALLOC_FAILURE);
3610 5728 }
3611 5729
3612 5730 /*
3613 5731 * Start to construct FCP_RSP IU
3614 5732 */
3615 5733 fcp_rsp_iu = qcmd->dbuf_rsp_iu->db_sglist[0].seg_addr;
3616 5734 bzero(fcp_rsp_iu, 24);
3617 5735
3618 5736 /*
3619 5737 * FCP_RSP IU flags, byte10
3620 5738 */
3621 5739 fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_1);
3622 5740 if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3623 5741 fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_2);
3624 5742 } else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3625 5743 fcp_rsp_iu[10] = (uint8_t)(fcp_rsp_iu[10] | BIT_3);
3626 5744 }
3627 5745
3628 5746 /*
3629 5747 * SCSI status code, byte11
3630 5748 */
3631 5749 fcp_rsp_iu[11] = task->task_scsi_status;
3632 5750
3633 5751 /*
3634 5752 * FCP_RESID (Overrun or underrun)
3635 5753 */
3636 5754 fcp_rsp_iu[12] = (uint8_t)((task->task_resid >> 24) & 0xFF);
3637 5755 fcp_rsp_iu[13] = (uint8_t)((task->task_resid >> 16) & 0xFF);
3638 5756 fcp_rsp_iu[14] = (uint8_t)((task->task_resid >> 8) & 0xFF);
3639 5757 fcp_rsp_iu[15] = (uint8_t)((task->task_resid >> 0) & 0xFF);
3640 5758
3641 5759 /*
3642 5760 * FCP_SNS_LEN
3643 5761 */
3644 5762 fcp_rsp_iu[18] = (uint8_t)((task->task_sense_length >> 8) &
3645 5763 0xFF);
3646 5764 fcp_rsp_iu[19] = (uint8_t)((task->task_sense_length >> 0) &
3647 5765 0xFF);
3648 5766
3649 5767 /*
3650 5768 * FCP_RSP_LEN
3651 5769 */
3652 5770 /*
3653 5771 * no FCP_RSP_INFO
3654 5772 */
3655 5773 /*
3656 5774 * FCP_SNS_INFO
3657 5775 */
3658 5776 bcopy(task->task_sense_data, fcp_rsp_iu + 24,
3659 5777 task->task_sense_length);
3660 5778
3661 5779 /*
3662 5780 * Ensure dma data consistency
3663 5781 */
3664 5782 qlt_dmem_dma_sync(qcmd->dbuf_rsp_iu, DDI_DMA_SYNC_FORDEV);
3665 5783 } else {
3666 5784 flags = (uint16_t)(flags | BIT_6);
3667 5785
3668 5786 scsi_status = (uint16_t)task->task_scsi_status;
3669 5787 if (task->task_status_ctrl == TASK_SCTRL_OVER) {
3670 5788 scsi_status = (uint16_t)(scsi_status | BIT_10);
3671 5789 } else if (task->task_status_ctrl == TASK_SCTRL_UNDER) {
3672 5790 scsi_status = (uint16_t)(scsi_status | BIT_11);
3673 5791 }
( 76 lines elided )
3674 5792 if (task->task_sense_length) {
3675 5793 scsi_status = (uint16_t)(scsi_status | BIT_9);
3676 5794 }
3677 5795 bcopy(task->task_sense_data, sensbuf, task->task_sense_length);
3678 5796 qcmd->dbuf_rsp_iu = NULL;
3679 5797 }
3680 5798
3681 5799 /*
3682 5800 * Fillout CTIO type 7 IOCB
3683 5801 */
3684 - mutex_enter(&qlt->req_lock);
3685 - req = (uint8_t *)qlt_get_req_entries(qlt, 1);
5802 + mutex_enter(&qlt->mq_req[qi].mq_lock);
5803 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
3686 5804 if (req == NULL) {
3687 - mutex_exit(&qlt->req_lock);
5805 + mutex_exit(&qlt->mq_req[qi].mq_lock);
3688 5806 if (use_mode2) {
3689 5807 qlt_dmem_free(cmd->cmd_port->port_fds,
3690 5808 qcmd->dbuf_rsp_iu);
3691 5809 qcmd->dbuf_rsp_iu = NULL;
3692 5810 }
3693 5811 return (FCT_BUSY);
3694 5812 }
3695 5813
3696 5814 /*
3697 5815 * Common fields
3698 5816 */
3699 5817 bzero(req, IOCB_SIZE);
3700 5818 req[0x00] = 0x12;
3701 5819 req[0x01] = 0x1;
3702 5820 req[0x02] = BIT_7; /* to indicate if it's a pure status req */
3703 - QMEM_WR32(qlt, req + 0x04, cmd->cmd_handle);
3704 - QMEM_WR16(qlt, req + 0x08, cmd->cmd_rp->rp_handle);
3705 - QMEM_WR16(qlt, req + 0x0A, 0); /* not timed by FW */
5821 + QMEM_WR32_REQ(qlt, qi, req + 0x04, cmd->cmd_handle);
5822 + QMEM_WR16_REQ(qlt, qi, req + 0x08, cmd->cmd_rp->rp_handle);
5823 + QMEM_WR16_REQ(qlt, qi, req + 0x0A, 0); /* not timed by FW */
3706 5824 if (use_mode2) {
3707 - QMEM_WR16(qlt, req+0x0C, 1); /* FCP RSP IU data field */
5825 + /* FCP RSP IU data field */
5826 + QMEM_WR16_REQ(qlt, qi, req+0x0C, 1);
3708 5827 }
3709 - QMEM_WR32(qlt, req + 0x10, cmd->cmd_rportid);
3710 - QMEM_WR32(qlt, req + 0x14, qcmd->fw_xchg_addr);
5828 + QMEM_WR32_REQ(qlt, qi, req + 0x10, cmd->cmd_rportid);
5829 + QMEM_WR32_REQ(qlt, qi, req + 0x14, qcmd->fw_xchg_addr);
3711 5830
3712 5831 /*
3713 5832 * Mode-specific fields
3714 5833 */
3715 5834 if (!use_mode2) {
3716 - QMEM_WR16(qlt, req + 0x18, task->task_sense_length);
5835 + QMEM_WR16_REQ(qlt, qi, req + 0x18, task->task_sense_length);
3717 5836 }
3718 - QMEM_WR16(qlt, req + 0x1A, flags);
3719 - QMEM_WR32(qlt, req + 0x1C, task->task_resid);
3720 - QMEM_WR16(qlt, req + 0x20, cmd->cmd_oxid);
5837 + QMEM_WR16_REQ(qlt, qi, req + 0x1A, flags);
5838 + QMEM_WR32_REQ(qlt, qi, req + 0x1C, task->task_resid);
5839 + QMEM_WR16_REQ(qlt, qi, req + 0x20, cmd->cmd_oxid);
3721 5840 if (use_mode2) {
3722 5841 bctl = (qlt_dmem_bctl_t *)qcmd->dbuf_rsp_iu->db_port_private;
3723 - QMEM_WR32(qlt, req + 0x2C, 24 + task->task_sense_length);
3724 - QMEM_WR64(qlt, req + 0x34, bctl->bctl_dev_addr);
3725 - QMEM_WR32(qlt, req + 0x3C, 24 + task->task_sense_length);
5842 + QMEM_WR32_REQ(qlt, qi, req + 0x2C,
5843 + 24 + task->task_sense_length);
5844 + QMEM_WR64_REQ(qlt, qi, req + 0x34, bctl->bctl_dev_addr);
5845 + QMEM_WR32_REQ(qlt, qi, req + 0x3C,
5846 + 24 + task->task_sense_length);
3726 5847 } else {
3727 - QMEM_WR16(qlt, req + 0x22, scsi_status);
5848 + QMEM_WR16_REQ(qlt, qi, req + 0x22, scsi_status);
3728 5849 psd = req+0x28;
3729 5850
3730 5851 /*
3731 5852 * Data in sense buf is always big-endian, data in IOCB
3732 5853 * should always be little-endian, so we must do swapping.
3733 5854 */
3734 5855 size = ((task->task_sense_length + 3) & (~3));
3735 5856 for (ndx = 0; ndx < size; ndx += 4) {
3736 5857 psd[ndx + 0] = sensbuf[ndx + 3];
3737 5858 psd[ndx + 1] = sensbuf[ndx + 2];
3738 5859 psd[ndx + 2] = sensbuf[ndx + 1];
3739 5860 psd[ndx + 3] = sensbuf[ndx + 0];
3740 5861 }
3741 5862 }
3742 5863
3743 5864 /*
3744 5865 * Trigger FW to send SCSI status out
3745 5866 */
3746 - qlt_submit_req_entries(qlt, 1);
3747 - mutex_exit(&qlt->req_lock);
5867 + qlt_submit_req_entries(qlt, 1, qi);
5868 + mutex_exit(&qlt->mq_req[qi].mq_lock);
3748 5869
3749 5870 return (STMF_SUCCESS);
3750 5871 }
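
In the mode 1 paths above, the 16-bit field at CTIO offset 0x22 carries the SCSI status byte plus FCP_RSP flag bits: residual overflow, residual underflow, sense-length valid, and (for task management responses) response-length valid. A small sketch of assembling that field follows; the bit positions mirror the BIT_9/BIT_10/BIT_11 usage above, but the macro names here are local assumptions rather than the driver's definitions.

#include <stdint.h>

/* Assumed flag values, consistent with the BIT_9/BIT_10/BIT_11 use above. */
#define FCP_SNS_LEN_VALID_BIT	(1u << 9)
#define FCP_RESID_OVER_BIT	(1u << 10)
#define FCP_RESID_UNDER_BIT	(1u << 11)

enum resid_ctrl { RESID_NONE, RESID_OVER, RESID_UNDER };

static uint16_t
build_fcp_status(uint8_t scsi_status, enum resid_ctrl rc, uint16_t sense_len)
{
	uint16_t s = scsi_status;

	if (rc == RESID_OVER)
		s |= FCP_RESID_OVER_BIT;
	else if (rc == RESID_UNDER)
		s |= FCP_RESID_UNDER_BIT;
	if (sense_len != 0)
		s |= FCP_SNS_LEN_VALID_BIT;
	return (s);
}
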
3751 5872
3752 5873 fct_status_t
3753 5874 qlt_send_els_response(qlt_state_t *qlt, fct_cmd_t *cmd)
3754 5875 {
3755 5876 qlt_cmd_t *qcmd;
3756 5877 fct_els_t *els = (fct_els_t *)cmd->cmd_specific;
3757 5878 uint8_t *req, *addr;
3758 5879 qlt_dmem_bctl_t *bctl;
3759 5880 uint32_t minsize;
3760 5881 uint8_t elsop, req1f;
5882 + uint16_t qi = 0;
3761 5883
3762 5884 addr = els->els_resp_payload;
3763 5885 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3764 5886
3765 5887 minsize = els->els_resp_size;
3766 5888 qcmd->dbuf = qlt_i_dmem_alloc(qlt, els->els_resp_size, &minsize, 0);
3767 5889 if (qcmd->dbuf == NULL)
3768 5890 return (FCT_BUSY);
3769 5891
3770 5892 bctl = (qlt_dmem_bctl_t *)qcmd->dbuf->db_port_private;
3771 5893
3772 5894 bcopy(addr, qcmd->dbuf->db_sglist[0].seg_addr, els->els_resp_size);
3773 5895 qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORDEV);
3774 5896
3775 5897 if (addr[0] == 0x02) { /* ACC */
( 5 lines elided )
3776 5898 req1f = BIT_5;
3777 5899 } else {
3778 5900 req1f = BIT_6;
3779 5901 }
3780 5902 elsop = els->els_req_payload[0];
3781 5903 if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
3782 5904 (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
3783 5905 req1f = (uint8_t)(req1f | BIT_4);
3784 5906 }
3785 5907
3786 - mutex_enter(&qlt->req_lock);
3787 - req = (uint8_t *)qlt_get_req_entries(qlt, 1);
5908 + mutex_enter(&qlt->mq_req[qi].mq_lock);
5909 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
3788 5910 if (req == NULL) {
3789 - mutex_exit(&qlt->req_lock);
5911 + EL(qlt, "req = NULL, %xh %xh %p %xh\n", cmd->cmd_oxid,
5912 + cmd->cmd_rportid, cmd, qcmd->fw_xchg_addr);
5913 + mutex_exit(&qlt->mq_req[qi].mq_lock);
3790 5914 qlt_dmem_free(NULL, qcmd->dbuf);
3791 5915 qcmd->dbuf = NULL;
3792 5916 return (FCT_BUSY);
3793 5917 }
3794 5918 bzero(req, IOCB_SIZE);
3795 5919 req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
3796 5920 req[0x16] = elsop; req[0x1f] = req1f;
3797 5921 QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3798 5922 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3799 5923 QMEM_WR16(qlt, (&req[0xC]), 1);
3800 5924 QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
3801 5925 QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
3802 5926 if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
3803 5927 req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
3804 5928 req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
3805 5929 req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
3806 5930 }
3807 5931 QMEM_WR32(qlt, (&req[0x24]), els->els_resp_size);
3808 5932 QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
3809 5933 QMEM_WR32(qlt, (&req[0x30]), els->els_resp_size);
3810 - qlt_submit_req_entries(qlt, 1);
3811 - mutex_exit(&qlt->req_lock);
3812 5934
5935 + EL(qlt, "elsop=%xh req1f=%xh IOCB_TYPE_ELSPASS: rex1=%xh\n",
5936 + elsop, req1f, qcmd->fw_xchg_addr);
5937 +
5938 + qlt_submit_req_entries(qlt, 1, qi);
5939 + mutex_exit(&qlt->mq_req[qi].mq_lock);
5940 +
3813 5941 return (FCT_SUCCESS);
3814 5942 }
3815 5943
3816 5944 fct_status_t
3817 5945 qlt_send_abts_response(qlt_state_t *qlt, fct_cmd_t *cmd, int terminate)
3818 5946 {
3819 5947 qlt_abts_cmd_t *qcmd;
3820 5948 fct_rcvd_abts_t *abts = (fct_rcvd_abts_t *)cmd->cmd_specific;
3821 5949 uint8_t *req;
3822 5950 uint32_t lportid;
3823 5951 uint32_t fctl;
3824 5952 int i;
5953 + uint16_t qi;
5954 + uint32_t rex1, rex2;
5955 + uint8_t temp[64];
3825 5956
5957 + qi = 0;
5958 +
3826 5959 qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
3827 5960
3828 - mutex_enter(&qlt->req_lock);
3829 - req = (uint8_t *)qlt_get_req_entries(qlt, 1);
5961 + mutex_enter(&qlt->mq_req[qi].mq_lock);
5962 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
3830 5963 if (req == NULL) {
3831 - mutex_exit(&qlt->req_lock);
5964 + bcopy(qcmd->buf, &temp, IOCB_SIZE);
5965 + for (i = 0; i < 12; i += 4) {
5966 + /* Take care of firmware's LE requirement */
5967 + temp[0x2C+i] = abts->abts_resp_payload[i+3];
5968 + temp[0x2C+i+1] = abts->abts_resp_payload[i+2];
5969 + temp[0x2C+i+2] = abts->abts_resp_payload[i+1];
5970 + temp[0x2C+i+3] = abts->abts_resp_payload[i];
5971 + }
5972 + rex1 = QMEM_RD32(qlt, &temp[0x10]);
5973 + rex2 = QMEM_RD32(qlt, &temp[0x3C]);
5974 +
5975 + EL(qlt, "req = NULL, %xh %xh %p %xh %xh\n", cmd->cmd_oxid,
5976 + cmd->cmd_rportid, cmd, rex1, rex2);
5977 +
5978 + mutex_exit(&qlt->mq_req[qi].mq_lock);
3832 5979 return (FCT_BUSY);
3833 5980 }
3834 5981 bcopy(qcmd->buf, req, IOCB_SIZE);
3835 5982 lportid = QMEM_RD32(qlt, req+0x14) & 0xFFFFFF;
3836 5983 fctl = QMEM_RD32(qlt, req+0x1C);
3837 5984 fctl = ((fctl ^ BIT_23) & ~BIT_22) | (BIT_19 | BIT_16);
3838 5985 req[0] = 0x55; req[1] = 1; req[2] = (uint8_t)terminate;
3839 5986 QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
3840 5987 if (cmd->cmd_rp)
3841 5988 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
3842 5989 else
3843 5990 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
3844 5991 if (terminate) {
3845 5992 QMEM_WR16(qlt, (&req[0xC]), 1);
3846 5993 }
3847 5994 QMEM_WR32(qlt, req+0x14, cmd->cmd_rportid);
3848 5995 req[0x17] = abts->abts_resp_rctl;
( 7 lines elided )
3849 5996 QMEM_WR32(qlt, req+0x18, lportid);
3850 5997 QMEM_WR32(qlt, req+0x1C, fctl);
3851 5998 req[0x23]++;
3852 5999 for (i = 0; i < 12; i += 4) {
3853 6000 /* Take care of firmware's LE requirement */
3854 6001 req[0x2C+i] = abts->abts_resp_payload[i+3];
3855 6002 req[0x2C+i+1] = abts->abts_resp_payload[i+2];
3856 6003 req[0x2C+i+2] = abts->abts_resp_payload[i+1];
3857 6004 req[0x2C+i+3] = abts->abts_resp_payload[i];
3858 6005 }
3859 - qlt_submit_req_entries(qlt, 1);
3860 - mutex_exit(&qlt->req_lock);
3861 6006
6007 + rex1 = QMEM_RD32(qlt, &req[0x10]);
6008 + rex2 = QMEM_RD32(qlt, &req[0x3C]);
6009 +
6010 + EL(qlt, "%xh %xh %d %p %xh %xh\n",
6011 + QMEM_RD16(qlt, req+0x26), QMEM_RD16(qlt, req+0x24),
6012 + terminate, cmd, rex1, rex2);
6013 +
6014 + qlt_submit_req_entries(qlt, 1, qi);
6015 + mutex_exit(&qlt->mq_req[qi].mq_lock);
6016 +
3862 6017 return (FCT_SUCCESS);
3863 6018 }
3864 6019
3865 6020 static void
3866 6021 qlt_handle_inot(qlt_state_t *qlt, uint8_t *inot)
3867 6022 {
3868 6023 int i;
3869 6024 uint32_t d;
3870 6025 caddr_t req;
6026 + uint16_t qi;
6027 + uint8_t *entry = inot;
6028 +
6029 + qi = 0;
6030 +
3871 6031 /* Just put it on the request queue */
3872 - mutex_enter(&qlt->req_lock);
3873 - req = qlt_get_req_entries(qlt, 1);
6032 + mutex_enter(&qlt->mq_req[qi].mq_lock);
6033 + req = qlt_get_req_entries(qlt, 1, qi);
3874 6034 if (req == NULL) {
3875 - mutex_exit(&qlt->req_lock);
3876 - /* XXX handle this */
6035 + mutex_exit(&qlt->mq_req[qi].mq_lock);
6036 + stmf_trace(qlt->qlt_port_alias,
6037 + "qlt_handle_inot: can't get a ReqQ entry");
6038 + EL(qlt, "req = NULL\n");
3877 6039 return;
3878 6040 }
3879 6041 for (i = 0; i < 16; i++) {
3880 6042 d = QMEM_RD32(qlt, inot);
3881 6043 inot += 4;
3882 6044 QMEM_WR32(qlt, req, d);
3883 6045 req += 4;
3884 6046 }
3885 6047 req -= 64;
3886 6048 req[0] = 0x0e;
3887 - qlt_submit_req_entries(qlt, 1);
3888 - mutex_exit(&qlt->req_lock);
6049 +
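     +	/* stamp the consumed entry so stale re-reads can be detected */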
6050 + QMEM_WR32(qlt, entry+0x3c, 0xdeadbeef);
6051 + EL(qlt, "Issue inot ack\n");
6052 +
6053 + qlt_submit_req_entries(qlt, 1, qi);
6054 + mutex_exit(&qlt->mq_req[qi].mq_lock);
3889 6055 }
3890 6056
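     +/*
     + * qlt_get_queue_id
     + * Round-robin selection of an I/O queue index. Returns 0 when
     + * multiqueue is disabled or only one queue is configured; otherwise
     + * cycles last_qi over the configured queues under qlock. When called
     + * with id 0 it never hands back queue 0, which appears to be kept
     + * for the default/control path.
     + */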
6057 +static uint16_t
6058 +qlt_get_queue_id(qlt_state_t *qlt, int id)
6059 +{
6060 + uint16_t qid;
6061 +
6062 + if ((!qlt->qlt_mq_enabled) || (qlt->qlt_queue_cnt == 1)) {
6063 + return (0);
6064 + }
6065 +
6066 + mutex_enter(&qlt->qlock);
6067 + if ((id == 0) && (qlt->last_qi == 0)) {
6068 + qlt->last_qi++;
6069 + }
6070 + qid = qlt->last_qi;
6071 + qlt->last_qi++;
6072 +
6073 + if (qlt->last_qi >= qlt->qlt_queue_cnt) {
6074 + qlt->last_qi -= qlt->qlt_queue_cnt;
6075 + }
6076 + mutex_exit(&qlt->qlock);
6077 +
6078 + return (qid);
6079 +}
6080 +
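     +/*
     + * qlt_verify_atio_entry
     + * Stale-entry guard for the ATIO queue: consumed entries are stamped
     + * with 0xdeadbeef in their last word, so if that signature is still
     + * present the firmware has not finished DMAing a fresh entry into
     + * this slot. Re-sync the queue memory and re-read up to
     + * qlt_reprocess_attempt_cnt times; if the entry never becomes valid,
     + * shut the port down with a fatal error.
     + */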
6081 +static fct_status_t
6082 +qlt_verify_atio_entry(qlt_state_t *qlt, uint8_t *atio)
6083 +{
6084 + uint32_t sig;
6085 + int i;
6086 + char info[160];
6087 +
6088 +
6089 + sig = QMEM_RD32(qlt, atio+0x3c);
6090 + for (i = 0; ((sig == 0xdeadbeef) &&
6091 + (i < qlt_reprocess_attempt_cnt)); i++) {
6092 + (void) ddi_dma_sync(
6093 + qlt->queue_mem_dma_handle,
6094 + ATIO_QUEUE_OFFSET + (qlt->atio_ndx_to_fw << 6),
6095 + IOCB_SIZE, DDI_DMA_SYNC_FORCPU);
6096 +
6097 + qlt->qlt_atio_reproc_cnt++;
6098 + drv_usecwait(qlt_reprocess_delay);
6099 + sig = QMEM_RD32(qlt, atio+0x3c);
6100 + }
6101 +
6102 + if (i) {
6103 + if (i >= qlt_reprocess_attempt_cnt) {
6104 + EL(qlt, "atio entry reprocess failed, %x\n",
6105 + qlt->qlt_atio_reproc_cnt);
6106 + cmn_err(CE_WARN, "qlt%d: atio entry reprocess"
6107 + " failed %x\n",
6108 + qlt->instance, qlt->qlt_atio_reproc_cnt);
6109 + (void) snprintf(info, 160,
     6110 +			    "qlt_verify_atio_entry: atio entry reprocess"
6111 + " failed, %x rsp-%p",
6112 + qlt->qlt_atio_reproc_cnt, (void *)atio);
6113 + info[159] = 0;
6114 + (void) fct_port_shutdown(qlt->qlt_port,
6115 + STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
6116 + STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
6117 + return (QLT_FAILURE);
6118 + } else {
6119 + EL(qlt, "atio entry reprocess succeeded, %x %x\n",
6120 + i, qlt->qlt_atio_reproc_cnt);
6121 + }
6122 + }
6123 +
6124 + return (QLT_SUCCESS);
6125 +}
6126 +
3891 6127 uint8_t qlt_task_flags[] = { 1, 3, 2, 1, 4, 0, 1, 1 };
3892 6128 static void
3893 6129 qlt_handle_atio(qlt_state_t *qlt, uint8_t *atio)
3894 6130 {
3895 6131 fct_cmd_t *cmd;
3896 6132 scsi_task_t *task;
3897 6133 qlt_cmd_t *qcmd;
3898 6134 uint32_t rportid, fw_xchg_addr;
3899 6135 uint8_t *p, *q, *req, tm;
3900 6136 uint16_t cdb_size, flags, oxid;
3901 - char info[QLT_INFO_LEN];
6137 + char info[160];
6138 + uint16_t qi;
3902 6139
6140 + if (qlt_verify_atio_entry(qlt, atio) != QLT_SUCCESS)
6141 + return;
6142 +
3903 6143 /*
3904 6144 	 * If either bidirectional xfer is requested or there is an extended
3905 6145 * CDB, atio[0x20 + 11] will be greater than or equal to 3.
3906 6146 */
3907 6147 cdb_size = 16;
3908 6148 if (atio[0x20 + 11] >= 3) {
3909 6149 uint8_t b = atio[0x20 + 11];
3910 6150 uint16_t b1;
3911 6151 if ((b & 3) == 3) {
3912 6152 EL(qlt, "bidirectional I/O not supported\n");
3913 6153 cmn_err(CE_WARN, "qlt(%d) CMD with bidirectional I/O "
3914 6154 "received, dropping the cmd as bidirectional "
3915 6155 " transfers are not yet supported", qlt->instance);
3916 6156 /* XXX abort the I/O */
3917 6157 return;
3918 6158 }
3919 6159 cdb_size = (uint16_t)(cdb_size + (b & 0xfc));
3920 6160 /*
3921 6161 		 * Verify that we have enough entries. Without an additional CDB,
3922 6162 		 * everything will fit nicely within the same 64 bytes. So the
3923 6163 		 * additional CDB size is essentially the # of additional bytes
3924 6164 * we need.
3925 6165 */
3926 6166 b1 = (uint16_t)b;
3927 6167 if (((((b1 & 0xfc) + 63) >> 6) + 1) > ((uint16_t)atio[1])) {
3928 6168 EL(qlt, "extended cdb received\n");
3929 6169 cmn_err(CE_WARN, "qlt(%d): cmd received with extended "
3930 6170 " cdb (cdb size = %d bytes), however the firmware "
3931 6171 		    " did not DMA the entire FCP_CMD IU, entry count "
3932 6172 " is %d while it should be %d", qlt->instance,
3933 6173 cdb_size, atio[1], ((((b1 & 0xfc) + 63) >> 6) + 1));
3934 6174 /* XXX abort the I/O */
3935 6175 return;
3936 6176 }
3937 6177 }
3938 6178
3939 6179 rportid = (((uint32_t)atio[8 + 5]) << 16) |
3940 6180 (((uint32_t)atio[8 + 6]) << 8) | atio[8+7];
3941 6181 fw_xchg_addr = QMEM_RD32(qlt, atio+4);
3942 6182 oxid = (uint16_t)((((uint16_t)atio[8 + 16]) << 8) | atio[8+17]);
3943 6183
3944 6184 if (fw_xchg_addr == 0xFFFFFFFF) {
3945 6185 EL(qlt, "fw_xchg_addr==0xFFFFFFFF\n");
3946 6186 cmd = NULL;
3947 6187 } else {
3948 6188 cmd = fct_scsi_task_alloc(qlt->qlt_port, FCT_HANDLE_NONE,
3949 6189 rportid, atio+0x20, cdb_size, STMF_TASK_EXT_NONE);
3950 6190 if (cmd == NULL) {
3951 6191 EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3952 6192 }
3953 6193 }
3954 6194 if (cmd == NULL) {
6195 + qi = 0; /* just use request queue 0 */
6196 +
3955 6197 EL(qlt, "fct_scsi_task_alloc cmd==NULL\n");
3956 6198 /* Abort this IO */
3957 6199 flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));
3958 6200
3959 - mutex_enter(&qlt->req_lock);
3960 - req = (uint8_t *)qlt_get_req_entries(qlt, 1);
6201 + mutex_enter(&qlt->mq_req[qi].mq_lock);
6202 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
3961 6203 if (req == NULL) {
3962 - mutex_exit(&qlt->req_lock);
6204 + mutex_exit(&qlt->mq_req[0].mq_lock);
3963 6205
3964 - (void) snprintf(info, sizeof (info),
6206 + (void) snprintf(info, 160,
3965 6207 "qlt_handle_atio: qlt-%p, can't "
3966 6208 "allocate space for scsi_task", (void *)qlt);
6209 + info[159] = 0;
3967 6210 (void) fct_port_shutdown(qlt->qlt_port,
3968 6211 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
3969 6212 return;
3970 6213 }
3971 6214 bzero(req, IOCB_SIZE);
3972 6215 req[0] = 0x12; req[1] = 0x1;
3973 6216 QMEM_WR32(qlt, req+4, 0);
3974 6217 QMEM_WR16(qlt, req+8, fct_get_rp_handle(qlt->qlt_port,
3975 6218 rportid));
3976 6219 QMEM_WR16(qlt, req+10, 60);
3977 6220 QMEM_WR32(qlt, req+0x10, rportid);
3978 6221 QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
3979 6222 QMEM_WR16(qlt, req+0x1A, flags);
3980 6223 QMEM_WR16(qlt, req+0x20, oxid);
3981 - qlt_submit_req_entries(qlt, 1);
3982 - mutex_exit(&qlt->req_lock);
6224 + qlt_submit_req_entries(qlt, 1, qi);
6225 + mutex_exit(&qlt->mq_req[qi].mq_lock);
3983 6226
3984 6227 return;
3985 6228 }
6229 + if (cmd == NULL) {
6230 + uint32_t res;
6231 + uint16_t scsi_status = 0;
6232 + uint16_t rphdl = 0;
3986 6233
6234 + qi = 0; /* always use request queue 0 */
6235 +
6236 + rphdl = fct_get_rp_handle(qlt->qlt_port, rportid);
6237 + if ((rphdl != 0xFFFF) &&
6238 + (rphdl >= qlt->qlt_port->port_max_logins)) {
6239 + rphdl = 0xFFFF;
6240 + }
6241 +
6242 + mutex_enter(&qlt->mq_req[qi].mq_lock);
6243 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
6244 + if (req == NULL) {
6245 + EL(qlt, "cannot get reqq\n");
6246 + mutex_exit(&qlt->mq_req[qi].mq_lock);
6247 + (void) snprintf(info, 160,
6248 + "qlt_handle_atio: qlt-%p, can't "
6249 + "allocate space for termi-excg", (void *)qlt);
6250 + info[159] = 0;
6251 + (void) fct_port_shutdown(qlt->qlt_port,
6252 + STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
6253 + return;
6254 + }
6255 +
6256 + if (rphdl != 0xFFFF) {
     6257 +			/* Driver sends SCSI qfull status now */
6258 + flags = (uint16_t)(BIT_15 |
6259 + ((uint16_t)(atio[0x3] & 0xF0) << 5));
6260 + /* always use SCSI status mode 1 */
6261 + flags = (uint16_t)(flags | BIT_6);
6262 +
6263 + scsi_status |= (uint16_t)(0x28);
6264 +
6265 + /* Build SCSI Status Mode 1, FCP_RSP IU 24-48 byte */
6266 + bzero(req, IOCB_SIZE);
6267 + req[0] = 0x12;
6268 + req[1] = 0x1;
6269 +
     6270 +			/* allocate a special IOCB handle? or do not care */
6271 + QMEM_WR32(qlt, req+4, 0);
6272 + QMEM_WR16(qlt, req+8, rphdl);
6273 + QMEM_WR16(qlt, req+10, 60);
6274 + QMEM_WR32(qlt, req+0x10, rportid);
6275 + QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
6276 +
6277 + /* sense_length set to 0 */
6278 + QMEM_WR16(qlt, req+0x18, 0);
6279 +
6280 + QMEM_WR16(qlt, req+0x1A, flags);
6281 +
6282 + /* Residual transfer length */
6283 + res = QMEM_RD32(qlt, atio+0x3C);
6284 + BIG_ENDIAN_32(&res);
6285 + if (res != 0) {
6286 + scsi_status |= FCP_RESID_UNDER;
6287 + }
6288 + QMEM_WR32_REQ(qlt, qi, req + 0x1C, res);
6289 +
6290 + QMEM_WR16(qlt, req+0x20, oxid);
6291 + QMEM_WR16_REQ(qlt, qi, req + 0x22, scsi_status);
6292 +
6293 + EL(qlt, "Send qfull (%Xh) (%Xh)(%Xh)(%Xh) from port "
6294 + "(%Xh:%Xh)\n", scsi_status, fw_xchg_addr, flags,
6295 + oxid, rportid, rphdl);
6296 + } else {
6297 + /* Terminate exchange because no remote port context */
6298 + flags = (uint16_t)(BIT_14 | ((atio[3] & 0xF0) << 5));
6299 +
6300 + bzero(req, IOCB_SIZE);
6301 + req[0] = 0x12;
6302 + req[1] = 0x1;
6303 +
6304 + QMEM_WR32(qlt, req+4, 0);
6305 + QMEM_WR16(qlt, req+8, rphdl);
6306 + QMEM_WR16(qlt, req+10, 60);
6307 + QMEM_WR32(qlt, req+0x10, rportid);
6308 + QMEM_WR32(qlt, req+0x14, fw_xchg_addr);
6309 + QMEM_WR16(qlt, req+0x1A, flags);
6310 + QMEM_WR16(qlt, req+0x20, oxid);
6311 +
6312 + EL(qlt, "Termi excg (%Xh)(%Xh)(%Xh) from port (%Xh)\n",
6313 + fw_xchg_addr, flags, oxid, rportid);
6314 +
6315 + EL(qlt, "Termi rp_handle (%Xh)\n", rphdl);
6316 + }
6317 +
6318 + qlt_submit_req_entries(qlt, 1, qi);
6319 + mutex_exit(&qlt->mq_req[qi].mq_lock);
6320 + return;
6321 + }
6322 +
6323 + qi = qlt_get_queue_id(qlt, 0);
3987 6324 task = (scsi_task_t *)cmd->cmd_specific;
3988 6325 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
3989 6326 qcmd->fw_xchg_addr = fw_xchg_addr;
3990 6327 qcmd->param.atio_byte3 = atio[3];
6328 + qcmd->qid = qi;
3991 6329 cmd->cmd_oxid = oxid;
3992 6330 cmd->cmd_rxid = (uint16_t)((((uint16_t)atio[8 + 18]) << 8) |
3993 6331 atio[8+19]);
3994 6332 cmd->cmd_rportid = rportid;
3995 6333 cmd->cmd_lportid = (((uint32_t)atio[8 + 1]) << 16) |
3996 6334 (((uint32_t)atio[8 + 2]) << 8) | atio[8 + 3];
3997 6335 cmd->cmd_rp_handle = FCT_HANDLE_NONE;
3998 6336 /* Dont do a 64 byte read as this is IOMMU */
3999 6337 q = atio+0x28;
4000 6338 /* XXX Handle fcp_cntl */
4001 6339 task->task_cmd_seq_no = (uint32_t)(*q++);
4002 6340 task->task_csn_size = 8;
4003 6341 task->task_flags = qlt_task_flags[(*q++) & 7];
4004 6342 tm = *q++;
4005 6343 if (tm) {
4006 6344 if (tm & BIT_1)
4007 6345 task->task_mgmt_function = TM_ABORT_TASK_SET;
4008 6346 else if (tm & BIT_2)
4009 6347 task->task_mgmt_function = TM_CLEAR_TASK_SET;
4010 6348 else if (tm & BIT_4)
4011 6349 task->task_mgmt_function = TM_LUN_RESET;
4012 6350 else if (tm & BIT_5)
4013 6351 task->task_mgmt_function = TM_TARGET_COLD_RESET;
4014 6352 else if (tm & BIT_6)
4015 6353 task->task_mgmt_function = TM_CLEAR_ACA;
4016 6354 else
4017 6355 task->task_mgmt_function = TM_ABORT_TASK;
4018 6356 }
4019 6357 task->task_max_nbufs = STMF_BUFS_MAX;
4020 6358 task->task_csn_size = 8;
4021 6359 task->task_flags = (uint8_t)(task->task_flags | (((*q++) & 3) << 5));
4022 6360 p = task->task_cdb;
4023 6361 *p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4024 6362 *p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4025 6363 *p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4026 6364 *p++ = *q++; *p++ = *q++; *p++ = *q++; *p++ = *q++;
4027 6365 if (cdb_size > 16) {
4028 6366 uint16_t xtra = (uint16_t)(cdb_size - 16);
4029 6367 uint16_t i;
4030 6368 uint8_t cb[4];
4031 6369
4032 6370 while (xtra) {
4033 6371 *p++ = *q++;
4034 6372 xtra--;
4035 6373 if (q == ((uint8_t *)qlt->queue_mem_ptr +
4036 6374 ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
4037 6375 q = (uint8_t *)qlt->queue_mem_ptr +
4038 6376 ATIO_QUEUE_OFFSET;
4039 6377 }
4040 6378 }
4041 6379 for (i = 0; i < 4; i++) {
4042 6380 cb[i] = *q++;
4043 6381 if (q == ((uint8_t *)qlt->queue_mem_ptr +
4044 6382 ATIO_QUEUE_OFFSET + (ATIO_QUEUE_ENTRIES * 64))) {
4045 6383 q = (uint8_t *)qlt->queue_mem_ptr +
4046 6384 ATIO_QUEUE_OFFSET;
4047 6385 }
4048 6386 }
4049 6387 task->task_expected_xfer_length = (((uint32_t)cb[0]) << 24) |
4050 6388 (((uint32_t)cb[1]) << 16) |
4051 6389 (((uint32_t)cb[2]) << 8) | cb[3];
4052 6390 } else {
4053 6391 task->task_expected_xfer_length = (((uint32_t)q[0]) << 24) |
4054 6392 (((uint32_t)q[1]) << 16) |
4055 6393 (((uint32_t)q[2]) << 8) | q[3];
4056 6394 }
6395 +
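     6396 +	/* mark the ATIO entry consumed before handing the command to FCT */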
6396 + QMEM_WR32(qlt, atio+0x3c, 0xdeadbeef);
4057 6397 fct_post_rcvd_cmd(cmd, 0);
4058 6398 }
4059 6399
4060 6400 static void
4061 6401 qlt_handle_dereg_completion(qlt_state_t *qlt, uint8_t *rsp)
4062 6402 {
4063 6403 uint16_t status;
4064 6404 uint32_t portid;
4065 6405 uint32_t subcode1, subcode2;
4066 6406
4067 6407 status = QMEM_RD16(qlt, rsp+8);
4068 6408 portid = QMEM_RD32(qlt, rsp+0x10) & 0xffffff;
4069 6409 subcode1 = QMEM_RD32(qlt, rsp+0x14);
4070 6410 subcode2 = QMEM_RD32(qlt, rsp+0x18);
4071 6411
4072 - mutex_enter(&qlt->preq_lock);
6412 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
6413 + mutex_enter(&qlt->mq_req[0].mq_lock);
6414 + } else {
6415 + mutex_enter(&qlt->preq_lock);
6416 + }
4073 6417 if (portid != qlt->rp_id_in_dereg) {
4074 6418 int instance = ddi_get_instance(qlt->dip);
4075 6419
4076 6420 		EL(qlt, "implicit logout received portid = %xh\n", portid);
4077 6421 cmn_err(CE_WARN, "qlt(%d): implicit logout completion for 0x%x"
4078 6422 " received when driver wasn't waiting for it",
4079 6423 instance, portid);
4080 - mutex_exit(&qlt->preq_lock);
6424 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
6425 + mutex_exit(&qlt->mq_req[0].mq_lock);
6426 + } else {
6427 + mutex_exit(&qlt->preq_lock);
6428 + }
4081 6429 return;
4082 6430 }
4083 6431
4084 6432 if (status != 0) {
4085 6433 EL(qlt, "implicit logout completed for %xh with status %xh, "
4086 6434 "subcode1 %xh subcode2 %xh\n", portid, status, subcode1,
4087 6435 subcode2);
4088 6436 if (status == 0x31 && subcode1 == 0x0a) {
4089 6437 qlt->rp_dereg_status = FCT_SUCCESS;
4090 6438 } else {
4091 6439 EL(qlt, "implicit logout portid=%xh, status=%xh, "
4092 6440 "subcode1=%xh, subcode2=%xh\n", portid, status,
4093 6441 subcode1, subcode2);
4094 6442 qlt->rp_dereg_status =
4095 6443 QLT_FIRMWARE_ERROR(status, subcode1, subcode2);
4096 6444 }
4097 6445 } else {
4098 6446 qlt->rp_dereg_status = FCT_SUCCESS;
4099 6447 }
4100 6448 cv_signal(&qlt->rp_dereg_cv);
4101 - mutex_exit(&qlt->preq_lock);
6449 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_27xx_chip)) {
6450 + mutex_exit(&qlt->mq_req[0].mq_lock);
6451 + } else {
6452 + mutex_exit(&qlt->preq_lock);
6453 + }
4102 6454 }
4103 6455
4104 6456 /*
4105 6457 * Note that when an ELS is aborted, the regular or aborted completion
4106 6458 * (if any) gets posted before the abort IOCB comes back on response queue.
4107 6459 */
4108 6460 static void
4109 6461 qlt_handle_unsol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
4110 6462 {
4111 - char info[QLT_INFO_LEN];
6463 + char info[160];
4112 6464 fct_cmd_t *cmd;
4113 6465 qlt_cmd_t *qcmd;
4114 6466 uint32_t hndl;
4115 6467 uint32_t subcode1, subcode2;
4116 6468 uint16_t status;
6469 + uint8_t elsop;
4117 6470
4118 6471 hndl = QMEM_RD32(qlt, rsp+4);
4119 6472 status = QMEM_RD16(qlt, rsp+8);
4120 6473 subcode1 = QMEM_RD32(qlt, rsp+0x24);
4121 6474 subcode2 = QMEM_RD32(qlt, rsp+0x28);
6475 + elsop = rsp[0x16];
4122 6476
4123 6477 if (!CMD_HANDLE_VALID(hndl)) {
4124 6478 EL(qlt, "handle = %xh\n", hndl);
4125 6479 /*
4126 6480 * This cannot happen for unsol els completion. This can
4127 6481 * only happen when abort for an unsol els completes.
4128 6482 * This condition indicates a firmware bug.
4129 6483 */
4130 - (void) snprintf(info, sizeof (info),
4131 - "qlt_handle_unsol_els_completion: "
6484 + (void) snprintf(info, 160, "qlt_handle_unsol_els_completion: "
4132 6485 "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4133 6486 hndl, status, subcode1, subcode2, (void *)rsp);
6487 + info[159] = 0;
4134 6488 (void) fct_port_shutdown(qlt->qlt_port,
4135 6489 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4136 6490 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4137 6491 return;
4138 6492 }
4139 6493
4140 6494 if (status == 5) {
4141 6495 /*
4142 6496 * When an unsolicited els is aborted, the abort is done
4143 6497 * by a ELSPT iocb with abort control. This is the aborted IOCB
4144 6498 * and not the abortee. We will do the cleanup when the
4145 6499 * IOCB which caused the abort, returns.
4146 6500 */
4147 6501 EL(qlt, "status = %xh\n", status);
4148 6502 stmf_trace(0, "--UNSOL ELS returned with status 5 --");
4149 6503 return;
4150 6504 }
4151 6505
4152 6506 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4153 6507 if (cmd == NULL) {
4154 6508 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4155 6509 /*
4156 6510 * Now why would this happen ???
4157 6511 */
4158 - (void) snprintf(info, sizeof (info),
6512 + (void) snprintf(info, 160,
4159 6513 "qlt_handle_unsol_els_completion: can not "
4160 6514 "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4161 6515 (void *)rsp);
6516 + info[159] = 0;
4162 6517 (void) fct_port_shutdown(qlt->qlt_port,
4163 6518 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4164 6519
4165 6520 return;
4166 6521 }
4167 6522
4168 6523 ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
4169 6524 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4170 6525 if (qcmd->flags & QLT_CMD_ABORTING) {
4171 6526 /*
4172 6527 * This is the same case as "if (status == 5)" above. The
4173 6528 * only difference is that in this case the firmware actually
4174 6529 * finished sending the response. So the abort attempt will
4175 6530 * come back with status ?. We will handle it there.
4176 6531 */
4177 6532 stmf_trace(0, "--UNSOL ELS finished while we are trying to "
4178 6533 "abort it");
4179 6534 return;
4180 6535 }
4181 6536
4182 6537 if (qcmd->dbuf != NULL) {
4183 6538 qlt_dmem_free(NULL, qcmd->dbuf);
4184 6539 qcmd->dbuf = NULL;
4185 6540 }
4186 6541
4187 6542 if (status == 0) {
4188 6543 fct_send_response_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
6544 +
6545 + if ((elsop == ELS_OP_LOGO) &&
6546 + (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT)) {
6547 + EL(qlt, "reset link since this is LOGO and N2N\n");
6548 + (void) snprintf(info, 80,
6549 + "qlt_handle_unsol_els_completion: qlt-%p, "
6550 + "trigger RFLAG_RESET to recover",
6551 + (void *)qlt);
6552 +
6553 + info[79] = 0;
6554 + (void) fct_port_shutdown(qlt->qlt_port,
6555 + STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET,
6556 + info);
6557 + }
4189 6558 } else {
     6559 +		EL(qlt, "status (%xh) subcode1=%xh subcode2=%xh\n",
6560 + status, subcode1, subcode2);
4190 6561 fct_send_response_done(cmd,
4191 6562 QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4192 6563 }
4193 6564 }
4194 6565
4195 6566 static void
4196 6567 qlt_handle_unsol_els_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4197 6568 {
4198 - char info[QLT_INFO_LEN];
6569 + char info[160];
4199 6570 fct_cmd_t *cmd;
4200 6571 qlt_cmd_t *qcmd;
4201 6572 uint32_t hndl;
4202 6573 uint32_t subcode1, subcode2;
4203 6574 uint16_t status;
4204 6575
4205 6576 hndl = QMEM_RD32(qlt, rsp+4);
4206 6577 status = QMEM_RD16(qlt, rsp+8);
4207 6578 subcode1 = QMEM_RD32(qlt, rsp+0x24);
4208 6579 subcode2 = QMEM_RD32(qlt, rsp+0x28);
4209 6580
4210 6581 if (!CMD_HANDLE_VALID(hndl)) {
4211 6582 EL(qlt, "handle = %xh\n", hndl);
4212 6583 ASSERT(hndl == 0);
4213 6584 /*
4214 6585 * Someone has requested to abort it, but no one is waiting for
4215 6586 * this completion.
4216 6587 */
4217 6588 if ((status != 0) && (status != 8)) {
4218 6589 EL(qlt, "status = %xh\n", status);
4219 6590 /*
4220 6591 * There could be exchange resource leakage, so
4221 6592 * throw HBA fatal error event now
4222 6593 */
4223 - (void) snprintf(info, sizeof (info),
6594 + (void) snprintf(info, 160,
4224 6595 "qlt_handle_unsol_els_abort_completion: "
4225 6596 "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4226 6597 hndl, status, subcode1, subcode2, (void *)rsp);
6598 + info[159] = 0;
4227 6599 (void) fct_port_shutdown(qlt->qlt_port,
4228 6600 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4229 6601 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4230 6602 return;
4231 6603 }
4232 6604
4233 6605 return;
4234 6606 }
4235 6607
4236 6608 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4237 6609 if (cmd == NULL) {
4238 6610 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4239 6611 /*
4240 6612 * Why would this happen ??
4241 6613 */
4242 - (void) snprintf(info, sizeof (info),
6614 + (void) snprintf(info, 160,
4243 6615 "qlt_handle_unsol_els_abort_completion: can not get "
4244 6616 "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4245 6617 (void *)rsp);
6618 + info[159] = 0;
4246 6619 (void) fct_port_shutdown(qlt->qlt_port,
4247 6620 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4248 6621
4249 6622 return;
4250 6623 }
4251 6624
4252 6625 ASSERT(cmd->cmd_type == FCT_CMD_RCVD_ELS);
4253 6626 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4254 6627 ASSERT(qcmd->flags & QLT_CMD_ABORTING);
4255 6628
4256 6629 if (qcmd->dbuf != NULL) {
4257 6630 qlt_dmem_free(NULL, qcmd->dbuf);
4258 6631 qcmd->dbuf = NULL;
4259 6632 }
4260 6633
4261 6634 if (status == 0) {
4262 6635 fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4263 6636 } else if (status == 8) {
4264 6637 fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4265 6638 } else {
4266 6639 fct_cmd_fca_aborted(cmd,
4267 6640 QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4268 6641 }
4269 6642 }
4270 6643
4271 6644 static void
4272 6645 qlt_handle_sol_els_completion(qlt_state_t *qlt, uint8_t *rsp)
4273 6646 {
4274 - char info[QLT_INFO_LEN];
6647 + char info[160];
4275 6648 fct_cmd_t *cmd;
4276 6649 fct_els_t *els;
4277 6650 qlt_cmd_t *qcmd;
4278 6651 uint32_t hndl;
4279 6652 uint32_t subcode1, subcode2;
4280 6653 uint16_t status;
4281 6654
4282 6655 hndl = QMEM_RD32(qlt, rsp+4);
4283 6656 status = QMEM_RD16(qlt, rsp+8);
4284 6657 subcode1 = QMEM_RD32(qlt, rsp+0x24);
4285 6658 subcode2 = QMEM_RD32(qlt, rsp+0x28);
4286 6659
4287 6660 if (!CMD_HANDLE_VALID(hndl)) {
4288 6661 EL(qlt, "handle = %xh\n", hndl);
4289 6662 /*
4290 6663 * This cannot happen for sol els completion.
4291 6664 */
4292 - (void) snprintf(info, sizeof (info),
4293 - "qlt_handle_sol_els_completion: "
6665 + (void) snprintf(info, 160, "qlt_handle_sol_els_completion: "
4294 6666 "Invalid handle: hndl-%x, status-%x/%x/%x, rsp-%p",
4295 6667 hndl, status, subcode1, subcode2, (void *)rsp);
6668 + info[159] = 0;
4296 6669 (void) fct_port_shutdown(qlt->qlt_port,
4297 6670 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4298 6671 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4299 6672 return;
4300 6673 }
4301 6674
4302 6675 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4303 6676 if (cmd == NULL) {
4304 6677 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4305 - (void) snprintf(info, sizeof (info),
6678 + (void) snprintf(info, 160,
4306 6679 "qlt_handle_sol_els_completion: can not "
4307 6680 "get cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4308 6681 (void *)rsp);
6682 + info[159] = 0;
4309 6683 (void) fct_port_shutdown(qlt->qlt_port,
4310 6684 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4311 6685
4312 6686 return;
4313 6687 }
4314 6688
4315 6689 ASSERT(cmd->cmd_type == FCT_CMD_SOL_ELS);
4316 6690 els = (fct_els_t *)cmd->cmd_specific;
4317 6691 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4318 6692 qcmd->fw_xchg_addr = QMEM_RD32(qlt, (&rsp[0x10]));
4319 6693
4320 6694 if (qcmd->flags & QLT_CMD_ABORTING) {
4321 6695 /*
4322 6696 * We will handle it when the ABORT IO IOCB returns.
4323 6697 */
4324 6698 return;
4325 6699 }
4326 6700
4327 6701 if (qcmd->dbuf != NULL) {
4328 6702 if (status == 0) {
4329 6703 qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4330 6704 bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4331 6705 qcmd->param.resp_offset,
4332 6706 els->els_resp_payload, els->els_resp_size);
4333 6707 }
4334 6708 qlt_dmem_free(NULL, qcmd->dbuf);
4335 6709 qcmd->dbuf = NULL;
4336 6710 }
4337 6711
4338 6712 if (status == 0) {
4339 6713 fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4340 6714 } else {
4341 6715 fct_send_cmd_done(cmd,
4342 6716 QLT_FIRMWARE_ERROR(status, subcode1, subcode2), 0);
4343 6717 }
4344 6718 }
4345 6719
4346 6720 static void
4347 6721 qlt_handle_ct_completion(qlt_state_t *qlt, uint8_t *rsp)
4348 6722 {
4349 6723 fct_cmd_t *cmd;
4350 6724 fct_sol_ct_t *ct;
4351 6725 qlt_cmd_t *qcmd;
4352 6726 uint32_t hndl;
4353 6727 uint16_t status;
4354 - char info[QLT_INFO_LEN];
6728 + char info[160];
4355 6729
4356 6730 hndl = QMEM_RD32(qlt, rsp+4);
4357 6731 status = QMEM_RD16(qlt, rsp+8);
4358 6732
4359 6733 if (!CMD_HANDLE_VALID(hndl)) {
4360 6734 EL(qlt, "handle = %xh\n", hndl);
4361 6735 /*
4362 6736 * Solicited commands will always have a valid handle.
4363 6737 */
4364 - (void) snprintf(info, sizeof (info),
4365 - "qlt_handle_ct_completion: "
4366 - "hndl-%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
6738 + (void) snprintf(info, 160, "qlt_handle_ct_completion: hndl-"
6739 + "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
6740 + info[159] = 0;
4367 6741 (void) fct_port_shutdown(qlt->qlt_port,
4368 6742 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4369 6743 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4370 6744 return;
4371 6745 }
4372 6746
4373 6747 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
6748 + EL(qlt, "cmd=%ph hndl=%xh status=%xh\n", cmd, hndl, status);
4374 6749 if (cmd == NULL) {
4375 6750 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4376 - (void) snprintf(info, sizeof (info),
6751 + (void) snprintf(info, 160,
4377 6752 "qlt_handle_ct_completion: cannot find "
4378 6753 "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4379 6754 (void *)rsp);
6755 + info[159] = 0;
4380 6756 (void) fct_port_shutdown(qlt->qlt_port,
4381 6757 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4382 6758
4383 6759 return;
4384 6760 }
4385 6761
4386 6762 ct = (fct_sol_ct_t *)cmd->cmd_specific;
4387 6763 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4388 6764 ASSERT(cmd->cmd_type == FCT_CMD_SOL_CT);
4389 6765
4390 6766 if (qcmd->flags & QLT_CMD_ABORTING) {
4391 6767 /*
4392 6768 * We will handle it when ABORT IO IOCB returns;
4393 6769 */
4394 6770 return;
4395 6771 }
4396 6772
4397 6773 ASSERT(qcmd->dbuf);
4398 - if (status == 0) {
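     6774 +	/* 0x15 presumably means data underrun; response still usable */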
6774 + if ((status == 0) || (status == 0x15)) {
4399 6775 qlt_dmem_dma_sync(qcmd->dbuf, DDI_DMA_SYNC_FORKERNEL);
4400 6776 bcopy(qcmd->dbuf->db_sglist[0].seg_addr +
4401 6777 qcmd->param.resp_offset,
4402 6778 ct->ct_resp_payload, ct->ct_resp_size);
4403 6779 }
4404 6780 qlt_dmem_free(NULL, qcmd->dbuf);
4405 6781 qcmd->dbuf = NULL;
4406 6782
4407 - if (status == 0) {
6783 + if ((status == 0) || (status == 0x15)) {
4408 6784 fct_send_cmd_done(cmd, FCT_SUCCESS, FCT_IOF_FCA_DONE);
4409 6785 } else {
4410 6786 fct_send_cmd_done(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4411 6787 }
4412 6788 }
4413 6789
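     +/*
     + * qlt_verify_resp_entry
     + * Same stale-entry guard as qlt_verify_atio_entry, but for an entry
     + * on response queue qi: re-sync and re-read while the 0xdeadbeef
     + * signature of a previously consumed entry is still visible, and
     + * shut the port down if the reprocess attempts are exhausted.
     + */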
6790 +static fct_status_t
6791 +qlt_verify_resp_entry(qlt_state_t *qlt, uint8_t *rsp, uint16_t qi)
6792 +{
6793 + uint32_t sig;
6794 + int i;
6795 + char info[160];
6796 +
6797 + sig = QMEM_RD32_RSPQ(qlt, qi, rsp+0x3c);
6798 + for (i = 0; ((sig == 0xdeadbeef) &&
6799 + (i < qlt_reprocess_attempt_cnt)); i++) {
6800 + (void) ddi_dma_sync(
6801 + qlt->mq_resp[qi].queue_mem_mq_dma_handle,
6802 + (qlt->mq_resp[qi].mq_ndx_to_fw << 6),
6803 + IOCB_SIZE, DDI_DMA_SYNC_FORCPU);
6804 +
6805 + qlt->qlt_resp_reproc_cnt++;
6806 + drv_usecwait(qlt_reprocess_delay);
6807 + sig = QMEM_RD32_RSPQ(qlt, qi, rsp+0x3c);
6808 + }
6809 +
6810 + if (i) {
6811 + if (i >= qlt_reprocess_attempt_cnt) {
6812 + EL(qlt, "resp entry reprocess failed, %x\n",
6813 + qlt->qlt_resp_reproc_cnt);
6814 + cmn_err(CE_WARN, "qlt%d: resp entry reprocess"
6815 + " failed %x\n",
6816 + qlt->instance, qlt->qlt_resp_reproc_cnt);
6817 + (void) snprintf(info, 160,
6818 + "qlt_handle_ctio_completion: resp entry reprocess"
6819 + " failed, %x rsp-%p",
6820 + qlt->qlt_resp_reproc_cnt, (void *)rsp);
6821 + info[159] = 0;
6822 + (void) fct_port_shutdown(qlt->qlt_port,
6823 + STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET,
6824 + info);
6825 + return (QLT_FAILURE);
6826 + } else {
6827 + EL(qlt, "resp entry reprocess succeeded, %x %x\n",
6828 + i, qlt->qlt_resp_reproc_cnt);
6829 + }
6830 + }
6831 +
6832 + return (QLT_SUCCESS);
6833 +}
6834 +
4414 6835 static void
4415 -qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp)
6836 +qlt_handle_ctio_completion(qlt_state_t *qlt, uint8_t *rsp, uint16_t qi)
4416 6837 {
4417 6838 fct_cmd_t *cmd;
4418 6839 scsi_task_t *task;
4419 6840 qlt_cmd_t *qcmd;
4420 6841 stmf_data_buf_t *dbuf;
4421 6842 fct_status_t fc_st;
4422 6843 uint32_t iof = 0;
4423 6844 uint32_t hndl;
6845 + uint32_t rex1;
6846 + uint16_t oxid;
4424 6847 uint16_t status;
4425 6848 uint16_t flags;
4426 6849 uint8_t abort_req;
4427 6850 uint8_t n;
4428 - char info[QLT_INFO_LEN];
6851 + char info[160];
4429 6852
6853 + if (qlt_verify_resp_entry(qlt, rsp, qi) != QLT_SUCCESS)
6854 + return;
6855 +
6856 + /* write a deadbeef in the last 4 bytes of the IOCB */
6857 + QMEM_WR32_RSPQ(qlt, qi, rsp+0x3c, 0xdeadbeef);
6858 +
4430 6859 /* XXX: Check validity of the IOCB by checking 4th byte. */
4431 - hndl = QMEM_RD32(qlt, rsp+4);
4432 - status = QMEM_RD16(qlt, rsp+8);
4433 - flags = QMEM_RD16(qlt, rsp+0x1a);
6860 + hndl = QMEM_RD32_RSPQ(qlt, qi, rsp+4);
6861 + status = QMEM_RD16_RSPQ(qlt, qi, rsp+8);
6862 + flags = QMEM_RD16_RSPQ(qlt, qi, rsp+0x1a);
6863 + oxid = QMEM_RD16_RSPQ(qlt, qi, rsp+0x20);
6864 + rex1 = QMEM_RD32_RSPQ(qlt, qi, rsp+0x14);
4434 6865 n = rsp[2];
4435 6866
4436 6867 if (!CMD_HANDLE_VALID(hndl)) {
4437 6868 EL(qlt, "handle = %xh\n", hndl);
4438 6869 ASSERT(hndl == 0);
4439 6870 /*
4440 6871 * Someone has requested to abort it, but no one is waiting for
4441 6872 * this completion.
4442 6873 */
4443 6874 EL(qlt, "hndl-%xh, status-%xh, rsp-%p\n", hndl, status,
4444 6875 (void *)rsp);
4445 6876 if ((status != 1) && (status != 2)) {
4446 6877 EL(qlt, "status = %xh\n", status);
4447 - /*
4448 - * There could be exchange resource leakage, so
4449 - * throw HBA fatal error event now
4450 - */
4451 - (void) snprintf(info, sizeof (info),
4452 - "qlt_handle_ctio_completion: hndl-"
4453 - "%x, status-%x, rsp-%p", hndl, status, (void *)rsp);
4454 - (void) fct_port_shutdown(qlt->qlt_port,
4455 - STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
6878 + if (status == 0x29) {
6879 + uint8_t *req;
4456 6880
6881 + /*
6882 + * The qlt port received an ATIO request from
6883 + * remote port before it issued a plogi.
6884 + * The qlt fw returned the CTIO completion
6885 + * status 0x29 to inform driver to do cleanup
6886 + * (terminate the IO exchange). The subsequent
6887 + * ABTS from the initiator can be handled
6888 + * cleanly.
6889 + */
6890 + qi = 0;
6891 + mutex_enter(&qlt->mq_req[qi].mq_lock);
6892 + req = (uint8_t *)
6893 + qlt_get_req_entries(qlt, 1, qi);
6894 +
6895 + if (req == NULL) {
6896 + EL(qlt, "No reqq entry available to "
6897 + "termi exchg\n");
6898 + mutex_exit(&qlt->mq_req[qi].mq_lock);
6899 +
6900 + (void) snprintf(info, 160,
6901 + "qlt_handle_ctio_completion: no "
6902 + "reqq entry available, status-%x,"
6903 + "rsp-%p", status, (void *)rsp);
6904 +
6905 + info[159] = 0;
6906 +
6907 + (void) fct_port_shutdown(qlt->qlt_port,
6908 + STMF_RFLAG_FATAL_ERROR |
6909 + STMF_RFLAG_RESET,
6910 + info);
6911 +
6912 + return;
6913 + }
6914 +
6915 + flags &= 0x1E00;
6916 + flags |= BIT_14;
6917 +
6918 + bzero(req, IOCB_SIZE);
6919 + req[0] = 0x12;
6920 + req[1] = 0x1;
6921 +
6922 + QMEM_WR32(qlt, req+4, 0);
6923 + QMEM_WR16(qlt, req+8, 0xFFFF);
6924 + QMEM_WR16(qlt, req+10, 60);
6925 + QMEM_WR32(qlt, req+0x14, rex1);
6926 + QMEM_WR16(qlt, req+0x1A, flags);
6927 + QMEM_WR16(qlt, req+0x20, oxid);
6928 +
6929 + EL(qlt, "Termi exchg (%Xh)(%Xh)(%Xh) "
6930 + "rphdl=0xFFFF\n", rex1, flags, oxid);
6931 +
6932 + qlt_submit_req_entries(qlt, 1, qi);
6933 + mutex_exit(&qlt->mq_req[qi].mq_lock);
6934 + } else {
6935 + /*
6936 + * There could be exchange resource leakage,
6937 + * so throw HBA fatal error event now
6938 + */
6939 + (void) snprintf(info, 160,
6940 + "qlt_handle_ctio_completion: hndl-%x, "
6941 + "status-%x, rsp-%p", hndl, status,
6942 + (void *)rsp);
6943 +
6944 + info[159] = 0;
6945 +
6946 + (void) fct_port_shutdown(qlt->qlt_port,
6947 + STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET,
6948 + info);
6949 + }
4457 6950 }
4458 6951
4459 6952 return;
4460 6953 }
4461 6954
4462 6955 if (flags & BIT_14) {
4463 6956 abort_req = 1;
4464 6957 EL(qlt, "abort: hndl-%x, status-%x, rsp-%p\n", hndl, status,
4465 6958 (void *)rsp);
4466 6959 } else {
4467 6960 abort_req = 0;
4468 6961 }
4469 6962
4470 6963 cmd = fct_handle_to_cmd(qlt->qlt_port, hndl);
4471 6964 if (cmd == NULL) {
4472 6965 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", hndl);
4473 - (void) snprintf(info, sizeof (info),
6966 + (void) snprintf(info, 160,
4474 6967 "qlt_handle_ctio_completion: cannot find "
4475 6968 "cmd, hndl-%x, status-%x, rsp-%p", hndl, status,
4476 6969 (void *)rsp);
6970 + info[159] = 0;
4477 6971 (void) fct_port_shutdown(qlt->qlt_port,
4478 - STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
6972 + /* STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info); */
6973 + STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
6974 + STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4479 6975
4480 6976 return;
4481 6977 }
4482 6978
4483 6979 task = (scsi_task_t *)cmd->cmd_specific;
4484 6980 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4485 6981 if (qcmd->dbuf_rsp_iu) {
4486 6982 ASSERT((flags & (BIT_6 | BIT_7)) == BIT_7);
4487 6983 qlt_dmem_free(NULL, qcmd->dbuf_rsp_iu);
4488 6984 qcmd->dbuf_rsp_iu = NULL;
4489 6985 }
4490 6986
4491 6987 if ((status == 1) || (status == 2)) {
4492 6988 if (abort_req) {
4493 6989 fc_st = FCT_ABORT_SUCCESS;
4494 6990 iof = FCT_IOF_FCA_DONE;
4495 6991 } else {
4496 6992 fc_st = FCT_SUCCESS;
4497 6993 if (flags & BIT_15) {
4498 6994 iof = FCT_IOF_FCA_DONE;
4499 6995 }
4500 6996 }
4501 6997 } else {
4502 6998 EL(qlt, "status = %xh\n", status);
4503 6999 if ((status == 8) && abort_req) {
4504 7000 fc_st = FCT_NOT_FOUND;
4505 7001 iof = FCT_IOF_FCA_DONE;
4506 7002 } else {
4507 7003 fc_st = QLT_FIRMWARE_ERROR(status, 0, 0);
4508 7004 }
4509 7005 }
4510 7006 dbuf = NULL;
4511 7007 if (((n & BIT_7) == 0) && (!abort_req)) {
4512 7008 /* A completion of data xfer */
4513 7009 if (n == 0) {
4514 7010 dbuf = qcmd->dbuf;
4515 7011 } else {
4516 7012 dbuf = stmf_handle_to_buf(task, n);
4517 7013 }
4518 7014
4519 7015 ASSERT(dbuf != NULL);
4520 7016 if (dbuf->db_flags & DB_DIRECTION_FROM_RPORT)
4521 7017 qlt_dmem_dma_sync(dbuf, DDI_DMA_SYNC_FORCPU);
4522 7018 if (flags & BIT_15) {
4523 7019 dbuf->db_flags = (uint16_t)(dbuf->db_flags |
4524 7020 DB_STATUS_GOOD_SENT);
4525 7021 }
4526 7022
4527 7023 dbuf->db_xfer_status = fc_st;
4528 7024 fct_scsi_data_xfer_done(cmd, dbuf, iof);
4529 7025 return;
4530 7026 }
4531 7027 if (!abort_req) {
4532 7028 /*
4533 7029 * This was just a pure status xfer.
4534 7030 */
4535 7031 fct_send_response_done(cmd, fc_st, iof);
4536 7032 return;
4537 7033 }
4538 7034
4539 7035 fct_cmd_fca_aborted(cmd, fc_st, iof);
7036 +
7037 + EL(qlt, "(%d) (%p)(%xh,%xh),%x %x %x\n",
7038 + qi, cmd, cmd->cmd_oxid, cmd->cmd_rxid,
7039 + cmd->cmd_handle, qcmd->fw_xchg_addr,
7040 + fc_st);
4540 7041 }
4541 7042
4542 7043 static void
4543 7044 qlt_handle_sol_abort_completion(qlt_state_t *qlt, uint8_t *rsp)
4544 7045 {
4545 - char info[QLT_INFO_LEN];
7046 + char info[80];
4546 7047 fct_cmd_t *cmd;
4547 7048 qlt_cmd_t *qcmd;
4548 7049 uint32_t h;
4549 7050 uint16_t status;
4550 7051
4551 7052 h = QMEM_RD32(qlt, rsp+4);
4552 7053 status = QMEM_RD16(qlt, rsp+8);
4553 7054
4554 7055 if (!CMD_HANDLE_VALID(h)) {
4555 7056 EL(qlt, "handle = %xh\n", h);
4556 7057 /*
4557 7058 * Solicited commands always have a valid handle.
4558 7059 */
4559 - (void) snprintf(info, sizeof (info),
7060 + (void) snprintf(info, 80,
4560 7061 "qlt_handle_sol_abort_completion: hndl-"
4561 7062 "%x, status-%x, rsp-%p", h, status, (void *)rsp);
7063 + info[79] = 0;
4562 7064 (void) fct_port_shutdown(qlt->qlt_port,
4563 7065 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET |
4564 7066 STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4565 7067 return;
4566 7068 }
4567 7069 cmd = fct_handle_to_cmd(qlt->qlt_port, h);
4568 7070 if (cmd == NULL) {
4569 7071 EL(qlt, "fct_handle_to_cmd cmd==NULL, hndl=%xh\n", h);
4570 7072 /*
4571 7073 * What happened to the cmd ??
4572 7074 */
4573 - (void) snprintf(info, sizeof (info),
7075 + (void) snprintf(info, 80,
4574 7076 "qlt_handle_sol_abort_completion: cannot "
4575 7077 "find cmd, hndl-%x, status-%x, rsp-%p", h, status,
4576 7078 (void *)rsp);
7079 + info[79] = 0;
4577 7080 (void) fct_port_shutdown(qlt->qlt_port,
4578 7081 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4579 7082
4580 7083 return;
4581 7084 }
4582 7085
4583 7086 ASSERT((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4584 7087 (cmd->cmd_type == FCT_CMD_SOL_CT));
4585 7088 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4586 7089 if (qcmd->dbuf != NULL) {
4587 7090 qlt_dmem_free(NULL, qcmd->dbuf);
4588 7091 qcmd->dbuf = NULL;
4589 7092 }
4590 7093 ASSERT(qcmd->flags & QLT_CMD_ABORTING);
7094 + EL(qlt, "status=%xh\n", status);
4591 7095 if (status == 0) {
4592 7096 fct_cmd_fca_aborted(cmd, FCT_ABORT_SUCCESS, FCT_IOF_FCA_DONE);
4593 7097 } else if (status == 0x31) {
4594 7098 fct_cmd_fca_aborted(cmd, FCT_NOT_FOUND, FCT_IOF_FCA_DONE);
4595 7099 } else {
4596 7100 fct_cmd_fca_aborted(cmd, QLT_FIRMWARE_ERROR(status, 0, 0), 0);
4597 7101 }
4598 7102 }
4599 7103
4600 7104 static void
4601 -qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp)
7105 +qlt_handle_rcvd_abts(qlt_state_t *qlt, uint8_t *resp, uint16_t qi)
4602 7106 {
4603 7107 qlt_abts_cmd_t *qcmd;
4604 7108 fct_cmd_t *cmd;
4605 7109 uint32_t remote_portid;
4606 - char info[QLT_INFO_LEN];
7110 + uint32_t rex1;
7111 + uint32_t rex2;
7112 + char info[160];
4607 7113
4608 7114 remote_portid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x18])))) |
4609 7115 ((uint32_t)(resp[0x1A])) << 16;
4610 7116 cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ABTS,
4611 7117 sizeof (qlt_abts_cmd_t), 0);
4612 7118 if (cmd == NULL) {
4613 7119 EL(qlt, "fct_alloc cmd==NULL\n");
4614 - (void) snprintf(info, sizeof (info),
7120 + (void) snprintf(info, 160,
4615 7121 "qlt_handle_rcvd_abts: qlt-%p, can't "
4616 7122 "allocate space for fct_cmd", (void *)qlt);
7123 + info[159] = 0;
4617 7124 (void) fct_port_shutdown(qlt->qlt_port,
4618 7125 STMF_RFLAG_FATAL_ERROR | STMF_RFLAG_RESET, info);
4619 7126 return;
4620 7127 }
4621 7128
4622 7129 resp[0xC] = resp[0xD] = resp[0xE] = 0;
4623 7130 qcmd = (qlt_abts_cmd_t *)cmd->cmd_fca_private;
7131 + qcmd->qid = qi;
4624 7132 bcopy(resp, qcmd->buf, IOCB_SIZE);
4625 7133 cmd->cmd_port = qlt->qlt_port;
4626 7134 cmd->cmd_rp_handle = QMEM_RD16(qlt, resp+0xA);
4627 7135 if (cmd->cmd_rp_handle == 0xFFFF)
4628 7136 cmd->cmd_rp_handle = FCT_HANDLE_NONE;
4629 7137
4630 7138 cmd->cmd_rportid = remote_portid;
4631 7139 cmd->cmd_lportid = ((uint32_t)(QMEM_RD16(qlt, (&resp[0x14])))) |
4632 7140 ((uint32_t)(resp[0x16])) << 16;
4633 7141 cmd->cmd_oxid = QMEM_RD16(qlt, (&resp[0x26]));
4634 7142 cmd->cmd_rxid = QMEM_RD16(qlt, (&resp[0x24]));
7143 +
7144 + rex1 = QMEM_RD32(qlt, resp+0x10);
7145 + rex2 = QMEM_RD32(qlt, resp+0x3C);
7146 +
7147 + EL(qlt, "(%d)(%xh %xh) (%xh)(%p) (%xh %xh) (%x)\n",
7148 + qi, cmd->cmd_oxid, cmd->cmd_rxid, remote_portid,
7149 + cmd, rex1, rex2, cmd->cmd_handle);
7150 +
4635 7151 fct_post_rcvd_cmd(cmd, 0);
4636 7152 }
4637 7153
4638 7154 static void
4639 -qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp)
7155 +qlt_handle_abts_completion(qlt_state_t *qlt, uint8_t *resp, uint16_t qi)
4640 7156 {
4641 7157 uint16_t status;
4642 - char info[QLT_INFO_LEN];
7158 + char info[80];
4643 7159
4644 7160 status = QMEM_RD16(qlt, resp+8);
4645 7161
4646 7162 if ((status == 0) || (status == 5)) {
7163 + EL(qlt, "qi(%d) status =%xh,(%xh %xh)\n",
7164 + qi, status, QMEM_RD16(qlt, resp+0x26),
7165 + QMEM_RD16(qlt, resp+0x24));
4647 7166 return;
4648 7167 }
4649 - EL(qlt, "status = %xh\n", status);
4650 - (void) snprintf(info, sizeof (info),
4651 - "ABTS completion failed %x/%x/%x resp_off %x",
7168 +
7169 + EL(qlt, "ABTS status=%x/%x/%x resp_off %x",
7170 + status, QMEM_RD32(qlt, resp+0x34),
7171 + QMEM_RD32(qlt, resp+0x38),
7172 + ((uint32_t)(qlt->mq_resp[0].mq_ndx_to_fw)) << 6);
7173 +
7174 + (void) snprintf(info, 80, "ABTS completion failed %x/%x/%x resp_off %x",
4652 7175 status, QMEM_RD32(qlt, resp+0x34), QMEM_RD32(qlt, resp+0x38),
4653 - ((uint32_t)(qlt->resp_ndx_to_fw)) << 6);
7176 + ((uint32_t)(qlt->mq_resp[0].mq_ndx_to_fw)) << 6);
7177 + info[79] = 0;
4654 7178 (void) fct_port_shutdown(qlt->qlt_port, STMF_RFLAG_FATAL_ERROR |
4655 7179 STMF_RFLAG_RESET | STMF_RFLAG_COLLECT_DEBUG_DUMP, info);
4656 7180 }
4657 7181
4658 7182 #ifdef DEBUG
4659 7183 uint32_t qlt_drop_abort_counter = 0;
4660 7184 #endif
4661 7185
4662 7186 fct_status_t
4663 7187 qlt_abort_cmd(struct fct_local_port *port, fct_cmd_t *cmd, uint32_t flags)
4664 7188 {
4665 7189 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4666 7190
4667 7191 if ((qlt->qlt_state == FCT_STATE_OFFLINE) ||
4668 7192 (qlt->qlt_state == FCT_STATE_OFFLINING)) {
4669 7193 return (FCT_NOT_FOUND);
4670 7194 }
4671 7195
4672 7196 #ifdef DEBUG
4673 7197 if (qlt_drop_abort_counter > 0) {
4674 7198 if (atomic_dec_32_nv(&qlt_drop_abort_counter) == 1)
4675 7199 return (FCT_SUCCESS);
4676 7200 }
4677 7201 #endif
4678 7202
7203 + EL(qlt, "cmd_type = %x\n", cmd->cmd_type);
4679 7204 if (cmd->cmd_type == FCT_CMD_FCP_XCHG) {
4680 7205 return (qlt_abort_unsol_scsi_cmd(qlt, cmd));
4681 7206 }
4682 7207
4683 7208 if (flags & FCT_IOF_FORCE_FCA_DONE) {
4684 7209 cmd->cmd_handle = 0;
4685 7210 }
4686 7211
4687 7212 if (cmd->cmd_type == FCT_CMD_RCVD_ABTS) {
     7213 +		/* this is a retried ABTS; terminate it now */
4688 7214 return (qlt_send_abts_response(qlt, cmd, 1));
4689 7215 }
4690 7216
4691 7217 if (cmd->cmd_type == FCT_CMD_RCVD_ELS) {
4692 7218 return (qlt_abort_purex(qlt, cmd));
4693 7219 }
4694 7220
4695 7221 if ((cmd->cmd_type == FCT_CMD_SOL_ELS) ||
4696 7222 (cmd->cmd_type == FCT_CMD_SOL_CT)) {
4697 7223 return (qlt_abort_sol_cmd(qlt, cmd));
4698 7224 }
4699 - EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
7225 + EL(qlt, "cmd->cmd_type = %x\n", cmd->cmd_type);
4700 7226
4701 7227 ASSERT(0);
4702 7228 return (FCT_FAILURE);
4703 7229 }
4704 7230
4705 7231 fct_status_t
4706 7232 qlt_abort_sol_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4707 7233 {
4708 7234 uint8_t *req;
4709 7235 qlt_cmd_t *qcmd;
7236 + uint16_t qi;
4710 7237
4711 7238 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4712 7239 qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4713 - EL(qlt, "fctcmd-%p, cmd_handle-%xh\n", cmd, cmd->cmd_handle);
7240 + qi = qcmd->qid;
4714 7241
4715 - mutex_enter(&qlt->req_lock);
4716 - req = (uint8_t *)qlt_get_req_entries(qlt, 1);
7242 + EL(qlt, "fctcmd-%p, cmd_handle-%xh rportid=%xh\n",
7243 + cmd, cmd->cmd_handle, cmd->cmd_rportid);
7244 +
7245 + mutex_enter(&qlt->mq_req[qi].mq_lock);
7246 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
4717 7247 if (req == NULL) {
4718 - mutex_exit(&qlt->req_lock);
7248 + EL(qlt, "req == NULL\n");
7249 + mutex_exit(&qlt->mq_req[qi].mq_lock);
4719 7250
4720 7251 return (FCT_BUSY);
4721 7252 }
4722 7253 bzero(req, IOCB_SIZE);
4723 7254 req[0] = 0x33; req[1] = 1;
4724 7255 QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4725 7256 if (cmd->cmd_rp) {
4726 7257 QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4727 7258 } else {
4728 7259 QMEM_WR16(qlt, req+8, 0xFFFF);
4729 7260 }
4730 7261
4731 7262 QMEM_WR32(qlt, req+0xc, cmd->cmd_handle);
4732 7263 QMEM_WR32(qlt, req+0x30, cmd->cmd_rportid);
4733 - qlt_submit_req_entries(qlt, 1);
4734 - mutex_exit(&qlt->req_lock);
7264 + qlt_submit_req_entries(qlt, 1, qi);
7265 + mutex_exit(&qlt->mq_req[qi].mq_lock);
4735 7266
4736 7267 return (FCT_SUCCESS);
4737 7268 }
4738 7269
4739 7270 fct_status_t
4740 7271 qlt_abort_purex(qlt_state_t *qlt, fct_cmd_t *cmd)
4741 7272 {
4742 7273 uint8_t *req;
4743 7274 qlt_cmd_t *qcmd;
4744 7275 fct_els_t *els;
4745 7276 uint8_t elsop, req1f;
7277 + uint16_t qi;
4746 7278
4747 7279 els = (fct_els_t *)cmd->cmd_specific;
4748 7280 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
7281 + qi = qcmd->qid;
4749 7282 elsop = els->els_req_payload[0];
4750 - EL(qlt, "fctcmd-%p, cmd_handle-%xh, elsop-%xh\n", cmd, cmd->cmd_handle,
4751 - elsop);
7283 + EL(qlt, "fctcmd-%p, cmd_handle-%xh, elsop-%xh\n", cmd,
7284 + cmd->cmd_handle, elsop);
4752 7285 req1f = 0x60; /* Terminate xchg */
4753 7286 if ((elsop == ELS_OP_PRLI) || (elsop == ELS_OP_PRLO) ||
4754 7287 (elsop == ELS_OP_TPRLO) || (elsop == ELS_OP_LOGO)) {
4755 7288 req1f = (uint8_t)(req1f | BIT_4);
4756 7289 }
4757 7290
4758 - mutex_enter(&qlt->req_lock);
4759 - req = (uint8_t *)qlt_get_req_entries(qlt, 1);
7291 + mutex_enter(&qlt->mq_req[qi].mq_lock);
7292 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
4760 7293 if (req == NULL) {
4761 - mutex_exit(&qlt->req_lock);
4762 -
7294 + EL(qlt, "req == NULL\n");
7295 + mutex_exit(&qlt->mq_req[qi].mq_lock);
4763 7296 return (FCT_BUSY);
4764 7297 }
4765 7298
4766 7299 qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4767 7300 bzero(req, IOCB_SIZE);
4768 7301 req[0] = 0x53; req[1] = 1; req[0xf] = 0x10;
4769 7302 req[0x16] = elsop; req[0x1f] = req1f;
4770 7303 QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4771 7304 if (cmd->cmd_rp) {
4772 7305 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4773 7306 EL(qlt, "rp_handle-%x\n", cmd->cmd_rp->rp_handle);
4774 7307 } else {
4775 7308 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp_handle);
4776 7309 EL(qlt, "cmd_rp_handle-%x\n", cmd->cmd_rp_handle);
4777 7310 }
4778 7311
4779 7312 QMEM_WR32(qlt, (&req[0x10]), qcmd->fw_xchg_addr);
4780 7313 QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rportid);
4781 - qlt_submit_req_entries(qlt, 1);
4782 - mutex_exit(&qlt->req_lock);
7314 + qlt_submit_req_entries(qlt, 1, qi);
7315 + mutex_exit(&qlt->mq_req[qi].mq_lock);
4783 7316
4784 7317 return (FCT_SUCCESS);
4785 7318 }
4786 7319
4787 7320 fct_status_t
4788 7321 qlt_abort_unsol_scsi_cmd(qlt_state_t *qlt, fct_cmd_t *cmd)
4789 7322 {
4790 7323 qlt_cmd_t *qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4791 7324 uint8_t *req;
4792 7325 uint16_t flags;
7326 + uint16_t qi;
4793 7327
7328 + qi = qcmd->qid;
7329 +
4794 7330 flags = (uint16_t)(BIT_14 |
4795 7331 (((uint16_t)qcmd->param.atio_byte3 & 0xf0) << 5));
4796 - EL(qlt, "fctcmd-%p, cmd_handle-%x\n", cmd, cmd->cmd_handle);
4797 7332
4798 - mutex_enter(&qlt->req_lock);
4799 - req = (uint8_t *)qlt_get_req_entries(qlt, 1);
4800 - if (req == NULL) {
4801 - mutex_exit(&qlt->req_lock);
7333 + EL(qlt, "(%d) (%x) (%p) (%x)\n", qi, cmd->cmd_oxid,
7334 + cmd, qcmd->fw_xchg_addr);
4802 7335
7336 + mutex_enter(&qlt->mq_req[qi].mq_lock);
7337 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
7338 + if (req == NULL) {
7339 + EL(qlt, "req == NULL\n");
7340 + mutex_exit(&qlt->mq_req[qi].mq_lock);
4803 7341 return (FCT_BUSY);
4804 7342 }
4805 7343
4806 7344 qcmd->flags = (uint16_t)(qcmd->flags | QLT_CMD_ABORTING);
4807 7345 bzero(req, IOCB_SIZE);
4808 7346 req[0] = 0x12; req[1] = 0x1;
4809 - QMEM_WR32(qlt, req+4, cmd->cmd_handle);
4810 - QMEM_WR16(qlt, req+8, cmd->cmd_rp->rp_handle);
4811 - QMEM_WR16(qlt, req+10, 60); /* 60 seconds timeout */
4812 - QMEM_WR32(qlt, req+0x10, cmd->cmd_rportid);
4813 - QMEM_WR32(qlt, req+0x14, qcmd->fw_xchg_addr);
4814 - QMEM_WR16(qlt, req+0x1A, flags);
4815 - QMEM_WR16(qlt, req+0x20, cmd->cmd_oxid);
4816 - qlt_submit_req_entries(qlt, 1);
4817 - mutex_exit(&qlt->req_lock);
7347 + QMEM_WR32_REQ(qlt, qi, req+4, cmd->cmd_handle);
7348 + QMEM_WR16_REQ(qlt, qi, req+8, cmd->cmd_rp->rp_handle);
7349 + QMEM_WR16_REQ(qlt, qi, req+10, 60); /* 60 seconds timeout */
7350 + QMEM_WR32_REQ(qlt, qi, req+0x10, cmd->cmd_rportid);
7351 + QMEM_WR32_REQ(qlt, qi, req+0x14, qcmd->fw_xchg_addr);
7352 + QMEM_WR16_REQ(qlt, qi, req+0x1A, flags);
7353 + QMEM_WR16_REQ(qlt, qi, req+0x20, cmd->cmd_oxid);
7354 + qlt_submit_req_entries(qlt, 1, qi);
7355 + mutex_exit(&qlt->mq_req[qi].mq_lock);
4818 7356
4819 7357 return (FCT_SUCCESS);
4820 7358 }
4821 7359
4822 7360 fct_status_t
4823 7361 qlt_send_cmd(fct_cmd_t *cmd)
4824 7362 {
4825 7363 qlt_state_t *qlt;
4826 7364
4827 7365 qlt = (qlt_state_t *)cmd->cmd_port->port_fca_private;
7366 + EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
4828 7367 if (cmd->cmd_type == FCT_CMD_SOL_ELS) {
4829 7368 return (qlt_send_els(qlt, cmd));
4830 7369 } else if (cmd->cmd_type == FCT_CMD_SOL_CT) {
4831 7370 return (qlt_send_ct(qlt, cmd));
4832 7371 }
4833 - EL(qlt, "cmd->cmd_type = %xh\n", cmd->cmd_type);
7372 + EL(qlt, "Unknown cmd->cmd_type = %xh\n", cmd->cmd_type);
4834 7373
4835 7374 ASSERT(0);
4836 7375 return (FCT_FAILURE);
4837 7376 }
4838 7377
4839 7378 fct_status_t
4840 7379 qlt_send_els(qlt_state_t *qlt, fct_cmd_t *cmd)
4841 7380 {
4842 7381 uint8_t *req;
4843 7382 fct_els_t *els;
4844 7383 qlt_cmd_t *qcmd;
4845 7384 stmf_data_buf_t *buf;
4846 7385 qlt_dmem_bctl_t *bctl;
4847 7386 uint32_t sz, minsz;
7387 + uint16_t qi;
4848 7388
7389 + qi = 0;
7390 +
4849 7391 els = (fct_els_t *)cmd->cmd_specific;
4850 7392 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4851 7393 qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4852 7394 qcmd->param.resp_offset = (uint16_t)((els->els_req_size + 7) & ~7);
4853 7395 sz = minsz = qcmd->param.resp_offset + els->els_resp_size;
4854 7396 buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4855 7397 if (buf == NULL) {
4856 7398 return (FCT_BUSY);
4857 7399 }
4858 7400 bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4859 7401
4860 7402 qcmd->dbuf = buf;
4861 7403 bcopy(els->els_req_payload, buf->db_sglist[0].seg_addr,
4862 7404 els->els_req_size);
4863 7405 qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4864 7406
4865 - mutex_enter(&qlt->req_lock);
4866 - req = (uint8_t *)qlt_get_req_entries(qlt, 1);
7407 + mutex_enter(&qlt->mq_req[qi].mq_lock);
7408 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
4867 7409 if (req == NULL) {
7410 + EL(qlt, "req = NULL, %xh %xh %p %xh\n", cmd->cmd_oxid,
7411 + cmd->cmd_rportid, cmd, qcmd->fw_xchg_addr);
4868 7412 qlt_dmem_free(NULL, buf);
4869 - mutex_exit(&qlt->req_lock);
7413 + mutex_exit(&qlt->mq_req[qi].mq_lock);
4870 7414 return (FCT_BUSY);
4871 7415 }
4872 7416 bzero(req, IOCB_SIZE);
4873 7417 req[0] = 0x53; req[1] = 1;
4874 7418 QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4875 7419 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4876 7420 QMEM_WR16(qlt, (&req[0xC]), 1);
4877 7421 QMEM_WR16(qlt, (&req[0xE]), 0x1000);
4878 7422 QMEM_WR16(qlt, (&req[0x14]), 1);
4879 7423 req[0x16] = els->els_req_payload[0];
4880 7424 if (qlt->cur_topology == PORT_TOPOLOGY_PT_TO_PT) {
4881 7425 req[0x1b] = (uint8_t)((cmd->cmd_lportid >> 16) & 0xff);
4882 7426 req[0x1c] = (uint8_t)(cmd->cmd_lportid & 0xff);
4883 7427 req[0x1d] = (uint8_t)((cmd->cmd_lportid >> 8) & 0xff);
4884 7428 }
4885 7429 QMEM_WR32(qlt, (&req[0x18]), cmd->cmd_rp->rp_id);
4886 7430 QMEM_WR32(qlt, (&req[0x20]), els->els_resp_size);
4887 7431 QMEM_WR32(qlt, (&req[0x24]), els->els_req_size);
4888 7432 QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr);
4889 7433 QMEM_WR32(qlt, (&req[0x30]), els->els_req_size);
4890 7434 QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4891 7435 qcmd->param.resp_offset));
4892 7436 QMEM_WR32(qlt, (&req[0x3C]), els->els_resp_size);
4893 - qlt_submit_req_entries(qlt, 1);
4894 - mutex_exit(&qlt->req_lock);
4895 7437
7438 + EL(qlt, "ELS opcode %xh to %xh\n",
7439 + req[0x16], cmd->cmd_rp->rp_id);
7440 +
7441 + qlt_submit_req_entries(qlt, 1, qi);
7442 + mutex_exit(&qlt->mq_req[qi].mq_lock);
7443 +
4896 7444 return (FCT_SUCCESS);
4897 7445 }
4898 7446
4899 7447 fct_status_t
4900 7448 qlt_send_ct(qlt_state_t *qlt, fct_cmd_t *cmd)
4901 7449 {
4902 7450 uint8_t *req;
4903 7451 fct_sol_ct_t *ct;
4904 7452 qlt_cmd_t *qcmd;
4905 7453 stmf_data_buf_t *buf;
4906 7454 qlt_dmem_bctl_t *bctl;
4907 7455 uint32_t sz, minsz;
7456 + uint16_t qi;
4908 7457
7458 + qi = 0;
7459 +
4909 7460 ct = (fct_sol_ct_t *)cmd->cmd_specific;
4910 7461 qcmd = (qlt_cmd_t *)cmd->cmd_fca_private;
4911 7462 qcmd->flags = QLT_CMD_TYPE_SOLICITED;
4912 7463 qcmd->param.resp_offset = (uint16_t)((ct->ct_req_size + 7) & ~7);
4913 7464 sz = minsz = qcmd->param.resp_offset + ct->ct_resp_size;
4914 7465 buf = qlt_i_dmem_alloc(qlt, sz, &minsz, 0);
4915 7466 if (buf == NULL) {
4916 7467 return (FCT_BUSY);
4917 7468 }
4918 7469 bctl = (qlt_dmem_bctl_t *)buf->db_port_private;
4919 7470
4920 7471 qcmd->dbuf = buf;
4921 7472 bcopy(ct->ct_req_payload, buf->db_sglist[0].seg_addr,
4922 7473 ct->ct_req_size);
4923 7474 qlt_dmem_dma_sync(buf, DDI_DMA_SYNC_FORDEV);
4924 7475
4925 - mutex_enter(&qlt->req_lock);
4926 - req = (uint8_t *)qlt_get_req_entries(qlt, 1);
7476 + mutex_enter(&qlt->mq_req[qi].mq_lock);
7477 + req = (uint8_t *)qlt_get_req_entries(qlt, 1, qi);
4927 7478 if (req == NULL) {
7479 + EL(qlt, "req = NULL, %xh %xh %p %xh\n", cmd->cmd_oxid,
7480 + cmd->cmd_rportid, cmd, qcmd->fw_xchg_addr);
4928 7481 qlt_dmem_free(NULL, buf);
4929 - mutex_exit(&qlt->req_lock);
7482 + mutex_exit(&qlt->mq_req[qi].mq_lock);
4930 7483 return (FCT_BUSY);
4931 7484 }
4932 7485 bzero(req, IOCB_SIZE);
4933 7486 req[0] = 0x29; req[1] = 1;
4934 7487 QMEM_WR32(qlt, (&req[4]), cmd->cmd_handle);
4935 7488 QMEM_WR16(qlt, (&req[0xA]), cmd->cmd_rp->rp_handle);
4936 7489 QMEM_WR16(qlt, (&req[0xC]), 1);
4937 7490 QMEM_WR16(qlt, (&req[0x10]), 0x20); /* > (2 * RA_TOV) */
4938 7491 QMEM_WR16(qlt, (&req[0x14]), 1);
4939 7492
4940 7493 QMEM_WR32(qlt, (&req[0x20]), ct->ct_resp_size);
4941 7494 QMEM_WR32(qlt, (&req[0x24]), ct->ct_req_size);
4942 7495
4943 7496 QMEM_WR64(qlt, (&req[0x28]), bctl->bctl_dev_addr); /* COMMAND DSD */
4944 7497 QMEM_WR32(qlt, (&req[0x30]), ct->ct_req_size);
4945 7498 QMEM_WR64(qlt, (&req[0x34]), (bctl->bctl_dev_addr +
4946 7499 qcmd->param.resp_offset)); /* RESPONSE DSD */
4947 7500 QMEM_WR32(qlt, (&req[0x3C]), ct->ct_resp_size);
4948 7501
4949 - qlt_submit_req_entries(qlt, 1);
4950 - mutex_exit(&qlt->req_lock);
7502 + EL(qlt, "%p cmd_hdl=%xh %xh %xh\n",
7503 + cmd, cmd->cmd_handle, ct->ct_req_size, ct->ct_resp_size);
4951 7504
7505 + qlt_submit_req_entries(qlt, 1, qi);
7506 + mutex_exit(&qlt->mq_req[qi].mq_lock);
7507 +
4952 7508 return (FCT_SUCCESS);
4953 7509 }
4954 7510
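     +/*
     + * qlt_str_ptr
     + * Skip past the NUL-terminated string at bp in a buffer with *len
     + * bytes remaining: *len is reduced by the string length and a
     + * pointer to the string's terminator is returned, or NULL if the
     + * buffer would be exhausted.
     + */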
7511 +/*ARGSUSED*/
7512 +caddr_t
7513 +qlt_str_ptr(qlt_state_t *qlt, caddr_t bp, uint32_t *len)
7514 +{
7515 + caddr_t sp;
7516 + uint32_t i = 0;
4955 7517
7518 + sp = bp;
7519 + while (*sp++ != 0) i++;
7520 + if (i > *len || !(*len -= i)) {
7521 + EL(qlt, "full buffer\n");
7522 + return (NULL);
7523 + }
7524 + return (bp += i);
7525 +}
7526 +
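     +/*
     + * qlt_27xx_fw_dump
     + * Capture an ISP27xx firmware dump using the dump template at
     + * dmp_template_addr. On the first call the template is parsed once
     + * (with a NULL buffer) just to size the binary and ascii dump
     + * buffers; the dump itself then copies the template into the buffer
     + * and walks it via qlt_27xx_dmp_parse_template to collect the chip
     + * state. Guards against a dump already in progress and against
     + * overwriting a previous dump that the user has not fetched.
     + */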
7527 +static fct_status_t
7528 +qlt_27xx_fw_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
7529 +{
7530 + qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
7531 + qlt_dmp_template_t *template_buff;
7532 + uint32_t tsize, dsize, len;
7533 + uint32_t cnt, *dp, *bp;
7534 + uint8_t *fw;
7535 + caddr_t sp;
7536 +
7537 + EL(qlt, "enter...\n");
7538 +
7539 + mutex_enter(&qlt->qlt_ioctl_lock);
7540 + /*
7541 + * To make sure that there's no outstanding dumping task
7542 + */
7543 + if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
7544 + mutex_exit(&qlt->qlt_ioctl_lock);
7545 + EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
7546 + qlt->qlt_ioctl_flags);
7547 + return (FCT_FAILURE);
7548 + }
7549 +
7550 + /*
7551 + * To make sure not to overwrite existing dump
7552 + */
7553 + if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
7554 + !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
7555 + !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
7556 + /*
7557 + * If we have already one dump, but it's not triggered by user
7558 + * and the user hasn't fetched it, we shouldn't dump again.
7559 + * But if qlt force a fw dump, then we need to overwrite the
7560 + * previous one anyway.
7561 + */
7562 + mutex_exit(&qlt->qlt_ioctl_lock);
7563 + EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
7564 + qlt->qlt_ioctl_flags);
7565 + cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
7566 + "is one already outstanding.", qlt->instance);
7567 + return (FCT_FAILURE);
7568 + }
7569 +
7570 + if (qlt->dmp_template_addr == NULL) {
7571 + mutex_exit(&qlt->qlt_ioctl_lock);
7572 + EL(qlt, "dmp_template_addr is NULL, can't "
7573 + "perform firmware dump\n");
7574 + cmn_err(CE_WARN, "!qlt(%d) dmp_template_addr is NULL, can't "
7575 + "perform firmware dump", qlt->instance);
7576 + return (FCT_FAILURE);
7577 + }
7578 +
7579 + qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
7580 + if (ssci != NULL && (ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
7581 + qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
7582 + } else {
7583 + qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
7584 + }
7585 + mutex_exit(&qlt->qlt_ioctl_lock);
7586 +
7587 + template_buff = (qlt_dmp_template_t *)qlt->dmp_template_addr;
7588 + tsize = template_buff->hdr.size_of_template;
7589 +
7590 + if (qlt->fw_bin_dump_size == 0) {
7591 + qlt->fw_bin_dump_buf = kmem_zalloc(tsize, KM_NOSLEEP);
7592 + if (qlt->fw_bin_dump_buf == NULL) {
7593 + cmn_err(CE_WARN, "!qlt(%d) cannot alloc bin dump buf",
7594 + qlt->instance);
7595 + return (FCT_FAILURE);
7596 + }
7597 + cnt = (uint32_t)(tsize / sizeof (uint32_t));
7598 + dp = (uint32_t *)qlt->fw_bin_dump_buf;
7599 + bp = (uint32_t *)&template_buff->hdr;
7600 + while (cnt--) {
7601 + *dp++ = ddi_get32(qlt->dmp_template_acc_handle, bp++);
7602 + }
7603 + qlt->fw_bin_dump_size = qlt_27xx_dmp_parse_template(qlt,
7604 + (qlt_dt_hdr_t *)qlt->fw_bin_dump_buf, NULL, 0);
7605 + kmem_free(qlt->fw_bin_dump_buf, tsize);
7606 + qlt->fw_bin_dump_buf = NULL;
7607 +
7608 + if (qlt->fw_bin_dump_size == 0) {
7609 + return (FCT_FAILURE);
7610 + }
7611 +
7612 + /*
7613 + * Determine ascii dump file size
7614 + * 2 ascii bytes per binary byte + a space and
7615 + * a newline every 16 binary bytes
7616 + */
7617 + qlt->fw_ascii_dump_size = qlt->fw_bin_dump_size << 1;
7618 + qlt->fw_ascii_dump_size += qlt->fw_bin_dump_size;
7619 + qlt->fw_ascii_dump_size += qlt->fw_bin_dump_size / 16 + 1;
7620 +
7621 + EL(qlt, "fw_bin_dump_size=%xh, "
7622 + "fw_acsii_dump_size=%xh\n", qlt->fw_bin_dump_size,
7623 + qlt->fw_ascii_dump_size);
7624 + }
7625 +
7626 + if (qlt->fw_bin_dump_buf != NULL) {
7627 + /* overwrite the previous fw dump by qlt forced fw dump */
7628 + bzero((void *) qlt->fw_bin_dump_buf, qlt->fw_bin_dump_size);
7629 + } else {
7630 + qlt->fw_bin_dump_buf = kmem_zalloc(qlt->fw_bin_dump_size,
7631 + KM_NOSLEEP);
7632 + if (qlt->fw_bin_dump_buf == NULL) {
7633 + qlt->fw_bin_dump_size = 0;
7634 + EL(qlt, "done, failed alloc bin dump buf\n");
7635 + return (FCT_FAILURE);
7636 + }
7637 + }
7638 +
7639 + if ((qlt->fw_dump_size != 0) &&
7640 + (qlt->fw_dump_size != qlt->fw_ascii_dump_size)) {
7641 + if (qlt->qlt_fwdump_buf != NULL) {
7642 + /* Release previously allocated buffer */
7643 + kmem_free(qlt->qlt_fwdump_buf, qlt->fw_dump_size);
7644 + qlt->qlt_fwdump_buf = NULL;
7645 + }
7646 + }
7647 +
7648 + if (qlt->qlt_fwdump_buf == NULL) {
7649 + qlt->qlt_fwdump_buf = kmem_zalloc(qlt->fw_ascii_dump_size,
7650 + KM_NOSLEEP);
7651 + if (qlt->qlt_fwdump_buf == NULL) {
7652 + EL(qlt, "done, failed alloc ascii fw dump buf\n");
7653 + return (FCT_FAILURE);
7654 + }
7655 + qlt->fw_dump_size = qlt->fw_ascii_dump_size;
7656 + }
7657 +
7658 + /* Disable ISP interrupts. */
7659 + REG_WR32(qlt, 0xc, 0);
7660 +
7661 + cnt = (uint32_t)(tsize / sizeof (uint32_t));
7662 + dp = (uint32_t *)qlt->fw_bin_dump_buf;
7663 + bp = (uint32_t *)&template_buff->hdr;
7664 + while (cnt--) {
7665 + *dp++ = ddi_get32(qlt->dmp_template_acc_handle, bp++);
7666 + }
7667 +
7668 + (void) qlt_27xx_dmp_parse_template(qlt,
7669 + (qlt_dt_hdr_t *)qlt->fw_bin_dump_buf,
7670 + (uint8_t *)dp, qlt->fw_bin_dump_size);
7671 +
7672 +#ifdef _BIG_ENDIAN
7673 + cnt = (uint32_t)(tsize / sizeof (uint32_t));
7674 + dp = (uint32_t *)qlt->fw_bin_dump_buf;
7675 + while (cnt--) {
7676 + qlt_chg_endian((uint8_t *)dp, 4);
7677 + dp++;
7678 + }
7679 +#endif
7680 +
7681 + /*
7682 + * Build ascii dump
7683 + */
7684 + len = qlt->fw_ascii_dump_size;
7685 + dsize = qlt->fw_bin_dump_size;
7686 + fw = (uint8_t *)qlt->fw_bin_dump_buf;
7687 + sp = qlt->qlt_fwdump_buf;
7688 +
7689 + EL(qlt, "fw_dump_buffer=%ph, fw=%ph, fw_ascii_dump_size=%xh, "
7690 + "dsize=%xh\n", (void *)qlt->qlt_fwdump_buf, (void *)fw,
7691 + len, dsize);
7692 +
7693 + /*
7694 + * 2 ascii bytes per binary byte + a space and
7695 + * a newline every 16 binary bytes
7696 + */
7697 + cnt = 0;
7698 + while (cnt < dsize) {
7699 + (void) snprintf(sp, len, "%02x ", *fw++);
7700 + if ((sp = qlt_str_ptr(qlt, sp, &len)) == NULL) {
7701 + break;
7702 + }
7703 + if (++cnt % 16 == 0) {
7704 + (void) snprintf(sp, len, "\n");
7705 + if ((sp = qlt_str_ptr(qlt, sp, &len)) == NULL) {
7706 + break;
7707 + }
7708 + }
7709 + }
7710 + if (cnt % 16 != 0) {
7711 + (void) snprintf(sp, len, "\n");
7712 + sp = qlt_str_ptr(qlt, sp, &len);
7713 + }
7714 +
7715 + mutex_enter(&qlt->qlt_ioctl_lock);
7716 + qlt->qlt_ioctl_flags &=
7717 + ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
7718 + qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
7719 + mutex_exit(&qlt->qlt_ioctl_lock);
7720 +
7721 + EL(qlt, "done...\n");
7722 + return (FCT_SUCCESS);
7723 +}
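
The ASCII buffer sizing in qlt_27xx_fw_dump() follows the comment above: each binary byte of the dump expands to two hex digits plus a space, and a newline is added after every 16 binary bytes, plus one trailing newline. A small stand-alone sketch of that arithmetic; ascii_dump_size() is an illustrative helper, not a driver function:

#include <stdio.h>

/* Mirrors the fw_ascii_dump_size computation above (illustrative only). */
static unsigned int
ascii_dump_size(unsigned int bin_size)
{
	return ((bin_size << 1) + bin_size + bin_size / 16 + 1);
}

int
main(void)
{
	/* a 1 MB binary dump needs 3 MB + 64 KB + 1 bytes of ASCII text */
	(void) printf("%u\n", ascii_dump_size(1024 * 1024));	/* 3211265 */
	return (0);
}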
7724 +
4956 7725 /*
4957 7726 * All QLT_FIRMWARE_* will mainly be handled in this function
4958 7727 * It can not be called in interrupt context
4959 7728 *
4960 7729 * FWDUMP's purpose is to serve ioctl, so we will use qlt_ioctl_flags
4961 7730 * and qlt_ioctl_lock
4962 7731 */
4963 7732 static fct_status_t
4964 7733 qlt_firmware_dump(fct_local_port_t *port, stmf_state_change_info_t *ssci)
4965 7734 {
4966 7735 qlt_state_t *qlt = (qlt_state_t *)port->port_fca_private;
4967 7736 int i;
4968 7737 int retries, n;
4969 7738 uint_t size_left;
4970 7739 char c = ' ';
4971 7740 uint32_t addr, endaddr, words_to_read;
4972 7741 caddr_t buf;
4973 7742 fct_status_t ret;
4974 7743
7744 + if (qlt->qlt_27xx_chip) {
7745 + return (qlt_27xx_fw_dump(port, ssci));
7746 + }
4975 7747 mutex_enter(&qlt->qlt_ioctl_lock);
4976 7748 /*
4977 7749 * To make sure that there's no outstanding dumping task
4978 7750 */
4979 7751 if (qlt->qlt_ioctl_flags & QLT_FWDUMP_INPROGRESS) {
4980 7752 mutex_exit(&qlt->qlt_ioctl_lock);
4981 7753 EL(qlt, "qlt_ioctl_flags=%xh, inprogress\n",
4982 7754 qlt->qlt_ioctl_flags);
4983 - EL(qlt, "outstanding\n");
4984 7755 return (FCT_FAILURE);
4985 7756 }
4986 7757
4987 7758 /*
4988 7759 * To make sure not to overwrite existing dump
4989 7760 */
4990 7761 if ((qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID) &&
4991 7762 !(qlt->qlt_ioctl_flags & QLT_FWDUMP_TRIGGERED_BY_USER) &&
4992 7763 !(qlt->qlt_ioctl_flags & QLT_FWDUMP_FETCHED_BY_USER)) {
4993 7764 /*
4994 - * If we have alreay one dump, but it's not triggered by user
7765 + * If we have already one dump, but it's not triggered by user
4995 7766 * and the user hasn't fetched it, we shouldn't dump again.
4996 7767 */
4997 7768 mutex_exit(&qlt->qlt_ioctl_lock);
4998 7769 EL(qlt, "qlt_ioctl_flags=%xh, already done\n",
4999 7770 qlt->qlt_ioctl_flags);
5000 7771 cmn_err(CE_NOTE, "qlt(%d): Skipping firmware dump as there "
5001 7772 "is one already outstanding.", qlt->instance);
5002 7773 return (FCT_FAILURE);
5003 7774 }
5004 7775 qlt->qlt_ioctl_flags |= QLT_FWDUMP_INPROGRESS;
5005 - if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
7776 + if ((ssci != NULL) && (ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
5006 7777 qlt->qlt_ioctl_flags |= QLT_FWDUMP_TRIGGERED_BY_USER;
5007 7778 } else {
5008 7779 qlt->qlt_ioctl_flags &= ~QLT_FWDUMP_TRIGGERED_BY_USER;
5009 7780 }
5010 7781 mutex_exit(&qlt->qlt_ioctl_lock);
5011 7782
5012 7783 size_left = QLT_FWDUMP_BUFSIZE;
7784 + if (qlt->qlt_mq_enabled && qlt->qlt_queue_cnt >= 8) {
7785 + size_left += 512 * 1024;
7786 + }
7787 + qlt->fw_dump_size = size_left;
5013 7788 if (!qlt->qlt_fwdump_buf) {
5014 7789 ASSERT(!(qlt->qlt_ioctl_flags & QLT_FWDUMP_ISVALID));
5015 7790 /*
5016 7791 * It's the only place that we allocate buf for dumping. After
5017 7792 * it's allocated, we will use it until the port is detached.
5018 7793 */
5019 - qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_SLEEP);
7794 + qlt->qlt_fwdump_buf = kmem_zalloc(size_left, KM_NOSLEEP);
7795 + if (qlt->qlt_fwdump_buf == NULL) {
7796 + EL(qlt, "cannot alloc fwdump buffer\n");
7797 + cmn_err(CE_WARN, "!qlt(%d): cannot alloc fwdump buf",
7798 + qlt->instance);
7799 + return (FCT_FAILURE);
7800 + }
5020 7801 }
5021 7802
7803 + EL(qlt, "starting firmware dump...\n");
7804 + cmn_err(CE_WARN, "!qlt(%d) starting firmware dump...",
7805 + qlt->instance);
7806 +
5022 7807 /*
5023 7808 * Start to dump firmware
5024 7809 */
5025 7810 buf = (caddr_t)qlt->qlt_fwdump_buf;
5026 7811
5027 7812 /*
5028 7813 * Print the ISP firmware revision number and attributes information
5029 7814 * Read the RISC to Host Status register
5030 7815 */
5031 7816 n = (int)snprintf(buf, size_left, "ISP FW Version %d.%02d.%02d "
5032 - "Attributes %04x\n\nR2H Status Register\n%08x",
7817 + "Attributes %04x\n\nR2H Status register\n%08x",
5033 7818 qlt->fw_major, qlt->fw_minor,
5034 7819 qlt->fw_subminor, qlt->fw_attr, REG_RD32(qlt, REG_RISC_STATUS));
5035 7820 buf += n; size_left -= n;
5036 7821
5037 7822 /*
5038 7823 * Before pausing the RISC, make sure no mailbox can execute
5039 7824 */
5040 7825 mutex_enter(&qlt->mbox_lock);
5041 - if (qlt->mbox_io_state != MBOX_STATE_UNKNOWN) {
7826 + if ((qlt->mbox_io_state != MBOX_STATE_UNKNOWN) &&
7827 + (qlt->qlt_intr_enabled)) {
5042 7828 /*
5043 7829 * Wait to grab the mailboxes
5044 7830 */
5045 7831 for (retries = 0; (qlt->mbox_io_state != MBOX_STATE_READY) &&
5046 7832 (qlt->mbox_io_state != MBOX_STATE_UNKNOWN); retries++) {
5047 7833 (void) cv_timedwait(&qlt->mbox_cv, &qlt->mbox_lock,
5048 7834 ddi_get_lbolt() + drv_usectohz(1000000));
5049 7835 if (retries > 5) {
5050 7836 mutex_exit(&qlt->mbox_lock);
5051 7837 EL(qlt, "can't drain out mailbox commands\n");
5052 7838 goto dump_fail;
5053 7839 }
5054 7840 }
5055 7841 qlt->mbox_io_state = MBOX_STATE_UNKNOWN;
5056 7842 cv_broadcast(&qlt->mbox_cv);
5057 7843 }
5058 7844 mutex_exit(&qlt->mbox_lock);
5059 7845
5060 7846 /*
5061 7847 * Pause the RISC processor
5062 7848 */
5063 7849 REG_WR32(qlt, REG_HCCR, HCCR_CMD(SET_RISC_PAUSE));
5064 7850
5065 7851 /*
5066 7852 * Wait for the RISC processor to pause
5067 7853 */
5068 7854 for (i = 0; i < 200; i++) {
[ 17 lines elided ]
5069 7855 if (REG_RD32(qlt, REG_RISC_STATUS) & 0x100) {
5070 7856 break;
5071 7857 }
5072 7858 drv_usecwait(1000);
5073 7859 }
5074 7860 if (i == 200) {
5075 7861 EL(qlt, "can't pause\n");
5076 7862 return (FCT_FAILURE);
5077 7863 }
5078 7864
5079 - if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip)) {
7865 + if (qlt->qlt_83xx_chip) {
7866 + /* Disable ECC checks in FB registers */
7867 + REG_WR32(qlt, 0x54, 0x6000);
7868 + REG_WR32(qlt, 0xC0, 0); /* 6000h */
7869 + REG_WR32(qlt, 0xCC, 0); /* 6003h */
7870 + REG_WR32(qlt, 0x54, 0x6010);
7871 + REG_WR32(qlt, 0xD4, 0); /* 6015h */
7872 +
7873 + /* disable ECC detection in PCR whilst dumping */
7874 + REG_WR32(qlt, 0x54, 0xF70);
7875 + REG_WR32(qlt, 0xF0, 0x60000000);
7876 + }
7877 +
7878 + if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip) &&
7879 + (!qlt->qlt_83xx_chip) && (!qlt->qlt_27xx_chip)) {
5080 7880 goto over_25xx_specific_dump;
5081 7881 }
5082 7882 n = (int)snprintf(buf, size_left, "\n\nHostRisc registers\n");
5083 7883 buf += n; size_left -= n;
5084 7884 REG_WR32(qlt, 0x54, 0x7000);
5085 7885 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5086 7886 buf += n; size_left -= n;
5087 7887 REG_WR32(qlt, 0x54, 0x7010);
5088 7888 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5089 7889 buf += n; size_left -= n;
7890 + if (qlt->qlt_83xx_chip) {
7891 + REG_WR32(qlt, 0x54, 0x7040);
7892 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
7893 + buf += n; size_left -= n;
7894 + }
5090 7895 REG_WR32(qlt, 0x54, 0x7C00);
5091 7896
5092 7897 n = (int)snprintf(buf, size_left, "\nPCIe registers\n");
5093 7898 buf += n; size_left -= n;
5094 7899 REG_WR32(qlt, 0xC0, 0x1);
5095 7900 n = qlt_fwdump_dump_regs(qlt, buf, 0xc4, 3, size_left);
5096 7901 buf += n; size_left -= n;
5097 7902 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 1, size_left);
5098 7903 buf += n; size_left -= n;
5099 7904 REG_WR32(qlt, 0xC0, 0x0);
5100 7905
7906 + /* don't need to do this for 83xx */
7907 + if ((!qlt->qlt_83xx_chip) && (qlt->qlt_mq_enabled)) {
7908 + uint16_t qi;
7909 +
7910 + for (qi = 0; qi < qlt->qlt_queue_cnt; qi++) {
7911 +
7912 + n = (int)snprintf(buf, size_left,
7913 + "\n\nQueue Pointers #%x\n", qi);
7914 + buf += n; size_left -= n;
7915 +
7916 + n = (int)snprintf(buf, size_left, "%08x ",
7917 + MQBAR_RD32(qlt,
7918 + (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_IN));
7919 + buf += n; size_left -= n;
7920 + n = (int)snprintf(buf, size_left, "%08x ",
7921 + MQBAR_RD32(qlt,
7922 + (qi * MQBAR_REG_OFFSET) + MQBAR_REQ_OUT));
7923 + buf += n; size_left -= n;
7924 + n = (int)snprintf(buf, size_left, "%08x ",
7925 + MQBAR_RD32(qlt,
7926 + (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_IN));
7927 + buf += n; size_left -= n;
7928 + n = (int)snprintf(buf, size_left, "%08x",
7929 + MQBAR_RD32(qlt,
7930 + (qi * MQBAR_REG_OFFSET) + MQBAR_RESP_OUT));
7931 + buf += n; size_left -= n;
7932 + }
7933 + }
7934 +
5101 7935 over_25xx_specific_dump:;
5102 - n = (int)snprintf(buf, size_left, "\n\nHost Interface Registers\n");
7936 + n = (int)snprintf(buf, size_left, "\n\nHost Interface registers\n");
5103 7937 buf += n; size_left -= n;
5104 7938 /*
5105 - * Capture data from 32 regsiters
7939 + * Capture data from 32 registers
5106 7940 */
5107 7941 n = qlt_fwdump_dump_regs(qlt, buf, 0, 32, size_left);
5108 7942 buf += n; size_left -= n;
5109 7943
5110 7944 /*
5111 7945 * Disable interrupts
5112 7946 */
5113 7947 REG_WR32(qlt, 0xc, 0);
7948 + EL(qlt, "Disable interrupt\n");
5114 7949
5115 7950 /*
5116 7951 * Shadow registers
5117 7952 */
5118 - n = (int)snprintf(buf, size_left, "\nShadow Registers\n");
7953 + n = (int)snprintf(buf, size_left, "\nShadow registers\n");
5119 7954 buf += n; size_left -= n;
5120 7955
5121 7956 REG_WR32(qlt, 0x54, 0xF70);
5122 7957 addr = 0xb0000000;
5123 7958 for (i = 0; i < 0xb; i++) {
5124 7959 if ((!qlt->qlt_25xx_chip) &&
5125 7960 (!qlt->qlt_81xx_chip) &&
7961 + (!qlt->qlt_83xx_chip) &&
5126 7962 (i >= 7)) {
5127 7963 break;
5128 7964 }
5129 7965 if (i && ((i & 7) == 0)) {
5130 7966 n = (int)snprintf(buf, size_left, "\n");
5131 7967 buf += n; size_left -= n;
5132 7968 }
5133 7969 REG_WR32(qlt, 0xF0, addr);
5134 7970 n = (int)snprintf(buf, size_left, "%08x ", REG_RD32(qlt, 0xFC));
5135 7971 buf += n; size_left -= n;
5136 7972 addr += 0x100000;
5137 7973 }
5138 7974
5139 - if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
7975 + if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
7976 + (qlt->qlt_83xx_chip)) {
5140 7977 REG_WR32(qlt, 0x54, 0x10);
5141 7978 n = (int)snprintf(buf, size_left,
5142 - "\n\nRISC IO Register\n%08x", REG_RD32(qlt, 0xC0));
7979 + "\n\nRISC IO register\n%08x", REG_RD32(qlt, 0xC0));
5143 7980 buf += n; size_left -= n;
5144 7981 }
5145 7982
5146 7983 /*
5147 7984 * Mailbox registers
5148 7985 */
5149 - n = (int)snprintf(buf, size_left, "\n\nMailbox Registers\n");
7986 + n = (int)snprintf(buf, size_left, "\n\nMailbox registers\n");
5150 7987 buf += n; size_left -= n;
5151 7988 for (i = 0; i < 32; i += 2) {
5152 7989 if ((i + 2) & 15) {
5153 7990 c = ' ';
5154 7991 } else {
5155 7992 c = '\n';
5156 7993 }
5157 7994 n = (int)snprintf(buf, size_left, "%04x %04x%c",
5158 7995 REG_RD16(qlt, 0x80 + (i << 1)),
5159 7996 REG_RD16(qlt, 0x80 + ((i+1) << 1)), c);
5160 7997 buf += n; size_left -= n;
5161 7998 }
5162 7999
5163 8000 /*
5164 8001 * Transfer sequence registers
5165 8002 */
5166 - n = (int)snprintf(buf, size_left, "\nXSEQ GP Registers\n");
8003 + n = (int)snprintf(buf, size_left, "\nXSEQ GP registers\n");
5167 8004 buf += n; size_left -= n;
5168 8005
8006 + if (qlt->qlt_83xx_chip) {
8007 + REG_WR32(qlt, 0x54, 0xBE00);
8008 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8009 + buf += n; size_left -= n;
8010 + REG_WR32(qlt, 0x54, 0xBE10);
8011 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8012 + buf += n; size_left -= n;
8013 + REG_WR32(qlt, 0x54, 0xBE20);
8014 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8015 + buf += n; size_left -= n;
8016 + REG_WR32(qlt, 0x54, 0xBE30);
8017 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8018 + buf += n; size_left -= n;
8019 + REG_WR32(qlt, 0x54, 0xBE40);
8020 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8021 + buf += n; size_left -= n;
8022 + REG_WR32(qlt, 0x54, 0xBE50);
8023 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8024 + buf += n; size_left -= n;
8025 + REG_WR32(qlt, 0x54, 0xBE60);
8026 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8027 + buf += n; size_left -= n;
8028 + REG_WR32(qlt, 0x54, 0xBE70);
8029 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8030 + buf += n; size_left -= n;
8031 + }
5169 8032 REG_WR32(qlt, 0x54, 0xBF00);
5170 8033 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5171 8034 buf += n; size_left -= n;
5172 8035 REG_WR32(qlt, 0x54, 0xBF10);
5173 8036 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5174 8037 buf += n; size_left -= n;
5175 8038 REG_WR32(qlt, 0x54, 0xBF20);
5176 8039 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5177 8040 buf += n; size_left -= n;
5178 8041 REG_WR32(qlt, 0x54, 0xBF30);
5179 8042 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5180 8043 buf += n; size_left -= n;
5181 8044 REG_WR32(qlt, 0x54, 0xBF40);
5182 8045 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5183 8046 buf += n; size_left -= n;
5184 8047 REG_WR32(qlt, 0x54, 0xBF50);
[ 6 lines elided ]
5185 8048 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5186 8049 buf += n; size_left -= n;
5187 8050 REG_WR32(qlt, 0x54, 0xBF60);
5188 8051 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5189 8052 buf += n; size_left -= n;
5190 8053 REG_WR32(qlt, 0x54, 0xBF70);
5191 8054 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5192 8055 buf += n; size_left -= n;
5193 8056 n = (int)snprintf(buf, size_left, "\nXSEQ-0 registers\n");
5194 8057 buf += n; size_left -= n;
5195 - if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
8058 + if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
8059 + (qlt->qlt_83xx_chip)) {
5196 8060 REG_WR32(qlt, 0x54, 0xBFC0);
5197 8061 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5198 8062 buf += n; size_left -= n;
5199 8063 REG_WR32(qlt, 0x54, 0xBFD0);
5200 8064 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5201 8065 buf += n; size_left -= n;
5202 8066 }
5203 8067 REG_WR32(qlt, 0x54, 0xBFE0);
5204 8068 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5205 8069 buf += n; size_left -= n;
5206 8070 n = (int)snprintf(buf, size_left, "\nXSEQ-1 registers\n");
5207 8071 buf += n; size_left -= n;
5208 8072 REG_WR32(qlt, 0x54, 0xBFF0);
5209 8073 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5210 8074 buf += n; size_left -= n;
5211 8075
8076 + if (qlt->qlt_83xx_chip) {
8077 + n = (int)snprintf(buf, size_left, "\nXSEQ-2 registers\n");
8078 + buf += n; size_left -= n;
8079 + REG_WR32(qlt, 0x54, 0xBEF0);
8080 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8081 + buf += n; size_left -= n;
8082 + }
8083 +
5212 8084 /*
5213 8085 * Receive sequence registers
5214 8086 */
5215 - n = (int)snprintf(buf, size_left, "\nRSEQ GP Registers\n");
8087 + n = (int)snprintf(buf, size_left, "\nRSEQ GP registers\n");
5216 8088 buf += n; size_left -= n;
8089 + if (qlt->qlt_83xx_chip) {
8090 + REG_WR32(qlt, 0x54, 0xFE00);
8091 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8092 + buf += n; size_left -= n;
8093 + REG_WR32(qlt, 0x54, 0xFE10);
8094 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8095 + buf += n; size_left -= n;
8096 + REG_WR32(qlt, 0x54, 0xFE20);
8097 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8098 + buf += n; size_left -= n;
8099 + REG_WR32(qlt, 0x54, 0xFE30);
8100 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8101 + buf += n; size_left -= n;
8102 + REG_WR32(qlt, 0x54, 0xFE40);
8103 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8104 + buf += n; size_left -= n;
8105 + REG_WR32(qlt, 0x54, 0xFE50);
8106 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8107 + buf += n; size_left -= n;
8108 + REG_WR32(qlt, 0x54, 0xFE60);
8109 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8110 + buf += n; size_left -= n;
8111 + REG_WR32(qlt, 0x54, 0xFE70);
8112 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8113 + buf += n; size_left -= n;
8114 + }
5217 8115 REG_WR32(qlt, 0x54, 0xFF00);
5218 8116 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5219 8117 buf += n; size_left -= n;
5220 8118 REG_WR32(qlt, 0x54, 0xFF10);
5221 8119 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5222 8120 buf += n; size_left -= n;
5223 8121 REG_WR32(qlt, 0x54, 0xFF20);
5224 8122 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5225 8123 buf += n; size_left -= n;
5226 8124 REG_WR32(qlt, 0x54, 0xFF30);
5227 8125 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5228 8126 buf += n; size_left -= n;
5229 8127 REG_WR32(qlt, 0x54, 0xFF40);
5230 8128 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5231 8129 buf += n; size_left -= n;
5232 8130 REG_WR32(qlt, 0x54, 0xFF50);
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
5233 8131 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5234 8132 buf += n; size_left -= n;
5235 8133 REG_WR32(qlt, 0x54, 0xFF60);
5236 8134 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5237 8135 buf += n; size_left -= n;
5238 8136 REG_WR32(qlt, 0x54, 0xFF70);
5239 8137 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5240 8138 buf += n; size_left -= n;
5241 8139 n = (int)snprintf(buf, size_left, "\nRSEQ-0 registers\n");
5242 8140 buf += n; size_left -= n;
5243 - if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
8141 + if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
8142 + (qlt->qlt_83xx_chip)) {
5244 8143 REG_WR32(qlt, 0x54, 0xFFC0);
5245 8144 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5246 8145 buf += n; size_left -= n;
5247 8146 }
5248 8147 REG_WR32(qlt, 0x54, 0xFFD0);
5249 8148 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5250 8149 buf += n; size_left -= n;
5251 8150 n = (int)snprintf(buf, size_left, "\nRSEQ-1 registers\n");
5252 8151 buf += n; size_left -= n;
5253 8152 REG_WR32(qlt, 0x54, 0xFFE0);
5254 8153 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5255 8154 buf += n; size_left -= n;
5256 8155 n = (int)snprintf(buf, size_left, "\nRSEQ-2 registers\n");
5257 8156 buf += n; size_left -= n;
5258 8157 REG_WR32(qlt, 0x54, 0xFFF0);
5259 8158 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5260 8159 buf += n; size_left -= n;
8160 + if (qlt->qlt_83xx_chip) {
8161 + n = (int)snprintf(buf, size_left, "\nRSEQ-3 registers\n");
8162 + buf += n; size_left -= n;
8163 + REG_WR32(qlt, 0x54, 0xFEF0);
8164 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8165 + buf += n; size_left -= n;
8166 + }
5261 8167
5262 - if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip))
8168 + if ((!qlt->qlt_25xx_chip) && (!qlt->qlt_81xx_chip) &&
8169 + (!qlt->qlt_83xx_chip))
5263 8170 goto over_aseq_regs;
5264 8171
5265 8172 /*
5266 8173 * Auxiliary sequencer registers
5267 8174 */
5268 - n = (int)snprintf(buf, size_left, "\nASEQ GP Registers\n");
8175 + n = (int)snprintf(buf, size_left, "\nASEQ GP registers\n");
5269 8176 buf += n; size_left -= n;
5270 8177 REG_WR32(qlt, 0x54, 0xB000);
5271 8178 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5272 8179 buf += n; size_left -= n;
5273 8180 REG_WR32(qlt, 0x54, 0xB010);
5274 8181 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5275 8182 buf += n; size_left -= n;
5276 8183 REG_WR32(qlt, 0x54, 0xB020);
5277 8184 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5278 8185 buf += n; size_left -= n;
5279 8186 REG_WR32(qlt, 0x54, 0xB030);
5280 8187 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5281 8188 buf += n; size_left -= n;
5282 8189 REG_WR32(qlt, 0x54, 0xB040);
5283 8190 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
5284 8191 buf += n; size_left -= n;
5285 8192 REG_WR32(qlt, 0x54, 0xB050);
5286 8193 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5287 8194 buf += n; size_left -= n;
5288 8195 REG_WR32(qlt, 0x54, 0xB060);
5289 8196 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5290 8197 buf += n; size_left -= n;
5291 8198 REG_WR32(qlt, 0x54, 0xB070);
5292 8199 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5293 8200 buf += n; size_left -= n;
8201 + if (qlt->qlt_83xx_chip) {
8202 + REG_WR32(qlt, 0x54, 0xB100);
8203 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8204 + buf += n; size_left -= n;
8205 + REG_WR32(qlt, 0x54, 0xB110);
8206 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8207 + buf += n; size_left -= n;
8208 + REG_WR32(qlt, 0x54, 0xB120);
8209 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8210 + buf += n; size_left -= n;
8211 + REG_WR32(qlt, 0x54, 0xB130);
8212 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8213 + buf += n; size_left -= n;
8214 + REG_WR32(qlt, 0x54, 0xB140);
8215 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8216 + buf += n; size_left -= n;
8217 + REG_WR32(qlt, 0x54, 0xB150);
8218 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8219 + buf += n; size_left -= n;
8220 + REG_WR32(qlt, 0x54, 0xB160);
8221 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8222 + buf += n; size_left -= n;
8223 + REG_WR32(qlt, 0x54, 0xB170);
8224 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8225 + buf += n; size_left -= n;
8226 + }
5294 8227 n = (int)snprintf(buf, size_left, "\nASEQ-0 registers\n");
5295 8228 buf += n; size_left -= n;
5296 8229 REG_WR32(qlt, 0x54, 0xB0C0);
5297 8230 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5298 8231 buf += n; size_left -= n;
5299 8232 REG_WR32(qlt, 0x54, 0xB0D0);
5300 8233 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5301 8234 buf += n; size_left -= n;
5302 8235 n = (int)snprintf(buf, size_left, "\nASEQ-1 registers\n");
5303 8236 buf += n; size_left -= n;
5304 8237 REG_WR32(qlt, 0x54, 0xB0E0);
5305 8238 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5306 8239 buf += n; size_left -= n;
5307 8240 n = (int)snprintf(buf, size_left, "\nASEQ-2 registers\n");
5308 8241 buf += n; size_left -= n;
5309 8242 REG_WR32(qlt, 0x54, 0xB0F0);
5310 8243 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5311 8244 buf += n; size_left -= n;
8245 + if (qlt->qlt_83xx_chip) {
8246 + n = (int)snprintf(buf, size_left, "\nASEQ-3 registers\n");
8247 + buf += n; size_left -= n;
8248 + REG_WR32(qlt, 0x54, 0xB1F0);
8249 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8250 + buf += n; size_left -= n;
8251 + }
5312 8252
5313 8253 over_aseq_regs:;
5314 8254
5315 8255 /*
5316 8256 * Command DMA registers
5317 8257 */
5318 8258 n = (int)snprintf(buf, size_left, "\nCommand DMA registers\n");
5319 8259 buf += n; size_left -= n;
5320 8260 REG_WR32(qlt, 0x54, 0x7100);
5321 8261 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5322 8262 buf += n; size_left -= n;
8263 + if (qlt->qlt_83xx_chip) {
8264 + REG_WR32(qlt, 0x54, 0x7120);
8265 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8266 + buf += n; size_left -= n;
8267 + REG_WR32(qlt, 0x54, 0x7130);
8268 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8269 + buf += n; size_left -= n;
8270 + REG_WR32(qlt, 0x54, 0x71F0);
8271 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8272 + buf += n; size_left -= n;
8273 + }
5323 8274
5324 8275 /*
5325 8276 * Queues
5326 8277 */
5327 8278 n = (int)snprintf(buf, size_left,
5328 8279 "\nRequest0 Queue DMA Channel registers\n");
5329 8280 buf += n; size_left -= n;
5330 8281 REG_WR32(qlt, 0x54, 0x7200);
5331 8282 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5332 8283 buf += n; size_left -= n;
5333 8284 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5334 8285 buf += n; size_left -= n;
5335 8286
5336 8287 n = (int)snprintf(buf, size_left,
5337 8288 "\n\nResponse0 Queue DMA Channel registers\n");
5338 8289 buf += n; size_left -= n;
5339 8290 REG_WR32(qlt, 0x54, 0x7300);
5340 8291 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5341 8292 buf += n; size_left -= n;
5342 8293 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5343 8294 buf += n; size_left -= n;
5344 8295
5345 8296 n = (int)snprintf(buf, size_left,
5346 8297 "\n\nRequest1 Queue DMA Channel registers\n");
5347 8298 buf += n; size_left -= n;
5348 8299 REG_WR32(qlt, 0x54, 0x7400);
5349 8300 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 8, size_left);
5350 8301 buf += n; size_left -= n;
5351 8302 n = qlt_fwdump_dump_regs(qlt, buf, 0xe4, 7, size_left);
5352 8303 buf += n; size_left -= n;
5353 8304
5354 8305 /*
5355 8306 * Transmit DMA registers
5356 8307 */
5357 8308 n = (int)snprintf(buf, size_left, "\n\nXMT0 Data DMA registers\n");
5358 8309 buf += n; size_left -= n;
5359 8310 REG_WR32(qlt, 0x54, 0x7600);
5360 8311 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5361 8312 buf += n; size_left -= n;
5362 8313 REG_WR32(qlt, 0x54, 0x7610);
5363 8314 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5364 8315 buf += n; size_left -= n;
5365 8316 n = (int)snprintf(buf, size_left, "\nXMT1 Data DMA registers\n");
5366 8317 buf += n; size_left -= n;
5367 8318 REG_WR32(qlt, 0x54, 0x7620);
5368 8319 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5369 8320 buf += n; size_left -= n;
5370 8321 REG_WR32(qlt, 0x54, 0x7630);
5371 8322 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5372 8323 buf += n; size_left -= n;
5373 8324 n = (int)snprintf(buf, size_left, "\nXMT2 Data DMA registers\n");
5374 8325 buf += n; size_left -= n;
5375 8326 REG_WR32(qlt, 0x54, 0x7640);
5376 8327 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5377 8328 buf += n; size_left -= n;
5378 8329 REG_WR32(qlt, 0x54, 0x7650);
5379 8330 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5380 8331 buf += n; size_left -= n;
5381 8332 n = (int)snprintf(buf, size_left, "\nXMT3 Data DMA registers\n");
5382 8333 buf += n; size_left -= n;
5383 8334 REG_WR32(qlt, 0x54, 0x7660);
5384 8335 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5385 8336 buf += n; size_left -= n;
5386 8337 REG_WR32(qlt, 0x54, 0x7670);
5387 8338 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5388 8339 buf += n; size_left -= n;
5389 8340 n = (int)snprintf(buf, size_left, "\nXMT4 Data DMA registers\n");
5390 8341 buf += n; size_left -= n;
5391 8342 REG_WR32(qlt, 0x54, 0x7680);
5392 8343 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5393 8344 buf += n; size_left -= n;
5394 8345 REG_WR32(qlt, 0x54, 0x7690);
5395 8346 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5396 8347 buf += n; size_left -= n;
5397 8348 n = (int)snprintf(buf, size_left, "\nXMT Data DMA Common registers\n");
5398 8349 buf += n; size_left -= n;
5399 8350 REG_WR32(qlt, 0x54, 0x76A0);
5400 8351 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5401 8352 buf += n; size_left -= n;
5402 8353
5403 8354 /*
5404 8355 * Receive DMA registers
5405 8356 */
5406 8357 n = (int)snprintf(buf, size_left,
5407 8358 "\nRCV Thread 0 Data DMA registers\n");
5408 8359 buf += n; size_left -= n;
5409 8360 REG_WR32(qlt, 0x54, 0x7700);
5410 8361 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5411 8362 buf += n; size_left -= n;
5412 8363 REG_WR32(qlt, 0x54, 0x7710);
5413 8364 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5414 8365 buf += n; size_left -= n;
5415 8366 n = (int)snprintf(buf, size_left,
5416 8367 "\nRCV Thread 1 Data DMA registers\n");
5417 8368 buf += n; size_left -= n;
5418 8369 REG_WR32(qlt, 0x54, 0x7720);
5419 8370 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5420 8371 buf += n; size_left -= n;
5421 8372 REG_WR32(qlt, 0x54, 0x7730);
5422 8373 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5423 8374 buf += n; size_left -= n;
5424 8375
5425 8376 /*
5426 8377 * RISC registers
5427 8378 */
5428 8379 n = (int)snprintf(buf, size_left, "\nRISC GP registers\n");
5429 8380 buf += n; size_left -= n;
5430 8381 REG_WR32(qlt, 0x54, 0x0F00);
5431 8382 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5432 8383 buf += n; size_left -= n;
5433 8384 REG_WR32(qlt, 0x54, 0x0F10);
5434 8385 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5435 8386 buf += n; size_left -= n;
5436 8387 REG_WR32(qlt, 0x54, 0x0F20);
5437 8388 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5438 8389 buf += n; size_left -= n;
5439 8390 REG_WR32(qlt, 0x54, 0x0F30);
5440 8391 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5441 8392 buf += n; size_left -= n;
5442 8393 REG_WR32(qlt, 0x54, 0x0F40);
5443 8394 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5444 8395 buf += n; size_left -= n;
5445 8396 REG_WR32(qlt, 0x54, 0x0F50);
5446 8397 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5447 8398 buf += n; size_left -= n;
5448 8399 REG_WR32(qlt, 0x54, 0x0F60);
5449 8400 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5450 8401 buf += n; size_left -= n;
5451 8402 REG_WR32(qlt, 0x54, 0x0F70);
5452 8403 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5453 8404 buf += n; size_left -= n;
5454 8405
5455 8406 /*
5456 8407 * Local memory controller registers
5457 8408 */
5458 8409 n = (int)snprintf(buf, size_left, "\nLMC registers\n");
5459 8410 buf += n; size_left -= n;
5460 8411 REG_WR32(qlt, 0x54, 0x3000);
5461 8412 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5462 8413 buf += n; size_left -= n;
5463 8414 REG_WR32(qlt, 0x54, 0x3010);
5464 8415 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5465 8416 buf += n; size_left -= n;
5466 8417 REG_WR32(qlt, 0x54, 0x3020);
5467 8418 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5468 8419 buf += n; size_left -= n;
5469 8420 REG_WR32(qlt, 0x54, 0x3030);
5470 8421 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5471 8422 buf += n; size_left -= n;
|
↓ open down ↓ |
139 lines elided |
↑ open up ↑ |
5472 8423 REG_WR32(qlt, 0x54, 0x3040);
5473 8424 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5474 8425 buf += n; size_left -= n;
5475 8426 REG_WR32(qlt, 0x54, 0x3050);
5476 8427 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5477 8428 buf += n; size_left -= n;
5478 8429 REG_WR32(qlt, 0x54, 0x3060);
5479 8430 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5480 8431 buf += n; size_left -= n;
5481 8432
5482 - if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
8433 + if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
8434 + (qlt->qlt_83xx_chip)) {
5483 8435 REG_WR32(qlt, 0x54, 0x3070);
5484 8436 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5485 8437 buf += n; size_left -= n;
5486 8438 }
5487 8439
5488 8440 /*
5489 - * Fibre protocol module regsiters
8441 + * Fibre protocol module registers
5490 8442 */
5491 8443 n = (int)snprintf(buf, size_left, "\nFPM hardware registers\n");
5492 8444 buf += n; size_left -= n;
5493 8445 REG_WR32(qlt, 0x54, 0x4000);
5494 8446 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5495 8447 buf += n; size_left -= n;
5496 8448 REG_WR32(qlt, 0x54, 0x4010);
5497 8449 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5498 8450 buf += n; size_left -= n;
5499 8451 REG_WR32(qlt, 0x54, 0x4020);
5500 8452 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5501 8453 buf += n; size_left -= n;
5502 8454 REG_WR32(qlt, 0x54, 0x4030);
5503 8455 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5504 8456 buf += n; size_left -= n;
5505 8457 REG_WR32(qlt, 0x54, 0x4040);
5506 8458 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5507 8459 buf += n; size_left -= n;
5508 8460 REG_WR32(qlt, 0x54, 0x4050);
5509 8461 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5510 8462 buf += n; size_left -= n;
5511 8463 REG_WR32(qlt, 0x54, 0x4060);
5512 8464 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5513 8465 buf += n; size_left -= n;
5514 8466 REG_WR32(qlt, 0x54, 0x4070);
5515 8467 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5516 8468 buf += n; size_left -= n;
5517 8469 REG_WR32(qlt, 0x54, 0x4080);
5518 8470 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
|
↓ open down ↓ |
19 lines elided |
↑ open up ↑ |
5519 8471 buf += n; size_left -= n;
5520 8472 REG_WR32(qlt, 0x54, 0x4090);
5521 8473 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5522 8474 buf += n; size_left -= n;
5523 8475 REG_WR32(qlt, 0x54, 0x40A0);
5524 8476 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5525 8477 buf += n; size_left -= n;
5526 8478 REG_WR32(qlt, 0x54, 0x40B0);
5527 8479 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5528 8480 buf += n; size_left -= n;
5529 - if (qlt->qlt_81xx_chip) {
8481 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip)) {
5530 8482 REG_WR32(qlt, 0x54, 0x40C0);
5531 8483 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5532 8484 buf += n; size_left -= n;
5533 8485 REG_WR32(qlt, 0x54, 0x40D0);
5534 8486 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5535 8487 buf += n; size_left -= n;
5536 8488 }
8489 + if (qlt->qlt_83xx_chip) {
8490 + REG_WR32(qlt, 0x54, 0x40E0);
8491 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8492 + buf += n; size_left -= n;
8493 + REG_WR32(qlt, 0x54, 0x40F0);
8494 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8495 + buf += n; size_left -= n;
5537 8496
8497 + n = (int)snprintf(buf, size_left, "\nRQ0 Array registers\n");
8498 + buf += n; size_left -= n;
8499 + REG_WR32(qlt, 0x54, 0x5C00);
8500 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8501 + buf += n; size_left -= n;
8502 + REG_WR32(qlt, 0x54, 0x5C10);
8503 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8504 + buf += n; size_left -= n;
8505 + REG_WR32(qlt, 0x54, 0x5C20);
8506 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8507 + buf += n; size_left -= n;
8508 + REG_WR32(qlt, 0x54, 0x5C30);
8509 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8510 + buf += n; size_left -= n;
8511 + REG_WR32(qlt, 0x54, 0x5C40);
8512 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8513 + buf += n; size_left -= n;
8514 + REG_WR32(qlt, 0x54, 0x5C50);
8515 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8516 + buf += n; size_left -= n;
8517 + REG_WR32(qlt, 0x54, 0x5C60);
8518 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8519 + buf += n; size_left -= n;
8520 + REG_WR32(qlt, 0x54, 0x5C70);
8521 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8522 + buf += n; size_left -= n;
8523 + REG_WR32(qlt, 0x54, 0x5C80);
8524 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8525 + buf += n; size_left -= n;
8526 + REG_WR32(qlt, 0x54, 0x5C90);
8527 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8528 + buf += n; size_left -= n;
8529 + REG_WR32(qlt, 0x54, 0x5CA0);
8530 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8531 + buf += n; size_left -= n;
8532 + REG_WR32(qlt, 0x54, 0x5CB0);
8533 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8534 + buf += n; size_left -= n;
8535 + REG_WR32(qlt, 0x54, 0x5CC0);
8536 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8537 + buf += n; size_left -= n;
8538 + REG_WR32(qlt, 0x54, 0x5CD0);
8539 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8540 + buf += n; size_left -= n;
8541 + REG_WR32(qlt, 0x54, 0x5CE0);
8542 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8543 + buf += n; size_left -= n;
8544 + REG_WR32(qlt, 0x54, 0x5CF0);
8545 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8546 + buf += n; size_left -= n;
8547 +
8548 + n = (int)snprintf(buf, size_left, "\nRQ1 Array registers\n");
8549 + buf += n; size_left -= n;
8550 + REG_WR32(qlt, 0x54, 0x5D00);
8551 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8552 + buf += n; size_left -= n;
8553 + REG_WR32(qlt, 0x54, 0x5D10);
8554 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8555 + buf += n; size_left -= n;
8556 + REG_WR32(qlt, 0x54, 0x5D20);
8557 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8558 + buf += n; size_left -= n;
8559 + REG_WR32(qlt, 0x54, 0x5D30);
8560 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8561 + buf += n; size_left -= n;
8562 + REG_WR32(qlt, 0x54, 0x5D40);
8563 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8564 + buf += n; size_left -= n;
8565 + REG_WR32(qlt, 0x54, 0x5D50);
8566 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8567 + buf += n; size_left -= n;
8568 + REG_WR32(qlt, 0x54, 0x5D60);
8569 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8570 + buf += n; size_left -= n;
8571 + REG_WR32(qlt, 0x54, 0x5D70);
8572 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8573 + buf += n; size_left -= n;
8574 + REG_WR32(qlt, 0x54, 0x5D80);
8575 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8576 + buf += n; size_left -= n;
8577 + REG_WR32(qlt, 0x54, 0x5D90);
8578 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8579 + buf += n; size_left -= n;
8580 + REG_WR32(qlt, 0x54, 0x5DA0);
8581 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8582 + buf += n; size_left -= n;
8583 + REG_WR32(qlt, 0x54, 0x5DB0);
8584 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8585 + buf += n; size_left -= n;
8586 + REG_WR32(qlt, 0x54, 0x5DC0);
8587 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8588 + buf += n; size_left -= n;
8589 + REG_WR32(qlt, 0x54, 0x5DD0);
8590 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8591 + buf += n; size_left -= n;
8592 + REG_WR32(qlt, 0x54, 0x5DE0);
8593 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8594 + buf += n; size_left -= n;
8595 + REG_WR32(qlt, 0x54, 0x5DF0);
8596 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8597 + buf += n; size_left -= n;
8598 +
8599 + n = (int)snprintf(buf, size_left, "\nRP0 Array registers\n");
8600 + buf += n; size_left -= n;
8601 + REG_WR32(qlt, 0x54, 0x5E00);
8602 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8603 + buf += n; size_left -= n;
8604 + REG_WR32(qlt, 0x54, 0x5E10);
8605 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8606 + buf += n; size_left -= n;
8607 + REG_WR32(qlt, 0x54, 0x5E20);
8608 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8609 + buf += n; size_left -= n;
8610 + REG_WR32(qlt, 0x54, 0x5E30);
8611 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8612 + buf += n; size_left -= n;
8613 + REG_WR32(qlt, 0x54, 0x5E40);
8614 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8615 + buf += n; size_left -= n;
8616 + REG_WR32(qlt, 0x54, 0x5E50);
8617 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8618 + buf += n; size_left -= n;
8619 + REG_WR32(qlt, 0x54, 0x5E60);
8620 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8621 + buf += n; size_left -= n;
8622 + REG_WR32(qlt, 0x54, 0x5E70);
8623 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8624 + buf += n; size_left -= n;
8625 + REG_WR32(qlt, 0x54, 0x5E80);
8626 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8627 + buf += n; size_left -= n;
8628 + REG_WR32(qlt, 0x54, 0x5E90);
8629 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8630 + buf += n; size_left -= n;
8631 + REG_WR32(qlt, 0x54, 0x5EA0);
8632 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8633 + buf += n; size_left -= n;
8634 + REG_WR32(qlt, 0x54, 0x5EB0);
8635 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8636 + buf += n; size_left -= n;
8637 + REG_WR32(qlt, 0x54, 0x5EC0);
8638 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8639 + buf += n; size_left -= n;
8640 + REG_WR32(qlt, 0x54, 0x5ED0);
8641 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8642 + buf += n; size_left -= n;
8643 + REG_WR32(qlt, 0x54, 0x5EE0);
8644 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8645 + buf += n; size_left -= n;
8646 + REG_WR32(qlt, 0x54, 0x5EF0);
8647 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8648 + buf += n; size_left -= n;
8649 +
8650 + n = (int)snprintf(buf, size_left, "\nRP1 Array registers\n");
8651 + buf += n; size_left -= n;
8652 + REG_WR32(qlt, 0x54, 0x5F00);
8653 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8654 + buf += n; size_left -= n;
8655 + REG_WR32(qlt, 0x54, 0x5F10);
8656 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8657 + buf += n; size_left -= n;
8658 + REG_WR32(qlt, 0x54, 0x5F20);
8659 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8660 + buf += n; size_left -= n;
8661 + REG_WR32(qlt, 0x54, 0x5F30);
8662 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8663 + buf += n; size_left -= n;
8664 + REG_WR32(qlt, 0x54, 0x5F40);
8665 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8666 + buf += n; size_left -= n;
8667 + REG_WR32(qlt, 0x54, 0x5F50);
8668 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8669 + buf += n; size_left -= n;
8670 + REG_WR32(qlt, 0x54, 0x5F60);
8671 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8672 + buf += n; size_left -= n;
8673 + REG_WR32(qlt, 0x54, 0x5F70);
8674 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8675 + buf += n; size_left -= n;
8676 + REG_WR32(qlt, 0x54, 0x5F80);
8677 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8678 + buf += n; size_left -= n;
8679 + REG_WR32(qlt, 0x54, 0x5F90);
8680 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8681 + buf += n; size_left -= n;
8682 + REG_WR32(qlt, 0x54, 0x5FA0);
8683 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8684 + buf += n; size_left -= n;
8685 + REG_WR32(qlt, 0x54, 0x5FB0);
8686 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8687 + buf += n; size_left -= n;
8688 + REG_WR32(qlt, 0x54, 0x5FC0);
8689 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8690 + buf += n; size_left -= n;
8691 + REG_WR32(qlt, 0x54, 0x5FD0);
8692 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8693 + buf += n; size_left -= n;
8694 + REG_WR32(qlt, 0x54, 0x5FE0);
8695 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8696 + buf += n; size_left -= n;
8697 + REG_WR32(qlt, 0x54, 0x5FF0);
8698 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8699 + buf += n; size_left -= n;
8700 +
8701 + n = (int)snprintf(buf,
8702 + size_left, "\nQueue Control Registers\n");
8703 + buf += n; size_left -= n;
8704 + REG_WR32(qlt, 0x54, 0x7800);
8705 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8706 + buf += n; size_left -= n;
8707 + }
8708 +
5538 8709 /*
5539 8710 * Fibre buffer registers
5540 8711 */
5541 8712 n = (int)snprintf(buf, size_left, "\nFB hardware registers\n");
5542 8713 buf += n; size_left -= n;
5543 8714 REG_WR32(qlt, 0x54, 0x6000);
5544 8715 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5545 8716 buf += n; size_left -= n;
5546 8717 REG_WR32(qlt, 0x54, 0x6010);
5547 8718 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5548 8719 buf += n; size_left -= n;
5549 8720 REG_WR32(qlt, 0x54, 0x6020);
5550 8721 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5551 8722 buf += n; size_left -= n;
5552 8723 REG_WR32(qlt, 0x54, 0x6030);
5553 8724 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5554 8725 buf += n; size_left -= n;
5555 8726 REG_WR32(qlt, 0x54, 0x6040);
5556 8727 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5557 8728 buf += n; size_left -= n;
8729 + if (qlt->qlt_83xx_chip) {
8730 + REG_WR32(qlt, 0x54, 0x6060);
8731 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8732 + buf += n; size_left -= n;
8733 + REG_WR32(qlt, 0x54, 0x6070);
8734 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8735 + buf += n; size_left -= n;
8736 + }
5558 8737 REG_WR32(qlt, 0x54, 0x6100);
5559 8738 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5560 8739 buf += n; size_left -= n;
5561 8740 REG_WR32(qlt, 0x54, 0x6130);
5562 8741 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5563 8742 buf += n; size_left -= n;
5564 8743 REG_WR32(qlt, 0x54, 0x6150);
5565 8744 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5566 8745 buf += n; size_left -= n;
5567 8746 REG_WR32(qlt, 0x54, 0x6170);
5568 8747 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5569 8748 buf += n; size_left -= n;
5570 8749 REG_WR32(qlt, 0x54, 0x6190);
5571 8750 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5572 8751 buf += n; size_left -= n;
5573 8752 REG_WR32(qlt, 0x54, 0x61B0);
5574 8753 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5575 8754 buf += n; size_left -= n;
5576 - if (qlt->qlt_81xx_chip) {
8755 + if ((qlt->qlt_83xx_chip) || (qlt->qlt_81xx_chip)) {
5577 8756 REG_WR32(qlt, 0x54, 0x61C0);
5578 8757 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5579 8758 buf += n; size_left -= n;
5580 8759 }
5581 - if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip)) {
8760 + if (qlt->qlt_83xx_chip) {
8761 + REG_WR32(qlt, 0x54, 0x6530);
8762 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8763 + buf += n; size_left -= n;
8764 + REG_WR32(qlt, 0x54, 0x6540);
8765 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8766 + buf += n; size_left -= n;
8767 + REG_WR32(qlt, 0x54, 0x6550);
8768 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8769 + buf += n; size_left -= n;
8770 + REG_WR32(qlt, 0x54, 0x6560);
8771 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8772 + buf += n; size_left -= n;
8773 + REG_WR32(qlt, 0x54, 0x6570);
8774 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8775 + buf += n; size_left -= n;
8776 + REG_WR32(qlt, 0x54, 0x6580);
8777 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8778 + buf += n; size_left -= n;
8779 + REG_WR32(qlt, 0x54, 0x6590);
8780 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8781 + buf += n; size_left -= n;
8782 + REG_WR32(qlt, 0x54, 0x65A0);
8783 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8784 + buf += n; size_left -= n;
8785 + REG_WR32(qlt, 0x54, 0x65B0);
8786 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8787 + buf += n; size_left -= n;
8788 + REG_WR32(qlt, 0x54, 0x65C0);
8789 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8790 + buf += n; size_left -= n;
8791 + REG_WR32(qlt, 0x54, 0x65D0);
8792 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8793 + buf += n; size_left -= n;
8794 + REG_WR32(qlt, 0x54, 0x65E0);
8795 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8796 + buf += n; size_left -= n;
8797 + }
8798 + if ((qlt->qlt_25xx_chip) || (qlt->qlt_81xx_chip) ||
8799 + (qlt->qlt_83xx_chip)) {
5582 8800 REG_WR32(qlt, 0x54, 0x6F00);
5583 8801 n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
5584 8802 buf += n; size_left -= n;
5585 8803 }
5586 8804
8805 + if (qlt->qlt_83xx_chip) {
8806 + n = (int)snprintf(buf, size_left, "\nAT0 Array registers\n");
8807 + buf += n; size_left -= n;
8808 + REG_WR32(qlt, 0x54, 0x7080);
8809 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8810 + buf += n; size_left -= n;
8811 + REG_WR32(qlt, 0x54, 0x7090);
8812 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8813 + buf += n; size_left -= n;
8814 + REG_WR32(qlt, 0x54, 0x70A0);
8815 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8816 + buf += n; size_left -= n;
8817 + REG_WR32(qlt, 0x54, 0x70B0);
8818 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8819 + buf += n; size_left -= n;
8820 + REG_WR32(qlt, 0x54, 0x70C0);
8821 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8822 + buf += n; size_left -= n;
8823 + REG_WR32(qlt, 0x54, 0x70D0);
8824 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8825 + buf += n; size_left -= n;
8826 + REG_WR32(qlt, 0x54, 0x70E0);
8827 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8828 + buf += n; size_left -= n;
8829 + REG_WR32(qlt, 0x54, 0x70F0);
8830 + n = qlt_fwdump_dump_regs(qlt, buf, 0xc0, 16, size_left);
8831 + buf += n; size_left -= n;
8832 + }
8833 +
8834 + EL(qlt, "reset chip\n");
5587 8835 qlt->intr_sneak_counter = 10;
5588 8836 mutex_enter(&qlt->intr_lock);
8837 + if (qlt->qlt_mq_enabled) {
8838 + for (i = 1; i < qlt->qlt_queue_cnt; i++) {
8839 + mutex_enter(&qlt->mq_resp[i].mq_lock);
8840 + }
8841 + }
5589 8842 (void) qlt_reset_chip(qlt);
5590 8843 drv_usecwait(20);
5591 8844 qlt->intr_sneak_counter = 0;
8845 + if (qlt->qlt_mq_enabled) {
8846 + for (i = 1; i < qlt->qlt_queue_cnt; i++) {
8847 + mutex_exit(&qlt->mq_resp[i].mq_lock);
8848 + }
8849 + }
5592 8850 mutex_exit(&qlt->intr_lock);
8851 + EL(qlt, "reset chip, done\n");
5593 8852
5594 8853 /*
5595 8854 * Memory
5596 8855 */
5597 8856 n = (int)snprintf(buf, size_left, "\nCode RAM\n");
5598 8857 buf += n; size_left -= n;
5599 8858
5600 8859 addr = 0x20000;
5601 - endaddr = 0x22000;
8860 + endaddr = (qlt->qlt_83xx_chip) ? 0x22400 : 0x22000;
5602 8861 words_to_read = 0;
5603 8862 while (addr < endaddr) {
5604 8863 words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5605 8864 if ((words_to_read + addr) > endaddr) {
5606 8865 words_to_read = endaddr - addr;
5607 8866 }
5608 8867 if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5609 8868 QLT_SUCCESS) {
5610 8869 EL(qlt, "Error reading risc ram - CODE RAM status="
5611 8870 "%llxh\n", ret);
5612 8871 goto dump_fail;
5613 8872 }
5614 8873
5615 8874 n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5616 8875 buf += n; size_left -= n;
5617 8876
5618 8877 if (size_left < 100000) {
5619 8878 EL(qlt, "run out of space - CODE RAM size_left=%d\n",
5620 8879 size_left);
5621 8880 goto dump_ok;
5622 8881 }
5623 8882 addr += words_to_read;
5624 8883 }
5625 8884
5626 8885 n = (int)snprintf(buf, size_left, "\nExternal Memory\n");
5627 8886 buf += n; size_left -= n;
5628 8887
5629 8888 addr = 0x100000;
5630 8889 endaddr = (((uint32_t)(qlt->fw_endaddrhi)) << 16) | qlt->fw_endaddrlo;
5631 8890 endaddr++;
5632 8891 if (endaddr & 7) {
5633 8892 endaddr = (endaddr + 7) & 0xFFFFFFF8;
5634 8893 }
5635 8894
5636 8895 words_to_read = 0;
5637 8896 while (addr < endaddr) {
5638 8897 words_to_read = MBOX_DMA_MEM_SIZE >> 2;
5639 8898 if ((words_to_read + addr) > endaddr) {
5640 8899 words_to_read = endaddr - addr;
5641 8900 }
5642 8901 if ((ret = qlt_read_risc_ram(qlt, addr, words_to_read)) !=
5643 8902 QLT_SUCCESS) {
5644 8903 EL(qlt, "Error reading risc ram - EXT RAM status="
5645 8904 "%llxh\n", ret);
5646 8905 goto dump_fail;
5647 8906 }
5648 8907 n = qlt_dump_risc_ram(qlt, addr, words_to_read, buf, size_left);
5649 8908 buf += n; size_left -= n;
5650 8909 if (size_left < 100000) {
5651 8910 EL(qlt, "run out of space - EXT RAM\n");
5652 8911 goto dump_ok;
5653 8912 }
5654 8913 addr += words_to_read;
5655 8914 }
5656 8915
5657 8916 /*
|
↓ open down ↓ |
46 lines elided |
↑ open up ↑ |
5658 8917 * Label the end tag
5659 8918 */
5660 8919 n = (int)snprintf(buf, size_left, "[<==END] ISP Debug Dump\n");
5661 8920 buf += n; size_left -= n;
5662 8921
5663 8922 /*
5664 8923 * Queue dumping
5665 8924 */
5666 8925 n = (int)snprintf(buf, size_left, "\nRequest Queue\n");
5667 8926 buf += n; size_left -= n;
5668 - n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
5669 - REQUEST_QUEUE_ENTRIES, buf, size_left);
5670 - buf += n; size_left -= n;
5671 8927
5672 - n = (int)snprintf(buf, size_left, "\nPriority Queue\n");
5673 - buf += n; size_left -= n;
5674 - n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
5675 - PRIORITY_QUEUE_ENTRIES, buf, size_left);
5676 - buf += n; size_left -= n;
8928 + if (qlt->qlt_mq_enabled) {
8929 + for (i = 0; i < qlt->qlt_queue_cnt; i++) {
8930 + if (qlt->mq_req[i].queue_mem_mq_base_addr) {
8931 + n = (int)snprintf(buf, size_left,
8932 + "\nQueue %d:\n", i);
8933 + buf += n; size_left -= n;
8934 + n = qlt_dump_queue(qlt,
8935 + qlt->mq_req[i].queue_mem_mq_base_addr,
8936 + REQUEST_QUEUE_MQ_ENTRIES,
8937 + buf, size_left);
8938 + buf += n; size_left -= n;
8939 + }
8940 + }
8941 + } else {
8942 + n = (int)snprintf(buf, size_left, "\nQueue 0:\n");
8943 + buf += n; size_left -= n;
8944 + n = qlt_dump_queue(qlt,
8945 + qlt->queue_mem_ptr + REQUEST_QUEUE_OFFSET,
8946 + REQUEST_QUEUE_ENTRIES, buf, size_left);
8947 + buf += n; size_left -= n;
8948 + }
5677 8949
8950 + if (!qlt->qlt_83xx_chip) {
8951 + n = (int)snprintf(buf, size_left, "\nPriority Queue\n");
8952 + buf += n; size_left -= n;
8953 + n = qlt_dump_queue(qlt,
8954 + qlt->queue_mem_ptr + PRIORITY_QUEUE_OFFSET,
8955 + PRIORITY_QUEUE_ENTRIES, buf, size_left);
8956 + buf += n; size_left -= n;
8957 + }
8958 +
5678 8959 n = (int)snprintf(buf, size_left, "\nResponse Queue\n");
5679 8960 buf += n; size_left -= n;
5680 - n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
5681 - RESPONSE_QUEUE_ENTRIES, buf, size_left);
5682 - buf += n; size_left -= n;
5683 8961
5684 - n = (int)snprintf(buf, size_left, "\nATIO queue\n");
8962 + if (qlt->qlt_mq_enabled) {
8963 + for (i = 0; i < qlt->qlt_queue_cnt; i++) {
8964 + if (qlt->mq_resp[i].queue_mem_mq_base_addr) {
8965 + n = (int)snprintf(buf, size_left,
8966 + "\nQueue %d:\n", i);
8967 + buf += n; size_left -= n;
8968 + n = qlt_dump_queue(qlt,
8969 + qlt->mq_resp[i].queue_mem_mq_base_addr,
8970 + RESPONSE_QUEUE_MQ_ENTRIES,
8971 + buf, size_left);
8972 + buf += n; size_left -= n;
8973 + }
8974 + }
8975 + } else {
8976 + n = (int)snprintf(buf, size_left, "\nQueue 0:\n");
8977 + buf += n; size_left -= n;
8978 + n = qlt_dump_queue(qlt,
8979 + qlt->queue_mem_ptr + RESPONSE_QUEUE_OFFSET,
8980 + RESPONSE_QUEUE_ENTRIES, buf, size_left);
8981 + buf += n; size_left -= n;
8982 + }
8983 +
8984 + n = (int)snprintf(buf, size_left, "\nATIO Queue\nQueue 0:\n");
5685 8985 buf += n; size_left -= n;
5686 8986 n = qlt_dump_queue(qlt, qlt->queue_mem_ptr + ATIO_QUEUE_OFFSET,
5687 8987 ATIO_QUEUE_ENTRIES, buf, size_left);
5688 8988 buf += n; size_left -= n;
5689 8989
5690 8990 /*
5691 8991 * Label dump reason
5692 8992 */
5693 - n = (int)snprintf(buf, size_left, "\nFirmware dump reason: %s-%s\n",
5694 - qlt->qlt_port_alias, ssci->st_additional_info);
8993 + if (ssci != NULL) {
8994 + n = (int)snprintf(buf, size_left,
8995 + "\nFirmware dump reason: %s-%s\n",
8996 + qlt->qlt_port_alias, ssci->st_additional_info);
8997 + } else {
8998 + n = (int)snprintf(buf, size_left,
8999 + "\nFirmware dump reason: %s-%s\n",
 9000 +		    qlt->qlt_port_alias, "no additional info");
9001 + }
5695 9002 buf += n; size_left -= n;
5696 9003
5697 9004 dump_ok:
5698 9005 EL(qlt, "left-%d\n", size_left);
5699 -
5700 9006 mutex_enter(&qlt->qlt_ioctl_lock);
5701 9007 qlt->qlt_ioctl_flags &=
5702 9008 ~(QLT_FWDUMP_INPROGRESS | QLT_FWDUMP_FETCHED_BY_USER);
5703 9009 qlt->qlt_ioctl_flags |= QLT_FWDUMP_ISVALID;
5704 9010 mutex_exit(&qlt->qlt_ioctl_lock);
5705 9011 return (FCT_SUCCESS);
5706 9012
5707 9013 dump_fail:
5708 9014 EL(qlt, "dump not done\n");
5709 9015 mutex_enter(&qlt->qlt_ioctl_lock);
5710 9016 qlt->qlt_ioctl_flags &= QLT_IOCTL_FLAG_MASK;
5711 9017 mutex_exit(&qlt->qlt_ioctl_lock);
5712 9018 return (FCT_FAILURE);
5713 9019 }
5714 9020
5715 9021 static int
5716 9022 qlt_fwdump_dump_regs(qlt_state_t *qlt, caddr_t buf, int startaddr, int count,
5717 9023 uint_t size_left)
5718 9024 {
5719 9025 int i;
5720 9026 int n;
5721 9027 char c = ' ';
5722 9028
5723 9029 for (i = 0, n = 0; i < count; i++) {
5724 9030 if ((i + 1) & 7) {
5725 9031 c = ' ';
5726 9032 } else {
5727 9033 c = '\n';
5728 9034 }
5729 9035 n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5730 9036 "%08x%c", REG_RD32(qlt, startaddr + (i << 2)), c));
5731 9037 }
5732 9038 return (n);
5733 9039 }
5734 9040
5735 9041 static int
5736 9042 qlt_dump_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
5737 9043 caddr_t buf, uint_t size_left)
5738 9044 {
5739 9045 int i;
5740 9046 int n;
5741 9047 char c = ' ';
5742 9048 uint32_t *ptr;
5743 9049
5744 9050 ptr = (uint32_t *)((caddr_t)qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
5745 9051 for (i = 0, n = 0; i < words; i++) {
5746 9052 if ((i & 7) == 0) {
5747 9053 n = (int)(n + (int)snprintf(&buf[n],
5748 9054 (uint_t)(size_left - n), "%08x: ", addr + i));
5749 9055 }
5750 9056 if ((i + 1) & 7) {
5751 9057 c = ' ';
5752 9058 } else {
5753 9059 c = '\n';
5754 9060 }
5755 9061 n = (int)(n + (int)snprintf(&buf[n], (uint_t)(size_left - n),
5756 9062 "%08x%c", ptr[i], c));
5757 9063 }
5758 9064 return (n);
5759 9065 }
5760 9066
5761 9067 static int
5762 9068 qlt_dump_queue(qlt_state_t *qlt, caddr_t qadr, int entries, caddr_t buf,
5763 9069 uint_t size_left)
5764 9070 {
5765 9071 int i;
5766 9072 int n;
5767 9073 char c = ' ';
5768 9074 int words;
5769 - uint16_t *ptr;
5770 - uint16_t w;
9075 + uint32_t *ptr;
9076 + uint32_t w;
5771 9077
5772 - words = entries * 32;
5773 - ptr = (uint16_t *)qadr;
9078 + words = entries * 16;
9079 + ptr = (uint32_t *)qadr;
5774 9080 for (i = 0, n = 0; i < words; i++) {
5775 9081 if ((i & 7) == 0) {
5776 9082 n = (int)(n + (int)snprintf(&buf[n],
5777 9083 (uint_t)(size_left - n), "%05x: ", i));
5778 9084 }
5779 9085 if ((i + 1) & 7) {
5780 9086 c = ' ';
5781 9087 } else {
5782 9088 c = '\n';
5783 9089 }
5784 - w = QMEM_RD16(qlt, &ptr[i]);
5785 - n = (int)(n + (int)snprintf(&buf[n], (size_left - n), "%04x%c",
9090 + w = QMEM_RD32(qlt, &ptr[i]);
9091 + n = (int)(n + (int)snprintf(&buf[n], (size_left - n), "%08x%c",
5786 9092 w, c));
5787 9093 }
5788 9094 return (n);
5789 9095 }
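The rewritten qlt_dump_queue() above reads queue memory as 32-bit words instead of 16-bit words. Assuming the usual 64-byte IOCB entry size (IOCB_SIZE, as used elsewhere in this file), the per-entry coverage is unchanged; a minimal sketch of the arithmetic:

	/* Illustration only, assuming a 64-byte IOCB_SIZE. */
	#define	DUMP_DWORDS_PER_ENTRY	(IOCB_SIZE / 4)	/* 16 32-bit reads, new code */
	#define	DUMP_WORDS_PER_ENTRY	(IOCB_SIZE / 2)	/* 32 16-bit reads, old code */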
5790 9096
5791 9097 /*
 5792 9098  * Only called by debug dump. Interrupts are disabled and mailboxes along
 5793 9099  * with mailbox ram are available.
5794 9100 * Copy data from RISC RAM to system memory
5795 9101 */
5796 9102 static fct_status_t
5797 9103 qlt_read_risc_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words)
5798 9104 {
5799 9105 uint64_t da;
5800 9106 fct_status_t ret;
5801 9107
5802 9108 REG_WR16(qlt, REG_MBOX(0), MBC_DUMP_RAM_EXTENDED);
5803 9109 da = qlt->queue_mem_cookie.dmac_laddress;
5804 9110 da += MBOX_DMA_MEM_OFFSET;
5805 9111
5806 9112 /* System destination address */
5807 9113 REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
5808 9114 REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
5809 9115 REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
5810 9116 REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
5811 9117
5812 9118 /* Length */
5813 9119 REG_WR16(qlt, REG_MBOX(5), LSW(words));
5814 9120 REG_WR16(qlt, REG_MBOX(4), MSW(words));
5815 9121
5816 9122 /* RISC source address */
5817 9123 REG_WR16(qlt, REG_MBOX(1), LSW(addr));
5818 9124 REG_WR16(qlt, REG_MBOX(8), MSW(addr));
5819 9125
5820 9126 ret = qlt_raw_mailbox_command(qlt);
5821 9127 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
5822 9128 if (ret == QLT_SUCCESS) {
5823 9129 (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
5824 9130 MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
5825 9131 } else {
5826 - EL(qlt, "qlt_raw_mailbox_command=ch status=%llxh\n", ret);
9132 + EL(qlt, "qlt_raw_mailbox_command=0x0ch status=%llxh\n", ret);
5827 9133 }
5828 9134 return (ret);
5829 9135 }
5830 9136
9137 +static fct_status_t
9138 +qlt_mbx_mpi_ram(qlt_state_t *qlt, uint32_t addr, uint32_t words,
9139 + uint16_t direction)
9140 +{
9141 + uint64_t da;
9142 + fct_status_t ret;
9143 +
9144 + REG_WR16(qlt, REG_MBOX(0), MBC_MPI_RAM);
9145 + da = qlt->queue_mem_cookie.dmac_laddress;
9146 + da += MBOX_DMA_MEM_OFFSET;
9147 +
9148 + /* System destination address */
9149 + REG_WR16(qlt, REG_MBOX(3), LSW(LSD(da)));
9150 + REG_WR16(qlt, REG_MBOX(2), MSW(LSD(da)));
9151 + REG_WR16(qlt, REG_MBOX(7), LSW(MSD(da)));
9152 + REG_WR16(qlt, REG_MBOX(6), MSW(MSD(da)));
9153 +
9154 + /* Length */
9155 + REG_WR16(qlt, REG_MBOX(5), LSW(words));
9156 + REG_WR16(qlt, REG_MBOX(4), MSW(words));
9157 +
9158 + /* RISC source address */
9159 + REG_WR16(qlt, REG_MBOX(1), LSW(addr));
9160 + REG_WR16(qlt, REG_MBOX(8), MSW(addr));
9161 +
9162 + REG_WR16(qlt, REG_MBOX(9), direction);
9163 + ret = qlt_raw_mailbox_command(qlt);
9164 + REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
9165 + if (ret == QLT_SUCCESS) {
9166 + (void) ddi_dma_sync(qlt->queue_mem_dma_handle,
9167 + MBOX_DMA_MEM_OFFSET, words << 2, DDI_DMA_SYNC_FORCPU);
9168 + } else {
9169 + EL(qlt, "qlt_raw_mailbox_command=0x05h status=%llxh\n", ret);
9170 + }
9171 + return (ret);
9172 +}
9173 +
5831 9174 static void
5832 9175 qlt_verify_fw(qlt_state_t *qlt)
5833 9176 {
5834 9177 caddr_t req;
9178 + uint16_t qi = 0;
9179 +
5835 9180 /* Just put it on the request queue */
5836 - mutex_enter(&qlt->req_lock);
5837 - req = qlt_get_req_entries(qlt, 1);
9181 + mutex_enter(&qlt->mq_req[qi].mq_lock);
9182 + req = qlt_get_req_entries(qlt, 1, qi);
5838 9183 if (req == NULL) {
5839 - mutex_exit(&qlt->req_lock);
5840 - /* XXX handle this */
9184 + EL(qlt, "req = NULL\n");
9185 + mutex_exit(&qlt->mq_req[qi].mq_lock);
5841 9186 return;
5842 9187 }
5843 9188
5844 9189 bzero(req, IOCB_SIZE);
5845 9190
5846 9191 req[0] = 0x1b;
5847 9192 req[1] = 1;
5848 9193
5849 9194 QMEM_WR32(qlt, (&req[4]), 0xffffffff);
5850 9195 QMEM_WR16(qlt, (&req[0x8]), 1); /* options - don't update */
5851 9196 QMEM_WR32(qlt, (&req[0x14]), 0x80010300);
5852 9197
5853 - qlt_submit_req_entries(qlt, 1);
5854 - mutex_exit(&qlt->req_lock);
9198 + qlt_submit_req_entries(qlt, 1, qi);
9199 + mutex_exit(&qlt->mq_req[qi].mq_lock);
5855 9200 }
5856 9201
9202 +static fct_status_t
9203 +qlt_mq_destroy(qlt_state_t *qlt)
9204 +{
9205 + int idx;
9206 +
9207 + for (idx = 1; idx < qlt->qlt_queue_cnt; idx++) {
9208 + (void) ddi_dma_unbind_handle(
9209 + qlt->mq_req[idx].queue_mem_mq_dma_handle);
9210 + ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9211 + ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9212 + (void) ddi_dma_unbind_handle(
9213 + qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9214 + ddi_dma_mem_free(&qlt->mq_resp[idx].queue_mem_mq_acc_handle);
9215 + ddi_dma_free_handle(&qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9216 + }
9217 + return (QLT_SUCCESS);
9218 +}
9219 +
9220 +static fct_status_t
9221 +qlt_mq_create(qlt_state_t *qlt, int idx)
9222 +{
9223 + ddi_device_acc_attr_t dev_acc_attr;
9224 + size_t discard;
9225 + uint_t ncookies;
9226 +
9227 + dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
9228 + dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
9229 + dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
9230 +
9231 + /*
9232 + * MQ Request queue
9233 + */
9234 + if (ddi_dma_alloc_handle(qlt->dip, &qlt_queue_dma_attr_mq_req1,
9235 + DDI_DMA_SLEEP, 0,
9236 + &qlt->mq_req[idx].queue_mem_mq_dma_handle) != DDI_SUCCESS) {
9237 + return (QLT_FAILURE);
9238 + }
9239 + if (ddi_dma_mem_alloc(qlt->mq_req[idx].queue_mem_mq_dma_handle,
9240 + REQUEST_QUEUE_MQ_SIZE,
9241 + &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9242 + &qlt->mq_req[idx].queue_mem_mq_base_addr, &discard,
9243 + &qlt->mq_req[idx].queue_mem_mq_acc_handle) != DDI_SUCCESS) {
9244 + ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9245 + return (QLT_FAILURE);
9246 + }
9247 + if (ddi_dma_addr_bind_handle(
9248 + qlt->mq_req[idx].queue_mem_mq_dma_handle,
9249 + NULL, qlt->mq_req[idx].queue_mem_mq_base_addr,
9250 + REQUEST_QUEUE_MQ_SIZE,
9251 + DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9252 + &qlt->mq_req[idx].queue_mem_mq_cookie,
9253 + &ncookies) != DDI_SUCCESS) {
9254 + ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9255 + ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9256 + return (QLT_FAILURE);
9257 + }
9258 + if (ncookies != 1) {
9259 + (void) ddi_dma_unbind_handle(
9260 + qlt->mq_req[idx].queue_mem_mq_dma_handle);
9261 + ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9262 + ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9263 + return (QLT_FAILURE);
9264 + }
9265 +
9266 + /*
9267 + * MQ Response queue
9268 + */
9269 + if (ddi_dma_alloc_handle(qlt->dip, &qlt_queue_dma_attr_mq_rsp1,
9270 + DDI_DMA_SLEEP, 0,
9271 + &qlt->mq_resp[idx].queue_mem_mq_dma_handle) != DDI_SUCCESS) {
9272 + (void) ddi_dma_unbind_handle(
9273 + qlt->mq_req[idx].queue_mem_mq_dma_handle);
9274 + ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9275 + ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9276 + return (QLT_FAILURE);
9277 + }
9278 + if (ddi_dma_mem_alloc(qlt->mq_resp[idx].queue_mem_mq_dma_handle,
9279 + RESPONSE_QUEUE_MQ_SIZE,
9280 + &dev_acc_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9281 + &qlt->mq_resp[idx].queue_mem_mq_base_addr, &discard,
9282 + &qlt->mq_resp[idx].queue_mem_mq_acc_handle) != DDI_SUCCESS) {
9283 + (void) ddi_dma_unbind_handle(
9284 + qlt->mq_req[idx].queue_mem_mq_dma_handle);
9285 + ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9286 + ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9287 + ddi_dma_free_handle(&qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9288 + return (QLT_FAILURE);
9289 + }
9290 + if (ddi_dma_addr_bind_handle(
9291 + qlt->mq_resp[idx].queue_mem_mq_dma_handle,
9292 + NULL, qlt->mq_resp[idx].queue_mem_mq_base_addr,
9293 + RESPONSE_QUEUE_MQ_SIZE,
9294 + DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9295 + &qlt->mq_resp[idx].queue_mem_mq_cookie,
9296 + &ncookies) != DDI_SUCCESS) {
9297 + (void) ddi_dma_unbind_handle(
9298 + qlt->mq_req[idx].queue_mem_mq_dma_handle);
9299 + ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9300 + ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9301 + ddi_dma_mem_free(&qlt->mq_resp[idx].queue_mem_mq_acc_handle);
9302 + ddi_dma_free_handle(&qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9303 + return (QLT_FAILURE);
9304 + }
9305 + if (ncookies != 1) {
9306 + (void) ddi_dma_unbind_handle(
9307 + qlt->mq_req[idx].queue_mem_mq_dma_handle);
9308 + ddi_dma_mem_free(&qlt->mq_req[idx].queue_mem_mq_acc_handle);
9309 + ddi_dma_free_handle(&qlt->mq_req[idx].queue_mem_mq_dma_handle);
9310 + (void) ddi_dma_unbind_handle(
9311 + qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9312 + ddi_dma_mem_free(&qlt->mq_resp[idx].queue_mem_mq_acc_handle);
9313 + ddi_dma_free_handle(&qlt->mq_resp[idx].queue_mem_mq_dma_handle);
9314 + return (QLT_FAILURE);
9315 + }
9316 +
9317 + qlt->mq_req[idx].mq_ptr = qlt->mq_req[idx].queue_mem_mq_base_addr;
9318 + qlt->mq_req[idx].mq_ndx_to_fw = qlt->mq_req[idx].mq_ndx_from_fw = 0;
9319 + qlt->mq_req[idx].mq_available = REQUEST_QUEUE_MQ_ENTRIES - 1;
9320 + bzero(qlt->mq_req[idx].mq_ptr, REQUEST_QUEUE_MQ_SIZE);
9321 +
9322 + qlt->mq_resp[idx].mq_ptr = qlt->mq_resp[idx].queue_mem_mq_base_addr;
9323 + qlt->mq_resp[idx].mq_ndx_to_fw = qlt->mq_resp[idx].mq_ndx_from_fw = 0;
9324 + bzero(qlt->mq_resp[idx].mq_ptr, RESPONSE_QUEUE_MQ_SIZE);
9325 +
9326 + return (QLT_SUCCESS);
9327 +}
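qlt_mq_create() above repeats the DDI alloc/bind sequence for the request and response rings and unwinds by hand in every error path. A minimal sketch (not part of this change) of the same sequence with goto-based unwinding; qlt_mq_bind_queue() is a hypothetical helper name, and the DDI calls and flags are the ones already used above:

	static fct_status_t
	qlt_mq_bind_queue(dev_info_t *dip, ddi_dma_attr_t *dma_attr,
	    ddi_device_acc_attr_t *acc_attr, size_t len,
	    ddi_dma_handle_t *dmahp, ddi_acc_handle_t *acchp,
	    caddr_t *basep, ddi_dma_cookie_t *cookiep)
	{
		uint_t ncookies;
		size_t discard;

		if (ddi_dma_alloc_handle(dip, dma_attr, DDI_DMA_SLEEP, 0,
		    dmahp) != DDI_SUCCESS)
			return (QLT_FAILURE);
		if (ddi_dma_mem_alloc(*dmahp, len, acc_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP, 0, basep, &discard, acchp) != DDI_SUCCESS)
			goto free_handle;
		if (ddi_dma_addr_bind_handle(*dmahp, NULL, *basep, len,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
		    cookiep, &ncookies) != DDI_SUCCESS)
			goto free_mem;
		if (ncookies != 1)
			goto unbind;
		return (QLT_SUCCESS);

	unbind:
		(void) ddi_dma_unbind_handle(*dmahp);
	free_mem:
		ddi_dma_mem_free(acchp);
	free_handle:
		ddi_dma_free_handle(dmahp);
		return (QLT_FAILURE);
	}

With such a helper, each ring becomes a single call, and the only remaining cleanup in qlt_mq_create() would be tearing down the request ring when the response ring fails.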
9328 +
5857 9329 static void
5858 9330 qlt_handle_verify_fw_completion(qlt_state_t *qlt, uint8_t *rsp)
5859 9331 {
5860 9332 uint16_t status;
5861 - char info[QLT_INFO_LEN];
9333 + char info[80];
5862 9334
5863 9335 status = QMEM_RD16(qlt, rsp+8);
5864 9336 if (status != 0) {
5865 - (void) snprintf(info, sizeof (info),
5866 - "qlt_handle_verify_fw_completion: "
9337 + (void) snprintf(info, 80, "qlt_handle_verify_fw_completion: "
5867 9338 "status:%x, rsp:%p", status, (void *)rsp);
5868 9339 if (status == 3) {
5869 9340 uint16_t error_code;
5870 9341
5871 9342 error_code = QMEM_RD16(qlt, rsp+0xA);
5872 - (void) snprintf(info, sizeof (info),
5873 - "qlt_handle_verify_fw_completion: error code:%x",
5874 - error_code);
9343 + (void) snprintf(info, 80, "qlt_handle_verify_fw_"
9344 + "completion: error code:%x", error_code);
5875 9345 }
5876 9346 }
5877 9347 }
5878 9348
5879 9349 /*
5880 9350 * qlt_el_trace_desc_ctor - Construct an extended logging trace descriptor.
5881 9351 *
5882 9352 * Input: Pointer to the adapter state structure.
5883 9353 * Returns: Success or Failure.
5884 9354 * Context: Kernel context.
5885 9355 */
5886 9356 static int
5887 9357 qlt_el_trace_desc_ctor(qlt_state_t *qlt)
5888 9358 {
5889 - int rval = DDI_SUCCESS;
9359 + qlt_trace_entry_t *entry;
9360 + size_t maxsize;
5890 9361
5891 - qlt->el_trace_desc = (qlt_el_trace_desc_t *)
5892 - kmem_zalloc(sizeof (qlt_el_trace_desc_t), KM_SLEEP);
9362 + qlt->qlt_trace_desc =
9363 + (qlt_trace_desc_t *)kmem_zalloc(
9364 + sizeof (qlt_trace_desc_t), KM_SLEEP);
5893 9365
5894 - if (qlt->el_trace_desc == NULL) {
5895 - cmn_err(CE_WARN, "qlt(%d): can't construct trace descriptor",
5896 - qlt->instance);
5897 - rval = DDI_FAILURE;
5898 - } else {
5899 - qlt->el_trace_desc->next = 0;
5900 - qlt->el_trace_desc->trace_buffer =
5901 - (char *)kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
9366 + qlt->qlt_log_entries = QL_LOG_ENTRIES;
9367 + maxsize = qlt->qlt_log_entries * sizeof (qlt_trace_entry_t);
9368 + entry = kmem_zalloc(maxsize, KM_SLEEP);
5902 9369
5903 - if (qlt->el_trace_desc->trace_buffer == NULL) {
5904 - cmn_err(CE_WARN, "qlt(%d): can't get trace buffer",
5905 - qlt->instance);
5906 - kmem_free(qlt->el_trace_desc,
5907 - sizeof (qlt_el_trace_desc_t));
5908 - qlt->el_trace_desc = NULL;
5909 - rval = DDI_FAILURE;
5910 - } else {
5911 - qlt->el_trace_desc->trace_buffer_size =
5912 - EL_TRACE_BUF_SIZE;
5913 - mutex_init(&qlt->el_trace_desc->mutex, NULL,
5914 - MUTEX_DRIVER, NULL);
5915 - }
5916 - }
9370 + mutex_init(&qlt->qlt_trace_desc->mutex, NULL,
9371 + MUTEX_DRIVER, NULL);
5917 9372
5918 - return (rval);
9373 + qlt->qlt_trace_desc->trace_buffer = entry;
9374 + qlt->qlt_trace_desc->trace_buffer_size = maxsize;
9375 + qlt->qlt_trace_desc->nindex = 0;
9376 +
9377 + qlt->qlt_trace_desc->nentries = qlt->qlt_log_entries;
9378 + qlt->qlt_trace_desc->start = qlt->qlt_trace_desc->end = 0;
9379 + qlt->qlt_trace_desc->csize = 0;
9380 + qlt->qlt_trace_desc->count = 0;
9381 +
9382 + return (DDI_SUCCESS);
5919 9383 }
5920 9384
5921 9385 /*
5922 9386 * qlt_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
5923 9387 *
5924 9388 * Input: Pointer to the adapter state structure.
5925 9389 * Returns: Success or Failure.
5926 9390 * Context: Kernel context.
5927 9391 */
5928 9392 static int
5929 9393 qlt_el_trace_desc_dtor(qlt_state_t *qlt)
5930 9394 {
5931 9395 int rval = DDI_SUCCESS;
5932 9396
5933 - if (qlt->el_trace_desc == NULL) {
5934 - cmn_err(CE_WARN, "qlt(%d): can't destroy el trace descriptor",
5935 - qlt->instance);
5936 - rval = DDI_FAILURE;
5937 - } else {
5938 - if (qlt->el_trace_desc->trace_buffer != NULL) {
5939 - kmem_free(qlt->el_trace_desc->trace_buffer,
5940 - qlt->el_trace_desc->trace_buffer_size);
9397 + if (qlt->qlt_trace_desc != NULL) {
9398 + if (qlt->qlt_trace_desc->trace_buffer != NULL) {
9399 + kmem_free(qlt->qlt_trace_desc->trace_buffer,
9400 + qlt->qlt_trace_desc->trace_buffer_size);
5941 9401 }
5942 - mutex_destroy(&qlt->el_trace_desc->mutex);
5943 - kmem_free(qlt->el_trace_desc, sizeof (qlt_el_trace_desc_t));
5944 - qlt->el_trace_desc = NULL;
9402 + mutex_destroy(&qlt->qlt_trace_desc->mutex);
9403 + kmem_free(qlt->qlt_trace_desc, sizeof (qlt_trace_desc_t));
5945 9404 }
5946 9405
5947 9406 return (rval);
5948 9407 }
5949 9408
5950 9409 /*
5951 9410 * qlt_el_msg
5952 9411 * Extended logging message
5953 9412 *
5954 9413 * Input:
5955 9414 * qlt: adapter state pointer.
5956 9415 * fn: function name.
5957 9416 * ce: level
5958 9417 * ...: Variable argument list.
5959 9418 *
5960 9419 * Context:
5961 9420 * Kernel/Interrupt context.
5962 9421 */
5963 9422 void
5964 9423 qlt_el_msg(qlt_state_t *qlt, const char *fn, int ce, ...)
5965 9424 {
5966 - char *s, *fmt = 0, *fmt1 = 0;
5967 - char fmt2[EL_BUFFER_RESERVE];
5968 - int rval, tmp;
5969 - int tracing = 0;
5970 - va_list vl;
9425 + char *s, *fmt = 0, *fmt1 = 0;
5971 9426
5972 - /* Tracing is the default but it can be disabled. */
5973 - if ((rval = qlt_validate_trace_desc(qlt)) == DDI_SUCCESS) {
5974 - tracing = 1;
9427 + /*
 9428 +	 * EL_BUFFER_RESERVE (256) is the maximum number of bytes
 9429 +	 * that can be collected for one driver log message.
 9430 +	 * Add 3 more bytes for safe manipulation.
9431 + */
9432 + char buf[EL_BUFFER_RESERVE + 3];
9433 + char buf1[QL_LOG_LENGTH];
9434 + size_t tmp;
9435 + size_t rval, rval1;
9436 + va_list vl;
9437 + qlt_trace_desc_t *desc = qlt->qlt_trace_desc;
9438 + qlt_trace_entry_t *entry;
9439 + uint32_t cindex;
9440 + timespec_t time;
9441 + uint32_t count;
9442 + size_t left;
5975 9443
5976 - mutex_enter(&qlt->el_trace_desc->mutex);
9444 + (void) bzero((void *)&buf[0], EL_BUFFER_RESERVE + 3);
9445 + fmt1 = &buf[0];
5977 9446
9447 + TRACE_BUFFER_LOCK(qlt);
9448 +
9449 + /* locate the entry to be filled out */
9450 + cindex = desc->nindex;
9451 + entry = &desc->trace_buffer[cindex];
9452 +
9453 + count = desc->count;
9454 +
9455 + desc->end = desc->nindex;
9456 + desc->nindex++;
9457 + if (desc->nindex == desc->nentries) {
9458 + desc->nindex = 0;
9459 + }
9460 +
9461 + if (desc->csize < desc->nentries) {
9462 + desc->csize ++;
9463 + } else {
5978 9464 /*
5979 - * Ensure enough space for the string. Wrap to
5980 - * start when default message allocation size
5981 - * would overrun the end.
 9465 +		 * once wrapped, csize stays fixed,
 9466 +		 * so the start point has to be advanced
5982 9467 */
5983 - if ((qlt->el_trace_desc->next + EL_BUFFER_RESERVE) >=
5984 - qlt->el_trace_desc->trace_buffer_size) {
5985 - fmt = qlt->el_trace_desc->trace_buffer;
5986 - qlt->el_trace_desc->next = 0;
5987 - } else {
5988 - fmt = qlt->el_trace_desc->trace_buffer +
5989 - qlt->el_trace_desc->next;
5990 - }
9468 + desc->start = desc->nindex;
5991 9469 }
5992 9470
5993 - /* if no buffer use the stack */
5994 - if (fmt == NULL) {
5995 - fmt = fmt2;
5996 - }
9471 + gethrestime(&time);
5997 9472
5998 - va_start(vl, ce);
9473 + rval = snprintf(fmt1, (size_t)EL_BUFFER_RESERVE,
9474 + QL_BANG "%d=>QEL %s(%d,%d):: %s, ", count, QL_NAME,
9475 + qlt->instance, 0, fn);
5999 9476
6000 - s = va_arg(vl, char *);
9477 + rval1 = rval;
6001 9478
6002 - rval = (int)snprintf(fmt, (size_t)EL_BUFFER_RESERVE,
6003 - "QEL qlt(%d): %s, ", qlt->instance, fn);
6004 - fmt1 = fmt + rval;
6005 - tmp = (int)vsnprintf(fmt1,
9479 + va_start(vl, ce);
9480 + s = va_arg(vl, char *);
9481 + fmt = fmt1 + rval;
9482 + tmp = vsnprintf(fmt,
6006 9483 (size_t)(uint32_t)((int)EL_BUFFER_RESERVE - rval), s, vl);
9484 + va_end(vl);
9485 +
6007 9486 rval += tmp;
9487 + if (rval > QL_LOG_LENGTH - 1) {
9488 + left = rval - (QL_LOG_LENGTH - 1);
6008 9489
6009 - /*
6010 - * Calculate the offset where the next message will go,
6011 - * skipping the NULL.
6012 - */
6013 - if (tracing) {
6014 - uint16_t next = (uint16_t)(rval += 1);
6015 - qlt->el_trace_desc->next += next;
6016 - mutex_exit(&qlt->el_trace_desc->mutex);
6017 - }
9490 + /* store the remaining string */
9491 + (void) strncpy(buf1, fmt1 + (QL_LOG_LENGTH - 1), left);
9492 + (void) strncpy(entry->buf, fmt1, (QL_LOG_LENGTH - 1));
9493 + entry->buf[QL_LOG_LENGTH - 1] = '\n';
6018 9494
6019 - if (enable_extended_logging) {
6020 - cmn_err(ce, fmt);
6021 - }
9495 + bcopy((void *)&time, (void *)&entry->hs_time,
9496 + sizeof (timespec_t));
6022 9497
6023 - va_end(vl);
6024 -}
9498 + /*
9499 + * remaining msg will be stored in the nex entry
9500 + * with same timestamp and same sequence number
9501 + */
9502 + cindex = desc->nindex;
9503 + entry = &desc->trace_buffer[cindex];
6025 9504
6026 -/*
6027 - * qlt_dump_el_trace_buffer
6028 - * Outputs extended logging trace buffer.
6029 - *
6030 - * Input:
6031 - * qlt: adapter state pointer.
6032 - */
6033 -void
6034 -qlt_dump_el_trace_buffer(qlt_state_t *qlt)
6035 -{
6036 - char *dump_start = NULL;
6037 - char *dump_current = NULL;
6038 - char *trace_start;
6039 - char *trace_end;
6040 - int wrapped = 0;
6041 - int rval;
9505 + desc->end = desc->nindex;
9506 + desc->nindex++;
9507 + if (desc->nindex == desc->nentries) {
9508 + desc->nindex = 0;
9509 + }
6042 9510
6043 - mutex_enter(&qlt->el_trace_desc->mutex);
9511 + if (desc->csize < desc->nentries) {
 9512 +			desc->csize++;
9513 + } else {
9514 + desc->start = desc->nindex;
9515 + }
6044 9516
6045 - rval = qlt_validate_trace_desc(qlt);
6046 - if (rval != NULL) {
6047 - cmn_err(CE_CONT, "qlt(%d) Dump EL trace - invalid desc\n",
6048 - qlt->instance);
6049 - } else if ((dump_start = qlt_find_trace_start(qlt)) != NULL) {
6050 - dump_current = dump_start;
6051 - trace_start = qlt->el_trace_desc->trace_buffer;
6052 - trace_end = trace_start +
6053 - qlt->el_trace_desc->trace_buffer_size;
9517 + (void) strncpy(&entry->buf[0], fmt1, rval1);
9518 + (void) strncpy(&entry->buf[rval1], &buf1[0], left);
9519 + entry->buf[rval1 + left] = 0;
6054 9520
6055 - cmn_err(CE_CONT, "qlt(%d) Dump EL trace - start %p %p\n",
6056 - qlt->instance,
6057 - (void *)dump_start, (void *)trace_start);
9521 + bcopy((void *)&time, (void *)&entry->hs_time,
9522 + sizeof (timespec_t));
6058 9523
6059 - while (((uintptr_t)dump_current - (uintptr_t)trace_start) <=
6060 - (uintptr_t)qlt->el_trace_desc->trace_buffer_size) {
6061 - /* Show it... */
6062 - cmn_err(CE_CONT, "%p - %s", (void *)dump_current,
6063 - dump_current);
6064 - /* Make the next the current */
6065 - dump_current += (strlen(dump_current) + 1);
6066 - /* check for wrap */
6067 - if ((dump_current + EL_BUFFER_RESERVE) >= trace_end) {
6068 - dump_current = trace_start;
6069 - wrapped = 1;
6070 - } else if (wrapped) {
6071 - /* Don't go past next. */
6072 - if ((trace_start + qlt->el_trace_desc->next) <=
6073 - dump_current) {
6074 - break;
6075 - }
6076 - } else if (*dump_current == NULL) {
6077 - break;
6078 - }
9524 + if (qlt->qlt_eel_level == 1) {
9525 + cmn_err(ce, fmt1);
6079 9526 }
6080 - }
6081 - mutex_exit(&qlt->el_trace_desc->mutex);
6082 -}
6083 9527
6084 -/*
6085 - * qlt_validate_trace_desc
6086 - * Ensures the extended logging trace descriptor is good.
6087 - *
6088 - * Input:
6089 - * qlt: adapter state pointer.
6090 - *
6091 - * Returns:
6092 - * ql local function return status code.
6093 - */
6094 -static int
6095 -qlt_validate_trace_desc(qlt_state_t *qlt)
6096 -{
6097 - int rval = DDI_SUCCESS;
9528 + desc->count++;
6098 9529
6099 - if (qlt->el_trace_desc == NULL) {
6100 - rval = DDI_FAILURE;
6101 - } else if (qlt->el_trace_desc->trace_buffer == NULL) {
6102 - rval = DDI_FAILURE;
9530 + TRACE_BUFFER_UNLOCK(qlt);
9531 + return;
6103 9532 }
6104 - return (rval);
6105 -}
6106 9533
6107 -/*
6108 - * qlt_find_trace_start
6109 - * Locate the oldest extended logging trace entry.
6110 - *
6111 - * Input:
6112 - * qlt: adapter state pointer.
6113 - *
6114 - * Returns:
6115 - * Pointer to a string.
6116 - *
6117 - * Context:
6118 - * Kernel/Interrupt context.
6119 - */
6120 -static char *
6121 -qlt_find_trace_start(qlt_state_t *qlt)
6122 -{
6123 - char *trace_start = 0;
6124 - char *trace_next = 0;
 9534 +	desc->count++;
9535 + bcopy((void *)&time, (void *)&entry->hs_time,
9536 + sizeof (timespec_t));
6125 9537
6126 - trace_next = qlt->el_trace_desc->trace_buffer +
6127 - qlt->el_trace_desc->next;
9538 + (void) strcpy(entry->buf, fmt1);
9539 + entry->buf[rval] = 0;
6128 9540
6129 - /*
6130 - * If the buffer has not wrapped next will point at a null so
6131 - * start is the beginning of the buffer. If next points at a char
6132 - * then we must traverse the buffer until a null is detected and
6133 - * that will be the beginning of the oldest whole object in the buffer
6134 - * which is the start.
6135 - */
9541 + TRACE_BUFFER_UNLOCK(qlt);
6136 9542
6137 - if ((trace_next + EL_BUFFER_RESERVE) >=
6138 - (qlt->el_trace_desc->trace_buffer +
6139 - qlt->el_trace_desc->trace_buffer_size)) {
6140 - trace_start = qlt->el_trace_desc->trace_buffer;
6141 - } else if (*trace_next != NULL) {
6142 - trace_start = trace_next + (strlen(trace_next) + 1);
6143 - } else {
6144 - trace_start = qlt->el_trace_desc->trace_buffer;
9543 + if (qlt->qlt_eel_level == 1) {
9544 + cmn_err(ce, fmt1);
6145 9545 }
6146 - return (trace_start);
6147 9546 }
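The extended-logging rewrite above stores messages in a fixed-size ring of qlt_trace_entry_t records. A minimal sketch of the index bookkeeping (illustration only; qlt_trace_ring_advance() is a hypothetical name, and the fields are the qlt_trace_desc_t members used above):

	static uint32_t
	qlt_trace_ring_advance(qlt_trace_desc_t *desc)
	{
		uint32_t slot = desc->nindex;	/* entry about to be filled */

		desc->end = desc->nindex;
		if (++desc->nindex == desc->nentries)
			desc->nindex = 0;		/* wrap to the first slot */

		if (desc->csize < desc->nentries)
			desc->csize++;			/* ring still filling up */
		else
			desc->start = desc->nindex;	/* oldest entry was overwritten */

		return (slot);
	}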
6148 9547
6149 -
6150 9548 static int
6151 9549 qlt_read_int_prop(qlt_state_t *qlt, char *prop, int defval)
6152 9550 {
6153 9551 return (ddi_getprop(DDI_DEV_T_ANY, qlt->dip,
6154 9552 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, prop, defval));
6155 9553 }
6156 9554
6157 9555 static int
6158 9556 qlt_read_string_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6159 9557 {
6160 9558 return (ddi_prop_lookup_string(DDI_DEV_T_ANY, qlt->dip,
6161 9559 DDI_PROP_DONTPASS, prop, prop_val));
6162 9560 }
6163 9561
6164 9562 static int
6165 9563 qlt_read_int_instance_prop(qlt_state_t *qlt, char *prop, int defval)
6166 9564 {
6167 9565 char inst_prop[256];
6168 9566 int val;
6169 9567
6170 9568 /*
6171 9569 * Get adapter instance specific parameters. If the instance
6172 9570 * specific parameter isn't there, try the global parameter.
6173 9571 */
6174 9572
6175 9573 (void) sprintf(inst_prop, "hba%d-%s", qlt->instance, prop);
6176 9574
6177 9575 if ((val = qlt_read_int_prop(qlt, inst_prop, defval)) == defval) {
6178 9576 val = qlt_read_int_prop(qlt, prop, defval);
6179 9577 }
6180 9578
6181 9579 return (val);
6182 9580 }
6183 9581
6184 9582 static int
6185 9583 qlt_read_string_instance_prop(qlt_state_t *qlt, char *prop, char **prop_val)
6186 9584 {
6187 9585 char instance_prop[256];
6188 9586
6189 9587 /* Get adapter instance specific parameter. */
6190 9588 (void) sprintf(instance_prop, "hba%d-%s", qlt->instance, prop);
6191 9589 return (qlt_read_string_prop(qlt, instance_prop, prop_val));
6192 9590 }
6193 9591
6194 9592 static int
6195 9593 qlt_convert_string_to_ull(char *prop, int radix,
6196 9594 u_longlong_t *result)
6197 9595 {
6198 9596 return (ddi_strtoull((const char *)prop, 0, radix, result));
6199 9597 }
6200 9598
6201 9599 static boolean_t
6202 9600 qlt_wwn_overload_prop(qlt_state_t *qlt)
6203 9601 {
6204 9602 char *prop_val = 0;
6205 9603 int rval;
6206 9604 int radix;
6207 9605 u_longlong_t wwnn = 0, wwpn = 0;
6208 9606 boolean_t overloaded = FALSE;
6209 9607
6210 9608 radix = 16;
6211 9609
6212 9610 rval = qlt_read_string_instance_prop(qlt, "adapter-wwnn", &prop_val);
6213 9611 if (rval == DDI_PROP_SUCCESS) {
6214 9612 rval = qlt_convert_string_to_ull(prop_val, radix, &wwnn);
6215 9613 }
6216 9614 if (rval == DDI_PROP_SUCCESS) {
6217 9615 rval = qlt_read_string_instance_prop(qlt, "adapter-wwpn",
6218 9616 &prop_val);
6219 9617 if (rval == DDI_PROP_SUCCESS) {
6220 9618 rval = qlt_convert_string_to_ull(prop_val, radix,
6221 9619 &wwpn);
6222 9620 }
6223 9621 }
6224 9622 if (rval == DDI_PROP_SUCCESS) {
6225 9623 overloaded = TRUE;
6226 9624 /* Overload the current node/port name nvram copy */
6227 9625 bcopy((char *)&wwnn, qlt->nvram->node_name, 8);
6228 9626 BIG_ENDIAN_64(qlt->nvram->node_name);
6229 9627 bcopy((char *)&wwpn, qlt->nvram->port_name, 8);
6230 9628 BIG_ENDIAN_64(qlt->nvram->port_name);
6231 9629 }
6232 9630 return (overloaded);
6233 9631 }
6234 9632
6235 9633 /*
6236 9634 * prop_text - Return a pointer to a string describing the status
6237 9635 *
6238 9636 * Input: prop_status = the return status from a property function.
6239 9637 * Returns: pointer to a string.
6240 9638 * Context: Kernel context.
6241 9639 */
6242 9640 char *
6243 9641 prop_text(int prop_status)
6244 9642 {
6245 9643 string_table_t *entry = &prop_status_tbl[0];
6246 9644
6247 9645 return (value2string(entry, prop_status, 0xFFFF));
6248 9646 }
6249 9647
6250 9648 /*
6251 9649 * value2string Return a pointer to a string associated with the value
6252 9650 *
6253 9651 * Input: entry = the value to string table
6254 9652 * value = the value
6255 9653 * Returns: pointer to a string.
6256 9654 * Context: Kernel context.
6257 9655 */
6258 9656 char *
6259 9657 value2string(string_table_t *entry, int value, int delimiter)
6260 9658 {
6261 9659 for (; entry->value != delimiter; entry++) {
6262 9660 if (entry->value == value) {
6263 9661 break;
6264 9662 }
6265 9663 }
6266 9664 return (entry->string);
6267 9665 }
6268 9666
6269 9667 /*
 6270 9668  * qlt_chg_endian	Change endianness of byte array.
6271 9669 *
6272 9670 * Input: buf = array pointer.
6273 9671 * size = size of array in bytes.
6274 9672 *
6275 9673 * Context: Interrupt or Kernel context.
6276 9674 */
6277 9675 void
6278 9676 qlt_chg_endian(uint8_t buf[], size_t size)
6279 9677 {
6280 9678 uint8_t byte;
6281 9679 size_t cnt1;
6282 9680 size_t cnt;
6283 9681
6284 9682 cnt1 = size - 1;
6285 9683 for (cnt = 0; cnt < size / 2; cnt++) {
6286 9684 byte = buf[cnt1];
6287 9685 buf[cnt1] = buf[cnt];
6288 9686 buf[cnt] = byte;
6289 9687 cnt1--;
6290 9688 }
6291 9689 }
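A small usage illustration (not part of the change): qlt_chg_endian() reverses a byte array in place, which is how the 27xx dump-template words are converted to big endian later in this file.

	/* Illustration only. */
	static void
	qlt_chg_endian_example(void)
	{
		uint32_t word = 0x11223344;

		/* reverse the four bytes in place: word becomes 0x44332211 */
		qlt_chg_endian((uint8_t *)&word, sizeof (word));
	}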
6292 9690
6293 9691 /*
 6294 9692  * qlt_mps_reset
6295 9693 * Reset MPS for FCoE functions.
6296 9694 *
6297 9695 * Input:
6298 9696 * ha = virtual adapter state pointer.
6299 9697 *
6300 9698 * Context:
6301 9699 * Kernel context.
6302 9700 */
6303 9701 static void
6304 9702 qlt_mps_reset(qlt_state_t *qlt)
6305 9703 {
6306 9704 uint32_t data, dctl = 1000;
6307 9705
6308 9706 do {
6309 9707 if (dctl-- == 0 || qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 1) !=
6310 9708 QLT_SUCCESS) {
9709 + EL(qlt, "qlt_mps_reset: semaphore request fail,"
9710 + " cnt=%d\n", dctl);
6311 9711 return;
6312 9712 }
6313 9713 if (qlt_raw_rd_risc_ram_word(qlt, 0x7c00, &data) !=
6314 9714 QLT_SUCCESS) {
6315 9715 (void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
9716 + EL(qlt, "qlt_mps_reset: semaphore read fail,"
9717 + " cnt=%d\n", dctl);
6316 9718 return;
6317 9719 }
6318 9720 } while (!(data & BIT_0));
6319 9721
6320 9722 if (qlt_raw_rd_risc_ram_word(qlt, 0x7A15, &data) == QLT_SUCCESS) {
6321 9723 dctl = (uint16_t)PCICFG_RD16(qlt, 0x54);
6322 9724 if ((data & 0xe0) != (dctl & 0xe0)) {
6323 9725 data &= 0xff1f;
6324 9726 data |= dctl & 0xe0;
6325 9727 (void) qlt_raw_wrt_risc_ram_word(qlt, 0x7A15, data);
6326 9728 }
9729 + } else {
9730 + EL(qlt, "qlt_mps_reset: read 0x7a15 failed.\n");
6327 9731 }
6328 9732 (void) qlt_raw_wrt_risc_ram_word(qlt, 0x7c00, 0);
6329 9733 }
6330 9734
6331 9735 /*
6332 9736 * qlt_raw_wrt_risc_ram_word
6333 9737 * Write RISC RAM word.
6334 9738 *
6335 9739 * Input: qlt: adapter state pointer.
6336 9740 * risc_address: risc ram word address.
6337 9741 * data: data.
6338 9742 *
6339 9743 * Returns: qlt local function return status code.
6340 9744 *
6341 9745 * Context: Kernel context.
6342 9746 */
6343 9747 static fct_status_t
6344 9748 qlt_raw_wrt_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
6345 9749 uint32_t data)
6346 9750 {
6347 9751 fct_status_t ret;
6348 9752
6349 9753 REG_WR16(qlt, REG_MBOX(0), MBC_WRITE_RAM_EXTENDED);
6350 9754 REG_WR16(qlt, REG_MBOX(1), LSW(risc_address));
6351 9755 REG_WR16(qlt, REG_MBOX(2), LSW(data));
6352 9756 REG_WR16(qlt, REG_MBOX(3), MSW(data));
6353 9757 REG_WR16(qlt, REG_MBOX(8), MSW(risc_address));
6354 9758 ret = qlt_raw_mailbox_command(qlt);
6355 9759 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
6356 9760 if (ret != QLT_SUCCESS) {
6357 9761 EL(qlt, "qlt_raw_mailbox_command=MBC_WRITE_RAM_EXTENDED status"
6358 9762 "=%llxh\n", ret);
6359 9763 }
6360 9764 return (ret);
6361 9765 }
6362 9766
6363 9767 /*
 6364 9768  * qlt_raw_rd_risc_ram_word
6365 9769 * Read RISC RAM word.
6366 9770 *
6367 9771 * Input: qlt: adapter state pointer.
6368 9772 * risc_address: risc ram word address.
6369 9773 * data: data pointer.
6370 9774 *
6371 9775 * Returns: ql local function return status code.
6372 9776 *
6373 9777 * Context: Kernel context.
6374 9778 */
6375 9779 static fct_status_t
6376 9780 qlt_raw_rd_risc_ram_word(qlt_state_t *qlt, uint32_t risc_address,
6377 9781 uint32_t *data)
6378 9782 {
6379 9783 fct_status_t ret;
6380 9784
6381 9785 REG_WR16(qlt, REG_MBOX(0), MBC_READ_RAM_EXTENDED);
6382 9786 REG_WR16(qlt, REG_MBOX(1), LSW(risc_address));
6383 9787 REG_WR16(qlt, REG_MBOX(2), MSW(risc_address));
6384 9788 ret = qlt_raw_mailbox_command(qlt);
6385 9789 *data = REG_RD16(qlt, REG_MBOX(2));
6386 9790 *data |= (REG_RD16(qlt, REG_MBOX(3)) << 16);
6387 9791 REG_WR32(qlt, REG_HCCR, HCCR_CMD(CLEAR_RISC_TO_PCI_INTR));
6388 9792 if (ret != QLT_SUCCESS) {
6389 9793 EL(qlt, "qlt_raw_mailbox_command=MBC_READ_RAM_EXTENDED status"
6390 9794 "=%llxh\n", ret);
6391 9795 }
6392 9796 return (ret);
6393 9797 }
6394 9798
6395 9799 static void
6396 9800 qlt_properties(qlt_state_t *qlt)
6397 9801 {
6398 9802 int32_t cnt = 0;
6399 9803 int32_t defval = 0xffff;
6400 9804
6401 9805 if (qlt_wwn_overload_prop(qlt) == TRUE) {
6402 9806 EL(qlt, "wwnn overloaded.\n");
6403 9807 }
6404 9808
9809 + /* configure extended logging from conf file */
9810 + if ((cnt = qlt_read_int_instance_prop(qlt, "extended-logging",
9811 + defval)) != defval) {
9812 + qlt->qlt_eel_level = (uint8_t)(cnt & 0xff);
9813 + EL(qlt, "extended error logging=%d\n", cnt);
9814 + }
9815 +
6405 9816 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt2k", defval)) !=
6406 9817 defval) {
6407 9818 qlt->qlt_bucketcnt[0] = cnt;
6408 9819 EL(qlt, "2k bucket o/l=%d\n", cnt);
6409 9820 }
6410 9821
6411 9822 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt8k", defval)) !=
6412 9823 defval) {
6413 9824 qlt->qlt_bucketcnt[1] = cnt;
6414 9825 EL(qlt, "8k bucket o/l=%d\n", cnt);
6415 9826 }
6416 9827
6417 9828 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt64k", defval)) !=
6418 9829 defval) {
6419 9830 qlt->qlt_bucketcnt[2] = cnt;
6420 9831 EL(qlt, "64k bucket o/l=%d\n", cnt);
6421 9832 }
6422 9833
6423 9834 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt128k", defval)) !=
|
↓ open down ↓ |
9 lines elided |
↑ open up ↑ |
6424 9835 defval) {
6425 9836 qlt->qlt_bucketcnt[3] = cnt;
6426 9837 EL(qlt, "128k bucket o/l=%d\n", cnt);
6427 9838 }
6428 9839
6429 9840 if ((cnt = qlt_read_int_instance_prop(qlt, "bucketcnt256", defval)) !=
6430 9841 defval) {
6431 9842 qlt->qlt_bucketcnt[4] = cnt;
6432 9843 EL(qlt, "256k bucket o/l=%d\n", cnt);
6433 9844 }
9845 +}
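These tunables are read from the driver's qlt.conf through the property helpers earlier in this file, which try the instance-specific hba<n>-<name> form before falling back to the global name. A sketch of such a fragment, with illustrative values only:

	/*
	 * Example qlt.conf fragment (values are illustrative only):
	 *
	 *	hba0-extended-logging=1;	instance 0 only
	 *	extended-logging=0;		global fallback
	 *	hba0-bucketcnt2k=64;
	 *	bucketcnt8k=32;
	 */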
9846 +
9847 +/* ******************************************************************* */
9848 +/* ****************** 27xx Dump Template Functions ******************* */
9849 +/* ******************************************************************* */
9850 +
9851 +/*
 9852 + * qlt_27xx_get_dmp_template
9853 + * Get dump template from firmware module
9854 + *
9855 + * Input:
9856 + * qlt: qlt_state_t pointer.
9857 + *
9858 + * Returns:
9859 + * qlt local function return status code.
9860 + *
9861 + * Context:
9862 + * Kernel context.
9863 + */
9864 +static fct_status_t
9865 +qlt_27xx_get_dmp_template(qlt_state_t *qlt)
9866 +{
9867 + ddi_device_acc_attr_t dev_acc_attr;
9868 + dev_info_t *dip = qlt->dip;
9869 + uint_t ncookies;
9870 + size_t discard;
9871 + uint32_t word_count, cnt, *bp, *dp;
9872 +
9873 + if (qlt->dmp_template_dma_handle != NULL) {
9874 + (void) ddi_dma_unbind_handle(qlt->dmp_template_dma_handle);
9875 + if (qlt->dmp_template_acc_handle != NULL) {
9876 + ddi_dma_mem_free(&qlt->dmp_template_acc_handle);
9877 + }
9878 + ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
9879 + }
9880 +
9881 + if ((word_count = tmplt2700_length01) == 0) {
9882 + EL(qlt, "No dump template, length=0\n");
9883 + return (QLT_FAILURE);
9884 + }
9885 +
9886 + dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
9887 + dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
9888 + dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
9889 +
9890 + if (ddi_dma_alloc_handle(dip, &qlt_queue_dma_attr,
9891 + DDI_DMA_SLEEP, 0, &qlt->dmp_template_dma_handle) !=
9892 + DDI_SUCCESS) {
9893 + EL(qlt, "Unable to allocate template handle");
9894 + return (QLT_FAILURE);
9895 + }
9896 +
9897 + if (ddi_dma_mem_alloc(qlt->dmp_template_dma_handle,
9898 + (word_count << 2), &dev_acc_attr, DDI_DMA_CONSISTENT,
9899 + DDI_DMA_SLEEP, 0, &qlt->dmp_template_addr, &discard,
9900 + &qlt->dmp_template_acc_handle) != DDI_SUCCESS) {
9901 + ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
9902 + EL(qlt, "Unable to allocate template buffer");
9903 + return (QLT_FAILURE);
9904 + }
9905 +
9906 + if (ddi_dma_addr_bind_handle(qlt->dmp_template_dma_handle, NULL,
9907 + qlt->dmp_template_addr, (word_count << 2),
9908 + DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
9909 + &qlt->dmp_template_cookie, &ncookies) != DDI_SUCCESS) {
9910 + ddi_dma_mem_free(&qlt->dmp_template_acc_handle);
9911 + ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
9912 + EL(qlt, "Unable to bind template handle");
9913 + return (QLT_FAILURE);
9914 + }
9915 +
9916 + if (ncookies != 1) {
9917 + (void) ddi_dma_unbind_handle(qlt->dmp_template_dma_handle);
9918 + ddi_dma_mem_free(&qlt->dmp_template_acc_handle);
9919 + ddi_dma_free_handle(&qlt->dmp_template_dma_handle);
9920 + EL(qlt, "cookies (%d) > 1.\n", ncookies);
9921 + return (QLT_FAILURE);
9922 + }
9923 +
9924 + /* Get big endian template. */
9925 + bp = (uint32_t *)qlt->dmp_template_addr;
9926 + dp = (uint32_t *)tmplt2700_code01;
9927 + for (cnt = 0; cnt < word_count; cnt++) {
9928 + ddi_put32(qlt->dmp_template_acc_handle, bp, *dp++);
9929 + if (cnt > 6) {
9930 + qlt_chg_endian((uint8_t *)bp, 4);
9931 + }
9932 + bp++;
9933 + }
9934 +
9935 + return (QLT_SUCCESS);
9936 +}
9937 +
9938 +static int
9939 +qlt_27xx_dt_riob1(qlt_state_t *qlt, qlt_dt_riob1_t *entry,
9940 + uint8_t *dbuff, uint8_t *dbuff_end)
9941 +{
9942 + int esize;
9943 + uint32_t i, cnt;
9944 + uint8_t *bp = dbuff;
9945 + uint32_t addr = entry->addr;
9946 + uint32_t reg = entry->pci_offset;
9947 +
9948 + cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
9949 + esize = cnt * 4; /* addr */
9950 + esize += cnt * entry->reg_size; /* data */
9951 +
9952 + if (dbuff == NULL) {
9953 + return (esize);
9954 + }
9955 + if (esize + dbuff >= dbuff_end) {
9956 + EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
9957 + entry->h.driver_flags = (uint8_t)
9958 + (entry->h.driver_flags | SKIPPED_FLAG);
9959 + return (0);
9960 + }
9961 +
9962 + REG_WR32(qlt, REG_IOBUS_BASE_ADDR, addr);
9963 + while (cnt--) {
9964 + *bp++ = LSB(LSW(addr));
9965 + *bp++ = MSB(LSW(addr));
9966 + *bp++ = LSB(MSW(addr));
9967 + *bp++ = MSB(MSW(addr));
9968 + for (i = 0; i < entry->reg_size; i++) {
9969 + *bp++ = REG_RD8(qlt, reg++);
9970 + }
9971 + addr++;
9972 + }
9973 +
9974 + return (esize);
9975 +}
9976 +
9977 +static void
9978 +qlt_27xx_dt_wiob1(qlt_state_t *qlt, qlt_dt_wiob1_t *entry,
9979 + uint8_t *dbuff, uint8_t *dbuff_end)
9980 +{
9981 + uint32_t reg = entry->pci_offset;
9982 +
9983 + if (dbuff == NULL) {
9984 + return;
9985 + }
9986 + if (dbuff >= dbuff_end) {
9987 + EL(qlt, "skipped, no buffer space, needed=0\n");
9988 + entry->h.driver_flags = (uint8_t)
9989 + (entry->h.driver_flags | SKIPPED_FLAG);
9990 + return;
9991 + }
9992 +
9993 + REG_WR32(qlt, REG_IOBUS_BASE_ADDR, entry->addr);
9994 + REG_WR32(qlt, reg, entry->data);
9995 +}
9996 +
9997 +static int
9998 +qlt_27xx_dt_riob2(qlt_state_t *qlt, qlt_dt_riob2_t *entry,
9999 + uint8_t *dbuff, uint8_t *dbuff_end)
10000 +{
10001 + int esize;
10002 + uint32_t i, cnt;
10003 + uint8_t *bp = dbuff;
10004 + uint32_t reg = entry->pci_offset;
10005 + uint32_t addr = entry->addr;
10006 +
10007 + cnt = CHAR_TO_SHORT(entry->reg_count_l, entry->reg_count_h);
10008 + esize = cnt * 4; /* addr */
10009 + esize += cnt * entry->reg_size; /* data */
10010 +
10011 + if (dbuff == NULL) {
10012 + return (esize);
10013 + }
10014 + if (esize + dbuff >= dbuff_end) {
10015 + EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10016 + entry->h.driver_flags = (uint8_t)
10017 + (entry->h.driver_flags | SKIPPED_FLAG);
10018 + return (0);
10019 + }
10020 +
10021 + REG_WR32(qlt, REG_IOBUS_BASE_ADDR, addr);
10022 + REG_WR32(qlt, entry->bank_sel_offset, entry->reg_bank);
10023 + while (cnt--) {
10024 + *bp++ = LSB(LSW(addr));
10025 + *bp++ = MSB(LSW(addr));
10026 + *bp++ = LSB(MSW(addr));
10027 + *bp++ = MSB(MSW(addr));
10028 + for (i = 0; i < entry->reg_size; i++) {
10029 + *bp++ = REG_RD8(qlt, reg++);
10030 + }
10031 + addr++;
10032 + }
10033 +
10034 + return (esize);
10035 +}
10036 +
10037 +static void
10038 +qlt_27xx_dt_wiob2(qlt_state_t *qlt, qlt_dt_wiob2_t *entry,
10039 + uint8_t *dbuff, uint8_t *dbuff_end)
10040 +{
10041 + uint16_t data;
10042 + uint32_t reg = entry->pci_offset;
10043 +
10044 + if (dbuff == NULL) {
10045 + return;
10046 + }
10047 + if (dbuff >= dbuff_end) {
10048 + EL(qlt, "skipped, no buffer space, needed=0\n");
10049 + entry->h.driver_flags = (uint8_t)
10050 + (entry->h.driver_flags | SKIPPED_FLAG);
10051 + return;
10052 + }
10053 +
10054 + data = CHAR_TO_SHORT(entry->data_l, entry->data_h);
10055 +
10056 + REG_WR32(qlt, REG_IOBUS_BASE_ADDR, entry->addr);
10057 + REG_WR32(qlt, entry->bank_sel_offset, entry->reg_bank);
10058 + REG_WR16(qlt, reg, data);
10059 +}
10060 +
10061 +static int
10062 +qlt_27xx_dt_rpci(qlt_state_t *qlt, qlt_dt_rpci_t *entry, uint8_t *dbuff,
10063 + uint8_t *dbuff_end)
10064 +{
10065 + int esize;
10066 + uint32_t i;
10067 + uint8_t *bp = dbuff;
10068 + uint32_t reg = entry->addr;
10069 +
10070 + esize = 4; /* addr */
10071 + esize += 4; /* data */
10072 +
10073 + if (dbuff == NULL) {
10074 + return (esize);
10075 + }
10076 + if (esize + dbuff >= dbuff_end) {
10077 + EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10078 + entry->h.driver_flags = (uint8_t)
10079 + (entry->h.driver_flags | SKIPPED_FLAG);
10080 + return (0);
10081 + }
10082 +
10083 + *bp++ = LSB(LSW(entry->addr));
10084 + *bp++ = MSB(LSW(entry->addr));
10085 + *bp++ = LSB(MSW(entry->addr));
10086 + *bp++ = MSB(MSW(entry->addr));
10087 + for (i = 0; i < 4; i++) {
10088 + *bp++ = REG_RD8(qlt, reg++);
10089 + }
10090 +
10091 + return (esize);
10092 +}
10093 +
10094 +static void
10095 +qlt_27xx_dt_wpci(qlt_state_t *qlt, qlt_dt_wpci_t *entry,
10096 + uint8_t *dbuff, uint8_t *dbuff_end)
10097 +{
10098 + uint32_t reg = entry->addr;
10099 +
10100 + if (dbuff == NULL) {
10101 + return;
10102 + }
10103 + if (dbuff >= dbuff_end) {
10104 + EL(qlt, "skipped, no buffer space, needed=0\n");
10105 + entry->h.driver_flags = (uint8_t)
10106 + (entry->h.driver_flags | SKIPPED_FLAG);
10107 + return;
10108 + }
10109 +
10110 + REG_WR32(qlt, reg, entry->data);
10111 +}
10112 +
10113 +static int
10114 +qlt_27xx_dt_rram(qlt_state_t *qlt, qlt_dt_rram_t *entry,
10115 + uint8_t *dbuff, uint8_t *dbuff_end)
10116 +{
10117 + int esize, rval;
10118 + uint32_t start = entry->start_addr;
10119 + uint32_t end = entry->end_addr;
10120 +
10121 + if (entry->ram_area == 2) {
10122 + end = qlt->fw_ext_memory_end;
10123 + } else if (entry->ram_area == 3) {
10124 + start = qlt->fw_shared_ram_start;
10125 + end = qlt->fw_shared_ram_end;
10126 + } else if (entry->ram_area == 4) {
10127 + start = qlt->fw_ddr_ram_start;
10128 + end = qlt->fw_ddr_ram_end;
10129 + } else if (entry->ram_area != 1) {
10130 + EL(qlt, "skipped, unknown RAM_AREA %d\n", entry->ram_area);
10131 + start = 0;
10132 + end = 0;
10133 + }
10134 + esize = end > start ? end - start : 0;
10135 + if (esize) {
10136 + esize = (esize + 1) * 4;
10137 + }
10138 +
10139 + if (dbuff == NULL) {
10140 + return (esize);
10141 + }
10142 + if (esize == 0 || esize + dbuff >= dbuff_end) {
10143 + if (esize != 0) {
10144 + EL(qlt, "skipped, no buffer space, needed=%xh\n",
10145 + esize);
10146 + } else {
10147 + EL(qlt, "skipped, no ram_area=%xh, start=%xh "
10148 + "end=%xh\n", entry->ram_area, start, end);
10149 + }
10150 + entry->h.driver_flags = (uint8_t)
10151 + (entry->h.driver_flags | SKIPPED_FLAG);
10152 + return (0);
10153 + }
10154 + entry->end_addr = end;
10155 + entry->start_addr = start;
10156 +
10157 + if ((rval = qlt_27xx_dump_ram(qlt, MBC_DUMP_RAM_EXTENDED,
10158 + start, esize / 4, dbuff)) != QLT_SUCCESS) {
10159 + EL(qlt, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
10160 + "esize=0\n", rval, start, esize / 4);
10161 + return (0);
10162 + }
10163 +
10164 + return (esize);
10165 +}
10166 +
10167 +static int
10168 +qlt_27xx_dt_gque(qlt_state_t *qlt, qlt_dt_gque_t *entry,
10169 + uint8_t *dbuff, uint8_t *dbuff_end)
10170 +{
10171 + int esize;
10172 + uint32_t cnt, q_cnt, e_cnt, i;
10173 + uint8_t *bp = dbuff, *dp;
10174 +
10175 + if (entry->queue_type == 1) {
10176 + e_cnt = qlt->qlt_queue_cnt;
10177 + esize = e_cnt * 2; /* queue number */
10178 + esize += e_cnt * 2; /* queue entries */
10179 +
10180 + /* queue size */
10181 + esize += REQUEST_QUEUE_ENTRIES * IOCB_SIZE;
10182 + for (q_cnt = 1; q_cnt < qlt->qlt_queue_cnt; q_cnt++) {
10183 + esize += REQUEST_QUEUE_MQ_ENTRIES * IOCB_SIZE;
10184 + }
10185 +
10186 + if (dbuff == NULL) {
10187 + return (esize);
10188 + }
10189 + if (esize + dbuff >= dbuff_end) {
10190 + EL(qlt, "skipped, no buffer space, needed=%xh\n",
10191 + esize);
10192 + entry->h.driver_flags = (uint8_t)
10193 + (entry->h.driver_flags | SKIPPED_FLAG);
10194 + return (0);
10195 + }
10196 + entry->num_queues = e_cnt;
10197 +
10198 + for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
10199 + e_cnt = q_cnt == 0 ?
10200 + REQUEST_QUEUE_ENTRIES : REQUEST_QUEUE_MQ_ENTRIES;
10201 + dp = (uint8_t *)qlt->mq_req[q_cnt].mq_ptr;
10202 + *bp++ = LSB(q_cnt);
10203 + *bp++ = MSB(q_cnt);
10204 + *bp++ = LSB(e_cnt);
10205 + *bp++ = MSB(e_cnt);
10206 + for (cnt = 0; cnt < e_cnt; cnt++) {
10207 + for (i = 0; i < IOCB_SIZE; i++) {
10208 + *bp++ = *dp++;
10209 + }
10210 + }
10211 + }
10212 + } else if (entry->queue_type == 2) {
10213 +
10214 + e_cnt = qlt->qlt_queue_cnt;
10215 + esize = e_cnt * 2; /* queue number */
10216 + esize += e_cnt * 2; /* queue entries */
10217 +
10218 + /* queue size */
10219 + esize += RESPONSE_QUEUE_ENTRIES * IOCB_SIZE;
10220 + for (q_cnt = 1; q_cnt < qlt->qlt_queue_cnt; q_cnt++) {
10221 + esize += RESPONSE_QUEUE_MQ_ENTRIES * IOCB_SIZE;
10222 + }
10223 +
10224 + if (dbuff == NULL) {
10225 + return (esize);
10226 + }
10227 + if (esize + dbuff >= dbuff_end) {
10228 + EL(qlt, "skipped2, no buffer space, needed=%xh\n",
10229 + esize);
10230 + entry->h.driver_flags = (uint8_t)
10231 + (entry->h.driver_flags | SKIPPED_FLAG);
10232 + return (0);
10233 + }
10234 + entry->num_queues = e_cnt;
10235 +
10236 + for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
10237 + e_cnt = q_cnt == 0 ?
10238 + RESPONSE_QUEUE_ENTRIES : RESPONSE_QUEUE_MQ_ENTRIES;
10239 + dp = (uint8_t *)qlt->mq_resp[q_cnt].mq_ptr;
10240 + *bp++ = LSB(q_cnt);
10241 + *bp++ = MSB(q_cnt);
10242 + *bp++ = LSB(e_cnt);
10243 + *bp++ = MSB(e_cnt);
10244 + for (cnt = 0; cnt < e_cnt; cnt++) {
10245 + for (i = 0; i < IOCB_SIZE; i++) {
10246 + *bp++ = *dp++;
10247 + }
10248 + }
10249 + }
10250 + } else if (entry->queue_type == 3) {
10251 + e_cnt = 1;
10252 + esize = e_cnt * 2; /* queue number */
10253 + esize += e_cnt * 2; /* queue entries */
10254 +
10255 + /* queue size */
10256 + esize += RESPONSE_QUEUE_ENTRIES * IOCB_SIZE;
10257 +
10258 + if (dbuff == NULL) {
10259 + return (esize);
10260 + }
10261 + if (esize + dbuff >= dbuff_end) {
10262 + EL(qlt, "skipped2, no buffer space, needed=%xh\n",
10263 + esize);
10264 + entry->h.driver_flags = (uint8_t)
10265 + (entry->h.driver_flags | SKIPPED_FLAG);
10266 + return (0);
10267 + }
10268 + entry->num_queues = e_cnt;
10269 +
10270 + for (q_cnt = 0; q_cnt < entry->num_queues; q_cnt++) {
10271 + e_cnt = ATIO_QUEUE_ENTRIES;
10272 + dp = (uint8_t *)qlt->atio_ptr;
10273 + *bp++ = LSB(q_cnt);
10274 + *bp++ = MSB(q_cnt);
10275 + *bp++ = LSB(e_cnt);
10276 + *bp++ = MSB(e_cnt);
10277 + for (cnt = 0; cnt < e_cnt; cnt++) {
10278 + for (i = 0; i < IOCB_SIZE; i++) {
10279 + *bp++ = *dp++;
10280 + }
10281 + }
10282 + }
10283 + } else {
10284 + EL(qlt, "skipped, unknown queue_type %d, esize=0\n",
10285 + entry->queue_type);
10286 + if (dbuff != NULL) {
10287 + entry->h.driver_flags = (uint8_t)
10288 + (entry->h.driver_flags | SKIPPED_FLAG);
10289 + }
10290 + return (0);
10291 + }
10292 +
10293 + return (esize);
10294 +}
10295 +
10296 +/*ARGSUSED*/
10297 +static int
10298 +qlt_27xx_dt_gfce(qlt_state_t *qlt, qlt_dt_gfce_t *entry,
10299 + uint8_t *dbuff, uint8_t *dbuff_end)
10300 +{
10301 + if (dbuff != NULL) {
10302 + entry->h.driver_flags = (uint8_t)
10303 + (entry->h.driver_flags | SKIPPED_FLAG);
10304 + }
10305 +
10306 + return (0);
10307 +}
10308 +
10309 +static void
10310 +qlt_27xx_dt_prisc(qlt_state_t *qlt, qlt_dt_prisc_t *entry,
10311 + uint8_t *dbuff, uint8_t *dbuff_end)
10312 +{
10313 + clock_t timer;
10314 +
10315 + if (dbuff == NULL) {
10316 + return;
10317 + }
10318 + if (dbuff >= dbuff_end) {
10319 + EL(qlt, "skipped, no buffer space, needed=0\n");
10320 + entry->h.driver_flags = (uint8_t)
10321 + (entry->h.driver_flags | SKIPPED_FLAG);
10322 + return;
10323 + }
10324 +
10325 + /* Pause RISC. */
10326 + if ((REG_RD32(qlt, REG_RISC_STATUS) & BIT_8) == 0) {
10327 + REG_WR32(qlt, REG_HCCR, 0x30000000);
10328 + for (timer = 30000;
10329 + (REG_RD32(qlt, REG_RISC_STATUS) & BIT_8) == 0;
10330 + timer--) {
10331 + if (timer) {
10332 + drv_usecwait(100);
10333 + if (timer % 10000 == 0) {
10334 + EL(qlt, "risc pause %d\n", timer);
10335 + }
10336 + } else {
10337 + EL(qlt, "risc pause timeout\n");
10338 + break;
10339 + }
10340 + }
10341 + }
10342 +}
10343 +
10344 +static void
10345 +qlt_27xx_dt_rrisc(qlt_state_t *qlt, qlt_dt_rrisc_t *entry,
10346 + uint8_t *dbuff, uint8_t *dbuff_end)
10347 +{
10348 + clock_t timer;
10349 + uint16_t rom_status;
10350 +
10351 + if (dbuff == NULL) {
10352 + return;
10353 + }
10354 + if (dbuff >= dbuff_end) {
10355 + EL(qlt, "skipped, no buffer space, needed=0\n");
10356 + entry->h.driver_flags = (uint8_t)
10357 + (entry->h.driver_flags | SKIPPED_FLAG);
10358 + return;
10359 + }
10360 +
10361 + /* Shutdown DMA. */
10362 + REG_WR32(qlt, REG_CTRL_STATUS, DMA_SHUTDOWN_CTRL);
10363 +
10364 + /* Wait for DMA to stop. */
10365 + for (timer = 0; timer < 30000; timer++) {
10366 + if (!(REG_RD32(qlt, REG_CTRL_STATUS) & DMA_ACTIVE_STATUS)) {
10367 + break;
10368 + }
10369 + drv_usecwait(100);
10370 + }
10371 +
10372 + /* Reset the chip. */
10373 + REG_WR32(qlt, REG_CTRL_STATUS, CHIP_SOFT_RESET);
10374 + drv_usecwait(200);
10375 +
10376 + /* Wait for RISC to recover from reset. */
10377 + for (timer = 30000; timer; timer--) {
10378 + rom_status = REG_RD16(qlt, REG_MBOX0);
10379 + if ((rom_status & MBS_ROM_STATUS_MASK) != MBS_ROM_BUSY) {
10380 + break;
10381 + }
10382 + drv_usecwait(100);
10383 + }
10384 +
10385 + /* Wait for reset to finish. */
10386 + for (timer = 30000; timer; timer--) {
10387 + if (!(REG_RD32(qlt, REG_CTRL_STATUS) & CHIP_SOFT_RESET)) {
10388 + break;
10389 + }
10390 + drv_usecwait(100);
10391 + }
10392 +
10393 + /* XXX: Disable Interrupts (Probably not needed) */
10394 + REG_WR32(qlt, REG_INTR_CTRL, 0);
10395 +
10396 + qlt->qlt_intr_enabled = 0;
10397 +}
10398 +
10399 +static void
10400 +qlt_27xx_dt_dint(qlt_state_t *qlt, qlt_dt_dint_t *entry,
10401 + uint8_t *dbuff, uint8_t *dbuff_end)
10402 +{
10403 + if (dbuff == NULL) {
10404 + return;
10405 + }
10406 + if (dbuff >= dbuff_end) {
10407 + EL(qlt, "skipped, no buffer space, needed=0\n");
10408 + entry->h.driver_flags = (uint8_t)
10409 + (entry->h.driver_flags | SKIPPED_FLAG);
10410 + return;
10411 + }
10412 +
10413 + PCICFG_WR32(qlt, entry->pci_offset, entry->data);
10414 +}
10415 +
10416 +/*ARGSUSED*/
10417 +static int
10418 +qlt_27xx_dt_ghbd(qlt_state_t *qlt, qlt_dt_ghbd_t *entry,
10419 + uint8_t *dbuff, uint8_t *dbuff_end)
10420 +{
10421 + if (dbuff != NULL) {
10422 + entry->h.driver_flags = (uint8_t)
10423 + (entry->h.driver_flags | SKIPPED_FLAG);
10424 + }
10425 +
10426 + return (0);
10427 +}
10428 +
10429 +/*ARGSUSED*/
10430 +static int
10431 +qlt_27xx_dt_scra(qlt_state_t *qlt, qlt_dt_scra_t *entry,
10432 + uint8_t *dbuff, uint8_t *dbuff_end)
10433 +{
10434 + if (dbuff != NULL) {
10435 + entry->h.driver_flags = (uint8_t)
10436 + (entry->h.driver_flags | SKIPPED_FLAG);
10437 + }
10438 +
10439 + return (0);
10440 +}
10441 +
10442 +static int
10443 +qlt_27xx_dt_rrreg(qlt_state_t *qlt, qlt_dt_rrreg_t *entry,
10444 + uint8_t *dbuff, uint8_t *dbuff_end)
10445 +{
10446 + int esize;
10447 + uint32_t i;
10448 + uint8_t *bp = dbuff;
10449 + uint32_t addr = entry->addr;
10450 + uint32_t cnt = entry->count;
10451 +
10452 + esize = cnt * 4; /* addr */
10453 + esize += cnt * 4; /* data */
10454 +
10455 + if (dbuff == NULL) {
10456 + return (esize);
10457 + }
10458 + if (esize + dbuff >= dbuff_end) {
10459 + EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10460 + entry->h.driver_flags = (uint8_t)
10461 + (entry->h.driver_flags | SKIPPED_FLAG);
10462 + return (0);
10463 + }
10464 +
10465 + REG_WR32(qlt, REG_IOBUS_BASE_ADDR, 0x40);
10466 + while (cnt--) {
10467 + REG_WR32(qlt, 0xc0, addr | 0x80000000);
10468 + *bp++ = LSB(LSW(addr));
10469 + *bp++ = MSB(LSW(addr));
10470 + *bp++ = LSB(MSW(addr));
10471 + *bp++ = MSB(MSW(addr));
10472 + for (i = 0; i < 4; i++) {
10473 + *bp++ = REG_RD8(qlt, i);
10474 + }
10475 + addr += 4;
10476 + }
10477 +
10478 + return (esize);
10479 +}
10480 +
10481 +static void
10482 +qlt_27xx_dt_wrreg(qlt_state_t *qlt, qlt_dt_wrreg_t *entry,
10483 + uint8_t *dbuff, uint8_t *dbuff_end)
10484 +{
10485 + if (dbuff == NULL) {
10486 + return;
10487 + }
10488 + if (dbuff >= dbuff_end) {
10489 + EL(qlt, "skipped, no buffer space, needed=0\n");
10490 + entry->h.driver_flags = (uint8_t)
10491 + (entry->h.driver_flags | SKIPPED_FLAG);
10492 + return;
10493 + }
10494 +
10495 + REG_WR32(qlt, REG_IOBUS_BASE_ADDR, 0x40);
10496 + REG_WR32(qlt, 0xc4, entry->data);
10497 + REG_WR32(qlt, 0xc0, entry->addr);
10498 +}
10499 +
10500 +static int
10501 +qlt_27xx_dt_rrram(qlt_state_t *qlt, qlt_dt_rrram_t *entry,
10502 + uint8_t *dbuff, uint8_t *dbuff_end)
10503 +{
10504 + int rval, esize;
10505 +
10506 + esize = entry->count * 4; /* data */
10507 +
10508 + if (dbuff == NULL) {
10509 + return (esize);
10510 + }
10511 + if (esize + dbuff >= dbuff_end) {
10512 + EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10513 + entry->h.driver_flags = (uint8_t)
10514 + (entry->h.driver_flags | SKIPPED_FLAG);
10515 + return (0);
10516 + }
10517 +
10518 + if ((rval = qlt_27xx_dump_ram(qlt, MBC_MPI_RAM, entry->addr,
10519 + entry->count, dbuff)) != QLT_SUCCESS) {
10520 + EL(qlt, "dump_ram failed, rval=%xh, addr=%xh, len=%xh, "
10521 + "esize=0\n", rval, entry->addr, entry->count);
10522 + return (0);
10523 + }
10524 +
10525 + return (esize);
10526 +}
10527 +
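      +/*
      + * 27xx dump template DT_RPCIC entry handler: reads entry->count 32-bit
      + * PCI config space values starting at entry->addr, storing each
      + * address followed by the 4 bytes read.  Returns the number of bytes
      + * produced, or 0 if the entry is skipped.
      + */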
10528 +static int
10529 +qlt_27xx_dt_rpcic(qlt_state_t *qlt, qlt_dt_rpcic_t *entry,
10530 + uint8_t *dbuff, uint8_t *dbuff_end)
10531 +{
10532 + int esize;
10533 + uint32_t i;
10534 + uint8_t *bp = dbuff;
10535 + uint32_t addr = entry->addr;
10536 + uint32_t cnt = entry->count;
10537 +
10538 + esize = cnt * 4; /* addr */
10539 + esize += cnt * 4; /* data */
10540 +
10541 + if (dbuff == NULL) {
10542 + return (esize);
10543 + }
10544 + if (esize + dbuff >= dbuff_end) {
10545 + EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10546 + entry->h.driver_flags = (uint8_t)
10547 + (entry->h.driver_flags | SKIPPED_FLAG);
10548 + return (0);
10549 + }
10550 +
10551 + while (cnt--) {
10552 + *bp++ = LSB(LSW(addr));
10553 + *bp++ = MSB(LSW(addr));
10554 + *bp++ = LSB(MSW(addr));
10555 + *bp++ = MSB(MSW(addr));
10556 + for (i = 0; i < 4; i++) {
10557 + *bp++ = PCICFG_RD8(qlt, addr++);
10558 + }
10559 + }
10560 +
10561 + return (esize);
10562 +}
10563 +
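      +/*
      + * 27xx dump template DT_GQUES (get queue shadows) entry handler: queue
      + * shadowing is not supported, so every queue type is marked skipped
      + * and contributes no dump data.
      + */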
10564 +/*ARGSUSED*/
10565 +static int
10566 +qlt_27xx_dt_gques(qlt_state_t *qlt, qlt_dt_gques_t *entry,
10567 + uint8_t *dbuff, uint8_t *dbuff_end)
10568 +{
10569 + if (entry->queue_type == 1) {
10570 + EL(qlt, "skipped, no request queue shadowing, esize=0\n");
10571 + if (dbuff != NULL) {
10572 + entry->num_queues = 0;
10573 + entry->h.driver_flags = (uint8_t)
10574 + (entry->h.driver_flags | SKIPPED_FLAG);
10575 + }
10576 + return (0);
10577 + } else if (entry->queue_type == 2) {
10578 + EL(qlt, "skipped, no response queue shadowing, esize=0\n");
10579 + if (dbuff != NULL) {
10580 + entry->num_queues = 0;
10581 + entry->h.driver_flags = (uint8_t)
10582 + (entry->h.driver_flags | SKIPPED_FLAG);
10583 + }
10584 + return (0);
10585 + } else if (entry->queue_type == 3) {
10586 + EL(qlt, "skipped, no ATIO queue, esize=0\n");
10587 + if (dbuff != NULL) {
10588 + entry->num_queues = 0;
10589 + entry->h.driver_flags = (uint8_t)
10590 + (entry->h.driver_flags | SKIPPED_FLAG);
10591 + }
10592 + return (0);
10593 + } else {
10594 + EL(qlt, "skipped, unknown queue_type %d, esize=0\n",
10595 + entry->queue_type);
10596 + if (dbuff != NULL) {
10597 + entry->h.driver_flags = (uint8_t)
10598 + (entry->h.driver_flags | SKIPPED_FLAG);
10599 + }
10600 + return (0);
10601 + }
10602 +}
10603 +
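      +/*
      + * 27xx dump template DT_WDMP entry handler: copies the entry's inline
      + * data words into the dump buffer in little-endian byte order and
      + * returns the entry's declared length (0 if skipped).
      + */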
10604 +static int
10605 +qlt_27xx_dt_wdmp(qlt_state_t *qlt, qlt_dt_wdmp_t *entry,
10606 + uint8_t *dbuff, uint8_t *dbuff_end)
10607 +{
10608 + int esize;
10609 + uint8_t *bp = dbuff;
10610 + uint32_t data, cnt = entry->length, *dp = entry->data;
10611 +
10612 + esize = cnt;
10613 + if (dbuff == NULL) {
10614 + return (esize);
10615 + }
10616 + if (esize + dbuff >= dbuff_end) {
10617 + EL(qlt, "skipped, no buffer space, needed=%xh\n", esize);
10618 + entry->h.driver_flags = (uint8_t)
10619 + (entry->h.driver_flags | SKIPPED_FLAG);
10620 + return (0);
10621 + }
10622 +
10623 + while (cnt--) {
10624 + data = *dp++;
10625 + *bp++ = LSB(LSW(data));
10626 + *bp++ = MSB(LSW(data));
10627 + *bp++ = LSB(MSW(data));
10628 + *bp++ = MSB(MSW(data));
10629 + }
10630 +
10631 + return (esize);
10632 +}
10633 +
10634 +/*
10635 + * qlt_27xx_dump_ram
10636 + * Dumps RAM.
10637 + *	RISC interrupts must be disabled when this routine is called.
10638 + *
10639 + * Input:
10640 + *	qlt:		adapter state pointer.
10641 + * cmd: MBC_DUMP_RAM_EXTENDED/MBC_MPI_RAM.
10642 + * risc_address: RISC code start address.
10643 + * len: Number of words.
10644 + * bp: buffer pointer.
10645 + *
10646 + * Returns:
10647 + * qlt local function return status code.
10648 + *
10649 + * Context:
10650 + * Interrupt or Kernel context, no mailbox commands allowed.
10651 + */
10652 +/*ARGSUSED*/
10653 +static int
10654 +qlt_27xx_dump_ram(qlt_state_t *qlt, uint16_t cmd, uint32_t risc_address,
10655 + uint32_t len, uint8_t *bp)
10656 +{
10657 + uint8_t *dp;
10658 + uint32_t words_to_read, endaddr;
10659 + uint32_t i;
10660 + int rval = QLT_SUCCESS;
10661 +
10662 + endaddr = risc_address + len;
10663 + words_to_read = 0;
10664 + while (risc_address < endaddr) {
10665 + words_to_read = MBOX_DMA_MEM_SIZE >> 2;
10666 + if ((words_to_read + risc_address) > endaddr) {
10667 + words_to_read = endaddr - risc_address;
10668 + }
10669 +
10670 + if (cmd == MBC_DUMP_RAM_EXTENDED) {
10671 + rval = qlt_read_risc_ram(qlt, risc_address,
10672 + words_to_read);
10673 + } else {
10674 + rval = qlt_mbx_mpi_ram(qlt, risc_address,
10675 + words_to_read, 0);
10676 + }
10677 +
10678 + if (rval != QLT_SUCCESS) {
10679 + EL(qlt, "Error reading risc ram = %xh len = %x\n",
10680 + risc_address, words_to_read);
10681 + return (rval);
10682 + }
10683 +
10684 + dp = (uint8_t *)(qlt->queue_mem_ptr + MBOX_DMA_MEM_OFFSET);
10685 + for (i = 0; i < (words_to_read * 4); i++) {
10686 + *bp++ = *dp++;
10687 + }
10688 + risc_address += words_to_read;
10689 + }
10690 +
10691 + return (rval);
10692 +}
10693 +
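      +/*
      + * qlt_27xx_dmp_parse_template
      + *	Walks the 27xx firmware dump template and dispatches each entry
      + *	to its handler.  A NULL dump_buff runs a size-only pass; a real
      + *	buffer runs the capture pass.
      + *
      + * Returns:
      + *	Total dump size in bytes (template plus entry data), or 0 if
      + *	the template header is invalid.
      + */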
10694 +static uint32_t
10695 +qlt_27xx_dmp_parse_template(qlt_state_t *qlt, qlt_dt_hdr_t *template_hdr,
10696 + uint8_t *dump_buff, uint32_t buff_size)
10697 +{
10698 + int e_cnt, esize, num_of_entries;
10699 + uint32_t bsize;
10700 + time_t time;
10701 + uint8_t *dbuff, *dbuff_end;
10702 + qlt_dt_entry_t *entry;
10703 + int sane_end = 0;
10704 +
10705 +	dbuff = dump_buff;	/* dbuff == NULL selects the size-determination pass */
10706 + dbuff_end = dump_buff + buff_size;
10707 +
10708 + if (template_hdr->type != DT_THDR) {
10709 + EL(qlt, "Template header not found\n");
10710 + return (0);
10711 + }
10712 + if (dbuff != NULL) {
10713 + (void) drv_getparm(TIME, &time);
10714 + template_hdr->driver_timestamp = LSD(time);
10715 + }
10716 +
10717 + num_of_entries = template_hdr->num_of_entries;
10718 + entry = (qlt_dt_entry_t *)((caddr_t)template_hdr +
10719 + template_hdr->first_entry_offset);
10720 +
10721 + bsize = template_hdr->size_of_template;
10722 + for (e_cnt = 0; e_cnt < num_of_entries; e_cnt++) {
10723 + /*
10724 + * Decode the entry type and process it accordingly
10725 + */
10726 + esize = 0;
10727 + switch (entry->h.type) {
10728 + case DT_NOP:
10729 + if (dbuff != NULL) {
10730 + entry->h.driver_flags = (uint8_t)
10731 + (entry->h.driver_flags | SKIPPED_FLAG);
10732 + }
10733 + break;
10734 + case DT_TEND:
10735 + if (dbuff != NULL) {
10736 + entry->h.driver_flags = (uint8_t)
10737 + (entry->h.driver_flags | SKIPPED_FLAG);
10738 + }
10739 + sane_end++;
10740 + break;
10741 + case DT_RIOB1:
10742 + esize = qlt_27xx_dt_riob1(qlt, (qlt_dt_riob1_t *)entry,
10743 + dbuff, dbuff_end);
10744 + break;
10745 + case DT_WIOB1:
10746 + qlt_27xx_dt_wiob1(qlt, (qlt_dt_wiob1_t *)entry,
10747 + dbuff, dbuff_end);
10748 + break;
10749 + case DT_RIOB2:
10750 + esize = qlt_27xx_dt_riob2(qlt, (qlt_dt_riob2_t *)entry,
10751 + dbuff, dbuff_end);
10752 + break;
10753 + case DT_WIOB2:
10754 + qlt_27xx_dt_wiob2(qlt, (qlt_dt_wiob2_t *)entry,
10755 + dbuff, dbuff_end);
10756 + break;
10757 + case DT_RPCI:
10758 + esize = qlt_27xx_dt_rpci(qlt, (qlt_dt_rpci_t *)entry,
10759 + dbuff, dbuff_end);
10760 + break;
10761 + case DT_WPCI:
10762 + qlt_27xx_dt_wpci(qlt, (qlt_dt_wpci_t *)entry,
10763 + dbuff, dbuff_end);
10764 + break;
10765 + case DT_RRAM:
10766 + esize = qlt_27xx_dt_rram(qlt, (qlt_dt_rram_t *)entry,
10767 + dbuff, dbuff_end);
10768 + break;
10769 + case DT_GQUE:
10770 + esize = qlt_27xx_dt_gque(qlt, (qlt_dt_gque_t *)entry,
10771 + dbuff, dbuff_end);
10772 + break;
10773 + case DT_GFCE:
10774 + esize = qlt_27xx_dt_gfce(qlt, (qlt_dt_gfce_t *)entry,
10775 + dbuff, dbuff_end);
10776 + break;
10777 + case DT_PRISC:
10778 + qlt_27xx_dt_prisc(qlt, (qlt_dt_prisc_t *)entry,
10779 + dbuff, dbuff_end);
10780 + break;
10781 + case DT_RRISC:
10782 + qlt_27xx_dt_rrisc(qlt, (qlt_dt_rrisc_t *)entry,
10783 + dbuff, dbuff_end);
10784 + break;
10785 + case DT_DINT:
10786 + qlt_27xx_dt_dint(qlt, (qlt_dt_dint_t *)entry,
10787 + dbuff, dbuff_end);
10788 + break;
10789 + case DT_GHBD:
10790 + esize = qlt_27xx_dt_ghbd(qlt, (qlt_dt_ghbd_t *)entry,
10791 + dbuff, dbuff_end);
10792 + break;
10793 + case DT_SCRA:
10794 + esize = qlt_27xx_dt_scra(qlt, (qlt_dt_scra_t *)entry,
10795 + dbuff, dbuff_end);
10796 + break;
10797 + case DT_RRREG:
10798 + esize = qlt_27xx_dt_rrreg(qlt, (qlt_dt_rrreg_t *)entry,
10799 + dbuff, dbuff_end);
10800 + break;
10801 + case DT_WRREG:
10802 + qlt_27xx_dt_wrreg(qlt, (qlt_dt_wrreg_t *)entry,
10803 + dbuff, dbuff_end);
10804 + break;
10805 + case DT_RRRAM:
10806 + esize = qlt_27xx_dt_rrram(qlt, (qlt_dt_rrram_t *)entry,
10807 + dbuff, dbuff_end);
10808 + break;
10809 + case DT_RPCIC:
10810 + esize = qlt_27xx_dt_rpcic(qlt, (qlt_dt_rpcic_t *)entry,
10811 + dbuff, dbuff_end);
10812 + break;
10813 + case DT_GQUES:
10814 + esize = qlt_27xx_dt_gques(qlt, (qlt_dt_gques_t *)entry,
10815 + dbuff, dbuff_end);
10816 + break;
10817 + case DT_WDMP:
10818 + esize = qlt_27xx_dt_wdmp(qlt, (qlt_dt_wdmp_t *)entry,
10819 + dbuff, dbuff_end);
10820 + break;
10821 + default:
10822 + entry->h.driver_flags = (uint8_t)
10823 + (entry->h.driver_flags | SKIPPED_FLAG);
10824 + EL(qlt, "Entry ID=%d, type=%d unknown\n", e_cnt,
10825 + entry->h.type);
10826 + break;
10827 + }
10828 + if (dbuff != NULL && esize) {
10829 + dbuff += esize;
10830 + }
10831 + bsize += esize;
10832 + /* next entry in the template */
10833 + entry = (qlt_dt_entry_t *)((caddr_t)entry + entry->h.size);
10834 + }
10835 + if (sane_end > 1) {
10836 + EL(qlt, "Template configuration error. Check Template\n");
10837 + }
10838 +
10839 + return (bsize);
6434 10840 }
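A minimal sketch of the intended two-pass use of qlt_27xx_dmp_parse_template() follows, assuming a hypothetical caller; the function name qlt_27xx_capture_dump_sketch and the validated template pointer thdr are illustrative only and not part of this change, and kmem_zalloc()/kmem_free() come from the DDI headers qlt.c already pulls in.

	/*
	 * Hypothetical caller -- illustrative only, not the driver's actual
	 * dump path.  "thdr" is assumed to point at a valid 27xx template.
	 */
	static void
	qlt_27xx_capture_dump_sketch(qlt_state_t *qlt, qlt_dt_hdr_t *thdr)
	{
		uint32_t dsize;
		uint8_t *dbuf;

		/* Pass 1: NULL buffer only computes the required dump size. */
		dsize = qlt_27xx_dmp_parse_template(qlt, thdr, NULL, 0);
		if (dsize == 0)
			return;		/* template header was not DT_THDR */

		/* dsize includes the template size, so the buffer has headroom. */
		dbuf = kmem_zalloc(dsize, KM_SLEEP);

		/* Pass 2: capture the dump into the allocated buffer. */
		(void) qlt_27xx_dmp_parse_template(qlt, thdr, dbuf, dsize);

		/* ... hand dbuf/dsize to whatever consumes the dump, then free. */
		kmem_free(dbuf, dsize);
	}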