2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2010 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 */
27
28 /*
29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
30 */
31
32 #pragma ident "Copyright 2010 QLogic Corporation; ql_xioctl.c"
33
34 /*
35 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
36 *
37 * ***********************************************************************
38 * * **
39 * * NOTICE **
40 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
41 * * ALL RIGHTS RESERVED **
42 * * **
43 * ***********************************************************************
44 *
45 */
46
47 #include <ql_apps.h>
48 #include <ql_api.h>
49 #include <ql_debug.h>
50 #include <ql_init.h>
51 #include <ql_iocb.h>
52 #include <ql_ioctl.h>
53 #include <ql_mbx.h>
54 #include <ql_xioctl.h>
55
56 /*
57 * Local data
58 */
59
60 /*
61 * Local prototypes
62 */
63 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
64 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
65 boolean_t (*)(EXT_IOCTL *));
66 static boolean_t ql_validate_signature(EXT_IOCTL *);
67 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
68 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
69 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
79 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
90 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
91 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
92 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
93 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int);
94
95 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
96 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
97 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
98 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
99 uint8_t);
100 static uint32_t ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
101 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
102 static int ql_24xx_flash_desc(ql_adapter_state_t *);
103 static int ql_setup_flash(ql_adapter_state_t *);
104 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
105 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
106 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
107 uint32_t, int);
108 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
109 uint8_t);
110 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
111 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
113 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
114 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
115 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
116 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
117 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
118 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
119 static uint32_t ql_setup_led(ql_adapter_state_t *);
120 static uint32_t ql_wrapup_led(ql_adapter_state_t *);
121 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
122 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
123 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
124 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
125 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
126 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
127 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
128 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
129 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
130 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
131 static void ql_process_flt(ql_adapter_state_t *, uint32_t);
132 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
133 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
134 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
135 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
136 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
138 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
139 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
140 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
141 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
142 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
143 static void ql_restart_hba(ql_adapter_state_t *);
144 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
145 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
146 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
147 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
148 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
149 static void ql_update_flash_caches(ql_adapter_state_t *);
150 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
151 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
152 static void ql_get_fcf_list(ql_adapter_state_t *, EXT_IOCTL *, int);
153 static void ql_get_resource_counts(ql_adapter_state_t *, EXT_IOCTL *, int);
154 static void ql_qry_adapter_versions(ql_adapter_state_t *, EXT_IOCTL *, int);
155 static int ql_set_loop_point(ql_adapter_state_t *, uint16_t);
156
157 /* ******************************************************************** */
158 /* External IOCTL support. */
159 /* ******************************************************************** */
160
161 /*
162 * ql_alloc_xioctl_resource
163 * Allocates resources needed by module code.
164 *
165 * Input:
166 * ha: adapter state pointer.
167 *
168 * Returns:
169 * SYS_ERRNO
170 *
171 * Context:
172 * Kernel context.
173 */
174 int
175 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
176 {
177 ql_xioctl_t *xp;
178
179 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
180
181 if (ha->xioctl != NULL) {
182 QL_PRINT_9(CE_CONT, "(%d): already allocated done\n",
183 ha->instance);
184 return (0);
185 }
186
187 xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
188 if (xp == NULL) {
189 EL(ha, "failed, kmem_zalloc\n");
190 return (ENOMEM);
191 }
192 ha->xioctl = xp;
193
194 /* Allocate AEN tracking buffer */
195 xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
196 sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
197 if (xp->aen_tracking_queue == NULL) {
198 EL(ha, "failed, kmem_zalloc-2\n");
199 ql_free_xioctl_resource(ha);
200 return (ENOMEM);
201 }
202
203 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
204
205 return (0);
206 }
207
208 /*
209 * ql_free_xioctl_resource
210 * Frees resources used by module code.
211 *
212 * Input:
213 * ha: adapter state pointer.
214 *
215 * Context:
216 * Kernel context.
217 */
218 void
219 ql_free_xioctl_resource(ql_adapter_state_t *ha)
220 {
221 ql_xioctl_t *xp = ha->xioctl;
222
223 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
224
225 if (xp == NULL) {
226 QL_PRINT_9(CE_CONT, "(%d): already freed\n", ha->instance);
227 return;
228 }
229
230 if (xp->aen_tracking_queue != NULL) {
231 kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
232 sizeof (EXT_ASYNC_EVENT));
233 xp->aen_tracking_queue = NULL;
234 }
235
236 kmem_free(xp, sizeof (ql_xioctl_t));
237 ha->xioctl = NULL;
238
239 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
240 }
241
242 /*
243 * ql_xioctl
244 * External IOCTL processing.
245 *
246 * Input:
247 * ha: adapter state pointer.
248 * cmd: function to perform
249 * arg: data type varies with request
250 * mode: flags
251 * cred_p: credentials pointer
252 * rval_p: pointer to result value
253 *
254 * Returns:
255 * 0: success
256 * ENXIO: No such device or address
257 * ENOPROTOOPT: Protocol not available
258 *
259 * Context:
260 * Kernel context.
261 */
262 /* ARGSUSED */
263 int
264 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
265 cred_t *cred_p, int *rval_p)
266 {
267 int rval;
268
269 QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance, cmd);
270
271 if (ha->xioctl == NULL) {
272 QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
273 return (ENXIO);
274 }
275
276 switch (cmd) {
277 case EXT_CC_QUERY:
278 case EXT_CC_SEND_FCCT_PASSTHRU:
279 case EXT_CC_REG_AEN:
280 case EXT_CC_GET_AEN:
281 case EXT_CC_SEND_SCSI_PASSTHRU:
282 case EXT_CC_WWPN_TO_SCSIADDR:
283 case EXT_CC_SEND_ELS_RNID:
284 case EXT_CC_SET_DATA:
285 case EXT_CC_GET_DATA:
286 case EXT_CC_HOST_IDX:
287 case EXT_CC_READ_NVRAM:
288 case EXT_CC_UPDATE_NVRAM:
289 case EXT_CC_READ_OPTION_ROM:
290 case EXT_CC_READ_OPTION_ROM_EX:
291 case EXT_CC_UPDATE_OPTION_ROM:
292 case EXT_CC_UPDATE_OPTION_ROM_EX:
293 case EXT_CC_GET_VPD:
294 case EXT_CC_SET_VPD:
295 case EXT_CC_LOOPBACK:
296 case EXT_CC_GET_FCACHE:
297 case EXT_CC_GET_FCACHE_EX:
298 case EXT_CC_HOST_DRVNAME:
299 case EXT_CC_GET_SFP_DATA:
300 case EXT_CC_PORT_PARAM:
301 case EXT_CC_GET_PCI_DATA:
302 case EXT_CC_GET_FWEXTTRACE:
303 case EXT_CC_GET_FWFCETRACE:
304 case EXT_CC_GET_VP_CNT_ID:
305 case EXT_CC_VPORT_CMD:
306 case EXT_CC_ACCESS_FLASH:
307 case EXT_CC_RESET_FW:
308 case EXT_CC_MENLO_MANAGE_INFO:
309 rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
310 break;
311 default:
312 /* function not supported. */
313 EL(ha, "function=%d not supported\n", cmd);
314 rval = ENOPROTOOPT;
315 }
316
317 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
318
319 return (rval);
320 }
321
322 /*
323 * ql_sdm_ioctl
324 * Provides ioctl functions for SAN/Device Management functions
325 * AKA External Ioctl functions.
326 *
327 * Input:
328 * ha: adapter state pointer.
329 * ioctl_code: ioctl function to perform
330 * arg: Pointer to EXT_IOCTL cmd data in application land.
331 * mode: flags
332 *
333 * Returns:
334 * 0: success
335 * ENOMEM: Alloc of local EXT_IOCTL struct failed.
336 * EFAULT: Copyin of caller's EXT_IOCTL struct failed or
337 * copyout of EXT_IOCTL status info failed.
338 * EINVAL: Signature or version of caller's EXT_IOCTL invalid.
339 * EBUSY: Device busy
340 *
341 * Context:
342 * Kernel context.
343 */
static int
ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
{
	EXT_IOCTL		*cmd;
	int			rval;
	ql_adapter_state_t	*vha;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Copy argument structure (EXT_IOCTL) from application land. */
	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
	    ql_validate_signature)) != 0) {
		/*
		 * a non-zero value at this time means a problem getting
		 * the requested information from application land, just
		 * return the error code and hope for the best.
		 */
		EL(ha, "failed, sdm_setup\n");
		return (rval);
	}

	/*
	 * Map the physical ha ptr (which the ioctl is called with)
	 * to the virtual ha that the caller is addressing.
	 */
	if (ha->flags & VP_ENABLED) {
		/* Check that it is within range. */
		if (cmd->HbaSelect > (CFG_IST(ha, CFG_CTRL_2422) ?
		    MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
			EL(ha, "Invalid HbaSelect vp index: %xh\n",
			    cmd->HbaSelect);
			cmd->Status = EXT_STATUS_INVALID_VPINDEX;
			cmd->ResponseLen = 0;
			return (EFAULT);
		}
		/*
		 * Special case: HbaSelect == 0 is physical ha
		 */
		if (cmd->HbaSelect != 0) {
			/* Linear search of the virtual-port chain. */
			vha = ha->vp_next;
			while (vha != NULL) {
				if (vha->vp_index == cmd->HbaSelect) {
					ha = vha;
					break;
				}
				vha = vha->vp_next;
			}
			/*
			 * The specified vp index may be valid(within range)
			 * but it's not in the list. Currently this is all
			 * we can say.
			 */
			if (vha == NULL) {
				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
				cmd->ResponseLen = 0;
				return (EFAULT);
			}
		}
	}

	/*
	 * If driver is suspended, stalled, or powered down rtn BUSY
	 */
	if (ha->flags & ADAPTER_SUSPENDED ||
	    ha->task_daemon_flags & DRIVER_STALL ||
	    ha->power_level != PM_LEVEL_D0) {
		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
		    "driver suspended" :
		    (ha->task_daemon_flags & DRIVER_STALL ? "driver stalled" :
		    "FCA powered down"));
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		rval = EBUSY;

		/* Return results to caller */
		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
			EL(ha, "failed, sdm_return\n");
			rval = EFAULT;
		}
		return (rval);
	}

	/* Dispatch to the per-command handler. */
	switch (ioctl_code) {
	case EXT_CC_QUERY_OS:
		ql_query(ha, cmd, mode);
		break;
	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
		ql_fcct(ha, cmd, mode);
		break;
	/*
	 * NOTE(review): a run of case labels appears to be missing from
	 * this view between the FCCT passthru case above and the call
	 * below; as shown, the next statement has no case label of its
	 * own (presumably EXT_CC_MENLO_GET_FW_VERSION) - confirm against
	 * the full source.
	 */
		ql_menlo_get_fw_version(ha, cmd, mode);
		break;
	case EXT_CC_MENLO_UPDATE_FW:
		ql_menlo_update_fw(ha, cmd, mode);
		break;
	case EXT_CC_MENLO_MANAGE_INFO:
		ql_menlo_manage_info(ha, cmd, mode);
		break;
	case EXT_CC_GET_VP_CNT_ID_OS:
		ql_get_vp_cnt_id(ha, cmd, mode);
		break;
	case EXT_CC_VPORT_CMD_OS:
		ql_vp_ioctl(ha, cmd, mode);
		break;
	case EXT_CC_ACCESS_FLASH_OS:
		ql_access_flash(ha, cmd, mode);
		break;
	case EXT_CC_RESET_FW_OS:
		ql_reset_cmd(ha, cmd);
		break;
	default:
		/* function not supported. */
		EL(ha, "failed, function not supported=%d\n", ioctl_code);

		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		break;
	}

	/* Return results to caller */
	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
		EL(ha, "failed, sdm_return\n");
		return (EFAULT);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (0);
}
547
548 /*
549 * ql_sdm_setup
550 * Make a local copy of the EXT_IOCTL struct and validate it.
551 *
552 * Input:
553 * ha: adapter state pointer.
554 * cmd_struct: Pointer to location to store local adrs of EXT_IOCTL.
555 * arg: Address of application EXT_IOCTL cmd data
556 * mode: flags
557 * val_sig: Pointer to a function to validate the ioctl signature.
558 *
559 * Returns:
560 * 0: success
561 * EFAULT: Copy in error of application EXT_IOCTL struct.
562 * EINVAL: Invalid version, signature.
563 * ENOMEM: Local allocation of EXT_IOCTL failed.
564 *
565 * Context:
566 * Kernel context.
567 */
568 static int
569 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
570 int mode, boolean_t (*val_sig)(EXT_IOCTL *))
571 {
572 int rval;
573 EXT_IOCTL *cmd;
574
575 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
576
577 /* Allocate local memory for EXT_IOCTL. */
578 *cmd_struct = NULL;
579 cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
580 if (cmd == NULL) {
581 EL(ha, "failed, kmem_zalloc\n");
582 return (ENOMEM);
583 }
584 /* Get argument structure. */
585 rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
586 if (rval != 0) {
587 EL(ha, "failed, ddi_copyin\n");
588 rval = EFAULT;
589 } else {
590 /*
591 * Check signature and the version.
592 * If either are not valid then neither is the
593 * structure so don't attempt to return any error status
594 * because we can't trust what caller's arg points to.
595 * Just return the errno.
596 */
597 if (val_sig(cmd) == 0) {
598 EL(ha, "failed, signature\n");
599 rval = EINVAL;
600 } else if (cmd->Version > EXT_VERSION) {
601 EL(ha, "failed, version\n");
602 rval = EINVAL;
603 }
604 }
605
606 if (rval == 0) {
607 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
608 *cmd_struct = cmd;
609 cmd->Status = EXT_STATUS_OK;
610 cmd->DetailStatus = 0;
611 } else {
612 kmem_free((void *)cmd, sizeof (EXT_IOCTL));
613 }
614
615 return (rval);
616 }
617
618 /*
619 * ql_validate_signature
620 * Validate the signature string for an external ioctl call.
621 *
622 * Input:
623 * sg: Pointer to EXT_IOCTL signature to validate.
624 *
625 * Returns:
626 * B_TRUE: Signature is valid.
627 * B_FALSE: Signature is NOT valid.
628 *
629 * Context:
630 * Kernel context.
631 */
632 static boolean_t
633 ql_validate_signature(EXT_IOCTL *cmd_struct)
634 {
635 /*
636 * Check signature.
637 *
638 * If signature is not valid then neither is the rest of
639 * the structure (e.g., can't trust it), so don't attempt
640 * to return any error status other than the errno.
641 */
642 if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
643 QL_PRINT_2(CE_CONT, "failed,\n");
644 return (B_FALSE);
645 }
646
647 return (B_TRUE);
648 }
649
650 /*
651 * ql_sdm_return
652 * Copies return data/status to application land for
653 * ioctl call using the SAN/Device Management EXT_IOCTL call interface.
654 *
655 * Input:
656 * ha: adapter state pointer.
657 * cmd: Pointer to kernel copy of requestor's EXT_IOCTL struct.
658 * ioctl_code: ioctl function to perform
659 * arg: EXT_IOCTL cmd data in application land.
660 * mode: flags
661 *
662 * Returns:
663 * 0: success
664 * EFAULT: Copy out error.
665 *
666 * Context:
667 * Kernel context.
668 */
669 /* ARGSUSED */
670 static int
671 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
672 {
673 int rval = 0;
674
675 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
676
677 rval |= ddi_copyout((void *)&cmd->ResponseLen,
678 (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
679 mode);
680
681 rval |= ddi_copyout((void *)&cmd->Status,
682 (void *)&(((EXT_IOCTL*)arg)->Status),
683 sizeof (cmd->Status), mode);
684 rval |= ddi_copyout((void *)&cmd->DetailStatus,
685 (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
686 sizeof (cmd->DetailStatus), mode);
687
688 kmem_free((void *)cmd, sizeof (EXT_IOCTL));
689
690 if (rval != 0) {
691 /* Some copyout operation failed */
692 EL(ha, "failed, ddi_copyout\n");
693 return (EFAULT);
694 }
695
696 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
697
698 return (0);
699 }
700
701 /*
702 * ql_query
703 * Performs all EXT_CC_QUERY functions.
704 *
705 * Input:
706 * ha: adapter state pointer.
707 * cmd: Local EXT_IOCTL cmd struct pointer.
708 * mode: flags.
709 *
710 * Returns:
711 * None, request status indicated in cmd->Status.
712 *
713 * Context:
714 * Kernel context.
715 */
716 static void
717 ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
718 {
719 QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
720 cmd->SubCode);
721
722 /* case off on command subcode */
723 switch (cmd->SubCode) {
724 case EXT_SC_QUERY_HBA_NODE:
725 ql_qry_hba_node(ha, cmd, mode);
726 break;
727 case EXT_SC_QUERY_HBA_PORT:
728 ql_qry_hba_port(ha, cmd, mode);
729 break;
730 case EXT_SC_QUERY_DISC_PORT:
731 ql_qry_disc_port(ha, cmd, mode);
732 break;
733 case EXT_SC_QUERY_DISC_TGT:
734 ql_qry_disc_tgt(ha, cmd, mode);
735 break;
736 case EXT_SC_QUERY_DRIVER:
737 ql_qry_driver(ha, cmd, mode);
738 break;
739 case EXT_SC_QUERY_FW:
740 ql_qry_fw(ha, cmd, mode);
741 break;
742 case EXT_SC_QUERY_CHIP:
743 ql_qry_chip(ha, cmd, mode);
744 break;
745 case EXT_SC_QUERY_CNA_PORT:
746 ql_qry_cna_port(ha, cmd, mode);
747 break;
748 case EXT_SC_QUERY_ADAPTER_VERSIONS:
749 ql_qry_adapter_versions(ha, cmd, mode);
750 break;
751 case EXT_SC_QUERY_DISC_LUN:
752 default:
753 /* function not supported. */
754 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
755 EL(ha, "failed, Unsupported Subcode=%xh\n",
756 cmd->SubCode);
757 break;
758 }
759
760 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
761 }
762
763 /*
764 * ql_qry_hba_node
765 * Performs EXT_SC_QUERY_HBA_NODE subfunction.
766 *
767 * Input:
768 * ha: adapter state pointer.
769 * cmd: EXT_IOCTL cmd struct pointer.
770 * mode: flags.
771 *
772 * Returns:
773 * None, request status indicated in cmd->Status.
774 *
775 * Context:
776 * Kernel context.
777 */
static void
ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_HBA_NODE	tmp_node = {0};
	uint_t		len;
	caddr_t		bufp;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* The caller's buffer must hold a complete EXT_HBA_NODE. */
	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
		    "Len=%xh\n", cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* fill in the values */

	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
	    EXT_DEF_WWN_NAME_SIZE);

	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");

	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);

	/* Serial number is taken from the low three bytes of the WWNN. */
	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);

	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);

	/* SBUS cards append the FPGA revision to the driver version. */
	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		size_t		verlen;
		uint16_t	w;
		char		*tmpptr;

		verlen = strlen((char *)(tmp_node.DriverVersion));
		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
			EL(ha, "failed, No room for fpga version string\n");
		} else {
			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
			    (uint16_t *)
			    (ha->sbus_fpga_iobase + FPGA_REVISION));

			tmpptr = (char *)&(tmp_node.DriverVersion[verlen+1]);
			if (tmpptr == NULL) {
				EL(ha, "Unable to insert fpga version str\n");
			} else {
				(void) sprintf(tmpptr, "%d.%d",
				    ((w & 0xf0) >> 4), (w & 0x0f));
				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
			}
		}
	}

	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version);

	/* Pre-24xx parts get a firmware-attribute suffix on the version. */
	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		switch (ha->fw_attributes) {
		case FWATTRIB_EF:
			(void) strcat((char *)(tmp_node.FWVersion), " EF");
			break;
		case FWATTRIB_TP:
			(void) strcat((char *)(tmp_node.FWVersion), " TP");
			break;
		case FWATTRIB_IP:
			(void) strcat((char *)(tmp_node.FWVersion), " IP");
			break;
		case FWATTRIB_IPX:
			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
			break;
		case FWATTRIB_FL:
			(void) strcat((char *)(tmp_node.FWVersion), " FL");
			break;
		case FWATTRIB_FPX:
			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
			break;
		default:
	/*
	 * NOTE(review): the source as shown jumps from the "default:"
	 * label above straight into the FCode-version logic below; the
	 * statements that close this switch (and the enclosing if) appear
	 * to be missing from this view - confirm against the full source.
	 */
	/* FCode version. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
	    (int *)&len) == DDI_PROP_SUCCESS) {
		if (len < EXT_DEF_MAX_STR_SIZE) {
			bcopy(bufp, tmp_node.OptRomVersion, len);
		} else {
			/* Truncate and NUL-terminate an oversize property. */
			bcopy(bufp, tmp_node.OptRomVersion,
			    EXT_DEF_MAX_STR_SIZE - 1);
			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
			    '\0';
		}
		kmem_free(bufp, len);
	} else {
		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
	}
	tmp_node.PortCount = 1;
	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;

	/* Deliver the filled-in node structure to the caller. */
	if (ddi_copyout((void *)&tmp_node,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_HBA_NODE), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
893
894 /*
895 * ql_qry_hba_port
896 * Performs EXT_SC_QUERY_HBA_PORT subfunction.
897 *
898 * Input:
899 * ha: adapter state pointer.
900 * cmd: EXT_IOCTL cmd struct pointer.
901 * mode: flags.
902 *
903 * Returns:
904 * None, request status indicated in cmd->Status.
905 *
906 * Context:
907 * Kernel context.
908 */
static void
ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	ql_mbx_data_t	mr;
	EXT_HBA_PORT	tmp_port = {0};
	int		rval;
	uint16_t	port_cnt, tgt_cnt, index;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* The caller's buffer must hold a complete EXT_HBA_PORT. */
	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* fill in the values */

	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
	    EXT_DEF_WWN_NAME_SIZE);
	tmp_port.Id[0] = 0;
	tmp_port.Id[1] = ha->d_id.b.domain;
	tmp_port.Id[2] = ha->d_id.b.area;
	tmp_port.Id[3] = ha->d_id.b.al_pa;

	/* For now we are initiator only driver */
	/*
	 * NOTE(review): a run of lines appears to be missing from this
	 * view here (the code that sets tmp_port.Type/State and reads the
	 * firmware state into "mr"); the orphaned comment terminator below
	 * is left untouched - confirm against the full source.
	 */
	*/
	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;

	/* Only report a speed when the HBA is in the OK state. */
	if (tmp_port.State == EXT_DEF_HBA_OK) {
		switch (ha->iidma_rate) {
		case IIDMA_RATE_1GB:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
			break;
		case IIDMA_RATE_2GB:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_2GBIT;
			break;
		case IIDMA_RATE_4GB:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_4GBIT;
			break;
		case IIDMA_RATE_8GB:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_8GBIT;
			break;
		case IIDMA_RATE_10GB:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_10GBIT;
			break;
		default:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
			/*
			 * NOTE(review): "mr" appears unset in this view;
			 * it is presumably filled by the missing code
			 * above - verify against the full source.
			 */
			EL(ha, "failed, data rate=%xh\n", mr.mb[1]);
			break;
		}
	}

	/* Report all supported port speeds */
	if (CFG_IST(ha, CFG_CTRL_25XX)) {
		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
		    EXT_DEF_PORTSPEED_1GBIT);
		/*
		 * Correct supported speeds based on type of
		 * sfp that is present
		 */
		switch (ha->sfp_stat) {
		case 1:
			/* no sfp detected */
			break;
		case 2:
		case 4:
			/* 4GB sfp */
			tmp_port.PortSupportedSpeed &=
			    ~EXT_DEF_PORTSPEED_8GBIT;
			break;
		case 3:
		case 5:
			/* 8GB sfp */
			tmp_port.PortSupportedSpeed &=
			    ~EXT_DEF_PORTSPEED_1GBIT;
			break;
		default:
			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
			break;

		}
	} else if (CFG_IST(ha, CFG_CTRL_8081)) {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
	} else if (CFG_IST(ha, CFG_CTRL_2422)) {
		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
	} else if (CFG_IST(ha, CFG_CTRL_2300)) {
		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
		    EXT_DEF_PORTSPEED_1GBIT);
	} else if (CFG_IST(ha, CFG_CTRL_6322)) {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
	} else if (CFG_IST(ha, CFG_CTRL_2200)) {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
	} else {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
	}
	tmp_port.LinkState2 = LSB(ha->sfp_stat);
	port_cnt = 0;
	tgt_cnt = 0;

	/* Count discovered ports, and those that are not initiators. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;
			}

			port_cnt++;
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				tgt_cnt++;
			}
		}
	}

	tmp_port.DiscPortCount = port_cnt;
	tmp_port.DiscTargetCount = tgt_cnt;

	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;

	/* Deliver the filled-in port structure to the caller. */
	rval = ddi_copyout((void *)&tmp_port,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_HBA_PORT), mode);
	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
		QL_PRINT_9(CE_CONT, "(%d): done, ports=%d, targets=%d\n",
		    ha->instance, port_cnt, tgt_cnt);
	}
}
1070
1071 /*
1072 * ql_qry_disc_port
1073 * Performs EXT_SC_QUERY_DISC_PORT subfunction.
1074 *
1075 * Input:
1076 * ha: adapter state pointer.
1077 * cmd: EXT_IOCTL cmd struct pointer.
1078 * mode: flags.
1079 *
1080 * cmd->Instance = Port instance in fcport chain.
1081 *
1082 * Returns:
1083 * None, request status indicated in cmd->Status.
1084 *
1085 * Context:
1086 * Kernel context.
1087 */
static void
ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_PORT	tmp_port = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* The caller's buffer must hold a complete EXT_DISC_PORT. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* Walk the device lists until the cmd->Instance'th valid port. */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id)) {
				continue;
			}
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_port.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_port.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;
	/*
	 * NOTE(review): lines appear to be missing from this view here -
	 * the early return that should close the not-found block above,
	 * and the code that fills in the remaining tmp_port fields -
	 * confirm against the full source.
	 */

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_port.Status = 0;
	tmp_port.Bus = 0;  /* Hard-coded for Solaris */

	bcopy(tq->port_name, &tmp_port.TargetId, 8);

	/* Deliver the filled-in structure to the caller. */
	if (ddi_copyout((void *)&tmp_port,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_PORT), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1177
1178 /*
1179 * ql_qry_disc_tgt
1180 * Performs EXT_SC_QUERY_DISC_TGT subfunction.
1181 *
1182 * Input:
1183 * ha: adapter state pointer.
1184 * cmd: EXT_IOCTL cmd struct pointer.
1185 * mode: flags.
1186 *
1187 * cmd->Instance = Port instance in fcport chain.
1188 *
1189 * Returns:
1190 * None, request status indicated in cmd->Status.
1191 *
1192 * Context:
1193 * Kernel context.
1194 */
static void
ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_TARGET	tmp_tgt = {0};	/* response staged locally */
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;	/* running count of qualifying targets */

	QL_PRINT_9(CE_CONT, "(%d): started, target=%d\n", ha->instance,
	    cmd->Instance);

	/* The caller's buffer must hold a complete EXT_DISC_TARGET. */
	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* Scan port list for requested target and fill in the values */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/*
			 * Unlike the port query, initiator-mode devices are
			 * excluded here -- only real targets count.
			 */
			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
			    tq->flags & TQF_INITIATOR_DEVICE) {
				continue;
			}
			/* Not the requested instance yet; keep counting. */
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_tgt.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_tgt.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
	/* Classify the target: behind a fabric switch or direct-attached. */
	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_tgt.Status = 0;

	tmp_tgt.Bus = 0;  /* Hard-coded for Solaris. */

	/* TargetId carries the 8-byte WWPN on this platform. */
	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);

	if (ddi_copyout((void *)&tmp_tgt,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_TARGET), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1289
1290 /*
1291 * ql_qry_fw
1292 * Performs EXT_SC_QUERY_FW subfunction.
1293 *
1294 * Input:
1295 * ha: adapter state pointer.
1296 * cmd: EXT_IOCTL cmd struct pointer.
1297 * mode: flags.
1298 *
1299 * Returns:
1300 * None, request status indicated in cmd->Status.
1301 *
1302 * Context:
1303 * Kernel context.
1304 */
1305 static void
1306 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1307 {
1308 EXT_FW fw_info = {0};
1309
1310 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1311
1312 if (cmd->ResponseLen < sizeof (EXT_FW)) {
1313 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1314 cmd->DetailStatus = sizeof (EXT_FW);
1315 EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1316 cmd->ResponseLen);
1317 cmd->ResponseLen = 0;
1318 return;
1319 }
1320
1321 (void) sprintf((char *)(fw_info.Version), "%d.%02d.%02d",
1322 ha->fw_major_version, ha->fw_minor_version,
1323 ha->fw_subminor_version);
1324
1325 fw_info.Attrib = ha->fw_attributes;
1326
1327 if (ddi_copyout((void *)&fw_info,
1328 (void *)(uintptr_t)(cmd->ResponseAdr),
1329 sizeof (EXT_FW), mode) != 0) {
1330 cmd->Status = EXT_STATUS_COPY_ERR;
1331 cmd->ResponseLen = 0;
1332 EL(ha, "failed, ddi_copyout\n");
1333 return;
1334 } else {
1335 cmd->ResponseLen = sizeof (EXT_FW);
1336 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1337 }
1338 }
1339
1340 /*
1341 * ql_qry_chip
1342 * Performs EXT_SC_QUERY_CHIP subfunction.
1343 *
1344 * Input:
1345 * ha: adapter state pointer.
1346 * cmd: EXT_IOCTL cmd struct pointer.
1347 * mode: flags.
1348 *
1349 * Returns:
1350 * None, request status indicated in cmd->Status.
1351 *
1352 * Context:
1353 * Kernel context.
1354 */
1355 static void
1356 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1357 {
1358 EXT_CHIP chip = {0};
1359
1360 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1361
1362 if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1363 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1364 cmd->DetailStatus = sizeof (EXT_CHIP);
1365 EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1366 cmd->ResponseLen);
1367 cmd->ResponseLen = 0;
1368 return;
1369 }
1370
1371 chip.VendorId = ha->ven_id;
1372 chip.DeviceId = ha->device_id;
1373 chip.SubVendorId = ha->subven_id;
1374 chip.SubSystemId = ha->subsys_id;
1375 chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1376 chip.IoAddrLen = 0x100;
1377 chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1378 chip.MemAddrLen = 0x100;
1379 chip.ChipRevID = ha->rev_id;
1380 if (ha->flags & FUNCTION_1) {
1381 chip.FuncNo = 1;
1382 }
1383
1384 if (ddi_copyout((void *)&chip,
1385 (void *)(uintptr_t)(cmd->ResponseAdr),
1386 sizeof (EXT_CHIP), mode) != 0) {
1387 cmd->Status = EXT_STATUS_COPY_ERR;
1388 cmd->ResponseLen = 0;
1389 EL(ha, "failed, ddi_copyout\n");
1390 } else {
1391 cmd->ResponseLen = sizeof (EXT_CHIP);
1392 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1393 }
1394 }
1395
1396 /*
1397 * ql_qry_driver
1398 * Performs EXT_SC_QUERY_DRIVER subfunction.
1399 *
1400 * Input:
1401 * ha: adapter state pointer.
1402 * cmd: EXT_IOCTL cmd struct pointer.
1403 * mode: flags.
1404 *
1405 * Returns:
1406 * None, request status indicated in cmd->Status.
1407 *
1408 * Context:
1409 * Kernel context.
1410 */
1411 static void
1412 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1413 {
1414 EXT_DRIVER qd = {0};
1415
1416 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1417
1418 if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1419 cmd->Status = EXT_STATUS_DATA_OVERRUN;
1420 cmd->DetailStatus = sizeof (EXT_DRIVER);
1421 EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1422 cmd->ResponseLen);
1423 cmd->ResponseLen = 0;
1424 return;
1425 }
1426
1427 (void) strcpy((void *)&qd.Version[0], QL_VERSION);
1428 qd.NumOfBus = 1; /* Fixed for Solaris */
1429 qd.TargetsPerBus = (uint16_t)
1430 (CFG_IST(ha, (CFG_CTRL_24258081 | CFG_EXT_FW_INTERFACE)) ?
1431 MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1432 qd.LunsPerTarget = 2030;
1433 qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1434 qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1435
1436 if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1437 sizeof (EXT_DRIVER), mode) != 0) {
1438 cmd->Status = EXT_STATUS_COPY_ERR;
1439 cmd->ResponseLen = 0;
1440 EL(ha, "failed, ddi_copyout\n");
1441 } else {
1442 cmd->ResponseLen = sizeof (EXT_DRIVER);
1443 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1444 }
1445 }
1446
1447 /*
1448 * ql_fcct
1449 * IOCTL management server FC-CT passthrough.
1450 *
1451 * Input:
1452 * ha: adapter state pointer.
1453 * cmd: User space CT arguments pointer.
1454 * mode: flags.
1455 *
1456 * Returns:
1457 * None, request status indicated in cmd->Status.
1458 *
1459 * Context:
1460 * Kernel context.
1461 */
static void
ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_mbx_iocb_t		*pkt;
	ql_mbx_data_t		mr;
	dma_mem_t		*dma_mem;
	caddr_t			pld;	/* payload area following the IOCB */
	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
	int			rval;
	ql_ct_iu_preamble_t	*ct;
	ql_xioctl_t		*xp = ha->xioctl;
	ql_tgt_t		tq;
	uint16_t		comp_status, loop_id;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Get CT argument structure. */
	/* CT passthrough is only meaningful on a switched fabric. */
	if ((ha->topology & QL_SNS_CONNECTION) == 0) {
		EL(ha, "failed, No switch\n");
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->ResponseLen = 0;
		return;
	}

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Login management server device. */
	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
		/* Well-known management server D_ID 0xFFFFFA. */
		tq.d_id.b.al_pa = 0xfa;
		tq.d_id.b.area = 0xff;
		tq.d_id.b.domain = 0xff;
		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
		    MANAGEMENT_SERVER_24XX_LOOP_ID :
		    MANAGEMENT_SERVER_LOOP_ID);
		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, server login\n");
			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
			cmd->ResponseLen = 0;
			return;
		} else {
			/* Remember the login so it is only done once. */
			xp->flags |= QL_MGMT_SERVER_LOGIN;
		}
	}

	QL_PRINT_9(CE_CONT, "(%d): cmd\n", ha->instance);
	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));

	/* Allocate a DMA Memory Descriptor */
	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
	if (dma_mem == NULL) {
		/* NOTE(review): unreachable with KM_SLEEP; kept as-is. */
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}
	/* Determine maximum buffer size. */
	/* One buffer serves both directions; size it for the larger leg. */
	if (cmd->RequestLen < cmd->ResponseLen) {
		pld_byte_cnt = cmd->ResponseLen;
	} else {
		pld_byte_cnt = cmd->RequestLen;
	}

	/* Allocate command block. */
	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
		/*
		 * NOTE(review): this error path returns without freeing
		 * dma_mem (a leak if ever taken; KM_SLEEP normally blocks
		 * rather than fail) -- confirm intent.
		 */
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}
	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);

	/* Get command payload data. */
	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
	    cmd->RequestLen, mode) != cmd->RequestLen) {
		EL(ha, "failed, get_buffer_data\n");
		kmem_free(pkt, pkt_size);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		kmem_free(pkt, pkt_size);
		kmem_free(dma_mem, sizeof (dma_mem_t));
		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
		cmd->ResponseLen = 0;
		return;
	}

	/* Copy out going payload data to IOCB DMA buffer. */
	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);

	/* Sync IOCB DMA buffer. */
	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Setup IOCB
	 */
	ct = (ql_ct_iu_preamble_t *)pld;
	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		/* ISP24xx-family: build a CT passthrough IOCB. */
		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
		pkt->ms24.entry_count = 1;

		pkt->ms24.vp_index = ha->vp_index;

		/* Set loop ID */
		/* Directory-server frames go to the name-server handle. */
		pkt->ms24.n_port_hdl = (uint16_t)
		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
		    LE_16(SNS_24XX_HDL) :
		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));

		/* Set ISP command timeout. */
		pkt->ms24.timeout = LE_16(120);

		/* Set cmd/response data segment counts. */
		pkt->ms24.cmd_dseg_count = LE_16(1);
		pkt->ms24.resp_dseg_count = LE_16(1);

		/* Load ct cmd byte count. */
		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);

		/* Load ct rsp byte count. */
		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);

		long_ptr = (uint32_t *)&pkt->ms24.dseg_0_address;

		/* Load MS command entry data segments. */
		*long_ptr++ = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		*long_ptr++ = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));

		/* Load MS response entry data segments. */
		/* Same DMA buffer is reused for the response leg. */
		*long_ptr++ = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		*long_ptr++ = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);

		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
		if (comp_status == CS_DATA_UNDERRUN) {

		/* Set loop ID */
		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
			pkt->ms.loop_id_l = LSB(loop_id);
			pkt->ms.loop_id_h = MSB(loop_id);
		} else {
			/* Legacy interface carries the ID in the high byte. */
			pkt->ms.loop_id_h = LSB(loop_id);
		}

		/* Set ISP command timeout. */
		pkt->ms.timeout = LE_16(120);

		/* Set data segment counts. */
		pkt->ms.cmd_dseg_count_l = 1;
		pkt->ms.total_dseg_count = LE_16(2);

		/* Response total byte count. */
		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
		pkt->ms.dseg_1_length = LE_32(cmd->ResponseLen);

		/* Command total byte count. */
		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
		pkt->ms.dseg_0_length = LE_32(cmd->RequestLen);

		/* Load command/response data segments. */
		pkt->ms.dseg_0_address[0] = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		pkt->ms.dseg_0_address[1] = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
		pkt->ms.dseg_1_address[0] = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		pkt->ms.dseg_1_address[1] = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));

		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
		if (comp_status == CS_DATA_UNDERRUN) {
			/* Underrun with zero residual is a full success. */
			if ((BE_16(ct->max_residual_size)) == 0) {
				comp_status = CS_COMPLETE;
			}
		}
		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
			EL(ha, "failed, I/O timeout or "
			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
			kmem_free(pkt, pkt_size);
			ql_free_dma_resource(ha, dma_mem);
			kmem_free(dma_mem, sizeof (dma_mem_t));
			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
			cmd->ResponseLen = 0;
			return;
		}
	}

	/* Sync in coming DMA buffer. */
	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
	/* Copy in coming DMA data. */
	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
	    DDI_DEV_AUTOINCR);

	/* Copy response payload from DMA buffer to application. */
	if (cmd->ResponseLen != 0) {
		QL_PRINT_9(CE_CONT, "(%d): ResponseLen=%d\n", ha->instance,
		    cmd->ResponseLen);
		QL_DUMP_9(pld, 8, cmd->ResponseLen);

		/* Send response payload. */
		if (ql_send_buffer_data(pld,
		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
			EL(ha, "failed, send_buffer_data\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->ResponseLen = 0;
		}
	}

	kmem_free(pkt, pkt_size);
	ql_free_dma_resource(ha, dma_mem);
	kmem_free(dma_mem, sizeof (dma_mem_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
1729
1730 /*
1731 * ql_aen_reg
1732 * IOCTL management server Asynchronous Event Tracking Enable/Disable.
1733 *
1734 * Input:
1735 * ha: adapter state pointer.
1736 * cmd: EXT_IOCTL cmd struct pointer.
1737 * mode: flags.
1738 *
1739 * Returns:
1740 * None, request status indicated in cmd->Status.
1741 *
1742 * Context:
1743 * Kernel context.
1744 */
1745 static void
1746 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1747 {
1748 EXT_REG_AEN reg_struct;
1749 int rval = 0;
1750 ql_xioctl_t *xp = ha->xioctl;
1751
1752 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
1753
1754 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, ®_struct,
1755 cmd->RequestLen, mode);
1756
1757 if (rval == 0) {
1758 if (reg_struct.Enable) {
1759 xp->flags |= QL_AEN_TRACKING_ENABLE;
1760 } else {
1761 xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1762 /* Empty the queue. */
1763 INTR_LOCK(ha);
1764 xp->aen_q_head = 0;
1765 xp->aen_q_tail = 0;
1766 INTR_UNLOCK(ha);
1767 }
1768 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
1769 } else {
1770 cmd->Status = EXT_STATUS_COPY_ERR;
1771 EL(ha, "failed, ddi_copyin\n");
1772 }
1773 }
1774
1775 /*
1776 * ql_aen_get
1777 * IOCTL management server Asynchronous Event Record Transfer.
1778 *
1779 * Input:
1780 * ha: adapter state pointer.
1781 * cmd: EXT_IOCTL cmd struct pointer.
1782 * mode: flags.
1783 *
1784 * Returns:
1785 * None, request status indicated in cmd->Status.
1786 *
1787 * Context:
1788 * Kernel context.
1789 */
static void
ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	out_size;	/* bytes actually copied out */
	EXT_ASYNC_EVENT	*tmp_q;		/* driver's AEN ring */
	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];	/* local snapshot */
	uint8_t		i;
	uint8_t		queue_cnt;	/* events captured in the snapshot */
	uint8_t		request_cnt;	/* events the caller's buffer fits */
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Compute the number of events that can be returned */
	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));

	/* Caller must be able to accept the entire queue's worth. */
	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
		    "Len=%xh\n", request_cnt);
		cmd->ResponseLen = 0;
		return;
	}

	/* 1st: Make a local copy of the entire queue content. */
	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
	queue_cnt = 0;

	/* Snapshot under the interrupt lock so the ISR cannot race us. */
	INTR_LOCK(ha);
	i = xp->aen_q_head;


	/* Empty the queue. */
	xp->aen_q_head = 0;
	xp->aen_q_tail = 0;

	INTR_UNLOCK(ha);

	/* 2nd: Now transfer the queue content to user buffer */
	/* Copy the entire queue to user's buffer. */
	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
	if (queue_cnt == 0) {
		/* Nothing queued; report an empty response. */
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&aen[0],
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    out_size, mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = out_size;
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
1861
1862 /*
1863 * ql_enqueue_aen
1864 *
1865 * Input:
1866 * ha: adapter state pointer.
1867 * event_code: async event code of the event to add to queue.
1868 * payload: event payload for the queue.
1869 * INTR_LOCK must be already obtained.
1870 *
1871 * Context:
1872 * Interrupt or Kernel context, no mailbox commands allowed.
1873 */
void
ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
{
	uint8_t			new_entry;	/* index to current entry */
	uint16_t		*mbx;		/* mailbox view of payload */
	EXT_ASYNC_EVENT		*aen_queue;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started, event_code=%d\n", ha->instance,
	    event_code);

	if (xp == NULL) {
		/* xioctl context not initialized; drop the event. */
		QL_PRINT_9(CE_CONT, "(%d): no context\n", ha->instance);
		return;
	}
	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;

	/* A nonzero code in the tail slot means the ring slot is in use. */
	if (aen_queue[xp->aen_q_tail].AsyncEventCode != NULL) {
		/* Need to change queue pointers to make room. */

		/* Increment tail for adding new entry. */
		xp->aen_q_tail++;
		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
			/* wrap around the fixed-size ring */
			xp->aen_q_tail = 0;
		}
		if (xp->aen_q_head == xp->aen_q_tail) {
			/*
			 * We're overwriting the oldest entry, so need to
			 * update the head pointer.
			 */
			xp->aen_q_head++;
			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
				xp->aen_q_head = 0;
			/* domain */
			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
			    LSB(mbx[1]);
			/* save in big endian */
			BIG_ENDIAN_24(&aen_queue[new_entry].
			    Payload.RSCN.RSCNInfo[0]);

			aen_queue[new_entry].Payload.RSCN.AddrFormat =
			    MSB(mbx[1]);

			break;
		default:
			/* Not supported */
			EL(ha, "failed, event code not supported=%xh\n",
			    event_code);
			/* Zero the slot so it reads as empty. */
			aen_queue[new_entry].AsyncEventCode = 0;
			break;
		}
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
1954
1955 /*
1956 * ql_scsi_passthru
1957 * IOCTL SCSI passthrough.
1958 *
1959 * Input:
1960 * ha: adapter state pointer.
1961 * cmd: User space SCSI command pointer.
1962 * mode: flags.
1963 *
1964 * Returns:
1965 * None, request status indicated in cmd->Status.
1966 *
1967 * Context:
1968 * Kernel context.
1969 */
1970 static void
1971 ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1972 {
1996 size_t resid; /* Residual */
1997 uint8_t *cdbp; /* Requestor's CDB */
1998 uint8_t *u_sense; /* Requestor's sense buffer */
1999 uint8_t cdb_len; /* Requestor's CDB length */
2000 uint8_t direction;
2001 } scsi_req;
2002
2003 struct {
2004 uint8_t *rsp_info;
2005 uint8_t *req_sense_data;
2006 uint32_t residual_length;
2007 uint32_t rsp_info_length;
2008 uint32_t req_sense_length;
2009 uint16_t comp_status;
2010 uint8_t state_flags_l;
2011 uint8_t state_flags_h;
2012 uint8_t scsi_status_l;
2013 uint8_t scsi_status_h;
2014 } sts;
2015
2016 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2017
2018 /* Verify Sub Code and set cnt to needed request size. */
2019 if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2020 pld_size = sizeof (EXT_SCSI_PASSTHRU);
2021 } else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
2022 pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
2023 } else {
2024 EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
2025 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
2026 cmd->ResponseLen = 0;
2027 return;
2028 }
2029
2030 dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
2031 if (dma_mem == NULL) {
2032 EL(ha, "failed, kmem_zalloc\n");
2033 cmd->Status = EXT_STATUS_NO_MEMORY;
2034 cmd->ResponseLen = 0;
2035 return;
2036 }
2052 cmd->ResponseLen = 0;
2053 return;
2054 }
2055
2056 /*
2057 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
2058 * request data structure.
2059 */
2060 if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
2061 scsi_req.lun = sp_req->TargetAddr.Lun;
2062 scsi_req.sense_length = sizeof (sp_req->SenseData);
2063 scsi_req.cdbp = &sp_req->Cdb[0];
2064 scsi_req.cdb_len = sp_req->CdbLength;
2065 scsi_req.direction = sp_req->Direction;
2066 usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2067 scsi_req.u_sense = &usp_req->SenseData[0];
2068 cmd->DetailStatus = EXT_DSTATUS_TARGET;
2069
2070 qlnt = QLNT_PORT;
2071 name = (uint8_t *)&sp_req->TargetAddr.Target;
2072 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, Target=%lld\n",
2073 ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
2074 tq = ql_find_port(ha, name, qlnt);
2075 } else {
2076 /*
2077 * Must be FC PASSTHRU, verified above.
2078 */
2079 if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
2080 qlnt = QLNT_PORT;
2081 name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
2082 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2083 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2084 ha->instance, cmd->SubCode, name[0], name[1],
2085 name[2], name[3], name[4], name[5], name[6],
2086 name[7]);
2087 tq = ql_find_port(ha, name, qlnt);
2088 } else if (fc_req->FCScsiAddr.DestType ==
2089 EXT_DEF_DESTTYPE_WWNN) {
2090 qlnt = QLNT_NODE;
2091 name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
2092 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, "
2093 "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2094 ha->instance, cmd->SubCode, name[0], name[1],
2095 name[2], name[3], name[4], name[5], name[6],
2096 name[7]);
2097 tq = ql_find_port(ha, name, qlnt);
2098 } else if (fc_req->FCScsiAddr.DestType ==
2099 EXT_DEF_DESTTYPE_PORTID) {
2100 qlnt = QLNT_PID;
2101 name = &fc_req->FCScsiAddr.DestAddr.Id[0];
2102 QL_PRINT_9(CE_CONT, "(%d): SubCode=%xh, PID="
2103 "%02x%02x%02x\n", ha->instance, cmd->SubCode,
2104 name[0], name[1], name[2]);
2105 tq = ql_find_port(ha, name, qlnt);
2106 } else {
2107 EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
2108 cmd->SubCode, fc_req->FCScsiAddr.DestType);
2109 cmd->Status = EXT_STATUS_INVALID_PARAM;
2110 cmd->ResponseLen = 0;
2111 return;
2112 }
2113 scsi_req.lun = fc_req->FCScsiAddr.Lun;
2114 scsi_req.sense_length = sizeof (fc_req->SenseData);
2115 scsi_req.cdbp = &sp_req->Cdb[0];
2116 scsi_req.cdb_len = sp_req->CdbLength;
2117 ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
2118 scsi_req.u_sense = &ufc_req->SenseData[0];
2119 scsi_req.direction = fc_req->Direction;
2120 }
2121
2122 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
2123 EL(ha, "failed, fc_port not found\n");
2134 }
2135
2136 /* Allocate command block. */
2137 if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
2138 scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
2139 cmd->ResponseLen) {
2140 pld_size = cmd->ResponseLen;
2141 pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
2142 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2143 if (pkt == NULL) {
2144 EL(ha, "failed, kmem_zalloc\n");
2145 cmd->Status = EXT_STATUS_NO_MEMORY;
2146 cmd->ResponseLen = 0;
2147 return;
2148 }
2149 pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);
2150
2151 /* Get DMA memory for the IOCB */
2152 if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
2153 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
2154 cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
2155 "alloc failed", QL_NAME, ha->instance);
2156 kmem_free(pkt, pkt_size);
2157 cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
2158 cmd->ResponseLen = 0;
2159 return;
2160 }
2161
2162 if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
2163 scsi_req.direction = (uint8_t)
2164 (CFG_IST(ha, CFG_CTRL_24258081) ?
2165 CF_RD : CF_DATA_IN | CF_STAG);
2166 } else {
2167 scsi_req.direction = (uint8_t)
2168 (CFG_IST(ha, CFG_CTRL_24258081) ?
2169 CF_WR : CF_DATA_OUT | CF_STAG);
2170 cmd->ResponseLen = 0;
2171
2172 /* Get command payload. */
2173 if (ql_get_buffer_data(
2174 (caddr_t)(uintptr_t)cmd->ResponseAdr,
2175 pld, pld_size, mode) != pld_size) {
2176 EL(ha, "failed, get_buffer_data\n");
2177 cmd->Status = EXT_STATUS_COPY_ERR;
2178
2179 kmem_free(pkt, pkt_size);
2180 ql_free_dma_resource(ha, dma_mem);
2181 kmem_free(dma_mem, sizeof (dma_mem_t));
2182 return;
2183 }
2184
2185 /* Copy out going data to DMA buffer. */
2186 ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
2187 (uint8_t *)dma_mem->bp, pld_size,
2188 DDI_DEV_AUTOINCR);
2189
2190 /* Sync DMA buffer. */
2191 (void) ddi_dma_sync(dma_mem->dma_handle, 0,
2192 dma_mem->size, DDI_DMA_SYNC_FORDEV);
2193 }
2194 } else {
2195 scsi_req.direction = (uint8_t)
2196 (CFG_IST(ha, CFG_CTRL_24258081) ? 0 : CF_STAG);
2197 cmd->ResponseLen = 0;
2198
2199 pkt_size = sizeof (ql_mbx_iocb_t);
2200 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
2201 if (pkt == NULL) {
2202 EL(ha, "failed, kmem_zalloc-2\n");
2203 cmd->Status = EXT_STATUS_NO_MEMORY;
2204 return;
2205 }
2206 pld = NULL;
2207 pld_size = 0;
2208 }
2209
2210 /* retries = ha->port_down_retry_count; */
2211 retries = 1;
2212 cmd->Status = EXT_STATUS_OK;
2213 cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;
2214
2215 QL_PRINT_9(CE_CONT, "(%d): SCSI cdb\n", ha->instance);
2216 QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);
2217
2218 do {
2219 if (DRIVER_SUSPENDED(ha)) {
2220 sts.comp_status = CS_LOOP_DOWN_ABORT;
2221 break;
2222 }
2223
2224 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2225 pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
2226 pkt->cmd24.entry_count = 1;
2227
2228 /* Set LUN number */
2229 pkt->cmd24.fcp_lun[2] = LSB(scsi_req.lun);
2230 pkt->cmd24.fcp_lun[3] = MSB(scsi_req.lun);
2231
2232 /* Set N_port handle */
2233 pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
2234
2235 /* Set VP Index */
2236 pkt->cmd24.vp_index = ha->vp_index;
2237
2238 /* Set target ID */
2239 pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
2240 pkt->cmd24.target_id[1] = tq->d_id.b.area;
2241 pkt->cmd24.target_id[2] = tq->d_id.b.domain;
2242
2243 /* Set ISP command timeout. */
2244 pkt->cmd24.timeout = (uint16_t)LE_16(15);
2245
2246 /* Load SCSI CDB */
2247 ddi_rep_put8(ha->hba_buf.acc_handle, scsi_req.cdbp,
2248 pkt->cmd24.scsi_cdb, scsi_req.cdb_len,
2249 DDI_DEV_AUTOINCR);
2250 for (cnt = 0; cnt < MAX_CMDSZ;
2251 cnt = (uint16_t)(cnt + 4)) {
2252 ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
2253 + cnt, 4);
2254 }
2255
2256 /* Set tag queue control flags */
2257 pkt->cmd24.task = TA_STAG;
2258
2259 if (pld_size) {
2260 /* Set transfer direction. */
2261 pkt->cmd24.control_flags = scsi_req.direction;
2262
2263 /* Set data segment count. */
2264 pkt->cmd24.dseg_count = LE_16(1);
2265
2266 /* Load total byte count. */
2267 pkt->cmd24.total_byte_count = LE_32(pld_size);
2268
2269 /* Load data descriptor. */
2270 pkt->cmd24.dseg_0_address[0] = (uint32_t)
2271 LE_32(LSD(dma_mem->cookie.dmac_laddress));
2272 pkt->cmd24.dseg_0_address[1] = (uint32_t)
2273 LE_32(MSD(dma_mem->cookie.dmac_laddress));
2274 pkt->cmd24.dseg_0_length = LE_32(pld_size);
2275 }
2276 } else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
2277 pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
2278 pkt->cmd3.entry_count = 1;
2279 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2280 pkt->cmd3.target_l = LSB(tq->loop_id);
2281 pkt->cmd3.target_h = MSB(tq->loop_id);
2282 } else {
2283 pkt->cmd3.target_h = LSB(tq->loop_id);
2284 }
2285 pkt->cmd3.lun_l = LSB(scsi_req.lun);
2286 pkt->cmd3.lun_h = MSB(scsi_req.lun);
2287 pkt->cmd3.control_flags_l = scsi_req.direction;
2288 pkt->cmd3.timeout = LE_16(15);
2289 for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2290 pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2291 }
2292 if (pld_size) {
2293 pkt->cmd3.dseg_count = LE_16(1);
2294 pkt->cmd3.byte_count = LE_32(pld_size);
2295 pkt->cmd3.dseg_0_address[0] = (uint32_t)
2296 LE_32(LSD(dma_mem->cookie.dmac_laddress));
2297 pkt->cmd3.dseg_0_address[1] = (uint32_t)
2298 LE_32(MSD(dma_mem->cookie.dmac_laddress));
2299 pkt->cmd3.dseg_0_length = LE_32(pld_size);
2300 }
2301 } else {
2302 pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
2303 pkt->cmd.entry_count = 1;
2304 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
2305 pkt->cmd.target_l = LSB(tq->loop_id);
2306 pkt->cmd.target_h = MSB(tq->loop_id);
2307 } else {
2308 pkt->cmd.target_h = LSB(tq->loop_id);
2309 }
2310 pkt->cmd.lun_l = LSB(scsi_req.lun);
2311 pkt->cmd.lun_h = MSB(scsi_req.lun);
2312 pkt->cmd.control_flags_l = scsi_req.direction;
2313 pkt->cmd.timeout = LE_16(15);
2314 for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
2315 pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
2316 }
2317 if (pld_size) {
2318 pkt->cmd.dseg_count = LE_16(1);
2319 pkt->cmd.byte_count = LE_32(pld_size);
2320 pkt->cmd.dseg_0_address = (uint32_t)
2321 LE_32(LSD(dma_mem->cookie.dmac_laddress));
2322 pkt->cmd.dseg_0_length = LE_32(pld_size);
2323 }
2324 }
2325 /* Go issue command and wait for completion. */
2326 QL_PRINT_9(CE_CONT, "(%d): request pkt\n", ha->instance);
2327 QL_DUMP_9(pkt, 8, pkt_size);
2328
2329 status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);
2330
2331 if (pld_size) {
2332 /* Sync in coming DMA buffer. */
2333 (void) ddi_dma_sync(dma_mem->dma_handle, 0,
2334 dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
2335 /* Copy in coming DMA data. */
2336 ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
2337 (uint8_t *)dma_mem->bp, pld_size,
2338 DDI_DEV_AUTOINCR);
2339 }
2340
2341 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2342 pkt->sts24.entry_status = (uint8_t)
2343 (pkt->sts24.entry_status & 0x3c);
2344 } else {
2345 pkt->sts.entry_status = (uint8_t)
2346 (pkt->sts.entry_status & 0x7e);
2347 }
2348
2349 if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
2350 EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
2351 pkt->sts.entry_status, tq->d_id.b24);
2352 status = QL_FUNCTION_PARAMETER_ERROR;
2353 }
2354
2355 sts.comp_status = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
2356 LE_16(pkt->sts24.comp_status) :
2357 LE_16(pkt->sts.comp_status));
2358
2359 /*
2360 * We have verified about all the request that can be so far.
2361 * Now we need to start verification of our ability to
2362 * actually issue the CDB.
2363 */
2364 if (DRIVER_SUSPENDED(ha)) {
2365 sts.comp_status = CS_LOOP_DOWN_ABORT;
2366 break;
2367 } else if (status == QL_SUCCESS &&
2368 (sts.comp_status == CS_PORT_LOGGED_OUT ||
2369 sts.comp_status == CS_PORT_UNAVAILABLE)) {
2370 EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
2371 if (tq->flags & TQF_FABRIC_DEVICE) {
2372 rval = ql_login_fport(ha, tq, tq->loop_id,
2373 LFF_NO_PLOGI, &mr);
2374 if (rval != QL_SUCCESS) {
2375 EL(ha, "failed, login_fport=%xh, "
2398 ql_free_dma_resource(ha, dma_mem);
2399 kmem_free(dma_mem, sizeof (dma_mem_t));
2400 cmd->Status = EXT_STATUS_SUSPENDED;
2401 cmd->ResponseLen = 0;
2402 return;
2403 }
2404
2405 if (status != QL_SUCCESS) {
2406 /* Command error */
2407 EL(ha, "failed, I/O\n");
2408 kmem_free(pkt, pkt_size);
2409 ql_free_dma_resource(ha, dma_mem);
2410 kmem_free(dma_mem, sizeof (dma_mem_t));
2411 cmd->Status = EXT_STATUS_ERR;
2412 cmd->DetailStatus = status;
2413 cmd->ResponseLen = 0;
2414 return;
2415 }
2416
2417 /* Setup status. */
2418 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2419 sts.scsi_status_l = pkt->sts24.scsi_status_l;
2420 sts.scsi_status_h = pkt->sts24.scsi_status_h;
2421
2422 /* Setup residuals. */
2423 sts.residual_length = LE_32(pkt->sts24.residual_length);
2424
2425 /* Setup state flags. */
2426 sts.state_flags_l = pkt->sts24.state_flags_l;
2427 sts.state_flags_h = pkt->sts24.state_flags_h;
2428 if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
2429 sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2430 SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2431 SF_XFERRED_DATA | SF_GOT_STATUS);
2432 } else {
2433 sts.state_flags_h = (uint8_t)(sts.state_flags_h |
2434 SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
2435 SF_GOT_STATUS);
2436 }
2437 if (scsi_req.direction & CF_WR) {
2438 sts.state_flags_l = (uint8_t)(sts.state_flags_l |
2473 sts.scsi_status_h = pkt->sts.scsi_status_h;
2474
2475 /* Setup residuals. */
2476 sts.residual_length = LE_32(pkt->sts.residual_length);
2477
2478 /* Setup state flags. */
2479 sts.state_flags_l = pkt->sts.state_flags_l;
2480 sts.state_flags_h = pkt->sts.state_flags_h;
2481
2482 /* Setup FCP response info. */
2483 sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
2484 LE_16(pkt->sts.rsp_info_length) : 0;
2485 sts.rsp_info = &pkt->sts.rsp_info[0];
2486
2487 /* Setup sense data. */
2488 sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
2489 LE_16(pkt->sts.req_sense_length) : 0;
2490 sts.req_sense_data = &pkt->sts.req_sense_data[0];
2491 }
2492
2493 QL_PRINT_9(CE_CONT, "(%d): response pkt\n", ha->instance);
2494 QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));
2495
2496 switch (sts.comp_status) {
2497 case CS_INCOMPLETE:
2498 case CS_ABORTED:
2499 case CS_DEVICE_UNAVAILABLE:
2500 case CS_PORT_UNAVAILABLE:
2501 case CS_PORT_LOGGED_OUT:
2502 case CS_PORT_CONFIG_CHG:
2503 case CS_PORT_BUSY:
2504 case CS_LOOP_DOWN_ABORT:
2505 cmd->Status = EXT_STATUS_BUSY;
2506 break;
2507 case CS_RESET:
2508 case CS_QUEUE_FULL:
2509 cmd->Status = EXT_STATUS_ERR;
2510 break;
2511 case CS_TIMEOUT:
2512 cmd->Status = EXT_STATUS_ERR;
2513 break;
2537 scsi_req.cdbp[0] == SCMD_ERASE ||
2538 (scsi_req.cdbp[0] == SCMD_FORMAT &&
2539 (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
2540 /*
2541 * Non data transfer command, clear sts_entry residual
2542 * length.
2543 */
2544 sts.residual_length = 0;
2545 cmd->ResponseLen = 0;
2546 if (sts.comp_status == CS_DATA_UNDERRUN) {
2547 sts.comp_status = CS_COMPLETE;
2548 cmd->Status = EXT_STATUS_OK;
2549 }
2550 } else {
2551 cmd->ResponseLen = pld_size;
2552 }
2553
2554 /* Correct ISP completion status */
2555 if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
2556 (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
2557 QL_PRINT_9(CE_CONT, "(%d): Correct completion\n",
2558 ha->instance);
2559 scsi_req.resid = 0;
2560 } else if (sts.comp_status == CS_DATA_UNDERRUN) {
2561 QL_PRINT_9(CE_CONT, "(%d): Correct UNDERRUN\n",
2562 ha->instance);
2563 scsi_req.resid = sts.residual_length;
2564 if (sts.scsi_status_h & FCP_RESID_UNDER) {
2565 cmd->Status = (uint32_t)EXT_STATUS_OK;
2566
2567 cmd->ResponseLen = (uint32_t)
2568 (pld_size - scsi_req.resid);
2569 } else {
2570 EL(ha, "failed, Transfer ERROR\n");
2571 cmd->Status = EXT_STATUS_ERR;
2572 cmd->ResponseLen = 0;
2573 }
2574 } else {
2575 QL_PRINT_9(CE_CONT, "(%d): error d_id=%xh, comp_status=%xh, "
2576 "scsi_status_h=%xh, scsi_status_l=%xh\n", ha->instance,
2577 tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
2578 sts.scsi_status_l);
2579
2580 scsi_req.resid = pld_size;
2581 /*
2582 * Handle residual count on SCSI check
2583 * condition.
2584 *
2585 * - If Residual Under / Over is set, use the
2586 * Residual Transfer Length field in IOCB.
2587 * - If Residual Under / Over is not set, and
2588 * Transferred Data bit is set in State Flags
2589 * field of IOCB, report residual value of 0
2590 * (you may want to do this for tape
2591 * Write-type commands only). This takes care
2592 * of logical end of tape problem and does
2593 * not break Unit Attention.
2594 * - If Residual Under / Over is not set, and
2595 * Transferred Data bit is not set in State
2596 * Flags, report residual value equal to
2621 } else {
2622 sense_sz = sts.req_sense_length;
2623 }
2624
2625 EL(ha, "failed, Check Condition Status, d_id=%xh\n",
2626 tq->d_id.b24);
2627 QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);
2628
2629 if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
2630 (size_t)sense_sz, mode) != 0) {
2631 EL(ha, "failed, request sense ddi_copyout\n");
2632 }
2633
2634 cmd->Status = EXT_STATUS_SCSI_STATUS;
2635 cmd->DetailStatus = sts.scsi_status_l;
2636 }
2637
2638 /* Copy response payload from DMA buffer to application. */
2639 if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
2640 cmd->ResponseLen != 0) {
2641 QL_PRINT_9(CE_CONT, "(%d): Data Return resid=%lu, "
2642 "byte_count=%u, ResponseLen=%xh\n", ha->instance,
2643 scsi_req.resid, pld_size, cmd->ResponseLen);
2644 QL_DUMP_9(pld, 8, cmd->ResponseLen);
2645
2646 /* Send response payload. */
2647 if (ql_send_buffer_data(pld,
2648 (caddr_t)(uintptr_t)cmd->ResponseAdr,
2649 cmd->ResponseLen, mode) != cmd->ResponseLen) {
2650 EL(ha, "failed, send_buffer_data\n");
2651 cmd->Status = EXT_STATUS_COPY_ERR;
2652 cmd->ResponseLen = 0;
2653 }
2654 }
2655
2656 if (cmd->Status != EXT_STATUS_OK) {
2657 EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
2658 "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
2659 } else {
2660 /*EMPTY*/
2661 QL_PRINT_9(CE_CONT, "(%d): done, ResponseLen=%d\n",
2662 ha->instance, cmd->ResponseLen);
2663 }
2664
2665 kmem_free(pkt, pkt_size);
2666 ql_free_dma_resource(ha, dma_mem);
2667 kmem_free(dma_mem, sizeof (dma_mem_t));
2668 }
2669
2670 /*
2671 * ql_wwpn_to_scsiaddr
2672 *
2673 * Input:
2674 * ha: adapter state pointer.
2675 * cmd: EXT_IOCTL cmd struct pointer.
2676 * mode: flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
2678 * Context:
2679 * Kernel context.
2680 */
2681 static void
2682 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2683 {
2684 int status;
2685 uint8_t wwpn[EXT_DEF_WWN_NAME_SIZE];
2686 EXT_SCSI_ADDR *tmp_addr;
2687 ql_tgt_t *tq;
2688
2689 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2690
2691 if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2692 /* Return error */
2693 EL(ha, "incorrect RequestLen\n");
2694 cmd->Status = EXT_STATUS_INVALID_PARAM;
2695 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2696 return;
2697 }
2698
2699 status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2700 cmd->RequestLen, mode);
2701
2702 if (status != 0) {
2703 cmd->Status = EXT_STATUS_COPY_ERR;
2704 EL(ha, "failed, ddi_copyin\n");
2705 return;
2706 }
2707
2708 tq = ql_find_port(ha, wwpn, QLNT_PORT);
2709
2710 if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2711 /* no matching device */
2712 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2713 EL(ha, "failed, device not found\n");
2714 return;
2715 }
2716
2717 /* Copy out the IDs found. For now we can only return target ID. */
2718 tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2719
2720 status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2721
2722 if (status != 0) {
2723 cmd->Status = EXT_STATUS_COPY_ERR;
2724 EL(ha, "failed, ddi_copyout\n");
2725 } else {
2726 cmd->Status = EXT_STATUS_OK;
2727 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2728 }
2729 }
2730
2731 /*
2732 * ql_host_idx
2733 * Gets host order index.
2734 *
2735 * Input:
2736 * ha: adapter state pointer.
2737 * cmd: EXT_IOCTL cmd struct pointer.
2738 * mode: flags.
2739 *
2740 * Returns:
2741 * None, request status indicated in cmd->Status.
2742 *
2743 * Context:
2744 * Kernel context.
2745 */
2746 static void
2747 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2748 {
2749 uint16_t idx;
2750
2751 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2752
2753 if (cmd->ResponseLen < sizeof (uint16_t)) {
2754 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2755 cmd->DetailStatus = sizeof (uint16_t);
2756 EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2757 cmd->ResponseLen = 0;
2758 return;
2759 }
2760
2761 idx = (uint16_t)ha->instance;
2762
2763 if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2764 sizeof (uint16_t), mode) != 0) {
2765 cmd->Status = EXT_STATUS_COPY_ERR;
2766 cmd->ResponseLen = 0;
2767 EL(ha, "failed, ddi_copyout\n");
2768 } else {
2769 cmd->ResponseLen = sizeof (uint16_t);
2770 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2771 }
2772 }
2773
2774 /*
2775 * ql_host_drvname
2776 * Gets host driver name
2777 *
2778 * Input:
2779 * ha: adapter state pointer.
2780 * cmd: EXT_IOCTL cmd struct pointer.
2781 * mode: flags.
2782 *
2783 * Returns:
2784 * None, request status indicated in cmd->Status.
2785 *
2786 * Context:
2787 * Kernel context.
2788 */
2789 static void
2790 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2791 {
2792
2793 char drvname[] = QL_NAME;
2794 uint32_t qlnamelen;
2795
2796 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2797
2798 qlnamelen = (uint32_t)(strlen(QL_NAME)+1);
2799
2800 if (cmd->ResponseLen < qlnamelen) {
2801 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2802 cmd->DetailStatus = qlnamelen;
2803 EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2804 cmd->ResponseLen, qlnamelen);
2805 cmd->ResponseLen = 0;
2806 return;
2807 }
2808
2809 if (ddi_copyout((void *)&drvname,
2810 (void *)(uintptr_t)(cmd->ResponseAdr),
2811 qlnamelen, mode) != 0) {
2812 cmd->Status = EXT_STATUS_COPY_ERR;
2813 cmd->ResponseLen = 0;
2814 EL(ha, "failed, ddi_copyout\n");
2815 } else {
2816 cmd->ResponseLen = qlnamelen-1;
2817 }
2818
2819 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2820 }
2821
2822 /*
2823 * ql_read_nvram
2824 * Get NVRAM contents.
2825 *
2826 * Input:
2827 * ha: adapter state pointer.
2828 * cmd: EXT_IOCTL cmd struct pointer.
2829 * mode: flags.
2830 *
2831 * Returns:
2832 * None, request status indicated in cmd->Status.
2833 *
2834 * Context:
2835 * Kernel context.
2836 */
2837 static void
2838 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2839 {
2840
2841 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2842
2843 if (cmd->ResponseLen < ha->nvram_cache->size) {
2844 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2845 cmd->DetailStatus = ha->nvram_cache->size;
2846 EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2847 cmd->ResponseLen);
2848 cmd->ResponseLen = 0;
2849 return;
2850 }
2851
2852 /* Get NVRAM data. */
2853 if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2854 mode) != 0) {
2855 cmd->Status = EXT_STATUS_COPY_ERR;
2856 cmd->ResponseLen = 0;
2857 EL(ha, "failed, copy error\n");
2858 } else {
2859 cmd->ResponseLen = ha->nvram_cache->size;
2860 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2861 }
2862 }
2863
2864 /*
2865 * ql_write_nvram
2866 * Loads NVRAM contents.
2867 *
2868 * Input:
2869 * ha: adapter state pointer.
2870 * cmd: EXT_IOCTL cmd struct pointer.
2871 * mode: flags.
2872 *
2873 * Returns:
2874 * None, request status indicated in cmd->Status.
2875 *
2876 * Context:
2877 * Kernel context.
2878 */
2879 static void
2880 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2881 {
2882
2883 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2884
2885 if (cmd->RequestLen < ha->nvram_cache->size) {
2886 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2887 cmd->DetailStatus = ha->nvram_cache->size;
2888 EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2889 cmd->RequestLen);
2890 return;
2891 }
2892
2893 /* Load NVRAM data. */
2894 if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2895 mode) != 0) {
2896 cmd->Status = EXT_STATUS_COPY_ERR;
2897 EL(ha, "failed, copy error\n");
2898 } else {
2899 /*EMPTY*/
2900 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2901 }
2902 }
2903
2904 /*
2905 * ql_write_vpd
2906 * Loads VPD contents.
2907 *
2908 * Input:
2909 * ha: adapter state pointer.
2910 * cmd: EXT_IOCTL cmd struct pointer.
2911 * mode: flags.
2912 *
2913 * Returns:
2914 * None, request status indicated in cmd->Status.
2915 *
2916 * Context:
2917 * Kernel context.
2918 */
2919 static void
2920 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2921 {
2922 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2923
2924 int32_t rval = 0;
2925
2926 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2927 cmd->Status = EXT_STATUS_INVALID_REQUEST;
2928 EL(ha, "failed, invalid request for HBA\n");
2929 return;
2930 }
2931
2932 if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
2933 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2934 cmd->DetailStatus = QL_24XX_VPD_SIZE;
2935 EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
2936 cmd->RequestLen);
2937 return;
2938 }
2939
2940 /* Load VPD data. */
2941 if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
2942 mode)) != 0) {
2943 cmd->Status = EXT_STATUS_COPY_ERR;
2944 cmd->DetailStatus = rval;
2945 EL(ha, "failed, errno=%x\n", rval);
2946 } else {
2947 /*EMPTY*/
2948 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2949 }
2950 }
2951
2952 /*
2953 * ql_read_vpd
2954 * Dumps VPD contents.
2955 *
2956 * Input:
2957 * ha: adapter state pointer.
2958 * cmd: EXT_IOCTL cmd struct pointer.
2959 * mode: flags.
2960 *
2961 * Returns:
2962 * None, request status indicated in cmd->Status.
2963 *
2964 * Context:
2965 * Kernel context.
2966 */
2967 static void
2968 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2969 {
2970 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
2971
2972 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
2973 cmd->Status = EXT_STATUS_INVALID_REQUEST;
2974 EL(ha, "failed, invalid request for HBA\n");
2975 return;
2976 }
2977
2978 if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
2979 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2980 cmd->DetailStatus = QL_24XX_VPD_SIZE;
2981 EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
2982 cmd->ResponseLen);
2983 return;
2984 }
2985
2986 /* Dump VPD data. */
2987 if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2988 mode)) != 0) {
2989 cmd->Status = EXT_STATUS_COPY_ERR;
2990 EL(ha, "failed,\n");
2991 } else {
2992 /*EMPTY*/
2993 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
2994 }
2995 }
2996
2997 /*
2998 * ql_get_fcache
2999 * Dumps flash cache contents.
3000 *
3001 * Input:
3002 * ha: adapter state pointer.
3003 * cmd: EXT_IOCTL cmd struct pointer.
3004 * mode: flags.
3005 *
3006 * Returns:
3007 * None, request status indicated in cmd->Status.
3008 *
3009 * Context:
3010 * Kernel context.
3011 */
static void
ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	bsize, boff, types, cpsize, hsize;
	ql_fcache_t	*fptr;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	CACHE_LOCK(ha);

	if (ha->fcache == NULL) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_ERR;
		EL(ha, "failed, adapter fcache not setup\n");
		return;
	}

	/*
	 * Minimum response size: the legacy layout used below is four
	 * 100-byte slots (BIOS/FCODE/EFI at offsets 0/100/200 and
	 * firmware at 300) for 24xx and later; older adapters get a
	 * single 100-byte slot.
	 */
	if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
		bsize = 100;
	} else {
		bsize = 400;
	}

	if (cmd->ResponseLen < bsize) {
		CACHE_UNLOCK(ha);
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = bsize;
		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
		    bsize, cmd->ResponseLen);
		return;
	}

	/* From here on, bsize counts bytes actually copied out. */
	boff = 0;
	bsize = 0;
	fptr = ha->fcache;

	/*
	 * For backwards compatibility, get one of each image type
	 */
	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
		/* Get the next image */
		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {

			/* Each legacy slot holds at most 100 bytes. */
			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);

			if (ddi_copyout(fptr->buf,
			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
			    cpsize, mode) != 0) {
				CACHE_UNLOCK(ha);
				EL(ha, "ddicopy failed, done\n");
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->DetailStatus = 0;
				return;
			}
			/* Advance to the next fixed 100-byte slot. */
			boff += 100;
			bsize += cpsize;
			/* Only one image per type is returned. */
			types &= ~(fptr->type);
		}
	}

	/*
	 * Get the firmware image -- it needs to be last in the
	 * buffer at offset 300 for backwards compatibility. Also for
	 * backwards compatibility, the pci header is stripped off.
	 */
	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {

		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
		if (hsize > fptr->buflen) {
			CACHE_UNLOCK(ha);
			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
			    hsize, fptr->buflen);
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}

		/* Copy at most 100 bytes of post-header firmware data. */
		cpsize = ((fptr->buflen - hsize) < 100 ?
		    fptr->buflen - hsize : 100);

		if (ddi_copyout(fptr->buf+hsize,
		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
		    cpsize, mode) != 0) {
			CACHE_UNLOCK(ha);
			EL(ha, "fw ddicopy failed, done\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}
		/*
		 * NOTE(review): bsize advances by the full slot size
		 * (100) here, not by cpsize as in the loop above --
		 * presumably deliberate for the legacy layout; confirm.
		 */
		bsize += 100;
	}

	CACHE_UNLOCK(ha);
	cmd->Status = EXT_STATUS_OK;
	/* DetailStatus reports the number of bytes returned. */
	cmd->DetailStatus = bsize;

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
3111
3112 /*
3113 * ql_get_fcache_ex
3114 * Dumps flash cache contents.
3115 *
3116 * Input:
3117 * ha: adapter state pointer.
3118 * cmd: EXT_IOCTL cmd struct pointer.
3119 * mode: flags.
3120 *
3121 * Returns:
3122 * None, request status indicated in cmd->Status.
3123 *
3124 * Context:
3125 * Kernel context.
3126 */
3127 static void
3128 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3129 {
3130 uint32_t bsize = 0;
3131 uint32_t boff = 0;
3132 ql_fcache_t *fptr;
3133
3134 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3135
3136 CACHE_LOCK(ha);
3137 if (ha->fcache == NULL) {
3138 CACHE_UNLOCK(ha);
3139 cmd->Status = EXT_STATUS_ERR;
3140 EL(ha, "failed, adapter fcache not setup\n");
3141 return;
3142 }
3143
3144 /* Make sure user passed enough buffer space */
3145 for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3146 bsize += FBUFSIZE;
3147 }
3148
3149 if (cmd->ResponseLen < bsize) {
3150 CACHE_UNLOCK(ha);
3151 if (cmd->ResponseLen != 0) {
3152 EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3153 bsize, cmd->ResponseLen);
3154 }
3155 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3156 cmd->DetailStatus = bsize;
3157 return;
3158 }
3159
3160 boff = 0;
3161 fptr = ha->fcache;
3162 while ((fptr != NULL) && (fptr->buf != NULL)) {
3163 /* Get the next image */
3164 if (ddi_copyout(fptr->buf,
3165 (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3166 (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3167 mode) != 0) {
3168 CACHE_UNLOCK(ha);
3169 EL(ha, "failed, ddicopy at %xh, done\n", boff);
3170 cmd->Status = EXT_STATUS_COPY_ERR;
3171 cmd->DetailStatus = 0;
3172 return;
3173 }
3174 boff += FBUFSIZE;
3175 fptr = fptr->next;
3176 }
3177
3178 CACHE_UNLOCK(ha);
3179 cmd->Status = EXT_STATUS_OK;
3180 cmd->DetailStatus = bsize;
3181
3182 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3183 }
3184
3185 /*
3186 * ql_read_flash
3187 * Get flash contents.
3188 *
3189 * Input:
3190 * ha: adapter state pointer.
3191 * cmd: EXT_IOCTL cmd struct pointer.
3192 * mode: flags.
3193 *
3194 * Returns:
3195 * None, request status indicated in cmd->Status.
3196 *
3197 * Context:
3198 * Kernel context.
3199 */
3200 static void
3201 ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3202 {
3203 ql_xioctl_t *xp = ha->xioctl;
3204
3205 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3206
3207 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3208 EL(ha, "ql_stall_driver failed\n");
3209 cmd->Status = EXT_STATUS_BUSY;
3210 cmd->DetailStatus = xp->fdesc.flash_size;
3211 cmd->ResponseLen = 0;
3212 return;
3213 }
3214
3215 if (ql_setup_fcache(ha) != QL_SUCCESS) {
3216 cmd->Status = EXT_STATUS_ERR;
3217 cmd->DetailStatus = xp->fdesc.flash_size;
3218 EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
3219 cmd->ResponseLen, xp->fdesc.flash_size);
3220 cmd->ResponseLen = 0;
3221 } else {
3222 /* adjust read size to flash size */
3223 if (cmd->ResponseLen > xp->fdesc.flash_size) {
3224 EL(ha, "adjusting req=%xh, max=%xh\n",
3225 cmd->ResponseLen, xp->fdesc.flash_size);
3226 cmd->ResponseLen = xp->fdesc.flash_size;
3227 }
3228
3229 /* Get flash data. */
3230 if (ql_flash_fcode_dump(ha,
3231 (void *)(uintptr_t)(cmd->ResponseAdr),
3232 (size_t)(cmd->ResponseLen), 0, mode) != 0) {
3233 cmd->Status = EXT_STATUS_COPY_ERR;
3234 cmd->ResponseLen = 0;
3235 EL(ha, "failed,\n");
3236 }
3237 }
3238
3239 /* Resume I/O */
3240 if (CFG_IST(ha, CFG_CTRL_24258081)) {
3241 ql_restart_driver(ha);
3242 } else {
3243 EL(ha, "isp_abort_needed for restart\n");
3244 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3245 DRIVER_STALL);
3246 }
3247
3248 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3249 }
3250
3251 /*
3252 * ql_write_flash
3253 * Loads flash contents.
3254 *
3255 * Input:
3256 * ha: adapter state pointer.
3257 * cmd: EXT_IOCTL cmd struct pointer.
3258 * mode: flags.
3259 *
3260 * Returns:
3261 * None, request status indicated in cmd->Status.
3262 *
3263 * Context:
3264 * Kernel context.
3265 */
3266 static void
3267 ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3268 {
3269 ql_xioctl_t *xp = ha->xioctl;
3270
3271 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3272
3273 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
3274 EL(ha, "ql_stall_driver failed\n");
3275 cmd->Status = EXT_STATUS_BUSY;
3276 cmd->DetailStatus = xp->fdesc.flash_size;
3277 cmd->ResponseLen = 0;
3278 return;
3279 }
3280
3281 if (ql_setup_fcache(ha) != QL_SUCCESS) {
3282 cmd->Status = EXT_STATUS_ERR;
3283 cmd->DetailStatus = xp->fdesc.flash_size;
3284 EL(ha, "failed, RequestLen=%xh, size=%xh\n",
3285 cmd->RequestLen, xp->fdesc.flash_size);
3286 cmd->ResponseLen = 0;
3287 } else {
3288 /* Load flash data. */
3289 if (cmd->RequestLen > xp->fdesc.flash_size) {
3290 cmd->Status = EXT_STATUS_ERR;
3291 cmd->DetailStatus = xp->fdesc.flash_size;
3292 EL(ha, "failed, RequestLen=%xh, flash size=%xh\n",
3293 cmd->RequestLen, xp->fdesc.flash_size);
3294 } else if (ql_flash_fcode_load(ha,
3295 (void *)(uintptr_t)(cmd->RequestAdr),
3296 (size_t)(cmd->RequestLen), mode) != 0) {
3297 cmd->Status = EXT_STATUS_COPY_ERR;
3298 EL(ha, "failed,\n");
3299 }
3300 }
3301
3302 /* Resume I/O */
3303 if (CFG_IST(ha, CFG_CTRL_24258081)) {
3304 ql_restart_driver(ha);
3305 } else {
3306 EL(ha, "isp_abort_needed for restart\n");
3307 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
3308 DRIVER_STALL);
3309 }
3310
3311 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3312 }
3313
3314 /*
3315 * ql_diagnostic_loopback
3316 * Performs EXT_CC_LOOPBACK Command
3317 *
3318 * Input:
3319 * ha: adapter state pointer.
3320 * cmd: Local EXT_IOCTL cmd struct pointer.
3321 * mode: flags.
3322 *
3323 * Returns:
3324 * None, request status indicated in cmd->Status.
3325 *
3326 * Context:
3327 * Kernel context.
3328 */
static void
ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_LOOPBACK_REQ	plbreq;
	EXT_LOOPBACK_RSP	plbrsp;
	ql_mbx_data_t		mr;
	uint32_t		rval;
	caddr_t			bp;
	uint16_t		opt;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Get loop back request. */
	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Loopback point (e.g. internal/external) requested by caller. */
	opt = (uint16_t)(plbreq.Options & MBC_LOOPBACK_POINT_MASK);

	/*
	 * Check transfer length fits in buffer.
	 * NOTE(review): the && only rejects when the count exceeds the
	 * buffer AND is below MAILBOX_BUFFER_SIZE -- confirm this is
	 * the intended bound (an || would reject either violation).
	 */
	if (plbreq.BufferLength < plbreq.TransferCount &&
	    plbreq.TransferCount < MAILBOX_BUFFER_SIZE) {
		EL(ha, "failed, BufferLength=%d, xfercnt=%d, "
		    "mailbox_buffer_size=%d\n", plbreq.BufferLength,
		    plbreq.TransferCount, MAILBOX_BUFFER_SIZE);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate command memory. */
	bp = kmem_zalloc(plbreq.TransferCount, KM_SLEEP);
	if (bp == NULL) {
		/* NOTE(review): KM_SLEEP should never return NULL. */
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get loopback data. */
	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
	    bp, plbreq.TransferCount, mode) != plbreq.TransferCount) {
		EL(ha, "failed, ddi_copyin-2\n");
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* The loop must be stable and all other I/O stalled. */
	if ((ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) ||
	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* determine topology so we can send the loopback or the echo */
	/* Echo is supported on 2300's only and above */

	if (CFG_IST(ha, CFG_CTRL_8081)) {
		if (!(ha->task_daemon_flags & LOOP_DOWN) && opt ==
		    MBC_LOOPBACK_POINT_EXTERNAL) {
			/* ELS echo payload is limited to 252 bytes. */
			if (plbreq.TransferCount > 252) {
				EL(ha, "transfer count (%d) > 252\n",
				    plbreq.TransferCount);
				kmem_free(bp, plbreq.TransferCount);
				cmd->Status = EXT_STATUS_INVALID_PARAM;
				cmd->ResponseLen = 0;
				return;
			}
			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
			rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
			    MBC_ECHO_ELS, &mr);
		} else {
			/* 81xx parts need the port configured first. */
			if (CFG_IST(ha, CFG_CTRL_81XX)) {
				(void) ql_set_loop_point(ha, opt);
			}
			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
			rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
			    opt, plbreq.IterationCount, &mr);
			/* Restore the normal (no-loopback) port config. */
			if (CFG_IST(ha, CFG_CTRL_81XX)) {
				(void) ql_set_loop_point(ha, 0);
			}
		}
	} else {
		if (!(ha->task_daemon_flags & LOOP_DOWN) &&
		    (ha->topology & QL_F_PORT) &&
		    ha->device_id >= 0x2300) {
			QL_PRINT_9(CE_CONT, "(%d): F_PORT topology -- using "
			    "echo\n", ha->instance);
			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
			rval = ql_diag_echo(ha, 0, bp, plbreq.TransferCount,
			    (uint16_t)(CFG_IST(ha, CFG_CTRL_8081) ?
			    MBC_ECHO_ELS : MBC_ECHO_64BIT), &mr);
		} else {
			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
			rval = ql_diag_loopback(ha, 0, bp, plbreq.TransferCount,
			    opt, plbreq.IterationCount, &mr);
		}
	}

	/* Resume normal I/O. */
	ql_restart_driver(ha);

	/* Restart IP if it was shutdown. */
	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
		(void) ql_initialize_ip(ha);
		ql_isp_rcvbuf(ha);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
		return;
	}

	/* Return loopback data. */
	if (ql_send_buffer_data(bp, (caddr_t)(uintptr_t)plbreq.BufferAddress,
	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
		EL(ha, "failed, ddi_copyout\n");
		kmem_free(bp, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	kmem_free(bp, plbreq.TransferCount);

	/* Return loopback results. */
	plbrsp.BufferAddress = plbreq.BufferAddress;
	plbrsp.BufferLength = plbreq.TransferCount;
	plbrsp.CompletionStatus = mr.mb[0];

	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
		/* Echo reports no per-frame error statistics. */
		plbrsp.CrcErrorCount = 0;
		plbrsp.DisparityErrorCount = 0;
		plbrsp.FrameLengthErrorCount = 0;
		plbrsp.IterationCountLastError = 0;
	} else {
		plbrsp.CrcErrorCount = mr.mb[1];
		plbrsp.DisparityErrorCount = mr.mb[2];
		plbrsp.FrameLengthErrorCount = mr.mb[3];
		/*
		 * NOTE(review): (mb[19] >> 16) | mb[18] looks odd for a
		 * 32-bit value built from two 16-bit mailboxes (a << 16
		 * would be expected); verify against the firmware spec.
		 */
		plbrsp.IterationCountLastError = (mr.mb[19] >> 16) | mr.mb[18];
	}

	rval = ddi_copyout((void *)&plbrsp,
	    (void *)(uintptr_t)cmd->ResponseAdr,
	    sizeof (EXT_LOOPBACK_RSP), mode);
	if (rval != 0) {
		EL(ha, "failed, ddi_copyout-2\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
3499
3500 /*
3501 * ql_set_loop_point
3502 * Setup loop point for port configuration.
3503 *
3504 * Input:
3505 * ha: adapter state structure.
3506 * opt: loop point option.
3507 *
3508 * Returns:
3509 * ql local function return status code.
3510 *
3511 * Context:
3512 * Kernel context.
3513 */
3514 static int
3515 ql_set_loop_point(ql_adapter_state_t *ha, uint16_t opt)
3516 {
3517 ql_mbx_data_t mr;
3518 int rval;
3519 uint32_t timer;
3520
3521 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3522
3523 /*
3524 * We get the current port config, modify the loopback field and
3525 * write it back out.
3526 */
3527 if ((rval = ql_get_port_config(ha, &mr)) != QL_SUCCESS) {
3528 EL(ha, "get_port_config status=%xh\n", rval);
3529 return (rval);
3530 }
3531 /*
3532 * Set the loopback mode field while maintaining the others.
3533 * Currently only internal or none are supported.
3534 */
3535 mr.mb[1] = (uint16_t)(mr.mb[1] &~LOOPBACK_MODE_FIELD_MASK);
3536 if (opt == MBC_LOOPBACK_POINT_INTERNAL) {
3537 mr.mb[1] = (uint16_t)(mr.mb[1] |
3538 LOOPBACK_MODE(LOOPBACK_MODE_INTERNAL));
3539 }
3540 /*
3541 * Changing the port configuration will cause the port state to cycle
3542 * down and back up. The indication that this has happened is that
3543 * the point to point flag gets set.
3544 */
3545 ADAPTER_STATE_LOCK(ha);
3546 ha->flags &= ~POINT_TO_POINT;
3547 ADAPTER_STATE_UNLOCK(ha);
3548 if ((rval = ql_set_port_config(ha, &mr)) != QL_SUCCESS) {
3549 EL(ha, "set_port_config status=%xh\n", rval);
3550 }
3551
3552 /* wait for a while */
3553 for (timer = opt ? 10 : 0; timer; timer--) {
3554 if (ha->flags & POINT_TO_POINT) {
3555 break;
3556 }
3557 /* Delay for 1000000 usec (1 second). */
3558 ql_delay(ha, 1000000);
3559 }
3560
3561 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3562
3563 return (rval);
3564 }
3565
3566 /*
3567 * ql_send_els_rnid
3568 * IOCTL for extended link service RNID command.
3569 *
3570 * Input:
3571 * ha: adapter state pointer.
3572 * cmd: User space CT arguments pointer.
3573 * mode: flags.
3574 *
3575 * Returns:
3576 * None, request status indicated in cmd->Status.
3577 *
3578 * Context:
3579 * Kernel context.
3580 */
static void
ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_RNID_REQ	tmp_rnid;
	port_id_t	tmp_fcid;
	caddr_t		tmp_buf, bptr;
	uint32_t	copy_len;
	ql_tgt_t	*tq;
	EXT_RNID_DATA	rnid_data;
	uint32_t	loop_ready_wait = 10 * 60 * 10;
	int		rval = 0;
	uint32_t	local_hba = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Reject the request while the driver is suspended. */
	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Caller must supply exactly one EXT_RNID_REQ structure. */
	if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) {
		/* parameter error */
		EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n",
		    cmd->RequestLen);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	/* Pull the request structure in from user space. */
	if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
	    &tmp_rnid, cmd->RequestLen, mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Find loop ID of the device */
	if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) {
		/* Destination given as a world wide node name. */
		bptr = CFG_IST(ha, CFG_CTRL_24258081) ?
		    (caddr_t)&ha->init_ctrl_blk.cb24.node_name :
		    (caddr_t)&ha->init_ctrl_blk.cb.node_name;
		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN,
		    EXT_DEF_WWN_NAME_SIZE) == 0) {
			local_hba = 1;
		} else {
			tq = ql_find_port(ha,
			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE);
		}
	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) {
		/* Destination given as a world wide port name. */
		bptr = CFG_IST(ha, CFG_CTRL_24258081) ?
		    (caddr_t)&ha->init_ctrl_blk.cb24.port_name :
		    (caddr_t)&ha->init_ctrl_blk.cb.port_name;
		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN,
		    EXT_DEF_WWN_NAME_SIZE) == 0) {
			local_hba = 1;
		} else {
			tq = ql_find_port(ha,
			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT);
		}
	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) {
		/*
		 * Copy caller's d_id to tmp space.
		 */
		bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id,
		    EXT_DEF_PORTID_SIZE_ACTUAL);
		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);

		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
			local_hba = 1;
		} else {
			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
	}

	if (local_hba) {
		/* Request targets this HBA itself: read local RNID params. */
		rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
			cmd->Status = EXT_STATUS_ERR;
			cmd->ResponseLen = 0;
			return;
		}

		/* Save gotten RNID data. */
		bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA));

		/* Now build the Send RNID response */
		tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC);
		tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE);
		tmp_buf[2] = 0;
		tmp_buf[3] = sizeof (EXT_RNID_DATA);

		/* Port and node names come from the active init control blk. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			bcopy(ha->init_ctrl_blk.cb24.port_name, &tmp_buf[4],
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(ha->init_ctrl_blk.cb24.node_name,
			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
			    EXT_DEF_WWN_NAME_SIZE);
		} else {
			bcopy(ha->init_ctrl_blk.cb.port_name, &tmp_buf[4],
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(ha->init_ctrl_blk.cb.node_name,
			    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
			    EXT_DEF_WWN_NAME_SIZE);
		}

		bcopy((uint8_t *)&rnid_data,
		    &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE],
		    sizeof (EXT_RNID_DATA));
	} else {
		if (tq == NULL) {
			/* no matching device */
			EL(ha, "failed, device not found\n");
			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
			cmd->DetailStatus = EXT_DSTATUS_TARGET;
			cmd->ResponseLen = 0;
			return;
		}

		/* Send command */
		rval = ql_send_rnid_els(ha, tq->loop_id,
		    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
			    rval, tq->loop_id);

	/* Copy the response */
	/* Clamp the copy to what the caller's buffer can actually hold. */
	copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ?
	    SEND_RNID_RSP_SIZE : cmd->ResponseLen;

	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr,
	    copy_len, mode) != copy_len) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = copy_len;
		if (copy_len < SEND_RNID_RSP_SIZE) {
			cmd->Status = EXT_STATUS_DATA_OVERRUN;
			EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n");

		} else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) {
			cmd->Status = EXT_STATUS_DATA_UNDERRUN;
			EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n");
		} else {
			cmd->Status = EXT_STATUS_OK;
			QL_PRINT_9(CE_CONT, "(%d): done\n",
			    ha->instance);
		}
	}

	kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
}
3772
3773 /*
3774 * ql_set_host_data
3775 * Process IOCTL subcommand to set host/adapter related data.
3776 *
3777 * Input:
3778 * ha: adapter state pointer.
3779 * cmd: User space CT arguments pointer.
3780 * mode: flags.
3781 *
3782 * Returns:
3783 * None, request status indicated in cmd->Status.
3784 *
3785 * Context:
3786 * Kernel context.
3787 */
3788 static void
3789 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3790 {
3791 QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
3792 cmd->SubCode);
3793
3794 /*
3795 * case off on command subcode
3796 */
3797 switch (cmd->SubCode) {
3798 case EXT_SC_SET_RNID:
3799 ql_set_rnid_parameters(ha, cmd, mode);
3800 break;
3801 case EXT_SC_RST_STATISTICS:
3802 (void) ql_reset_statistics(ha, cmd);
3803 break;
3804 case EXT_SC_SET_BEACON_STATE:
3805 ql_set_led_state(ha, cmd, mode);
3806 break;
3807 case EXT_SC_SET_PARMS:
3808 case EXT_SC_SET_BUS_MODE:
3809 case EXT_SC_SET_DR_DUMP_BUF:
3810 case EXT_SC_SET_RISC_CODE:
3811 case EXT_SC_SET_FLASH_RAM:
3812 case EXT_SC_SET_LUN_BITMASK:
3813 case EXT_SC_SET_RETRY_CNT:
3814 case EXT_SC_SET_RTIN:
3815 case EXT_SC_SET_FC_LUN_BITMASK:
3816 case EXT_SC_ADD_TARGET_DEVICE:
3817 case EXT_SC_SWAP_TARGET_DEVICE:
3818 case EXT_SC_SET_SEL_TIMEOUT:
3819 default:
3820 /* function not supported. */
3821 EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3822 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3823 break;
3824 }
3825
3826 if (cmd->Status != EXT_STATUS_OK) {
3827 EL(ha, "failed, Status=%d\n", cmd->Status);
3828 } else {
3829 /*EMPTY*/
3830 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3831 }
3832 }
3833
3834 /*
3835 * ql_get_host_data
3836 * Performs EXT_CC_GET_DATA subcommands.
3837 *
3838 * Input:
3839 * ha: adapter state pointer.
3840 * cmd: Local EXT_IOCTL cmd struct pointer.
3841 * mode: flags.
3842 *
3843 * Returns:
3844 * None, request status indicated in cmd->Status.
3845 *
3846 * Context:
3847 * Kernel context.
3848 */
static void
ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	/* Minimum response buffer size required for the subcommand. */
	int	out_size = 0;

	QL_PRINT_9(CE_CONT, "(%d): started, SubCode=%d\n", ha->instance,
	    cmd->SubCode);

	/* case off on command subcode */
	/* First pass: size the expected response for the subcode. */
	switch (cmd->SubCode) {
	case EXT_SC_GET_STATISTICS:
		out_size = sizeof (EXT_HBA_PORT_STAT);
		break;
	case EXT_SC_GET_FC_STATISTICS:
		out_size = sizeof (EXT_HBA_PORT_STAT);
		break;
	case EXT_SC_GET_PORT_SUMMARY:
		out_size = sizeof (EXT_DEVICEDATA);
		break;
	case EXT_SC_GET_RNID:
		out_size = sizeof (EXT_RNID_DATA);
		break;
	case EXT_SC_GET_TARGET_ID:
		out_size = sizeof (EXT_DEST_ADDR);
		break;
	case EXT_SC_GET_BEACON_STATE:
		out_size = sizeof (EXT_BEACON_CONTROL);
		break;
	case EXT_SC_GET_FC4_STATISTICS:
		out_size = sizeof (EXT_HBA_FC4STATISTICS);
		break;
	case EXT_SC_GET_DCBX_PARAM:
		out_size = EXT_DEF_DCBX_PARAM_BUF_SIZE;
		break;
	case EXT_SC_GET_RESOURCE_CNTS:
		out_size = sizeof (EXT_RESOURCE_CNTS);
		break;
	case EXT_SC_GET_FCF_LIST:
		out_size = sizeof (EXT_FCF_LIST);
		break;
	case EXT_SC_GET_SCSI_ADDR:
	case EXT_SC_GET_ERR_DETECTIONS:
	case EXT_SC_GET_BUS_MODE:
	case EXT_SC_GET_DR_DUMP_BUF:
	case EXT_SC_GET_RISC_CODE:
	case EXT_SC_GET_FLASH_RAM:
	case EXT_SC_GET_LINK_STATUS:
	case EXT_SC_GET_LOOP_ID:
	case EXT_SC_GET_LUN_BITMASK:
	case EXT_SC_GET_PORT_DATABASE:
	case EXT_SC_GET_PORT_DATABASE_MEM:
	case EXT_SC_GET_POSITION_MAP:
	case EXT_SC_GET_RETRY_CNT:
	case EXT_SC_GET_RTIN:
	case EXT_SC_GET_FC_LUN_BITMASK:
	case EXT_SC_GET_SEL_TIMEOUT:
	default:
		/* function not supported. */
		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
		ql_get_statistics_fc4(ha, cmd, mode);
		break;
	case EXT_SC_GET_PORT_SUMMARY:
		ql_get_port_summary(ha, cmd, mode);
		break;
	case EXT_SC_GET_TARGET_ID:
		ql_get_target_id(ha, cmd, mode);
		break;
	case EXT_SC_GET_BEACON_STATE:
		ql_get_led_state(ha, cmd, mode);
		break;
	case EXT_SC_GET_DCBX_PARAM:
		ql_get_dcbx_parameters(ha, cmd, mode);
		break;
	case EXT_SC_GET_FCF_LIST:
		ql_get_fcf_list(ha, cmd, mode);
		break;
	case EXT_SC_GET_RESOURCE_CNTS:
		ql_get_resource_counts(ha, cmd, mode);
		break;
	}

	if (cmd->Status != EXT_STATUS_OK) {
		EL(ha, "failed, Status=%d\n", cmd->Status);
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
}
3962
3963 /* ******************************************************************** */
3964 /* Helper Functions */
3965 /* ******************************************************************** */
3966
3967 /*
3968 * ql_lun_count
 *	Get the number of LUNs on a target.
3970 *
3971 * Input:
3972 * ha: adapter state pointer.
 *	tq:	device queue pointer.
3974 *
3975 * Returns:
3976 * Number of LUNs.
3977 *
3978 * Context:
3979 * Kernel context.
3980 */
3981 static int
3982 ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
3983 {
3984 int cnt;
3985
3986 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
3987
3988 /* Bypass LUNs that failed. */
3989 cnt = ql_report_lun(ha, tq);
3990 if (cnt == 0) {
3991 cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
3992 }
3993
3994 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
3995
3996 return (cnt);
3997 }
3998
3999 /*
4000 * ql_report_lun
 *	Get the number of LUNs using the SCSI REPORT LUNS command.
4002 *
4003 * Input:
4004 * ha: adapter state pointer.
 *	tq:	target queue pointer.
4006 *
4007 * Returns:
4008 * Number of LUNs.
4009 *
4010 * Context:
4011 * Kernel context.
4012 */
static int
ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
{
	int			rval;
	uint8_t			retries;
	ql_mbx_iocb_t		*pkt;
	ql_rpt_lun_lst_t	*rpt;
	dma_mem_t		dma_mem;
	uint32_t		pkt_size, cnt;
	uint16_t		comp_status;
	uint8_t			scsi_status_h, scsi_status_l, *reqs;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		return (0);
	}

	/* One allocation: IOCB followed by the REPORT LUNS response copy. */
	pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
	if (pkt == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		return (0);
	}
	rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		kmem_free(pkt, pkt_size);
		return (0);
	}

	/* Retry the command up to 4 times. */
	for (retries = 0; retries < 4; retries++) {
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* 24xx-family: build a command type 7 IOCB. */
			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set Virtual Port ID */
			pkt->cmd24.vp_index = ha->vp_index;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = LE_16(15);

			/* Load SCSI CDB */
			pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
			pkt->cmd24.scsi_cdb[6] =
			pkt->cmd24.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd24.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			/* Set transfer direction. */
			pkt->cmd24.control_flags = CF_RD;

			/* Set data segment count. */
			pkt->cmd24.dseg_count = LE_16(1);

			/* Load total byte count. */
			/* Load data descriptor. */
			pkt->cmd24.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.total_byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd24.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* 64-bit addressing: command type 3 IOCB. */
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			pkt->cmd3.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd3.timeout = LE_16(15);
			pkt->cmd3.dseg_count = LE_16(1);
			pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
			pkt->cmd3.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd3.byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd3.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		} else {
			/* 32-bit addressing: command type 2 IOCB. */
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			pkt->cmd.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd.timeout = LE_16(15);
			pkt->cmd.dseg_count = LE_16(1);
			pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
			pkt->cmd.scsi_cdb[6] =
			    MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[7] =
			    LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[8] =
			    MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.scsi_cdb[9] =
			    LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
			pkt->cmd.byte_count =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
			pkt->cmd.dseg_0_address = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd.dseg_0_length =
			    LE_32(sizeof (ql_rpt_lun_lst_t));
		}

		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* Sync in coming DMA buffer. */
		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy in coming DMA data. */
		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);

		/* Extract status fields from the firmware status entry. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
			scsi_status_h = pkt->sts24.scsi_status_h;
			scsi_status_l = pkt->sts24.scsi_status_l;
			/* Sense data follows any FCP response data. */
			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
			reqs = &pkt->sts24.rsp_sense_data[cnt];
		} else {
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
			scsi_status_h = pkt->sts.scsi_status_h;
			scsi_status_l = pkt->sts.scsi_status_l;
			reqs = &pkt->sts.req_sense_data[0];
		}
		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			rval = QL_FUNCTION_PARAMETER_ERROR;
			if (scsi_status_l & STATUS_CHECK) {
				EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
				    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
				    reqs[1], reqs[2], reqs[3], reqs[4],
				    reqs[5], reqs[6], reqs[7], reqs[8],
				    reqs[9], reqs[10], reqs[11], reqs[12],
				    reqs[13], reqs[14], reqs[15], reqs[16],
				    reqs[17]);
			}
		} else {
			break;
		}
		/* Start the retry with a clean IOCB. */
		bzero((caddr_t)pkt, pkt_size);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed=%xh\n", rval);
		rval = 0;
	} else {
		QL_PRINT_9(CE_CONT, "(%d): LUN list\n", ha->instance);
		QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
		/*
		 * hdr.len is the big-endian LUN-list byte length; each
		 * REPORT LUNS entry is 8 bytes (SPC), so len/8 = LUN count.
		 */
		rval = (int)(BE_32(rpt->hdr.len) / 8);
	}

	kmem_free(pkt, pkt_size);
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
4250
4251 /*
4252 * ql_inq_scan
 *	Get the number of LUNs using the SCSI INQUIRY command.
4254 *
4255 * Input:
4256 * ha: adapter state pointer.
4257 * tq: target queue pointer.
4258 * count: scan for the number of existing LUNs.
4259 *
4260 * Returns:
4261 * Number of LUNs.
4262 *
4263 * Context:
4264 * Kernel context.
4265 */
4266 static int
4267 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count)
4268 {
4269 int lun, cnt, rval;
4270 ql_mbx_iocb_t *pkt;
4271 uint8_t *inq;
4272 uint32_t pkt_size;
4273
4274 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
4275
4276 pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE;
4277 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4278 if (pkt == NULL) {
4279 EL(ha, "failed, kmem_zalloc\n");
4280 return (0);
4281 }
4282 inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4283
4284 cnt = 0;
4285 for (lun = 0; lun < MAX_LUNS; lun++) {
4286
4287 if (DRIVER_SUSPENDED(ha)) {
4288 rval = QL_LOOP_DOWN;
4289 cnt = 0;
4290 break;
4291 }
4292
4293 rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE);
4294 if (rval == QL_SUCCESS) {
4295 switch (*inq) {
4296 case DTYPE_DIRECT:
4297 case DTYPE_PROCESSOR: /* Appliance. */
4298 case DTYPE_WORM:
4299 case DTYPE_RODIRECT:
4300 case DTYPE_SCANNER:
4301 case DTYPE_OPTICAL:
4302 case DTYPE_CHANGER:
4303 case DTYPE_ESI:
4304 cnt++;
4305 break;
4306 case DTYPE_SEQUENTIAL:
4307 cnt++;
4308 tq->flags |= TQF_TAPE_DEVICE;
4309 break;
4310 default:
4311 QL_PRINT_9(CE_CONT, "(%d): failed, "
4312 "unsupported device id=%xh, lun=%d, "
4313 "type=%xh\n", ha->instance, tq->loop_id,
4314 lun, *inq);
4315 break;
4316 }
4317
4318 if (*inq == DTYPE_ESI || cnt >= count) {
4319 break;
4320 }
4321 } else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) {
4322 cnt = 0;
4323 break;
4324 }
4325 }
4326
4327 kmem_free(pkt, pkt_size);
4328
4329 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
4330
4331 return (cnt);
4332 }
4333
4334 /*
4335 * ql_inq
4336 * Issue inquiry command.
4337 *
4338 * Input:
4339 * ha: adapter state pointer.
4340 * tq: target queue pointer.
4341 * lun: LUN number.
4342 * pkt: command and buffer pointer.
4343 * inq_len: amount of inquiry data.
4344 *
4345 * Returns:
4346 * ql local function return status code.
4347 *
4348 * Context:
4349 * Kernel context.
4350 */
static int
ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt,
    uint8_t inq_len)
{
	dma_mem_t	dma_mem;
	int		rval, retries;
	uint32_t	pkt_size, cnt;
	uint16_t	comp_status;
	uint8_t		scsi_status_h, scsi_status_l, *reqs;
	caddr_t		inq_data;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, loop down\n");
		return (QL_FUNCTION_TIMEOUT);
	}

	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len);
	bzero((caddr_t)pkt, pkt_size);

	/* Inquiry data is returned in the area directly after the IOCB. */
	inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, &dma_mem, inq_len,
	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		return (0);
	}

	/* Retry the command up to 4 times. */
	for (retries = 0; retries < 4; retries++) {
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/* 24xx-family: build a command type 7 IOCB. */
			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set LUN number */
			pkt->cmd24.fcp_lun[2] = LSB(lun);
			pkt->cmd24.fcp_lun[3] = MSB(lun);

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set Virtual Port ID */
			pkt->cmd24.vp_index = ha->vp_index;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = LE_16(15);

			/* Load SCSI CDB */
			pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd24.scsi_cdb[4] = inq_len;
			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			/* Set transfer direction. */
			pkt->cmd24.control_flags = CF_RD;

			/* Set data segment count. */
			pkt->cmd24.dseg_count = LE_16(1);

			/* Load total byte count. */
			pkt->cmd24.total_byte_count = LE_32(inq_len);

			/* Load data descriptor. */
			pkt->cmd24.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg_0_length = LE_32(inq_len);
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* 64-bit addressing: command type 3 IOCB. */
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			cnt = CMD_TYPE_3_DATA_SEGMENTS;

			pkt->cmd3.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.lun_l = LSB(lun);
			pkt->cmd3.lun_h = MSB(lun);
			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd3.timeout = LE_16(15);
			pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd3.scsi_cdb[4] = inq_len;
			pkt->cmd3.dseg_count = LE_16(1);
			pkt->cmd3.byte_count = LE_32(inq_len);
			pkt->cmd3.dseg_0_address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg_0_length = LE_32(inq_len);
		} else {
			/* 32-bit addressing: command type 2 IOCB. */
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			cnt = CMD_TYPE_2_DATA_SEGMENTS;

			pkt->cmd.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.lun_l = LSB(lun);
			pkt->cmd.lun_h = MSB(lun);
			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd.timeout = LE_16(15);
			pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd.scsi_cdb[4] = inq_len;
			pkt->cmd.dseg_count = LE_16(1);
			pkt->cmd.byte_count = LE_32(inq_len);
			pkt->cmd.dseg_0_address = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd.dseg_0_length = LE_32(inq_len);
		}

		/* rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */
		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* Sync in coming IOCB DMA buffer. */
		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy in coming DMA data. */
		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data,
		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);

		/* Extract status fields from the firmware status entry. */
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
			scsi_status_h = pkt->sts24.scsi_status_h;
			scsi_status_l = pkt->sts24.scsi_status_l;
			/* Sense data follows any FCP response data. */
			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
			reqs = &pkt->sts24.rsp_sense_data[cnt];
		} else {
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
			scsi_status_h = pkt->sts.scsi_status_h;
			scsi_status_l = pkt->sts.scsi_status_l;
			reqs = &pkt->sts.req_sense_data[0];
		}
		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			rval = QL_FUNCTION_PARAMETER_ERROR;
			}
			rval = QL_FUNCTION_FAILED;
		}

		if (scsi_status_l & STATUS_CHECK) {
			EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
			    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
			    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
			    reqs[1], reqs[2], reqs[3], reqs[4],
			    reqs[5], reqs[6], reqs[7], reqs[8],
			    reqs[9], reqs[10], reqs[11], reqs[12],
			    reqs[13], reqs[14], reqs[15], reqs[16],
			    reqs[17]);
		}
		} else {
			break;
		}
	}
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
4549
4550 /*
4551 * ql_get_buffer_data
 *	Copies data from user space to a kernel buffer.
4553 *
4554 * Input:
4555 * src: User source buffer address.
 *	dst:	Kernel destination buffer address.
4557 * size: Amount of data.
4558 * mode: flags.
4559 *
4560 * Returns:
4561 * Returns number of bytes transferred.
4562 *
4563 * Context:
4564 * Kernel context.
4565 */
4566 static uint32_t
4567 ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4568 {
4569 uint32_t cnt;
4570
4571 for (cnt = 0; cnt < size; cnt++) {
4572 if (ddi_copyin(src++, dst++, 1, mode) != 0) {
4573 QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4574 break;
4575 }
4576 }
4577
4578 return (cnt);
4579 }
4580
4581 /*
4582 * ql_send_buffer_data
 *	Copies data from a kernel buffer to user space.
4584 *
4585 * Input:
 *	src:	Kernel source buffer address.
4587 * dst: User destination buffer address.
4588 * size: Amount of data.
4589 * mode: flags.
4590 *
4591 * Returns:
4592 * Returns number of bytes transferred.
4593 *
4594 * Context:
4595 * Kernel context.
4596 */
4597 static uint32_t
4598 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4599 {
4600 uint32_t cnt;
4601
4602 for (cnt = 0; cnt < size; cnt++) {
4603 if (ddi_copyout(src++, dst++, 1, mode) != 0) {
4604 QL_PRINT_2(CE_CONT, "failed, ddi_copyin\n");
4605 break;
4606 }
4607 }
4608
4609 return (cnt);
4610 }
4611
4612 /*
4613 * ql_find_port
4614 * Locates device queue.
4615 *
4616 * Input:
4617 * ha: adapter state pointer.
4618 * name: device port name.
4619 *
4620 * Returns:
4621 * Returns target queue pointer.
4622 *
4623 * Context:
4624 * Kernel context.
4674 * Get flash descriptor table.
4675 *
4676 * Input:
4677 * ha: adapter state pointer.
4678 *
4679 * Returns:
4680 * ql local function return status code.
4681 *
4682 * Context:
4683 * Kernel context.
4684 */
static int
ql_24xx_flash_desc(ql_adapter_state_t *ha)
{
	uint32_t	cnt;
	uint16_t	chksum, *bp, data;
	int		rval;
	flash_desc_t	*fdesc;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Nothing to read if no descriptor table address was discovered. */
	if (ha->flash_desc_addr == 0) {
		QL_PRINT_9(CE_CONT, "(%d): desc ptr=0\n", ha->instance);
		return (QL_FUNCTION_FAILED);
	}

	if ((fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP)) == NULL) {
		EL(ha, "kmem_zalloc=null\n");
		return (QL_MEMORY_ALLOC_FAILED);
	}
	/* flash_desc_addr is a word address; << 2 converts it to bytes. */
	rval = ql_dump_fcode(ha, (uint8_t *)fdesc, sizeof (flash_desc_t),
	    ha->flash_desc_addr << 2);
	if (rval != QL_SUCCESS) {
		EL(ha, "read status=%xh\n", rval);
		kmem_free(fdesc, sizeof (flash_desc_t));
		return (rval);
	}

	/* Checksum the table one little-endian 16-bit word at a time. */
	chksum = 0;
	bp = (uint16_t *)fdesc;
	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
		data = *bp++;
		LITTLE_ENDIAN_16(&data);
	LITTLE_ENDIAN_16(&fdesc->flash_id);
	LITTLE_ENDIAN_32(&fdesc->block_size);
	LITTLE_ENDIAN_32(&fdesc->alt_block_size);
	LITTLE_ENDIAN_32(&fdesc->flash_size);
	LITTLE_ENDIAN_32(&fdesc->write_enable_data);
	LITTLE_ENDIAN_32(&fdesc->read_timeout);

	/* flash size in desc table is in 1024 bytes */
	fdesc->flash_size = fdesc->flash_size * 0x400;

	/* Reject the table unless checksum, magic and version all match. */
	if (chksum != 0 || fdesc->flash_valid != FLASH_DESC_VAILD ||
	    fdesc->flash_version != FLASH_DESC_VERSION) {
		EL(ha, "invalid descriptor table\n");
		kmem_free(fdesc, sizeof (flash_desc_t));
		return (QL_FUNCTION_FAILED);
	}

	/* Publish the validated table in the per-adapter xioctl state. */
	bcopy(fdesc, &xp->fdesc, sizeof (flash_desc_t));
	kmem_free(fdesc, sizeof (flash_desc_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
4750
4751 /*
4752 * ql_setup_flash
4753 * Gets the manufacturer and id number of the flash chip, and
4754 * sets up the size parameter.
4755 *
4756 * Input:
4757 * ha: adapter state pointer.
4758 *
4759 * Returns:
4760 * int: ql local function return status code.
4761 *
4762 * Context:
4763 * Kernel context.
4764 */
4765 static int
4766 ql_setup_flash(ql_adapter_state_t *ha)
4767 {
4768 ql_xioctl_t *xp = ha->xioctl;
4769 int rval = QL_SUCCESS;
4770
4771 if (xp->fdesc.flash_size != 0) {
4772 return (rval);
4773 }
4774
4775 if (CFG_IST(ha, CFG_CTRL_2200) && !ha->subven_id) {
4776 return (QL_FUNCTION_FAILED);
4777 }
4778
4779 if (CFG_IST(ha, CFG_CTRL_258081)) {
4780 /*
4781 * Temporarily set the ha->xioctl->fdesc.flash_size to
4782 * 25xx flash size to avoid failing of ql_dump_focde.
4783 */
4784 if (CFG_IST(ha, CFG_CTRL_8021)) {
4785 ha->xioctl->fdesc.flash_size = 0x800000;
4786 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
4787 ha->xioctl->fdesc.flash_size = 0x200000;
4788 } else {
4789 ha->xioctl->fdesc.flash_size = 0x400000;
4790 }
4791
4792 if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
4793 EL(ha, "flash desc table ok, exit\n");
4794 return (rval);
4795 }
4796 if (CFG_IST(ha, CFG_CTRL_8021)) {
4797 xp->fdesc.flash_manuf = WINBOND_FLASH;
4798 xp->fdesc.flash_id = WINBOND_FLASHID;
4799 xp->fdesc.flash_len = 0x17;
4800 } else {
4801 (void) ql_24xx_flash_id(ha);
4802 }
4803
4804 } else if (CFG_IST(ha, CFG_CTRL_2422)) {
4805 (void) ql_24xx_flash_id(ha);
4806 } else {
4807 ql_flash_enable(ha);
4808
4809 ql_write_flash_byte(ha, 0x5555, 0xaa);
4810 ql_write_flash_byte(ha, 0x2aaa, 0x55);
4811 ql_write_flash_byte(ha, 0x5555, 0x90);
4812 xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);
4813
4814 if (CFG_IST(ha, CFG_SBUS_CARD)) {
4815 ql_write_flash_byte(ha, 0xaaaa, 0xaa);
4816 ql_write_flash_byte(ha, 0x5555, 0x55);
4817 ql_write_flash_byte(ha, 0xaaaa, 0x90);
4818 xp->fdesc.flash_id = (uint16_t)
4819 ql_read_flash_byte(ha, 0x0002);
4820 } else {
4821 ql_write_flash_byte(ha, 0x5555, 0xaa);
4822 ql_write_flash_byte(ha, 0x2aaa, 0x55);
4823 ql_write_flash_byte(ha, 0x5555, 0x90);
4824 xp->fdesc.flash_id = (uint16_t)
4825 ql_read_flash_byte(ha, 0x0001);
4826 }
4827
4828 ql_write_flash_byte(ha, 0x5555, 0xaa);
4829 ql_write_flash_byte(ha, 0x2aaa, 0x55);
4830 ql_write_flash_byte(ha, 0x5555, 0xf0);
4831
4832 ql_flash_disable(ha);
4833 }
4834
4835 /* Default flash descriptor table. */
4836 xp->fdesc.write_statusreg_cmd = 1;
4837 xp->fdesc.write_enable_bits = 0;
4838 xp->fdesc.unprotect_sector_cmd = 0;
4839 xp->fdesc.protect_sector_cmd = 0;
4840 xp->fdesc.write_disable_bits = 0x9c;
4841 xp->fdesc.block_size = 0x10000;
4842 xp->fdesc.erase_cmd = 0xd8;
4843
4844 switch (xp->fdesc.flash_manuf) {
4845 case AMD_FLASH:
4846 switch (xp->fdesc.flash_id) {
4847 case SPAN_FLASHID_2048K:
4848 xp->fdesc.flash_size = 0x200000;
4849 break;
4850 case AMD_FLASHID_1024K:
4851 xp->fdesc.flash_size = 0x100000;
4852 break;
4853 case AMD_FLASHID_512K:
4854 case AMD_FLASHID_512Kt:
4855 case AMD_FLASHID_512Kb:
4856 if (CFG_IST(ha, CFG_SBUS_CARD)) {
4857 xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
4858 } else {
4859 xp->fdesc.flash_size = 0x80000;
4860 }
4861 break;
4862 case AMD_FLASHID_128K:
4863 xp->fdesc.flash_size = 0x20000;
4864 break;
4865 default:
4866 rval = QL_FUNCTION_FAILED;
4867 break;
4868 }
4869 break;
4870 case ST_FLASH:
4871 switch (xp->fdesc.flash_id) {
4872 case ST_FLASHID_128K:
4873 xp->fdesc.flash_size = 0x20000;
4874 break;
4875 case ST_FLASHID_512K:
4876 xp->fdesc.flash_size = 0x80000;
4877 break;
4878 case ST_FLASHID_M25PXX:
4879 if (xp->fdesc.flash_len == 0x14) {
4880 xp->fdesc.flash_size = 0x100000;
4881 } else if (xp->fdesc.flash_len == 0x15) {
4882 xp->fdesc.flash_size = 0x200000;
4883 } else {
4884 rval = QL_FUNCTION_FAILED;
4885 }
4886 break;
4887 default:
4888 rval = QL_FUNCTION_FAILED;
4889 break;
4890 }
4891 break;
4892 case SST_FLASH:
4893 switch (xp->fdesc.flash_id) {
4894 case SST_FLASHID_128K:
4895 xp->fdesc.flash_size = 0x20000;
4896 break;
4897 case SST_FLASHID_1024K_A:
4898 xp->fdesc.flash_size = 0x100000;
4899 xp->fdesc.block_size = 0x8000;
4900 xp->fdesc.erase_cmd = 0x52;
4901 break;
4902 case SST_FLASHID_1024K:
4903 case SST_FLASHID_1024K_B:
4904 xp->fdesc.flash_size = 0x100000;
4905 break;
4906 case SST_FLASHID_2048K:
4907 xp->fdesc.flash_size = 0x200000;
4908 break;
4909 default:
4910 rval = QL_FUNCTION_FAILED;
4911 break;
4912 }
4913 break;
4914 case MXIC_FLASH:
4915 switch (xp->fdesc.flash_id) {
4916 case MXIC_FLASHID_512K:
4917 xp->fdesc.flash_size = 0x80000;
4918 break;
4919 case MXIC_FLASHID_1024K:
4920 xp->fdesc.flash_size = 0x100000;
4921 break;
4922 case MXIC_FLASHID_25LXX:
4923 if (xp->fdesc.flash_len == 0x14) {
4924 xp->fdesc.flash_size = 0x100000;
4925 } else if (xp->fdesc.flash_len == 0x15) {
4926 xp->fdesc.flash_size = 0x200000;
4927 } else {
4928 rval = QL_FUNCTION_FAILED;
4929 }
4930 break;
4931 default:
4932 rval = QL_FUNCTION_FAILED;
4933 break;
4934 }
4935 break;
4936 case ATMEL_FLASH:
4937 switch (xp->fdesc.flash_id) {
4938 case ATMEL_FLASHID_1024K:
4939 xp->fdesc.flash_size = 0x100000;
4940 xp->fdesc.write_disable_bits = 0xbc;
4941 xp->fdesc.unprotect_sector_cmd = 0x39;
4942 xp->fdesc.protect_sector_cmd = 0x36;
4943 break;
4944 default:
4945 rval = QL_FUNCTION_FAILED;
4946 break;
4947 }
4948 break;
4949 case WINBOND_FLASH:
4950 switch (xp->fdesc.flash_id) {
4951 case WINBOND_FLASHID:
4952 if (xp->fdesc.flash_len == 0x15) {
4953 xp->fdesc.flash_size = 0x200000;
4954 } else if (xp->fdesc.flash_len == 0x16) {
4955 xp->fdesc.flash_size = 0x400000;
4956 } else if (xp->fdesc.flash_len == 0x17) {
4957 xp->fdesc.flash_size = 0x800000;
4958 } else {
4959 rval = QL_FUNCTION_FAILED;
4960 }
4961 break;
4962 default:
4963 rval = QL_FUNCTION_FAILED;
4964 break;
4965 }
4966 break;
4967 case INTEL_FLASH:
4968 switch (xp->fdesc.flash_id) {
4969 case INTEL_FLASHID:
4970 if (xp->fdesc.flash_len == 0x11) {
4971 xp->fdesc.flash_size = 0x200000;
4972 } else if (xp->fdesc.flash_len == 0x12) {
4973 xp->fdesc.flash_size = 0x400000;
4974 } else if (xp->fdesc.flash_len == 0x13) {
4975 xp->fdesc.flash_size = 0x800000;
4976 } else {
4977 rval = QL_FUNCTION_FAILED;
4978 }
4979 break;
4980 default:
4981 rval = QL_FUNCTION_FAILED;
4982 break;
4983 }
4984 break;
4985 default:
4986 rval = QL_FUNCTION_FAILED;
4987 break;
4988 }
4989
4990 /* Try flash table later. */
4991 if (rval != QL_SUCCESS && CFG_IST(ha, CFG_CTRL_24258081)) {
4992 EL(ha, "no default id\n");
4993 return (QL_SUCCESS);
4994 }
4995
4996 /*
4997 * hack for non std 2312 and 6312 boards. hardware people need to
4998 * use either the 128k flash chip (original), or something larger.
4999 * For driver purposes, we'll treat it as a 128k flash chip.
5000 */
5001 if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
5002 ha->device_id == 0x2322 || ha->device_id == 0x6322) &&
5003 (xp->fdesc.flash_size > 0x20000) &&
5004 (CFG_IST(ha, CFG_SBUS_CARD) == 0)) {
5005 EL(ha, "chip exceeds max size: %xh, using 128k\n",
5006 xp->fdesc.flash_size);
5007 xp->fdesc.flash_size = 0x20000;
5008 }
5009
5010 if (rval == QL_SUCCESS) {
5011 EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
5012 xp->fdesc.flash_manuf, xp->fdesc.flash_id,
5013 xp->fdesc.flash_size);
5014 } else {
5015 EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
5016 xp->fdesc.flash_manuf, xp->fdesc.flash_id);
5017 }
5018
5019 return (rval);
5025 *
5026 * Input:
5027 * ha: adapter state pointer.
5028 * bp: user buffer address.
5029 * size: user buffer size.
5030 * mode: flags
5031 *
5032 * Returns:
5033 *
5034 * Context:
5035 * Kernel context.
5036 */
5037 static int
5038 ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5039 int mode)
5040 {
5041 uint8_t *bfp;
5042 ql_xioctl_t *xp = ha->xioctl;
5043 int rval = 0;
5044
5045 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5046
5047 if (bsize > xp->fdesc.flash_size) {
5048 EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
5049 xp->fdesc.flash_size);
5050 return (ENOMEM);
5051 }
5052
5053 if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5054 EL(ha, "failed, kmem_zalloc\n");
5055 rval = ENOMEM;
5056 } else {
5057 if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
5058 EL(ha, "failed, ddi_copyin\n");
5059 rval = EFAULT;
5060 } else if (ql_load_fcode(ha, bfp, bsize, 0) != QL_SUCCESS) {
5061 EL(ha, "failed, load_fcode\n");
5062 rval = EFAULT;
5063 } else {
5064 /* Reset caches on all adapter instances. */
5065 ql_update_flash_caches(ha);
5066 rval = 0;
5067 }
5068 kmem_free(bfp, bsize);
5069 }
5070
5071 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5072
5073 return (rval);
5074 }
5075
5076 /*
5077 * ql_load_fcode
5078 * Loads fcode in to flash.
5079 *
5080 * Input:
5081 * ha: adapter state pointer.
5082 * dp: data pointer.
5083 * size: data length.
5084 * addr: flash byte address.
5085 *
5086 * Returns:
5087 * ql local function return status code.
5088 *
5089 * Context:
5090 * Kernel context.
5091 */
5092 int
5093 ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size, uint32_t addr)
5094 {
5095 uint32_t cnt;
5096 int rval;
5097
5098 if (CFG_IST(ha, CFG_CTRL_24258081)) {
5099 return (ql_24xx_load_flash(ha, dp, size, addr));
5100 }
5101
5102 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5103
5104 if (CFG_IST(ha, CFG_SBUS_CARD)) {
5105 /*
5106 * sbus has an additional check to make
5107 * sure they don't brick the HBA.
5108 */
5109 if (dp[0] != 0xf1) {
5110 EL(ha, "failed, incorrect fcode for sbus\n");
5111 return (QL_FUNCTION_PARAMETER_ERROR);
5112 }
5113 }
5114
5115 GLOBAL_HW_LOCK();
5116
5117 /* Enable Flash Read/Write. */
5118 ql_flash_enable(ha);
5119
5120 /* Erase flash prior to write. */
5121 rval = ql_erase_flash(ha, 0);
5122
5124 /* Write fcode data to flash. */
5125 for (cnt = 0; cnt < (uint32_t)size; cnt++) {
5126 /* Allow other system activity. */
5127 if (cnt % 0x1000 == 0) {
5128 drv_usecwait(1);
5129 }
5130 rval = ql_program_flash_address(ha, addr++, *dp++);
5131 if (rval != QL_SUCCESS)
5132 break;
5133 }
5134 }
5135
5136 ql_flash_disable(ha);
5137
5138 GLOBAL_HW_UNLOCK();
5139
5140 if (rval != QL_SUCCESS) {
5141 EL(ha, "failed, rval=%xh\n", rval);
5142 } else {
5143 /*EMPTY*/
5144 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5145 }
5146 return (rval);
5147 }
5148
5149 /*
5150 * ql_flash_fcode_dump
5151 * Dumps FLASH to application.
5152 *
5153 * Input:
5154 * ha: adapter state pointer.
5155 * bp: user buffer address.
5156 * bsize: user buffer size
5157 * faddr: flash byte address
5158 * mode: flags
5159 *
5160 * Returns:
5161 *
5162 * Context:
5163 * Kernel context.
5164 */
5165 static int
5166 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5167 uint32_t faddr, int mode)
5168 {
5169 uint8_t *bfp;
5170 int rval;
5171 ql_xioctl_t *xp = ha->xioctl;
5172
5173 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5174
5175 /* adjust max read size to flash size */
5176 if (bsize > xp->fdesc.flash_size) {
5177 EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
5178 xp->fdesc.flash_size);
5179 bsize = xp->fdesc.flash_size;
5180 }
5181
5182 if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5183 EL(ha, "failed, kmem_zalloc\n");
5184 rval = ENOMEM;
5185 } else {
5186 /* Dump Flash fcode. */
5187 rval = ql_dump_fcode(ha, bfp, bsize, faddr);
5188
5189 if (rval != QL_SUCCESS) {
5190 EL(ha, "failed, dump_fcode = %x\n", rval);
5191 rval = EFAULT;
5192 } else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
5193 EL(ha, "failed, ddi_copyout\n");
5194 rval = EFAULT;
5195 } else {
5196 rval = 0;
5197 }
5198 kmem_free(bfp, bsize);
5199 }
5200
5201 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5202
5203 return (rval);
5204 }
5205
5206 /*
5207 * ql_dump_fcode
5208 * Dumps fcode from flash.
5209 *
5210 * Input:
5211 * ha: adapter state pointer.
5212 * dp: data pointer.
5213 * size: data length in bytes.
5214 * startpos: starting position in flash (byte address).
5215 *
5216 * Returns:
5217 * ql local function return status code.
5218 *
5219 * Context:
5220 * Kernel context.
5221 *
5222 */
int
ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
    uint32_t startpos)
{
	uint32_t	cnt, data, addr;
	uint8_t		bp[4], *src;
	int		fp_rval, rval = QL_SUCCESS;
	dma_mem_t	mem;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* make sure startpos+size doesn't exceed flash */
	if (size + startpos > ha->xioctl->fdesc.flash_size) {
		EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
		    size, startpos, ha->xioctl->fdesc.flash_size);
		return (QL_FUNCTION_PARAMETER_ERROR);
	}

	if (CFG_IST(ha, CFG_CTRL_24258081)) {
		/* check start addr is 32 bit aligned for 24xx */
		if ((startpos & 0x3) != 0) {
			/*
			 * Read the whole 32-bit word containing startpos
			 * and emit only the trailing bytes, so the main
			 * loop below always starts word-aligned.
			 */
			rval = ql_24xx_read_flash(ha,
			    ha->flash_data_addr | startpos >> 2, &data);
			if (rval != QL_SUCCESS) {
				EL(ha, "failed2, rval = %xh\n", rval);
				return (rval);
			}
			/* Unpack the word least-significant byte first. */
			bp[0] = LSB(LSW(data));
			bp[1] = MSB(LSW(data));
			bp[2] = LSB(MSW(data));
			bp[3] = MSB(MSW(data));
			while (size && startpos & 0x3) {
				*dp++ = bp[startpos & 0x3];
				startpos++;
				size--;
			}
			/* Request was satisfied by the partial word. */
			if (size == 0) {
				QL_PRINT_9(CE_CONT, "(%d): done2\n",
				    ha->instance);
				return (rval);
			}
		}

		/* adjust 24xx start addr for 32 bit words */
		addr = startpos / 4 | ha->flash_data_addr;
	}

	bzero(&mem, sizeof (dma_mem_t));
	/* Check for Fast page is supported */
	if ((ha->pha->task_daemon_flags & FIRMWARE_UP) &&
	    (CFG_IST(ha, CFG_CTRL_2581))) {
		/* Firmware-assisted bulk reads are available. */
		fp_rval = QL_SUCCESS;
		/* Setup DMA buffer. */
		rval = ql_get_dma_mem(ha, &mem, size,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, ql_get_dma_mem=%xh\n",
			    rval);
			return (ENOMEM);
		}
	} else {
		fp_rval = QL_NOT_SUPPORTED;
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
		ql_flash_enable(ha);
	}

	/* Read fcode data from flash. */
	while (size) {
		/* Allow other system activity. */
		if (size % 0x1000 == 0) {
			ql_delay(ha, 100000);
		}
		if (CFG_IST(ha, CFG_CTRL_24258081)) {
			/*
			 * Prefer the fast path: have the firmware DMA the
			 * remaining words into the kernel buffer in one
			 * shot; fall back to word-at-a-time reads if it
			 * ever fails (fp_rval stays failed from then on).
			 */
			if (fp_rval == QL_SUCCESS && (addr & 0x3f) == 0) {
				cnt = (size + 3) >> 2;
				fp_rval = ql_rd_risc_ram(ha, addr,
				    mem.cookie.dmac_laddress, cnt);
				if (fp_rval == QL_SUCCESS) {
					for (src = mem.bp; size; size--) {
						*dp++ = *src++;
					}
					addr += cnt;
					continue;
				}
			}
			/* Slow path: one 32-bit word per mailbox read. */
			rval = ql_24xx_read_flash(ha, addr++,
			    &data);
			if (rval != QL_SUCCESS) {
				break;
			}
			bp[0] = LSB(LSW(data));
			bp[1] = MSB(LSW(data));
			bp[2] = LSB(MSW(data));
			bp[3] = MSB(MSW(data));
			/* Copy up to 4 bytes, honoring the remaining size. */
			for (cnt = 0; size && cnt < 4; size--) {
				*dp++ = bp[cnt++];
			}
		} else {
			/* Legacy parts read one byte at a time. */
			*dp++ = (uint8_t)ql_read_flash_byte(ha, startpos++);
			size--;
		}
	}

	if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
		ql_flash_disable(ha);
	}

	GLOBAL_HW_UNLOCK();

	/* Release the fast-path DMA buffer, if one was set up. */
	if (mem.dma_handle != NULL) {
		ql_free_dma_resource(ha, &mem);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	return (rval);
}
5349
5350 /*
5351 * ql_program_flash_address
5352 * Program flash address.
5353 *
5354 * Input:
5355 * ha: adapter state pointer.
5356 * addr: flash byte address.
5357 * data: data to be written to flash.
5358 *
5359 * Returns:
5360 * ql local function return status code.
5361 *
5362 * Context:
5363 * Kernel context.
5364 */
5365 static int
5387 }
5388 return (rval);
5389 }
5390
5391 /*
5392 * ql_set_rnid_parameters
5393 * Set RNID parameters.
5394 *
5395 * Input:
5396 * ha: adapter state pointer.
5397 * cmd: User space CT arguments pointer.
5398 * mode: flags.
5399 */
5400 static void
5401 ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5402 {
5403 EXT_SET_RNID_REQ tmp_set;
5404 EXT_RNID_DATA *tmp_buf;
5405 int rval = 0;
5406
5407 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5408
5409 if (DRIVER_SUSPENDED(ha)) {
5410 EL(ha, "failed, LOOP_NOT_READY\n");
5411 cmd->Status = EXT_STATUS_BUSY;
5412 cmd->ResponseLen = 0;
5413 return;
5414 }
5415
5416 cmd->ResponseLen = 0; /* NO response to caller. */
5417 if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
5418 /* parameter error */
5419 EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
5420 cmd->RequestLen);
5421 cmd->Status = EXT_STATUS_INVALID_PARAM;
5422 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
5423 cmd->ResponseLen = 0;
5424 return;
5425 }
5426
5427 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
5452 cmd->ResponseLen = 0;
5453 return;
5454 }
5455
5456 /* Now set the requested params. */
5457 bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
5458 bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
5459 bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);
5460
5461 rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
5462 (caddr_t)tmp_buf);
5463 if (rval != QL_SUCCESS) {
5464 /* error */
5465 EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
5466 cmd->Status = EXT_STATUS_ERR;
5467 cmd->ResponseLen = 0;
5468 }
5469
5470 kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5471
5472 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5473 }
5474
5475 /*
5476 * ql_get_rnid_parameters
5477 * Get RNID parameters.
5478 *
5479 * Input:
5480 * ha: adapter state pointer.
5481 * cmd: User space CT arguments pointer.
5482 * mode: flags.
5483 */
5484 static void
5485 ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5486 {
5487 EXT_RNID_DATA *tmp_buf;
5488 uint32_t rval;
5489
5490 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5491
5492 if (DRIVER_SUSPENDED(ha)) {
5493 EL(ha, "failed, LOOP_NOT_READY\n");
5494 cmd->Status = EXT_STATUS_BUSY;
5495 cmd->ResponseLen = 0;
5496 return;
5497 }
5498
5499 /* Allocate memory for command. */
5500 tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5501 if (tmp_buf == NULL) {
5502 EL(ha, "failed, kmem_zalloc\n");
5503 cmd->Status = EXT_STATUS_NO_MEMORY;
5504 cmd->ResponseLen = 0;
5505 return;
5506 }
5507
5508 /* Send command */
5509 rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5510 (caddr_t)tmp_buf);
5511 if (rval != QL_SUCCESS) {
5512 /* error */
5513 EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5514 kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5515 cmd->Status = EXT_STATUS_ERR;
5516 cmd->ResponseLen = 0;
5517 return;
5518 }
5519
5520 /* Copy the response */
5521 if (ql_send_buffer_data((caddr_t)tmp_buf,
5522 (caddr_t)(uintptr_t)cmd->ResponseAdr,
5523 sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
5524 EL(ha, "failed, ddi_copyout\n");
5525 cmd->Status = EXT_STATUS_COPY_ERR;
5526 cmd->ResponseLen = 0;
5527 } else {
5528 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5529 cmd->ResponseLen = sizeof (EXT_RNID_DATA);
5530 }
5531
5532 kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5533 }
5534
5535 /*
5536 * ql_reset_statistics
 *	Performs EXT_SC_RST_STATISTICS subcommand of EXT_CC_SET_DATA.
5538 *
5539 * Input:
5540 * ha: adapter state pointer.
5541 * cmd: Local EXT_IOCTL cmd struct pointer.
5542 *
 * Returns:
 *	ql local function return status code; request status is
 *	indicated in cmd->Status.
5545 *
5546 * Context:
5547 * Kernel context.
5548 */
static int
ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
{
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = 0;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* No mailbox traffic while the driver is suspended. */
	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return (QL_FUNCTION_SUSPENDED);
	}

	/* Ask the firmware to reset its link error counters. */
	rval = ql_reset_link_status(ha);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	}

	/*
	 * Clear the driver-maintained I/O counters even if the mailbox
	 * command failed above; the failure is still reported via rval.
	 */
	TASK_DAEMON_LOCK(ha);
	xp->IosRequested = 0;
	xp->BytesRequested = 0;
	xp->IOInputRequests = 0;
	xp->IOOutputRequests = 0;
	xp->IOControlRequests = 0;
	xp->IOInputMByteCnt = 0;
	xp->IOOutputMByteCnt = 0;
	xp->IOOutputByteCnt = 0;
	xp->IOInputByteCnt = 0;
	TASK_DAEMON_UNLOCK(ha);

	/* Error/interrupt counters are protected by the interrupt lock. */
	INTR_LOCK(ha);
	xp->ControllerErrorCount = 0;
	xp->DeviceErrorCount = 0;
	xp->TotalLipResets = 0;
	xp->TotalInterrupts = 0;
	INTR_UNLOCK(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
5595
5596 /*
5597 * ql_get_statistics
5598 * Performs EXT_SC_GET_STATISTICS subcommand. of EXT_CC_GET_DATA.
5599 *
5600 * Input:
5601 * ha: adapter state pointer.
5602 * cmd: Local EXT_IOCTL cmd struct pointer.
5603 * mode: flags.
5604 *
5605 * Returns:
5606 * None, request status indicated in cmd->Status.
5607 *
5608 * Context:
5609 * Kernel context.
5610 */
5611 static void
5612 ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5613 {
5614 EXT_HBA_PORT_STAT ps = {0};
5615 ql_link_stats_t *ls;
5616 int rval;
5617 ql_xioctl_t *xp = ha->xioctl;
5618 int retry = 10;
5619
5620 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5621
5622 while (ha->task_daemon_flags &
5623 (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5624 ql_delay(ha, 10000000); /* 10 second delay */
5625
5626 retry--;
5627
5628 if (retry == 0) { /* effectively 100 seconds */
5629 EL(ha, "failed, LOOP_NOT_READY\n");
5630 cmd->Status = EXT_STATUS_BUSY;
5631 cmd->ResponseLen = 0;
5632 return;
5633 }
5634 }
5635
5636 /* Allocate memory for command. */
5637 ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5638 if (ls == NULL) {
5639 EL(ha, "failed, kmem_zalloc\n");
5640 cmd->Status = EXT_STATUS_NO_MEMORY;
5668 ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5669 ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5670 ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5671 ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5672 ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5673
5674 rval = ddi_copyout((void *)&ps,
5675 (void *)(uintptr_t)cmd->ResponseAdr,
5676 sizeof (EXT_HBA_PORT_STAT), mode);
5677 if (rval != 0) {
5678 EL(ha, "failed, ddi_copyout\n");
5679 cmd->Status = EXT_STATUS_COPY_ERR;
5680 cmd->ResponseLen = 0;
5681 } else {
5682 cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5683 }
5684 }
5685
5686 kmem_free(ls, sizeof (ql_link_stats_t));
5687
5688 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5689 }
5690
5691 /*
5692 * ql_get_statistics_fc
5693 * Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5694 *
5695 * Input:
5696 * ha: adapter state pointer.
5697 * cmd: Local EXT_IOCTL cmd struct pointer.
5698 * mode: flags.
5699 *
5700 * Returns:
5701 * None, request status indicated in cmd->Status.
5702 *
5703 * Context:
5704 * Kernel context.
5705 */
5706 static void
5707 ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5708 {
5709 EXT_HBA_PORT_STAT ps = {0};
5710 ql_link_stats_t *ls;
5711 int rval;
5712 uint16_t qlnt;
5713 EXT_DEST_ADDR pextdestaddr;
5714 uint8_t *name;
5715 ql_tgt_t *tq = NULL;
5716 int retry = 10;
5717
5718 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5719
5720 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
5721 (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
5722 EL(ha, "failed, ddi_copyin\n");
5723 cmd->Status = EXT_STATUS_COPY_ERR;
5724 cmd->ResponseLen = 0;
5725 return;
5726 }
5727
5728 qlnt = QLNT_PORT;
5729 name = pextdestaddr.DestAddr.WWPN;
5730
5731 QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
5732 ha->instance, name[0], name[1], name[2], name[3], name[4],
5733 name[5], name[6], name[7]);
5734
5735 tq = ql_find_port(ha, name, qlnt);
5736
5737 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
5738 EL(ha, "failed, fc_port not found\n");
5739 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
5740 cmd->ResponseLen = 0;
5741 return;
5742 }
5743
5744 while (ha->task_daemon_flags &
5745 (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5746 ql_delay(ha, 10000000); /* 10 second delay */
5747
5748 retry--;
5749
5750 if (retry == 0) { /* effectively 100 seconds */
5751 EL(ha, "failed, LOOP_NOT_READY\n");
5778 ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5779 ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5780 ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5781 ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5782
5783 rval = ddi_copyout((void *)&ps,
5784 (void *)(uintptr_t)cmd->ResponseAdr,
5785 sizeof (EXT_HBA_PORT_STAT), mode);
5786
5787 if (rval != 0) {
5788 EL(ha, "failed, ddi_copyout\n");
5789 cmd->Status = EXT_STATUS_COPY_ERR;
5790 cmd->ResponseLen = 0;
5791 } else {
5792 cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5793 }
5794 }
5795
5796 kmem_free(ls, sizeof (ql_link_stats_t));
5797
5798 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5799 }
5800
5801 /*
5802 * ql_get_statistics_fc4
5803 * Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5804 *
5805 * Input:
5806 * ha: adapter state pointer.
5807 * cmd: Local EXT_IOCTL cmd struct pointer.
5808 * mode: flags.
5809 *
5810 * Returns:
5811 * None, request status indicated in cmd->Status.
5812 *
5813 * Context:
5814 * Kernel context.
5815 */
5816 static void
5817 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5818 {
5819 uint32_t rval;
5820 EXT_HBA_FC4STATISTICS fc4stats = {0};
5821 ql_xioctl_t *xp = ha->xioctl;
5822
5823 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5824
5825 fc4stats.InputRequests = xp->IOInputRequests;
5826 fc4stats.OutputRequests = xp->IOOutputRequests;
5827 fc4stats.ControlRequests = xp->IOControlRequests;
5828 fc4stats.InputMegabytes = xp->IOInputMByteCnt;
5829 fc4stats.OutputMegabytes = xp->IOOutputMByteCnt;
5830
5831 rval = ddi_copyout((void *)&fc4stats,
5832 (void *)(uintptr_t)cmd->ResponseAdr,
5833 sizeof (EXT_HBA_FC4STATISTICS), mode);
5834
5835 if (rval != 0) {
5836 EL(ha, "failed, ddi_copyout\n");
5837 cmd->Status = EXT_STATUS_COPY_ERR;
5838 cmd->ResponseLen = 0;
5839 } else {
5840 cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS);
5841 }
5842
5843 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5844 }
5845
5846 /*
5847 * ql_set_led_state
5848 * Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA.
5849 *
5850 * Input:
5851 * ha: adapter state pointer.
5852 * cmd: Local EXT_IOCTL cmd struct pointer.
5853 * mode: flags.
5854 *
5855 * Returns:
5856 * None, request status indicated in cmd->Status.
5857 *
5858 * Context:
5859 * Kernel context.
5860 */
5861 static void
5862 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5863 {
5864 EXT_BEACON_CONTROL bstate;
5865 uint32_t rval;
5866 ql_xioctl_t *xp = ha->xioctl;
5867
5868 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5869
5870 if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) {
5871 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5872 cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5873 EL(ha, "done - failed, RequestLen < EXT_BEACON_CONTROL,"
5874 " Len=%xh\n", cmd->RequestLen);
5875 cmd->ResponseLen = 0;
5876 return;
5877 }
5878
5879 if (ha->device_id < 0x2300) {
5880 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5881 cmd->DetailStatus = 0;
5882 EL(ha, "done - failed, Invalid function for HBA model\n");
5883 cmd->ResponseLen = 0;
5884 return;
5885 }
5886
5887 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate,
5888 cmd->RequestLen, mode);
5889
5890 if (rval != 0) {
5891 cmd->Status = EXT_STATUS_COPY_ERR;
5892 EL(ha, "done - failed, ddi_copyin\n");
5893 return;
5894 }
5895
5896 switch (bstate.State) {
5897 case EXT_DEF_GRN_BLINK_OFF: /* turn beacon off */
5898 if (xp->ledstate.BeaconState == BEACON_OFF) {
5899 /* not quite an error -- LED state is already off */
5900 cmd->Status = EXT_STATUS_OK;
5901 EL(ha, "LED off request -- LED is already off\n");
5902 break;
5903 }
5904
5905 xp->ledstate.BeaconState = BEACON_OFF;
5906 xp->ledstate.LEDflags = LED_ALL_OFF;
5907
5908 if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) {
5909 cmd->Status = EXT_STATUS_MAILBOX;
5910 } else {
5911 cmd->Status = EXT_STATUS_OK;
5912 }
5913 break;
5914
5915 case EXT_DEF_GRN_BLINK_ON: /* turn beacon on */
5916 if (xp->ledstate.BeaconState == BEACON_ON) {
5917 /* not quite an error -- LED state is already on */
5918 cmd->Status = EXT_STATUS_OK;
5919 EL(ha, "LED on request - LED is already on\n");
5920 break;
5921 }
5922
5923 if ((rval = ql_setup_led(ha)) != QL_SUCCESS) {
5924 cmd->Status = EXT_STATUS_MAILBOX;
5925 break;
5926 }
5927
5928 if (CFG_IST(ha, CFG_CTRL_24258081)) {
5929 xp->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24;
5930 } else {
5931 xp->ledstate.LEDflags = LED_GREEN;
5932 }
5933 xp->ledstate.BeaconState = BEACON_ON;
5934
5935 cmd->Status = EXT_STATUS_OK;
5936 break;
5937 default:
5938 cmd->Status = EXT_STATUS_ERR;
5939 EL(ha, "failed, unknown state request %xh\n", bstate.State);
5940 break;
5941 }
5942
5943 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
5944 }
5945
5946 /*
5947 * ql_get_led_state
5948 * Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA.
5949 *
5950 * Input:
5951 * ha: adapter state pointer.
5952 * cmd: Local EXT_IOCTL cmd struct pointer.
5953 * mode: flags.
5954 *
5955 * Returns:
5956 * None, request status indicated in cmd->Status.
5957 *
5958 * Context:
5959 * Kernel context.
5960 */
5961 static void
5962 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5963 {
5964 EXT_BEACON_CONTROL bstate = {0};
5965 uint32_t rval;
5966 ql_xioctl_t *xp = ha->xioctl;
5967
5968 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
5969
5970 if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) {
5971 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
5972 cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
5973 EL(ha, "done - failed, ResponseLen < EXT_BEACON_CONTROL,"
5974 "Len=%xh\n", cmd->ResponseLen);
5975 cmd->ResponseLen = 0;
5976 return;
5977 }
5978
5979 if (ha->device_id < 0x2300) {
5980 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
5981 cmd->DetailStatus = 0;
5982 EL(ha, "done - failed, Invalid function for HBA model\n");
5983 cmd->ResponseLen = 0;
5984 return;
5985 }
5986
5987 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
5988 cmd->Status = EXT_STATUS_BUSY;
5989 EL(ha, "done - failed, isp abort active\n");
5990 cmd->ResponseLen = 0;
5991 return;
5992 }
5993
5994 /* inform the user of the current beacon state (off or on) */
5995 bstate.State = xp->ledstate.BeaconState;
5996
5997 rval = ddi_copyout((void *)&bstate,
5998 (void *)(uintptr_t)cmd->ResponseAdr,
5999 sizeof (EXT_BEACON_CONTROL), mode);
6000
6001 if (rval != 0) {
6002 EL(ha, "failed, ddi_copyout\n");
6003 cmd->Status = EXT_STATUS_COPY_ERR;
6004 cmd->ResponseLen = 0;
6005 } else {
6006 cmd->Status = EXT_STATUS_OK;
6007 cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL);
6008 }
6009
6010 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6011 }
6012
6013 /*
6014 * ql_blink_led
6015 * Determine the next state of the LED and drive it
6016 *
6017 * Input:
6018 * ha: adapter state pointer.
6019 *
6020 * Context:
6021 * Interrupt context.
6022 */
6023 void
6024 ql_blink_led(ql_adapter_state_t *ha)
6025 {
6026 uint32_t nextstate;
6027 ql_xioctl_t *xp = ha->xioctl;
6028
6029 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6030
6031 if (xp->ledstate.BeaconState == BEACON_ON) {
6032 /* determine the next led state */
6033 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6034 nextstate = (xp->ledstate.LEDflags) &
6035 (~(RD32_IO_REG(ha, gpiod)));
6036 } else {
6037 nextstate = (xp->ledstate.LEDflags) &
6038 (~(RD16_IO_REG(ha, gpiod)));
6039 }
6040
6041 /* turn the led on or off */
6042 ql_drive_led(ha, nextstate);
6043 }
6044
6045 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6046 }
6047
6048 /*
6049 * ql_drive_led
6050 * drive the led's as determined by LEDflags
6051 *
6052 * Input:
6053 * ha: adapter state pointer.
6054 * LEDflags: LED flags
6055 *
6056 * Context:
6057 * Kernel/Interrupt context.
6058 */
static void
ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
{

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
		/* 2300/6322 family: LEDs sit behind 16-bit GPIO regs. */

		uint16_t	gpio_enable, gpio_data;

		/* setup to send new data */
		gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
		gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
		WRT16_IO_REG(ha, gpioe, gpio_enable);

		/* read current data and clear out old led data */
		gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
		gpio_data = (uint16_t)(gpio_data & ~LED_MASK);

		/* set in the new led data. */
		gpio_data = (uint16_t)(gpio_data | LEDflags);

		/* write out the new led data */
		WRT16_IO_REG(ha, gpiod, gpio_data);

	} else if (CFG_IST(ha, CFG_CTRL_24258081)) {
		/* 24xx and later: LEDs sit behind 32-bit GPIO regs. */

		uint32_t	gpio_data;

		/* setup to send new data */
		gpio_data = RD32_IO_REG(ha, gpiod);
		gpio_data |= LED_MASK_UPDATE_24;
		WRT32_IO_REG(ha, gpiod, gpio_data);

		/* read current data and clear out old led data */
		gpio_data = RD32_IO_REG(ha, gpiod);
		gpio_data &= ~LED_MASK_COLORS_24;

		/* set in the new led data */
		gpio_data |= LEDflags;

		/* write out the new led data */
		WRT32_IO_REG(ha, gpiod, gpio_data);

	} else {
		/* No GPIO LED programming known for this device id. */
		EL(ha, "unsupported HBA: %xh", ha->device_id);
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
6109
6110 /*
6111 * ql_setup_led
6112 * Setup LED for driver control
6113 *
6114 * Input:
6115 * ha: adapter state pointer.
6116 *
6117 * Context:
6118 * Kernel/Interrupt context.
6119 */
6120 static uint32_t
6121 ql_setup_led(ql_adapter_state_t *ha)
6122 {
6123 uint32_t rval;
6124 ql_mbx_data_t mr;
6125
6126 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6127
6128 /* decouple the LED control from the fw */
6129 rval = ql_get_firmware_option(ha, &mr);
6130 if (rval != QL_SUCCESS) {
6131 EL(ha, "failed, get_firmware_option=%xh\n", rval);
6132 return (rval);
6133 }
6134
6135 /* set the appropriate options */
6136 mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);
6137
6138 /* send it back to the firmware */
6139 rval = ql_set_firmware_option(ha, &mr);
6140 if (rval != QL_SUCCESS) {
6141 EL(ha, "failed, set_firmware_option=%xh\n", rval);
6142 return (rval);
6143 }
6144
6145 /* initally, turn the LED's off */
6146 ql_drive_led(ha, LED_ALL_OFF);
6147
6148 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6149
6150 return (rval);
6151 }
6152
6153 /*
6154 * ql_wrapup_led
6155 * Return LED control to the firmware
6156 *
6157 * Input:
6158 * ha: adapter state pointer.
6159 *
6160 * Context:
6161 * Kernel/Interrupt context.
6162 */
6163 static uint32_t
6164 ql_wrapup_led(ql_adapter_state_t *ha)
6165 {
6166 uint32_t rval;
6167 ql_mbx_data_t mr;
6168
6169 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6170
6171 /* Turn all LED's off */
6172 ql_drive_led(ha, LED_ALL_OFF);
6173
6174 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6175
6176 uint32_t gpio_data;
6177
6178 /* disable the LED update mask */
6179 gpio_data = RD32_IO_REG(ha, gpiod);
6180 gpio_data &= ~LED_MASK_UPDATE_24;
6181
6182 /* write out the data */
6183 WRT32_IO_REG(ha, gpiod, gpio_data);
6184 }
6185
6186 /* give LED control back to the f/w */
6187 rval = ql_get_firmware_option(ha, &mr);
6188 if (rval != QL_SUCCESS) {
6189 EL(ha, "failed, get_firmware_option=%xh\n", rval);
6190 return (rval);
6191 }
6192
6193 mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);
6194
6195 rval = ql_set_firmware_option(ha, &mr);
6196 if (rval != QL_SUCCESS) {
6197 EL(ha, "failed, set_firmware_option=%xh\n", rval);
6198 return (rval);
6199 }
6200
6201 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6202
6203 return (rval);
6204 }
6205
6206 /*
6207 * ql_get_port_summary
6208 * Performs EXT_SC_GET_PORT_SUMMARY subcommand. of EXT_CC_GET_DATA.
6209 *
6210 * The EXT_IOCTL->RequestAdr points to a single
6211 * UINT32 which identifies the device type.
6212 *
6213 * Input:
6214 * ha: adapter state pointer.
6215 * cmd: Local EXT_IOCTL cmd struct pointer.
6216 * mode: flags.
6217 *
6218 * Returns:
6219 * None, request status indicated in cmd->Status.
6220 *
6221 * Context:
6222 * Kernel context.
6223 */
6224 static void
6225 ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6226 {
6227 EXT_DEVICEDATA dd = {0};
6228 EXT_DEVICEDATA *uddp;
6229 ql_link_t *link;
6230 ql_tgt_t *tq;
6231 uint32_t rlen, dev_type, index;
6232 int rval = 0;
6233 EXT_DEVICEDATAENTRY *uddep, *ddep;
6234
6235 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6236
6237 ddep = &dd.EntryList[0];
6238
6239 /*
6240 * Get the type of device the requestor is looking for.
6241 *
6242 * We ignore this for now.
6243 */
6244 rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6245 (void *)&dev_type, sizeof (dev_type), mode);
6246 if (rval != 0) {
6247 cmd->Status = EXT_STATUS_COPY_ERR;
6248 cmd->ResponseLen = 0;
6249 EL(ha, "failed, ddi_copyin\n");
6250 return;
6251 }
6252 /*
6253 * Count the number of entries to be returned. Count devices
6254 * that are offlline, but have been persistently bound.
6255 */
6274 } else {
6275 rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
6276 (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
6277 }
6278 if (rlen > cmd->ResponseLen) {
6279 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6280 cmd->DetailStatus = rlen;
6281 EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
6282 rlen, cmd->ResponseLen);
6283 cmd->ResponseLen = 0;
6284 return;
6285 }
6286 cmd->ResponseLen = 0;
6287 uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
6288 uddep = &uddp->EntryList[0];
6289 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6290 for (link = ha->dev[index].first; link != NULL;
6291 link = link->next) {
6292 tq = link->base_address;
6293 if (tq->flags & TQF_INITIATOR_DEVICE ||
6294 !VALID_TARGET_ID(ha, tq->loop_id)) {
6295 continue; /* Skip this one */
6296 }
6297
6298 bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));
6299
6300 bcopy(tq->node_name, ddep->NodeWWN, 8);
6301 bcopy(tq->port_name, ddep->PortWWN, 8);
6302
6303 ddep->PortID[0] = tq->d_id.b.domain;
6304 ddep->PortID[1] = tq->d_id.b.area;
6305 ddep->PortID[2] = tq->d_id.b.al_pa;
6306
6307 bcopy(tq->port_name,
6308 (caddr_t)&ddep->TargetAddress.Target, 8);
6309
6310 ddep->DeviceFlags = tq->flags;
6311 ddep->LoopID = tq->loop_id;
6312 QL_PRINT_9(CE_CONT, "(%d): Tgt=%lld, loop=%xh, "
6313 "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
6314 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6315 ha->instance, ddep->TargetAddress.Target,
6316 ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
6317 ddep->NodeWWN[2], ddep->NodeWWN[3],
6318 ddep->NodeWWN[4], ddep->NodeWWN[5],
6319 ddep->NodeWWN[6], ddep->NodeWWN[7],
6320 ddep->PortWWN[0], ddep->PortWWN[1],
6321 ddep->PortWWN[2], ddep->PortWWN[3],
6322 ddep->PortWWN[4], ddep->PortWWN[5],
6323 ddep->PortWWN[6], ddep->PortWWN[7]);
6324 rval = ddi_copyout((void *)ddep, (void *)uddep,
6325 sizeof (EXT_DEVICEDATAENTRY), mode);
6326
6327 if (rval != 0) {
6328 cmd->Status = EXT_STATUS_COPY_ERR;
6329 cmd->ResponseLen = 0;
6330 EL(ha, "failed, ddi_copyout\n");
6331 break;
6332 }
6333 dd.ReturnListEntryCount++;
6334 uddep++;
6335 cmd->ResponseLen += (uint32_t)
6336 sizeof (EXT_DEVICEDATAENTRY);
6337 }
6338 }
6339 rval = ddi_copyout((void *)&dd, (void *)uddp,
6340 sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);
6341
6342 if (rval != 0) {
6343 cmd->Status = EXT_STATUS_COPY_ERR;
6344 cmd->ResponseLen = 0;
6345 EL(ha, "failed, ddi_copyout-2\n");
6346 } else {
6347 cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
6348 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6349 }
6350 }
6351
6352 /*
6353 * ql_get_target_id
6354 * Performs EXT_SC_GET_TARGET_ID subcommand. of EXT_CC_GET_DATA.
6355 *
6356 * Input:
6357 * ha: adapter state pointer.
6358 * cmd: Local EXT_IOCTL cmd struct pointer.
6359 * mode: flags.
6360 *
6361 * Returns:
6362 * None, request status indicated in cmd->Status.
6363 *
6364 * Context:
6365 * Kernel context.
6366 */
6367 static void
6368 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6369 {
6370 uint32_t rval;
6371 uint16_t qlnt;
6372 EXT_DEST_ADDR extdestaddr = {0};
6373 uint8_t *name;
6374 uint8_t wwpn[EXT_DEF_WWN_NAME_SIZE];
6375 ql_tgt_t *tq;
6376
6377 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6378
6379 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6380 (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) {
6381 EL(ha, "failed, ddi_copyin\n");
6382 cmd->Status = EXT_STATUS_COPY_ERR;
6383 cmd->ResponseLen = 0;
6384 return;
6385 }
6386
6387 qlnt = QLNT_PORT;
6388 name = wwpn;
6389 QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6390 ha->instance, name[0], name[1], name[2], name[3], name[4],
6391 name[5], name[6], name[7]);
6392
6393 tq = ql_find_port(ha, name, qlnt);
6394 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
6395 EL(ha, "failed, fc_port not found\n");
6396 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
6397 cmd->ResponseLen = 0;
6398 return;
6399 }
6400
6401 bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8);
6402
6403 rval = ddi_copyout((void *)&extdestaddr,
6404 (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode);
6405 if (rval != 0) {
6406 EL(ha, "failed, ddi_copyout\n");
6407 cmd->Status = EXT_STATUS_COPY_ERR;
6408 cmd->ResponseLen = 0;
6409 }
6410
6411 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6412 }
6413
6414 /*
6415 * ql_setup_fcache
6416 * Populates selected flash sections into the cache
6417 *
6418 * Input:
6419 * ha = adapter state pointer.
6420 *
6421 * Returns:
6422 * ql local function return status code.
6423 *
6424 * Context:
6425 * Kernel context.
6426 *
6427 * Note:
6428 * Driver must be in stalled state prior to entering or
6429 * add code to this function prior to calling ql_setup_flash()
6430 */
6431 int
6432 ql_setup_fcache(ql_adapter_state_t *ha)
6433 {
6434 int rval;
6435 uint32_t freadpos = 0;
6436 uint32_t fw_done = 0;
6437 ql_fcache_t *head = NULL;
6438 ql_fcache_t *tail = NULL;
6439 ql_fcache_t *ftmp;
6440
6441 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6442
6443 CACHE_LOCK(ha);
6444
6445 /* If we already have populated it, rtn */
6446 if (ha->fcache != NULL) {
6447 CACHE_UNLOCK(ha);
6448 EL(ha, "buffer already populated\n");
6449 return (QL_SUCCESS);
6450 }
6451
6452 ql_flash_nvram_defaults(ha);
6453
6454 if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
6455 CACHE_UNLOCK(ha);
6456 EL(ha, "unable to setup flash; rval=%xh\n", rval);
6457 return (rval);
6458 }
6459
6460 while (freadpos != 0xffffffff) {
6461 /* Allocate & populate this node */
6462 if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6463 EL(ha, "node alloc failed\n");
6464 rval = QL_FUNCTION_FAILED;
6465 break;
6466 }
6467
6468 /* link in the new node */
6469 if (head == NULL) {
6470 head = tail = ftmp;
6471 } else {
6472 tail->next = ftmp;
6473 tail = ftmp;
6474 }
6475
6476 /* Do the firmware node first for 24xx/25xx's */
6477 if (fw_done == 0) {
6478 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6479 freadpos = ha->flash_fw_addr << 2;
6480 }
6481 fw_done = 1;
6482 }
6483
6484 if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
6485 freadpos)) != QL_SUCCESS) {
6486 EL(ha, "failed, 24xx dump_fcode"
6487 " pos=%xh rval=%xh\n", freadpos, rval);
6488 rval = QL_FUNCTION_FAILED;
6489 break;
6490 }
6491
6492 /* checkout the pci data / format */
6493 if (ql_check_pci(ha, ftmp, &freadpos)) {
6494 EL(ha, "flash header incorrect\n");
6495 rval = QL_FUNCTION_FAILED;
6496 break;
6497 }
6498 }
6499
6500 if (rval != QL_SUCCESS) {
6501 /* release all resources we have */
6502 ftmp = head;
6503 while (ftmp != NULL) {
6504 tail = ftmp->next;
6505 kmem_free(ftmp->buf, FBUFSIZE);
6506 kmem_free(ftmp, sizeof (ql_fcache_t));
6507 ftmp = tail;
6508 }
6509
6510 EL(ha, "failed, done\n");
6511 } else {
6512 ha->fcache = head;
6513 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6514 }
6515 CACHE_UNLOCK(ha);
6516
6517 return (rval);
6518 }
6519
6520 /*
6521 * ql_update_fcache
6522 * re-populates updated flash into the fcache. If
6523 * fcache does not exist (e.g., flash was empty/invalid on
6524 * boot), this routine will create and the populate it.
6525 *
6526 * Input:
6527 * ha = adapter state pointer.
6528 * *bpf = Pointer to flash buffer.
6529 * bsize = Size of flash buffer.
6530 *
6531 * Returns:
6532 *
6533 * Context:
6534 * Kernel context.
6535 */
6536 void
6537 ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
6538 {
6539 int rval = QL_SUCCESS;
6540 uint32_t freadpos = 0;
6541 uint32_t fw_done = 0;
6542 ql_fcache_t *head = NULL;
6543 ql_fcache_t *tail = NULL;
6544 ql_fcache_t *ftmp;
6545
6546 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6547
6548 while (freadpos != 0xffffffff) {
6549
6550 /* Allocate & populate this node */
6551
6552 if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6553 EL(ha, "node alloc failed\n");
6554 rval = QL_FUNCTION_FAILED;
6555 break;
6556 }
6557
6558 /* link in the new node */
6559 if (head == NULL) {
6560 head = tail = ftmp;
6561 } else {
6562 tail->next = ftmp;
6563 tail = ftmp;
6564 }
6565
6566 /* Do the firmware node first for 24xx's */
6567 if (fw_done == 0) {
6568 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6569 freadpos = ha->flash_fw_addr << 2;
6570 }
6571 fw_done = 1;
6572 }
6573
6574 /* read in first FBUFSIZE bytes of this flash section */
6575 if (freadpos+FBUFSIZE > bsize) {
6576 EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
6577 freadpos, bsize);
6578 rval = QL_FUNCTION_FAILED;
6579 break;
6580 }
6581 bcopy(bfp+freadpos, ftmp->buf, FBUFSIZE);
6582
6583 /* checkout the pci data / format */
6584 if (ql_check_pci(ha, ftmp, &freadpos)) {
6585 EL(ha, "flash header incorrect\n");
6586 rval = QL_FUNCTION_FAILED;
6587 break;
6588 }
6589 }
6590
6591 if (rval != QL_SUCCESS) {
6592 /*
6593 * release all resources we have
6594 */
6595 ql_fcache_rel(head);
6596 EL(ha, "failed, done\n");
6597 } else {
6598 /*
6599 * Release previous fcache resources and update with new
6600 */
6601 CACHE_LOCK(ha);
6602 ql_fcache_rel(ha->fcache);
6603 ha->fcache = head;
6604 CACHE_UNLOCK(ha);
6605
6606 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6607 }
6608 }
6609
6610 /*
6611 * ql_setup_fnode
6612 * Allocates fcache node
6613 *
6614 * Input:
6615 * ha = adapter state pointer.
6616 * node = point to allocated fcache node (NULL = failed)
6617 *
6618 * Returns:
6619 *
6620 * Context:
6621 * Kernel context.
6622 *
6623 * Note:
6624 * Driver must be in stalled state prior to entering or
6625 * add code to this function prior to calling ql_setup_flash()
6626 */
6674 }
6675 }
6676
6677 /*
6678 * ql_update_flash_caches
6679 * Updates driver flash caches
6680 *
6681 * Input:
6682 * ha: adapter state pointer.
6683 *
6684 * Context:
6685 * Kernel context.
6686 */
6687 static void
6688 ql_update_flash_caches(ql_adapter_state_t *ha)
6689 {
6690 uint32_t len;
6691 ql_link_t *link;
6692 ql_adapter_state_t *ha2;
6693
6694 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6695
6696 /* Get base path length. */
6697 for (len = (uint32_t)strlen(ha->devpath); len; len--) {
6698 if (ha->devpath[len] == ',' ||
6699 ha->devpath[len] == '@') {
6700 break;
6701 }
6702 }
6703
6704 /* Reset fcache on all adapter instances. */
6705 for (link = ql_hba.first; link != NULL; link = link->next) {
6706 ha2 = link->base_address;
6707
6708 if (strncmp(ha->devpath, ha2->devpath, len) != 0) {
6709 continue;
6710 }
6711
6712 CACHE_LOCK(ha2);
6713 ql_fcache_rel(ha2->fcache);
6714 ha2->fcache = NULL;
6715
6716 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6717 if (ha2->vcache != NULL) {
6718 kmem_free(ha2->vcache, QL_24XX_VPD_SIZE);
6719 ha2->vcache = NULL;
6720 }
6721 }
6722 CACHE_UNLOCK(ha2);
6723
6724 (void) ql_setup_fcache(ha2);
6725 }
6726
6727 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6728 }
6729
6730 /*
6731 * ql_get_fbuf
6732 * Search the fcache list for the type specified
6733 *
6734 * Input:
6735 * fptr = Pointer to fcache linked list
6736 * ftype = Type of image to be returned.
6737 *
6738 * Returns:
6739 * Pointer to ql_fcache_t.
6740 * NULL means not found.
6741 *
6742 * Context:
6743 * Kernel context.
6744 *
6745 *
6746 */
6747 ql_fcache_t *
6767 * the flash does not have one (!!!).
6768 *
6769 * On successful pci check, nextpos adjusted to next pci header.
6770 *
6771 * Returns:
6772 * -1 --> last pci image
6773 * 0 --> pci header valid
6774 * 1 --> pci header invalid.
6775 *
6776 * Context:
6777 * Kernel context.
6778 */
6779 static int
6780 ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
6781 {
6782 pci_header_t *pcih;
6783 pci_data_t *pcid;
6784 uint32_t doff;
6785 uint8_t *pciinfo;
6786
6787 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6788
6789 if (fcache != NULL) {
6790 pciinfo = fcache->buf;
6791 } else {
6792 EL(ha, "failed, null fcache ptr passed\n");
6793 return (1);
6794 }
6795
6796 if (pciinfo == NULL) {
6797 EL(ha, "failed, null pciinfo ptr passed\n");
6798 return (1);
6799 }
6800
6801 if (CFG_IST(ha, CFG_SBUS_CARD)) {
6802 caddr_t bufp;
6803 uint_t len;
6804
6805 if (pciinfo[0] != SBUS_CODE_FCODE) {
6806 EL(ha, "failed, unable to detect sbus fcode\n");
6807 return (1);
6808 }
6809 fcache->type = FTYPE_FCODE;
6810
6811 /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
6812 if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
6813 PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
6814 DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
6815 (int *)&len) == DDI_PROP_SUCCESS) {
6816
6817 (void) snprintf(fcache->verstr,
6818 FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
6819 kmem_free(bufp, len);
6820 }
6821
6822 *nextpos = 0xffffffff;
6823
6824 QL_PRINT_9(CE_CONT, "(%d): CFG_SBUS_CARD, done\n",
6825 ha->instance);
6826
6827 return (0);
6828 }
6829
6830 if (*nextpos == ha->flash_fw_addr << 2) {
6831
6832 pci_header_t fwh = {0};
6833 pci_data_t fwd = {0};
6834 uint8_t *buf, *bufp;
6835
6836 /*
6837 * Build a pci header for the firmware module
6838 */
6839 if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
6840 NULL) {
6841 EL(ha, "failed, unable to allocate buffer\n");
6842 return (1);
6843 }
6844
6845 fwh.signature[0] = PCI_HEADER0;
6854 fwd.codetype = PCI_CODE_FW;
6855 fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
6856 fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));
6857
6858 bufp = buf;
6859 bcopy(&fwh, bufp, sizeof (pci_header_t));
6860 bufp += sizeof (pci_header_t);
6861 bcopy(&fwd, bufp, sizeof (pci_data_t));
6862 bufp += sizeof (pci_data_t);
6863
6864 bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
6865 sizeof (pci_data_t)));
6866 bcopy(buf, fcache->buf, FBUFSIZE);
6867
6868 fcache->type = FTYPE_FW;
6869
6870 (void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
6871 "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
6872 fcache->buf[27]);
6873
6874 if (CFG_IST(ha, CFG_CTRL_81XX)) {
6875 *nextpos = 0x200000;
6876 } else if (CFG_IST(ha, CFG_CTRL_8021)) {
6877 *nextpos = 0x80000;
6878 } else {
6879 *nextpos = 0;
6880 }
6881 kmem_free(buf, FBUFSIZE);
6882
6883 QL_PRINT_9(CE_CONT, "(%d): FTYPE_FW, done\n", ha->instance);
6884
6885 return (0);
6886 }
6887
6888 /* get to the pci header image length */
6889 pcih = (pci_header_t *)pciinfo;
6890
6891 doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);
6892
6893 /* some header section sanity check */
6894 if (pcih->signature[0] != PCI_HEADER0 ||
6895 pcih->signature[1] != PCI_HEADER1 || doff > 50) {
6896 EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
6897 pcih->signature[0], pcih->signature[1], doff);
6898 return (1);
6899 }
6900
6901 pcid = (pci_data_t *)(pciinfo + doff);
6902
6903 /* a slight sanity data section check */
6904 if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
6905 pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
6906 EL(ha, "failed, data sig mismatch!\n");
6907 return (1);
6908 }
6909
6910 if (pcid->indicator == PCI_IND_LAST_IMAGE) {
6911 QL_PRINT_9(CE_CONT, "(%d): last image\n", ha->instance);
6912 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6913 ql_flash_layout_table(ha, *nextpos +
6914 (pcid->imagelength[0] | (pcid->imagelength[1] <<
6915 8)) * PCI_SECTOR_SIZE);
6916 (void) ql_24xx_flash_desc(ha);
6917 }
6918 *nextpos = 0xffffffff;
6919 } else {
6920 /* adjust the next flash read start position */
6921 *nextpos += (pcid->imagelength[0] |
6922 (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
6923 }
6924
6925 switch (pcid->codetype) {
6926 case PCI_CODE_X86PC:
6927 fcache->type = FTYPE_BIOS;
6928 break;
6929 case PCI_CODE_FCODE:
6930 fcache->type = FTYPE_FCODE;
6931 break;
6932 case PCI_CODE_EFI:
6933 fcache->type = FTYPE_EFI;
6934 break;
6935 case PCI_CODE_HPPA:
6936 fcache->type = FTYPE_HPPA;
6937 break;
6938 default:
6939 fcache->type = FTYPE_UNKNOWN;
6940 break;
6941 }
6942
6943 (void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
6944 "%d.%02d", pcid->revisionlevel[1], pcid->revisionlevel[0]);
6945
6946 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
6947
6948 return (0);
6949 }
6950
6951 /*
6952 * ql_flash_layout_table
6953 * Obtains flash addresses from table
6954 *
6955 * Input:
6956 * ha: adapter state pointer.
6957 * flt_paddr: flash layout pointer address.
6958 *
6959 * Context:
6960 * Kernel context.
6961 */
6962 static void
6963 ql_flash_layout_table(ql_adapter_state_t *ha, uint32_t flt_paddr)
6964 {
6965 ql_flt_ptr_t *fptr;
6966 uint8_t *bp;
6967 int rval;
6968 uint32_t len, faddr, cnt;
6969 uint16_t chksum, w16;
6970
6971 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
6972
6973 /* Process flash layout table header */
6974 len = sizeof (ql_flt_ptr_t);
6975 if ((bp = kmem_zalloc(len, KM_SLEEP)) == NULL) {
6976 EL(ha, "kmem_zalloc=null\n");
6977 return;
6978 }
6979
6980 /* Process pointer to flash layout table */
6981 if ((rval = ql_dump_fcode(ha, bp, len, flt_paddr)) != QL_SUCCESS) {
6982 EL(ha, "fptr dump_flash pos=%xh, status=%xh\n", flt_paddr,
6983 rval);
6984 kmem_free(bp, len);
6985 return;
6986 }
6987 fptr = (ql_flt_ptr_t *)bp;
6988
6989 /* Verify pointer to flash layout table. */
6990 for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
6991 w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
6992 chksum += w16;
6993 }
6994 if (chksum != 0 || fptr->sig[0] != 'Q' || fptr->sig[1] != 'F' ||
6995 fptr->sig[2] != 'L' || fptr->sig[3] != 'T') {
6996 EL(ha, "ptr chksum=%xh, sig=%c%c%c%c\n", chksum, fptr->sig[0],
6997 fptr->sig[1], fptr->sig[2], fptr->sig[3]);
6998 kmem_free(bp, len);
6999 return;
7000 }
7001 faddr = CHAR_TO_LONG(fptr->addr[0], fptr->addr[1], fptr->addr[2],
7002 fptr->addr[3]);
7003
7004 kmem_free(bp, len);
7005
7006 ql_process_flt(ha, faddr);
7007
7008 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7009 }
7010
7011 /*
7012 * ql_process_flt
7013 * Obtains flash addresses from flash layout table
7014 *
7015 * Input:
7016 * ha: adapter state pointer.
7017 * faddr: flash layout table byte address.
7018 *
7019 * Context:
7020 * Kernel context.
7021 */
7022 static void
7023 ql_process_flt(ql_adapter_state_t *ha, uint32_t faddr)
7024 {
7025 ql_flt_hdr_t *fhdr;
7026 ql_flt_region_t *frgn;
7027 uint8_t *bp, *eaddr, nv_rg, vpd_rg;
7028 int rval;
7029 uint32_t len, cnt, fe_addr;
7030 uint16_t chksum, w16;
7031
7032 QL_PRINT_9(CE_CONT, "(%d): started faddr=%xh\n", ha->instance, faddr);
7033
7034 /* Process flash layout table header */
7035 if ((bp = kmem_zalloc(FLASH_LAYOUT_TABLE_SIZE, KM_SLEEP)) == NULL) {
7036 EL(ha, "kmem_zalloc=null\n");
7037 return;
7038 }
7039 fhdr = (ql_flt_hdr_t *)bp;
7040
7041 /* Process flash layout table. */
7042 if ((rval = ql_dump_fcode(ha, bp, FLASH_LAYOUT_TABLE_SIZE, faddr)) !=
7043 QL_SUCCESS) {
7044 EL(ha, "fhdr dump_flash pos=%xh, status=%xh\n", faddr, rval);
7045 kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7046 return;
7047 }
7048
7049 /* Verify flash layout table. */
7050 len = (uint32_t)(CHAR_TO_SHORT(fhdr->len[0], fhdr->len[1]) +
7051 sizeof (ql_flt_hdr_t) + sizeof (ql_flt_region_t));
7052 if (len > FLASH_LAYOUT_TABLE_SIZE) {
7053 chksum = 0xffff;
7054 } else {
7055 for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
7056 w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
7057 chksum += w16;
7058 }
7059 }
7060 w16 = CHAR_TO_SHORT(fhdr->version[0], fhdr->version[1]);
7061 if (chksum != 0 || w16 != 1) {
7062 EL(ha, "table chksum=%xh, version=%d\n", chksum, w16);
7063 kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7064 return;
7065 }
7066 eaddr = bp + len;
7067
7068 /* Process Function/Port Configuration Map. */
7069 nv_rg = vpd_rg = 0;
7070 if (CFG_IST(ha, CFG_CTRL_8021)) {
7071 uint16_t i;
7072 uint8_t *mbp = eaddr;
7073 ql_fp_cfg_map_t *cmp = (ql_fp_cfg_map_t *)mbp;
7074
7075 len = (uint32_t)(CHAR_TO_SHORT(cmp->hdr.len[0],
7076 cmp->hdr.len[1]));
7077 if (len > FLASH_LAYOUT_TABLE_SIZE) {
7078 chksum = 0xffff;
7079 } else {
7080 for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
7081 w16 = (uint16_t)CHAR_TO_SHORT(mbp[cnt],
7082 mbp[cnt + 1]);
7083 chksum += w16;
7084 }
7085 }
7086 w16 = CHAR_TO_SHORT(cmp->hdr.version[0], cmp->hdr.version[1]);
7087 if (chksum != 0 || w16 != 1 ||
7088 cmp->hdr.Signature[0] != 'F' ||
7089 cmp->hdr.Signature[1] != 'P' ||
7090 cmp->hdr.Signature[2] != 'C' ||
7091 cmp->hdr.Signature[3] != 'M') {
7092 EL(ha, "cfg_map chksum=%xh, version=%d, "
7093 "sig=%c%c%c%c\n", chksum, w16,
7094 cmp->hdr.Signature[0], cmp->hdr.Signature[1],
7095 cmp->hdr.Signature[2], cmp->hdr.Signature[3]);
7096 } else {
7097 cnt = (uint16_t)
7098 (CHAR_TO_SHORT(cmp->hdr.NumberEntries[0],
7099 cmp->hdr.NumberEntries[1]));
7100 /* Locate entry for function. */
7101 for (i = 0; i < cnt; i++) {
7102 if (cmp->cfg[i].FunctionType == FT_FC &&
7103 cmp->cfg[i].FunctionNumber[0] ==
7104 ha->function_number &&
7105 cmp->cfg[i].FunctionNumber[1] == 0) {
7106 nv_rg = cmp->cfg[i].ConfigRegion;
7107 vpd_rg = cmp->cfg[i].VpdRegion;
7108 break;
7109 }
7110 }
7111
7112 if (nv_rg == 0 || vpd_rg == 0) {
7113 EL(ha, "cfg_map nv_rg=%d, vpd_rg=%d\n", nv_rg,
7114 vpd_rg);
7115 nv_rg = vpd_rg = 0;
7116 }
7117 }
7118 }
7119
7120 /* Process flash layout table regions */
7121 for (frgn = (ql_flt_region_t *)(bp + sizeof (ql_flt_hdr_t));
7122 (uint8_t *)frgn < eaddr; frgn++) {
7123 faddr = CHAR_TO_LONG(frgn->beg_addr[0], frgn->beg_addr[1],
7124 frgn->beg_addr[2], frgn->beg_addr[3]);
7125 faddr >>= 2;
7126 fe_addr = CHAR_TO_LONG(frgn->end_addr[0], frgn->end_addr[1],
7127 frgn->end_addr[2], frgn->end_addr[3]);
7128 fe_addr >>= 2;
7129
7130 switch (frgn->region) {
7131 case FLASH_8021_BOOTLOADER_REGION:
7132 ha->bootloader_addr = faddr;
7133 ha->bootloader_size = (fe_addr - faddr) + 1;
7134 QL_PRINT_9(CE_CONT, "(%d): bootloader_addr=%xh, "
7135 "size=%xh\n", ha->instance, faddr,
7136 ha->bootloader_size);
7137 break;
7138 case FLASH_FW_REGION:
7139 case FLASH_8021_FW_REGION:
7140 ha->flash_fw_addr = faddr;
7141 ha->flash_fw_size = (fe_addr - faddr) + 1;
7142 QL_PRINT_9(CE_CONT, "(%d): flash_fw_addr=%xh, "
7143 "size=%xh\n", ha->instance, faddr,
7144 ha->flash_fw_size);
7145 break;
7146 case FLASH_GOLDEN_FW_REGION:
7147 case FLASH_8021_GOLDEN_FW_REGION:
7148 ha->flash_golden_fw_addr = faddr;
7149 QL_PRINT_9(CE_CONT, "(%d): flash_golden_fw_addr=%xh\n",
7150 ha->instance, faddr);
7151 break;
7152 case FLASH_8021_VPD_REGION:
7153 if (!vpd_rg || vpd_rg == FLASH_8021_VPD_REGION) {
7154 ha->flash_vpd_addr = faddr;
7155 QL_PRINT_9(CE_CONT, "(%d): 8021_flash_vpd_"
7156 "addr=%xh\n", ha->instance, faddr);
7157 }
7158 break;
7159 case FLASH_VPD_0_REGION:
7160 if (vpd_rg) {
7161 if (vpd_rg == FLASH_VPD_0_REGION) {
7162 ha->flash_vpd_addr = faddr;
7163 QL_PRINT_9(CE_CONT, "(%d): vpd_rg "
7164 "flash_vpd_addr=%xh\n",
7165 ha->instance, faddr);
7166 }
7167 } else if (!(ha->flags & FUNCTION_1) &&
7168 !(CFG_IST(ha, CFG_CTRL_8021))) {
7169 ha->flash_vpd_addr = faddr;
7170 QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
7171 "\n", ha->instance, faddr);
7172 }
7173 break;
7174 case FLASH_NVRAM_0_REGION:
7175 if (nv_rg) {
7176 if (nv_rg == FLASH_NVRAM_0_REGION) {
7177 ADAPTER_STATE_LOCK(ha);
7178 ha->flags &= ~FUNCTION_1;
7179 ADAPTER_STATE_UNLOCK(ha);
7180 ha->flash_nvram_addr = faddr;
7181 QL_PRINT_9(CE_CONT, "(%d): nv_rg "
7182 "flash_nvram_addr=%xh\n",
7183 ha->instance, faddr);
7184 }
7185 } else if (!(ha->flags & FUNCTION_1)) {
7186 ha->flash_nvram_addr = faddr;
7187 QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
7188 "%xh\n", ha->instance, faddr);
7189 }
7190 break;
7191 case FLASH_VPD_1_REGION:
7192 if (vpd_rg) {
7193 if (vpd_rg == FLASH_VPD_1_REGION) {
7194 ha->flash_vpd_addr = faddr;
7195 QL_PRINT_9(CE_CONT, "(%d): vpd_rg "
7196 "flash_vpd_addr=%xh\n",
7197 ha->instance, faddr);
7198 }
7199 } else if (ha->flags & FUNCTION_1 &&
7200 !(CFG_IST(ha, CFG_CTRL_8021))) {
7201 ha->flash_vpd_addr = faddr;
7202 QL_PRINT_9(CE_CONT, "(%d): flash_vpd_addr=%xh"
7203 "\n", ha->instance, faddr);
7204 }
7205 break;
7206 case FLASH_NVRAM_1_REGION:
7207 if (nv_rg) {
7208 if (nv_rg == FLASH_NVRAM_1_REGION) {
7209 ADAPTER_STATE_LOCK(ha);
7210 ha->flags |= FUNCTION_1;
7211 ADAPTER_STATE_UNLOCK(ha);
7212 ha->flash_nvram_addr = faddr;
7213 QL_PRINT_9(CE_CONT, "(%d): nv_rg "
7214 "flash_nvram_addr=%xh\n",
7215 ha->instance, faddr);
7216 }
7217 } else if (ha->flags & FUNCTION_1) {
7218 ha->flash_nvram_addr = faddr;
7219 QL_PRINT_9(CE_CONT, "(%d): flash_nvram_addr="
7220 "%xh\n", ha->instance, faddr);
7221 }
7222 break;
7223 case FLASH_DESC_TABLE_REGION:
7224 if (!(CFG_IST(ha, CFG_CTRL_8021))) {
7225 ha->flash_desc_addr = faddr;
7226 QL_PRINT_9(CE_CONT, "(%d): flash_desc_addr="
7227 "%xh\n", ha->instance, faddr);
7228 }
7229 break;
7230 case FLASH_ERROR_LOG_0_REGION:
7231 if (!(ha->flags & FUNCTION_1)) {
7232 ha->flash_errlog_start = faddr;
7233 QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
7234 "%xh\n", ha->instance, faddr);
7235 }
7236 break;
7237 case FLASH_ERROR_LOG_1_REGION:
7238 if (ha->flags & FUNCTION_1) {
7239 ha->flash_errlog_start = faddr;
7240 QL_PRINT_9(CE_CONT, "(%d): flash_errlog_addr="
7241 "%xh\n", ha->instance, faddr);
7242 }
7243 break;
7244 default:
7245 break;
7246 }
7247 }
7248 kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7249
7250 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7251 }
7252
7253 /*
7254 * ql_flash_nvram_defaults
7255 * Flash default addresses.
7256 *
7257 * Input:
7258 * ha: adapter state pointer.
7259 *
7260 * Returns:
7261 * ql local function return status code.
7262 *
7263 * Context:
7264 * Kernel context.
7265 */
7266 static void
7267 ql_flash_nvram_defaults(ql_adapter_state_t *ha)
7268 {
7269 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7270
7271 if (ha->flags & FUNCTION_1) {
7272 if (CFG_IST(ha, CFG_CTRL_2300)) {
7273 ha->flash_nvram_addr = NVRAM_2300_FUNC1_ADDR;
7274 ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
7275 } else if (CFG_IST(ha, CFG_CTRL_2422)) {
7276 ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7277 ha->flash_nvram_addr = NVRAM_2400_FUNC1_ADDR;
7278 ha->flash_vpd_addr = VPD_2400_FUNC1_ADDR;
7279 ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_1;
7280 ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
7281 ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
7282 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
7283 ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7284 ha->flash_nvram_addr = NVRAM_2500_FUNC1_ADDR;
7285 ha->flash_vpd_addr = VPD_2500_FUNC1_ADDR;
7286 ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_1;
7287 ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
7288 ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
7289 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
7290 ha->flash_data_addr = FLASH_8100_DATA_ADDR;
7291 ha->flash_nvram_addr = NVRAM_8100_FUNC1_ADDR;
7292 ha->flash_vpd_addr = VPD_8100_FUNC1_ADDR;
7293 ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_1;
7294 ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7295 ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7296 } else if (CFG_IST(ha, CFG_CTRL_8021)) {
7297 ha->flash_data_addr = 0;
7298 ha->flash_nvram_addr = NVRAM_8021_FUNC1_ADDR;
7299 ha->flash_vpd_addr = VPD_8021_FUNC1_ADDR;
7300 ha->flash_errlog_start = 0;
7301 ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
7302 ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
7303 ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
7304 ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
7305 ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
7306 }
7307 } else {
7308 if (CFG_IST(ha, CFG_CTRL_2200)) {
7309 ha->flash_nvram_addr = NVRAM_2200_FUNC0_ADDR;
7310 ha->flash_fw_addr = FLASH_2200_FIRMWARE_ADDR;
7311 } else if (CFG_IST(ha, CFG_CTRL_2300) ||
7312 (CFG_IST(ha, CFG_CTRL_6322))) {
7313 ha->flash_nvram_addr = NVRAM_2300_FUNC0_ADDR;
7314 ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
7315 } else if (CFG_IST(ha, CFG_CTRL_2422)) {
7316 ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7317 ha->flash_nvram_addr = NVRAM_2400_FUNC0_ADDR;
7318 ha->flash_vpd_addr = VPD_2400_FUNC0_ADDR;
7319 ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_0;
7320 ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
7321 ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
7322 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
7323 ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7324 ha->flash_nvram_addr = NVRAM_2500_FUNC0_ADDR;
7325 ha->flash_vpd_addr = VPD_2500_FUNC0_ADDR;
7326 ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_0;
7327 ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
7328 ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
7329 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
7330 ha->flash_data_addr = FLASH_8100_DATA_ADDR;
7331 ha->flash_nvram_addr = NVRAM_8100_FUNC0_ADDR;
7332 ha->flash_vpd_addr = VPD_8100_FUNC0_ADDR;
7333 ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_0;
7334 ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7335 ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7336 } else if (CFG_IST(ha, CFG_CTRL_8021)) {
7337 ha->flash_data_addr = 0;
7338 ha->flash_nvram_addr = NVRAM_8021_FUNC0_ADDR;
7339 ha->flash_vpd_addr = VPD_8021_FUNC0_ADDR;
7340 ha->flash_errlog_start = 0;
7341 ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
7342 ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
7343 ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
7344 ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
7345 ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
7346 } else {
7347 EL(ha, "unassigned flash fn0 addr: %x\n",
7348 ha->device_id);
7349 }
7350 }
7351 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7352 }
7353
7354 /*
7355 * ql_get_sfp
7356 * Returns sfp data to sdmapi caller
7357 *
7358 * Input:
7359 * ha: adapter state pointer.
7360 * cmd: Local EXT_IOCTL cmd struct pointer.
7361 * mode: flags.
7362 *
7363 * Returns:
7364 * None, request status indicated in cmd->Status.
7365 *
7366 * Context:
7367 * Kernel context.
7368 */
7369 static void
7370 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7371 {
7372 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7373
7374 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
7375 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7376 EL(ha, "failed, invalid request for HBA\n");
7377 return;
7378 }
7379
7380 if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
7381 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7382 cmd->DetailStatus = QL_24XX_SFP_SIZE;
7383 EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
7384 cmd->ResponseLen);
7385 return;
7386 }
7387
7388 /* Dump SFP data in user buffer */
7389 if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7390 mode)) != 0) {
7391 cmd->Status = EXT_STATUS_COPY_ERR;
7392 EL(ha, "failed, copy error\n");
7393 } else {
7394 cmd->Status = EXT_STATUS_OK;
7395 }
7396
7397 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7398 }
7399
7400 /*
7401 * ql_dump_sfp
7402 * Dumps SFP.
7403 *
7404 * Input:
7405 * ha: adapter state pointer.
7406 * bp: buffer address.
7407 * mode: flags
7408 *
7409 * Returns:
7410 *
7411 * Context:
7412 * Kernel context.
7413 */
7414 static int
7415 ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
7416 {
7417 dma_mem_t mem;
7418 uint32_t cnt;
7419 int rval2, rval = 0;
7420 uint32_t dxfer;
7421
7422 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7423
7424 /* Get memory for SFP. */
7425
7426 if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
7427 QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
7428 EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
7429 return (ENOMEM);
7430 }
7431
7432 for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
7433 rval2 = ql_read_sfp(ha, &mem,
7434 (uint16_t)(cnt < 256 ? 0xA0 : 0xA2),
7435 (uint16_t)(cnt & 0xff));
7436 if (rval2 != QL_SUCCESS) {
7437 EL(ha, "failed, read_sfp=%xh\n", rval2);
7438 rval = EFAULT;
7439 break;
7440 }
7441
7442 /* copy the data back */
7443 if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size,
7444 mode)) != mem.size) {
7445 /* ddi copy error */
7446 EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer);
7447 rval = EFAULT;
7448 break;
7449 }
7450
7451 /* adjust the buffer pointer */
7452 bp = (caddr_t)bp + mem.size;
7453 }
7454
7455 ql_free_phys(ha, &mem);
7456
7457 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7458
7459 return (rval);
7460 }
7461
7462 /*
7463 * ql_port_param
7464 * Retrieves or sets the firmware port speed settings
7465 *
7466 * Input:
7467 * ha: adapter state pointer.
7468 * cmd: Local EXT_IOCTL cmd struct pointer.
7469 * mode: flags.
7470 *
7471 * Returns:
7472 * None, request status indicated in cmd->Status.
7473 *
7474 * Context:
7475 * Kernel context.
7476 *
7477 */
static void
ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint8_t		*name;
	ql_tgt_t	*tq;
	EXT_PORT_PARAM	port_param = {0};
	uint32_t	rval = QL_SUCCESS;
	uint32_t	idma_rate;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* iIDMA port-rate control is a 24xx/25xx/81xx firmware feature. */
	if (CFG_IST(ha, CFG_CTRL_242581) == 0) {
		EL(ha, "invalid request for this HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/* The firmware cannot service the request while the loop is down. */
	if (LOOP_NOT_READY(ha)) {
		EL(ha, "failed, loop not ready\n");
		cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
		cmd->ResponseLen = 0;
		return;
	}

	/* Pull the request (mode, WWPN, speed) in from user space. */
	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Targets may only be addressed by world wide port name here. */
	if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) {
		EL(ha, "Unsupported dest lookup type: %xh\n",
		    port_param.FCScsiAddr.DestType);
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->ResponseLen = 0;
		return;
	}

	name = port_param.FCScsiAddr.DestAddr.WWPN;

	QL_PRINT_9(CE_CONT, "(%d): wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
	    ha->instance, name[0], name[1], name[2], name[3], name[4],
	    name[5], name[6], name[7]);

	/* Resolve the WWPN to a logged-in target queue. */
	tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT);
	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
		EL(ha, "failed, fc_port not found\n");
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->ResponseLen = 0;
		return;
	}

	/* Assume success; the paths below overwrite on failure. */
	cmd->Status = EXT_STATUS_OK;
	cmd->DetailStatus = EXT_STATUS_OK;

	switch (port_param.Mode) {
	case EXT_IIDMA_MODE_GET:
		/*
		 * Report the firmware's port rate for the wwpn
		 */
		rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
		    port_param.Mode);

		if (rval != QL_SUCCESS) {
			EL(ha, "iidma get failed: %xh\n", rval);
			cmd->Status = EXT_STATUS_MAILBOX;
		/*
		 * NOTE(review): several source lines appear to be missing
		 * here (the error-path epilogue and the opening of the
		 * "switch (idma_rate)" that the case labels below belong
		 * to).  Compare with the upstream ql_xioctl.c before
		 * editing this function.
		 */
			case IIDMA_RATE_1GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_1GBIT;
				break;
			case IIDMA_RATE_2GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_2GBIT;
				break;
			case IIDMA_RATE_4GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_4GBIT;
				break;
			case IIDMA_RATE_8GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_8GBIT;
				break;
			case IIDMA_RATE_10GB:
				port_param.Speed =
				    EXT_DEF_PORTSPEED_10GBIT;
				break;
			default:
				/* Unknown firmware rate code. */
				port_param.Speed =
				    EXT_DEF_PORTSPEED_UNKNOWN;
				EL(ha, "failed, Port speed rate=%xh\n",
				    idma_rate);
				break;
			}

			/* Copy back the data */
			rval = ddi_copyout((void *)&port_param,
			    (void *)(uintptr_t)cmd->ResponseAdr,
			    sizeof (EXT_PORT_PARAM), mode);

			if (rval != 0) {
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->ResponseLen = 0;
				EL(ha, "failed, ddi_copyout\n");
			} else {
				cmd->ResponseLen = (uint32_t)
				    sizeof (EXT_PORT_PARAM);
		/*
		 * NOTE(review): the closing braces of the copyout else
		 * and of the GET branch are not visible in this chunk
		 * (source appears truncated here).
		 */
		break;

	case EXT_IIDMA_MODE_SET:
		/*
		 * Set the firmware's port rate for the wwpn
		 */
		/* Translate the API speed code into a firmware rate code. */
		switch (port_param.Speed) {
		case EXT_DEF_PORTSPEED_1GBIT:
			idma_rate = IIDMA_RATE_1GB;
			break;
		case EXT_DEF_PORTSPEED_2GBIT:
			idma_rate = IIDMA_RATE_2GB;
			break;
		case EXT_DEF_PORTSPEED_4GBIT:
			idma_rate = IIDMA_RATE_4GB;
			break;
		case EXT_DEF_PORTSPEED_8GBIT:
			idma_rate = IIDMA_RATE_8GB;
			break;
		case EXT_DEF_PORTSPEED_10GBIT:
			/*
			 * NOTE(review): this assigns to port_param.Speed
			 * rather than idma_rate, so a 10Gb SET request
			 * passes an uninitialized/wrong rate to the
			 * firmware.  Looks like a bug -- confirm against
			 * upstream and fix to "idma_rate = IIDMA_RATE_10GB;".
			 */
			port_param.Speed = IIDMA_RATE_10GB;
			break;
		default:
			EL(ha, "invalid set iidma rate: %x\n",
			    port_param.Speed);
			cmd->Status = EXT_STATUS_INVALID_PARAM;
			cmd->ResponseLen = 0;
			rval = QL_PARAMETER_ERROR;
			break;
		}

		/* Only issue the mailbox command if translation succeeded. */
		if (rval == QL_SUCCESS) {
			rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
			    port_param.Mode);
			if (rval != QL_SUCCESS) {
				EL(ha, "iidma set failed: %xh\n", rval);
				cmd->Status = EXT_STATUS_MAILBOX;
				cmd->DetailStatus = rval;
				cmd->ResponseLen = 0;
			}
		}
		break;
	default:
		EL(ha, "invalid mode specified: %x\n", port_param.Mode);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->ResponseLen = 0;
		cmd->DetailStatus = 0;
		break;
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
7645
7646 /*
7647 * ql_get_fwexttrace
7648 * Dumps f/w extended trace buffer
7649 *
7650 * Input:
7651 * ha: adapter state pointer.
7652 * bp: buffer address.
7653 * mode: flags
7654 *
7655 * Returns:
7656 *
7657 * Context:
7658 * Kernel context.
7659 */
7660 /* ARGSUSED */
7661 static void
7662 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7663 {
7664 int rval;
7665 caddr_t payload;
7666
7667 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7668
7669 if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
7670 EL(ha, "invalid request for this HBA\n");
7671 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7672 cmd->ResponseLen = 0;
7673 return;
7674 }
7675
7676 if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) ||
7677 (ha->fwexttracebuf.bp == NULL)) {
7678 EL(ha, "f/w extended trace is not enabled\n");
7679 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7680 cmd->ResponseLen = 0;
7681 return;
7682 }
7683
7684 if (cmd->ResponseLen < FWEXTSIZE) {
7685 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7686 cmd->DetailStatus = FWEXTSIZE;
7687 EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n",
7688 cmd->ResponseLen, FWEXTSIZE);
7689 cmd->ResponseLen = 0;
7690 return;
7691 }
7692
7693 /* Time Stamp */
7694 rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP);
7695 if (rval != QL_SUCCESS) {
7696 EL(ha, "f/w extended trace insert"
7697 "time stamp failed: %xh\n", rval);
7698 cmd->Status = EXT_STATUS_ERR;
7699 cmd->ResponseLen = 0;
7700 return;
7701 }
7702
7703 /* Disable Tracing */
7704 rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE);
7705 if (rval != QL_SUCCESS) {
7706 EL(ha, "f/w extended trace disable failed: %xh\n", rval);
7707 cmd->Status = EXT_STATUS_ERR;
7708 cmd->ResponseLen = 0;
7709 return;
7710 }
7711
7712 /* Allocate payload buffer */
7713 payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7714 if (payload == NULL) {
7715 EL(ha, "failed, kmem_zalloc\n");
7716 cmd->Status = EXT_STATUS_NO_MEMORY;
7717 cmd->ResponseLen = 0;
7718 return;
7719 }
7720
7721 /* Sync DMA buffer. */
7722 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
7723 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
7724
7725 /* Copy trace buffer data. */
7726 ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload,
7727 (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE,
7728 DDI_DEV_AUTOINCR);
7729
7730 /* Send payload to application. */
7731 if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7732 cmd->ResponseLen, mode) != cmd->ResponseLen) {
7733 EL(ha, "failed, send_buffer_data\n");
7734 cmd->Status = EXT_STATUS_COPY_ERR;
7735 cmd->ResponseLen = 0;
7736 } else {
7737 cmd->Status = EXT_STATUS_OK;
7738 }
7739
7740 kmem_free(payload, FWEXTSIZE);
7741
7742 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7743 }
7744
7745 /*
7746 * ql_get_fwfcetrace
7747 * Dumps f/w fibre channel event trace buffer
7748 *
7749 * Input:
7750 * ha: adapter state pointer.
7751 * bp: buffer address.
7752 * mode: flags
7753 *
7754 * Returns:
7755 *
7756 * Context:
7757 * Kernel context.
7758 */
7759 /* ARGSUSED */
7760 static void
7761 ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7762 {
7763 int rval;
7764 caddr_t payload;
7765
7766 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7767
7768 if (CFG_IST(ha, CFG_CTRL_24258081) == 0) {
7769 EL(ha, "invalid request for this HBA\n");
7770 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7771 cmd->ResponseLen = 0;
7772 return;
7773 }
7774
7775 if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) ||
7776 (ha->fwfcetracebuf.bp == NULL)) {
7777 EL(ha, "f/w FCE trace is not enabled\n");
7778 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7779 cmd->ResponseLen = 0;
7780 return;
7781 }
7782
7783 if (cmd->ResponseLen < FWFCESIZE) {
7784 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7785 cmd->DetailStatus = FWFCESIZE;
7786 EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n",
7787 cmd->ResponseLen, FWFCESIZE);
7788 cmd->ResponseLen = 0;
7789 return;
7790 }
7791
7792 /* Disable Tracing */
7793 rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE);
7794 if (rval != QL_SUCCESS) {
7795 EL(ha, "f/w FCE trace disable failed: %xh\n", rval);
7796 cmd->Status = EXT_STATUS_ERR;
7797 cmd->ResponseLen = 0;
7798 return;
7799 }
7800
7801 /* Allocate payload buffer */
7802 payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
7803 if (payload == NULL) {
7804 EL(ha, "failed, kmem_zalloc\n");
7805 cmd->Status = EXT_STATUS_NO_MEMORY;
7806 cmd->ResponseLen = 0;
7807 return;
7808 }
7809
7810 /* Sync DMA buffer. */
7811 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
7812 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
7813
7814 /* Copy trace buffer data. */
7815 ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)payload,
7816 (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE,
7817 DDI_DEV_AUTOINCR);
7818
7819 /* Send payload to application. */
7820 if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
7821 cmd->ResponseLen, mode) != cmd->ResponseLen) {
7822 EL(ha, "failed, send_buffer_data\n");
7823 cmd->Status = EXT_STATUS_COPY_ERR;
7824 cmd->ResponseLen = 0;
7825 } else {
7826 cmd->Status = EXT_STATUS_OK;
7827 }
7828
7829 kmem_free(payload, FWFCESIZE);
7830
7831 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7832 }
7833
7834 /*
7835 * ql_get_pci_data
7836 * Retrieves pci config space data
7837 *
7838 * Input:
7839 * ha: adapter state pointer.
7840 * cmd: Local EXT_IOCTL cmd struct pointer.
7841 * mode: flags.
7842 *
7843 * Returns:
7844 * None, request status indicated in cmd->Status.
7845 *
7846 * Context:
7847 * Kernel context.
7848 *
7849 */
static void
ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint8_t		cap_ptr;
	uint8_t		cap_id;
	uint32_t	buf_size = 256;	/* legacy PCI config space size */

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First check the "Capabilities List" bit of the status register.
	 */
	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
		/*
		 * Now get the capability pointer
		 */
		cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
		while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
			/*
			 * Check for the pcie capability.
			 */
			cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
			if (cap_id == PCI_CAP_ID_PCI_E) {
				/* PCIe devices expose 4KB of config space. */
				buf_size = 4096;
				break;
			}
			/* Follow the capability linked list. */
			cap_ptr = (uint8_t)ql_pci_config_get8(ha,
			    (cap_ptr + PCI_CAP_NEXT_PTR));
	/*
	 * NOTE(review): the closing braces of the while loop and of the
	 * capability-list "if" are not visible in this chunk (source
	 * appears truncated here); verify against the upstream file.
	 */

	if (cmd->ResponseLen < buf_size) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = buf_size;
		EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
		    cmd->ResponseLen);
		return;
	}

	/* Dump PCI config data. */
	if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
	    buf_size, mode)) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->DetailStatus = 0;
		EL(ha, "failed, copy err pci_dump\n");
	} else {
		cmd->Status = EXT_STATUS_OK;
		/* DetailStatus reports how many bytes were dumped. */
		cmd->DetailStatus = buf_size;
	}

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
7902
7903 /*
7904 * ql_pci_dump
7905 * Dumps PCI config data to application buffer.
7906 *
7907 * Input:
7908 * ha = adapter state pointer.
7909 * bp = user buffer address.
7910 *
7911 * Returns:
7912 *
7913 * Context:
7914 * Kernel context.
7915 */
7916 int
7917 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
7918 {
7919 uint32_t pci_os;
7920 uint32_t *ptr32, *org_ptr32;
7921
7922 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
7923
7924 ptr32 = kmem_zalloc(pci_size, KM_SLEEP);
7925 if (ptr32 == NULL) {
7926 EL(ha, "failed kmem_zalloc\n");
7927 return (ENOMEM);
7928 }
7929
7930 /* store the initial value of ptr32 */
7931 org_ptr32 = ptr32;
7932 for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
7933 *ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
7934 LITTLE_ENDIAN_32(ptr32);
7935 ptr32++;
7936 }
7937
7938 if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
7939 0) {
7940 EL(ha, "failed ddi_copyout\n");
7941 kmem_free(org_ptr32, pci_size);
7942 return (EFAULT);
7943 }
7944
7945 QL_DUMP_9(org_ptr32, 8, pci_size);
7946
7947 kmem_free(org_ptr32, pci_size);
7948
7949 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
7950
7951 return (0);
7952 }
7953
7954 /*
7955 * ql_menlo_reset
7956 * Reset Menlo
7957 *
7958 * Input:
7959 * ha: adapter state pointer.
7960 * bp: buffer address.
7961 * mode: flags
7962 *
7963 * Returns:
7964 *
7965 * Context:
7966 * Kernel context.
7967 */
static void
ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_MENLO_RESET	rst;
	ql_mbx_data_t	mr;
	int		rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Menlo reset only applies to Menlo-based adapters. */
	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * TODO: only vp_index 0 can do this (?)
	 */

	/* Verify the size of request structure. */
	if (cmd->RequestLen < sizeof (EXT_MENLO_RESET)) {
		/* Return error */
		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
		    sizeof (EXT_MENLO_RESET));
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
	/*
	 * NOTE(review): lines are missing from this chunk here -- the
	 * early return, the ddi_copyin() that fills "rst", and the
	 * driver-stall sequence that precedes the EXT_STATUS_BUSY path
	 * below are not visible.  Compare with the upstream ql_xioctl.c
	 * before editing.
	 */
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Issue the reset; mr.mb[1] carries the Menlo sub-status. */
	rval = ql_reset_menlo(ha, &mr, rst.Flags);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, status=%xh\n", rval);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	} else if (mr.mb[1] != 0) {
		EL(ha, "failed, substatus=%d\n", mr.mb[1]);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = mr.mb[1];
		cmd->ResponseLen = 0;
	}

	/* Resume normal driver operation after the stall. */
	ql_restart_hba(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
8034
8035 /*
8036 * ql_menlo_get_fw_version
8037 * Get Menlo firmware version.
8038 *
8039 * Input:
8040 * ha: adapter state pointer.
8041 * bp: buffer address.
8042 * mode: flags
8043 *
8044 * Returns:
8045 *
8046 * Context:
8047 * Kernel context.
8048 */
static void
ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	int				rval;
	ql_mbx_iocb_t			*pkt;
	EXT_MENLO_GET_FW_VERSION	ver = {0};

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Menlo commands only apply to Menlo-based adapters. */
	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/* Response buffer must hold the whole version structure. */
	if (cmd->ResponseLen < sizeof (EXT_MENLO_GET_FW_VERSION)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_MENLO_GET_FW_VERSION);
		EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
		    sizeof (EXT_MENLO_GET_FW_VERSION));
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate packet. */
	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
	if (pkt == NULL) {
	/*
	 * NOTE(review): this chunk is truncated here -- the NO_MEMORY
	 * error path, the VERIFY_MENLO iocb setup, the
	 * ql_issue_mbx_iocb() call and the success test that the
	 * "Command error" branch below belongs to are not visible.
	 * Compare with the upstream ql_xioctl.c before editing.
	 */
		/* Command error */
		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
		    pkt->mvfy.failure_code);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
		    QL_FUNCTION_FAILED;
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&ver,
	    (void *)(uintptr_t)cmd->ResponseAdr,
	    sizeof (EXT_MENLO_GET_FW_VERSION), mode) != 0) {
		EL(ha, "failed, ddi_copyout\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		/* Success: report how many bytes were returned. */
		cmd->ResponseLen = sizeof (EXT_MENLO_GET_FW_VERSION);
	}

	kmem_free(pkt, sizeof (ql_mbx_iocb_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
8116
8117 /*
8118 * ql_menlo_update_fw
 *	Updates the Menlo firmware image.
8120 *
8121 * Input:
8122 * ha: adapter state pointer.
8123 * bp: buffer address.
8124 * mode: flags
8125 *
8126 * Returns:
8127 *
8128 * Context:
8129 * Kernel context.
8130 */
static void
ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_mbx_iocb_t		*pkt;
	dma_mem_t		*dma_mem;
	EXT_MENLO_UPDATE_FW	fw;
	uint32_t		*ptr32;
	int			rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Menlo firmware update only applies to Menlo-based adapters. */
	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * TODO: only vp_index 0 can do this (?)
	 */

	/* Verify the size of request structure. */
	if (cmd->RequestLen < sizeof (EXT_MENLO_UPDATE_FW)) {
		/* Return error */
		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
		    sizeof (EXT_MENLO_UPDATE_FW));
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
	/*
	 * NOTE(review): lines are missing from this chunk here -- the
	 * early return, the ddi_copyin() that fills "fw" and the
	 * driver-stall (ql_stall_driver) sequence are not visible.
	 * Compare with the upstream ql_xioctl.c before editing.
	 */
	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
	if (dma_mem == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}
	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
	if (pkt == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		kmem_free(dma_mem, sizeof (dma_mem_t));
		ql_restart_hba(ha);
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
	    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
		kmem_free(dma_mem, sizeof (dma_mem_t));
		ql_restart_hba(ha);
		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get firmware data. */
	if (ql_get_buffer_data((caddr_t)(uintptr_t)fw.pFwDataBytes, dma_mem->bp,
	    fw.TotalByteCount, mode) != fw.TotalByteCount) {
		EL(ha, "failed, get_buffer_data\n");
		ql_free_dma_resource(ha, dma_mem);
		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
		kmem_free(dma_mem, sizeof (dma_mem_t));
		ql_restart_hba(ha);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Sync DMA buffer. */
	(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
	    DDI_DMA_SYNC_FORDEV);

	/* Build the VERIFY_MENLO iocb describing the firmware image. */
	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
	pkt->mvfy.entry_count = 1;
	pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
	/* Third 32-bit word of the image carries its version. */
	ptr32 = dma_mem->bp;
	pkt->mvfy.fw_version = LE_32(ptr32[2]);
	pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
	pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
	/* Single data segment pointing at the staged image. */
	pkt->mvfy.dseg_count = LE_16(1);
	pkt->mvfy.dseg_0_address[0] = (uint32_t)
	    LE_32(LSD(dma_mem->cookie.dmac_laddress));
	pkt->mvfy.dseg_0_address[1] = (uint32_t)
	    LE_32(MSD(dma_mem->cookie.dmac_laddress));
	pkt->mvfy.dseg_0_length = LE_32(fw.TotalByteCount);

	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
	/* Convert returned fields to host byte order before checking. */
	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);

	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
	    pkt->mvfy.options_status != CS_COMPLETE) {
		/* Command error */
		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
		    pkt->mvfy.failure_code);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
		    QL_FUNCTION_FAILED;
		cmd->ResponseLen = 0;
	}

	/* Release resources and resume normal driver operation. */
	ql_free_dma_resource(ha, dma_mem);
	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
	kmem_free(dma_mem, sizeof (dma_mem_t));
	ql_restart_hba(ha);

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
8267
8268 /*
8269 * ql_menlo_manage_info
8270 * Get Menlo manage info.
8271 *
8272 * Input:
8273 * ha: adapter state pointer.
8274 * bp: buffer address.
8275 * mode: flags
8276 *
8277 * Returns:
8278 *
8279 * Context:
8280 * Kernel context.
8281 */
static void
ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_mbx_iocb_t		*pkt;
	dma_mem_t		*dma_mem = NULL;
	EXT_MENLO_MANAGE_INFO	info;
	int			rval;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);


	/* The call is only supported for Schultz right now */
	if (CFG_IST(ha, CFG_CTRL_8081)) {
		ql_get_xgmac_statistics(ha, cmd, mode);
		QL_PRINT_9(CE_CONT, "(%d): CFG_CTRL_81XX done\n",
		    ha->instance);
		return;
	}

	/*
	 * NOTE(review): since the 8081 case returned above,
	 * !CFG_IST(ha, CFG_CTRL_8081) is always true here, so this test
	 * always takes the error path and the remainder of the function
	 * is currently unreachable -- apparently intentional per the
	 * "only supported for Schultz" comment; confirm before changing.
	 */
	if (!CFG_IST(ha, CFG_CTRL_8081) || !CFG_IST(ha, CFG_CTRL_MENLO)) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/* Verify the size of request structure. */
	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
		/* Return error */
		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
		    sizeof (EXT_MENLO_MANAGE_INFO));
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get manage info request. */
	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
	/*
	 * NOTE(review): lines are missing from this chunk here -- the
	 * copy-error path and the allocation/initialization of "pkt"
	 * that the statements below continue are not visible.  Compare
	 * with the upstream ql_xioctl.c before editing.
	 */
	pkt->mdata.entry_count = 1;
	pkt->mdata.options_status = (uint16_t)LE_16(info.Operation);

	/* Get DMA memory for the IOCB */
	if (info.Operation == MENLO_OP_READ_MEM ||
	    info.Operation == MENLO_OP_WRITE_MEM) {
		/* Memory ops stage their data through a DMA buffer. */
		pkt->mdata.total_byte_count = LE_32(info.TotalByteCount);
		pkt->mdata.parameter_1 =
		    LE_32(info.Parameters.ap.MenloMemory.StartingAddr);
		dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t),
		    KM_SLEEP);
		if (dma_mem == NULL) {
			EL(ha, "failed, kmem_zalloc\n");
			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
			cmd->Status = EXT_STATUS_NO_MEMORY;
			cmd->ResponseLen = 0;
			return;
		}
		if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
			cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
			    "alloc failed", QL_NAME, ha->instance);
			kmem_free(dma_mem, sizeof (dma_mem_t));
			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
			cmd->ResponseLen = 0;
			return;
		}
		if (info.Operation == MENLO_OP_WRITE_MEM) {
			/* Get data. */
			if (ql_get_buffer_data(
			    (caddr_t)(uintptr_t)info.pDataBytes,
			    dma_mem->bp, info.TotalByteCount, mode) !=
			    info.TotalByteCount) {
				EL(ha, "failed, get_buffer_data\n");
				ql_free_dma_resource(ha, dma_mem);
				kmem_free(dma_mem, sizeof (dma_mem_t));
				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->ResponseLen = 0;
				return;
			}
			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
		}
		/* Single data segment pointing at the staging buffer. */
		pkt->mdata.dseg_count = LE_16(1);
		pkt->mdata.dseg_0_address[0] = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		pkt->mdata.dseg_0_address[1] = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
		pkt->mdata.dseg_0_length = LE_32(info.TotalByteCount);
	} else if (info.Operation & MENLO_OP_CHANGE_CONFIG) {
		/* Configuration change carries its arguments inline. */
		pkt->mdata.parameter_1 =
		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamID);
		pkt->mdata.parameter_2 =
		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0);
		pkt->mdata.parameter_3 =
		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1);
	} else if (info.Operation & MENLO_OP_GET_INFO) {
		pkt->mdata.parameter_1 =
		    LE_32(info.Parameters.ap.MenloInfo.InfoDataType);
		pkt->mdata.parameter_2 =
		    LE_32(info.Parameters.ap.MenloInfo.InfoContext);
	}

	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
	/* Convert returned fields to host byte order before checking. */
	LITTLE_ENDIAN_16(&pkt->mdata.options_status);
	LITTLE_ENDIAN_16(&pkt->mdata.failure_code);

	if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 ||
	    pkt->mdata.options_status != CS_COMPLETE) {
	/*
	 * NOTE(review): the first lines of this error branch (the EL()
	 * call's opening) are not visible in this chunk (source appears
	 * truncated here).
	 */
		    pkt->mdata.failure_code);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
		    QL_FUNCTION_FAILED;
		cmd->ResponseLen = 0;
	} else if (info.Operation == MENLO_OP_READ_MEM) {
		/* Return the data read from Menlo memory to the app. */
		(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
		    DDI_DMA_SYNC_FORKERNEL);
		if (ql_send_buffer_data((caddr_t)(uintptr_t)info.pDataBytes,
		    dma_mem->bp, info.TotalByteCount, mode) !=
		    info.TotalByteCount) {
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->ResponseLen = 0;
		}
	}

	/*
	 * NOTE(review): dma_mem is only allocated for READ_MEM/WRITE_MEM
	 * operations, yet it is unconditionally passed to
	 * ql_free_dma_resource()/kmem_free() here -- for CHANGE_CONFIG/
	 * GET_INFO it is still NULL.  Verify this is safe before relying
	 * on those paths (kmem_free() of NULL is not).
	 */
	ql_free_dma_resource(ha, dma_mem);
	kmem_free(dma_mem, sizeof (dma_mem_t));
	kmem_free(pkt, sizeof (ql_mbx_iocb_t));

	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
8435
8436 /*
8437 * ql_suspend_hba
8438 * Suspends all adapter ports.
8439 *
8440 * Input:
8441 * ha: adapter state pointer.
8442 * options: BIT_0 --> leave driver stalled on exit if
8443 * failed.
8444 *
8445 * Returns:
8446 * ql local function return status code.
8447 *
8448 * Context:
8449 * Kernel context.
8450 */
8451 static int
8452 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt)
8453 {
8508 * ha: adapter state pointer.
8509 * cmd: Local EXT_IOCTL cmd struct pointer.
8510 * mode: flags.
8511 *
8512 * Returns:
8513 * None, request status indicated in cmd->Status.
8514 *
8515 * Context:
8516 * Kernel context.
8517 *
8518 */
8519 static void
8520 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8521 {
8522 ql_adapter_state_t *vha;
8523 PEXT_VPORT_ID_CNT ptmp_vp;
8524 int id = 0;
8525 int rval;
8526 char name[MAXPATHLEN];
8527
8528 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8529
8530 /*
8531 * To be backward compatible with older API
8532 * check for the size of old EXT_VPORT_ID_CNT
8533 */
8534 if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT) &&
8535 (cmd->ResponseLen != EXT_OLD_VPORT_ID_CNT_SIZE)) {
8536 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8537 cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT);
8538 EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n",
8539 cmd->ResponseLen);
8540 cmd->ResponseLen = 0;
8541 return;
8542 }
8543
8544 ptmp_vp = (EXT_VPORT_ID_CNT *)
8545 kmem_zalloc(sizeof (EXT_VPORT_ID_CNT), KM_SLEEP);
8546 if (ptmp_vp == NULL) {
8547 EL(ha, "failed, kmem_zalloc\n");
8548 cmd->ResponseLen = 0;
8549 return;
8550 }
8551 vha = ha->vp_next;
8552 while (vha != NULL) {
8553 ptmp_vp->VpCnt++;
8554 ptmp_vp->VpId[id] = vha->vp_index;
8555 (void) ddi_pathname(vha->dip, name);
8556 (void) strcpy((char *)ptmp_vp->vp_path[id], name);
8557 ptmp_vp->VpDrvInst[id] = (int32_t)vha->instance;
8558 id++;
8559 vha = vha->vp_next;
8560 }
8561 rval = ddi_copyout((void *)ptmp_vp,
8562 (void *)(uintptr_t)(cmd->ResponseAdr),
8563 cmd->ResponseLen, mode);
8564 if (rval != 0) {
8565 cmd->Status = EXT_STATUS_COPY_ERR;
8566 cmd->ResponseLen = 0;
8567 EL(ha, "failed, ddi_copyout\n");
8568 } else {
8569 cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT);
8570 QL_PRINT_9(CE_CONT, "(%d): done, vport_cnt=%d\n",
8571 ha->instance, ptmp_vp->VpCnt);
8572 }
8573
8574 }
8575
8576 /*
8577 * ql_vp_ioctl
8578 * Performs all EXT_CC_VPORT_CMD functions.
8579 *
8580 * Input:
8581 * ha: adapter state pointer.
8582 * cmd: Local EXT_IOCTL cmd struct pointer.
8583 * mode: flags.
8584 *
8585 * Returns:
8586 * None, request status indicated in cmd->Status.
8587 *
8588 * Context:
8589 * Kernel context.
8590 */
8591 static void
8592 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8593 {
8594 QL_PRINT_9(CE_CONT, "(%d): started, cmd=%d\n", ha->instance,
8595 cmd->SubCode);
8596
8597 /* case off on command subcode */
8598 switch (cmd->SubCode) {
8599 case EXT_VF_SC_VPORT_GETINFO:
8600 ql_qry_vport(ha, cmd, mode);
8601 break;
8602 default:
8603 /* function not supported. */
8604 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
8605 EL(ha, "failed, Unsupported Subcode=%xh\n",
8606 cmd->SubCode);
8607 break;
8608 }
8609
8610 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8611 }
8612
8613 /*
8614 * ql_qry_vport
8615 * Performs EXT_VF_SC_VPORT_GETINFO subfunction.
8616 *
8617 * Input:
8618 * ha: adapter state pointer.
8619 * cmd: EXT_IOCTL cmd struct pointer.
8620 * mode: flags.
8621 *
8622 * Returns:
8623 * None, request status indicated in cmd->Status.
8624 *
8625 * Context:
8626 * Kernel context.
8627 */
8628 static void
8629 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode)
8630 {
8631 ql_adapter_state_t *tmp_vha;
8632 EXT_VPORT_INFO tmp_vport = {0};
8633 int max_vport;
8634
8635 QL_PRINT_9(CE_CONT, "(%d): started\n", vha->instance);
8636
8637 if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) {
8638 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8639 cmd->DetailStatus = sizeof (EXT_VPORT_INFO);
8640 EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n",
8641 cmd->ResponseLen);
8642 cmd->ResponseLen = 0;
8643 return;
8644 }
8645
8646 /* Fill in the vport information. */
8647 bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn,
8648 EXT_DEF_WWN_NAME_SIZE);
8649 bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn,
8650 EXT_DEF_WWN_NAME_SIZE);
8651 tmp_vport.state = vha->state;
8652 tmp_vport.id = vha->vp_index;
8653
8654 tmp_vha = vha->pha->vp_next;
8655 while (tmp_vha != NULL) {
8656 tmp_vport.used++;
8657 tmp_vha = tmp_vha->vp_next;
8658 }
8659
8660 max_vport = (CFG_IST(vha, CFG_CTRL_2422) ? MAX_24_VIRTUAL_PORTS :
8661 MAX_25_VIRTUAL_PORTS);
8662 if (max_vport > tmp_vport.used) {
8663 tmp_vport.free = max_vport - tmp_vport.used;
8664 }
8665
8666 if (ddi_copyout((void *)&tmp_vport,
8667 (void *)(uintptr_t)(cmd->ResponseAdr),
8668 sizeof (EXT_VPORT_INFO), mode) != 0) {
8669 cmd->Status = EXT_STATUS_COPY_ERR;
8670 cmd->ResponseLen = 0;
8671 EL(vha, "failed, ddi_copyout\n");
8672 } else {
8673 cmd->ResponseLen = sizeof (EXT_VPORT_INFO);
8674 QL_PRINT_9(CE_CONT, "(%d): done\n", vha->instance);
8675 }
8676 }
8677
8678 /*
8679 * ql_access_flash
8680 * Performs all EXT_CC_ACCESS_FLASH_OS functions.
8681 *
8682 * Input:
8683 * pi: port info pointer.
8684 * cmd: Local EXT_IOCTL cmd struct pointer.
8685 * mode: flags.
8686 *
8687 * Returns:
8688 * None, request status indicated in cmd->Status.
8689 *
8690 * Context:
8691 * Kernel context.
8692 */
8693 static void
8694 ql_access_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8695 {
8696 int rval;
8697
8698 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8699
8700 switch (cmd->SubCode) {
8701 case EXT_SC_FLASH_READ:
8702 if ((rval = ql_flash_fcode_dump(ha,
8703 (void *)(uintptr_t)(cmd->ResponseAdr),
8704 (size_t)(cmd->ResponseLen), cmd->Reserved1, mode)) != 0) {
8705 cmd->Status = EXT_STATUS_COPY_ERR;
8706 cmd->ResponseLen = 0;
8707 EL(ha, "flash_fcode_dump status=%xh\n", rval);
8708 }
8709 break;
8710 case EXT_SC_FLASH_WRITE:
8711 if ((rval = ql_r_m_w_flash(ha,
8712 (void *)(uintptr_t)(cmd->RequestAdr),
8713 (size_t)(cmd->RequestLen), cmd->Reserved1, mode)) !=
8714 QL_SUCCESS) {
8715 cmd->Status = EXT_STATUS_COPY_ERR;
8716 cmd->ResponseLen = 0;
8717 EL(ha, "r_m_w_flash status=%xh\n", rval);
8718 } else {
8719 /* Reset caches on all adapter instances. */
8720 ql_update_flash_caches(ha);
8721 }
8722 break;
8723 default:
8724 EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8725 cmd->Status = EXT_STATUS_ERR;
8726 cmd->ResponseLen = 0;
8727 break;
8728 }
8729
8730 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8731 }
8732
8733 /*
8734 * ql_reset_cmd
8735 * Performs all EXT_CC_RESET_FW_OS functions.
8736 *
8737 * Input:
8738 * ha: adapter state pointer.
8739 * cmd: Local EXT_IOCTL cmd struct pointer.
8740 *
8741 * Returns:
8742 * None, request status indicated in cmd->Status.
8743 *
8744 * Context:
8745 * Kernel context.
8746 */
8747 static void
8748 ql_reset_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
8749 {
8750 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8751
8752 switch (cmd->SubCode) {
8753 case EXT_SC_RESET_FC_FW:
8754 EL(ha, "isp_abort_needed\n");
8755 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
8756 break;
8757 case EXT_SC_RESET_MPI_FW:
8758 if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
8759 EL(ha, "invalid request for HBA\n");
8760 cmd->Status = EXT_STATUS_INVALID_REQUEST;
8761 cmd->ResponseLen = 0;
8762 } else {
8763 /* Wait for I/O to stop and daemon to stall. */
8764 if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
8765 EL(ha, "ql_suspend_hba failed\n");
8766 cmd->Status = EXT_STATUS_BUSY;
8767 cmd->ResponseLen = 0;
8768 } else if (ql_restart_mpi(ha) != QL_SUCCESS) {
8769 cmd->Status = EXT_STATUS_ERR;
8770 cmd->ResponseLen = 0;
8771 } else {
8772 uint8_t timer;
8773 /*
8774 * While the restart_mpi mailbox cmd may be
8775 * done the MPI is not. Wait at least 6 sec. or
8776 * exit if the loop comes up.
8777 */
8778 for (timer = 6; timer; timer--) {
8779 if (!(ha->task_daemon_flags &
8780 LOOP_DOWN)) {
8781 break;
8782 }
8783 /* Delay for 1 second. */
8784 ql_delay(ha, 1000000);
8785 }
8786 }
8787 ql_restart_hba(ha);
8788 }
8789 break;
8790 default:
8791 EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
8792 cmd->Status = EXT_STATUS_ERR;
8793 cmd->ResponseLen = 0;
8794 break;
8795 }
8796
8797 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8798 }
8799
8800 /*
8801 * ql_get_dcbx_parameters
8802 * Get DCBX parameters.
8803 *
8804 * Input:
8805 * ha: adapter state pointer.
8806 * cmd: User space CT arguments pointer.
8807 * mode: flags.
8808 */
8809 static void
8810 ql_get_dcbx_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8811 {
8812 uint8_t *tmp_buf;
8813 int rval;
8814
8815 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8816
8817 if (!(CFG_IST(ha, CFG_CTRL_8081))) {
8818 EL(ha, "invalid request for HBA\n");
8819 cmd->Status = EXT_STATUS_INVALID_REQUEST;
8820 cmd->ResponseLen = 0;
8821 return;
8822 }
8823
8824 /* Allocate memory for command. */
8825 tmp_buf = kmem_zalloc(EXT_DEF_DCBX_PARAM_BUF_SIZE, KM_SLEEP);
8826 if (tmp_buf == NULL) {
8827 EL(ha, "failed, kmem_zalloc\n");
8828 cmd->Status = EXT_STATUS_NO_MEMORY;
8829 cmd->ResponseLen = 0;
8830 return;
8831 }
8832 /* Send command */
8833 rval = ql_get_dcbx_params(ha, EXT_DEF_DCBX_PARAM_BUF_SIZE,
8834 (caddr_t)tmp_buf);
8835 if (rval != QL_SUCCESS) {
8836 /* error */
8837 EL(ha, "failed, get_dcbx_params_mbx=%xh\n", rval);
8838 kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
8839 cmd->Status = EXT_STATUS_ERR;
8840 cmd->ResponseLen = 0;
8841 return;
8842 }
8843
8844 /* Copy the response */
8845 if (ql_send_buffer_data((caddr_t)tmp_buf,
8846 (caddr_t)(uintptr_t)cmd->ResponseAdr,
8847 EXT_DEF_DCBX_PARAM_BUF_SIZE, mode) != EXT_DEF_DCBX_PARAM_BUF_SIZE) {
8848 EL(ha, "failed, ddi_copyout\n");
8849 cmd->Status = EXT_STATUS_COPY_ERR;
8850 cmd->ResponseLen = 0;
8851 } else {
8852 cmd->ResponseLen = EXT_DEF_DCBX_PARAM_BUF_SIZE;
8853 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8854 }
8855 kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
8856
8857 }
8858
8859 /*
8860 * ql_qry_cna_port
8861 * Performs EXT_SC_QUERY_CNA_PORT subfunction.
8862 *
8863 * Input:
8864 * ha: adapter state pointer.
8865 * cmd: EXT_IOCTL cmd struct pointer.
8866 * mode: flags.
8867 *
8868 * Returns:
8869 * None, request status indicated in cmd->Status.
8870 *
8871 * Context:
8872 * Kernel context.
8873 */
8874 static void
8875 ql_qry_cna_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8876 {
8877 EXT_CNA_PORT cna_port = {0};
8878
8879 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
8880
8881 if (!(CFG_IST(ha, CFG_CTRL_8081))) {
8882 EL(ha, "invalid request for HBA\n");
8883 cmd->Status = EXT_STATUS_INVALID_REQUEST;
8884 cmd->ResponseLen = 0;
8885 return;
8886 }
8887
8888 if (cmd->ResponseLen < sizeof (EXT_CNA_PORT)) {
8889 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8890 cmd->DetailStatus = sizeof (EXT_CNA_PORT);
8891 EL(ha, "failed, ResponseLen < EXT_CNA_PORT, Len=%xh\n",
8892 cmd->ResponseLen);
8893 cmd->ResponseLen = 0;
8894 return;
8895 }
8896
8897 cna_port.VLanId = ha->fcoe_vlan_id;
8898 cna_port.FabricParam = ha->fabric_params;
8899 bcopy(ha->fcoe_vnport_mac, cna_port.VNPortMACAddress,
8900 EXT_DEF_MAC_ADDRESS_SIZE);
8901
8902 if (ddi_copyout((void *)&cna_port,
8903 (void *)(uintptr_t)(cmd->ResponseAdr),
8904 sizeof (EXT_CNA_PORT), mode) != 0) {
8905 cmd->Status = EXT_STATUS_COPY_ERR;
8906 cmd->ResponseLen = 0;
8907 EL(ha, "failed, ddi_copyout\n");
8908 } else {
8909 cmd->ResponseLen = sizeof (EXT_CNA_PORT);
8910 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
8911 }
8912 }
8913
/*
 * ql_qry_adapter_versions
 *	Performs EXT_SC_QUERY_ADAPTER_VERSIONS subfunction.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_qry_adapter_versions(ql_adapter_state_t *ha, EXT_IOCTL *cmd,
    int mode)
{
	uint8_t is_8142, mpi_cap;
	uint32_t ver_len, transfer_size;
	PEXT_ADAPTERREGIONVERSION padapter_ver = NULL;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* 8142s do not have a EDC PHY firmware. */
	mpi_cap = (uint8_t)(ha->mpi_capability_list >> 8);

	is_8142 = 0;
	/* Sizeof (Length + Reserved) = 8 Bytes */
	if (mpi_cap == 0x02 || mpi_cap == 0x04) {
		/* 8142: one fewer region version is reported. */
		ver_len = (sizeof (EXT_REGIONVERSION) * (NO_OF_VERSIONS - 1))
		    + 8;
		is_8142 = 1;
	} else {
		ver_len = (sizeof (EXT_REGIONVERSION) * NO_OF_VERSIONS) + 8;
	}

	/* Allocate local memory for EXT_ADAPTERREGIONVERSION */
	padapter_ver = (EXT_ADAPTERREGIONVERSION *)kmem_zalloc(ver_len,
	    KM_SLEEP);

	if (padapter_ver == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		/*
		 * NOTE(review): the extracted source jumps here from the
		 * allocation-failure path directly into what appears to be
		 * the buffer-too-small path; the intervening lines (version
		 * gathering and the ResponseLen < ver_len check) are not
		 * visible in this view -- verify against the full source
		 * before editing this region.
		 */
		/*
		 * NOTE(review): the comma between the two string literals
		 * in the EL() call below passes the second literal as a
		 * vararg instead of concatenating the strings -- looks like
		 * a bug; confirm intended EL() usage in the full source.
		 */
		EL(ha, "failed, ResponseLen < ver_len, ",
		    "RespLen=%xh ver_len=%xh\n", cmd->ResponseLen, ver_len);
		/* Calculate the No. of valid versions being returned. */
		padapter_ver->Length = (uint32_t)
		    ((cmd->ResponseLen - 8) / sizeof (EXT_REGIONVERSION));
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = ver_len;
		transfer_size = cmd->ResponseLen;
	} else {
		transfer_size = ver_len;
	}

	if (ddi_copyout((void *)padapter_ver,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    transfer_size, mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		/* Report the full version-buffer length to the caller. */
		cmd->ResponseLen = ver_len;
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}

	kmem_free(padapter_ver, ver_len);
}
9016
/*
 * ql_get_xgmac_statistics
 *	Get XgMac information
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	EXT_IOCTL cmd struct pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_get_xgmac_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	int rval;
	uint32_t size;
	int8_t *tmp_buf;
	EXT_MENLO_MANAGE_INFO info;

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* Verify the size of request structure. */
	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
		/* Return error */
		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
		    sizeof (EXT_MENLO_MANAGE_INFO));
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get manage info request. */
	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	/*
	 * NOTE(review): this view of the file is missing the lines that
	 * validate info, compute 'size' and allocate 'tmp_buf'; the stray
	 * closing brace below belongs to that missing region.  Verify
	 * against the full source before editing here.
	 */
	}

	/* Fetch the XGMAC statistics into the local buffer. */
	rval = ql_get_xgmac_stats(ha, size, (caddr_t)tmp_buf);

	if (rval != QL_SUCCESS) {
		/* error */
		EL(ha, "failed, get_xgmac_stats =%xh\n", rval);
		kmem_free(tmp_buf, size);
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Copy the statistics out to the caller's buffer. */
	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)info.pDataBytes,
	    size, mode) != size) {
		EL(ha, "failed, ddi_copyout\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		cmd->ResponseLen = info.TotalByteCount;
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}
	kmem_free(tmp_buf, size);
	/* NOTE(review): 'done' is traced twice on success -- harmless. */
	QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
}
9112
/*
 * ql_get_fcf_list
 *	Get FCF list.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	cmd:	User space CT arguments pointer.
 *	mode:	flags.
 *
 * Returns:
 *	None, request status indicated in cmd->Status.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_get_fcf_list(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint8_t *tmp_buf;
	int rval;
	EXT_FCF_LIST fcf_list = {0};
	ql_fcf_list_desc_t mb_fcf_list = {0};

	QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);

	/* FCF lists only exist on FCoE-capable (81xx) adapters. */
	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
		EL(ha, "invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}
	/* Get manage info request. */
	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
	    (caddr_t)&fcf_list, sizeof (EXT_FCF_LIST), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* The caller must supply a non-zero buffer size. */
	if (!(fcf_list.BufSize)) {
		/* Return error */
		EL(ha, "failed, fcf_list BufSize is=%xh\n",
		    fcf_list.BufSize);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->ResponseLen = 0;
	/*
	 * NOTE(review): this view of the file is missing the lines between
	 * the invalid-parameter path above and the mailbox call below
	 * (presumably the early return, mb_fcf_list setup and the
	 * allocation of 'tmp_buf').  Verify against the full source
	 * before editing this region.
	 */
	/* Send command */
	rval = ql_get_fcf_list_mbx(ha, &mb_fcf_list, (caddr_t)tmp_buf);
	if (rval != QL_SUCCESS) {
		/* error */
		EL(ha, "failed, get_fcf_list_mbx=%xh\n", rval);
		kmem_free(tmp_buf, fcf_list.BufSize);
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Copy the response */
	if (ql_send_buffer_data((caddr_t)tmp_buf,
	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
	    fcf_list.BufSize, mode) != fcf_list.BufSize) {
		EL(ha, "failed, ddi_copyout\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		cmd->ResponseLen = mb_fcf_list.buffer_size;
		QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
	}

	kmem_free(tmp_buf, fcf_list.BufSize);
}
9197
9198 /*
9199 * ql_get_resource_counts
9200 * Get Resource counts:
9201 *
9202 * Input:
9203 * ha: adapter state pointer.
9204 * cmd: User space CT arguments pointer.
9205 * mode: flags.
9206 */
9207 static void
9208 ql_get_resource_counts(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9209 {
9210 int rval;
9211 ql_mbx_data_t mr;
9212 EXT_RESOURCE_CNTS tmp_rc_cnt = {0};
9213
9214 QL_PRINT_9(CE_CONT, "(%d): started\n", ha->instance);
9215
9216 if (!(CFG_IST(ha, CFG_CTRL_242581))) {
9217 EL(ha, "invalid request for HBA\n");
9218 cmd->Status = EXT_STATUS_INVALID_REQUEST;
9219 cmd->ResponseLen = 0;
9220 return;
9221 }
9222
9223 if (cmd->ResponseLen < sizeof (EXT_RESOURCE_CNTS)) {
9224 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9225 cmd->DetailStatus = sizeof (EXT_RESOURCE_CNTS);
9226 EL(ha, "failed, ResponseLen < EXT_RESOURCE_CNTS, "
9227 "Len=%xh\n", cmd->ResponseLen);
9228 cmd->ResponseLen = 0;
9229 return;
9230 }
9231
9232 rval = ql_get_resource_cnts(ha, &mr);
9233 if (rval != QL_SUCCESS) {
9234 EL(ha, "resource cnt mbx failed\n");
9235 cmd->Status = EXT_STATUS_ERR;
9236 cmd->ResponseLen = 0;
9237 return;
9238 }
9239
9240 tmp_rc_cnt.OrgTgtXchgCtrlCnt = (uint32_t)mr.mb[1];
9241 tmp_rc_cnt.CurTgtXchgCtrlCnt = (uint32_t)mr.mb[2];
9242 tmp_rc_cnt.CurXchgCtrlCnt = (uint32_t)mr.mb[3];
9243 tmp_rc_cnt.OrgXchgCtrlCnt = (uint32_t)mr.mb[6];
9244 tmp_rc_cnt.CurIocbBufCnt = (uint32_t)mr.mb[7];
9245 tmp_rc_cnt.OrgIocbBufCnt = (uint32_t)mr.mb[10];
9246 tmp_rc_cnt.NoOfSupVPs = (uint32_t)mr.mb[11];
9247 tmp_rc_cnt.NoOfSupFCFs = (uint32_t)mr.mb[12];
9248
9249 rval = ddi_copyout((void *)&tmp_rc_cnt,
9250 (void *)(uintptr_t)(cmd->ResponseAdr),
9251 sizeof (EXT_RESOURCE_CNTS), mode);
9252 if (rval != 0) {
9253 cmd->Status = EXT_STATUS_COPY_ERR;
9254 cmd->ResponseLen = 0;
9255 EL(ha, "failed, ddi_copyout\n");
9256 } else {
9257 cmd->ResponseLen = sizeof (EXT_RESOURCE_CNTS);
9258 QL_PRINT_9(CE_CONT, "(%d): done\n", ha->instance);
9259 }
9260 }
|
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2015 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26 */
27
28 /*
29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
30 */
31
32 #pragma ident "Copyright 2015 QLogic Corporation; ql_xioctl.c"
33
34 /*
35 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
36 *
37 * ***********************************************************************
38 * * **
39 * * NOTICE **
40 * * COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION **
41 * * ALL RIGHTS RESERVED **
42 * * **
43 * ***********************************************************************
44 *
45 */
46
47 #include <ql_apps.h>
48 #include <ql_api.h>
49 #include <ql_debug.h>
50 #include <ql_init.h>
51 #include <ql_iocb.h>
52 #include <ql_ioctl.h>
53 #include <ql_mbx.h>
54 #include <ql_nx.h>
55 #include <ql_xioctl.h>
56
57 /*
58 * Local data
59 */
60
61 /*
62 * Local prototypes
63 */
64 static int ql_sdm_ioctl(ql_adapter_state_t *, int, void *, int);
65 static int ql_sdm_setup(ql_adapter_state_t *, EXT_IOCTL **, void *, int,
66 boolean_t (*)(EXT_IOCTL *));
67 static boolean_t ql_validate_signature(EXT_IOCTL *);
68 static int ql_sdm_return(ql_adapter_state_t *, EXT_IOCTL *, void *, int);
69 static void ql_query(ql_adapter_state_t *, EXT_IOCTL *, int);
70 static void ql_qry_hba_node(ql_adapter_state_t *, EXT_IOCTL *, int);
71 static void ql_qry_hba_port(ql_adapter_state_t *, EXT_IOCTL *, int);
72 static void ql_qry_disc_port(ql_adapter_state_t *, EXT_IOCTL *, int);
73 static void ql_qry_disc_tgt(ql_adapter_state_t *, EXT_IOCTL *, int);
74 static void ql_qry_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
80 static void ql_scsi_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
81 static void ql_wwpn_to_scsiaddr(ql_adapter_state_t *, EXT_IOCTL *, int);
82 static void ql_host_idx(ql_adapter_state_t *, EXT_IOCTL *, int);
83 static void ql_host_drvname(ql_adapter_state_t *, EXT_IOCTL *, int);
84 static void ql_read_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
85 static void ql_write_nvram(ql_adapter_state_t *, EXT_IOCTL *, int);
86 static void ql_read_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
87 static void ql_write_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
88 static void ql_write_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
89 static void ql_read_vpd(ql_adapter_state_t *, EXT_IOCTL *, int);
90 static void ql_diagnostic_loopback(ql_adapter_state_t *, EXT_IOCTL *, int);
91 static void ql_send_els_rnid(ql_adapter_state_t *, EXT_IOCTL *, int);
92 static void ql_set_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
93 static void ql_get_host_data(ql_adapter_state_t *, EXT_IOCTL *, int);
94 static void ql_qry_cna_port(ql_adapter_state_t *, EXT_IOCTL *, int);
95
96 static int ql_lun_count(ql_adapter_state_t *, ql_tgt_t *);
97 static int ql_report_lun(ql_adapter_state_t *, ql_tgt_t *);
98 static int ql_inq_scan(ql_adapter_state_t *, ql_tgt_t *, int);
99 static int ql_inq(ql_adapter_state_t *, ql_tgt_t *, int, ql_mbx_iocb_t *,
100 uint32_t);
101 static uint32_t ql_get_buffer_data(caddr_t, caddr_t, uint32_t, int);
102 static uint32_t ql_send_buffer_data(caddr_t, caddr_t, uint32_t, int);
103 static int ql_24xx_flash_desc(ql_adapter_state_t *);
104 static int ql_setup_flash(ql_adapter_state_t *);
105 static ql_tgt_t *ql_find_port(ql_adapter_state_t *, uint8_t *, uint16_t);
106 static int ql_flash_fcode_load(ql_adapter_state_t *, void *, uint32_t, int);
107 static int ql_flash_fcode_dump(ql_adapter_state_t *, void *, uint32_t,
108 uint32_t, int);
109 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t,
110 uint8_t);
111 static void ql_set_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
112 static void ql_get_rnid_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
113 static int ql_reset_statistics(ql_adapter_state_t *, EXT_IOCTL *);
114 static void ql_get_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
115 static void ql_get_statistics_fc(ql_adapter_state_t *, EXT_IOCTL *, int);
116 static void ql_get_statistics_fc4(ql_adapter_state_t *, EXT_IOCTL *, int);
117 static void ql_set_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
118 static void ql_get_led_state(ql_adapter_state_t *, EXT_IOCTL *, int);
119 static void ql_drive_led(ql_adapter_state_t *, uint32_t);
120 static int ql_setup_led(ql_adapter_state_t *);
121 static int ql_wrapup_led(ql_adapter_state_t *);
122 static void ql_get_port_summary(ql_adapter_state_t *, EXT_IOCTL *, int);
123 static void ql_get_target_id(ql_adapter_state_t *, EXT_IOCTL *, int);
124 static void ql_get_sfp(ql_adapter_state_t *, EXT_IOCTL *, int);
125 static int ql_dump_sfp(ql_adapter_state_t *, void *, int);
126 static ql_fcache_t *ql_setup_fnode(ql_adapter_state_t *);
127 static void ql_get_fcache(ql_adapter_state_t *, EXT_IOCTL *, int);
128 static void ql_get_fcache_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
129 void ql_update_fcache(ql_adapter_state_t *, uint8_t *, uint32_t);
130 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
131 static void ql_flash_layout_table(ql_adapter_state_t *, uint32_t);
132 static void ql_process_flt(ql_adapter_state_t *, uint32_t);
133 static void ql_flash_nvram_defaults(ql_adapter_state_t *);
134 static void ql_port_param(ql_adapter_state_t *, EXT_IOCTL *, int);
135 static int ql_check_pci(ql_adapter_state_t *, ql_fcache_t *, uint32_t *);
136 static void ql_get_pci_data(ql_adapter_state_t *, EXT_IOCTL *, int);
137 static void ql_get_fwfcetrace(ql_adapter_state_t *, EXT_IOCTL *, int);
138 static void ql_get_fwexttrace(ql_adapter_state_t *, EXT_IOCTL *, int);
139 static void ql_menlo_reset(ql_adapter_state_t *, EXT_IOCTL *, int);
140 static void ql_menlo_get_fw_version(ql_adapter_state_t *, EXT_IOCTL *, int);
141 static void ql_menlo_update_fw(ql_adapter_state_t *, EXT_IOCTL *, int);
142 static void ql_menlo_manage_info(ql_adapter_state_t *, EXT_IOCTL *, int);
143 static int ql_suspend_hba(ql_adapter_state_t *, uint32_t);
144 static void ql_restart_hba(ql_adapter_state_t *);
145 static void ql_get_vp_cnt_id(ql_adapter_state_t *, EXT_IOCTL *, int);
146 static void ql_vp_ioctl(ql_adapter_state_t *, EXT_IOCTL *, int);
147 static void ql_qry_vport(ql_adapter_state_t *, EXT_IOCTL *, int);
148 static void ql_access_flash(ql_adapter_state_t *, EXT_IOCTL *, int);
149 static void ql_reset_cmd(ql_adapter_state_t *, EXT_IOCTL *);
150 static void ql_update_flash_caches(ql_adapter_state_t *);
151 static void ql_get_dcbx_parameters(ql_adapter_state_t *, EXT_IOCTL *, int);
152 static void ql_get_xgmac_statistics(ql_adapter_state_t *, EXT_IOCTL *, int);
153 static void ql_get_fcf_list(ql_adapter_state_t *, EXT_IOCTL *, int);
154 static void ql_get_resource_counts(ql_adapter_state_t *, EXT_IOCTL *, int);
155 static void ql_qry_adapter_versions(ql_adapter_state_t *, EXT_IOCTL *, int);
156 static void ql_get_temperature(ql_adapter_state_t *, EXT_IOCTL *, int);
157 static void ql_dump_cmd(ql_adapter_state_t *, EXT_IOCTL *, int);
158 static void ql_serdes_reg(ql_adapter_state_t *, EXT_IOCTL *, int);
159 static void ql_serdes_reg_ex(ql_adapter_state_t *, EXT_IOCTL *, int);
160 static void ql_els_passthru(ql_adapter_state_t *, EXT_IOCTL *, int);
161 static void ql_flash_update_caps(ql_adapter_state_t *, EXT_IOCTL *, int);
162 static void ql_get_bbcr_data(ql_adapter_state_t *, EXT_IOCTL *, int);
163 static void ql_get_priv_stats(ql_adapter_state_t *, EXT_IOCTL *, int);
164
165 /* ******************************************************************** */
166 /* External IOCTL support. */
167 /* ******************************************************************** */
168
169 /*
170 * ql_alloc_xioctl_resource
171 * Allocates resources needed by module code.
172 *
173 * Input:
174 * ha: adapter state pointer.
175 *
176 * Returns:
177 * SYS_ERRNO
178 *
179 * Context:
180 * Kernel context.
181 */
182 int
183 ql_alloc_xioctl_resource(ql_adapter_state_t *ha)
184 {
185 ql_xioctl_t *xp;
186
187 QL_PRINT_9(ha, "started\n");
188
189 if (ha->xioctl != NULL) {
190 QL_PRINT_9(ha, "already allocated done\n",
191 ha->instance);
192 return (0);
193 }
194
195 xp = kmem_zalloc(sizeof (ql_xioctl_t), KM_SLEEP);
196 if (xp == NULL) {
197 EL(ha, "failed, kmem_zalloc\n");
198 return (ENOMEM);
199 }
200 ha->xioctl = xp;
201
202 /* Allocate AEN tracking buffer */
203 xp->aen_tracking_queue = kmem_zalloc(EXT_DEF_MAX_AEN_QUEUE *
204 sizeof (EXT_ASYNC_EVENT), KM_SLEEP);
205 if (xp->aen_tracking_queue == NULL) {
206 EL(ha, "failed, kmem_zalloc-2\n");
207 ql_free_xioctl_resource(ha);
208 return (ENOMEM);
209 }
210
211 QL_PRINT_9(ha, "done\n");
212
213 return (0);
214 }
215
216 /*
217 * ql_free_xioctl_resource
218 * Frees resources used by module code.
219 *
220 * Input:
221 * ha: adapter state pointer.
222 *
223 * Context:
224 * Kernel context.
225 */
226 void
227 ql_free_xioctl_resource(ql_adapter_state_t *ha)
228 {
229 ql_xioctl_t *xp = ha->xioctl;
230
231 QL_PRINT_9(ha, "started\n");
232
233 if (xp == NULL) {
234 QL_PRINT_9(ha, "already freed\n");
235 return;
236 }
237
238 if (xp->aen_tracking_queue != NULL) {
239 kmem_free(xp->aen_tracking_queue, EXT_DEF_MAX_AEN_QUEUE *
240 sizeof (EXT_ASYNC_EVENT));
241 xp->aen_tracking_queue = NULL;
242 }
243
244 kmem_free(xp, sizeof (ql_xioctl_t));
245 ha->xioctl = NULL;
246
247 QL_PRINT_9(ha, "done\n");
248 }
249
250 /*
251 * ql_xioctl
252 * External IOCTL processing.
253 *
254 * Input:
255 * ha: adapter state pointer.
256 * cmd: function to perform
257 * arg: data type varies with request
258 * mode: flags
259 * cred_p: credentials pointer
260 * rval_p: pointer to result value
261 *
262 * Returns:
263 * 0: success
264 * ENXIO: No such device or address
265 * ENOPROTOOPT: Protocol not available
266 *
267 * Context:
268 * Kernel context.
269 */
270 /* ARGSUSED */
271 int
272 ql_xioctl(ql_adapter_state_t *ha, int cmd, intptr_t arg, int mode,
273 cred_t *cred_p, int *rval_p)
274 {
275 int rval;
276
277 QL_PRINT_9(ha, "started, cmd=%d\n", cmd);
278
279 if (ha->xioctl == NULL) {
280 QL_PRINT_9(ha, "no context\n");
281 return (ENXIO);
282 }
283
284 switch (cmd) {
285 case EXT_CC_QUERY:
286 case EXT_CC_SEND_FCCT_PASSTHRU:
287 case EXT_CC_REG_AEN:
288 case EXT_CC_GET_AEN:
289 case EXT_CC_SEND_SCSI_PASSTHRU:
290 case EXT_CC_WWPN_TO_SCSIADDR:
291 case EXT_CC_SEND_ELS_RNID:
292 case EXT_CC_SET_DATA:
293 case EXT_CC_GET_DATA:
294 case EXT_CC_HOST_IDX:
295 case EXT_CC_READ_NVRAM:
296 case EXT_CC_UPDATE_NVRAM:
297 case EXT_CC_READ_OPTION_ROM:
298 case EXT_CC_READ_OPTION_ROM_EX:
299 case EXT_CC_UPDATE_OPTION_ROM:
300 case EXT_CC_UPDATE_OPTION_ROM_EX:
301 case EXT_CC_GET_VPD:
302 case EXT_CC_SET_VPD:
303 case EXT_CC_LOOPBACK:
304 case EXT_CC_GET_FCACHE:
305 case EXT_CC_GET_FCACHE_EX:
306 case EXT_CC_HOST_DRVNAME:
307 case EXT_CC_GET_SFP_DATA:
308 case EXT_CC_PORT_PARAM:
309 case EXT_CC_GET_PCI_DATA:
310 case EXT_CC_GET_FWEXTTRACE:
311 case EXT_CC_GET_FWFCETRACE:
312 case EXT_CC_GET_VP_CNT_ID:
313 case EXT_CC_VPORT_CMD:
314 case EXT_CC_ACCESS_FLASH:
315 case EXT_CC_RESET_FW:
316 case EXT_CC_MENLO_MANAGE_INFO:
317 case EXT_CC_I2C_DATA:
318 case EXT_CC_DUMP:
319 case EXT_CC_SERDES_REG_OP:
320 case EXT_CC_VF_STATE:
321 case EXT_CC_SERDES_REG_OP_EX:
322 case EXT_CC_ELS_PASSTHRU_OS:
323 case EXT_CC_FLASH_UPDATE_CAPS_OS:
324 case EXT_CC_GET_BBCR_DATA_OS:
325 rval = ql_sdm_ioctl(ha, cmd, (void *)arg, mode);
326 break;
327 default:
328 /* function not supported. */
329 EL(ha, "function=%d not supported\n", cmd);
330 rval = ENOPROTOOPT;
331 }
332
333 QL_PRINT_9(ha, "done\n");
334
335 return (rval);
336 }
337
/*
 * ql_sdm_ioctl
 *	Provides ioctl functions for SAN/Device Management functions
 *	AKA External Ioctl functions.
 *
 * Input:
 *	ha:		adapter state pointer.
 *	ioctl_code:	ioctl function to perform
 *	arg:		Pointer to EXT_IOCTL cmd data in application land.
 *	mode:		flags
 *
 * Returns:
 *	0:	success
 *	ENOMEM:	Alloc of local EXT_IOCTL struct failed.
 *	EFAULT:	Copyin of caller's EXT_IOCTL struct failed or
 *		copyout of EXT_IOCTL status info failed.
 *	EINVAL:	Signature or version of caller's EXT_IOCTL invalid.
 *	EBUSY:	Device busy
 *
 * Context:
 *	Kernel context.
 */
static int
ql_sdm_ioctl(ql_adapter_state_t *ha, int ioctl_code, void *arg, int mode)
{
	EXT_IOCTL *cmd;
	int rval;
	ql_adapter_state_t *vha;

	QL_PRINT_9(ha, "started\n");

	/* Copy argument structure (EXT_IOCTL) from application land. */
	if ((rval = ql_sdm_setup(ha, &cmd, arg, mode,
	    ql_validate_signature)) != 0) {
		/*
		 * a non-zero value at this time means a problem getting
		 * the requested information from application land, just
		 * return the error code and hope for the best.
		 */
		EL(ha, "failed, sdm_setup\n");
		return (rval);
	}

	/*
	 * Map the physical ha ptr (which the ioctl is called with)
	 * to the virtual ha that the caller is addressing.
	 */
	if (ha->flags & VP_ENABLED) {
		/* Check that it is within range. */
		if (cmd->HbaSelect > ha->max_vports) {
			EL(ha, "Invalid HbaSelect vp index: %xh\n",
			    cmd->HbaSelect);
			cmd->Status = EXT_STATUS_INVALID_VPINDEX;
			cmd->ResponseLen = 0;
			return (EFAULT);
		}
		/*
		 * Special case: HbaSelect == 0 is physical ha
		 */
		if (cmd->HbaSelect != 0) {
			/* Walk the vport list for a matching vp_index. */
			vha = ha->vp_next;
			while (vha != NULL) {
				if (vha->vp_index == cmd->HbaSelect) {
					ha = vha;
					break;
				}
				vha = vha->vp_next;
			}
			/*
			 * The specified vp index may be valid(within range)
			 * but it's not in the list. Currently this is all
			 * we can say.
			 */
			if (vha == NULL || !(vha->flags & VP_ENABLED)) {
				cmd->Status = EXT_STATUS_INVALID_VPINDEX;
				cmd->ResponseLen = 0;
				return (EFAULT);
			}
		}
	}

	/*
	 * If driver is suspended, stalled, or powered down rtn BUSY
	 */
	if (ha->flags & ADAPTER_SUSPENDED ||
	    (ha->task_daemon_flags & (DRIVER_STALL | ISP_ABORT_NEEDED |
	    ABORT_ISP_ACTIVE | LOOP_RESYNC_NEEDED | LOOP_RESYNC_ACTIVE)) ||
	    ha->power_level != PM_LEVEL_D0) {
		EL(ha, " %s\n", ha->flags & ADAPTER_SUSPENDED ?
		    "driver suspended" :
		    (ha->task_daemon_flags & (DRIVER_STALL | ISP_ABORT_NEEDED |
		    ABORT_ISP_ACTIVE | LOOP_RESYNC_NEEDED |
		    LOOP_RESYNC_ACTIVE) ? "driver stalled" :
		    "FCA powered down"));
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		rval = EBUSY;

		/* Return results to caller */
		if ((ql_sdm_return(ha, cmd, arg, mode)) == -1) {
			EL(ha, "failed, sdm_return\n");
			rval = EFAULT;
		}
		return (rval);
	}

	/* Dispatch to the handler for the requested external ioctl code. */
	switch (ioctl_code) {
	case EXT_CC_QUERY_OS:
		ql_query(ha, cmd, mode);
		break;
	case EXT_CC_SEND_FCCT_PASSTHRU_OS:
		ql_fcct(ha, cmd, mode);
		break;
	/*
	 * NOTE(review): this view of the file is missing the switch cases
	 * between EXT_CC_SEND_FCCT_PASSTHRU_OS and the menlo cases below;
	 * verify against the full source before editing this region.
	 */
		ql_menlo_get_fw_version(ha, cmd, mode);
		break;
	case EXT_CC_MENLO_UPDATE_FW:
		ql_menlo_update_fw(ha, cmd, mode);
		break;
	case EXT_CC_MENLO_MANAGE_INFO:
		ql_menlo_manage_info(ha, cmd, mode);
		break;
	case EXT_CC_GET_VP_CNT_ID_OS:
		ql_get_vp_cnt_id(ha, cmd, mode);
		break;
	case EXT_CC_VPORT_CMD_OS:
		ql_vp_ioctl(ha, cmd, mode);
		break;
	case EXT_CC_ACCESS_FLASH_OS:
		ql_access_flash(ha, cmd, mode);
		break;
	case EXT_CC_RESET_FW_OS:
		ql_reset_cmd(ha, cmd);
		break;
	case EXT_CC_I2C_DATA:
		ql_get_temperature(ha, cmd, mode);
		break;
	case EXT_CC_DUMP_OS:
		ql_dump_cmd(ha, cmd, mode);
		break;
	case EXT_CC_SERDES_REG_OP:
		ql_serdes_reg(ha, cmd, mode);
		break;
	case EXT_CC_SERDES_REG_OP_EX:
		ql_serdes_reg_ex(ha, cmd, mode);
		break;
	case EXT_CC_ELS_PASSTHRU_OS:
		ql_els_passthru(ha, cmd, mode);
		break;
	case EXT_CC_FLASH_UPDATE_CAPS_OS:
		ql_flash_update_caps(ha, cmd, mode);
		break;
	case EXT_CC_GET_BBCR_DATA_OS:
		ql_get_bbcr_data(ha, cmd, mode);
		break;
	default:
		/* function not supported. */
		EL(ha, "failed, function not supported=%d\n", ioctl_code);

		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		break;
	}

	/* Return results to caller */
	if (ql_sdm_return(ha, cmd, arg, mode) == -1) {
		EL(ha, "failed, sdm_return\n");
		return (EFAULT);
	}

	QL_PRINT_9(ha, "done\n");

	return (0);
}
586
587 /*
588 * ql_sdm_setup
589 * Make a local copy of the EXT_IOCTL struct and validate it.
590 *
591 * Input:
592 * ha: adapter state pointer.
 *	cmd_struct:	Pointer to location to store local address of EXT_IOCTL.
594 * arg: Address of application EXT_IOCTL cmd data
595 * mode: flags
596 * val_sig: Pointer to a function to validate the ioctl signature.
597 *
598 * Returns:
599 * 0: success
600 * EFAULT: Copy in error of application EXT_IOCTL struct.
601 * EINVAL: Invalid version, signature.
602 * ENOMEM: Local allocation of EXT_IOCTL failed.
603 *
604 * Context:
605 * Kernel context.
606 */
607 static int
608 ql_sdm_setup(ql_adapter_state_t *ha, EXT_IOCTL **cmd_struct, void *arg,
609 int mode, boolean_t (*val_sig)(EXT_IOCTL *))
610 {
611 int rval;
612 EXT_IOCTL *cmd;
613
614 QL_PRINT_9(ha, "started\n");
615
616 /* Allocate local memory for EXT_IOCTL. */
617 *cmd_struct = NULL;
618 cmd = (EXT_IOCTL *)kmem_zalloc(sizeof (EXT_IOCTL), KM_SLEEP);
619 if (cmd == NULL) {
620 EL(ha, "failed, kmem_zalloc\n");
621 return (ENOMEM);
622 }
623 /* Get argument structure. */
624 rval = ddi_copyin(arg, (void *)cmd, sizeof (EXT_IOCTL), mode);
625 if (rval != 0) {
626 EL(ha, "failed, ddi_copyin\n");
627 rval = EFAULT;
628 } else {
629 /*
630 * Check signature and the version.
631 * If either are not valid then neither is the
632 * structure so don't attempt to return any error status
633 * because we can't trust what caller's arg points to.
634 * Just return the errno.
635 */
636 if (val_sig(cmd) == 0) {
637 EL(ha, "failed, signature\n");
638 rval = EINVAL;
639 } else if (cmd->Version > EXT_VERSION) {
640 EL(ha, "failed, version\n");
641 rval = EINVAL;
642 }
643 }
644
645 if (rval == 0) {
646 QL_PRINT_9(ha, "done\n");
647 *cmd_struct = cmd;
648 cmd->Status = EXT_STATUS_OK;
649 cmd->DetailStatus = 0;
650 } else {
651 kmem_free((void *)cmd, sizeof (EXT_IOCTL));
652 }
653
654 return (rval);
655 }
656
657 /*
658 * ql_validate_signature
659 * Validate the signature string for an external ioctl call.
660 *
661 * Input:
 *	cmd_struct:	Pointer to the EXT_IOCTL whose Signature field is validated.
663 *
664 * Returns:
665 * B_TRUE: Signature is valid.
666 * B_FALSE: Signature is NOT valid.
667 *
668 * Context:
669 * Kernel context.
670 */
671 static boolean_t
672 ql_validate_signature(EXT_IOCTL *cmd_struct)
673 {
674 /*
675 * Check signature.
676 *
677 * If signature is not valid then neither is the rest of
678 * the structure (e.g., can't trust it), so don't attempt
679 * to return any error status other than the errno.
680 */
681 if (bcmp(&cmd_struct->Signature, "QLOGIC", 6) != 0) {
682 QL_PRINT_2(NULL, "failed,\n");
683 return (B_FALSE);
684 }
685
686 return (B_TRUE);
687 }
688
689 /*
690 * ql_sdm_return
691 * Copies return data/status to application land for
692 * ioctl call using the SAN/Device Management EXT_IOCTL call interface.
693 *
694 * Input:
695 * ha: adapter state pointer.
 *	cmd:	Pointer to kernel copy of requestor's EXT_IOCTL struct.
 *	arg:	EXT_IOCTL cmd data in application land.
699 * mode: flags
700 *
701 * Returns:
702 * 0: success
703 * EFAULT: Copy out error.
704 *
705 * Context:
706 * Kernel context.
707 */
708 /* ARGSUSED */
709 static int
710 ql_sdm_return(ql_adapter_state_t *ha, EXT_IOCTL *cmd, void *arg, int mode)
711 {
712 int rval = 0;
713
714 QL_PRINT_9(ha, "started\n");
715
716 rval |= ddi_copyout((void *)&cmd->ResponseLen,
717 (void *)&(((EXT_IOCTL*)arg)->ResponseLen), sizeof (uint32_t),
718 mode);
719
720 rval |= ddi_copyout((void *)&cmd->Status,
721 (void *)&(((EXT_IOCTL*)arg)->Status),
722 sizeof (cmd->Status), mode);
723 rval |= ddi_copyout((void *)&cmd->DetailStatus,
724 (void *)&(((EXT_IOCTL*)arg)->DetailStatus),
725 sizeof (cmd->DetailStatus), mode);
726
727 kmem_free((void *)cmd, sizeof (EXT_IOCTL));
728
729 if (rval != 0) {
730 /* Some copyout operation failed */
731 EL(ha, "failed, ddi_copyout\n");
732 return (EFAULT);
733 }
734
735 QL_PRINT_9(ha, "done\n");
736
737 return (0);
738 }
739
740 /*
741 * ql_query
742 * Performs all EXT_CC_QUERY functions.
743 *
744 * Input:
745 * ha: adapter state pointer.
746 * cmd: Local EXT_IOCTL cmd struct pointer.
747 * mode: flags.
748 *
749 * Returns:
750 * None, request status indicated in cmd->Status.
751 *
752 * Context:
753 * Kernel context.
754 */
static void
ql_query(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	QL_PRINT_9(ha, "started, cmd=%d\n",
	    cmd->SubCode);

	/*
	 * Dispatch on the EXT_SC_QUERY_* subcode; each handler sets
	 * cmd->Status/ResponseLen and copies its result to user space.
	 */
	switch (cmd->SubCode) {
	case EXT_SC_QUERY_HBA_NODE:
		ql_qry_hba_node(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_HBA_PORT:
		ql_qry_hba_port(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_DISC_PORT:
		ql_qry_disc_port(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_DISC_TGT:
		ql_qry_disc_tgt(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_DRIVER:
		ql_qry_driver(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_FW:
		ql_qry_fw(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_CHIP:
		ql_qry_chip(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_CNA_PORT:
		ql_qry_cna_port(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_ADAPTER_VERSIONS:
		ql_qry_adapter_versions(ha, cmd, mode);
		break;
	case EXT_SC_QUERY_DISC_LUN:
		/* DISC_LUN deliberately falls through to unsupported. */
	default:
		/* function not supported. */
		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
		EL(ha, "failed, Unsupported Subcode=%xh\n",
		    cmd->SubCode);
		break;
	}

	QL_PRINT_9(ha, "done\n");
}
801
802 /*
803 * ql_qry_hba_node
804 * Performs EXT_SC_QUERY_HBA_NODE subfunction.
805 *
806 * Input:
807 * ha: adapter state pointer.
808 * cmd: EXT_IOCTL cmd struct pointer.
809 * mode: flags.
810 *
811 * Returns:
812 * None, request status indicated in cmd->Status.
813 *
814 * Context:
815 * Kernel context.
816 */
static void
ql_qry_hba_node(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_HBA_NODE	tmp_node = {0};
	uint_t		len;
	caddr_t		bufp;

	QL_PRINT_9(ha, "started\n");

	/* The user buffer must hold a complete EXT_HBA_NODE. */
	if (cmd->ResponseLen < sizeof (EXT_HBA_NODE)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_HBA_NODE);
		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, "
		    "Len=%xh\n", cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* fill in the values */

	bcopy(ha->loginparams.node_ww_name.raw_wwn, tmp_node.WWNN,
	    EXT_DEF_WWN_NAME_SIZE);

	(void) sprintf((char *)(tmp_node.Manufacturer), "QLogic Corporation");

	(void) sprintf((char *)(tmp_node.Model), "%x", ha->device_id);

	/* Serial number is derived from the low 3 bytes of the WWNN. */
	bcopy(&tmp_node.WWNN[5], tmp_node.SerialNum, 3);

	(void) sprintf((char *)(tmp_node.DriverVersion), QL_VERSION);

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		size_t		verlen;
		uint16_t	w;
		char		*tmpptr;

		/* Append FPGA revision after the driver version string. */
		verlen = strlen((char *)(tmp_node.DriverVersion));
		if (verlen + 5 > EXT_DEF_MAX_STR_SIZE) {
			EL(ha, "failed, No room for fpga version string\n");
		} else {
			w = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
			    (uint16_t *)
			    (ha->sbus_fpga_iobase + FPGA_REVISION));

			tmpptr = (char *)&(tmp_node.DriverVersion[verlen + 1]);
			if (tmpptr == NULL) {
				EL(ha, "Unable to insert fpga version str\n");
			} else {
				(void) sprintf(tmpptr, "%d.%d",
				    ((w & 0xf0) >> 4), (w & 0x0f));
				tmp_node.DriverAttr |= EXT_CC_HBA_NODE_SBUS;
			}
		}
	}

	(void) sprintf((char *)(tmp_node.FWVersion), "%01d.%02d.%02d",
	    ha->fw_major_version, ha->fw_minor_version,
	    ha->fw_subminor_version);

	/* Append the firmware-attribute suffix for type-1 ISP firmware. */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
		switch (ha->fw_attributes) {
		case FWATTRIB_EF:
			(void) strcat((char *)(tmp_node.FWVersion), " EF");
			break;
		case FWATTRIB_TP:
			(void) strcat((char *)(tmp_node.FWVersion), " TP");
			break;
		case FWATTRIB_IP:
			(void) strcat((char *)(tmp_node.FWVersion), " IP");
			break;
		case FWATTRIB_IPX:
			(void) strcat((char *)(tmp_node.FWVersion), " IPX");
			break;
		case FWATTRIB_FL:
			(void) strcat((char *)(tmp_node.FWVersion), " FL");
			break;
		case FWATTRIB_FPX:
			/*
			 * NOTE(review): FWATTRIB_FPX appends " FLX";
			 * confirm the suffix is intentional.
			 */
			(void) strcat((char *)(tmp_node.FWVersion), " FLX");
			break;
		default:
	/* FCode version. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
	if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip, PROP_LEN_AND_VAL_ALLOC |
	    DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
	    (int *)&len) == DDI_PROP_SUCCESS) {
		if (len < EXT_DEF_MAX_STR_SIZE) {
			bcopy(bufp, tmp_node.OptRomVersion, len);
		} else {
			/* Truncate and force NUL termination. */
			bcopy(bufp, tmp_node.OptRomVersion,
			    EXT_DEF_MAX_STR_SIZE - 1);
			tmp_node.OptRomVersion[EXT_DEF_MAX_STR_SIZE - 1] =
			    '\0';
		}
		kmem_free(bufp, len);
	} else {
		(void) sprintf((char *)tmp_node.OptRomVersion, "0");
	}
	tmp_node.PortCount = 1;
	tmp_node.InterfaceType = EXT_DEF_FC_INTF_TYPE;

	tmp_node.MpiVersion[0] = ha->mpi_fw_major_version;
	tmp_node.MpiVersion[1] = ha->mpi_fw_minor_version;
	tmp_node.MpiVersion[2] = ha->mpi_fw_subminor_version;
	tmp_node.PepFwVersion[0] = ha->phy_fw_major_version;
	tmp_node.PepFwVersion[1] = ha->phy_fw_minor_version;
	tmp_node.PepFwVersion[2] = ha->phy_fw_subminor_version;
	if (ddi_copyout((void *)&tmp_node,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_HBA_NODE), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_HBA_NODE);
		QL_PRINT_9(ha, "done\n");
	}
}
938
939 /*
940 * ql_qry_hba_port
941 * Performs EXT_SC_QUERY_HBA_PORT subfunction.
942 *
943 * Input:
944 * ha: adapter state pointer.
945 * cmd: EXT_IOCTL cmd struct pointer.
946 * mode: flags.
947 *
948 * Returns:
949 * None, request status indicated in cmd->Status.
950 *
951 * Context:
952 * Kernel context.
953 */
static void
ql_qry_hba_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_link_t	*link;
	ql_tgt_t	*tq;
	ql_mbx_data_t	mr = {0};
	EXT_HBA_PORT	tmp_port = {0};
	int		rval;
	uint16_t	port_cnt, tgt_cnt, index;

	QL_PRINT_9(ha, "started\n");

	if (cmd->ResponseLen < sizeof (EXT_HBA_PORT)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_HBA_PORT);
		/*
		 * NOTE(review): message says EXT_HBA_NODE but the check
		 * above is against EXT_HBA_PORT.
		 */
		EL(ha, "failed, ResponseLen < EXT_HBA_NODE, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* fill in the values */

	bcopy(ha->loginparams.nport_ww_name.raw_wwn, tmp_port.WWPN,
	    EXT_DEF_WWN_NAME_SIZE);
	/* 24-bit FC address: domain/area/AL_PA. */
	tmp_port.Id[0] = 0;
	tmp_port.Id[1] = ha->d_id.b.domain;
	tmp_port.Id[2] = ha->d_id.b.area;
	tmp_port.Id[3] = ha->d_id.b.al_pa;

	/* For now we are initiator only driver */
	 */
	tmp_port.PortSpeed = EXT_PORTSPEED_NOT_NEGOTIATED;

	/* Translate the negotiated iiDMA rate only when the link is up. */
	if (tmp_port.State == EXT_DEF_HBA_OK) {
		switch (ha->iidma_rate) {
		case IIDMA_RATE_1GB:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_1GBIT;
			break;
		case IIDMA_RATE_2GB:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_2GBIT;
			break;
		case IIDMA_RATE_4GB:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_4GBIT;
			break;
		case IIDMA_RATE_8GB:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_8GBIT;
			break;
		case IIDMA_RATE_10GB:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_10GBIT;
			break;
		case IIDMA_RATE_16GB:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_16GBIT;
			break;
		case IIDMA_RATE_32GB:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_32GBIT;
			break;
		default:
			tmp_port.PortSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
			EL(ha, "failed, data rate=%xh\n", mr.mb[1]);
			break;
		}
	}

	/* Report all supported port speeds */
	if (CFG_IST(ha, CFG_CTRL_25XX)) {
		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_8GBIT |
		    EXT_DEF_PORTSPEED_4GBIT | EXT_DEF_PORTSPEED_2GBIT |
		    EXT_DEF_PORTSPEED_1GBIT);
		/*
		 * Correct supported speeds based on type of
		 * sfp that is present
		 */
		switch (ha->sfp_stat) {
		case 1:
			/* no sfp detected */
			break;
		case 2:
		case 4:
			/* 4GB sfp */
			tmp_port.PortSupportedSpeed &=
			    ~EXT_DEF_PORTSPEED_8GBIT;
			break;
		case 3:
		case 5:
			/* 8GB sfp */
			tmp_port.PortSupportedSpeed &=
			    ~EXT_DEF_PORTSPEED_1GBIT;
			break;
		default:
			EL(ha, "sfp_stat: %xh\n", ha->sfp_stat);
			break;

		}
	} else if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_10GBIT;
	} else if (CFG_IST(ha, CFG_CTRL_24XX)) {
		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_4GBIT |
		    EXT_DEF_PORTSPEED_2GBIT | EXT_DEF_PORTSPEED_1GBIT);
	} else if (CFG_IST(ha, CFG_CTRL_23XX)) {
		tmp_port.PortSupportedSpeed = (EXT_DEF_PORTSPEED_2GBIT |
		    EXT_DEF_PORTSPEED_1GBIT);
	} else if (CFG_IST(ha, CFG_CTRL_63XX)) {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_2GBIT;
	} else if (CFG_IST(ha, CFG_CTRL_22XX)) {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_1GBIT;
	} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_4GBIT |
		    EXT_DEF_PORTSPEED_8GBIT | EXT_DEF_PORTSPEED_16GBIT;
	} else if (CFG_IST(ha, CFG_CTRL_27XX)) {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_4GBIT |
		    EXT_DEF_PORTSPEED_8GBIT | EXT_DEF_PORTSPEED_16GBIT |
		    EXT_DEF_PORTSPEED_32GBIT;
	} else {
		tmp_port.PortSupportedSpeed = EXT_DEF_PORTSPEED_UNKNOWN;
		EL(ha, "unknown HBA type: %xh\n", ha->device_id);
	}

	/* Refresh firmware state while the loop is down. */
	if (ha->task_daemon_flags & LOOP_DOWN) {
		(void) ql_get_firmware_state(ha, NULL);
	}

	tmp_port.LinkState1 = ha->fw_state[1];
	tmp_port.LinkState2 = LSB(ha->sfp_stat);
	tmp_port.LinkState3 = ha->fw_state[3];
	tmp_port.LinkState6 = ha->fw_state[6];

	port_cnt = 0;
	tgt_cnt = 0;

	/* Count stable, logged-in ports; targets exclude initiators. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
			    tq->d_id.b24 == FS_MANAGEMENT_SERVER) {
				continue;
			}

			if (tq->flags & (TQF_RSCN_RCVD | TQF_IIDMA_NEEDED |
			    TQF_NEED_AUTHENTICATION | TQF_PLOGI_PROGRS)) {
				continue;
			}

			port_cnt++;
			if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
				tgt_cnt++;
			}
		}
	}

	tmp_port.DiscPortCount = port_cnt;
	tmp_port.DiscTargetCount = tgt_cnt;

	tmp_port.DiscPortNameType = EXT_DEF_USE_NODE_NAME;

	rval = ddi_copyout((void *)&tmp_port,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_HBA_PORT), mode);
	if (rval != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_HBA_PORT);
		/*
		 * NOTE(review): two conversions but three arguments
		 * (ha->instance, port_cnt, tgt_cnt) — the printed
		 * counts are shifted by one argument; confirm.
		 */
		QL_PRINT_9(ha, "done, ports=%d, targets=%d\n",
		    ha->instance, port_cnt, tgt_cnt);
	}
}
1143
1144 /*
1145 * ql_qry_disc_port
1146 * Performs EXT_SC_QUERY_DISC_PORT subfunction.
1147 *
1148 * Input:
1149 * ha: adapter state pointer.
1150 * cmd: EXT_IOCTL cmd struct pointer.
1151 * mode: flags.
1152 *
1153 * cmd->Instance = Port instance in fcport chain.
1154 *
1155 * Returns:
1156 * None, request status indicated in cmd->Status.
1157 *
1158 * Context:
1159 * Kernel context.
1160 */
static void
ql_qry_disc_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_PORT	tmp_port = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(ha, "started\n");

	if (cmd->ResponseLen < sizeof (EXT_DISC_PORT)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_PORT);
		EL(ha, "failed, ResponseLen < EXT_DISC_PORT, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Walk the device hash lists until the cmd->Instance'th stable
	 * port is found; the outer loop stops once link is non-NULL.
	 */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
			    tq->d_id.b24 == FS_MANAGEMENT_SERVER) {
				continue;
			}

			if (tq->flags & (TQF_RSCN_RCVD | TQF_IIDMA_NEEDED |
			    TQF_NEED_AUTHENTICATION | TQF_PLOGI_PROGRS)) {
				continue;
			}

			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_port.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_port.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		EL(ha, "failed, port not found port=%d\n", cmd->Instance);
		cmd->ResponseLen = 0;

	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_port.Type = (uint16_t)(tmp_port.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_port.Status = 0;
	tmp_port.Bus = 0;	/* Hard-coded for Solaris */

	bcopy(tq->port_name, &tmp_port.TargetId, 8);

	if (ddi_copyout((void *)&tmp_port,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_PORT), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_PORT);
		QL_PRINT_9(ha, "done\n");
	}
}
1257
1258 /*
1259 * ql_qry_disc_tgt
1260 * Performs EXT_SC_QUERY_DISC_TGT subfunction.
1261 *
1262 * Input:
1263 * ha: adapter state pointer.
1264 * cmd: EXT_IOCTL cmd struct pointer.
1265 * mode: flags.
1266 *
1267 * cmd->Instance = Port instance in fcport chain.
1268 *
1269 * Returns:
1270 * None, request status indicated in cmd->Status.
1271 *
1272 * Context:
1273 * Kernel context.
1274 */
static void
ql_qry_disc_tgt(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_DISC_TARGET	tmp_tgt = {0};
	ql_link_t	*link;
	ql_tgt_t	*tq;
	uint16_t	index;
	uint16_t	inst = 0;

	QL_PRINT_9(ha, "started, target=%d\n",
	    cmd->Instance);

	if (cmd->ResponseLen < sizeof (EXT_DISC_TARGET)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_DISC_TARGET);
		EL(ha, "failed, ResponseLen < EXT_DISC_TARGET, Len=%xh\n",
		    cmd->ResponseLen);
		cmd->ResponseLen = 0;
		return;
	}

	/* Scan port list for requested target and fill in the values */
	for (link = NULL, index = 0;
	    index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;

			/* Targets only: initiators are skipped here. */
			if (!VALID_TARGET_ID(ha, tq->loop_id) ||
			    tq->flags & TQF_INITIATOR_DEVICE ||
			    tq->d_id.b24 == FS_MANAGEMENT_SERVER) {
				continue;
			}
			if (inst != cmd->Instance) {
				inst++;
				continue;
			}

			/* fill in the values */
			bcopy(tq->node_name, tmp_tgt.WWNN,
			    EXT_DEF_WWN_NAME_SIZE);
			bcopy(tq->port_name, tmp_tgt.WWPN,
			    EXT_DEF_WWN_NAME_SIZE);

			break;
		}
	}

	if (link == NULL) {
		/* no matching device */
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
	if (tq->flags & TQF_FABRIC_DEVICE) {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_FABRIC_DEV);
	} else {
		tmp_tgt.Type = (uint16_t)(tmp_tgt.Type | EXT_DEF_TARGET_DEV);
	}

	tmp_tgt.Status = 0;

	tmp_tgt.Bus = 0;	/* Hard-coded for Solaris. */

	bcopy(tq->port_name, &tmp_tgt.TargetId, 8);

	if (ddi_copyout((void *)&tmp_tgt,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    sizeof (EXT_DISC_TARGET), mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = sizeof (EXT_DISC_TARGET);
		QL_PRINT_9(ha, "done\n");
	}
}
1370
1371 /*
1372 * ql_qry_fw
1373 * Performs EXT_SC_QUERY_FW subfunction.
1374 *
1375 * Input:
1376 * ha: adapter state pointer.
1377 * cmd: EXT_IOCTL cmd struct pointer.
1378 * mode: flags.
1379 *
1380 * Returns:
1381 * None, request status indicated in cmd->Status.
1382 *
1383 * Context:
1384 * Kernel context.
1385 */
1386 static void
1387 ql_qry_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1388 {
1389 EXT_FW fw_info = {0};
1390
1391 QL_PRINT_9(ha, "started\n");
1392
1393 if (cmd->ResponseLen < sizeof (EXT_FW)) {
1394 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1395 cmd->DetailStatus = sizeof (EXT_FW);
1396 EL(ha, "failed, ResponseLen < EXT_FW, Len=%xh\n",
1397 cmd->ResponseLen);
1398 cmd->ResponseLen = 0;
1399 return;
1400 }
1401
1402 (void) sprintf((char *)(fw_info.Version), "%d.%02d.%02d",
1403 ha->fw_major_version, ha->fw_minor_version,
1404 ha->fw_subminor_version);
1405
1406 fw_info.Attrib = ha->fw_attributes;
1407
1408 if (ddi_copyout((void *)&fw_info,
1409 (void *)(uintptr_t)(cmd->ResponseAdr),
1410 sizeof (EXT_FW), mode) != 0) {
1411 cmd->Status = EXT_STATUS_COPY_ERR;
1412 cmd->ResponseLen = 0;
1413 EL(ha, "failed, ddi_copyout\n");
1414 return;
1415 } else {
1416 cmd->ResponseLen = sizeof (EXT_FW);
1417 QL_PRINT_9(ha, "done\n");
1418 }
1419 }
1420
1421 /*
1422 * ql_qry_chip
1423 * Performs EXT_SC_QUERY_CHIP subfunction.
1424 *
1425 * Input:
1426 * ha: adapter state pointer.
1427 * cmd: EXT_IOCTL cmd struct pointer.
1428 * mode: flags.
1429 *
1430 * Returns:
1431 * None, request status indicated in cmd->Status.
1432 *
1433 * Context:
1434 * Kernel context.
1435 */
1436 static void
1437 ql_qry_chip(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1438 {
1439 EXT_CHIP chip = {0};
1440 uint16_t PciDevNumber;
1441
1442 QL_PRINT_9(ha, "started\n");
1443
1444 if (cmd->ResponseLen < sizeof (EXT_CHIP)) {
1445 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
1446 cmd->DetailStatus = sizeof (EXT_CHIP);
1447 EL(ha, "failed, ResponseLen < EXT_CHIP, Len=%xh\n",
1448 cmd->ResponseLen);
1449 cmd->ResponseLen = 0;
1450 return;
1451 }
1452
1453 chip.VendorId = ha->ven_id;
1454 chip.DeviceId = ha->device_id;
1455 chip.SubVendorId = ha->subven_id;
1456 chip.SubSystemId = ha->subsys_id;
1457 chip.IoAddr = ql_pci_config_get32(ha, PCI_CONF_BASE0);
1458 chip.IoAddrLen = 0x100;
1459 chip.MemAddr = ql_pci_config_get32(ha, PCI_CONF_BASE1);
1460 chip.MemAddrLen = 0x100;
1461 chip.ChipRevID = ha->rev_id;
1462 chip.FuncNo = ha->pci_function_number;
1463 chip.PciBusNumber = (uint16_t)
1464 ((ha->pci_bus_addr & PCI_REG_BUS_M) >> PCI_REG_BUS_SHIFT);
1465
1466 PciDevNumber = (uint16_t)
1467 ((ha->pci_bus_addr & PCI_REG_DEV_M) >> PCI_REG_DEV_SHIFT);
1468 chip.PciSlotNumber = (uint16_t)(((PciDevNumber << 3) & 0xF8) |
1469 (chip.FuncNo & 0x7));
1470
1471 if (ddi_copyout((void *)&chip,
1472 (void *)(uintptr_t)(cmd->ResponseAdr),
1473 sizeof (EXT_CHIP), mode) != 0) {
1474 cmd->Status = EXT_STATUS_COPY_ERR;
1475 cmd->ResponseLen = 0;
1476 EL(ha, "failed, ddi_copyout\n");
1477 } else {
1478 cmd->ResponseLen = sizeof (EXT_CHIP);
1479 QL_PRINT_9(ha, "done\n");
1480 }
1481 }
1482
1483 /*
1484 * ql_qry_driver
1485 * Performs EXT_SC_QUERY_DRIVER subfunction.
1486 *
1487 * Input:
1488 * ha: adapter state pointer.
1489 * cmd: EXT_IOCTL cmd struct pointer.
1490 * mode: flags.
1491 *
1492 * Returns:
1493 * None, request status indicated in cmd->Status.
1494 *
1495 * Context:
1496 * Kernel context.
1497 */
1498 static void
1499 ql_qry_driver(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1500 {
1501 EXT_DRIVER qd = {0};
1502
1503 QL_PRINT_9(ha, "started\n");
1504
1505 if (cmd->ResponseLen < sizeof (EXT_DRIVER)) {
1506 cmd->Status = EXT_STATUS_DATA_OVERRUN;
1507 cmd->DetailStatus = sizeof (EXT_DRIVER);
1508 EL(ha, "failed, ResponseLen < EXT_DRIVER, Len=%xh\n",
1509 cmd->ResponseLen);
1510 cmd->ResponseLen = 0;
1511 return;
1512 }
1513
1514 (void) strcpy((void *)&qd.Version[0], QL_VERSION);
1515 qd.NumOfBus = 1; /* Fixed for Solaris */
1516 qd.TargetsPerBus = (uint16_t)
1517 (CFG_IST(ha, (CFG_ISP_FW_TYPE_2 | CFG_EXT_FW_INTERFACE)) ?
1518 MAX_24_FIBRE_DEVICES : MAX_22_FIBRE_DEVICES);
1519 qd.LunsPerTarget = 2030;
1520 qd.MaxTransferLen = QL_DMA_MAX_XFER_SIZE;
1521 qd.MaxDataSegments = QL_DMA_SG_LIST_LENGTH;
1522
1523 if (ddi_copyout((void *)&qd, (void *)(uintptr_t)cmd->ResponseAdr,
1524 sizeof (EXT_DRIVER), mode) != 0) {
1525 cmd->Status = EXT_STATUS_COPY_ERR;
1526 cmd->ResponseLen = 0;
1527 EL(ha, "failed, ddi_copyout\n");
1528 } else {
1529 cmd->ResponseLen = sizeof (EXT_DRIVER);
1530 QL_PRINT_9(ha, "done\n");
1531 }
1532 }
1533
1534 /*
1535 * ql_fcct
1536 * IOCTL management server FC-CT passthrough.
1537 *
1538 * Input:
1539 * ha: adapter state pointer.
1540 * cmd: User space CT arguments pointer.
1541 * mode: flags.
1542 *
1543 * Returns:
1544 * None, request status indicated in cmd->Status.
1545 *
1546 * Context:
1547 * Kernel context.
1548 */
static void
ql_fcct(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_mbx_iocb_t		*pkt;
	ql_mbx_data_t		mr;
	dma_mem_t		*dma_mem;
	caddr_t			pld;
	uint32_t		pkt_size, pld_byte_cnt, *long_ptr;
	int			rval;
	ql_ct_iu_preamble_t	*ct;
	ql_xioctl_t		*xp = ha->xioctl;
	ql_tgt_t		tq;
	uint16_t		comp_status, loop_id;

	QL_PRINT_9(ha, "started\n");

	/* CT passthrough requires a fabric (switch) connection. */
	if ((ha->topology & QL_FABRIC_CONNECTION) == 0) {
		EL(ha, "failed, No switch\n");
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->ResponseLen = 0;
		return;
	}

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Login management server device. */
	if ((xp->flags & QL_MGMT_SERVER_LOGIN) == 0) {
		/* Well-known management server address 0xFFFFFA. */
		tq.d_id.b.al_pa = 0xfa;
		tq.d_id.b.area = 0xff;
		tq.d_id.b.domain = 0xff;
		tq.loop_id = (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
		    MANAGEMENT_SERVER_24XX_LOOP_ID :
		    MANAGEMENT_SERVER_LOOP_ID);
		rval = ql_login_fport(ha, &tq, tq.loop_id, LFF_NO_PRLI, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, server login\n");
			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
			cmd->ResponseLen = 0;
			return;
		} else {
			/* Remember the login so it is done only once. */
			xp->flags |= QL_MGMT_SERVER_LOGIN;
		}
	}

	QL_PRINT_9(ha, "cmd\n");
	QL_DUMP_9(cmd, 8, sizeof (EXT_IOCTL));

	/* Allocate a DMA Memory Descriptor */
	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
	if (dma_mem == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}
	/*
	 * Determine maximum buffer size: one DMA buffer serves both the
	 * outbound command and the inbound response, so take the larger.
	 */
	if (cmd->RequestLen < cmd->ResponseLen) {
		pld_byte_cnt = cmd->ResponseLen;
	} else {
		pld_byte_cnt = cmd->RequestLen;
	}

	/* Allocate command block. */
	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_byte_cnt);
	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}
	/* Payload area follows the IOCB in the same allocation. */
	pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);

	/* Get command payload data. */
	if (ql_get_buffer_data((caddr_t)(uintptr_t)cmd->RequestAdr, pld,
	    cmd->RequestLen, mode) != cmd->RequestLen) {
		EL(ha, "failed, get_buffer_data\n");
		kmem_free(pkt, pkt_size);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, dma_mem, pkt_size, LITTLE_ENDIAN_DMA,
	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%sDMA memory "
		    "alloc failed", QL_NAME);
		kmem_free(pkt, pkt_size);
		kmem_free(dma_mem, sizeof (dma_mem_t));
		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
		cmd->ResponseLen = 0;
		return;
	}

	/* Copy out going payload data to IOCB DMA buffer. */
	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
	    (uint8_t *)dma_mem->bp, pld_byte_cnt, DDI_DEV_AUTOINCR);

	/* Sync IOCB DMA buffer. */
	(void) ddi_dma_sync(dma_mem->dma_handle, 0, pld_byte_cnt,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Setup IOCB
	 */
	ct = (ql_ct_iu_preamble_t *)pld;
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		/* 24xx-style CT passthrough IOCB. */
		pkt->ms24.entry_type = CT_PASSTHRU_TYPE;
		pkt->ms24.entry_count = 1;

		pkt->ms24.vp_index = ha->vp_index;

		/* Set loop ID */
		pkt->ms24.n_port_hdl = (uint16_t)
		    (ct->gs_type == GS_TYPE_DIR_SERVER ?
		    LE_16(SNS_24XX_HDL) :
		    LE_16(MANAGEMENT_SERVER_24XX_LOOP_ID));

		/* Set ISP command timeout. */
		pkt->ms24.timeout = LE_16(120);

		/* Set cmd/response data segment counts. */
		pkt->ms24.cmd_dseg_count = LE_16(1);
		pkt->ms24.resp_dseg_count = LE_16(1);

		/* Load ct cmd byte count. */
		pkt->ms24.cmd_byte_count = LE_32(cmd->RequestLen);

		/* Load ct rsp byte count. */
		pkt->ms24.resp_byte_count = LE_32(cmd->ResponseLen);

		long_ptr = (uint32_t *)&pkt->ms24.dseg;

		/* Load MS command entry data segments. */
		*long_ptr++ = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		*long_ptr++ = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
		*long_ptr++ = (uint32_t)(LE_32(cmd->RequestLen));

		/* Load MS response entry data segments. */
		*long_ptr++ = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		*long_ptr++ = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
		*long_ptr = (uint32_t)LE_32(cmd->ResponseLen);

		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
		if (comp_status == CS_DATA_UNDERRUN) {

		/* Set loop ID */
		loop_id = (uint16_t)(ct->gs_type == GS_TYPE_DIR_SERVER ?
		    SIMPLE_NAME_SERVER_LOOP_ID : MANAGEMENT_SERVER_LOOP_ID);
		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
			pkt->ms.loop_id_l = LSB(loop_id);
			pkt->ms.loop_id_h = MSB(loop_id);
		} else {
			pkt->ms.loop_id_h = LSB(loop_id);
		}

		/* Set ISP command timeout. */
		pkt->ms.timeout = LE_16(120);

		/* Set data segment counts. */
		pkt->ms.cmd_dseg_count_l = 1;
		pkt->ms.total_dseg_count = LE_16(2);

		/* Response total byte count. */
		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
		pkt->ms.dseg[1].length = LE_32(cmd->ResponseLen);

		/* Command total byte count. */
		pkt->ms.cmd_byte_count = LE_32(cmd->RequestLen);
		pkt->ms.dseg[0].length = LE_32(cmd->RequestLen);

		/* Load command/response data segments. */
		pkt->ms.dseg[0].address[0] = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		pkt->ms.dseg[0].address[1] = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
		pkt->ms.dseg[1].address[0] = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		pkt->ms.dseg[1].address[1] = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));

		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
		if (comp_status == CS_DATA_UNDERRUN) {
			/* Zero residual means the transfer was complete. */
			if ((BE_16(ct->max_residual_size)) == 0) {
				comp_status = CS_COMPLETE;
			}
		}
		if (rval != QL_SUCCESS || (pkt->sts.entry_status & 0x7e) != 0) {
			EL(ha, "failed, I/O timeout or "
			    "es=%xh, rval=%xh\n", pkt->sts.entry_status, rval);
			kmem_free(pkt, pkt_size);
			ql_free_dma_resource(ha, dma_mem);
			kmem_free(dma_mem, sizeof (dma_mem_t));
			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
			cmd->ResponseLen = 0;
			return;
		}
	}

	/* Sync in coming DMA buffer. */
	(void) ddi_dma_sync(dma_mem->dma_handle, 0,
	    pld_byte_cnt, DDI_DMA_SYNC_FORKERNEL);
	/* Copy in coming DMA data. */
	ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
	    (uint8_t *)dma_mem->bp, pld_byte_cnt,
	    DDI_DEV_AUTOINCR);

	/* Copy response payload from DMA buffer to application. */
	if (cmd->ResponseLen != 0) {
		QL_PRINT_9(ha, "ResponseLen=%d\n",
		    cmd->ResponseLen);
		QL_DUMP_9(pld, 8, cmd->ResponseLen);

		/* Send response payload. */
		if (ql_send_buffer_data(pld,
		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
			EL(ha, "failed, send_buffer_data\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->ResponseLen = 0;
		}
	}

	/* Release the IOCB/payload buffer and the DMA resources. */
	kmem_free(pkt, pkt_size);
	ql_free_dma_resource(ha, dma_mem);
	kmem_free(dma_mem, sizeof (dma_mem_t));

	QL_PRINT_9(ha, "done\n");
}
1816
1817 /*
1818 * ql_aen_reg
1819 * IOCTL management server Asynchronous Event Tracking Enable/Disable.
1820 *
1821 * Input:
1822 * ha: adapter state pointer.
1823 * cmd: EXT_IOCTL cmd struct pointer.
1824 * mode: flags.
1825 *
1826 * Returns:
1827 * None, request status indicated in cmd->Status.
1828 *
1829 * Context:
1830 * Kernel context.
1831 */
1832 static void
1833 ql_aen_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
1834 {
1835 EXT_REG_AEN reg_struct;
1836 int rval = 0;
1837 ql_xioctl_t *xp = ha->xioctl;
1838
1839 QL_PRINT_9(ha, "started\n");
1840
1841 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, ®_struct,
1842 cmd->RequestLen, mode);
1843
1844 if (rval == 0) {
1845 if (reg_struct.Enable) {
1846 xp->flags |= QL_AEN_TRACKING_ENABLE;
1847 } else {
1848 xp->flags &= ~QL_AEN_TRACKING_ENABLE;
1849 /* Empty the queue. */
1850 INTR_LOCK(ha);
1851 xp->aen_q_head = 0;
1852 xp->aen_q_tail = 0;
1853 INTR_UNLOCK(ha);
1854 }
1855 QL_PRINT_9(ha, "done\n");
1856 } else {
1857 cmd->Status = EXT_STATUS_COPY_ERR;
1858 EL(ha, "failed, ddi_copyin\n");
1859 }
1860 }
1861
1862 /*
1863 * ql_aen_get
1864 * IOCTL management server Asynchronous Event Record Transfer.
1865 *
1866 * Input:
1867 * ha: adapter state pointer.
1868 * cmd: EXT_IOCTL cmd struct pointer.
1869 * mode: flags.
1870 *
1871 * Returns:
1872 * None, request status indicated in cmd->Status.
1873 *
1874 * Context:
1875 * Kernel context.
1876 */
static void
ql_aen_get(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	out_size;
	EXT_ASYNC_EVENT	*tmp_q;
	EXT_ASYNC_EVENT	aen[EXT_DEF_MAX_AEN_QUEUE];	/* local snapshot */
	uint8_t		i;
	uint8_t		queue_cnt;	/* events accumulated for copyout */
	uint8_t		request_cnt;	/* events the caller's buffer holds */
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(ha, "started\n");

	/* Compute the number of events that can be returned */
	request_cnt = (uint8_t)(cmd->ResponseLen / sizeof (EXT_ASYNC_EVENT));

	/* Caller must supply room for the whole queue, or get nothing. */
	if (request_cnt < EXT_DEF_MAX_AEN_QUEUE) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = EXT_DEF_MAX_AEN_QUEUE;
		EL(ha, "failed, request_cnt < EXT_DEF_MAX_AEN_QUEUE, "
		    "Len=%xh\n", request_cnt);
		cmd->ResponseLen = 0;
		return;
	}

	/* 1st: Make a local copy of the entire queue content. */
	tmp_q = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;
	queue_cnt = 0;

	/* Snapshot under the interrupt lock so producers cannot race us. */
	INTR_LOCK(ha);
	i = xp->aen_q_head;

	/* Empty the queue. */
	xp->aen_q_head = 0;
	xp->aen_q_tail = 0;

	INTR_UNLOCK(ha);

	/* 2nd: Now transfer the queue content to user buffer */
	/* Copy the entire queue to user's buffer. */
	out_size = (uint32_t)(queue_cnt * sizeof (EXT_ASYNC_EVENT));
	if (queue_cnt == 0) {
		/* Nothing queued; report an empty response, not an error. */
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&aen[0],
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    out_size, mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = out_size;
		QL_PRINT_9(ha, "done\n");
	}
}
1948
1949 /*
1950 * ql_enqueue_aen
1951 *
1952 * Input:
1953 * ha: adapter state pointer.
1954 * event_code: async event code of the event to add to queue.
1955 * payload: event payload for the queue.
1956 * INTR_LOCK must be already obtained.
1957 *
1958 * Context:
1959 * Interrupt or Kernel context, no mailbox commands allowed.
1960 */
void
ql_enqueue_aen(ql_adapter_state_t *ha, uint16_t event_code, void *payload)
{
	uint8_t			new_entry;	/* index to current entry */
	uint16_t		*mbx;
	EXT_ASYNC_EVENT		*aen_queue;
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_9(ha, "started, event_code=%d\n",
	    event_code);

	/* Nothing to do if the ioctl context was never set up. */
	if (xp == NULL) {
		QL_PRINT_9(ha, "no context\n");
		return;
	}
	aen_queue = (EXT_ASYNC_EVENT *)xp->aen_tracking_queue;

	/* A non-zero code in the tail slot means that slot is occupied. */
	if (aen_queue[xp->aen_q_tail].AsyncEventCode != NULL) {
		/* Need to change queue pointers to make room. */

		/* Increment tail for adding new entry. */
		xp->aen_q_tail++;
		if (xp->aen_q_tail == EXT_DEF_MAX_AEN_QUEUE) {
			/* Wrap around the fixed-size ring. */
			xp->aen_q_tail = 0;
		}
		if (xp->aen_q_head == xp->aen_q_tail) {
			/*
			 * We're overwriting the oldest entry, so need to
			 * update the head pointer.
			 */
			xp->aen_q_head++;
			if (xp->aen_q_head == EXT_DEF_MAX_AEN_QUEUE) {
				xp->aen_q_head = 0;
			/* domain */
			aen_queue[new_entry].Payload.RSCN.RSCNInfo[2] =
			    LSB(mbx[1]);
			/* save in big endian */
			BIG_ENDIAN_24(&aen_queue[new_entry].
			    Payload.RSCN.RSCNInfo[0]);

			aen_queue[new_entry].Payload.RSCN.AddrFormat =
			    MSB(mbx[1]);

			break;
		default:
			/* Not supported */
			EL(ha, "failed, event code not supported=%xh\n",
			    event_code);
			aen_queue[new_entry].AsyncEventCode = 0;
			break;
		}
	}

	QL_PRINT_9(ha, "done\n");
}
2041
2042 /*
2043 * ql_scsi_passthru
2044 * IOCTL SCSI passthrough.
2045 *
2046 * Input:
2047 * ha: adapter state pointer.
2048 * cmd: User space SCSI command pointer.
2049 * mode: flags.
2050 *
2051 * Returns:
2052 * None, request status indicated in cmd->Status.
2053 *
2054 * Context:
2055 * Kernel context.
2056 */
static void
ql_scsi_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
		size_t		resid;		/* Residual */
		uint8_t		*cdbp;		/* Requestor's CDB */
		uint8_t		*u_sense;	/* Requestor's sense buffer */
		uint8_t		cdb_len;	/* Requestor's CDB length */
		uint8_t		direction;
	} scsi_req;

	/* Decoded completion/status info extracted from the IOCB reply. */
	struct {
		uint8_t		*rsp_info;
		uint8_t		*req_sense_data;
		uint32_t	residual_length;
		uint32_t	rsp_info_length;
		uint32_t	req_sense_length;
		uint16_t	comp_status;
		uint8_t		state_flags_l;
		uint8_t		state_flags_h;
		uint8_t		scsi_status_l;
		uint8_t		scsi_status_h;
	} sts;

	QL_PRINT_9(ha, "started\n");

	/* Verify Sub Code and set cnt to needed request size. */
	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
		pld_size = sizeof (EXT_SCSI_PASSTHRU);
	} else if (cmd->SubCode == EXT_SC_SEND_FC_SCSI_PASSTHRU) {
		pld_size = sizeof (EXT_FC_SCSI_PASSTHRU);
	} else {
		EL(ha, "failed, invalid SubCode=%xh\n", cmd->SubCode);
		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * NOTE(review): kmem_zalloc(..., KM_SLEEP) sleeps for memory and
	 * does not return NULL, so this failure branch appears to be dead
	 * code -- confirm.
	 */
	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
	if (dma_mem == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Find fc_port from SCSI PASSTHRU structure fill in the scsi_req
	 * request data structure.
	 */
	if (cmd->SubCode == EXT_SC_SEND_SCSI_PASSTHRU) {
		scsi_req.lun = sp_req->TargetAddr.Lun;
		scsi_req.sense_length = sizeof (sp_req->SenseData);
		scsi_req.cdbp = &sp_req->Cdb[0];
		scsi_req.cdb_len = sp_req->CdbLength;
		scsi_req.direction = sp_req->Direction;
		usp_req = (EXT_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
		scsi_req.u_sense = &usp_req->SenseData[0];
		cmd->DetailStatus = EXT_DSTATUS_TARGET;

		qlnt = QLNT_PORT;
		name = (uint8_t *)&sp_req->TargetAddr.Target;
		QL_PRINT_9(ha, "SubCode=%xh, Target=%lld\n",
		    ha->instance, cmd->SubCode, sp_req->TargetAddr.Target);
		tq = ql_find_port(ha, name, qlnt);
	} else {
		/*
		 * Must be FC PASSTHRU, verified above.
		 */
		if (fc_req->FCScsiAddr.DestType == EXT_DEF_DESTTYPE_WWPN) {
			qlnt = QLNT_PORT;
			name = &fc_req->FCScsiAddr.DestAddr.WWPN[0];
			QL_PRINT_9(ha, "SubCode=%xh, "
			    "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
			    ha->instance, cmd->SubCode, name[0], name[1],
			    name[2], name[3], name[4], name[5], name[6],
			    name[7]);
			tq = ql_find_port(ha, name, qlnt);
		} else if (fc_req->FCScsiAddr.DestType ==
		    EXT_DEF_DESTTYPE_WWNN) {
			qlnt = QLNT_NODE;
			name = &fc_req->FCScsiAddr.DestAddr.WWNN[0];
			QL_PRINT_9(ha, "SubCode=%xh, "
			    "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
			    ha->instance, cmd->SubCode, name[0], name[1],
			    name[2], name[3], name[4], name[5], name[6],
			    name[7]);
			tq = ql_find_port(ha, name, qlnt);
		} else if (fc_req->FCScsiAddr.DestType ==
		    EXT_DEF_DESTTYPE_PORTID) {
			qlnt = QLNT_PID;
			name = &fc_req->FCScsiAddr.DestAddr.Id[0];
			QL_PRINT_9(ha, "SubCode=%xh, PID="
			    "%02x%02x%02x\n", cmd->SubCode,
			    name[0], name[1], name[2]);
			tq = ql_find_port(ha, name, qlnt);
		} else {
			EL(ha, "failed, SubCode=%xh invalid DestType=%xh\n",
			    cmd->SubCode, fc_req->FCScsiAddr.DestType);
			cmd->Status = EXT_STATUS_INVALID_PARAM;
			cmd->ResponseLen = 0;
			/*
			 * NOTE(review): returns without freeing the dma_mem
			 * allocated above -- looks like a leak; confirm.
			 */
			return;
		}
		scsi_req.lun = fc_req->FCScsiAddr.Lun;
		scsi_req.sense_length = sizeof (fc_req->SenseData);
		/*
		 * NOTE(review): the FC branch reads the CDB through sp_req
		 * rather than fc_req; both overlay the same request buffer
		 * but the two structure layouts may place Cdb/CdbLength at
		 * different offsets -- confirm this is intentional.
		 */
		scsi_req.cdbp = &sp_req->Cdb[0];
		scsi_req.cdb_len = sp_req->CdbLength;
		ufc_req = (EXT_FC_SCSI_PASSTHRU *)(uintptr_t)cmd->RequestAdr;
		scsi_req.u_sense = &ufc_req->SenseData[0];
		scsi_req.direction = fc_req->Direction;
	}

	if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
		EL(ha, "failed, fc_port not found\n");
	}

	/* Allocate command block. */
	if ((scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN ||
	    scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_OUT) &&
	    cmd->ResponseLen) {
		/* Data transfer requested: payload rides behind the IOCB. */
		pld_size = cmd->ResponseLen;
		pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + pld_size);
		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
		if (pkt == NULL) {
			EL(ha, "failed, kmem_zalloc\n");
			cmd->Status = EXT_STATUS_NO_MEMORY;
			cmd->ResponseLen = 0;
			/* NOTE(review): dma_mem not freed here -- confirm. */
			return;
		}
		pld = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);

		/* Get DMA memory for the IOCB */
		if (ql_get_dma_mem(ha, dma_mem, pld_size, LITTLE_ENDIAN_DMA,
		    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
			cmn_err(CE_WARN, "%srequest queue DMA memory "
			    "alloc failed", QL_NAME);
			kmem_free(pkt, pkt_size);
			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
			cmd->ResponseLen = 0;
			return;
		}

		if (scsi_req.direction == EXT_DEF_SCSI_PASSTHRU_DATA_IN) {
			scsi_req.direction = (uint8_t)
			    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
			    CF_RD : CF_DATA_IN | CF_STAG);
		} else {
			scsi_req.direction = (uint8_t)
			    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
			    CF_WR : CF_DATA_OUT | CF_STAG);
			cmd->ResponseLen = 0;

			/* Get command payload. */
			if (ql_get_buffer_data(
			    (caddr_t)(uintptr_t)cmd->ResponseAdr,
			    pld, pld_size, mode) != pld_size) {
				EL(ha, "failed, get_buffer_data\n");
				cmd->Status = EXT_STATUS_COPY_ERR;

				kmem_free(pkt, pkt_size);
				ql_free_dma_resource(ha, dma_mem);
				kmem_free(dma_mem, sizeof (dma_mem_t));
				return;
			}

			/* Copy out going data to DMA buffer. */
			ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
			    (uint8_t *)dma_mem->bp, pld_size,
			    DDI_DEV_AUTOINCR);

			/* Sync DMA buffer. */
			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
		}
	} else {
		/* No data phase: IOCB only, no payload buffer. */
		scsi_req.direction = (uint8_t)
		    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ? 0 : CF_STAG);
		cmd->ResponseLen = 0;

		pkt_size = sizeof (ql_mbx_iocb_t);
		pkt = kmem_zalloc(pkt_size, KM_SLEEP);
		if (pkt == NULL) {
			EL(ha, "failed, kmem_zalloc-2\n");
			cmd->Status = EXT_STATUS_NO_MEMORY;
			return;
		}
		pld = NULL;
		pld_size = 0;
	}

	/* retries = ha->port_down_retry_count; */
	retries = 1;
	cmd->Status = EXT_STATUS_OK;
	cmd->DetailStatus = EXT_DSTATUS_NOADNL_INFO;

	QL_PRINT_9(ha, "SCSI cdb\n");
	QL_DUMP_9(scsi_req.cdbp, 8, scsi_req.cdb_len);

	do {
		if (DRIVER_SUSPENDED(ha)) {
			sts.comp_status = CS_LOOP_DOWN_ABORT;
			break;
		}

		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			/* 24xx-class firmware: build a type-7 command IOCB. */
			uint64_t		lun_addr = 0;
			fcp_ent_addr_t		*fcp_ent_addr = 0;

			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set LUN number and address method */
			lun_addr = ql_get_lun_addr(tq, scsi_req.lun);
			fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;

			pkt->cmd24.fcp_lun[2] =
			    lobyte(fcp_ent_addr->ent_addr_0);
			pkt->cmd24.fcp_lun[3] =
			    hibyte(fcp_ent_addr->ent_addr_0);
			pkt->cmd24.fcp_lun[0] =
			    lobyte(fcp_ent_addr->ent_addr_1);
			pkt->cmd24.fcp_lun[1] =
			    hibyte(fcp_ent_addr->ent_addr_1);
			pkt->cmd24.fcp_lun[6] =
			    lobyte(fcp_ent_addr->ent_addr_2);
			pkt->cmd24.fcp_lun[7] =
			    hibyte(fcp_ent_addr->ent_addr_2);
			pkt->cmd24.fcp_lun[4] =
			    lobyte(fcp_ent_addr->ent_addr_3);
			pkt->cmd24.fcp_lun[5] =
			    hibyte(fcp_ent_addr->ent_addr_3);

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);

			/* Set VP Index */
			pkt->cmd24.vp_index = ha->vp_index;

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = (uint16_t)LE_16(15);

			/* Load SCSI CDB */
			ddi_rep_put8(ha->req_q[0]->req_ring.acc_handle,
			    scsi_req.cdbp, pkt->cmd24.scsi_cdb,
			    scsi_req.cdb_len, DDI_DEV_AUTOINCR);
			/* Byte-swap the CDB in 4-byte words for the FW. */
			for (cnt = 0; cnt < MAX_CMDSZ;
			    cnt = (uint16_t)(cnt + 4)) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			if (pld_size) {
				/* Set transfer direction. */
				pkt->cmd24.control_flags = scsi_req.direction;

				/* Set data segment count. */
				pkt->cmd24.dseg_count = LE_16(1);

				/* Load total byte count. */
				pkt->cmd24.total_byte_count = LE_32(pld_size);

				/* Load data descriptor. */
				pkt->cmd24.dseg.address[0] = (uint32_t)
				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
				pkt->cmd24.dseg.address[1] = (uint32_t)
				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
				pkt->cmd24.dseg.length = LE_32(pld_size);
			}
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* Legacy firmware with 64-bit DMA: type-3 IOCB. */
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			pkt->cmd3.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.lun_l = LSB(scsi_req.lun);
			pkt->cmd3.lun_h = MSB(scsi_req.lun);
			pkt->cmd3.control_flags_l = scsi_req.direction;
			pkt->cmd3.timeout = LE_16(15);
			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
				pkt->cmd3.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
			}
			if (pld_size) {
				pkt->cmd3.dseg_count = LE_16(1);
				pkt->cmd3.byte_count = LE_32(pld_size);
				pkt->cmd3.dseg[0].address[0] = (uint32_t)
				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
				pkt->cmd3.dseg[0].address[1] = (uint32_t)
				    LE_32(MSD(dma_mem->cookie.dmac_laddress));
				pkt->cmd3.dseg[0].length = LE_32(pld_size);
			}
		} else {
			/* Legacy firmware, 32-bit DMA: type-2 IOCB. */
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			pkt->cmd.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.lun_l = LSB(scsi_req.lun);
			pkt->cmd.lun_h = MSB(scsi_req.lun);
			pkt->cmd.control_flags_l = scsi_req.direction;
			pkt->cmd.timeout = LE_16(15);
			for (cnt = 0; cnt < scsi_req.cdb_len; cnt++) {
				pkt->cmd.scsi_cdb[cnt] = scsi_req.cdbp[cnt];
			}
			if (pld_size) {
				pkt->cmd.dseg_count = LE_16(1);
				pkt->cmd.byte_count = LE_32(pld_size);
				pkt->cmd.dseg[0].address = (uint32_t)
				    LE_32(LSD(dma_mem->cookie.dmac_laddress));
				pkt->cmd.dseg[0].length = LE_32(pld_size);
			}
		}
		/* Go issue command and wait for completion. */
		QL_PRINT_9(ha, "request pkt\n");
		QL_DUMP_9(pkt, 8, pkt_size);

		status = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size);

		if (pld_size) {
			/* Sync in coming DMA buffer. */
			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
			    dma_mem->size, DDI_DMA_SYNC_FORKERNEL);
			/* Copy in coming DMA data. */
			ddi_rep_get8(dma_mem->acc_handle, (uint8_t *)pld,
			    (uint8_t *)dma_mem->bp, pld_size,
			    DDI_DEV_AUTOINCR);
		}

		/* Mask off don't-care bits of the entry status. */
		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
		} else {
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
		}

		if (status == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			status = QL_FUNCTION_PARAMETER_ERROR;
		}

		sts.comp_status = (uint16_t)
		    (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
		    LE_16(pkt->sts24.comp_status) :
		    LE_16(pkt->sts.comp_status));

		/*
		 * We have verified about all the request that can be so far.
		 * Now we need to start verification of our ability to
		 * actually issue the CDB.
		 */
		if (DRIVER_SUSPENDED(ha)) {
			sts.comp_status = CS_LOOP_DOWN_ABORT;
			break;
		} else if (status == QL_SUCCESS &&
		    (sts.comp_status == CS_PORT_LOGGED_OUT ||
		    sts.comp_status == CS_PORT_UNAVAILABLE)) {
			EL(ha, "login retry d_id=%xh\n", tq->d_id.b24);
			if (tq->flags & TQF_FABRIC_DEVICE) {
				rval = ql_login_fport(ha, tq, tq->loop_id,
				    LFF_NO_PLOGI, &mr);
				if (rval != QL_SUCCESS) {
					EL(ha, "failed, login_fport=%xh, "
		ql_free_dma_resource(ha, dma_mem);
		kmem_free(dma_mem, sizeof (dma_mem_t));
		cmd->Status = EXT_STATUS_SUSPENDED;
		cmd->ResponseLen = 0;
		return;
	}

	if (status != QL_SUCCESS) {
		/* Command error */
		EL(ha, "failed, I/O\n");
		kmem_free(pkt, pkt_size);
		ql_free_dma_resource(ha, dma_mem);
		kmem_free(dma_mem, sizeof (dma_mem_t));
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = status;
		cmd->ResponseLen = 0;
		return;
	}

	/* Setup status. */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		sts.scsi_status_l = pkt->sts24.scsi_status_l;
		sts.scsi_status_h = pkt->sts24.scsi_status_h;

		/* Setup residuals. */
		sts.residual_length = LE_32(pkt->sts24.residual_length);

		/* Setup state flags. */
		sts.state_flags_l = pkt->sts24.state_flags_l;
		sts.state_flags_h = pkt->sts24.state_flags_h;
		if (pld_size && sts.comp_status != CS_DATA_UNDERRUN) {
			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
			    SF_XFERRED_DATA | SF_GOT_STATUS);
		} else {
			sts.state_flags_h = (uint8_t)(sts.state_flags_h |
			    SF_GOT_BUS | SF_GOT_TARGET | SF_SENT_CMD |
			    SF_GOT_STATUS);
		}
		if (scsi_req.direction & CF_WR) {
			sts.state_flags_l = (uint8_t)(sts.state_flags_l |
		sts.scsi_status_h = pkt->sts.scsi_status_h;

		/* Setup residuals. */
		sts.residual_length = LE_32(pkt->sts.residual_length);

		/* Setup state flags. */
		sts.state_flags_l = pkt->sts.state_flags_l;
		sts.state_flags_h = pkt->sts.state_flags_h;

		/* Setup FCP response info. */
		sts.rsp_info_length = sts.scsi_status_h & FCP_RSP_LEN_VALID ?
		    LE_16(pkt->sts.rsp_info_length) : 0;
		sts.rsp_info = &pkt->sts.rsp_info[0];

		/* Setup sense data. */
		sts.req_sense_length = sts.scsi_status_h & FCP_SNS_LEN_VALID ?
		    LE_16(pkt->sts.req_sense_length) : 0;
		sts.req_sense_data = &pkt->sts.req_sense_data[0];
	}

	QL_PRINT_9(ha, "response pkt\n");
	QL_DUMP_9(&pkt->sts, 8, sizeof (sts_entry_t));

	/* Map the ISP completion status to an EXT ioctl status. */
	switch (sts.comp_status) {
	case CS_INCOMPLETE:
	case CS_ABORTED:
	case CS_DEVICE_UNAVAILABLE:
	case CS_PORT_UNAVAILABLE:
	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_LOOP_DOWN_ABORT:
		cmd->Status = EXT_STATUS_BUSY;
		break;
	case CS_RESET:
	case CS_QUEUE_FULL:
		cmd->Status = EXT_STATUS_ERR;
		break;
	case CS_TIMEOUT:
		cmd->Status = EXT_STATUS_ERR;
		break;
	    scsi_req.cdbp[0] == SCMD_ERASE ||
	    (scsi_req.cdbp[0] == SCMD_FORMAT &&
	    (scsi_req.cdbp[1] & FPB_DATA) == 0)) {
		/*
		 * Non data transfer command, clear sts_entry residual
		 * length.
		 */
		sts.residual_length = 0;
		cmd->ResponseLen = 0;
		if (sts.comp_status == CS_DATA_UNDERRUN) {
			sts.comp_status = CS_COMPLETE;
			cmd->Status = EXT_STATUS_OK;
		}
	} else {
		cmd->ResponseLen = pld_size;
	}

	/* Correct ISP completion status */
	if (sts.comp_status == CS_COMPLETE && sts.scsi_status_l == 0 &&
	    (sts.scsi_status_h & FCP_RSP_MASK) == 0) {
		QL_PRINT_9(ha, "Correct completion\n",
		    ha->instance);
		scsi_req.resid = 0;
	} else if (sts.comp_status == CS_DATA_UNDERRUN) {
		QL_PRINT_9(ha, "Correct UNDERRUN\n",
		    ha->instance);
		scsi_req.resid = sts.residual_length;
		if (sts.scsi_status_h & FCP_RESID_UNDER) {
			cmd->Status = (uint32_t)EXT_STATUS_OK;

			cmd->ResponseLen = (uint32_t)
			    (pld_size - scsi_req.resid);
		} else {
			EL(ha, "failed, Transfer ERROR\n");
			cmd->Status = EXT_STATUS_ERR;
			cmd->ResponseLen = 0;
		}
	} else {
		QL_PRINT_9(ha, "error d_id=%xh, comp_status=%xh, "
		    "scsi_status_h=%xh, scsi_status_l=%xh\n",
		    tq->d_id.b24, sts.comp_status, sts.scsi_status_h,
		    sts.scsi_status_l);

		scsi_req.resid = pld_size;
		/*
		 * Handle residual count on SCSI check
		 * condition.
		 *
		 * - If Residual Under / Over is set, use the
		 * Residual Transfer Length field in IOCB.
		 * - If Residual Under / Over is not set, and
		 * Transferred Data bit is set in State Flags
		 * field of IOCB, report residual value of 0
		 * (you may want to do this for tape
		 * Write-type commands only). This takes care
		 * of logical end of tape problem and does
		 * not break Unit Attention.
		 * - If Residual Under / Over is not set, and
		 * Transferred Data bit is not set in State
		 * Flags, report residual value equal to
		} else {
			sense_sz = sts.req_sense_length;
		}

		EL(ha, "failed, Check Condition Status, d_id=%xh\n",
		    tq->d_id.b24);
		QL_DUMP_2(sts.req_sense_data, 8, sts.req_sense_length);

		if (ddi_copyout(sts.req_sense_data, scsi_req.u_sense,
		    (size_t)sense_sz, mode) != 0) {
			EL(ha, "failed, request sense ddi_copyout\n");
		}

		cmd->Status = EXT_STATUS_SCSI_STATUS;
		cmd->DetailStatus = sts.scsi_status_l;
	}

	/* Copy response payload from DMA buffer to application. */
	if (scsi_req.direction & (CF_RD | CF_DATA_IN) &&
	    cmd->ResponseLen != 0) {
		QL_PRINT_9(ha, "Data Return resid=%lu, "
		    "byte_count=%u, ResponseLen=%xh\n",
		    scsi_req.resid, pld_size, cmd->ResponseLen);
		QL_DUMP_9(pld, 8, cmd->ResponseLen);

		/* Send response payload. */
		if (ql_send_buffer_data(pld,
		    (caddr_t)(uintptr_t)cmd->ResponseAdr,
		    cmd->ResponseLen, mode) != cmd->ResponseLen) {
			EL(ha, "failed, send_buffer_data\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->ResponseLen = 0;
		}
	}

	if (cmd->Status != EXT_STATUS_OK) {
		EL(ha, "failed, cmd->Status=%xh, comp_status=%xh, "
		    "d_id=%xh\n", cmd->Status, sts.comp_status, tq->d_id.b24);
	} else {
		/*EMPTY*/
		QL_PRINT_9(ha, "done, ResponseLen=%d\n",
		    ha->instance, cmd->ResponseLen);
	}

	kmem_free(pkt, pkt_size);
	ql_free_dma_resource(ha, dma_mem);
	kmem_free(dma_mem, sizeof (dma_mem_t));
}
2777
2778 /*
2779 * ql_wwpn_to_scsiaddr
2780 *
2781 * Input:
2782 * ha: adapter state pointer.
2783 * cmd: EXT_IOCTL cmd struct pointer.
2784 * mode: flags.
2785 *
2786 * Context:
2787 * Kernel context.
2788 */
2789 static void
2790 ql_wwpn_to_scsiaddr(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2791 {
2792 int status;
2793 uint8_t wwpn[EXT_DEF_WWN_NAME_SIZE];
2794 EXT_SCSI_ADDR *tmp_addr;
2795 ql_tgt_t *tq;
2796
2797 QL_PRINT_9(ha, "started\n");
2798
2799 if (cmd->RequestLen != EXT_DEF_WWN_NAME_SIZE) {
2800 /* Return error */
2801 EL(ha, "incorrect RequestLen\n");
2802 cmd->Status = EXT_STATUS_INVALID_PARAM;
2803 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
2804 return;
2805 }
2806
2807 status = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, wwpn,
2808 cmd->RequestLen, mode);
2809
2810 if (status != 0) {
2811 cmd->Status = EXT_STATUS_COPY_ERR;
2812 EL(ha, "failed, ddi_copyin\n");
2813 return;
2814 }
2815
2816 tq = ql_find_port(ha, wwpn, QLNT_PORT);
2817
2818 if (tq == NULL || tq->flags & TQF_INITIATOR_DEVICE) {
2819 /* no matching device */
2820 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
2821 EL(ha, "failed, device not found\n");
2822 return;
2823 }
2824
2825 /* Copy out the IDs found. For now we can only return target ID. */
2826 tmp_addr = (EXT_SCSI_ADDR *)(uintptr_t)cmd->ResponseAdr;
2827
2828 status = ddi_copyout((void *)wwpn, (void *)&tmp_addr->Target, 8, mode);
2829
2830 if (status != 0) {
2831 cmd->Status = EXT_STATUS_COPY_ERR;
2832 EL(ha, "failed, ddi_copyout\n");
2833 } else {
2834 cmd->Status = EXT_STATUS_OK;
2835 QL_PRINT_9(ha, "done\n");
2836 }
2837 }
2838
2839 /*
2840 * ql_host_idx
2841 * Gets host order index.
2842 *
2843 * Input:
2844 * ha: adapter state pointer.
2845 * cmd: EXT_IOCTL cmd struct pointer.
2846 * mode: flags.
2847 *
2848 * Returns:
2849 * None, request status indicated in cmd->Status.
2850 *
2851 * Context:
2852 * Kernel context.
2853 */
2854 static void
2855 ql_host_idx(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2856 {
2857 uint16_t idx;
2858
2859 QL_PRINT_9(ha, "started\n");
2860
2861 if (cmd->ResponseLen < sizeof (uint16_t)) {
2862 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2863 cmd->DetailStatus = sizeof (uint16_t);
2864 EL(ha, "failed, ResponseLen < Len=%xh\n", cmd->ResponseLen);
2865 cmd->ResponseLen = 0;
2866 return;
2867 }
2868
2869 idx = (uint16_t)ha->instance;
2870
2871 if (ddi_copyout((void *)&idx, (void *)(uintptr_t)(cmd->ResponseAdr),
2872 sizeof (uint16_t), mode) != 0) {
2873 cmd->Status = EXT_STATUS_COPY_ERR;
2874 cmd->ResponseLen = 0;
2875 EL(ha, "failed, ddi_copyout\n");
2876 } else {
2877 cmd->ResponseLen = sizeof (uint16_t);
2878 QL_PRINT_9(ha, "done\n");
2879 }
2880 }
2881
2882 /*
2883 * ql_host_drvname
2884 * Gets host driver name
2885 *
2886 * Input:
2887 * ha: adapter state pointer.
2888 * cmd: EXT_IOCTL cmd struct pointer.
2889 * mode: flags.
2890 *
2891 * Returns:
2892 * None, request status indicated in cmd->Status.
2893 *
2894 * Context:
2895 * Kernel context.
2896 */
2897 static void
2898 ql_host_drvname(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2899 {
2900
2901 char drvname[] = QL_NAME;
2902 uint32_t qlnamelen;
2903
2904 QL_PRINT_9(ha, "started\n");
2905
2906 qlnamelen = (uint32_t)(strlen(QL_NAME) + 1);
2907
2908 if (cmd->ResponseLen < qlnamelen) {
2909 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2910 cmd->DetailStatus = qlnamelen;
2911 EL(ha, "failed, ResponseLen: %xh, needed: %xh\n",
2912 cmd->ResponseLen, qlnamelen);
2913 cmd->ResponseLen = 0;
2914 return;
2915 }
2916
2917 if (ddi_copyout((void *)&drvname,
2918 (void *)(uintptr_t)(cmd->ResponseAdr),
2919 qlnamelen, mode) != 0) {
2920 cmd->Status = EXT_STATUS_COPY_ERR;
2921 cmd->ResponseLen = 0;
2922 EL(ha, "failed, ddi_copyout\n");
2923 } else {
2924 cmd->ResponseLen = qlnamelen - 1;
2925 }
2926
2927 QL_PRINT_9(ha, "done\n");
2928 }
2929
2930 /*
2931 * ql_read_nvram
2932 * Get NVRAM contents.
2933 *
2934 * Input:
2935 * ha: adapter state pointer.
2936 * cmd: EXT_IOCTL cmd struct pointer.
2937 * mode: flags.
2938 *
2939 * Returns:
2940 * None, request status indicated in cmd->Status.
2941 *
2942 * Context:
2943 * Kernel context.
2944 */
2945 static void
2946 ql_read_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2947 {
2948
2949 QL_PRINT_9(ha, "started\n");
2950
2951 if (cmd->ResponseLen < ha->nvram_cache->size) {
2952 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2953 cmd->DetailStatus = ha->nvram_cache->size;
2954 EL(ha, "failed, ResponseLen != NVRAM, Len=%xh\n",
2955 cmd->ResponseLen);
2956 cmd->ResponseLen = 0;
2957 return;
2958 }
2959
2960 /* Get NVRAM data. */
2961 if (ql_nv_util_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
2962 mode) != 0) {
2963 cmd->Status = EXT_STATUS_COPY_ERR;
2964 cmd->ResponseLen = 0;
2965 EL(ha, "failed, copy error\n");
2966 } else {
2967 cmd->ResponseLen = ha->nvram_cache->size;
2968 QL_PRINT_9(ha, "done\n");
2969 }
2970 }
2971
2972 /*
2973 * ql_write_nvram
2974 * Loads NVRAM contents.
2975 *
2976 * Input:
2977 * ha: adapter state pointer.
2978 * cmd: EXT_IOCTL cmd struct pointer.
2979 * mode: flags.
2980 *
2981 * Returns:
2982 * None, request status indicated in cmd->Status.
2983 *
2984 * Context:
2985 * Kernel context.
2986 */
2987 static void
2988 ql_write_nvram(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
2989 {
2990
2991 QL_PRINT_9(ha, "started\n");
2992
2993 if (cmd->RequestLen < ha->nvram_cache->size) {
2994 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
2995 cmd->DetailStatus = ha->nvram_cache->size;
2996 EL(ha, "failed, RequestLen != NVRAM, Len=%xh\n",
2997 cmd->RequestLen);
2998 return;
2999 }
3000
3001 /* Load NVRAM data. */
3002 if (ql_nv_util_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
3003 mode) != 0) {
3004 cmd->Status = EXT_STATUS_COPY_ERR;
3005 EL(ha, "failed, copy error\n");
3006 } else {
3007 /*EMPTY*/
3008 QL_PRINT_9(ha, "done\n");
3009 }
3010 }
3011
3012 /*
3013 * ql_write_vpd
3014 * Loads VPD contents.
3015 *
3016 * Input:
3017 * ha: adapter state pointer.
3018 * cmd: EXT_IOCTL cmd struct pointer.
3019 * mode: flags.
3020 *
3021 * Returns:
3022 * None, request status indicated in cmd->Status.
3023 *
3024 * Context:
3025 * Kernel context.
3026 */
3027 static void
3028 ql_write_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3029 {
3030 QL_PRINT_9(ha, "started\n");
3031
3032 int32_t rval = 0;
3033
3034 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
3035 cmd->Status = EXT_STATUS_INVALID_REQUEST;
3036 EL(ha, "failed, invalid request for HBA\n");
3037 return;
3038 }
3039
3040 if (cmd->RequestLen < QL_24XX_VPD_SIZE) {
3041 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3042 cmd->DetailStatus = QL_24XX_VPD_SIZE;
3043 EL(ha, "failed, RequestLen != VPD len, len passed=%xh\n",
3044 cmd->RequestLen);
3045 return;
3046 }
3047
3048 /* Load VPD data. */
3049 if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)(cmd->RequestAdr),
3050 mode)) != 0) {
3051 cmd->Status = EXT_STATUS_COPY_ERR;
3052 cmd->DetailStatus = rval;
3053 EL(ha, "failed, errno=%x\n", rval);
3054 } else {
3055 /*EMPTY*/
3056 QL_PRINT_9(ha, "done\n");
3057 }
3058 }
3059
3060 /*
3061 * ql_read_vpd
3062 * Dumps VPD contents.
3063 *
3064 * Input:
3065 * ha: adapter state pointer.
3066 * cmd: EXT_IOCTL cmd struct pointer.
3067 * mode: flags.
3068 *
3069 * Returns:
3070 * None, request status indicated in cmd->Status.
3071 *
3072 * Context:
3073 * Kernel context.
3074 */
3075 static void
3076 ql_read_vpd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3077 {
3078 QL_PRINT_9(ha, "started\n");
3079
3080 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
3081 cmd->Status = EXT_STATUS_INVALID_REQUEST;
3082 EL(ha, "failed, invalid request for HBA\n");
3083 return;
3084 }
3085
3086 if (cmd->ResponseLen < QL_24XX_VPD_SIZE) {
3087 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3088 cmd->DetailStatus = QL_24XX_VPD_SIZE;
3089 EL(ha, "failed, ResponseLen < VPD len, len passed=%xh\n",
3090 cmd->ResponseLen);
3091 return;
3092 }
3093
3094 /* Dump VPD data. */
3095 if ((ql_vpd_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
3096 mode)) != 0) {
3097 cmd->Status = EXT_STATUS_COPY_ERR;
3098 EL(ha, "failed,\n");
3099 } else {
3100 /*EMPTY*/
3101 QL_PRINT_9(ha, "done\n");
3102 }
3103 }
3104
3105 /*
3106 * ql_get_fcache
3107 * Dumps flash cache contents.
3108 *
3109 * Input:
3110 * ha: adapter state pointer.
3111 * cmd: EXT_IOCTL cmd struct pointer.
3112 * mode: flags.
3113 *
3114 * Returns:
3115 * None, request status indicated in cmd->Status.
3116 *
3117 * Context:
3118 * Kernel context.
3119 */
static void
ql_get_fcache(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint32_t	bsize, boff, types, cpsize, hsize;
	ql_fcache_t	*fptr;

	QL_PRINT_9(ha, "started\n");

	/* Nothing to report if the flash cache was never populated. */
	if (ha->fcache == NULL) {
		cmd->Status = EXT_STATUS_ERR;
		EL(ha, "failed, adapter fcache not setup\n");
		return;
	}

	/*
	 * Minimum response size for the legacy fixed-slot layout:
	 * one 100-byte slot for firmware-type-1 adapters, otherwise
	 * four slots (BIOS/FCODE/EFI at offsets 0/100/200, FW at 300).
	 */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
		bsize = 100;
	} else {
		bsize = 400;
	}

	if (cmd->ResponseLen < bsize) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = bsize;
		EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
		    bsize, cmd->ResponseLen);
		return;
	}

	boff = 0;
	bsize = 0;	/* reused from here on as total bytes copied out */
	fptr = ha->fcache;

	/*
	 * For backwards compatibility, get one of each image type
	 */
	types = (FTYPE_BIOS | FTYPE_FCODE | FTYPE_EFI);
	while ((fptr != NULL) && (fptr->buf != NULL) && (types != 0)) {
		/* Get the next image */
		if ((fptr = ql_get_fbuf(ha->fcache, types)) != NULL) {

			/* Each legacy slot carries at most 100 bytes. */
			cpsize = (fptr->buflen < 100 ? fptr->buflen : 100);

			if (ddi_copyout(fptr->buf,
			    (void *)(uintptr_t)(cmd->ResponseAdr + boff),
			    cpsize, mode) != 0) {
				EL(ha, "ddicopy failed, done\n");
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->DetailStatus = 0;
				return;
			}
			/* Advance a full slot even for a short image. */
			boff += 100;
			bsize += cpsize;
			/* Only one image of each type is returned. */
			types &= ~(fptr->type);
		}
	}

	/*
	 * Get the firmware image -- it needs to be last in the
	 * buffer at offset 300 for backwards compatibility. Also for
	 * backwards compatibility, the pci header is stripped off.
	 */
	if ((fptr = ql_get_fbuf(ha->fcache, FTYPE_FW)) != NULL) {

		hsize = sizeof (pci_header_t) + sizeof (pci_data_t);
		if (hsize > fptr->buflen) {
			EL(ha, "header size (%xh) exceeds buflen (%xh)\n",
			    hsize, fptr->buflen);
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}

		cpsize = ((fptr->buflen - hsize) < 100 ?
		    fptr->buflen - hsize : 100);

		if (ddi_copyout(fptr->buf + hsize,
		    (void *)(uintptr_t)(cmd->ResponseAdr + 300),
		    cpsize, mode) != 0) {
			EL(ha, "fw ddicopy failed, done\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->DetailStatus = 0;
			return;
		}
		/*
		 * NOTE(review): bsize grows by the full slot size here,
		 * not by cpsize as in the loop above -- confirm intended.
		 */
		bsize += 100;
	}

	cmd->Status = EXT_STATUS_OK;
	cmd->DetailStatus = bsize;	/* total bytes reported */

	QL_PRINT_9(ha, "done\n");
}
3211
3212 /*
3213 * ql_get_fcache_ex
3214 * Dumps flash cache contents.
3215 *
3216 * Input:
3217 * ha: adapter state pointer.
3218 * cmd: EXT_IOCTL cmd struct pointer.
3219 * mode: flags.
3220 *
3221 * Returns:
3222 * None, request status indicated in cmd->Status.
3223 *
3224 * Context:
3225 * Kernel context.
3226 */
3227 static void
3228 ql_get_fcache_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3229 {
3230 uint32_t bsize = 0;
3231 uint32_t boff = 0;
3232 ql_fcache_t *fptr;
3233
3234 QL_PRINT_9(ha, "started\n");
3235
3236 if (ha->fcache == NULL) {
3237 cmd->Status = EXT_STATUS_ERR;
3238 EL(ha, "failed, adapter fcache not setup\n");
3239 return;
3240 }
3241
3242 /* Make sure user passed enough buffer space */
3243 for (fptr = ha->fcache; fptr != NULL; fptr = fptr->next) {
3244 bsize += FBUFSIZE;
3245 }
3246
3247 if (cmd->ResponseLen < bsize) {
3248 if (cmd->ResponseLen != 0) {
3249 EL(ha, "failed, ResponseLen < %d, len passed=%xh\n",
3250 bsize, cmd->ResponseLen);
3251 }
3252 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
3253 cmd->DetailStatus = bsize;
3254 return;
3255 }
3256
3257 boff = 0;
3258 fptr = ha->fcache;
3259 while ((fptr != NULL) && (fptr->buf != NULL)) {
3260 /* Get the next image */
3261 if (ddi_copyout(fptr->buf,
3262 (void *)(uintptr_t)(cmd->ResponseAdr + boff),
3263 (fptr->buflen < FBUFSIZE ? fptr->buflen : FBUFSIZE),
3264 mode) != 0) {
3265 EL(ha, "failed, ddicopy at %xh, done\n", boff);
3266 cmd->Status = EXT_STATUS_COPY_ERR;
3267 cmd->DetailStatus = 0;
3268 return;
3269 }
3270 boff += FBUFSIZE;
3271 fptr = fptr->next;
3272 }
3273
3274 cmd->Status = EXT_STATUS_OK;
3275 cmd->DetailStatus = bsize;
3276
3277 QL_PRINT_9(ha, "done\n");
3278 }
3279
3280 /*
3281 * ql_read_flash
3282 * Get flash contents.
3283 *
3284 * Input:
3285 * ha: adapter state pointer.
3286 * cmd: EXT_IOCTL cmd struct pointer.
3287 * mode: flags.
3288 *
3289 * Returns:
3290 * None, request status indicated in cmd->Status.
3291 *
3292 * Context:
3293 * Kernel context.
3294 */
static void
ql_read_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(ha, "started\n");

	/*
	 * On firmware-type-1 adapters all I/O must be stalled before
	 * the flash is accessed; fail busy if the stall cannot be
	 * obtained.
	 */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1) &&
	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		ql_restart_driver(ha);
		cmd->Status = EXT_STATUS_BUSY;
		cmd->DetailStatus = xp->fdesc.flash_size;
		cmd->ResponseLen = 0;
		return;
	}

	/* The flash cache must be set up before dumping. */
	if (ql_setup_fcache(ha) != QL_SUCCESS) {
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = xp->fdesc.flash_size;
		EL(ha, "failed, ResponseLen=%xh, flash size=%xh\n",
		    cmd->ResponseLen, xp->fdesc.flash_size);
		cmd->ResponseLen = 0;
	} else {
		/* adjust read size to flash size */
		if (cmd->ResponseLen > xp->fdesc.flash_size) {
			EL(ha, "adjusting req=%xh, max=%xh\n",
			    cmd->ResponseLen, xp->fdesc.flash_size);
			cmd->ResponseLen = xp->fdesc.flash_size;
		}

		/* Get flash data. */
		if (ql_flash_fcode_dump(ha,
		    (void *)(uintptr_t)(cmd->ResponseAdr),
		    (size_t)(cmd->ResponseLen), 0, mode) != 0) {
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->ResponseLen = 0;
			EL(ha, "failed,\n");
		}
	}

	/*
	 * Resume I/O: wake the task daemon with ISP_ABORT_NEEDED and
	 * DRIVER_STALL -- presumably the restart clears the stall set
	 * above; confirm against ql_awaken_task_daemon().
	 */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
		EL(ha, "isp_abort_needed for restart\n");
		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
		    DRIVER_STALL);
	}

	QL_PRINT_9(ha, "done\n");
}
3345
3346 /*
3347 * ql_write_flash
3348 * Loads flash contents.
3349 *
3350 * Input:
3351 * ha: adapter state pointer.
3352 * cmd: EXT_IOCTL cmd struct pointer.
3353 * mode: flags.
3354 *
3355 * Returns:
3356 * None, request status indicated in cmd->Status.
3357 *
3358 * Context:
3359 * Kernel context.
3360 */
static void
ql_write_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(ha, "started\n");

	/*
	 * On firmware-type-1 adapters all I/O must be stalled before
	 * the flash is accessed; fail busy if the stall cannot be
	 * obtained.
	 */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1) &&
	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "ql_stall_driver failed\n");
		ql_restart_driver(ha);
		cmd->Status = EXT_STATUS_BUSY;
		cmd->DetailStatus = xp->fdesc.flash_size;
		cmd->ResponseLen = 0;
		return;
	}

	/* The flash cache must be set up before loading. */
	if (ql_setup_fcache(ha) != QL_SUCCESS) {
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = xp->fdesc.flash_size;
		EL(ha, "failed, RequestLen=%xh, size=%xh\n",
		    cmd->RequestLen, xp->fdesc.flash_size);
		cmd->ResponseLen = 0;
	} else {
		/* Load flash data. */
		if (cmd->RequestLen > xp->fdesc.flash_size) {
			/* Image larger than the flash part -- reject. */
			cmd->Status = EXT_STATUS_ERR;
			cmd->DetailStatus = xp->fdesc.flash_size;
			EL(ha, "failed, RequestLen=%xh, flash size=%xh\n",
			    cmd->RequestLen, xp->fdesc.flash_size);
		} else if (ql_flash_fcode_load(ha,
		    (void *)(uintptr_t)(cmd->RequestAdr),
		    (size_t)(cmd->RequestLen), mode) != 0) {
			cmd->Status = EXT_STATUS_COPY_ERR;
			EL(ha, "failed,\n");
		}
	}

	/*
	 * Resume I/O: wake the task daemon with ISP_ABORT_NEEDED and
	 * DRIVER_STALL -- presumably the restart clears the stall set
	 * above; confirm against ql_awaken_task_daemon().
	 */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
		EL(ha, "isp_abort_needed for restart\n");
		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
		    DRIVER_STALL);
	}

	QL_PRINT_9(ha, "done\n");
}
3408
3409 /*
3410 * ql_diagnostic_loopback
3411 * Performs EXT_CC_LOOPBACK Command
3412 *
3413 * Input:
3414 * ha: adapter state pointer.
3415 * cmd: Local EXT_IOCTL cmd struct pointer.
3416 * mode: flags.
3417 *
3418 * Returns:
3419 * None, request status indicated in cmd->Status.
3420 *
3421 * Context:
3422 * Kernel context.
3423 */
static void
ql_diagnostic_loopback(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_LOOPBACK_REQ	plbreq;
	EXT_LOOPBACK_RSP	plbrsp;
	ql_mbx_data_t		mr;
	uint32_t		rval, timer, bpsize;
	caddr_t			bp, pld;
	uint16_t		opt;
	boolean_t		loop_up;

	QL_PRINT_9(ha, "started\n");

	/* Get loop back request. */
	if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
	    (void *)&plbreq, sizeof (EXT_LOOPBACK_REQ), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Check transfer length fits in buffer. */
	if (plbreq.BufferLength < plbreq.TransferCount) {
		EL(ha, "failed, BufferLength=%d, xfercnt=%d\n",
		    plbreq.BufferLength, plbreq.TransferCount);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Allocate command memory: one opcode byte, then the payload
	 * at a 4-byte offset.
	 */
	bpsize = plbreq.TransferCount + 4;	/* Include opcode size */
	bp = kmem_zalloc(bpsize, KM_SLEEP);
	if (bp == NULL) {
		/* NOTE(review): KM_SLEEP never fails; check is benign. */
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}
	pld = bp + 4;
	*bp = 0x10;	/* opcode */

	/* Get loopback data. */
	if (ql_get_buffer_data((caddr_t)(uintptr_t)plbreq.BufferAddress,
	    pld, plbreq.TransferCount, mode) != plbreq.TransferCount) {
		EL(ha, "failed, ddi_copyin-2\n");
		kmem_free(bp, bpsize);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Quiesce driver I/O before the diagnostic; fail busy if not. */
	if (LOOP_RECONFIGURE(ha) ||
	    ql_stall_driver(ha, 0) != QL_SUCCESS) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		ql_restart_driver(ha);
		kmem_free(bp, bpsize);
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}
	/* Remember link state; it selects which test variant is valid. */
	loop_up = ha->task_daemon_flags & LOOP_DOWN ? B_FALSE : B_TRUE;

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* determine topology so we can send the loopback or the echo */
	/* Echo is supported on 2300's only and above */

	ADAPTER_STATE_LOCK(ha);
	ha->flags |= LOOPBACK_ACTIVE;
	ADAPTER_STATE_UNLOCK(ha);

	opt = plbreq.Options;

	if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
		opt = (uint16_t)(plbreq.Options & MBC_LOOPBACK_POINT_MASK);
		if (loop_up && opt == MBC_LOOPBACK_POINT_EXTERNAL) {
			/* External test on a live link uses ECHO ELS,
			 * limited to 252 payload bytes. */
			if (plbreq.TransferCount > 252) {
				EL(ha, "transfer count (%d) > 252\n",
				    plbreq.TransferCount);
				ql_restart_driver(ha);
				kmem_free(bp, bpsize);
				cmd->Status = EXT_STATUS_INVALID_PARAM;
				cmd->ResponseLen = 0;
				return;
			}
			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
			rval = ql_diag_echo(ha, pld, plbreq.TransferCount,
			    MBC_ECHO_ELS, &mr);
		} else {
			if (CFG_IST(ha, CFG_LOOP_POINT_SUPPORT)) {
				(void) ql_set_loop_point(ha, opt);
			}
			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
			rval = ql_diag_loopback(ha, pld, plbreq.TransferCount,
			    opt, plbreq.IterationCount, &mr);
			/*
			 * Mailbox status 4005h/17h triggers a full ISP
			 * abort -- firmware error state; TODO confirm
			 * exact meaning of subcode 17h.
			 */
			if (mr.mb[0] == 0x4005 && mr.mb[1] == 0x17) {
				(void) ql_abort_isp(ha);
			}
			if (CFG_IST(ha, CFG_LOOP_POINT_SUPPORT)) {
				/* Restore the default loop point. */
				(void) ql_set_loop_point(ha, 0);
			}
		}
	} else {
		if (loop_up && (ha->topology & QL_F_PORT) &&
		    CFG_IST(ha, CFG_LB_ECHO_SUPPORT)) {
			QL_PRINT_9(ha, "F_PORT topology -- using "
			    "echo\n");
			plbrsp.CommandSent = INT_DEF_LB_ECHO_CMD;
			/* Try echo with opcode header first, then retry
			 * the bare payload on failure. */
			if ((rval = ql_diag_echo(ha, bp, bpsize,
			    (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_1) ?
			    MBC_ECHO_64BIT : MBC_ECHO_ELS), &mr)) !=
			    QL_SUCCESS) {
				rval = ql_diag_echo(ha, pld,
				    plbreq.TransferCount,
				    (uint16_t)(CFG_IST(ha, CFG_ISP_FW_TYPE_1) ?
				    MBC_ECHO_64BIT : 0), &mr);
			}
		} else {
			plbrsp.CommandSent = INT_DEF_LB_LOOPBACK_CMD;
			if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
				opt = (uint16_t)(opt | MBC_LOOPBACK_64BIT);
			}
			rval = ql_diag_loopback(ha, pld, plbreq.TransferCount,
			    opt, plbreq.IterationCount, &mr);
		}
	}
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~LOOPBACK_ACTIVE;
	ADAPTER_STATE_UNLOCK(ha);

	ql_restart_driver(ha);
	/* After internal loopback, give the loop time to come ready. */
	if (loop_up && opt == MBC_LOOPBACK_POINT_INTERNAL) {
		timer = 30;
		do {
			delay(100);
		} while (timer-- && LOOP_NOT_READY(ha));
	}

	/* Restart IP if it was shutdown. */
	if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
		(void) ql_initialize_ip(ha);
		ql_isp_rcvbuf(ha);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, diagnostic_loopback_mbx=%xh\n", rval);
		kmem_free(bp, bpsize);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
		return;
	}

	/* Return loopback data. */
	if (ql_send_buffer_data(pld, (caddr_t)(uintptr_t)plbreq.BufferAddress,
	    plbreq.TransferCount, mode) != plbreq.TransferCount) {
		EL(ha, "failed, ddi_copyout\n");
		kmem_free(bp, bpsize);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	kmem_free(bp, bpsize);

	/* Return loopback results. */
	plbrsp.BufferAddress = plbreq.BufferAddress;
	plbrsp.BufferLength = plbreq.TransferCount;
	plbrsp.CompletionStatus = mr.mb[0];

	/* Echo completions carry no error counters. */
	if (plbrsp.CommandSent == INT_DEF_LB_ECHO_CMD) {
		plbrsp.CrcErrorCount = 0;
		plbrsp.DisparityErrorCount = 0;
		plbrsp.FrameLengthErrorCount = 0;
		plbrsp.IterationCountLastError = 0;
	} else {
		plbrsp.CrcErrorCount = mr.mb[1];
		plbrsp.DisparityErrorCount = mr.mb[2];
		plbrsp.FrameLengthErrorCount = mr.mb[3];
		plbrsp.IterationCountLastError =
		    SHORT_TO_LONG(mr.mb[18], mr.mb[19]);
	}

	rval = ddi_copyout((void *)&plbrsp,
	    (void *)(uintptr_t)cmd->ResponseAdr,
	    sizeof (EXT_LOOPBACK_RSP), mode);
	if (rval != 0) {
		EL(ha, "failed, ddi_copyout-2\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	cmd->ResponseLen = sizeof (EXT_LOOPBACK_RSP);

	QL_PRINT_9(ha, "done\n");
}
3625
3626 /*
3627 * ql_set_loop_point
3628 * Setup loop point for port configuration.
3629 *
3630 * Input:
3631 * ha: adapter state structure.
3632 * opt: loop point option.
3633 *
3634 * Returns:
3635 * ql local function return status code.
3636 *
3637 * Context:
3638 * Kernel context.
3639 */
int
ql_set_loop_point(ql_adapter_state_t *ha, uint16_t opt)
{
	ql_mbx_data_t	mr;
	int		rval;
	uint32_t	timer;

	QL_PRINT_9(ha, "started\n");

	/*
	 * We get the current port config, modify the loopback field and
	 * write it back out.
	 */
	if ((rval = ql_get_port_config(ha, &mr)) != QL_SUCCESS) {
		EL(ha, "get_port_config status=%xh\n", rval);
		return (rval);
	}
	/*
	 * Set the loopback mode field while maintaining the others.
	 */
	mr.mb[1] = (uint16_t)(mr.mb[1] & ~LOOPBACK_MODE_FIELD_MASK);
	if (opt == MBC_LOOPBACK_POINT_INTERNAL) {
		mr.mb[1] = (uint16_t)(mr.mb[1] | LOOPBACK_MODE_INTERNAL);
	} else if (CFG_IST(ha, CFG_CTRL_80XX) &&
	    opt == MBC_LOOPBACK_POINT_EXTERNAL) {
		/* External loopback mode is only set on 80xx adapters. */
		mr.mb[1] = (uint16_t)(mr.mb[1] | LOOPBACK_MODE_EXTERNAL);
	}
	/*
	 * Changing the port configuration will cause the port state to cycle
	 * down and back up. The indication that this has happened is that
	 * the point to point flag gets set.
	 */
	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~POINT_TO_POINT;
	ADAPTER_STATE_UNLOCK(ha);
	if ((rval = ql_set_port_config(ha, &mr)) != QL_SUCCESS) {
		EL(ha, "set_port_config status=%xh\n", rval);
	}

	/*
	 * Wait up to 10 seconds for the port to cycle back up when a
	 * loop point was requested (opt != 0); no wait when clearing.
	 */
	for (timer = opt ? 10 : 0; timer; timer--) {
		if (ha->flags & POINT_TO_POINT) {
			break;
		}
		/* Delay for 1000000 usec (1 second). */
		ql_delay(ha, 1000000);
	}

	QL_PRINT_9(ha, "done\n");

	return (rval);
}
3692
3693 /*
3694 * ql_send_els_rnid
3695 * IOCTL for extended link service RNID command.
3696 *
3697 * Input:
3698 * ha: adapter state pointer.
3699 * cmd: User space CT arguments pointer.
3700 * mode: flags.
3701 *
3702 * Returns:
3703 * None, request status indicated in cmd->Status.
3704 *
3705 * Context:
3706 * Kernel context.
3707 */
static void
ql_send_els_rnid(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_RNID_REQ	tmp_rnid;
	port_id_t	tmp_fcid;
	caddr_t		tmp_buf, bptr;
	uint32_t	copy_len;
	ql_tgt_t	*tq = NULL;
	EXT_RNID_DATA	rnid_data;
	uint32_t	loop_ready_wait = 10 * 60 * 10;
	int		rval = 0;
	uint32_t	local_hba = 0;

	QL_PRINT_9(ha, "started\n");

	/* Reject the request while the driver is suspended. */
	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* The request must be exactly one EXT_RNID_REQ structure. */
	if (cmd->RequestLen != sizeof (EXT_RNID_REQ)) {
		/* parameter error */
		EL(ha, "failed, RequestLen < EXT_RNID_REQ, Len=%xh\n",
		    cmd->RequestLen);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
	    &tmp_rnid, cmd->RequestLen, mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Find loop ID of the device */
	if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWNN) {
		/* Addressed by node name; check the local HBA first. */
		bptr = (caddr_t)ha->loginparams.node_ww_name.raw_wwn;
		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWNN,
		    EXT_DEF_WWN_NAME_SIZE) == 0) {
			local_hba = 1;
		} else {
			tq = ql_find_port(ha,
			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWNN, QLNT_NODE);
		}
	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_WWPN) {
		/* Addressed by port name. */
		bptr = (caddr_t)ha->loginparams.nport_ww_name.raw_wwn;
		if (bcmp((void *)bptr, (void *)tmp_rnid.Addr.FcAddr.WWPN,
		    EXT_DEF_WWN_NAME_SIZE) == 0) {
			local_hba = 1;
		} else {
			tq = ql_find_port(ha,
			    (uint8_t *)tmp_rnid.Addr.FcAddr.WWPN, QLNT_PORT);
		}
	} else if (tmp_rnid.Addr.Type == EXT_DEF_TYPE_PORTID) {
		/*
		 * Copy caller's d_id to tmp space.
		 */
		bcopy(&tmp_rnid.Addr.FcAddr.Id[1], tmp_fcid.r.d_id,
		    EXT_DEF_PORTID_SIZE_ACTUAL);
		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);

		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
			local_hba = 1;
		} else {
			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
	}

	if (local_hba) {
		/* The local HBA answers from its own RNID parameters. */
		rval = ql_get_rnid_params(ha, SEND_RNID_RSP_SIZE, tmp_buf);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
			cmd->Status = EXT_STATUS_ERR;
			cmd->ResponseLen = 0;
			return;
		}

		/* Save gotten RNID data. */
		bcopy(tmp_buf, &rnid_data, sizeof (EXT_RNID_DATA));

		/* Now build the Send RNID response */
		tmp_buf[0] = (char)(EXT_DEF_RNID_DFORMAT_TOPO_DISC);
		tmp_buf[1] = (2 * EXT_DEF_WWN_NAME_SIZE);
		tmp_buf[2] = 0;
		tmp_buf[3] = sizeof (EXT_RNID_DATA);
		bcopy(ha->loginparams.nport_ww_name.raw_wwn, &tmp_buf[4],
		    EXT_DEF_WWN_NAME_SIZE);
		bcopy(ha->loginparams.node_ww_name.raw_wwn,
		    &tmp_buf[4 + EXT_DEF_WWN_NAME_SIZE],
		    EXT_DEF_WWN_NAME_SIZE);
		bcopy((uint8_t *)&rnid_data,
		    &tmp_buf[4 + 2 * EXT_DEF_WWN_NAME_SIZE],
		    sizeof (EXT_RNID_DATA));
	} else {
		if (tq == NULL) {
			/* no matching device */
			EL(ha, "failed, device not found\n");
			kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
			cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
			cmd->DetailStatus = EXT_DSTATUS_TARGET;
			cmd->ResponseLen = 0;
			return;
		}

		/* Send command */
		rval = ql_send_rnid_els(ha, tq->loop_id,
		    (uint8_t)tmp_rnid.DataFormat, SEND_RNID_RSP_SIZE, tmp_buf);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, send_rnid_mbx=%xh, id=%xh\n",
			    rval, tq->loop_id);

	/* Copy the response */
	copy_len = (cmd->ResponseLen > SEND_RNID_RSP_SIZE) ?
	    SEND_RNID_RSP_SIZE : cmd->ResponseLen;

	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)cmd->ResponseAdr,
	    copy_len, mode) != copy_len) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = copy_len;
		if (copy_len < SEND_RNID_RSP_SIZE) {
			cmd->Status = EXT_STATUS_DATA_OVERRUN;
			EL(ha, "failed, EXT_STATUS_DATA_OVERRUN\n");

		} else if (cmd->ResponseLen > SEND_RNID_RSP_SIZE) {
			cmd->Status = EXT_STATUS_DATA_UNDERRUN;
			EL(ha, "failed, EXT_STATUS_DATA_UNDERRUN\n");
		} else {
			cmd->Status = EXT_STATUS_OK;
			QL_PRINT_9(ha, "done\n",
			    ha->instance);
		}
	}

	kmem_free(tmp_buf, SEND_RNID_RSP_SIZE);
}
3885
3886 /*
3887 * ql_set_host_data
3888 * Process IOCTL subcommand to set host/adapter related data.
3889 *
3890 * Input:
3891 * ha: adapter state pointer.
3892 * cmd: User space CT arguments pointer.
3893 * mode: flags.
3894 *
3895 * Returns:
3896 * None, request status indicated in cmd->Status.
3897 *
3898 * Context:
3899 * Kernel context.
3900 */
3901 static void
3902 ql_set_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
3903 {
3904 QL_PRINT_9(ha, "started, SubCode=%d\n",
3905 cmd->SubCode);
3906
3907 /*
3908 * case off on command subcode
3909 */
3910 switch (cmd->SubCode) {
3911 case EXT_SC_SET_RNID:
3912 ql_set_rnid_parameters(ha, cmd, mode);
3913 break;
3914 case EXT_SC_RST_STATISTICS:
3915 (void) ql_reset_statistics(ha, cmd);
3916 break;
3917 case EXT_SC_SET_BEACON_STATE:
3918 ql_set_led_state(ha, cmd, mode);
3919 break;
3920 case EXT_SC_SET_PARMS:
3921 case EXT_SC_SET_BUS_MODE:
3922 case EXT_SC_SET_DR_DUMP_BUF:
3923 case EXT_SC_SET_RISC_CODE:
3924 case EXT_SC_SET_FLASH_RAM:
3925 case EXT_SC_SET_LUN_BITMASK:
3926 case EXT_SC_SET_RETRY_CNT:
3927 case EXT_SC_SET_RTIN:
3928 case EXT_SC_SET_FC_LUN_BITMASK:
3929 case EXT_SC_ADD_TARGET_DEVICE:
3930 case EXT_SC_SWAP_TARGET_DEVICE:
3931 case EXT_SC_SET_SEL_TIMEOUT:
3932 default:
3933 /* function not supported. */
3934 EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
3935 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
3936 break;
3937 }
3938
3939 if (cmd->Status != EXT_STATUS_OK) {
3940 EL(ha, "failed, Status=%d\n", cmd->Status);
3941 } else {
3942 /*EMPTY*/
3943 QL_PRINT_9(ha, "done\n");
3944 }
3945 }
3946
3947 /*
3948 * ql_get_host_data
3949 * Performs EXT_CC_GET_DATA subcommands.
3950 *
3951 * Input:
3952 * ha: adapter state pointer.
3953 * cmd: Local EXT_IOCTL cmd struct pointer.
3954 * mode: flags.
3955 *
3956 * Returns:
3957 * None, request status indicated in cmd->Status.
3958 *
3959 * Context:
3960 * Kernel context.
3961 */
static void
ql_get_host_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	int	out_size = 0;

	QL_PRINT_9(ha, "started, SubCode=%d\n",
	    cmd->SubCode);

	/* case off on command subcode */
	/* First determine the minimum response size per subcode. */
	switch (cmd->SubCode) {
	case EXT_SC_GET_STATISTICS:
		out_size = sizeof (EXT_HBA_PORT_STAT);
		break;
	case EXT_SC_GET_FC_STATISTICS:
		out_size = sizeof (EXT_HBA_PORT_STAT);
		break;
	case EXT_SC_GET_PORT_SUMMARY:
		out_size = sizeof (EXT_DEVICEDATA);
		break;
	case EXT_SC_GET_RNID:
		out_size = sizeof (EXT_RNID_DATA);
		break;
	case EXT_SC_GET_TARGET_ID:
		out_size = sizeof (EXT_DEST_ADDR);
		break;
	case EXT_SC_GET_BEACON_STATE:
		out_size = sizeof (EXT_BEACON_CONTROL);
		break;
	case EXT_SC_GET_FC4_STATISTICS:
		out_size = sizeof (EXT_HBA_FC4STATISTICS);
		break;
	case EXT_SC_GET_DCBX_PARAM:
		out_size = EXT_DEF_DCBX_PARAM_BUF_SIZE;
		break;
	case EXT_SC_GET_RESOURCE_CNTS:
		out_size = sizeof (EXT_RESOURCE_CNTS);
		break;
	case EXT_SC_GET_FCF_LIST:
		out_size = sizeof (EXT_FCF_LIST);
		break;
	case EXT_SC_GET_PRIV_STATS:
		/* Private stats use whatever the caller provided. */
		out_size = cmd->ResponseLen;
		break;
	/* Recognized but unimplemented subcommands fall to default. */
	case EXT_SC_GET_SCSI_ADDR:
	case EXT_SC_GET_ERR_DETECTIONS:
	case EXT_SC_GET_BUS_MODE:
	case EXT_SC_GET_DR_DUMP_BUF:
	case EXT_SC_GET_RISC_CODE:
	case EXT_SC_GET_FLASH_RAM:
	case EXT_SC_GET_LINK_STATUS:
	case EXT_SC_GET_LOOP_ID:
	case EXT_SC_GET_LUN_BITMASK:
	case EXT_SC_GET_PORT_DATABASE:
	case EXT_SC_GET_PORT_DATABASE_MEM:
	case EXT_SC_GET_POSITION_MAP:
	case EXT_SC_GET_RETRY_CNT:
	case EXT_SC_GET_RTIN:
	case EXT_SC_GET_FC_LUN_BITMASK:
	case EXT_SC_GET_SEL_TIMEOUT:
	default:
		/* function not supported. */
		EL(ha, "failed, function not supported=%d\n", cmd->SubCode);
		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
		ql_get_statistics_fc4(ha, cmd, mode);
		break;
	case EXT_SC_GET_PORT_SUMMARY:
		ql_get_port_summary(ha, cmd, mode);
		break;
	case EXT_SC_GET_TARGET_ID:
		ql_get_target_id(ha, cmd, mode);
		break;
	case EXT_SC_GET_BEACON_STATE:
		ql_get_led_state(ha, cmd, mode);
		break;
	case EXT_SC_GET_DCBX_PARAM:
		ql_get_dcbx_parameters(ha, cmd, mode);
		break;
	case EXT_SC_GET_FCF_LIST:
		ql_get_fcf_list(ha, cmd, mode);
		break;
	case EXT_SC_GET_RESOURCE_CNTS:
		ql_get_resource_counts(ha, cmd, mode);
		break;
	case EXT_SC_GET_PRIV_STATS:
		ql_get_priv_stats(ha, cmd, mode);
		break;
	}

	if (cmd->Status != EXT_STATUS_OK) {
		EL(ha, "failed, Status=%d\n", cmd->Status);
	} else {
		/*EMPTY*/
		QL_PRINT_9(ha, "done\n");
	}
}
4081
4082 /* ******************************************************************** */
4083 /* Helper Functions */
4084 /* ******************************************************************** */
4085
4086 /*
4087 * ql_lun_count
4088 * Get numbers of LUNS on target.
4089 *
4090 * Input:
4091 * ha: adapter state pointer.
4092 * q: device queue pointer.
4093 *
4094 * Returns:
4095 * Number of LUNs.
4096 *
4097 * Context:
4098 * Kernel context.
4099 */
4100 static int
4101 ql_lun_count(ql_adapter_state_t *ha, ql_tgt_t *tq)
4102 {
4103 int cnt;
4104
4105 QL_PRINT_9(ha, "started\n");
4106
4107 /* Bypass LUNs that failed. */
4108 cnt = ql_report_lun(ha, tq);
4109 if (cnt == 0) {
4110 cnt = ql_inq_scan(ha, tq, ha->maximum_luns_per_target);
4111 }
4112
4113 QL_PRINT_9(ha, "done\n");
4114
4115 return (cnt);
4116 }
4117
4118 /*
4119 * ql_report_lun
4120 * Get numbers of LUNS using report LUN command.
4121 *
4122 * Input:
4123 * ha: adapter state pointer.
4124 * q: target queue pointer.
4125 *
4126 * Returns:
4127 * Number of LUNs.
4128 *
4129 * Context:
4130 * Kernel context.
4131 */
4132 static int
4133 ql_report_lun(ql_adapter_state_t *ha, ql_tgt_t *tq)
4134 {
4135 int rval;
4136 uint8_t retries;
4137 ql_mbx_iocb_t *pkt;
4138 ql_rpt_lun_lst_t *rpt;
4139 dma_mem_t dma_mem;
4140 uint32_t pkt_size, cnt;
4141 uint16_t comp_status;
4142 uint8_t scsi_status_h, scsi_status_l, *reqs;
4143
4144 QL_PRINT_9(ha, "started\n");
4145
4146 if (DRIVER_SUSPENDED(ha)) {
4147 EL(ha, "failed, LOOP_NOT_READY\n");
4148 return (0);
4149 }
4150
4151 pkt_size = sizeof (ql_mbx_iocb_t) + sizeof (ql_rpt_lun_lst_t);
4152 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4153 if (pkt == NULL) {
4154 EL(ha, "failed, kmem_zalloc\n");
4155 return (0);
4156 }
4157 rpt = (ql_rpt_lun_lst_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4158
4159 /* Get DMA memory for the IOCB */
4160 if (ql_get_dma_mem(ha, &dma_mem, sizeof (ql_rpt_lun_lst_t),
4161 LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
4162 cmn_err(CE_WARN, "%s(%d) DMA memory "
4163 "alloc failed", QL_NAME, ha->instance);
4164 kmem_free(pkt, pkt_size);
4165 return (0);
4166 }
4167
4168 for (retries = 0; retries < 4; retries++) {
4169 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
4170 pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
4171 pkt->cmd24.entry_count = 1;
4172
4173 /* Set N_port handle */
4174 pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);
4175
4176 /* Set target ID */
4177 pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
4178 pkt->cmd24.target_id[1] = tq->d_id.b.area;
4179 pkt->cmd24.target_id[2] = tq->d_id.b.domain;
4180
4181 /* Set Virtual Port ID */
4182 pkt->cmd24.vp_index = ha->vp_index;
4183
4184 /* Set ISP command timeout. */
4185 pkt->cmd24.timeout = LE_16(15);
4186
4187 /* Load SCSI CDB */
4188 pkt->cmd24.scsi_cdb[0] = SCMD_REPORT_LUNS;
4189 pkt->cmd24.scsi_cdb[6] =
4193 pkt->cmd24.scsi_cdb[8] =
4194 MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4195 pkt->cmd24.scsi_cdb[9] =
4196 LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4197 for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
4198 ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
4199 + cnt, 4);
4200 }
4201
4202 /* Set tag queue control flags */
4203 pkt->cmd24.task = TA_STAG;
4204
4205 /* Set transfer direction. */
4206 pkt->cmd24.control_flags = CF_RD;
4207
4208 /* Set data segment count. */
4209 pkt->cmd24.dseg_count = LE_16(1);
4210
4211 /* Load total byte count. */
4212 /* Load data descriptor. */
4213 pkt->cmd24.dseg.address[0] = (uint32_t)
4214 LE_32(LSD(dma_mem.cookie.dmac_laddress));
4215 pkt->cmd24.dseg.address[1] = (uint32_t)
4216 LE_32(MSD(dma_mem.cookie.dmac_laddress));
4217 pkt->cmd24.total_byte_count =
4218 LE_32(sizeof (ql_rpt_lun_lst_t));
4219 pkt->cmd24.dseg.length =
4220 LE_32(sizeof (ql_rpt_lun_lst_t));
4221 } else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
4222 pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
4223 pkt->cmd3.entry_count = 1;
4224 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4225 pkt->cmd3.target_l = LSB(tq->loop_id);
4226 pkt->cmd3.target_h = MSB(tq->loop_id);
4227 } else {
4228 pkt->cmd3.target_h = LSB(tq->loop_id);
4229 }
4230 pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
4231 pkt->cmd3.timeout = LE_16(15);
4232 pkt->cmd3.dseg_count = LE_16(1);
4233 pkt->cmd3.scsi_cdb[0] = SCMD_REPORT_LUNS;
4234 pkt->cmd3.scsi_cdb[6] =
4235 MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4236 pkt->cmd3.scsi_cdb[7] =
4237 LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4238 pkt->cmd3.scsi_cdb[8] =
4239 MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4240 pkt->cmd3.scsi_cdb[9] =
4241 LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4242 pkt->cmd3.byte_count =
4243 LE_32(sizeof (ql_rpt_lun_lst_t));
4244 pkt->cmd3.dseg[0].address[0] = (uint32_t)
4245 LE_32(LSD(dma_mem.cookie.dmac_laddress));
4246 pkt->cmd3.dseg[0].address[1] = (uint32_t)
4247 LE_32(MSD(dma_mem.cookie.dmac_laddress));
4248 pkt->cmd3.dseg[0].length =
4249 LE_32(sizeof (ql_rpt_lun_lst_t));
4250 } else {
4251 pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
4252 pkt->cmd.entry_count = 1;
4253 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
4254 pkt->cmd.target_l = LSB(tq->loop_id);
4255 pkt->cmd.target_h = MSB(tq->loop_id);
4256 } else {
4257 pkt->cmd.target_h = LSB(tq->loop_id);
4258 }
4259 pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
4260 pkt->cmd.timeout = LE_16(15);
4261 pkt->cmd.dseg_count = LE_16(1);
4262 pkt->cmd.scsi_cdb[0] = SCMD_REPORT_LUNS;
4263 pkt->cmd.scsi_cdb[6] =
4264 MSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4265 pkt->cmd.scsi_cdb[7] =
4266 LSB(MSW(sizeof (ql_rpt_lun_lst_t)));
4267 pkt->cmd.scsi_cdb[8] =
4268 MSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4269 pkt->cmd.scsi_cdb[9] =
4270 LSB(LSW(sizeof (ql_rpt_lun_lst_t)));
4271 pkt->cmd.byte_count =
4272 LE_32(sizeof (ql_rpt_lun_lst_t));
4273 pkt->cmd.dseg[0].address = (uint32_t)
4274 LE_32(LSD(dma_mem.cookie.dmac_laddress));
4275 pkt->cmd.dseg[0].length =
4276 LE_32(sizeof (ql_rpt_lun_lst_t));
4277 }
4278
4279 rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
4280 sizeof (ql_mbx_iocb_t));
4281
4282 /* Sync in coming DMA buffer. */
4283 (void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
4284 DDI_DMA_SYNC_FORKERNEL);
4285 /* Copy in coming DMA data. */
4286 ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)rpt,
4287 (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);
4288
4289 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
4290 pkt->sts24.entry_status = (uint8_t)
4291 (pkt->sts24.entry_status & 0x3c);
4292 comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
4293 scsi_status_h = pkt->sts24.scsi_status_h;
4294 scsi_status_l = pkt->sts24.scsi_status_l;
4295 cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
4296 LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
4297 reqs = &pkt->sts24.rsp_sense_data[cnt];
4298 } else {
4299 pkt->sts.entry_status = (uint8_t)
4300 (pkt->sts.entry_status & 0x7e);
4301 comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
4302 scsi_status_h = pkt->sts.scsi_status_h;
4303 scsi_status_l = pkt->sts.scsi_status_l;
4304 reqs = &pkt->sts.req_sense_data[0];
4305 }
4306 if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
4307 EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
4308 pkt->sts.entry_status, tq->d_id.b24);
4309 rval = QL_FUNCTION_PARAMETER_ERROR;
4337 if (scsi_status_l & STATUS_CHECK) {
4338 EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
4339 "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
4340 "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
4341 reqs[1], reqs[2], reqs[3], reqs[4],
4342 reqs[5], reqs[6], reqs[7], reqs[8],
4343 reqs[9], reqs[10], reqs[11], reqs[12],
4344 reqs[13], reqs[14], reqs[15], reqs[16],
4345 reqs[17]);
4346 }
4347 } else {
4348 break;
4349 }
4350 bzero((caddr_t)pkt, pkt_size);
4351 }
4352
4353 if (rval != QL_SUCCESS) {
4354 EL(ha, "failed=%xh\n", rval);
4355 rval = 0;
4356 } else {
4357 QL_PRINT_9(ha, "LUN list\n");
4358 QL_DUMP_9(rpt, 8, rpt->hdr.len + 8);
4359 rval = (int)(BE_32(rpt->hdr.len) / 8);
4360 }
4361
4362 kmem_free(pkt, pkt_size);
4363 ql_free_dma_resource(ha, &dma_mem);
4364
4365 QL_PRINT_9(ha, "done\n");
4366
4367 return (rval);
4368 }
4369
4370 /*
4371 * ql_inq_scan
 * Get the number of LUNs using the INQUIRY command.
4373 *
4374 * Input:
4375 * ha: adapter state pointer.
4376 * tq: target queue pointer.
4377 * count: scan for the number of existing LUNs.
4378 *
4379 * Returns:
4380 * Number of LUNs.
4381 *
4382 * Context:
4383 * Kernel context.
4384 */
4385 static int
4386 ql_inq_scan(ql_adapter_state_t *ha, ql_tgt_t *tq, int count)
4387 {
4388 int lun, cnt, rval;
4389 ql_mbx_iocb_t *pkt;
4390 uint8_t *inq;
4391 uint32_t pkt_size;
4392
4393 QL_PRINT_9(ha, "started\n");
4394
4395 pkt_size = sizeof (ql_mbx_iocb_t) + INQ_DATA_SIZE;
4396 pkt = kmem_zalloc(pkt_size, KM_SLEEP);
4397 if (pkt == NULL) {
4398 EL(ha, "failed, kmem_zalloc\n");
4399 return (0);
4400 }
4401 inq = (uint8_t *)((caddr_t)pkt + sizeof (ql_mbx_iocb_t));
4402
4403 cnt = 0;
4404 for (lun = 0; lun < MAX_LUNS; lun++) {
4405
4406 if (DRIVER_SUSPENDED(ha)) {
4407 rval = QL_LOOP_DOWN;
4408 cnt = 0;
4409 break;
4410 }
4411
4412 rval = ql_inq(ha, tq, lun, pkt, INQ_DATA_SIZE);
4413 if (rval == QL_SUCCESS) {
4414 switch (*inq) {
4415 case DTYPE_DIRECT:
4416 case DTYPE_PROCESSOR: /* Appliance. */
4417 case DTYPE_WORM:
4418 case DTYPE_RODIRECT:
4419 case DTYPE_SCANNER:
4420 case DTYPE_OPTICAL:
4421 case DTYPE_CHANGER:
4422 case DTYPE_ESI:
4423 cnt++;
4424 break;
4425 case DTYPE_SEQUENTIAL:
4426 cnt++;
4427 tq->flags |= TQF_TAPE_DEVICE;
4428 break;
4429 default:
4430 QL_PRINT_9(ha, "failed, "
4431 "unsupported device id=%xh, lun=%d, "
4432 "type=%xh\n", tq->loop_id,
4433 lun, *inq);
4434 break;
4435 }
4436
4437 if (*inq == DTYPE_ESI || cnt >= count) {
4438 break;
4439 }
4440 } else if (rval == QL_ABORTED || rval == QL_FUNCTION_TIMEOUT) {
4441 cnt = 0;
4442 break;
4443 }
4444 }
4445
4446 kmem_free(pkt, pkt_size);
4447
4448 QL_PRINT_9(ha, "done\n");
4449
4450 return (cnt);
4451 }
4452
4453 /*
4454 * ql_inq
4455 * Issue inquiry command.
4456 *
4457 * Input:
4458 * ha: adapter state pointer.
4459 * tq: target queue pointer.
4460 * lun: LUN number.
4461 * pkt: command and buffer pointer.
4462 * inq_len: amount of inquiry data.
4463 *
4464 * Returns:
4465 * ql local function return status code.
4466 *
4467 * Context:
4468 * Kernel context.
4469 */
static int
ql_inq(ql_adapter_state_t *ha, ql_tgt_t *tq, int lun, ql_mbx_iocb_t *pkt,
    uint32_t inq_len)
{
	dma_mem_t	dma_mem;
	int		rval, retries;
	uint32_t	pkt_size, cnt;
	uint16_t	comp_status;
	uint8_t		scsi_status_h, scsi_status_l, *reqs;
	caddr_t		inq_data;
	uint64_t	lun_addr;
	fcp_ent_addr_t	*fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;

	QL_PRINT_9(ha, "started\n");

	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, loop down\n");
		return (QL_FUNCTION_TIMEOUT);
	}

	/* Inquiry data area follows the IOCB in the caller's buffer. */
	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t) + inq_len);
	bzero((caddr_t)pkt, pkt_size);

	inq_data = (caddr_t)pkt + sizeof (ql_mbx_iocb_t);

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, &dma_mem, inq_len,
	    LITTLE_ENDIAN_DMA, QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%s(%d) DMA memory "
		    "alloc failed", QL_NAME, ha->instance);
		return (0);
	}

	/* Issue the INQUIRY, retrying up to 4 times. */
	for (retries = 0; retries < 4; retries++) {
		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			/* 24xx-class firmware: command type 7 IOCB. */
			pkt->cmd24.entry_type = IOCB_CMD_TYPE_7;
			pkt->cmd24.entry_count = 1;

			/* Set LUN number */
			lun_addr = ql_get_lun_addr(tq, lun);
			fcp_ent_addr = (fcp_ent_addr_t *)&lun_addr;
			pkt->cmd24.fcp_lun[2] =
			    lobyte(fcp_ent_addr->ent_addr_0);
			pkt->cmd24.fcp_lun[3] =
			    hibyte(fcp_ent_addr->ent_addr_0);
			pkt->cmd24.fcp_lun[0] =
			    lobyte(fcp_ent_addr->ent_addr_1);
			pkt->cmd24.fcp_lun[1] =
			    hibyte(fcp_ent_addr->ent_addr_1);
			pkt->cmd24.fcp_lun[6] =
			    lobyte(fcp_ent_addr->ent_addr_2);
			pkt->cmd24.fcp_lun[7] =
			    hibyte(fcp_ent_addr->ent_addr_2);
			pkt->cmd24.fcp_lun[4] =
			    lobyte(fcp_ent_addr->ent_addr_3);
			pkt->cmd24.fcp_lun[5] =
			    hibyte(fcp_ent_addr->ent_addr_3);

			/* Set N_port handle */
			pkt->cmd24.n_port_hdl = (uint16_t)LE_16(tq->loop_id);

			/* Set target ID */
			pkt->cmd24.target_id[0] = tq->d_id.b.al_pa;
			pkt->cmd24.target_id[1] = tq->d_id.b.area;
			pkt->cmd24.target_id[2] = tq->d_id.b.domain;

			/* Set Virtual Port ID */
			pkt->cmd24.vp_index = ha->vp_index;

			/* Set ISP command timeout. */
			pkt->cmd24.timeout = LE_16(15);

			/* Load SCSI CDB (byte-swapped per 4-byte group). */
			pkt->cmd24.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd24.scsi_cdb[4] = LSB(LSW(inq_len));
			for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
				ql_chg_endian((uint8_t *)&pkt->cmd24.scsi_cdb
				    + cnt, 4);
			}

			/* Set tag queue control flags */
			pkt->cmd24.task = TA_STAG;

			/* Set transfer direction. */
			pkt->cmd24.control_flags = CF_RD;

			/* Set data segment count. */
			pkt->cmd24.dseg_count = LE_16(1);

			/* Load total byte count. */
			pkt->cmd24.total_byte_count = LE_32(inq_len);

			/* Load data descriptor. */
			pkt->cmd24.dseg.address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg.address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd24.dseg.length = LE_32(inq_len);
		} else if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
			/* 64-bit DMA addressing: command type 3 IOCB. */
			pkt->cmd3.entry_type = IOCB_CMD_TYPE_3;
			cnt = CMD_TYPE_3_DATA_SEGMENTS;

			pkt->cmd3.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd3.target_l = LSB(tq->loop_id);
				pkt->cmd3.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd3.target_h = LSB(tq->loop_id);
			}
			pkt->cmd3.lun_l = LSB(lun);
			pkt->cmd3.lun_h = MSB(lun);
			pkt->cmd3.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd3.timeout = LE_16(15);
			pkt->cmd3.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd3.scsi_cdb[4] = LSB(LSW(inq_len));
			pkt->cmd3.dseg_count = LE_16(1);
			pkt->cmd3.byte_count = LE_32(inq_len);
			pkt->cmd3.dseg[0].address[0] = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg[0].address[1] = (uint32_t)
			    LE_32(MSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd3.dseg[0].length = LE_32(inq_len);
		} else {
			/* Without 64-bit addressing: command type 2 IOCB. */
			pkt->cmd.entry_type = IOCB_CMD_TYPE_2;
			cnt = CMD_TYPE_2_DATA_SEGMENTS;

			pkt->cmd.entry_count = 1;
			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->cmd.target_l = LSB(tq->loop_id);
				pkt->cmd.target_h = MSB(tq->loop_id);
			} else {
				pkt->cmd.target_h = LSB(tq->loop_id);
			}
			pkt->cmd.lun_l = LSB(lun);
			pkt->cmd.lun_h = MSB(lun);
			pkt->cmd.control_flags_l = CF_DATA_IN | CF_STAG;
			pkt->cmd.timeout = LE_16(15);
			pkt->cmd.scsi_cdb[0] = SCMD_INQUIRY;
			pkt->cmd.scsi_cdb[4] = LSB(LSW(inq_len));
			pkt->cmd.dseg_count = LE_16(1);
			pkt->cmd.byte_count = LE_32(inq_len);
			pkt->cmd.dseg[0].address = (uint32_t)
			    LE_32(LSD(dma_mem.cookie.dmac_laddress));
			pkt->cmd.dseg[0].length = LE_32(inq_len);
		}

		/* rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, pkt_size); */
		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* Sync incoming IOCB DMA buffer. */
		(void) ddi_dma_sync(dma_mem.dma_handle, 0, dma_mem.size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Copy incoming DMA data into the caller's inquiry area. */
		ddi_rep_get8(dma_mem.acc_handle, (uint8_t *)inq_data,
		    (uint8_t *)dma_mem.bp, dma_mem.size, DDI_DEV_AUTOINCR);

		/* Decode the status IOCB for the firmware family in use. */
		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			pkt->sts24.entry_status = (uint8_t)
			    (pkt->sts24.entry_status & 0x3c);
			comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
			scsi_status_h = pkt->sts24.scsi_status_h;
			scsi_status_l = pkt->sts24.scsi_status_l;
			/* Sense data follows any FCP response data. */
			cnt = scsi_status_h & FCP_RSP_LEN_VALID ?
			    LE_32(pkt->sts24.fcp_rsp_data_length) : 0;
			reqs = &pkt->sts24.rsp_sense_data[cnt];
		} else {
			pkt->sts.entry_status = (uint8_t)
			    (pkt->sts.entry_status & 0x7e);
			comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
			scsi_status_h = pkt->sts.scsi_status_h;
			scsi_status_l = pkt->sts.scsi_status_l;
			reqs = &pkt->sts.req_sense_data[0];
		}
		if (rval == QL_SUCCESS && pkt->sts.entry_status != 0) {
			EL(ha, "failed, entry_status=%xh, d_id=%xh\n",
			    pkt->sts.entry_status, tq->d_id.b24);
			rval = QL_FUNCTION_PARAMETER_ERROR;
		}
			rval = QL_FUNCTION_FAILED;
		}

		if (scsi_status_l & STATUS_CHECK) {
			EL(ha, "STATUS_CHECK Sense Data\n%2xh%3xh"
			    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh%3xh"
			    "%3xh%3xh%3xh%3xh%3xh%3xh%3xh\n", reqs[0],
			    reqs[1], reqs[2], reqs[3], reqs[4],
			    reqs[5], reqs[6], reqs[7], reqs[8],
			    reqs[9], reqs[10], reqs[11], reqs[12],
			    reqs[13], reqs[14], reqs[15], reqs[16],
			    reqs[17]);
		}
		} else {
			break;
		}
	}
	ql_free_dma_resource(ha, &dma_mem);

	QL_PRINT_9(ha, "done\n");

	return (rval);
}
4686
4687 /*
4688 * ql_get_buffer_data
 * Copies data from user space to kernel buffer.
4690 *
4691 * Input:
4692 * src: User source buffer address.
 * dst:	Kernel destination buffer address.
4694 * size: Amount of data.
4695 * mode: flags.
4696 *
4697 * Returns:
4698 * Returns number of bytes transferred.
4699 *
4700 * Context:
4701 * Kernel context.
4702 */
4703 static uint32_t
4704 ql_get_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4705 {
4706 uint32_t cnt;
4707
4708 for (cnt = 0; cnt < size; cnt++) {
4709 if (ddi_copyin(src++, dst++, 1, mode) != 0) {
4710 QL_PRINT_2(NULL, "failed, ddi_copyin\n");
4711 break;
4712 }
4713 }
4714
4715 return (cnt);
4716 }
4717
4718 /*
4719 * ql_send_buffer_data
 * Copies data from kernel buffer to user space.
4721 *
4722 * Input:
 * src:	Kernel source buffer address.
4724 * dst: User destination buffer address.
4725 * size: Amount of data.
4726 * mode: flags.
4727 *
4728 * Returns:
4729 * Returns number of bytes transferred.
4730 *
4731 * Context:
4732 * Kernel context.
4733 */
4734 static uint32_t
4735 ql_send_buffer_data(caddr_t src, caddr_t dst, uint32_t size, int mode)
4736 {
4737 uint32_t cnt;
4738
4739 for (cnt = 0; cnt < size; cnt++) {
4740 if (ddi_copyout(src++, dst++, 1, mode) != 0) {
4741 QL_PRINT_2(NULL, "failed, ddi_copyin\n");
4742 break;
4743 }
4744 }
4745
4746 return (cnt);
4747 }
4748
4749 /*
4750 * ql_find_port
4751 * Locates device queue.
4752 *
4753 * Input:
4754 * ha: adapter state pointer.
4755 * name: device port name.
4756 *
4757 * Returns:
4758 * Returns target queue pointer.
4759 *
4760 * Context:
4761 * Kernel context.
4811 * Get flash descriptor table.
4812 *
4813 * Input:
4814 * ha: adapter state pointer.
4815 *
4816 * Returns:
4817 * ql local function return status code.
4818 *
4819 * Context:
4820 * Kernel context.
4821 */
static int
ql_24xx_flash_desc(ql_adapter_state_t *ha)
{
	uint32_t	cnt;
	uint16_t	chksum, *bp, data;
	int		rval;
	flash_desc_t	*fdesc;
	ql_xioctl_t	*xp = ha->xioctl;

	QL_PRINT_9(ha, "started\n");

	/* Without a descriptor-table address there is nothing to read. */
	if (ha->flash_desc_addr == 0) {
		QL_PRINT_9(ha, "desc ptr=0\n");
		return (QL_FUNCTION_FAILED);
	}

	/* NOTE(review): KM_SLEEP allocations cannot return NULL; the
	 * check is retained for defensiveness only. */
	if ((fdesc = kmem_zalloc(sizeof (flash_desc_t), KM_SLEEP)) == NULL) {
		EL(ha, "kmem_zalloc=null\n");
		return (QL_MEMORY_ALLOC_FAILED);
	}
	/* flash_desc_addr is a 32-bit word address; << 2 gives bytes. */
	rval = ql_dump_fcode(ha, (uint8_t *)fdesc, sizeof (flash_desc_t),
	    ha->flash_desc_addr << 2);
	if (rval != QL_SUCCESS) {
		EL(ha, "read status=%xh\n", rval);
		kmem_free(fdesc, sizeof (flash_desc_t));
		return (rval);
	}

	/* Accumulate a 16-bit checksum across the descriptor table. */
	chksum = 0;
	bp = (uint16_t *)fdesc;
	for (cnt = 0; cnt < (sizeof (flash_desc_t)) / 2; cnt++) {
		data = *bp++;
		LITTLE_ENDIAN_16(&data);
	/* Convert multi-byte descriptor fields to host byte order. */
	LITTLE_ENDIAN_16(&fdesc->flash_id);
	LITTLE_ENDIAN_32(&fdesc->block_size);
	LITTLE_ENDIAN_32(&fdesc->alt_block_size);
	LITTLE_ENDIAN_32(&fdesc->flash_size);
	LITTLE_ENDIAN_32(&fdesc->write_enable_data);
	LITTLE_ENDIAN_32(&fdesc->read_timeout);

	/* flash size in desc table is in 1024 bytes */
	fdesc->flash_size = fdesc->flash_size * 0x400;

	/* Reject the table on bad checksum, magic, or version. */
	if (chksum != 0 || fdesc->flash_valid != FLASH_DESC_VAILD ||
	    fdesc->flash_version != FLASH_DESC_VERSION) {
		EL(ha, "invalid descriptor table\n");
		kmem_free(fdesc, sizeof (flash_desc_t));
		return (QL_FUNCTION_FAILED);
	}

	/* Adopt the validated table as this adapter's flash descriptor. */
	bcopy(fdesc, &xp->fdesc, sizeof (flash_desc_t));
	kmem_free(fdesc, sizeof (flash_desc_t));

	QL_PRINT_9(ha, "done\n");

	return (QL_SUCCESS);
}
4887
4888 /*
4889 * ql_setup_flash
4890 * Gets the manufacturer and id number of the flash chip, and
4891 * sets up the size parameter.
4892 *
4893 * Input:
4894 * ha: adapter state pointer.
4895 *
4896 * Returns:
4897 * int: ql local function return status code.
4898 *
4899 * Context:
4900 * Kernel context.
4901 */
static int
ql_setup_flash(ql_adapter_state_t *ha)
{
	ql_xioctl_t	*xp = ha->xioctl;
	int		rval = QL_SUCCESS;

	/* Already sized — nothing to do. */
	if (xp->fdesc.flash_size != 0) {
		return (rval);
	}

	/* 22xx without a subsystem vendor id is not supported here. */
	if (CFG_IST(ha, CFG_CTRL_22XX) && !ha->subven_id) {
		return (QL_FUNCTION_FAILED);
	}

	if (CFG_IST(ha, CFG_CTRL_252780818283)) {
		/*
		 * Temporarily set the ha->xioctl->fdesc.flash_size to
		 * 25xx flash size to avoid failing of ql_dump_focde.
		 */
		if (CFG_IST(ha, CFG_CTRL_278083)) {
			ha->xioctl->fdesc.flash_size = 0x1000000;
		} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
			ha->xioctl->fdesc.flash_size = 0x800000;
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			ha->xioctl->fdesc.flash_size = 0x200000;
		} else {
			ha->xioctl->fdesc.flash_size = 0x400000;
		}

		/* Prefer the on-flash descriptor table when valid. */
		if (ql_24xx_flash_desc(ha) == QL_SUCCESS) {
			EL(ha, "flash desc table ok, exit\n");
			return (rval);
		}
		if (CFG_IST(ha, CFG_CTRL_82XX)) {
			/* 82xx: assume a fixed MXIC part. */
			xp->fdesc.flash_manuf = MXIC_FLASH;
			xp->fdesc.flash_id = MXIC_FLASHID_25LXX;
			xp->fdesc.flash_len = 0x17;
		} else {
			(void) ql_24xx_flash_id(ha);
		}

	} else if (CFG_IST(ha, CFG_CTRL_24XX)) {
		(void) ql_24xx_flash_id(ha);
	} else {
		/* Legacy parts: query the chip id via command sequences. */
		ql_flash_enable(ha);

		/* ID-read (autoselect, 0x90) sequence for manufacturer. */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0x90);
		xp->fdesc.flash_manuf = (uint8_t)ql_read_flash_byte(ha, 0x0000);

		/* SBUS cards use different command/ID addresses. */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			ql_write_flash_byte(ha, 0xaaaa, 0xaa);
			ql_write_flash_byte(ha, 0x5555, 0x55);
			ql_write_flash_byte(ha, 0xaaaa, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0002);
		} else {
			ql_write_flash_byte(ha, 0x5555, 0xaa);
			ql_write_flash_byte(ha, 0x2aaa, 0x55);
			ql_write_flash_byte(ha, 0x5555, 0x90);
			xp->fdesc.flash_id = (uint16_t)
			    ql_read_flash_byte(ha, 0x0001);
		}

		/* Return the flash to normal (read) mode (0xf0). */
		ql_write_flash_byte(ha, 0x5555, 0xaa);
		ql_write_flash_byte(ha, 0x2aaa, 0x55);
		ql_write_flash_byte(ha, 0x5555, 0xf0);

		ql_flash_disable(ha);
	}

	/* Default flash descriptor table. */
	xp->fdesc.write_statusreg_cmd = 1;
	xp->fdesc.write_enable_bits = 0;
	xp->fdesc.unprotect_sector_cmd = 0;
	xp->fdesc.protect_sector_cmd = 0;
	xp->fdesc.write_disable_bits = 0xbc;
	xp->fdesc.block_size = 0x10000;
	xp->fdesc.erase_cmd = 0xd8;

	/* Map manufacturer/device id to part geometry. */
	switch (xp->fdesc.flash_manuf) {
	case AMD_FLASH:
		switch (xp->fdesc.flash_id) {
		case SPAN_FLASHID_16384K:
			if (xp->fdesc.flash_len == 0x18) {
				xp->fdesc.flash_size = 0x1000000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		case SPAN_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		case AMD_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case AMD_FLASHID_512K:
		case AMD_FLASHID_512Kt:
		case AMD_FLASHID_512Kb:
			if (CFG_IST(ha, CFG_SBUS_CARD)) {
				xp->fdesc.flash_size = QL_SBUS_FCODE_SIZE;
			} else {
				xp->fdesc.flash_size = 0x80000;
			}
			break;
		case AMD_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ST_FLASH:
		switch (xp->fdesc.flash_id) {
		case ST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case ST_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case ST_FLASHID_M25PXX:
			/* flash_len encodes log2 of the byte size. */
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		case ST_FLASHID_N25QXXX:
			if (xp->fdesc.flash_len == 0x18) {
				xp->fdesc.flash_size = 0x1000000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case SST_FLASH:
		switch (xp->fdesc.flash_id) {
		case SST_FLASHID_128K:
			xp->fdesc.flash_size = 0x20000;
			break;
		case SST_FLASHID_1024K_A:
			xp->fdesc.flash_size = 0x100000;
			xp->fdesc.block_size = 0x8000;
			xp->fdesc.erase_cmd = 0x52;
			break;
		case SST_FLASHID_1024K:
		case SST_FLASHID_1024K_B:
			xp->fdesc.flash_size = 0x100000;
			break;
		case SST_FLASHID_2048K:
			xp->fdesc.flash_size = 0x200000;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case MXIC_FLASH:
		switch (xp->fdesc.flash_id) {
		case MXIC_FLASHID_512K:
			xp->fdesc.flash_size = 0x80000;
			break;
		case MXIC_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			break;
		case MXIC_FLASHID_25LXX:
			xp->fdesc.write_disable_bits = 0xbc;
			if (xp->fdesc.flash_len == 0x14) {
				xp->fdesc.flash_size = 0x100000;
			} else if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x16) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x17) {
				xp->fdesc.flash_size = 0x800000;
			} else if (xp->fdesc.flash_len == 0x18) {
				xp->fdesc.flash_size = 0x1000000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case ATMEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case ATMEL_FLASHID_1024K:
			xp->fdesc.flash_size = 0x100000;
			xp->fdesc.write_disable_bits = 0xbc;
			xp->fdesc.unprotect_sector_cmd = 0x39;
			xp->fdesc.protect_sector_cmd = 0x36;
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case WINBOND_FLASH:
		switch (xp->fdesc.flash_id) {
		case WINBOND_FLASHID:
			if (xp->fdesc.flash_len == 0x15) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x16) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x17) {
				xp->fdesc.flash_size = 0x800000;
			} else if (xp->fdesc.flash_len == 0x18) {
				xp->fdesc.flash_size = 0x1000000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case INTEL_FLASH:
		switch (xp->fdesc.flash_id) {
		case INTEL_FLASHID:
			if (xp->fdesc.flash_len == 0x11) {
				xp->fdesc.flash_size = 0x200000;
			} else if (xp->fdesc.flash_len == 0x12) {
				xp->fdesc.flash_size = 0x400000;
			} else if (xp->fdesc.flash_len == 0x13) {
				xp->fdesc.flash_size = 0x800000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	case EON_FLASH:
		switch (xp->fdesc.flash_id) {
		case EON_FLASHID_EN25QXXX:
			if (xp->fdesc.flash_len == 0x18) {
				xp->fdesc.flash_size = 0x1000000;
			} else {
				rval = QL_FUNCTION_FAILED;
			}
			break;
		default:
			rval = QL_FUNCTION_FAILED;
			break;
		}
		break;
	default:
		rval = QL_FUNCTION_FAILED;
		break;
	}

	/* Try flash table later. */
	if (rval != QL_SUCCESS && CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		EL(ha, "no default id\n");
		return (QL_SUCCESS);
	}

	/*
	 * hack for non std 2312/2322 and 6312/6322 boards. hardware people
	 * need to use either the 128k flash chip (original), or something
	 * larger. For driver purposes, we'll treat it as a 128k flash chip.
	 */
	if ((ha->device_id == 0x2312 || ha->device_id == 0x6312 ||
	    ha->device_id == 0x2322 || ha->device_id == 0x6322) &&
	    (xp->fdesc.flash_size > 0x20000) &&
	    (CFG_IST(ha, CFG_SBUS_CARD) == 0)) {
		EL(ha, "chip exceeds max size: %xh, using 128k\n",
		    xp->fdesc.flash_size);
		xp->fdesc.flash_size = 0x20000;
	}

	if (rval == QL_SUCCESS) {
		EL(ha, "man_id=%xh, flash_id=%xh, size=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id,
		    xp->fdesc.flash_size);
	} else {
		EL(ha, "unsupported mfr / type: man_id=%xh, flash_id=%xh\n",
		    xp->fdesc.flash_manuf, xp->fdesc.flash_id);
	}

	return (rval);
5201 *
5202 * Input:
5203 * ha: adapter state pointer.
5204 * bp: user buffer address.
5205 * size: user buffer size.
5206 * mode: flags
5207 *
5208 * Returns:
5209 *
5210 * Context:
5211 * Kernel context.
5212 */
5213 static int
5214 ql_flash_fcode_load(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5215 int mode)
5216 {
5217 uint8_t *bfp;
5218 ql_xioctl_t *xp = ha->xioctl;
5219 int rval = 0;
5220
5221 QL_PRINT_9(ha, "started\n");
5222
5223 if (bsize > xp->fdesc.flash_size) {
5224 EL(ha, "failed, bufsize: %xh, flash size: %xh\n", bsize,
5225 xp->fdesc.flash_size);
5226 return (ENOMEM);
5227 }
5228
5229 if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5230 EL(ha, "failed, kmem_zalloc\n");
5231 rval = ENOMEM;
5232 } else {
5233 if (ddi_copyin(bp, bfp, bsize, mode) != 0) {
5234 EL(ha, "failed, ddi_copyin\n");
5235 rval = EFAULT;
5236 } else if (ql_load_fcode(ha, bfp, bsize, 0) != QL_SUCCESS) {
5237 EL(ha, "failed, load_fcode\n");
5238 rval = EFAULT;
5239 } else {
5240 /* Reset caches on all adapter instances. */
5241 ql_update_flash_caches(ha);
5242 rval = 0;
5243 }
5244 kmem_free(bfp, bsize);
5245 }
5246
5247 QL_PRINT_9(ha, "done\n");
5248
5249 return (rval);
5250 }
5251
5252 /*
5253 * ql_load_fcode
5254 * Loads fcode in to flash.
5255 *
5256 * Input:
5257 * ha: adapter state pointer.
5258 * dp: data pointer.
5259 * size: data length.
5260 * addr: flash byte address.
5261 *
5262 * Returns:
5263 * ql local function return status code.
5264 *
5265 * Context:
5266 * Kernel context.
5267 */
int
ql_load_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size, uint32_t addr)
{
	uint32_t	cnt;
	int		rval;

	/* 24xx-class firmware has its own flash programming path. */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		return (ql_24xx_load_flash(ha, dp, size, addr));
	}

	QL_PRINT_9(ha, "started\n");

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		/*
		 * sbus has an additional check to make
		 * sure they don't brick the HBA.
		 */
		if (dp[0] != 0xf1) {
			EL(ha, "failed, incorrect fcode for sbus\n");
			return (QL_FUNCTION_PARAMETER_ERROR);
		}
	}

	GLOBAL_HW_LOCK();

	/* Enable Flash Read/Write. */
	ql_flash_enable(ha);

	/* Erase flash prior to write. */
	rval = ql_erase_flash(ha, 0);

	/* Write fcode data to flash. */
	for (cnt = 0; cnt < (uint32_t)size; cnt++) {
		/* Allow other system activity. */
		if (cnt % 0x1000 == 0) {
			drv_usecwait(1);
		}
		rval = ql_program_flash_address(ha, addr++, *dp++);
		if (rval != QL_SUCCESS)
			break;
	}
	}

	ql_flash_disable(ha);

	GLOBAL_HW_UNLOCK();

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval=%xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_9(ha, "done\n");
	}
	return (rval);
}
5324
5325 /*
5326 * ql_flash_fcode_dump
5327 * Dumps FLASH to application.
5328 *
5329 * Input:
5330 * ha: adapter state pointer.
5331 * bp: user buffer address.
5332 * bsize: user buffer size
5333 * faddr: flash byte address
5334 * mode: flags
5335 *
5336 * Returns:
5337 *
5338 * Context:
5339 * Kernel context.
5340 */
5341 static int
5342 ql_flash_fcode_dump(ql_adapter_state_t *ha, void *bp, uint32_t bsize,
5343 uint32_t faddr, int mode)
5344 {
5345 uint8_t *bfp;
5346 int rval;
5347 ql_xioctl_t *xp = ha->xioctl;
5348
5349 QL_PRINT_9(ha, "started\n");
5350
5351 /* adjust max read size to flash size */
5352 if (bsize > xp->fdesc.flash_size) {
5353 EL(ha, "adjusting req=%xh, max=%xh\n", bsize,
5354 xp->fdesc.flash_size);
5355 bsize = xp->fdesc.flash_size;
5356 }
5357
5358 if ((bfp = (uint8_t *)kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
5359 EL(ha, "failed, kmem_zalloc\n");
5360 rval = ENOMEM;
5361 } else {
5362 /* Dump Flash fcode. */
5363 rval = ql_dump_fcode(ha, bfp, bsize, faddr);
5364
5365 if (rval != QL_SUCCESS) {
5366 EL(ha, "failed, dump_fcode = %x\n", rval);
5367 rval = EFAULT;
5368 } else if (ddi_copyout(bfp, bp, bsize, mode) != 0) {
5369 EL(ha, "failed, ddi_copyout\n");
5370 rval = EFAULT;
5371 } else {
5372 rval = 0;
5373 }
5374 kmem_free(bfp, bsize);
5375 }
5376
5377 QL_PRINT_9(ha, "done\n");
5378
5379 return (rval);
5380 }
5381
5382 /*
5383 * ql_dump_fcode
5384 * Dumps fcode from flash.
5385 *
5386 * Input:
5387 * ha: adapter state pointer.
5388 * dp: data pointer.
5389 * size: data length in bytes.
5390 * startpos: starting position in flash (byte address).
5391 *
5392 * Returns:
5393 * ql local function return status code.
5394 *
5395 * Context:
5396 * Kernel context.
5397 *
5398 */
5399 int
5400 ql_dump_fcode(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size,
5401 uint32_t startpos)
5402 {
5403 uint32_t cnt, data, addr;
5404 uint8_t bp[4], *src;
5405 int fp_rval, rval = QL_SUCCESS;
5406 dma_mem_t mem;
5407
5408 QL_PRINT_9(ha, "started\n");
5409
5410 /* make sure startpos+size doesn't exceed flash */
5411 if (size + startpos > ha->xioctl->fdesc.flash_size) {
5412 EL(ha, "exceeded flash range, sz=%xh, stp=%xh, flsz=%xh\n",
5413 size, startpos, ha->xioctl->fdesc.flash_size);
5414 return (QL_FUNCTION_PARAMETER_ERROR);
5415 }
5416
5417 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
5418 /* check start addr is 32 bit aligned for 24xx */
5419 if ((startpos & 0x3) != 0) {
5420 rval = ql_24xx_read_flash(ha,
5421 ha->flash_data_addr | startpos >> 2, &data);
5422 if (rval != QL_SUCCESS) {
5423 EL(ha, "failed2, rval = %xh\n", rval);
5424 return (rval);
5425 }
5426 bp[0] = LSB(LSW(data));
5427 bp[1] = MSB(LSW(data));
5428 bp[2] = LSB(MSW(data));
5429 bp[3] = MSB(MSW(data));
5430 while (size && startpos & 0x3) {
5431 *dp++ = bp[startpos & 0x3];
5432 startpos++;
5433 size--;
5434 }
5435 if (size == 0) {
5436 QL_PRINT_9(ha, "done2\n",
5437 ha->instance);
5438 return (rval);
5439 }
5440 }
5441
5442 /* adjust 24xx start addr for 32 bit words */
5443 addr = startpos / 4 | ha->flash_data_addr;
5444 }
5445
5446 bzero(&mem, sizeof (dma_mem_t));
5447 /* Check for Fast page is supported */
5448 if ((ha->pha->task_daemon_flags & FIRMWARE_UP) &&
5449 (CFG_IST(ha, CFG_FLASH_DMA_SUPPORT))) {
5450 fp_rval = QL_SUCCESS;
5451 /* Setup DMA buffer. */
5452 rval = ql_get_dma_mem(ha, &mem, size,
5453 LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN);
5454 if (rval != QL_SUCCESS) {
5455 EL(ha, "failed, ql_get_dma_mem=%xh\n",
5456 rval);
5457 return (ENOMEM);
5458 }
5459 } else {
5460 fp_rval = QL_NOT_SUPPORTED;
5461 }
5462
5463 GLOBAL_HW_LOCK();
5464
5465 /* Enable Flash Read/Write. */
5466 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
5467 ql_flash_enable(ha);
5468 }
5469
5470 /* Read fcode data from flash. */
5471 while (size) {
5472 /* Allow other system activity. */
5473 if (size % 0x1000 == 0) {
5474 ql_delay(ha, 10000);
5475 }
5476 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
5477 if (fp_rval == QL_SUCCESS && (addr & 0x3f) == 0) {
5478 cnt = (size + 3) >> 2;
5479 fp_rval = ql_rd_risc_ram(ha, addr,
5480 mem.cookie.dmac_laddress, cnt);
5481 if (fp_rval == QL_SUCCESS) {
5482 for (src = mem.bp; size; size--) {
5483 *dp++ = *src++;
5484 }
5485 addr += cnt;
5486 continue;
5487 }
5488 }
5489 rval = ql_24xx_read_flash(ha, addr++,
5490 &data);
5491 if (rval != QL_SUCCESS) {
5492 break;
5493 }
5494 bp[0] = LSB(LSW(data));
5495 bp[1] = MSB(LSW(data));
5496 bp[2] = LSB(MSW(data));
5497 bp[3] = MSB(MSW(data));
5498 for (cnt = 0; size && cnt < 4; size--) {
5499 *dp++ = bp[cnt++];
5500 }
5501 } else {
5502 *dp++ = (uint8_t)ql_read_flash_byte(ha, startpos++);
5503 size--;
5504 }
5505 }
5506
5507 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
5508 ql_flash_disable(ha);
5509 }
5510
5511 GLOBAL_HW_UNLOCK();
5512
5513 if (mem.dma_handle != NULL) {
5514 ql_free_dma_resource(ha, &mem);
5515 }
5516
5517 if (rval != QL_SUCCESS) {
5518 EL(ha, "failed, rval = %xh\n", rval);
5519 } else {
5520 /*EMPTY*/
5521 QL_PRINT_9(ha, "done\n");
5522 }
5523 return (rval);
5524 }
5525
5526 /*
5527 * ql_program_flash_address
5528 * Program flash address.
5529 *
5530 * Input:
5531 * ha: adapter state pointer.
5532 * addr: flash byte address.
5533 * data: data to be written to flash.
5534 *
5535 * Returns:
5536 * ql local function return status code.
5537 *
5538 * Context:
5539 * Kernel context.
5540 */
5541 static int
5563 }
5564 return (rval);
5565 }
5566
5567 /*
5568 * ql_set_rnid_parameters
5569 * Set RNID parameters.
5570 *
5571 * Input:
5572 * ha: adapter state pointer.
5573 * cmd: User space CT arguments pointer.
5574 * mode: flags.
5575 */
5576 static void
5577 ql_set_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5578 {
5579 EXT_SET_RNID_REQ tmp_set;
5580 EXT_RNID_DATA *tmp_buf;
5581 int rval = 0;
5582
5583 QL_PRINT_9(ha, "started\n");
5584
5585 if (DRIVER_SUSPENDED(ha)) {
5586 EL(ha, "failed, LOOP_NOT_READY\n");
5587 cmd->Status = EXT_STATUS_BUSY;
5588 cmd->ResponseLen = 0;
5589 return;
5590 }
5591
5592 cmd->ResponseLen = 0; /* NO response to caller. */
5593 if (cmd->RequestLen != sizeof (EXT_SET_RNID_REQ)) {
5594 /* parameter error */
5595 EL(ha, "failed, RequestLen < EXT_SET_RNID_REQ, Len=%xh\n",
5596 cmd->RequestLen);
5597 cmd->Status = EXT_STATUS_INVALID_PARAM;
5598 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
5599 cmd->ResponseLen = 0;
5600 return;
5601 }
5602
5603 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &tmp_set,
5628 cmd->ResponseLen = 0;
5629 return;
5630 }
5631
5632 /* Now set the requested params. */
5633 bcopy(tmp_set.IPVersion, tmp_buf->IPVersion, 2);
5634 bcopy(tmp_set.UDPPortNumber, tmp_buf->UDPPortNumber, 2);
5635 bcopy(tmp_set.IPAddress, tmp_buf->IPAddress, 16);
5636
5637 rval = ql_set_rnid_params(ha, sizeof (EXT_RNID_DATA),
5638 (caddr_t)tmp_buf);
5639 if (rval != QL_SUCCESS) {
5640 /* error */
5641 EL(ha, "failed, set_rnid_params_mbx=%xh\n", rval);
5642 cmd->Status = EXT_STATUS_ERR;
5643 cmd->ResponseLen = 0;
5644 }
5645
5646 kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5647
5648 QL_PRINT_9(ha, "done\n");
5649 }
5650
5651 /*
5652 * ql_get_rnid_parameters
5653 * Get RNID parameters.
5654 *
5655 * Input:
5656 * ha: adapter state pointer.
5657 * cmd: User space CT arguments pointer.
5658 * mode: flags.
5659 */
5660 static void
5661 ql_get_rnid_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5662 {
5663 EXT_RNID_DATA *tmp_buf;
5664 uint32_t rval;
5665
5666 QL_PRINT_9(ha, "started\n");
5667
5668 if (DRIVER_SUSPENDED(ha)) {
5669 EL(ha, "failed, LOOP_NOT_READY\n");
5670 cmd->Status = EXT_STATUS_BUSY;
5671 cmd->ResponseLen = 0;
5672 return;
5673 }
5674
5675 /* Allocate memory for command. */
5676 tmp_buf = kmem_zalloc(sizeof (EXT_RNID_DATA), KM_SLEEP);
5677 if (tmp_buf == NULL) {
5678 EL(ha, "failed, kmem_zalloc\n");
5679 cmd->Status = EXT_STATUS_NO_MEMORY;
5680 cmd->ResponseLen = 0;
5681 return;
5682 }
5683
5684 /* Send command */
5685 rval = ql_get_rnid_params(ha, sizeof (EXT_RNID_DATA),
5686 (caddr_t)tmp_buf);
5687 if (rval != QL_SUCCESS) {
5688 /* error */
5689 EL(ha, "failed, get_rnid_params_mbx=%xh\n", rval);
5690 kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5691 cmd->Status = EXT_STATUS_ERR;
5692 cmd->ResponseLen = 0;
5693 return;
5694 }
5695
5696 /* Copy the response */
5697 if (ql_send_buffer_data((caddr_t)tmp_buf,
5698 (caddr_t)(uintptr_t)cmd->ResponseAdr,
5699 sizeof (EXT_RNID_DATA), mode) != sizeof (EXT_RNID_DATA)) {
5700 EL(ha, "failed, ddi_copyout\n");
5701 cmd->Status = EXT_STATUS_COPY_ERR;
5702 cmd->ResponseLen = 0;
5703 } else {
5704 QL_PRINT_9(ha, "done\n");
5705 cmd->ResponseLen = sizeof (EXT_RNID_DATA);
5706 }
5707
5708 kmem_free(tmp_buf, sizeof (EXT_RNID_DATA));
5709 }
5710
5711 /*
5712 * ql_reset_statistics
5713 * Performs EXT_SC_RST_STATISTICS subcommand. of EXT_CC_SET_DATA.
5714 *
5715 * Input:
5716 * ha: adapter state pointer.
5717 * cmd: Local EXT_IOCTL cmd struct pointer.
5718 *
5719 * Returns:
 *	ql local function return status code; request status is also
 *	indicated in cmd->Status.
5721 *
5722 * Context:
5723 * Kernel context.
5724 */
5725 static int
5726 ql_reset_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
5727 {
5728 ql_xioctl_t *xp = ha->xioctl;
5729 int rval = 0;
5730
5731 QL_PRINT_9(ha, "started\n");
5732
5733 if (DRIVER_SUSPENDED(ha)) {
5734 EL(ha, "failed, LOOP_NOT_READY\n");
5735 cmd->Status = EXT_STATUS_BUSY;
5736 cmd->ResponseLen = 0;
5737 return (QL_FUNCTION_SUSPENDED);
5738 }
5739
5740 rval = ql_reset_link_status(ha);
5741 if (rval != QL_SUCCESS) {
5742 EL(ha, "failed, reset_link_status_mbx=%xh\n", rval);
5743 cmd->Status = EXT_STATUS_MAILBOX;
5744 cmd->DetailStatus = rval;
5745 cmd->ResponseLen = 0;
5746 }
5747
5748 TASK_DAEMON_LOCK(ha);
5749 xp->IosRequested = 0;
5750 xp->BytesRequested = 0;
5751 xp->IOInputRequests = 0;
5752 xp->IOOutputRequests = 0;
5753 xp->IOControlRequests = 0;
5754 xp->IOInputMByteCnt = 0;
5755 xp->IOOutputMByteCnt = 0;
5756 xp->IOOutputByteCnt = 0;
5757 xp->IOInputByteCnt = 0;
5758 TASK_DAEMON_UNLOCK(ha);
5759
5760 INTR_LOCK(ha);
5761 xp->ControllerErrorCount = 0;
5762 xp->DeviceErrorCount = 0;
5763 xp->TotalLipResets = 0;
5764 xp->TotalInterrupts = 0;
5765 INTR_UNLOCK(ha);
5766
5767 QL_PRINT_9(ha, "done\n");
5768
5769 return (rval);
5770 }
5771
5772 /*
5773 * ql_get_statistics
5774 * Performs EXT_SC_GET_STATISTICS subcommand. of EXT_CC_GET_DATA.
5775 *
5776 * Input:
5777 * ha: adapter state pointer.
5778 * cmd: Local EXT_IOCTL cmd struct pointer.
5779 * mode: flags.
5780 *
5781 * Returns:
5782 * None, request status indicated in cmd->Status.
5783 *
5784 * Context:
5785 * Kernel context.
5786 */
5787 static void
5788 ql_get_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5789 {
5790 EXT_HBA_PORT_STAT ps = {0};
5791 ql_link_stats_t *ls;
5792 int rval;
5793 ql_xioctl_t *xp = ha->xioctl;
5794 int retry = 10;
5795
5796 QL_PRINT_9(ha, "started\n");
5797
5798 while (ha->task_daemon_flags &
5799 (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5800 ql_delay(ha, 10000000); /* 10 second delay */
5801
5802 retry--;
5803
5804 if (retry == 0) { /* effectively 100 seconds */
5805 EL(ha, "failed, LOOP_NOT_READY\n");
5806 cmd->Status = EXT_STATUS_BUSY;
5807 cmd->ResponseLen = 0;
5808 return;
5809 }
5810 }
5811
5812 /* Allocate memory for command. */
5813 ls = kmem_zalloc(sizeof (ql_link_stats_t), KM_SLEEP);
5814 if (ls == NULL) {
5815 EL(ha, "failed, kmem_zalloc\n");
5816 cmd->Status = EXT_STATUS_NO_MEMORY;
5844 ps.LossOfSyncCount = LE_32(ls->sync_loss_cnt);
5845 ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5846 ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5847 ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5848 ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5849
5850 rval = ddi_copyout((void *)&ps,
5851 (void *)(uintptr_t)cmd->ResponseAdr,
5852 sizeof (EXT_HBA_PORT_STAT), mode);
5853 if (rval != 0) {
5854 EL(ha, "failed, ddi_copyout\n");
5855 cmd->Status = EXT_STATUS_COPY_ERR;
5856 cmd->ResponseLen = 0;
5857 } else {
5858 cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5859 }
5860 }
5861
5862 kmem_free(ls, sizeof (ql_link_stats_t));
5863
5864 QL_PRINT_9(ha, "done\n");
5865 }
5866
5867 /*
5868 * ql_get_statistics_fc
5869 * Performs EXT_SC_GET_FC_STATISTICS subcommand. of EXT_CC_GET_DATA.
5870 *
5871 * Input:
5872 * ha: adapter state pointer.
5873 * cmd: Local EXT_IOCTL cmd struct pointer.
5874 * mode: flags.
5875 *
5876 * Returns:
5877 * None, request status indicated in cmd->Status.
5878 *
5879 * Context:
5880 * Kernel context.
5881 */
5882 static void
5883 ql_get_statistics_fc(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5884 {
5885 EXT_HBA_PORT_STAT ps = {0};
5886 ql_link_stats_t *ls;
5887 int rval;
5888 uint16_t qlnt;
5889 EXT_DEST_ADDR pextdestaddr;
5890 uint8_t *name;
5891 ql_tgt_t *tq = NULL;
5892 int retry = 10;
5893
5894 QL_PRINT_9(ha, "started\n");
5895
5896 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
5897 (void *)&pextdestaddr, sizeof (EXT_DEST_ADDR), mode) != 0) {
5898 EL(ha, "failed, ddi_copyin\n");
5899 cmd->Status = EXT_STATUS_COPY_ERR;
5900 cmd->ResponseLen = 0;
5901 return;
5902 }
5903
5904 qlnt = QLNT_PORT;
5905 name = pextdestaddr.DestAddr.WWPN;
5906
5907 QL_PRINT_9(ha, "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
5908 ha->instance, name[0], name[1], name[2], name[3], name[4],
5909 name[5], name[6], name[7]);
5910
5911 tq = ql_find_port(ha, name, qlnt);
5912
5913 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
5914 EL(ha, "failed, fc_port not found\n");
5915 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
5916 cmd->ResponseLen = 0;
5917 return;
5918 }
5919
5920 while (ha->task_daemon_flags &
5921 (ABORT_ISP_ACTIVE | LOOP_RESYNC_ACTIVE | DRIVER_STALL)) {
5922 ql_delay(ha, 10000000); /* 10 second delay */
5923
5924 retry--;
5925
5926 if (retry == 0) { /* effectively 100 seconds */
5927 EL(ha, "failed, LOOP_NOT_READY\n");
5954 ps.LossOfSignalsCount = LE_32(ls->signal_loss_cnt);
5955 ps.PrimitiveSeqProtocolErrorCount = LE_32(ls->prot_err_cnt);
5956 ps.InvalidTransmissionWordCount = LE_32(ls->inv_xmit_cnt);
5957 ps.InvalidCRCCount = LE_32(ls->inv_crc_cnt);
5958
5959 rval = ddi_copyout((void *)&ps,
5960 (void *)(uintptr_t)cmd->ResponseAdr,
5961 sizeof (EXT_HBA_PORT_STAT), mode);
5962
5963 if (rval != 0) {
5964 EL(ha, "failed, ddi_copyout\n");
5965 cmd->Status = EXT_STATUS_COPY_ERR;
5966 cmd->ResponseLen = 0;
5967 } else {
5968 cmd->ResponseLen = sizeof (EXT_HBA_PORT_STAT);
5969 }
5970 }
5971
5972 kmem_free(ls, sizeof (ql_link_stats_t));
5973
5974 QL_PRINT_9(ha, "done\n");
5975 }
5976
5977 /*
5978 * ql_get_statistics_fc4
 *	Performs EXT_SC_GET_FC4_STATISTICS subcommand of EXT_CC_GET_DATA.
5980 *
5981 * Input:
5982 * ha: adapter state pointer.
5983 * cmd: Local EXT_IOCTL cmd struct pointer.
5984 * mode: flags.
5985 *
5986 * Returns:
5987 * None, request status indicated in cmd->Status.
5988 *
5989 * Context:
5990 * Kernel context.
5991 */
5992 static void
5993 ql_get_statistics_fc4(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
5994 {
5995 uint32_t rval;
5996 EXT_HBA_FC4STATISTICS fc4stats = {0};
5997 ql_xioctl_t *xp = ha->xioctl;
5998
5999 QL_PRINT_9(ha, "started\n");
6000
6001 fc4stats.InputRequests = xp->IOInputRequests;
6002 fc4stats.OutputRequests = xp->IOOutputRequests;
6003 fc4stats.ControlRequests = xp->IOControlRequests;
6004 fc4stats.InputMegabytes = xp->IOInputMByteCnt;
6005 fc4stats.OutputMegabytes = xp->IOOutputMByteCnt;
6006
6007 rval = ddi_copyout((void *)&fc4stats,
6008 (void *)(uintptr_t)cmd->ResponseAdr,
6009 sizeof (EXT_HBA_FC4STATISTICS), mode);
6010
6011 if (rval != 0) {
6012 EL(ha, "failed, ddi_copyout\n");
6013 cmd->Status = EXT_STATUS_COPY_ERR;
6014 cmd->ResponseLen = 0;
6015 } else {
6016 cmd->ResponseLen = sizeof (EXT_HBA_FC4STATISTICS);
6017 }
6018
6019 QL_PRINT_9(ha, "done\n");
6020 }
6021
6022 /*
6023 * ql_set_led_state
6024 * Performs EXT_SET_BEACON_STATE subcommand of EXT_CC_SET_DATA.
6025 *
6026 * Input:
6027 * ha: adapter state pointer.
6028 * cmd: Local EXT_IOCTL cmd struct pointer.
6029 * mode: flags.
6030 *
6031 * Returns:
6032 * None, request status indicated in cmd->Status.
6033 *
6034 * Context:
6035 * Kernel context.
6036 */
6037 static void
6038 ql_set_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6039 {
6040 EXT_BEACON_CONTROL bstate;
6041 int rval;
6042 ql_mbx_data_t mr;
6043
6044 QL_PRINT_9(ha, "started\n");
6045
6046 if (cmd->RequestLen < sizeof (EXT_BEACON_CONTROL)) {
6047 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6048 cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
6049 EL(ha, "done - failed, RequestLen < EXT_BEACON_CONTROL,"
6050 " Len=%xh\n", cmd->RequestLen);
6051 cmd->ResponseLen = 0;
6052 return;
6053 }
6054
6055 if (!CFG_IST(ha, CFG_SET_LEDS_SUPPORT)) {
6056 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
6057 cmd->DetailStatus = 0;
6058 EL(ha, "done - failed, Invalid function for HBA model\n");
6059 cmd->ResponseLen = 0;
6060 return;
6061 }
6062
6063 rval = ddi_copyin((void*)(uintptr_t)cmd->RequestAdr, &bstate,
6064 cmd->RequestLen, mode);
6065
6066 if (rval != 0) {
6067 cmd->Status = EXT_STATUS_COPY_ERR;
6068 EL(ha, "done - failed, ddi_copyin\n");
6069 return;
6070 }
6071
6072 switch (bstate.State) {
6073 case EXT_DEF_GRN_BLINK_OFF: /* turn beacon off */
6074 if (ha->ledstate.BeaconState == BEACON_OFF) {
6075 /* not quite an error -- LED state is already off */
6076 cmd->Status = EXT_STATUS_OK;
6077 EL(ha, "LED off request -- LED is already off\n");
6078 break;
6079 }
6080
6081 if (CFG_IST(ha, CFG_CTRL_82XX)) {
6082 rval = ql_diag_beacon(ha, QL_BEACON_DISABLE,
6083 &mr);
6084
6085 if (rval == QL_SUCCESS) {
6086 ha->ledstate.BeaconState = BEACON_OFF;
6087 ha->ledstate.LEDflags = LED_ALL_OFF;
6088 cmd->Status = EXT_STATUS_OK;
6089 } else {
6090 cmd->Status = EXT_STATUS_ERR;
6091 EL(ha, "failed, disable beacon request %xh\n",
6092 bstate.State);
6093 }
6094 break;
6095 }
6096
6097 ha->ledstate.BeaconState = BEACON_OFF;
6098 ha->ledstate.LEDflags = LED_ALL_OFF;
6099
6100 if ((rval = ql_wrapup_led(ha)) != QL_SUCCESS) {
6101 cmd->Status = EXT_STATUS_MAILBOX;
6102 } else {
6103 cmd->Status = EXT_STATUS_OK;
6104 }
6105 break;
6106
6107 case EXT_DEF_GRN_BLINK_ON: /* turn beacon on */
6108 if (ha->ledstate.BeaconState == BEACON_ON) {
6109 /* not quite an error -- LED state is already on */
6110 cmd->Status = EXT_STATUS_OK;
6111 EL(ha, "LED on request - LED is already on\n");
6112 break;
6113 }
6114
6115 if (CFG_IST(ha, CFG_CTRL_82XX)) {
6116 rval = ql_diag_beacon(ha, QL_BEACON_ENABLE,
6117 &mr);
6118
6119 if (rval == QL_SUCCESS) {
6120 ha->ledstate.BeaconState = BEACON_ON;
6121 ha->ledstate.LEDflags = LED_GREEN;
6122 cmd->Status = EXT_STATUS_OK;
6123 } else {
6124 cmd->Status = EXT_STATUS_ERR;
6125 EL(ha, "failed, enable beacon request %xh\n",
6126 bstate.State);
6127 }
6128 break;
6129 }
6130
6131 if ((rval = ql_setup_led(ha)) != QL_SUCCESS) {
6132 cmd->Status = EXT_STATUS_MAILBOX;
6133 break;
6134 }
6135
6136 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
6137 ha->ledstate.LEDflags = LED_YELLOW_24 | LED_AMBER_24;
6138 } else {
6139 ha->ledstate.LEDflags = LED_GREEN;
6140 }
6141 ha->ledstate.BeaconState = BEACON_ON;
6142
6143 cmd->Status = EXT_STATUS_OK;
6144 break;
6145 default:
6146 cmd->Status = EXT_STATUS_ERR;
6147 EL(ha, "failed, unknown state request %xh\n", bstate.State);
6148 break;
6149 }
6150
6151 QL_PRINT_9(ha, "done\n");
6152 }
6153
6154 /*
6155 * ql_get_led_state
6156 * Performs EXT_GET_BEACON_STATE subcommand of EXT_CC_GET_DATA.
6157 *
6158 * Input:
6159 * ha: adapter state pointer.
6160 * cmd: Local EXT_IOCTL cmd struct pointer.
6161 * mode: flags.
6162 *
6163 * Returns:
6164 * None, request status indicated in cmd->Status.
6165 *
6166 * Context:
6167 * Kernel context.
6168 */
6169 static void
6170 ql_get_led_state(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6171 {
6172 EXT_BEACON_CONTROL bstate = {0};
6173 uint32_t rval;
6174
6175 QL_PRINT_9(ha, "started\n");
6176
6177 if (cmd->ResponseLen < sizeof (EXT_BEACON_CONTROL)) {
6178 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6179 cmd->DetailStatus = sizeof (EXT_BEACON_CONTROL);
6180 EL(ha, "done - failed, ResponseLen < EXT_BEACON_CONTROL,"
6181 "Len=%xh\n", cmd->ResponseLen);
6182 cmd->ResponseLen = 0;
6183 return;
6184 }
6185
6186 if (!CFG_IST(ha, CFG_SET_LEDS_SUPPORT)) {
6187 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
6188 cmd->DetailStatus = 0;
6189 EL(ha, "done - failed, Invalid function for HBA model\n");
6190 cmd->ResponseLen = 0;
6191 return;
6192 }
6193
6194 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
6195 cmd->Status = EXT_STATUS_BUSY;
6196 EL(ha, "done - failed, isp abort active\n");
6197 cmd->ResponseLen = 0;
6198 return;
6199 }
6200
6201 /* inform the user of the current beacon state (off or on) */
6202 bstate.State = ha->ledstate.BeaconState;
6203
6204 rval = ddi_copyout((void *)&bstate,
6205 (void *)(uintptr_t)cmd->ResponseAdr,
6206 sizeof (EXT_BEACON_CONTROL), mode);
6207
6208 if (rval != 0) {
6209 EL(ha, "failed, ddi_copyout\n");
6210 cmd->Status = EXT_STATUS_COPY_ERR;
6211 cmd->ResponseLen = 0;
6212 } else {
6213 cmd->Status = EXT_STATUS_OK;
6214 cmd->ResponseLen = sizeof (EXT_BEACON_CONTROL);
6215 }
6216
6217 QL_PRINT_9(ha, "done\n");
6218 }
6219
6220 /*
6221 * ql_blink_led
6222 * Determine the next state of the LED and drive it
6223 *
6224 * Input:
6225 * ha: adapter state pointer.
6226 *
6227 * Context:
6228 * Interrupt context.
6229 */
void
ql_blink_led(ql_adapter_state_t *ha)
{
	uint32_t		nextstate;
	ql_mbx_data_t		mr;

	QL_PRINT_9(ha, "started\n");

	/* Only animate the LEDs while beaconing has been requested. */
	if (ha->ledstate.BeaconState == BEACON_ON) {
		if (CFG_IST(ha, CFG_CTRL_2363 | CFG_CTRL_2425)) {
			/*
			 * GPIO-driven LEDs: determine the next led state
			 * by masking the requested LEDflags with the
			 * complement of the bits currently asserted in
			 * the GPIO data register, so each pass drives
			 * only the LEDs that are presently off.
			 */
			if (CFG_IST(ha, CFG_CTRL_2425)) {
				/* 24xx/25xx: 32-bit gpiod register. */
				nextstate = (ha->ledstate.LEDflags) &
				    (~(RD32_IO_REG(ha, gpiod)));
			} else {
				/* 23xx/63xx: 16-bit gpiod register. */
				nextstate = (ha->ledstate.LEDflags) &
				    (~(RD16_IO_REG(ha, gpiod)));
			}

			/* turn the led on or off */
			ql_drive_led(ha, nextstate);
		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
			/*
			 * 81xx: blink by swapping the two LED config
			 * mailbox words each call; LED_ACTIVE tracks
			 * which phase we are in.  Word values are
			 * f/w-defined -- see ql_set_led_config.
			 */
			if (ha->ledstate.flags & LED_ACTIVE) {
				mr.mb[1] = 0x2000;
				mr.mb[2] = 0x4000;
				ha->ledstate.flags &= ~LED_ACTIVE;
			} else {
				mr.mb[1] = 0x4000;
				mr.mb[2] = 0x2000;
				ha->ledstate.flags |= LED_ACTIVE;
			}
			(void) ql_set_led_config(ha, &mr);
		} else if (CFG_IST(ha, CFG_CTRL_80XX)) {
			/*
			 * 80xx: six f/w-defined LED config words;
			 * mb[2]/mb[4] alternate between phases to
			 * produce the blink.
			 */
			if (ha->ledstate.flags & LED_ACTIVE) {
				mr.mb[1] = 0x4000;
				mr.mb[2] = 0x2000;
				mr.mb[3] = 0x4000;
				mr.mb[4] = 0x4000;
				mr.mb[5] = 0;
				mr.mb[6] = 0x2000;
				(void) ql_set_led_config(ha, &mr);
				ha->ledstate.flags &= ~LED_ACTIVE;
			} else {
				mr.mb[1] = 0x4000;
				mr.mb[2] = 0x4000;
				mr.mb[3] = 0x4000;
				mr.mb[4] = 0x2000;
				mr.mb[5] = 0;
				mr.mb[6] = 0x2000;
				(void) ql_set_led_config(ha, &mr);
				ha->ledstate.flags |= LED_ACTIVE;
			}
		} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
			/*
			 * 83xx: toggle the LED via two writes to the
			 * remote register pair at ledstate.select.
			 * 0x40004000 vs 0x40002000 select the two blink
			 * phases (values are h/w-defined).
			 */
			if (ha->ledstate.flags & LED_ACTIVE) {
				(void) ql_write_remote_reg(ha,
				    ha->ledstate.select,
				    0x40004000);
				(void) ql_write_remote_reg(ha,
				    ha->ledstate.select + 4,
				    0x40004000);
				ha->ledstate.flags &= ~LED_ACTIVE;
			} else {
				(void) ql_write_remote_reg(ha,
				    ha->ledstate.select,
				    0x40002000);
				(void) ql_write_remote_reg(ha,
				    ha->ledstate.select + 4,
				    0x40002000);
				ha->ledstate.flags |= LED_ACTIVE;
			}
		} else if (!CFG_IST(ha, CFG_CTRL_27XX)) {
			/*
			 * 27xx needs no per-tick action here
			 * (presumably blinked elsewhere -- see
			 * ql_setup_led); anything else is unsupported.
			 */
			EL(ha, "unsupported HBA: %xh\n", ha->device_id);
		}
	}

	QL_PRINT_9(ha, "done\n");
}
6307
6308 /*
6309 * ql_drive_led
6310 * drive the led's as determined by LEDflags
6311 *
6312 * Input:
6313 * ha: adapter state pointer.
6314 * LEDflags: LED flags
6315 *
6316 * Context:
6317 * Kernel/Interrupt context.
6318 */
static void
ql_drive_led(ql_adapter_state_t *ha, uint32_t LEDflags)
{
	QL_PRINT_9(ha, "started\n");

	if (CFG_IST(ha, CFG_CTRL_2363)) {
		/* 23xx/63xx: LEDs live in the 16-bit GPIO registers. */

		uint16_t	gpio_enable, gpio_data;

		/* setup to send new data: enable the LED output pins */
		gpio_enable = (uint16_t)RD16_IO_REG(ha, gpioe);
		gpio_enable = (uint16_t)(gpio_enable | LED_MASK);
		WRT16_IO_REG(ha, gpioe, gpio_enable);

		/* read current data and clear out old led data */
		gpio_data = (uint16_t)RD16_IO_REG(ha, gpiod);
		gpio_data = (uint16_t)(gpio_data & ~LED_MASK);

		/* set in the new led data. */
		gpio_data = (uint16_t)(gpio_data | LEDflags);

		/* write out the new led data */
		WRT16_IO_REG(ha, gpiod, gpio_data);

	} else if (CFG_IST(ha, CFG_CTRL_2425)) {
		/* 24xx/25xx: LEDs live in the 32-bit GPIO data register. */
		uint32_t	gpio_data;

		/* setup to send new data: set the LED update mask bits */
		gpio_data = RD32_IO_REG(ha, gpiod);
		gpio_data |= LED_MASK_UPDATE_24;
		WRT32_IO_REG(ha, gpiod, gpio_data);

		/* read current data and clear out old led data */
		gpio_data = RD32_IO_REG(ha, gpiod);
		gpio_data &= ~LED_MASK_COLORS_24;

		/* set in the new led data */
		gpio_data |= LEDflags;

		/* write out the new led data */
		WRT32_IO_REG(ha, gpiod, gpio_data);

	} else {
		/* Other chip families do not drive LEDs via GPIO. */
		EL(ha, "unsupported HBA: %xh\n", ha->device_id);
	}

	QL_PRINT_9(ha, "done\n");
}
6367
6368 /*
6369 * ql_setup_led
6370 * Setup LED for driver control
6371 *
6372 * Input:
6373 * ha: adapter state pointer.
6374 *
6375 * Context:
6376 * Kernel/Interrupt context.
6377 */
static int
ql_setup_led(ql_adapter_state_t *ha)
{
	int		rval = QL_SUCCESS;
	ql_mbx_data_t	mr;

	QL_PRINT_9(ha, "started\n");

	if (CFG_IST(ha, CFG_CTRL_2363 | CFG_CTRL_2425)) {
		/* decouple the LED control from the fw */
		rval = ql_get_firmware_option(ha, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, get_firmware_option=%xh\n", rval);
			return (rval);
		}

		/* set the appropriate options: GPIO now driver-owned */
		mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_GPIO);

		/* send it back to the firmware */
		rval = ql_set_firmware_option(ha, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, set_firmware_option=%xh\n", rval);
			return (rval);
		}

		/* initially, turn the LED's off */
		ql_drive_led(ha, LED_ALL_OFF);

	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/*
		 * 81xx: save the current LED config for restoration by
		 * ql_wrapup_led, then load the driver's f/w-defined
		 * blink words.  NOTE(review): unlike the 80xx path
		 * below, the ql_get_led_config return is not checked.
		 */
		(void) ql_get_led_config(ha, &ha->ledstate.cfg);
		mr.mb[1] = 0x2000;
		mr.mb[2] = 0x2000;
		rval = ql_set_led_config(ha, &mr);

	} else if (CFG_IST(ha, CFG_CTRL_80XX)) {
		/* Save initial value */
		rval = ql_get_led_config(ha, &ha->ledstate.cfg);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, get_led_config=%xh\n", rval);
			return (rval);
		}
		/* Load the 80xx f/w-defined LED config words. */
		mr.mb[1] = 0x4000;
		mr.mb[2] = 0x4000;
		mr.mb[3] = 0x4000;
		mr.mb[4] = 0x2000;
		mr.mb[5] = 0;
		mr.mb[6] = 0x2000;
		rval = ql_set_led_config(ha, &mr);

	} else if (CFG_IST(ha, CFG_CTRL_83XX)) {
		/* 83xx: take LED control away from the firmware. */
		rval = ql_get_firmware_option(ha, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, get_firmware_option=%xh\n", rval);
			return (rval);
		}

		mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_LEDS);

		rval = ql_set_firmware_option(ha, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, set_firmware_option=%xh\n", rval);
			return (rval);
		}

		/* Prime the remote LED register pair (h/w-defined). */
		(void) ql_write_remote_reg(ha, ha->ledstate.select,
		    0x40002000);
		(void) ql_write_remote_reg(ha, ha->ledstate.select + 4,
		    0x40002000);

	} else if (CFG_IST(ha, CFG_CTRL_27XX)) {
		/* take control of LED */
		rval = ql_get_firmware_option(ha, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, get_firmware_option=%xh\n", rval);
			return (rval);
		}

		mr.mb[1] = (uint16_t)(mr.mb[1] | FO1_DISABLE_LEDS);

		rval = ql_set_firmware_option(ha, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, set_firmware_option=%xh\n", rval);
			return (rval);
		}

		/* 27xx LED config words (f/w-defined values). */
		mr.mb[1] = 0xf;
		mr.mb[2] = 0x230;
		mr.mb[3] = 0x230;
		mr.mb[4] = 0x4000;
		rval = ql_led_config(ha, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, led_config=%xh\n", rval);
			return (rval);
		}
	} else {
		EL(ha, "unsupported HBA: %xh\n", ha->device_id);
	}
	/* Start the blink cycle in the "active" phase. */
	ha->ledstate.flags |= LED_ACTIVE;

	QL_PRINT_9(ha, "done\n");

	return (rval);
}
6482
6483 /*
6484 * ql_wrapup_led
6485 * Return LED control to the firmware
6486 *
6487 * Input:
6488 * ha: adapter state pointer.
6489 *
6490 * Context:
6491 * Kernel/Interrupt context.
6492 */
static int
ql_wrapup_led(ql_adapter_state_t *ha)
{
	int		rval = QL_SUCCESS;
	ql_mbx_data_t	mr;

	QL_PRINT_9(ha, "started\n");


	if (CFG_IST(ha, CFG_CTRL_2363 | CFG_CTRL_2425)) {
		uint32_t	gpio_data;

		/* Turn all LED's off */
		ql_drive_led(ha, LED_ALL_OFF);

		if (CFG_IST(ha, CFG_CTRL_2425)) {
			/* disable the LED update mask */
			gpio_data = RD32_IO_REG(ha, gpiod);
			gpio_data &= ~LED_MASK_UPDATE_24;

			/* write out the data */
			WRT32_IO_REG(ha, gpiod, gpio_data);
		}
		/*
		 * Give GPIO control back to the f/w by clearing the
		 * firmware option bit set in ql_setup_led.
		 */
		rval = ql_get_firmware_option(ha, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, get_firmware_option=%xh\n", rval);
			return (rval);
		}

		mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_GPIO);

		rval = ql_set_firmware_option(ha, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, set_firmware_option=%xh\n", rval);
			return (rval);
		}
	} else if (CFG_IST(ha, CFG_CTRL_8081)) {
		/* Restore the LED config saved by ql_setup_led. */
		rval = ql_set_led_config(ha, &ha->ledstate.cfg);

	} else if (CFG_IST(ha, CFG_CTRL_2783)) {
		/* give LED control back to the f/w */
		rval = ql_get_firmware_option(ha, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, get_firmware_option=%xh\n", rval);
			return (rval);
		}

		mr.mb[1] = (uint16_t)(mr.mb[1] & ~FO1_DISABLE_LEDS);

		rval = ql_set_firmware_option(ha, &mr);
		if (rval != QL_SUCCESS) {
			EL(ha, "failed, set_firmware_option=%xh\n", rval);
			return (rval);
		}

	} else {
		EL(ha, "unsupported HBA: %xh\n", ha->device_id);
	}

	QL_PRINT_9(ha, "done\n");

	return (rval);
}
6557
6558 /*
6559 * ql_get_port_summary
6560 * Performs EXT_SC_GET_PORT_SUMMARY subcommand. of EXT_CC_GET_DATA.
6561 *
6562 * The EXT_IOCTL->RequestAdr points to a single
6563 * UINT32 which identifies the device type.
6564 *
6565 * Input:
6566 * ha: adapter state pointer.
6567 * cmd: Local EXT_IOCTL cmd struct pointer.
6568 * mode: flags.
6569 *
6570 * Returns:
6571 * None, request status indicated in cmd->Status.
6572 *
6573 * Context:
6574 * Kernel context.
6575 */
6576 static void
6577 ql_get_port_summary(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6578 {
6579 EXT_DEVICEDATA dd = {0};
6580 EXT_DEVICEDATA *uddp;
6581 ql_link_t *link;
6582 ql_tgt_t *tq;
6583 uint32_t rlen, dev_type, index;
6584 int rval = 0;
6585 EXT_DEVICEDATAENTRY *uddep, *ddep;
6586
6587 QL_PRINT_9(ha, "started\n");
6588
6589 ddep = &dd.EntryList[0];
6590
6591 /*
6592 * Get the type of device the requestor is looking for.
6593 *
6594 * We ignore this for now.
6595 */
6596 rval = ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6597 (void *)&dev_type, sizeof (dev_type), mode);
6598 if (rval != 0) {
6599 cmd->Status = EXT_STATUS_COPY_ERR;
6600 cmd->ResponseLen = 0;
6601 EL(ha, "failed, ddi_copyin\n");
6602 return;
6603 }
6604 /*
6605 * Count the number of entries to be returned. Count devices
6606 * that are offlline, but have been persistently bound.
6607 */
6626 } else {
6627 rlen = (uint32_t)(sizeof (EXT_DEVICEDATA) +
6628 (sizeof (EXT_DEVICEDATAENTRY) * (dd.TotalDevices - 1)));
6629 }
6630 if (rlen > cmd->ResponseLen) {
6631 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
6632 cmd->DetailStatus = rlen;
6633 EL(ha, "failed, rlen > ResponseLen, rlen=%d, Len=%d\n",
6634 rlen, cmd->ResponseLen);
6635 cmd->ResponseLen = 0;
6636 return;
6637 }
6638 cmd->ResponseLen = 0;
6639 uddp = (EXT_DEVICEDATA *)(uintptr_t)cmd->ResponseAdr;
6640 uddep = &uddp->EntryList[0];
6641 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
6642 for (link = ha->dev[index].first; link != NULL;
6643 link = link->next) {
6644 tq = link->base_address;
6645 if (tq->flags & TQF_INITIATOR_DEVICE ||
6646 !VALID_TARGET_ID(ha, tq->loop_id) ||
6647 tq->d_id.b24 == FS_MANAGEMENT_SERVER) {
6648 continue; /* Skip this one */
6649 }
6650
6651 bzero((void *)ddep, sizeof (EXT_DEVICEDATAENTRY));
6652
6653 bcopy(tq->node_name, ddep->NodeWWN, 8);
6654 bcopy(tq->port_name, ddep->PortWWN, 8);
6655
6656 ddep->PortID[0] = tq->d_id.b.domain;
6657 ddep->PortID[1] = tq->d_id.b.area;
6658 ddep->PortID[2] = tq->d_id.b.al_pa;
6659
6660 bcopy(tq->port_name,
6661 (caddr_t)&ddep->TargetAddress.Target, 8);
6662
6663 ddep->DeviceFlags = tq->flags;
6664 ddep->LoopID = tq->loop_id;
6665 QL_PRINT_9(ha, "Tgt=%lld, loop=%xh, "
6666 "wwnn=%02x%02x%02x%02x%02x%02x%02x%02x, "
6667 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6668 ha->instance, ddep->TargetAddress.Target,
6669 ddep->LoopID, ddep->NodeWWN[0], ddep->NodeWWN[1],
6670 ddep->NodeWWN[2], ddep->NodeWWN[3],
6671 ddep->NodeWWN[4], ddep->NodeWWN[5],
6672 ddep->NodeWWN[6], ddep->NodeWWN[7],
6673 ddep->PortWWN[0], ddep->PortWWN[1],
6674 ddep->PortWWN[2], ddep->PortWWN[3],
6675 ddep->PortWWN[4], ddep->PortWWN[5],
6676 ddep->PortWWN[6], ddep->PortWWN[7]);
6677 rval = ddi_copyout((void *)ddep, (void *)uddep,
6678 sizeof (EXT_DEVICEDATAENTRY), mode);
6679
6680 if (rval != 0) {
6681 cmd->Status = EXT_STATUS_COPY_ERR;
6682 cmd->ResponseLen = 0;
6683 EL(ha, "failed, ddi_copyout\n");
6684 break;
6685 }
6686 dd.ReturnListEntryCount++;
6687 uddep++;
6688 cmd->ResponseLen += (uint32_t)
6689 sizeof (EXT_DEVICEDATAENTRY);
6690 }
6691 }
6692 rval = ddi_copyout((void *)&dd, (void *)uddp,
6693 sizeof (EXT_DEVICEDATA) - sizeof (EXT_DEVICEDATAENTRY), mode);
6694
6695 if (rval != 0) {
6696 cmd->Status = EXT_STATUS_COPY_ERR;
6697 cmd->ResponseLen = 0;
6698 EL(ha, "failed, ddi_copyout-2\n");
6699 } else {
6700 cmd->ResponseLen += (uint32_t)sizeof (EXT_DEVICEDATAENTRY);
6701 QL_PRINT_9(ha, "done\n");
6702 }
6703 }
6704
6705 /*
6706 * ql_get_target_id
6707 * Performs EXT_SC_GET_TARGET_ID subcommand. of EXT_CC_GET_DATA.
6708 *
6709 * Input:
6710 * ha: adapter state pointer.
6711 * cmd: Local EXT_IOCTL cmd struct pointer.
6712 * mode: flags.
6713 *
6714 * Returns:
6715 * None, request status indicated in cmd->Status.
6716 *
6717 * Context:
6718 * Kernel context.
6719 */
6720 static void
6721 ql_get_target_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
6722 {
6723 uint32_t rval;
6724 uint16_t qlnt;
6725 EXT_DEST_ADDR extdestaddr = {0};
6726 uint8_t *name;
6727 uint8_t wwpn[EXT_DEF_WWN_NAME_SIZE];
6728 ql_tgt_t *tq;
6729
6730 QL_PRINT_9(ha, "started\n");
6731
6732 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
6733 (void*)wwpn, sizeof (EXT_DEST_ADDR), mode) != 0) {
6734 EL(ha, "failed, ddi_copyin\n");
6735 cmd->Status = EXT_STATUS_COPY_ERR;
6736 cmd->ResponseLen = 0;
6737 return;
6738 }
6739
6740 qlnt = QLNT_PORT;
6741 name = wwpn;
6742 QL_PRINT_9(ha, "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
6743 ha->instance, name[0], name[1], name[2], name[3], name[4],
6744 name[5], name[6], name[7]);
6745
6746 tq = ql_find_port(ha, name, qlnt);
6747 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id)) {
6748 EL(ha, "failed, fc_port not found\n");
6749 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
6750 cmd->ResponseLen = 0;
6751 return;
6752 }
6753
6754 bcopy(tq->port_name, (caddr_t)&extdestaddr.DestAddr.ScsiAddr.Target, 8);
6755
6756 rval = ddi_copyout((void *)&extdestaddr,
6757 (void *)(uintptr_t)cmd->ResponseAdr, sizeof (EXT_DEST_ADDR), mode);
6758 if (rval != 0) {
6759 EL(ha, "failed, ddi_copyout\n");
6760 cmd->Status = EXT_STATUS_COPY_ERR;
6761 cmd->ResponseLen = 0;
6762 }
6763
6764 QL_PRINT_9(ha, "done\n");
6765 }
6766
6767 /*
6768 * ql_setup_fcache
6769 * Populates selected flash sections into the cache
6770 *
6771 * Input:
6772 * ha = adapter state pointer.
6773 *
6774 * Returns:
6775 * ql local function return status code.
6776 *
6777 * Context:
6778 * Kernel context.
6779 *
6780 * Note:
6781 * Driver must be in stalled state prior to entering or
6782 * add code to this function prior to calling ql_setup_flash()
6783 */
6784 int
6785 ql_setup_fcache(ql_adapter_state_t *ha)
6786 {
6787 int rval;
6788 uint32_t freadpos = 0;
6789 uint32_t fw_done = 0;
6790 ql_fcache_t *head = NULL;
6791 ql_fcache_t *tail = NULL;
6792 ql_fcache_t *ftmp;
6793
6794 QL_PRINT_10(ha, "started cfg=0x%llx\n", ha->cfg_flags);
6795
6796 /* If we already have populated it, rtn */
6797 if (ha->fcache != NULL) {
6798 EL(ha, "buffer already populated\n");
6799 return (QL_SUCCESS);
6800 }
6801
6802 ql_flash_nvram_defaults(ha);
6803
6804 if ((rval = ql_setup_flash(ha)) != QL_SUCCESS) {
6805 EL(ha, "unable to setup flash; rval=%xh\n", rval);
6806 return (rval);
6807 }
6808
6809 while (freadpos != 0xffffffff) {
6810 /* Allocate & populate this node */
6811 if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6812 EL(ha, "node alloc failed\n");
6813 rval = QL_FUNCTION_FAILED;
6814 break;
6815 }
6816
6817 /* link in the new node */
6818 if (head == NULL) {
6819 head = tail = ftmp;
6820 } else {
6821 tail->next = ftmp;
6822 tail = ftmp;
6823 }
6824
6825 /* Do the firmware node first for 24xx/25xx's */
6826 if (fw_done == 0) {
6827 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
6828 freadpos = ha->flash_fw_addr << 2;
6829 }
6830 fw_done = 1;
6831 }
6832
6833 if ((rval = ql_dump_fcode(ha, ftmp->buf, FBUFSIZE,
6834 freadpos)) != QL_SUCCESS) {
6835 EL(ha, "failed, 24xx dump_fcode"
6836 " pos=%xh rval=%xh\n", freadpos, rval);
6837 rval = QL_FUNCTION_FAILED;
6838 break;
6839 }
6840
6841 /* checkout the pci data / format */
6842 if (ql_check_pci(ha, ftmp, &freadpos)) {
6843 EL(ha, "flash header incorrect\n");
6844 rval = QL_FUNCTION_FAILED;
6845 break;
6846 }
6847 }
6848
6849 if (rval != QL_SUCCESS) {
6850 /* release all resources we have */
6851 ftmp = head;
6852 while (ftmp != NULL) {
6853 tail = ftmp->next;
6854 kmem_free(ftmp->buf, FBUFSIZE);
6855 kmem_free(ftmp, sizeof (ql_fcache_t));
6856 ftmp = tail;
6857 }
6858
6859 EL(ha, "failed, done\n");
6860 } else {
6861 ha->fcache = head;
6862 QL_PRINT_10(ha, "done\n");
6863 }
6864
6865 return (rval);
6866 }
6867
6868 /*
6869 * ql_update_fcache
6870 * re-populates updated flash into the fcache. If
6871 * fcache does not exist (e.g., flash was empty/invalid on
6872 * boot), this routine will create and the populate it.
6873 *
6874 * Input:
6875 * ha = adapter state pointer.
6876 * *bpf = Pointer to flash buffer.
6877 * bsize = Size of flash buffer.
6878 *
6879 * Returns:
6880 *
6881 * Context:
6882 * Kernel context.
6883 */
6884 void
6885 ql_update_fcache(ql_adapter_state_t *ha, uint8_t *bfp, uint32_t bsize)
6886 {
6887 int rval = QL_SUCCESS;
6888 uint32_t freadpos = 0;
6889 uint32_t fw_done = 0;
6890 ql_fcache_t *head = NULL;
6891 ql_fcache_t *tail = NULL;
6892 ql_fcache_t *ftmp;
6893
6894 QL_PRINT_3(ha, "started\n");
6895
6896 while (freadpos != 0xffffffff) {
6897
6898 /* Allocate & populate this node */
6899
6900 if ((ftmp = ql_setup_fnode(ha)) == NULL) {
6901 EL(ha, "node alloc failed\n");
6902 rval = QL_FUNCTION_FAILED;
6903 break;
6904 }
6905
6906 /* link in the new node */
6907 if (head == NULL) {
6908 head = tail = ftmp;
6909 } else {
6910 tail->next = ftmp;
6911 tail = ftmp;
6912 }
6913
6914 /* Do the firmware node first for 24xx's */
6915 if (fw_done == 0) {
6916 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
6917 freadpos = ha->flash_fw_addr << 2;
6918 }
6919 fw_done = 1;
6920 }
6921
6922 /* read in first FBUFSIZE bytes of this flash section */
6923 if (freadpos + FBUFSIZE > bsize) {
6924 EL(ha, "passed buffer too small; fr=%xh, bsize=%xh\n",
6925 freadpos, bsize);
6926 rval = QL_FUNCTION_FAILED;
6927 break;
6928 }
6929 bcopy(bfp + freadpos, ftmp->buf, FBUFSIZE);
6930
6931 /* checkout the pci data / format */
6932 if (ql_check_pci(ha, ftmp, &freadpos)) {
6933 EL(ha, "flash header incorrect\n");
6934 rval = QL_FUNCTION_FAILED;
6935 break;
6936 }
6937 }
6938
6939 if (rval != QL_SUCCESS) {
6940 /*
6941 * release all resources we have
6942 */
6943 ql_fcache_rel(head);
6944 EL(ha, "failed, done\n");
6945 } else {
6946 /*
6947 * Release previous fcache resources and update with new
6948 */
6949 ql_fcache_rel(ha->fcache);
6950 ha->fcache = head;
6951
6952 QL_PRINT_3(ha, "done\n");
6953 }
6954 }
6955
6956 /*
6957 * ql_setup_fnode
6958 * Allocates fcache node
6959 *
6960 * Input:
6961 * ha = adapter state pointer.
 *	node = pointer to allocated fcache node (NULL = failed)
6963 *
6964 * Returns:
6965 *
6966 * Context:
6967 * Kernel context.
6968 *
6969 * Note:
6970 * Driver must be in stalled state prior to entering or
6971 * add code to this function prior to calling ql_setup_flash()
6972 */
7020 }
7021 }
7022
7023 /*
7024 * ql_update_flash_caches
7025 * Updates driver flash caches
7026 *
7027 * Input:
7028 * ha: adapter state pointer.
7029 *
7030 * Context:
7031 * Kernel context.
7032 */
7033 static void
7034 ql_update_flash_caches(ql_adapter_state_t *ha)
7035 {
7036 uint32_t len;
7037 ql_link_t *link;
7038 ql_adapter_state_t *ha2;
7039
7040 QL_PRINT_3(ha, "started\n");
7041
7042 /* Get base path length. */
7043 for (len = (uint32_t)strlen(ha->devpath); len; len--) {
7044 if (ha->devpath[len] == ',' ||
7045 ha->devpath[len] == '@') {
7046 break;
7047 }
7048 }
7049
7050 /* Reset fcache on all adapter instances. */
7051 for (link = ql_hba.first; link != NULL; link = link->next) {
7052 ha2 = link->base_address;
7053
7054 if (strncmp(ha->devpath, ha2->devpath, len) != 0) {
7055 continue;
7056 }
7057
7058 ql_fcache_rel(ha2->fcache);
7059 ha2->fcache = NULL;
7060
7061 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
7062 if (ha2->vcache != NULL) {
7063 kmem_free(ha2->vcache, QL_24XX_VPD_SIZE);
7064 ha2->vcache = NULL;
7065 }
7066 }
7067
7068 (void) ql_setup_fcache(ha2);
7069 }
7070
7071 QL_PRINT_3(ha, "done\n");
7072 }
7073
7074 /*
7075 * ql_get_fbuf
7076 * Search the fcache list for the type specified
7077 *
7078 * Input:
7079 * fptr = Pointer to fcache linked list
7080 * ftype = Type of image to be returned.
7081 *
7082 * Returns:
7083 * Pointer to ql_fcache_t.
7084 * NULL means not found.
7085 *
7086 * Context:
7087 * Kernel context.
7088 *
7089 *
7090 */
7091 ql_fcache_t *
7111 * the flash does not have one (!!!).
7112 *
7113 * On successful pci check, nextpos adjusted to next pci header.
7114 *
7115 * Returns:
7116 * -1 --> last pci image
7117 * 0 --> pci header valid
7118 * 1 --> pci header invalid.
7119 *
7120 * Context:
7121 * Kernel context.
7122 */
static int
ql_check_pci(ql_adapter_state_t *ha, ql_fcache_t *fcache, uint32_t *nextpos)
{
	pci_header_t	*pcih;
	pci_data_t	*pcid;
	uint32_t	doff;
	uint8_t		*pciinfo;

	QL_PRINT_3(ha, "started\n");

	/* Guard against a null node or a node with no data buffer. */
	if (fcache != NULL) {
		pciinfo = fcache->buf;
	} else {
		EL(ha, "failed, null fcache ptr passed\n");
		return (1);
	}

	if (pciinfo == NULL) {
		EL(ha, "failed, null pciinfo ptr passed\n");
		return (1);
	}

	if (CFG_IST(ha, CFG_SBUS_CARD)) {
		caddr_t	bufp;
		uint_t	len;

		/* SBUS cards carry fcode; the first byte is its tag. */
		if (pciinfo[0] != SBUS_CODE_FCODE) {
			EL(ha, "failed, unable to detect sbus fcode\n");
			return (1);
		}
		fcache->type = FTYPE_FCODE;

		/* Version string comes from the "version" property. */
		/*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
		if (ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
		    PROP_LEN_AND_VAL_ALLOC | DDI_PROP_DONTPASS |
		    DDI_PROP_CANSLEEP, "version", (caddr_t)&bufp,
		    (int *)&len) == DDI_PROP_SUCCESS) {

			(void) snprintf(fcache->verstr,
			    FCHBA_OPTION_ROM_VERSION_LEN, "%s", bufp);
			kmem_free(bufp, len);
		}

		/* Single image on SBUS; terminate the scan. */
		*nextpos = 0xffffffff;

		QL_PRINT_3(ha, "CFG_SBUS_CARD, done\n");

		return (0);
	}

	if (*nextpos == ha->flash_fw_addr << 2) {

		pci_header_t	fwh = {0};
		pci_data_t	fwd = {0};
		uint8_t		*buf, *bufp;

		/*
		 * Build a pci header for the firmware module
		 */
		if ((buf = (uint8_t *)(kmem_zalloc(FBUFSIZE, KM_SLEEP))) ==
		    NULL) {
			EL(ha, "failed, unable to allocate buffer\n");
			return (1);
		}

		fwh.signature[0] = PCI_HEADER0;
		/*
		 * NOTE(review): the remaining fwh/fwd signature and
		 * offset field initialization appears to be elided in
		 * this copy of the file -- verify against upstream.
		 */
		fwd.codetype = PCI_CODE_FW;
		fwd.pcidatalen[0] = LSB(sizeof (pci_data_t));
		fwd.pcidatalen[1] = MSB(sizeof (pci_data_t));

		/* Prepend the synthetic header + data, then the fw. */
		bufp = buf;
		bcopy(&fwh, bufp, sizeof (pci_header_t));
		bufp += sizeof (pci_header_t);
		bcopy(&fwd, bufp, sizeof (pci_data_t));
		bufp += sizeof (pci_data_t);

		bcopy(fcache->buf, bufp, (FBUFSIZE - sizeof (pci_header_t) -
		    sizeof (pci_data_t)));
		bcopy(buf, fcache->buf, FBUFSIZE);

		fcache->type = FTYPE_FW;

		/* Firmware version bytes live at fixed buf offsets. */
		(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
		    "%d.%02d.%02d", fcache->buf[19], fcache->buf[23],
		    fcache->buf[27]);

		/* Next section to read is the boot code region. */
		*nextpos = ha->boot_code_addr << 2;
		kmem_free(buf, FBUFSIZE);

		QL_PRINT_3(ha, "FTYPE_FW, done\n");

		return (0);
	}

	/* get to the pci header image length */
	pcih = (pci_header_t *)pciinfo;

	doff = pcih->dataoffset[0] | (pcih->dataoffset[1] << 8);

	/* some header section sanity check */
	if (pcih->signature[0] != PCI_HEADER0 ||
	    pcih->signature[1] != PCI_HEADER1 || doff > 50) {
		EL(ha, "buffer format error: s0=%xh, s1=%xh, off=%xh\n",
		    pcih->signature[0], pcih->signature[1], doff);
		return (1);
	}

	pcid = (pci_data_t *)(pciinfo + doff);

	/* a slight sanity data section check */
	if (pcid->signature[0] != 'P' || pcid->signature[1] != 'C' ||
	    pcid->signature[2] != 'I' || pcid->signature[3] != 'R') {
		EL(ha, "failed, data sig mismatch!\n");
		return (1);
	}

	if (pcid->indicator == PCI_IND_LAST_IMAGE) {
		QL_PRINT_3(ha, "last image\n");
		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			/* Flash layout table follows the last image. */
			ql_flash_layout_table(ha, *nextpos +
			    (pcid->imagelength[0] | (pcid->imagelength[1] <<
			    8)) * PCI_SECTOR_SIZE);
			(void) ql_24xx_flash_desc(ha);
		}
		*nextpos = 0xffffffff;
	} else {
		/* adjust the next flash read start position */
		*nextpos += (pcid->imagelength[0] |
		    (pcid->imagelength[1] << 8)) * PCI_SECTOR_SIZE;
	}

	/* Map the PCI code type to the driver's image type. */
	switch (pcid->codetype) {
	case PCI_CODE_X86PC:
		fcache->type = FTYPE_BIOS;
		break;
	case PCI_CODE_FCODE:
		fcache->type = FTYPE_FCODE;
		break;
	case PCI_CODE_EFI:
		fcache->type = FTYPE_EFI;
		break;
	case PCI_CODE_HPPA:
		fcache->type = FTYPE_HPPA;
		break;
	default:
		fcache->type = FTYPE_UNKNOWN;
		break;
	}

	(void) snprintf(fcache->verstr, FCHBA_OPTION_ROM_VERSION_LEN,
	    "%d.%02d", pcid->revisionlevel[1], pcid->revisionlevel[0]);

	QL_PRINT_3(ha, "done\n");

	return (0);
}
7287
7288 /*
7289 * ql_flash_layout_table
7290 * Obtains flash addresses from table
7291 *
7292 * Input:
7293 * ha: adapter state pointer.
7294 * flt_paddr: flash layout pointer address.
7295 *
7296 * Context:
7297 * Kernel context.
7298 */
7299 static void
7300 ql_flash_layout_table(ql_adapter_state_t *ha, uint32_t flt_paddr)
7301 {
7302 ql_flt_ptr_t *fptr;
7303 uint8_t *bp;
7304 int rval;
7305 uint32_t len, faddr, cnt;
7306 uint16_t chksum, w16;
7307
7308 QL_PRINT_9(ha, "started\n");
7309
7310 /* Process flash layout table header */
7311 len = sizeof (ql_flt_ptr_t);
7312 if ((bp = kmem_zalloc(len, KM_SLEEP)) == NULL) {
7313 EL(ha, "kmem_zalloc=null\n");
7314 return;
7315 }
7316
7317 /* Process pointer to flash layout table */
7318 if ((rval = ql_dump_fcode(ha, bp, len, flt_paddr)) != QL_SUCCESS) {
7319 EL(ha, "fptr dump_flash pos=%xh, status=%xh\n", flt_paddr,
7320 rval);
7321 kmem_free(bp, len);
7322 return;
7323 }
7324 fptr = (ql_flt_ptr_t *)bp;
7325
7326 /* Verify pointer to flash layout table. */
7327 for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
7328 w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
7329 chksum += w16;
7330 }
7331 if (chksum != 0 || fptr->sig[0] != 'Q' || fptr->sig[1] != 'F' ||
7332 fptr->sig[2] != 'L' || fptr->sig[3] != 'T') {
7333 EL(ha, "ptr chksum=%xh, sig=%c%c%c%c \n",
7334 chksum, fptr->sig[0],
7335 fptr->sig[1], fptr->sig[2], fptr->sig[3]);
7336 kmem_free(bp, len);
7337 return;
7338 }
7339 faddr = CHAR_TO_LONG(fptr->addr[0], fptr->addr[1], fptr->addr[2],
7340 fptr->addr[3]);
7341
7342 kmem_free(bp, len);
7343
7344 ql_process_flt(ha, faddr);
7345
7346 QL_PRINT_9(ha, "done\n");
7347 }
7348
7349 /*
7350 * ql_process_flt
7351 * Obtains flash addresses from flash layout table
7352 *
7353 * Input:
7354 * ha: adapter state pointer.
7355 * faddr: flash layout table byte address.
7356 *
7357 * Context:
7358 * Kernel context.
7359 */
7360 static void
7361 ql_process_flt(ql_adapter_state_t *ha, uint32_t faddr)
7362 {
7363 ql_flt_hdr_t *fhdr;
7364 ql_flt_region_t *frgn;
7365 uint8_t *bp, *eaddr, nv_rg, vpd_rg;
7366 int rval;
7367 uint32_t len, cnt, fe_addr;
7368 uint16_t chksum, w16;
7369
7370 QL_PRINT_9(ha, "started faddr=%xh\n", faddr);
7371
7372 /* Process flash layout table header */
7373 if ((bp = kmem_zalloc(FLASH_LAYOUT_TABLE_SIZE, KM_SLEEP)) == NULL) {
7374 EL(ha, "kmem_zalloc=null\n");
7375 return;
7376 }
7377 fhdr = (ql_flt_hdr_t *)bp;
7378
7379 /* Process flash layout table. */
7380 if ((rval = ql_dump_fcode(ha, bp, FLASH_LAYOUT_TABLE_SIZE, faddr)) !=
7381 QL_SUCCESS) {
7382 EL(ha, "fhdr dump_flash pos=%xh, status=%xh\n", faddr, rval);
7383 kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7384 return;
7385 }
7386
7387 /* Verify flash layout table. */
7388 len = (uint32_t)(CHAR_TO_SHORT(fhdr->len[0], fhdr->len[1]) +
7389 sizeof (ql_flt_hdr_t) + sizeof (ql_flt_region_t));
7390 if (len > FLASH_LAYOUT_TABLE_SIZE) {
7391 chksum = 0xffff;
7392 } else {
7393 for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
7394 w16 = (uint16_t)CHAR_TO_SHORT(bp[cnt], bp[cnt + 1]);
7395 chksum += w16;
7396 }
7397 }
7398 w16 = CHAR_TO_SHORT(fhdr->version[0], fhdr->version[1]);
7399 if (chksum != 0 || w16 != 1) {
7400 EL(ha, "table chksum=%xh, version=%d\n", chksum, w16);
7401 kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7402 return;
7403 }
7404 eaddr = bp + len;
7405
7406 /* Process Function/Port Configuration Map. */
7407 nv_rg = vpd_rg = 0;
7408 if (CFG_IST(ha, CFG_CTRL_82XX)) {
7409 uint16_t i;
7410 uint8_t *mbp = eaddr;
7411 ql_fp_cfg_map_t *cmp = (ql_fp_cfg_map_t *)mbp;
7412
7413 len = (uint32_t)(CHAR_TO_SHORT(cmp->hdr.len[0],
7414 cmp->hdr.len[1]));
7415 if (len > FLASH_LAYOUT_TABLE_SIZE) {
7416 chksum = 0xffff;
7417 } else {
7418 for (chksum = 0, cnt = 0; cnt < len; cnt += 2) {
7419 w16 = (uint16_t)CHAR_TO_SHORT(mbp[cnt],
7420 mbp[cnt + 1]);
7421 chksum += w16;
7422 }
7423 }
7424 w16 = CHAR_TO_SHORT(cmp->hdr.version[0], cmp->hdr.version[1]);
7425 if (chksum != 0 || w16 != 1 ||
7426 cmp->hdr.Signature[0] != 'F' ||
7427 cmp->hdr.Signature[1] != 'P' ||
7428 cmp->hdr.Signature[2] != 'C' ||
7429 cmp->hdr.Signature[3] != 'M') {
7430 EL(ha, "cfg_map chksum=%xh, version=%d, "
7431 "sig=%c%c%c%c \n", chksum, w16,
7432 cmp->hdr.Signature[0], cmp->hdr.Signature[1],
7433 cmp->hdr.Signature[2], cmp->hdr.Signature[3]);
7434 } else {
7435 cnt = (uint16_t)
7436 (CHAR_TO_SHORT(cmp->hdr.NumberEntries[0],
7437 cmp->hdr.NumberEntries[1]));
7438 /* Locate entry for function. */
7439 for (i = 0; i < cnt; i++) {
7440 if (cmp->cfg[i].FunctionType == FT_FC &&
7441 cmp->cfg[i].FunctionNumber[0] ==
7442 ha->pci_function_number &&
7443 cmp->cfg[i].FunctionNumber[1] == 0) {
7444 nv_rg = cmp->cfg[i].ConfigRegion;
7445 vpd_rg = cmp->cfg[i].VpdRegion;
7446 break;
7447 }
7448 }
7449
7450 if (nv_rg == 0 || vpd_rg == 0) {
7451 EL(ha, "cfg_map nv_rg=%d, vpd_rg=%d\n", nv_rg,
7452 vpd_rg);
7453 nv_rg = vpd_rg = 0;
7454 }
7455 }
7456 }
7457
7458 /* Process flash layout table regions */
7459 for (frgn = (ql_flt_region_t *)(bp + sizeof (ql_flt_hdr_t));
7460 (uint8_t *)frgn < eaddr; frgn++) {
7461 faddr = CHAR_TO_LONG(frgn->beg_addr[0], frgn->beg_addr[1],
7462 frgn->beg_addr[2], frgn->beg_addr[3]);
7463 faddr >>= 2;
7464 fe_addr = CHAR_TO_LONG(frgn->end_addr[0], frgn->end_addr[1],
7465 frgn->end_addr[2], frgn->end_addr[3]);
7466 fe_addr >>= 2;
7467
7468 switch (frgn->region) {
7469 case FLASH_8021_BOOTLOADER_REGION:
7470 ha->bootloader_addr = faddr;
7471 ha->bootloader_size = (fe_addr - faddr) + 1;
7472 QL_PRINT_9(ha, "bootloader_addr=%xh, "
7473 "size=%xh\n", faddr,
7474 ha->bootloader_size);
7475 break;
7476 case FLASH_FW_REGION:
7477 case FLASH_8021_FW_REGION:
7478 ha->flash_fw_addr = faddr;
7479 ha->flash_fw_size = (fe_addr - faddr) + 1;
7480 QL_PRINT_9(ha, "flash_fw_addr=%xh, "
7481 "size=%xh\n", faddr,
7482 ha->flash_fw_size);
7483 break;
7484 case FLASH_GOLDEN_FW_REGION:
7485 case FLASH_8021_GOLDEN_FW_REGION:
7486 ha->flash_golden_fw_addr = faddr;
7487 QL_PRINT_9(ha, "flash_golden_fw_addr=%xh\n",
7488 ha->instance, faddr);
7489 break;
7490 case FLASH_8021_VPD_REGION:
7491 if (!vpd_rg || vpd_rg == FLASH_8021_VPD_REGION) {
7492 ha->flash_vpd_addr = faddr;
7493 QL_PRINT_9(ha, "8021_flash_vpd_"
7494 "addr=%xh\n", faddr);
7495 }
7496 break;
7497 case FLASH_VPD_0_REGION:
7498 if (vpd_rg) {
7499 if (vpd_rg == FLASH_VPD_0_REGION) {
7500 ha->flash_vpd_addr = faddr;
7501 QL_PRINT_9(ha, "vpd_rg "
7502 "flash_vpd_addr=%xh\n",
7503 ha->instance, faddr);
7504 }
7505 } else if (ha->function_number == 0 &&
7506 !(CFG_IST(ha, CFG_CTRL_82XX))) {
7507 ha->flash_vpd_addr = faddr;
7508 QL_PRINT_9(ha, "flash_vpd_addr=%xh"
7509 "\n", faddr);
7510 }
7511 break;
7512 case FLASH_NVRAM_0_REGION:
7513 if (nv_rg) {
7514 if (nv_rg == FLASH_NVRAM_0_REGION) {
7515 ADAPTER_STATE_LOCK(ha);
7516 ha->function_number = 0;
7517 ADAPTER_STATE_UNLOCK(ha);
7518 ha->flash_nvram_addr = faddr;
7519 QL_PRINT_9(ha, "nv_rg "
7520 "flash_nvram_addr=%xh\n",
7521 ha->instance, faddr);
7522 }
7523 } else if (ha->function_number == 0) {
7524 ha->flash_nvram_addr = faddr;
7525 QL_PRINT_9(ha, "flash_nvram_addr="
7526 "%xh\n", faddr);
7527 }
7528 break;
7529 case FLASH_VPD_1_REGION:
7530 if (vpd_rg) {
7531 if (vpd_rg == FLASH_VPD_1_REGION) {
7532 ha->flash_vpd_addr = faddr;
7533 QL_PRINT_9(ha, "vpd_rg "
7534 "flash_vpd_addr=%xh\n",
7535 ha->instance, faddr);
7536 }
7537 } else if (ha->function_number &&
7538 !(CFG_IST(ha, CFG_CTRL_82XX))) {
7539 ha->flash_vpd_addr = faddr;
7540 QL_PRINT_9(ha, "flash_vpd_addr=%xh"
7541 "\n", faddr);
7542 }
7543 break;
7544 case FLASH_NVRAM_1_REGION:
7545 if (nv_rg) {
7546 if (nv_rg == FLASH_NVRAM_1_REGION) {
7547 ADAPTER_STATE_LOCK(ha);
7548 ha->function_number = 1;
7549 ADAPTER_STATE_UNLOCK(ha);
7550 ha->flash_nvram_addr = faddr;
7551 QL_PRINT_9(ha, "nv_rg "
7552 "flash_nvram_addr=%xh\n",
7553 ha->instance, faddr);
7554 }
7555 } else if (ha->function_number) {
7556 ha->flash_nvram_addr = faddr;
7557 QL_PRINT_9(ha, "flash_nvram_addr="
7558 "%xh\n", faddr);
7559 }
7560 break;
7561 case FLASH_DESC_TABLE_REGION:
7562 if (!(CFG_IST(ha, CFG_CTRL_82XX))) {
7563 ha->flash_desc_addr = faddr;
7564 QL_PRINT_9(ha, "flash_desc_addr="
7565 "%xh\n", faddr);
7566 }
7567 break;
7568 case FLASH_ERROR_LOG_0_REGION:
7569 if (ha->function_number == 0) {
7570 ha->flash_errlog_start = faddr;
7571 QL_PRINT_9(ha, "flash_errlog_addr="
7572 "%xh\n", faddr);
7573 }
7574 break;
7575 case FLASH_ERROR_LOG_1_REGION:
7576 if (ha->function_number) {
7577 ha->flash_errlog_start = faddr;
7578 QL_PRINT_9(ha, "flash_errlog_addr="
7579 "%xh\n", faddr);
7580 }
7581 break;
7582 default:
7583 break;
7584 }
7585 }
7586 kmem_free(bp, FLASH_LAYOUT_TABLE_SIZE);
7587
7588 QL_PRINT_9(ha, "done\n");
7589 }
7590
7591 /*
7592 * ql_flash_nvram_defaults
7593 * Flash default addresses.
7594 *
7595 * Input:
7596 * ha: adapter state pointer.
7597 *
7598 * Returns:
7599 * ql local function return status code.
7600 *
7601 * Context:
7602 * Kernel context.
7603 */
7604 static void
7605 ql_flash_nvram_defaults(ql_adapter_state_t *ha)
7606 {
7607 QL_PRINT_10(ha, "started\n");
7608
7609 if (ha->function_number == 3) {
7610 if (CFG_IST(ha, CFG_CTRL_27XX)) {
7611 ha->flash_nvram_addr = NVRAM_2700_FUNC3_ADDR;
7612 ha->flash_vpd_addr = VPD_2700_FUNC3_ADDR;
7613 ha->ledstate.select = BEACON_2700_FUNC3_ADDR;
7614 ha->flash_data_addr = FLASH_2700_DATA_ADDR;
7615 ha->flash_desc_addr = FLASH_2700_DESCRIPTOR_TABLE;
7616 ha->flash_fw_addr = FLASH_2700_FIRMWARE_ADDR;
7617 ha->flash_fw_size = FLASH_2700_FIRMWARE_SIZE;
7618 ha->boot_code_addr = FLASH_2700_BOOT_CODE_ADDR;
7619 } else {
7620 EL(ha, "unassigned flash fn%d addr: %x\n",
7621 ha->function_number, ha->device_id);
7622 }
7623 } else if (ha->function_number == 2) {
7624 if (CFG_IST(ha, CFG_CTRL_27XX)) {
7625 ha->flash_nvram_addr = NVRAM_2700_FUNC2_ADDR;
7626 ha->flash_vpd_addr = VPD_2700_FUNC2_ADDR;
7627 ha->ledstate.select = BEACON_2700_FUNC2_ADDR;
7628 ha->flash_data_addr = FLASH_2700_DATA_ADDR;
7629 ha->flash_desc_addr = FLASH_2700_DESCRIPTOR_TABLE;
7630 ha->flash_fw_addr = FLASH_2700_FIRMWARE_ADDR;
7631 ha->flash_fw_size = FLASH_2700_FIRMWARE_SIZE;
7632 ha->boot_code_addr = FLASH_2700_BOOT_CODE_ADDR;
7633 } else {
7634 EL(ha, "unassigned flash fn%d addr: %x\n",
7635 ha->function_number, ha->device_id);
7636 }
7637 } else if (ha->function_number == 1) {
7638 if (CFG_IST(ha, CFG_CTRL_23XX) ||
7639 (CFG_IST(ha, CFG_CTRL_63XX))) {
7640 ha->flash_nvram_addr = NVRAM_2300_FUNC1_ADDR;
7641 ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
7642 ha->boot_code_addr = FLASH_2300_BOOT_CODE_ADDR;
7643 } else if (CFG_IST(ha, CFG_CTRL_24XX)) {
7644 ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7645 ha->flash_nvram_addr = NVRAM_2400_FUNC1_ADDR;
7646 ha->flash_vpd_addr = VPD_2400_FUNC1_ADDR;
7647 ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_1;
7648 ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
7649 ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
7650 ha->boot_code_addr = FLASH_2400_BOOT_CODE_ADDR;
7651 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
7652 ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7653 ha->flash_nvram_addr = NVRAM_2500_FUNC1_ADDR;
7654 ha->flash_vpd_addr = VPD_2500_FUNC1_ADDR;
7655 ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_1;
7656 ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
7657 ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
7658 ha->boot_code_addr = FLASH_2500_BOOT_CODE_ADDR;
7659 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
7660 ha->flash_data_addr = FLASH_8100_DATA_ADDR;
7661 ha->flash_nvram_addr = NVRAM_8100_FUNC1_ADDR;
7662 ha->flash_vpd_addr = VPD_8100_FUNC1_ADDR;
7663 ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_1;
7664 ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7665 ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7666 ha->boot_code_addr = FLASH_8100_BOOT_CODE_ADDR;
7667 } else if (CFG_IST(ha, CFG_CTRL_82XX)) {
7668 ha->flash_data_addr = 0;
7669 ha->flash_nvram_addr = NVRAM_8021_FUNC1_ADDR;
7670 ha->flash_vpd_addr = VPD_8021_FUNC1_ADDR;
7671 ha->flash_errlog_start = 0;
7672 ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
7673 ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
7674 ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
7675 ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
7676 ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
7677 ha->boot_code_addr = FLASH_8021_BOOT_CODE_ADDR;
7678 } else if (CFG_IST(ha, CFG_CTRL_83XX)) {
7679 ha->flash_nvram_addr = NVRAM_8300_FC_FUNC1_ADDR;
7680 ha->flash_vpd_addr = VPD_8300_FC_FUNC1_ADDR;
7681 ha->ledstate.select = BEACON_8300_FC_FUNC1_ADDR;
7682 ha->flash_errlog_start = FLASH_8300_ERRLOG_START_ADDR_1;
7683 ha->flash_data_addr = FLASH_8300_DATA_ADDR;
7684 ha->flash_desc_addr = FLASH_8300_DESCRIPTOR_TABLE;
7685 ha->flash_fw_addr = FLASH_8300_FC_FIRMWARE_ADDR;
7686 ha->flash_fw_size = FLASH_8300_FIRMWARE_SIZE;
7687 ha->bootloader_addr = FLASH_8300_BOOTLOADER_ADDR;
7688 ha->bootloader_size = FLASH_8300_BOOTLOADER_SIZE;
7689 ha->boot_code_addr = FLASH_8300_BOOT_CODE_ADDR;
7690 } else if (CFG_IST(ha, CFG_CTRL_27XX)) {
7691 ha->flash_nvram_addr = NVRAM_2700_FUNC1_ADDR;
7692 ha->flash_vpd_addr = VPD_2700_FUNC1_ADDR;
7693 ha->ledstate.select = BEACON_2700_FUNC1_ADDR;
7694 ha->flash_data_addr = FLASH_2700_DATA_ADDR;
7695 ha->flash_desc_addr = FLASH_2700_DESCRIPTOR_TABLE;
7696 ha->flash_fw_addr = FLASH_2700_FIRMWARE_ADDR;
7697 ha->flash_fw_size = FLASH_2700_FIRMWARE_SIZE;
7698 ha->boot_code_addr = FLASH_2700_BOOT_CODE_ADDR;
7699 } else {
7700 EL(ha, "unassigned flash fn%d addr: %x\n",
7701 ha->function_number, ha->device_id);
7702 }
7703 } else if (ha->function_number == 0) {
7704 if (CFG_IST(ha, CFG_CTRL_22XX)) {
7705 ha->flash_nvram_addr = NVRAM_2200_FUNC0_ADDR;
7706 ha->flash_fw_addr = FLASH_2200_FIRMWARE_ADDR;
7707 ha->boot_code_addr = FLASH_2200_BOOT_CODE_ADDR;
7708 } else if (CFG_IST(ha, CFG_CTRL_23XX) ||
7709 (CFG_IST(ha, CFG_CTRL_63XX))) {
7710 ha->flash_nvram_addr = NVRAM_2300_FUNC0_ADDR;
7711 ha->flash_fw_addr = FLASH_2300_FIRMWARE_ADDR;
7712 ha->boot_code_addr = FLASH_2300_BOOT_CODE_ADDR;
7713 } else if (CFG_IST(ha, CFG_CTRL_24XX)) {
7714 ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7715 ha->flash_nvram_addr = NVRAM_2400_FUNC0_ADDR;
7716 ha->flash_vpd_addr = VPD_2400_FUNC0_ADDR;
7717 ha->flash_errlog_start = FLASH_2400_ERRLOG_START_ADDR_0;
7718 ha->flash_desc_addr = FLASH_2400_DESCRIPTOR_TABLE;
7719 ha->flash_fw_addr = FLASH_2400_FIRMWARE_ADDR;
7720 ha->boot_code_addr = FLASH_2400_BOOT_CODE_ADDR;
7721 } else if (CFG_IST(ha, CFG_CTRL_25XX)) {
7722 ha->flash_data_addr = FLASH_24_25_DATA_ADDR;
7723 ha->flash_nvram_addr = NVRAM_2500_FUNC0_ADDR;
7724 ha->flash_vpd_addr = VPD_2500_FUNC0_ADDR;
7725 ha->flash_errlog_start = FLASH_2500_ERRLOG_START_ADDR_0;
7726 ha->flash_desc_addr = FLASH_2500_DESCRIPTOR_TABLE;
7727 ha->flash_fw_addr = FLASH_2500_FIRMWARE_ADDR;
7728 ha->boot_code_addr = FLASH_2500_BOOT_CODE_ADDR;
7729 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
7730 ha->flash_data_addr = FLASH_8100_DATA_ADDR;
7731 ha->flash_nvram_addr = NVRAM_8100_FUNC0_ADDR;
7732 ha->flash_vpd_addr = VPD_8100_FUNC0_ADDR;
7733 ha->flash_errlog_start = FLASH_8100_ERRLOG_START_ADDR_0;
7734 ha->flash_desc_addr = FLASH_8100_DESCRIPTOR_TABLE;
7735 ha->flash_fw_addr = FLASH_8100_FIRMWARE_ADDR;
7736 ha->boot_code_addr = FLASH_8100_BOOT_CODE_ADDR;
7737 } else if (CFG_IST(ha, CFG_CTRL_82XX)) {
7738 ha->flash_data_addr = 0;
7739 ha->flash_nvram_addr = NVRAM_8021_FUNC0_ADDR;
7740 ha->flash_vpd_addr = VPD_8021_FUNC0_ADDR;
7741 ha->flash_errlog_start = 0;
7742 ha->flash_desc_addr = FLASH_8021_DESCRIPTOR_TABLE;
7743 ha->flash_fw_addr = FLASH_8021_FIRMWARE_ADDR;
7744 ha->flash_fw_size = FLASH_8021_FIRMWARE_SIZE;
7745 ha->bootloader_addr = FLASH_8021_BOOTLOADER_ADDR;
7746 ha->bootloader_size = FLASH_8021_BOOTLOADER_SIZE;
7747 ha->boot_code_addr = FLASH_8021_BOOT_CODE_ADDR;
7748 } else if (CFG_IST(ha, CFG_CTRL_83XX)) {
7749 ha->flash_nvram_addr = NVRAM_8300_FC_FUNC0_ADDR;
7750 ha->flash_vpd_addr = VPD_8300_FC_FUNC0_ADDR;
7751 ha->ledstate.select = BEACON_8300_FCOE_FUNC0_ADDR;
7752 ha->flash_errlog_start = FLASH_8300_ERRLOG_START_ADDR_0;
7753 ha->flash_data_addr = FLASH_8300_DATA_ADDR;
7754 ha->flash_desc_addr = FLASH_8300_DESCRIPTOR_TABLE;
7755 ha->flash_fw_addr = FLASH_8300_FC_FIRMWARE_ADDR;
7756 ha->flash_fw_size = FLASH_8300_FIRMWARE_SIZE;
7757 ha->bootloader_addr = FLASH_8300_BOOTLOADER_ADDR;
7758 ha->bootloader_size = FLASH_8300_BOOTLOADER_SIZE;
7759 ha->boot_code_addr = FLASH_8300_BOOT_CODE_ADDR;
7760 } else if (CFG_IST(ha, CFG_CTRL_27XX)) {
7761 ha->flash_nvram_addr = NVRAM_2700_FUNC0_ADDR;
7762 ha->flash_vpd_addr = VPD_2700_FUNC0_ADDR;
7763 ha->ledstate.select = BEACON_2700_FUNC0_ADDR;
7764 ha->flash_data_addr = FLASH_2700_DATA_ADDR;
7765 ha->flash_desc_addr = FLASH_2700_DESCRIPTOR_TABLE;
7766 ha->flash_fw_addr = FLASH_2700_FIRMWARE_ADDR;
7767 ha->flash_fw_size = FLASH_2700_FIRMWARE_SIZE;
7768 ha->boot_code_addr = FLASH_2700_BOOT_CODE_ADDR;
7769 } else {
7770 EL(ha, "unassigned flash fn%d addr: %x\n",
7771 ha->function_number, ha->device_id);
7772 }
7773 } else {
7774 EL(ha, "known function=%d, device_id=%x\n",
7775 ha->function_number, ha->device_id);
7776 }
7777 QL_PRINT_10(ha, "done\n");
7778 }
7779
7780 /*
7781 * ql_get_sfp
7782 * Returns sfp data to sdmapi caller
7783 *
7784 * Input:
7785 * ha: adapter state pointer.
7786 * cmd: Local EXT_IOCTL cmd struct pointer.
7787 * mode: flags.
7788 *
7789 * Returns:
7790 * None, request status indicated in cmd->Status.
7791 *
7792 * Context:
7793 * Kernel context.
7794 */
7795 static void
7796 ql_get_sfp(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7797 {
7798 QL_PRINT_9(ha, "started\n");
7799
7800 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
7801 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7802 EL(ha, "failed, invalid request for HBA\n");
7803 return;
7804 }
7805
7806 if (cmd->ResponseLen < QL_24XX_SFP_SIZE) {
7807 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
7808 cmd->DetailStatus = QL_24XX_SFP_SIZE;
7809 EL(ha, "failed, ResponseLen < SFP len, len passed=%xh\n",
7810 cmd->ResponseLen);
7811 return;
7812 }
7813
7814 /* Dump SFP data in user buffer */
7815 if ((ql_dump_sfp(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
7816 mode)) != 0) {
7817 cmd->Status = EXT_STATUS_COPY_ERR;
7818 EL(ha, "failed, copy error\n");
7819 } else {
7820 cmd->Status = EXT_STATUS_OK;
7821 }
7822
7823 QL_PRINT_9(ha, "done\n");
7824 }
7825
7826 /*
7827 * ql_dump_sfp
7828 * Dumps SFP.
7829 *
7830 * Input:
7831 * ha: adapter state pointer.
7832 * bp: buffer address.
7833 * mode: flags
7834 *
7835 * Returns:
7836 *
7837 * Context:
7838 * Kernel context.
7839 */
7840 static int
7841 ql_dump_sfp(ql_adapter_state_t *ha, void *bp, int mode)
7842 {
7843 dma_mem_t mem;
7844 uint32_t cnt;
7845 int rval2, rval = 0;
7846 uint32_t dxfer;
7847
7848 QL_PRINT_9(ha, "started\n");
7849
7850 /* Get memory for SFP. */
7851
7852 if ((rval2 = ql_get_dma_mem(ha, &mem, 64, LITTLE_ENDIAN_DMA,
7853 QL_DMA_DATA_ALIGN)) != QL_SUCCESS) {
7854 EL(ha, "failed, ql_get_dma_mem=%xh\n", rval2);
7855 return (ENOMEM);
7856 }
7857
7858 for (cnt = 0; cnt < QL_24XX_SFP_SIZE; cnt += mem.size) {
7859 rval2 = ql_read_sfp(ha, &mem,
7860 (uint16_t)(cnt < 256 ? 0xA0 : 0xA2),
7861 (uint16_t)(cnt & 0xff));
7862 if (rval2 != QL_SUCCESS) {
7863 EL(ha, "failed, read_sfp=%xh\n", rval2);
7864 rval = EFAULT;
7865 break;
7866 }
7867
7868 /* copy the data back */
7869 if ((dxfer = ql_send_buffer_data(mem.bp, bp, mem.size,
7870 mode)) != mem.size) {
7871 /* ddi copy error */
7872 EL(ha, "failed, ddi copy; byte cnt = %xh", dxfer);
7873 rval = EFAULT;
7874 break;
7875 }
7876
7877 /* adjust the buffer pointer */
7878 bp = (caddr_t)bp + mem.size;
7879 }
7880
7881 ql_free_phys(ha, &mem);
7882
7883 QL_PRINT_9(ha, "done\n");
7884
7885 return (rval);
7886 }
7887
7888 /*
7889 * ql_port_param
7890 * Retrieves or sets the firmware port speed settings
7891 *
7892 * Input:
7893 * ha: adapter state pointer.
7894 * cmd: Local EXT_IOCTL cmd struct pointer.
7895 * mode: flags.
7896 *
7897 * Returns:
7898 * None, request status indicated in cmd->Status.
7899 *
7900 * Context:
7901 * Kernel context.
7902 *
7903 */
7904 static void
7905 ql_port_param(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
7906 {
7907 uint8_t *name;
7908 ql_tgt_t *tq;
7909 EXT_PORT_PARAM port_param = {0};
7910 uint32_t rval = QL_SUCCESS;
7911 uint32_t idma_rate;
7912
7913 QL_PRINT_9(ha, "started\n");
7914
7915 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
7916 EL(ha, "invalid request for this HBA\n");
7917 cmd->Status = EXT_STATUS_INVALID_REQUEST;
7918 cmd->ResponseLen = 0;
7919 return;
7920 }
7921
7922 if (LOOP_NOT_READY(ha)) {
7923 EL(ha, "failed, loop not ready\n");
7924 cmd->Status = EXT_STATUS_DEVICE_OFFLINE;
7925 cmd->ResponseLen = 0;
7926 return;
7927 }
7928
7929 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
7930 (void*)&port_param, sizeof (EXT_PORT_PARAM), mode) != 0) {
7931 EL(ha, "failed, ddi_copyin\n");
7932 cmd->Status = EXT_STATUS_COPY_ERR;
7933 cmd->ResponseLen = 0;
7934 return;
7935 }
7936
7937 if (port_param.FCScsiAddr.DestType != EXT_DEF_DESTTYPE_WWPN) {
7938 EL(ha, "Unsupported dest lookup type: %xh\n",
7939 port_param.FCScsiAddr.DestType);
7940 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7941 cmd->ResponseLen = 0;
7942 return;
7943 }
7944
7945 name = port_param.FCScsiAddr.DestAddr.WWPN;
7946
7947 QL_PRINT_9(ha, "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x\n",
7948 ha->instance, name[0], name[1], name[2], name[3], name[4],
7949 name[5], name[6], name[7]);
7950
7951 tq = ql_find_port(ha, name, (uint16_t)QLNT_PORT);
7952 if (tq == NULL || !VALID_TARGET_ID(ha, tq->loop_id) ||
7953 tq->d_id.b24 == FS_MANAGEMENT_SERVER) {
7954 EL(ha, "failed, fc_port not found\n");
7955 cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
7956 cmd->ResponseLen = 0;
7957 return;
7958 }
7959
7960 cmd->Status = EXT_STATUS_OK;
7961 cmd->DetailStatus = EXT_STATUS_OK;
7962
7963 switch (port_param.Mode) {
7964 case EXT_IIDMA_MODE_GET:
7965 /*
7966 * Report the firmware's port rate for the wwpn
7967 */
7968 rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
7969 port_param.Mode);
7970
7971 if (rval != QL_SUCCESS) {
7972 EL(ha, "iidma get failed: %xh\n", rval);
7973 cmd->Status = EXT_STATUS_MAILBOX;
7978 case IIDMA_RATE_1GB:
7979 port_param.Speed =
7980 EXT_DEF_PORTSPEED_1GBIT;
7981 break;
7982 case IIDMA_RATE_2GB:
7983 port_param.Speed =
7984 EXT_DEF_PORTSPEED_2GBIT;
7985 break;
7986 case IIDMA_RATE_4GB:
7987 port_param.Speed =
7988 EXT_DEF_PORTSPEED_4GBIT;
7989 break;
7990 case IIDMA_RATE_8GB:
7991 port_param.Speed =
7992 EXT_DEF_PORTSPEED_8GBIT;
7993 break;
7994 case IIDMA_RATE_10GB:
7995 port_param.Speed =
7996 EXT_DEF_PORTSPEED_10GBIT;
7997 break;
7998 case IIDMA_RATE_16GB:
7999 port_param.Speed =
8000 EXT_DEF_PORTSPEED_16GBIT;
8001 break;
8002 case IIDMA_RATE_32GB:
8003 port_param.Speed =
8004 EXT_DEF_PORTSPEED_32GBIT;
8005 break;
8006 default:
8007 port_param.Speed =
8008 EXT_DEF_PORTSPEED_UNKNOWN;
8009 EL(ha, "failed, Port speed rate=%xh\n",
8010 idma_rate);
8011 break;
8012 }
8013
8014 /* Copy back the data */
8015 rval = ddi_copyout((void *)&port_param,
8016 (void *)(uintptr_t)cmd->ResponseAdr,
8017 sizeof (EXT_PORT_PARAM), mode);
8018
8019 if (rval != 0) {
8020 cmd->Status = EXT_STATUS_COPY_ERR;
8021 cmd->ResponseLen = 0;
8022 EL(ha, "failed, ddi_copyout\n");
8023 } else {
8024 cmd->ResponseLen = (uint32_t)
8025 sizeof (EXT_PORT_PARAM);
8028 break;
8029
8030 case EXT_IIDMA_MODE_SET:
8031 /*
8032 * Set the firmware's port rate for the wwpn
8033 */
8034 switch (port_param.Speed) {
8035 case EXT_DEF_PORTSPEED_1GBIT:
8036 idma_rate = IIDMA_RATE_1GB;
8037 break;
8038 case EXT_DEF_PORTSPEED_2GBIT:
8039 idma_rate = IIDMA_RATE_2GB;
8040 break;
8041 case EXT_DEF_PORTSPEED_4GBIT:
8042 idma_rate = IIDMA_RATE_4GB;
8043 break;
8044 case EXT_DEF_PORTSPEED_8GBIT:
8045 idma_rate = IIDMA_RATE_8GB;
8046 break;
8047 case EXT_DEF_PORTSPEED_10GBIT:
8048 idma_rate = IIDMA_RATE_10GB;
8049 break;
8050 case EXT_DEF_PORTSPEED_16GBIT:
8051 idma_rate = IIDMA_RATE_16GB;
8052 break;
8053 case EXT_DEF_PORTSPEED_32GBIT:
8054 idma_rate = IIDMA_RATE_32GB;
8055 break;
8056 default:
8057 EL(ha, "invalid set iidma rate: %x\n",
8058 port_param.Speed);
8059 cmd->Status = EXT_STATUS_INVALID_PARAM;
8060 cmd->ResponseLen = 0;
8061 rval = QL_PARAMETER_ERROR;
8062 break;
8063 }
8064
8065 if (rval == QL_SUCCESS) {
8066 rval = ql_iidma_rate(ha, tq->loop_id, &idma_rate,
8067 port_param.Mode);
8068 if (rval != QL_SUCCESS) {
8069 EL(ha, "iidma set failed: %xh\n", rval);
8070 cmd->Status = EXT_STATUS_MAILBOX;
8071 cmd->DetailStatus = rval;
8072 cmd->ResponseLen = 0;
8073 }
8074 }
8075 break;
8076 default:
8077 EL(ha, "invalid mode specified: %x\n", port_param.Mode);
8078 cmd->Status = EXT_STATUS_INVALID_PARAM;
8079 cmd->ResponseLen = 0;
8080 cmd->DetailStatus = 0;
8081 break;
8082 }
8083
8084 QL_PRINT_9(ha, "done\n");
8085 }
8086
8087 /*
8088 * ql_get_fwexttrace
8089 * Dumps f/w extended trace buffer
8090 *
8091 * Input:
8092 * ha: adapter state pointer.
8093 * bp: buffer address.
8094 * mode: flags
8095 *
8096 * Returns:
8097 *
8098 * Context:
8099 * Kernel context.
8100 */
8101 /* ARGSUSED */
8102 static void
8103 ql_get_fwexttrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
8104 {
8105 int rval;
8106 caddr_t payload;
8107
8108 QL_PRINT_9(ha, "started\n");
8109
8110 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
8111 EL(ha, "invalid request for this HBA\n");
8112 cmd->Status = EXT_STATUS_INVALID_REQUEST;
8113 cmd->ResponseLen = 0;
8114 return;
8115 }
8116
8117 if ((CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) == 0) ||
8118 (ha->fwexttracebuf.bp == NULL)) {
8119 EL(ha, "f/w extended trace is not enabled\n");
8120 cmd->Status = EXT_STATUS_INVALID_REQUEST;
8121 cmd->ResponseLen = 0;
8122 return;
8123 }
8124
8125 if (cmd->ResponseLen < FWEXTSIZE) {
8126 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
8127 cmd->DetailStatus = FWEXTSIZE;
8128 EL(ha, "failed, ResponseLen (%xh) < %xh (FWEXTSIZE)\n",
8129 cmd->ResponseLen, FWEXTSIZE);
8130 cmd->ResponseLen = 0;
8131 return;
8132 }
8133
8134 /* Time Stamp */
8135 rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_INSERT_TIME_STAMP,
8136 NULL);
8137 if (rval != QL_SUCCESS) {
8138 EL(ha, "f/w extended trace insert"
8139 "time stamp failed: %xh\n", rval);
8140 cmd->Status = EXT_STATUS_ERR;
8141 cmd->ResponseLen = 0;
8142 return;
8143 }
8144
8145 /* Disable Tracing */
8146 rval = ql_fw_etrace(ha, &ha->fwexttracebuf, FTO_EXT_TRACE_DISABLE,
8147 NULL);
8148 if (rval != QL_SUCCESS) {
8149 EL(ha, "f/w extended trace disable failed: %xh\n", rval);
8150 cmd->Status = EXT_STATUS_ERR;
8151 cmd->ResponseLen = 0;
8152 return;
8153 }
8154
8155 /* Allocate payload buffer */
8156 payload = kmem_zalloc(FWEXTSIZE, KM_SLEEP);
8157 if (payload == NULL) {
8158 EL(ha, "failed, kmem_zalloc\n");
8159 cmd->Status = EXT_STATUS_NO_MEMORY;
8160 cmd->ResponseLen = 0;
8161 return;
8162 }
8163
8164 /* Sync DMA buffer. */
8165 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
8166 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
8167
8168 /* Copy trace buffer data. */
8169 ddi_rep_get8(ha->fwexttracebuf.acc_handle, (uint8_t *)payload,
8170 (uint8_t *)ha->fwexttracebuf.bp, FWEXTSIZE,
8171 DDI_DEV_AUTOINCR);
8172
8173 /* Send payload to application. */
8174 if (ql_send_buffer_data(payload, (caddr_t)(uintptr_t)cmd->ResponseAdr,
8175 cmd->ResponseLen, mode) != cmd->ResponseLen) {
8176 EL(ha, "failed, send_buffer_data\n");
8177 cmd->Status = EXT_STATUS_COPY_ERR;
8178 cmd->ResponseLen = 0;
8179 } else {
8180 cmd->Status = EXT_STATUS_OK;
8181 }
8182
8183 kmem_free(payload, FWEXTSIZE);
8184
8185 QL_PRINT_9(ha, "done\n");
8186 }
8187
8188 /*
8189 * ql_get_fwfcetrace
8190 * Dumps f/w fibre channel event trace buffer
8191 *
8192 * Input:
8193 * ha: adapter state pointer.
8194 * bp: buffer address.
8195 * mode: flags
8196 *
8197 * Returns:
8198 *
8199 * Context:
8200 * Kernel context.
8201 */
8202 /* ARGSUSED */
static void
ql_get_fwfcetrace(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	int		rval;
	caddr_t		fce_trace_p;
	ql_mbx_data_t	mr;
	EXT_FW_FCE_TRACE	*fce_trace;
	size_t		cnt;
	uint32_t	*bp;

	QL_PRINT_9(ha, "started\n");

	/* FCE tracing does not exist on type-1 firmware ISPs. */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
		EL(ha, "invalid request for this HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/* FCE tracing must be enabled and its DMA buffer allocated. */
	if ((CFG_IST(ha, CFG_ENABLE_FWFCETRACE) == 0) ||
	    (ha->fwfcetracebuf.bp == NULL)) {
		EL(ha, "f/w FCE trace is not enabled\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/* The caller's buffer must hold the entire trace. */
	if (cmd->ResponseLen < FWFCESIZE) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = FWFCESIZE;
		EL(ha, "failed, ResponseLen (%xh) < %xh (FWFCESIZE)\n",
		    cmd->ResponseLen, FWFCESIZE);
		cmd->ResponseLen = 0;
		return;
	}

	/* Disable Tracing; mr returns the firmware's trace registers. */
	rval = ql_fw_etrace(ha, &ha->fwfcetracebuf, FTO_FCE_TRACE_DISABLE, &mr);
	if (rval != QL_SUCCESS) {
		EL(ha, "f/w FCE trace disable failed: %xh\n", rval);
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * Allocate payload buffer.
	 * NOTE(review): KM_SLEEP allocations never return NULL, so the
	 * check below is effectively dead code.
	 */
	fce_trace = kmem_zalloc(FWFCESIZE, KM_SLEEP);
	if (fce_trace == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}
	fce_trace_p = (caddr_t)&fce_trace->TraceData[0];

	/* Copy In Pointer and Base Pointer values */
	fce_trace->Registers[0] = mr.mb[2];
	fce_trace->Registers[1] = mr.mb[3];
	fce_trace->Registers[2] = mr.mb[4];
	fce_trace->Registers[3] = mr.mb[5];

	/*
	 * NOTE(review): these read the EXTENDED trace buffer
	 * (fwexttracebuf) rather than fwfcetracebuf — looks like a
	 * copy/paste remnant; confirm which buffer address is intended.
	 */
	fce_trace->Registers[4] = LSW(ha->fwexttracebuf.cookies->dmac_address);
	fce_trace->Registers[5] = MSW(ha->fwexttracebuf.cookies->dmac_address);
	fce_trace->Registers[6] = LSW(ha->fwexttracebuf.cookies->dmac_notused);
	fce_trace->Registers[7] = MSW(ha->fwexttracebuf.cookies->dmac_notused);

	/* Copy FCE Trace Enable Registers */
	fce_trace->Registers[8] = ha->fw_fce_trace_enable.mb[0];
	fce_trace->Registers[9] = ha->fw_fce_trace_enable.mb[2];
	fce_trace->Registers[10] = ha->fw_fce_trace_enable.mb[3];
	fce_trace->Registers[11] = ha->fw_fce_trace_enable.mb[4];
	fce_trace->Registers[12] = ha->fw_fce_trace_enable.mb[5];
	fce_trace->Registers[13] = ha->fw_fce_trace_enable.mb[6];

	/* Sync DMA buffer. */
	(void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
	    FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);

	/* Copy trace buffer data. */
	ddi_rep_get8(ha->fwfcetracebuf.acc_handle, (uint8_t *)fce_trace_p,
	    (uint8_t *)ha->fwfcetracebuf.bp, FWFCESIZE,
	    DDI_DEV_AUTOINCR);

	/* Swap bytes in buffer in case of Big Endian */
	bp = (uint32_t *)&fce_trace->TraceData[0];
	for (cnt = 0; cnt < (FWFCESIZE / sizeof (uint32_t)); cnt++) {
		LITTLE_ENDIAN_32(bp);
		bp++;
	}

	/*
	 * Send payload to application.
	 * NOTE(review): cmd->ResponseLen was only checked against a
	 * lower bound above; if it exceeds the payload allocation this
	 * could read past the end of the buffer — confirm the caller's
	 * contract before relying on this.
	 */
	if (ql_send_buffer_data((caddr_t)fce_trace,
	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
		EL(ha, "failed, send_buffer_data\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		cmd->Status = EXT_STATUS_OK;
	}

	/* Re-enable Tracing */
	bzero(ha->fwfcetracebuf.bp, ha->fwfcetracebuf.size);
	if ((rval = ql_fw_etrace(ha, &ha->fwfcetracebuf,
	    FTO_FCE_TRACE_ENABLE, &mr)) != QL_SUCCESS) {
		EL(ha, "fcetrace enable failed: %xh\n", rval);
	} else {
		/* Remember the enable registers for the next snapshot. */
		ha->fw_fce_trace_enable = mr;
		EL(ha, "FCE Trace Re-Enabled\n");
	}

	kmem_free(fce_trace, FWFCESIZE);

	QL_PRINT_9(ha, "done\n");
}
8318
8319 /*
8320 * ql_get_pci_data
8321 * Retrieves pci config space data
8322 *
8323 * Input:
8324 * ha: adapter state pointer.
8325 * cmd: Local EXT_IOCTL cmd struct pointer.
8326 * mode: flags.
8327 *
8328 * Returns:
8329 * None, request status indicated in cmd->Status.
8330 *
8331 * Context:
8332 * Kernel context.
8333 *
8334 */
static void
ql_get_pci_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint8_t		cap_ptr;
	uint8_t		cap_id;
	uint32_t	buf_size = 256;	/* conventional config space size */

	QL_PRINT_9(ha, "started\n");

	/*
	 * First check the "Capabilities List" bit of the status register.
	 */
	if (ql_pci_config_get16(ha, PCI_CONF_STAT) & PCI_STAT_CAP) {
		/*
		 * Now get the capability pointer
		 */
		cap_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
		while (cap_ptr != PCI_CAP_NEXT_PTR_NULL) {
			/*
			 * Check for the pcie capability.
			 */
			cap_id = (uint8_t)ql_pci_config_get8(ha, cap_ptr);
			if (cap_id == PCI_CAP_ID_PCI_E) {
				/* PCIe devices expose 4KB extended space. */
				buf_size = 4096;
				break;
			}
			cap_ptr = (uint8_t)ql_pci_config_get8(ha,
			    (cap_ptr + PCI_CAP_NEXT_PTR));

	/*
	 * NOTE(review): the closing braces of the while loop and of the
	 * capability-list if block appear to be missing from this view;
	 * verify against the upstream file.
	 */
	if (cmd->ResponseLen < buf_size) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = buf_size;
		EL(ha, "failed ResponseLen < buf_size, len passed=%xh\n",
		    cmd->ResponseLen);
		return;
	}

	/* Dump PCI config data. */
	if ((ql_pci_dump(ha, (void *)(uintptr_t)(cmd->ResponseAdr),
	    buf_size, mode)) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->DetailStatus = 0;
		EL(ha, "failed, copy err pci_dump\n");
	} else {
		cmd->Status = EXT_STATUS_OK;
		cmd->DetailStatus = buf_size;
	}

	QL_PRINT_9(ha, "done\n");
}
8387
8388 /*
8389 * ql_pci_dump
8390 * Dumps PCI config data to application buffer.
8391 *
8392 * Input:
8393 * ha = adapter state pointer.
8394 * bp = user buffer address.
8395 *
8396 * Returns:
8397 *
8398 * Context:
8399 * Kernel context.
8400 */
8401 int
8402 ql_pci_dump(ql_adapter_state_t *ha, uint32_t *bp, uint32_t pci_size, int mode)
8403 {
8404 uint32_t pci_os;
8405 uint32_t *ptr32, *org_ptr32;
8406
8407 QL_PRINT_9(ha, "started\n");
8408
8409 ptr32 = kmem_zalloc(pci_size, KM_SLEEP);
8410 if (ptr32 == NULL) {
8411 EL(ha, "failed kmem_zalloc\n");
8412 return (ENOMEM);
8413 }
8414
8415 /* store the initial value of ptr32 */
8416 org_ptr32 = ptr32;
8417 for (pci_os = 0; pci_os < pci_size; pci_os += 4) {
8418 *ptr32 = (uint32_t)ql_pci_config_get32(ha, pci_os);
8419 LITTLE_ENDIAN_32(ptr32);
8420 ptr32++;
8421 }
8422
8423 if (ddi_copyout((void *)org_ptr32, (void *)bp, pci_size, mode) !=
8424 0) {
8425 EL(ha, "failed ddi_copyout\n");
8426 kmem_free(org_ptr32, pci_size);
8427 return (EFAULT);
8428 }
8429
8430 QL_DUMP_9(org_ptr32, 8, pci_size);
8431
8432 kmem_free(org_ptr32, pci_size);
8433
8434 QL_PRINT_9(ha, "done\n");
8435
8436 return (0);
8437 }
8438
8439 /*
8440 * ql_menlo_reset
8441 * Reset Menlo
8442 *
8443 * Input:
8444 * ha: adapter state pointer.
8445 * bp: buffer address.
8446 * mode: flags
8447 *
8448 * Returns:
8449 *
8450 * Context:
8451 * Kernel context.
8452 */
static void
ql_menlo_reset(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	EXT_MENLO_RESET	rst;
	ql_mbx_data_t	mr;
	int		rval;

	QL_PRINT_9(ha, "started\n");

	/* Menlo commands only apply to Menlo-based (FCoE bridge) HBAs. */
	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * TODO: only vp_index 0 can do this (?)
	 */

	/* Verify the size of request structure. */
	if (cmd->RequestLen < sizeof (EXT_MENLO_RESET)) {
		/* Return error */
		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
		    sizeof (EXT_MENLO_RESET));
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
	/*
	 * NOTE(review): source lines appear to be missing here (the
	 * return from the length check, the ddi_copyin of 'rst', and
	 * the driver-stall step whose EXT_STATUS_BUSY failure path
	 * follows) — verify against the upstream file.
	 */
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Ask the ISP firmware to reset the Menlo with the given flags. */
	rval = ql_reset_menlo(ha, &mr, rst.Flags);
	if (rval != QL_SUCCESS) {
		EL(ha, "failed, status=%xh\n", rval);
		cmd->Status = EXT_STATUS_MAILBOX;
		cmd->DetailStatus = rval;
		cmd->ResponseLen = 0;
	} else if (mr.mb[1] != 0) {
		/* Mailbox 1 carries the Menlo-specific substatus. */
		EL(ha, "failed, substatus=%d\n", mr.mb[1]);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = mr.mb[1];
		cmd->ResponseLen = 0;
	}

	/* Resume normal HBA operation. */
	ql_restart_hba(ha);

	QL_PRINT_9(ha, "done\n");
}
8519
8520 /*
8521 * ql_menlo_get_fw_version
8522 * Get Menlo firmware version.
8523 *
8524 * Input:
8525 * ha: adapter state pointer.
8526 * bp: buffer address.
8527 * mode: flags
8528 *
8529 * Returns:
8530 *
8531 * Context:
8532 * Kernel context.
8533 */
static void
ql_menlo_get_fw_version(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	int	rval;
	ql_mbx_iocb_t	*pkt;
	EXT_MENLO_GET_FW_VERSION	ver = {0};

	QL_PRINT_9(ha, "started\n");

	/* Menlo commands only apply to Menlo-based (FCoE bridge) HBAs. */
	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/* Response buffer must hold a complete version structure. */
	if (cmd->ResponseLen < sizeof (EXT_MENLO_GET_FW_VERSION)) {
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = sizeof (EXT_MENLO_GET_FW_VERSION);
		EL(ha, "ResponseLen=%d < %d\n", cmd->ResponseLen,
		    sizeof (EXT_MENLO_GET_FW_VERSION));
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate packet. */
	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
	if (pkt == NULL) {
	/*
	 * NOTE(review): source lines appear to be missing here (the
	 * NULL handling, the VERIFY MENLO IOCB construction, the
	 * ql_issue_mbx_iocb() call and its status checks that populate
	 * 'rval' and 'ver') — verify against the upstream file.
	 */
		/* Command error */
		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
		    pkt->mvfy.failure_code);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
		    QL_FUNCTION_FAILED;
		cmd->ResponseLen = 0;
	} else if (ddi_copyout((void *)&ver,
	    (void *)(uintptr_t)cmd->ResponseAdr,
	    sizeof (EXT_MENLO_GET_FW_VERSION), mode) != 0) {
		EL(ha, "failed, ddi_copyout\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		/* Success: report how many bytes were returned. */
		cmd->ResponseLen = sizeof (EXT_MENLO_GET_FW_VERSION);
	}

	kmem_free(pkt, sizeof (ql_mbx_iocb_t));

	QL_PRINT_9(ha, "done\n");
}
8601
8602 /*
8603 * ql_menlo_update_fw
8604 * Get Menlo update firmware.
8605 *
8606 * Input:
8607 * ha: adapter state pointer.
8608 * bp: buffer address.
8609 * mode: flags
8610 *
8611 * Returns:
8612 *
8613 * Context:
8614 * Kernel context.
8615 */
static void
ql_menlo_update_fw(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_mbx_iocb_t	*pkt;
	dma_mem_t	*dma_mem;
	EXT_MENLO_UPDATE_FW	fw;
	uint32_t	*ptr32;
	int		rval;

	QL_PRINT_9(ha, "started\n");

	/* Menlo commands only apply to Menlo-based (FCoE bridge) HBAs. */
	if ((CFG_IST(ha, CFG_CTRL_MENLO)) == 0) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/*
	 * TODO: only vp_index 0 can do this (?)
	 */

	/* Verify the size of request structure. */
	if (cmd->RequestLen < sizeof (EXT_MENLO_UPDATE_FW)) {
		/* Return error */
		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
		    sizeof (EXT_MENLO_UPDATE_FW));
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
	/*
	 * NOTE(review): source lines appear to be missing here (the
	 * return from the length check, the ddi_copyin of 'fw' and the
	 * driver-stall step) — verify against the upstream file.
	 * NOTE(review): KM_SLEEP allocations never return NULL, so the
	 * two NULL checks below are effectively dead code.
	 */
	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
	if (dma_mem == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}
	pkt = kmem_zalloc(sizeof (ql_mbx_iocb_t), KM_SLEEP);
	if (pkt == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		kmem_free(dma_mem, sizeof (dma_mem_t));
		ql_restart_hba(ha);
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get DMA memory for the IOCB */
	if (ql_get_dma_mem(ha, dma_mem, fw.TotalByteCount, LITTLE_ENDIAN_DMA,
	    QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%srequest queue DMA memory "
		    "alloc failed", QL_NAME);
		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
		kmem_free(dma_mem, sizeof (dma_mem_t));
		ql_restart_hba(ha);
		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get firmware data. */
	if (ql_get_buffer_data((caddr_t)(uintptr_t)fw.pFwDataBytes, dma_mem->bp,
	    fw.TotalByteCount, mode) != fw.TotalByteCount) {
		EL(ha, "failed, get_buffer_data\n");
		ql_free_dma_resource(ha, dma_mem);
		kmem_free(pkt, sizeof (ql_mbx_iocb_t));
		kmem_free(dma_mem, sizeof (dma_mem_t));
		ql_restart_hba(ha);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Sync DMA buffer. */
	(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
	    DDI_DMA_SYNC_FORDEV);

	/* Build the VERIFY MENLO FW IOCB describing the image. */
	pkt->mvfy.entry_type = VERIFY_MENLO_TYPE;
	pkt->mvfy.entry_count = 1;
	pkt->mvfy.options_status = (uint16_t)LE_16(fw.Flags);
	ptr32 = dma_mem->bp;
	/* Word 2 of the downloaded image carries the firmware version. */
	pkt->mvfy.fw_version = LE_32(ptr32[2]);
	pkt->mvfy.fw_size = LE_32(fw.TotalByteCount);
	pkt->mvfy.fw_sequence_size = LE_32(fw.TotalByteCount);
	pkt->mvfy.dseg_count = LE_16(1);
	pkt->mvfy.dseg.address[0] = (uint32_t)
	    LE_32(LSD(dma_mem->cookie.dmac_laddress));
	pkt->mvfy.dseg.address[1] = (uint32_t)
	    LE_32(MSD(dma_mem->cookie.dmac_laddress));
	pkt->mvfy.dseg.length = LE_32(fw.TotalByteCount);

	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
	/* Convert the returned status words to host byte order. */
	LITTLE_ENDIAN_16(&pkt->mvfy.options_status);
	LITTLE_ENDIAN_16(&pkt->mvfy.failure_code);

	if (rval != QL_SUCCESS || (pkt->mvfy.entry_status & 0x3c) != 0 ||
	    pkt->mvfy.options_status != CS_COMPLETE) {
		/* Command error */
		EL(ha, "failed, status=%xh, es=%xh, cs=%xh, fc=%xh\n", rval,
		    pkt->mvfy.entry_status & 0x3c, pkt->mvfy.options_status,
		    pkt->mvfy.failure_code);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
		    QL_FUNCTION_FAILED;
		cmd->ResponseLen = 0;
	}

	/* Release DMA resources and resume normal HBA operation. */
	ql_free_dma_resource(ha, dma_mem);
	kmem_free(pkt, sizeof (ql_mbx_iocb_t));
	kmem_free(dma_mem, sizeof (dma_mem_t));
	ql_restart_hba(ha);

	QL_PRINT_9(ha, "done\n");
}
8752
8753 /*
8754 * ql_menlo_manage_info
8755 * Get Menlo manage info.
8756 *
8757 * Input:
8758 * ha: adapter state pointer.
8759 * bp: buffer address.
8760 * mode: flags
8761 *
8762 * Returns:
8763 *
8764 * Context:
8765 * Kernel context.
8766 */
static void
ql_menlo_manage_info(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_mbx_iocb_t	*pkt;
	dma_mem_t	*dma_mem = NULL;
	EXT_MENLO_MANAGE_INFO	info;
	int		rval;

	QL_PRINT_9(ha, "started\n");


	/* The call is only supported for Schultz right now */
	if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
		ql_get_xgmac_statistics(ha, cmd, mode);
		QL_PRINT_9(ha, "CFG_FCOE_SUPPORT done\n");
		return;
	}

	/* Otherwise only Menlo-based HBAs support this request. */
	if (!CFG_IST(ha, CFG_CTRL_MENLO)) {
		EL(ha, "failed, invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}

	/* Verify the size of request structure. */
	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
		/* Return error */
		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
		    sizeof (EXT_MENLO_MANAGE_INFO));
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get manage info request. */
	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
	/*
	 * NOTE(review): source lines appear to be missing here (the
	 * copyin error handling and the allocation/initialization of
	 * 'pkt') — verify against the upstream file.
	 */
	pkt->mdata.entry_count = 1;
	pkt->mdata.options_status = (uint16_t)LE_16(info.Operation);

	/* Get DMA memory for the IOCB */
	if (info.Operation == MENLO_OP_READ_MEM ||
	    info.Operation == MENLO_OP_WRITE_MEM) {
		/* Memory ops carry a data buffer; describe it for DMA. */
		pkt->mdata.total_byte_count = LE_32(info.TotalByteCount);
		pkt->mdata.parameter_1 =
		    LE_32(info.Parameters.ap.MenloMemory.StartingAddr);
		/*
		 * NOTE(review): KM_SLEEP allocations never return NULL,
		 * so the NULL check below is effectively dead code.
		 */
		dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t),
		    KM_SLEEP);
		if (dma_mem == NULL) {
			EL(ha, "failed, kmem_zalloc\n");
			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
			cmd->Status = EXT_STATUS_NO_MEMORY;
			cmd->ResponseLen = 0;
			return;
		}
		if (ql_get_dma_mem(ha, dma_mem, info.TotalByteCount,
		    LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
			cmn_err(CE_WARN, "%srequest queue DMA memory "
			    "alloc failed", QL_NAME);
			kmem_free(dma_mem, sizeof (dma_mem_t));
			kmem_free(pkt, sizeof (ql_mbx_iocb_t));
			cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
			cmd->ResponseLen = 0;
			return;
		}
		if (info.Operation == MENLO_OP_WRITE_MEM) {
			/* Get data. */
			if (ql_get_buffer_data(
			    (caddr_t)(uintptr_t)info.pDataBytes,
			    dma_mem->bp, info.TotalByteCount, mode) !=
			    info.TotalByteCount) {
				EL(ha, "failed, get_buffer_data\n");
				ql_free_dma_resource(ha, dma_mem);
				kmem_free(dma_mem, sizeof (dma_mem_t));
				kmem_free(pkt, sizeof (ql_mbx_iocb_t));
				cmd->Status = EXT_STATUS_COPY_ERR;
				cmd->ResponseLen = 0;
				return;
			}
			(void) ddi_dma_sync(dma_mem->dma_handle, 0,
			    dma_mem->size, DDI_DMA_SYNC_FORDEV);
		}
		pkt->mdata.dseg_count = LE_16(1);
		pkt->mdata.dseg.address[0] = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		pkt->mdata.dseg.address[1] = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
		pkt->mdata.dseg.length = LE_32(info.TotalByteCount);
	} else if (info.Operation & MENLO_OP_CHANGE_CONFIG) {
		/* Config changes pass parameters inline, no DMA buffer. */
		pkt->mdata.parameter_1 =
		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamID);
		pkt->mdata.parameter_2 =
		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData0);
		pkt->mdata.parameter_3 =
		    LE_32(info.Parameters.ap.MenloConfig.ConfigParamData1);
	} else if (info.Operation & MENLO_OP_GET_INFO) {
		pkt->mdata.parameter_1 =
		    LE_32(info.Parameters.ap.MenloInfo.InfoDataType);
		pkt->mdata.parameter_2 =
		    LE_32(info.Parameters.ap.MenloInfo.InfoContext);
	}

	rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt, sizeof (ql_mbx_iocb_t));
	/* Convert the returned status words to host byte order. */
	LITTLE_ENDIAN_16(&pkt->mdata.options_status);
	LITTLE_ENDIAN_16(&pkt->mdata.failure_code);

	if (rval != QL_SUCCESS || (pkt->mdata.entry_status & 0x3c) != 0 ||
	    pkt->mdata.options_status != CS_COMPLETE) {
	/*
	 * NOTE(review): the EL() error-log call these arguments belong
	 * to is missing from this view — verify against upstream.
	 */
		    pkt->mdata.failure_code);
		cmd->Status = EXT_STATUS_ERR;
		cmd->DetailStatus = rval != QL_SUCCESS ? rval :
		    QL_FUNCTION_FAILED;
		cmd->ResponseLen = 0;
	} else if (info.Operation == MENLO_OP_READ_MEM) {
		/* Return the memory read back to the application. */
		(void) ddi_dma_sync(dma_mem->dma_handle, 0, dma_mem->size,
		    DDI_DMA_SYNC_FORKERNEL);
		if (ql_send_buffer_data((caddr_t)(uintptr_t)info.pDataBytes,
		    dma_mem->bp, info.TotalByteCount, mode) !=
		    info.TotalByteCount) {
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->ResponseLen = 0;
		}
	}

	/*
	 * NOTE(review): for CHANGE_CONFIG/GET_INFO operations dma_mem is
	 * never allocated and is still NULL here; confirm that
	 * ql_free_dma_resource() and kmem_free() tolerate NULL, or that
	 * a guard exists in the lines missing from this view.
	 */
	ql_free_dma_resource(ha, dma_mem);
	kmem_free(dma_mem, sizeof (dma_mem_t));
	kmem_free(pkt, sizeof (ql_mbx_iocb_t));

	QL_PRINT_9(ha, "done\n");
}
8919
8920 /*
8921 * ql_suspend_hba
8922 * Suspends all adapter ports.
8923 *
8924 * Input:
8925 * ha: adapter state pointer.
8926 * options: BIT_0 --> leave driver stalled on exit if
8927 * failed.
8928 *
8929 * Returns:
8930 * ql local function return status code.
8931 *
8932 * Context:
8933 * Kernel context.
8934 */
8935 static int
8936 ql_suspend_hba(ql_adapter_state_t *ha, uint32_t opt)
8937 {
8992 * ha: adapter state pointer.
8993 * cmd: Local EXT_IOCTL cmd struct pointer.
8994 * mode: flags.
8995 *
8996 * Returns:
8997 * None, request status indicated in cmd->Status.
8998 *
8999 * Context:
9000 * Kernel context.
9001 *
9002 */
9003 static void
9004 ql_get_vp_cnt_id(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9005 {
9006 ql_adapter_state_t *vha;
9007 PEXT_VPORT_ID_CNT ptmp_vp;
9008 int id = 0;
9009 int rval;
9010 char name[MAXPATHLEN];
9011
9012 QL_PRINT_9(ha, "started\n");
9013
9014 /*
9015 * To be backward compatible with older API
9016 * check for the size of old EXT_VPORT_ID_CNT
9017 */
9018 if (cmd->ResponseLen < sizeof (EXT_VPORT_ID_CNT) &&
9019 (cmd->ResponseLen != EXT_OLD_VPORT_ID_CNT_SIZE)) {
9020 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9021 cmd->DetailStatus = sizeof (EXT_VPORT_ID_CNT);
9022 EL(ha, "failed, ResponseLen < EXT_VPORT_ID_CNT, Len=%xh\n",
9023 cmd->ResponseLen);
9024 cmd->ResponseLen = 0;
9025 return;
9026 }
9027
9028 ptmp_vp = (EXT_VPORT_ID_CNT *)
9029 kmem_zalloc(sizeof (EXT_VPORT_ID_CNT), KM_SLEEP);
9030 if (ptmp_vp == NULL) {
9031 EL(ha, "failed, kmem_zalloc\n");
9032 cmd->ResponseLen = 0;
9033 return;
9034 }
9035 vha = ha->vp_next;
9036 while (vha != NULL) {
9037 ptmp_vp->VpCnt++;
9038 ptmp_vp->VpId[id] = vha->vp_index;
9039 (void) ddi_pathname(vha->dip, name);
9040 (void) strncpy((char *)ptmp_vp->vp_path[id], name,
9041 (sizeof (ptmp_vp->vp_path[id]) -1));
9042 ptmp_vp->VpDrvInst[id] = (int32_t)vha->instance;
9043 id++;
9044 vha = vha->vp_next;
9045 }
9046 rval = ddi_copyout((void *)ptmp_vp,
9047 (void *)(uintptr_t)(cmd->ResponseAdr),
9048 cmd->ResponseLen, mode);
9049 if (rval != 0) {
9050 cmd->Status = EXT_STATUS_COPY_ERR;
9051 cmd->ResponseLen = 0;
9052 EL(ha, "failed, ddi_copyout\n");
9053 } else {
9054 cmd->ResponseLen = sizeof (EXT_VPORT_ID_CNT);
9055 QL_PRINT_9(ha, "done, vport_cnt=%d\n",
9056 ha->instance, ptmp_vp->VpCnt);
9057 }
9058 kmem_free(ptmp_vp, sizeof (EXT_VPORT_ID_CNT));
9059 }
9060
9061 /*
9062 * ql_vp_ioctl
9063 * Performs all EXT_CC_VPORT_CMD functions.
9064 *
9065 * Input:
9066 * ha: adapter state pointer.
9067 * cmd: Local EXT_IOCTL cmd struct pointer.
9068 * mode: flags.
9069 *
9070 * Returns:
9071 * None, request status indicated in cmd->Status.
9072 *
9073 * Context:
9074 * Kernel context.
9075 */
9076 static void
9077 ql_vp_ioctl(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9078 {
9079 QL_PRINT_9(ha, "started, cmd=%d\n",
9080 cmd->SubCode);
9081
9082 /* case off on command subcode */
9083 switch (cmd->SubCode) {
9084 case EXT_VF_SC_VPORT_GETINFO:
9085 ql_qry_vport(ha, cmd, mode);
9086 break;
9087 default:
9088 /* function not supported. */
9089 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
9090 EL(ha, "failed, Unsupported Subcode=%xh\n",
9091 cmd->SubCode);
9092 break;
9093 }
9094
9095 QL_PRINT_9(ha, "done\n");
9096 }
9097
9098 /*
9099 * ql_qry_vport
9100 * Performs EXT_VF_SC_VPORT_GETINFO subfunction.
9101 *
9102 * Input:
9103 * ha: adapter state pointer.
9104 * cmd: EXT_IOCTL cmd struct pointer.
9105 * mode: flags.
9106 *
9107 * Returns:
9108 * None, request status indicated in cmd->Status.
9109 *
9110 * Context:
9111 * Kernel context.
9112 */
9113 static void
9114 ql_qry_vport(ql_adapter_state_t *vha, EXT_IOCTL *cmd, int mode)
9115 {
9116 ql_adapter_state_t *tmp_vha;
9117 EXT_VPORT_INFO tmp_vport = {0};
9118
9119 QL_PRINT_9(vha, "started\n", vha->instance);
9120
9121 if (cmd->ResponseLen < sizeof (EXT_VPORT_INFO)) {
9122 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9123 cmd->DetailStatus = sizeof (EXT_VPORT_INFO);
9124 EL(vha, "failed, ResponseLen < EXT_VPORT_INFO, Len=%xh\n",
9125 cmd->ResponseLen);
9126 cmd->ResponseLen = 0;
9127 return;
9128 }
9129
9130 /* Fill in the vport information. */
9131 bcopy(vha->loginparams.node_ww_name.raw_wwn, tmp_vport.wwnn,
9132 EXT_DEF_WWN_NAME_SIZE);
9133 bcopy(vha->loginparams.nport_ww_name.raw_wwn, tmp_vport.wwpn,
9134 EXT_DEF_WWN_NAME_SIZE);
9135 tmp_vport.state = vha->state;
9136 tmp_vport.id = vha->vp_index;
9137
9138 tmp_vha = vha->pha->vp_next;
9139 while (tmp_vha != NULL) {
9140 tmp_vport.used++;
9141 tmp_vha = tmp_vha->vp_next;
9142 }
9143
9144 if (vha->max_vports > tmp_vport.used) {
9145 tmp_vport.free = vha->max_vports - tmp_vport.used;
9146 }
9147
9148 if (ddi_copyout((void *)&tmp_vport,
9149 (void *)(uintptr_t)(cmd->ResponseAdr),
9150 sizeof (EXT_VPORT_INFO), mode) != 0) {
9151 cmd->Status = EXT_STATUS_COPY_ERR;
9152 cmd->ResponseLen = 0;
9153 EL(vha, "failed, ddi_copyout\n");
9154 } else {
9155 cmd->ResponseLen = sizeof (EXT_VPORT_INFO);
9156 QL_PRINT_9(vha, "done\n", vha->instance);
9157 }
9158 }
9159
9160 /*
9161 * ql_access_flash
9162 * Performs all EXT_CC_ACCESS_FLASH_OS functions.
9163 *
9164 * Input:
9165 * pi: port info pointer.
9166 * cmd: Local EXT_IOCTL cmd struct pointer.
9167 * mode: flags.
9168 *
9169 * Returns:
9170 * None, request status indicated in cmd->Status.
9171 *
9172 * Context:
9173 * Kernel context.
9174 */
9175 static void
9176 ql_access_flash(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9177 {
9178 int rval;
9179
9180 QL_PRINT_9(ha, "started\n");
9181
9182 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1) &&
9183 ql_stall_driver(ha, 0) != QL_SUCCESS) {
9184 EL(ha, "ql_stall_driver failed\n");
9185 ql_restart_driver(ha);
9186 cmd->Status = EXT_STATUS_BUSY;
9187 cmd->ResponseLen = 0;
9188 return;
9189 }
9190
9191 switch (cmd->SubCode) {
9192 case EXT_SC_FLASH_READ:
9193 if ((rval = ql_flash_fcode_dump(ha,
9194 (void *)(uintptr_t)(cmd->ResponseAdr),
9195 (size_t)(cmd->ResponseLen), cmd->Reserved1, mode)) != 0) {
9196 cmd->Status = EXT_STATUS_COPY_ERR;
9197 cmd->ResponseLen = 0;
9198 EL(ha, "flash_fcode_dump status=%xh\n", rval);
9199 }
9200 break;
9201 case EXT_SC_FLASH_WRITE:
9202 if ((rval = ql_r_m_w_flash(ha,
9203 (void *)(uintptr_t)(cmd->RequestAdr),
9204 (size_t)(cmd->RequestLen), cmd->Reserved1, mode)) !=
9205 QL_SUCCESS) {
9206 cmd->Status = EXT_STATUS_COPY_ERR;
9207 cmd->ResponseLen = 0;
9208 EL(ha, "r_m_w_flash status=%xh\n", rval);
9209 } else {
9210 /* Reset caches on all adapter instances. */
9211 ql_update_flash_caches(ha);
9212 }
9213 break;
9214 default:
9215 EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
9216 cmd->Status = EXT_STATUS_ERR;
9217 cmd->ResponseLen = 0;
9218 break;
9219 }
9220
9221 /* Resume I/O */
9222 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
9223 EL(ha, "isp_abort_needed for restart\n");
9224 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
9225 DRIVER_STALL);
9226 }
9227
9228 QL_PRINT_9(ha, "done\n");
9229 }
9230
9231 /*
9232 * ql_reset_cmd
9233 * Performs all EXT_CC_RESET_FW_OS functions.
9234 *
9235 * Input:
9236 * ha: adapter state pointer.
9237 * cmd: Local EXT_IOCTL cmd struct pointer.
9238 *
9239 * Returns:
9240 * None, request status indicated in cmd->Status.
9241 *
9242 * Context:
9243 * Kernel context.
9244 */
static void
ql_reset_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd)
{
	uint8_t	timer;

	QL_PRINT_9(ha, "started\n");

	switch (cmd->SubCode) {
	case EXT_SC_RESET_FC_FW:
		if (CFG_IST(ha, CFG_CTRL_82XX)) {
			/* 82xx parts have a dedicated firmware reset path. */
			(void) ql_8021_reset_fw(ha);
		} else {
			/* Everything else is reset via an ISP abort. */
			EL(ha, "isp_abort_needed\n");
			ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
		}
		/*
		 * Poll up to 180 iterations for the abort/loop-resync to
		 * finish before returning to the caller.
		 */
		for (timer = 180; timer; timer--) {
			ql_awaken_task_daemon(ha, NULL, 0, 0);
			/* Delay for 1 second. */
			delay(100);
			if (!(ha->task_daemon_flags & (ISP_ABORT_NEEDED |
			    ABORT_ISP_ACTIVE | LOOP_RESYNC_NEEDED |
			    LOOP_RESYNC_ACTIVE))) {
				break;
			}
		}
		break;
	case EXT_SC_RESET_MPI_FW:
		if (!(CFG_IST(ha, CFG_CTRL_8081))) {
			/* MPI reset only applies to 81xx-class adapters. */
			EL(ha, "invalid request for HBA\n");
			cmd->Status = EXT_STATUS_INVALID_REQUEST;
			cmd->ResponseLen = 0;
		} else {
			/* Suppress NIC firmware dumps across the restart. */
			ADAPTER_STATE_LOCK(ha);
			ha->flags |= DISABLE_NIC_FW_DMP;
			ADAPTER_STATE_UNLOCK(ha);

			/* Wait for I/O to stop and daemon to stall. */
			if (ql_suspend_hba(ha, 0) != QL_SUCCESS) {
				EL(ha, "ql_suspend_hba failed\n");
				cmd->Status = EXT_STATUS_BUSY;
				cmd->ResponseLen = 0;
			} else if (ql_restart_mpi(ha) != QL_SUCCESS) {
				cmd->Status = EXT_STATUS_ERR;
				cmd->ResponseLen = 0;
			} else {
				/*
				 * While the restart_mpi mailbox cmd may be
				 * done the MPI is not. Wait at least 6 sec. or
				 * exit if the loop comes up.
				 */
				for (timer = 6; timer; timer--) {
					if (!(ha->task_daemon_flags &
					    LOOP_DOWN)) {
						break;
					}
					/* Delay for 1 second. */
					ql_delay(ha, 1000000);
				}
			}
			/* Resume normal operation regardless of outcome. */
			ql_restart_hba(ha);

			/* Re-enable NIC firmware dumps. */
			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~DISABLE_NIC_FW_DMP;
			ADAPTER_STATE_UNLOCK(ha);
		}
		break;
	default:
		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		break;
	}

	QL_PRINT_9(ha, "done\n");
}
9320
9321 /*
9322 * ql_get_dcbx_parameters
9323 * Get DCBX parameters.
9324 *
9325 * Input:
9326 * ha: adapter state pointer.
9327 * cmd: User space CT arguments pointer.
9328 * mode: flags.
9329 */
9330 static void
9331 ql_get_dcbx_parameters(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9332 {
9333 uint8_t *tmp_buf;
9334 int rval;
9335
9336 QL_PRINT_9(ha, "started\n");
9337
9338 if (!(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
9339 EL(ha, "invalid request for HBA\n");
9340 cmd->Status = EXT_STATUS_INVALID_REQUEST;
9341 cmd->ResponseLen = 0;
9342 return;
9343 }
9344
9345 /* Allocate memory for command. */
9346 tmp_buf = kmem_zalloc(EXT_DEF_DCBX_PARAM_BUF_SIZE, KM_SLEEP);
9347 if (tmp_buf == NULL) {
9348 EL(ha, "failed, kmem_zalloc\n");
9349 cmd->Status = EXT_STATUS_NO_MEMORY;
9350 cmd->ResponseLen = 0;
9351 return;
9352 }
9353 /* Send command */
9354 rval = ql_get_dcbx_params(ha, EXT_DEF_DCBX_PARAM_BUF_SIZE,
9355 (caddr_t)tmp_buf);
9356 if (rval != QL_SUCCESS) {
9357 /* error */
9358 EL(ha, "failed, get_dcbx_params_mbx=%xh\n", rval);
9359 kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
9360 cmd->Status = EXT_STATUS_ERR;
9361 cmd->ResponseLen = 0;
9362 return;
9363 }
9364
9365 /* Copy the response */
9366 if (ql_send_buffer_data((caddr_t)tmp_buf,
9367 (caddr_t)(uintptr_t)cmd->ResponseAdr,
9368 EXT_DEF_DCBX_PARAM_BUF_SIZE, mode) != EXT_DEF_DCBX_PARAM_BUF_SIZE) {
9369 EL(ha, "failed, ddi_copyout\n");
9370 cmd->Status = EXT_STATUS_COPY_ERR;
9371 cmd->ResponseLen = 0;
9372 } else {
9373 cmd->ResponseLen = EXT_DEF_DCBX_PARAM_BUF_SIZE;
9374 QL_PRINT_9(ha, "done\n");
9375 }
9376 kmem_free(tmp_buf, EXT_DEF_DCBX_PARAM_BUF_SIZE);
9377
9378 }
9379
9380 /*
9381 * ql_qry_cna_port
9382 * Performs EXT_SC_QUERY_CNA_PORT subfunction.
9383 *
9384 * Input:
9385 * ha: adapter state pointer.
9386 * cmd: EXT_IOCTL cmd struct pointer.
9387 * mode: flags.
9388 *
9389 * Returns:
9390 * None, request status indicated in cmd->Status.
9391 *
9392 * Context:
9393 * Kernel context.
9394 */
9395 static void
9396 ql_qry_cna_port(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9397 {
9398 EXT_CNA_PORT cna_port = {0};
9399
9400 QL_PRINT_9(ha, "started\n");
9401
9402 if (!(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
9403 EL(ha, "invalid request for HBA\n");
9404 cmd->Status = EXT_STATUS_INVALID_REQUEST;
9405 cmd->ResponseLen = 0;
9406 return;
9407 }
9408
9409 if (cmd->ResponseLen < sizeof (EXT_CNA_PORT)) {
9410 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9411 cmd->DetailStatus = sizeof (EXT_CNA_PORT);
9412 EL(ha, "failed, ResponseLen < EXT_CNA_PORT, Len=%xh\n",
9413 cmd->ResponseLen);
9414 cmd->ResponseLen = 0;
9415 return;
9416 }
9417
9418 cna_port.VLanId = ha->fcoe_vlan_id;
9419 cna_port.FabricParam = ha->fabric_params;
9420 bcopy(ha->fcoe_vnport_mac, cna_port.VNPortMACAddress,
9421 EXT_DEF_MAC_ADDRESS_SIZE);
9422
9423 if (ddi_copyout((void *)&cna_port,
9424 (void *)(uintptr_t)(cmd->ResponseAdr),
9425 sizeof (EXT_CNA_PORT), mode) != 0) {
9426 cmd->Status = EXT_STATUS_COPY_ERR;
9427 cmd->ResponseLen = 0;
9428 EL(ha, "failed, ddi_copyout\n");
9429 } else {
9430 cmd->ResponseLen = sizeof (EXT_CNA_PORT);
9431 QL_PRINT_9(ha, "done\n");
9432 }
9433 }
9434
9435 /*
9436 * ql_qry_adapter_versions
9437 * Performs EXT_SC_QUERY_ADAPTER_VERSIONS subfunction.
9438 *
9439 * Input:
9440 * ha: adapter state pointer.
9441 * cmd: EXT_IOCTL cmd struct pointer.
9442 * mode: flags.
9443 *
9444 * Returns:
9445 * None, request status indicated in cmd->Status.
9446 *
9447 * Context:
9448 * Kernel context.
9449 */
static void
ql_qry_adapter_versions(ql_adapter_state_t *ha, EXT_IOCTL *cmd,
    int mode)
{
	uint8_t		is_8142, mpi_cap;
	uint32_t	ver_len, transfer_size;
	PEXT_ADAPTERREGIONVERSION	padapter_ver = NULL;

	QL_PRINT_9(ha, "started\n");

	/* 8142s do not have a EDC PHY firmware. */
	mpi_cap = (uint8_t)(ha->mpi_capability_list >> 8);

	is_8142 = 0;
	/* Sizeof (Length + Reserved) = 8 Bytes */
	if (mpi_cap == 0x02 || mpi_cap == 0x04) {
		/* One fewer region version on 8142 (no EDC PHY region). */
		ver_len = (sizeof (EXT_REGIONVERSION) * (NO_OF_VERSIONS - 1))
		    + 8;
		is_8142 = 1;
	} else {
		ver_len = (sizeof (EXT_REGIONVERSION) * NO_OF_VERSIONS) + 8;
	}

	/* Allocate local memory for EXT_ADAPTERREGIONVERSION */
	padapter_ver = (EXT_ADAPTERREGIONVERSION *)kmem_zalloc(ver_len,
	    KM_SLEEP);

	/*
	 * NOTE(review): this branch appears to conflate the allocation-
	 * failure path with the buffer-too-small path (it dereferences
	 * padapter_ver inside its own NULL check); intermediate lines may
	 * have been lost — verify this section against the complete
	 * original source before changing it.
	 */
	if (padapter_ver == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		EL(ha, "failed, ResponseLen < ver_len, ",
		    "RespLen=%xh ver_len=%xh\n", cmd->ResponseLen, ver_len);
		/* Calculate the No. of valid versions being returned. */
		padapter_ver->Length = (uint32_t)
		    ((cmd->ResponseLen - 8) / sizeof (EXT_REGIONVERSION));
		cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
		cmd->DetailStatus = ver_len;
		transfer_size = cmd->ResponseLen;
	} else {
		transfer_size = ver_len;
	}

	if (ddi_copyout((void *)padapter_ver,
	    (void *)(uintptr_t)(cmd->ResponseAdr),
	    transfer_size, mode) != 0) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		cmd->ResponseLen = ver_len;
		QL_PRINT_9(ha, "done\n");
	}

	kmem_free(padapter_ver, ver_len);
}
9537
9538 /*
9539 * ql_get_xgmac_statistics
9540 * Get XgMac information
9541 *
9542 * Input:
9543 * ha: adapter state pointer.
9544 * cmd: EXT_IOCTL cmd struct pointer.
9545 * mode: flags.
9546 *
9547 * Returns:
9548 * None, request status indicated in cmd->Status.
9549 *
9550 * Context:
9551 * Kernel context.
9552 */
static void
ql_get_xgmac_statistics(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	int			rval;
	uint32_t		size;
	int8_t			*tmp_buf;
	EXT_MENLO_MANAGE_INFO	info;

	QL_PRINT_9(ha, "started\n");

	/* Verify the size of request structure. */
	if (cmd->RequestLen < sizeof (EXT_MENLO_MANAGE_INFO)) {
		/* Return error */
		EL(ha, "RequestLen=%d < %d\n", cmd->RequestLen,
		    sizeof (EXT_MENLO_MANAGE_INFO));
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get manage info request. */
	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
	    (caddr_t)&info, sizeof (EXT_MENLO_MANAGE_INFO), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	/*
	 * NOTE(review): the code that validates 'info' and sets up
	 * 'size' and 'tmp_buf' is not visible in this excerpt — verify
	 * against the complete original source before modifying.
	 */
	}

	rval = ql_get_xgmac_stats(ha, size, (caddr_t)tmp_buf);

	if (rval != QL_SUCCESS) {
		/* error */
		EL(ha, "failed, get_xgmac_stats =%xh\n", rval);
		kmem_free(tmp_buf, size);
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Copy the statistics out to the caller's data buffer. */
	if (ql_send_buffer_data(tmp_buf, (caddr_t)(uintptr_t)info.pDataBytes,
	    size, mode) != size) {
		EL(ha, "failed, ddi_copyout\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		cmd->ResponseLen = info.TotalByteCount;
		QL_PRINT_9(ha, "done\n");
	}
	kmem_free(tmp_buf, size);
	/* NOTE(review): "done" may be printed twice on success — confirm. */
	QL_PRINT_9(ha, "done\n");
}
9633
9634 /*
9635 * ql_get_fcf_list
9636 * Get FCF list.
9637 *
9638 * Input:
9639 * ha: adapter state pointer.
9640 * cmd: User space CT arguments pointer.
9641 * mode: flags.
9642 */
static void
ql_get_fcf_list(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	uint8_t			*tmp_buf;
	int			rval;
	EXT_FCF_LIST		fcf_list = {0};
	ql_fcf_list_desc_t	mb_fcf_list = {0};

	QL_PRINT_9(ha, "started\n");

	/* FCF lists only exist on FCoE-capable adapters. */
	if (!(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
		EL(ha, "invalid request for HBA\n");
		cmd->Status = EXT_STATUS_INVALID_REQUEST;
		cmd->ResponseLen = 0;
		return;
	}
	/* Get manage info request. */
	if (ddi_copyin((caddr_t)(uintptr_t)cmd->RequestAdr,
	    (caddr_t)&fcf_list, sizeof (EXT_FCF_LIST), mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* The caller must supply a non-zero buffer size. */
	if (!(fcf_list.BufSize)) {
		/* Return error */
		EL(ha, "failed, fcf_list BufSize is=%xh\n",
		    fcf_list.BufSize);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->ResponseLen = 0;
	/*
	 * NOTE(review): the lines that close this branch, allocate
	 * 'tmp_buf' and populate 'mb_fcf_list' are not visible in this
	 * excerpt — verify against the complete original source before
	 * modifying this function.
	 */
	/* Send command */
	rval = ql_get_fcf_list_mbx(ha, &mb_fcf_list, (caddr_t)tmp_buf);
	if (rval != QL_SUCCESS) {
		/* error */
		EL(ha, "failed, get_fcf_list_mbx=%xh\n", rval);
		kmem_free(tmp_buf, fcf_list.BufSize);
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	/* Copy the response */
	if (ql_send_buffer_data((caddr_t)tmp_buf,
	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
	    fcf_list.BufSize, mode) != fcf_list.BufSize) {
		EL(ha, "failed, ddi_copyout\n");
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
	} else {
		/* Report the byte count the firmware actually returned. */
		cmd->ResponseLen = mb_fcf_list.buffer_size;
		QL_PRINT_9(ha, "done\n");
	}

	kmem_free(tmp_buf, fcf_list.BufSize);
}
9718
9719 /*
9720 * ql_get_resource_counts
9721 * Get Resource counts:
9722 *
9723 * Input:
9724 * ha: adapter state pointer.
9725 * cmd: User space CT arguments pointer.
9726 * mode: flags.
9727 */
9728 static void
9729 ql_get_resource_counts(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9730 {
9731 int rval;
9732 ql_mbx_data_t mr;
9733 EXT_RESOURCE_CNTS tmp_rc_cnt = {0};
9734
9735 QL_PRINT_9(ha, "started\n");
9736
9737 if (cmd->ResponseLen < sizeof (EXT_RESOURCE_CNTS)) {
9738 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9739 cmd->DetailStatus = sizeof (EXT_RESOURCE_CNTS);
9740 EL(ha, "failed, ResponseLen < EXT_RESOURCE_CNTS, "
9741 "Len=%xh\n", cmd->ResponseLen);
9742 cmd->ResponseLen = 0;
9743 return;
9744 }
9745
9746 rval = ql_get_resource_cnts(ha, &mr);
9747 if (rval != QL_SUCCESS) {
9748 EL(ha, "resource cnt mbx failed\n");
9749 cmd->Status = EXT_STATUS_ERR;
9750 cmd->ResponseLen = 0;
9751 return;
9752 }
9753
9754 tmp_rc_cnt.OrgTgtXchgCtrlCnt = (uint32_t)mr.mb[1];
9755 tmp_rc_cnt.CurTgtXchgCtrlCnt = (uint32_t)mr.mb[2];
9756 tmp_rc_cnt.CurXchgCtrlCnt = (uint32_t)mr.mb[3];
9757 tmp_rc_cnt.OrgXchgCtrlCnt = (uint32_t)mr.mb[6];
9758 tmp_rc_cnt.CurIocbBufCnt = (uint32_t)mr.mb[7];
9759 tmp_rc_cnt.OrgIocbBufCnt = (uint32_t)mr.mb[10];
9760 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
9761 tmp_rc_cnt.NoOfSupVPs = (uint32_t)mr.mb[11];
9762 }
9763 if (CFG_IST(ha, CFG_FCOE_SUPPORT)) {
9764 tmp_rc_cnt.NoOfSupFCFs = (uint32_t)mr.mb[12];
9765 }
9766
9767 rval = ddi_copyout((void *)&tmp_rc_cnt,
9768 (void *)(uintptr_t)(cmd->ResponseAdr),
9769 sizeof (EXT_RESOURCE_CNTS), mode);
9770 if (rval != 0) {
9771 cmd->Status = EXT_STATUS_COPY_ERR;
9772 cmd->ResponseLen = 0;
9773 EL(ha, "failed, ddi_copyout\n");
9774 } else {
9775 cmd->ResponseLen = sizeof (EXT_RESOURCE_CNTS);
9776 QL_PRINT_9(ha, "done\n");
9777 }
9778 }
9779
9780 /*
9781 * ql_get_temperature
9782 * Get ASIC temperature data
9783 *
9784 * Input:
9785 * ha: adapter state pointer.
9786 * cmd: EXT_IOCTL cmd struct pointer.
9787 * mode: flags
9788 *
9789 * Returns:
9790 * None, request status indicated in cmd->Status.
9791 *
9792 * Context:
9793 * Kernel context.
9794 */
9795 static void
9796 ql_get_temperature(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9797 {
9798 ql_mbx_data_t mr;
9799 int rval = 0;
9800 EXT_BOARD_TEMP board_temp = {0};
9801
9802 QL_PRINT_9(ha, "started\n");
9803
9804 if (!(ha->fw_ext_attributes & TEMP_SUPPORT_ISP)) {
9805 EL(ha, "invalid request for HBA\n");
9806 cmd->Status = EXT_STATUS_INVALID_REQUEST;
9807 cmd->ResponseLen = 0;
9808 return;
9809 }
9810
9811 if (cmd->ResponseLen < sizeof (EXT_BOARD_TEMP)) {
9812 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
9813 cmd->DetailStatus = sizeof (EXT_BOARD_TEMP);
9814 EL(ha, "failed, ResponseLen < EXT_BOARD_TEMP, "
9815 "Len=%xh \n", cmd->ResponseLen);
9816 cmd->ResponseLen = 0;
9817 return;
9818 }
9819
9820 switch (cmd->SubCode) {
9821 case EXT_SC_GET_BOARD_TEMP:
9822 rval = ql_get_temp(ha, &mr);
9823 if (rval != QL_SUCCESS) {
9824 /* error */
9825 EL(ha, "failed, get_temperature_mbx=%xh\n", rval);
9826 cmd->Status = EXT_STATUS_ERR;
9827 cmd->ResponseLen = 0;
9828 break;
9829 }
9830 board_temp.IntTemp = mr.mb[1];
9831
9832 rval = ddi_copyout((void *)&board_temp,
9833 (void *)(uintptr_t)(cmd->ResponseAdr),
9834 sizeof (EXT_BOARD_TEMP), mode);
9835 if (rval != 0) {
9836 cmd->Status = EXT_STATUS_COPY_ERR;
9837 cmd->ResponseLen = 0;
9838 EL(ha, "failed, ddi_copyout\n");
9839 } else {
9840 cmd->ResponseLen = sizeof (EXT_BOARD_TEMP);
9841 }
9842 break;
9843 default:
9844 EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
9845 cmd->Status = EXT_STATUS_ERR;
9846 cmd->ResponseLen = 0;
9847 break;
9848 }
9849
9850 QL_PRINT_9(ha, "done\n");
9851 }
9852
9853 /*
9854 * ql_dump_cmd
9855 * Performs all EXT_CC_DUMP_OS functions.
9856 *
9857 * Input:
9858 * ha: adapter state pointer.
9859 * cmd: Local EXT_IOCTL cmd struct pointer.
9860 *
9861 * Returns:
9862 * None, request status indicated in cmd->Status.
9863 *
9864 * Context:
9865 * Kernel context.
9866 */
static void
ql_dump_cmd(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	caddr_t		dump;
	uint32_t	sdm_valid_dump = 0;
	int		rval = 0;

	QL_PRINT_9(ha, "started\n");

	/*
	 * A dump is retrievable only if one was captured (QL_DUMP_VALID)
	 * and has not already been uploaded (QL_DUMP_UPLOADED).
	 * NOTE(review): comparing ql_dump_state against NULL looks like
	 * it should be a compare against 0 — confirm the field's type.
	 */
	if (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED) &&
	    ha->ql_dump_state != NULL) {
		sdm_valid_dump = 1;
	} else {
		EL(ha, "dump does not exist for instance %d (%x, %p)\n",
		    ha->instance, ha->ql_dump_state, ha->ql_dump_ptr);
	}

	cmd->Status = EXT_STATUS_OK;
	cmd->DetailStatus = 0;

	switch (cmd->SubCode) {
	case EXT_SC_DUMP_SIZE:
		/* Report the dump size to the caller via DetailStatus. */
		cmd->ResponseLen = 0;
		if (sdm_valid_dump) {
			cmd->DetailStatus = ha->risc_dump_size;
		}
		break;
	case EXT_SC_DUMP_READ:
		if (!sdm_valid_dump) {
			cmd->Status = EXT_STATUS_INVALID_REQUEST;
			cmd->ResponseLen = 0;
			break;
		}

		/* The caller's buffer must hold the whole dump. */
		if (cmd->ResponseLen < ha->risc_dump_size) {
			cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
			cmd->DetailStatus = ha->risc_dump_size;
			EL(ha, "failed, ResponseLen < %x, "
			    "Len=%xh\n", ha->risc_dump_size,
			    cmd->ResponseLen);
			break;
		}

		/* Block NIC firmware dumps while the upload runs. */
		ADAPTER_STATE_LOCK(ha);
		ha->flags |= DISABLE_NIC_FW_DMP;
		ADAPTER_STATE_UNLOCK(ha);

		QL_DUMP_LOCK(ha);

		/* Render the firmware dump as ASCII in a scratch buffer. */
		dump = kmem_zalloc(ha->risc_dump_size, KM_SLEEP);
		cmd->ResponseLen = (uint32_t)ql_ascii_fw_dump(ha, dump);

		if ((rval = ddi_copyout((void *)dump,
		    (void *)(uintptr_t)(cmd->ResponseAdr), cmd->ResponseLen,
		    mode)) != 0) {
			/* Keep the dump retrievable on copyout failure. */
			ha->ql_dump_state &= ~QL_DUMP_UPLOADED;
			EL(ha, "failed, ddi_copyout\n");
			cmd->Status = EXT_STATUS_COPY_ERR;
			cmd->ResponseLen = 0;
		} else {
			ha->ql_dump_state |= QL_DUMP_UPLOADED;
		}

		kmem_free(dump, ha->risc_dump_size);

		QL_DUMP_UNLOCK(ha);

		/* Re-enable NIC firmware dumps. */
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~DISABLE_NIC_FW_DMP;
		ADAPTER_STATE_UNLOCK(ha);
		break;
	case EXT_SC_DUMP_TRIGGER:
		cmd->ResponseLen = 0;

		ADAPTER_STATE_LOCK(ha);
		ha->flags |= DISABLE_NIC_FW_DMP;
		ADAPTER_STATE_UNLOCK(ha);

		if (sdm_valid_dump) {
			/* Refuse to overwrite an unretrieved dump. */
			cmd->Status = EXT_STATUS_INVALID_REQUEST;
			EL(ha, "Existing dump file needs to be retrieved.\n");
		} else {
			rval = ql_dump_firmware(ha);

			if (rval != QL_SUCCESS && rval != QL_DATA_EXISTS) {
				cmd->Status = EXT_STATUS_ERR;
			}
		}

		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~DISABLE_NIC_FW_DMP;
		ADAPTER_STATE_UNLOCK(ha);
		break;
	default:
		EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
		cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
		cmd->ResponseLen = 0;
		break;
	}

	QL_PRINT_9(ha, "done\n");
}
9970
9971 /*
9972 * ql_serdes_reg
9973 * Performs all EXT_CC_SERDES_REG_OP functions.
9974 *
9975 * Input:
9976 * ha: adapter state pointer.
9977 * cmd: EXT_IOCTL cmd struct pointer.
9978 * mode: flags
9979 *
9980 * Returns:
9981 * None, request status indicated in cmd->Status.
9982 *
9983 * Context:
9984 * Kernel context.
9985 */
9986 static void
9987 ql_serdes_reg(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
9988 {
9989 ql_mbx_data_t mr = {0};
9990 int rval = 0;
9991 EXT_SERDES_REG serdes_reg = {0};
9992
9993 QL_PRINT_9(ha, "started\n");
9994
9995 /* Check if request valid for HBA */
9996 if (!(CFG_IST(ha, CFG_SERDES_SUPPORT))) {
9997 EL(ha, "invalid request for HBA\n");
9998 cmd->Status = EXT_STATUS_INVALID_REQUEST;
9999 cmd->ResponseLen = 0;
10000 return;
10001 }
10002
10003 /* Copy in the request structure. */
10004 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
10005 (void *)&serdes_reg, sizeof (EXT_SERDES_REG), mode) != 0) {
10006 EL(ha, "failed, ddi_copyin\n");
10007 cmd->Status = EXT_STATUS_COPY_ERR;
10008 cmd->ResponseLen = 0;
10009 return;
10010 }
10011
10012 switch (cmd->SubCode) {
10013 case EXT_SC_WRITE_SERDES_REG:
10014 mr.mb[1] = serdes_reg.addr;
10015 mr.mb[2] = LSB(serdes_reg.val);
10016 mr.mb[3] = 0;
10017 mr.mb[4] = MSB(serdes_reg.val);
10018 if ((rval = ql_write_serdes(ha, &mr)) != QL_SUCCESS) {
10019 /* error */
10020 EL(ha, "failed, write_serdes_mbx=%xh\n", rval);
10021 cmd->Status = EXT_STATUS_ERR;
10022 cmd->ResponseLen = 0;
10023 break;
10024 } else {
10025 cmd->Status = EXT_STATUS_OK;
10026 }
10027 break;
10028 case EXT_SC_READ_SERDES_REG:
10029 /* Verify the size of response structure. */
10030 if (cmd->ResponseLen < sizeof (EXT_SERDES_REG)) {
10031 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
10032 cmd->DetailStatus = sizeof (EXT_SERDES_REG);
10033 EL(ha, "failed, ResponseLen < EXT_SERDES_REG, "
10034 "Len=%xh \n", cmd->ResponseLen);
10035 cmd->ResponseLen = 0;
10036 break;
10037 }
10038 mr.mb[1] = serdes_reg.addr;
10039 if ((rval = ql_read_serdes(ha, &mr)) != QL_SUCCESS) {
10040 /* error */
10041 EL(ha, "failed, read_serdes_mbx=%xh\n", rval);
10042 cmd->Status = EXT_STATUS_ERR;
10043 cmd->ResponseLen = 0;
10044 break;
10045 }
10046 serdes_reg.val = CHAR_TO_SHORT(LSB(mr.mb[1]), LSB(mr.mb[2]));
10047 /* Copy back the response data */
10048 if (ddi_copyout((void *)&serdes_reg,
10049 (void *)(uintptr_t)(cmd->ResponseAdr),
10050 sizeof (EXT_SERDES_REG), mode) != 0) {
10051 cmd->Status = EXT_STATUS_COPY_ERR;
10052 cmd->ResponseLen = 0;
10053 EL(ha, "failed, ddi_copyout\n");
10054 } else {
10055 cmd->Status = EXT_STATUS_OK;
10056 cmd->ResponseLen = sizeof (EXT_SERDES_REG);
10057 }
10058 break;
10059 default:
10060 /* Subcode not supported. */
10061 EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
10062 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
10063 cmd->ResponseLen = 0;
10064 break;
10065 }
10066
10067 QL_PRINT_9(ha, "done\n");
10068 }
10069
10070 /*
10071 * ql_serdes_reg_ex
10072 * Performs all EXT_CC_SERDES_REG_OP_EX functions.
10073 *
10074 * Input:
10075 * ha: adapter state pointer.
10076 * cmd: EXT_IOCTL cmd struct pointer.
10077 * mode: flags
10078 *
10079 * Returns:
10080 * None, request status indicated in cmd->Status.
10081 *
10082 * Context:
10083 * Kernel context.
10084 */
10085 static void
10086 ql_serdes_reg_ex(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
10087 {
10088 ql_mbx_data_t mr = {0};
10089 int rval = 0;
10090 EXT_SERDES_REG_EX serdes_reg_ex = {0};
10091
10092 QL_PRINT_9(ha, "started\n");
10093
10094 /* Check if request valid for HBA */
10095 if (!(CFG_IST(ha, CFG_SERDES_SUPPORT))) {
10096 EL(ha, "invalid request for HBA\n");
10097 cmd->Status = EXT_STATUS_INVALID_REQUEST;
10098 cmd->ResponseLen = 0;
10099 return;
10100 }
10101
10102 /* Copy in the request structure. */
10103 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
10104 (void *)&serdes_reg_ex, sizeof (EXT_SERDES_REG_EX), mode) != 0) {
10105 EL(ha, "failed, ddi_copyin\n");
10106 cmd->Status = EXT_STATUS_COPY_ERR;
10107 cmd->ResponseLen = 0;
10108 return;
10109 }
10110
10111 switch (cmd->SubCode) {
10112 case EXT_SC_WRITE_SERDES_REG:
10113 mr.mb[3] = LSW(serdes_reg_ex.addr);
10114 mr.mb[4] = MSW(serdes_reg_ex.addr);
10115 mr.mb[5] = LSW(serdes_reg_ex.val);
10116 mr.mb[6] = MSW(serdes_reg_ex.val);
10117 if ((rval = ql_write_serdes(ha, &mr)) != QL_SUCCESS) {
10118 /* error */
10119 EL(ha, "failed, write_serdes_mbx=%xh\n", rval);
10120 cmd->Status = EXT_STATUS_ERR;
10121 cmd->ResponseLen = 0;
10122 break;
10123 } else {
10124 cmd->Status = EXT_STATUS_OK;
10125 }
10126 break;
10127 case EXT_SC_READ_SERDES_REG:
10128 /* Verify the size of response structure. */
10129 if (cmd->ResponseLen < sizeof (EXT_SERDES_REG_EX)) {
10130 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
10131 cmd->DetailStatus = sizeof (EXT_SERDES_REG_EX);
10132 EL(ha, "failed, ResponseLen < EXT_SERDES_REG_EX, "
10133 "Len=%xh\n", cmd->ResponseLen);
10134 cmd->ResponseLen = 0;
10135 break;
10136 }
10137 mr.mb[3] = LSW(serdes_reg_ex.addr);
10138 mr.mb[4] = MSW(serdes_reg_ex.addr);
10139 if ((rval = ql_read_serdes(ha, &mr)) != QL_SUCCESS) {
10140 /* error */
10141 EL(ha, "failed, read_serdes_mbx=%xh\n", rval);
10142 cmd->Status = EXT_STATUS_ERR;
10143 cmd->ResponseLen = 0;
10144 break;
10145 }
10146 serdes_reg_ex.val = SHORT_TO_LONG(mr.mb[1], mr.mb[2]);
10147 /* Copy back the response data */
10148 if (ddi_copyout((void *)&serdes_reg_ex,
10149 (void *)(uintptr_t)(cmd->ResponseAdr),
10150 sizeof (EXT_SERDES_REG_EX), mode) != 0) {
10151 cmd->Status = EXT_STATUS_COPY_ERR;
10152 cmd->ResponseLen = 0;
10153 EL(ha, "failed, ddi_copyout\n");
10154 } else {
10155 cmd->Status = EXT_STATUS_OK;
10156 cmd->ResponseLen = sizeof (EXT_SERDES_REG_EX);
10157 }
10158 break;
10159 default:
10160 /* Subcode not supported. */
10161 EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
10162 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
10163 cmd->ResponseLen = 0;
10164 break;
10165 }
10166
10167 QL_PRINT_9(ha, "done\n");
10168 }
10169
10170 /*
10171 * ql_els_passthru
10172 * IOCTL for extended link service passthru command.
10173 *
10174 * Input:
10175 * ha: adapter state pointer.
10176 * cmd: User space CT arguments pointer.
10177 * mode: flags.
10178 *
10179 * Returns:
10180 * None, request status indicated in cmd->Status.
10181 *
10182 * Context:
10183 * Kernel context.
10184 */
static void
ql_els_passthru(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
{
	ql_mbx_iocb_t	*pkt;
	dma_mem_t	*dma_mem;
	caddr_t		bp, pld;
	uint32_t	pkt_size, pld_byte_cnt, cmd_size, *long_ptr;
	EXT_ELS_PT_REQ	*pt_req;
	boolean_t	local_hba = B_FALSE;
	ql_tgt_t	*tq = NULL;
	port_id_t	tmp_fcid;
	int		rval;
	uint16_t	comp_status;

	QL_PRINT_9(ha, "started\n");

	/* ELS passthru cannot be serviced while the driver is suspended. */
	if (DRIVER_SUSPENDED(ha)) {
		EL(ha, "failed, LOOP_NOT_READY\n");
		cmd->Status = EXT_STATUS_BUSY;
		cmd->ResponseLen = 0;
		return;
	}

	/* The request must at least contain the passthru header. */
	if (cmd->RequestLen < sizeof (EXT_ELS_PT_REQ)) {
		/* parameter error */
		EL(ha, "failed, RequestLen < EXT_ELS_PT_REQ, Len=%xh\n",
		    cmd->RequestLen);
		cmd->Status = EXT_STATUS_INVALID_PARAM;
		cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate memory for command. */
	bp = kmem_zalloc(cmd->RequestLen, KM_SLEEP);

	if (ddi_copyin((void*)(uintptr_t)cmd->RequestAdr,
	    bp, cmd->RequestLen, mode) != 0) {
		EL(ha, "failed, ddi_copyin\n");
		kmem_free(bp, cmd->RequestLen);
		cmd->Status = EXT_STATUS_COPY_ERR;
		cmd->ResponseLen = 0;
		return;
	}
	/* The copied-in buffer starts with the EXT_ELS_PT_REQ header. */
	pt_req = (EXT_ELS_PT_REQ *)bp;

	QL_PRINT_9(ha, "EXT_ELS_PT_REQ\n");
	QL_DUMP_9((uint8_t *)pt_req, 8, sizeof (EXT_ELS_PT_REQ));

	/* Find loop ID of the device */
	if (pt_req->ValidMask & EXT_DEF_WWPN_VALID) {
		/* Look up the target by port WWN. */
		if (bcmp(ha->loginparams.nport_ww_name.raw_wwn, pt_req->WWPN,
		    EXT_DEF_WWN_NAME_SIZE) == 0) {
			local_hba = B_TRUE;
		} else {
			tq = ql_find_port(ha, pt_req->WWPN, QLNT_PORT);
		}
	} else if (pt_req->ValidMask & EXT_DEF_PID_VALID) {
		/*
		 * Copy caller's d_id to tmp space.
		 */
		bcopy(&pt_req->Id[1], tmp_fcid.r.d_id,
		    EXT_DEF_PORTID_SIZE_ACTUAL);
		BIG_ENDIAN_24(&tmp_fcid.r.d_id[0]);

		if (bcmp((void *)&ha->d_id, (void *)tmp_fcid.r.d_id,
		    EXT_DEF_PORTID_SIZE_ACTUAL) == 0) {
			local_hba = B_TRUE;
		} else {
			tq = ql_find_port(ha, (uint8_t *)tmp_fcid.r.d_id,
			    QLNT_PID);
		}
	} else if (pt_req->ValidMask & EXT_DEF_WWNN_VALID) {
		/* Look up the target by node WWN. */
		if (bcmp(ha->loginparams.node_ww_name.raw_wwn, pt_req->WWNN,
		    EXT_DEF_WWN_NAME_SIZE) == 0) {
			local_hba = B_TRUE;
		} else {
			tq = ql_find_port(ha, pt_req->WWNN, QLNT_NODE);
		}
	}

	/* Sending an ELS to the local adapter itself is not supported. */
	if (local_hba == B_TRUE) {
		EL(ha, "failed, els to adapter\n");
		kmem_free(bp, cmd->RequestLen);
		cmd->Status = EXT_STATUS_ERR;
		cmd->ResponseLen = 0;
		return;
	}

	if (tq == NULL) {
		/* no matching device */
		EL(ha, "failed, device not found\n");
		kmem_free(bp, cmd->RequestLen);
		cmd->Status = EXT_STATUS_DEV_NOT_FOUND;
		cmd->DetailStatus = EXT_DSTATUS_TARGET;
		cmd->ResponseLen = 0;
		return;
	}

	/* Allocate a DMA Memory Descriptor */
	dma_mem = (dma_mem_t *)kmem_zalloc(sizeof (dma_mem_t), KM_SLEEP);
	if (dma_mem == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		kmem_free(bp, cmd->RequestLen);
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}
	/* Determine maximum buffer size. */
	cmd_size = cmd->RequestLen - sizeof (EXT_ELS_PT_REQ);
	/* One shared buffer, sized for the larger of command/response. */
	pld_byte_cnt = cmd_size < cmd->ResponseLen ? cmd->ResponseLen :
	    cmd_size;
	/* The ELS payload follows the passthru header. */
	pld = (caddr_t)(bp + sizeof (EXT_ELS_PT_REQ));

	/* Allocate command block. */
	pkt_size = (uint32_t)(sizeof (ql_mbx_iocb_t));
	pkt = kmem_zalloc(pkt_size, KM_SLEEP);
	if (pkt == NULL) {
		EL(ha, "failed, kmem_zalloc\n");
		kmem_free(dma_mem, sizeof (dma_mem_t));
		kmem_free(bp, cmd->RequestLen);
		cmd->Status = EXT_STATUS_NO_MEMORY;
		cmd->ResponseLen = 0;
		return;
	}

	/* Get DMA memory for the payload */
	if (ql_get_dma_mem(ha, dma_mem, pld_byte_cnt, LITTLE_ENDIAN_DMA,
	    QL_DMA_RING_ALIGN) != QL_SUCCESS) {
		cmn_err(CE_WARN, "%sDMA memory alloc failed", QL_NAME);
		kmem_free(pkt, pkt_size);
		kmem_free(dma_mem, sizeof (dma_mem_t));
		kmem_free(bp, cmd->RequestLen);
		cmd->Status = EXT_STATUS_MS_NO_RESPONSE;
		cmd->ResponseLen = 0;
		return;
	}

	/* Copy out going payload data to IOCB DMA buffer. */
	ddi_rep_put8(dma_mem->acc_handle, (uint8_t *)pld,
	    (uint8_t *)dma_mem->bp, cmd_size, DDI_DEV_AUTOINCR);

	/* Sync IOCB DMA buffer. */
	(void) ddi_dma_sync(dma_mem->dma_handle, 0, cmd_size,
	    DDI_DMA_SYNC_FORDEV);

	/*
	 * Setup IOCB
	 */
	if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
		/* Type-2 firmware uses the ELS passthru IOCB. */
		pkt->els.entry_type = ELS_PASSTHRU_TYPE;
		pkt->els.entry_count = 1;

		/* Set loop ID */
		pkt->els.n_port_hdl = tq->loop_id;

		/* Set cmd/response data segment counts. */
		pkt->els.xmt_dseg_count = LE_16(1);
		pkt->els.vp_index = ha->vp_index;
		pkt->els.rcv_dseg_count = LE_16(1);

		/* First payload byte is the ELS command opcode. */
		pkt->els.els_cmd_opcode = pld[0];

		/* Destination is the target's 24-bit port id. */
		pkt->els.d_id_7_0 = tq->d_id.b.al_pa;
		pkt->els.d_id_15_8 = tq->d_id.b.area;
		pkt->els.d_id_23_16 = tq->d_id.b.domain;

		/* Source is this adapter's 24-bit port id. */
		pkt->els.s_id_7_0 = ha->d_id.b.al_pa;
		pkt->els.s_id_15_8 = ha->d_id.b.area;
		pkt->els.s_id_23_16 = ha->d_id.b.domain;

		/* Load rsp byte count. */
		pkt->els.rcv_payld_data_bcnt = LE_32(cmd->ResponseLen);

		/* Load cmd byte count. */
		pkt->els.xmt_payld_data_bcnt = LE_32(cmd_size);

		long_ptr = (uint32_t *)&pkt->els.dseg;

		/* Load MS command entry data segments. */
		*long_ptr++ = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		*long_ptr++ = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
		*long_ptr++ = LE_32(cmd_size);

		/* Load MS response entry data segments. */
		*long_ptr++ = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		*long_ptr++ = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
		*long_ptr = LE_32(cmd->ResponseLen);

		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* A data underrun is treated as success. */
		comp_status = (uint16_t)LE_16(pkt->sts24.comp_status);
		if (rval == QL_SUCCESS && comp_status == CS_DATA_UNDERRUN) {
			comp_status = CS_COMPLETE;
		}
		if (rval != QL_SUCCESS ||
		    (pkt->sts24.entry_status & 0x3c) != 0 ||
		    comp_status != CS_COMPLETE) {
			EL(ha, "failed, I/O timeout, cs=%xh, es=%xh, "
			    "rval=%xh\n",
			    comp_status, pkt->sts24.entry_status, rval);
			ql_free_dma_resource(ha, dma_mem);
			kmem_free(pkt, pkt_size);
			kmem_free(dma_mem, sizeof (dma_mem_t));
			kmem_free(bp, cmd->RequestLen);
			cmd->Status = EXT_STATUS_ERR;
			cmd->ResponseLen = 0;
			return;
		}
	} else {
		/* Older firmware uses an MS IOCB with the ELS flag set. */
		pkt->ms.entry_type = MS_TYPE;
		pkt->ms.entry_count = 1;

		/* Set loop ID */
		if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
			pkt->ms.loop_id_l = LSB(tq->loop_id);
			pkt->ms.loop_id_h = MSB(tq->loop_id);
		} else {
			pkt->ms.loop_id_h = LSB(tq->loop_id);
		}

		pkt->ms.control_flags_h = CF_ELS_PASSTHROUGH;

		/* Set ISP command timeout. */
		pkt->ms.timeout = LE_16(120);

		/* Set data segment counts. */
		pkt->ms.cmd_dseg_count_l = 1;
		pkt->ms.total_dseg_count = LE_16(2);

		/* Response total byte count. */
		pkt->ms.resp_byte_count = LE_32(cmd->ResponseLen);
		pkt->ms.dseg[1].length = LE_32(cmd->ResponseLen);

		/* Command total byte count. */
		pkt->ms.cmd_byte_count = LE_32(cmd_size);
		pkt->ms.dseg[0].length = LE_32(cmd_size);

		/* Load command/response data segments. */
		pkt->ms.dseg[0].address[0] = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		pkt->ms.dseg[0].address[1] = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));
		pkt->ms.dseg[1].address[0] = (uint32_t)
		    LE_32(LSD(dma_mem->cookie.dmac_laddress));
		pkt->ms.dseg[1].address[1] = (uint32_t)
		    LE_32(MSD(dma_mem->cookie.dmac_laddress));

		rval = ql_issue_mbx_iocb(ha, (caddr_t)pkt,
		    sizeof (ql_mbx_iocb_t));

		/* A data underrun is treated as success. */
		comp_status = (uint16_t)LE_16(pkt->sts.comp_status);
		if (rval == QL_SUCCESS && comp_status == CS_DATA_UNDERRUN) {
			comp_status = CS_COMPLETE;
		}
		if (rval != QL_SUCCESS ||
		    (pkt->sts.entry_status & 0x7e) != 0 ||
		    comp_status != CS_COMPLETE) {
			EL(ha, "failed, I/O timeout, cs=%xh, es=%xh, "
			    "rval=%xh\n",
			    comp_status, pkt->sts.entry_status, rval);
			ql_free_dma_resource(ha, dma_mem);
			kmem_free(pkt, pkt_size);
			kmem_free(dma_mem, sizeof (dma_mem_t));
			kmem_free(bp, cmd->RequestLen);
			cmd->Status = EXT_STATUS_ERR;
			cmd->ResponseLen = 0;
			return;
		}
	}

	/* Sync payload DMA buffer. */
	(void) ddi_dma_sync(dma_mem->dma_handle, 0, cmd->ResponseLen,
	    DDI_DMA_SYNC_FORKERNEL);

	/* Copy the ELS response from DMA memory to the caller's buffer. */
	if (ql_send_buffer_data(dma_mem->bp,
	    (caddr_t)(uintptr_t)cmd->ResponseAdr,
	    cmd->ResponseLen, mode) != cmd->ResponseLen) {
		cmd->Status = EXT_STATUS_COPY_ERR;
		EL(ha, "failed, ddi_copyout\n");
	} else {
		/*
		 * NOTE(review): this debug dump prints 'pld' (the request
		 * payload); the response bytes live in dma_mem->bp —
		 * confirm which buffer was intended.
		 */
		QL_PRINT_9(ha, "els_rsp\n");
		QL_DUMP_9(pld, 8, cmd->ResponseLen);
		cmd->Status = EXT_STATUS_OK;
		QL_PRINT_9(ha, "done\n");
	}

	ql_free_dma_resource(ha, dma_mem);
	kmem_free(pkt, pkt_size);
	kmem_free(dma_mem, sizeof (dma_mem_t));
	kmem_free(bp, cmd->RequestLen);
}
10482
10483 /*
10484 * ql_flash_update_caps
10485 * IOCTL for flash update capabilities command.
10486 *
10487 * Input:
10488 * ha: adapter state pointer.
10489 * cmd: User space CT arguments pointer.
10490 * mode: flags.
10491 *
10492 * Returns:
10493 * None, request status indicated in cmd->Status.
10494 *
10495 * Context:
10496 * Kernel context.
10497 */
10498 static void
10499 ql_flash_update_caps(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
10500 {
10501 int rval;
10502 uint64_t cb;
10503 EXT_FLASH_UPDATE_CAPS caps = {0};
10504
10505 QL_PRINT_9(ha, "started\n");
10506
10507 cb = LONG_TO_LLONG(ha->fw_attributes, ha->fw_ext_attributes);
10508
10509 switch (cmd->SubCode) {
10510 case EXT_SC_GET_FLASH_UPDATE_CAPS:
10511 if (cmd->ResponseLen < sizeof (EXT_FLASH_UPDATE_CAPS)) {
10512 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
10513 cmd->DetailStatus = sizeof (EXT_FLASH_UPDATE_CAPS);
10514 EL(ha, "failed, ResponseLen < 0x%x, Len=0x%x\n",
10515 sizeof (EXT_FLASH_UPDATE_CAPS), cmd->ResponseLen);
10516 cmd->ResponseLen = 0;
10517 return;
10518 }
10519 caps.Capabilities = cb;
10520 caps.OutageDuration = 300; /* seconds */
10521
10522 rval = ddi_copyout((void *)&caps,
10523 (void *)(uintptr_t)(cmd->ResponseAdr),
10524 sizeof (EXT_FLASH_UPDATE_CAPS), mode);
10525 if (rval != 0) {
10526 cmd->Status = EXT_STATUS_COPY_ERR;
10527 cmd->ResponseLen = 0;
10528 EL(ha, "failed, ddi_copyout\n");
10529 } else {
10530 cmd->ResponseLen = sizeof (EXT_FLASH_UPDATE_CAPS);
10531 }
10532 break;
10533 case EXT_SC_SET_FLASH_UPDATE_CAPS:
10534 if (cmd->RequestLen < sizeof (EXT_FLASH_UPDATE_CAPS)) {
10535 /* parameter error */
10536 EL(ha, "failed, RequestLen < EXT_FLASH_UPDATE_CAPS, "
10537 "Len=%xh\n", cmd->RequestLen);
10538 cmd->Status = EXT_STATUS_INVALID_PARAM;
10539 cmd->DetailStatus = EXT_DSTATUS_REQUEST_LEN;
10540 cmd->ResponseLen = 0;
10541 return;
10542 }
10543
10544 /* Copy in the request structure. */
10545 if (ddi_copyin((void *)(uintptr_t)cmd->RequestAdr,
10546 (void *)&caps, sizeof (EXT_FLASH_UPDATE_CAPS), mode) != 0) {
10547 EL(ha, "failed, ddi_copyin\n");
10548 cmd->Status = EXT_STATUS_COPY_ERR;
10549 cmd->ResponseLen = 0;
10550 return;
10551 }
10552
10553 if (cb != caps.Capabilities || caps.OutageDuration < 300) {
10554 cmd->Status = EXT_STATUS_ERR;
10555 cmd->ResponseLen = 0;
10556 }
10557 break;
10558 default:
10559 /* Subcode not supported. */
10560 EL(ha, "unknown subcode=%xh\n", cmd->SubCode);
10561 cmd->Status = EXT_STATUS_UNSUPPORTED_SUBCODE;
10562 cmd->ResponseLen = 0;
10563 break;
10564 }
10565
10566 QL_PRINT_9(ha, "done\n");
10567 }
10568
10569 /*
10570 * ql_get_bbcr_data
10571 * IOCTL for get buffer to buffer credits command.
10572 *
10573 * Input:
10574 * ha: adapter state pointer.
10575 * cmd: User space CT arguments pointer.
10576 * mode: flags.
10577 *
10578 * Returns:
10579 * None, request status indicated in cmd->Status.
10580 *
10581 * Context:
10582 * Kernel context.
10583 */
10584 static void
10585 ql_get_bbcr_data(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
10586 {
10587 int rval;
10588 ql_mbx_data_t mr;
10589 EXT_BBCR_DATA bb = {0};
10590
10591 QL_PRINT_9(ha, "started\n");
10592
10593 if (cmd->ResponseLen < sizeof (EXT_BBCR_DATA)) {
10594 cmd->Status = EXT_STATUS_BUFFER_TOO_SMALL;
10595 cmd->DetailStatus = sizeof (EXT_BBCR_DATA);
10596 EL(ha, "failed, ResponseLen < 0x%x, Len=0x%x\n",
10597 sizeof (EXT_BBCR_DATA), cmd->ResponseLen);
10598 cmd->ResponseLen = 0;
10599 return;
10600 }
10601
10602 if (!(CFG_IST(ha, CFG_BBCR_SUPPORT))) {
10603 EL(ha, "invalid request for HBA\n");
10604 cmd->Status = EXT_STATUS_INVALID_REQUEST;
10605 cmd->ResponseLen = 0;
10606 return;
10607 }
10608 if (ha->task_daemon_flags & LOOP_DOWN) {
10609 rval = ql_get_adapter_id(ha, &mr);
10610 ha->bbcr_initial = LSB(mr.mb[15]);
10611 ha->bbcr_runtime = MSB(mr.mb[15]);
10612 bb.ConfiguredBBSCN = ha->bbcr_initial & BBCR_INITIAL_MASK;
10613 bb.NegotiatedBBSCN = ha->bbcr_runtime & BBCR_RUNTIME_MASK;
10614 bb.Status = EXT_DEF_BBCR_STATUS_UNKNOWN;
10615 bb.State = EXT_DEF_BBCR_STATE_OFFLINE;
10616 if (rval == 0x4005) {
10617 bb.mbx1 = mr.mb[1];
10618 }
10619 } else {
10620 bb.ConfiguredBBSCN = ha->bbcr_initial & BBCR_INITIAL_MASK;
10621 bb.NegotiatedBBSCN = ha->bbcr_runtime & BBCR_RUNTIME_MASK;
10622
10623 if (bb.ConfiguredBBSCN) {
10624 bb.Status = EXT_DEF_BBCR_STATUS_ENABLED;
10625 if (bb.NegotiatedBBSCN &&
10626 !(ha->bbcr_runtime & BBCR_RUNTIME_REJECT)) {
10627 bb.State = EXT_DEF_BBCR_STATE_ONLINE;
10628 } else {
10629 bb.State = EXT_DEF_BBCR_STATE_OFFLINE;
10630 if (ha->bbcr_runtime & BBCR_RUNTIME_REJECT) {
10631 bb.OfflineReasonCode =
10632 EXT_DEF_BBCR_REASON_LOGIN_REJECT;
10633 } else {
10634 bb.OfflineReasonCode =
10635 EXT_DEF_BBCR_REASON_SWITCH;
10636 }
10637 }
10638 } else {
10639 bb.Status = EXT_DEF_BBCR_STATUS_DISABLED;
10640 }
10641 }
10642
10643 rval = ddi_copyout((void *)&bb, (void *)(uintptr_t)(cmd->ResponseAdr),
10644 sizeof (EXT_BBCR_DATA), mode);
10645 if (rval != 0) {
10646 cmd->Status = EXT_STATUS_COPY_ERR;
10647 cmd->ResponseLen = 0;
10648 EL(ha, "failed, ddi_copyout\n");
10649 } else {
10650 cmd->ResponseLen = sizeof (EXT_BBCR_DATA);
10651 }
10652
10653 QL_PRINT_9(ha, "done\n");
10654 }
10655
10656 /*
10657 * ql_get_priv_stats
10658 * Performs EXT_SC_GET_PRIV_STATS subcommand. of EXT_CC_GET_DATA.
10659 *
10660 * Input:
10661 * ha: adapter state pointer.
10662 * cmd: Local EXT_IOCTL cmd struct pointer.
10663 * mode: flags.
10664 *
10665 * Returns:
10666 * None, request status indicated in cmd->Status.
10667 *
10668 * Context:
10669 * Kernel context.
10670 */
10671 static void
10672 ql_get_priv_stats(ql_adapter_state_t *ha, EXT_IOCTL *cmd, int mode)
10673 {
10674 uint8_t *ls;
10675 int rval;
10676 int retry = 10;
10677
10678 QL_PRINT_9(ha, "started\n");
10679
10680 while (ha->task_daemon_flags & (DRIVER_STALL | ABORT_ISP_ACTIVE |
10681 LOOP_RESYNC_ACTIVE)) {
10682 ql_delay(ha, 10000000); /* 10 second delay */
10683
10684 retry--;
10685
10686 if (retry == 0) { /* effectively 100 seconds */
10687 EL(ha, "failed, LOOP_NOT_READY\n");
10688 cmd->Status = EXT_STATUS_BUSY;
10689 cmd->ResponseLen = 0;
10690 return;
10691 }
10692 }
10693
10694 /* Allocate memory for command. */
10695 ls = kmem_zalloc(cmd->ResponseLen, KM_SLEEP);
10696
10697 /*
10698 * I think these are supposed to be port statistics
10699 * the loop ID or port ID should be in cmd->Instance.
10700 */
10701 rval = ql_get_status_counts(ha,
10702 ha->task_daemon_flags & LOOP_DOWN ? 0xFF : ha->loop_id,
10703 cmd->ResponseLen, (caddr_t)ls, 0);
10704 if (rval != QL_SUCCESS) {
10705 EL(ha, "failed, get_link_status=%xh, id=%xh\n", rval,
10706 ha->loop_id);
10707 cmd->Status = EXT_STATUS_MAILBOX;
10708 cmd->DetailStatus = rval;
10709 cmd->ResponseLen = 0;
10710 } else {
10711 rval = ddi_copyout((void *)&ls,
10712 (void *)(uintptr_t)cmd->ResponseAdr, cmd->ResponseLen,
10713 mode);
10714 if (rval != 0) {
10715 EL(ha, "failed, ddi_copyout\n");
10716 cmd->Status = EXT_STATUS_COPY_ERR;
10717 cmd->ResponseLen = 0;
10718 }
10719 }
10720
10721 kmem_free(ls, cmd->ResponseLen);
10722
10723 QL_PRINT_9(ha, "done\n");
10724 }
|