1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2015 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
26 */
27
28 #pragma ident "Copyright 2015 QLogic Corporation; ql_ioctl.c"
29
30 /*
31 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32 * Fibre Channel Adapter (FCA) driver IOCTL source file.
33 *
34 * ***********************************************************************
35 * * **
36 * * NOTICE **
37 * * COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION **
38 * * ALL RIGHTS RESERVED **
39 * * **
40 * ***********************************************************************
41 *
42 */
43
44 #include <ql_apps.h>
45 #include <ql_api.h>
46 #include <ql_debug.h>
47 #include <ql_init.h>
48 #include <ql_ioctl.h>
49 #include <ql_mbx.h>
50 #include <ql_nx.h>
51 #include <ql_xioctl.h>
52
53 /*
54 * Local Function Prototypes.
55 */
56 static int ql_busy_notification(ql_adapter_state_t *);
57 static int ql_idle_notification(ql_adapter_state_t *);
58 static int ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features);
59 static int ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features);
60 static int ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha);
61 static void ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr,
62 uint16_t value);
63 static int ql_24xx_load_nvram(ql_adapter_state_t *, uint32_t, uint32_t);
64 static int ql_adm_op(ql_adapter_state_t *, void *, int);
65 static int ql_adm_adapter_info(ql_adapter_state_t *, ql_adm_op_t *, int);
66 static int ql_adm_extended_logging(ql_adapter_state_t *, ql_adm_op_t *);
67 static int ql_adm_device_list(ql_adapter_state_t *, ql_adm_op_t *, int);
68 static int ql_adm_update_properties(ql_adapter_state_t *);
69 static int ql_adm_prop_update_int(ql_adapter_state_t *, ql_adm_op_t *, int);
70 static int ql_adm_loop_reset(ql_adapter_state_t *);
71 static int ql_adm_fw_dump(ql_adapter_state_t *, ql_adm_op_t *, void *, int);
72 static int ql_adm_fw_t_dump(ql_adapter_state_t *);
73 static int ql_adm_beacon(ql_adapter_state_t *, ql_adm_op_t *);
74 static int ql_adm_nvram_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
75 static int ql_adm_nvram_load(ql_adapter_state_t *, ql_adm_op_t *, int);
76 static int ql_adm_flash_load(ql_adapter_state_t *, ql_adm_op_t *, int);
77 static int ql_adm_vpd_dump(ql_adapter_state_t *, ql_adm_op_t *, int);
78 static int ql_adm_vpd_load(ql_adapter_state_t *, ql_adm_op_t *, int);
79 static int ql_adm_vpd_gettag(ql_adapter_state_t *, ql_adm_op_t *, int);
80 static int ql_adm_updfwmodule(ql_adapter_state_t *, ql_adm_op_t *, int);
81 static uint8_t *ql_vpd_findtag(ql_adapter_state_t *, uint8_t *, int8_t *);
82
83 /* ************************************************************************ */
84 /* cb_ops functions */
85 /* ************************************************************************ */
86
87 /*
88 * ql_open
89 * opens device
90 *
91 * Input:
92 * dev_p = device pointer
93 * flags = open flags
 *	otyp = open type
95 * cred_p = credentials pointer
96 *
97 * Returns:
98 * 0 = success
99 *
100 * Context:
101 * Kernel context.
102 */
103 /* ARGSUSED */
104 int
105 ql_open(dev_t *dev_p, int flags, int otyp, cred_t *cred_p)
106 {
107 ql_adapter_state_t *ha;
108 int rval = 0;
109
110 ha = ddi_get_soft_state(ql_state, (int32_t)getminor(*dev_p));
111 if (ha == NULL) {
112 QL_PRINT_2(NULL, "failed, no adapter\n");
113 return (ENXIO);
114 }
115
116 QL_PRINT_3(ha, "started\n");
117
118 /* Allow only character opens */
119 if (otyp != OTYP_CHR) {
120 QL_PRINT_2(ha, "failed, open type\n");
121 return (EINVAL);
122 }
123
124 ADAPTER_STATE_LOCK(ha);
125 if (flags & FEXCL && ha->flags & QL_OPENED) {
126 ADAPTER_STATE_UNLOCK(ha);
127 rval = EBUSY;
128 } else {
129 ha->flags |= QL_OPENED;
130 ADAPTER_STATE_UNLOCK(ha);
131 }
132
133 if (rval != 0) {
134 EL(ha, "failed, rval = %xh\n", rval);
135 } else {
136 /*EMPTY*/
137 QL_PRINT_3(ha, "done\n");
138 }
139 return (rval);
140 }
141
142 /*
143 * ql_close
 *	closes device
 *
 * Input:
 *	dev = device number
 *	flags = open flags
 *	otyp = open type
150 * cred_p = credentials pointer
151 *
152 * Returns:
153 * 0 = success
154 *
155 * Context:
156 * Kernel context.
157 */
158 /* ARGSUSED */
159 int
160 ql_close(dev_t dev, int flags, int otyp, cred_t *cred_p)
161 {
162 ql_adapter_state_t *ha;
163 int rval = 0;
164
165 ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
166 if (ha == NULL) {
167 QL_PRINT_2(ha, "failed, no adapter\n");
168 return (ENXIO);
169 }
170
171 QL_PRINT_3(ha, "started\n");
172
173 if (otyp != OTYP_CHR) {
174 QL_PRINT_2(ha, "failed, open type\n");
175 return (EINVAL);
176 }
177
178 ADAPTER_STATE_LOCK(ha);
179 ha->flags &= ~QL_OPENED;
180 ADAPTER_STATE_UNLOCK(ha);
181
182 if (rval != 0) {
183 EL(ha, "failed, rval = %xh\n", rval);
184 } else {
185 /*EMPTY*/
186 QL_PRINT_3(ha, "done\n");
187 }
188 return (rval);
189 }
190
191 /*
192 * ql_ioctl
193 * control a character device
194 *
195 * Input:
196 * dev = device number
197 * cmd = function to perform
198 * arg = data type varies with request
199 * mode = flags
200 * cred_p = credentials pointer
201 * rval_p = pointer to result value
202 *
203 * Returns:
204 * 0 = success
205 *
206 * Context:
207 * Kernel context.
208 */
209 /* ARGSUSED */
210 int
211 ql_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
212 int *rval_p)
213 {
214 ql_adapter_state_t *ha;
215 int rval = 0;
216
217 if (ddi_in_panic()) {
218 QL_PRINT_2(NULL, "ql_ioctl: ddi_in_panic exit\n");
219 return (ENOPROTOOPT);
220 }
221
222 ha = ddi_get_soft_state(ql_state, (int32_t)getminor(dev));
223 if (ha == NULL) {
224 QL_PRINT_2(ha, "failed, no adapter\n");
225 return (ENXIO);
226 }
227
228 QL_PRINT_3(ha, "started\n");
229
230 /*
231 * Quick clean exit for qla2x00 foapi calls which are
232 * not supported in qlc.
233 */
234 if (cmd >= QL_FOAPI_START && cmd <= QL_FOAPI_END) {
235 QL_PRINT_9(ha, "failed, fo api not supported\n");
236 return (ENOTTY);
237 }
238
239 /* PWR management busy. */
240 rval = ql_busy_notification(ha);
241 if (rval != FC_SUCCESS) {
242 EL(ha, "failed, ql_busy_notification\n");
243 return (ENXIO);
244 }
245
246 rval = ql_xioctl(ha, cmd, arg, mode, cred_p, rval_p);
247 if (rval == ENOPROTOOPT || rval == EINVAL) {
248 switch (cmd) {
249 case QL_GET_ADAPTER_FEATURE_BITS: {
250 uint16_t bits;
251
252 rval = ql_get_feature_bits(ha, &bits);
253
254 if (!rval && ddi_copyout((void *)&bits, (void *)arg,
255 sizeof (bits), mode)) {
256 rval = EFAULT;
257 }
258 break;
259 }
260
261 case QL_SET_ADAPTER_FEATURE_BITS: {
262 uint16_t bits;
263
264 if (ddi_copyin((void *)arg, (void *)&bits,
265 sizeof (bits), mode)) {
266 rval = EFAULT;
267 break;
268 }
269
270 rval = ql_set_feature_bits(ha, bits);
271 break;
272 }
273
274 case QL_SET_ADAPTER_NVRAM_DEFAULTS:
275 rval = ql_set_nvram_adapter_defaults(ha);
276 break;
277
278 case QL_UTIL_LOAD:
279 rval = ql_nv_util_load(ha, (void *)arg, mode);
280 break;
281
282 case QL_UTIL_DUMP:
283 rval = ql_nv_util_dump(ha, (void *)arg, mode);
284 break;
285
286 case QL_ADM_OP:
287 rval = ql_adm_op(ha, (void *)arg, mode);
288 break;
289
290 default:
291 EL(ha, "unknown command = %d\n", cmd);
292 rval = ENOTTY;
293 break;
294 }
295 }
296
297 /* PWR management idle. */
298 (void) ql_idle_notification(ha);
299
300 if (rval != 0) {
301 /*
302 * Don't show failures caused by pps polling for
		 * non-existent virtual ports.
304 */
305 if (cmd != EXT_CC_VPORT_CMD) {
306 EL(ha, "failed, cmd=%d rval=%d\n", cmd, rval);
307 }
308 } else {
309 /*EMPTY*/
310 QL_PRINT_9(ha, "done\n");
311 }
312 return (rval);
313 }
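
/*
 * Illustrative user-space usage (not driver code): a management utility
 * opens the adapter's character node and issues one of the commands
 * handled above, e.g. QL_GET_ADAPTER_FEATURE_BITS.  The device path is
 * an assumption for illustration only; the actual minor node is created
 * at attach time.
 *
 *	int		fd;
 *	uint16_t	bits;
 *
 *	fd = open("/devices/pci@0/QLGC,qlc@0:devctl", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, QL_GET_ADAPTER_FEATURE_BITS, &bits) == 0) {
 *		(void) printf("feature bits = %xh\n", bits);
 *	}
 *	(void) close(fd);
 */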
314
315 /*
316 * ql_busy_notification
317 * Adapter busy notification.
318 *
319 * Input:
320 * ha = adapter state pointer.
321 *
322 * Returns:
323 * FC_SUCCESS
324 * FC_FAILURE
325 *
326 * Context:
327 * Kernel context.
328 */
329 static int
330 ql_busy_notification(ql_adapter_state_t *ha)
331 {
332 if (!ha->pm_capable) {
333 return (FC_SUCCESS);
334 }
335
336 QL_PRINT_9(ha, "started\n");
337
338 QL_PM_LOCK(ha);
339 ha->pm_busy++;
340 QL_PM_UNLOCK(ha);
341
342 if (pm_busy_component(ha->dip, 0) != DDI_SUCCESS) {
343 QL_PM_LOCK(ha);
344 if (ha->pm_busy) {
345 ha->pm_busy--;
346 }
347 QL_PM_UNLOCK(ha);
348
349 EL(ha, "pm_busy_component failed = %xh\n", FC_FAILURE);
350 return (FC_FAILURE);
351 }
352
353 QL_PM_LOCK(ha);
354 if (ha->power_level != PM_LEVEL_D0) {
355 QL_PM_UNLOCK(ha);
356 if (pm_raise_power(ha->dip, 0, 1) != DDI_SUCCESS) {
357 QL_PM_LOCK(ha);
358 if (ha->pm_busy) {
359 ha->pm_busy--;
360 }
361 QL_PM_UNLOCK(ha);
362 return (FC_FAILURE);
363 }
364 } else {
365 QL_PM_UNLOCK(ha);
366 }
367
368 QL_PRINT_9(ha, "done\n");
369
370 return (FC_SUCCESS);
371 }
372
373 /*
374 * ql_idle_notification
375 * Adapter idle notification.
376 *
377 * Input:
378 * ha = adapter state pointer.
379 *
380 * Returns:
381 * FC_SUCCESS
382 * FC_FAILURE
383 *
384 * Context:
385 * Kernel context.
386 */
387 static int
388 ql_idle_notification(ql_adapter_state_t *ha)
389 {
390 if (!ha->pm_capable) {
391 return (FC_SUCCESS);
392 }
393
394 QL_PRINT_9(ha, "started\n");
395
396 if (pm_idle_component(ha->dip, 0) != DDI_SUCCESS) {
397 EL(ha, "pm_idle_component failed = %xh\n", FC_FAILURE);
398 return (FC_FAILURE);
399 }
400
401 QL_PM_LOCK(ha);
402 if (ha->pm_busy) {
403 ha->pm_busy--;
404 }
405 QL_PM_UNLOCK(ha);
406
407 QL_PRINT_9(ha, "done\n");
408
409 return (FC_SUCCESS);
410 }
411
412 /*
413 * Get adapter feature bits from NVRAM
414 */
415 static int
416 ql_get_feature_bits(ql_adapter_state_t *ha, uint16_t *features)
417 {
418 int count;
419 volatile uint16_t data;
420 uint32_t nv_cmd;
421 uint32_t start_addr;
422 int rval;
423 uint32_t offset = offsetof(nvram_t, adapter_features);
424
425 QL_PRINT_9(ha, "started\n");
426
427 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
428 EL(ha, "Not supported for 24xx\n");
429 return (EINVAL);
430 }
431
432 /*
	 * The offset must fit in 8 bits and must fall on a
	 * 2-byte boundary; the code below depends on both
	 * assumptions.
436 */
437 rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
438 if (rval != QL_SUCCESS) {
439 EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
440 return (EIO);
441 }
442
443 /*
	 * Build the command word: the most significant 3 bits carry the
	 * read opcode, followed by the 8 bits giving the offset at which
	 * the read is performed.
447 */
448 offset >>= 1;
449 offset += start_addr;
450 nv_cmd = (offset << 16) | NV_READ_OP;
451 nv_cmd <<= 5;
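	/*
	 * Illustrative example: if, say, start_addr were 0 and the feature
	 * word sat at byte offset 0x18, the word offset above would be
	 * 0x0C and nv_cmd would be ((0x0C << 16) | NV_READ_OP) << 5.  The
	 * loop below clocks out the top 11 bits (3-bit opcode followed by
	 * the 8-bit address), MSB first via BIT_31.
	 */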
452
453 /*
454 * Select the chip and feed the command and address
455 */
456 for (count = 0; count < 11; count++) {
457 if (nv_cmd & BIT_31) {
458 ql_nv_write(ha, NV_DATA_OUT);
459 } else {
460 ql_nv_write(ha, 0);
461 }
462 nv_cmd <<= 1;
463 }
464
465 *features = 0;
466 for (count = 0; count < 16; count++) {
467 WRT16_IO_REG(ha, nvram, NV_SELECT | NV_CLOCK);
468 ql_nv_delay();
469
470 data = RD16_IO_REG(ha, nvram);
471 *features <<= 1;
472 if (data & NV_DATA_IN) {
473 *features = (uint16_t)(*features | 0x1);
474 }
475
476 WRT16_IO_REG(ha, nvram, NV_SELECT);
477 ql_nv_delay();
478 }
479
480 /*
481 * Deselect the chip
482 */
483 WRT16_IO_REG(ha, nvram, NV_DESELECT);
484
485 ql_release_nvram(ha);
486
487 QL_PRINT_9(ha, "done\n");
488
489 return (0);
490 }
491
492 /*
493 * Set adapter feature bits in NVRAM
494 */
495 static int
496 ql_set_feature_bits(ql_adapter_state_t *ha, uint16_t features)
497 {
498 int rval;
499 uint32_t count;
500 nvram_t *nv;
501 uint16_t *wptr;
502 uint8_t *bptr;
503 uint8_t csum;
504 uint32_t start_addr;
505
506 QL_PRINT_9(ha, "started\n");
507
508 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
509 EL(ha, "Not supported for 24xx\n");
510 return (EINVAL);
511 }
512
513 nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
514 if (nv == NULL) {
515 EL(ha, "failed, kmem_zalloc\n");
516 return (ENOMEM);
517 }
518
519 rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
520 if (rval != QL_SUCCESS) {
521 EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
522 kmem_free(nv, sizeof (*nv));
523 return (EIO);
524 }
525 rval = 0;
526
527 /*
528 * Read off the whole NVRAM
529 */
530 wptr = (uint16_t *)nv;
531 csum = 0;
532 for (count = 0; count < sizeof (nvram_t) / 2; count++) {
533 *wptr = (uint16_t)ql_get_nvram_word(ha, count + start_addr);
534 csum = (uint8_t)(csum + (uint8_t)*wptr);
535 csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
536 wptr++;
537 }
538
539 /*
540 * If the checksum is BAD then fail it right here.
541 */
542 if (csum) {
543 kmem_free(nv, sizeof (*nv));
544 ql_release_nvram(ha);
545 return (EBADF);
546 }
547
548 nv->adapter_features[0] = (uint8_t)((features & 0xFF00) >> 8);
549 nv->adapter_features[1] = (uint8_t)(features & 0xFF);
550
551 /*
	 * Recompute the checksum now
553 */
554 bptr = (uint8_t *)nv;
555 for (count = 0; count < sizeof (nvram_t) - 1; count++) {
556 csum = (uint8_t)(csum + *bptr++);
557 }
558 csum = (uint8_t)(~csum + 1);
559 nv->checksum = csum;
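	/*
	 * The checksum byte makes the 8-bit sum of the whole nvram_t image
	 * zero; for example, if the remaining bytes sum to 0x37 (mod 256),
	 * the checksum stored here is 0xC9.
	 */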
560
561 /*
562 * Now load the NVRAM
563 */
564 wptr = (uint16_t *)nv;
565 for (count = 0; count < sizeof (nvram_t) / 2; count++) {
566 ql_load_nvram(ha, (uint8_t)(count + start_addr), *wptr++);
567 }
568
569 /*
570 * Read NVRAM and verify the contents
571 */
572 wptr = (uint16_t *)nv;
573 csum = 0;
574 for (count = 0; count < sizeof (nvram_t) / 2; count++) {
575 if (ql_get_nvram_word(ha, count + start_addr) != *wptr) {
576 rval = EIO;
577 break;
578 }
579 csum = (uint8_t)(csum + (uint8_t)*wptr);
580 csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
581 wptr++;
582 }
583
584 if (csum) {
585 rval = EINVAL;
586 }
587
588 kmem_free(nv, sizeof (*nv));
589 ql_release_nvram(ha);
590
591 QL_PRINT_9(ha, "done\n");
592
593 return (rval);
594 }
595
596 /*
597 * Fix this function to update just feature bits and checksum in NVRAM
598 */
599 static int
600 ql_set_nvram_adapter_defaults(ql_adapter_state_t *ha)
601 {
602 int rval;
603 uint32_t count;
604 uint32_t start_addr;
605
606 QL_PRINT_9(ha, "started\n");
607
608 rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
609 if (rval != QL_SUCCESS) {
610 EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
611 return (EIO);
612 }
613 rval = 0;
614
615 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
616 nvram_24xx_t *nv;
617 uint32_t *longptr;
618 uint32_t csum = 0;
619
620 nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
621 if (nv == NULL) {
622 EL(ha, "failed, kmem_zalloc\n");
623 return (ENOMEM);
624 }
625
626 nv->nvram_version[0] = LSB(ICB_24XX_VERSION);
627 nv->nvram_version[1] = MSB(ICB_24XX_VERSION);
628
629 nv->version[0] = 1;
630 nv->max_frame_length[1] = 8;
631 nv->execution_throttle[0] = 16;
632 nv->login_retry_count[0] = 8;
633
634 nv->firmware_options_1[0] = BIT_2 | BIT_1;
635 nv->firmware_options_1[1] = BIT_5;
636 nv->firmware_options_2[0] = BIT_5;
637 nv->firmware_options_2[1] = BIT_4;
638 nv->firmware_options_3[1] = BIT_6;
639
640 /*
641 * Set default host adapter parameters
642 */
643 nv->host_p[0] = BIT_4 | BIT_1;
644 nv->host_p[1] = BIT_3 | BIT_2;
645 nv->reset_delay = 5;
646 nv->max_luns_per_target[0] = 128;
647 nv->port_down_retry_count[0] = 30;
648 nv->link_down_timeout[0] = 30;
649
650 /*
		 * compute the checksum now
652 */
653 longptr = (uint32_t *)nv;
654 csum = 0;
655 for (count = 0; count < (sizeof (nvram_24xx_t) / 4) - 1;
656 count++) {
657 csum += *longptr;
658 longptr++;
659 }
660 csum = (uint32_t)(~csum + 1);
		LITTLE_ENDIAN_32(&csum);
662 *longptr = csum;
663
664 /*
665 * Now load the NVRAM
666 */
667 longptr = (uint32_t *)nv;
668 for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
669 (void) ql_24xx_load_nvram(ha,
670 (uint32_t)(count + start_addr), *longptr++);
671 }
672
673 /*
674 * Read NVRAM and verify the contents
675 */
676 csum = 0;
677 longptr = (uint32_t *)nv;
678 for (count = 0; count < sizeof (nvram_24xx_t) / 4; count++) {
679 rval = ql_24xx_read_flash(ha, count + start_addr,
680 longptr);
681 if (rval != QL_SUCCESS) {
682 EL(ha, "24xx_read_flash failed=%xh\n", rval);
683 break;
684 }
685 csum += *longptr;
686 }
687
688 if (csum) {
689 rval = EINVAL;
690 }
691 kmem_free(nv, sizeof (nvram_24xx_t));
692 } else {
693 nvram_t *nv;
694 uint16_t *wptr;
695 uint8_t *bptr;
696 uint8_t csum;
697
698 nv = kmem_zalloc(sizeof (*nv), KM_SLEEP);
699 if (nv == NULL) {
700 EL(ha, "failed, kmem_zalloc\n");
701 return (ENOMEM);
702 }
703 /*
704 * Set default initialization control block.
705 */
706 nv->parameter_block_version = ICB_VERSION;
707 nv->firmware_options[0] = BIT_4 | BIT_3 | BIT_2 | BIT_1;
708 nv->firmware_options[1] = BIT_7 | BIT_5 | BIT_2;
709
710 nv->max_frame_length[1] = 4;
711 nv->max_iocb_allocation[1] = 1;
712 nv->execution_throttle[0] = 16;
713 nv->login_retry_count = 8;
714 nv->port_name[0] = 33;
715 nv->port_name[3] = 224;
716 nv->port_name[4] = 139;
717 nv->login_timeout = 4;
718
719 /*
720 * Set default host adapter parameters
721 */
722 nv->host_p[0] = BIT_1;
723 nv->host_p[1] = BIT_2;
724 nv->reset_delay = 5;
725 nv->port_down_retry_count = 8;
726 nv->maximum_luns_per_target[0] = 8;
727
728 /*
		 * compute the checksum now
730 */
731 bptr = (uint8_t *)nv;
732 csum = 0;
733 for (count = 0; count < sizeof (nvram_t) - 1; count++) {
734 csum = (uint8_t)(csum + *bptr++);
735 }
736 csum = (uint8_t)(~csum + 1);
737 nv->checksum = csum;
738
739 /*
740 * Now load the NVRAM
741 */
742 wptr = (uint16_t *)nv;
743 for (count = 0; count < sizeof (nvram_t) / 2; count++) {
744 ql_load_nvram(ha, (uint8_t)(count + start_addr),
745 *wptr++);
746 }
747
748 /*
749 * Read NVRAM and verify the contents
750 */
751 wptr = (uint16_t *)nv;
752 csum = 0;
753 for (count = 0; count < sizeof (nvram_t) / 2; count++) {
754 if (ql_get_nvram_word(ha, count + start_addr) !=
755 *wptr) {
756 rval = EIO;
757 break;
758 }
759 csum = (uint8_t)(csum + (uint8_t)*wptr);
760 csum = (uint8_t)(csum + (uint8_t)(*wptr >> 8));
761 wptr++;
762 }
763 if (csum) {
764 rval = EINVAL;
765 }
766 kmem_free(nv, sizeof (*nv));
767 }
768 ql_release_nvram(ha);
769
770 QL_PRINT_9(ha, "done\n");
771
772 return (rval);
773 }
774
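/*
 * ql_load_nvram
 *	Enables writes, then erases and writes a 16bit word to ISP2xxx NVRAM.
 *
 * Input:
 *	ha = adapter state pointer.
 *	addr = NVRAM word address.
 *	value = data word to write.
 *
 * Context:
 *	Kernel context.
 */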
775 static void
776 ql_load_nvram(ql_adapter_state_t *ha, uint8_t addr, uint16_t value)
777 {
778 int count;
779 volatile uint16_t word;
780 volatile uint32_t nv_cmd;
781
782 ql_nv_write(ha, NV_DATA_OUT);
783 ql_nv_write(ha, 0);
784 ql_nv_write(ha, 0);
785
786 for (word = 0; word < 8; word++) {
787 ql_nv_write(ha, NV_DATA_OUT);
788 }
789
790 /*
791 * Deselect the chip
792 */
793 WRT16_IO_REG(ha, nvram, NV_DESELECT);
794 ql_nv_delay();
795
796 /*
797 * Erase Location
798 */
799 nv_cmd = (addr << 16) | NV_ERASE_OP;
800 nv_cmd <<= 5;
801 for (count = 0; count < 11; count++) {
802 if (nv_cmd & BIT_31) {
803 ql_nv_write(ha, NV_DATA_OUT);
804 } else {
805 ql_nv_write(ha, 0);
806 }
807 nv_cmd <<= 1;
808 }
809
810 /*
811 * Wait for Erase to Finish
812 */
813 WRT16_IO_REG(ha, nvram, NV_DESELECT);
814 ql_nv_delay();
815 WRT16_IO_REG(ha, nvram, NV_SELECT);
816 word = 0;
817 while ((word & NV_DATA_IN) == 0) {
818 ql_nv_delay();
819 word = RD16_IO_REG(ha, nvram);
820 }
821 WRT16_IO_REG(ha, nvram, NV_DESELECT);
822 ql_nv_delay();
823
824 /*
825 * Write data now
826 */
827 nv_cmd = (addr << 16) | NV_WRITE_OP;
828 nv_cmd |= value;
829 nv_cmd <<= 5;
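	/*
	 * nv_cmd now holds the 3-bit write opcode, the 8-bit word address
	 * and the 16-bit data left justified; the loop below clocks out
	 * those 27 bits (3 + 8 + 16), MSB first via BIT_31.
	 */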
830 for (count = 0; count < 27; count++) {
831 if (nv_cmd & BIT_31) {
832 ql_nv_write(ha, NV_DATA_OUT);
833 } else {
834 ql_nv_write(ha, 0);
835 }
836 nv_cmd <<= 1;
837 }
838
839 /*
840 * Wait for NVRAM to become ready
841 */
842 WRT16_IO_REG(ha, nvram, NV_DESELECT);
843 ql_nv_delay();
844 WRT16_IO_REG(ha, nvram, NV_SELECT);
845 word = 0;
846 while ((word & NV_DATA_IN) == 0) {
847 ql_nv_delay();
848 word = RD16_IO_REG(ha, nvram);
849 }
850 WRT16_IO_REG(ha, nvram, NV_DESELECT);
851 ql_nv_delay();
852
853 /*
854 * Disable writes
855 */
856 ql_nv_write(ha, NV_DATA_OUT);
857 for (count = 0; count < 10; count++) {
858 ql_nv_write(ha, 0);
859 }
860
861 /*
862 * Deselect the chip now
863 */
864 WRT16_IO_REG(ha, nvram, NV_DESELECT);
865 }
866
867 /*
868 * ql_24xx_load_nvram
 *	Enables write access and writes a 32bit word to ISP24xx NVRAM.
870 *
871 * Input:
872 * ha: adapter state pointer.
873 * addr: NVRAM address.
874 * value: data.
875 *
876 * Returns:
877 * ql local function return status code.
878 *
879 * Context:
880 * Kernel context.
881 */
882 static int
883 ql_24xx_load_nvram(ql_adapter_state_t *ha, uint32_t addr, uint32_t value)
884 {
885 int rval;
886
887 /* Enable flash write. */
888 if (!(CFG_IST(ha, CFG_FCOE_SUPPORT))) {
889 WRT32_IO_REG(ha, ctrl_status,
890 RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
891 RD32_IO_REG(ha, ctrl_status); /* PCI Posting. */
892 }
893
894 /* Disable NVRAM write-protection. */
895 if (CFG_IST(ha, CFG_CTRL_24XX)) {
896 (void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0);
897 } else {
898 if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
899 EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
900 return (rval);
901 }
902 }
903
904 /* Write to flash. */
905 rval = ql_24xx_write_flash(ha, addr, value);
906
907 /* Enable NVRAM write-protection. */
908 if (CFG_IST(ha, CFG_CTRL_24XX)) {
909 /* TODO: Check if 0x8c is correct -- sb: 0x9c ? */
910 (void) ql_24xx_write_flash(ha, NVRAM_CONF_ADDR | 0x101, 0x8c);
911 } else {
912 ql_24xx_protect_flash(ha);
913 }
914
915 /* Disable flash write. */
916 if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
917 WRT32_IO_REG(ha, ctrl_status,
918 RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
919 RD32_IO_REG(ha, ctrl_status); /* PCI Posting. */
920 }
921
922 return (rval);
923 }
924
925 /*
926 * ql_nv_util_load
927 * Loads NVRAM from application.
928 *
929 * Input:
930 * ha = adapter state pointer.
931 * bp = user buffer address.
932 *
933 * Returns:
934 *
935 * Context:
936 * Kernel context.
937 */
938 int
939 ql_nv_util_load(ql_adapter_state_t *ha, void *bp, int mode)
940 {
941 uint8_t cnt;
942 void *nv;
943 uint16_t *wptr;
944 uint16_t data;
945 uint32_t start_addr, *lptr, data32;
946 nvram_t *nptr;
947 int rval;
948
949 QL_PRINT_9(ha, "started\n");
950
951 if ((nv = kmem_zalloc(ha->nvram_cache->size, KM_SLEEP)) == NULL) {
952 EL(ha, "failed, kmem_zalloc\n");
953 return (ENOMEM);
954 }
955
956 if (ddi_copyin(bp, nv, ha->nvram_cache->size, mode) != 0) {
957 EL(ha, "Buffer copy failed\n");
958 kmem_free(nv, ha->nvram_cache->size);
959 return (EFAULT);
960 }
961
962 /* See if the buffer passed to us looks sane */
963 nptr = (nvram_t *)nv;
964 if (nptr->id[0] != 'I' || nptr->id[1] != 'S' || nptr->id[2] != 'P' ||
965 nptr->id[3] != ' ') {
966 EL(ha, "failed, buffer sanity check\n");
967 kmem_free(nv, ha->nvram_cache->size);
968 return (EINVAL);
969 }
970
971 /* Quiesce I/O */
972 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
973 EL(ha, "ql_stall_driver failed\n");
974 kmem_free(nv, ha->nvram_cache->size);
975 return (EBUSY);
976 }
977
978 rval = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
979 if (rval != QL_SUCCESS) {
980 EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
981 kmem_free(nv, ha->nvram_cache->size);
982 ql_restart_driver(ha);
983 return (EIO);
984 }
985
986 /* Load NVRAM. */
987 if (CFG_IST(ha, CFG_CTRL_252780818283)) {
988 GLOBAL_HW_UNLOCK();
989 start_addr &= ~ha->flash_data_addr;
990 start_addr <<= 2;
991 if ((rval = ql_r_m_w_flash(ha, bp, ha->nvram_cache->size,
992 start_addr, mode)) != QL_SUCCESS) {
993 EL(ha, "nvram load failed, rval = %0xh\n", rval);
994 }
995 GLOBAL_HW_LOCK();
996 } else if (CFG_IST(ha, CFG_CTRL_24XX)) {
997 lptr = (uint32_t *)nv;
998 for (cnt = 0; cnt < ha->nvram_cache->size / 4; cnt++) {
999 data32 = *lptr++;
1000 LITTLE_ENDIAN_32(&data32);
1001 rval = ql_24xx_load_nvram(ha, cnt + start_addr,
1002 data32);
1003 if (rval != QL_SUCCESS) {
1004 EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
1005 break;
1006 }
1007 }
1008 } else {
1009 wptr = (uint16_t *)nv;
1010 for (cnt = 0; cnt < ha->nvram_cache->size / 2; cnt++) {
1011 data = *wptr++;
1012 LITTLE_ENDIAN_16(&data);
1013 ql_load_nvram(ha, (uint8_t)(cnt + start_addr), data);
1014 }
1015 }
1016 /* switch to the new one */
1017 kmem_free(ha->nvram_cache->cache, ha->nvram_cache->size);
1018 ha->nvram_cache->cache = (void *)nptr;
1019
1020 ql_release_nvram(ha);
1021 ql_restart_driver(ha);
1022
1023 QL_PRINT_9(ha, "done\n");
1024
1025 if (rval == QL_SUCCESS) {
1026 return (0);
1027 }
1028
1029 return (EFAULT);
1030 }
1031
1032 /*
1033 * ql_nv_util_dump
1034 * Dumps NVRAM to application.
1035 *
1036 * Input:
1037 * ha = adapter state pointer.
1038 * bp = user buffer address.
1039 *
1040 * Returns:
1041 *
1042 * Context:
1043 * Kernel context.
1044 */
1045 int
1046 ql_nv_util_dump(ql_adapter_state_t *ha, void *bp, int mode)
1047 {
1048 uint32_t start_addr;
1049 int rval2, rval = 0;
1050
1051 QL_PRINT_9(ha, "started\n");
1052
	if (ha->nvram_cache == NULL ||
	    ha->nvram_cache->size == 0 ||
	    ha->nvram_cache->cache == NULL) {
		EL(ha, "failed, no nvram cache\n");
		return (ENOMEM);
1058 } else if (ha->nvram_cache->valid != 1) {
1059
1060 /* Quiesce I/O */
1061 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1062 EL(ha, "ql_stall_driver failed\n");
1063 return (EBUSY);
1064 }
1065
1066 rval2 = ql_lock_nvram(ha, &start_addr, LNF_NVRAM_DATA);
1067 if (rval2 != QL_SUCCESS) {
1068 EL(ha, "failed, ql_lock_nvram=%xh\n", rval2);
1069 ql_restart_driver(ha);
1070 return (EIO);
1071 }
1072 rval2 = ql_get_nvram(ha, ha->nvram_cache->cache,
1073 start_addr, ha->nvram_cache->size);
1074 if (rval2 != QL_SUCCESS) {
1075 rval = rval2;
1076 } else {
1077 ha->nvram_cache->valid = 1;
1078 EL(ha, "nvram cache now valid.");
1079 }
1080
1081 ql_release_nvram(ha);
1082 ql_restart_driver(ha);
1083
1084 if (rval != 0) {
1085 EL(ha, "failed to dump nvram, rval=%x\n", rval);
1086 return (rval);
1087 }
1088 }
1089
1090 if (ddi_copyout(ha->nvram_cache->cache, bp,
1091 ha->nvram_cache->size, mode) != 0) {
1092 EL(ha, "Buffer copy failed\n");
1093 return (EFAULT);
1094 }
1095
1096 QL_PRINT_9(ha, "done\n");
1097
1098 return (0);
1099 }
1100
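/*
 * ql_get_nvram
 *	Reads NVRAM contents into a driver buffer.
 *
 * Input:
 *	ha = adapter state pointer.
 *	dest_addr = destination buffer address.
 *	src_addr = NVRAM starting address.
 *	size = byte count to read.
 *
 * Returns:
 *	QL_SUCCESS, or EAGAIN if a flash read fails.
 *
 * Context:
 *	Kernel context.
 */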
1101 int
1102 ql_get_nvram(ql_adapter_state_t *ha, void *dest_addr, uint32_t src_addr,
1103 uint32_t size)
1104 {
1105 int rval = QL_SUCCESS;
1106 int cnt;
1107 /* Dump NVRAM. */
1108 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
1109 uint32_t *lptr = (uint32_t *)dest_addr;
1110
1111 for (cnt = 0; cnt < size / 4; cnt++) {
1112 rval = ql_24xx_read_flash(ha, src_addr++, lptr);
1113 if (rval != QL_SUCCESS) {
1114 EL(ha, "read_flash failed=%xh\n", rval);
1115 rval = EAGAIN;
1116 break;
1117 }
1118 LITTLE_ENDIAN_32(lptr);
1119 lptr++;
1120 }
1121 } else {
1122 uint16_t data;
1123 uint16_t *wptr = (uint16_t *)dest_addr;
1124
1125 for (cnt = 0; cnt < size / 2; cnt++) {
1126 data = (uint16_t)ql_get_nvram_word(ha, cnt +
1127 src_addr);
1128 LITTLE_ENDIAN_16(&data);
1129 *wptr++ = data;
1130 }
1131 }
1132 return (rval);
1133 }
1134
1135 /*
1136 * ql_vpd_load
1137 * Loads VPD from application.
1138 *
1139 * Input:
1140 * ha = adapter state pointer.
1141 * bp = user buffer address.
1142 *
1143 * Returns:
1144 *
1145 * Context:
1146 * Kernel context.
1147 */
1148 int
1149 ql_vpd_load(ql_adapter_state_t *ha, void *bp, int mode)
1150 {
1151 uint8_t cnt;
1152 uint8_t *vpd, *vpdptr, *vbuf;
1153 uint32_t start_addr, vpd_size, *lptr, data32;
1154 int rval;
1155
1156 QL_PRINT_9(ha, "started\n");
1157
1158 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
1159 EL(ha, "unsupported adapter feature\n");
1160 return (ENOTSUP);
1161 }
1162
1163 vpd_size = QL_24XX_VPD_SIZE;
1164
1165 if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
1166 EL(ha, "failed, kmem_zalloc\n");
1167 return (ENOMEM);
1168 }
1169
1170 if (ddi_copyin(bp, vpd, vpd_size, mode) != 0) {
1171 EL(ha, "Buffer copy failed\n");
1172 kmem_free(vpd, vpd_size);
1173 return (EFAULT);
1174 }
1175
1176 /* Sanity check the user supplied data via checksum */
1177 if ((vpdptr = ql_vpd_findtag(ha, vpd, "RV")) == NULL) {
1178 EL(ha, "vpd RV tag missing\n");
1179 kmem_free(vpd, vpd_size);
1180 return (EINVAL);
1181 }
1182
1183 vpdptr += 3;
1184 cnt = 0;
1185 vbuf = vpd;
1186 while (vbuf <= vpdptr) {
1187 cnt += *vbuf++;
1188 }
1189 if (cnt != 0) {
1190 EL(ha, "mismatched checksum, cal=%xh, passed=%xh\n",
1191 (uint8_t)cnt, (uintptr_t)vpdptr);
1192 kmem_free(vpd, vpd_size);
1193 return (EINVAL);
1194 }
1195
1196 /* Quiesce I/O */
1197 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1198 EL(ha, "ql_stall_driver failed\n");
1199 kmem_free(vpd, vpd_size);
1200 return (EBUSY);
1201 }
1202
1203 rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
1204 if (rval != QL_SUCCESS) {
1205 EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
1206 kmem_free(vpd, vpd_size);
1207 ql_restart_driver(ha);
1208 return (EIO);
1209 }
1210
1211 /* Load VPD. */
1212 if (CFG_IST(ha, CFG_CTRL_252780818283)) {
1213 GLOBAL_HW_UNLOCK();
1214 start_addr &= ~ha->flash_data_addr;
1215 start_addr <<= 2;
1216 if ((rval = ql_r_m_w_flash(ha, bp, vpd_size, start_addr,
1217 mode)) != QL_SUCCESS) {
1218 EL(ha, "vpd load error: %xh\n", rval);
1219 }
1220 GLOBAL_HW_LOCK();
1221 } else {
1222 lptr = (uint32_t *)vpd;
1223 for (cnt = 0; cnt < vpd_size / 4; cnt++) {
1224 data32 = *lptr++;
1225 LITTLE_ENDIAN_32(&data32);
1226 rval = ql_24xx_load_nvram(ha, cnt + start_addr,
1227 data32);
1228 if (rval != QL_SUCCESS) {
1229 EL(ha, "failed, 24xx_load_nvram=%xh\n", rval);
1230 break;
1231 }
1232 }
1233 }
1234
1235 kmem_free(vpd, vpd_size);
1236
1237 /* Update the vcache */
1238 if (rval != QL_SUCCESS) {
1239 EL(ha, "failed, load\n");
1240 } else if ((ha->vcache == NULL) && ((ha->vcache =
1241 kmem_zalloc(vpd_size, KM_SLEEP)) == NULL)) {
1242 EL(ha, "failed, kmem_zalloc2\n");
1243 } else if (ddi_copyin(bp, ha->vcache, vpd_size, mode) != 0) {
1244 EL(ha, "Buffer copy2 failed\n");
1245 kmem_free(ha->vcache, vpd_size);
1246 ha->vcache = NULL;
1247 }
1248
1249 ql_release_nvram(ha);
1250 ql_restart_driver(ha);
1251
1252 QL_PRINT_9(ha, "done\n");
1253
1254 if (rval == QL_SUCCESS) {
1255 return (0);
1256 }
1257
1258 return (EFAULT);
1259 }
1260
1261 /*
1262 * ql_vpd_dump
1263 * Dumps VPD to application buffer.
1264 *
1265 * Input:
1266 * ha = adapter state pointer.
1267 * bp = user buffer address.
1268 *
1269 * Returns:
1270 *
1271 * Context:
1272 * Kernel context.
1273 */
1274 int
1275 ql_vpd_dump(ql_adapter_state_t *ha, void *bp, int mode)
1276 {
1277 uint8_t cnt;
1278 void *vpd;
1279 uint32_t start_addr, vpd_size, *lptr;
1280 int rval = 0;
1281
1282 QL_PRINT_3(ha, "started\n");
1283
1284 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
1285 EL(ha, "unsupported adapter feature\n");
1286 return (EACCES);
1287 }
1288
1289 vpd_size = QL_24XX_VPD_SIZE;
1290
1291 if (ha->vcache != NULL) {
1292 /* copy back the vpd cache data */
1293 if (ddi_copyout(ha->vcache, bp, vpd_size, mode) != 0) {
1294 EL(ha, "Buffer copy failed\n");
1295 rval = EFAULT;
1296 }
1297 return (rval);
1298 }
1299
1300 if ((vpd = kmem_zalloc(vpd_size, KM_SLEEP)) == NULL) {
1301 EL(ha, "failed, kmem_zalloc\n");
1302 return (ENOMEM);
1303 }
1304
1305 /* Quiesce I/O */
1306 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1307 EL(ha, "ql_stall_driver failed\n");
1308 kmem_free(vpd, vpd_size);
1309 return (EBUSY);
1310 }
1311
1312 rval = ql_lock_nvram(ha, &start_addr, LNF_VPD_DATA);
1313 if (rval != QL_SUCCESS) {
1314 EL(ha, "failed, ql_lock_nvram=%xh\n", rval);
1315 kmem_free(vpd, vpd_size);
1316 ql_restart_driver(ha);
1317 return (EIO);
1318 }
1319
1320 /* Dump VPD. */
1321 lptr = (uint32_t *)vpd;
1322
1323 for (cnt = 0; cnt < vpd_size / 4; cnt++) {
1324 rval = ql_24xx_read_flash(ha, start_addr++, lptr);
1325 if (rval != QL_SUCCESS) {
1326 EL(ha, "read_flash failed=%xh\n", rval);
1327 rval = EAGAIN;
1328 break;
1329 }
1330 LITTLE_ENDIAN_32(lptr);
1331 lptr++;
1332 }
1333
1334 ql_release_nvram(ha);
1335 ql_restart_driver(ha);
1336
1337 if (ddi_copyout(vpd, bp, vpd_size, mode) != 0) {
1338 EL(ha, "Buffer copy failed\n");
1339 kmem_free(vpd, vpd_size);
1340 return (EFAULT);
1341 }
1342
1343 ha->vcache = vpd;
1344
1345 QL_PRINT_3(ha, "done\n");
1346
1347 if (rval != QL_SUCCESS) {
1348 return (EFAULT);
1349 } else {
1350 return (0);
1351 }
1352 }
1353
1354 /*
1355 * ql_vpd_findtag
1356 * Search the passed vpd buffer for the requested VPD tag type.
1357 *
1358 * Input:
1359 * ha = adapter state pointer.
1360 * vpdbuf = Pointer to start of the buffer to search
 *	opcode = VPD opcode to find (must be NULL terminated).
1362 *
1363 * Returns:
1364 * Pointer to the opcode in the buffer if opcode found.
1365 * NULL if opcode is not found.
1366 *
1367 * Context:
1368 * Kernel context.
1369 */
1370 static uint8_t *
1371 ql_vpd_findtag(ql_adapter_state_t *ha, uint8_t *vpdbuf, int8_t *opcode)
1372 {
1373 uint8_t *vpd = vpdbuf;
1374 uint8_t *end = vpdbuf + QL_24XX_VPD_SIZE;
1375 uint32_t found = 0;
1376
1377 QL_PRINT_3(ha, "started\n");
1378
1379 if (vpdbuf == NULL || opcode == NULL) {
1380 EL(ha, "null parameter passed!\n");
1381 return (NULL);
1382 }
1383
1384 while (vpd < end) {
1385
1386 /* check for end of vpd */
1387 if (vpd[0] == VPD_TAG_END) {
1388 if (opcode[0] == VPD_TAG_END) {
1389 found = 1;
1390 } else {
1391 found = 0;
1392 }
1393 break;
1394 }
1395
1396 /* check opcode */
1397 if (bcmp(opcode, vpd, strlen(opcode)) == 0) {
1398 /* found opcode requested */
1399 found = 1;
1400 break;
1401 }
1402
1403 /*
1404 * Didn't find the opcode, so calculate start of
1405 * next tag. Depending on the current tag type,
1406 * the length field can be 1 or 2 bytes
1407 */
1408 if (!(strncmp((char *)vpd, (char *)VPD_TAG_PRODID, 1))) {
1409 vpd += (vpd[2] << 8) + vpd[1] + 3;
1410 } else if (*vpd == VPD_TAG_LRT || *vpd == VPD_TAG_LRTC) {
1411 vpd += 3;
1412 } else {
			vpd += vpd[2] + 3;
1414 }
1415 }
1416
1417 QL_PRINT_3(ha, "done\n");
1418
1419 return (found == 1 ? vpd : NULL);
1420 }
1421
1422 /*
1423 * ql_vpd_lookup
1424 * Return the VPD data for the request VPD tag
1425 *
1426 * Input:
1427 * ha = adapter state pointer.
1428 * opcode = VPD opcode to find (must be NULL terminated).
1429 * bp = Pointer to returned data buffer.
1430 * bplen = Length of returned data buffer.
1431 *
1432 * Returns:
1433 * Length of data copied into returned data buffer.
1434 * >0 = VPD data field (NULL terminated)
1435 * 0 = no data.
1436 * -1 = Could not find opcode in vpd buffer / error.
1437 *
1438 * Context:
1439 * Kernel context.
1440 *
1441 * NB: The opcode buffer and the bp buffer *could* be the same buffer!
1442 *
1443 */
1444 int32_t
1445 ql_vpd_lookup(ql_adapter_state_t *ha, uint8_t *opcode, uint8_t *bp,
1446 int32_t bplen)
1447 {
1448 uint8_t *vpd;
1449 uint8_t *vpdbuf;
1450 int32_t len = -1;
1451
1452 QL_PRINT_3(ha, "started\n");
1453
1454 if (opcode == NULL || bp == NULL || bplen < 1) {
1455 EL(ha, "invalid parameter passed: opcode=%ph, "
1456 "bp=%ph, bplen=%xh\n", opcode, bp, bplen);
1457 return (len);
1458 }
1459
1460 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
1461 return (len);
1462 }
1463
1464 if ((vpdbuf = (uint8_t *)kmem_zalloc(QL_24XX_VPD_SIZE,
1465 KM_SLEEP)) == NULL) {
1466 EL(ha, "unable to allocate vpd memory\n");
1467 return (len);
1468 }
1469
1470 if ((ql_vpd_dump(ha, vpdbuf, (int)FKIOCTL)) != 0) {
1471 kmem_free(vpdbuf, QL_24XX_VPD_SIZE);
1472 EL(ha, "unable to retrieve VPD data\n");
1473 return (len);
1474 }
1475
1476 if ((vpd = ql_vpd_findtag(ha, vpdbuf, (int8_t *)opcode)) != NULL) {
1477 /*
1478 * Found the tag
1479 */
1480 if (*opcode == VPD_TAG_END || *opcode == VPD_TAG_LRT ||
1481 *opcode == VPD_TAG_LRTC) {
1482 /*
1483 * we found it, but the tag doesn't have a data
1484 * field.
1485 */
1486 len = 0;
1487 } else if (!(strncmp((char *)vpd, (char *)
1488 VPD_TAG_PRODID, 1))) {
1489 len = vpd[2] << 8;
1490 len += vpd[1];
1491 } else {
1492 len = vpd[2];
1493 }
1494
1495 /*
1496 * make sure that the vpd len doesn't exceed the
1497 * vpd end
1498 */
1499 if (vpd + len > vpdbuf + QL_24XX_VPD_SIZE) {
1500 EL(ha, "vpd tag len (%xh) exceeds vpd buffer "
1501 "length\n", len);
1502 len = -1;
1503 }
1504 }
1505
1506 if (len >= 0) {
1507 /*
1508 * make sure we don't exceed callers buffer space len
1509 */
1510 if (len > bplen) {
1511 len = bplen - 1;
1512 }
1513
1514 /* copy the data back */
1515 (void) strncpy((int8_t *)bp, (int8_t *)(vpd + 3), (int64_t)len);
1516 bp[len] = NULL;
1517 } else {
1518 /* error -- couldn't find tag */
1519 bp[0] = NULL;
1520 if (opcode[1] != NULL) {
1521 EL(ha, "unable to find tag '%s'\n", opcode);
1522 } else {
1523 EL(ha, "unable to find tag '%xh'\n", opcode[0]);
1524 }
1525 }
1526
1527 kmem_free(vpdbuf, QL_24XX_VPD_SIZE);
1528
1529 QL_PRINT_3(ha, "done\n");
1530
1531 return (len);
1532 }
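
/*
 * Illustrative caller sketch: fetching the standard VPD serial number
 * keyword ("SN") with ql_vpd_lookup().  The tag name and buffer size are
 * examples only; any NULL terminated VPD keyword may be passed, and the
 * opcode and result buffers may be the same, as noted above.
 *
 *	uint8_t	buf[32];
 *
 *	(void) strcpy((char *)buf, "SN");
 *	if (ql_vpd_lookup(ha, buf, buf, (int32_t)sizeof (buf)) > 0) {
 *		EL(ha, "serial number: %s\n", buf);
 *	}
 */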
1533
1534 /*
1535 * ql_r_m_w_flash
1536 * Read modify write from user space to flash.
1537 *
1538 * Input:
1539 * ha: adapter state pointer.
1540 * dp: source byte pointer.
1541 * bc: byte count.
1542 * faddr: flash byte address.
1543 * mode: flags.
1544 *
1545 * Returns:
1546 * ql local function return status code.
1547 *
1548 * Context:
1549 * Kernel context.
1550 */
1551 int
1552 ql_r_m_w_flash(ql_adapter_state_t *ha, caddr_t dp, uint32_t bc, uint32_t faddr,
1553 int mode)
1554 {
1555 uint8_t *bp;
1556 uint32_t xfer, bsize, saddr, ofst;
1557 int rval = 0;
1558
1559 QL_PRINT_9(ha, "started, dp=%ph, faddr=%xh, bc=%xh\n",
1560 (void *)dp, faddr, bc);
1561
1562 bsize = ha->xioctl->fdesc.block_size;
1563 saddr = faddr & ~(bsize - 1);
1564 ofst = faddr & (bsize - 1);
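	/*
	 * Example (illustrative numbers): with a 64KB (0x10000) sector size
	 * and faddr of 0x1f800, saddr is 0x10000 and ofst is 0xf800, so the
	 * first pass reads the sector and rewrites only its tail.
	 */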
1565
1566 if ((bp = kmem_zalloc(bsize, KM_SLEEP)) == NULL) {
1567 EL(ha, "kmem_zalloc=null\n");
1568 return (QL_MEMORY_ALLOC_FAILED);
1569 }
1570
1571 while (bc) {
1572 xfer = bc > bsize ? bsize : bc;
1573 if (ofst + xfer > bsize) {
1574 xfer = bsize - ofst;
1575 }
1576 QL_PRINT_9(ha, "dp=%ph, saddr=%xh, bc=%xh, "
1577 "ofst=%xh, xfer=%xh\n", (void *)dp, saddr,
1578 bc, ofst, xfer);
1579
1580 if (ofst || xfer < bsize) {
1581 /* Dump Flash sector. */
1582 if ((rval = ql_dump_fcode(ha, bp, bsize, saddr)) !=
1583 QL_SUCCESS) {
1584 EL(ha, "dump_flash status=%x\n", rval);
1585 break;
1586 }
1587 }
1588
1589 /* Set new data. */
1590 if ((rval = ddi_copyin(dp, (caddr_t)(bp + ofst), xfer,
1591 mode)) != 0) {
1592 EL(ha, "ddi_copyin status=%xh, dp=%ph, ofst=%xh, "
1593 "xfer=%xh\n", rval, (void *)dp, ofst, xfer);
1594 rval = QL_FUNCTION_FAILED;
1595 break;
1596 }
1597
1598 /* Write to flash. */
1599 if ((rval = ql_load_fcode(ha, bp, bsize, saddr)) !=
1600 QL_SUCCESS) {
1601 EL(ha, "load_flash status=%x\n", rval);
1602 break;
1603 }
1604 bc -= xfer;
1605 dp += xfer;
1606 saddr += bsize;
1607 ofst = 0;
1608 }
1609
1610 kmem_free(bp, bsize);
1611
1612 QL_PRINT_9(ha, "done\n");
1613
1614 return (rval);
1615 }
1616
1617 /*
1618 * ql_adm_op
1619 * Performs qladm utility operations
1620 *
1621 * Input:
1622 * ha: adapter state pointer.
1623 * arg: driver_op_t structure pointer.
1624 * mode: flags.
1625 *
1626 * Returns:
1627 *
1628 * Context:
1629 * Kernel context.
1630 */
1631 static int
1632 ql_adm_op(ql_adapter_state_t *ha, void *arg, int mode)
1633 {
1634 ql_adm_op_t dop;
1635 int rval = 0;
1636
1637 if (ddi_copyin(arg, &dop, sizeof (ql_adm_op_t), mode) != 0) {
1638 EL(ha, "failed, driver_op_t ddi_copyin\n");
1639 return (EFAULT);
1640 }
1641
1642 QL_PRINT_9(ha, "started, cmd=%xh, buffer=%llx,"
1643 " length=%xh, option=%xh\n", dop.cmd, dop.buffer,
1644 dop.length, dop.option);
1645
1646 switch (dop.cmd) {
1647 case QL_ADAPTER_INFO:
1648 rval = ql_adm_adapter_info(ha, &dop, mode);
1649 break;
1650
1651 case QL_EXTENDED_LOGGING:
1652 rval = ql_adm_extended_logging(ha, &dop);
1653 break;
1654
1655 case QL_LOOP_RESET:
1656 rval = ql_adm_loop_reset(ha);
1657 break;
1658
1659 case QL_DEVICE_LIST:
1660 rval = ql_adm_device_list(ha, &dop, mode);
1661 break;
1662
1663 case QL_PROP_UPDATE_INT:
1664 rval = ql_adm_prop_update_int(ha, &dop, mode);
1665 break;
1666
1667 case QL_UPDATE_PROPERTIES:
1668 rval = ql_adm_update_properties(ha);
1669 break;
1670
1671 case QL_FW_DUMP:
1672 rval = ql_adm_fw_dump(ha, &dop, arg, mode);
1673 break;
1674
1675 case QL_FW_DUMP_TRIGGER:
1676 rval = ql_adm_fw_t_dump(ha);
1677 break;
1678
1679 case QL_BEACON_ENABLE:
1680 case QL_BEACON_DISABLE:
1681 rval = ql_adm_beacon(ha, &dop);
1682 break;
1683
1684 case QL_NVRAM_LOAD:
1685 rval = ql_adm_nvram_load(ha, &dop, mode);
1686 break;
1687
1688 case QL_NVRAM_DUMP:
1689 rval = ql_adm_nvram_dump(ha, &dop, mode);
1690 break;
1691
1692 case QL_FLASH_LOAD:
1693 rval = ql_adm_flash_load(ha, &dop, mode);
1694 break;
1695
1696 case QL_VPD_LOAD:
1697 rval = ql_adm_vpd_load(ha, &dop, mode);
1698 break;
1699
1700 case QL_VPD_DUMP:
1701 rval = ql_adm_vpd_dump(ha, &dop, mode);
1702 break;
1703
1704 case QL_VPD_GETTAG:
1705 rval = ql_adm_vpd_gettag(ha, &dop, mode);
1706 break;
1707
1708 case QL_UPD_FWMODULE:
1709 rval = ql_adm_updfwmodule(ha, &dop, mode);
1710 break;
1711
1712 default:
1713 EL(ha, "unsupported driver op cmd: %x\n", dop.cmd);
1714 return (EINVAL);
1715 }
1716
1717 QL_PRINT_9(ha, "done\n");
1718
1719 return (rval);
1720 }
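
/*
 * Illustrative user-space usage (not driver code): qladm-style callers
 * fill a ql_adm_op_t and pass it through the QL_ADM_OP ioctl dispatched
 * above.  The descriptor fd is assumed to have been opened as in the
 * sketch following ql_ioctl().
 *
 *	ql_adm_op_t		op;
 *	ql_adapter_info_t	info;
 *
 *	bzero(&op, sizeof (op));
 *	op.cmd = QL_ADAPTER_INFO;
 *	op.buffer = (uint64_t)(uintptr_t)&info;
 *	op.length = sizeof (info);
 *	if (ioctl(fd, QL_ADM_OP, &op) == 0) {
 *		(void) printf("flash size = %xh\n", info.flash_size);
 *	}
 */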
1721
1722 /*
1723 * ql_adm_adapter_info
1724 * Performs qladm QL_ADAPTER_INFO command
1725 *
1726 * Input:
1727 * ha: adapter state pointer.
1728 * dop: ql_adm_op_t structure pointer.
1729 * mode: flags.
1730 *
1731 * Returns:
1732 *
1733 * Context:
1734 * Kernel context.
1735 */
1736 static int
1737 ql_adm_adapter_info(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
1738 {
1739 ql_adapter_info_t hba;
1740 uint8_t *dp;
1741 uint32_t length;
1742 int rval, i;
1743
1744 QL_PRINT_9(ha, "started\n");
1745
1746 hba.device_id = ha->device_id;
1747
1748 dp = ha->loginparams.nport_ww_name.raw_wwn;
1749 bcopy(dp, hba.wwpn, 8);
1750
1751 hba.d_id = ha->d_id.b24;
1752
1753 if (ha->xioctl->fdesc.flash_size == 0 &&
1754 !(CFG_IST(ha, CFG_CTRL_22XX) && !ha->subven_id)) {
1755 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
1756 EL(ha, "ql_stall_driver failed\n");
1757 return (EBUSY);
1758 }
1759
1760 if ((rval = ql_setup_fcache(ha)) != QL_SUCCESS) {
1761 EL(ha, "ql_setup_flash failed=%xh\n", rval);
1762 if (rval == QL_FUNCTION_TIMEOUT) {
1763 return (EBUSY);
1764 }
1765 return (EIO);
1766 }
1767
1768 /* Resume I/O */
1769 if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
1770 ql_restart_driver(ha);
1771 } else {
1772 EL(ha, "isp_abort_needed for restart\n");
1773 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED,
1774 DRIVER_STALL);
1775 }
1776 }
1777 hba.flash_size = ha->xioctl->fdesc.flash_size;
1778
1779 (void) strcpy(hba.driver_ver, QL_VERSION);
1780
1781 (void) sprintf(hba.fw_ver, "%d.%d.%d", ha->fw_major_version,
1782 ha->fw_minor_version, ha->fw_subminor_version);
1783
1784 bzero(hba.fcode_ver, sizeof (hba.fcode_ver));
1785
1786 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
1787 rval = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
1788 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version", (caddr_t)&dp, &i);
1789 length = i;
1790 if (rval != DDI_PROP_SUCCESS) {
1791 EL(ha, "failed, ddi_getlongprop=%xh\n", rval);
1792 } else {
1793 if (length > (uint32_t)sizeof (hba.fcode_ver)) {
1794 length = sizeof (hba.fcode_ver) - 1;
1795 }
1796 bcopy((void *)dp, (void *)hba.fcode_ver, length);
1797 kmem_free(dp, length);
1798 }
1799
1800 if (ddi_copyout((void *)&hba, (void *)(uintptr_t)dop->buffer,
1801 dop->length, mode) != 0) {
1802 EL(ha, "failed, ddi_copyout\n");
1803 return (EFAULT);
1804 }
1805
1806 QL_PRINT_9(ha, "done\n");
1807
1808 return (0);
1809 }
1810
1811 /*
1812 * ql_adm_extended_logging
1813 * Performs qladm QL_EXTENDED_LOGGING command
1814 *
1815 * Input:
1816 * ha: adapter state pointer.
1817 * dop: ql_adm_op_t structure pointer.
1818 *
1819 * Returns:
1820 *
1821 * Context:
1822 * Kernel context.
1823 */
1824 static int
1825 ql_adm_extended_logging(ql_adapter_state_t *ha, ql_adm_op_t *dop)
1826 {
1827 char prop_name[MAX_PROP_LENGTH];
1828 int rval;
1829
1830 QL_PRINT_9(ha, "started\n");
1831
1832 (void) sprintf(prop_name, "hba%d-extended-logging", ha->instance);
1833
1834 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1835 rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
1836 (int)dop->option);
1837 if (rval != DDI_PROP_SUCCESS) {
1838 EL(ha, "failed, prop_update = %xh\n", rval);
1839 return (EINVAL);
1840 } else {
1841 dop->option ?
1842 (ha->cfg_flags |= CFG_ENABLE_EXTENDED_LOGGING) :
1843 (ha->cfg_flags &= ~CFG_ENABLE_EXTENDED_LOGGING);
1844 }
1845
1846 QL_PRINT_9(ha, "done\n");
1847
1848 return (0);
1849 }
1850
1851 /*
1852 * ql_adm_loop_reset
1853 * Performs qladm QL_LOOP_RESET command
1854 *
1855 * Input:
1856 * ha: adapter state pointer.
1857 *
1858 * Returns:
1859 *
1860 * Context:
1861 * Kernel context.
1862 */
1863 static int
1864 ql_adm_loop_reset(ql_adapter_state_t *ha)
1865 {
1866 int rval;
1867
1868 QL_PRINT_9(ha, "started\n");
1869
1870 if (CFG_IST(ha, CFG_CTRL_82XX)) {
1871 rval = ql_8021_fw_reload(ha);
1872 ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
1873 if (rval != QL_SUCCESS) {
1874 EL(ha, "failed, ql_8021_fw_reload=%xh\n", rval);
1875 return (EIO);
1876 }
1877 } else {
1878 if (ha->task_daemon_flags & LOOP_DOWN) {
1879 (void) ql_full_login_lip(ha);
1880 } else if ((rval = ql_full_login_lip(ha)) != QL_SUCCESS) {
			EL(ha, "failed, ql_full_login_lip=%xh\n", rval);
1882 return (EIO);
1883 }
1884 }
1885
1886 QL_PRINT_9(ha, "done\n");
1887
1888 return (0);
1889 }
1890
1891 /*
1892 * ql_adm_device_list
1893 * Performs qladm QL_DEVICE_LIST command
1894 *
1895 * Input:
1896 * ha: adapter state pointer.
1897 * dop: ql_adm_op_t structure pointer.
1898 * mode: flags.
1899 *
1900 * Returns:
1901 *
1902 * Context:
1903 * Kernel context.
1904 */
1905 static int
1906 ql_adm_device_list(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
1907 {
1908 ql_device_info_t dev;
1909 ql_link_t *link;
1910 ql_tgt_t *tq;
1911 uint32_t index, cnt;
1912
1913 QL_PRINT_9(ha, "started\n");
1914
1915 cnt = 0;
1916 dev.address = 0xffffffff;
1917
1918 /* Scan port list for requested target and fill in the values */
1919 for (link = NULL, index = 0;
1920 index < DEVICE_HEAD_LIST_SIZE && link == NULL; index++) {
1921 for (link = ha->dev[index].first; link != NULL;
1922 link = link->next) {
1923 tq = link->base_address;
1924
1925 if (!VALID_TARGET_ID(ha, tq->loop_id)) {
1926 continue;
1927 }
1928 if (cnt != dop->option) {
1929 cnt++;
1930 continue;
1931 }
1932 /* fill in the values */
1933 bcopy(tq->port_name, dev.wwpn, 8);
1934 dev.address = tq->d_id.b24;
1935 dev.loop_id = tq->loop_id;
1936 if (tq->flags & TQF_TAPE_DEVICE) {
1937 dev.type = FCT_TAPE;
1938 } else if (tq->flags & TQF_INITIATOR_DEVICE) {
1939 dev.type = FCT_INITIATOR;
1940 } else {
1941 dev.type = FCT_TARGET;
1942 }
1943 break;
1944 }
1945 }
1946
1947 if (ddi_copyout((void *)&dev, (void *)(uintptr_t)dop->buffer,
1948 dop->length, mode) != 0) {
1949 EL(ha, "failed, ddi_copyout\n");
1950 return (EFAULT);
1951 }
1952
1953 QL_PRINT_9(ha, "done\n");
1954
1955 return (0);
1956 }
1957
1958 /*
1959 * ql_adm_update_properties
1960 * Performs qladm QL_UPDATE_PROPERTIES command
1961 *
1962 * Input:
1963 * ha: adapter state pointer.
1964 *
1965 * Returns:
1966 *
1967 * Context:
1968 * Kernel context.
1969 */
1970 static int
1971 ql_adm_update_properties(ql_adapter_state_t *ha)
1972 {
1973 ql_comb_init_cb_t init_ctrl_blk;
1974 ql_comb_ip_init_cb_t ip_init_ctrl_blk;
1975
1976 QL_PRINT_9(ha, "started\n");
1977
1978 /* Stall driver instance. */
1979 (void) ql_stall_driver(ha, 0);
1980
1981 /* Save init control blocks. */
1982 bcopy(&ha->init_ctrl_blk, &init_ctrl_blk, sizeof (ql_comb_init_cb_t));
1983 bcopy(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk,
1984 sizeof (ql_comb_ip_init_cb_t));
1985
1986 /* Update PCI configration. */
1987 (void) ql_pci_sbus_config(ha);
1988
1989 /* Get configuration properties. */
1990 (void) ql_nvram_config(ha);
1991
1992 /* Check for init firmware required. */
1993 if (bcmp(&ha->init_ctrl_blk, &init_ctrl_blk,
1994 sizeof (ql_comb_init_cb_t)) != 0 ||
1995 bcmp(&ha->ip_init_ctrl_blk, &ip_init_ctrl_blk,
1996 sizeof (ql_comb_ip_init_cb_t)) != 0) {
1997
1998 EL(ha, "isp_abort_needed\n");
1999 ha->loop_down_timer = LOOP_DOWN_TIMER_START;
2000 TASK_DAEMON_LOCK(ha);
2001 ha->task_daemon_flags |= LOOP_DOWN | ISP_ABORT_NEEDED;
2002 TASK_DAEMON_UNLOCK(ha);
2003 }
2004
2005 /* Update AEN queue. */
2006 if (ha->xioctl->flags & QL_AEN_TRACKING_ENABLE) {
2007 ql_enqueue_aen(ha, MBA_PORT_UPDATE, NULL);
2008 }
2009
2010 /* Restart driver instance. */
2011 ql_restart_driver(ha);
2012
2013 QL_PRINT_9(ha, "done\n");
2014
2015 return (0);
2016 }
2017
2018 /*
2019 * ql_adm_prop_update_int
2020 * Performs qladm QL_PROP_UPDATE_INT command
2021 *
2022 * Input:
2023 * ha: adapter state pointer.
2024 * dop: ql_adm_op_t structure pointer.
2025 * mode: flags.
2026 *
2027 * Returns:
2028 *
2029 * Context:
2030 * Kernel context.
2031 */
2032 static int
2033 ql_adm_prop_update_int(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2034 {
2035 char *prop_name;
2036 int rval;
2037
2038 QL_PRINT_9(ha, "started\n");
2039
2040 prop_name = kmem_zalloc(dop->length, KM_SLEEP);
2041 if (prop_name == NULL) {
2042 EL(ha, "failed, kmem_zalloc\n");
2043 return (ENOMEM);
2044 }
2045
2046 if (ddi_copyin((void *)(uintptr_t)dop->buffer, prop_name, dop->length,
2047 mode) != 0) {
2048 EL(ha, "failed, prop_name ddi_copyin\n");
2049 kmem_free(prop_name, dop->length);
2050 return (EFAULT);
2051 }
2052
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
2054 if ((rval = ddi_prop_update_int(DDI_DEV_T_NONE, ha->dip, prop_name,
2055 (int)dop->option)) != DDI_PROP_SUCCESS) {
2056 EL(ha, "failed, prop_update=%xh\n", rval);
2057 kmem_free(prop_name, dop->length);
2058 return (EINVAL);
2059 }
2060
2061 kmem_free(prop_name, dop->length);
2062
2063 QL_PRINT_9(ha, "done\n");
2064
2065 return (0);
2066 }
2067
2068 /*
2069 * ql_adm_fw_dump
2070 * Performs qladm QL_FW_DUMP command
2071 *
2072 * Input:
2073 * ha: adapter state pointer.
2074 * dop: ql_adm_op_t structure pointer.
2075 * udop: user space ql_adm_op_t structure pointer.
2076 * mode: flags.
2077 *
2078 * Returns:
2079 *
2080 * Context:
2081 * Kernel context.
2082 */
2083 static int
2084 ql_adm_fw_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, void *udop, int mode)
2085 {
2086 caddr_t dmp;
2087
2088 QL_PRINT_9(ha, "started\n");
2089
2090 if (dop->length < ha->risc_dump_size) {
2091 EL(ha, "failed, incorrect length=%xh, size=%xh\n",
2092 dop->length, ha->risc_dump_size);
2093 return (EINVAL);
2094 }
2095
2096 if (ha->ql_dump_state & QL_DUMP_VALID) {
2097 dmp = kmem_zalloc(ha->risc_dump_size, KM_SLEEP);
2098 if (dmp == NULL) {
2099 EL(ha, "failed, kmem_zalloc\n");
2100 return (ENOMEM);
2101 }
2102
2103 dop->length = (uint32_t)ql_ascii_fw_dump(ha, dmp);
2104 if (ddi_copyout((void *)dmp, (void *)(uintptr_t)dop->buffer,
2105 dop->length, mode) != 0) {
2106 EL(ha, "failed, ddi_copyout\n");
2107 kmem_free(dmp, ha->risc_dump_size);
2108 return (EFAULT);
2109 }
2110
2111 kmem_free(dmp, ha->risc_dump_size);
2112 ha->ql_dump_state |= QL_DUMP_UPLOADED;
2113
2114 } else {
2115 EL(ha, "failed, no dump file\n");
2116 dop->length = 0;
2117 }
2118
2119 if (ddi_copyout(dop, udop, sizeof (ql_adm_op_t), mode) != 0) {
2120 EL(ha, "failed, driver_op_t ddi_copyout\n");
2121 return (EFAULT);
2122 }
2123
2124 QL_PRINT_9(ha, "done\n");
2125
2126 return (0);
2127 }
2128
2129 /*
2130 * ql_adm_fw_t_dump
2131 * Performs qladm QL_FW_DUMP_TRIGGER command
2132 *
2133 * Input:
2134 * ha: adapter state pointer.
2135 *
2136 * Returns:
2137 *
2138 * Context:
2139 * Kernel context.
2140 */
2141 static int
2142 ql_adm_fw_t_dump(ql_adapter_state_t *ha)
2143 {
2144 int rval;
2145
2146 QL_PRINT_9(ha, "started\n");
2147
2148 if (ha->ql_dump_state & QL_DUMP_VALID) {
2149 EL(ha, "Already contains a dump file\n");
2150 return (EINVAL);
2151 }
2152 rval = ql_dump_firmware(ha);
2153
2154 QL_PRINT_9(ha, "done\n");
2155
2156 if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
2157 return (0);
2158 }
2159 return (EFAULT);
2160 }
2161
2162 /*
2163 * ql_adm_beacon
2164 * Performs qladm QL_BEACON_ENABLE/DISABLE command
2165 *
2166 * Input:
2167 * ha: adapter state pointer.
2168 * dop: ql_adm_op_t structure pointer.
2169 *
2170 * Returns:
2171 *
2172 * Context:
2173 * Kernel context.
2174 */
2175 static int
2176 ql_adm_beacon(ql_adapter_state_t *ha, ql_adm_op_t *dop)
2177 {
2178 int rval;
2179 ql_mbx_data_t mr;
2180
2181 if (!CFG_IST(ha, CFG_CTRL_82XX)) {
2182 return (EIO);
2183 }
2184
2185 rval = ql_diag_beacon(ha, dop->cmd, &mr);
2186
2187 if (rval == QL_SUCCESS) {
2188 return (0);
2189 }
2190
2191 return (rval);
2192 }
2193
2194
2195 /*
2196 * ql_adm_nvram_dump
2197 * Performs qladm QL_NVRAM_DUMP command
2198 *
2199 * Input:
2200 * ha: adapter state pointer.
2201 * dop: ql_adm_op_t structure pointer.
2202 * mode: flags.
2203 *
2204 * Returns:
2205 *
2206 * Context:
2207 * Kernel context.
2208 */
2209 static int
2210 ql_adm_nvram_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2211 {
2212 int rval;
2213
2214 QL_PRINT_9(ha, "started\n");
2215
2216 if (dop->length < ha->nvram_cache->size) {
2217 EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2218 ha->nvram_cache->size);
2219 return (EINVAL);
2220 }
2221
2222 if ((rval = ql_nv_util_dump(ha, (void *)(uintptr_t)dop->buffer,
2223 mode)) != 0) {
2224 EL(ha, "failed, ql_nv_util_dump\n");
2225 } else {
2226 /*EMPTY*/
2227 QL_PRINT_9(ha, "done\n");
2228 }
2229
2230 return (rval);
2231 }
2232
2233 /*
2234 * ql_adm_nvram_load
2235 * Performs qladm QL_NVRAM_LOAD command
2236 *
2237 * Input:
2238 * ha: adapter state pointer.
2239 * dop: ql_adm_op_t structure pointer.
2240 * mode: flags.
2241 *
2242 * Returns:
2243 *
2244 * Context:
2245 * Kernel context.
2246 */
2247 static int
2248 ql_adm_nvram_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2249 {
2250 int rval;
2251
2252 QL_PRINT_9(ha, "started\n");
2253
2254 if (dop->length < ha->nvram_cache->size) {
2255 EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2256 ha->nvram_cache->size);
2257 return (EINVAL);
2258 }
2259
2260 if ((rval = ql_nv_util_load(ha, (void *)(uintptr_t)dop->buffer,
2261 mode)) != 0) {
		EL(ha, "failed, ql_nv_util_load\n");
2263 } else {
2264 /*EMPTY*/
2265 QL_PRINT_9(ha, "done\n");
2266 }
2267
2268 return (rval);
2269 }
2270
2271 /*
2272 * ql_adm_flash_load
2273 * Performs qladm QL_FLASH_LOAD command
2274 *
2275 * Input:
2276 * ha: adapter state pointer.
2277 * dop: ql_adm_op_t structure pointer.
2278 * mode: flags.
2279 *
2280 * Returns:
2281 *
2282 * Context:
2283 * Kernel context.
2284 */
2285 static int
2286 ql_adm_flash_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2287 {
2288 uint8_t *dp;
2289 int rval;
2290
2291 QL_PRINT_9(ha, "started\n");
2292
2293 if ((dp = kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
2294 EL(ha, "failed, kmem_zalloc\n");
2295 return (ENOMEM);
2296 }
2297
2298 if (ddi_copyin((void *)(uintptr_t)dop->buffer, dp, dop->length,
2299 mode) != 0) {
2300 EL(ha, "ddi_copyin failed\n");
2301 kmem_free(dp, dop->length);
2302 return (EFAULT);
2303 }
2304
2305 if (ql_stall_driver(ha, 0) != QL_SUCCESS) {
2306 EL(ha, "ql_stall_driver failed\n");
2307 kmem_free(dp, dop->length);
2308 return (EBUSY);
2309 }
2310
2311 rval = (CFG_IST(ha, CFG_ISP_FW_TYPE_2) ?
2312 ql_24xx_load_flash(ha, dp, dop->length, dop->option) :
2313 ql_load_flash(ha, dp, dop->length));
2314
2315 ql_restart_driver(ha);
2316
2317 kmem_free(dp, dop->length);
2318
2319 if (rval != QL_SUCCESS) {
2320 EL(ha, "failed\n");
2321 return (EIO);
2322 }
2323
2324 QL_PRINT_9(ha, "done\n");
2325
2326 return (0);
2327 }
2328
2329 /*
2330 * ql_adm_vpd_dump
2331 * Performs qladm QL_VPD_DUMP command
2332 *
2333 * Input:
2334 * ha: adapter state pointer.
2335 * dop: ql_adm_op_t structure pointer.
2336 * mode: flags.
2337 *
2338 * Returns:
2339 *
2340 * Context:
2341 * Kernel context.
2342 */
2343 static int
2344 ql_adm_vpd_dump(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2345 {
2346 int rval;
2347
2348 QL_PRINT_9(ha, "started\n");
2349
2350 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
2351 EL(ha, "hba does not support VPD\n");
2352 return (EINVAL);
2353 }
2354
2355 if (dop->length < QL_24XX_VPD_SIZE) {
2356 EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2357 QL_24XX_VPD_SIZE);
2358 return (EINVAL);
2359 }
2360
2361 if ((rval = ql_vpd_dump(ha, (void *)(uintptr_t)dop->buffer, mode))
2362 != 0) {
2363 EL(ha, "failed, ql_vpd_dump\n");
2364 } else {
2365 /*EMPTY*/
2366 QL_PRINT_9(ha, "done\n");
2367 }
2368
2369 return (rval);
2370 }
2371
2372 /*
2373 * ql_adm_vpd_load
2374 * Performs qladm QL_VPD_LOAD command
2375 *
2376 * Input:
2377 * ha: adapter state pointer.
2378 * dop: ql_adm_op_t structure pointer.
2379 * mode: flags.
2380 *
2381 * Returns:
2382 *
2383 * Context:
2384 * Kernel context.
2385 */
2386 static int
2387 ql_adm_vpd_load(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2388 {
2389 int rval;
2390
2391 QL_PRINT_9(ha, "started\n");
2392
2393 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
2394 EL(ha, "hba does not support VPD\n");
2395 return (EINVAL);
2396 }
2397
2398 if (dop->length < QL_24XX_VPD_SIZE) {
2399 EL(ha, "failed, length=%xh, size=%xh\n", dop->length,
2400 QL_24XX_VPD_SIZE);
2401 return (EINVAL);
2402 }
2403
2404 if ((rval = ql_vpd_load(ha, (void *)(uintptr_t)dop->buffer, mode))
2405 != 0) {
		EL(ha, "failed, ql_vpd_load\n");
2407 } else {
2408 /*EMPTY*/
2409 QL_PRINT_9(ha, "done\n");
2410 }
2411
2412 return (rval);
2413 }
2414
2415 /*
2416 * ql_adm_vpd_gettag
2417 * Performs qladm QL_VPD_GETTAG command
2418 *
2419 * Input:
2420 * ha: adapter state pointer.
2421 * dop: ql_adm_op_t structure pointer.
2422 * mode: flags.
2423 *
2424 * Returns:
2425 *
2426 * Context:
2427 * Kernel context.
2428 */
2429 static int
2430 ql_adm_vpd_gettag(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2431 {
2432 int rval = 0;
2433 uint8_t *lbuf;
2434
2435 QL_PRINT_9(ha, "started\n");
2436
2437 if (CFG_IST(ha, CFG_ISP_FW_TYPE_1)) {
2438 EL(ha, "hba does not support VPD\n");
2439 return (EINVAL);
2440 }
2441
2442 if ((lbuf = (uint8_t *)kmem_zalloc(dop->length, KM_SLEEP)) == NULL) {
2443 EL(ha, "mem alloc failure of %xh bytes\n", dop->length);
2444 rval = EFAULT;
2445 } else {
2446 if (ddi_copyin((void *)(uintptr_t)dop->buffer, lbuf,
2447 dop->length, mode) != 0) {
2448 EL(ha, "ddi_copyin failed\n");
2449 kmem_free(lbuf, dop->length);
2450 return (EFAULT);
2451 }
2452
2453 if ((rval = ql_vpd_lookup(ha, lbuf, lbuf, (int32_t)
2454 dop->length)) < 0) {
2455 EL(ha, "failed vpd_lookup\n");
2456 } else {
2457 if (ddi_copyout(lbuf, (void *)(uintptr_t)dop->buffer,
2458 strlen((int8_t *)lbuf) + 1, mode) != 0) {
2459 EL(ha, "failed, ddi_copyout\n");
2460 rval = EFAULT;
2461 } else {
2462 rval = 0;
2463 }
2464 }
2465 kmem_free(lbuf, dop->length);
2466 }
2467
2468 QL_PRINT_9(ha, "done\n");
2469
2470 return (rval);
2471 }
2472
2473 /*
2474 * ql_adm_updfwmodule
2475 * Performs qladm QL_UPD_FWMODULE command
2476 *
2477 * Input:
2478 * ha: adapter state pointer.
2479 * dop: ql_adm_op_t structure pointer.
2480 * mode: flags.
2481 *
2482 * Returns:
2483 *
2484 * Context:
2485 * Kernel context.
2486 */
2487 /* ARGSUSED */
2488 static int
2489 ql_adm_updfwmodule(ql_adapter_state_t *ha, ql_adm_op_t *dop, int mode)
2490 {
2491 int rval = DDI_SUCCESS;
2492 ql_link_t *link;
2493 ql_adapter_state_t *ha2 = NULL;
2494 uint16_t fw_class = (uint16_t)dop->option;
2495
2496 QL_PRINT_9(ha, "started\n");
2497
2498 /* zero the firmware module reference count */
2499 for (link = ql_hba.first; link != NULL; link = link->next) {
2500 ha2 = link->base_address;
2501 if (fw_class == ha2->fw_class) {
2502 if ((rval = ddi_modclose(ha2->fw_module)) !=
2503 DDI_SUCCESS) {
2504 EL(ha2, "modclose rval=%xh\n", rval);
2505 break;
2506 }
2507 ha2->fw_module = NULL;
2508 }
2509 }
2510
2511 /* reload the f/w modules */
2512 for (link = ql_hba.first; link != NULL; link = link->next) {
2513 ha2 = link->base_address;
2514
		if ((fw_class == ha2->fw_class) && (ha2->fw_module == NULL)) {
2516 if ((rval = (int32_t)ql_fwmodule_resolve(ha2)) !=
2517 QL_SUCCESS) {
2518 EL(ha2, "unable to load f/w module: '%x' "
2519 "(rval=%xh)\n", ha2->fw_class, rval);
2520 rval = EFAULT;
2521 } else {
2522 EL(ha2, "f/w module updated: '%x'\n",
2523 ha2->fw_class);
2524 }
2525
			EL(ha2, "isp abort needed (%d)\n", ha2->instance);
2527
2528 ql_awaken_task_daemon(ha2, NULL, ISP_ABORT_NEEDED, 0);
2529
2530 rval = 0;
2531 }
2532 }
2533
2534 QL_PRINT_9(ha, "done\n");
2535
2536 return (rval);
2537 }