/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2018 Nexenta Systems, Inc.
 */

/*
 * Utility routines that have common usage throughout the driver.
 */
#include <smartpqi.h>

/* ---- Forward declarations for support/utility functions ---- */
static void reinit_io(pqi_io_request_t *io);
static char *cmd_state_str(pqi_cmd_state_t state);
static void dump_raid(pqi_state_t s, void *v, pqi_index_t idx);
static void dump_aio(void *v);
static void show_error_detail(pqi_state_t s);
static void cmd_finish_task(void *v);

/*
 * []------------------------------------------------------------------[]
 * | Entry points for this file                                         |
 * []------------------------------------------------------------------[]
 */

int
pqi_is_offline(pqi_state_t s)
{
	return (s->s_offline);
}

/*
 * pqi_alloc_io -- return next available slot.
 */
pqi_io_request_t *
pqi_alloc_io(pqi_state_t s)
{
	pqi_io_request_t *io = NULL;
	uint16_t loop;
	uint16_t i;

	mutex_enter(&s->s_io_mutex);
	i = s->s_next_io_slot; /* just a hint */
	s->s_io_need++;
	for (;;) {
		for (loop = 0; loop < s->s_max_io_slots; loop++) {
			io = &s->s_io_rqst_pool[i];
			i = (i + 1) % s->s_max_io_slots;
			if (io->io_refcount == 0) {
				io->io_refcount = 1;
				break;
			}
		}
		if (loop != s->s_max_io_slots)
			break;

		s->s_io_had2wait++;
		s->s_io_wait_cnt++;
		if (cv_wait_sig(&s->s_io_condvar, &s->s_io_mutex) == 0) {
			s->s_io_sig++;
			io = NULL;
			break;
		}
		i = s->s_next_io_slot; /* just a hint */
	}
	s->s_next_io_slot = i;
	mutex_exit(&s->s_io_mutex);

	if (io != NULL)
		reinit_io(io);
	return (io);
}
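
/*
 * A minimal usage sketch for the slot allocator above (illustrative
 * only; "my_done_cb" is a hypothetical completion callback and not
 * part of this driver):
 *
 *	pqi_io_request_t *io;
 *
 *	if ((io = pqi_alloc_io(s)) == NULL)
 *		return (TRAN_BUSY);	(NULL means a signal interrupted
 *					the wait for a free slot)
 *	io->io_cb = my_done_cb;
 *	(build the IU and post it to the HBA)
 *
 * Any path that fails after a successful pqi_alloc_io() must hand the
 * slot back with pqi_free_io(io) so that threads sleeping in
 * cv_wait_sig() above are woken.
 */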

void
pqi_free_io(pqi_io_request_t *io)
{
	pqi_state_t s = io->io_softc;

	mutex_enter(&s->s_io_mutex);
	ASSERT(io->io_refcount == 1);
	io->io_refcount = 0;
	reinit_io(io);
	if (s->s_io_wait_cnt != 0) {
		s->s_io_wait_cnt--;
		cv_signal(&s->s_io_condvar);
	}
	mutex_exit(&s->s_io_mutex);
}

void
pqi_dump_io(pqi_io_request_t *io)
{
	pqi_iu_header_t *hdr = io->io_iu;
	pqi_state_t s;

	if (io->io_cmd != NULL) {
		s = io->io_cmd->pc_softc;
	} else {
		/*
		 * Early on, during driver attach, commands are run without
		 * an associated pqi_cmd_t structure. These io requests are
		 * low-level operations sent directly to the HBA. So, grab
		 * a reference to the first and only instance through the
		 * DDI interface. Even though there might be multiple HBAs,
		 * grabbing the first is okay since dump_raid() only
		 * references the debug level, which will be the same for
		 * all of the controllers.
		 */
		s = ddi_get_soft_state(pqi_state, 0);
	}

	if (hdr->iu_type == PQI_REQUEST_IU_AIO_PATH_IO) {
		dump_aio(io->io_iu);
	} else if (hdr->iu_type == PQI_REQUEST_IU_RAID_PATH_IO) {
		dump_raid(s, io->io_iu, io->io_pi);
	}
}

/*
 * pqi_cmd_sm -- state machine for command
 *
 * NOTE: PQI_CMD_CMPLT and PQI_CMD_FATAL will drop the pd_mutex and regain
 * it even if grab_lock==B_FALSE.
 */
void
pqi_cmd_sm(pqi_cmd_t cmd, pqi_cmd_state_t new_state, boolean_t grab_lock)
{
	pqi_device_t devp = cmd->pc_device;
	pqi_state_t s = cmd->pc_softc;

	if (cmd->pc_softc->s_debug_level & DBG_LVL_STATE) {
		cmn_err(CE_NOTE, "%s: cmd=%p (%s) -> (%s)\n", __func__,
		    (void *)cmd, cmd_state_str(cmd->pc_cmd_state),
		    cmd_state_str(new_state));
	}
	cmd->pc_last_state = cmd->pc_cmd_state;
	cmd->pc_cmd_state = new_state;
	switch (new_state) {
	case PQI_CMD_UNINIT:
		break;

	case PQI_CMD_CONSTRUCT:
		break;

	case PQI_CMD_INIT:
		break;

	case PQI_CMD_QUEUED:
		if (cmd->pc_last_state == PQI_CMD_STARTED)
			break;
		if (grab_lock == B_TRUE)
			mutex_enter(&devp->pd_mutex);
		cmd->pc_start_time = gethrtime();
		cmd->pc_expiration = cmd->pc_start_time +
		    ((hrtime_t)cmd->pc_pkt->pkt_time * NANOSEC);
		devp->pd_active_cmds++;
		atomic_inc_32(&s->s_cmd_queue_len);
		list_insert_tail(&devp->pd_cmd_list, cmd);
		if (grab_lock == B_TRUE)
			mutex_exit(&devp->pd_mutex);
		break;

	case PQI_CMD_STARTED:
		if (s->s_debug_level & (DBG_LVL_CDB | DBG_LVL_RQST))
			pqi_dump_io(cmd->pc_io_rqst);
		break;

	case PQI_CMD_CMPLT:
		if (grab_lock == B_TRUE)
			mutex_enter(&devp->pd_mutex);

		if ((cmd->pc_flags & PQI_FLAG_ABORTED) == 0) {
			list_remove(&devp->pd_cmd_list, cmd);

			devp->pd_active_cmds--;
			atomic_dec_32(&s->s_cmd_queue_len);
			pqi_free_io(cmd->pc_io_rqst);

			cmd->pc_flags &= ~PQI_FLAG_FINISHING;
			(void) ddi_taskq_dispatch(s->s_complete_taskq,
			    cmd_finish_task, cmd, 0);
		}

		if (grab_lock == B_TRUE)
			mutex_exit(&devp->pd_mutex);

		break;

	case PQI_CMD_FATAL:
		if ((cmd->pc_last_state == PQI_CMD_QUEUED) ||
		    (cmd->pc_last_state == PQI_CMD_STARTED)) {
			if (grab_lock == B_TRUE)
				mutex_enter(&devp->pd_mutex);

			cmd->pc_flags |= PQI_FLAG_ABORTED;

			/*
			 * If this call came from aio_io_complete() when
			 * dealing with a drive offline, the flags will
			 * contain PQI_FLAG_FINISHING, so just clear it
			 * here to be safe.
			 */
			cmd->pc_flags &= ~PQI_FLAG_FINISHING;

			list_remove(&devp->pd_cmd_list, cmd);

			devp->pd_active_cmds--;
			atomic_dec_32(&s->s_cmd_queue_len);
			if (cmd->pc_io_rqst)
				pqi_free_io(cmd->pc_io_rqst);

			(void) ddi_taskq_dispatch(s->s_complete_taskq,
			    cmd_finish_task, cmd, 0);

			if (grab_lock == B_TRUE)
				mutex_exit(&devp->pd_mutex);
		}
		break;

	case PQI_CMD_DESTRUCT:
		if (grab_lock == B_TRUE)
			mutex_enter(&devp->pd_mutex);

		if (list_link_active(&cmd->pc_list)) {
			list_remove(&devp->pd_cmd_list, cmd);
			devp->pd_active_cmds--;
			if (cmd->pc_io_rqst)
				pqi_free_io(cmd->pc_io_rqst);
		}

		if (grab_lock == B_TRUE)
			mutex_exit(&devp->pd_mutex);
		break;

	default:
		/*
		 * Normally a panic or ASSERT(0) would be appropriate here,
		 * except that in this case the 'cmd' memory could be
		 * coming from the kmem_cache pool, which during debug
		 * gets wiped with 0xbaddcafe.
		 */
		break;
	}
}

static uint_t supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
	PQI_EVENT_TYPE_HEARTBEAT
};

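/*
 * pqi_map_event -- return the index of 'event' within
 * supported_event_types[] above, or -1 if the event type isn't
 * supported.
 */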
int
pqi_map_event(uint8_t event)
{
	int i;

	for (i = 0; i < sizeof (supported_event_types) / sizeof (uint_t); i++)
		if (supported_event_types[i] == event)
			return (i);
	return (-1);
}

boolean_t
pqi_supported_event(uint8_t event)
{
	return (pqi_map_event(event) == -1 ? B_FALSE : B_TRUE);
}

char *
pqi_event_to_str(uint8_t event)
{
	switch (event) {
	case PQI_EVENT_TYPE_HOTPLUG: return ("Hotplug");
	case PQI_EVENT_TYPE_HARDWARE: return ("Hardware");
	case PQI_EVENT_TYPE_PHYSICAL_DEVICE: return ("Physical Device");
	case PQI_EVENT_TYPE_LOGICAL_DEVICE: return ("Logical Device");
	case PQI_EVENT_TYPE_AIO_STATE_CHANGE: return ("AIO State Change");
	case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE: return ("AIO Config Change");
	case PQI_EVENT_TYPE_HEARTBEAT: return ("Heartbeat");
	default: return ("Unsupported Event Type");
	}
}

char *
bool_to_str(int v)
{
	return (v ? "T" : "f");
}

char *
dtype_to_str(int t)
{
	switch (t) {
	case DTYPE_DIRECT: return ("Direct");
	case DTYPE_SEQUENTIAL: return ("Sequential");
	case DTYPE_ESI: return ("ESI");
	case DTYPE_ARRAY_CTRL: return ("RAID");
	default: return ("Unknown");
	}
}

static ddi_dma_attr_t single_dma_attrs = {
	DMA_ATTR_V0,		/* attribute layout version */
	0x0ull,			/* address low - should be 0 (longlong) */
	0xffffffffffffffffull,	/* address high - 64-bit max */
	0x7ffffull,		/* count max - max DMA object size */
	4096,			/* allocation alignment requirements */
	0x78,			/* burstsizes - binary encoded values */
	1,			/* minxfer - gran. of DMA engine */
	0x007ffffull,		/* maxxfer - gran. of DMA engine */
	0xffffffffull,		/* max segment size (DMA boundary) */
	1,			/* pqi_alloc_single requires contig memory */
	512,			/* granularity - device transfer size */
	0			/* flags, set to 0 */
};

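/*
 * pqi_alloc_single -- allocate a single contiguous DMA buffer of 'len'
 * bytes together with its handle, access handle, and mapping.
 *
 * The allocation follows the usual three-step DDI sequence:
 *	1) ddi_dma_alloc_handle() creates the DMA handle,
 *	2) ddi_dma_mem_alloc() allocates the DMA-able memory,
 *	3) ddi_dma_addr_bind_handle() binds the memory and yields the
 *	   device-visible address in the first cookie.
 * On any failure the partially built overhead structure is torn back
 * down through pqi_free_single().
 */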
pqi_dma_overhead_t *
pqi_alloc_single(pqi_state_t s, size_t len)
{
	pqi_dma_overhead_t *d;
	ddi_dma_cookie_t cookie;

	d = kmem_zalloc(sizeof (*d), KM_SLEEP);
	d->len_to_alloc = len;

	if (ddi_dma_alloc_handle(s->s_dip, &single_dma_attrs,
	    DDI_DMA_SLEEP, 0, &d->handle) != DDI_SUCCESS)
		goto error_out;

	if (ddi_dma_mem_alloc(d->handle, len, &s->s_reg_acc_attr,
	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, 0,
	    &d->alloc_memory, &len, &d->acc) != DDI_SUCCESS)
		goto error_out;

	(void) memset(d->alloc_memory, 0, len);
	if (ddi_dma_addr_bind_handle(d->handle, NULL, d->alloc_memory, len,
	    DDI_DMA_RDWR, DDI_DMA_SLEEP, 0, &cookie, &d->cookie_count) !=
	    DDI_SUCCESS)
		goto error_out;

	d->dma_addr = cookie.dmac_laddress;
	if (d->cookie_count != 1)
		ddi_dma_nextcookie(d->handle, &d->second);

	return (d);

error_out:
	pqi_free_single(s, d);
	return (NULL);
}

void
pqi_free_single(pqi_state_t s, pqi_dma_overhead_t *d)
{
	/*
	 * Guard against the error paths in pqi_alloc_single(), which can
	 * call here before the handle has been created or bound.
	 */
	if (d->handle != NULL)
		(void) ddi_dma_unbind_handle(d->handle);
	if (d->alloc_memory != NULL)
		ddi_dma_mem_free(&d->acc);
	if (d->handle != NULL)
		ddi_dma_free_handle(&d->handle);
	ASSERT(s->s_dip != NULL);
	kmem_free(d, sizeof (*d));
}

void
pqi_show_dev_state(pqi_state_t s)
{
	uint32_t dev_status = G32(s, pqi_registers.device_status);

	switch (dev_status & 0xf) {
	case 0:
		cmn_err(CE_NOTE, "Power_On_And_Reset\n");
		break;

	case 1:
		cmn_err(CE_NOTE, "PQI_Status_Available\n");
		break;

	case 2:
		cmn_err(CE_NOTE, "All_Registers_Ready\n");
		break;

	case 3:
		cmn_err(CE_NOTE, "Administrator_Queue_Pair_Ready\n");
		break;

	case 4:
		cmn_err(CE_NOTE, "Error: %s %s\n",
		    dev_status & 0x100 ? "(OP OQ Error)" : "",
		    dev_status & 0x200 ? "(OP IQ Error)" : "");
		show_error_detail(s);
		break;
	}
}

void *
pqi_kmem_zalloc(size_t size, int kmflag, char *file, int line, pqi_state_t s)
{
	void *v;

	if ((v = pqi_kmem_alloc(size, kmflag, file, line, s)) != NULL)
		(void) memset(v, 0, size);

	return (v);
}

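/*
 * pqi_kmem_alloc -- debug wrapper around kmem_alloc() that brackets
 * each allocation with guard structures.
 *
 * The underlying buffer is laid out as
 *
 *	+----------+------------------+----------+
 *	|  header  | caller's memory  |  tailer  |
 *	+----------+------------------+----------+
 *
 * where header and tailer record the allocating file/line along with a
 * signature (MEM_CHECK_SIG). Both guards are kept on the s_mem_check
 * list so that pqi_mem_check() below can periodically verify that no
 * signature has been overwritten, which would indicate a buffer
 * underrun or overrun.
 */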
void *
pqi_kmem_alloc(size_t size, int kmflag, char *file, int line, pqi_state_t s)
{
	size_t ht_size;
	void *v;
	mem_check_t header;
	mem_check_t tailer;
	size_t size_adj;

	ht_size = PQIALIGN_TYPED(sizeof (struct mem_check), 64, size_t);
	size_adj = PQIALIGN_TYPED(size, 64, size_t);
	v = kmem_alloc(ht_size * 2 + size_adj, kmflag);
	if (v == NULL)
		return (NULL);

	header = v;
	list_link_init(&header->m_node);
	(void) strncpy(header->m_file, file, sizeof (header->m_file));
	header->m_line = line;
	header->m_len = ht_size * 2 + size_adj;
	header->m_sig = MEM_CHECK_SIG;

	tailer = (mem_check_t)((uintptr_t)v + ht_size + size_adj);
	list_link_init(&tailer->m_node);
	(void) strncpy(tailer->m_file, file, sizeof (tailer->m_file));
	tailer->m_line = line;
	tailer->m_len = ht_size * 2 + size_adj;
	tailer->m_sig = MEM_CHECK_SIG;

	mutex_enter(&s->s_mem_mutex);
	list_insert_tail(&s->s_mem_check, header);
	list_insert_tail(&s->s_mem_check, tailer);
	mutex_exit(&s->s_mem_mutex);
	ASSERT(s->s_dip != NULL);

	return ((void *)((uintptr_t)v + ht_size));
}

/*ARGSUSED*/
void
pqi_kmem_free(void *v, size_t size, pqi_state_t s)
{
	mem_check_t header;
	mem_check_t tailer;
	size_t ht_size;

	ht_size = PQIALIGN_TYPED(sizeof (struct mem_check), 64, size_t);
	header = (mem_check_t)((uintptr_t)v - ht_size);
	ASSERT(header->m_sig == MEM_CHECK_SIG);

	mutex_enter(&s->s_mem_mutex);
	tailer = list_next(&s->s_mem_check, header);
	list_remove(&s->s_mem_check, header);
	list_remove(&s->s_mem_check, tailer);
	mutex_exit(&s->s_mem_mutex);
	ASSERT(s->s_dip != NULL);

	kmem_free(header, header->m_len);
}

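/*
 * pqi_mem_check -- timeout(9F) handler that walks the guard list built
 * by pqi_kmem_alloc() and complains (and, on DEBUG kernels, ASSERTs)
 * when a guard signature has been clobbered. It reschedules itself
 * every five seconds for the life of the driver instance.
 */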
void
pqi_mem_check(void *v)
{
	pqi_state_t s = v;
	mem_check_t mc;

	mutex_enter(&s->s_mem_mutex);
	for (mc = list_head(&s->s_mem_check); mc != NULL;
	    mc = list_next(&s->s_mem_check, mc)) {
		if (mc->m_sig != MEM_CHECK_SIG) {
			cmn_err(CE_NOTE, "%s: Bad sig from %s:%d\n",
			    __func__, mc->m_file, mc->m_line);
			ASSERT(0);
		}
	}
	ASSERT(s->s_dip != NULL);
	s->s_mem_timeo = timeout(pqi_mem_check, s,
	    drv_usectohz(5 * 1000 * 1000));
	mutex_exit(&s->s_mem_mutex);
}

char *
cdb_to_str(uint8_t scsi_cmd)
{
	switch (scsi_cmd) {
	case SCMD_INQUIRY: return ("Inquiry");
	case SCMD_TEST_UNIT_READY: return ("TestUnitReady");
	case SCMD_READ: return ("Read");
	case SCMD_READ_G1: return ("Read G1");
	case SCMD_RESERVE: return ("Reserve");
	case SCMD_RELEASE: return ("Release");
	case SCMD_WRITE: return ("Write");
	case SCMD_WRITE_G1: return ("Write G1");
	case SCMD_START_STOP: return ("StartStop");
	case SCMD_READ_CAPACITY: return ("ReadCap");
	case SCMD_MODE_SENSE: return ("ModeSense");
	case SCMD_MODE_SELECT: return ("ModeSelect");
	case SCMD_SVC_ACTION_IN_G4: return ("ActionInG4");
	case SCMD_MAINTENANCE_IN: return ("MaintenanceIn");
	case SCMD_GDIAG: return ("ReceiveDiag");
	case SCMD_SDIAG: return ("SendDiag");
	case SCMD_LOG_SENSE_G1: return ("LogSenseG1");
	case SCMD_PERSISTENT_RESERVE_IN: return ("PgrReserveIn");
	case SCMD_PERSISTENT_RESERVE_OUT: return ("PgrReserveOut");
	case BMIC_READ: return ("BMIC Read");
	case BMIC_WRITE: return ("BMIC Write");
	case CISS_REPORT_LOG: return ("CISS Report Logical");
	case CISS_REPORT_PHYS: return ("CISS Report Physical");
	default: return ("unmapped");
	}
}

char *
io_status_to_str(int val)
{
	switch (val) {
	case PQI_DATA_IN_OUT_GOOD: return ("Good");
	case PQI_DATA_IN_OUT_UNDERFLOW: return ("Underflow");
	case PQI_DATA_IN_OUT_ERROR: return ("ERROR");
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR: return ("Protocol Error");
	case PQI_DATA_IN_OUT_HARDWARE_ERROR: return ("Hardware Error");
	default: return ("UNHANDLED");
	}
}

char *
scsi_status_to_str(uint8_t val)
{
	switch (val) {
	case STATUS_GOOD: return ("Good");
	case STATUS_CHECK: return ("Check");
	case STATUS_MET: return ("Met");
	case STATUS_BUSY: return ("Busy");
	case STATUS_INTERMEDIATE: return ("Intermediate");
	case STATUS_RESERVATION_CONFLICT: return ("Reservation Conflict");
	case STATUS_TERMINATED: return ("Terminated");
	case STATUS_QFULL: return ("QFull");
	case STATUS_ACA_ACTIVE: return ("ACA Active");
	case STATUS_TASK_ABORT: return ("Task Abort");
	default: return ("Illegal Status");
	}
}

char *
iu_type_to_str(int val)
{
	switch (val) {
	case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS: return ("Success");
	case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS: return ("AIO Success");
	case PQI_RESPONSE_IU_GENERAL_MANAGEMENT: return ("General");
	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR: return ("IO Error");
	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR: return ("AIO IO Error");
	case PQI_RESPONSE_IU_AIO_PATH_DISABLED: return ("AIO Path Disabled");
	default: return ("UNHANDLED");
	}
}

void
pqi_free_mem_len(mem_len_pair_t *m)
{
	kmem_free(m->mem, m->len);
}

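/*
 * pqi_alloc_mem_len -- allocate a string buffer and return it together
 * with its length so the caller can later release it with
 * pqi_free_mem_len(). The allocation sleeps (KM_SLEEP) and therefore
 * cannot fail; the buffer starts out as an empty NUL-terminated string.
 */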
mem_len_pair_t
pqi_alloc_mem_len(int len)
{
	mem_len_pair_t m;

	m.len = len;
	m.mem = kmem_alloc(m.len, KM_SLEEP);
	*m.mem = '\0';
	return (m);
}

/*
 * []------------------------------------------------------------------[]
 * | Support/utility functions for main functions above                 |
 * []------------------------------------------------------------------[]
 */

/*
 * cmd_finish_task -- taskq to complete command processing
 *
 * Under high load the driver will run out of IO slots, which causes command
 * requests to pause until a slot is free. Calls to pkt_comp below can circle
 * through the SCSI layer and back into the driver to start another command
 * request, and therefore possibly pause. If cmd_finish_task() were called on
 * the interrupt thread, a hang could occur because IO slots would never be
 * processed and freed. So, this portion of command completion is run on a
 * taskq.
 */
static void
cmd_finish_task(void *v)
{
	pqi_cmd_t cmd = v;
	struct scsi_pkt *pkt;

	pkt = cmd->pc_pkt;
	if (cmd->pc_poll)
		sema_v(cmd->pc_poll);
	if ((pkt->pkt_flags & FLAG_NOINTR) == 0 &&
	    (pkt->pkt_comp != NULL))
		(*pkt->pkt_comp)(pkt);
}

typedef struct qual {
	int	q_val;
	char	*q_str;
} qual_t;

typedef struct code_qual {
	int	cq_code;
	qual_t	*cq_list;
} code_qual_t;

/*
 * These messages come from the pqi2r01 spec, section 5.6, table 18.
 */
static qual_t pair0[] = { {0, "No error"}, {0, NULL} };
static qual_t pair1[] = { {0, "Error detected during initialization"},
	{0, NULL} };
static qual_t pair2[] = { {1, "Invalid PD Function"},
	{2, "Invalid parameter for PD function"},
	{0, NULL} };
static qual_t pair3[] = { {0, "Error creating admin queue pair"},
	{1, "Error deleting admin queue pair"},
	{0, NULL} };
static qual_t pair4[] = { {1, "Invalid IU type in general admin request"},
	{2, "Invalid IU length in general admin request"},
	{0, NULL} };
static qual_t pair5[] = { {1, "Internal error"},
	{2, "OQ spanning conflict"},
	{0, NULL} };
static qual_t pair6[] = { {1, "Error completing PQI soft reset"},
	{2, "Error completing PQI firmware reset"},
	{3, "Error completing PQI hardware reset"},
	{0, NULL} };
static code_qual_t cq_table[] = {
	{ 0, pair0 },
	{ 1, pair1 },
	{ 2, pair2 },
	{ 3, pair3 },
	{ 4, pair4 },
	{ 5, pair5 },
	{ 6, pair6 },
	{ 0, NULL },
};

static void
show_error_detail(pqi_state_t s)
{
	uint32_t error_reg = G32(s, pqi_registers.device_error);
	uint8_t code, qualifier;
	qual_t *p;
	code_qual_t *cq;

	code = error_reg & 0xff;
	qualifier = (error_reg >> 8) & 0xff;

	for (cq = cq_table; cq->cq_list != NULL; cq++) {
		if (cq->cq_code == code) {
			for (p = cq->cq_list; p->q_str != NULL; p++) {
				if (p->q_val == qualifier) {
					cmn_err(CE_NOTE,
					    "[code=%x,qual=%x]: %s\n",
					    code, qualifier, p->q_str);
					return;
				}
			}
		}
	}
	cmn_err(CE_NOTE, "Undefined code(%x)/qualifier(%x)\n",
	    code, qualifier);
}

/*ARGSUSED*/
static void
pqi_catch_release(pqi_io_request_t *io, void *v)
{
	/*
	 * This call can occur if the software times out a command because
	 * the HBA hasn't responded in the default amount of time, 10 seconds,
	 * and the HBA then responds. This has occurred a few times during
	 * testing, so catch and ignore it.
	 */
}

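/*
 * reinit_io -- return an io request to its pristine state between
 * uses. The callback is reset to pqi_catch_release so that a late
 * response from the HBA for a request that has already been recycled
 * is quietly dropped instead of dereferencing stale state.
 */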
static void
reinit_io(pqi_io_request_t *io)
{
	io->io_cb = pqi_catch_release;
	io->io_status = 0;
	io->io_error_info = NULL;
	io->io_raid_bypass = B_FALSE;
	io->io_context = NULL;
	io->io_cmd = NULL;
}

/* ---- Non-thread safe, for debugging state display code only ---- */
static char bad_state_buf[64];

static char *
cmd_state_str(pqi_cmd_state_t state)
{
	switch (state) {
	case PQI_CMD_UNINIT: return ("Uninitialized");
	case PQI_CMD_CONSTRUCT: return ("Construct");
	case PQI_CMD_INIT: return ("Init");
	case PQI_CMD_QUEUED: return ("Queued");
	case PQI_CMD_STARTED: return ("Started");
	case PQI_CMD_CMPLT: return ("Completed");
	case PQI_CMD_FATAL: return ("Fatal");
	case PQI_CMD_DESTRUCT: return ("Destruct");
	default:
		(void) snprintf(bad_state_buf, sizeof (bad_state_buf),
		    "BAD STATE (%x)", state);
		return (bad_state_buf);
	}
}

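/*
 * build_cdb_str -- decode a CDB into a human-readable string for the
 * debug output in dump_raid() and dump_aio() below. MEMP() (defined in
 * the driver header) appends formatted text to the mem_len_pair_t
 * buffer 'm', much as SCRATCH_PRINT() appends to 'scratch' below. The
 * caller must release the result with pqi_free_mem_len().
 */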
mem_len_pair_t
build_cdb_str(uint8_t *cdb)
{
	mem_len_pair_t m = pqi_alloc_mem_len(64);

	m.mem[0] = '\0';

	switch (cdb[0]) {
	case SCMD_INQUIRY:
		MEMP("%s", cdb_to_str(cdb[0]));
		if ((cdb[1] & 0x1) != 0)
			MEMP(".vpd=%x", cdb[2]);
		else if (cdb[2])
			MEMP("Illegal CDB");
		MEMP(".len=%x", cdb[3] << 8 | cdb[4]);
		break;

	case SCMD_READ:
		MEMP("%s.lba=%x.len=%x", cdb_to_str(cdb[0]),
		    (cdb[1] & 0x1f) << 16 | cdb[2] << 8 | cdb[3],
		    cdb[4]);
		break;

	case SCMD_MODE_SENSE:
		MEMP("%s.dbd=%s.pc=%x.page_code=%x.subpage=%x."
		    "len=%x", cdb_to_str(cdb[0]),
		    bool_to_str(cdb[1] & 8), cdb[2] >> 6 & 0x3,
		    cdb[2] & 0x3f, cdb[3], cdb[4]);
		break;

	case SCMD_START_STOP:
		MEMP("%s.immed=%s.power=%x.start=%s",
		    cdb_to_str(cdb[0]), bool_to_str(cdb[1] & 1),
		    (cdb[4] >> 4) & 0xf, bool_to_str(cdb[4] & 1));
		break;

	case SCMD_SVC_ACTION_IN_G4:
	case SCMD_READ_CAPACITY:
	case SCMD_TEST_UNIT_READY:
	default:
		MEMP("%s (%x)", cdb_to_str(cdb[0]), cdb[0]);
		break;
	}
	return (m);
}

mem_len_pair_t
mem_to_arraystr(uint8_t *ptr, size_t len)
{
	mem_len_pair_t m = pqi_alloc_mem_len(len * 3 + 20);
	int i;

	m.mem[0] = '\0';
	MEMP("{ ");
	for (i = 0; i < len; i++) {
		MEMP("%02x ", *ptr++ & 0xff);
	}
	MEMP(" }");

	return (m);
}

/* ---- Non-thread safe, for debugging only ---- */
static char lun_str[64];

static char *
lun_to_str(uint8_t *lun)
{
	int i;

	lun_str[0] = '\0';
	for (i = 0; i < 8; i++)
		(void) snprintf(lun_str + strlen(lun_str),
		    sizeof (lun_str) - strlen(lun_str), "%02x.", *lun++);
	return (lun_str);
}

static char *
dir_to_str(int dir)
{
	switch (dir) {
	case SOP_NO_DIRECTION_FLAG: return ("NoDir");
	case SOP_WRITE_FLAG: return ("Write");
	case SOP_READ_FLAG: return ("Read");
	case SOP_BIDIRECTIONAL: return ("RW");
	default: return ("Oops");
	}
}

static char *
flags_to_str(uint32_t flag)
{
	switch (flag) {
	case CISS_SG_LAST: return ("Last");
	case CISS_SG_CHAIN: return ("Chain");
	case CISS_SG_NORMAL: return ("Norm");
	default: return ("Oops");
	}
}

/* ---- Only for use in dump_raid and dump_aio ---- */
#define	SCRATCH_PRINT(args...)	(void)snprintf(scratch + strlen(scratch), \
	len - strlen(scratch), args)

static void
dump_raid(pqi_state_t s, void *v, pqi_index_t idx)
{
	int i;
	int len = 512;
	caddr_t scratch;
	pqi_raid_path_request_t *rqst = v;
	mem_len_pair_t cdb_data;
	caddr_t raw = v;

	scratch = kmem_alloc(len, KM_SLEEP);
	scratch[0] = '\0';

	if (s->s_debug_level & DBG_LVL_RAW_RQST) {
		SCRATCH_PRINT("RAW RQST: ");
		for (i = 0; i < sizeof (*rqst); i++)
			SCRATCH_PRINT("%02x:", *raw++ & 0xff);
		cmn_err(CE_NOTE, "%s\n", scratch);
		scratch[0] = '\0';
	}

	if (s->s_debug_level & DBG_LVL_CDB) {
		cdb_data = build_cdb_str(rqst->rp_cdb);
		SCRATCH_PRINT("cdb(%s),", cdb_data.mem);
		pqi_free_mem_len(&cdb_data);
	}

	ASSERT0(rqst->header.reserved);
	ASSERT0(rqst->reserved1);
	ASSERT0(rqst->reserved2);
	ASSERT0(rqst->reserved3);
	ASSERT0(rqst->reserved4);
	ASSERT0(rqst->reserved5);

	if (s->s_debug_level & DBG_LVL_RQST) {
		SCRATCH_PRINT("pi=%x,h(type=%x,len=%x,id=%x)", idx,
		    rqst->header.iu_type, rqst->header.iu_length,
		    rqst->header.iu_id);
		SCRATCH_PRINT("rqst_id=%x,nexus_id=%x,len=%x,lun=(%s),"
		    "proto=%x,dir=%s,partial=%s,",
		    rqst->rp_id, rqst->rp_nexus_id, rqst->rp_data_len,
		    lun_to_str(rqst->rp_lun), rqst->protocol_specific,
		    dir_to_str(rqst->rp_data_dir),
		    bool_to_str(rqst->rp_partial));
		SCRATCH_PRINT("fence=%s,error_idx=%x,task_attr=%x,"
		    "priority=%x,additional=%x,sg=(",
		    bool_to_str(rqst->rp_fence), rqst->rp_error_index,
		    rqst->rp_task_attr,
		    rqst->rp_pri, rqst->rp_additional_cdb);
		for (i = 0; i < PQI_MAX_EMBEDDED_SG_DESCRIPTORS; i++) {
			SCRATCH_PRINT("%lx:%x:%s,",
			    (long unsigned int)rqst->rp_sglist[i].sg_addr,
			    rqst->rp_sglist[i].sg_len,
			    flags_to_str(rqst->rp_sglist[i].sg_flags));
		}
		SCRATCH_PRINT(")");
	}

	cmn_err(CE_NOTE, "%s\n", scratch);
	kmem_free(scratch, len);
}

static void
dump_aio(void *v)
{
	pqi_aio_path_request_t *rqst = v;
	int i;
	int len = 512;
	caddr_t scratch;
	mem_len_pair_t cdb_data;

	scratch = kmem_alloc(len, KM_SLEEP);
	scratch[0] = '\0';

	cdb_data = build_cdb_str(rqst->cdb);
	SCRATCH_PRINT("cdb(%s)", cdb_data.mem);
	pqi_free_mem_len(&cdb_data);

	SCRATCH_PRINT("h(type=%x,len=%x,id=%x)",
	    rqst->header.iu_type, rqst->header.iu_length,
	    rqst->header.iu_id);
	SCRATCH_PRINT("rqst_id=%x,nexus_id=%x,len=%x,lun=(%s),dir=%s,"
	    "partial=%s,",
	    rqst->request_id, rqst->nexus_id, rqst->buffer_length,
	    lun_to_str(rqst->lun_number),
	    dir_to_str(rqst->data_direction), bool_to_str(rqst->partial));
	SCRATCH_PRINT("fence=%s,error_idx=%x,task_attr=%x,priority=%x,"
	    "num_sg=%x,cdb_len=%x,sg=(",
	    bool_to_str(rqst->fence), rqst->error_index,
	    rqst->task_attribute, rqst->command_priority,
	    rqst->num_sg_descriptors, rqst->cdb_length);
	for (i = 0; i < PQI_MAX_EMBEDDED_SG_DESCRIPTORS; i++) {
		SCRATCH_PRINT("%lx:%x:%s,",
		    (long unsigned int)rqst->ap_sglist[i].sg_addr,
		    rqst->ap_sglist[i].sg_len,
		    flags_to_str(rqst->ap_sglist[i].sg_flags));
	}
	SCRATCH_PRINT(")");

	cmn_err(CE_NOTE, "%s\n", scratch);
	kmem_free(scratch, len);
}