/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2018 Nexenta Systems, Inc.
 */

/*
 * This file contains all routines necessary to interface with the SCSA
 * transport layer.
 */
#include <smartpqi.h>

/*
 * []------------------------------------------------------------------[]
 * | Forward declarations for SCSA trans routines.                      |
 * []------------------------------------------------------------------[]
 */
static int pqi_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
static void pqi_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
static int pqi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
static int pqi_scsi_reset(struct scsi_address *ap, int level);
static int pqi_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
static int pqi_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
static int pqi_scsi_setcap(struct scsi_address *ap, char *cap, int value,
    int tgtonly);
static struct scsi_pkt *pqi_init_pkt(struct scsi_address *ap,
    struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg);
static void pqi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static void pqi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
static void pqi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
static int pqi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg);
static int pqi_quiesce(dev_info_t *dip);
static int pqi_unquiesce(dev_info_t *dip);
static int pqi_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp);

/* ---- Support method declarations ---- */
static int config_one(dev_info_t *pdip, pqi_state_t s, pqi_device_t d,
    dev_info_t **childp);
static void abort_all(struct scsi_address *ap, pqi_state_t s);
static int cmd_ext_alloc(pqi_cmd_t cmd, int kf);
static void cmd_ext_free(pqi_cmd_t cmd);
static boolean_t is_physical_dev(pqi_device_t d);
static boolean_t decode_to_target(void *arg, long *target);
static void cmd_timeout_scan(void *);

int
smartpqi_register_hba(pqi_state_t s)
{
	scsi_hba_tran_t	*tran;
	int		flags;
	char		iport_str[16];
	int		instance = ddi_get_instance(s->s_dip);

	tran = s->s_tran = scsi_hba_tran_alloc(s->s_dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL)
		return (FALSE);

	tran->tran_hba_private = s;
	tran->tran_tgt_private = NULL;

	tran->tran_tgt_init = pqi_scsi_tgt_init;
	tran->tran_tgt_free = pqi_scsi_tgt_free;
	tran->tran_tgt_probe = scsi_hba_probe;

	tran->tran_start = pqi_start;
	tran->tran_reset = pqi_scsi_reset;
	tran->tran_abort = pqi_scsi_abort;
	tran->tran_getcap = pqi_scsi_getcap;
	tran->tran_setcap = pqi_scsi_setcap;
	tran->tran_bus_config = pqi_bus_config;

	tran->tran_init_pkt = pqi_init_pkt;
	tran->tran_destroy_pkt = pqi_destroy_pkt;
	tran->tran_dmafree = pqi_dmafree;
	tran->tran_sync_pkt = pqi_sync_pkt;

	tran->tran_reset_notify = pqi_reset_notify;
	tran->tran_quiesce = pqi_quiesce;
	tran->tran_unquiesce = pqi_unquiesce;
	tran->tran_bus_reset = NULL;

	tran->tran_add_eventcall = NULL;
	tran->tran_get_eventcookie = NULL;
	tran->tran_post_event = NULL;
	tran->tran_remove_eventcall = NULL;
	tran->tran_interconnect_type = INTERCONNECT_SAS;

	/*
	 * scsi_vhci needs to have "initiator-port" set, but doesn't
	 * seem to care what it's set to. iSCSI uses the InitiatorName
	 * whereas mpt_sas uses the WWN port id, but this HBA doesn't
	 * have such a value. So, for now the instance number will be used.
	 */
	(void) snprintf(iport_str, sizeof (iport_str), "0x%x", instance);
	if (ddi_prop_update_string(DDI_DEV_T_NONE, s->s_dip,
	    SCSI_ADDR_PROP_INITIATOR_PORT, iport_str) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "%s: Failed to create prop (%s) on %d",
		    __func__, SCSI_ADDR_PROP_INITIATOR_PORT, instance);
	}

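	/*
	 * SCSI_HBA_ADDR_COMPLEX means unit addresses are tied to the
	 * scsi_device(9S) structure, which is why pqi_scsi_tgt_init()
	 * hangs the pqi_device_t off the device via
	 * scsi_device_hba_private_set().
	 */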
	flags = SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB;
	if (scsi_hba_attach_setup(s->s_dip, &s->s_msg_dma_attr, tran,
	    flags) != DDI_SUCCESS) {
		dev_err(s->s_dip, CE_NOTE, "scsi_hba_attach_setup failed");
		scsi_hba_tran_free(s->s_tran);
		s->s_tran = NULL;
		return (FALSE);
	}

	if (s->s_enable_mpxio) {
		if (mdi_phci_register(MDI_HCI_CLASS_SCSI, s->s_dip, 0) !=
		    MDI_SUCCESS) {
			s->s_enable_mpxio = 0;
		}
	}

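	/*
	 * Kick off the periodic command-timeout scan; cmd_timeout_scan()
	 * re-arms itself every CMD_TIMEOUT_SCAN_SECS seconds.
	 */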
	s->s_cmd_timeout = timeout(cmd_timeout_scan, s,
	    CMD_TIMEOUT_SCAN_SECS * drv_usectohz(MICROSEC));

	return (TRUE);
}

void
smartpqi_unregister_hba(pqi_state_t s)
{
	if (s->s_enable_mpxio)
		(void) mdi_phci_unregister(s->s_dip, 0);

	if (s->s_cmd_timeout != NULL) {
		(void) untimeout(s->s_cmd_timeout);
		s->s_cmd_timeout = NULL;
	}

	if (s->s_tran == NULL)
		return;
	scsi_hba_tran_free(s->s_tran);
	s->s_tran = NULL;
}

/*ARGSUSED*/
static int
pqi_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	uint16_t	lun;
	pqi_device_t	d;
	pqi_state_t	s = hba_tran->tran_hba_private;
	mdi_pathinfo_t	*pip;
	int		type;

	if ((lun = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
	    LUN_PROP, 0xffff)) == 0xffff) {
		return (DDI_NOT_WELL_FORMED);
	}

	if ((d = pqi_find_target_dev(s, lun)) == NULL)
		return (DDI_FAILURE);

	scsi_device_hba_private_set(sd, d);

	type = mdi_get_component_type(tgt_dip);
	if (type == MDI_COMPONENT_CLIENT) {
		char wwid_str[64];

		if ((pip = (mdi_pathinfo_t *)sd->sd_private) == NULL)
			return (DDI_NOT_WELL_FORMED);

		(void) snprintf(wwid_str, sizeof (wwid_str), "%" PRIx64,
		    d->pd_sas_address);
		(void) mdi_prop_update_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
		    wwid_str);
	}

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void
pqi_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
}

/*
 * Notes:
 *	- transport the command to the addressed SCSI target/lun device
 *	- normal operation is to schedule the command to be transported,
 *	  and return TRAN_ACCEPT if this is successful.
 *	- if NO_INTR, tran_start must poll device for command completion
 */
/*ARGSUSED*/
static int
pqi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	boolean_t	poll = ((pkt->pkt_flags & FLAG_NOINTR) != 0);
	int		rc;
	pqi_cmd_t	cmd = PKT2CMD(pkt);
	pqi_state_t	s = ap->a_hba_tran->tran_hba_private;

	ASSERT3P(cmd->pc_pkt, ==, pkt);
	ASSERT3P(cmd->pc_softc, ==, s);

	if (pqi_is_offline(s) || !cmd->pc_device->pd_online)
		return (TRAN_FATAL_ERROR);

	/*
	 * Reinitialize some fields because the packet may have been
	 * resubmitted.
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;

	/* ---- Zero status byte ---- */
	*(pkt->pkt_scbp) = 0;

	if ((cmd->pc_flags & PQI_FLAG_DMA_VALID) != 0) {
		ASSERT(cmd->pc_dma_count);
		pkt->pkt_resid = cmd->pc_dma_count;

		/* ---- Sync consistent packets first (only write data) ---- */
		if (((cmd->pc_flags & PQI_FLAG_IO_IOPB) != 0) ||
		    ((cmd->pc_flags & PQI_FLAG_IO_READ) == 0)) {
			(void) ddi_dma_sync(cmd->pc_dmahdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
		}
	}

	cmd->pc_target = ap->a_target;

	mutex_enter(&s->s_mutex);
	if (HBA_IS_QUIESCED(s) && !poll) {
		mutex_exit(&s->s_mutex);
		return (TRAN_BUSY);
	}
	mutex_exit(&s->s_mutex);

	pqi_cmd_sm(cmd, PQI_CMD_QUEUED, B_TRUE);

	rc = pqi_transport_command(s, cmd);

	if (poll) {
		boolean_t	qnotify;

		if (rc == TRAN_ACCEPT) {
			uint32_t	old_state;
			int		timeo;

			timeo = pkt->pkt_time ? pkt->pkt_time :
			    SCSI_POLL_TIMEOUT;
			timeo *= MILLISEC / 2;
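			/*
			 * timeo now counts iterations of the polling
			 * loop below; each iteration waits MILLISEC / 2
			 * microseconds.
			 */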
			old_state = pqi_disable_intr(s);
			do {
				drv_usecwait(MILLISEC / 2);
				pqi_process_io_intr(s, &s->s_queue_groups[0]);
				if (--timeo == 0) {
					pkt->pkt_state |= STAT_TIMEOUT;
					pkt->pkt_reason = CMD_TIMEOUT;
					break;
				}
			} while (pkt->pkt_state == 0);
			pqi_enable_intr(s, old_state);
		}

		scsi_hba_pkt_comp(pkt);

		mutex_enter(&s->s_mutex);
		qnotify = HBA_QUIESCED_PENDING(s);
		mutex_exit(&s->s_mutex);

		if (qnotify)
			pqi_quiesced_notify(s);
	}

	return (rc);
}

static int
pqi_scsi_reset(struct scsi_address *ap, int level)
{
	pqi_device_t	d;
	pqi_state_t	s;
	int		rval = FALSE;

	s = ap->a_hba_tran->tran_hba_private;
	switch (level) {
	case RESET_TARGET:
	case RESET_LUN:
		if ((d = scsi_device_hba_private_get(ap->a.a_sd)) == NULL)
			break;

		if (pqi_lun_reset(s, d) == B_TRUE)
			rval = TRUE;
		break;

	case RESET_BUS:
	case RESET_ALL:
		for (d = list_head(&s->s_devnodes); d != NULL;
		    d = list_next(&s->s_devnodes, d)) {
			(void) pqi_lun_reset(s, d);
		}
		rval = TRUE;
		break;
	}
	return (rval);
}

/*
 * abort handling:
 *
 * Notes:
 *	- if pkt is not NULL, abort just that command
 *	- if pkt is NULL, abort all outstanding commands for target
 */
static int
pqi_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	boolean_t	qnotify = B_FALSE;
	pqi_state_t	s = ADDR2PQI(ap);

	if (pkt != NULL) {
		/* ---- Abort single command ---- */
		pqi_cmd_t	cmd = PKT2CMD(pkt);

		mutex_enter(&cmd->pc_device->pd_mutex);
		pqi_fail_cmd(cmd, CMD_ABORTED, STAT_ABORTED);
		mutex_exit(&cmd->pc_device->pd_mutex);
	} else {
		abort_all(ap, s);
	}
	qnotify = HBA_QUIESCED_PENDING(s);

	if (qnotify)
		pqi_quiesced_notify(s);
	return (1);
}

/*
 * capability handling:
 * (*tran_getcap). Get the capability named, and return its value.
 */
/*ARGSUSED*/
static int
pqi_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
{
	pqi_state_t	s = ap->a_hba_tran->tran_hba_private;

	if (cap == NULL)
		return (-1);
	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_LUN_RESET:
		return ((s->s_flags & PQI_HBA_LUN_RESET_CAP) != 0);
	case SCSI_CAP_ARQ:
		return ((s->s_flags & PQI_HBA_AUTO_REQUEST_SENSE) != 0);
	case SCSI_CAP_UNTAGGED_QING:
		return (1);
	default:
		return (-1);
	}
}

/*
 * (*tran_setcap). Set the capability named to the value given.
 */
/*ARGSUSED*/
static int
pqi_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
{
	pqi_state_t	s = ADDR2PQI(ap);
	int		rval = FALSE;

	if (cap == NULL)
		return (-1);

	switch (scsi_hba_lookup_capstr(cap)) {
	case SCSI_CAP_ARQ:
		if (value)
			s->s_flags |= PQI_HBA_AUTO_REQUEST_SENSE;
		else
			s->s_flags &= ~PQI_HBA_AUTO_REQUEST_SENSE;
		rval = 1;
		break;

	case SCSI_CAP_LUN_RESET:
		if (value)
			s->s_flags |= PQI_HBA_LUN_RESET_CAP;
		else
			s->s_flags &= ~PQI_HBA_LUN_RESET_CAP;
		break;

	default:
		break;
	}

	return (rval);
}

/*ARGSUSED*/
int
pqi_cache_constructor(void *buf, void *un, int flags)
{
	pqi_cmd_t	c = (pqi_cmd_t)buf;
	pqi_state_t	s = un;
	int		(*callback)(caddr_t);

	c->pc_softc = s;
	callback = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	/* ---- Allocate a DMA handle for data transfers ---- */
	if (ddi_dma_alloc_handle(s->s_dip, &s->s_msg_dma_attr, callback,
	    NULL, &c->pc_dmahdl) != DDI_SUCCESS) {
		dev_err(s->s_dip, CE_WARN, "Failed to alloc dma handle");
		return (-1);
	}
	pqi_cmd_sm(c, PQI_CMD_CONSTRUCT, B_TRUE);

	return (0);
}

/*ARGSUSED*/
void
pqi_cache_destructor(void *buf, void *un)
{
	pqi_cmd_t	cmd = buf;

	if (cmd->pc_dmahdl != NULL) {
		(void) ddi_dma_unbind_handle(cmd->pc_dmahdl);
		ddi_dma_free_handle(&cmd->pc_dmahdl);
		cmd->pc_dmahdl = NULL;
	}
}

/*
 * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
 *
 * One of three possibilities:
 *	- allocate scsi_pkt
 *	- allocate scsi_pkt and DMA resources
 *	- allocate DMA resources to an already-allocated pkt
 */
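/*
 * Flow: a NULL pkt allocates a fresh command from the kmem cache; an
 * existing pkt with pc_nwin != 0 simply advances to its next DMA window;
 * otherwise, when a buf(9S) is supplied, DMA resources are bound and the
 * cookies are cached in pc_cached_cookies.
 */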
/*ARGSUSED*/
static struct scsi_pkt *
pqi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
    int (*callback)(), caddr_t arg)
{
	pqi_cmd_t	cmd;
	pqi_state_t	s;
	int		kf = (callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
	boolean_t	is_new = B_FALSE;
	int		rc;
	int		i;
	pqi_device_t	devp;

	s = ap->a_hba_tran->tran_hba_private;

	if (pkt == NULL) {
		ddi_dma_handle_t	saved_dmahdl;
		pqi_cmd_state_t		saved_state;

		if ((devp = scsi_device_hba_private_get(ap->a.a_sd)) == NULL)
			return (NULL);
		if ((cmd = kmem_cache_alloc(s->s_cmd_cache, kf)) == NULL)
			return (NULL);

		is_new = B_TRUE;
		saved_dmahdl = cmd->pc_dmahdl;
		saved_state = cmd->pc_cmd_state;

		(void) memset(cmd, 0, sizeof (*cmd));

		cmd->pc_dmahdl = saved_dmahdl;
		cmd->pc_cmd_state = saved_state;

		cmd->pc_device = devp;
		cmd->pc_pkt = &cmd->pc_cached_pkt;
		cmd->pc_softc = s;
		cmd->pc_tgtlen = tgtlen;
		cmd->pc_statuslen = statuslen;
		cmd->pc_cmdlen = cmdlen;
		cmd->pc_dma_count = 0;

		pkt = cmd->pc_pkt;
		pkt->pkt_ha_private = cmd;
		pkt->pkt_address = *ap;
		pkt->pkt_scbp = (uint8_t *)&cmd->pc_cmd_scb;
		pkt->pkt_cdbp = cmd->pc_cdb;
		pkt->pkt_private = (opaque_t)cmd->pc_tgt_priv;
		if (pkt->pkt_time == 0)
			pkt->pkt_time = SCSI_POLL_TIMEOUT;

		if (cmdlen > sizeof (cmd->pc_cdb) ||
		    statuslen > sizeof (cmd->pc_cmd_scb) ||
		    tgtlen > sizeof (cmd->pc_tgt_priv)) {
			if (cmd_ext_alloc(cmd, kf) != DDI_SUCCESS) {
				dev_err(s->s_dip, CE_WARN,
				    "extent allocation failed");
				goto out;
			}
		}
	} else {
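		/*
		 * The pkt is being reused; keep only the flags marked
		 * persistent across retries (PQI_FLAGS_PERSISTENT).
		 */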
		cmd = PKT2CMD(pkt);
		cmd->pc_flags &= PQI_FLAGS_PERSISTENT;
	}
	pqi_cmd_sm(cmd, PQI_CMD_INIT, B_TRUE);

	/* ---- Handle partial DMA transfer ---- */
	if (cmd->pc_nwin > 0) {
		if (++cmd->pc_winidx >= cmd->pc_nwin)
			return (NULL);
		if (ddi_dma_getwin(cmd->pc_dmahdl, cmd->pc_winidx,
		    &cmd->pc_dma_offset, &cmd->pc_dma_len, &cmd->pc_dmac,
		    &cmd->pc_dmaccount) == DDI_FAILURE)
			return (NULL);
		goto handle_dma_cookies;
	}

	/* ---- Setup data buffer ---- */
	if (bp != NULL && bp->b_bcount > 0 &&
	    (cmd->pc_flags & PQI_FLAG_DMA_VALID) == 0) {
		int	dma_flags;

		ASSERT(cmd->pc_dmahdl != NULL);

		if ((bp->b_flags & B_READ) != 0) {
			cmd->pc_flags |= PQI_FLAG_IO_READ;
			dma_flags = DDI_DMA_READ;
		} else {
			cmd->pc_flags &= ~PQI_FLAG_IO_READ;
			dma_flags = DDI_DMA_WRITE;
		}
		if ((flags & PKT_CONSISTENT) != 0) {
			cmd->pc_flags |= PQI_FLAG_IO_IOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}
		if ((flags & PKT_DMA_PARTIAL) != 0) {
			dma_flags |= DDI_DMA_PARTIAL;
		}
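		/*
		 * With DDI_DMA_PARTIAL the bind may map only part of
		 * the buffer; the remaining windows are stepped through
		 * via ddi_dma_getwin() on subsequent pqi_init_pkt()
		 * calls for this pkt.
		 */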
		rc = ddi_dma_buf_bind_handle(cmd->pc_dmahdl, bp,
		    dma_flags, callback, arg, &cmd->pc_dmac,
		    &cmd->pc_dmaccount);

		if (rc == DDI_DMA_PARTIAL_MAP) {
			(void) ddi_dma_numwin(cmd->pc_dmahdl, &cmd->pc_nwin);
			cmd->pc_winidx = 0;
			(void) ddi_dma_getwin(cmd->pc_dmahdl, cmd->pc_winidx,
			    &cmd->pc_dma_offset, &cmd->pc_dma_len,
			    &cmd->pc_dmac, &cmd->pc_dmaccount);
		} else if (rc != 0 && rc != DDI_DMA_MAPPED) {
			switch (rc) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			goto out;
		}

handle_dma_cookies:
		ASSERT(cmd->pc_dmaccount > 0);
		if (cmd->pc_dmaccount >
		    (sizeof (cmd->pc_cached_cookies) /
		    sizeof (ddi_dma_cookie_t))) {
			dev_err(s->s_dip, CE_WARN,
			    "invalid cookie count: %d", cmd->pc_dmaccount);
			goto out;
		}
		if (cmd->pc_dmaccount >
		    (s->s_sg_chain_buf_length / sizeof (pqi_sg_entry_t))) {
			dev_err(s->s_dip, CE_WARN,
			    "Cookie(0x%x) versus SG(0x%lx) mismatch",
			    cmd->pc_dmaccount,
			    (unsigned long int)s->s_sg_chain_buf_length /
			    sizeof (pqi_sg_entry_t));
			goto out;
		}

		cmd->pc_flags |= PQI_FLAG_DMA_VALID;
		cmd->pc_dma_count = cmd->pc_dmac.dmac_size;
		cmd->pc_cached_cookies[0] = cmd->pc_dmac;

		for (i = 1; i < cmd->pc_dmaccount; i++) {
			ddi_dma_nextcookie(cmd->pc_dmahdl, &cmd->pc_dmac);
			cmd->pc_cached_cookies[i] = cmd->pc_dmac;
			cmd->pc_dma_count += cmd->pc_dmac.dmac_size;
		}

		pkt->pkt_resid = bp->b_bcount - cmd->pc_dma_count;
	}

	return (pkt);
out:
	pqi_cmd_sm(cmd, PQI_CMD_FATAL, B_TRUE);
	if (is_new == B_TRUE)
		pqi_destroy_pkt(ap, pkt);
	return (NULL);
}

/*
 * tran_destroy_pkt(9E) - scsi_pkt(9S) deallocation
 *
 * Notes:
 *	- also frees DMA resources if allocated
 *	- implicit DMA synchronization
 */
/*ARGSUSED*/
static void
pqi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pqi_cmd_t	c = PKT2CMD(pkt);
	pqi_state_t	s = ADDR2PQI(ap);

	if ((c->pc_flags & PQI_FLAG_DMA_VALID) != 0) {
		c->pc_flags &= ~PQI_FLAG_DMA_VALID;
		(void) ddi_dma_unbind_handle(c->pc_dmahdl);
	}
	cmd_ext_free(c);
	pqi_cmd_sm(c, PQI_CMD_DESTRUCT, B_TRUE);

	kmem_cache_free(s->s_cmd_cache, c);
}

/*
 * tran_dmafree(9E) - deallocate DMA resources allocated for command
 */
/*ARGSUSED*/
static void
pqi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pqi_cmd_t	cmd = PKT2CMD(pkt);

	if (cmd->pc_flags & PQI_FLAG_DMA_VALID) {
		cmd->pc_flags &= ~PQI_FLAG_DMA_VALID;
		(void) ddi_dma_unbind_handle(cmd->pc_dmahdl);
	}
}

/*
 * tran_sync_pkt(9E) - explicit DMA synchronization
 */
/*ARGSUSED*/
static void
pqi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pqi_cmd_t	cmd = PKT2CMD(pkt);

	if (cmd->pc_dmahdl != NULL) {
		(void) ddi_dma_sync(cmd->pc_dmahdl, 0, 0,
		    (cmd->pc_flags & PQI_FLAG_IO_READ) ? DDI_DMA_SYNC_FORCPU :
		    DDI_DMA_SYNC_FORDEV);
	}
}

static int
pqi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	pqi_state_t	s = ADDR2PQI(ap);

	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
	    &s->s_mutex, &s->s_reset_notify_listf));
}

/*
 * Device / Hotplug control
 */
/*ARGSUSED*/
static int
pqi_quiesce(dev_info_t *dip)
{
	pqi_state_t	s;
	scsi_hba_tran_t	*tran;

	if ((tran = ddi_get_driver_private(dip)) == NULL ||
	    (s = TRAN2PQI(tran)) == NULL)
		return (-1);

	mutex_enter(&s->s_mutex);
	if (!HBA_IS_QUIESCED(s))
		s->s_flags |= PQI_HBA_QUIESCED;

	if (s->s_cmd_queue_len != 0) {
		/* ---- Outstanding commands present, wait ---- */
		s->s_flags |= PQI_HBA_QUIESCED_PENDING;
		cv_wait(&s->s_quiescedvar, &s->s_mutex);
		ASSERT0(s->s_cmd_queue_len);
	}
	mutex_exit(&s->s_mutex);

	return (0);
}

/*ARGSUSED*/
static int
pqi_unquiesce(dev_info_t *dip)
{
	pqi_state_t	s;
	scsi_hba_tran_t	*tran;

	if ((tran = ddi_get_driver_private(dip)) == NULL ||
	    (s = TRAN2PQI(tran)) == NULL)
		return (-1);

	mutex_enter(&s->s_mutex);
	if (!HBA_IS_QUIESCED(s)) {
		mutex_exit(&s->s_mutex);
		return (0);
	}
	ASSERT0(s->s_cmd_queue_len);
	s->s_flags &= ~PQI_HBA_QUIESCED;
	mutex_exit(&s->s_mutex);

	return (0);
}

/*ARGSUSED*/
static int
pqi_bus_config(dev_info_t *pdip, uint_t flag,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	scsi_hba_tran_t	*tran;
	pqi_state_t	s;
	int		circ = 0;
	int		circ1 = 0;
	int		ret = NDI_FAILURE;
	long		target;
	pqi_device_t	d;

	tran = ddi_get_driver_private(pdip);
	s = tran->tran_hba_private;
	if (pqi_is_offline(s))
		return (NDI_FAILURE);

	ndi_devi_enter(scsi_vhci_dip, &circ1);
	ndi_devi_enter(pdip, &circ);
	switch (op) {
	case BUS_CONFIG_ONE:
		if (decode_to_target(arg, &target) == B_TRUE) {
			d = pqi_find_target_dev(s, target);
			if (d != NULL)
				ret = config_one(pdip, s, d, childp);
		} else {
			dev_err(s->s_dip, CE_WARN, "Couldn't decode %s",
			    (char *)arg);
		}
		flag |= NDI_MDI_FALLBACK;
		break;

	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL:
		ret = pqi_config_all(pdip, s);
		break;
	default:
		ret = NDI_FAILURE;
	}
	if (ret == NDI_SUCCESS)
		ret = ndi_busop_bus_config(pdip, flag, op, arg, childp, 0);
	ndi_devi_exit(pdip, circ);
	ndi_devi_exit(scsi_vhci_dip, circ1);

	return (ret);
}

pqi_device_t
pqi_find_target_dev(pqi_state_t s, int target)
{
	pqi_device_t	d;

	/*
	 * Should switch to an indexed array of devices that can grow
	 * as needed.
	 */
	for (d = list_head(&s->s_devnodes); d != NULL;
	    d = list_next(&s->s_devnodes, d)) {
		if (d->pd_target == target && d->pd_online)
			break;
	}
	return (d);
}

int
pqi_config_all(dev_info_t *pdip, pqi_state_t s)
{
	pqi_device_t	d;

	/*
	 * Make sure we bring the available devices into play first. These
	 * might be brand new devices just hotplugged into the system or
	 * they could be devices previously offlined because either they
	 * were pulled from an enclosure or a cable to the enclosure was
	 * pulled.
	 */
	for (d = list_head(&s->s_devnodes); d != NULL;
	    d = list_next(&s->s_devnodes, d)) {
		if (d->pd_online)
			(void) config_one(pdip, s, d, NULL);
	}

	/*
	 * Now deal with devices that we had previously known about, but are
	 * no longer available.
	 */
	for (d = list_head(&s->s_devnodes); d != NULL;
	    d = list_next(&s->s_devnodes, d)) {
		if (!d->pd_online)
			(void) config_one(pdip, s, d, NULL);
	}

	return (NDI_SUCCESS);
}

void
pqi_quiesced_notify(pqi_state_t s)
{
	mutex_enter(&s->s_mutex);
	if (s->s_cmd_queue_len == 0 &&
	    (s->s_flags & PQI_HBA_QUIESCED_PENDING) != 0) {
		s->s_flags &= ~PQI_HBA_QUIESCED_PENDING;
		cv_broadcast(&s->s_quiescedvar);
	}
	mutex_exit(&s->s_mutex);
}

/*
 * []------------------------------------------------------------------[]
 * | Support routines used only by the tran_xxx routines                |
 * []------------------------------------------------------------------[]
 */

static void
cmd_timeout_scan(void *v)
{
	pqi_state_t	s = v;
	pqi_device_t	d;
	pqi_cmd_t	cmd;
	hrtime_t	now = gethrtime();
	list_t		to_scan;

	mutex_enter(&s->s_mutex);
	for (d = list_head(&s->s_devnodes); d != NULL;
	    d = list_next(&s->s_devnodes, d)) {

		list_create(&to_scan, sizeof (struct pqi_cmd),
		    offsetof(struct pqi_cmd, pc_list));

		mutex_enter(&d->pd_mutex);
		list_move_tail(&to_scan, &d->pd_cmd_list);

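		/*
		 * Commands are queued on pd_cmd_list in submission
		 * order, so expiration times are assumed to be
		 * non-decreasing along the chain; the scan stops at
		 * the first unexpired command.
		 */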
		while ((cmd = list_remove_head(&to_scan)) != NULL) {
			if (cmd->pc_expiration < now) {
				struct scsi_pkt	*pkt = CMD2PKT(cmd);

				pkt->pkt_reason = CMD_TIMEOUT;
				pkt->pkt_statistics = STAT_TIMEOUT;

				/*
				 * Insert the command back onto the list,
				 * with the lock held, so that the state
				 * machine can do its processing, which
				 * removes the command from the list and
				 * calls pkt_comp.
				 */
				list_insert_tail(&d->pd_cmd_list, cmd);
				pqi_cmd_sm(cmd, PQI_CMD_FATAL, B_FALSE);

			} else {
				/*
				 * Once a command's expiration date is in
				 * the future, this command and all
				 * remaining commands on the chain are in
				 * the future as well. So, add them back
				 * to the device command list, lock, stock,
				 * and barrel. Then stop processing for
				 * this command.
				 */
				list_insert_tail(&d->pd_cmd_list, cmd);
				list_move_tail(&d->pd_cmd_list, &to_scan);
				break;
			}
		}
		mutex_exit(&d->pd_mutex);
	}

	/*
	 * Certain commands are issued and run serially through the driver.
	 * These should all complete no matter what, since they are commands
	 * which are actually sent to the HBA. Yet, there have been cases
	 * where the HBA failed to respond. So, if the current time is past
	 * the expiration time, mark the I/O as having a timeout error and
	 * call its completion routine.
	 */
	if (s->s_sync_io != NULL &&
	    s->s_sync_expire < now) {
		s->s_sync_io->io_status = PQI_DATA_IN_OUT_TIMEOUT;
		s->s_sync_io->io_cb(s->s_sync_io, s->s_sync_io->io_context);
	}

	mutex_exit(&s->s_mutex);
	s->s_cmd_timeout = timeout(cmd_timeout_scan, s,
	    CMD_TIMEOUT_SCAN_SECS * drv_usectohz(MICROSEC));
}

static boolean_t
decode_to_target(void *arg, long *target)
{
	char	*ptr = arg;
	char	*tgt_ptr;

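	/*
	 * The unit address is assumed to end in a hex target id: after
	 * the last '@' for disk nodes and after the last ',' for
	 * enclosure nodes.
	 */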
	if (strncmp(NAME_DISK, ptr, sizeof (NAME_DISK) - 1) == 0) {
		if ((tgt_ptr = strrchr(ptr, '@')) != NULL &&
		    ddi_strtol(tgt_ptr + 1, NULL, 16, target) == 0)
			return (B_TRUE);
		else
			return (B_FALSE);
	}
	if (strncmp(NAME_ENCLOSURE, ptr, sizeof (NAME_ENCLOSURE) - 1) == 0) {
		if ((tgt_ptr = strrchr(ptr, ',')) != NULL &&
		    ddi_strtol(tgt_ptr + 1, NULL, 16, target) == 0)
			return (B_TRUE);
		else
			return (B_FALSE);
	}
	return (B_FALSE);
}

/*ARGSUSED*/
static void
abort_all(struct scsi_address *ap, pqi_state_t s)
{
	pqi_device_t	devp;

	if ((devp = pqi_find_target_dev(s, ap->a_target)) == NULL)
		return;

	pqi_fail_drive_cmds(devp);
}

static boolean_t
create_phys_lun(pqi_state_t s, pqi_device_t d,
    struct scsi_inquiry *inq, dev_info_t **childp)
{
	char		**compatible = NULL;
	char		*nodename = NULL;
	char		*scsi_binding_set;
	int		ncompatible = 0;
	dev_info_t	*dip;
	char		*wwn_str;
	int		rval;

	/* ---- get the 'scsi-binding-set' property ---- */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, s->s_dip,
	    DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
	    &scsi_binding_set) != DDI_PROP_SUCCESS) {
		scsi_binding_set = NULL;
	}

	/* ---- At this point we have a new device not in our list ---- */
	scsi_hba_nodename_compatible_get(inq, scsi_binding_set,
	    inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
	if (scsi_binding_set != NULL)
		ddi_prop_free(scsi_binding_set);
	if (nodename == NULL)
		return (B_FALSE);

	if (ndi_devi_alloc(s->s_dip, nodename, DEVI_SID_NODEID, &dip) !=
	    NDI_SUCCESS) {
		dev_err(s->s_dip, CE_WARN, "failed to alloc device instance");
		goto free_nodename;
	}

	d->pd_dip = dip;
	d->pd_pip = NULL;

	if (ndi_prop_update_int(DDI_DEV_T_NONE, dip, TARGET_PROP,
	    ddi_get_instance(s->s_dip)) != DDI_PROP_SUCCESS) {
		goto free_devi;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, dip, LUN_PROP, d->pd_target) !=
	    DDI_PROP_SUCCESS) {
		goto free_devi;
	}

	if (ndi_prop_update_int64(DDI_DEV_T_NONE, dip, LUN64_PROP,
	    (int64_t)d->pd_target) != DDI_PROP_SUCCESS) {
		goto free_devi;
	}

	if (ndi_prop_update_string_array(DDI_DEV_T_NONE, dip, COMPAT_PROP,
	    compatible, ncompatible) != DDI_PROP_SUCCESS) {
		goto free_devi;
	}

	wwn_str = kmem_zalloc(MAX_NAME_PROP_SIZE, KM_SLEEP);
	(void) snprintf(wwn_str, MAX_NAME_PROP_SIZE, "w%016" PRIx64,
	    d->pd_wwid);
	rval = ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    SCSI_ADDR_PROP_TARGET_PORT, wwn_str);
	kmem_free(wwn_str, MAX_NAME_PROP_SIZE);
	if (rval != DDI_PROP_SUCCESS)
		goto free_devi;

	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, NDI_GUID, d->pd_guid) !=
	    DDI_PROP_SUCCESS) {
		goto free_devi;
	}

	if (ndi_prop_update_int(DDI_DEV_T_NONE, dip, "pm-capable", 1) !=
	    DDI_PROP_SUCCESS) {
		goto free_devi;
	}

	if (ndi_devi_online(dip, NDI_ONLINE_ATTACH) != NDI_SUCCESS)
		goto free_devi;

	if (childp != NULL)
		*childp = dip;

	scsi_hba_nodename_compatible_free(nodename, compatible);
	return (B_TRUE);

free_devi:
	ndi_prop_remove_all(dip);
	(void) ndi_devi_free(dip);
	d->pd_dip = NULL;
free_nodename:
	scsi_hba_nodename_compatible_free(nodename, compatible);
	return (B_FALSE);
}

static boolean_t
create_virt_lun(pqi_state_t s, pqi_device_t d, struct scsi_inquiry *inq,
    dev_info_t **childp)
{
	char		*nodename;
	char		**compatible;
	int		ncompatible;
	int		rval;
	mdi_pathinfo_t	*pip = NULL;
	char		*guid_ptr;
	char		wwid_str[17];
	char		tgt_str[17];
	int		instance = ddi_get_instance(s->s_dip);
	dev_info_t	*lun_dip;
	char		*old_guid;

	if (d->pd_pip_offlined != NULL) {
		lun_dip = mdi_pi_get_client(d->pd_pip_offlined);
		ASSERT(lun_dip != NULL);

		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
		    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
		    MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
			if (strncmp(d->pd_guid, old_guid,
			    strlen(d->pd_guid)) == 0) {
				/* ---- Same path came back online ---- */
				(void) ddi_prop_free(old_guid);
				if (mdi_pi_online(d->pd_pip_offlined, 0) ==
				    DDI_SUCCESS) {
					d->pd_pip = d->pd_pip_offlined;
					d->pd_pip_offlined = NULL;
					return (B_TRUE);
				} else {
					return (B_FALSE);
				}
			} else {
				/* ---- Different device in slot ---- */
				(void) ddi_prop_free(old_guid);
				if (mdi_pi_offline(d->pd_pip_offlined, 0) !=
				    DDI_SUCCESS) {
					return (B_FALSE);
				}
				if (mdi_pi_free(d->pd_pip_offlined,
				    MDI_CLIENT_FLAGS_NO_EVENT) != MDI_SUCCESS) {
					return (B_FALSE);
				}
				d->pd_pip_offlined = NULL;
			}
		} else {
			dev_err(s->s_dip, CE_WARN, "Can't get client-guid "
			    "property for lun %d", d->pd_target);
			return (B_FALSE);
		}
	}

	scsi_hba_nodename_compatible_get(inq, "vhci", inq->inq_dtype, NULL,
	    &nodename, &compatible, &ncompatible);
	if (nodename == NULL)
		return (B_FALSE);

	if (d->pd_guid != NULL) {
		guid_ptr = d->pd_guid;
	} else {
		(void) snprintf(wwid_str, sizeof (wwid_str), "%" PRIx64,
		    d->pd_wwid);
		guid_ptr = wwid_str;
	}
	(void) snprintf(tgt_str, sizeof (tgt_str), "%x", d->pd_target);
	rval = mdi_pi_alloc_compatible(s->s_dip, nodename, guid_ptr, tgt_str,
	    compatible, ncompatible, 0, &pip);
	if (rval == MDI_SUCCESS) {
		mdi_pi_set_phci_private(pip, (caddr_t)d);

		if (mdi_prop_update_string(pip, MDI_GUID, guid_ptr) !=
		    DDI_SUCCESS) {
			dev_err(s->s_dip, CE_WARN,
			    "unable to create property (MDI_GUID) for lun %d",
			    d->pd_target);
			goto cleanup;
		}

		if (mdi_prop_update_int(pip, TARGET_PROP, instance) !=
		    DDI_SUCCESS) {
			dev_err(s->s_dip, CE_WARN,
			    "unable to create property (%s) for lun %d",
			    TARGET_PROP, d->pd_target);
			goto cleanup;
		}

		if (mdi_prop_update_int(pip, LUN_PROP, d->pd_target) !=
		    DDI_SUCCESS) {
			dev_err(s->s_dip, CE_WARN,
			    "unable to create property (%s) for lun %d",
			    LUN_PROP, d->pd_target);
			goto cleanup;
		}

		if (mdi_prop_update_string_array(pip, COMPAT_PROP,
		    compatible, ncompatible) != DDI_SUCCESS) {
			dev_err(s->s_dip, CE_WARN,
			    "unable to create property (%s) for lun %d",
			    COMPAT_PROP, d->pd_target);
			goto cleanup;
		}

		if (mdi_pi_online(pip, 0) == MDI_NOT_SUPPORTED)
			goto cleanup;

		d->pd_dip = NULL;
		d->pd_pip = pip;
	}

	scsi_hba_nodename_compatible_free(nodename, compatible);
	if (pip == NULL)
		return (B_FALSE);
	if (childp != NULL)
		*childp = mdi_pi_get_client(pip);
	return (B_TRUE);
cleanup:
	scsi_hba_nodename_compatible_free(nodename, compatible);
	d->pd_pip = NULL;
	d->pd_dip = NULL;
	(void) mdi_prop_remove(pip, NULL);
	(void) mdi_pi_free(pip, 0);
	return (B_FALSE);
}

static int
config_one(dev_info_t *pdip, pqi_state_t s, pqi_device_t d,
    dev_info_t **childp)
{
	struct scsi_inquiry	inq;
	boolean_t		rval;

	/* ---- For now ignore logical devices ---- */
	if (is_physical_dev(d) == B_FALSE)
		return (NDI_FAILURE);

	/* ---- Inquiry target ---- */
	if (!d->pd_online ||
	    pqi_scsi_inquiry(s, d, 0, &inq, sizeof (inq)) == B_FALSE) {

		pqi_fail_drive_cmds(d);

		if (d->pd_dip != NULL) {
			(void) ndi_devi_offline(d->pd_dip,
			    NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE | NDI_DEVI_GONE);
			d->pd_dip = NULL;
		} else if (d->pd_pip != NULL) {
			(void) mdi_pi_offline(d->pd_pip, 0);
			d->pd_pip_offlined = d->pd_pip;
			d->pd_pip = NULL;
		}
		return (NDI_FAILURE);
	} else if (d->pd_dip != NULL) {
		if (childp != NULL)
			*childp = d->pd_dip;
		return (NDI_SUCCESS);
	} else if (d->pd_pip != NULL) {
		if (childp != NULL)
			*childp = mdi_pi_get_client(d->pd_pip);
		return (NDI_SUCCESS);
	}

	d->pd_parent = pdip;
	if (s->s_enable_mpxio)
		rval = create_virt_lun(s, d, &inq, childp);

	if (!s->s_enable_mpxio || (rval == B_FALSE))
		rval = create_phys_lun(s, d, &inq, childp);

	return ((rval == B_TRUE) ? NDI_SUCCESS : NDI_FAILURE);
}

static void
cmd_ext_free(pqi_cmd_t cmd)
{
	struct scsi_pkt	*pkt = CMD2PKT(cmd);

	if ((cmd->pc_flags & PQI_FLAG_CDB_EXT) != 0) {
		kmem_free(pkt->pkt_cdbp, cmd->pc_cmdlen);
		cmd->pc_flags &= ~PQI_FLAG_CDB_EXT;
	}
	if ((cmd->pc_flags & PQI_FLAG_SCB_EXT) != 0) {
		kmem_free(pkt->pkt_scbp, cmd->pc_statuslen);
		cmd->pc_flags &= ~PQI_FLAG_SCB_EXT;
	}
	if ((cmd->pc_flags & PQI_FLAG_PRIV_EXT) != 0) {
		kmem_free(pkt->pkt_private, cmd->pc_tgtlen);
		cmd->pc_flags &= ~PQI_FLAG_PRIV_EXT;
	}
}

static int
cmd_ext_alloc(pqi_cmd_t cmd, int kf)
{
	struct scsi_pkt	*pkt = CMD2PKT(cmd);
	void		*buf;

	if (cmd->pc_cmdlen > sizeof (cmd->pc_cdb)) {
		if ((buf = kmem_zalloc(cmd->pc_cmdlen, kf)) == NULL)
			return (DDI_FAILURE);
		pkt->pkt_cdbp = buf;
		cmd->pc_flags |= PQI_FLAG_CDB_EXT;
	}

	if (cmd->pc_statuslen > sizeof (cmd->pc_cmd_scb)) {
		if ((buf = kmem_zalloc(cmd->pc_statuslen, kf)) == NULL)
			goto out;
		pkt->pkt_scbp = buf;
		cmd->pc_flags |= PQI_FLAG_SCB_EXT;
	}

	if (cmd->pc_tgtlen > sizeof (cmd->pc_tgt_priv)) {
		if ((buf = kmem_zalloc(cmd->pc_tgtlen, kf)) == NULL)
			goto out;
		pkt->pkt_private = buf;
		cmd->pc_flags |= PQI_FLAG_PRIV_EXT;
	}

	return (DDI_SUCCESS);

out:
	cmd_ext_free(cmd);

	return (DDI_FAILURE);
}

static boolean_t
is_physical_dev(pqi_device_t d)
{
	return (d->pd_phys_dev ? B_TRUE : B_FALSE);
}