Print this page
NEX-17944 HBA drivers don't need the redundant devfs_clean step
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
NEX-7908 useless assert in pvscsi_remove_from_queue()
Reviewed by: Dan Fields <dan.fields@nexenta.com>
NEX-6869 pvscsi`pvscsi_bus_config uses wrong base while configuring targets
NEX-6870 pvscsi panics while simultaneously deleting large number of disks
Reviewed by: Hans Rosenfeld <hans.rosenfeld@nexenta.com>
NEX-6382 rework pvscsi
Reviewed by: Hans Rosenfeld <hans.rosenfeld@nexenta.com>
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
re #10804 rb3697 Mem-leaks in libsldap (fix lint)
re #10804 rb3697 Mem-leaks in libsldap
Modifications to clean up gcc4 warnings for both with- and without-
closed nza-kernel builds.
re #10787 rb3347 Integrate pvscsi driver to nza-kernel
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/intel/io/scsi/adapters/pvscsi/pvscsi.c
+++ new/usr/src/uts/intel/io/scsi/adapters/pvscsi/pvscsi.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 - * Copyright 2016 Nexenta Systems, Inc.
13 + * Copyright 2018 Nexenta Systems, Inc.
14 14 */
15 15
16 16 #include <sys/atomic.h>
17 17 #include <sys/cmn_err.h>
18 18 #include <sys/conf.h>
19 19 #include <sys/cpuvar.h>
20 20 #include <sys/ddi.h>
21 21 #include <sys/errno.h>
22 -#include <sys/fs/dv_node.h>
23 22 #include <sys/kmem.h>
24 23 #include <sys/kmem_impl.h>
25 24 #include <sys/list.h>
26 25 #include <sys/modctl.h>
27 26 #include <sys/pci.h>
28 27 #include <sys/scsi/scsi.h>
29 28 #include <sys/sunddi.h>
30 29 #include <sys/sysmacros.h>
31 30 #include <sys/time.h>
32 31 #include <sys/types.h>
33 32
34 33 #include "pvscsi.h"
35 34 #include "pvscsi_var.h"
36 35
/* Tunables; patchable via /etc/system before the driver loads. */
int pvscsi_enable_msi = 1;	/* use MSI interrupts when available */
int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;

/* Forward declaration; defined later in this file. */
static int pvscsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);

/* Per-instance soft-state anchor (presumably ddi_soft_state(9F) — allocation not visible in this chunk). */
static void *pvscsi_sstate;
44 43
/* HBA DMA attributes */
static ddi_dma_attr_t pvscsi_hba_dma_attr = {
	.dma_attr_version = DMA_ATTR_V0,
	.dma_attr_addr_lo = 0x0000000000000000ull,
	.dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull,
	.dma_attr_count_max = 0x000000007FFFFFFFull,
	.dma_attr_align = 0x0000000000000001ull,
	.dma_attr_burstsizes = 0x7ff,
	.dma_attr_minxfer = 0x00000001u,
	.dma_attr_maxxfer = 0x00000000FFFFFFFFull,
	.dma_attr_seg = 0x00000000FFFFFFFFull,
	.dma_attr_sgllen = 1,
	.dma_attr_granular = 0x00000200u,	/* 512-byte granularity */
	.dma_attr_flags = 0
};

/* DMA attributes for req/comp rings */
static ddi_dma_attr_t pvscsi_ring_dma_attr = {
	.dma_attr_version = DMA_ATTR_V0,
	.dma_attr_addr_lo = 0x0000000000000000ull,
	.dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull,
	.dma_attr_count_max = 0x000000007FFFFFFFull,
	.dma_attr_align = 0x0000000000000001ull,
	.dma_attr_burstsizes = 0x7ff,
	.dma_attr_minxfer = 0x00000001u,
	.dma_attr_maxxfer = 0x00000000FFFFFFFFull,
	.dma_attr_seg = 0x00000000FFFFFFFFull,
	.dma_attr_sgllen = 1,
	.dma_attr_granular = 0x00000001u,	/* byte granularity */
	.dma_attr_flags = 0
};

/* DMA attributes for buffer I/O; single request may carry a full SG list */
static ddi_dma_attr_t pvscsi_io_dma_attr = {
	.dma_attr_version = DMA_ATTR_V0,
	.dma_attr_addr_lo = 0x0000000000000000ull,
	.dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull,
	.dma_attr_count_max = 0x000000007FFFFFFFull,
	.dma_attr_align = 0x0000000000000001ull,
	.dma_attr_burstsizes = 0x7ff,
	.dma_attr_minxfer = 0x00000001u,
	.dma_attr_maxxfer = 0x00000000FFFFFFFFull,
	.dma_attr_seg = 0x00000000FFFFFFFFull,
	.dma_attr_sgllen = PVSCSI_MAX_SG_SIZE,
	.dma_attr_granular = 0x00000200u,
	.dma_attr_flags = 0
};

/* Access attributes for the memory-mapped register window (little-endian) */
static ddi_device_acc_attr_t pvscsi_mmio_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/* Access attributes for DMA-able memory; NOTE(review): V0 vs. V1 above — presumably intentional, confirm */
static ddi_device_acc_attr_t pvscsi_dma_attrs = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC,
};
106 105
/*
 * Append cmd to its softc's active-command queue and bump the queue
 * length.  Caller must hold pvs->mutex; the command must not already
 * be linked on the queue.
 */
static void
pvscsi_add_to_queue(pvscsi_cmd_t *cmd)
{
	pvscsi_softc_t *pvs = cmd->cmd_pvs;

	ASSERT(pvs != NULL);
	ASSERT(mutex_owned(&pvs->mutex));
	ASSERT(!list_link_active(&(cmd)->cmd_queue_node));

	list_insert_tail(&pvs->cmd_queue, cmd);
	pvs->cmd_queue_len++;
}
119 118
/*
 * Remove cmd from the active-command queue if it is linked there.
 * Caller must hold pvs->mutex.  The command may legitimately no longer
 * be queued (e.g. already pulled off by an abort/completion path), so
 * absence is tolerated rather than asserted.
 */
static void
pvscsi_remove_from_queue(pvscsi_cmd_t *cmd)
{
	pvscsi_softc_t *pvs = cmd->cmd_pvs;

	ASSERT(pvs != NULL);
	ASSERT(mutex_owned(&pvs->mutex));

	if (list_link_active(&cmd->cmd_queue_node)) {
		ASSERT(pvs->cmd_queue_len > 0);
		list_remove(&pvs->cmd_queue, cmd);
		pvs->cmd_queue_len--;
	}
}
135 133
136 134 static uint64_t
137 135 pvscsi_map_ctx(pvscsi_softc_t *pvs, pvscsi_cmd_ctx_t *io_ctx)
138 136 {
139 137 return (io_ctx - pvs->cmd_ctx + 1);
140 138 }
141 139
142 140 static pvscsi_cmd_ctx_t *
143 141 pvscsi_lookup_ctx(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd)
144 142 {
145 143 pvscsi_cmd_ctx_t *ctx, *end;
146 144
147 145 end = &pvs->cmd_ctx[pvs->req_depth];
148 146 for (ctx = pvs->cmd_ctx; ctx < end; ctx++) {
149 147 if (ctx->cmd == cmd)
150 148 return (ctx);
151 149 }
152 150
153 151 return (NULL);
154 152 }
155 153
156 154 static pvscsi_cmd_ctx_t *
157 155 pvscsi_resolve_ctx(pvscsi_softc_t *pvs, uint64_t ctx)
158 156 {
159 157 if (ctx > 0 && ctx <= pvs->req_depth)
160 158 return (&pvs->cmd_ctx[ctx - 1]);
161 159 else
162 160 return (NULL);
163 161 }
164 162
165 163 static boolean_t
166 164 pvscsi_acquire_ctx(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd)
167 165 {
168 166 pvscsi_cmd_ctx_t *ctx;
169 167
170 168 if (list_is_empty(&pvs->cmd_ctx_pool))
171 169 return (B_FALSE);
172 170
173 171 ctx = (pvscsi_cmd_ctx_t *)list_remove_head(&pvs->cmd_ctx_pool);
174 172 ASSERT(ctx != NULL);
175 173
176 174 ctx->cmd = cmd;
177 175 cmd->ctx = ctx;
178 176
179 177 return (B_TRUE);
180 178 }
181 179
182 180 static void
183 181 pvscsi_release_ctx(pvscsi_cmd_t *cmd)
184 182 {
185 183 pvscsi_softc_t *pvs = cmd->cmd_pvs;
186 184
187 185 ASSERT(mutex_owned(&pvs->mutex));
188 186
189 187 cmd->ctx->cmd = NULL;
190 188 list_insert_tail(&pvs->cmd_ctx_pool, cmd->ctx);
191 189 cmd->ctx = NULL;
192 190 }
193 191
194 192 static uint32_t
195 193 pvscsi_reg_read(pvscsi_softc_t *pvs, uint32_t offset)
196 194 {
197 195 uint32_t ret;
198 196
199 197 ASSERT((offset & (sizeof (uint32_t) - 1)) == 0);
200 198
201 199 ret = ddi_get32(pvs->mmio_handle,
202 200 (uint32_t *)(pvs->mmio_base + offset));
203 201
204 202 return (ret);
205 203 }
206 204
207 205 static void
208 206 pvscsi_reg_write(pvscsi_softc_t *pvs, uint32_t offset, uint32_t value)
209 207 {
210 208 ASSERT((offset & (sizeof (uint32_t) - 1)) == 0);
211 209
212 210 ddi_put32(pvs->mmio_handle, (uint32_t *)(pvs->mmio_base + offset),
213 211 value);
214 212 }
215 213
216 214 static void
217 215 pvscsi_write_cmd_desc(pvscsi_softc_t *pvs, uint32_t cmd, void *desc, size_t len)
218 216 {
219 217 len /= sizeof (uint32_t);
220 218 pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_COMMAND, cmd);
221 219 ddi_rep_put32(pvs->mmio_handle, (uint32_t *)desc,
222 220 (uint32_t *)(pvs->mmio_base + PVSCSI_REG_OFFSET_COMMAND_DATA),
223 221 len, DDI_DEV_NO_AUTOINCR);
224 222 }
225 223
/* Read and return the interrupt status register. */
static uint32_t
pvscsi_read_intr_status(pvscsi_softc_t *pvs)
{
	return (pvscsi_reg_read(pvs, PVSCSI_REG_OFFSET_INTR_STATUS));
}
231 229
/* Write the interrupt status register (acknowledge the given bits). */
static void
pvscsi_write_intr_status(pvscsi_softc_t *pvs, uint32_t val)
{
	pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}
237 235
/*
 * Disable device interrupts.  Mask/unmask calls nest via
 * intr_lock_counter: only the first of N nested mask calls actually
 * clears the interrupt mask register.
 */
static void
pvscsi_mask_intr(pvscsi_softc_t *pvs)
{
	mutex_enter(&pvs->intr_mutex);

	VERIFY(pvs->intr_lock_counter >= 0);

	if (++pvs->intr_lock_counter == 1)
		pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_MASK, 0);

	mutex_exit(&pvs->intr_mutex);
}
250 248
/*
 * Re-enable device interrupts; pairs with pvscsi_mask_intr().  Only
 * the outermost unmask (counter dropping to zero) rewrites the mask
 * register with the supported interrupt sources.
 */
static void
pvscsi_unmask_intr(pvscsi_softc_t *pvs)
{
	mutex_enter(&pvs->intr_mutex);

	VERIFY(pvs->intr_lock_counter > 0);

	if (--pvs->intr_lock_counter == 0) {
		pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_MASK,
		    PVSCSI_INTR_CMPL_MASK | PVSCSI_INTR_MSG_MASK);
	}

	mutex_exit(&pvs->intr_mutex);
}
265 263
/* Ask the device to perform a full adapter reset. */
static void
pvscsi_reset_hba(pvscsi_softc_t *pvs)
{
	pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
}
271 269
/* Ask the device to reset the (virtual) SCSI bus. */
static void
pvscsi_reset_bus(pvscsi_softc_t *pvs)
{
	pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_RESET_BUS, NULL, 0);
}
277 275
/* Kick the device to process queued non-read/write requests. */
static void
pvscsi_submit_nonrw_io(pvscsi_softc_t *pvs)
{
	pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
}
283 281
/* Kick the device to process queued read/write requests. */
static void
pvscsi_submit_rw_io(pvscsi_softc_t *pvs)
{
	pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
}
289 287
290 288
/*
 * Probe a target with a polled 6-byte INQUIRY (LUN 0).  On success
 * (return 0) the inquiry data is copied into *inq when inq is
 * non-NULL.  Returns -1 on allocation failure, or the non-zero
 * scsi_poll() result when the command itself fails.
 */
static int
pvscsi_inquiry_target(pvscsi_softc_t *pvs, int target, struct scsi_inquiry *inq)
{
	int len = sizeof (struct scsi_inquiry);
	int ret = -1;
	struct buf *b;
	struct scsi_address ap;
	struct scsi_pkt *pkt;
	uint8_t cdb[CDB_GROUP0];

	/* Build a scsi_address by hand; no child devinfo may exist yet. */
	ap.a_hba_tran = pvs->tran;
	ap.a_target = (ushort_t)target;
	ap.a_lun = (uchar_t)0;

	if ((b = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL, len,
	    B_READ, NULL_FUNC, NULL)) == NULL)
		return (-1);

	if ((pkt = scsi_init_pkt(&ap, (struct scsi_pkt *)NULL, b,
	    CDB_GROUP0, sizeof (struct scsi_arq_status), 0, 0,
	    NULL_FUNC, NULL)) == NULL)
		goto free_buf;

	/* INQUIRY CDB; len < 256, so only cdb[4] carries the length. */
	cdb[0] = SCMD_INQUIRY;
	cdb[1] = 0;
	cdb[2] = 0;
	cdb[3] = (len & 0xff00) >> 8;
	cdb[4] = (len & 0x00ff);
	cdb[5] = 0;

	if (inq != NULL)
		bzero(inq, sizeof (*inq));
	bcopy(cdb, pkt->pkt_cdbp, CDB_GROUP0);
	bzero((struct scsi_inquiry *)b->b_un.b_addr, sizeof (*inq));

	if ((ret = scsi_poll(pkt)) == 0 && inq != NULL)
		bcopy(b->b_un.b_addr, inq, sizeof (*inq));

	scsi_destroy_pkt(pkt);

free_buf:
	scsi_free_consistent_buf(b);

	return (ret);
}
336 334
/*
 * (Re)configure a single target under pdip: probe it with INQUIRY and
 * either attach a new child devinfo node, hand back the existing node,
 * or offline/remove a node whose target has disappeared.  Caller must
 * hold the NDI busy lock on pdip.  Returns NDI_SUCCESS/NDI_FAILURE;
 * on success *childp (if non-NULL) receives the child dip.
 */
static int
pvscsi_config_one(dev_info_t *pdip, pvscsi_softc_t *pvs, int target,
    dev_info_t **childp)
{
	char **compatible = NULL;
	char *nodename = NULL;
	dev_info_t *dip;
	int inqrc;
	int ncompatible = 0;
	pvscsi_device_t *devnode;
	struct scsi_inquiry inq;

	ASSERT(DEVI_BUSY_OWNED(pdip));

	/* Inquiry target */
	inqrc = pvscsi_inquiry_target(pvs, target, &inq);

	/* Find devnode */
	for (devnode = list_head(&pvs->devnodes); devnode != NULL;
	    devnode = list_next(&pvs->devnodes, devnode)) {
		if (devnode->target == target)
			break;
	}

	if (devnode != NULL) {
		if (inqrc != 0) {
			/*
			 * Target disappeared: offline and remove its node.
			 * NDI_DEVFS_CLEAN makes the NDI framework do the
			 * devfs cleanup, so no separate devfs_clean() call
			 * is needed.
			 */
			(void) ndi_devi_offline(devnode->pdip,
			    NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE);
			list_remove(&pvs->devnodes, devnode);
			kmem_free(devnode, sizeof (*devnode));
		} else if (childp != NULL) {
			/* Target exists */
			*childp = devnode->pdip;
		}
		return (NDI_SUCCESS);
	} else if (inqrc != 0) {
		/* Target doesn't exist */
		return (NDI_FAILURE);
	}

	/* New target: derive node name and compatible list from inquiry. */
	scsi_hba_nodename_compatible_get(&inq, NULL, inq.inq_dtype, NULL,
	    &nodename, &compatible, &ncompatible);
	if (nodename == NULL)
		goto free_nodename;

	if (ndi_devi_alloc(pdip, nodename, DEVI_SID_NODEID,
	    &dip) != NDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to alloc device instance");
		goto free_nodename;
	}

	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "device-type", "scsi") != DDI_PROP_SUCCESS ||
	    ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "target", target) != DDI_PROP_SUCCESS ||
	    ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "lun", 0) != DDI_PROP_SUCCESS ||
	    ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "pm-capable", 1) != DDI_PROP_SUCCESS ||
	    ndi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
		dev_err(pvs->dip, CE_WARN,
		    "!failed to update props for target %d", target);
		goto free_devi;
	}

	if ((devnode = kmem_zalloc(sizeof (*devnode), KM_NOSLEEP)) == NULL)
		goto free_devi;

	if (ndi_devi_online(dip, NDI_ONLINE_ATTACH) != NDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to online target %d",
		    target);
		kmem_free(devnode, sizeof (*devnode));
		goto free_devi;
	}

	devnode->target = target;
	devnode->pdip = dip;
	devnode->parent = pdip;
	list_insert_tail(&pvs->devnodes, devnode);

	if (childp != NULL)
		*childp = dip;

	scsi_hba_nodename_compatible_free(nodename, compatible);

	return (NDI_SUCCESS);

free_devi:
	ndi_prop_remove_all(dip);
	(void) ndi_devi_free(dip);
free_nodename:
	scsi_hba_nodename_compatible_free(nodename, compatible);

	return (NDI_FAILURE);
}
445 431
446 432 static int
447 433 pvscsi_config_all(dev_info_t *pdip, pvscsi_softc_t *pvs)
448 434 {
449 435 int target;
450 436
451 437 for (target = 0; target < PVSCSI_MAXTGTS; target++) {
452 438 /* ndi_devi_enter is done in pvscsi_bus_config */
453 439 (void) pvscsi_config_one(pdip, pvs, target, NULL);
454 440 }
455 441
456 442 return (NDI_SUCCESS);
457 443 }
458 444
459 445 static pvscsi_cmd_t *
460 446 pvscsi_process_comp_ring(pvscsi_softc_t *pvs)
461 447 {
462 448 pvscsi_cmd_t **pnext_cmd = NULL;
463 449 pvscsi_cmd_t *cmd;
464 450 pvscsi_cmd_t *head = NULL;
465 451 struct PVSCSIRingsState *sdesc = RINGS_STATE(pvs);
466 452 uint32_t cmp_ne = sdesc->cmpNumEntriesLog2;
467 453
468 454 ASSERT(mutex_owned(&pvs->rx_mutex));
469 455
470 456 while (sdesc->cmpConsIdx != sdesc->cmpProdIdx) {
471 457 pvscsi_cmd_ctx_t *ctx;
472 458 struct PVSCSIRingCmpDesc *cdesc;
473 459
474 460 cdesc = CMP_RING(pvs) + (sdesc->cmpConsIdx & MASK(cmp_ne));
475 461 membar_consumer();
476 462
477 463 ctx = pvscsi_resolve_ctx(pvs, cdesc->context);
478 464 ASSERT(ctx != NULL);
479 465
480 466 if ((cmd = ctx->cmd) != NULL) {
481 467 cmd->next_cmd = NULL;
482 468
483 469 /* Save command status for further processing */
484 470 cmd->cmp_stat.host_status = cdesc->hostStatus;
485 471 cmd->cmp_stat.scsi_status = cdesc->scsiStatus;
486 472 cmd->cmp_stat.data_len = cdesc->dataLen;
487 473
488 474 /* Mark this command as arrived from hardware */
489 475 cmd->flags |= PVSCSI_FLAG_HW_STATUS;
490 476
491 477 if (head == NULL) {
492 478 head = cmd;
493 479 head->tail_cmd = cmd;
494 480 } else {
495 481 head->tail_cmd = cmd;
496 482 }
497 483
498 484 if (pnext_cmd == NULL) {
499 485 pnext_cmd = &cmd->next_cmd;
500 486 } else {
501 487 *pnext_cmd = cmd;
502 488 pnext_cmd = &cmd->next_cmd;
503 489 }
504 490 }
505 491
506 492 membar_consumer();
507 493 sdesc->cmpConsIdx++;
508 494 }
509 495
510 496 return (head);
511 497 }
512 498
/*
 * Take one message off the device message ring, if any.  Only device
 * added/removed notifications are understood; for those an allocated
 * pvscsi_msg_t is returned (caller frees).  Returns NULL when the ring
 * is empty, allocation fails, or the message type is unknown.  Caller
 * must hold rx_mutex.
 *
 * NOTE(review): on an unknown type (and on allocation failure) the
 * function returns before msgConsIdx is advanced, so the descriptor is
 * not consumed and will be seen again — confirm this is intended.
 */
static pvscsi_msg_t *
pvscsi_process_msg_ring(pvscsi_softc_t *pvs)
{
	pvscsi_msg_t *msg;
	struct PVSCSIRingsState *sdesc = RINGS_STATE(pvs);
	struct PVSCSIRingMsgDesc *mdesc;
	struct PVSCSIMsgDescDevStatusChanged *desc;
	uint32_t msg_ne = sdesc->msgNumEntriesLog2;

	ASSERT(mutex_owned(&pvs->rx_mutex));

	if (sdesc->msgProdIdx == sdesc->msgConsIdx)
		return (NULL);

	mdesc = MSG_RING(pvs) + (sdesc->msgConsIdx & MASK(msg_ne));
	membar_consumer();

	switch (mdesc->type) {
	case PVSCSI_MSG_DEV_ADDED:
	case PVSCSI_MSG_DEV_REMOVED:
		desc = (struct PVSCSIMsgDescDevStatusChanged *)mdesc;
		msg = kmem_alloc(sizeof (pvscsi_msg_t), KM_NOSLEEP);
		if (msg == NULL)
			return (NULL);
		msg->msg_pvs = pvs;
		msg->type = mdesc->type;
		msg->target = desc->target;
		break;
	default:
		dev_err(pvs->dip, CE_WARN, "!unknown msg type: %d",
		    mdesc->type);
		return (NULL);
	}

	/* Consume the descriptor only after its payload has been copied. */
	membar_consumer();
	sdesc->msgConsIdx++;

	return (msg);
}
552 538
/*
 * Deferred handler for a device-status-changed message: re-evaluate
 * the named target's presence via pvscsi_config_one() (which attaches
 * or removes the child node as appropriate), then free the message.
 */
static void
pvscsi_handle_msg(void *arg)
{
	pvscsi_msg_t *msg = (pvscsi_msg_t *)arg;
	dev_info_t *dip = msg->msg_pvs->dip;
	int circ;

	ndi_devi_enter(dip, &circ);
	(void) pvscsi_config_one(dip, msg->msg_pvs, msg->target, NULL);
	ndi_devi_exit(dip, circ);

	kmem_free(msg, sizeof (pvscsi_msg_t));
}
566 552
/*
 * Abort one command on the HBA.  Caller must hold both tx_mutex and
 * rx_mutex.  Any commands found completed along the way (possibly
 * including cmd itself) are chained onto *pending for the caller to
 * complete.  Returns CMD_CMPLT when cmd turned out to be already
 * completed by hardware, or CMD_ABORTED when it was cancelled and
 * prepended to *pending.
 */
static int
pvscsi_abort_cmd(pvscsi_cmd_t *cmd, pvscsi_cmd_t **pending)
{
	pvscsi_softc_t *pvs = cmd->cmd_pvs;
	pvscsi_cmd_t *c;
	pvscsi_cmd_t *done;
	struct PVSCSICmdDescAbortCmd acmd;

	dev_err(pvs->dip, CE_WARN, "!aborting command %p", (void *)cmd);

	ASSERT(mutex_owned(&pvs->rx_mutex));
	ASSERT(mutex_owned(&pvs->tx_mutex));

	/* Check if the cmd was already completed by the HBA */
	*pending = done = pvscsi_process_comp_ring(pvs);
	for (c = done; c != NULL; c = c->next_cmd) {
		if (c == cmd)
			return (CMD_CMPLT);
	}

	/* Check if cmd was really scheduled by the HBA */
	if (pvscsi_lookup_ctx(pvs, cmd) == NULL)
		return (CMD_CMPLT);

	/* Abort cmd in the HBA */
	bzero(&acmd, sizeof (acmd));
	acmd.target = cmd->cmd_target;
	acmd.context = pvscsi_map_ctx(pvs, cmd->ctx);
	pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_ABORT_CMD, &acmd, sizeof (acmd));

	/* Check if cmd was completed by the HBA before it could be aborted */
	if ((done = pvscsi_process_comp_ring(pvs)) != NULL) {
		/* Merge the new completions in front of *pending. */
		done->tail_cmd->next_cmd = *pending;
		*pending = done;
		for (c = done; c != NULL; c = c->next_cmd) {
			if (c == cmd)
				return (CMD_CMPLT);
		}
	}

	/* Release I/O ctx */
	mutex_enter(&pvs->mutex);
	if (cmd->ctx != NULL)
		pvscsi_release_ctx(cmd);
	/* Remove cmd from the queue */
	pvscsi_remove_from_queue(cmd);
	mutex_exit(&pvs->mutex);

	/* Insert cmd at the beginning of the list */
	cmd->next_cmd = *pending;
	*pending = cmd;

	dev_err(pvs->dip, CE_WARN, "!command %p aborted", (void *)cmd);

	return (CMD_ABORTED);
}
623 609
624 610 static void
625 611 pvscsi_map_buffers(pvscsi_cmd_t *cmd, struct PVSCSIRingReqDesc *rdesc)
626 612 {
627 613 int i;
628 614
629 615 ASSERT(cmd->ctx);
630 616 ASSERT(cmd->cmd_dmaccount > 0 && cmd->cmd_dmaccount <=
631 617 PVSCSI_MAX_SG_SIZE);
632 618
633 619 rdesc->dataLen = cmd->cmd_dma_count;
634 620 rdesc->dataAddr = 0;
635 621
636 622 if (cmd->cmd_dma_count == 0)
637 623 return;
638 624
639 625 if (cmd->cmd_dmaccount > 1) {
640 626 struct PVSCSISGElement *sgl = CMD_CTX_SGLIST_VA(cmd->ctx);
641 627
642 628 for (i = 0; i < cmd->cmd_dmaccount; i++) {
643 629 sgl[i].addr = cmd->cached_cookies[i].dmac_laddress;
644 630 sgl[i].length = cmd->cached_cookies[i].dmac_size;
645 631 sgl[i].flags = 0;
646 632 }
647 633 rdesc->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
648 634 rdesc->dataAddr = (uint64_t)CMD_CTX_SGLIST_PA(cmd->ctx);
649 635 } else {
650 636 rdesc->dataAddr = cmd->cached_cookies[0].dmac_laddress;
651 637 }
652 638 }
653 639
654 640 static void
655 641 pvscsi_comp_cmd(pvscsi_cmd_t *cmd, uint8_t status)
656 642 {
657 643 struct scsi_pkt *pkt = CMD2PKT(cmd);
658 644
659 645 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
660 646 STATE_GOT_STATUS);
661 647 if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0)
662 648 pkt->pkt_state |= STATE_XFERRED_DATA;
663 649 pkt->pkt_reason = CMD_CMPLT;
664 650 pkt->pkt_resid = 0;
665 651 *(pkt->pkt_scbp) = status;
666 652 }
667 653
668 654 static void
669 655 pvscsi_set_status(pvscsi_cmd_t *cmd)
670 656 {
671 657 pvscsi_softc_t *pvs = cmd->cmd_pvs;
672 658 struct scsi_pkt *pkt = CMD2PKT(cmd);
673 659 uchar_t scsi_status = cmd->cmp_stat.scsi_status;
674 660 uint32_t host_status = cmd->cmp_stat.host_status;
675 661
676 662 if (scsi_status != STATUS_GOOD &&
677 663 (host_status == BTSTAT_SUCCESS ||
678 664 (host_status == BTSTAT_LINKED_COMMAND_COMPLETED) ||
679 665 (host_status == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG))) {
680 666 if (scsi_status == STATUS_CHECK) {
681 667 struct scsi_arq_status *astat = (void*)(pkt->pkt_scbp);
682 668 uint8_t *sensedata;
683 669 int arq_size;
684 670
685 671 *pkt->pkt_scbp = scsi_status;
686 672 pkt->pkt_state |= STATE_ARQ_DONE;
687 673
688 674 if ((cmd->flags & PVSCSI_FLAG_XARQ) != 0) {
689 675 arq_size = (cmd->cmd_rqslen >=
690 676 SENSE_BUFFER_SIZE) ? SENSE_BUFFER_SIZE :
691 677 cmd->cmd_rqslen;
692 678
693 679 astat->sts_rqpkt_resid = SENSE_BUFFER_SIZE -
694 680 arq_size;
695 681 sensedata = (uint8_t *)&astat->sts_sensedata;
696 682 bcopy(cmd->arqbuf->b_un.b_addr, sensedata,
697 683 arq_size);
698 684
699 685 pkt->pkt_state |= STATE_XARQ_DONE;
700 686 } else {
701 687 astat->sts_rqpkt_resid = 0;
702 688 }
703 689
704 690 astat->sts_rqpkt_statistics = 0;
705 691 astat->sts_rqpkt_reason = CMD_CMPLT;
706 692 (*(uint8_t *)&astat->sts_rqpkt_status) = STATUS_GOOD;
707 693 astat->sts_rqpkt_state = STATE_GOT_BUS |
708 694 STATE_GOT_TARGET | STATE_SENT_CMD |
709 695 STATE_XFERRED_DATA | STATE_GOT_STATUS;
710 696 }
711 697 pvscsi_comp_cmd(cmd, scsi_status);
712 698
713 699 return;
714 700 }
715 701
716 702 switch (host_status) {
717 703 case BTSTAT_SUCCESS:
718 704 case BTSTAT_LINKED_COMMAND_COMPLETED:
719 705 case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
720 706 pvscsi_comp_cmd(cmd, STATUS_GOOD);
721 707 break;
722 708 case BTSTAT_DATARUN:
723 709 pkt->pkt_reason = CMD_DATA_OVR;
724 710 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
725 711 STATE_SENT_CMD | STATE_GOT_STATUS |
726 712 STATE_XFERRED_DATA);
727 713 pkt->pkt_resid = 0;
728 714 break;
729 715 case BTSTAT_DATA_UNDERRUN:
730 716 pkt->pkt_reason = pkt->pkt_state |= (STATE_GOT_BUS |
731 717 STATE_GOT_TARGET | STATE_SENT_CMD | STATE_GOT_STATUS);
732 718 pkt->pkt_resid = cmd->dma_count - cmd->cmp_stat.data_len;
733 719 if (pkt->pkt_resid != cmd->dma_count)
734 720 pkt->pkt_state |= STATE_XFERRED_DATA;
735 721 break;
736 722 case BTSTAT_SELTIMEO:
737 723 pkt->pkt_reason = CMD_DEV_GONE;
738 724 pkt->pkt_state |= STATE_GOT_BUS;
739 725 break;
740 726 case BTSTAT_TAGREJECT:
741 727 pkt->pkt_reason = CMD_TAG_REJECT;
742 728 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
743 729 STATE_SENT_CMD | STATE_GOT_STATUS);
744 730 break;
745 731 case BTSTAT_BADMSG:
746 732 pkt->pkt_reason = CMD_BADMSG;
747 733 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
748 734 STATE_SENT_CMD | STATE_GOT_STATUS);
749 735 break;
750 736 case BTSTAT_SENTRST:
751 737 case BTSTAT_RECVRST:
752 738 case BTSTAT_BUSRESET:
753 739 pkt->pkt_reason = CMD_RESET;
754 740 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
755 741 STATE_SENT_CMD | STATE_GOT_STATUS);
756 742 break;
757 743 case BTSTAT_ABORTQUEUE:
758 744 pkt->pkt_reason = CMD_ABORTED;
759 745 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
760 746 STATE_SENT_CMD | STATE_GOT_STATUS);
761 747 break;
762 748 case BTSTAT_HAHARDWARE:
763 749 case BTSTAT_INVPHASE:
764 750 case BTSTAT_HATIMEOUT:
765 751 case BTSTAT_NORESPONSE:
766 752 case BTSTAT_DISCONNECT:
767 753 case BTSTAT_HASOFTWARE:
768 754 case BTSTAT_BUSFREE:
769 755 case BTSTAT_SENSFAILED:
770 756 pkt->pkt_reason = CMD_TRAN_ERR;
771 757 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
772 758 STATE_SENT_CMD | STATE_GOT_STATUS);
773 759 break;
774 760 default:
775 761 dev_err(pvs->dip, CE_WARN,
776 762 "!unknown host status code: %d", host_status);
777 763 pkt->pkt_reason = CMD_TRAN_ERR;
778 764 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
779 765 STATE_SENT_CMD | STATE_GOT_STATUS);
780 766 break;
781 767 }
782 768 }
783 769
/*
 * Complete a NULL-terminated chain of commands (linked via next_cmd):
 * sync read DMA, release each command's I/O context and queue slot,
 * translate its completion status into the scsi_pkt, and invoke the
 * target completion callback unless FLAG_NOINTR is set.
 */
static void
pvscsi_complete_chained(void *arg)
{
	pvscsi_cmd_t *cmd = (pvscsi_cmd_t *)arg;
	pvscsi_cmd_t *c;
	struct scsi_pkt *pkt;

	while (cmd != NULL) {
		pvscsi_softc_t *pvs = cmd->cmd_pvs;

		c = cmd->next_cmd;
		cmd->next_cmd = NULL;

		pkt = CMD2PKT(cmd);
		/*
		 * NOTE(review): a NULL pkt returns immediately, abandoning
		 * the remainder of the chain — presumably pkt is never NULL
		 * for a transported command; confirm.
		 */
		if (pkt == NULL)
			return;

		if ((cmd->flags & PVSCSI_FLAG_IO_IOPB) != 0 &&
		    (cmd->flags & PVSCSI_FLAG_IO_READ) != 0) {
			(void) ddi_dma_sync(cmd->cmd_dmahdl, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
		}

		mutex_enter(&pvs->mutex);
		/* Release I/O ctx */
		if (cmd->ctx != NULL)
			pvscsi_release_ctx(cmd);
		/* Remove command from queue */
		pvscsi_remove_from_queue(cmd);
		mutex_exit(&pvs->mutex);

		if ((cmd->flags & PVSCSI_FLAG_HW_STATUS) != 0) {
			/* Hardware reported a status: translate it. */
			pvscsi_set_status(cmd);
		} else {
			/* Completed by software: timeout, abort or reset. */
			ASSERT((cmd->flags & PVSCSI_FLAGS_NON_HW_COMPLETION) !=
			    0);

			if ((cmd->flags & PVSCSI_FLAG_TIMED_OUT) != 0) {
				cmd->pkt->pkt_reason = CMD_TIMEOUT;
				cmd->pkt->pkt_statistics |=
				    (STAT_TIMEOUT | STAT_ABORTED);
			} else if ((cmd->flags & PVSCSI_FLAG_ABORTED) != 0) {
				cmd->pkt->pkt_reason = CMD_ABORTED;
				cmd->pkt->pkt_statistics |=
				    (STAT_TIMEOUT | STAT_ABORTED);
			} else if ((cmd->flags & PVSCSI_FLAGS_RESET) != 0) {
				cmd->pkt->pkt_reason = CMD_RESET;
				if ((cmd->flags & PVSCSI_FLAG_RESET_BUS) != 0) {
					cmd->pkt->pkt_statistics |=
					    STAT_BUS_RESET;
				} else {
					cmd->pkt->pkt_statistics |=
					    STAT_DEV_RESET;
				}
			}
		}

		cmd->flags |= PVSCSI_FLAG_DONE;
		cmd->flags &= ~PVSCSI_FLAG_TRANSPORT;

		if ((pkt->pkt_flags & FLAG_NOINTR) == 0 &&
		    pkt->pkt_comp != NULL)
			(*pkt->pkt_comp)(pkt);

		cmd = c;
	}
}
851 837
852 838 static void
853 839 pvscsi_dev_reset(pvscsi_softc_t *pvs, int target)
854 840 {
855 841 struct PVSCSICmdDescResetDevice cmd = { 0 };
856 842
857 843 cmd.target = target;
858 844 pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_RESET_DEVICE, &cmd, sizeof (cmd));
859 845 }
860 846
/*
 * Poll for completion of cmd with interrupts masked, busy-waiting up
 * to pkt_time seconds in USECS_TO_WAIT slices.  If no completion shows
 * up in time, the command is aborted on the HBA and marked with
 * CMD_TIMEOUT.  Always returns TRAN_ACCEPT.
 */
static int
pvscsi_poll_cmd(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd)
{
	boolean_t seen_intr;
	int cycles = (cmd->pkt->pkt_time * 1000000) / USECS_TO_WAIT;
	int i;
	pvscsi_cmd_t *dcmd;
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	/*
	 * Make sure we're not missing any commands completed
	 * concurrently before we have actually disabled interrupts.
	 */
	mutex_enter(&pvs->rx_mutex);
	dcmd = pvscsi_process_comp_ring(pvs);
	mutex_exit(&pvs->rx_mutex);

	pvscsi_complete_chained(dcmd);

	while ((cmd->flags & PVSCSI_FLAG_DONE) == 0) {
		seen_intr = B_FALSE;

		/* Disable interrupts from H/W */
		pvscsi_mask_intr(pvs);

		/* Wait for interrupt to arrive */
		for (i = 0; i < cycles; i++) {
			uint32_t status;

			mutex_enter(&pvs->rx_mutex);
			mutex_enter(&pvs->intr_mutex);
			status = pvscsi_read_intr_status(pvs);
			if ((status & PVSCSI_INTR_ALL_SUPPORTED) != 0) {
				/* Check completion ring */
				mutex_exit(&pvs->intr_mutex);
				dcmd = pvscsi_process_comp_ring(pvs);
				mutex_exit(&pvs->rx_mutex);
				seen_intr = B_TRUE;
				break;
			} else {
				mutex_exit(&pvs->intr_mutex);
				mutex_exit(&pvs->rx_mutex);
				drv_usecwait(USECS_TO_WAIT);
			}
		}

		/* Enable interrupts from H/W */
		pvscsi_unmask_intr(pvs);

		if (!seen_intr) {
			/* No interrupts seen from device during the timeout */
			mutex_enter(&pvs->tx_mutex);
			mutex_enter(&pvs->rx_mutex);
			if ((cmd->flags & PVSCSI_FLAGS_COMPLETION) != 0) {
				/* Command was cancelled asynchronously */
				dcmd = NULL;
			} else if ((pvscsi_abort_cmd(cmd,
			    &dcmd)) == CMD_ABORTED) {
				/* Command was cancelled in hardware */
				pkt->pkt_state |= (STAT_TIMEOUT | STAT_ABORTED);
				pkt->pkt_statistics |= (STAT_TIMEOUT |
				    STAT_ABORTED);
				pkt->pkt_reason = CMD_TIMEOUT;
			}
			mutex_exit(&pvs->rx_mutex);
			mutex_exit(&pvs->tx_mutex);

			/*
			 * Complete commands that might be on completion list.
			 * Target command can also be on the list in case it was
			 * completed before it could be actually cancelled.
			 *
			 * NOTE(review): dcmd is NOT handed to
			 * pvscsi_complete_chained() on this break-out path,
			 * so commands collected above appear to be left
			 * uncompleted — confirm against the interrupt path.
			 */
			break;
		}

		pvscsi_complete_chained(dcmd);

		/* NOTE(review): unreachable — !seen_intr already broke out. */
		if (!seen_intr)
			break;
	}

	return (TRAN_ACCEPT);
}
944 930
/*
 * Abort every queued command, or only those for ap's target when ap is
 * non-NULL.  Commands successfully cancelled get marker_flag set (e.g.
 * PVSCSI_FLAG_ABORTED/RESET).  All commands needing completion are
 * merged into one chain returned via *pending; the caller completes
 * them after dropping the locks.  Caller holds tx_mutex and rx_mutex.
 */
static void
pvscsi_abort_all(struct scsi_address *ap, pvscsi_softc_t *pvs,
    pvscsi_cmd_t **pending, int marker_flag)
{
	int qlen = pvs->cmd_queue_len;
	pvscsi_cmd_t *cmd, *pcmd, *phead = NULL;

	ASSERT(mutex_owned(&pvs->rx_mutex));
	ASSERT(mutex_owned(&pvs->tx_mutex));

	/*
	 * Try to abort all queued commands, merging commands waiting
	 * for completion into a single list to complete them at one
	 * time when mutex is released.
	 */
	while (qlen > 0) {
		mutex_enter(&pvs->mutex);
		cmd = list_remove_head(&pvs->cmd_queue);
		ASSERT(cmd != NULL);

		qlen--;

		if (ap == NULL || ap->a_target == cmd->cmd_target) {
			/* Snapshot the length to detect side removals. */
			int c = --pvs->cmd_queue_len;

			mutex_exit(&pvs->mutex);

			if (pvscsi_abort_cmd(cmd, &pcmd) == CMD_ABORTED) {
				/*
				 * Assume command is completely cancelled now,
				 * so mark it as requested.
				 */
				cmd->flags |= marker_flag;
			}

			/* Account for commands the abort dequeued itself. */
			qlen -= (c - pvs->cmd_queue_len);

			/*
			 * Now merge current pending commands with
			 * previous ones.
			 */
			if (phead == NULL) {
				phead = pcmd;
			} else if (pcmd != NULL) {
				phead->tail_cmd->next_cmd = pcmd;
				phead->tail_cmd = pcmd->tail_cmd;
			}
		} else {
			/* Different target: put it back and move on. */
			list_insert_tail(&pvs->cmd_queue, cmd);
			mutex_exit(&pvs->mutex);
		}
	}

	*pending = phead;
}
1000 986
1001 987 static void
1002 988 pvscsi_quiesce_notify(pvscsi_softc_t *pvs)
1003 989 {
1004 990 mutex_enter(&pvs->mutex);
1005 991 if (pvs->cmd_queue_len == 0 &&
1006 992 (pvs->flags & PVSCSI_HBA_QUIESCE_PENDING) != 0) {
1007 993 pvs->flags &= ~PVSCSI_HBA_QUIESCE_PENDING;
1008 994 cv_broadcast(&pvs->quiescevar);
1009 995 }
1010 996 mutex_exit(&pvs->mutex);
1011 997 }
1012 998
/*
 * Build a request descriptor for 'cmd' in the shared request ring and
 * kick the device.  Returns TRAN_ACCEPT on success, TRAN_BUSY when no
 * I/O context or ring slot is available.
 *
 * Lock ordering: tx_mutex is held across the whole ring update; the
 * per-softc 'mutex' is taken only around context/queue manipulation.
 * The ring producer index is published with membar_producer() so the
 * device never observes the index before the descriptor contents.
 */
static int
pvscsi_transport_command(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd)
{
	struct PVSCSIRingReqDesc *rdesc;
	struct PVSCSIRingsState *sdesc = RINGS_STATE(pvs);
	struct scsi_pkt *pkt = CMD2PKT(cmd);
	uint32_t req_ne = sdesc->reqNumEntriesLog2;

	mutex_enter(&pvs->tx_mutex);
	mutex_enter(&pvs->mutex);
	if (!pvscsi_acquire_ctx(pvs, cmd)) {
		mutex_exit(&pvs->mutex);
		mutex_exit(&pvs->tx_mutex);
		dev_err(pvs->dip, CE_WARN, "!no free ctx available");
		return (TRAN_BUSY);
	}

	/* Ring full: producer has lapped the consumer's window */
	if ((sdesc->reqProdIdx - sdesc->cmpConsIdx) >= (1 << req_ne)) {
		pvscsi_release_ctx(cmd);
		mutex_exit(&pvs->mutex);
		mutex_exit(&pvs->tx_mutex);
		dev_err(pvs->dip, CE_WARN, "!no free I/O slots available");
		return (TRAN_BUSY);
	}
	mutex_exit(&pvs->mutex);

	cmd->flags |= PVSCSI_FLAG_TRANSPORT;

	/* Next free slot; indices are free-running, masked to ring size */
	rdesc = REQ_RING(pvs) + (sdesc->reqProdIdx & MASK(req_ne));

	bzero(&rdesc->lun, sizeof (rdesc->lun));

	rdesc->bus = 0;
	rdesc->target = cmd->cmd_target;

	/* Auto request sense: point the device at the ARQ DMA buffer */
	if ((cmd->flags & PVSCSI_FLAG_XARQ) != 0) {
		bzero((void*)cmd->arqbuf->b_un.b_addr, SENSE_BUFFER_SIZE);
		rdesc->senseLen = SENSE_BUFFER_SIZE;
		rdesc->senseAddr = cmd->arqc.dmac_laddress;
	} else {
		rdesc->senseLen = 0;
		rdesc->senseAddr = 0;
	}

	rdesc->vcpuHint = CPU->cpu_id;
	rdesc->cdbLen = cmd->cmdlen;
	bcopy(cmd->cmd_cdb, rdesc->cdb, cmd->cmdlen);

	/* Setup tag info */
	if ((cmd->flags & PVSCSI_FLAG_TAG) != 0)
		rdesc->tag = cmd->tag;
	else
		rdesc->tag = MSG_SIMPLE_QTAG;

	/* Setup I/O direction and map data buffers */
	if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0) {
		if ((cmd->flags & PVSCSI_FLAG_IO_READ) != 0)
			rdesc->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
		else
			rdesc->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
		pvscsi_map_buffers(cmd, rdesc);
	} else {
		rdesc->flags = 0;
	}

	/* Descriptor must be fully visible before the index is bumped */
	rdesc->context = pvscsi_map_ctx(pvs, cmd->ctx);
	membar_producer();

	sdesc->reqProdIdx++;
	membar_producer();

	mutex_enter(&pvs->mutex);
	cmd->timeout_lbolt = ddi_get_lbolt() + SEC_TO_TICK(pkt->pkt_time);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD);
	pvscsi_add_to_queue(cmd);

	/* R/W commands use the cheaper RW-I/O doorbell */
	switch (cmd->pkt->pkt_cdbp[0]) {
	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:
		ASSERT((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0);
		pvscsi_submit_rw_io(pvs);
		break;
	default:
		pvscsi_submit_nonrw_io(pvs);
		break;
	}
	mutex_exit(&pvs->mutex);
	mutex_exit(&pvs->tx_mutex);

	return (TRAN_ACCEPT);
}
1110 1096
/*
 * Common implementation of bus reset (ap == NULL) and single-target
 * reset.  Outstanding completions are drained first, all pending
 * commands for the scope are aborted, and then the hardware reset is
 * issued.  Completed and aborted command chains are finished only after
 * the ring mutexes have been dropped.  Always returns 1 (success), as
 * expected by tran_reset(9E) callers.
 */
static int
pvscsi_reset_generic(pvscsi_softc_t *pvs, struct scsi_address *ap)
{
	boolean_t bus_reset = (ap == NULL);
	int flags;
	pvscsi_cmd_t *done, *aborted;

	flags = bus_reset ? PVSCSI_FLAG_RESET_BUS : PVSCSI_FLAG_RESET_DEV;

	mutex_enter(&pvs->tx_mutex);
	mutex_enter(&pvs->rx_mutex);
	/* Try to process pending requests */
	done = pvscsi_process_comp_ring(pvs);

	/* Abort all pending requests */
	pvscsi_abort_all(ap, pvs, &aborted, flags);

	/* Reset at hardware level */
	if (bus_reset) {
		pvscsi_reset_bus(pvs);
		/* Should never happen after bus reset */
		/*
		 * NOTE(review): the call inside ASSERT() is compiled out on
		 * non-DEBUG builds, so the completion ring is only re-checked
		 * on DEBUG kernels — confirm this side effect is intentional.
		 */
		ASSERT(pvscsi_process_comp_ring(pvs) == NULL);
	} else {
		pvscsi_dev_reset(pvs, ap->a_target);
	}
	mutex_exit(&pvs->rx_mutex);
	mutex_exit(&pvs->tx_mutex);

	pvscsi_complete_chained(done);
	pvscsi_complete_chained(aborted);

	return (1);
}
1144 1130
1145 1131 static void
1146 1132 pvscsi_cmd_ext_free(pvscsi_cmd_t *cmd)
1147 1133 {
1148 1134 struct scsi_pkt *pkt = CMD2PKT(cmd);
1149 1135
1150 1136 if ((cmd->flags & PVSCSI_FLAG_CDB_EXT) != 0) {
1151 1137 kmem_free(pkt->pkt_cdbp, cmd->cmdlen);
1152 1138 cmd->flags &= ~PVSCSI_FLAG_CDB_EXT;
1153 1139 }
1154 1140 if ((cmd->flags & PVSCSI_FLAG_SCB_EXT) != 0) {
1155 1141 kmem_free(pkt->pkt_scbp, cmd->statuslen);
1156 1142 cmd->flags &= ~PVSCSI_FLAG_SCB_EXT;
1157 1143 }
1158 1144 if ((cmd->flags & PVSCSI_FLAG_PRIV_EXT) != 0) {
1159 1145 kmem_free(pkt->pkt_private, cmd->tgtlen);
1160 1146 cmd->flags &= ~PVSCSI_FLAG_PRIV_EXT;
1161 1147 }
1162 1148 }
1163 1149
1164 1150 /* ARGSUSED pvs */
1165 1151 static int
1166 1152 pvscsi_cmd_ext_alloc(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd, int kf)
1167 1153 {
1168 1154 struct scsi_pkt *pkt = CMD2PKT(cmd);
1169 1155 void *buf;
1170 1156
1171 1157 if (cmd->cmdlen > sizeof (cmd->cmd_cdb)) {
1172 1158 if ((buf = kmem_zalloc(cmd->cmdlen, kf)) == NULL)
1173 1159 return (DDI_FAILURE);
1174 1160 pkt->pkt_cdbp = buf;
1175 1161 cmd->flags |= PVSCSI_FLAG_CDB_EXT;
1176 1162 }
1177 1163
1178 1164 if (cmd->statuslen > sizeof (cmd->cmd_scb)) {
1179 1165 if ((buf = kmem_zalloc(cmd->statuslen, kf)) == NULL)
1180 1166 goto out;
1181 1167 pkt->pkt_scbp = buf;
1182 1168 cmd->flags |= PVSCSI_FLAG_SCB_EXT;
1183 1169 cmd->cmd_rqslen = (cmd->statuslen - sizeof (cmd->cmd_scb));
1184 1170 }
1185 1171
1186 1172 if (cmd->tgtlen > sizeof (cmd->tgt_priv)) {
1187 1173 if ((buf = kmem_zalloc(cmd->tgtlen, kf)) == NULL)
1188 1174 goto out;
1189 1175 pkt->pkt_private = buf;
1190 1176 cmd->flags |= PVSCSI_FLAG_PRIV_EXT;
1191 1177 }
1192 1178
1193 1179 return (DDI_SUCCESS);
1194 1180
1195 1181 out:
1196 1182 pvscsi_cmd_ext_free(cmd);
1197 1183
1198 1184 return (DDI_FAILURE);
1199 1185 }
1200 1186
1201 1187 static int
1202 1188 pvscsi_setup_dma_buffer(pvscsi_softc_t *pvs, size_t length,
1203 1189 pvscsi_dma_buf_t *buf)
1204 1190 {
1205 1191 ddi_dma_cookie_t cookie;
1206 1192 uint_t ccount;
1207 1193
1208 1194 if ((ddi_dma_alloc_handle(pvs->dip, &pvscsi_ring_dma_attr,
1209 1195 DDI_DMA_SLEEP, NULL, &buf->dma_handle)) != DDI_SUCCESS) {
1210 1196 dev_err(pvs->dip, CE_WARN, "!failed to allocate DMA handle");
1211 1197 return (DDI_FAILURE);
1212 1198 }
1213 1199
1214 1200 if ((ddi_dma_mem_alloc(buf->dma_handle, length, &pvscsi_dma_attrs,
1215 1201 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &buf->addr,
1216 1202 &buf->real_length, &buf->acc_handle)) != DDI_SUCCESS) {
1217 1203 dev_err(pvs->dip, CE_WARN,
1218 1204 "!failed to allocate %ld bytes for DMA buffer", length);
1219 1205 ddi_dma_free_handle(&buf->dma_handle);
1220 1206 return (DDI_FAILURE);
1221 1207 }
1222 1208
1223 1209 if ((ddi_dma_addr_bind_handle(buf->dma_handle, NULL, buf->addr,
1224 1210 buf->real_length, DDI_DMA_CONSISTENT | DDI_DMA_RDWR, DDI_DMA_SLEEP,
1225 1211 NULL, &cookie, &ccount)) != DDI_SUCCESS) {
1226 1212 dev_err(pvs->dip, CE_WARN, "!failed to bind DMA buffer");
1227 1213 ddi_dma_free_handle(&buf->dma_handle);
1228 1214 ddi_dma_mem_free(&buf->acc_handle);
1229 1215 return (DDI_FAILURE);
1230 1216 }
1231 1217
1232 1218 /* TODO Support multipart SG regions */
1233 1219 ASSERT(ccount == 1);
1234 1220
1235 1221 buf->pa = cookie.dmac_laddress;
1236 1222
1237 1223 return (DDI_SUCCESS);
1238 1224 }
1239 1225
1240 1226 static void
1241 1227 pvscsi_free_dma_buffer(pvscsi_dma_buf_t *buf)
1242 1228 {
1243 1229 ddi_dma_free_handle(&buf->dma_handle);
1244 1230 ddi_dma_mem_free(&buf->acc_handle);
1245 1231 }
1246 1232
1247 1233 static int
1248 1234 pvscsi_setup_sg(pvscsi_softc_t *pvs)
1249 1235 {
1250 1236 int i;
1251 1237 pvscsi_cmd_ctx_t *ctx;
1252 1238 size_t size = pvs->req_depth * sizeof (pvscsi_cmd_ctx_t);
1253 1239
1254 1240 ctx = pvs->cmd_ctx = kmem_zalloc(size, KM_SLEEP);
1255 1241
1256 1242 for (i = 0; i < pvs->req_depth; ++i, ++ctx) {
1257 1243 list_insert_tail(&pvs->cmd_ctx_pool, ctx);
1258 1244 if (pvscsi_setup_dma_buffer(pvs, PAGE_SIZE,
1259 1245 &ctx->dma_buf) != DDI_SUCCESS)
1260 1246 goto cleanup;
1261 1247 }
1262 1248
1263 1249 return (DDI_SUCCESS);
1264 1250
1265 1251 cleanup:
1266 1252 for (; i >= 0; --i, --ctx) {
1267 1253 list_remove(&pvs->cmd_ctx_pool, ctx);
1268 1254 pvscsi_free_dma_buffer(&ctx->dma_buf);
1269 1255 }
1270 1256 kmem_free(pvs->cmd_ctx, size);
1271 1257
1272 1258 return (DDI_FAILURE);
1273 1259 }
1274 1260
1275 1261 static void
1276 1262 pvscsi_free_sg(pvscsi_softc_t *pvs)
1277 1263 {
1278 1264 int i;
1279 1265 pvscsi_cmd_ctx_t *ctx = pvs->cmd_ctx;
1280 1266
1281 1267 for (i = 0; i < pvs->req_depth; ++i, ++ctx) {
1282 1268 list_remove(&pvs->cmd_ctx_pool, ctx);
1283 1269 pvscsi_free_dma_buffer(&ctx->dma_buf);
1284 1270 }
1285 1271
1286 1272 kmem_free(pvs->cmd_ctx, pvs->req_pages << PAGE_SHIFT);
1287 1273 }
1288 1274
1289 1275 static int
1290 1276 pvscsi_allocate_rings(pvscsi_softc_t *pvs)
1291 1277 {
1292 1278 /* Allocate DMA buffer for rings state */
1293 1279 if (pvscsi_setup_dma_buffer(pvs, PAGE_SIZE,
1294 1280 &pvs->rings_state_buf) != DDI_SUCCESS)
1295 1281 return (DDI_FAILURE);
1296 1282
1297 1283 /* Allocate DMA buffer for request ring */
1298 1284 pvs->req_pages = MIN(pvscsi_ring_pages, PVSCSI_MAX_NUM_PAGES_REQ_RING);
1299 1285 pvs->req_depth = pvs->req_pages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
1300 1286 if (pvscsi_setup_dma_buffer(pvs, pvs->req_pages * PAGE_SIZE,
1301 1287 &pvs->req_ring_buf) != DDI_SUCCESS)
1302 1288 goto free_rings_state;
1303 1289
1304 1290 /* Allocate completion ring */
1305 1291 pvs->cmp_pages = MIN(pvscsi_ring_pages, PVSCSI_MAX_NUM_PAGES_CMP_RING);
1306 1292 if (pvscsi_setup_dma_buffer(pvs, pvs->cmp_pages * PAGE_SIZE,
1307 1293 &pvs->cmp_ring_buf) != DDI_SUCCESS)
1308 1294 goto free_req_buf;
1309 1295
1310 1296 /* Allocate message ring */
1311 1297 pvs->msg_pages = MIN(pvscsi_msg_ring_pages,
1312 1298 PVSCSI_MAX_NUM_PAGES_MSG_RING);
1313 1299 if (pvscsi_setup_dma_buffer(pvs, pvs->msg_pages * PAGE_SIZE,
1314 1300 &pvs->msg_ring_buf) != DDI_SUCCESS)
1315 1301 goto free_cmp_buf;
1316 1302
1317 1303 return (DDI_SUCCESS);
1318 1304
1319 1305 free_cmp_buf:
1320 1306 pvscsi_free_dma_buffer(&pvs->cmp_ring_buf);
1321 1307 free_req_buf:
1322 1308 pvscsi_free_dma_buffer(&pvs->req_ring_buf);
1323 1309 free_rings_state:
1324 1310 pvscsi_free_dma_buffer(&pvs->rings_state_buf);
1325 1311
1326 1312 return (DDI_FAILURE);
1327 1313 }
1328 1314
1329 1315 static void
1330 1316 pvscsi_free_rings(pvscsi_softc_t *pvs)
1331 1317 {
1332 1318 pvscsi_free_dma_buffer(&pvs->msg_ring_buf);
1333 1319 pvscsi_free_dma_buffer(&pvs->cmp_ring_buf);
1334 1320 pvscsi_free_dma_buffer(&pvs->req_ring_buf);
1335 1321 pvscsi_free_dma_buffer(&pvs->rings_state_buf);
1336 1322 }
1337 1323
1338 1324 static void
1339 1325 pvscsi_setup_rings(pvscsi_softc_t *pvs)
1340 1326 {
1341 1327 int i;
1342 1328 struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
1343 1329 struct PVSCSICmdDescSetupRings cmd = { 0 };
1344 1330 uint64_t base;
1345 1331
1346 1332 cmd.ringsStatePPN = pvs->rings_state_buf.pa >> PAGE_SHIFT;
1347 1333 cmd.reqRingNumPages = pvs->req_pages;
1348 1334 cmd.cmpRingNumPages = pvs->cmp_pages;
1349 1335
1350 1336 /* Setup request ring */
1351 1337 base = pvs->req_ring_buf.pa;
1352 1338 for (i = 0; i < pvs->req_pages; i++) {
1353 1339 cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
1354 1340 base += PAGE_SIZE;
1355 1341 }
1356 1342
1357 1343 /* Setup completion ring */
1358 1344 base = pvs->cmp_ring_buf.pa;
1359 1345 for (i = 0; i < pvs->cmp_pages; i++) {
1360 1346 cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
1361 1347 base += PAGE_SIZE;
1362 1348 }
1363 1349
1364 1350 bzero(RINGS_STATE(pvs), PAGE_SIZE);
1365 1351 bzero(REQ_RING(pvs), pvs->req_pages * PAGE_SIZE);
1366 1352 bzero(CMP_RING(pvs), pvs->cmp_pages * PAGE_SIZE);
1367 1353
1368 1354 /* Issue SETUP command */
1369 1355 pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_SETUP_RINGS, &cmd, sizeof (cmd));
1370 1356
1371 1357 /* Setup message ring */
1372 1358 cmd_msg.numPages = pvs->msg_pages;
1373 1359 base = pvs->msg_ring_buf.pa;
1374 1360
1375 1361 for (i = 0; i < pvs->msg_pages; i++) {
1376 1362 cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
1377 1363 base += PAGE_SIZE;
1378 1364 }
1379 1365 bzero(MSG_RING(pvs), pvs->msg_pages * PAGE_SIZE);
1380 1366
1381 1367 pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_SETUP_MSG_RING, &cmd_msg,
1382 1368 sizeof (cmd_msg));
1383 1369 }
1384 1370
/*
 * Locate and map the device's MMIO BAR.  The PCI 'reg' property is
 * scanned for each possible BAR offset; the first non-I/O-space region
 * whose size matches PVSCSI_MEM_SPACE_SIZE is mapped into
 * pvs->mmio_base/mmio_handle.  Returns DDI_SUCCESS only if such a BAR
 * was found and mapped.
 */
static int
pvscsi_setup_io(pvscsi_softc_t *pvs)
{
	int offset, rcount, rn, type;
	int ret = DDI_FAILURE;
	off_t regsize;
	pci_regspec_t *regs;
	uint_t regs_length;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pvs->dip,
	    DDI_PROP_DONTPASS, "reg", (int **)&regs,
	    &regs_length) != DDI_PROP_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to lookup 'reg' property");
		return (DDI_FAILURE);
	}

	rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t);

	for (offset = PCI_CONF_BASE0; offset <= PCI_CONF_BASE5; offset += 4) {
		/* Find the 'reg' entry describing this BAR, if any */
		for (rn = 0; rn < rcount; ++rn) {
			if (PCI_REG_REG_G(regs[rn].pci_phys_hi) == offset) {
				type = regs[rn].pci_phys_hi & PCI_ADDR_MASK;
				break;
			}
		}

		if (rn >= rcount)
			continue;

		/* 'type' is only valid here when the inner loop matched */
		if (type != PCI_ADDR_IO) {
			if (ddi_dev_regsize(pvs->dip, rn,
			    &regsize) != DDI_SUCCESS) {
				dev_err(pvs->dip, CE_WARN,
				    "!failed to get size of reg %d", rn);
				goto out;
			}
			if (regsize == PVSCSI_MEM_SPACE_SIZE) {
				if (ddi_regs_map_setup(pvs->dip, rn,
				    &pvs->mmio_base, 0, 0,
				    &pvscsi_mmio_attr,
				    &pvs->mmio_handle) != DDI_SUCCESS) {
					dev_err(pvs->dip, CE_WARN,
					    "!failed to map MMIO BAR");
					goto out;
				}
				ret = DDI_SUCCESS;
				break;
			}
		}
	}

out:
	ddi_prop_free(regs);

	return (ret);
}
1441 1427
/*
 * Unmap the MMIO BAR mapped by pvscsi_setup_io().
 */
static void
pvscsi_free_io(pvscsi_softc_t *pvs)
{
	ddi_regs_map_free(&pvs->mmio_handle);
}
1447 1433
/*
 * Enable all allocated interrupt vectors, using block enabling when the
 * interrupt implementation supports it.  Device-level completion and
 * message interrupts are unmasked only if enabling succeeded.  Returns
 * the DDI status of the (last) enable operation.
 */
static int
pvscsi_enable_intrs(pvscsi_softc_t *pvs)
{
	int i, rc, intr_caps;

	if ((rc = ddi_intr_get_cap(pvs->intr_htable[0], &intr_caps)) !=
	    DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to get interrupt caps");
		return (DDI_FAILURE);
	}

	if ((intr_caps & DDI_INTR_FLAG_BLOCK) != 0) {
		/* All vectors enabled (or not) in a single call */
		if ((rc = ddi_intr_block_enable(pvs->intr_htable,
		    pvs->intr_cnt)) != DDI_SUCCESS) {
			dev_err(pvs->dip, CE_WARN,
			    "!failed to enable interrupt block");
		}
	} else {
		for (i = 0; i < pvs->intr_cnt; i++) {
			if ((rc = ddi_intr_enable(pvs->intr_htable[i])) ==
			    DDI_SUCCESS)
				continue;
			dev_err(pvs->dip, CE_WARN,
			    "!failed to enable interrupt");
			/* Roll back the vectors enabled so far */
			while (--i >= 0)
				(void) ddi_intr_disable(pvs->intr_htable[i]);
			break;
		}
	}

	/* Unmask interrupts */
	if (rc == DDI_SUCCESS) {
		pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_MASK,
		    PVSCSI_INTR_CMPL_MASK | PVSCSI_INTR_MSG_MASK);
	}

	return (rc);
}
1486 1472
/*
 * Interrupt service routine.  When a thread is actively polling
 * (num_pollers > 0), the interrupt is claimed but otherwise ignored --
 * the poller will pick up the completions itself.  For MSI/MSI-X the
 * interrupt is always ours; for fixed interrupts the device status
 * register decides.  Completed commands and device messages are handed
 * off to taskqs, falling back to inline processing if dispatch fails.
 */
/* ARGSUSED arg2 */
static uint32_t
pvscsi_intr_handler(caddr_t arg1, caddr_t arg2)
{
	boolean_t handled;
	pvscsi_softc_t *pvs = (pvscsi_softc_t *)arg1;
	uint32_t status;

	mutex_enter(&pvs->intr_mutex);
	if (pvs->num_pollers > 0) {
		/* A polling thread owns the rings right now */
		mutex_exit(&pvs->intr_mutex);
		return (DDI_INTR_CLAIMED);
	}

	if (pvscsi_enable_msi) {
		handled = B_TRUE;
	} else {
		/* Shared fixed interrupt: check and ack our status bits */
		status = pvscsi_read_intr_status(pvs);
		handled = (status & PVSCSI_INTR_ALL_SUPPORTED) != 0;
		if (handled)
			pvscsi_write_intr_status(pvs, status);
	}
	mutex_exit(&pvs->intr_mutex);

	if (handled) {
		boolean_t qnotify;
		pvscsi_cmd_t *pending;
		pvscsi_msg_t *msg;

		mutex_enter(&pvs->rx_mutex);
		pending = pvscsi_process_comp_ring(pvs);
		msg = pvscsi_process_msg_ring(pvs);
		mutex_exit(&pvs->rx_mutex);

		mutex_enter(&pvs->mutex);
		qnotify = HBA_QUIESCE_PENDING(pvs);
		mutex_exit(&pvs->mutex);

		/* Complete inline if the taskq cannot take the work */
		if (pending != NULL && ddi_taskq_dispatch(pvs->comp_tq,
		    pvscsi_complete_chained, pending,
		    DDI_NOSLEEP) == DDI_FAILURE)
			pvscsi_complete_chained(pending);

		if (msg != NULL && ddi_taskq_dispatch(pvs->msg_tq,
		    pvscsi_handle_msg, msg, DDI_NOSLEEP) == DDI_FAILURE) {
			dev_err(pvs->dip, CE_WARN,
			    "!failed to process msg type %d for target %d",
			    msg->type, msg->target);
			kmem_free(msg, sizeof (pvscsi_msg_t));
		}

		if (qnotify)
			pvscsi_quiesce_notify(pvs);
	}

	return (handled ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}
1544 1530
1545 1531 static int
1546 1532 pvscsi_register_isr(pvscsi_softc_t *pvs, int type)
1547 1533 {
1548 1534 int navail, nactual;
1549 1535 int i;
1550 1536
1551 1537 if (ddi_intr_get_navail(pvs->dip, type, &navail) != DDI_SUCCESS ||
1552 1538 navail == 0) {
1553 1539 dev_err(pvs->dip, CE_WARN,
1554 1540 "!failed to get number of available interrupts of type %d",
1555 1541 type);
1556 1542 return (DDI_FAILURE);
1557 1543 }
1558 1544 navail = MIN(navail, PVSCSI_MAX_INTRS);
1559 1545
1560 1546 pvs->intr_size = navail * sizeof (ddi_intr_handle_t);
1561 1547 if ((pvs->intr_htable = kmem_alloc(pvs->intr_size, KM_SLEEP)) == NULL) {
1562 1548 dev_err(pvs->dip, CE_WARN,
1563 1549 "!failed to allocate %d bytes for interrupt hashtable",
1564 1550 pvs->intr_size);
1565 1551 return (DDI_FAILURE);
1566 1552 }
1567 1553
1568 1554 if (ddi_intr_alloc(pvs->dip, pvs->intr_htable, type, 0, navail,
1569 1555 &nactual, DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS || nactual == 0) {
1570 1556 dev_err(pvs->dip, CE_WARN, "!failed to allocate %d interrupts",
1571 1557 navail);
1572 1558 goto free_htable;
1573 1559 }
1574 1560
1575 1561 pvs->intr_cnt = nactual;
1576 1562
1577 1563 if (ddi_intr_get_pri(pvs->intr_htable[0],
1578 1564 (uint_t *)&pvs->intr_pri) != DDI_SUCCESS) {
1579 1565 dev_err(pvs->dip, CE_WARN, "!failed to get interrupt priority");
1580 1566 goto free_intrs;
1581 1567 }
1582 1568
1583 1569 for (i = 0; i < nactual; i++) {
1584 1570 if (ddi_intr_add_handler(pvs->intr_htable[i],
1585 1571 pvscsi_intr_handler, (caddr_t)pvs, NULL) != DDI_SUCCESS) {
1586 1572 dev_err(pvs->dip, CE_WARN,
1587 1573 "!failed to add interrupt handler");
1588 1574 goto free_intrs;
1589 1575 }
1590 1576 }
1591 1577
1592 1578 return (DDI_SUCCESS);
1593 1579
1594 1580 free_intrs:
1595 1581 for (i = 0; i < nactual; i++)
1596 1582 (void) ddi_intr_free(pvs->intr_htable[i]);
1597 1583 free_htable:
1598 1584 kmem_free(pvs->intr_htable, pvs->intr_size);
1599 1585
1600 1586 return (DDI_FAILURE);
1601 1587 }
1602 1588
1603 1589 static void
1604 1590 pvscsi_free_intr_resources(pvscsi_softc_t *pvs)
1605 1591 {
1606 1592 int i;
1607 1593
1608 1594 for (i = 0; i < pvs->intr_cnt; i++) {
1609 1595 (void) ddi_intr_disable(pvs->intr_htable[i]);
1610 1596 (void) ddi_intr_remove_handler(pvs->intr_htable[i]);
1611 1597 (void) ddi_intr_free(pvs->intr_htable[i]);
1612 1598 }
1613 1599 kmem_free(pvs->intr_htable, pvs->intr_size);
1614 1600 }
1615 1601
1616 1602 static int
1617 1603 pvscsi_setup_isr(pvscsi_softc_t *pvs)
1618 1604 {
1619 1605 int intr_types;
1620 1606
1621 1607 if (ddi_intr_get_supported_types(pvs->dip,
1622 1608 &intr_types) != DDI_SUCCESS) {
1623 1609 dev_err(pvs->dip, CE_WARN,
1624 1610 "!failed to get supported interrupt types");
1625 1611 return (DDI_FAILURE);
1626 1612 }
1627 1613
1628 1614 if ((intr_types & DDI_INTR_TYPE_MSIX) != 0 && pvscsi_enable_msi) {
1629 1615 if (pvscsi_register_isr(pvs,
1630 1616 DDI_INTR_TYPE_MSIX) == DDI_SUCCESS) {
1631 1617 pvs->intr_type = DDI_INTR_TYPE_MSIX;
1632 1618 } else {
1633 1619 dev_err(pvs->dip, CE_WARN,
1634 1620 "!failed to install MSI-X interrupt handler");
1635 1621 }
1636 1622 } else if ((intr_types & DDI_INTR_TYPE_MSI) != 0 && pvscsi_enable_msi) {
1637 1623 if (pvscsi_register_isr(pvs,
1638 1624 DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
1639 1625 pvs->intr_type = DDI_INTR_TYPE_MSI;
1640 1626 } else {
1641 1627 dev_err(pvs->dip, CE_WARN,
1642 1628 "!failed to install MSI interrupt handler");
1643 1629 }
1644 1630 } else if ((intr_types & DDI_INTR_TYPE_FIXED) != 0) {
1645 1631 if (pvscsi_register_isr(pvs,
1646 1632 DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
1647 1633 pvs->intr_type = DDI_INTR_TYPE_FIXED;
1648 1634 } else {
1649 1635 dev_err(pvs->dip, CE_WARN,
1650 1636 "!failed to install FIXED interrupt handler");
1651 1637 }
1652 1638 }
1653 1639
1654 1640 return (pvs->intr_type == 0 ? DDI_FAILURE : DDI_SUCCESS);
1655 1641 }
1656 1642
1657 1643 static void
1658 1644 pvscsi_wd_thread(pvscsi_softc_t *pvs)
1659 1645 {
1660 1646 clock_t now;
1661 1647 pvscsi_cmd_t *expired, *c, *cn, **pnext;
1662 1648
1663 1649 mutex_enter(&pvs->mutex);
1664 1650 for (;;) {
1665 1651 expired = NULL;
1666 1652 pnext = NULL;
1667 1653 now = ddi_get_lbolt();
1668 1654
1669 1655 for (c = list_head(&pvs->cmd_queue); c != NULL; ) {
1670 1656 cn = list_next(&pvs->cmd_queue, c);
1671 1657
1672 1658 /*
1673 1659 * Commands with 'FLAG_NOINTR' are watched using their
1674 1660 * own timeouts, so we should not touch them.
1675 1661 */
1676 1662 if ((c->pkt->pkt_flags & FLAG_NOINTR) == 0 &&
1677 1663 now > c->timeout_lbolt) {
1678 1664 dev_err(pvs->dip, CE_WARN,
1679 1665 "!expired command: %p (%ld > %ld)",
1680 1666 (void *)c, now, c->timeout_lbolt);
1681 1667 pvscsi_remove_from_queue(c);
1682 1668 if (expired == NULL)
1683 1669 expired = c;
1684 1670 if (pnext == NULL) {
1685 1671 pnext = &c->next_cmd;
1686 1672 } else {
1687 1673 *pnext = c;
1688 1674 pnext = &c->next_cmd;
1689 1675 }
1690 1676 }
1691 1677 c = cn;
1692 1678 }
1693 1679 mutex_exit(&pvs->mutex);
1694 1680
1695 1681 /* Now cancel all expired commands */
1696 1682 if (expired != NULL) {
1697 1683 struct scsi_address sa = {0};
1698 1684 /* Build a fake SCSI address */
1699 1685 sa.a_hba_tran = pvs->tran;
1700 1686 while (expired != NULL) {
1701 1687 c = expired->next_cmd;
1702 1688 sa.a_target = expired->cmd_target;
1703 1689 sa.a_lun = 0;
1704 1690 (void) pvscsi_abort(&sa, CMD2PKT(expired));
1705 1691 expired = c;
1706 1692 }
1707 1693 }
1708 1694
1709 1695 mutex_enter(&pvs->mutex);
1710 1696 if ((pvs->flags & PVSCSI_DRIVER_SHUTDOWN) != 0) {
1711 1697 /* Finish job */
1712 1698 break;
1713 1699 }
1714 1700 if (cv_reltimedwait(&pvs->wd_condvar, &pvs->mutex,
1715 1701 SEC_TO_TICK(1), TR_CLOCK_TICK) > 0) {
1716 1702 /* Explicitly woken up, finish job */
1717 1703 break;
1718 1704 }
1719 1705 }
1720 1706
1721 1707 /* Confirm thread termination */
1722 1708 cv_signal(&pvs->syncvar);
1723 1709 mutex_exit(&pvs->mutex);
1724 1710 }
1725 1711
/*
 * kmem cache constructor for pvscsi_cmd_t.  Allocates the per-command
 * resources that survive across packet reuse: a DMA handle for data
 * transfers and a DMA-mapped consistent buffer for auto request sense
 * (ARQ).  Returns 0 on success, -1 on failure with everything released.
 */
static int
pvscsi_ccache_constructor(void *buf, void *cdrarg, int kmflags)
{
	int (*callback)(caddr_t);
	uint_t cookiec;
	pvscsi_cmd_t *cmd = (pvscsi_cmd_t *)buf;
	pvscsi_softc_t *pvs = cdrarg;
	struct scsi_address ap;

	callback = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
	/* Fake address only used by scsi_alloc_consistent_buf() below */
	ap.a_hba_tran = pvs->tran;
	ap.a_target = 0;
	ap.a_lun = 0;

	/* Allocate a DMA handle for data transfers */
	if ((ddi_dma_alloc_handle(pvs->dip, &pvs->io_dma_attr, callback,
	    NULL, &cmd->cmd_dmahdl)) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to allocate DMA handle");
		return (-1);
	}

	/* Setup ARQ buffer */
	if ((cmd->arqbuf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
	    SENSE_BUFFER_SIZE, B_READ, callback, NULL)) == NULL) {
		dev_err(pvs->dip, CE_WARN, "!failed to allocate ARQ buffer");
		goto free_handle;
	}

	if (ddi_dma_alloc_handle(pvs->dip, &pvs->hba_dma_attr,
	    callback, NULL, &cmd->arqhdl) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN,
		    "!failed to allocate DMA handle for ARQ buffer");
		goto free_arqbuf;
	}

	/* Bind the ARQ buffer; its cookie is fed to the device later */
	if (ddi_dma_buf_bind_handle(cmd->arqhdl, cmd->arqbuf,
	    (DDI_DMA_READ | DDI_DMA_CONSISTENT), callback, NULL,
	    &cmd->arqc, &cookiec) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to bind ARQ buffer");
		goto free_arqhdl;
	}

	return (0);

free_arqhdl:
	ddi_dma_free_handle(&cmd->arqhdl);
free_arqbuf:
	scsi_free_consistent_buf(cmd->arqbuf);
free_handle:
	ddi_dma_free_handle(&cmd->cmd_dmahdl);

	return (-1);
}
1779 1765
/*
 * kmem cache destructor for pvscsi_cmd_t: releases the resources set up
 * by pvscsi_ccache_constructor().  Each DMA handle is unbound before it
 * is freed, and the ARQ buffer is freed only after its handle is gone.
 */
/* ARGSUSED cdrarg */
static void
pvscsi_ccache_destructor(void *buf, void *cdrarg)
{
	pvscsi_cmd_t *cmd = (pvscsi_cmd_t *)buf;

	/* Data transfer DMA handle */
	if (cmd->cmd_dmahdl != NULL) {
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahdl);
		ddi_dma_free_handle(&cmd->cmd_dmahdl);
		cmd->cmd_dmahdl = NULL;
	}

	/* ARQ DMA handle; must go before the ARQ buffer is freed */
	if (cmd->arqhdl != NULL) {
		(void) ddi_dma_unbind_handle(cmd->arqhdl);
		ddi_dma_free_handle(&cmd->arqhdl);
		cmd->arqhdl = NULL;
	}

	if (cmd->arqbuf != NULL) {
		scsi_free_consistent_buf(cmd->arqbuf);
		cmd->arqbuf = NULL;
	}
}
1803 1789
1804 1790 /* tran_* entry points and setup */
1805 1791 /* ARGSUSED hba_dip tgt_dip hba_tran */
1806 1792 static int
1807 1793 pvscsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1808 1794 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1809 1795 {
1810 1796 pvscsi_softc_t *pvs = SDEV2PRIV(sd);
1811 1797
1812 1798 ASSERT(pvs != NULL);
1813 1799
1814 1800 if (sd->sd_address.a_target >= PVSCSI_MAXTGTS)
1815 1801 return (DDI_FAILURE);
1816 1802
1817 1803 return (DDI_SUCCESS);
1818 1804 }
1819 1805
/*
 * tran_start(9E): submit a SCSI packet to the device.  Packets flagged
 * FLAG_NOINTR are executed synchronously by polling the completion
 * ring; everything else completes via interrupt.  Returns TRAN_ACCEPT,
 * TRAN_BUSY (HBA quiescing or no resources), or the polling result.
 */
static int
pvscsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	boolean_t poll = ((pkt->pkt_flags & FLAG_NOINTR) != 0);
	int rc;
	pvscsi_cmd_t *cmd = PKT2CMD(pkt);
	pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;

	ASSERT(cmd->pkt == pkt);
	ASSERT(cmd->cmd_pvs == pvs);

	/*
	 * Reinitialize some fields because the packet may
	 * have been resubmitted.
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;

	/* Zero status byte */
	*(pkt->pkt_scbp) = 0;

	if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0) {
		ASSERT(cmd->cmd_dma_count != 0);
		pkt->pkt_resid = cmd->cmd_dma_count;

		/*
		 * Consistent packets need to be synced first
		 * (only for data going out).
		 */
		if ((cmd->flags & PVSCSI_FLAG_IO_IOPB) != 0) {
			(void) ddi_dma_sync(cmd->cmd_dmahdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
		}
	}

	cmd->cmd_target = ap->a_target;

	/* Refuse new interrupt-driven work while quiescing */
	mutex_enter(&pvs->mutex);
	if (HBA_IS_QUIESCED(pvs) && !poll) {
		mutex_exit(&pvs->mutex);
		return (TRAN_BUSY);
	}
	mutex_exit(&pvs->mutex);

	rc = pvscsi_transport_command(pvs, cmd);

	if (poll) {
		pvscsi_cmd_t *dcmd;
		boolean_t qnotify;

		if (rc == TRAN_ACCEPT)
			rc = pvscsi_poll_cmd(pvs, cmd);

		/* Drain completions that arrived while polling */
		mutex_enter(&pvs->rx_mutex);
		dcmd = pvscsi_process_comp_ring(pvs);
		mutex_exit(&pvs->rx_mutex);

		mutex_enter(&pvs->mutex);
		qnotify = HBA_QUIESCE_PENDING(pvs);
		mutex_exit(&pvs->mutex);

		pvscsi_complete_chained(dcmd);

		if (qnotify)
			pvscsi_quiesce_notify(pvs);
	}

	return (rc);
}
1890 1876
1891 1877 static int
1892 1878 pvscsi_reset(struct scsi_address *ap, int level)
1893 1879 {
1894 1880 pvscsi_softc_t *pvs = AP2PRIV(ap);
1895 1881
1896 1882 switch (level) {
1897 1883 case RESET_ALL:
1898 1884 return (pvscsi_reset_generic(pvs, NULL));
1899 1885 case RESET_TARGET:
1900 1886 ASSERT(ap != NULL);
1901 1887 return (pvscsi_reset_generic(pvs, ap));
1902 1888 default:
1903 1889 return (0);
1904 1890 }
1905 1891 }
1906 1892
/*
 * tran_abort(9E): abort a single packet (pkt != NULL) or all commands
 * outstanding for the addressed target (pkt == NULL).  Commands that
 * complete during cancellation are chained and finished after the ring
 * mutexes are dropped.  Always returns 1, as the abort is best-effort.
 */
static int
pvscsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	boolean_t qnotify = B_FALSE;
	pvscsi_cmd_t *pending;
	pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;

	/* Lock order: tx_mutex before rx_mutex, as elsewhere */
	mutex_enter(&pvs->tx_mutex);
	mutex_enter(&pvs->rx_mutex);
	if (pkt != NULL) {
		/* Abort single command */
		pvscsi_cmd_t *cmd = PKT2CMD(pkt);

		if (pvscsi_abort_cmd(cmd, &pending) == CMD_ABORTED) {
			/* Assume command is completely cancelled now */
			cmd->flags |= PVSCSI_FLAG_ABORTED;
		}
	} else {
		/* Abort all commands on the bus */
		pvscsi_abort_all(ap, pvs, &pending, PVSCSI_FLAG_ABORTED);
	}
	qnotify = HBA_QUIESCE_PENDING(pvs);
	mutex_exit(&pvs->rx_mutex);
	mutex_exit(&pvs->tx_mutex);

	/* Complete cancelled commands outside the ring locks */
	pvscsi_complete_chained(pending);

	if (qnotify)
		pvscsi_quiesce_notify(pvs);

	return (1);
}
1939 1925
1940 1926 /* ARGSUSED tgtonly */
1941 1927 static int
1942 1928 pvscsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
1943 1929 {
1944 1930 pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;
1945 1931
1946 1932 if (cap == NULL)
1947 1933 return (-1);
1948 1934
1949 1935 switch (scsi_hba_lookup_capstr(cap)) {
1950 1936 case SCSI_CAP_ARQ:
1951 1937 return ((pvs->flags & PVSCSI_HBA_AUTO_REQUEST_SENSE) != 0);
1952 1938 case SCSI_CAP_UNTAGGED_QING:
1953 1939 return (1);
1954 1940 default:
1955 1941 return (-1);
1956 1942 }
1957 1943 }
1958 1944
1959 1945 /* ARGSUSED tgtonly */
1960 1946 static int
1961 1947 pvscsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
1962 1948 {
1963 1949 pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;
1964 1950
1965 1951 if (cap == NULL)
1966 1952 return (-1);
1967 1953
1968 1954 switch (scsi_hba_lookup_capstr(cap)) {
1969 1955 case SCSI_CAP_ARQ:
1970 1956 mutex_enter(&pvs->mutex);
1971 1957 if (value == 0)
1972 1958 pvs->flags &= ~PVSCSI_HBA_AUTO_REQUEST_SENSE;
1973 1959 else
1974 1960 pvs->flags |= PVSCSI_HBA_AUTO_REQUEST_SENSE;
1975 1961 mutex_exit(&pvs->mutex);
1976 1962 return (1);
1977 1963 default:
1978 1964 return (0);
1979 1965 }
1980 1966 }
1981 1967
1982 1968 static void
1983 1969 pvscsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1984 1970 {
1985 1971 pvscsi_cmd_t *cmd = PKT2CMD(pkt);
1986 1972 pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;
1987 1973
1988 1974 ASSERT(cmd->cmd_pvs == pvs);
1989 1975
1990 1976 if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0) {
1991 1977 cmd->flags &= ~PVSCSI_FLAG_DMA_VALID;
1992 1978 (void) ddi_dma_unbind_handle(cmd->cmd_dmahdl);
1993 1979 }
1994 1980
1995 1981 if (cmd->ctx != NULL) {
1996 1982 mutex_enter(&pvs->mutex);
1997 1983 pvscsi_release_ctx(cmd);
1998 1984 mutex_exit(&pvs->mutex);
1999 1985 }
2000 1986
2001 1987 if ((cmd->flags & PVSCSI_FLAGS_EXT) != 0)
2002 1988 pvscsi_cmd_ext_free(cmd);
2003 1989
2004 1990 kmem_cache_free(pvs->cmd_cache, cmd);
2005 1991 }
2006 1992
/*
 * tran_init_pkt(9E): allocate and/or DMA-bind a SCSI packet.
 *
 * Three jobs are folded together here, per the SCSA contract:
 *  - pkt == NULL: allocate a fresh command from the kmem cache and wire
 *    up the embedded scsi_pkt;
 *  - cmd_nwin > 0: the caller is continuing a partial DMA transfer, so
 *    move to the next DMA window;
 *  - bp != NULL and not yet bound: bind the buffer for DMA and cache
 *    all cookies for later ring submission.
 *
 * Returns the (possibly newly allocated) packet, or NULL on failure or
 * when all DMA windows are exhausted.  On failure a packet allocated
 * here is destroyed again (see "out:"), so the caller never leaks.
 */
static struct scsi_pkt *
pvscsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, struct buf *bp,
    int cmdlen, int statuslen, int tgtlen, int flags, int (*callback)(),
    caddr_t arg)
{
	boolean_t is_new;
	/* SLEEP_FUNC callers may block; everyone else gets NOSLEEP */
	int kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
	int rc, i;
	pvscsi_cmd_t *cmd;
	pvscsi_softc_t *pvs;

	pvs = ap->a_hba_tran->tran_hba_private;
	ASSERT(pvs != NULL);

	/* Allocate a new SCSI packet */
	if (pkt == NULL) {
		ddi_dma_handle_t saved_dmahdl, saved_arqhdl;
		struct buf *saved_arqbuf;
		ddi_dma_cookie_t saved_arqc;

		is_new = B_TRUE;

		if ((cmd = kmem_cache_alloc(pvs->cmd_cache, kf)) == NULL)
			return (NULL);

		/*
		 * The DMA/ARQ resources were set up by the cache
		 * constructor and survive the bzero() below, so stash
		 * them and restore afterwards.
		 */
		saved_dmahdl = cmd->cmd_dmahdl;
		saved_arqhdl = cmd->arqhdl;
		saved_arqbuf = cmd->arqbuf;
		saved_arqc = cmd->arqc;

		/* cached_cookies is the trailing member; skip zeroing it */
		bzero(cmd, sizeof (pvscsi_cmd_t) -
		    sizeof (cmd->cached_cookies));

		cmd->cmd_pvs = pvs;
		cmd->cmd_dmahdl = saved_dmahdl;
		cmd->arqhdl = saved_arqhdl;
		cmd->arqbuf = saved_arqbuf;
		cmd->arqc = saved_arqc;

		/* Point the embedded scsi_pkt at the command's buffers */
		pkt = &cmd->cached_pkt;
		pkt->pkt_ha_private = (opaque_t)cmd;
		pkt->pkt_address = *ap;
		pkt->pkt_scbp = (uint8_t *)&cmd->cmd_scb;
		pkt->pkt_cdbp = (uint8_t *)&cmd->cmd_cdb;
		pkt->pkt_private = (opaque_t)&cmd->tgt_priv;

		cmd->tgtlen = tgtlen;
		cmd->statuslen = statuslen;
		cmd->cmdlen = cmdlen;
		cmd->pkt = pkt;
		cmd->ctx = NULL;

		/*
		 * Allocate extended buffers when the requested CDB,
		 * status or target-private areas exceed the inline ones.
		 */
		if ((cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (statuslen > sizeof (cmd->cmd_scb)) ||
		    (tgtlen > sizeof (cmd->tgt_priv))) {
			if (pvscsi_cmd_ext_alloc(pvs, cmd, kf) != DDI_SUCCESS) {
				dev_err(pvs->dip, CE_WARN,
				    "!extent allocation failed");
				goto out;
			}
		}
	} else {
		is_new = B_FALSE;

		/* Reused packet: keep only the persistent flag bits */
		cmd = PKT2CMD(pkt);
		cmd->flags &= PVSCSI_FLAGS_PERSISTENT;
	}

	ASSERT((cmd->flags & PVSCSI_FLAG_TRANSPORT) == 0);

	if ((flags & PKT_XARQ) != 0)
		cmd->flags |= PVSCSI_FLAG_XARQ;

	/* Handle partial DMA transfers: advance to the next DMA window */
	if (cmd->cmd_nwin > 0) {
		if (++cmd->cmd_winindex >= cmd->cmd_nwin)
			return (NULL);
		if (ddi_dma_getwin(cmd->cmd_dmahdl, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_dmac, &cmd->cmd_dmaccount) == DDI_FAILURE)
			return (NULL);
		goto handle_dma_cookies;
	}

	/* Setup data buffer */
	if (bp != NULL && bp->b_bcount > 0 &&
	    (cmd->flags & PVSCSI_FLAG_DMA_VALID) == 0) {
		int dma_flags;

		ASSERT(cmd->cmd_dmahdl != NULL);

		/* Direction and consistency come from the buf and flags */
		if ((bp->b_flags & B_READ) != 0) {
			cmd->flags |= PVSCSI_FLAG_IO_READ;
			dma_flags = DDI_DMA_READ;
		} else {
			cmd->flags &= ~PVSCSI_FLAG_IO_READ;
			dma_flags = DDI_DMA_WRITE;
		}
		if ((flags & PKT_CONSISTENT) != 0) {
			cmd->flags |= PVSCSI_FLAG_IO_IOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}
		if ((flags & PKT_DMA_PARTIAL) != 0)
			dma_flags |= DDI_DMA_PARTIAL;

		rc = ddi_dma_buf_bind_handle(cmd->cmd_dmahdl, bp,
		    dma_flags, callback, arg, &cmd->cmd_dmac,
		    &cmd->cmd_dmaccount);
		if (rc == DDI_DMA_PARTIAL_MAP) {
			/* Partial mapping: start at window 0 */
			(void) ddi_dma_numwin(cmd->cmd_dmahdl,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahdl,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_dmac,
			    &cmd->cmd_dmaccount);
		} else if (rc != 0 && rc != DDI_DMA_MAPPED) {
			/* Translate DDI DMA errors into bioerror codes */
			switch (rc) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->flags &= ~PVSCSI_FLAG_DMA_VALID;
			goto out;
		}

handle_dma_cookies:
		ASSERT(cmd->cmd_dmaccount > 0);
		/* The ring S/G list can hold at most PVSCSI_MAX_SG_SIZE */
		if (cmd->cmd_dmaccount > PVSCSI_MAX_SG_SIZE) {
			dev_err(pvs->dip, CE_WARN,
			    "!invalid cookie count: %d (max %d)",
			    cmd->cmd_dmaccount, PVSCSI_MAX_SG_SIZE);
			bioerror(bp, EINVAL);
			goto out;
		}

		cmd->flags |= PVSCSI_FLAG_DMA_VALID;
		cmd->cmd_dma_count = cmd->cmd_dmac.dmac_size;
		cmd->cmd_total_dma_count += cmd->cmd_dmac.dmac_size;

		cmd->cached_cookies[0] = cmd->cmd_dmac;

		/*
		 * Calculate total amount of bytes for this I/O and
		 * store cookies for further processing.
		 */
		for (i = 1; i < cmd->cmd_dmaccount; i++) {
			ddi_dma_nextcookie(cmd->cmd_dmahdl, &cmd->cmd_dmac);
			cmd->cached_cookies[i] = cmd->cmd_dmac;
			cmd->cmd_dma_count += cmd->cmd_dmac.dmac_size;
			cmd->cmd_total_dma_count += cmd->cmd_dmac.dmac_size;
		}

		/* Anything not covered by this window remains as residual */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_total_dma_count);
	}

	return (pkt);

out:
	/* Only destroy what this call allocated */
	if (is_new)
		pvscsi_destroy_pkt(ap, pkt);

	return (NULL);
}
2180 2166
2181 2167 /* ARGSUSED ap */
2182 2168 static void
2183 2169 pvscsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2184 2170 {
2185 2171 pvscsi_cmd_t *cmd = PKT2CMD(pkt);
2186 2172
2187 2173 if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0) {
2188 2174 (void) ddi_dma_unbind_handle(cmd->cmd_dmahdl);
2189 2175 cmd->flags &= ~PVSCSI_FLAG_DMA_VALID;
2190 2176 }
2191 2177 }
2192 2178
2193 2179 /* ARGSUSED ap */
2194 2180 static void
2195 2181 pvscsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2196 2182 {
2197 2183 pvscsi_cmd_t *cmd = PKT2CMD(pkt);
2198 2184
2199 2185 if (cmd->cmd_dmahdl != NULL) {
2200 2186 (void) ddi_dma_sync(cmd->cmd_dmahdl, 0, 0,
2201 2187 (cmd->flags & PVSCSI_FLAG_IO_READ) ?
2202 2188 DDI_DMA_SYNC_FORCPU : DDI_DMA_SYNC_FORDEV);
2203 2189 }
2204 2190
2205 2191 }
2206 2192
2207 2193 /* ARGSUSED ap flag callback arg */
2208 2194 static int
2209 2195 pvscsi_reset_notify(struct scsi_address *ap, int flag,
2210 2196 void (*callback)(caddr_t), caddr_t arg)
2211 2197 {
2212 2198 return (DDI_FAILURE);
2213 2199 }
2214 2200
2215 2201 static int
2216 2202 pvscsi_quiesce_hba(dev_info_t *dip)
2217 2203 {
2218 2204 pvscsi_softc_t *pvs;
2219 2205 scsi_hba_tran_t *tran;
2220 2206
2221 2207 if ((tran = ddi_get_driver_private(dip)) == NULL ||
2222 2208 (pvs = TRAN2PRIV(tran)) == NULL)
2223 2209 return (-1);
2224 2210
2225 2211 mutex_enter(&pvs->mutex);
2226 2212 if (!HBA_IS_QUIESCED(pvs))
2227 2213 pvs->flags |= PVSCSI_HBA_QUIESCED;
2228 2214
2229 2215 if (pvs->cmd_queue_len != 0) {
2230 2216 /* Outstanding commands present, wait */
2231 2217 pvs->flags |= PVSCSI_HBA_QUIESCE_PENDING;
2232 2218 cv_wait(&pvs->quiescevar, &pvs->mutex);
2233 2219 ASSERT(pvs->cmd_queue_len == 0);
2234 2220 }
2235 2221 mutex_exit(&pvs->mutex);
2236 2222
2237 2223 /* Suspend taskq delivery and complete all scheduled tasks */
2238 2224 ddi_taskq_suspend(pvs->msg_tq);
2239 2225 ddi_taskq_wait(pvs->msg_tq);
2240 2226 ddi_taskq_suspend(pvs->comp_tq);
2241 2227 ddi_taskq_wait(pvs->comp_tq);
2242 2228
2243 2229 return (0);
2244 2230 }
2245 2231
2246 2232 static int
2247 2233 pvscsi_unquiesce_hba(dev_info_t *dip)
2248 2234 {
2249 2235 pvscsi_softc_t *pvs;
2250 2236 scsi_hba_tran_t *tran;
2251 2237
2252 2238 if ((tran = ddi_get_driver_private(dip)) == NULL ||
2253 2239 (pvs = TRAN2PRIV(tran)) == NULL)
2254 2240 return (-1);
2255 2241
2256 2242 mutex_enter(&pvs->mutex);
2257 2243 if (!HBA_IS_QUIESCED(pvs)) {
2258 2244 mutex_exit(&pvs->mutex);
2259 2245 return (0);
2260 2246 }
2261 2247 ASSERT(pvs->cmd_queue_len == 0);
2262 2248 pvs->flags &= ~PVSCSI_HBA_QUIESCED;
2263 2249 mutex_exit(&pvs->mutex);
2264 2250
2265 2251 /* Resume taskq delivery */
2266 2252 ddi_taskq_resume(pvs->msg_tq);
2267 2253 ddi_taskq_resume(pvs->comp_tq);
2268 2254
2269 2255 return (0);
2270 2256 }
2271 2257
2272 2258 static int
2273 2259 pvscsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
2274 2260 void *arg, dev_info_t **childp)
2275 2261 {
2276 2262 char *p;
2277 2263 int circ;
2278 2264 int ret = NDI_FAILURE;
2279 2265 long target = 0;
2280 2266 pvscsi_softc_t *pvs;
2281 2267 scsi_hba_tran_t *tran;
2282 2268
2283 2269 tran = ddi_get_driver_private(pdip);
2284 2270 pvs = tran->tran_hba_private;
2285 2271
2286 2272 ndi_devi_enter(pdip, &circ);
2287 2273 switch (op) {
2288 2274 case BUS_CONFIG_ONE:
2289 2275 if ((p = strrchr((char *)arg, '@')) != NULL &&
2290 2276 ddi_strtol(p + 1, NULL, 16, &target) == 0)
2291 2277 ret = pvscsi_config_one(pdip, pvs, (int)target, childp);
2292 2278 break;
2293 2279 case BUS_CONFIG_DRIVER:
2294 2280 case BUS_CONFIG_ALL:
2295 2281 ret = pvscsi_config_all(pdip, pvs);
2296 2282 break;
2297 2283 default:
2298 2284 break;
2299 2285 }
2300 2286
2301 2287 if (ret == NDI_SUCCESS)
2302 2288 ret = ndi_busop_bus_config(pdip, flags, op, arg, childp, 0);
2303 2289 ndi_devi_exit(pdip, circ);
2304 2290
2305 2291 return (ret);
2306 2292 }
2307 2293
/*
 * Allocate and populate the SCSA tran structure and attach this
 * instance as an HBA.  Unimplemented entry points are left NULL so the
 * framework supplies defaults or reports the feature as unsupported.
 * Returns 0 on success, -1 on failure (tran freed, pvs->tran cleared).
 */
static int
pvscsi_hba_setup(pvscsi_softc_t *pvs)
{
	scsi_hba_tran_t	*hba_tran;

	/* SCSI_HBA_CANSLEEP: allocation may block, so this cannot fail */
	hba_tran = pvs->tran = scsi_hba_tran_alloc(pvs->dip,
	    SCSI_HBA_CANSLEEP);
	ASSERT(pvs->tran != NULL);

	hba_tran->tran_hba_private = pvs;
	hba_tran->tran_tgt_private = NULL;

	hba_tran->tran_tgt_init = pvscsi_tgt_init;
	hba_tran->tran_tgt_free = NULL;
	hba_tran->tran_tgt_probe = scsi_hba_probe;

	/* Command submission and control entry points */
	hba_tran->tran_start = pvscsi_start;
	hba_tran->tran_reset = pvscsi_reset;
	hba_tran->tran_abort = pvscsi_abort;
	hba_tran->tran_getcap = pvscsi_getcap;
	hba_tran->tran_setcap = pvscsi_setcap;
	hba_tran->tran_init_pkt = pvscsi_init_pkt;
	hba_tran->tran_destroy_pkt = pvscsi_destroy_pkt;

	hba_tran->tran_dmafree = pvscsi_dmafree;
	hba_tran->tran_sync_pkt = pvscsi_sync_pkt;
	hba_tran->tran_reset_notify = pvscsi_reset_notify;

	hba_tran->tran_quiesce = pvscsi_quiesce_hba;
	hba_tran->tran_unquiesce = pvscsi_unquiesce_hba;
	hba_tran->tran_bus_reset = NULL;

	/* No event support */
	hba_tran->tran_add_eventcall = NULL;
	hba_tran->tran_get_eventcookie = NULL;
	hba_tran->tran_post_event = NULL;
	hba_tran->tran_remove_eventcall = NULL;

	hba_tran->tran_bus_config = pvscsi_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	/* CDB/SCB managed by framework; CLONE gives per-target tran copies */
	if (scsi_hba_attach_setup(pvs->dip, &pvs->hba_dma_attr, hba_tran,
	    SCSI_HBA_TRAN_CDB | SCSI_HBA_TRAN_SCB | SCSI_HBA_TRAN_CLONE) !=
	    DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to attach HBA");
		scsi_hba_tran_free(hba_tran);
		pvs->tran = NULL;
		return (-1);
	}

	return (0);
}
2360 2346
/*
 * attach(9E) entry point: allocate per-instance soft state, set up the
 * device I/O region, DMA rings, interrupts, S/G buffers, the SCSA HBA
 * tran and the taskqs, then launch the watchdog thread.  The goto
 * chain at the bottom unwinds, in reverse order, exactly what has been
 * set up so far when any step fails.
 */
static int
pvscsi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	pvscsi_softc_t *pvs;
	char buf[32];

	ASSERT(scsi_hba_iport_unit_address(dip) == NULL);

	/*
	 * NOTE(review): DDI_RESUME takes the same full-initialization
	 * path as DDI_ATTACH here — confirm this is intentional.
	 */
	switch (cmd) {
	case DDI_ATTACH:
	case DDI_RESUME:
		break;
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);

	/* Allocate softstate information */
	if (ddi_soft_state_zalloc(pvscsi_sstate, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "!ddi_soft_state_zalloc() failed for instance %d",
		    instance);
		return (DDI_FAILURE);
	}

	if ((pvs = ddi_get_soft_state(pvscsi_sstate, instance)) == NULL) {
		cmn_err(CE_WARN, "!failed to get soft state for instance %d",
		    instance);
		goto fail;
	}

	/*
	 * Indicate that we are 'sizeof (scsi_*(9S))' clean, we use
	 * scsi_pkt_size() instead.
	 */
	scsi_size_clean(dip);

	/* Setup HBA instance */
	pvs->instance = instance;
	pvs->dip = dip;
	pvs->hba_dma_attr = pvscsi_hba_dma_attr;
	pvs->ring_dma_attr = pvscsi_ring_dma_attr;
	pvs->io_dma_attr = pvscsi_io_dma_attr;
	mutex_init(&pvs->mutex, "pvscsi instance mutex", MUTEX_DRIVER, NULL);
	mutex_init(&pvs->intr_mutex, "pvscsi instance interrupt mutex",
	    MUTEX_DRIVER, NULL);
	mutex_init(&pvs->rx_mutex, "pvscsi rx ring mutex", MUTEX_DRIVER, NULL);
	mutex_init(&pvs->tx_mutex, "pvscsi tx ring mutex", MUTEX_DRIVER, NULL);
	list_create(&pvs->cmd_ctx_pool, sizeof (pvscsi_cmd_ctx_t),
	    offsetof(pvscsi_cmd_ctx_t, list));
	list_create(&pvs->devnodes, sizeof (pvscsi_device_t),
	    offsetof(pvscsi_device_t, list));
	list_create(&pvs->cmd_queue, sizeof (pvscsi_cmd_t),
	    offsetof(pvscsi_cmd_t, cmd_queue_node));
	cv_init(&pvs->syncvar, "pvscsi synchronization cv", CV_DRIVER, NULL);
	cv_init(&pvs->wd_condvar, "pvscsi watchdog cv", CV_DRIVER, NULL);
	cv_init(&pvs->quiescevar, "pvscsi quiesce cv", CV_DRIVER, NULL);

	/* Per-instance command cache; constructor pre-allocates DMA/ARQ */
	(void) sprintf(buf, "pvscsi%d_cache", instance);
	pvs->cmd_cache = kmem_cache_create(buf, sizeof (pvscsi_cmd_t), 0,
	    pvscsi_ccache_constructor, pvscsi_ccache_destructor, NULL,
	    (void *)pvs, NULL, 0);
	if (pvs->cmd_cache == NULL) {
		dev_err(pvs->dip, CE_WARN,
		    "!failed to create a cache for SCSI commands");
		goto fail;
	}

	if ((pvscsi_setup_io(pvs)) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to setup I/O region");
		goto free_cache;
	}

	/* Put the device into a known state before touching the rings */
	pvscsi_reset_hba(pvs);

	if ((pvscsi_allocate_rings(pvs)) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to allocate DMA rings");
		goto free_io;
	}

	pvscsi_setup_rings(pvs);

	if (pvscsi_setup_isr(pvs) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to setup ISR");
		goto free_rings;
	}

	if (pvscsi_setup_sg(pvs) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to setup S/G");
		goto free_intr;
	}

	if (pvscsi_hba_setup(pvs) != 0) {
		dev_err(pvs->dip, CE_WARN, "!failed to setup HBA");
		goto free_sg;
	}

	/* One completion worker per CPU, capped at the taskq limit */
	if ((pvs->comp_tq = ddi_taskq_create(pvs->dip, "comp_tq",
	    MIN(UINT16_MAX, ncpus), TASKQ_DEFAULTPRI, 0)) == NULL) {
		dev_err(pvs->dip, CE_WARN,
		    "!failed to create completion taskq");
		goto free_sg;
	}

	/* Single-threaded message queue keeps device events ordered */
	if ((pvs->msg_tq = ddi_taskq_create(pvs->dip, "msg_tq",
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		dev_err(pvs->dip, CE_WARN,
		    "!failed to create message taskq");
		goto free_comp_tq;
	}

	if (pvscsi_enable_intrs(pvs) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to enable interrupts");
		goto free_msg_tq;
	}

	/* Launch watchdog thread */
	pvs->wd_thread = thread_create(NULL, 0, pvscsi_wd_thread, pvs, 0, &p0,
	    TS_RUN, minclsyspri);

	return (DDI_SUCCESS);

free_msg_tq:
	ddi_taskq_destroy(pvs->msg_tq);
free_comp_tq:
	ddi_taskq_destroy(pvs->comp_tq);
free_sg:
	pvscsi_free_sg(pvs);
free_intr:
	pvscsi_free_intr_resources(pvs);
free_rings:
	pvscsi_reset_hba(pvs);
	pvscsi_free_rings(pvs);
free_io:
	pvscsi_free_io(pvs);
free_cache:
	kmem_cache_destroy(pvs->cmd_cache);
fail:
	ddi_soft_state_free(pvscsi_sstate, instance);

	return (DDI_FAILURE);
}
2505 2491
2506 2492 static int
2507 2493 pvscsi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2508 2494 {
2509 2495 int instance;
2510 2496 pvscsi_softc_t *pvs;
2511 2497
2512 2498 switch (cmd) {
2513 2499 case DDI_DETACH:
2514 2500 break;
2515 2501 default:
2516 2502 return (DDI_FAILURE);
2517 2503 }
2518 2504
2519 2505 instance = ddi_get_instance(dip);
2520 2506 if ((pvs = ddi_get_soft_state(pvscsi_sstate, instance)) == NULL) {
2521 2507 cmn_err(CE_WARN, "!failed to get soft state for instance %d",
2522 2508 instance);
2523 2509 return (DDI_FAILURE);
2524 2510 }
2525 2511
2526 2512 pvscsi_reset_hba(pvs);
2527 2513 pvscsi_free_intr_resources(pvs);
2528 2514
2529 2515 /* Shutdown message taskq */
2530 2516 ddi_taskq_wait(pvs->msg_tq);
2531 2517 ddi_taskq_destroy(pvs->msg_tq);
2532 2518
2533 2519 /* Shutdown completion taskq */
2534 2520 ddi_taskq_wait(pvs->comp_tq);
2535 2521 ddi_taskq_destroy(pvs->comp_tq);
2536 2522
2537 2523 /* Shutdown watchdog thread */
2538 2524 mutex_enter(&pvs->mutex);
2539 2525 pvs->flags |= PVSCSI_DRIVER_SHUTDOWN;
2540 2526 cv_signal(&pvs->wd_condvar);
2541 2527 cv_wait(&pvs->syncvar, &pvs->mutex);
2542 2528 mutex_exit(&pvs->mutex);
2543 2529
2544 2530 pvscsi_free_sg(pvs);
2545 2531 pvscsi_free_rings(pvs);
2546 2532 pvscsi_free_io(pvs);
2547 2533
2548 2534 kmem_cache_destroy(pvs->cmd_cache);
2549 2535
2550 2536 mutex_destroy(&pvs->mutex);
2551 2537 mutex_destroy(&pvs->intr_mutex);
2552 2538 mutex_destroy(&pvs->rx_mutex);
2553 2539
2554 2540 cv_destroy(&pvs->syncvar);
2555 2541 cv_destroy(&pvs->wd_condvar);
2556 2542 cv_destroy(&pvs->quiescevar);
2557 2543
2558 2544 ddi_soft_state_free(pvscsi_sstate, instance);
2559 2545 ddi_prop_remove_all(dip);
2560 2546
2561 2547 return (DDI_SUCCESS);
2562 2548 }
2563 2549
2564 2550 static int
2565 2551 pvscsi_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
2566 2552 int *rval)
2567 2553 {
2568 2554 int ret;
2569 2555
2570 2556 if (ddi_get_soft_state(pvscsi_sstate, getminor(dev)) == NULL) {
2571 2557 cmn_err(CE_WARN, "!invalid device instance: %d", getminor(dev));
2572 2558 return (ENXIO);
2573 2559 }
2574 2560
2575 2561 /* Try to handle command in a common way */
2576 2562 if ((ret = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval)) != ENOTTY)
2577 2563 return (ret);
2578 2564
2579 2565 cmn_err(CE_WARN, "!unsupported IOCTL command: 0x%X", cmd);
2580 2566
2581 2567 return (ENXIO);
2582 2568 }
2583 2569
2584 2570 static int
2585 2571 pvscsi_quiesce(dev_info_t *devi)
2586 2572 {
2587 2573 scsi_hba_tran_t *tran;
2588 2574 pvscsi_softc_t *pvs;
2589 2575
2590 2576 if ((tran = ddi_get_driver_private(devi)) == NULL)
2591 2577 return (DDI_SUCCESS);
2592 2578
2593 2579 if ((pvs = tran->tran_hba_private) == NULL)
2594 2580 return (DDI_SUCCESS);
2595 2581
2596 2582 /* Mask all interrupts from device */
2597 2583 pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_MASK, 0);
2598 2584
2599 2585 /* Reset the HBA */
2600 2586 pvscsi_reset_hba(pvs);
2601 2587
2602 2588 return (DDI_SUCCESS);
2603 2589 }
2604 2590
2605 2591 /* module */
2606 2592
/*
 * Character device entry points.  Open/close are delegated to the SCSA
 * framework; only ioctl has a driver-specific handler.  Everything
 * else is unsupported (nodev).
 */
static struct cb_ops pvscsi_cb_ops = {
	.cb_open =	scsi_hba_open,
	.cb_close =	scsi_hba_close,
	.cb_strategy =	nodev,
	.cb_print =	nodev,
	.cb_dump =	nodev,
	.cb_read =	nodev,
	.cb_write =	nodev,
	.cb_ioctl =	pvscsi_ioctl,
	.cb_devmap =	nodev,
	.cb_mmap =	nodev,
	.cb_segmap =	nodev,
	.cb_chpoll =	nochpoll,
	.cb_prop_op =	ddi_prop_op,
	.cb_str =	NULL,
	.cb_flag =	D_MP,	/* safe for concurrent access */
	.cb_rev =	CB_REV,
	.cb_aread =	nodev,
	.cb_awrite =	nodev
};
2627 2613
/*
 * Device operations.  attach/detach/quiesce are implemented above;
 * getinfo is unneeded because SCSA manages the minor nodes.
 */
static struct dev_ops pvscsi_ops = {
	.devo_rev =	DEVO_REV,
	.devo_refcnt =	0,
	.devo_getinfo =	ddi_no_info,
	.devo_identify =	nulldev,
	.devo_probe =	nulldev,
	.devo_attach =	pvscsi_attach,
	.devo_detach =	pvscsi_detach,
	.devo_reset =	nodev,
	.devo_cb_ops =	&pvscsi_cb_ops,
	.devo_bus_ops =	NULL,
	.devo_power =	NULL,
	.devo_quiesce =	pvscsi_quiesce
};
2642 2628
/* Module name reported by modinfo(1M) */
#define	PVSCSI_IDENT "VMware PVSCSI"

/* Loadable driver module descriptor */
static struct modldrv modldrv = {
	&mod_driverops,
	PVSCSI_IDENT,
	&pvscsi_ops,
};

/* Single-module linkage, NULL-terminated */
static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
2656 2642
2657 2643 int
2658 2644 _init(void)
2659 2645 {
2660 2646 int ret;
2661 2647
2662 2648 if ((ret = ddi_soft_state_init(&pvscsi_sstate,
2663 2649 sizeof (struct pvscsi_softc), PVSCSI_INITIAL_SSTATE_ITEMS)) != 0) {
2664 2650 cmn_err(CE_WARN, "!ddi_soft_state_init() failed");
2665 2651 return (ret);
2666 2652 }
2667 2653
2668 2654 if ((ret = scsi_hba_init(&modlinkage)) != 0) {
2669 2655 cmn_err(CE_WARN, "!scsi_hba_init() failed");
2670 2656 ddi_soft_state_fini(&pvscsi_sstate);
2671 2657 return (ret);
2672 2658 }
2673 2659
2674 2660 if ((ret = mod_install(&modlinkage)) != 0) {
2675 2661 cmn_err(CE_WARN, "!mod_install() failed");
2676 2662 ddi_soft_state_fini(&pvscsi_sstate);
2677 2663 scsi_hba_fini(&modlinkage);
2678 2664 }
2679 2665
2680 2666 return (ret);
2681 2667 }
2682 2668
/* _info(9E): report module information via the linkage structure. */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
2688 2674
2689 2675 int
2690 2676 _fini(void)
2691 2677 {
2692 2678 int ret;
2693 2679
2694 2680 if ((ret = mod_remove(&modlinkage)) == 0) {
2695 2681 ddi_soft_state_fini(&pvscsi_sstate);
2696 2682 scsi_hba_fini(&modlinkage);
2697 2683 }
2698 2684
2699 2685 return (ret);
2700 2686 }
|
↓ open down ↓ |
2314 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX