Print this page
| Split |
Close |
| Expand all |
| Collapse all |
--- old/./mr_sas.c
+++ new/./mr_sas.c
1 1 /*
2 2 * mr_sas.c: source for mr_sas driver
3 3 *
4 4 * Solaris MegaRAID device driver for SAS2.0 controllers
5 5 * Copyright (c) 2008-2012, LSI Logic Corporation.
6 6 * All rights reserved.
7 7 *
8 8 * Version:
9 9 * Author:
10 10 * Swaminathan K S
11 11 * Arun Chandrashekhar
12 12 * Manju R
13 13 * Rasheed
14 14 * Shakeel Bukhari
15 15 */
16 16
17 17 #include <sys/types.h>
18 18 #include <sys/param.h>
19 19 #include <sys/file.h>
20 20 #include <sys/errno.h>
21 21 #include <sys/open.h>
22 22 #include <sys/cred.h>
23 23 #include <sys/modctl.h>
24 24 #include <sys/conf.h>
25 25 #include <sys/devops.h>
26 26 #include <sys/cmn_err.h>
27 27 #include <sys/kmem.h>
28 28 #include <sys/stat.h>
29 29 #include <sys/mkdev.h>
30 30 #include <sys/pci.h>
31 31 #include <sys/scsi/scsi.h>
32 32 #include <sys/ddi.h>
33 33 #include <sys/sunddi.h>
34 34 #include <sys/atomic.h>
35 35 #include <sys/signal.h>
36 36 #include <sys/byteorder.h>
37 37 #include <sys/sdt.h>
38 38 #include <sys/fs/dv_node.h> /* devfs_clean */
39 39
40 40 #include "mr_sas.h"
41 41
/*
 * Local static data
 *
 * The "debug_*" and "msi_enable" variables are declared volatile so they
 * can be patched live with mdb/kmdb; they are module-global tunables, not
 * per-instance state.
 */
/* Per-instance soft-state anchor, initialized in _init(). */
static void *mrsas_state = NULL;
/* When nonzero, DDI_DMA_RELAXED_ORDERING is OR'ed into the DMA attrs. */
static volatile boolean_t mrsas_relaxed_ordering = 0;
/* Console-log verbosity for con_log(); CL_NONE silences debug output. */
volatile int debug_level_g = CL_NONE;

/* 0 forces FIXED interrupts even if MSI/MSI-X is supported (see attach). */
static volatile int msi_enable = 1;

/* Default Timeout value to issue online controller reset */
volatile int debug_timeout_g = 0xF0; /* previously 0xB4 */
/* Simulate consecutive firmware fault */
static volatile int debug_fw_faults_after_ocr_g = 0;
#ifdef OCRDEBUG
/* Simulate three consecutive timeout for an IO */
static volatile int debug_consecutive_timeout_after_ocr_g = 0;
#endif

/* Enable OCR on firmware fault */
static volatile int debug_support_ocr_isr_g = 0;
/* Weak references: tolerate older frameworks lacking these SCSA entry points */
#pragma weak scsi_hba_open
#pragma weak scsi_hba_close
#pragma weak scsi_hba_ioctl
/*
 * Hardware-access vtable for MFI ("ppc") register-interface controllers
 * (2108 generation).  mrsas_attach() selects one of these templates by
 * PCI device id and stores it in instance->func_ptr.
 */
static struct mrsas_function_template mrsas_function_template_ppc = {
	.read_fw_status_reg = read_fw_status_reg_ppc,
	.issue_cmd = issue_cmd_ppc,
	.issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
	.issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
	.enable_intr = enable_intr_ppc,
	.disable_intr = disable_intr_ppc,
	.intr_ack = intr_ack_ppc,
	.init_adapter = mrsas_init_adapter_ppc
	/* .reset_adapter = mrsas_reset_adapter_ppc -- not wired up yet */
};


/*
 * Hardware-access vtable for Fusion ("Thunderbolt"/2208, Invader)
 * controllers; used when instance->tbolt is set in mrsas_attach().
 */
static struct mrsas_function_template mrsas_function_template_fusion = {
	.read_fw_status_reg = tbolt_read_fw_status_reg,
	.issue_cmd = tbolt_issue_cmd,
	.issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
	.issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
	.enable_intr = tbolt_enable_intr,
	.disable_intr = tbolt_disable_intr,
	.intr_ack = tbolt_intr_ack,
	.init_adapter = mrsas_init_adapter_tbolt
	/* .reset_adapter = mrsas_reset_adapter_tbolt -- not wired up yet */
};
92 92
93 93
/*
 * Baseline DMA attributes for the controller.  attach() copies this and
 * overrides dma_attr_sgllen with the adapter's max_num_sge before calling
 * scsi_hba_attach_setup(); DDI_DMA_RELAXED_ORDERING may be OR'ed into
 * dma_attr_flags when mrsas_relaxed_ordering is set.
 */
ddi_dma_attr_t mrsas_generic_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xFFFFFFFFU,		/* high DMA address range */
	0xFFFFFFFFU,		/* DMA counter register */
	8,			/* DMA address alignment */
	0x07,			/* DMA burstsizes */
	1,			/* min DMA size */
	0xFFFFFFFFU,		/* max DMA size */
	0xFFFFFFFFU,		/* segment boundary */
	MRSAS_MAX_SGE_CNT,	/* dma_attr_sglen */
	512,			/* granularity of device */
	0			/* bus specific DMA flags */
};

/* Max transfer cap for MFI-path controllers: 16 MB. */
int32_t mrsas_max_cap_maxxfer = 0x1000000;

/*
 * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG.
 * Limit transfer size to 256K (512 * 512 bytes).
 */
uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
113 113
/*
 * cb_ops contains base level routines
 *
 * Only open/close/ioctl are implemented: the driver exposes character
 * minor nodes purely for management ioctls (devctl, scsi, lsirdctl);
 * all data-path I/O goes through the SCSA tran_* entry points instead.
 */
static struct cb_ops mrsas_cb_ops = {
	mrsas_open,		/* open */
	mrsas_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mrsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	nodev,			/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
137 137
/*
 * dev_ops contains configuration routines
 *
 * NOTE(review): the quiesce member is only present under __SunOS_5_11;
 * on older builds the struct is shorter -- presumably matched by that
 * release's DEVO_REV layout.  Verify against the target DDI headers.
 */
static struct dev_ops mrsas_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	mrsas_getinfo,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mrsas_attach,		/* attach */
	mrsas_detach,		/* detach */
#if defined(__SunOS_5_11)
	nodev,			/* reset (obsolete on S11) */
#else
	mrsas_reset,		/* reset */
#endif /* defined(__SunOS_5_11) */
	&mrsas_cb_ops,		/* char/block ops */
	NULL,			/* bus ops */
	NULL,			/* power */
#ifdef __SunOS_5_11
	mrsas_quiesce		/* quiesce */
#endif /* __SunOS_5_11 */

};

/* Module dependency: the SCSA framework must be loaded first. */
char _depends_on[] = "misc/scsi";

/* Loadable-module linkage: this module is a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,		/* module type - driver */
	MRSAS_VERSION,
	&mrsas_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev - must be MODREV_1 */
	&modldrv,		/* ml_linkage */
	NULL			/* end of driver linkage */
};

/*
 * Register-access attributes: the controller's registers are little-endian
 * and accesses must not be reordered by the bus nexus.
 */
static struct ddi_device_acc_attr endian_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};


/* Fast-Path I/O enabled by default; overridden by "mrsas-enable-fp"=no. */
unsigned int enable_fp = 1;
186 186
187 187
188 188 /*
189 189 * ************************************************************************** *
190 190 * *
191 191 * common entry points - for loadable kernel modules *
192 192 * *
193 193 * ************************************************************************** *
194 194 */
195 195
196 196 /*
197 197 * _init - initialize a loadable module
198 198 * @void
199 199 *
200 200 * The driver should perform any one-time resource allocation or data
201 201 * initialization during driver loading in _init(). For example, the driver
202 202 * should initialize any mutexes global to the driver in this routine.
203 203 * The driver should not, however, use _init() to allocate or initialize
204 204 * anything that has to do with a particular instance of the device.
205 205 * Per-instance initialization must be done in attach().
206 206 */
207 207 int
208 208 _init(void)
209 209 {
210 210 int ret;
211 211
212 212 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
213 213
214 214 ret = ddi_soft_state_init(&mrsas_state,
215 215 sizeof (struct mrsas_instance), 0);
216 216
217 217 if (ret != DDI_SUCCESS) {
218 218 cmn_err(CE_WARN, "mr_sas: could not init state");
219 219 return (ret);
220 220 }
221 221
222 222 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
223 223 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
224 224 ddi_soft_state_fini(&mrsas_state);
225 225 return (ret);
226 226 }
227 227
228 228 ret = mod_install(&modlinkage);
229 229
230 230 if (ret != DDI_SUCCESS) {
231 231 cmn_err(CE_WARN, "mr_sas: mod_install failed");
232 232 scsi_hba_fini(&modlinkage);
233 233 ddi_soft_state_fini(&mrsas_state);
234 234 }
235 235
236 236 return (ret);
237 237 }
238 238
239 239 /*
240 240 * _info - returns information about a loadable module.
241 241 * @void
242 242 *
243 243 * _info() is called to return module information. This is a typical entry
244 244 * point that does predefined role. It simply calls mod_info().
245 245 */
246 246 int
247 247 _info(struct modinfo *modinfop)
248 248 {
249 249 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
250 250
251 251 return (mod_info(&modlinkage, modinfop));
252 252 }
253 253
254 254 /*
255 255 * _fini - prepare a loadable module for unloading
256 256 * @void
257 257 *
258 258 * In _fini(), the driver should release any resources that were allocated in
259 259 * _init(). The driver must remove itself from the system module list.
260 260 */
261 261 int
262 262 _fini(void)
263 263 {
264 264 int ret;
265 265
266 266 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
267 267
268 268 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS)
269 269 {
270 270 con_log(CL_ANN1, (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
271 271 return (ret);
272 272 }
273 273
274 274 scsi_hba_fini(&modlinkage);
275 275 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
276 276
277 277 ddi_soft_state_fini(&mrsas_state);
278 278 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
279 279
280 280 return (ret);
281 281 }
282 282
283 283
284 284 /*
285 285 * ************************************************************************** *
286 286 * *
287 287 * common entry points - for autoconfiguration *
288 288 * *
289 289 * ************************************************************************** *
290 290 */
291 291 /*
292 292 * attach - adds a device to the system as part of initialization
293 293 * @dip:
294 294 * @cmd:
295 295 *
296 296 * The kernel calls a driver's attach() entry point to attach an instance of
297 297 * a device (for MegaRAID, it is instance of a controller) or to resume
298 298 * operation for an instance of a device that has been suspended or has been
299 299 * shut down by the power management framework
300 300 * The attach() entry point typically includes the following types of
301 301 * processing:
302 302 * - allocate a soft-state structure for the device instance (for MegaRAID,
303 303 * controller instance)
|
↓ open down ↓ |
303 lines elided |
↑ open up ↑ |
304 304 * - initialize per-instance mutexes
305 305 * - initialize condition variables
306 306 * - register the device's interrupts (for MegaRAID, controller's interrupts)
307 307 * - map the registers and memory of the device instance (for MegaRAID,
308 308 * controller instance)
309 309 * - create minor device nodes for the device instance (for MegaRAID,
310 310 * controller instance)
311 311 * - report that the device instance (for MegaRAID, controller instance) has
312 312 * attached
313 313 */
314 -#if __SunOS_5_11
314 +/* #if __SunOS_5_11 */
315 +#if 0
315 316 #define DDI_PM_RESUME DDI_PM_RESUME_OBSOLETE
316 317 #define DDI_PM_SUSPEND DDI_PM_SUSPEND_OBSOLETE
317 318 #endif // __SunOS_5_11
318 319 static int
319 320 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
320 321 {
321 322 int instance_no;
322 323 int nregs;
323 324 int i = 0;
324 325 uint8_t irq;
325 326 uint16_t vendor_id;
326 327 uint16_t device_id;
327 328 uint16_t subsysvid;
328 329 uint16_t subsysid;
329 330 uint16_t command;
330 331 off_t reglength = 0;
331 332 int intr_types = 0;
332 333 char *data;
333 334
334 335 scsi_hba_tran_t *tran;
335 336 ddi_dma_attr_t tran_dma_attr;
336 337 struct mrsas_instance *instance;
337 338
338 339 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
339 340
340 341 /* CONSTCOND */
341 342 ASSERT(NO_COMPETING_THREADS);
342 343
343 344 instance_no = ddi_get_instance(dip);
344 345
345 346 /*
346 347 * check to see whether this device is in a DMA-capable slot.
347 348 */
348 349 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
349 350 cmn_err(CE_WARN,
350 351 "mr_sas%d: Device in slave-only slot, unused",
351 352 instance_no);
352 353 return (DDI_FAILURE);
353 354 }
354 355
355 356 switch (cmd) {
356 357 case DDI_ATTACH:
357 358
358 359 /* allocate the soft state for the instance */
359 360 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
360 361 != DDI_SUCCESS) {
361 362 cmn_err(CE_WARN,
362 363 "mr_sas%d: Failed to allocate soft state",
363 364 instance_no);
364 365
365 366 return (DDI_FAILURE);
366 367 }
367 368
368 369 instance = (struct mrsas_instance *)ddi_get_soft_state
369 370 (mrsas_state, instance_no);
370 371
371 372 if (instance == NULL) {
372 373 cmn_err(CE_WARN,
373 374 "mr_sas%d: Bad soft state", instance_no);
374 375
375 376 ddi_soft_state_free(mrsas_state, instance_no);
376 377
377 378 return (DDI_FAILURE);
378 379 }
379 380
380 381 bzero((caddr_t)instance,
381 382 sizeof (struct mrsas_instance));
382 383
383 384 instance->unroll.softs = 1;
384 385
385 386 /* Setup the PCI configuration space handles */
386 387 if (pci_config_setup(dip, &instance->pci_handle) !=
387 388 DDI_SUCCESS) {
388 389 cmn_err(CE_WARN,
389 390 "mr_sas%d: pci config setup failed ",
390 391 instance_no);
391 392
392 393 ddi_soft_state_free(mrsas_state, instance_no);
393 394 return (DDI_FAILURE);
394 395 }
395 396 if (instance->pci_handle == NULL) {
396 397 cmn_err(CE_WARN,
397 398 "mr_sas%d: pci config setup failed ",
398 399 instance_no);
399 400 ddi_soft_state_free(mrsas_state, instance_no);
400 401 return (DDI_FAILURE);
401 402 }
402 403
403 404
404 405
405 406 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
406 407 cmn_err(CE_WARN,
407 408 "mr_sas: failed to get registers.");
408 409
409 410 pci_config_teardown(&instance->pci_handle);
410 411 ddi_soft_state_free(mrsas_state, instance_no);
411 412 return (DDI_FAILURE);
412 413 }
413 414
414 415 vendor_id = pci_config_get16(instance->pci_handle,
415 416 PCI_CONF_VENID);
416 417 device_id = pci_config_get16(instance->pci_handle,
417 418 PCI_CONF_DEVID);
418 419
419 420 subsysvid = pci_config_get16(instance->pci_handle,
420 421 PCI_CONF_SUBVENID);
421 422 subsysid = pci_config_get16(instance->pci_handle,
422 423 PCI_CONF_SUBSYSID);
423 424
424 425 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
425 426 (pci_config_get16(instance->pci_handle,
426 427 PCI_CONF_COMM) | PCI_COMM_ME));
427 428 irq = pci_config_get8(instance->pci_handle,
428 429 PCI_CONF_ILINE);
429 430
430 431 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
431 432 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
432 433 instance_no, vendor_id, device_id, subsysvid,
433 434 subsysid, irq, MRSAS_VERSION));
434 435
435 436 /* enable bus-mastering */
436 437 command = pci_config_get16(instance->pci_handle,
437 438 PCI_CONF_COMM);
438 439
439 440 if (!(command & PCI_COMM_ME)) {
440 441 command |= PCI_COMM_ME;
441 442
442 443 pci_config_put16(instance->pci_handle,
443 444 PCI_CONF_COMM, command);
444 445
445 446 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
446 447 "enable bus-mastering", instance_no));
447 448 } else {
448 449 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
449 450 "bus-mastering already set", instance_no));
450 451 }
451 452
452 453 /* initialize function pointers */
453 454 switch(device_id) {
454 455 case PCI_DEVICE_ID_LSI_TBOLT:
455 456 case PCI_DEVICE_ID_LSI_INVADER:
456 457 con_log(CL_ANN, (CE_NOTE,
457 458 "mr_sas: 2208 T.B. device detected"));
458 459
459 460 instance->func_ptr = &mrsas_function_template_fusion;
460 461 instance->tbolt = 1;
461 462 break;
462 463
463 464 case PCI_DEVICE_ID_LSI_2108VDE:
464 465 case PCI_DEVICE_ID_LSI_2108V:
465 466 con_log(CL_ANN, (CE_NOTE,
466 467 "mr_sas: 2108 Liberator device detected"));
467 468
468 469 instance->func_ptr = &mrsas_function_template_ppc;
469 470 break;
470 471
471 472 default:
472 473 cmn_err(CE_WARN,
473 474 "mr_sas: Invalid device detected");
474 475
475 476 pci_config_teardown(&instance->pci_handle);
476 477 ddi_soft_state_free(mrsas_state, instance_no);
477 478 return (DDI_FAILURE);
478 479
479 480 }
480 481
481 482 instance->baseaddress = pci_config_get32(
482 483 instance->pci_handle, PCI_CONF_BASE0);
483 484 instance->baseaddress &= 0x0fffc;
484 485
485 486 instance->dip = dip;
486 487 instance->vendor_id = vendor_id;
487 488 instance->device_id = device_id;
488 489 instance->subsysvid = subsysvid;
489 490 instance->subsysid = subsysid;
490 491 instance->instance = instance_no;
491 492
492 493
493 494 /* Setup register map */
494 495 if ((ddi_dev_regsize(instance->dip,
495 496 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) ||
496 497 reglength < MINIMUM_MFI_MEM_SZ) {
497 498 goto fail_attach;
498 499 }
499 500 if (reglength > DEFAULT_MFI_MEM_SZ) {
500 501 reglength = DEFAULT_MFI_MEM_SZ;
501 502 con_log(CL_DLEVEL1, (CE_NOTE,
502 503 "mr_sas: register length to map is "
503 504 "0x%lx bytes", reglength));
504 505 }
505 506 if (ddi_regs_map_setup(instance->dip,
506 507 REGISTER_SET_IO_2108, &instance->regmap, 0,
507 508 reglength, &endian_attr, &instance->regmap_handle)
508 509 != DDI_SUCCESS) {
509 510 cmn_err(CE_WARN,
510 511 "mr_sas: couldn't map control registers");
511 512 goto fail_attach;
512 513 }
513 514 if (instance->regmap_handle == NULL) {
514 515 cmn_err(CE_WARN,
515 516 "mr_sas: couldn't map control registers");
516 517 goto fail_attach;
517 518 }
518 519
519 520 instance->unroll.regs = 1;
520 521
521 522 /*
522 523 * Disable Interrupt Now.
523 524 * Setup Software interrupt
524 525 */
525 526 instance->func_ptr->disable_intr(instance);
526 527
527 528 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
528 529 "mrsas-enable-msi", &data) == DDI_SUCCESS) {
529 530 if (strncmp(data, "no", 3) == 0) {
530 531 msi_enable = 0;
531 532 con_log(CL_ANN1, (CE_WARN,
532 533 "msi_enable = %d disabled",
533 534 msi_enable));
534 535 }
535 536 ddi_prop_free(data);
536 537 }
537 538
538 539 con_log(CL_DLEVEL1, (CE_NOTE, "msi_enable = %d", msi_enable));
539 540
540 541 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
541 542 "mrsas-enable-fp", &data) == DDI_SUCCESS) {
542 543 if (strncmp(data, "no", 3) == 0) {
543 544 enable_fp = 0;
544 545 cmn_err(CE_NOTE,
545 546 "enable_fp = %d, Fast-Path disabled.\n",
546 547 enable_fp);
547 548 }
548 549
549 550 ddi_prop_free(data);
550 551 }
551 552
552 553 cmn_err(CE_NOTE, "enable_fp = %d\n", enable_fp);
553 554
554 555 /* Check for all supported interrupt types */
555 556 if (ddi_intr_get_supported_types(
556 557 dip, &intr_types) != DDI_SUCCESS) {
557 558 cmn_err(CE_WARN,
558 559 "ddi_intr_get_supported_types() failed");
559 560 goto fail_attach;
560 561 }
561 562
562 563 con_log(CL_DLEVEL1, (CE_NOTE,
563 564 "ddi_intr_get_supported_types() ret: 0x%x",
564 565 intr_types));
565 566
566 567 /* Initialize and Setup Interrupt handler */
567 568 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
568 569 if (mrsas_add_intrs(instance,
569 570 DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) {
570 571 cmn_err(CE_WARN,
571 572 "MSIX interrupt query failed");
572 573 goto fail_attach;
573 574 }
574 575 instance->intr_type = DDI_INTR_TYPE_MSIX;
575 576 } else if (msi_enable && (intr_types &
576 577 DDI_INTR_TYPE_MSI)) {
577 578 if (mrsas_add_intrs(instance,
578 579 DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
579 580 cmn_err(CE_WARN,
580 581 "MSI interrupt query failed");
581 582 goto fail_attach;
582 583 }
583 584 instance->intr_type = DDI_INTR_TYPE_MSI;
584 585 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
585 586 msi_enable = 0;
586 587 if (mrsas_add_intrs(instance,
587 588 DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
588 589 cmn_err(CE_WARN,
589 590 "FIXED interrupt query failed");
590 591 goto fail_attach;
591 592 }
592 593 instance->intr_type = DDI_INTR_TYPE_FIXED;
593 594 } else {
594 595 cmn_err(CE_WARN, "Device cannot "
595 596 "suppport either FIXED or MSI/X "
596 597 "interrupts");
597 598 goto fail_attach;
598 599 }
599 600
600 601 instance->unroll.intr = 1;
601 602
602 603
603 604 /* setup the mfi based low level driver */
604 605 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
605 606 cmn_err(CE_WARN, "mr_sas: "
606 607 "could not initialize the low level driver");
607 608
608 609 goto fail_attach;
609 610 }
610 611
611 612 /* Initialize all Mutex */
612 613 INIT_LIST_HEAD(&instance->completed_pool_list);
613 614 mutex_init(&instance->completed_pool_mtx,
614 615 "completed_pool_mtx", MUTEX_DRIVER,
615 616 DDI_INTR_PRI(instance->intr_pri));
616 617
617 618 mutex_init(&instance->sync_map_mtx,
618 619 "sync_map_mtx", MUTEX_DRIVER,
619 620 DDI_INTR_PRI(instance->intr_pri));
620 621
621 622 mutex_init(&instance->app_cmd_pool_mtx,
622 623 "app_cmd_pool_mtx", MUTEX_DRIVER,
623 624 DDI_INTR_PRI(instance->intr_pri));
624 625
625 626 mutex_init(&instance->config_dev_mtx, "config_dev_mtx",
626 627 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
627 628
628 629 mutex_init(&instance->cmd_pend_mtx, "cmd_pend_mtx",
629 630 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
630 631
631 632 mutex_init(&instance->ocr_flags_mtx, "ocr_flags_mtx",
632 633 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
633 634
634 635 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
635 636 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
636 637 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
637 638
638 639 mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx",
639 640 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
640 641
641 642 mutex_init(&instance->reg_write_mtx,"reg_write_mtx",
642 643 MUTEX_DRIVER,DDI_INTR_PRI(instance->intr_pri));
643 644
644 645 if (instance->tbolt) {
645 646 mutex_init(&instance->cmd_app_pool_mtx,
646 647 "cmd_app_pool_mtx", MUTEX_DRIVER,
647 648 DDI_INTR_PRI(instance->intr_pri));
648 649
649 650 mutex_init(&instance->chip_mtx,
650 651 "chip_mtx", MUTEX_DRIVER,
651 652 DDI_INTR_PRI(instance->intr_pri));
652 653
653 654 }
654 655
655 656 instance->unroll.mutexs = 1;
656 657
657 658 instance->timeout_id = (timeout_id_t)-1;
658 659
659 660 /* Register our soft-isr for highlevel interrupts. */
660 661 instance->isr_level = instance->intr_pri;
661 662 if (!(instance->tbolt)) {
662 663 if (instance->isr_level == HIGH_LEVEL_INTR) {
663 664 if (ddi_add_softintr(dip,
664 665 DDI_SOFTINT_HIGH,
665 666 &instance->soft_intr_id,
666 667 NULL, NULL, mrsas_softintr,
667 668 (caddr_t)instance) !=
668 669 DDI_SUCCESS) {
669 670 cmn_err(CE_WARN,
670 671 "Software ISR "
671 672 "did not register");
672 673
673 674 goto fail_attach;
674 675 }
675 676
676 677 instance->unroll.soft_isr = 1;
677 678
678 679 }
679 680 }
680 681
681 682 instance->softint_running = 0;
682 683
683 684 /* Allocate a transport structure */
684 685 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
685 686
686 687 if (tran == NULL) {
687 688 cmn_err(CE_WARN,
688 689 "scsi_hba_tran_alloc failed");
689 690 goto fail_attach;
690 691 }
691 692
692 693 instance->tran = tran;
693 694 instance->unroll.tran = 1;
694 695
695 696 tran->tran_hba_private = instance;
696 697 tran->tran_tgt_init = mrsas_tran_tgt_init;
697 698 tran->tran_tgt_probe = scsi_hba_probe;
698 699 tran->tran_tgt_free = mrsas_tran_tgt_free;
699 700 if (instance->tbolt) {
700 701 tran->tran_init_pkt =
701 702 mrsas_tbolt_tran_init_pkt;
702 703 tran->tran_start =
703 704 mrsas_tbolt_tran_start;
704 705 } else {
705 706 tran->tran_init_pkt = mrsas_tran_init_pkt;
706 707 tran->tran_start = mrsas_tran_start;
707 708 }
708 709 tran->tran_abort = mrsas_tran_abort;
709 710 tran->tran_reset = mrsas_tran_reset;
710 711 tran->tran_getcap = mrsas_tran_getcap;
711 712 tran->tran_setcap = mrsas_tran_setcap;
712 713 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
713 714 tran->tran_dmafree = mrsas_tran_dmafree;
714 715 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
715 716 tran->tran_quiesce = mrsas_tran_quiesce;
716 717 tran->tran_unquiesce = mrsas_tran_unquiesce;
717 718 tran->tran_bus_config = mrsas_tran_bus_config;
718 719
719 720 if (mrsas_relaxed_ordering)
720 721 mrsas_generic_dma_attr.dma_attr_flags |=
721 722 DDI_DMA_RELAXED_ORDERING;
722 723
723 724
724 725 tran_dma_attr = mrsas_generic_dma_attr;
725 726 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
726 727
727 728 /* Attach this instance of the hba */
728 729 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
729 730 != DDI_SUCCESS) {
730 731 cmn_err(CE_WARN,
731 732 "scsi_hba_attach failed");
732 733
733 734 goto fail_attach;
734 735 }
735 736 instance->unroll.tranSetup = 1;
736 737 con_log(CL_ANN1, (CE_CONT,
737 738 "scsi_hba_attach_setup() done."));
738 739
739 740
740 741 /* create devctl node for cfgadm command */
741 742 if (ddi_create_minor_node(dip, "devctl",
742 743 S_IFCHR, INST2DEVCTL(instance_no),
743 744 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
744 745 cmn_err(CE_WARN,
745 746 "mr_sas: failed to create devctl node.");
746 747
747 748 goto fail_attach;
748 749 }
749 750
750 751 instance->unroll.devctl = 1;
751 752
752 753 /* create scsi node for cfgadm command */
753 754 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
754 755 INST2SCSI(instance_no),
755 756 DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
756 757 DDI_FAILURE) {
757 758 cmn_err(CE_WARN,
758 759 "mr_sas: failed to create scsi node.");
759 760
760 761 goto fail_attach;
761 762 }
762 763
763 764 instance->unroll.scsictl = 1;
764 765
765 766 (void) sprintf(instance->iocnode, "%d:lsirdctl",
766 767 instance_no);
767 768
768 769 /*
769 770 * Create a node for applications
770 771 * for issuing ioctl to the driver.
771 772 */
772 773 if (ddi_create_minor_node(dip, instance->iocnode,
773 774 S_IFCHR, INST2LSIRDCTL(instance_no),
774 775 DDI_PSEUDO, 0) == DDI_FAILURE) {
775 776 cmn_err(CE_WARN,
776 777 "mr_sas: failed to create ioctl node.");
777 778
778 779 goto fail_attach;
779 780 }
780 781
781 782 instance->unroll.ioctl = 1;
782 783
783 784 /* Create a taskq to handle dr events */
784 785 if ((instance->taskq = ddi_taskq_create(dip,
785 786 "mrsas_dr_taskq", 1,
786 787 TASKQ_DEFAULTPRI, 0)) == NULL) {
787 788 cmn_err(CE_WARN,
788 789 "mr_sas: failed to create taskq ");
789 790 instance->taskq = NULL;
790 791 goto fail_attach;
791 792 }
792 793 instance->unroll.taskq = 1;
793 794 con_log(CL_ANN1, (CE_CONT,
794 795 "ddi_taskq_create() done."));
795 796
796 797 /* enable interrupt */
797 798 instance->func_ptr->enable_intr(instance);
798 799
799 800 /* initiate AEN */
800 801 if (start_mfi_aen(instance)) {
801 802 cmn_err(CE_WARN,
802 803 "mr_sas: failed to initiate AEN.");
803 804 goto fail_attach;
804 805 }
805 806 instance->unroll.aenPend = 1;
806 807 con_log(CL_ANN1, (CE_CONT,
807 808 "AEN started for instance %d.", instance_no));
808 809
809 810 /* Finally! We are on the air. */
810 811 ddi_report_dev(dip);
811 812
812 813 instance->mr_ld_list =
813 814 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
814 815 KM_SLEEP);
815 816 if (instance->mr_ld_list == NULL) {
816 817 cmn_err(CE_WARN,
817 818 "mr_sas attach(): failed to allocate ld_list array");
818 819 goto fail_attach;
819 820 }
820 821 instance->unroll.ldlist_buff = 1;
821 822
822 823 #ifdef PDSUPPORT
823 824 if(instance->tbolt) {
824 825 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
825 826 instance->mr_tbolt_pd_list =
826 827 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance)
827 828 * sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
828 829 ASSERT(instance->mr_tbolt_pd_list);
829 830 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
830 831 instance->mr_tbolt_pd_list[i].lun_type =
831 832 MRSAS_TBOLT_PD_LUN;
832 833 instance->mr_tbolt_pd_list[i].dev_id =
833 834 (uint8_t)i;
834 835 }
835 836
836 837 instance->unroll.pdlist_buff = 1;
837 838 }
838 839 #endif
839 840 break;
840 841 case DDI_PM_RESUME:
841 842 con_log(CL_ANN, (CE_NOTE,
842 843 "mr_sas: DDI_PM_RESUME"));
843 844 break;
844 845 case DDI_RESUME:
845 846 con_log(CL_ANN, (CE_NOTE,
846 847 "mr_sas: DDI_RESUME"));
847 848 break;
848 849 default:
849 850 con_log(CL_ANN, (CE_WARN,
850 851 "mr_sas: invalid attach cmd=%x", cmd));
851 852 return (DDI_FAILURE);
852 853 }
853 854
854 855
855 856 cmn_err(CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d", instance_no);
856 857 return (DDI_SUCCESS);
857 858
858 859 fail_attach:
859 860
860 861 mrsas_undo_resources(dip, instance);
861 862
862 863 pci_config_teardown(&instance->pci_handle);
863 864 ddi_soft_state_free(mrsas_state, instance_no);
864 865
865 866 con_log(CL_ANN, (CE_WARN,
866 867 "mr_sas: return failure from mrsas_attach"));
867 868
868 869 cmn_err(CE_WARN, "mrsas_attach() return FAILURE instance_num %d", instance_no);
869 870
870 871 return (DDI_FAILURE);
871 872 }
872 873
873 874 /*
874 875 * getinfo - gets device information
875 876 * @dip:
876 877 * @cmd:
877 878 * @arg:
878 879 * @resultp:
879 880 *
880 881 * The system calls getinfo() to obtain configuration information that only
881 882 * the driver knows. The mapping of minor numbers to device instance is
882 883 * entirely under the control of the driver. The system sometimes needs to ask
883 884 * the driver which device a particular dev_t represents.
884 885 * Given the device number return the devinfo pointer from the scsi_device
885 886 * structure.
886 887 */
887 888 /*ARGSUSED*/
888 889 static int
889 890 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
890 891 {
891 892 int rval;
892 893 int mrsas_minor = getminor((dev_t)arg);
893 894
894 895 struct mrsas_instance *instance;
895 896
896 897 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
897 898
898 899 switch (cmd) {
899 900 case DDI_INFO_DEVT2DEVINFO:
900 901 instance = (struct mrsas_instance *)
901 902 ddi_get_soft_state(mrsas_state,
902 903 MINOR2INST(mrsas_minor));
903 904
904 905 if (instance == NULL) {
905 906 *resultp = NULL;
906 907 rval = DDI_FAILURE;
907 908 } else {
908 909 *resultp = instance->dip;
909 910 rval = DDI_SUCCESS;
910 911 }
911 912 break;
912 913 case DDI_INFO_DEVT2INSTANCE:
913 914 *resultp = (void *)(intptr_t)
914 915 (MINOR2INST(getminor((dev_t)arg)));
915 916 rval = DDI_SUCCESS;
916 917 break;
917 918 default:
918 919 *resultp = NULL;
919 920 rval = DDI_FAILURE;
920 921 }
921 922
922 923 return (rval);
923 924 }
924 925
/*
 * detach - detaches a device from the system
 * @dip: pointer to the device's dev_info structure
 * @cmd: type of detach (DDI_DETACH, DDI_SUSPEND, DDI_PM_SUSPEND)
 *
 * For DDI_DETACH: cancels the pending config-dev timeout, detaches the
 * SCSA transport, flushes the controller cache, releases every resource
 * recorded in instance->unroll via mrsas_undo_resources(), and frees the
 * PCI config handle and soft state.  The SUSPEND cases only log.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the instance cannot be found,
 * scsi_hba_detach() fails, or cmd is unrecognized.
 */
static int
mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance_no;

	struct mrsas_instance *instance;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	/* look up the soft state allocated by attach() */
	instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
	    instance_no);

	if (!instance) {
		cmn_err(CE_WARN,
		    "mr_sas:%d could not get instance in detach",
		    instance_no);

		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE,
	    "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
	    instance_no, instance->vendor_id, instance->device_id,
	    instance->subsysvid, instance->subsysid));

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_DETACH"));

		/*
		 * Cancel the pending timeout.  config_dev_mtx is dropped
		 * around untimeout() to avoid deadlocking against a
		 * timeout handler that takes the same lock.
		 * NOTE(review): timeout_id is re-read as stale after the
		 * lock is dropped -- presumably safe because detach runs
		 * single-threaded (NO_COMPETING_THREADS); confirm no other
		 * path re-arms the timeout concurrently.
		 */
		mutex_enter(&instance->config_dev_mtx);
		if (instance->timeout_id != (timeout_id_t)-1) {
			mutex_exit(&instance->config_dev_mtx);
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;
			mutex_enter(&instance->config_dev_mtx);
			instance->unroll.timer = 0;
		}
		mutex_exit(&instance->config_dev_mtx);

		/*
		 * Detach the SCSA transport here (before the generic
		 * unroll) so a failure can still abort the detach.
		 */
		if(instance->unroll.tranSetup == 1) {
			if (scsi_hba_detach(dip) != DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "mr_sas2%d: failed to detach", instance_no);
				return (DDI_FAILURE);
			}
			instance->unroll.tranSetup = 0;
			con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
		}

		/* flush controller cache before tearing anything down */
		flush_cache(instance);

		/* release everything recorded in instance->unroll */
		mrsas_undo_resources(dip, instance);

		pci_config_teardown(&instance->pci_handle);
		ddi_soft_state_free(mrsas_state, instance_no);
		break;

	case DDI_PM_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_PM_SUSPEND"));

		break;
	case DDI_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_SUSPEND"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1020 1021
1021 1022
1022 1023 static int
1023 1024 mrsas_undo_resources (dev_info_t *dip, struct mrsas_instance *instance)
1024 1025 {
1025 1026 int instance_no;
1026 1027
1027 1028 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1028 1029
1029 1030
1030 1031 instance_no = ddi_get_instance(dip);
1031 1032
1032 1033
1033 1034 if(instance->unroll.ioctl == 1) {
1034 1035 ddi_remove_minor_node(dip, instance->iocnode);
1035 1036 instance->unroll.ioctl = 0;
1036 1037 }
1037 1038
1038 1039 if(instance->unroll.scsictl == 1) {
1039 1040 ddi_remove_minor_node(dip, "scsi");
1040 1041 instance->unroll.scsictl = 0;
1041 1042 }
1042 1043
1043 1044 if(instance->unroll.devctl == 1) {
1044 1045 ddi_remove_minor_node(dip, "devctl");
1045 1046 instance->unroll.devctl = 0;
1046 1047 }
1047 1048
1048 1049 if(instance->unroll.tranSetup == 1) {
1049 1050 if (scsi_hba_detach(dip) != DDI_SUCCESS) {
1050 1051 cmn_err(CE_WARN,
1051 1052 "mr_sas2%d: failed to detach", instance_no);
1052 1053 return (DDI_FAILURE);
1053 1054 }
1054 1055 instance->unroll.tranSetup = 0;
1055 1056 con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
1056 1057 }
1057 1058
1058 1059 if(instance->unroll.tran == 1) {
1059 1060 scsi_hba_tran_free(instance->tran);
1060 1061 instance->unroll.tran = 0;
1061 1062 con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done."));
1062 1063 }
1063 1064
1064 1065 if(instance->unroll.syncCmd == 1) {
1065 1066 if(instance->tbolt) {
1066 1067 if (abort_syncmap_cmd(instance, instance->map_update_cmd))
1067 1068 cmn_err(CE_WARN, "mrsas_detach: "
1068 1069 "failed to abort previous syncmap command");
1069 1070
1070 1071 instance->unroll.syncCmd = 0;
1071 1072 con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
1072 1073 }
1073 1074 }
1074 1075
1075 1076 if(instance->unroll.aenPend == 1) {
1076 1077 if (abort_aen_cmd(instance, instance->aen_cmd))
1077 1078 cmn_err(CE_WARN, "mrsas_detach: "
1078 1079 "failed to abort prevous AEN command");
1079 1080
1080 1081 instance->unroll.aenPend = 0;
1081 1082 con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
1082 1083 /*This means the controller is fully initialzed and running */
1083 1084 // shutdown_controller();Shutdown should be a last command to controller.
1084 1085 }
1085 1086
1086 1087
1087 1088 if(instance->unroll.timer == 1) {
1088 1089 if (instance->timeout_id != (timeout_id_t)-1) {
1089 1090 (void) untimeout(instance->timeout_id);
1090 1091 instance->timeout_id = (timeout_id_t)-1;
1091 1092
1092 1093 instance->unroll.timer = 0;
1093 1094 }
1094 1095 }
1095 1096
1096 1097 instance->func_ptr->disable_intr(instance);
1097 1098
1098 1099
1099 1100 if(instance->unroll.mutexs == 1) {
1100 1101 mutex_destroy(&instance->cmd_pool_mtx);
1101 1102 mutex_destroy(&instance->app_cmd_pool_mtx);
1102 1103 mutex_destroy(&instance->cmd_pend_mtx);
1103 1104 mutex_destroy(&instance->completed_pool_mtx);
1104 1105 mutex_destroy(&instance->sync_map_mtx);
1105 1106 mutex_destroy(&instance->int_cmd_mtx);
1106 1107 cv_destroy(&instance->int_cmd_cv);
1107 1108 mutex_destroy(&instance->config_dev_mtx);
1108 1109 mutex_destroy(&instance->ocr_flags_mtx);
1109 1110 mutex_destroy(&instance->reg_write_mtx);
1110 1111
1111 1112 if (instance->tbolt) {
1112 1113 mutex_destroy(&instance->cmd_app_pool_mtx);
1113 1114 mutex_destroy(&instance->chip_mtx);
1114 1115 }
1115 1116
1116 1117 instance->unroll.mutexs = 0;
1117 1118 con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done."));
1118 1119 }
1119 1120
1120 1121
1121 1122 if (instance->unroll.soft_isr == 1) {
1122 1123 ddi_remove_softintr(instance->soft_intr_id);
1123 1124 instance->unroll.soft_isr = 0;
1124 1125 }
1125 1126
1126 1127 if(instance->unroll.intr == 1) {
1127 1128 mrsas_rem_intrs(instance);
1128 1129 instance->unroll.intr = 0;
1129 1130 }
1130 1131
1131 1132
1132 1133 if(instance->unroll.taskq == 1) {
1133 1134 if (instance->taskq) {
1134 1135 ddi_taskq_destroy(instance->taskq);
1135 1136 instance->unroll.taskq = 0;
1136 1137 }
1137 1138
1138 1139 }
1139 1140
1140 1141 /*free dma memory allocated for
1141 1142 cmds/frames/queues/driver version etc */
1142 1143 if(instance->unroll.verBuff == 1) {
1143 1144 mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1144 1145 instance->unroll.verBuff = 0;
1145 1146 }
1146 1147
1147 1148 if(instance->unroll.pdlist_buff == 1) {
1148 1149 if (instance->mr_tbolt_pd_list != NULL)
1149 1150 kmem_free(instance->mr_tbolt_pd_list,
1150 1151 MRSAS_TBOLT_GET_PD_MAX(instance) * sizeof (struct mrsas_tbolt_pd));
1151 1152
1152 1153 instance->mr_tbolt_pd_list = NULL;
1153 1154 instance->unroll.pdlist_buff = 0;
1154 1155 }
1155 1156
1156 1157 if(instance->unroll.ldlist_buff == 1) {
1157 1158 if (instance->mr_ld_list != NULL)
1158 1159 kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
1159 1160 * sizeof (struct mrsas_ld));
1160 1161
1161 1162 instance->mr_ld_list = NULL;
1162 1163 instance->unroll.ldlist_buff = 0;
1163 1164 }
1164 1165
1165 1166 if (instance->tbolt) {
1166 1167 if(instance->unroll.alloc_space_mpi2 == 1) {
1167 1168 free_space_for_mpi2(instance);
1168 1169 instance->unroll.alloc_space_mpi2 = 0;
1169 1170 }
1170 1171 } else {
1171 1172 if(instance->unroll.alloc_space_mfi == 1) {
1172 1173 free_space_for_mfi(instance);
1173 1174 instance->unroll.alloc_space_mfi = 0;
1174 1175 }
1175 1176 }
1176 1177
1177 1178 if(instance->unroll.regs == 1) {
1178 1179 ddi_regs_map_free(&instance->regmap_handle);
1179 1180 instance->unroll.regs = 0;
1180 1181 con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done."));
1181 1182 }
1182 1183
1183 1184 return (DDI_SUCCESS);
1184 1185 }
1185 1186
1186 1187
1187 1188
1188 1189 /*
1189 1190 * ************************************************************************** *
1190 1191 * *
1191 1192 * common entry points - for character driver types *
1192 1193 * *
1193 1194 * ************************************************************************** *
1194 1195 */
1195 1196 /*
1196 1197 * open - gets access to a device
1197 1198 * @dev:
1198 1199 * @openflags:
1199 1200 * @otyp:
1200 1201 * @credp:
1201 1202 *
1202 1203 * Access to a device by one or more application programs is controlled
1203 1204 * through the open() and close() entry points. The primary function of
1204 1205 * open() is to verify that the open request is allowed.
1205 1206 */
1206 1207 static int
1207 1208 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1208 1209 {
1209 1210 int rval = 0;
1210 1211
1211 1212 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1212 1213
1213 1214 /* Check root permissions */
1214 1215 if (drv_priv(credp) != 0) {
1215 1216 con_log(CL_ANN, (CE_WARN,
1216 1217 "mr_sas: Non-root ioctl access denied!"));
1217 1218 return (EPERM);
1218 1219 }
1219 1220
1220 1221 /* Verify we are being opened as a character device */
1221 1222 if (otyp != OTYP_CHR) {
1222 1223 con_log(CL_ANN, (CE_WARN,
1223 1224 "mr_sas: ioctl node must be a char node"));
1224 1225 return (EINVAL);
1225 1226 }
1226 1227
1227 1228 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1228 1229 == NULL) {
1229 1230 return (ENXIO);
1230 1231 }
1231 1232
1232 1233 if (scsi_hba_open) {
1233 1234 rval = scsi_hba_open(dev, openflags, otyp, credp);
1234 1235 }
1235 1236
1236 1237 return (rval);
1237 1238 }
1238 1239
1239 1240 /*
1240 1241 * close - gives up access to a device
1241 1242 * @dev:
1242 1243 * @openflags:
1243 1244 * @otyp:
1244 1245 * @credp:
1245 1246 *
1246 1247 * close() should perform any cleanup necessary to finish using the minor
1247 1248 * device, and prepare the device (and driver) to be opened again.
1248 1249 */
1249 1250 static int
1250 1251 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1251 1252 {
1252 1253 int rval = 0;
1253 1254
1254 1255 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1255 1256
1256 1257 /* no need for locks! */
1257 1258
1258 1259 if (scsi_hba_close) {
1259 1260 rval = scsi_hba_close(dev, openflags, otyp, credp);
1260 1261 }
1261 1262
1262 1263 return (rval);
1263 1264 }
1264 1265
1265 1266 /*
1266 1267 * ioctl - performs a range of I/O commands for character drivers
1267 1268 * @dev:
1268 1269 * @cmd:
1269 1270 * @arg:
1270 1271 * @mode:
1271 1272 * @credp:
1272 1273 * @rvalp:
1273 1274 *
1274 1275 * ioctl() routine must make sure that user data is copied into or out of the
1275 1276 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1276 1277 * and ddi_copyout(), as appropriate.
1277 1278 * This is a wrapper routine to serialize access to the actual ioctl routine.
1278 1279 * ioctl() should return 0 on success, or the appropriate error number. The
1279 1280 * driver may also set the value returned to the calling process through rvalp.
1280 1281 */
1281 1282
1282 1283 static int
1283 1284 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1284 1285 int *rvalp)
1285 1286 {
1286 1287 int rval = 0;
1287 1288
1288 1289 struct mrsas_instance *instance;
1289 1290 struct mrsas_ioctl *ioctl;
1290 1291 struct mrsas_aen aen;
1291 1292 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1292 1293
1293 1294 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1294 1295
1295 1296 if (instance == NULL) {
1296 1297 /* invalid minor number */
1297 1298 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1298 1299 return (ENXIO);
1299 1300 }
1300 1301
1301 1302 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1302 1303 KM_SLEEP);
1303 1304 if (ioctl == NULL) {
1304 1305 /* Failed to allocate memory for ioctl */
1305 1306 con_log(CL_ANN, (CE_WARN, "mr_sas_ioctl: failed to allocate memory for ioctl"));
1306 1307 return (ENXIO);
1307 1308 }
1308 1309
1309 1310 switch ((uint_t)cmd) {
1310 1311 case MRSAS_IOCTL_FIRMWARE:
1311 1312 if (ddi_copyin((void *)arg, ioctl,
1312 1313 sizeof (struct mrsas_ioctl), mode)) {
1313 1314 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1314 1315 "ERROR IOCTL copyin"));
1315 1316 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1316 1317 return (EFAULT);
1317 1318 }
1318 1319
1319 1320 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1320 1321 rval = handle_drv_ioctl(instance, ioctl, mode);
1321 1322 } else {
1322 1323 rval = handle_mfi_ioctl(instance, ioctl, mode);
1323 1324 }
1324 1325
1325 1326 if (ddi_copyout((void *)ioctl, (void *)arg,
1326 1327 (sizeof (struct mrsas_ioctl) - 1), mode)) {
1327 1328 con_log(CL_ANN, (CE_WARN,
1328 1329 "mrsas_ioctl: copy_to_user failed"));
1329 1330 rval = 1;
1330 1331 }
1331 1332
1332 1333 break;
1333 1334 case MRSAS_IOCTL_AEN:
1334 1335 con_log(CL_ANN, (CE_NOTE,
1335 1336 "mrsas_ioctl: IOCTL Register AEN.\n"));
1336 1337
1337 1338 if (ddi_copyin((void *) arg, &aen,
1338 1339 sizeof (struct mrsas_aen), mode)) {
1339 1340 con_log(CL_ANN, (CE_WARN,
1340 1341 "mrsas_ioctl: ERROR AEN copyin"));
1341 1342 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1342 1343 return (EFAULT);
1343 1344 }
1344 1345
1345 1346 rval = handle_mfi_aen(instance, &aen);
1346 1347
1347 1348 if (ddi_copyout((void *) &aen, (void *)arg,
1348 1349 sizeof (struct mrsas_aen), mode)) {
1349 1350 con_log(CL_ANN, (CE_WARN,
1350 1351 "mrsas_ioctl: copy_to_user failed"));
1351 1352 rval = 1;
1352 1353 }
1353 1354
1354 1355 break;
1355 1356 default:
1356 1357 rval = scsi_hba_ioctl(dev, cmd, arg,
1357 1358 mode, credp, rvalp);
1358 1359
1359 1360 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
1360 1361 "scsi_hba_ioctl called, ret = %x.", rval));
1361 1362 }
1362 1363
1363 1364 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1364 1365 return (rval);
1365 1366 }
1366 1367
1367 1368 /*
1368 1369 * ************************************************************************** *
1369 1370 * *
1370 1371 * common entry points - for block driver types *
1371 1372 * *
1372 1373 * ************************************************************************** *
1373 1374 */
1374 1375 /*
1375 1376 * reset - TBD
1376 1377 * @dip:
1377 1378 * @cmd:
1378 1379 *
1379 1380 * TBD
1380 1381 */
1381 1382 /*ARGSUSED*/
1382 1383 static int
1383 1384 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1384 1385 {
1385 1386 int instance_no;
1386 1387
1387 1388 struct mrsas_instance *instance;
1388 1389
1389 1390 instance_no = ddi_get_instance(dip);
1390 1391 instance = (struct mrsas_instance *)ddi_get_soft_state
1391 1392 (mrsas_state, instance_no);
1392 1393
1393 1394 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1394 1395
1395 1396 if (!instance) {
1396 1397 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1397 1398 "in reset", instance_no));
1398 1399 return (DDI_FAILURE);
1399 1400 }
1400 1401
1401 1402 instance->func_ptr->disable_intr(instance);
1402 1403
1403 1404 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1404 1405 instance_no));
1405 1406
1406 1407 flush_cache(instance);
1407 1408
1408 1409 return (DDI_SUCCESS);
1409 1410 }
1410 1411
1411 1412
1412 1413 /*ARGSUSED*/
1413 1414 int
1414 1415 mrsas_quiesce(dev_info_t *dip)
1415 1416 {
1416 1417 int instance_no;
1417 1418
1418 1419 struct mrsas_instance *instance;
1419 1420
1420 1421 instance_no = ddi_get_instance(dip);
1421 1422 instance = (struct mrsas_instance *)ddi_get_soft_state
1422 1423 (mrsas_state, instance_no);
1423 1424
1424 1425 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1425 1426
1426 1427 if (!instance) {
1427 1428 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
1428 1429 "in quiesce", instance_no));
1429 1430 return (DDI_FAILURE);
1430 1431 }
1431 1432 if (instance->deadadapter || instance->adapterresetinprogress) {
1432 1433 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
1433 1434 "healthy state", instance_no));
1434 1435 return (DDI_FAILURE);
1435 1436 }
1436 1437
1437 1438 if (abort_aen_cmd(instance, instance->aen_cmd)) {
1438 1439 con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
1439 1440 "failed to abort prevous AEN command QUIESCE"));
1440 1441 }
1441 1442
1442 1443 if (instance->tbolt) {
1443 1444 if (abort_syncmap_cmd(instance,
1444 1445 instance->map_update_cmd)) {
1445 1446 cmn_err(CE_WARN,
1446 1447 "mrsas_detach: failed to abort "
1447 1448 "previous syncmap command");
1448 1449 return (DDI_FAILURE);
1449 1450 }
1450 1451 }
1451 1452
1452 1453 instance->func_ptr->disable_intr(instance);
1453 1454
1454 1455 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1455 1456 instance_no));
1456 1457
1457 1458 flush_cache(instance);
1458 1459
1459 1460 if (wait_for_outstanding(instance)) {
1460 1461 con_log(CL_ANN1, (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1461 1462 return (DDI_FAILURE);
1462 1463 }
1463 1464 return (DDI_SUCCESS);
1464 1465 }
1465 1466
1466 1467 /*
1467 1468 * ************************************************************************** *
1468 1469 * *
1469 1470 * entry points (SCSI HBA) *
1470 1471 * *
1471 1472 * ************************************************************************** *
1472 1473 */
1473 1474 /*
1474 1475 * tran_tgt_init - initialize a target device instance
1475 1476 * @hba_dip:
1476 1477 * @tgt_dip:
1477 1478 * @tran:
1478 1479 * @sd:
1479 1480 *
1480 1481 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1481 1482 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1482 1483 * the device's address as valid and supportable for that particular HBA.
1483 1484 * By returning DDI_FAILURE, the instance of the target driver for that device
1484 1485 * is not probed or attached.
1485 1486 */
1486 1487 /*ARGSUSED*/
1487 1488 static int
1488 1489 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1489 1490 scsi_hba_tran_t *tran, struct scsi_device *sd)
1490 1491 {
1491 1492 struct mrsas_instance *instance;
1492 1493 uint16_t tgt = sd->sd_address.a_target;
1493 1494 uint8_t lun = sd->sd_address.a_lun;
1494 1495 dev_info_t *child = NULL;
1495 1496
1496 1497 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
1497 1498 tgt, lun));
1498 1499
1499 1500 instance = ADDR2MR(&sd->sd_address);
1500 1501
1501 1502 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
1502 1503 /*
1503 1504 * If no persistent node exists, we don't allow .conf node
1504 1505 * to be created.
1505 1506 */
1506 1507 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
1507 1508 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init find child ="
1508 1509 " %p t = %d l = %d", (void *)child, tgt, lun));
1509 1510 if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
1510 1511 DDI_SUCCESS)
1511 1512 /* Create this .conf node */
1512 1513 return (DDI_SUCCESS);
1513 1514 }
1514 1515 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
1515 1516 "DDI_FAILURE t = %d l = %d", tgt, lun));
1516 1517 return (DDI_FAILURE);
1517 1518
1518 1519 }
1519 1520
1520 1521 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1521 1522 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1522 1523
1523 1524 if (tgt < MRDRV_MAX_LD && lun == 0) {
1524 1525 if (instance->mr_ld_list[tgt].dip == NULL &&
1525 1526 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1526 1527 mutex_enter(&instance->config_dev_mtx);
1527 1528 instance->mr_ld_list[tgt].dip = tgt_dip;
1528 1529 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1529 1530 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1530 1531 mutex_exit(&instance->config_dev_mtx);
1531 1532 }
1532 1533 }
1533 1534
1534 1535 #ifdef PDSUPPORT
1535 1536 else if(instance->tbolt) {
1536 1537 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1537 1538 mutex_enter(&instance->config_dev_mtx);
1538 1539 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1539 1540 instance->mr_tbolt_pd_list[tgt].flag =
1540 1541 MRDRV_TGT_VALID;
1541 1542 mutex_exit(&instance->config_dev_mtx);
1542 1543 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1543 1544 "t%xl%x", tgt, lun));
1544 1545 }
1545 1546 }
1546 1547 #endif
1547 1548
1548 1549 return (DDI_SUCCESS);
1549 1550 }
1550 1551
/*
 * mrsas_tran_tgt_free - SCSA tran_tgt_free(9E) entry point; called when a
 * target child node is torn down.  Clears this driver's cached dip for the
 * target so future tgt_init calls can rebind it.
 */
/*ARGSUSED*/
static void
mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct mrsas_instance *instance;
	int tgt = sd->sd_address.a_target;
	int lun = sd->sd_address.a_lun;

	instance = ADDR2MR(&sd->sd_address);

	con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));

	/* Logical disk: only clear if this dip is the one we recorded. */
	if (tgt < MRDRV_MAX_LD && lun == 0) {
		if (instance->mr_ld_list[tgt].dip == tgt_dip) {
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_ld_list[tgt].dip = NULL;
			mutex_exit(&instance->config_dev_mtx);
		}
	}

#ifdef PDSUPPORT
	/*
	 * Physical device on a Thunderbolt controller.
	 * NOTE(review): tgt is not bounds-checked against the PD list size
	 * here, and the dip is cleared without comparing it to tgt_dip --
	 * verify against MRSAS_TBOLT_GET_PD_MAX() and the tgt_init path.
	 */
	else if(instance->tbolt) {
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_tbolt_pd_list[tgt].dip = NULL;
		mutex_exit(&instance->config_dev_mtx);
		con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
		    "for tgt:%x", tgt));
	}
#endif

}
1583 1584
1584 1585 dev_info_t *
1585 1586 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1586 1587 {
1587 1588 dev_info_t *child = NULL;
1588 1589 char addr[SCSI_MAXNAMELEN];
1589 1590 char tmp[MAXNAMELEN];
1590 1591
1591 1592 (void) sprintf(addr, "%x,%x", tgt, lun);
1592 1593 for (child = ddi_get_child(instance->dip); child;
1593 1594 child = ddi_get_next_sibling(child)) {
1594 1595
1595 1596 if (ndi_dev_is_persistent_node(child) == 0) {
1596 1597 continue;
1597 1598 }
1598 1599
1599 1600 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1600 1601 DDI_SUCCESS) {
1601 1602 continue;
1602 1603 }
1603 1604
1604 1605 if (strcmp(addr, tmp) == 0) {
1605 1606 break;
1606 1607 }
1607 1608 }
1608 1609 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1609 1610 (void *)child));
1610 1611 return (child);
1611 1612 }
1612 1613
1613 1614 /*
1614 1615 * mrsas_name_node -
1615 1616 * @dip:
1616 1617 * @name:
1617 1618 * @len:
1618 1619 */
1619 1620 static int
1620 1621 mrsas_name_node(dev_info_t *dip, char *name, int len)
1621 1622 {
1622 1623 int tgt, lun;
1623 1624
1624 1625 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1625 1626 DDI_PROP_DONTPASS, "target", -1);
1626 1627 con_log(CL_DLEVEL2, (CE_NOTE,
1627 1628 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1628 1629 if (tgt == -1) {
1629 1630 return (DDI_FAILURE);
1630 1631 }
1631 1632 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1632 1633 "lun", -1);
1633 1634 con_log(CL_DLEVEL2,
1634 1635 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1635 1636 if (lun == -1) {
1636 1637 return (DDI_FAILURE);
1637 1638 }
1638 1639 (void) snprintf(name, len, "%x,%x", tgt, lun);
1639 1640 return (DDI_SUCCESS);
1640 1641 }
1641 1642
1642 1643 /*
1643 1644 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1644 1645 * @ap:
1645 1646 * @pkt:
1646 1647 * @bp:
1647 1648 * @cmdlen:
1648 1649 * @statuslen:
1649 1650 * @tgtlen:
1650 1651 * @flags:
1651 1652 * @callback:
1652 1653 *
1653 1654 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1654 1655 * structure and DMA resources for a target driver request. The
1655 1656 * tran_init_pkt() entry point is called when the target driver calls the
1656 1657 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1657 1658 * is a request to perform one or more of three possible services:
1658 1659 * - allocation and initialization of a scsi_pkt structure
1659 1660 * - allocation of DMA resources for data transfer
1660 1661 * - reallocation of DMA resources for the next portion of the data transfer
1661 1662 */
static struct scsi_pkt *
mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
	struct buf *bp, int cmdlen, int statuslen, int tgtlen,
	int flags, int (*callback)(), caddr_t arg)
{
	struct scsa_cmd *acmd;
	struct mrsas_instance *instance;
	struct scsi_pkt *new_pkt;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ADDR2MR(ap);

	/* step #1 : pkt allocation */
	if (pkt == NULL) {
		/* Fresh request: allocate pkt plus our per-cmd scsa_cmd. */
		pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (struct scsa_cmd), callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		acmd = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		acmd->cmd_pkt		= pkt;
		acmd->cmd_flags		= 0;
		acmd->cmd_scblen	= statuslen;
		acmd->cmd_cdblen	= cmdlen;
		acmd->cmd_dmahandle	= NULL;
		acmd->cmd_ncookies	= 0;
		acmd->cmd_cookie	= 0;
		acmd->cmd_cookiecnt	= 0;
		acmd->cmd_nwin		= 0;

		pkt->pkt_address	= *ap;
		pkt->pkt_comp		= (void (*)())NULL;
		pkt->pkt_flags		= 0;
		pkt->pkt_time		= 0;
		pkt->pkt_resid		= 0;
		pkt->pkt_state		= 0;
		pkt->pkt_statistics	= 0;
		pkt->pkt_reason		= 0;
		/* Remember we allocated it so it can be freed on failure. */
		new_pkt			= pkt;
	} else {
		/* Re-used pkt (DMA window move): nothing to allocate. */
		acmd = PKT2CMD(pkt);
		new_pkt = NULL;
	}

	/* step #2 : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		if (acmd->cmd_dmahandle == NULL) {
			/* First call with data: bind DMA resources. */
			if (mrsas_dma_alloc(instance, pkt, bp, flags,
			    callback) == DDI_FAILURE) {
				/* Free the pkt only if we allocated it. */
				if (new_pkt) {
					scsi_hba_pkt_free(ap, new_pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		} else {
			/* Handle already bound: advance to the next window. */
			if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
				return ((struct scsi_pkt *)NULL);
			}
		}
	}

	return (pkt);
}
1732 1733
1733 1734 /*
1734 1735 * tran_start - transport a SCSI command to the addressed target
1735 1736 * @ap:
1736 1737 * @pkt:
1737 1738 *
1738 1739 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1739 1740 * SCSI command to the addressed target. The SCSI command is described
1740 1741 * entirely within the scsi_pkt structure, which the target driver allocated
1741 1742 * through the HBA driver's tran_init_pkt() entry point. If the command
1742 1743 * involves a data transfer, DMA resources must also have been allocated for
1743 1744 * the scsi_pkt structure.
1744 1745 *
1745 1746 * Return Values :
1746 1747 * TRAN_BUSY - request queue is full, no more free scbs
1747 1748 * TRAN_ACCEPT - pkt has been submitted to the instance
1748 1749 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t		cmd_done = 0;

	struct mrsas_instance	*instance = ADDR2MR(ap);
	struct mrsas_cmd	*cmd;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/* Adapter declared dead: fail every new request permanently. */
	if (instance->deadadapter == 1) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_tran_start: return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesnt take any more IOs"));
		if (pkt) {
			pkt->pkt_reason		= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}

	/* Controller reset in progress: ask the target driver to retry. */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}

	con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));

	pkt->pkt_reason	= CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		/* Complete immediately unless the caller wants polled mode. */
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	/* No command frame available: queue is saturated. */
	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/* Interrupt-driven path. */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			cmn_err(CE_WARN, "mr_sas:Firmware BUSY, fw_outstanding(0x%X) > max_fw_cmds(0x%X)",
			    instance->fw_outstanding, instance->max_fw_cmds );
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
		    "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/* Polled (FLAG_NOINTR) path: complete the cmd inline. */
		struct mrsas_header *hdr = &cmd->frame->hdr;


		instance->func_ptr-> issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason		= CMD_CMPLT;
		pkt->pkt_statistics	= 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Map the firmware completion status into pkt fields. */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: scsi done with error"));
			pkt->pkt_reason	= CMD_CMPLT;
			pkt->pkt_statistics = 0;

			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: device not found error"));
			pkt->pkt_reason	= CMD_DEV_GONE;
			pkt->pkt_statistics  = STAT_DISCON;
			break;

		default:
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		/* Return the frame to the pool before calling completion. */
		return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}
1865 1866
1866 1867 /*
1867 1868 * tran_abort - Abort any commands that are currently in transport
1868 1869 * @ap:
1869 1870 * @pkt:
1870 1871 *
1871 1872 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1872 1873 * commands that are currently in transport for a particular target. This entry
1873 1874 * point is called when a target driver calls scsi_abort(). The tran_abort()
1874 1875 * entry point should attempt to abort the command denoted by the pkt
1875 1876 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1876 1877 * abort all outstanding commands in the transport layer for the particular
1877 1878 * target or logical unit.
1878 1879 */
1879 1880 /*ARGSUSED*/
1880 1881 static int
1881 1882 mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1882 1883 {
1883 1884 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1884 1885
1885 1886 /* abort command not supported by H/W */
1886 1887
1887 1888 return (DDI_FAILURE);
1888 1889 }
1889 1890
1890 1891 /*
1891 1892 * tran_reset - reset either the SCSI bus or target
1892 1893 * @ap:
1893 1894 * @level:
1894 1895 *
1895 1896 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
1896 1897 * the SCSI bus or a particular SCSI target device. This entry point is called
1897 1898 * when a target driver calls scsi_reset(). The tran_reset() entry point must
1898 1899 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
1899 1900 * particular target or logical unit must be reset.
1900 1901 */
1901 1902 /*ARGSUSED*/
1902 1903 static int
1903 1904 mrsas_tran_reset(struct scsi_address *ap, int level)
1904 1905 {
1905 1906 struct mrsas_instance *instance = ADDR2MR(ap);
1906 1907
1907 1908 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1908 1909
1909 1910 if (wait_for_outstanding(instance)) {
1910 1911 con_log(CL_ANN1, (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1911 1912 return (DDI_FAILURE);
1912 1913 } else {
1913 1914 return (DDI_SUCCESS);
1914 1915 }
1915 1916 }
1916 1917
1917 1918 /*
1918 1919 * tran_bus_reset - reset the SCSI bus
1919 1920 * @dip:
1920 1921 * @level:
1921 1922 *
1922 1923 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
1923 1924 * initialized during the HBA driver's attach(). The vector should point to
1924 1925 * an HBA entry point that is to be called when a user initiates a bus reset.
1925 1926 * Implementation is hardware specific. If the HBA driver cannot reset the
1926 1927 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
1927 1928 * or not initialize this vector.
1928 1929 */
1929 1930 /*ARGSUSED*/
1930 1931 static int
1931 1932 mrsas_tran_bus_reset(dev_info_t *dip, int level)
1932 1933 {
1933 1934 int instance_no = ddi_get_instance(dip);
1934 1935
1935 1936 struct mrsas_instance *instance = ddi_get_soft_state(mrsas_state,
1936 1937 instance_no);
1937 1938
1938 1939 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1939 1940
1940 1941 if (wait_for_outstanding(instance)) {
1941 1942 con_log(CL_ANN1, (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1942 1943 return (DDI_FAILURE);
1943 1944 } else {
1944 1945 return (DDI_SUCCESS);
1945 1946 }
1946 1947 }
1947 1948
1948 1949 /*
1949 1950 * tran_getcap - get one of a set of SCSA-defined capabilities
1950 1951 * @ap:
1951 1952 * @cap:
1952 1953 * @whom:
1953 1954 *
1954 1955 * The target driver can request the current setting of the capability for a
1955 1956 * particular target by setting the whom parameter to nonzero. A whom value of
1956 1957 * zero indicates a request for the current setting of the general capability
1957 1958 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
1958 1959 * for undefined capabilities or the current value of the requested capability.
1959 1960 */
1960 1961 /*ARGSUSED*/
1961 1962 static int
1962 1963 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
1963 1964 {
1964 1965 int rval = 0;
1965 1966
1966 1967 struct mrsas_instance *instance = ADDR2MR(ap);
1967 1968
1968 1969 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1969 1970
1970 1971 /* we do allow inquiring about capabilities for other targets */
1971 1972 if (cap == NULL) {
1972 1973 return (-1);
1973 1974 }
1974 1975
1975 1976 switch (scsi_hba_lookup_capstr(cap)) {
1976 1977 case SCSI_CAP_DMA_MAX:
1977 1978 if (instance->tbolt) {
1978 1979 /* Limit to 256k max transfer */
1979 1980 rval = mrsas_tbolt_max_cap_maxxfer;
1980 1981 } else {
1981 1982 /* Limit to 16MB max transfer */
1982 1983 rval = mrsas_max_cap_maxxfer;
1983 1984 }
1984 1985 break;
1985 1986 case SCSI_CAP_MSG_OUT:
1986 1987 rval = 1;
1987 1988 break;
1988 1989 case SCSI_CAP_DISCONNECT:
1989 1990 rval = 0;
1990 1991 break;
1991 1992 case SCSI_CAP_SYNCHRONOUS:
1992 1993 rval = 0;
1993 1994 break;
1994 1995 case SCSI_CAP_WIDE_XFER:
1995 1996 rval = 1;
1996 1997 break;
1997 1998 case SCSI_CAP_TAGGED_QING:
1998 1999 rval = 1;
1999 2000 break;
2000 2001 case SCSI_CAP_UNTAGGED_QING:
2001 2002 rval = 1;
2002 2003 break;
2003 2004 case SCSI_CAP_PARITY:
2004 2005 rval = 1;
2005 2006 break;
2006 2007 case SCSI_CAP_INITIATOR_ID:
2007 2008 rval = instance->init_id;
2008 2009 break;
2009 2010 case SCSI_CAP_ARQ:
2010 2011 rval = 1;
2011 2012 break;
2012 2013 case SCSI_CAP_LINKED_CMDS:
2013 2014 rval = 0;
2014 2015 break;
2015 2016 case SCSI_CAP_RESET_NOTIFICATION:
2016 2017 rval = 1;
2017 2018 break;
2018 2019 case SCSI_CAP_GEOMETRY:
2019 2020 rval = -1;
2020 2021
2021 2022 break;
2022 2023 default:
2023 2024 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2024 2025 scsi_hba_lookup_capstr(cap)));
2025 2026 rval = -1;
2026 2027 break;
2027 2028 }
2028 2029
2029 2030 return (rval);
2030 2031 }
2031 2032
2032 2033 /*
2033 2034 * tran_setcap - set one of a set of SCSA-defined capabilities
2034 2035 * @ap:
2035 2036 * @cap:
2036 2037 * @value:
2037 2038 * @whom:
2038 2039 *
2039 2040 * The target driver might request that the new value be set for a particular
2040 2041 * target by setting the whom parameter to nonzero. A whom value of zero
2041 2042 * means that request is to set the new value for the SCSI bus or for adapter
2042 2043 * hardware in general.
2043 2044 * The tran_setcap() should return the following values as appropriate:
2044 2045 * - -1 for undefined capabilities
2045 2046 * - 0 if the HBA driver cannot set the capability to the requested value
2046 2047 * - 1 if the HBA driver is able to set the capability to the requested value
2047 2048 */
2048 2049 /*ARGSUSED*/
2049 2050 static int
2050 2051 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2051 2052 {
2052 2053 int rval = 1;
2053 2054
2054 2055 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2055 2056
2056 2057 /* We don't allow setting capabilities for other targets */
2057 2058 if (cap == NULL || whom == 0) {
2058 2059 return (-1);
2059 2060 }
2060 2061
2061 2062 switch (scsi_hba_lookup_capstr(cap)) {
2062 2063 case SCSI_CAP_DMA_MAX:
2063 2064 case SCSI_CAP_MSG_OUT:
2064 2065 case SCSI_CAP_PARITY:
2065 2066 case SCSI_CAP_LINKED_CMDS:
2066 2067 case SCSI_CAP_RESET_NOTIFICATION:
2067 2068 case SCSI_CAP_DISCONNECT:
2068 2069 case SCSI_CAP_SYNCHRONOUS:
2069 2070 case SCSI_CAP_UNTAGGED_QING:
2070 2071 case SCSI_CAP_WIDE_XFER:
2071 2072 case SCSI_CAP_INITIATOR_ID:
2072 2073 case SCSI_CAP_ARQ:
2073 2074 /*
2074 2075 * None of these are settable via
2075 2076 * the capability interface.
2076 2077 */
2077 2078 break;
2078 2079 case SCSI_CAP_TAGGED_QING:
2079 2080 rval = 1;
2080 2081 break;
2081 2082 case SCSI_CAP_SECTOR_SIZE:
2082 2083 rval = 1;
2083 2084 break;
2084 2085
2085 2086 case SCSI_CAP_TOTAL_SECTORS:
2086 2087 rval = 1;
2087 2088 break;
2088 2089 default:
2089 2090 rval = -1;
2090 2091 break;
2091 2092 }
2092 2093
2093 2094 return (rval);
2094 2095 }
2095 2096
2096 2097 /*
2097 2098 * tran_destroy_pkt - deallocate scsi_pkt structure
2098 2099 * @ap:
2099 2100 * @pkt:
2100 2101 *
2101 2102 * The tran_destroy_pkt() entry point is the HBA driver function that
2102 2103 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2103 2104 * called when the target driver calls scsi_destroy_pkt(). The
2104 2105 * tran_destroy_pkt() entry point must free any DMA resources that have been
2105 2106 * allocated for the packet. An implicit DMA synchronization occurs if the
2106 2107 * DMA resources are freed and any cached data remains after the completion
2107 2108 * of the transfer.
2108 2109 */
2109 2110 static void
2110 2111 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2111 2112 {
2112 2113 struct scsa_cmd *acmd = PKT2CMD(pkt);
2113 2114
2114 2115 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2115 2116
2116 2117 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2117 2118 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2118 2119
2119 2120 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2120 2121
2121 2122 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2122 2123
2123 2124 acmd->cmd_dmahandle = NULL;
2124 2125 }
2125 2126
2126 2127 /* free the pkt */
2127 2128 scsi_hba_pkt_free(ap, pkt);
2128 2129 }
2129 2130
2130 2131 /*
2131 2132 * tran_dmafree - deallocates DMA resources
2132 2133 * @ap:
2133 2134 * @pkt:
2134 2135 *
2135 2136 * The tran_dmafree() entry point deallocates DMAQ resources that have been
2136 2137 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2137 2138 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2138 2139 * free only DMA resources allocated for a scsi_pkt structure, not the
2139 2140 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2140 2141 * implicitly performed.
2141 2142 */
2142 2143 /*ARGSUSED*/
2143 2144 static void
2144 2145 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2145 2146 {
2146 2147 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2147 2148
2148 2149 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2149 2150
2150 2151 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2151 2152 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2152 2153
2153 2154 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2154 2155
2155 2156 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2156 2157
2157 2158 acmd->cmd_dmahandle = NULL;
2158 2159 }
2159 2160 }
2160 2161
2161 2162 /*
2162 2163 * tran_sync_pkt - synchronize the DMA object allocated
2163 2164 * @ap:
2164 2165 * @pkt:
2165 2166 *
2166 2167 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2167 2168 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2168 2169 * entry point is called when the target driver calls scsi_sync_pkt(). If the
2169 2170 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2170 2171 * must synchronize the CPU's view of the data. If the data transfer direction
2171 2172 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2172 2173 * device's view of the data.
2173 2174 */
2174 2175 /*ARGSUSED*/
2175 2176 static void
2176 2177 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2177 2178 {
2178 2179 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2179 2180
2180 2181 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2181 2182
2182 2183 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2183 2184 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2184 2185 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2185 2186 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2186 2187 }
2187 2188 }
2188 2189
2189 2190 /*ARGSUSED*/
2190 2191 static int
2191 2192 mrsas_tran_quiesce(dev_info_t *dip)
2192 2193 {
2193 2194 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2194 2195
2195 2196 return (1);
2196 2197 }
2197 2198
2198 2199 /*ARGSUSED*/
2199 2200 static int
2200 2201 mrsas_tran_unquiesce(dev_info_t *dip)
2201 2202 {
2202 2203 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2203 2204
2204 2205 return (1);
2205 2206 }
2206 2207
2207 2208
/*
 * mrsas_isr(caddr_t)
 *
 * The Interrupt Service Routine
 *
 * Collect status for all completed commands and do callback
 *
 * For Thunderbolt (tbolt) controllers the work is delegated to
 * mr_sas_tbolt_process_outstanding_cmd() under chip_mtx.  For legacy
 * controllers, completed command indices are drained from the firmware's
 * producer/consumer reply ring into completed_pool_list and handed off to
 * the soft interrupt for final processing.
 *
 * Returns DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED.
 */
static uint_t
mrsas_isr(struct mrsas_instance *instance)
{
	int need_softintr;
	uint32_t producer;
	uint32_t consumer;
	uint32_t context;
	/* NOTE(review): status and value appear unused in this routine */
	uint32_t status, value;
	int retval;

	struct mrsas_cmd *cmd;
	struct mrsas_header *hdr;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	ASSERT(instance);
	if (instance->tbolt) {
		/* Thunderbolt path: process under the chip mutex. */
		mutex_enter(&instance->chip_mtx);
		/*
		 * For fixed (shared-line) interrupts, ask the hardware
		 * whether this interrupt is actually ours.
		 */
		if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
		    !(instance->func_ptr->intr_ack(instance))) {
			mutex_exit(&instance->chip_mtx);
			return (DDI_INTR_UNCLAIMED);
		}
		retval = mr_sas_tbolt_process_outstanding_cmd(instance);
		mutex_exit(&instance->chip_mtx);
		return (retval);
	} else {
		/* Legacy path: same not-ours check for fixed interrupts. */
		if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
		    !instance->func_ptr->intr_ack(instance)) {
			return (DDI_INTR_UNCLAIMED);
		}
	}

	/* Pull the firmware-written reply ring into CPU view. */
	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORCPU);

#ifdef OCRDEBUG
	if (debug_consecutive_timeout_after_ocr_g == 1) {
		con_log(CL_ANN1, (CE_NOTE,
		    "simulating consecutive timeout after ocr"));
		return (DDI_INTR_CLAIMED);
	}
#endif

	/*
	 * Lock order: completed_pool_mtx before cmd_pend_mtx.  Commands
	 * are moved from the pending list to the completed pool below.
	 */
	mutex_enter(&instance->completed_pool_mtx);
	mutex_enter(&instance->cmd_pend_mtx);

	producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer);
	consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer);

	con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
	    producer, consumer));
	if (producer == consumer) {
		/* Ring is empty: nothing completed since last pass. */
		con_log(CL_ANN, (CE_WARN, "producer == consumer case"));
		mutex_exit(&instance->cmd_pend_mtx);
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_CLAIMED);
	}

	/* Drain every completed entry between consumer and producer. */
	while (consumer != producer) {
		/* Each ring slot holds the index of a completed command. */
		context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
		    &instance->reply_queue[consumer]);
		cmd = instance->cmd_list[context];

		/*
		 * Unlink the command from the pending list before queueing
		 * it on the completed pool.  The hdr/pkt checks mirror
		 * push_pending_mfi_pkt(); hdr is the address of an embedded
		 * member and is always non-NULL in practice.
		 */
		if (cmd->sync_cmd == MRSAS_TRUE) {
			hdr = (struct mrsas_header *)&cmd->frame->hdr;
			if (hdr) {
				mlist_del_init(&cmd->list);
			}
		} else {
			pkt = cmd->pkt;
			if (pkt) {
				mlist_del_init(&cmd->list);
			}
		}

		mlist_add_tail(&cmd->list, &instance->completed_pool_list);

		consumer++;
		/* Ring has max_fw_cmds + 1 slots; wrap at the end. */
		if (consumer == (instance->max_fw_cmds + 1)) {
			consumer = 0;
		}
	}
	/* Publish the new consumer index back to the firmware. */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, consumer);
	mutex_exit(&instance->cmd_pend_mtx);
	mutex_exit(&instance->completed_pool_mtx);

	(void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	/* Only trigger the softint if it is not already running. */
	if (instance->softint_running) {
		need_softintr = 0;
	} else {
		need_softintr = 1;
	}

	if (instance->isr_level == HIGH_LEVEL_INTR) {
		if (need_softintr) {
			ddi_trigger_softintr(instance->soft_intr_id);
		}
	} else {
		/*
		 * Not a high-level interrupt, therefore call the soft level
		 * interrupt explicitly
		 */
		(void) mrsas_softintr(instance);
	}

	return (DDI_INTR_CLAIMED);
}
2329 2330
2330 2331
2331 2332 /*
2332 2333 * ************************************************************************** *
2333 2334 * *
2334 2335 * libraries *
2335 2336 * *
2336 2337 * ************************************************************************** *
2337 2338 */
2338 2339 /*
2339 2340 * get_mfi_pkt : Get a command from the free pool
2340 2341 * After successful allocation, the caller of this routine
2341 2342 * must clear the frame buffer (memset to zero) before
2342 2343 * using the packet further.
2343 2344 *
2344 2345 * ***** Note *****
2345 2346 * After clearing the frame buffer the context id of the
2346 2347 * frame buffer SHOULD be restored back.
2347 2348 */
2348 2349 static struct mrsas_cmd *
2349 2350 get_mfi_pkt(struct mrsas_instance *instance)
2350 2351 {
2351 2352 mlist_t *head = &instance->cmd_pool_list;
2352 2353 struct mrsas_cmd *cmd = NULL;
2353 2354
2354 2355 mutex_enter(&instance->cmd_pool_mtx);
2355 2356 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2356 2357
2357 2358 if (!mlist_empty(head)) {
2358 2359 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2359 2360 mlist_del_init(head->next);
2360 2361 }
2361 2362 if (cmd != NULL) {
2362 2363 cmd->pkt = NULL;
2363 2364 cmd->retry_count_for_ocr = 0;
2364 2365 cmd->drv_pkt_time = 0;
2365 2366
2366 2367 }
2367 2368 mutex_exit(&instance->cmd_pool_mtx);
2368 2369
2369 2370 return (cmd);
2370 2371 }
2371 2372
2372 2373 static struct mrsas_cmd *
2373 2374 get_mfi_app_pkt(struct mrsas_instance *instance)
2374 2375 {
2375 2376 mlist_t *head = &instance->app_cmd_pool_list;
2376 2377 struct mrsas_cmd *cmd = NULL;
2377 2378
2378 2379 mutex_enter(&instance->app_cmd_pool_mtx);
2379 2380 ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));
2380 2381
2381 2382 if (!mlist_empty(head)) {
2382 2383 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2383 2384 mlist_del_init(head->next);
2384 2385 }
2385 2386 if (cmd != NULL){
2386 2387 cmd->pkt = NULL;
2387 2388 cmd->retry_count_for_ocr = 0;
2388 2389 cmd->drv_pkt_time = 0;
2389 2390 }
2390 2391
2391 2392 mutex_exit(&instance->app_cmd_pool_mtx);
2392 2393
2393 2394 return (cmd);
2394 2395 }
2395 2396 /*
2396 2397 * return_mfi_pkt : Return a cmd to free command pool
2397 2398 */
2398 2399 static void
2399 2400 return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2400 2401 {
2401 2402 mutex_enter(&instance->cmd_pool_mtx);
2402 2403 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2403 2404
2404 2405 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2405 2406
2406 2407 mutex_exit(&instance->cmd_pool_mtx);
2407 2408 }
2408 2409
2409 2410 static void
2410 2411 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2411 2412 {
2412 2413 mutex_enter(&instance->app_cmd_pool_mtx);
2413 2414 ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));
2414 2415
2415 2416 mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2416 2417
2417 2418 mutex_exit(&instance->app_cmd_pool_mtx);
2418 2419 }
/*
 * push_pending_mfi_pkt - move a command onto the pending list and arm
 * the per-command timeout used by io_timeout_checker().
 *
 * For sync (DCMD) commands the timeout comes from the frame header but
 * is floored at debug_timeout_g; for packet commands debug_timeout_g is
 * used directly.  The io_timeout_checker() timeout() callback is started
 * lazily, only if it is not already scheduled (timeout_id == -1).
 */
void
push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;
	con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
	mutex_enter(&instance->cmd_pend_mtx);
	ASSERT(mutex_owned(&instance->cmd_pend_mtx));
	/* Unlink from any list the command is currently on, then append. */
	mlist_del_init(&cmd->list);
	mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
	if (cmd->sync_cmd == MRSAS_TRUE) {
		/* hdr is the address of an embedded member (never NULL). */
		hdr = (struct mrsas_header *)&cmd->frame->hdr;
		if (hdr) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x "
			    "time %llx",
			    (void *)cmd, cmd->index,
			    gethrtime()));
			/* Wait for specified interval */
			cmd->drv_pkt_time = ddi_get16(
			    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
			/* Never time out faster than the global floor. */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_pkt(): "
			    "Called IO Timeout Value %x\n",
			    cmd->drv_pkt_time));
		}
		/* Arm the 1-second timeout poller if not already running. */
		if (hdr && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	} else {
		pkt = cmd->pkt;
		if (pkt) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x pkt %p, "
			    "time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
		}
		/* Arm the 1-second timeout poller if not already running. */
		if (pkt && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	}

	mutex_exit(&instance->cmd_pend_mtx);

}
2472 2473
/*
 * mrsas_print_pending_cmds - debug dump of every command currently on
 * the pending list.
 *
 * Temporarily forces debug_level_g to CL_ANN1 so the con_log() calls
 * below are emitted, restoring the caller's level before returning.
 * The first command gets a full detail dump (0xDD), the rest a summary.
 *
 * NOTE(review): cmd_pend_mtx is dropped between iterations while tmp
 * still points into the list; concurrent list mutation could invalidate
 * the cursor.  Acceptable only because this is a diagnostic path —
 * confirm callers hold the list quiescent.
 *
 * Always returns DDI_SUCCESS.
 */
int
mrsas_print_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	unsigned int flag = 1;
	struct scsi_pkt *pkt;
	int saved_level;
	int cmd_count = 0;


	/* Force verbose logging for the duration of the dump. */
	saved_level = debug_level_g;
	debug_level_g = CL_ANN1;

	cmn_err(CE_NOTE, "mrsas_print_pending_cmds(): Called\n");

	while (flag) {
		mutex_enter(&instance->cmd_pend_mtx);
		tmp = tmp->next;
		if (tmp == head) {
			/* Wrapped back to the list head: done. */
			mutex_exit(&instance->cmd_pend_mtx);
			flag = 0;
			con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): NO MORE CMDS PENDING....\n"));
			break;
		} else {
			cmd = mlist_entry(tmp, struct mrsas_cmd, list);
			mutex_exit(&instance->cmd_pend_mtx);
			if (cmd) {
				if (cmd->sync_cmd == MRSAS_TRUE) {
					/* DCMD: log via the frame header. */
					hdr = (struct mrsas_header *)&cmd->frame->hdr;
					if (hdr) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x drv_pkt_time 0x%x (NO-PKT) hdr %p\n",
						    (void *)cmd, cmd->index, cmd->drv_pkt_time, (void *)hdr));
					}
				} else {
					/* I/O command: log via its scsi_pkt. */
					pkt = cmd->pkt;
					if (pkt) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x drv_pkt_time 0x%x pkt %p \n",
						    (void *)cmd, cmd->index, cmd->drv_pkt_time, (void *)pkt));
					}
				}

				/* Full detail only for the first command. */
				if (++cmd_count == 1)
					mrsas_print_cmd_details(instance, cmd, 0xDD);
				else
					mrsas_print_cmd_details(instance, cmd, 1);

			}
		}
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));


	/* Restore the caller's debug level. */
	debug_level_g = saved_level;

	return (DDI_SUCCESS);
}
2534 2535
2535 2536
/*
 * mrsas_complete_pending_cmds - fail every command still on the pending
 * list back to its originator.
 *
 * I/O packets are completed with CMD_DEV_GONE / STAT_DISCON through
 * their pkt_comp callback; sync DCMDs are stamped with
 * MFI_STAT_INVALID_STATUS and woken via complete_cmd_in_sync_mode().
 * Used on the reset/kill path when outstanding work cannot finish.
 *
 * NOTE(review): hdr->cmd_status is written directly rather than through
 * ddi_put8(); verify this is intentional for the frame DMA mapping.
 *
 * Always returns DDI_SUCCESS.
 */
int
mrsas_complete_pending_cmds(struct mrsas_instance *instance)
{

	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;

	struct mlist_head *pos, *next;

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_complete_pending_cmds(): Called"));

	mutex_enter(&instance->cmd_pend_mtx);
	/* _safe variant: entries are unlinked while iterating. */
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);
		if (cmd) {
			pkt = cmd->pkt;
			if (pkt) { /* for IO */
				/* Only polled (FLAG_NOINTR) pkts lack a completion. */
				if (((pkt->pkt_flags & FLAG_NOINTR)
				    == 0) && pkt->pkt_comp) {
					pkt->pkt_reason
					    = CMD_DEV_GONE;
					pkt->pkt_statistics
					    = STAT_DISCON;
					con_log(CL_ANN1, (CE_CONT,
					    "fail and posting to scsa "
					    "cmd %p index %x"
					    " pkt %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)pkt, gethrtime()));
					(*pkt->pkt_comp)(pkt);
				}
			} else { /* for DCMDS */
				if (cmd->sync_cmd == MRSAS_TRUE) {
					hdr = (struct mrsas_header *)&cmd->frame->hdr;
					con_log(CL_ANN1, (CE_CONT,
					    "posting invalid status to application "
					    "cmd %p index %x"
					    " hdr %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)hdr, gethrtime()));
					hdr->cmd_status = MFI_STAT_INVALID_STATUS;
					complete_cmd_in_sync_mode(instance, cmd);
				}
			}
			/* Remove from the pending list in all cases. */
			mlist_del_init(&cmd->list);
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_complete_pending_cmds:"
			    "NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_complete_pending_cmds:"
		    "looping for more commands\n"));
	}
	mutex_exit(&instance->cmd_pend_mtx);

	con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2599 2600
2600 2601 void
2601 2602 mrsas_print_cmd_details(struct mrsas_instance *instance,
2602 2603 struct mrsas_cmd *cmd, int detail )
2603 2604 {
2604 2605 struct scsi_pkt *pkt = cmd->pkt;
2605 2606 Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
2606 2607 MPI2_SCSI_IO_VENDOR_UNIQUE *raidContext;
2607 2608 uint8_t *cdb_p;
2608 2609 char str[100], *strp;
2609 2610 int i, j, len;
2610 2611 int saved_level;
2611 2612
2612 2613
2613 2614 if (detail == 0xDD) {
2614 2615 saved_level = debug_level_g;
2615 2616 debug_level_g = CL_ANN1;
2616 2617 }
2617 2618
2618 2619
2619 2620 if (instance->tbolt) {
2620 2621 con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
2621 2622 (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
2622 2623 }
2623 2624 else {
2624 2625 con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p cmd->index 0x%x timer 0x%x sec\n",
2625 2626 (void *)cmd, cmd->index, cmd->drv_pkt_time));
2626 2627 }
2627 2628
2628 2629 if(pkt) {
2629 2630 con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
2630 2631 pkt->pkt_cdbp[0]));
2631 2632 }else {
2632 2633 con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
2633 2634 }
2634 2635
2635 2636 if((detail==0xDD) && instance->tbolt) {
2636 2637 con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
2637 2638 con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
2638 2639 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->DevHandle),
2639 2640 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->Function),
|
↓ open down ↓ |
2315 lines elided |
↑ open up ↑ |
2640 2641 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->IoFlags),
2641 2642 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->SGLFlags),
2642 2643 ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->DataLength) ));
2643 2644
2644 2645 for(i=0; i < 32; i++)
2645 2646 con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ",i,
2646 2647 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->CDB.CDB32[i]) ));
2647 2648
2648 2649 con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
2649 2650 con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X ldTargetId=0x%X timeoutValue=0x%X"
2650 - "regLockFlags=0x%X RAIDFlags=0x%X regLockRowLBA=0x%lX regLockLength=0x%X spanArm=0x%X\n",
2651 + "regLockFlags=0x%X RAIDFlags=0x%X regLockRowLBA=0x%" PRIx64 " regLockLength=0x%X spanArm=0x%X\n",
2651 2652 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.status),
2652 2653 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.extStatus),
2653 2654 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.ldTargetId),
2654 2655 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.timeoutValue),
2655 2656 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.regLockFlags),
2656 2657 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.RAIDFlags),
2657 2658 ddi_get64(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.regLockRowLBA),
2658 2659 ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.regLockLength),
2659 2660 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.spanArm) ));
2660 2661
2661 2662
2662 2663 }
2663 2664
2664 2665 if (detail == 0xDD) {
2665 2666 debug_level_g = saved_level;
2666 2667 }
2667 2668
2668 2669 return;
2669 2670 }
2670 2671
2671 2672
/*
 * mrsas_issue_pending_cmds - re-issue every command on the pending list
 * to the firmware after an online controller reset (OCR).
 *
 * Each command's timeout is refreshed (floored at debug_timeout_g) and
 * its OCR retry count incremented; once a command exceeds
 * IO_RETRY_COUNT retries the adapter is killed and DDI_FAILURE is
 * returned.  Otherwise the command is re-submitted via the sync or
 * async issue_cmd function pointer as appropriate.
 *
 * NOTE(review): cmd_pend_mtx protects only the cursor advance, not the
 * re-issue itself; confirm callers serialize against new submissions.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE after killing the adapter.
 */
int
mrsas_issue_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head->next;
	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
	while (tmp != head) {
		/* Advance the cursor under the lock before re-issuing. */
		mutex_enter(&instance->cmd_pend_mtx);
		cmd = mlist_entry(tmp, struct mrsas_cmd, list);
		tmp = tmp->next;
		mutex_exit(&instance->cmd_pend_mtx);
		if (cmd) {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds(): "
			    "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
			    (void *)cmd, cmd->index, cmd->drv_pkt_time));

			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g; /* Reset command timeout value */

			cmd->retry_count_for_ocr++;

			cmn_err(CE_CONT, "cmd retry count = %d\n",
			    cmd->retry_count_for_ocr);

			/* Too many OCR retries: give up and kill the HBA. */
			if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
				cmn_err(CE_WARN,
				    "mrsas_issue_pending_cmds(): cmd->retry_count exceeded limit >%d\n",
				    IO_RETRY_COUNT);
				mrsas_print_cmd_details(instance, cmd, 0xDD);

				cmn_err(CE_WARN,
				    "mrsas_issue_pending_cmds():"
				    "Calling KILL Adapter\n");
				if (instance->tbolt)
					(void) mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}

			pkt = cmd->pkt;
			if (pkt) {
				con_log(CL_ANN1, (CE_CONT,
				    "PENDING PKT-CMD ISSUE: cmd %p index %x "
				    "pkt %p time %llx",
				    (void *)cmd, cmd->index,
				    (void *)pkt,
				    gethrtime()));

			} else {
				cmn_err(CE_CONT,
				    "mrsas_issue_pending_cmds(): "
				    "NO-PKT, cmd %p index 0x%x drv_pkt_time 0x%x ",
				    (void *)cmd, cmd->index, cmd->drv_pkt_time);
			}


			/* Re-submit: sync DCMDs block, I/O pkts are async. */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				cmn_err(CE_CONT, "mrsas_issue_pending_cmds(): SYNC_CMD == TRUE \n");

				instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
			} else {
				instance->func_ptr->issue_cmd(cmd, instance);
			}
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds: NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_issue_pending_cmds:"
		    "looping for more commands"));
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2751 2752
2752 2753
2753 2754
2754 2755 /*
2755 2756 * destroy_mfi_frame_pool
2756 2757 */
2757 2758 void
2758 2759 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2759 2760 {
2760 2761 int i;
2761 2762 uint32_t max_cmd = instance->max_fw_cmds;
2762 2763
2763 2764 struct mrsas_cmd *cmd;
2764 2765
2765 2766 /* return all frames to pool */
2766 2767
2767 2768 for (i = 0; i < max_cmd; i++) {
2768 2769
2769 2770 cmd = instance->cmd_list[i];
2770 2771
2771 2772 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2772 2773 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2773 2774
2774 2775 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
2775 2776 }
2776 2777
2777 2778 }
2778 2779
/*
 * create_mfi_frame_pool - allocate one DMA frame buffer per firmware
 * command.
 *
 * Each buffer holds the MFI frame, the IEEE SGL (max_num_sge entries)
 * and a trailing sense area, 64-byte aligned in a single DMA cookie.
 * On any allocation failure the pool built so far is torn down via
 * destroy_mfi_frame_pool().
 *
 * Returns DDI_SUCCESS on success.  NOTE(review): the two failure paths
 * return different values (DDI_FAILURE vs ENOMEM); callers comparing
 * against DDI_SUCCESS are unaffected, but confirm no caller tests for
 * a specific failure code.
 */
int
create_mfi_frame_pool(struct mrsas_instance *instance)
{
	int i = 0;
	int cookie_cnt;
	uint16_t max_cmd;
	uint16_t sge_sz;
	uint32_t sgl_sz;
	uint32_t tot_frame_size;
	struct mrsas_cmd *cmd;
	int retval = DDI_SUCCESS;

	max_cmd = instance->max_fw_cmds;
	sge_sz = sizeof (struct mrsas_sge_ieee);
	/* calculated the number of 64byte frames required for SGL */
	sgl_sz = sge_sz * instance->max_num_sge;
	tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
	    "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));

	while (i < max_cmd) {
		cmd = instance->cmd_list[i];

		/* 32-bit addressable, single-cookie, 64-byte aligned. */
		cmd->frame_dma_obj.size = tot_frame_size;
		cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
		cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
		cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;

		cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC);

		/* Exactly one cookie is required (sgllen == 1 above). */
		if (cookie_cnt == -1 || cookie_cnt > 1) {
			cmn_err(CE_WARN,
			    "create_mfi_frame_pool: could not alloc.");
			retval = DDI_FAILURE;
			goto mrsas_undo_frame_pool;
		}

		bzero(cmd->frame_dma_obj.buffer, tot_frame_size);

		cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
		cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
		cmd->frame_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address;

		/* Sense area occupies the last SENSE_LENGTH bytes. */
		cmd->sense = (uint8_t *)(((unsigned long)
		    cmd->frame_dma_obj.buffer) +
		    tot_frame_size - SENSE_LENGTH);
		cmd->sense_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address +
		    tot_frame_size - SENSE_LENGTH;

		if (!cmd->frame || !cmd->sense) {
			cmn_err(CE_WARN,
			    "mr_sas: pci_pool_alloc failed");
			retval = ENOMEM;
			goto mrsas_undo_frame_pool;
		}

		/* Stamp the frame's context with the command index. */
		ddi_put32(cmd->frame_dma_obj.acc_handle,
		    &cmd->frame->io.context, cmd->index);
		i++;

		con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
		    cmd->index, cmd->frame_phys_addr));
	}

	return (DDI_SUCCESS);

mrsas_undo_frame_pool:
	/* Tear down whatever was allocated before the failure. */
	if (i > 0)
		destroy_mfi_frame_pool(instance);

	return (retval);
}
2860 2861
2861 2862 /*
2862 2863 * free_additional_dma_buffer
2863 2864 */
2864 2865 static void
2865 2866 free_additional_dma_buffer(struct mrsas_instance *instance)
2866 2867 {
2867 2868 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
2868 2869 (void) mrsas_free_dma_obj(instance,
2869 2870 instance->mfi_internal_dma_obj);
2870 2871 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
2871 2872 }
2872 2873
2873 2874 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
2874 2875 (void) mrsas_free_dma_obj(instance,
2875 2876 instance->mfi_evt_detail_obj);
2876 2877 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
2877 2878 }
2878 2879 }
2879 2880
2880 2881 /*
2881 2882 * alloc_additional_dma_buffer
2882 2883 */
2883 2884 static int
2884 2885 alloc_additional_dma_buffer(struct mrsas_instance *instance)
2885 2886 {
2886 2887 uint32_t reply_q_sz;
2887 2888 uint32_t internal_buf_size = PAGESIZE*2;
2888 2889
2889 2890 /* max cmds plus 1 + producer & consumer */
2890 2891 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
2891 2892
2892 2893 instance->mfi_internal_dma_obj.size = internal_buf_size;
2893 2894 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
2894 2895 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2895 2896 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
2896 2897 0xFFFFFFFFU;
2897 2898 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
2898 2899
2899 2900 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
2900 2901 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
2901 2902 cmn_err(CE_WARN,
2902 2903 "mr_sas: could not alloc reply queue");
2903 2904 return (DDI_FAILURE);
2904 2905 }
2905 2906
2906 2907 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
2907 2908
2908 2909 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
2909 2910
2910 2911 instance->producer = (uint32_t *)((unsigned long)
2911 2912 instance->mfi_internal_dma_obj.buffer);
2912 2913 instance->consumer = (uint32_t *)((unsigned long)
2913 2914 instance->mfi_internal_dma_obj.buffer + 4);
2914 2915 instance->reply_queue = (uint32_t *)((unsigned long)
2915 2916 instance->mfi_internal_dma_obj.buffer + 8);
2916 2917 instance->internal_buf = (caddr_t)(((unsigned long)
2917 2918 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
2918 2919 instance->internal_buf_dmac_add =
2919 2920 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
2920 2921 (reply_q_sz + 8);
2921 2922 instance->internal_buf_size = internal_buf_size -
2922 2923 (reply_q_sz + 8);
2923 2924
2924 2925 /* allocate evt_detail */
2925 2926 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
2926 2927 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
2927 2928 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2928 2929 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2929 2930 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
2930 2931 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
2931 2932
2932 2933 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
2933 2934 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
2934 2935 cmn_err(CE_WARN, "alloc_additional_dma_buffer: "
2935 2936 "could not allocate data transfer buffer.");
2936 2937 goto mrsas_undo_internal_buff;
2937 2938 }
2938 2939
2939 2940 bzero(instance->mfi_evt_detail_obj.buffer,
2940 2941 sizeof (struct mrsas_evt_detail));
2941 2942
2942 2943 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
2943 2944
2944 2945 return (DDI_SUCCESS);
2945 2946
2946 2947 mrsas_undo_internal_buff:
2947 2948 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
2948 2949 (void) mrsas_free_dma_obj(instance,
2949 2950 instance->mfi_internal_dma_obj);
2950 2951 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
2951 2952 }
2952 2953
2953 2954 return (DDI_FAILURE);
2954 2955 }
2955 2956
2956 2957
2957 2958 void
2958 2959 mrsas_free_cmd_pool(struct mrsas_instance *instance)
2959 2960 {
2960 2961 int i;
2961 2962 uint32_t max_cmd;
2962 2963 size_t sz;
2963 2964
2964 2965 /* already freed */
2965 2966 if (instance->cmd_list == NULL) {
2966 2967 return;
2967 2968 }
2968 2969
2969 2970 max_cmd = instance->max_fw_cmds;
2970 2971
2971 2972 /* size of cmd_list array */
2972 2973 sz = sizeof (struct mrsas_cmd *) * max_cmd;
2973 2974
2974 2975 /* First free each cmd */
2975 2976 for (i = 0; i < max_cmd; i++) {
2976 2977 if (instance->cmd_list[i] != NULL)
2977 2978 kmem_free(instance->cmd_list[i],sizeof (struct mrsas_cmd));
2978 2979
2979 2980 instance->cmd_list[i] = NULL;
2980 2981 }
2981 2982
2982 2983 /* Now, free cmd_list array */
2983 2984 if (instance->cmd_list != NULL)
2984 2985 kmem_free(instance->cmd_list,sz);
2985 2986
2986 2987 instance->cmd_list = NULL;
2987 2988
2988 2989 INIT_LIST_HEAD(&instance->cmd_pool_list);
2989 2990 INIT_LIST_HEAD(&instance->cmd_pend_list);
2990 2991 if (instance->tbolt) {
2991 2992 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
2992 2993 }
2993 2994 else {
2994 2995 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
2995 2996 }
2996 2997
2997 2998 }
2998 2999
2999 3000
3000 3001 /*
3001 3002 * mrsas_alloc_cmd_pool
3002 3003 */
3003 3004 int
3004 3005 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3005 3006 {
3006 3007 int i;
3007 3008 int count;
3008 3009 uint32_t max_cmd;
3009 3010 uint32_t reserve_cmd;
3010 3011 size_t sz;
3011 3012
3012 3013 struct mrsas_cmd *cmd;
3013 3014
3014 3015 max_cmd = instance->max_fw_cmds;
3015 3016 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3016 3017 "max_cmd %x", max_cmd));
3017 3018
3018 3019
3019 3020 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3020 3021
3021 3022 /*
3022 3023 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3023 3024 * Allocate the dynamic array first and then allocate individual
3024 3025 * commands.
3025 3026 */
3026 3027 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3027 3028 if (instance->cmd_list == NULL) {
3028 3029 con_log(CL_NONE, (CE_WARN,
3029 3030 "Failed to allocate memory for cmd_list"));
3030 3031 return (DDI_FAILURE);
3031 3032 }
3032 3033
3033 3034 /* create a frame pool and assign one frame to each cmd */
3034 3035 for (count = 0; count < max_cmd; count++) {
3035 3036 instance->cmd_list[count] = kmem_zalloc(sizeof (struct mrsas_cmd),
3036 3037 KM_SLEEP);
3037 3038 if (instance->cmd_list[count] == NULL) {
3038 3039 con_log(CL_NONE, (CE_WARN,
3039 3040 "Failed to allocate memory for mrsas_cmd"));
3040 3041 goto mrsas_undo_cmds;
3041 3042 }
3042 3043 }
3043 3044
3044 3045 /* add all the commands to command pool */
3045 3046
3046 3047 INIT_LIST_HEAD(&instance->cmd_pool_list);
3047 3048 INIT_LIST_HEAD(&instance->cmd_pend_list);
3048 3049 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3049 3050
3050 3051 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
3051 3052
3052 3053 for (i = 0; i < reserve_cmd; i++) {
3053 3054 cmd = instance->cmd_list[i];
3054 3055 cmd->index = i;
3055 3056 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3056 3057 }
3057 3058
3058 3059
3059 3060 for (i = reserve_cmd; i < max_cmd; i++) {
3060 3061 cmd = instance->cmd_list[i];
3061 3062 cmd->index = i;
3062 3063 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3063 3064 }
3064 3065
3065 3066 return (DDI_SUCCESS);
3066 3067
3067 3068 mrsas_undo_cmds:
3068 3069 if (count > 0) {
3069 3070 /* free each cmd */
3070 3071 for (i = 0; i < count; i++) {
3071 3072 if (instance->cmd_list[i] != NULL)
3072 3073 kmem_free(instance->cmd_list[i],sizeof (struct mrsas_cmd));
3073 3074 instance->cmd_list[i] = NULL;
3074 3075 }
3075 3076 }
3076 3077
3077 3078 mrsas_undo_cmd_list:
3078 3079 if (instance->cmd_list != NULL)
3079 3080 kmem_free(instance->cmd_list,sz);
3080 3081 instance->cmd_list = NULL;
3081 3082
3082 3083 return (DDI_FAILURE);
3083 3084 }
3084 3085
3085 3086
3086 3087 /*
3087 3088 * free_space_for_mfi
3088 3089 */
3089 3090 static void
3090 3091 free_space_for_mfi(struct mrsas_instance *instance)
3091 3092 {
3092 3093
3093 3094 /* already freed */
3094 3095 if (instance->cmd_list == NULL) {
3095 3096 return;
3096 3097 }
3097 3098
3098 3099 /* Free additional dma buffer */
3099 3100 free_additional_dma_buffer(instance);
3100 3101
3101 3102 /* Free the MFI frame pool */
3102 3103 destroy_mfi_frame_pool(instance);
3103 3104
3104 3105 /* Free all the commands in the cmd_list */
3105 3106 /* Free the cmd_list buffer itself */
3106 3107 mrsas_free_cmd_pool(instance);
3107 3108 }
3108 3109
3109 3110 /*
3110 3111 * alloc_space_for_mfi
3111 3112 */
3112 3113 static int
3113 3114 alloc_space_for_mfi(struct mrsas_instance *instance)
3114 3115 {
3115 3116 /* Allocate command pool ( memory for cmd_list & individual commands )*/
3116 3117 if (mrsas_alloc_cmd_pool(instance)) {
3117 3118 cmn_err(CE_WARN, "error creating cmd pool");
3118 3119 return (DDI_FAILURE);
3119 3120 }
3120 3121
3121 3122 /* Allocate MFI Frame pool */
3122 3123 if (create_mfi_frame_pool(instance)) {
3123 3124 cmn_err(CE_WARN, "error creating frame DMA pool");
3124 3125 goto mfi_undo_cmd_pool;
3125 3126 }
3126 3127
3127 3128 /* Allocate additional DMA buffer */
3128 3129 if (alloc_additional_dma_buffer(instance)) {
3129 3130 cmn_err(CE_WARN, "error creating frame DMA pool");
3130 3131 goto mfi_undo_frame_pool;
3131 3132 }
3132 3133
3133 3134 return (DDI_SUCCESS);
3134 3135
3135 3136 mfi_undo_frame_pool:
3136 3137 destroy_mfi_frame_pool(instance);
3137 3138
3138 3139 mfi_undo_cmd_pool:
3139 3140 mrsas_free_cmd_pool(instance);
3140 3141
3141 3142 return (DDI_FAILURE);
3142 3143 }
3143 3144
3144 3145
3145 3146
3146 3147 /*
3147 3148 * get_ctrl_info
3148 3149 */
3149 3150 static int
3150 3151 get_ctrl_info(struct mrsas_instance *instance,
3151 3152 struct mrsas_ctrl_info *ctrl_info)
3152 3153 {
3153 3154 int ret = 0;
3154 3155
3155 3156 struct mrsas_cmd *cmd;
3156 3157 struct mrsas_dcmd_frame *dcmd;
3157 3158 struct mrsas_ctrl_info *ci;
3158 3159
3159 3160 if(instance->tbolt) {
3160 3161 cmd = get_raid_msg_mfi_pkt(instance);
3161 3162 }
3162 3163 else {
3163 3164 cmd = get_mfi_pkt(instance);
3164 3165 }
3165 3166
3166 3167 if (!cmd) {
3167 3168 cmn_err(CE_WARN,
3168 3169 "Failed to get a cmd from free-pool in get_ctrl_info(). fw_outstanding=0x%X max_fw_cmds=0x%X",
3169 3170 instance->fw_outstanding, instance->max_fw_cmds);
3170 3171 return (DDI_FAILURE);
3171 3172 }
3172 3173
3173 3174 /* Clear the frame buffer and assign back the context id */
3174 3175 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3175 3176 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3176 3177 cmd->index);
3177 3178
3178 3179 dcmd = &cmd->frame->dcmd;
3179 3180
3180 3181 ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3181 3182
3182 3183 if (!ci) {
3183 3184 cmn_err(CE_WARN,
3184 3185 "Failed to alloc mem for ctrl info");
3185 3186 return_mfi_pkt(instance, cmd);
3186 3187 return (DDI_FAILURE);
3187 3188 }
3188 3189
3189 3190 (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3190 3191
3191 3192 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
3192 3193 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3193 3194
3194 3195 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3195 3196 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3196 3197 MFI_CMD_STATUS_POLL_MODE);
3197 3198 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3198 3199 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3199 3200 MFI_FRAME_DIR_READ);
3200 3201 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3201 3202 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3202 3203 sizeof (struct mrsas_ctrl_info));
3203 3204 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3204 3205 MR_DCMD_CTRL_GET_INFO);
3205 3206 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3206 3207 instance->internal_buf_dmac_add);
3207 3208 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3208 3209 sizeof (struct mrsas_ctrl_info));
3209 3210
3210 3211 cmd->frame_count = 1;
3211 3212
3212 3213 if (instance->tbolt) {
3213 3214 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3214 3215 }
3215 3216
3216 3217 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3217 3218 ret = 0;
3218 3219
3219 3220 ctrl_info->max_request_size = ddi_get32(
3220 3221 cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
3221 3222
3222 3223 ctrl_info->ld_present_count = ddi_get16(
3223 3224 cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
3224 3225
3225 3226 ctrl_info->properties.on_off_properties =
3226 3227 ddi_get32(cmd->frame_dma_obj.acc_handle,
3227 3228 &ci->properties.on_off_properties);
3228 3229 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3229 3230 (uint8_t *)(ctrl_info->product_name),
3230 3231 (uint8_t *)(ci->product_name), 80 * sizeof (char),
3231 3232 DDI_DEV_AUTOINCR);
3232 3233 /* should get more members of ci with ddi_get when needed */
3233 3234 } else {
3234 3235 cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed");
3235 3236 ret = -1;
3236 3237 }
3237 3238
3238 3239 if(instance->tbolt) {
3239 3240 return_raid_msg_mfi_pkt(instance, cmd);
3240 3241 }
3241 3242 else {
3242 3243 return_mfi_pkt(instance, cmd);
3243 3244 }
3244 3245
3245 3246 return (ret);
3246 3247 }
3247 3248
3248 3249 /*
3249 3250 * abort_aen_cmd
3250 3251 */
3251 3252 static int
3252 3253 abort_aen_cmd(struct mrsas_instance *instance,
3253 3254 struct mrsas_cmd *cmd_to_abort)
3254 3255 {
3255 3256 int ret = 0;
3256 3257
3257 3258 struct mrsas_cmd *cmd;
3258 3259 struct mrsas_abort_frame *abort_fr;
3259 3260
3260 3261 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3261 3262
3262 3263 if (instance->tbolt) {
3263 3264 cmd = get_raid_msg_mfi_pkt(instance);
3264 3265 } else {
3265 3266 cmd = get_mfi_pkt(instance);
3266 3267 }
3267 3268
3268 3269 if (!cmd) {
3269 3270 cmn_err(CE_WARN,
3270 3271 "Failed to get a cmd from free-pool in abort_aen_cmd(). fw_outstanding=0x%X max_fw_cmds=0x%X",
3271 3272 instance->fw_outstanding, instance->max_fw_cmds);
3272 3273 return (DDI_FAILURE);
3273 3274 }
3274 3275 /* Clear the frame buffer and assign back the context id */
3275 3276 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3276 3277 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3277 3278 cmd->index);
3278 3279
3279 3280 abort_fr = &cmd->frame->abort;
3280 3281
3281 3282 /* prepare and issue the abort frame */
3282 3283 ddi_put8(cmd->frame_dma_obj.acc_handle,
3283 3284 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3284 3285 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3285 3286 MFI_CMD_STATUS_SYNC_MODE);
3286 3287 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3287 3288 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3288 3289 cmd_to_abort->index);
3289 3290 ddi_put32(cmd->frame_dma_obj.acc_handle,
3290 3291 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3291 3292 ddi_put32(cmd->frame_dma_obj.acc_handle,
3292 3293 &abort_fr->abort_mfi_phys_addr_hi, 0);
3293 3294
3294 3295 instance->aen_cmd->abort_aen = 1;
3295 3296
3296 3297 cmd->frame_count = 1;
3297 3298
3298 3299 if (instance->tbolt) {
3299 3300 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3300 3301 }
3301 3302
3302 3303 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3303 3304 con_log(CL_ANN1, (CE_WARN,
3304 3305 "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3305 3306 ret = -1;
3306 3307 } else {
3307 3308 ret = 0;
3308 3309 }
3309 3310
3310 3311 instance->aen_cmd->abort_aen = 1;
3311 3312 instance->aen_cmd = 0;
3312 3313
3313 3314 if (instance->tbolt) {
3314 3315 return_raid_msg_mfi_pkt(instance, cmd);
3315 3316 } else {
3316 3317 return_mfi_pkt(instance, cmd);
3317 3318 }
3318 3319
3319 3320 atomic_add_16(&instance->fw_outstanding, (-1));
3320 3321
3321 3322 return (ret);
3322 3323 }
3323 3324
3324 3325
3325 3326 static int
3326 3327 mrsas_build_init_cmd(struct mrsas_instance *instance, struct mrsas_cmd **cmd_ptr)
3327 3328 {
3328 3329 struct mrsas_cmd *cmd;
3329 3330 struct mrsas_init_frame *init_frame;
3330 3331 struct mrsas_init_queue_info *initq_info;
3331 3332 struct mrsas_drv_ver drv_ver_info;
3332 3333
3333 3334
3334 3335 /*
3335 3336 * Prepare a init frame. Note the init frame points to queue info
3336 3337 * structure. Each frame has SGL allocated after first 64 bytes. For
3337 3338 * this frame - since we don't need any SGL - we use SGL's space as
3338 3339 * queue info structure
3339 3340 */
3340 3341 cmd = *cmd_ptr;
3341 3342
3342 3343
3343 3344 /* Clear the frame buffer and assign back the context id */
3344 3345 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3345 3346 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3346 3347 cmd->index);
3347 3348
3348 3349 init_frame = (struct mrsas_init_frame *)cmd->frame;
3349 3350 initq_info = (struct mrsas_init_queue_info *)
3350 3351 ((unsigned long)init_frame + 64);
3351 3352
3352 3353 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3353 3354 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3354 3355
3355 3356 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3356 3357
3357 3358 ddi_put32(cmd->frame_dma_obj.acc_handle,
3358 3359 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3359 3360
3360 3361 ddi_put32(cmd->frame_dma_obj.acc_handle,
3361 3362 &initq_info->producer_index_phys_addr_hi, 0);
3362 3363 ddi_put32(cmd->frame_dma_obj.acc_handle,
3363 3364 &initq_info->producer_index_phys_addr_lo,
3364 3365 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3365 3366
3366 3367 ddi_put32(cmd->frame_dma_obj.acc_handle,
3367 3368 &initq_info->consumer_index_phys_addr_hi, 0);
3368 3369 ddi_put32(cmd->frame_dma_obj.acc_handle,
3369 3370 &initq_info->consumer_index_phys_addr_lo,
3370 3371 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3371 3372
3372 3373 ddi_put32(cmd->frame_dma_obj.acc_handle,
3373 3374 &initq_info->reply_queue_start_phys_addr_hi, 0);
3374 3375 ddi_put32(cmd->frame_dma_obj.acc_handle,
3375 3376 &initq_info->reply_queue_start_phys_addr_lo,
3376 3377 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3377 3378
3378 3379 ddi_put8(cmd->frame_dma_obj.acc_handle,
3379 3380 &init_frame->cmd, MFI_CMD_OP_INIT);
3380 3381 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3381 3382 MFI_CMD_STATUS_POLL_MODE);
3382 3383 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3383 3384 ddi_put32(cmd->frame_dma_obj.acc_handle,
3384 3385 &init_frame->queue_info_new_phys_addr_lo,
3385 3386 cmd->frame_phys_addr + 64);
3386 3387 ddi_put32(cmd->frame_dma_obj.acc_handle,
3387 3388 &init_frame->queue_info_new_phys_addr_hi, 0);
3388 3389
3389 3390
3390 3391 /* fill driver version information*/
3391 3392 fill_up_drv_ver(&drv_ver_info);
3392 3393
3393 3394 /* allocate the driver version data transfer buffer */
3394 3395 instance->drv_ver_dma_obj.size = sizeof(drv_ver_info.drv_ver);
3395 3396 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
3396 3397 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3397 3398 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3398 3399 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
3399 3400 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
3400 3401
3401 3402 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
3402 3403 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3403 3404 con_log(CL_ANN, (CE_WARN,
3404 3405 "init_mfi : Could not allocate driver version buffer."));
3405 3406 return (DDI_FAILURE);
3406 3407 }
3407 3408 /* copy driver version to dma buffer*/
3408 3409 (void) memset(instance->drv_ver_dma_obj.buffer, 0,sizeof(drv_ver_info.drv_ver));
3409 3410 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
3410 3411 (uint8_t *)drv_ver_info.drv_ver,
3411 3412 (uint8_t *)instance->drv_ver_dma_obj.buffer,
3412 3413 sizeof(drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
3413 3414
3414 3415
3415 3416 /*copy driver version physical address to init frame*/
3416 3417 ddi_put64(cmd->frame_dma_obj.acc_handle,
3417 3418 &init_frame->driverversion, instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
3418 3419
3419 3420 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3420 3421 sizeof (struct mrsas_init_queue_info));
3421 3422
3422 3423 cmd->frame_count = 1;
3423 3424
3424 3425 *cmd_ptr = cmd;
3425 3426
3426 3427 return (DDI_SUCCESS);
3427 3428 }
3428 3429
3429 3430
3430 3431 /*
3431 3432 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3432 3433 */
3433 3434 int
3434 3435 mrsas_init_adapter_ppc (struct mrsas_instance *instance)
3435 3436 {
3436 3437 struct mrsas_cmd *cmd;
3437 3438
3438 3439 /* allocate memory for mfi adapter(cmd pool, individual commands, mfi frames etc */
3439 3440 if (alloc_space_for_mfi(instance) != DDI_SUCCESS){
3440 3441 con_log(CL_ANN, (CE_NOTE,
3441 3442 "Error, failed to allocate memory for MFI adapter"));
3442 3443 return (DDI_FAILURE);
3443 3444 }
3444 3445
3445 3446 /* Build INIT command */
3446 3447 cmd = get_mfi_pkt(instance);
3447 3448
3448 3449 if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS){
3449 3450 con_log(CL_ANN, (CE_NOTE,
3450 3451 "Error, failed to build INIT command"));
3451 3452
3452 3453 goto fail_undo_alloc_mfi_space;
3453 3454 }
3454 3455
3455 3456 //Disalbe interrupt before sending init frame ( see linux driver code)
3456 3457 /* send INIT MFI frame in polled mode */
3457 3458 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3458 3459 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3459 3460 goto fail_fw_init;
3460 3461 }
3461 3462
3462 3463 if (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000) {
3463 3464 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3464 3465 instance->flag_ieee = 1;
3465 3466 }
3466 3467
3467 3468 instance->unroll.alloc_space_mfi = 1;
3468 3469 instance->unroll.verBuff = 1;
3469 3470
3470 3471 return (DDI_SUCCESS);
3471 3472
3472 3473
3473 3474 fail_fw_init:
3474 3475 mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3475 3476
3476 3477 fail_undo_alloc_mfi_space:
3477 3478 return_mfi_pkt(instance, cmd);
3478 3479 free_space_for_mfi(instance);
3479 3480
3480 3481 return (DDI_FAILURE);
3481 3482
3482 3483 }
3483 3484
3484 3485 /*
3485 3486 * mrsas_init_adapter - Initialize adapter.
3486 3487 */
3487 3488 int
3488 3489 mrsas_init_adapter (struct mrsas_instance *instance)
3489 3490 {
3490 3491 struct mrsas_ctrl_info ctrl_info;
3491 3492
3492 3493
3493 3494 /* we expect the FW state to be READY */
3494 3495 if (mfi_state_transition_to_ready(instance)) {
3495 3496 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3496 3497 return (DDI_FAILURE);
3497 3498 }
3498 3499
3499 3500 /* get various operational parameters from status register */
3500 3501 instance->max_num_sge =
3501 3502 (instance->func_ptr->read_fw_status_reg(instance) &
3502 3503 0xFF0000) >> 0x10;
3503 3504 instance->max_num_sge =
3504 3505 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3505 3506 MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3506 3507
3507 3508 /*
3508 3509 * Reduce the max supported cmds by 1. This is to ensure that the
3509 3510 * reply_q_sz (1 more than the max cmd that driver may send)
3510 3511 * does not exceed max cmds that the FW can support
3511 3512 */
3512 3513 instance->max_fw_cmds =
3513 3514 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3514 3515 instance->max_fw_cmds = instance->max_fw_cmds - 1;
3515 3516
3516 3517
3517 3518
3518 3519 /* Initialize adapter */
3519 3520 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3520 3521 con_log(CL_ANN, (CE_WARN, "mr_sas: "
3521 3522 "could not initialize adapter"));
3522 3523 return (DDI_FAILURE);
3523 3524 }
3524 3525
3525 3526 /* gather misc FW related information */
3526 3527 instance->disable_online_ctrl_reset = 0;
3527 3528
3528 3529 if (!get_ctrl_info(instance, &ctrl_info)) {
3529 3530 instance->max_sectors_per_req = ctrl_info.max_request_size;
3530 3531 con_log(CL_ANN1, (CE_NOTE,
3531 3532 "product name %s ld present %d",
3532 3533 ctrl_info.product_name, ctrl_info.ld_present_count));
3533 3534 } else {
3534 3535 instance->max_sectors_per_req = instance->max_num_sge *
3535 3536 PAGESIZE / 512;
3536 3537 }
3537 3538
3538 3539 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG) {
3539 3540 instance->disable_online_ctrl_reset = 1;
3540 3541 con_log(CL_ANN1, (CE_NOTE,
3541 3542 "Disable online control Flag is set\n"));
3542 3543 }
3543 3544 else {
3544 3545 con_log(CL_ANN1, (CE_NOTE,
3545 3546 "Disable online control Flag is not set\n"));
3546 3547 }
3547 3548
3548 3549 return (DDI_SUCCESS);
3549 3550
3550 3551 }
3551 3552
3552 3553
3553 3554
/*
 * mrsas_issue_init_mfi
 *
 * Build and issue an MFI INIT frame in polled mode using a packet from
 * the application reserved pool.  Programs the firmware with the
 * physical addresses of the producer index, consumer index and reply
 * queue carved out of mfi_internal_dma_obj (offsets 0, 4 and 8).
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if no packet was available or
 * the polled issue failed.
 */
static int
mrsas_issue_init_mfi(struct mrsas_instance *instance)
{
	struct mrsas_cmd *cmd;
	struct mrsas_init_frame *init_frame;
	struct mrsas_init_queue_info *initq_info;

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_issue_init_mfi: entry\n"));
	cmd = get_mfi_app_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mfi: get_pkt failed\n"));
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	/* queue-info structure occupies the SGL space 64 bytes in */
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	/* reply queue holds one entry more than max outstanding cmds */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
	/* producer index at offset 0 of the internal DMA buffer */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
	/* consumer index at offset 4 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	/* reply queue itself starts at offset 8 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	/* queue-info structure sits 64 bytes into this frame */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	/* issue the init frame in polled mode */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mfi():failed to "
		    "init firmware"));
		return_mfi_app_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	return_mfi_app_pkt(instance, cmd);
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));

	return (DDI_SUCCESS);
}
3640 3641 /*
3641 3642 * mfi_state_transition_to_ready : Move the FW to READY state
3642 3643 *
3643 3644 * @reg_set : MFI register set
3644 3645 */
3645 3646 int
3646 3647 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3647 3648 {
3648 3649 int i;
3649 3650 uint8_t max_wait;
3650 3651 uint32_t fw_ctrl = 0;
3651 3652 uint32_t fw_state;
3652 3653 uint32_t cur_state;
3653 3654 uint32_t cur_abs_reg_val;
3654 3655 uint32_t prev_abs_reg_val;
3655 3656 uint32_t status;
3656 3657
3657 3658 cur_abs_reg_val =
3658 3659 instance->func_ptr->read_fw_status_reg(instance);
3659 3660 fw_state =
3660 3661 cur_abs_reg_val & MFI_STATE_MASK;
3661 3662 con_log(CL_ANN1, (CE_CONT,
3662 3663 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3663 3664
3664 3665 while (fw_state != MFI_STATE_READY) {
3665 3666 con_log(CL_ANN, (CE_CONT,
3666 3667 "mfi_state_transition_to_ready:FW state%x", fw_state));
3667 3668
3668 3669 switch (fw_state) {
3669 3670 case MFI_STATE_FAULT:
3670 3671 con_log(CL_ANN, (CE_NOTE,
3671 3672 "mr_sas: FW in FAULT state!!"));
3672 3673
3673 3674 return (ENODEV);
3674 3675 case MFI_STATE_WAIT_HANDSHAKE:
3675 3676 /* set the CLR bit in IMR0 */
3676 3677 con_log(CL_ANN1, (CE_NOTE,
3677 3678 "mr_sas: FW waiting for HANDSHAKE"));
3678 3679 /*
3679 3680 * PCI_Hot Plug: MFI F/W requires
3680 3681 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3681 3682 * to be set
3682 3683 */
3683 3684 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3684 3685 if (!instance->tbolt) {
3685 3686 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3686 3687 MFI_INIT_HOTPLUG, instance);
3687 3688 } else {
3688 3689 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3689 3690 MFI_INIT_HOTPLUG, instance);
3690 3691 }
3691 3692 max_wait = (instance->tbolt == 1) ? 180 : 2;
3692 3693 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3693 3694 break;
3694 3695 case MFI_STATE_BOOT_MESSAGE_PENDING:
3695 3696 /* set the CLR bit in IMR0 */
3696 3697 con_log(CL_ANN1, (CE_NOTE,
3697 3698 "mr_sas: FW state boot message pending"));
3698 3699 /*
3699 3700 * PCI_Hot Plug: MFI F/W requires
3700 3701 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3701 3702 * to be set
3702 3703 */
3703 3704 if (!instance->tbolt) {
3704 3705 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3705 3706 } else {
3706 3707 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3707 3708 instance);
3708 3709 }
3709 3710 max_wait = (instance->tbolt == 1) ? 180 : 10;
3710 3711 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3711 3712 break;
3712 3713 case MFI_STATE_OPERATIONAL:
3713 3714 /* bring it to READY state; assuming max wait 2 secs */
3714 3715 instance->func_ptr->disable_intr(instance);
3715 3716 con_log(CL_ANN1, (CE_NOTE,
3716 3717 "mr_sas: FW in OPERATIONAL state"));
3717 3718 /*
3718 3719 * PCI_Hot Plug: MFI F/W requires
3719 3720 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3720 3721 * to be set
3721 3722 */
3722 3723 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3723 3724 if (!instance->tbolt) {
3724 3725 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3725 3726 } else {
3726 3727 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3727 3728 instance);
3728 3729
3729 3730 for (i = 0; i < (10 * 1000); i++) {
3730 3731 status =
3731 3732 RD_RESERVED0_REGISTER(instance);
3732 3733 if (status & 1)
3733 3734 delay(1 *
3734 3735 drv_usectohz(MILLISEC));
3735 3736 else
3736 3737 break;
3737 3738 }
3738 3739
3739 3740 }
3740 3741 max_wait = (instance->tbolt == 1) ? 180 : 10;
3741 3742 cur_state = MFI_STATE_OPERATIONAL;
3742 3743 break;
3743 3744 case MFI_STATE_UNDEFINED:
3744 3745 /* this state should not last for more than 2 seconds */
3745 3746 con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3746 3747
3747 3748 max_wait = (instance->tbolt == 1) ? 180 : 2;
3748 3749 cur_state = MFI_STATE_UNDEFINED;
3749 3750 break;
3750 3751 case MFI_STATE_BB_INIT:
3751 3752 max_wait = (instance->tbolt == 1) ? 180 : 2;
3752 3753 cur_state = MFI_STATE_BB_INIT;
3753 3754 break;
3754 3755 case MFI_STATE_FW_INIT:
3755 3756 max_wait = (instance->tbolt == 1) ? 180 : 2;
3756 3757 cur_state = MFI_STATE_FW_INIT;
3757 3758 break;
3758 3759 case MFI_STATE_FW_INIT_2:
3759 3760 max_wait = 180;
3760 3761 cur_state = MFI_STATE_FW_INIT_2;
3761 3762 break;
3762 3763 case MFI_STATE_DEVICE_SCAN:
3763 3764 max_wait = 180;
3764 3765 cur_state = MFI_STATE_DEVICE_SCAN;
3765 3766 prev_abs_reg_val = cur_abs_reg_val;
3766 3767 con_log(CL_NONE, (CE_NOTE,
3767 3768 "Device scan in progress ...\n"));
3768 3769 break;
3769 3770 case MFI_STATE_FLUSH_CACHE:
3770 3771 max_wait = 180;
3771 3772 cur_state = MFI_STATE_FLUSH_CACHE;
3772 3773 break;
3773 3774 default:
3774 3775 con_log(CL_ANN1, (CE_NOTE,
3775 3776 "mr_sas: Unknown state 0x%x", fw_state));
3776 3777 return (ENODEV);
3777 3778 }
3778 3779
3779 3780 /* the cur_state should not last for more than max_wait secs */
3780 3781 for (i = 0; i < (max_wait * MILLISEC); i++) {
3781 3782 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
3782 3783 cur_abs_reg_val =
3783 3784 instance->func_ptr->read_fw_status_reg(instance);
3784 3785 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3785 3786
3786 3787 if (fw_state == cur_state) {
3787 3788 delay(1 * drv_usectohz(MILLISEC));
3788 3789 } else {
3789 3790 break;
3790 3791 }
3791 3792 }
3792 3793 if (fw_state == MFI_STATE_DEVICE_SCAN) {
3793 3794 if (prev_abs_reg_val != cur_abs_reg_val) {
3794 3795 continue;
3795 3796 }
3796 3797 }
3797 3798
3798 3799 /* return error if fw_state hasn't changed after max_wait */
3799 3800 if (fw_state == cur_state) {
3800 3801 con_log(CL_ANN1, (CE_WARN,
3801 3802 "FW state hasn't changed in %d secs", max_wait));
3802 3803 return (ENODEV);
3803 3804 }
3804 3805 };
3805 3806
3806 3807 if (!instance->tbolt) {
3807 3808 fw_ctrl = RD_IB_DOORBELL(instance);
3808 3809 con_log(CL_ANN1, (CE_CONT,
3809 3810 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
3810 3811 }
3811 3812
3812 3813 #if 0
3813 3814 /*
3814 3815 * Write 0xF to the doorbell register to do the following.
3815 3816 * - Abort all outstanding commands (bit 0).
3816 3817 * - Transition from OPERATIONAL to READY state (bit 1).
3817 3818 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3818 3819 * - Set to release FW to continue running (i.e. BIOS handshake
3819 3820 * (bit 3).
3820 3821 */
3821 3822 if (!instance->tbolt) {
3822 3823 WR_IB_DOORBELL(0xF, instance);
3823 3824 }
3824 3825 #endif
3825 3826
3826 3827 return (DDI_SUCCESS);
3827 3828 }
3828 3829
3829 3830 /*
3830 3831 * get_seq_num
3831 3832 */
3832 3833 static int
3833 3834 get_seq_num(struct mrsas_instance *instance,
3834 3835 struct mrsas_evt_log_info *eli)
3835 3836 {
3836 3837 int ret = DDI_SUCCESS;
3837 3838
3838 3839 dma_obj_t dcmd_dma_obj;
3839 3840 struct mrsas_cmd *cmd;
3840 3841 struct mrsas_dcmd_frame *dcmd;
3841 3842 struct mrsas_evt_log_info *eli_tmp;
3842 3843 if (instance->tbolt) {
3843 3844 cmd = get_raid_msg_mfi_pkt(instance);
3844 3845 } else {
3845 3846 cmd = get_mfi_pkt(instance);
3846 3847 }
3847 3848
3848 3849 if (!cmd) {
3849 3850 cmn_err(CE_WARN,
3850 3851 "Failed to get a cmd from free-pool in get_seq_num(). fw_outstanding=0x%X max_fw_cmds=0x%X",
3851 3852 instance->fw_outstanding, instance->max_fw_cmds);
3852 3853 return (ENOMEM);
3853 3854 }
3854 3855
3855 3856 /* Clear the frame buffer and assign back the context id */
3856 3857 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3857 3858 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3858 3859 cmd->index);
3859 3860
3860 3861 dcmd = &cmd->frame->dcmd;
3861 3862
3862 3863 /* allocate the data transfer buffer */
3863 3864 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
3864 3865 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3865 3866 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3866 3867 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3867 3868 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3868 3869 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3869 3870
3870 3871 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3871 3872 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3872 3873 cmn_err(CE_WARN,
3873 3874 "get_seq_num: could not allocate data transfer buffer.");
3874 3875 return (DDI_FAILURE);
3875 3876 }
3876 3877
3877 3878 (void) memset(dcmd_dma_obj.buffer, 0,
3878 3879 sizeof (struct mrsas_evt_log_info));
3879 3880
3880 3881 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3881 3882
3882 3883 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3883 3884 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3884 3885 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3885 3886 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3886 3887 MFI_FRAME_DIR_READ);
3887 3888 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3888 3889 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3889 3890 sizeof (struct mrsas_evt_log_info));
3890 3891 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3891 3892 MR_DCMD_CTRL_EVENT_GET_INFO);
3892 3893 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3893 3894 sizeof (struct mrsas_evt_log_info));
3894 3895 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3895 3896 dcmd_dma_obj.dma_cookie[0].dmac_address);
3896 3897
3897 3898 cmd->sync_cmd = MRSAS_TRUE;
3898 3899 cmd->frame_count = 1;
3899 3900
3900 3901 if (instance->tbolt) {
3901 3902 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3902 3903 }
3903 3904
3904 3905 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3905 3906 cmn_err(CE_WARN, "get_seq_num: "
3906 3907 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
3907 3908 ret = DDI_FAILURE;
3908 3909 } else {
3909 3910 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
3910 3911 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
3911 3912 &eli_tmp->newest_seq_num);
3912 3913 ret = DDI_SUCCESS;
3913 3914 }
3914 3915
3915 3916 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
3916 3917 ret = DDI_FAILURE;
3917 3918
3918 3919 if (instance->tbolt) {
3919 3920 return_raid_msg_mfi_pkt(instance, cmd);
3920 3921 } else {
3921 3922 return_mfi_pkt(instance, cmd);
3922 3923 }
3923 3924
3924 3925 return (ret);
3925 3926 }
3926 3927
3927 3928 /*
3928 3929 * start_mfi_aen
3929 3930 */
3930 3931 static int
3931 3932 start_mfi_aen(struct mrsas_instance *instance)
3932 3933 {
3933 3934 int ret = 0;
3934 3935
3935 3936 struct mrsas_evt_log_info eli;
3936 3937 union mrsas_evt_class_locale class_locale;
3937 3938
3938 3939 /* get the latest sequence number from FW */
3939 3940 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
3940 3941
3941 3942 if (get_seq_num(instance, &eli)) {
3942 3943 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
3943 3944 return (-1);
3944 3945 }
3945 3946
3946 3947 /* register AEN with FW for latest sequence number plus 1 */
3947 3948 class_locale.members.reserved = 0;
3948 3949 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
3949 3950 class_locale.members.class = MR_EVT_CLASS_INFO;
3950 3951 class_locale.word = LE_32(class_locale.word);
3951 3952 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
3952 3953 class_locale.word);
3953 3954
3954 3955 if (ret) {
3955 3956 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
3956 3957 return (-1);
3957 3958 }
3958 3959
3959 3960
3960 3961 return (ret);
3961 3962 }
3962 3963
3963 3964 /*
3964 3965 * flush_cache
3965 3966 */
3966 3967 static void
3967 3968 flush_cache(struct mrsas_instance *instance)
3968 3969 {
3969 3970 struct mrsas_cmd *cmd = NULL;
3970 3971 struct mrsas_dcmd_frame *dcmd;
3971 3972 uint32_t max_cmd = instance->max_fw_cmds;
3972 3973 if (instance->tbolt) {
3973 3974 cmd = get_raid_msg_mfi_pkt(instance);
3974 3975 } else {
3975 3976 cmd = get_mfi_pkt(instance);
3976 3977 }
3977 3978
3978 3979 if (!cmd) {
3979 3980 cmn_err(CE_WARN,
3980 3981 "Failed to get a cmd from free-pool in flush_cache(). fw_outstanding=0x%X max_fw_cmds=0x%X",
3981 3982 instance->fw_outstanding, instance->max_fw_cmds);
3982 3983 return;
3983 3984 }
3984 3985
3985 3986 /* Clear the frame buffer and assign back the context id */
3986 3987 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3987 3988 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3988 3989 cmd->index);
3989 3990
3990 3991 dcmd = &cmd->frame->dcmd;
3991 3992
3992 3993 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3993 3994
3994 3995 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3995 3996 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
3996 3997 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
3997 3998 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3998 3999 MFI_FRAME_DIR_NONE);
3999 4000 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4000 4001 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4001 4002 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4002 4003 MR_DCMD_CTRL_CACHE_FLUSH);
4003 4004 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4004 4005 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4005 4006
4006 4007 cmd->frame_count = 1;
4007 4008
4008 4009 if (instance->tbolt) {
4009 4010 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4010 4011 }
4011 4012
4012 4013 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4013 4014 con_log(CL_ANN1, (CE_WARN,
4014 4015 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4015 4016 }
4016 4017 con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4017 4018 if (instance->tbolt) {
4018 4019 return_raid_msg_mfi_pkt(instance, cmd);
4019 4020 } else {
4020 4021 return_mfi_pkt(instance, cmd);
4021 4022 }
4022 4023
4023 4024 }
4024 4025
/*
 * service_mfi_aen -	Completes an AEN command
 * @instance:		Adapter soft state
 * @cmd:		Command to be completed
 *
 * Called when the outstanding MR_DCMD_CTRL_EVENT_WAIT command completes.
 * Logs a sysevent for userland consumers, triggers (un)configuration of
 * logical/physical targets according to the event code, and finally
 * re-issues the AEN frame with the next sequence number so the driver
 * keeps receiving events.
 */
void
service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	uint32_t seq_num;
	uint32_t i;	/* NOTE(review): appears unused in this function */
	struct mrsas_evt_detail *evt_detail =
	    (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
	int rval = 0;
	int tgt = 0;
	/* dtype is only referenced under PDSUPPORT below */
	uint8_t dtype;
#ifdef PDSUPPORT
	mrsas_pd_address_t *pd_addr;
#endif
	ddi_acc_handle_t acc_handle;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	acc_handle = cmd->frame_dma_obj.acc_handle;
	/* ENODATA from the FW means "no event yet" and is not an error */
	cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas%d: Failed to log AEN event", instance_no));
	}
	/*
	 * Check for any ld devices that has changed state. i.e. online
	 * or offline.
	 */
	con_log(CL_ANN1, (CE_CONT,
	    "AEN: code = %x class = %x locale = %x args = %x",
	    ddi_get32(acc_handle, &evt_detail->code),
	    evt_detail->cl.members.class,
	    ddi_get16(acc_handle, &evt_detail->cl.members.locale),
	    ddi_get8(acc_handle, &evt_detail->arg_type)));

	switch (ddi_get32(acc_handle, &evt_detail->code)) {
	case MR_EVT_CFG_CLEARED: {
		/* whole config wiped: unconfigure every valid LD target */
		for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
			if (instance->mr_ld_list[tgt].dip != NULL) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_ld_list[tgt].flag =
				    ~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance, tgt, 0,
				    MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: CFG CLEARED AEN rval = %d "
				    "tgt id = %d", rval, tgt));
			}
		}
		break;
	}

	case MR_EVT_LD_DELETED: {
		/* mark the LD invalid, then schedule its unconfiguration */
		tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_ld_list[tgt].flag = ~MRDRV_TGT_VALID;
		mutex_exit(&instance->config_dev_mtx);
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_UNCONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_DELETED */

	case MR_EVT_LD_CREATED: {
		/* schedule configuration of the newly created LD */
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_CONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_CREATED */

#ifdef PDSUPPORT
	case MR_EVT_PD_REMOVED_EXT: {
		/* physical-disk handling applies to Thunderbolt only */
		if (instance->tbolt) {
			pd_addr = &evt_detail->args.pd_addr;
			dtype = pd_addr->scsi_dev_type;
			con_log(CL_DLEVEL1, (CE_NOTE,
			    " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
			    " arg_type = %d ", dtype, evt_detail->arg_type));
			tgt = ddi_get16(acc_handle, &evt_detail->args.pd.device_id);
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_tbolt_pd_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
			mutex_exit(&instance->config_dev_mtx);
			rval = mrsas_service_evt(instance,
			    ddi_get16(acc_handle, &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle, &evt_detail->args.pd.device_id)));
			break;
		}
	}/* End of MR_EVT_PD_REMOVED_EXT */
	/*
	 * NOTE(review): when !instance->tbolt, the case above falls
	 * through to MR_EVT_PD_INSERTED_EXT (and that one likewise to
	 * MR_EVT_PD_STATE_CHANGE); confirm this is intentional.
	 */

	case MR_EVT_PD_INSERTED_EXT: {
		if (instance->tbolt) {
			rval = mrsas_service_evt(instance,
			    ddi_get16(acc_handle, &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_CONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle, &evt_detail->args.pd.device_id)));
			break;
		}
	}/* End of MR_EVT_PD_INSERTED_EXT */

	case MR_EVT_PD_STATE_CHANGE: {
		if (instance->tbolt) {
			tgt = ddi_get16(acc_handle, &evt_detail->args.pd.device_id);
			/* PD left the SYSTEM state: treat as removal */
			if ((evt_detail->args.pd_state.prevState == PD_SYSTEM) &&
			    (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_tbolt_pd_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
				    "rval = %d tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
			/* PD became a SYSTEM disk: treat as insertion */
			if ((evt_detail->args.pd_state.prevState
			    == UNCONFIGURED_GOOD) &&
			    (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_CONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: PD_INSERTED: rval = %d "
				    " tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
		}
	}
#endif

	} /* End of Main Switch */

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
	seq_num++;
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* re-arm the same frame: next sequence number goes in mbox word 0 */
	ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
	ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	/* fresh timeout/retry accounting for the re-issued AEN */
	cmd->retry_count_for_ocr = 0;
	cmd->drv_pkt_time = 0;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}
4211 4212
4212 4213 /*
4213 4214 * complete_cmd_in_sync_mode - Completes an internal command
4214 4215 * @instance: Adapter soft state
4215 4216 * @cmd: Command to be completed
4216 4217 *
4217 4218 * The issue_cmd_in_sync_mode() function waits for a command to complete
4218 4219 * after it issues a command. This function wakes up that waiting routine by
4219 4220 * calling wake_up() on the wait queue.
4220 4221 */
4221 4222 static void
4222 4223 complete_cmd_in_sync_mode(struct mrsas_instance *instance,
4223 4224 struct mrsas_cmd *cmd)
4224 4225 {
4225 4226 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
4226 4227 &cmd->frame->io.cmd_status);
4227 4228
4228 4229 cmd->sync_cmd = MRSAS_FALSE;
4229 4230
4230 4231 con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
4231 4232 (void *)cmd));
4232 4233
4233 4234 mutex_enter(&instance->int_cmd_mtx);
4234 4235 if (cmd->cmd_status == ENODATA) {
4235 4236 cmd->cmd_status = 0;
4236 4237 }
4237 4238 cv_broadcast(&instance->int_cmd_cv);
4238 4239 mutex_exit(&instance->int_cmd_mtx);
4239 4240
4240 4241 }
4241 4242
4242 4243 /*
4243 4244 * Call this function inside mrsas_softintr.
4244 4245 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4245 4246 * @instance: Adapter soft state
4246 4247 */
4247 4248
4248 4249 static uint32_t
4249 4250 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4250 4251 {
4251 4252 uint32_t cur_abs_reg_val;
4252 4253 uint32_t fw_state;
4253 4254
4254 4255 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
4255 4256 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4256 4257 if (fw_state == MFI_STATE_FAULT) {
4257 4258 if (instance->disable_online_ctrl_reset == 1) {
4258 4259 cmn_err(CE_WARN,
4259 4260 "mrsas_initiate_ocr_if_fw_is_faulty: "
4260 4261 "FW in Fault state, detected in ISR: "
4261 4262 "FW doesn't support ocr ");
4262 4263
4263 4264 return (ADAPTER_RESET_NOT_REQUIRED);
4264 4265 } else {
4265 4266 con_log(CL_ANN, (CE_NOTE,
4266 4267 "mrsas_initiate_ocr_if_fw_is_faulty: "
4267 4268 "FW in Fault state, detected in ISR: FW supports ocr "));
4268 4269
4269 4270 return (ADAPTER_RESET_REQUIRED);
4270 4271 }
4271 4272 }
4272 4273
4273 4274 return (ADAPTER_RESET_NOT_REQUIRED);
4274 4275 }
4275 4276
/*
 * mrsas_softintr - The Software ISR
 * @param arg : HBA soft state
 *
 * called from high-level interrupt if hi-level interrupt are not there,
 * otherwise triggered as a soft interrupt
 *
 * Drains the completed-command list built by the hardware ISR: for each
 * command it dispatches on the MFI opcode, finishes SCSA packets (status
 * translation, sense data, completion callback) for regular I/O, wakes
 * synchronous waiters for internal commands, and services AEN
 * completions.  Always returns DDI_INTR_CLAIMED.
 */
static uint_t
mrsas_softintr(struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	struct scsa_cmd *acmd;
	struct mrsas_cmd *cmd;
	struct mlist_head *pos, *next;
	mlist_t process_list;
	struct mrsas_header *hdr;
	struct scsi_arq_status *arqstat;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));

	ASSERT(instance);

	mutex_enter(&instance->completed_pool_mtx);

	if (mlist_empty(&instance->completed_pool_list)) {
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * NOTE(review): softint_running is a plain flag set/cleared without
	 * atomics; confirm it is only consulted under the same mutex or
	 * used in an advisory way.
	 */
	instance->softint_running = 1;

	/*
	 * Steal the whole completed list under the mutex, then process it
	 * lock-free on a private list.
	 */
	INIT_LIST_HEAD(&process_list);
	mlist_splice(&instance->completed_pool_list, &process_list);
	INIT_LIST_HEAD(&instance->completed_pool_list);

	mutex_exit(&instance->completed_pool_mtx);

	/* perform all callbacks first, before releasing the SCBs */
	mlist_for_each_safe(pos, next, &process_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		/* syncronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		hdr = &cmd->frame->hdr;

		/* remove the internal command from the process list */
		mlist_del_init(&cmd->list);

		switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
		case MFI_CMD_OP_PD_SCSI:
		case MFI_CMD_OP_LD_SCSI:
		case MFI_CMD_OP_LD_READ:
		case MFI_CMD_OP_LD_WRITE:
			/*
			 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
			 * could have been issued either through an
			 * IO path or an IOCTL path. If it was via IOCTL,
			 * we will send it to internal completion.
			 */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				complete_cmd_in_sync_mode(instance, cmd);
				break;
			}

			/* regular commands */
			acmd = cmd->cmd;
			pkt = CMD2PKT(acmd);

			/* sync the data buffer back to the CPU if mapped */
			if (acmd->cmd_flags & CFLAG_DMAVALID) {
				if (acmd->cmd_flags & CFLAG_CONSISTENT) {
					(void) ddi_dma_sync(acmd->cmd_dmahandle,
					    acmd->cmd_dma_offset,
					    acmd->cmd_dma_len,
					    DDI_DMA_SYNC_FORCPU);
				}
			}

			/* assume full success; error cases adjust below */
			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;
			pkt->pkt_state = STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA | STATE_GOT_STATUS;

			con_log(CL_ANN, (CE_CONT,
			    "CDB[0] = %x completed for %s: size %lx context %x",
			    pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
			    acmd->cmd_dmacount, hdr->context));

			/*
			 * NOTE(review): hdr->cmd_status is read/written
			 * below via the raw pointer rather than
			 * ddi_get8()/ddi_put8() as elsewhere; relies on
			 * the frame mapping being little-endian
			 * accessible -- confirm.
			 */
			if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
				struct scsi_inquiry *inq;

				if (acmd->cmd_dmacount != 0) {
					bp_mapin(acmd->cmd_buf);
					inq = (struct scsi_inquiry *)
					    acmd->cmd_buf->b_un.b_addr;

					/* don't expose physical drives to OS */
					if (acmd->islogical &&
					    (hdr->cmd_status == MFI_STAT_OK)) {
						display_scsi_inquiry(
						    (caddr_t)inq);
					} else if ((hdr->cmd_status ==
					    MFI_STAT_OK) && inq->inq_dtype ==
					    DTYPE_DIRECT) {

						display_scsi_inquiry(
						    (caddr_t)inq);

						/* for physical disk */
						hdr->cmd_status =
						    MFI_STAT_DEVICE_NOT_FOUND;
					}
				}
			}

			/* translate MFI status to SCSA packet status */
			switch (hdr->cmd_status) {
			case MFI_STAT_OK:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_CC_IN_PROGRESS:
			case MFI_STAT_LD_RECON_IN_PROGRESS:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_INIT_IN_PROGRESS:
				con_log(CL_ANN, (CE_WARN, "Initialization in Progress"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			case MFI_STAT_SCSI_DONE_WITH_ERROR:
				con_log(CL_ANN, (CE_CONT, "scsi_done error"));

				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {

					con_log(CL_ANN, (CE_WARN, "TEST_UNIT_READY fail"));

				} else {
					/* build auto-request-sense data */
					pkt->pkt_state |= STATE_ARQ_DONE;
					arqstat = (void *)(pkt->pkt_scbp);
					arqstat->sts_rqpkt_reason = CMD_CMPLT;
					arqstat->sts_rqpkt_resid = 0;
					arqstat->sts_rqpkt_state |=
					    STATE_GOT_BUS | STATE_GOT_TARGET
					    | STATE_SENT_CMD
					    | STATE_XFERRED_DATA;
					*(uint8_t *)&arqstat->sts_rqpkt_status =
					    STATUS_GOOD;
					/* copy FW sense into the packet */
					ddi_rep_get8(
					    cmd->frame_dma_obj.acc_handle,
					    (uint8_t *)
					    &(arqstat->sts_sensedata),
					    cmd->sense,
					    sizeof (struct scsi_extended_sense),
					    DDI_DEV_AUTOINCR);
				}
				break;
			case MFI_STAT_LD_OFFLINE:
			case MFI_STAT_DEVICE_NOT_FOUND:
				con_log(CL_ANN, (CE_CONT,
				    "mrsas_softintr:device not found error"));
				pkt->pkt_reason = CMD_DEV_GONE;
				pkt->pkt_statistics = STAT_DISCON;
				break;
			case MFI_STAT_LD_LBA_OUT_OF_RANGE:
				/* fabricate ILLEGAL REQUEST sense data */
				pkt->pkt_state |= STATE_ARQ_DONE;
				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |= STATE_GOT_BUS
				    | STATE_GOT_TARGET | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;

				arqstat->sts_sensedata.es_valid = 1;
				arqstat->sts_sensedata.es_key =
				    KEY_ILLEGAL_REQUEST;
				arqstat->sts_sensedata.es_class =
				    CLASS_EXTENDED_SENSE;

				/*
				 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
				 * ASC: 0x21h; ASCQ: 0x00h;
				 */
				arqstat->sts_sensedata.es_add_code = 0x21;
				arqstat->sts_sensedata.es_qual_code = 0x00;

				break;

			default:
				con_log(CL_ANN, (CE_CONT, "Unknown status!"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			}

			atomic_add_16(&instance->fw_outstanding, (-1));

			/* Call the callback routine */
			if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
			    pkt->pkt_comp) {

				con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_softintr: "
				    "posting to scsa cmd %p index %x pkt %p "
				    "time %llx", (void *)cmd, cmd->index,
				    (void *)pkt, gethrtime()));
				(*pkt->pkt_comp)(pkt);

			}

			/*
			 * NOTE(review): always uses the non-tbolt pool
			 * return here; presumably tbolt I/O completes via
			 * a different path and never reaches this switch
			 * arm -- confirm.
			 */
			return_mfi_pkt(instance, cmd);
			break;

		case MFI_CMD_OP_SMP:
		case MFI_CMD_OP_STP:
			complete_cmd_in_sync_mode(instance, cmd);
			break;

		case MFI_CMD_OP_DCMD:
			/* see if got an event notification */
			if (ddi_get32(cmd->frame_dma_obj.acc_handle,
			    &cmd->frame->dcmd.opcode) ==
			    MR_DCMD_CTRL_EVENT_WAIT) {
				if ((instance->aen_cmd == cmd) &&
				    (instance->aen_cmd->abort_aen)) {
					con_log(CL_ANN, (CE_WARN,
					    "mrsas_softintr: "
					    "aborted_aen returned"));
				} else {
					/* re-arms the AEN internally */
					atomic_add_16(&instance->fw_outstanding,
					    (-1));
					service_mfi_aen(instance, cmd);
				}
			} else {
				complete_cmd_in_sync_mode(instance, cmd);
			}

			break;

		case MFI_CMD_OP_ABORT:
			con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
			/*
			 * MFI_CMD_OP_ABORT successfully completed
			 * in the synchronous mode
			 */
			complete_cmd_in_sync_mode(instance, cmd);
			break;

		default:
			/* unknown opcode: still complete any attached pkt */
			if (cmd->pkt != NULL) {
				pkt = cmd->pkt;
				if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
				    pkt->pkt_comp) {

					con_log(CL_ANN1, (CE_CONT, "posting to "
					    "scsa cmd %p index %x pkt %p"
					    "time %llx, default ", (void *)cmd,
					    cmd->index, (void *)pkt,
					    gethrtime()));

					(*pkt->pkt_comp)(pkt);

				}
			}
			con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
			break;
		}
	}

	instance->softint_running = 0;

	return (DDI_INTR_CLAIMED);
}
4558 4559
4559 4560 /*
4560 4561 * mrsas_alloc_dma_obj
4561 4562 *
4562 4563 * Allocate the memory and other resources for an dma object.
4563 4564 */
4564 4565 int
4565 4566 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
4566 4567 uchar_t endian_flags)
4567 4568 {
4568 4569 int i;
4569 4570 size_t alen = 0;
4570 4571 uint_t cookie_cnt;
4571 4572 struct ddi_device_acc_attr tmp_endian_attr;
4572 4573
4573 4574 tmp_endian_attr = endian_attr;
4574 4575 tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
4575 4576 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4576 4577
4577 4578 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
4578 4579 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
4579 4580 if (i != DDI_SUCCESS) {
4580 4581
4581 4582 switch (i) {
4582 4583 case DDI_DMA_BADATTR :
4583 4584 con_log(CL_ANN, (CE_WARN,
4584 4585 "Failed ddi_dma_alloc_handle- Bad attribute"));
4585 4586 break;
4586 4587 case DDI_DMA_NORESOURCES :
4587 4588 con_log(CL_ANN, (CE_WARN,
4588 4589 "Failed ddi_dma_alloc_handle- No Resources"));
4589 4590 break;
4590 4591 default :
4591 4592 con_log(CL_ANN, (CE_WARN,
4592 4593 "Failed ddi_dma_alloc_handle: "
4593 4594 "unknown status %d", i));
4594 4595 break;
4595 4596 }
4596 4597
4597 4598 return (-1);
4598 4599 }
4599 4600
4600 4601 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
4601 4602 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4602 4603 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4603 4604 alen < obj->size) {
4604 4605
4605 4606 ddi_dma_free_handle(&obj->dma_handle);
4606 4607
4607 4608 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4608 4609
4609 4610 return (-1);
4610 4611 }
4611 4612 if (obj->dma_handle == NULL) {
4612 4613 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4613 4614 return (-1);
4614 4615 }
4615 4616
4616 4617 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4617 4618 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4618 4619 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4619 4620
4620 4621 ddi_dma_mem_free(&obj->acc_handle);
4621 4622 ddi_dma_free_handle(&obj->dma_handle);
4622 4623
4623 4624 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4624 4625
4625 4626 return (-1);
4626 4627 }
4627 4628 if (obj->acc_handle == NULL) {
4628 4629 ddi_dma_mem_free(&obj->acc_handle);
4629 4630 ddi_dma_free_handle(&obj->dma_handle);
4630 4631
4631 4632 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4632 4633 return (-1);
4633 4634 }
4634 4635
4635 4636
4636 4637 return (cookie_cnt);
4637 4638 }
4638 4639
4639 4640 /*
4640 4641 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4641 4642 *
 * De-allocate the memory and other resources for a DMA object, which must
 * have been allocated by a previous call to mrsas_alloc_dma_obj()
4644 4645 */
4645 4646 int
4646 4647 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4647 4648 {
4648 4649
4649 4650 if ( (obj.dma_handle == NULL) || (obj.acc_handle == NULL) ) {
4650 4651 return (DDI_SUCCESS);
4651 4652 }
4652 4653
4653 4654 (void) ddi_dma_unbind_handle(obj.dma_handle);
4654 4655 ddi_dma_mem_free(&obj.acc_handle);
4655 4656 ddi_dma_free_handle(&obj.dma_handle);
4656 4657 obj.acc_handle = NULL;
4657 4658 return (DDI_SUCCESS);
4658 4659 }
4659 4660
4660 4661 /*
4661 4662 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
4662 4663 * int, int (*)())
4663 4664 *
4664 4665 * Allocate dma resources for a new scsi command
4665 4666 */
int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int	dma_flags;
	int	(*cb)(caddr_t);
	int	i;

	ddi_dma_attr_t	tmp_dma_attr = mrsas_generic_dma_attr;
	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* Derive the DMA direction from the buf(9S) flags. */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	/* NULL_FUNC means the caller cannot block for resources. */
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
	if (instance->tbolt) {
		/*
		 * OCR-RESET FIX: on Thunderbolt-class controllers cap the
		 * per-transfer size (mrsas_tbolt_max_cap_maxxfer,
		 * presumably 256K per the original note -- TODO confirm).
		 */
		tmp_dma_attr.dma_attr_count_max =
		    (U64)mrsas_tbolt_max_cap_maxxfer;
		tmp_dma_attr.dma_attr_maxxfer =
		    (U64)mrsas_tbolt_max_cap_maxxfer;
	}

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);

		case DDI_DMA_NORESOURCES:
			/* errno 0: retryable resource shortage */
			bioerror(bp, 0);
			return (DDI_FAILURE);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "impossible result (0x%x)", i));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* Partial mapping is only legal if the caller asked for it. */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
			goto no_dma_cookies;
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
			goto no_dma_cookies;
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		/* Fully mapped: a single implicit window. */
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

get_dma_cookies:
		/*
		 * Accumulate cookies up to the controller's SGE limit
		 * (or until the window is exhausted).
		 */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		/* Anything we could not map in this window becomes resid. */
		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (DDI_SUCCESS);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "impossible result (0x%x)", i));
		break;
	}

no_dma_cookies:
	/* Common failure path: release the handle and mark DMA invalid. */
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (DDI_FAILURE);
}
4812 4813
4813 4814 /*
4814 4815 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
4815 4816 *
4816 4817 * move dma resources to next dma window
4817 4818 *
4818 4819 */
4819 4820 int
4820 4821 mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
4821 4822 struct buf *bp)
4822 4823 {
4823 4824 int i = 0;
4824 4825
4825 4826 struct scsa_cmd *acmd = PKT2CMD(pkt);
4826 4827
4827 4828 /*
4828 4829 * If there are no more cookies remaining in this window,
4829 4830 * must move to the next window first.
4830 4831 */
4831 4832 if (acmd->cmd_cookie == acmd->cmd_ncookies) {
4832 4833 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
4833 4834 return (DDI_SUCCESS);
4834 4835 }
4835 4836
4836 4837 /* at last window, cannot move */
4837 4838 if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
4838 4839 return (DDI_FAILURE);
4839 4840 }
4840 4841
4841 4842 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
4842 4843 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
4843 4844 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
4844 4845 DDI_FAILURE) {
4845 4846 return (DDI_FAILURE);
4846 4847 }
4847 4848
4848 4849 acmd->cmd_cookie = 0;
4849 4850 } else {
4850 4851 /* still more cookies in this window - get the next one */
4851 4852 ddi_dma_nextcookie(acmd->cmd_dmahandle,
4852 4853 &acmd->cmd_dmacookies[0]);
4853 4854 }
4854 4855
4855 4856 /* get remaining cookies in this window, up to our maximum */
4856 4857 for (;;) {
4857 4858 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
4858 4859 acmd->cmd_cookie++;
4859 4860
4860 4861 if (i == instance->max_num_sge ||
4861 4862 acmd->cmd_cookie == acmd->cmd_ncookies) {
4862 4863 break;
4863 4864 }
4864 4865
4865 4866 ddi_dma_nextcookie(acmd->cmd_dmahandle,
4866 4867 &acmd->cmd_dmacookies[i]);
4867 4868 }
4868 4869
4869 4870 acmd->cmd_cookiecnt = i;
4870 4871
4871 4872 if (bp->b_bcount >= acmd->cmd_dmacount) {
4872 4873 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
4873 4874 } else {
4874 4875 pkt->pkt_resid = 0;
4875 4876 }
4876 4877
4877 4878 return (DDI_SUCCESS);
4878 4879 }
4879 4880
4880 4881 /*
4881 4882 * build_cmd
4882 4883 */
static struct mrsas_cmd *
build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint16_t	flags = 0;
	uint32_t	i;
	uint32_t	context;
	uint32_t	sge_bytes;
	uint32_t	tmp_data_xfer_len;
	ddi_acc_handle_t acc_handle;
	struct mrsas_cmd		*cmd;
	struct mrsas_sge64		*mfi_sgl;
	struct mrsas_sge_ieee		*mfi_sgl_ieee;
	struct scsa_cmd			*acmd = PKT2CMD(pkt);
	struct mrsas_pthru_frame	*pthru;
	struct mrsas_io_frame		*ldio;

	/* find out if this is logical or physical drive command.  */
	acmd->islogical = MRDRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);
	*cmd_done = 0;

	/* get the command packet */
	if (!(cmd = get_mfi_pkt(instance))) {
		cmn_err(CE_WARN,
		    "Failed to get a cmd from free-pool in build_cmd(). fw_outstanding=0x%X max_fw_cmds=0x%X",
		    instance->fw_outstanding, instance->max_fw_cmds);
		return (NULL);
	}

	acc_handle = cmd->frame_dma_obj.acc_handle;

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);

	cmd->pkt = pkt;
	cmd->cmd = acmd;

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		flags = MFI_FRAME_DIR_WRITE;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		/*
		 * NOTE(review): this tests whether ANY flag other than
		 * CFLAG_DMASEND is set, not a specific "read" flag; with
		 * CFLAG_DMAVALID normally set it behaves as the read
		 * branch, but confirm the intent before changing it.
		 */
		flags = MFI_FRAME_DIR_READ;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		flags = MFI_FRAME_DIR_NONE;
	}

	if (instance->flag_ieee) {
		flags |= MFI_FRAME_IEEE;
	}
	flags |= MFI_FRAME_SGL64;

	switch (pkt->pkt_cdbp[0]) {

	/*
	 * case SCMD_SYNCHRONIZE_CACHE:
	 *	flush_cache(instance);
	 *	return_mfi_pkt(instance, cmd);
	 *	*cmd_done = 1;
	 *
	 *	return (NULL);
	 */

	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:
		/* Logical-drive reads/writes use the fast LD I/O frame. */
		if (acmd->islogical) {
			ldio = (struct mrsas_io_frame *)cmd->frame;

			/*
			 * preare the Logical IO frame:
			 * 2nd bit is zero for all read cmds
			 */
			ddi_put8(acc_handle, &ldio->cmd,
			    (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
			    : MFI_CMD_OP_LD_READ);
			ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
			ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
			ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
			ddi_put16(acc_handle, &ldio->timeout, 0);
			ddi_put8(acc_handle, &ldio->reserved_0, 0);
			ddi_put16(acc_handle, &ldio->pad_0, 0);
			ddi_put16(acc_handle, &ldio->flags, flags);

			/* Initialize sense Information */
			bzero(cmd->sense, SENSE_LENGTH);
			ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
			ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
			ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
			    cmd->sense_phys_addr);
			ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
			/*
			 * 6-byte CDBs have no control byte at offset 1;
			 * all longer groups do.
			 */
			ddi_put8(acc_handle, &ldio->access_byte,
			    (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
			ddi_put8(acc_handle, &ldio->sge_count,
			    acmd->cmd_cookiecnt);
			if (instance->flag_ieee) {
				mfi_sgl_ieee =
				    (struct mrsas_sge_ieee *)&ldio->sgl;
			} else {
				mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl;
			}

			context = ddi_get32(acc_handle, &ldio->context);

			/*
			 * Decode LBA and transfer length from the CDB;
			 * layout differs per CDB group (big-endian fields).
			 */
			if (acmd->cmd_cdblen == CDB_GROUP0) {
				/* 6-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    (uint16_t)(pkt->pkt_cdbp[4])));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[3])) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
				    << 16)));
			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
				/* 10-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint16_t)(pkt->pkt_cdbp[8])) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
				/* 12-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
				/* 16-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint32_t)(pkt->pkt_cdbp[13])) |
				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_hi, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			}

			break;
		}
		/* fall through For all non-rd/wr cmds */
	default:

		switch (pkt->pkt_cdbp[0]) {
		case SCMD_MODE_SENSE:
		case SCMD_MODE_SENSE_G1: {
			union scsi_cdb	*cdbp;
			uint16_t	page_code;

			cdbp = (void *)pkt->pkt_cdbp;
			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
			switch (page_code) {
			case 0x3:
			case 0x4:
				/*
				 * Pages 0x3/0x4 are synthesized by the
				 * driver; no firmware command is issued.
				 */
				(void) mrsas_mode_sense_build(pkt);
				return_mfi_pkt(instance, cmd);
				*cmd_done = 1;
				return (NULL);
			}
			break;
		}
		default:
			break;
		}

		/* Everything else goes to firmware as a pass-through. */
		pthru = (struct mrsas_pthru_frame *)cmd->frame;

		/* prepare the DCDB frame */
		ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
		    MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
		ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
		ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
		ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
		ddi_put8(acc_handle, &pthru->lun, 0);
		ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
		ddi_put16(acc_handle, &pthru->timeout, 0);
		ddi_put16(acc_handle, &pthru->flags, flags);
		/* Total transfer length is the sum of all DMA cookies. */
		tmp_data_xfer_len = 0;
		for (i = 0; i < acmd->cmd_cookiecnt; i++) {
			tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
		}
		ddi_put32(acc_handle, &pthru->data_xfer_len,
		    tmp_data_xfer_len);
		ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
		if (instance->flag_ieee) {
			mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
		} else {
			mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl;
		}

		bzero(cmd->sense, SENSE_LENGTH);
		ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
		ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
		ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
		    cmd->sense_phys_addr);

		context = ddi_get32(acc_handle, &pthru->context);
		ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
		    (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);

		break;
	}
#ifdef lint
	context = context;
#endif
	/* prepare the scatter-gather list for the firmware */
	if (instance->flag_ieee) {
		for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
			ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
			    acmd->cmd_dmacookies[i].dmac_laddress);
			ddi_put32(acc_handle, &mfi_sgl_ieee->length,
			    acmd->cmd_dmacookies[i].dmac_size);
		}
		sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
	} else {
		for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
			ddi_put64(acc_handle, &mfi_sgl->phys_addr,
			    acmd->cmd_dmacookies[i].dmac_laddress);
			ddi_put32(acc_handle, &mfi_sgl->length,
			    acmd->cmd_dmacookies[i].dmac_size);
		}
		sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
	}

	/* One frame for the header, plus enough extra frames for the SGL. */
	cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
	    ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;

	/*
	 * NOTE(review): frame_count is capped at 8 -- presumably the size
	 * of a frame-pool slot; confirm against the pool allocation.
	 */
	if (cmd->frame_count >= 8) {
		cmd->frame_count = 8;
	}

	return (cmd);
}
5151 5152
5152 5153 /*
5153 5154 * wait_for_outstanding - Wait for all outstanding cmds
5154 5155 * @instance: Adapter soft state
5155 5156 *
 * This function polls up to 90 times, 1 ms apart, for the FW to complete
 * all its outstanding commands.  Returns 1 if one or more I/Os are still
 * pending after this time period, 0 otherwise.
5159 5160 */
5160 5161 static int
5161 5162 wait_for_outstanding(struct mrsas_instance *instance)
5162 5163 {
5163 5164 int i;
5164 5165 uint32_t wait_time = 90;
5165 5166
5166 5167 for (i = 0; i < wait_time; i++) {
5167 5168 if (!instance->fw_outstanding) {
5168 5169 break;
5169 5170 }
5170 5171
5171 5172 drv_usecwait(MILLISEC); /* wait for 1000 usecs */;
5172 5173 }
5173 5174
5174 5175 if (instance->fw_outstanding) {
5175 5176 return (1);
5176 5177 }
5177 5178
5178 5179 return (0);
5179 5180 }
5180 5181
5181 5182
5182 5183 /*
5183 5184 * issue_mfi_pthru
5184 5185 */
5185 5186 static int
5186 5187 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5187 5188 struct mrsas_cmd *cmd, int mode)
5188 5189 {
5189 5190 void *ubuf;
5190 5191 uint32_t kphys_addr = 0;
5191 5192 uint32_t xferlen = 0;
5192 5193 uint32_t new_xfer_length =0;
5193 5194 uint_t model;
5194 5195 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5195 5196 dma_obj_t pthru_dma_obj;
5196 5197 struct mrsas_pthru_frame *kpthru;
5197 5198 struct mrsas_pthru_frame *pthru;
5198 5199 int i;
5199 5200 pthru = &cmd->frame->pthru;
5200 5201 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5201 5202
5202 5203 if (instance->adapterresetinprogress) {
5203 5204 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, "
5204 5205 "returning mfi_pkt and setting TRAN_BUSY\n"));
5205 5206 return (DDI_FAILURE);
5206 5207 }
5207 5208 model = ddi_model_convert_from(mode & FMODELS);
5208 5209 if (model == DDI_MODEL_ILP32) {
5209 5210 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5210 5211
5211 5212 xferlen = kpthru->sgl.sge32[0].length;
5212 5213
5213 5214 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5214 5215 } else {
5215 5216 #ifdef _ILP32
5216 5217 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5217 5218 xferlen = kpthru->sgl.sge32[0].length;
5218 5219 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5219 5220 #else
5220 5221 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5221 5222 xferlen = kpthru->sgl.sge64[0].length;
5222 5223 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5223 5224 #endif
5224 5225 }
5225 5226
5226 5227 if (xferlen) {
5227 5228 /* means IOCTL requires DMA */
5228 5229 /* allocate the data transfer buffer */
5229 5230 //pthru_dma_obj.size = xferlen;
5230 5231 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen,new_xfer_length,PAGESIZE);
5231 5232 pthru_dma_obj.size = new_xfer_length;
5232 5233 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5233 5234 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5234 5235 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5235 5236 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5236 5237 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5237 5238
5238 5239 /* allocate kernel buffer for DMA */
5239 5240 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5240 5241 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5241 5242 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5242 5243 "could not allocate data transfer buffer."));
5243 5244 return (DDI_FAILURE);
5244 5245 }
5245 5246 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5246 5247
5247 5248 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5248 5249 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5249 5250 for (i = 0; i < xferlen; i++) {
5250 5251 if (ddi_copyin((uint8_t *)ubuf+i,
5251 5252 (uint8_t *)pthru_dma_obj.buffer+i,
5252 5253 1, mode)) {
5253 5254 con_log(CL_ANN, (CE_WARN,
5254 5255 "issue_mfi_pthru : "
5255 5256 "copy from user space failed"));
5256 5257 return (DDI_FAILURE);
5257 5258 }
5258 5259 }
5259 5260 }
5260 5261
5261 5262 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5262 5263 }
5263 5264
5264 5265 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5265 5266 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5266 5267 ddi_put8(acc_handle, &pthru->cmd_status, 0);
5267 5268 ddi_put8(acc_handle, &pthru->scsi_status, 0);
5268 5269 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5269 5270 ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5270 5271 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5271 5272 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5272 5273 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5273 5274 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5274 5275
5275 5276 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5276 5277 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
5277 5278 /*ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5278 5279
5279 5280 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5280 5281 pthru->cdb_len, DDI_DEV_AUTOINCR);
5281 5282
5282 5283 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5283 5284 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5284 5285 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5285 5286
5286 5287 cmd->sync_cmd = MRSAS_TRUE;
5287 5288 cmd->frame_count = 1;
5288 5289
5289 5290 if (instance->tbolt) {
5290 5291 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5291 5292 }
5292 5293
5293 5294 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5294 5295 con_log(CL_ANN, (CE_WARN,
5295 5296 "issue_mfi_pthru: fw_ioctl failed"));
5296 5297 } else {
5297 5298 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
5298 5299 for (i = 0; i < xferlen; i++) {
5299 5300 if (ddi_copyout(
5300 5301 (uint8_t *)pthru_dma_obj.buffer+i,
5301 5302 (uint8_t *)ubuf+i, 1, mode)) {
5302 5303 con_log(CL_ANN, (CE_WARN,
5303 5304 "issue_mfi_pthru : "
5304 5305 "copy to user space failed"));
5305 5306 return (DDI_FAILURE);
5306 5307 }
5307 5308 }
5308 5309 }
5309 5310 }
5310 5311
5311 5312 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5312 5313 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5313 5314
5314 5315 con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5315 5316 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5316 5317
5317 5318 if (kpthru->sense_len) {
5318 5319 uint sense_len = SENSE_LENGTH;
5319 5320 void *sense_ubuf = (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5320 5321 if (kpthru->sense_len <= SENSE_LENGTH) {
5321 5322 sense_len = kpthru->sense_len;
5322 5323 }
5323 5324
5324 5325 for (i = 0; i < sense_len; i++) {
5325 5326 if (ddi_copyout(
5326 5327 (uint8_t *)cmd->sense+i,
5327 5328 (uint8_t *)sense_ubuf+i, 1, mode)) {
5328 5329 con_log(CL_ANN, (CE_WARN,
5329 5330 "issue_mfi_pthru : "
5330 5331 "copy to user space failed"));
5331 5332 }
5332 5333 con_log(CL_DLEVEL1, (CE_WARN,
5333 5334 "Copying Sense info sense_buff[%d] = 0x%X\n",i,*((uint8_t *)cmd->sense+i)));
5334 5335 }
5335 5336 }
5336 5337 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5337 5338 DDI_DMA_SYNC_FORDEV);
5338 5339
5339 5340 if (xferlen) {
5340 5341 /* free kernel buffer */
5341 5342 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5342 5343 return (DDI_FAILURE);
5343 5344 }
5344 5345
5345 5346 return (DDI_SUCCESS);
5346 5347 }
5347 5348
5348 5349 /*
5349 5350 * issue_mfi_dcmd
5350 5351 */
5351 5352 static int
5352 5353 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5353 5354 struct mrsas_cmd *cmd, int mode)
5354 5355 {
5355 5356 void *ubuf;
5356 5357 uint32_t kphys_addr = 0;
5357 5358 uint32_t xferlen = 0;
5358 5359 uint32_t new_xfer_length = 0;
5359 5360 uint32_t model;
5360 5361 dma_obj_t dcmd_dma_obj;
5361 5362 struct mrsas_dcmd_frame *kdcmd;
5362 5363 struct mrsas_dcmd_frame *dcmd;
5363 5364 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5364 5365 int i;
5365 5366 dcmd = &cmd->frame->dcmd;
5366 5367 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5367 5368
5368 5369 if (instance->adapterresetinprogress) {
5369 5370 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5370 5371 "returning mfi_pkt and setting TRAN_BUSY\n"));
5371 5372 return (DDI_FAILURE);
5372 5373 }
5373 5374 model = ddi_model_convert_from(mode & FMODELS);
5374 5375 if (model == DDI_MODEL_ILP32) {
5375 5376 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5376 5377
5377 5378 xferlen = kdcmd->sgl.sge32[0].length;
5378 5379
5379 5380 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5380 5381 } else {
5381 5382 #ifdef _ILP32
5382 5383 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5383 5384 xferlen = kdcmd->sgl.sge32[0].length;
5384 5385 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5385 5386 #else
5386 5387 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5387 5388 xferlen = kdcmd->sgl.sge64[0].length;
5388 5389 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5389 5390 #endif
5390 5391 }
5391 5392 if (xferlen) {
5392 5393 /* means IOCTL requires DMA */
5393 5394 /* allocate the data transfer buffer */
5394 5395 //dcmd_dma_obj.size = xferlen;
5395 5396 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen,new_xfer_length,PAGESIZE);
5396 5397 dcmd_dma_obj.size = new_xfer_length;
5397 5398 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5398 5399 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5399 5400 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5400 5401 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5401 5402 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5402 5403
5403 5404 /* allocate kernel buffer for DMA */
5404 5405 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
5405 5406 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5406 5407 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
5407 5408 "could not allocate data transfer buffer."));
5408 5409 return (DDI_FAILURE);
5409 5410 }
5410 5411 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5411 5412
5412 5413 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5413 5414 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5414 5415 for (i = 0; i < xferlen; i++) {
5415 5416 if (ddi_copyin((uint8_t *)ubuf + i,
5416 5417 (uint8_t *)dcmd_dma_obj.buffer + i,
5417 5418 1, mode)) {
5418 5419 con_log(CL_ANN, (CE_WARN,
5419 5420 "issue_mfi_dcmd : "
5420 5421 "copy from user space failed"));
5421 5422 return (DDI_FAILURE);
5422 5423 }
5423 5424 }
5424 5425 }
5425 5426
5426 5427 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5427 5428 }
5428 5429
5429 5430 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5430 5431 ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5431 5432 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5432 5433 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5433 5434 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5434 5435 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5435 5436
5436 5437 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5437 5438 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5438 5439
5439 5440 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5440 5441 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5441 5442 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5442 5443
5443 5444 cmd->sync_cmd = MRSAS_TRUE;
5444 5445 cmd->frame_count = 1;
5445 5446
5446 5447 if (instance->tbolt) {
5447 5448 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5448 5449 }
5449 5450
5450 5451 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5451 5452 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5452 5453 } else {
5453 5454 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5454 5455 for (i = 0; i < xferlen; i++) {
5455 5456 if (ddi_copyout(
5456 5457 (uint8_t *)dcmd_dma_obj.buffer + i,
5457 5458 (uint8_t *)ubuf + i,
5458 5459 1, mode)) {
5459 5460 con_log(CL_ANN, (CE_WARN,
5460 5461 "issue_mfi_dcmd : "
5461 5462 "copy to user space failed"));
5462 5463 return (DDI_FAILURE);
5463 5464 }
5464 5465 }
5465 5466 }
5466 5467 }
5467 5468
5468 5469 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5469 5470 con_log(CL_ANN, (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5470 5471
5471 5472 if (xferlen) {
5472 5473 /* free kernel buffer */
5473 5474 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5474 5475 return (DDI_FAILURE);
5475 5476 }
5476 5477
5477 5478 return (DDI_SUCCESS);
5478 5479 }
5479 5480
5480 5481 /*
5481 5482 * issue_mfi_smp
5482 5483 */
5483 5484 static int
5484 5485 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5485 5486 struct mrsas_cmd *cmd, int mode)
5486 5487 {
5487 5488 void *request_ubuf;
5488 5489 void *response_ubuf;
5489 5490 uint32_t request_xferlen = 0;
5490 5491 uint32_t response_xferlen = 0;
5491 5492 uint32_t new_xfer_length1 = 0;
5492 5493 uint32_t new_xfer_length2 = 0;
5493 5494 uint_t model;
5494 5495 dma_obj_t request_dma_obj;
5495 5496 dma_obj_t response_dma_obj;
5496 5497 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5497 5498 struct mrsas_smp_frame *ksmp;
5498 5499 struct mrsas_smp_frame *smp;
5499 5500 struct mrsas_sge32 *sge32;
5500 5501 #ifndef _ILP32
5501 5502 struct mrsas_sge64 *sge64;
5502 5503 #endif
5503 5504 int i;
5504 5505 uint64_t tmp_sas_addr;
5505 5506
5506 5507 smp = &cmd->frame->smp;
5507 5508 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5508 5509
5509 5510 if (instance->adapterresetinprogress) {
5510 5511 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5511 5512 "returning mfi_pkt and setting TRAN_BUSY\n"));
5512 5513 return (DDI_FAILURE);
5513 5514 }
5514 5515 model = ddi_model_convert_from(mode & FMODELS);
5515 5516 if (model == DDI_MODEL_ILP32) {
5516 5517 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5517 5518
5518 5519 sge32 = &ksmp->sgl[0].sge32[0];
5519 5520 response_xferlen = sge32[0].length;
5520 5521 request_xferlen = sge32[1].length;
5521 5522 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5522 5523 "response_xferlen = %x, request_xferlen = %x",
5523 5524 response_xferlen, request_xferlen));
5524 5525
5525 5526 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5526 5527 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5527 5528 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5528 5529 "response_ubuf = %p, request_ubuf = %p",
5529 5530 response_ubuf, request_ubuf));
5530 5531 } else {
5531 5532 #ifdef _ILP32
5532 5533 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5533 5534
5534 5535 sge32 = &ksmp->sgl[0].sge32[0];
5535 5536 response_xferlen = sge32[0].length;
5536 5537 request_xferlen = sge32[1].length;
5537 5538 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5538 5539 "response_xferlen = %x, request_xferlen = %x",
5539 5540 response_xferlen, request_xferlen));
5540 5541
5541 5542 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5542 5543 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5543 5544 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5544 5545 "response_ubuf = %p, request_ubuf = %p",
5545 5546 response_ubuf, request_ubuf));
5546 5547 #else
5547 5548 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5548 5549
5549 5550 sge64 = &ksmp->sgl[0].sge64[0];
5550 5551 response_xferlen = sge64[0].length;
5551 5552 request_xferlen = sge64[1].length;
5552 5553
5553 5554 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
5554 5555 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
5555 5556 #endif
5556 5557 }
5557 5558 if (request_xferlen) {
5558 5559 /* means IOCTL requires DMA */
5559 5560 /* allocate the data transfer buffer */
5560 5561 //request_dma_obj.size = request_xferlen;
5561 5562 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,new_xfer_length1,PAGESIZE);
5562 5563 request_dma_obj.size = new_xfer_length1;
5563 5564 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5564 5565 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5565 5566 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5566 5567 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5567 5568 request_dma_obj.dma_attr.dma_attr_align = 1;
5568 5569
5569 5570 /* allocate kernel buffer for DMA */
5570 5571 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5571 5572 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5572 5573 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5573 5574 "could not allocate data transfer buffer."));
5574 5575 return (DDI_FAILURE);
5575 5576 }
5576 5577 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5577 5578
5578 5579 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5579 5580 for (i = 0; i < request_xferlen; i++) {
5580 5581 if (ddi_copyin((uint8_t *)request_ubuf + i,
5581 5582 (uint8_t *)request_dma_obj.buffer + i,
5582 5583 1, mode)) {
5583 5584 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5584 5585 "copy from user space failed"));
5585 5586 return (DDI_FAILURE);
5586 5587 }
5587 5588 }
5588 5589 }
5589 5590
5590 5591 if (response_xferlen) {
5591 5592 /* means IOCTL requires DMA */
5592 5593 /* allocate the data transfer buffer */
5593 5594 //response_dma_obj.size = response_xferlen;
5594 5595 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,new_xfer_length2,PAGESIZE);
5595 5596 response_dma_obj.size = new_xfer_length2;
5596 5597 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5597 5598 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5598 5599 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5599 5600 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5600 5601 response_dma_obj.dma_attr.dma_attr_align = 1;
5601 5602
5602 5603 /* allocate kernel buffer for DMA */
5603 5604 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5604 5605 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5605 5606 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5606 5607 "could not allocate data transfer buffer."));
5607 5608 return (DDI_FAILURE);
5608 5609 }
5609 5610 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5610 5611
5611 5612 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5612 5613 for (i = 0; i < response_xferlen; i++) {
5613 5614 if (ddi_copyin((uint8_t *)response_ubuf + i,
5614 5615 (uint8_t *)response_dma_obj.buffer + i,
5615 5616 1, mode)) {
5616 5617 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5617 5618 "copy from user space failed"));
5618 5619 return (DDI_FAILURE);
5619 5620 }
5620 5621 }
5621 5622 }
5622 5623
5623 5624 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5624 5625 ddi_put8(acc_handle, &smp->cmd_status, 0);
5625 5626 ddi_put8(acc_handle, &smp->connection_status, 0);
5626 5627 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5627 5628 /* smp->context = ksmp->context; */
5628 5629 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5629 5630 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5630 5631
5631 5632 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5632 5633 sizeof (uint64_t));
5633 5634 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5634 5635
5635 5636 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5636 5637
5637 5638 model = ddi_model_convert_from(mode & FMODELS);
5638 5639 if (model == DDI_MODEL_ILP32) {
5639 5640 con_log(CL_ANN1, (CE_CONT,
5640 5641 "issue_mfi_smp: DDI_MODEL_ILP32"));
5641 5642
5642 5643 sge32 = &smp->sgl[0].sge32[0];
5643 5644 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5644 5645 ddi_put32(acc_handle, &sge32[0].phys_addr,
5645 5646 response_dma_obj.dma_cookie[0].dmac_address);
5646 5647 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5647 5648 ddi_put32(acc_handle, &sge32[1].phys_addr,
5648 5649 request_dma_obj.dma_cookie[0].dmac_address);
5649 5650 } else {
5650 5651 #ifdef _ILP32
5651 5652 con_log(CL_ANN1, (CE_CONT,
5652 5653 "issue_mfi_smp: DDI_MODEL_ILP32"));
5653 5654 sge32 = &smp->sgl[0].sge32[0];
5654 5655 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5655 5656 ddi_put32(acc_handle, &sge32[0].phys_addr,
5656 5657 response_dma_obj.dma_cookie[0].dmac_address);
5657 5658 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5658 5659 ddi_put32(acc_handle, &sge32[1].phys_addr,
5659 5660 request_dma_obj.dma_cookie[0].dmac_address);
5660 5661 #else
5661 5662 con_log(CL_ANN1, (CE_CONT,
5662 5663 "issue_mfi_smp: DDI_MODEL_LP64"));
5663 5664 sge64 = &smp->sgl[0].sge64[0];
5664 5665 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5665 5666 ddi_put64(acc_handle, &sge64[0].phys_addr,
5666 5667 response_dma_obj.dma_cookie[0].dmac_address);
5667 5668 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5668 5669 ddi_put64(acc_handle, &sge64[1].phys_addr,
5669 5670 request_dma_obj.dma_cookie[0].dmac_address);
5670 5671 #endif
5671 5672 }
5672 5673 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5673 5674 "smp->response_xferlen = %d, smp->request_xferlen = %d "
5674 5675 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5675 5676 ddi_get32(acc_handle, &sge32[1].length),
5676 5677 ddi_get32(acc_handle, &smp->data_xfer_len)));
5677 5678
5678 5679 cmd->sync_cmd = MRSAS_TRUE;
5679 5680 cmd->frame_count = 1;
5680 5681
5681 5682 if (instance->tbolt) {
5682 5683 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5683 5684 }
5684 5685
5685 5686 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5686 5687 con_log(CL_ANN, (CE_WARN,
5687 5688 "issue_mfi_smp: fw_ioctl failed"));
5688 5689 } else {
5689 5690 con_log(CL_ANN1, (CE_CONT,
5690 5691 "issue_mfi_smp: copy to user space"));
5691 5692
5692 5693 if (request_xferlen) {
5693 5694 for (i = 0; i < request_xferlen; i++) {
5694 5695 if (ddi_copyout(
5695 5696 (uint8_t *)request_dma_obj.buffer +
5696 5697 i, (uint8_t *)request_ubuf + i,
5697 5698 1, mode)) {
5698 5699 con_log(CL_ANN, (CE_WARN,
5699 5700 "issue_mfi_smp : copy to user space"
5700 5701 " failed"));
5701 5702 return (DDI_FAILURE);
5702 5703 }
5703 5704 }
5704 5705 }
5705 5706
5706 5707 if (response_xferlen) {
5707 5708 for (i = 0; i < response_xferlen; i++) {
5708 5709 if (ddi_copyout(
5709 5710 (uint8_t *)response_dma_obj.buffer
5710 5711 + i, (uint8_t *)response_ubuf
5711 5712 + i, 1, mode)) {
5712 5713 con_log(CL_ANN, (CE_WARN,
5713 5714 "issue_mfi_smp : copy to "
5714 5715 "user space failed"));
5715 5716 return (DDI_FAILURE);
5716 5717 }
5717 5718 }
5718 5719 }
5719 5720 }
5720 5721
5721 5722 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
5722 5723 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
5723 5724 ksmp->cmd_status));
5724 5725
5725 5726 if (request_xferlen) {
5726 5727 /* free kernel buffer */
5727 5728 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
5728 5729 DDI_SUCCESS)
5729 5730 return (DDI_FAILURE);
5730 5731 }
5731 5732
5732 5733 if (response_xferlen) {
5733 5734 /* free kernel buffer */
5734 5735 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
5735 5736 DDI_SUCCESS)
5736 5737 return (DDI_FAILURE);
5737 5738 }
5738 5739
5739 5740 return (DDI_SUCCESS);
5740 5741 }
5741 5742
5742 5743 /*
5743 5744 * issue_mfi_stp
5744 5745 */
5745 5746 static int
5746 5747 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5747 5748 struct mrsas_cmd *cmd, int mode)
5748 5749 {
5749 5750 void *fis_ubuf;
5750 5751 void *data_ubuf;
5751 5752 uint32_t fis_xferlen = 0;
5752 5753 uint32_t new_xfer_length1 = 0;
5753 5754 uint32_t new_xfer_length2 = 0;
5754 5755 uint32_t data_xferlen = 0;
5755 5756 uint_t model;
5756 5757 dma_obj_t fis_dma_obj;
5757 5758 dma_obj_t data_dma_obj;
5758 5759 struct mrsas_stp_frame *kstp;
5759 5760 struct mrsas_stp_frame *stp;
5760 5761 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5761 5762 int i;
5762 5763
5763 5764 stp = &cmd->frame->stp;
5764 5765 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
5765 5766
5766 5767 if (instance->adapterresetinprogress) {
5767 5768 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5768 5769 "returning mfi_pkt and setting TRAN_BUSY\n"));
5769 5770 return (DDI_FAILURE);
5770 5771 }
5771 5772 model = ddi_model_convert_from(mode & FMODELS);
5772 5773 if (model == DDI_MODEL_ILP32) {
5773 5774 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5774 5775
5775 5776 fis_xferlen = kstp->sgl.sge32[0].length;
5776 5777 data_xferlen = kstp->sgl.sge32[1].length;
5777 5778
5778 5779 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5779 5780 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5780 5781 }
5781 5782 else
5782 5783 {
5783 5784 #ifdef _ILP32
5784 5785 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5785 5786
5786 5787 fis_xferlen = kstp->sgl.sge32[0].length;
5787 5788 data_xferlen = kstp->sgl.sge32[1].length;
5788 5789
5789 5790 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5790 5791 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5791 5792 #else
5792 5793 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
5793 5794
5794 5795 fis_xferlen = kstp->sgl.sge64[0].length;
5795 5796 data_xferlen = kstp->sgl.sge64[1].length;
5796 5797
5797 5798 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
5798 5799 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
5799 5800 #endif
5800 5801 }
5801 5802
5802 5803
5803 5804 if (fis_xferlen) {
5804 5805 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
5805 5806 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
5806 5807
5807 5808 /* means IOCTL requires DMA */
5808 5809 /* allocate the data transfer buffer */
5809 5810 //fis_dma_obj.size = fis_xferlen;
5810 5811 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,new_xfer_length1,PAGESIZE);
5811 5812 fis_dma_obj.size = new_xfer_length1;
5812 5813 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
5813 5814 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5814 5815 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5815 5816 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
5816 5817 fis_dma_obj.dma_attr.dma_attr_align = 1;
5817 5818
5818 5819 /* allocate kernel buffer for DMA */
5819 5820 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
5820 5821 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5821 5822 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
5822 5823 "could not allocate data transfer buffer."));
5823 5824 return (DDI_FAILURE);
5824 5825 }
5825 5826 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
5826 5827
5827 5828 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5828 5829 for (i = 0; i < fis_xferlen; i++) {
5829 5830 if (ddi_copyin((uint8_t *)fis_ubuf + i,
5830 5831 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
5831 5832 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
5832 5833 "copy from user space failed"));
5833 5834 return (DDI_FAILURE);
5834 5835 }
5835 5836 }
5836 5837 }
5837 5838
5838 5839 if (data_xferlen) {
5839 5840 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
5840 5841 "data_xferlen = %x", data_ubuf, data_xferlen));
5841 5842
5842 5843 /* means IOCTL requires DMA */
5843 5844 /* allocate the data transfer buffer */
5844 5845 //data_dma_obj.size = data_xferlen;
5845 5846 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen,new_xfer_length2,PAGESIZE);
5846 5847 data_dma_obj.size = new_xfer_length2;
5847 5848 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
5848 5849 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5849 5850 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5850 5851 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
5851 5852 data_dma_obj.dma_attr.dma_attr_align = 1;
5852 5853
5853 5854 /* allocate kernel buffer for DMA */
5854 5855 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
5855 5856 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5856 5857 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
5857 5858 "could not allocate data transfer buffer."));
5858 5859 return (DDI_FAILURE);
5859 5860 }
5860 5861 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
5861 5862
5862 5863 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5863 5864 for (i = 0; i < data_xferlen; i++) {
5864 5865 if (ddi_copyin((uint8_t *)data_ubuf + i,
5865 5866 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
5866 5867 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
5867 5868 "copy from user space failed"));
5868 5869 return (DDI_FAILURE);
5869 5870 }
5870 5871 }
5871 5872 }
5872 5873
5873 5874 ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
5874 5875 ddi_put8(acc_handle, &stp->cmd_status, 0);
5875 5876 ddi_put8(acc_handle, &stp->connection_status, 0);
5876 5877 ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
5877 5878 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
5878 5879
5879 5880 ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
5880 5881 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
5881 5882
5882 5883 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
5883 5884 DDI_DEV_AUTOINCR);
5884 5885
5885 5886 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
5886 5887 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
5887 5888 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
5888 5889 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
5889 5890 fis_dma_obj.dma_cookie[0].dmac_address);
5890 5891 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
5891 5892 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
5892 5893 data_dma_obj.dma_cookie[0].dmac_address);
5893 5894
5894 5895 cmd->sync_cmd = MRSAS_TRUE;
5895 5896 cmd->frame_count = 1;
5896 5897
5897 5898 if (instance->tbolt) {
5898 5899 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5899 5900 }
5900 5901
5901 5902 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5902 5903 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
5903 5904 } else {
5904 5905
5905 5906 if (fis_xferlen) {
5906 5907 for (i = 0; i < fis_xferlen; i++) {
5907 5908 if (ddi_copyout(
5908 5909 (uint8_t *)fis_dma_obj.buffer + i,
5909 5910 (uint8_t *)fis_ubuf + i, 1, mode)) {
5910 5911 con_log(CL_ANN, (CE_WARN,
5911 5912 "issue_mfi_stp : copy to "
5912 5913 "user space failed"));
5913 5914 return (DDI_FAILURE);
5914 5915 }
5915 5916 }
5916 5917 }
5917 5918 }
5918 5919 if (data_xferlen) {
5919 5920 for (i = 0; i < data_xferlen; i++) {
5920 5921 if (ddi_copyout(
5921 5922 (uint8_t *)data_dma_obj.buffer + i,
5922 5923 (uint8_t *)data_ubuf + i, 1, mode)) {
5923 5924 con_log(CL_ANN, (CE_WARN,
5924 5925 "issue_mfi_stp : copy to"
5925 5926 " user space failed"));
5926 5927 return (DDI_FAILURE);
5927 5928 }
5928 5929 }
5929 5930 }
5930 5931
5931 5932 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
5932 5933 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
5933 5934 kstp->cmd_status));
5934 5935
5935 5936 if (fis_xferlen) {
5936 5937 /* free kernel buffer */
5937 5938 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
5938 5939 return (DDI_FAILURE);
5939 5940 }
5940 5941
5941 5942 if (data_xferlen) {
5942 5943 /* free kernel buffer */
5943 5944 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
5944 5945 return (DDI_FAILURE);
5945 5946 }
5946 5947
5947 5948 return (DDI_SUCCESS);
5948 5949 }
5949 5950
5950 5951 /*
5951 5952 * fill_up_drv_ver
5952 5953 */
5953 5954 void
5954 5955 fill_up_drv_ver(struct mrsas_drv_ver *dv)
5955 5956 {
5956 5957 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
5957 5958
5958 5959 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
5959 5960 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
5960 5961 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
5961 5962 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
5962 5963 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
5963 5964 strlen(MRSAS_RELDATE));
5964 5965
5965 5966 }
5966 5967
5967 5968 /*
5968 5969 * handle_drv_ioctl
5969 5970 */
5970 5971 static int
5971 5972 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5972 5973 int mode)
5973 5974 {
5974 5975 int i;
5975 5976 int rval = DDI_SUCCESS;
5976 5977 int *props = NULL;
5977 5978 void *ubuf;
5978 5979
5979 5980 uint8_t *pci_conf_buf;
5980 5981 uint32_t xferlen;
5981 5982 uint32_t num_props;
5982 5983 uint_t model;
5983 5984 struct mrsas_dcmd_frame *kdcmd;
5984 5985 struct mrsas_drv_ver dv;
5985 5986 struct mrsas_pci_information pi;
5986 5987
5987 5988 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5988 5989
5989 5990 model = ddi_model_convert_from(mode & FMODELS);
5990 5991 if (model == DDI_MODEL_ILP32) {
5991 5992 con_log(CL_ANN1, (CE_CONT,
5992 5993 "handle_drv_ioctl: DDI_MODEL_ILP32"));
5993 5994
5994 5995 xferlen = kdcmd->sgl.sge32[0].length;
5995 5996
5996 5997 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5997 5998 } else {
5998 5999 #ifdef _ILP32
5999 6000 con_log(CL_ANN1, (CE_CONT,
6000 6001 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6001 6002 xferlen = kdcmd->sgl.sge32[0].length;
6002 6003 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6003 6004 #else
6004 6005 con_log(CL_ANN1, (CE_CONT,
6005 6006 "handle_drv_ioctl: DDI_MODEL_LP64"));
6006 6007 xferlen = kdcmd->sgl.sge64[0].length;
6007 6008 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6008 6009 #endif
6009 6010 }
6010 6011 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6011 6012 "dataBuf=%p size=%d bytes", ubuf, xferlen));
6012 6013
6013 6014 switch (kdcmd->opcode) {
6014 6015 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6015 6016 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6016 6017 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6017 6018
6018 6019 fill_up_drv_ver(&dv);
6019 6020
6020 6021 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6021 6022 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6022 6023 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6023 6024 "copy to user space failed"));
6024 6025 kdcmd->cmd_status = 1;
6025 6026 rval = 1;
6026 6027 } else {
6027 6028 kdcmd->cmd_status = 0;
6028 6029 }
6029 6030 break;
6030 6031 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6031 6032 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
6032 6033 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
6033 6034
6034 6035 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6035 6036 0, "reg", &props, &num_props)) {
6036 6037 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6037 6038 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6038 6039 "ddi_prop_look_int_array failed"));
6039 6040 rval = DDI_FAILURE;
6040 6041 } else {
6041 6042
6042 6043 pi.busNumber = (props[0] >> 16) & 0xFF;
6043 6044 pi.deviceNumber = (props[0] >> 11) & 0x1f;
6044 6045 pi.functionNumber = (props[0] >> 8) & 0x7;
6045 6046 ddi_prop_free((void *)props);
6046 6047 }
6047 6048
6048 6049 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6049 6050
6050 6051 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6051 6052 offsetof(struct mrsas_pci_information, pciHeaderInfo));
6052 6053 i++) {
6053 6054 pci_conf_buf[i] =
6054 6055 pci_config_get8(instance->pci_handle, i);
6055 6056 }
6056 6057
6057 6058 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6058 6059 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6059 6060 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6060 6061 "copy to user space failed"));
6061 6062 kdcmd->cmd_status = 1;
6062 6063 rval = 1;
6063 6064 } else {
6064 6065 kdcmd->cmd_status = 0;
6065 6066 }
6066 6067 break;
6067 6068 default:
6068 6069 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6069 6070 "invalid driver specific IOCTL opcode = 0x%x",
6070 6071 kdcmd->opcode));
6071 6072 kdcmd->cmd_status = 1;
6072 6073 rval = DDI_FAILURE;
6073 6074 break;
6074 6075 }
6075 6076
6076 6077 return (rval);
6077 6078 }
6078 6079
6079 6080 /*
6080 6081 * handle_mfi_ioctl
6081 6082 */
6082 6083 static int
6083 6084 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6084 6085 int mode)
6085 6086 {
6086 6087 int rval = DDI_SUCCESS;
6087 6088
6088 6089 struct mrsas_header *hdr;
6089 6090 struct mrsas_cmd *cmd;
6090 6091
6091 6092 if (instance->tbolt) {
6092 6093 cmd = get_raid_msg_mfi_pkt(instance);
6093 6094 } else {
6094 6095 cmd = get_mfi_pkt(instance);
6095 6096 }
6096 6097 if (!cmd) {
6097 6098 cmn_err(CE_WARN,
6098 6099 "Failed to get a cmd from free-pool in handle_mfi_ioctl(). "
6099 6100 "fw_outstanding=0x%X max_fw_cmds=0x%X",
6100 6101 instance->fw_outstanding, instance->max_fw_cmds);
6101 6102 return (DDI_FAILURE);
6102 6103 }
6103 6104
6104 6105 /* Clear the frame buffer and assign back the context id */
6105 6106 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6106 6107 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6107 6108 cmd->index);
6108 6109
6109 6110 hdr = (struct mrsas_header *)&ioctl->frame[0];
6110 6111
6111 6112 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
6112 6113 case MFI_CMD_OP_DCMD:
6113 6114 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
6114 6115 break;
6115 6116 case MFI_CMD_OP_SMP:
6116 6117 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
6117 6118 break;
6118 6119 case MFI_CMD_OP_STP:
6119 6120 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
6120 6121 break;
6121 6122 case MFI_CMD_OP_LD_SCSI:
6122 6123 case MFI_CMD_OP_PD_SCSI:
6123 6124 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
6124 6125 break;
6125 6126 default:
6126 6127 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
6127 6128 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
6128 6129 rval = DDI_FAILURE;
6129 6130 break;
6130 6131 }
6131 6132
6132 6133 if (instance->tbolt) {
6133 6134 return_raid_msg_mfi_pkt(instance, cmd);
6134 6135 } else {
6135 6136 return_mfi_pkt(instance, cmd);
6136 6137 }
6137 6138
6138 6139 return (rval);
6139 6140 }
6140 6141
6141 6142 /*
6142 6143 * AEN
6143 6144 */
6144 6145 static int
6145 6146 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6146 6147 {
6147 6148 int rval = 0;
6148 6149
6149 6150 rval = register_mfi_aen(instance, instance->aen_seq_num,
6150 6151 aen->class_locale_word);
6151 6152
6152 6153 aen->cmd_status = (uint8_t)rval;
6153 6154
6154 6155 return (rval);
6155 6156 }
6156 6157
/*
 * register_mfi_aen
 *
 * Register with the firmware for asynchronous event notification
 * (AEN) covering the given class/locale word, starting from seq_num.
 * If an AEN command is already outstanding and its subscription is a
 * superset of the new request, nothing is done; otherwise the old
 * command is aborted and a merged (superset) registration is issued.
 * The registration DCMD (MR_DCMD_CTRL_EVENT_WAIT) is fired
 * asynchronously and left pending in instance->aen_cmd.
 *
 * Returns 0 on success (or when the existing registration already
 * covers the request), ENOMEM when no command packet is available, or
 * the abort_aen_cmd() error code.
 *
 * NOTE(review): the class/locale byte-order handling below (LE_32 on
 * the word followed by LE_16 on the embedded locale) is order
 * sensitive — presumably matching the firmware's little-endian mbox
 * layout; confirm against the MFI spec before touching it.
 */
static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct mrsas_cmd *cmd, *aen_cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		/* decode the subscription carried by the pending command */
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* merge: union of locales, lowest (widest) class */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		/* no AEN pending: re-derive curr_aen from the raw word */
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = get_mfi_pkt(instance);
	}

	if (!cmd) {
		cmn_err(CE_WARN,
		    "Failed to get a cmd from free-pool in register_mfi_aen(). "
		    "fw_outstanding=0x%X max_fw_cmds=0x%X",
		    instance->fw_outstanding, instance->max_fw_cmds);
		return (ENOMEM);
	}


	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* clear the event-detail landing buffer the FW will DMA into */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
	/* convert the merged subscription back to mbox byte order */
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	curr_aen.word = LE_32(curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
	    curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_detail));

	instance->aen_seq_num = seq_num;


	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16 (&instance->fw_outstanding, 1); */
	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}
6299 6300
6300 6301 void
6301 6302 display_scsi_inquiry(caddr_t scsi_inq)
6302 6303 {
6303 6304 #define MAX_SCSI_DEVICE_CODE 14
6304 6305 int i;
6305 6306 char inquiry_buf[256] = {0};
6306 6307 int len;
6307 6308 const char *const scsi_device_types[] = {
6308 6309 "Direct-Access ",
6309 6310 "Sequential-Access",
6310 6311 "Printer ",
6311 6312 "Processor ",
6312 6313 "WORM ",
6313 6314 "CD-ROM ",
6314 6315 "Scanner ",
6315 6316 "Optical Device ",
6316 6317 "Medium Changer ",
6317 6318 "Communications ",
6318 6319 "Unknown ",
6319 6320 "Unknown ",
6320 6321 "Unknown ",
6321 6322 "Enclosure ",
6322 6323 };
6323 6324
6324 6325 len = 0;
6325 6326
6326 6327 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: ");
6327 6328 for (i = 8; i < 16; i++) {
6328 6329 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6329 6330 scsi_inq[i]);
6330 6331 }
6331 6332
6332 6333 len += snprintf(inquiry_buf + len, 265 - len, " Model: ");
6333 6334
6334 6335 for (i = 16; i < 32; i++) {
6335 6336 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6336 6337 scsi_inq[i]);
6337 6338 }
6338 6339
6339 6340 len += snprintf(inquiry_buf + len, 265 - len, " Rev: ");
6340 6341
6341 6342 for (i = 32; i < 36; i++) {
6342 6343 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6343 6344 scsi_inq[i]);
6344 6345 }
6345 6346
6346 6347 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6347 6348
6348 6349
6349 6350 i = scsi_inq[0] & 0x1f;
6350 6351
6351 6352
6352 6353 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ",
6353 6354 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6354 6355 "Unknown ");
6355 6356
6356 6357
6357 6358 len += snprintf(inquiry_buf + len, 265 - len,
6358 6359 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6359 6360
6360 6361 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6361 6362 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
6362 6363 } else {
6363 6364 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6364 6365 }
6365 6366
6366 6367 con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf));
6367 6368 }
6368 6369
6369 6370 void
6370 6371 io_timeout_checker(void *arg)
6371 6372 {
6372 6373 struct scsi_pkt *pkt;
6373 6374 struct mrsas_instance *instance = arg;
6374 6375 struct mrsas_cmd *cmd = NULL;
6375 6376 struct mrsas_header *hdr;
6376 6377 int time = 0;
6377 6378 int counter = 0;
6378 6379 struct mlist_head *pos, *next;
6379 6380 mlist_t process_list;
6380 6381
6381 6382 if (instance->adapterresetinprogress == 1) {
6382 6383 con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
6383 6384 " reset in progress"));
6384 6385
6385 6386 instance->timeout_id = timeout(io_timeout_checker,
6386 6387 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6387 6388 return;
6388 6389 }
6389 6390
6390 6391 /* See if this check needs to be in the beginning or last in ISR */
6391 6392 if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
6392 6393 cmn_err(CE_WARN, "io_timeout_checker:"
6393 6394 "FW Fault, calling reset adapter");
6394 6395 cmn_err(CE_CONT, "io_timeout_checker: fw_outstanding 0x%X max_fw_cmds 0x%X",
6395 6396 instance->fw_outstanding, instance->max_fw_cmds );
6396 6397 if (instance->adapterresetinprogress == 0) {
6397 6398 instance->adapterresetinprogress = 1;
6398 6399 if (instance->tbolt)
6399 6400 mrsas_tbolt_reset_ppc(instance);
6400 6401 else
6401 6402 mrsas_reset_ppc(instance);
6402 6403 instance->adapterresetinprogress = 0;
6403 6404 }
6404 6405 instance->timeout_id = timeout(io_timeout_checker,
6405 6406 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6406 6407 return;
6407 6408 }
6408 6409
6409 6410 INIT_LIST_HEAD(&process_list);
6410 6411
6411 6412 mutex_enter(&instance->cmd_pend_mtx);
6412 6413 mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
6413 6414 cmd = mlist_entry(pos, struct mrsas_cmd, list);
6414 6415
6415 6416 if (cmd == NULL) {
6416 6417 continue;
6417 6418 }
6418 6419
6419 6420 if (cmd->sync_cmd == MRSAS_TRUE) {
6420 6421 hdr = (struct mrsas_header *)&cmd->frame->hdr;
6421 6422 if (hdr == NULL) {
6422 6423 continue;
6423 6424 }
6424 6425 time = --cmd->drv_pkt_time;
6425 6426 } else {
6426 6427 pkt = cmd->pkt;
6427 6428 if (pkt == NULL) {
6428 6429 continue;
6429 6430 }
6430 6431 time = --cmd->drv_pkt_time;
6431 6432 }
6432 6433 if (time <= 0) {
6433 6434 cmn_err(CE_WARN, "%llx: "
6434 6435 "io_timeout_checker: TIMING OUT: pkt "
6435 6436 ": %p, cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X\n",
6436 6437 gethrtime(), (void *)pkt, (void *)cmd, instance->fw_outstanding, instance->max_fw_cmds);
6437 6438
6438 6439 counter++;
6439 6440 break;
6440 6441 }
6441 6442 }
6442 6443 mutex_exit(&instance->cmd_pend_mtx);
6443 6444
6444 6445 if (counter) {
6445 6446 if (instance->disable_online_ctrl_reset == 1) {
6446 6447 cmn_err(CE_WARN, "mr_sas %d: %s(): OCR is NOT supported by Firmware, KILL adapter!!!",
6447 6448 instance->instance, __func__);
6448 6449
6449 6450 if (instance->tbolt)
6450 6451 (void) mrsas_tbolt_kill_adapter(instance);
6451 6452 else
6452 6453 (void) mrsas_kill_adapter(instance);
6453 6454
6454 6455 return;
6455 6456 } else {
6456 6457 if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
6457 6458 if (instance->adapterresetinprogress == 0) {
6458 6459 if (instance->tbolt)
6459 6460 mrsas_tbolt_reset_ppc(instance);
6460 6461 else
6461 6462 mrsas_reset_ppc(instance);
6462 6463 }
6463 6464 } else {
6464 6465 cmn_err(CE_WARN,
6465 6466 "io_timeout_checker: "
6466 6467 "cmd %p cmd->index %d "
6467 6468 "timed out even after 3 resets: "
6468 6469 "so KILL adapter", (void *)cmd, cmd->index);
6469 6470
6470 6471 mrsas_print_cmd_details(instance, cmd, 0xDD);
6471 6472
6472 6473 if (instance->tbolt)
6473 6474 (void) mrsas_tbolt_kill_adapter(instance);
6474 6475 else
6475 6476 (void) mrsas_kill_adapter(instance);
6476 6477 return;
6477 6478 }
6478 6479 }
6479 6480 }
6480 6481 con_log(CL_ANN, (CE_NOTE, "mrsas: "
6481 6482 "schedule next timeout check: "
6482 6483 "do timeout \n"));
6483 6484 instance->timeout_id =
6484 6485 timeout(io_timeout_checker, (void *)instance,
6485 6486 drv_usectohz(MRSAS_1_SECOND));
6486 6487 }
6487 6488
6488 6489 static uint32_t
6489 6490 read_fw_status_reg_ppc(struct mrsas_instance *instance)
6490 6491 {
6491 6492 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
6492 6493 }
6493 6494
/*
 * issue_cmd_ppc - asynchronous submission of one MFI frame to the FW.
 *
 * Increments fw_outstanding, pushes the command onto the driver's pending
 * list via push_pending_mfi_pkt() (unless an adapter reset is in progress,
 * in which case only the packet timer is refreshed — NOTE(review):
 * presumably the command is then re-issued from an existing pending entry;
 * confirm against the OCR path), and finally writes the frame's physical
 * address to the inbound queue port under reg_write_mtx.  No return value;
 * completion is signalled elsewhere.
 */
static void
issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	/* count the command as outstanding before the FW can see it */
	atomic_add_16(&instance->fw_outstanding, 1);

	pkt = cmd->pkt;
	if (pkt) {
		con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd:"
		    ": %p instance : %p pkt : %p pkt_time : %x\n",
		    gethrtime(), (void *)cmd, (void *)instance,
		    (void *)pkt, cmd->drv_pkt_time));
		if (instance->adapterresetinprogress) {
			/* during OCR: restart the timer, do not re-queue */
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
		} else {
			push_pending_mfi_pkt(instance, cmd);
		}

	} else {
		/* internal command with no scsi_pkt attached */
		con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
	}

	/* serialize queue-port writes across threads */
	mutex_enter(&instance->reg_write_mtx);
	ASSERT(mutex_owned(&instance->reg_write_mtx));
	/* Issue the command to the FW */
	/* low bits carry (frame_count - 1) << 1 | 1 alongside the address */
	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
	mutex_exit(&instance->reg_write_mtx);

}
6528 6529
6529 6530 /*
6530 6531 * issue_cmd_in_sync_mode
6531 6532 */
/*
 * issue_cmd_in_sync_mode_ppc - submit an MFI frame and block until the
 * interrupt path marks it complete.
 *
 * Normal path: the command is pushed onto the pending list, its status is
 * pre-set to ENODATA, the frame is written to the inbound queue port under
 * reg_write_mtx, and the caller sleeps on int_cmd_cv until cmd_status
 * changes (or up to `msecs' wakeups occur).  Returns DDI_SUCCESS when the
 * wait loop exited early, DDI_FAILURE otherwise.
 *
 * Reset path: when adapterresetinprogress is set, the frame is written
 * immediately (no pending-list push, no wait) after stretching the packet
 * timer to at least debug_timeout_g, and DDI_SUCCESS is returned.
 *
 * NOTE(review): cv_wait() has no timeout, so `i' counts wakeups rather
 * than milliseconds; the `i < (msecs - 1)' test only distinguishes "loop
 * exited early" from "loop exhausted".  Verify against the completion
 * path before changing.
 */
static int
issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
	struct mrsas_header *hdr = &cmd->frame->hdr;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));

	if (instance->adapterresetinprogress) {
		/* ensure the watchdog gives this frame at least the OCR budget */
		cmd->drv_pkt_time = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
		if (cmd->drv_pkt_time < debug_timeout_g)
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
		    "issue and return in reset case\n"));
		WR_IB_QPORT((cmd->frame_phys_addr) |
		    (((cmd->frame_count - 1) << 1) | 1), instance);

		return (DDI_SUCCESS);
	} else {
		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
		push_pending_mfi_pkt(instance, cmd);
	}

	/* sentinel: completion path overwrites this with the real status */
	cmd->cmd_status = ENODATA;

	mutex_enter(&instance->reg_write_mtx);
	ASSERT(mutex_owned(&instance->reg_write_mtx));
	/* Issue the command to the FW */
	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
	mutex_exit(&instance->reg_write_mtx);

	/* sleep until the completion side signals int_cmd_cv */
	mutex_enter(&instance->int_cmd_mtx);
	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}
	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));

	if (i < (msecs -1)) {
		return (DDI_SUCCESS);
	} else {
		return (DDI_FAILURE);
	}
}
6582 6583
6583 6584 /*
6584 6585 * issue_cmd_in_poll_mode
6585 6586 */
6586 6587 static int
6587 6588 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6588 6589 struct mrsas_cmd *cmd)
6589 6590 {
6590 6591 int i;
6591 6592 uint16_t flags;
6592 6593 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6593 6594 struct mrsas_header *frame_hdr;
6594 6595
6595 6596 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
6596 6597
6597 6598 frame_hdr = (struct mrsas_header *)cmd->frame;
6598 6599 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6599 6600 MFI_CMD_STATUS_POLL_MODE);
6600 6601 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6601 6602 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6602 6603
6603 6604 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6604 6605
6605 6606 /* issue the frame using inbound queue port */
6606 6607 WR_IB_QPORT((cmd->frame_phys_addr) |
6607 6608 (((cmd->frame_count - 1) << 1) | 1), instance);
6608 6609
6609 6610 /* wait for cmd_status to change from 0xFF */
6610 6611 for (i = 0; i < msecs && (
6611 6612 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6612 6613 == MFI_CMD_STATUS_POLL_MODE); i++) {
6613 6614 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6614 6615 }
6615 6616
6616 6617 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6617 6618 == MFI_CMD_STATUS_POLL_MODE) {
6618 6619 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6619 6620 "cmd polling timed out"));
6620 6621 return (DDI_FAILURE);
6621 6622 }
6622 6623
6623 6624 return (DDI_SUCCESS);
6624 6625 }
6625 6626
6626 6627 static void
6627 6628 enable_intr_ppc(struct mrsas_instance *instance)
6628 6629 {
6629 6630 uint32_t mask;
6630 6631
6631 6632 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));
6632 6633
6633 6634 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
6634 6635 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
6635 6636
6636 6637 /* WR_OB_INTR_MASK(~0x80000000, instance); */
6637 6638 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
6638 6639
6639 6640 /* dummy read to force PCI flush */
6640 6641 mask = RD_OB_INTR_MASK(instance);
6641 6642
6642 6643 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
6643 6644 "outbound_intr_mask = 0x%x", mask));
6644 6645 }
6645 6646
6646 6647 static void
6647 6648 disable_intr_ppc(struct mrsas_instance *instance)
6648 6649 {
6649 6650 uint32_t mask;
6650 6651
6651 6652 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));
6652 6653
6653 6654 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
6654 6655 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6655 6656
6656 6657 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
6657 6658 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
6658 6659
6659 6660 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
6660 6661 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6661 6662
6662 6663 /* dummy read to force PCI flush */
6663 6664 mask = RD_OB_INTR_MASK(instance);
6664 6665 #ifdef lint
6665 6666 mask = mask;
6666 6667 #endif
6667 6668 }
6668 6669
6669 6670 static int
6670 6671 intr_ack_ppc(struct mrsas_instance *instance)
6671 6672 {
6672 6673 uint32_t status;
6673 6674 int ret = DDI_INTR_CLAIMED;
6674 6675
6675 6676 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));
6676 6677
6677 6678 /* check if it is our interrupt */
6678 6679 status = RD_OB_INTR_STATUS(instance);
6679 6680
6680 6681 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));
6681 6682
6682 6683 if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
6683 6684 ret = DDI_INTR_UNCLAIMED;
6684 6685 }
6685 6686
6686 6687 if (ret == DDI_INTR_UNCLAIMED) {
6687 6688 return (ret);
6688 6689 }
6689 6690 /* clear the interrupt by writing back the same value */
6690 6691 WR_OB_DOORBELL_CLEAR(status, instance);
6691 6692
6692 6693 /* dummy READ */
6693 6694 status = RD_OB_INTR_STATUS(instance);
6694 6695
6695 6696 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));
6696 6697
6697 6698 return (ret);
6698 6699 }
6699 6700
6700 6701 /*
6701 6702 * Marks HBA as bad. This will be called either when an
6702 6703 * IO packet times out even after 3 FW resets
6703 6704 * or FW is found to be fault even after 3 continuous resets.
6704 6705 */
6705 6706
/*
 * mrsas_kill_adapter - permanently take the HBA offline.
 *
 * Idempotent: returns DDI_FAILURE if the adapter was already marked dead.
 * Otherwise sets deadadapter (under ocr_flags_mtx), disables interrupts,
 * writes MFI_STOP_ADP to the inbound doorbell, and completes all pending
 * commands so their waiters are released.  Returns DDI_SUCCESS.
 * The ordering here matters: the flag is set before the doorbell write so
 * concurrent paths see the adapter as dead first.
 */
static int
mrsas_kill_adapter(struct mrsas_instance *instance)
{
	if (instance->deadadapter == 1)
		return (DDI_FAILURE);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
	    "Writing to doorbell with MFI_STOP_ADP "));
	mutex_enter(&instance->ocr_flags_mtx);
	instance->deadadapter = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	instance->func_ptr->disable_intr(instance);
	WR_IB_DOORBELL(MFI_STOP_ADP, instance);
	/* flush out everything still queued so callers don't hang */
	(void) mrsas_complete_pending_cmds(instance);
	return (DDI_SUCCESS);
}
6722 6723
6723 6724
/*
 * mrsas_reset_ppc - online controller reset (OCR) for the 2108 path.
 *
 * Sequence: refuse if the adapter is already dead; set
 * adapterresetinprogress under ocr_flags_mtx; disable interrupts; unlock
 * the diag register with the magic write sequence; assert
 * DIAG_RESET_ADAPTER and wait for the controller to clear it; bring the
 * firmware back to ready state; reset the producer/consumer indexes;
 * re-run init MFI; re-issue all pending commands and re-register the AEN
 * command; finally clear adapterresetinprogress.
 *
 * If the FW is still faulted after the reset, the reset is retried up to
 * MAX_FW_RESET_COUNT times before the adapter is killed.  Returns
 * DDI_SUCCESS on a completed reset, DDI_FAILURE otherwise.
 */
static int
mrsas_reset_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	uint32_t retry = 0;
	uint32_t cur_abs_reg_val;
	uint32_t fw_state;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* a dead adapter is never reset again */
	if (instance->deadadapter == 1) {
		cmn_err(CE_WARN, "mrsas_reset_ppc: "
		    "no more resets as HBA has been marked dead ");
		return (DDI_FAILURE);
	}
	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress "
	    "flag set, time %llx", gethrtime()));

	instance->func_ptr->disable_intr(instance);
retry_reset:
	/* magic unlock sequence for the diagnostic write-enable register */
	WR_IB_WRITE_SEQ(0, instance);
	WR_IB_WRITE_SEQ(4, instance);
	WR_IB_WRITE_SEQ(0xb, instance);
	WR_IB_WRITE_SEQ(2, instance);
	WR_IB_WRITE_SEQ(7, instance);
	WR_IB_WRITE_SEQ(0xd, instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
	    "to write sequence register\n"));
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);

	/* wait (100ms per poll, up to 100 polls) for diag write enable */
	while (!(status & DIAG_WRITE_ENABLE)) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit "
			    "check retry count %d\n", retry);
			return (DDI_FAILURE);
		}
	}
	/* kick off the adapter reset */
	WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);
	/* wait for the controller to clear the reset bit */
	while (status & DIAG_RESET_ADAPTER) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN,
			    "mrsas_reset_ppc: RESET FAILED. KILL adapter called\n.");

			(void) mrsas_kill_adapter(instance);
			return (DDI_FAILURE);
		}
	}
	con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mfi_state_transition_to_ready"));

	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
	if (mfi_state_transition_to_ready(instance) ||
	    debug_fw_faults_after_ocr_g == 1) {
		cur_abs_reg_val =
		    instance->func_ptr->read_fw_status_reg(instance);
		fw_state = cur_abs_reg_val & MFI_STATE_MASK;

#ifdef OCRDEBUG
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_reset_ppc :before fake: FW is not ready "
		    "FW state = 0x%x", fw_state));
		if (debug_fw_faults_after_ocr_g == 1)
			fw_state = MFI_STATE_FAULT;
#endif

		con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready "
		    "FW state = 0x%x", fw_state));

		if (fw_state == MFI_STATE_FAULT) {
			/* increment the count */
			instance->fw_fault_count_after_ocr++;
			if (instance->fw_fault_count_after_ocr
			    < MAX_FW_RESET_COUNT) {
				cmn_err(CE_WARN, "mrsas_reset_ppc: "
				    "FW is in fault after OCR count %d "
				    "Retry Reset",
				    instance->fw_fault_count_after_ocr);
				goto retry_reset;

			} else {
				/* retry budget exhausted — give up */
				cmn_err(CE_WARN, "mrsas_reset_ppc: "
				    "Max Reset Count exceeded >%d"
				    "Mark HBA as bad, KILL adapter", MAX_FW_RESET_COUNT);

				(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}
		}
	}
	/* reset the counter as FW is up after OCR */
	instance->fw_fault_count_after_ocr = 0;

	/* rewind the reply-queue producer/consumer indexes */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer, 0);

	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, 0);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    " after resetting produconsumer chck indexs:"
	    "producer %x consumer %x", *instance->producer,
	    *instance->consumer));

	/* re-initialize the firmware interface */
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_init_mfi"));
	(void) mrsas_issue_init_mfi(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_issue_init_mfi Done"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_print_pending_cmd\n"));
	(void) mrsas_print_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_print_pending_cmd done\n"));

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	/* replay everything that was in flight when the reset started */
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_pending_cmds"));
	(void) mrsas_issue_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "issue_pending_cmds done.\n"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling aen registration"));

	/* re-arm the async-event-notification command */
	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "adpterresetinprogress flag unset"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
	return (DDI_SUCCESS);
}
6879 6880
6880 6881
/*
 * mrsas_add_intrs - allocate, register, and enable interrupts of the
 * given type (FIXED/MSI/MSI-X) for this instance.
 *
 * Queries the supported and available vector counts, caps the allocation
 * at one vector (the driver has a single ISR), allocates the handle
 * table, adds mrsas_isr as the handler, and enables the interrupt either
 * block-wise (DDI_INTR_FLAG_BLOCK) or per-handle.  On any failure the
 * goto ladder below unwinds whatever was set up and DDI_FAILURE is
 * returned; DDI_SUCCESS otherwise.
 *
 * NOTE(review): kmem_zalloc(..., KM_SLEEP) cannot return NULL, so the
 * NULL check below is dead code (kept for fidelity).  Also, when
 * ddi_intr_add_handler() fails partway, the mrsas_free_handles path frees
 * handles whose handlers were already added without removing them first —
 * presumably acceptable since results are ignored; confirm before reuse.
 */
static int
mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
{

	dev_info_t *dip = instance->dip;
	int avail, actual, count;
	int i, flag, ret;

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
	    intr_type));

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
		    "ret %d count %d", ret, count));

		return (DDI_FAILURE);
	}

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
		    "ret %d avail %d", ret, avail));

		return (DDI_FAILURE);
	}
	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));

	/* Only one interrupt routine. So limit the count to 1 */
	if (count > 1) {
		count = 1;
	}

	/*
	 * Allocate an array of interrupt handlers. Currently we support
	 * only one interrupt. The framework can be extended later.
	 */
	instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
	instance->intr_htable = kmem_zalloc(instance->intr_htable_size, KM_SLEEP);
	if (instance->intr_htable == NULL) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "failed to allocate memory for intr-handle table"));
		instance->intr_htable_size = 0;
		return (DDI_FAILURE);
	}

	/* MSI/MSI-X must be allocated strictly; FIXED may be relaxed */
	flag = ((intr_type == DDI_INTR_TYPE_MSI) || (intr_type ==
	    DDI_INTR_TYPE_MSIX)) ? DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;

	/* Allocate interrupt */
	ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "avail = %d", avail));
		goto mrsas_free_htable;
	}

	if (actual < count) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "Requested = %d Received = %d", count, actual));
	}
	instance->intr_cnt = actual;

	/*
	 * Get the priority of the interrupt allocated.
	 */
	if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
	    &instance->intr_pri)) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "get priority call failed"));
		goto mrsas_free_handles;
	}

	/*
	 * Test for high level mutex. we don't support them.
	 */
	if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "High level interrupts not supported."));
		goto mrsas_free_handles;
	}

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
	    instance->intr_pri));

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		ret = ddi_intr_add_handler(instance->intr_htable[i],
		    (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
		    (caddr_t)(uintptr_t)i);

		if (ret != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
			    "failed %d", ret));
			goto mrsas_free_handles;
		}

	}

	con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));

	if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
	    &instance->intr_cap)) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
		    ret));
		goto mrsas_free_handlers;
	}

	/* enable block-wise if the implementation supports it */
	if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
		con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable"));

		(void) ddi_intr_block_enable(instance->intr_htable,
		    instance->intr_cnt);
	} else {
		con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable"));

		for (i = 0; i < instance->intr_cnt; i++) {
			(void) ddi_intr_enable(instance->intr_htable[i]);
			con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns "
			    "%d", i));
		}
	}

	return (DDI_SUCCESS);

/* unwind ladder: remove handlers, free handles, free the table */
mrsas_free_handlers:
	for (i = 0; i < actual; i++)
	{
		(void) ddi_intr_remove_handler(instance->intr_htable[i]);
	}

mrsas_free_handles:
	for (i = 0; i < actual; i++)
	{
		(void) ddi_intr_free(instance->intr_htable[i]);
	}

mrsas_free_htable:
	if (instance->intr_htable != NULL)
		kmem_free(instance->intr_htable, instance->intr_htable_size);

	instance->intr_htable =NULL;
	instance->intr_htable_size = 0;

	return (DDI_FAILURE);

}
7034 7035
7035 7036
7036 7037 static void
7037 7038 mrsas_rem_intrs(struct mrsas_instance *instance)
7038 7039 {
7039 7040 int i;
7040 7041
7041 7042 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));
7042 7043
7043 7044 /* Disable all interrupts first */
7044 7045 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7045 7046 (void) ddi_intr_block_disable(instance->intr_htable,
7046 7047 instance->intr_cnt);
7047 7048 } else {
7048 7049 for (i = 0; i < instance->intr_cnt; i++) {
7049 7050 (void) ddi_intr_disable(instance->intr_htable[i]);
7050 7051 }
7051 7052 }
7052 7053
7053 7054 /* Remove all the handlers */
7054 7055
7055 7056 for (i = 0; i < instance->intr_cnt; i++) {
7056 7057 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7057 7058 (void) ddi_intr_free(instance->intr_htable[i]);
7058 7059 }
7059 7060
7060 7061 if (instance->intr_htable != NULL)
7061 7062 kmem_free(instance->intr_htable, instance->intr_htable_size);
7062 7063
7063 7064 instance->intr_htable =NULL;
7064 7065 instance->intr_htable_size = 0;
7065 7066
7066 7067 }
7067 7068
7068 7069 static int
7069 7070 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
7070 7071 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
7071 7072 {
7072 7073 struct mrsas_instance *instance;
7073 7074 int config;
7074 7075 int rval = NDI_SUCCESS;
7075 7076
7076 7077 char *ptr = NULL;
7077 7078 int tgt, lun;
7078 7079
7079 7080 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));
7080 7081
7081 7082 if ((instance = ddi_get_soft_state(mrsas_state,
7082 7083 ddi_get_instance(parent))) == NULL) {
7083 7084 return (NDI_FAILURE);
7084 7085 }
7085 7086
7086 7087 /* Hold nexus during bus_config */
7087 7088 ndi_devi_enter(parent, &config);
7088 7089 switch (op) {
7089 7090 case BUS_CONFIG_ONE: {
7090 7091
7091 7092 /* parse wwid/target name out of name given */
7092 7093 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7093 7094 rval = NDI_FAILURE;
7094 7095 break;
7095 7096 }
7096 7097 ptr++;
7097 7098
7098 7099 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7099 7100 rval = NDI_FAILURE;
7100 7101 break;
7101 7102 }
7102 7103
7103 7104 if (lun == 0) {
7104 7105 rval = mrsas_config_ld(instance, tgt, lun, childp);
7105 7106 }
7106 7107 #ifdef PDSUPPORT
7107 7108 else if ( instance->tbolt == 1 && lun != 0) {
7108 7109 rval = mrsas_tbolt_config_pd(instance,
7109 7110 tgt, lun, childp);
7110 7111 }
7111 7112 #endif
7112 7113 else {
7113 7114 rval = NDI_FAILURE;
7114 7115 }
7115 7116
7116 7117 break;
7117 7118 }
7118 7119 case BUS_CONFIG_DRIVER:
7119 7120 case BUS_CONFIG_ALL: {
7120 7121
7121 7122 rval = mrsas_config_all_devices(instance);
7122 7123
7123 7124 rval = NDI_SUCCESS;
7124 7125 break;
7125 7126 }
7126 7127 }
7127 7128
7128 7129 if (rval == NDI_SUCCESS) {
7129 7130 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7130 7131
7131 7132 }
7132 7133 ndi_devi_exit(parent, config);
7133 7134
7134 7135 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7135 7136 rval));
7136 7137 return (rval);
7137 7138 }
7138 7139
7139 7140 static int
7140 7141 mrsas_config_all_devices(struct mrsas_instance *instance)
7141 7142 {
7142 7143 int rval, tgt;
7143 7144
7144 7145 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7145 7146 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7146 7147
7147 7148 }
7148 7149
7149 7150 #ifdef PDSUPPORT
7150 7151 /* Config PD devices connected to the card */
7151 7152 if(instance->tbolt) {
7152 7153 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7153 7154 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7154 7155 }
7155 7156 }
7156 7157 #endif
7157 7158
7158 7159 rval = NDI_SUCCESS;
7159 7160 return (rval);
7160 7161 }
7161 7162
7162 7163 static int
7163 7164 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7164 7165 {
7165 7166 char devbuf[SCSI_MAXNAMELEN];
7166 7167 char *addr;
7167 7168 char *p, *tp, *lp;
7168 7169 long num;
7169 7170
7170 7171 /* Parse dev name and address */
7171 7172 (void) strcpy(devbuf, devnm);
7172 7173 addr = "";
7173 7174 for (p = devbuf; *p != '\0'; p++) {
7174 7175 if (*p == '@') {
7175 7176 addr = p + 1;
7176 7177 *p = '\0';
7177 7178 } else if (*p == ':') {
7178 7179 *p = '\0';
7179 7180 break;
7180 7181 }
7181 7182 }
7182 7183
7183 7184 /* Parse target and lun */
7184 7185 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7185 7186 if (*p == ',') {
7186 7187 lp = p + 1;
7187 7188 *p = '\0';
7188 7189 break;
7189 7190 }
7190 7191 }
7191 7192 if (tgt && tp) {
7192 7193 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7193 7194 return (DDI_FAILURE); /* Can declare this as constant */
7194 7195 }
7195 7196 *tgt = (int)num;
7196 7197 }
7197 7198 if (lun && lp) {
7198 7199 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7199 7200 return (DDI_FAILURE);
7200 7201 }
7201 7202 *lun = (int)num;
7202 7203 }
7203 7204 return (DDI_SUCCESS); /* Success case */
7204 7205 }
7205 7206
7206 7207 static int
7207 7208 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
7208 7209 uint8_t lun, dev_info_t **ldip)
7209 7210 {
7210 7211 struct scsi_device *sd;
7211 7212 dev_info_t *child;
7212 7213 int rval;
7213 7214
7214 7215 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
7215 7216 tgt, lun));
7216 7217
7217 7218 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
7218 7219 if (ldip) {
7219 7220 *ldip = child;
7220 7221 }
7221 7222 if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
7222 7223 rval = mrsas_service_evt(instance, tgt, 0,
7223 7224 MRSAS_EVT_UNCONFIG_TGT, NULL);
7224 7225 con_log(CL_ANN1, (CE_WARN,
7225 7226 "mr_sas: DELETING STALE ENTRY rval = %d "
7226 7227 "tgt id = %d ", rval, tgt));
7227 7228 return (NDI_FAILURE);
7228 7229 }
7229 7230 return (NDI_SUCCESS);
7230 7231 }
7231 7232
7232 7233 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
7233 7234 if (sd == NULL) {
7234 7235 con_log(CL_ANN1, (CE_WARN,
7235 7236 "mrsas_config_ld: failed to allocate mem for scsi_device"));
7236 7237 return (NDI_FAILURE);
7237 7238 }
7238 7239 sd->sd_address.a_hba_tran = instance->tran;
7239 7240 sd->sd_address.a_target = (uint16_t)tgt;
7240 7241 sd->sd_address.a_lun = (uint8_t)lun;
7241 7242
7242 7243 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
7243 7244 rval = mrsas_config_scsi_device(instance, sd, ldip);
7244 7245 else
7245 7246 rval = NDI_FAILURE;
7246 7247
7247 7248 /* sd_unprobe is blank now. Free buffer manually */
7248 7249 if (sd->sd_inq) {
7249 7250 kmem_free(sd->sd_inq, SUN_INQSIZE);
7250 7251 sd->sd_inq = (struct scsi_inquiry *)NULL;
7251 7252 }
7252 7253
7253 7254 kmem_free(sd, sizeof (struct scsi_device));
7254 7255 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
7255 7256 rval));
7256 7257 return (rval);
7257 7258 }
7258 7259
7259 7260 int
7260 7261 mrsas_config_scsi_device(struct mrsas_instance *instance,
7261 7262 struct scsi_device *sd, dev_info_t **dipp)
7262 7263 {
7263 7264 char *nodename = NULL;
7264 7265 char **compatible = NULL;
7265 7266 int ncompatible = 0;
7266 7267 char *childname;
7267 7268 dev_info_t *ldip = NULL;
7268 7269 int tgt = sd->sd_address.a_target;
7269 7270 int lun = sd->sd_address.a_lun;
7270 7271 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7271 7272 int rval;
7272 7273
7273 7274 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
7274 7275 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7275 7276 NULL, &nodename, &compatible, &ncompatible);
7276 7277
7277 7278 if (nodename == NULL) {
7278 7279 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
7279 7280 "for t%dL%d", tgt, lun));
7280 7281 rval = NDI_FAILURE;
7281 7282 goto finish;
7282 7283 }
7283 7284
7284 7285 childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
7285 7286 con_log(CL_DLEVEL1, (CE_NOTE,
7286 7287 "mr_sas: Childname = %2s nodename = %s", childname, nodename));
7287 7288
7288 7289 /* Create a dev node */
7289 7290 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
7290 7291 con_log(CL_DLEVEL1, (CE_NOTE,
7291 7292 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
7292 7293 if (rval == NDI_SUCCESS) {
7293 7294 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
7294 7295 DDI_PROP_SUCCESS) {
7295 7296 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7296 7297 "property for t%dl%d target", tgt, lun));
7297 7298 rval = NDI_FAILURE;
7298 7299 goto finish;
7299 7300 }
7300 7301 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
7301 7302 DDI_PROP_SUCCESS) {
7302 7303 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7303 7304 "property for t%dl%d lun", tgt, lun));
7304 7305 rval = NDI_FAILURE;
7305 7306 goto finish;
7306 7307 }
7307 7308
7308 7309 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7309 7310 "compatible", compatible, ncompatible) !=
7310 7311 DDI_PROP_SUCCESS) {
7311 7312 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7312 7313 "property for t%dl%d compatible", tgt, lun));
7313 7314 rval = NDI_FAILURE;
7314 7315 goto finish;
7315 7316 }
7316 7317
7317 7318 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7318 7319 if (rval != NDI_SUCCESS) {
7319 7320 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
7320 7321 "t%dl%d", tgt, lun));
7321 7322 ndi_prop_remove_all(ldip);
7322 7323 (void) ndi_devi_free(ldip);
7323 7324 } else {
7324 7325 con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :"
7325 7326 "0 t%dl%d", tgt, lun));
7326 7327 }
7327 7328
7328 7329 }
7329 7330 finish:
7330 7331 if (dipp) {
7331 7332 *dipp = ldip;
7332 7333 }
7333 7334
7334 7335 con_log(CL_DLEVEL1, (CE_NOTE,
7335 7336 "mr_sas: config_scsi_device rval = %d t%dL%d",
7336 7337 rval, tgt, lun));
7337 7338 scsi_hba_nodename_compatible_free(nodename, compatible);
7338 7339 return (rval);
7339 7340 }
7340 7341
7341 7342 /*ARGSUSED*/
7342 7343 int
7343 7344 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7344 7345 uint64_t wwn)
7345 7346 {
7346 7347 struct mrsas_eventinfo *mrevt = NULL;
7347 7348
7348 7349 con_log(CL_ANN1, (CE_NOTE,
7349 7350 "mrsas_service_evt called for t%dl%d event = %d",
7350 7351 tgt, lun, event));
7351 7352
7352 7353 if ((instance->taskq == NULL) || (mrevt =
7353 7354 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7354 7355 return (ENOMEM);
7355 7356 }
7356 7357
7357 7358 mrevt->instance = instance;
7358 7359 mrevt->tgt = tgt;
7359 7360 mrevt->lun = lun;
7360 7361 mrevt->event = event;
7361 7362 mrevt->wwn = wwn;
7362 7363
7363 7364 if ((ddi_taskq_dispatch(instance->taskq,
7364 7365 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7365 7366 DDI_SUCCESS) {
7366 7367 con_log(CL_ANN1, (CE_NOTE,
7367 7368 "mr_sas: Event task failed for t%dl%d event = %d",
7368 7369 tgt, lun, event));
7369 7370 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7370 7371 return (DDI_FAILURE);
7371 7372 }
7372 7373
7373 7374 return (DDI_SUCCESS);
7374 7375 }
7375 7376
7376 7377 static void
7377 7378 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
7378 7379 {
7379 7380 struct mrsas_instance *instance = mrevt->instance;
7380 7381 dev_info_t *dip, *pdip;
7381 7382 int circ1 = 0;
7382 7383 char *devname;
7383 7384
7384 7385 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
7385 7386 " tgt %d lun %d event %d",
7386 7387 mrevt->tgt, mrevt->lun, mrevt->event));
7387 7388
7388 7389 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
7389 7390 mutex_enter(&instance->config_dev_mtx);
7390 7391 dip = instance->mr_ld_list[mrevt->tgt].dip;
7391 7392 mutex_exit(&instance->config_dev_mtx);
7392 7393 }
7393 7394
7394 7395 #ifdef PDSUPPORT
7395 7396 else {
7396 7397 mutex_enter(&instance->config_dev_mtx);
7397 7398 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7398 7399 mutex_exit(&instance->config_dev_mtx);
7399 7400 }
7400 7401 #endif
7401 7402
7402 7403 ndi_devi_enter(instance->dip, &circ1);
7403 7404 switch (mrevt->event) {
7404 7405 case MRSAS_EVT_CONFIG_TGT:
7405 7406 if (dip == NULL) {
7406 7407
7407 7408 if (mrevt->lun == 0) {
7408 7409 (void) mrsas_config_ld(instance, mrevt->tgt,
7409 7410 0, NULL);
7410 7411 }
7411 7412 #ifdef PDSUPPORT
7412 7413 else if (instance->tbolt) {
7413 7414 (void) mrsas_tbolt_config_pd(instance,
7414 7415 mrevt->tgt,
7415 7416 1, NULL);
7416 7417 }
7417 7418 #endif
7418 7419 con_log(CL_ANN1, (CE_NOTE,
7419 7420 "mr_sas: EVT_CONFIG_TGT called:"
7420 7421 " for tgt %d lun %d event %d",
7421 7422 mrevt->tgt, mrevt->lun, mrevt->event));
7422 7423
7423 7424 } else {
7424 7425 con_log(CL_ANN1, (CE_NOTE,
7425 7426 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7426 7427 " for tgt %d lun %d event %d",
7427 7428 mrevt->tgt, mrevt->lun, mrevt->event));
7428 7429 }
7429 7430 break;
7430 7431 case MRSAS_EVT_UNCONFIG_TGT:
7431 7432 if (dip) {
7432 7433 if (i_ddi_devi_attached(dip)) {
7433 7434
7434 7435 pdip = ddi_get_parent(dip);
7435 7436
7436 7437 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
7437 7438 (void) ddi_deviname(dip, devname);
7438 7439
7439 7440 (void) devfs_clean(pdip, devname + 1,
7440 7441 DV_CLEAN_FORCE);
7441 7442 kmem_free(devname, MAXNAMELEN + 1);
7442 7443 }
7443 7444 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7444 7445 con_log(CL_ANN1, (CE_NOTE,
7445 7446 "mr_sas: EVT_UNCONFIG_TGT called:"
7446 7447 " for tgt %d lun %d event %d",
7447 7448 mrevt->tgt, mrevt->lun, mrevt->event));
7448 7449 } else {
7449 7450 con_log(CL_ANN1, (CE_NOTE,
7450 7451 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7451 7452 " for tgt %d lun %d event %d",
7452 7453 mrevt->tgt, mrevt->lun, mrevt->event));
7453 7454 }
7454 7455 break;
7455 7456 }
7456 7457 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7457 7458 ndi_devi_exit(instance->dip, circ1);
7458 7459 }
7459 7460
7460 7461
7461 7462 int
7462 7463 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7463 7464 {
7464 7465 union scsi_cdb *cdbp;
7465 7466 uint16_t page_code;
7466 7467 struct scsa_cmd *acmd;
7467 7468 struct buf *bp;
7468 7469 struct mode_header *modehdrp;
7469 7470
7470 7471 cdbp = (void *)pkt->pkt_cdbp;
7471 7472 page_code = cdbp->cdb_un.sg.scsi[0];
7472 7473 acmd = PKT2CMD(pkt);
7473 7474 bp = acmd->cmd_buf;
7474 7475 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) {
7475 7476 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7476 7477 /* ADD pkt statistics as Command failed. */
7477 7478 return (NULL);
7478 7479 }
7479 7480
7480 7481 bp_mapin(bp);
7481 7482 bzero(bp->b_un.b_addr, bp->b_bcount);
7482 7483
7483 7484 switch (page_code) {
7484 7485 case 0x3: {
7485 7486 struct mode_format *page3p = NULL;
7486 7487 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7487 7488 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7488 7489
7489 7490 page3p = (void *)((caddr_t)modehdrp +
7490 7491 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7491 7492 page3p->mode_page.code = 0x3;
7492 7493 page3p->mode_page.length =
7493 7494 (uchar_t)(sizeof (struct mode_format));
7494 7495 page3p->data_bytes_sect = 512;
7495 7496 page3p->sect_track = 63;
7496 7497 break;
7497 7498 }
7498 7499 case 0x4: {
7499 7500 struct mode_geometry *page4p = NULL;
7500 7501 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7501 7502 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7502 7503
7503 7504 page4p = (void *)((caddr_t)modehdrp +
7504 7505 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7505 7506 page4p->mode_page.code = 0x4;
7506 7507 page4p->mode_page.length =
7507 7508 (uchar_t)(sizeof (struct mode_geometry));
7508 7509 page4p->heads = 255;
7509 7510 page4p->rpm = 10000;
7510 7511 break;
7511 7512 }
7512 7513 default:
7513 7514 break;
7514 7515 }
7515 7516 return (NULL);
7516 7517 }
7517 7518
|
↓ open down ↓ |
4857 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX