1 /*
2 * mr_sas.c: source for mr_sas driver
3 *
4 * Solaris MegaRAID device driver for SAS2.0 controllers
5 * Copyright (c) 2008-2012, LSI Logic Corporation.
6 * All rights reserved.
7 *
8 * Version:
9 * Author:
10 * Swaminathan K S
11 * Arun Chandrashekhar
12 * Manju R
13 * Rasheed
14 * Shakeel Bukhari
15 */
16
17 #include <sys/types.h>
18 #include <sys/param.h>
19 #include <sys/file.h>
20 #include <sys/errno.h>
21 #include <sys/open.h>
22 #include <sys/cred.h>
23 #include <sys/modctl.h>
24 #include <sys/conf.h>
25 #include <sys/devops.h>
26 #include <sys/cmn_err.h>
27 #include <sys/kmem.h>
28 #include <sys/stat.h>
29 #include <sys/mkdev.h>
30 #include <sys/pci.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/ddi.h>
33 #include <sys/sunddi.h>
34 #include <sys/atomic.h>
35 #include <sys/signal.h>
36 #include <sys/byteorder.h>
37 #include <sys/sdt.h>
38 #include <sys/fs/dv_node.h> /* devfs_clean */
39
40 #include "mr_sas.h"
41
42 /*
43 * Local static data
44 */
/*
 * Soft-state anchor for all mrsas_instance structures; initialized in
 * _init() via ddi_soft_state_init() and torn down in _fini().
 */
static void *mrsas_state = NULL;
/* When non-zero, DDI_DMA_RELAXED_ORDERING is ORed into the DMA attributes. */
static volatile boolean_t mrsas_relaxed_ordering = 0;
/* Global console-log verbosity for con_log(); CL_NONE disables logging. */
volatile int debug_level_g = CL_NONE;

/* Prefer MSI/MSI-X interrupts; may be cleared by the "mrsas-enable-msi" prop. */
static volatile int msi_enable = 1;

/* Default Timeout value to issue online controller reset */
volatile int debug_timeout_g = 0xF0;
/* Simulate consecutive firmware fault */
static volatile int debug_fw_faults_after_ocr_g = 0;
#ifdef OCRDEBUG
/* Simulate three consecutive timeout for an IO */
static volatile int debug_consecutive_timeout_after_ocr_g = 0;
#endif

/* Enable OCR on firmware fault */
static volatile int debug_support_ocr_isr_g = 0;
/*
 * Weak bindings: resolve to NULL when the SCSA framework does not export
 * these HBA open/close/ioctl entry points on this platform.
 */
#pragma weak scsi_hba_open
#pragma weak scsi_hba_close
#pragma weak scsi_hba_ioctl
65
66
67
/*
 * Hardware-access dispatch table for MFI-based (2108 "Liberator" class)
 * controllers; selected in mrsas_attach() from the PCI device ID.
 */
static struct mrsas_function_template mrsas_function_template_ppc = {
	.read_fw_status_reg = read_fw_status_reg_ppc,
	.issue_cmd = issue_cmd_ppc,
	.issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
	.issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
	.enable_intr = enable_intr_ppc,
	.disable_intr = disable_intr_ppc,
	.intr_ack = intr_ack_ppc,
	.init_adapter = mrsas_init_adapter_ppc
	/* .reset_adapter = mrsas_reset_adapter_ppc */
};
79
80
/*
 * Hardware-access dispatch table for Fusion/Thunderbolt (2208 class)
 * controllers; selected in mrsas_attach() from the PCI device ID.
 */
static struct mrsas_function_template mrsas_function_template_fusion = {
	.read_fw_status_reg = tbolt_read_fw_status_reg,
	.issue_cmd = tbolt_issue_cmd,
	.issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
	.issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
	.enable_intr = tbolt_enable_intr,
	.disable_intr = tbolt_disable_intr,
	.intr_ack = tbolt_intr_ack,
	.init_adapter = mrsas_init_adapter_tbolt
	/* .reset_adapter = mrsas_reset_adapter_tbolt */
};
92
93
/*
 * Baseline DMA attributes for controller data transfers.  Copied and
 * specialized (sgllen, flags) per instance in mrsas_attach() before
 * being handed to scsi_hba_attach_setup().
 */
ddi_dma_attr_t mrsas_generic_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xFFFFFFFFU,		/* high DMA address range */
	0xFFFFFFFFU,		/* DMA counter register */
	8,			/* DMA address alignment */
	0x07,			/* DMA burstsizes */
	1,			/* min DMA size */
	0xFFFFFFFFU,		/* max DMA size */
	0xFFFFFFFFU,		/* segment boundary */
	MRSAS_MAX_SGE_CNT,	/* dma_attr_sglen */
	512,			/* granularity of device */
	0			/* bus specific DMA flags */
};
108
/* Maximum transfer size (16 MB) advertised for MFI-based controllers. */
int32_t mrsas_max_cap_maxxfer = 0x1000000;

/*
 * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG.
 * Limit the maximum transfer size to 256K (512 * 512 bytes).
 */
uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
113
114 /*
115 * cb_ops contains base level routines
116 */
/*
 * cb_ops contains base level routines: character-device entry points used
 * by management applications through the ioctl minor nodes.  All block and
 * stream entry points are unsupported (nodev/nochpoll).
 */
static struct cb_ops mrsas_cb_ops = {
	mrsas_open,		/* open */
	mrsas_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mrsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	nodev,			/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
137
138 /*
139 * dev_ops contains configuration routines
140 */
/*
 * dev_ops contains configuration routines: autoconfiguration entry points.
 * The reset/quiesce slots differ by release: S11 obsoleted the reset(9E)
 * entry and added quiesce(9E), hence the conditional initializers.
 */
static struct dev_ops mrsas_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	mrsas_getinfo,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mrsas_attach,		/* attach */
	mrsas_detach,		/* detach */
#if defined(__SunOS_5_11)
	nodev,
#else
	mrsas_reset,		/* reset */
#endif /* defined(__SunOS_5_11) */
	&mrsas_cb_ops,		/* char/block ops */
	NULL,			/* bus ops */
	NULL,			/* power */
#ifdef __SunOS_5_11
	mrsas_quiesce		/* quiesce */
#endif /* __SunOS_5_11 */

};
162
/* Force the SCSA framework module to be loaded before this driver. */
char _depends_on[] = "misc/scsi";

/* Module linkage: this module is a device driver using mrsas_ops. */
static struct modldrv modldrv = {
	&mod_driverops,		/* module type - driver */
	MRSAS_VERSION,
	&mrsas_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev - must be MODREV_1 */
	&modldrv,		/* ml_linkage */
	NULL			/* end of driver linkage */
};

/*
 * Register access attributes: controller registers are little-endian
 * structures and accesses must not be reordered (DDI_STRICTORDER_ACC).
 */
static struct ddi_device_acc_attr endian_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};


/* Fast-Path I/O enabled by default; cleared by the "mrsas-enable-fp" prop. */
unsigned int enable_fp = 1;
186
187
188 /*
189 * ************************************************************************** *
190 * *
191 * common entry points - for loadable kernel modules *
192 * *
193 * ************************************************************************** *
194 */
195
196 /*
197 * _init - initialize a loadable module
198 * @void
199 *
200 * The driver should perform any one-time resource allocation or data
201 * initialization during driver loading in _init(). For example, the driver
202 * should initialize any mutexes global to the driver in this routine.
203 * The driver should not, however, use _init() to allocate or initialize
204 * anything that has to do with a particular instance of the device.
205 * Per-instance initialization must be done in attach().
206 */
207 int
208 _init(void)
209 {
210 int ret;
211
212 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
213
214 ret = ddi_soft_state_init(&mrsas_state,
215 sizeof (struct mrsas_instance), 0);
216
217 if (ret != DDI_SUCCESS) {
218 cmn_err(CE_WARN, "mr_sas: could not init state");
219 return (ret);
220 }
221
222 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
223 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
224 ddi_soft_state_fini(&mrsas_state);
225 return (ret);
226 }
227
228 ret = mod_install(&modlinkage);
229
230 if (ret != DDI_SUCCESS) {
231 cmn_err(CE_WARN, "mr_sas: mod_install failed");
232 scsi_hba_fini(&modlinkage);
233 ddi_soft_state_fini(&mrsas_state);
234 }
235
236 return (ret);
237 }
238
239 /*
240 * _info - returns information about a loadable module.
241 * @void
242 *
243 * _info() is called to return module information. This is a typical entry
244 * point that does predefined role. It simply calls mod_info().
245 */
246 int
247 _info(struct modinfo *modinfop)
248 {
249 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
250
251 return (mod_info(&modlinkage, modinfop));
252 }
253
254 /*
255 * _fini - prepare a loadable module for unloading
256 * @void
257 *
258 * In _fini(), the driver should release any resources that were allocated in
259 * _init(). The driver must remove itself from the system module list.
260 */
261 int
262 _fini(void)
263 {
264 int ret;
265
266 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
267
268 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS)
269 {
270 con_log(CL_ANN1, (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
271 return (ret);
272 }
273
274 scsi_hba_fini(&modlinkage);
275 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
276
277 ddi_soft_state_fini(&mrsas_state);
278 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
279
280 return (ret);
281 }
282
283
284 /*
285 * ************************************************************************** *
286 * *
287 * common entry points - for autoconfiguration *
288 * *
289 * ************************************************************************** *
290 */
291 /*
292 * attach - adds a device to the system as part of initialization
293 * @dip:
294 * @cmd:
295 *
296 * The kernel calls a driver's attach() entry point to attach an instance of
297 * a device (for MegaRAID, it is instance of a controller) or to resume
298 * operation for an instance of a device that has been suspended or has been
299 * shut down by the power management framework
300 * The attach() entry point typically includes the following types of
301 * processing:
302 * - allocate a soft-state structure for the device instance (for MegaRAID,
303 * controller instance)
304 * - initialize per-instance mutexes
305 * - initialize condition variables
306 * - register the device's interrupts (for MegaRAID, controller's interrupts)
307 * - map the registers and memory of the device instance (for MegaRAID,
308 * controller instance)
309 * - create minor device nodes for the device instance (for MegaRAID,
310 * controller instance)
311 * - report that the device instance (for MegaRAID, controller instance) has
312 * attached
313 */
314 /* #if __SunOS_5_11 */
315 #if 0
316 #define DDI_PM_RESUME DDI_PM_RESUME_OBSOLETE
317 #define DDI_PM_SUSPEND DDI_PM_SUSPEND_OBSOLETE
318 #endif // __SunOS_5_11
319 static int
320 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
321 {
322 int instance_no;
323 int nregs;
324 int i = 0;
325 uint8_t irq;
326 uint16_t vendor_id;
327 uint16_t device_id;
328 uint16_t subsysvid;
329 uint16_t subsysid;
330 uint16_t command;
331 off_t reglength = 0;
332 int intr_types = 0;
333 char *data;
334
335 scsi_hba_tran_t *tran;
336 ddi_dma_attr_t tran_dma_attr;
337 struct mrsas_instance *instance;
338
339 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
340
341 /* CONSTCOND */
342 ASSERT(NO_COMPETING_THREADS);
343
344 instance_no = ddi_get_instance(dip);
345
346 /*
347 * check to see whether this device is in a DMA-capable slot.
348 */
349 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
350 cmn_err(CE_WARN,
351 "mr_sas%d: Device in slave-only slot, unused",
352 instance_no);
353 return (DDI_FAILURE);
354 }
355
356 switch (cmd) {
357 case DDI_ATTACH:
358
359 /* allocate the soft state for the instance */
360 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
361 != DDI_SUCCESS) {
362 cmn_err(CE_WARN,
363 "mr_sas%d: Failed to allocate soft state",
364 instance_no);
365
366 return (DDI_FAILURE);
367 }
368
369 instance = (struct mrsas_instance *)ddi_get_soft_state
370 (mrsas_state, instance_no);
371
372 if (instance == NULL) {
373 cmn_err(CE_WARN,
374 "mr_sas%d: Bad soft state", instance_no);
375
376 ddi_soft_state_free(mrsas_state, instance_no);
377
378 return (DDI_FAILURE);
379 }
380
381 bzero((caddr_t)instance,
382 sizeof (struct mrsas_instance));
383
384 instance->unroll.softs = 1;
385
386 /* Setup the PCI configuration space handles */
387 if (pci_config_setup(dip, &instance->pci_handle) !=
388 DDI_SUCCESS) {
389 cmn_err(CE_WARN,
390 "mr_sas%d: pci config setup failed ",
391 instance_no);
392
393 ddi_soft_state_free(mrsas_state, instance_no);
394 return (DDI_FAILURE);
395 }
396 if (instance->pci_handle == NULL) {
397 cmn_err(CE_WARN,
398 "mr_sas%d: pci config setup failed ",
399 instance_no);
400 ddi_soft_state_free(mrsas_state, instance_no);
401 return (DDI_FAILURE);
402 }
403
404
405
406 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
407 cmn_err(CE_WARN,
408 "mr_sas: failed to get registers.");
409
410 pci_config_teardown(&instance->pci_handle);
411 ddi_soft_state_free(mrsas_state, instance_no);
412 return (DDI_FAILURE);
413 }
414
415 vendor_id = pci_config_get16(instance->pci_handle,
416 PCI_CONF_VENID);
417 device_id = pci_config_get16(instance->pci_handle,
418 PCI_CONF_DEVID);
419
420 subsysvid = pci_config_get16(instance->pci_handle,
421 PCI_CONF_SUBVENID);
422 subsysid = pci_config_get16(instance->pci_handle,
423 PCI_CONF_SUBSYSID);
424
425 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
426 (pci_config_get16(instance->pci_handle,
427 PCI_CONF_COMM) | PCI_COMM_ME));
428 irq = pci_config_get8(instance->pci_handle,
429 PCI_CONF_ILINE);
430
431 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
432 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
433 instance_no, vendor_id, device_id, subsysvid,
434 subsysid, irq, MRSAS_VERSION));
435
436 /* enable bus-mastering */
437 command = pci_config_get16(instance->pci_handle,
438 PCI_CONF_COMM);
439
440 if (!(command & PCI_COMM_ME)) {
441 command |= PCI_COMM_ME;
442
443 pci_config_put16(instance->pci_handle,
444 PCI_CONF_COMM, command);
445
446 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
447 "enable bus-mastering", instance_no));
448 } else {
449 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
450 "bus-mastering already set", instance_no));
451 }
452
453 /* initialize function pointers */
454 switch(device_id) {
455 case PCI_DEVICE_ID_LSI_TBOLT:
456 case PCI_DEVICE_ID_LSI_INVADER:
457 con_log(CL_ANN, (CE_NOTE,
458 "mr_sas: 2208 T.B. device detected"));
459
460 instance->func_ptr = &mrsas_function_template_fusion;
461 instance->tbolt = 1;
462 break;
463
464 case PCI_DEVICE_ID_LSI_2108VDE:
465 case PCI_DEVICE_ID_LSI_2108V:
466 con_log(CL_ANN, (CE_NOTE,
467 "mr_sas: 2108 Liberator device detected"));
468
469 instance->func_ptr = &mrsas_function_template_ppc;
470 break;
471
472 default:
473 cmn_err(CE_WARN,
474 "mr_sas: Invalid device detected");
475
476 pci_config_teardown(&instance->pci_handle);
477 ddi_soft_state_free(mrsas_state, instance_no);
478 return (DDI_FAILURE);
479
480 }
481
482 instance->baseaddress = pci_config_get32(
483 instance->pci_handle, PCI_CONF_BASE0);
484 instance->baseaddress &= 0x0fffc;
485
486 instance->dip = dip;
487 instance->vendor_id = vendor_id;
488 instance->device_id = device_id;
489 instance->subsysvid = subsysvid;
490 instance->subsysid = subsysid;
491 instance->instance = instance_no;
492
493
494 /* Setup register map */
495 if ((ddi_dev_regsize(instance->dip,
496 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) ||
497 reglength < MINIMUM_MFI_MEM_SZ) {
498 goto fail_attach;
499 }
500 if (reglength > DEFAULT_MFI_MEM_SZ) {
501 reglength = DEFAULT_MFI_MEM_SZ;
502 con_log(CL_DLEVEL1, (CE_NOTE,
503 "mr_sas: register length to map is "
504 "0x%lx bytes", reglength));
505 }
506 if (ddi_regs_map_setup(instance->dip,
507 REGISTER_SET_IO_2108, &instance->regmap, 0,
508 reglength, &endian_attr, &instance->regmap_handle)
509 != DDI_SUCCESS) {
510 cmn_err(CE_WARN,
511 "mr_sas: couldn't map control registers");
512 goto fail_attach;
513 }
514 if (instance->regmap_handle == NULL) {
515 cmn_err(CE_WARN,
516 "mr_sas: couldn't map control registers");
517 goto fail_attach;
518 }
519
520 instance->unroll.regs = 1;
521
522 /*
523 * Disable Interrupt Now.
524 * Setup Software interrupt
525 */
526 instance->func_ptr->disable_intr(instance);
527
528 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
529 "mrsas-enable-msi", &data) == DDI_SUCCESS) {
530 if (strncmp(data, "no", 3) == 0) {
531 msi_enable = 0;
532 con_log(CL_ANN1, (CE_WARN,
533 "msi_enable = %d disabled",
534 msi_enable));
535 }
536 ddi_prop_free(data);
537 }
538
539 con_log(CL_DLEVEL1, (CE_NOTE, "msi_enable = %d", msi_enable));
540
541 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
542 "mrsas-enable-fp", &data) == DDI_SUCCESS) {
543 if (strncmp(data, "no", 3) == 0) {
544 enable_fp = 0;
545 cmn_err(CE_NOTE,
546 "enable_fp = %d, Fast-Path disabled.\n",
547 enable_fp);
548 }
549
550 ddi_prop_free(data);
551 }
552
553 cmn_err(CE_NOTE, "enable_fp = %d\n", enable_fp);
554
555 /* Check for all supported interrupt types */
556 if (ddi_intr_get_supported_types(
557 dip, &intr_types) != DDI_SUCCESS) {
558 cmn_err(CE_WARN,
559 "ddi_intr_get_supported_types() failed");
560 goto fail_attach;
561 }
562
563 con_log(CL_DLEVEL1, (CE_NOTE,
564 "ddi_intr_get_supported_types() ret: 0x%x",
565 intr_types));
566
567 /* Initialize and Setup Interrupt handler */
568 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
569 if (mrsas_add_intrs(instance,
570 DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) {
571 cmn_err(CE_WARN,
572 "MSIX interrupt query failed");
573 goto fail_attach;
574 }
575 instance->intr_type = DDI_INTR_TYPE_MSIX;
576 } else if (msi_enable && (intr_types &
577 DDI_INTR_TYPE_MSI)) {
578 if (mrsas_add_intrs(instance,
579 DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
580 cmn_err(CE_WARN,
581 "MSI interrupt query failed");
582 goto fail_attach;
583 }
584 instance->intr_type = DDI_INTR_TYPE_MSI;
585 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
586 msi_enable = 0;
587 if (mrsas_add_intrs(instance,
588 DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
589 cmn_err(CE_WARN,
590 "FIXED interrupt query failed");
591 goto fail_attach;
592 }
593 instance->intr_type = DDI_INTR_TYPE_FIXED;
594 } else {
595 cmn_err(CE_WARN, "Device cannot "
596 "suppport either FIXED or MSI/X "
597 "interrupts");
598 goto fail_attach;
599 }
600
601 instance->unroll.intr = 1;
602
603
604 /* setup the mfi based low level driver */
605 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
606 cmn_err(CE_WARN, "mr_sas: "
607 "could not initialize the low level driver");
608
609 goto fail_attach;
610 }
611
612 /* Initialize all Mutex */
613 INIT_LIST_HEAD(&instance->completed_pool_list);
614 mutex_init(&instance->completed_pool_mtx,
615 "completed_pool_mtx", MUTEX_DRIVER,
616 DDI_INTR_PRI(instance->intr_pri));
617
618 mutex_init(&instance->sync_map_mtx,
619 "sync_map_mtx", MUTEX_DRIVER,
620 DDI_INTR_PRI(instance->intr_pri));
621
622 mutex_init(&instance->app_cmd_pool_mtx,
623 "app_cmd_pool_mtx", MUTEX_DRIVER,
624 DDI_INTR_PRI(instance->intr_pri));
625
626 mutex_init(&instance->config_dev_mtx, "config_dev_mtx",
627 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
628
629 mutex_init(&instance->cmd_pend_mtx, "cmd_pend_mtx",
630 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
631
632 mutex_init(&instance->ocr_flags_mtx, "ocr_flags_mtx",
633 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
634
635 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
636 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
637 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
638
639 mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx",
640 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
641
642 mutex_init(&instance->reg_write_mtx,"reg_write_mtx",
643 MUTEX_DRIVER,DDI_INTR_PRI(instance->intr_pri));
644
645 if (instance->tbolt) {
646 mutex_init(&instance->cmd_app_pool_mtx,
647 "cmd_app_pool_mtx", MUTEX_DRIVER,
648 DDI_INTR_PRI(instance->intr_pri));
649
650 mutex_init(&instance->chip_mtx,
651 "chip_mtx", MUTEX_DRIVER,
652 DDI_INTR_PRI(instance->intr_pri));
653
654 }
655
656 instance->unroll.mutexs = 1;
657
658 instance->timeout_id = (timeout_id_t)-1;
659
660 /* Register our soft-isr for highlevel interrupts. */
661 instance->isr_level = instance->intr_pri;
662 if (!(instance->tbolt)) {
663 if (instance->isr_level == HIGH_LEVEL_INTR) {
664 if (ddi_add_softintr(dip,
665 DDI_SOFTINT_HIGH,
666 &instance->soft_intr_id,
667 NULL, NULL, mrsas_softintr,
668 (caddr_t)instance) !=
669 DDI_SUCCESS) {
670 cmn_err(CE_WARN,
671 "Software ISR "
672 "did not register");
673
674 goto fail_attach;
675 }
676
677 instance->unroll.soft_isr = 1;
678
679 }
680 }
681
682 instance->softint_running = 0;
683
684 /* Allocate a transport structure */
685 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
686
687 if (tran == NULL) {
688 cmn_err(CE_WARN,
689 "scsi_hba_tran_alloc failed");
690 goto fail_attach;
691 }
692
693 instance->tran = tran;
694 instance->unroll.tran = 1;
695
696 tran->tran_hba_private = instance;
697 tran->tran_tgt_init = mrsas_tran_tgt_init;
698 tran->tran_tgt_probe = scsi_hba_probe;
699 tran->tran_tgt_free = mrsas_tran_tgt_free;
700 if (instance->tbolt) {
701 tran->tran_init_pkt =
702 mrsas_tbolt_tran_init_pkt;
703 tran->tran_start =
704 mrsas_tbolt_tran_start;
705 } else {
706 tran->tran_init_pkt = mrsas_tran_init_pkt;
707 tran->tran_start = mrsas_tran_start;
708 }
709 tran->tran_abort = mrsas_tran_abort;
710 tran->tran_reset = mrsas_tran_reset;
711 tran->tran_getcap = mrsas_tran_getcap;
712 tran->tran_setcap = mrsas_tran_setcap;
713 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
714 tran->tran_dmafree = mrsas_tran_dmafree;
715 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
716 tran->tran_quiesce = mrsas_tran_quiesce;
717 tran->tran_unquiesce = mrsas_tran_unquiesce;
718 tran->tran_bus_config = mrsas_tran_bus_config;
719
720 if (mrsas_relaxed_ordering)
721 mrsas_generic_dma_attr.dma_attr_flags |=
722 DDI_DMA_RELAXED_ORDERING;
723
724
725 tran_dma_attr = mrsas_generic_dma_attr;
726 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
727
728 /* Attach this instance of the hba */
729 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
730 != DDI_SUCCESS) {
731 cmn_err(CE_WARN,
732 "scsi_hba_attach failed");
733
734 goto fail_attach;
735 }
736 instance->unroll.tranSetup = 1;
737 con_log(CL_ANN1, (CE_CONT,
738 "scsi_hba_attach_setup() done."));
739
740
741 /* create devctl node for cfgadm command */
742 if (ddi_create_minor_node(dip, "devctl",
743 S_IFCHR, INST2DEVCTL(instance_no),
744 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
745 cmn_err(CE_WARN,
746 "mr_sas: failed to create devctl node.");
747
748 goto fail_attach;
749 }
750
751 instance->unroll.devctl = 1;
752
753 /* create scsi node for cfgadm command */
754 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
755 INST2SCSI(instance_no),
756 DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
757 DDI_FAILURE) {
758 cmn_err(CE_WARN,
759 "mr_sas: failed to create scsi node.");
760
761 goto fail_attach;
762 }
763
764 instance->unroll.scsictl = 1;
765
766 (void) sprintf(instance->iocnode, "%d:lsirdctl",
767 instance_no);
768
769 /*
770 * Create a node for applications
771 * for issuing ioctl to the driver.
772 */
773 if (ddi_create_minor_node(dip, instance->iocnode,
774 S_IFCHR, INST2LSIRDCTL(instance_no),
775 DDI_PSEUDO, 0) == DDI_FAILURE) {
776 cmn_err(CE_WARN,
777 "mr_sas: failed to create ioctl node.");
778
779 goto fail_attach;
780 }
781
782 instance->unroll.ioctl = 1;
783
784 /* Create a taskq to handle dr events */
785 if ((instance->taskq = ddi_taskq_create(dip,
786 "mrsas_dr_taskq", 1,
787 TASKQ_DEFAULTPRI, 0)) == NULL) {
788 cmn_err(CE_WARN,
789 "mr_sas: failed to create taskq ");
790 instance->taskq = NULL;
791 goto fail_attach;
792 }
793 instance->unroll.taskq = 1;
794 con_log(CL_ANN1, (CE_CONT,
795 "ddi_taskq_create() done."));
796
797 /* enable interrupt */
798 instance->func_ptr->enable_intr(instance);
799
800 /* initiate AEN */
801 if (start_mfi_aen(instance)) {
802 cmn_err(CE_WARN,
803 "mr_sas: failed to initiate AEN.");
804 goto fail_attach;
805 }
806 instance->unroll.aenPend = 1;
807 con_log(CL_ANN1, (CE_CONT,
808 "AEN started for instance %d.", instance_no));
809
810 /* Finally! We are on the air. */
811 ddi_report_dev(dip);
812
813 instance->mr_ld_list =
814 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
815 KM_SLEEP);
816 if (instance->mr_ld_list == NULL) {
817 cmn_err(CE_WARN,
818 "mr_sas attach(): failed to allocate ld_list array");
819 goto fail_attach;
820 }
821 instance->unroll.ldlist_buff = 1;
822
823 #ifdef PDSUPPORT
824 if(instance->tbolt) {
825 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
826 instance->mr_tbolt_pd_list =
827 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance)
828 * sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
829 ASSERT(instance->mr_tbolt_pd_list);
830 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
831 instance->mr_tbolt_pd_list[i].lun_type =
832 MRSAS_TBOLT_PD_LUN;
833 instance->mr_tbolt_pd_list[i].dev_id =
834 (uint8_t)i;
835 }
836
837 instance->unroll.pdlist_buff = 1;
838 }
839 #endif
840 break;
841 case DDI_PM_RESUME:
842 con_log(CL_ANN, (CE_NOTE,
843 "mr_sas: DDI_PM_RESUME"));
844 break;
845 case DDI_RESUME:
846 con_log(CL_ANN, (CE_NOTE,
847 "mr_sas: DDI_RESUME"));
848 break;
849 default:
850 con_log(CL_ANN, (CE_WARN,
851 "mr_sas: invalid attach cmd=%x", cmd));
852 return (DDI_FAILURE);
853 }
854
855
856 cmn_err(CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d", instance_no);
857 return (DDI_SUCCESS);
858
859 fail_attach:
860
861 mrsas_undo_resources(dip, instance);
862
863 pci_config_teardown(&instance->pci_handle);
864 ddi_soft_state_free(mrsas_state, instance_no);
865
866 con_log(CL_ANN, (CE_WARN,
867 "mr_sas: return failure from mrsas_attach"));
868
869 cmn_err(CE_WARN, "mrsas_attach() return FAILURE instance_num %d", instance_no);
870
871 return (DDI_FAILURE);
872 }
873
874 /*
875 * getinfo - gets device information
876 * @dip:
877 * @cmd:
878 * @arg:
879 * @resultp:
880 *
881 * The system calls getinfo() to obtain configuration information that only
882 * the driver knows. The mapping of minor numbers to device instance is
883 * entirely under the control of the driver. The system sometimes needs to ask
884 * the driver which device a particular dev_t represents.
885 * Given the device number return the devinfo pointer from the scsi_device
886 * structure.
887 */
888 /*ARGSUSED*/
889 static int
890 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
891 {
892 int rval;
893 int mrsas_minor = getminor((dev_t)arg);
894
895 struct mrsas_instance *instance;
896
897 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
898
899 switch (cmd) {
900 case DDI_INFO_DEVT2DEVINFO:
901 instance = (struct mrsas_instance *)
902 ddi_get_soft_state(mrsas_state,
903 MINOR2INST(mrsas_minor));
904
905 if (instance == NULL) {
906 *resultp = NULL;
907 rval = DDI_FAILURE;
908 } else {
909 *resultp = instance->dip;
910 rval = DDI_SUCCESS;
911 }
912 break;
913 case DDI_INFO_DEVT2INSTANCE:
914 *resultp = (void *)(intptr_t)
915 (MINOR2INST(getminor((dev_t)arg)));
916 rval = DDI_SUCCESS;
917 break;
918 default:
919 *resultp = NULL;
920 rval = DDI_FAILURE;
921 }
922
923 return (rval);
924 }
925
926 /*
927 * detach - detaches a device from the system
928 * @dip: pointer to the device's dev_info structure
929 * @cmd: type of detach
930 *
931 * A driver's detach() entry point is called to detach an instance of a device
932 * that is bound to the driver. The entry point is called with the instance of
933 * the device node to be detached and with DDI_DETACH, which is specified as
934 * the cmd argument to the entry point.
935 * This routine is called during driver unload. We free all the allocated
936 * resources and call the corresponding LLD so that it can also release all
937 * its resources.
938 */
static int
mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance_no;

	struct mrsas_instance *instance;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
	    instance_no);

	if (!instance) {
		cmn_err(CE_WARN,
		    "mr_sas:%d could not get instance in detach",
		    instance_no);

		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE,
	    "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
	    instance_no, instance->vendor_id, instance->device_id,
	    instance->subsysvid, instance->subsysid));

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_DETACH"));

		/*
		 * Cancel the periodic timeout first.  The mutex is dropped
		 * around untimeout() because the timeout handler may itself
		 * need config_dev_mtx; holding it here could deadlock.
		 */
		mutex_enter(&instance->config_dev_mtx);
		if (instance->timeout_id != (timeout_id_t)-1) {
			mutex_exit(&instance->config_dev_mtx);
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;
			mutex_enter(&instance->config_dev_mtx);
			instance->unroll.timer = 0;
		}
		mutex_exit(&instance->config_dev_mtx);

		/*
		 * Detach from SCSA before touching anything else; if the
		 * framework refuses (e.g. targets still in use), abort the
		 * detach entirely.
		 */
		if (instance->unroll.tranSetup == 1) {
			if (scsi_hba_detach(dip) != DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "mr_sas2%d: failed to detach", instance_no);
				return (DDI_FAILURE);
			}
			instance->unroll.tranSetup = 0;
			con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
		}

		/* Flush controller caches before releasing resources. */
		flush_cache(instance);

		/* Unwind everything recorded in the unroll.* flags. */
		mrsas_undo_resources(dip, instance);

		pci_config_teardown(&instance->pci_handle);
		ddi_soft_state_free(mrsas_state, instance_no);
		break;

	case DDI_PM_SUSPEND:
		/* NOTE(review): suspend paths currently do no quiesce. */
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_PM_SUSPEND"));

		break;
	case DDI_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_SUSPEND"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1021
1022
/*
 * mrsas_undo_resources - release everything recorded in instance->unroll.
 *
 * Shared by the attach failure path and DDI_DETACH.  Each unroll.* flag
 * guards exactly one acquisition made in mrsas_attach(); teardown happens
 * roughly in reverse acquisition order, and each flag is cleared as its
 * resource is freed so the function is safe to call more than once.
 * Returns DDI_FAILURE only if scsi_hba_detach() refuses; DDI_SUCCESS
 * otherwise.  The caller still owns the pci handle and soft state.
 */
static int
mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance)
{
	int instance_no;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	instance_no = ddi_get_instance(dip);


	/* Remove the application ioctl, scsi and devctl minor nodes. */
	if (instance->unroll.ioctl == 1) {
		ddi_remove_minor_node(dip, instance->iocnode);
		instance->unroll.ioctl = 0;
	}

	if (instance->unroll.scsictl == 1) {
		ddi_remove_minor_node(dip, "scsi");
		instance->unroll.scsictl = 0;
	}

	if (instance->unroll.devctl == 1) {
		ddi_remove_minor_node(dip, "devctl");
		instance->unroll.devctl = 0;
	}

	/* Detach from the SCSA framework (no-op if detach already did it). */
	if (instance->unroll.tranSetup == 1) {
		if (scsi_hba_detach(dip) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "mr_sas2%d: failed to detach", instance_no);
			return (DDI_FAILURE);
		}
		instance->unroll.tranSetup = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
	}

	if (instance->unroll.tran == 1) {
		scsi_hba_tran_free(instance->tran);
		instance->unroll.tran = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done."));
	}

	/* Abort any outstanding firmware commands (sync map, then AEN). */
	if (instance->unroll.syncCmd == 1) {
		if (instance->tbolt) {
			if (abort_syncmap_cmd(instance, instance->map_update_cmd))
				cmn_err(CE_WARN, "mrsas_detach: "
				    "failed to abort previous syncmap command");

			instance->unroll.syncCmd = 0;
			con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
		}
	}

	if (instance->unroll.aenPend == 1) {
		if (abort_aen_cmd(instance, instance->aen_cmd))
			cmn_err(CE_WARN, "mrsas_detach: "
			    "failed to abort prevous AEN command");

		instance->unroll.aenPend = 0;
		con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
		/* This means the controller is fully initialzed and running */
		/*
		 * shutdown_controller() intentionally not called here;
		 * shutdown should be the last command to the controller.
		 */
	}


	/* Cancel the periodic timeout if it is still armed. */
	if (instance->unroll.timer == 1) {
		if (instance->timeout_id != (timeout_id_t)-1) {
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;

			instance->unroll.timer = 0;
		}
	}

	/* Silence the hardware before freeing locks and handlers. */
	instance->func_ptr->disable_intr(instance);


	if (instance->unroll.mutexs == 1) {
		mutex_destroy(&instance->cmd_pool_mtx);
		mutex_destroy(&instance->app_cmd_pool_mtx);
		mutex_destroy(&instance->cmd_pend_mtx);
		mutex_destroy(&instance->completed_pool_mtx);
		mutex_destroy(&instance->sync_map_mtx);
		mutex_destroy(&instance->int_cmd_mtx);
		cv_destroy(&instance->int_cmd_cv);
		mutex_destroy(&instance->config_dev_mtx);
		mutex_destroy(&instance->ocr_flags_mtx);
		mutex_destroy(&instance->reg_write_mtx);

		if (instance->tbolt) {
			mutex_destroy(&instance->cmd_app_pool_mtx);
			mutex_destroy(&instance->chip_mtx);
		}

		instance->unroll.mutexs = 0;
		con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done."));
	}


	/* Remove soft and hard interrupt handlers. */
	if (instance->unroll.soft_isr == 1) {
		ddi_remove_softintr(instance->soft_intr_id);
		instance->unroll.soft_isr = 0;
	}

	if (instance->unroll.intr == 1) {
		mrsas_rem_intrs(instance);
		instance->unroll.intr = 0;
	}


	if (instance->unroll.taskq == 1) {
		if (instance->taskq) {
			ddi_taskq_destroy(instance->taskq);
			instance->unroll.taskq = 0;
		}

	}

	/*
	 * Free DMA memory allocated for
	 * cmds/frames/queues/driver version etc.
	 */
	if (instance->unroll.verBuff == 1) {
		mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
		instance->unroll.verBuff = 0;
	}

	if (instance->unroll.pdlist_buff == 1) {
		if (instance->mr_tbolt_pd_list != NULL)
			kmem_free(instance->mr_tbolt_pd_list,
			    MRSAS_TBOLT_GET_PD_MAX(instance) * sizeof (struct mrsas_tbolt_pd));

		instance->mr_tbolt_pd_list = NULL;
		instance->unroll.pdlist_buff = 0;
	}

	if (instance->unroll.ldlist_buff == 1) {
		if (instance->mr_ld_list != NULL)
			kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
			    * sizeof (struct mrsas_ld));

		instance->mr_ld_list = NULL;
		instance->unroll.ldlist_buff = 0;
	}

	/* Release command-frame space for whichever path was initialized. */
	if (instance->tbolt) {
		if (instance->unroll.alloc_space_mpi2 == 1) {
			free_space_for_mpi2(instance);
			instance->unroll.alloc_space_mpi2 = 0;
		}
	} else {
		if (instance->unroll.alloc_space_mfi == 1) {
			free_space_for_mfi(instance);
			instance->unroll.alloc_space_mfi = 0;
		}
	}

	/* Unmap controller registers last; earlier steps may touch them. */
	if (instance->unroll.regs == 1) {
		ddi_regs_map_free(&instance->regmap_handle);
		instance->unroll.regs = 0;
		con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done."));
	}

	return (DDI_SUCCESS);
}
1186
1187
1188
1189 /*
1190 * ************************************************************************** *
1191 * *
1192 * common entry points - for character driver types *
1193 * *
1194 * ************************************************************************** *
1195 */
1196 /*
1197 * open - gets access to a device
1198 * @dev:
1199 * @openflags:
1200 * @otyp:
1201 * @credp:
1202 *
1203 * Access to a device by one or more application programs is controlled
1204 * through the open() and close() entry points. The primary function of
1205 * open() is to verify that the open request is allowed.
1206 */
1207 static int
1208 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1209 {
1210 int rval = 0;
1211
1212 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1213
1214 /* Check root permissions */
1215 if (drv_priv(credp) != 0) {
1216 con_log(CL_ANN, (CE_WARN,
1217 "mr_sas: Non-root ioctl access denied!"));
1218 return (EPERM);
1219 }
1220
1221 /* Verify we are being opened as a character device */
1222 if (otyp != OTYP_CHR) {
1223 con_log(CL_ANN, (CE_WARN,
1224 "mr_sas: ioctl node must be a char node"));
1225 return (EINVAL);
1226 }
1227
1228 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1229 == NULL) {
1230 return (ENXIO);
1231 }
1232
1233 if (scsi_hba_open) {
1234 rval = scsi_hba_open(dev, openflags, otyp, credp);
1235 }
1236
1237 return (rval);
1238 }
1239
1240 /*
1241 * close - gives up access to a device
1242 * @dev:
1243 * @openflags:
1244 * @otyp:
1245 * @credp:
1246 *
1247 * close() should perform any cleanup necessary to finish using the minor
1248 * device, and prepare the device (and driver) to be opened again.
1249 */
1250 static int
1251 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1252 {
1253 int rval = 0;
1254
1255 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1256
1257 /* no need for locks! */
1258
1259 if (scsi_hba_close) {
1260 rval = scsi_hba_close(dev, openflags, otyp, credp);
1261 }
1262
1263 return (rval);
1264 }
1265
1266 /*
1267 * ioctl - performs a range of I/O commands for character drivers
1268 * @dev:
1269 * @cmd:
1270 * @arg:
1271 * @mode:
1272 * @credp:
1273 * @rvalp:
1274 *
1275 * ioctl() routine must make sure that user data is copied into or out of the
1276 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1277 * and ddi_copyout(), as appropriate.
1278 * This is a wrapper routine to serialize access to the actual ioctl routine.
1279 * ioctl() should return 0 on success, or the appropriate error number. The
1280 * driver may also set the value returned to the calling process through rvalp.
1281 */
1282
1283 static int
1284 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1285 int *rvalp)
1286 {
1287 int rval = 0;
1288
1289 struct mrsas_instance *instance;
1290 struct mrsas_ioctl *ioctl;
1291 struct mrsas_aen aen;
1292 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1293
1294 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1295
1296 if (instance == NULL) {
1297 /* invalid minor number */
1298 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1299 return (ENXIO);
1300 }
1301
1302 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1303 KM_SLEEP);
1304 if (ioctl == NULL) {
1305 /* Failed to allocate memory for ioctl */
1306 con_log(CL_ANN, (CE_WARN, "mr_sas_ioctl: failed to allocate memory for ioctl"));
1307 return (ENXIO);
1308 }
1309
1310 switch ((uint_t)cmd) {
1311 case MRSAS_IOCTL_FIRMWARE:
1312 if (ddi_copyin((void *)arg, ioctl,
1313 sizeof (struct mrsas_ioctl), mode)) {
1314 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1315 "ERROR IOCTL copyin"));
1316 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1317 return (EFAULT);
1318 }
1319
1320 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1321 rval = handle_drv_ioctl(instance, ioctl, mode);
1322 } else {
1323 rval = handle_mfi_ioctl(instance, ioctl, mode);
1324 }
1325
1326 if (ddi_copyout((void *)ioctl, (void *)arg,
1327 (sizeof (struct mrsas_ioctl) - 1), mode)) {
1328 con_log(CL_ANN, (CE_WARN,
1329 "mrsas_ioctl: copy_to_user failed"));
1330 rval = 1;
1331 }
1332
1333 break;
1334 case MRSAS_IOCTL_AEN:
1335 con_log(CL_ANN, (CE_NOTE,
1336 "mrsas_ioctl: IOCTL Register AEN.\n"));
1337
1338 if (ddi_copyin((void *) arg, &aen,
1339 sizeof (struct mrsas_aen), mode)) {
1340 con_log(CL_ANN, (CE_WARN,
1341 "mrsas_ioctl: ERROR AEN copyin"));
1342 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1343 return (EFAULT);
1344 }
1345
1346 rval = handle_mfi_aen(instance, &aen);
1347
1348 if (ddi_copyout((void *) &aen, (void *)arg,
1349 sizeof (struct mrsas_aen), mode)) {
1350 con_log(CL_ANN, (CE_WARN,
1351 "mrsas_ioctl: copy_to_user failed"));
1352 rval = 1;
1353 }
1354
1355 break;
1356 default:
1357 rval = scsi_hba_ioctl(dev, cmd, arg,
1358 mode, credp, rvalp);
1359
1360 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
1361 "scsi_hba_ioctl called, ret = %x.", rval));
1362 }
1363
1364 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1365 return (rval);
1366 }
1367
1368 /*
1369 * ************************************************************************** *
1370 * *
1371 * common entry points - for block driver types *
1372 * *
1373 * ************************************************************************** *
1374 */
1375 /*
1376 * reset - TBD
1377 * @dip:
1378 * @cmd:
1379 *
1380 * TBD
1381 */
1382 /*ARGSUSED*/
1383 static int
1384 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1385 {
1386 int instance_no;
1387
1388 struct mrsas_instance *instance;
1389
1390 instance_no = ddi_get_instance(dip);
1391 instance = (struct mrsas_instance *)ddi_get_soft_state
1392 (mrsas_state, instance_no);
1393
1394 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1395
1396 if (!instance) {
1397 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1398 "in reset", instance_no));
1399 return (DDI_FAILURE);
1400 }
1401
1402 instance->func_ptr->disable_intr(instance);
1403
1404 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1405 instance_no));
1406
1407 flush_cache(instance);
1408
1409 return (DDI_SUCCESS);
1410 }
1411
1412
1413 /*ARGSUSED*/
1414 int
1415 mrsas_quiesce(dev_info_t *dip)
1416 {
1417 int instance_no;
1418
1419 struct mrsas_instance *instance;
1420
1421 instance_no = ddi_get_instance(dip);
1422 instance = (struct mrsas_instance *)ddi_get_soft_state
1423 (mrsas_state, instance_no);
1424
1425 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1426
1427 if (!instance) {
1428 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
1429 "in quiesce", instance_no));
1430 return (DDI_FAILURE);
1431 }
1432 if (instance->deadadapter || instance->adapterresetinprogress) {
1433 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
1434 "healthy state", instance_no));
1435 return (DDI_FAILURE);
1436 }
1437
1438 if (abort_aen_cmd(instance, instance->aen_cmd)) {
1439 con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
1440 "failed to abort prevous AEN command QUIESCE"));
1441 }
1442
1443 if (instance->tbolt) {
1444 if (abort_syncmap_cmd(instance,
1445 instance->map_update_cmd)) {
1446 cmn_err(CE_WARN,
1447 "mrsas_detach: failed to abort "
1448 "previous syncmap command");
1449 return (DDI_FAILURE);
1450 }
1451 }
1452
1453 instance->func_ptr->disable_intr(instance);
1454
1455 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1456 instance_no));
1457
1458 flush_cache(instance);
1459
1460 if (wait_for_outstanding(instance)) {
1461 con_log(CL_ANN1, (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1462 return (DDI_FAILURE);
1463 }
1464 return (DDI_SUCCESS);
1465 }
1466
1467 /*
1468 * ************************************************************************** *
1469 * *
1470 * entry points (SCSI HBA) *
1471 * *
1472 * ************************************************************************** *
1473 */
1474 /*
1475 * tran_tgt_init - initialize a target device instance
1476 * @hba_dip:
1477 * @tgt_dip:
1478 * @tran:
1479 * @sd:
1480 *
1481 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1482 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1483 * the device's address as valid and supportable for that particular HBA.
1484 * By returning DDI_FAILURE, the instance of the target driver for that device
1485 * is not probed or attached.
1486 */
1487 /*ARGSUSED*/
1488 static int
1489 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1490 scsi_hba_tran_t *tran, struct scsi_device *sd)
1491 {
1492 struct mrsas_instance *instance;
1493 uint16_t tgt = sd->sd_address.a_target;
1494 uint8_t lun = sd->sd_address.a_lun;
1495 dev_info_t *child = NULL;
1496
1497 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
1498 tgt, lun));
1499
1500 instance = ADDR2MR(&sd->sd_address);
1501
1502 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
1503 /*
1504 * If no persistent node exists, we don't allow .conf node
1505 * to be created.
1506 */
1507 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
1508 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init find child ="
1509 " %p t = %d l = %d", (void *)child, tgt, lun));
1510 if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
1511 DDI_SUCCESS)
1512 /* Create this .conf node */
1513 return (DDI_SUCCESS);
1514 }
1515 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
1516 "DDI_FAILURE t = %d l = %d", tgt, lun));
1517 return (DDI_FAILURE);
1518
1519 }
1520
1521 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1522 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1523
1524 if (tgt < MRDRV_MAX_LD && lun == 0) {
1525 if (instance->mr_ld_list[tgt].dip == NULL &&
1526 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1527 mutex_enter(&instance->config_dev_mtx);
1528 instance->mr_ld_list[tgt].dip = tgt_dip;
1529 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1530 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1531 mutex_exit(&instance->config_dev_mtx);
1532 }
1533 }
1534
1535 #ifdef PDSUPPORT
1536 else if(instance->tbolt) {
1537 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1538 mutex_enter(&instance->config_dev_mtx);
1539 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1540 instance->mr_tbolt_pd_list[tgt].flag =
1541 MRDRV_TGT_VALID;
1542 mutex_exit(&instance->config_dev_mtx);
1543 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1544 "t%xl%x", tgt, lun));
1545 }
1546 }
1547 #endif
1548
1549 return (DDI_SUCCESS);
1550 }
1551
/*ARGSUSED*/
static void
mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	/*
	 * tran_tgt_free - undo mrsas_tran_tgt_init(): drop the dip cached
	 * in the logical-drive (or, with PDSUPPORT, the Thunderbolt
	 * physical-device) list for this target.
	 */
	struct mrsas_instance *instance;
	int tgt = sd->sd_address.a_target;
	int lun = sd->sd_address.a_lun;

	instance = ADDR2MR(&sd->sd_address);

	con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));

	/* Logical drives: only lun 0 of in-range targets is tracked. */
	if (tgt < MRDRV_MAX_LD && lun == 0) {
		/* Clear only if this dip is the one we cached at init. */
		if (instance->mr_ld_list[tgt].dip == tgt_dip) {
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_ld_list[tgt].dip = NULL;
			mutex_exit(&instance->config_dev_mtx);
		}
	}

#ifdef PDSUPPORT
	/* Thunderbolt physical devices: clear the cached dip outright. */
	else if (instance->tbolt) {
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_tbolt_pd_list[tgt].dip = NULL;
		mutex_exit(&instance->config_dev_mtx);
		con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
		    "for tgt:%x", tgt));
	}
#endif

}
1584
1585 dev_info_t *
1586 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1587 {
1588 dev_info_t *child = NULL;
1589 char addr[SCSI_MAXNAMELEN];
1590 char tmp[MAXNAMELEN];
1591
1592 (void) sprintf(addr, "%x,%x", tgt, lun);
1593 for (child = ddi_get_child(instance->dip); child;
1594 child = ddi_get_next_sibling(child)) {
1595
1596 if (ndi_dev_is_persistent_node(child) == 0) {
1597 continue;
1598 }
1599
1600 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1601 DDI_SUCCESS) {
1602 continue;
1603 }
1604
1605 if (strcmp(addr, tmp) == 0) {
1606 break;
1607 }
1608 }
1609 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1610 (void *)child));
1611 return (child);
1612 }
1613
1614 /*
1615 * mrsas_name_node -
1616 * @dip:
1617 * @name:
1618 * @len:
1619 */
1620 static int
1621 mrsas_name_node(dev_info_t *dip, char *name, int len)
1622 {
1623 int tgt, lun;
1624
1625 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1626 DDI_PROP_DONTPASS, "target", -1);
1627 con_log(CL_DLEVEL2, (CE_NOTE,
1628 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1629 if (tgt == -1) {
1630 return (DDI_FAILURE);
1631 }
1632 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1633 "lun", -1);
1634 con_log(CL_DLEVEL2,
1635 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1636 if (lun == -1) {
1637 return (DDI_FAILURE);
1638 }
1639 (void) snprintf(name, len, "%x,%x", tgt, lun);
1640 return (DDI_SUCCESS);
1641 }
1642
1643 /*
1644 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1645 * @ap:
1646 * @pkt:
1647 * @bp:
1648 * @cmdlen:
1649 * @statuslen:
1650 * @tgtlen:
1651 * @flags:
1652 * @callback:
1653 *
1654 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1655 * structure and DMA resources for a target driver request. The
1656 * tran_init_pkt() entry point is called when the target driver calls the
1657 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1658 * is a request to perform one or more of three possible services:
1659 * - allocation and initialization of a scsi_pkt structure
1660 * - allocation of DMA resources for data transfer
1661 * - reallocation of DMA resources for the next portion of the data transfer
1662 */
1663 static struct scsi_pkt *
1664 mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1665 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1666 int flags, int (*callback)(), caddr_t arg)
1667 {
1668 struct scsa_cmd *acmd;
1669 struct mrsas_instance *instance;
1670 struct scsi_pkt *new_pkt;
1671
1672 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1673
1674 instance = ADDR2MR(ap);
1675
1676 /* step #1 : pkt allocation */
1677 if (pkt == NULL) {
1678 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1679 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1680 if (pkt == NULL) {
1681 return (NULL);
1682 }
1683
1684 acmd = PKT2CMD(pkt);
1685
1686 /*
1687 * Initialize the new pkt - we redundantly initialize
1688 * all the fields for illustrative purposes.
1689 */
1690 acmd->cmd_pkt = pkt;
1691 acmd->cmd_flags = 0;
1692 acmd->cmd_scblen = statuslen;
1693 acmd->cmd_cdblen = cmdlen;
1694 acmd->cmd_dmahandle = NULL;
1695 acmd->cmd_ncookies = 0;
1696 acmd->cmd_cookie = 0;
1697 acmd->cmd_cookiecnt = 0;
1698 acmd->cmd_nwin = 0;
1699
1700 pkt->pkt_address = *ap;
1701 pkt->pkt_comp = (void (*)())NULL;
1702 pkt->pkt_flags = 0;
1703 pkt->pkt_time = 0;
1704 pkt->pkt_resid = 0;
1705 pkt->pkt_state = 0;
1706 pkt->pkt_statistics = 0;
1707 pkt->pkt_reason = 0;
1708 new_pkt = pkt;
1709 } else {
1710 acmd = PKT2CMD(pkt);
1711 new_pkt = NULL;
1712 }
1713
1714 /* step #2 : dma allocation/move */
1715 if (bp && bp->b_bcount != 0) {
1716 if (acmd->cmd_dmahandle == NULL) {
1717 if (mrsas_dma_alloc(instance, pkt, bp, flags,
1718 callback) == DDI_FAILURE) {
1719 if (new_pkt) {
1720 scsi_hba_pkt_free(ap, new_pkt);
1721 }
1722 return ((struct scsi_pkt *)NULL);
1723 }
1724 } else {
1725 if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1726 return ((struct scsi_pkt *)NULL);
1727 }
1728 }
1729 }
1730
1731 return (pkt);
1732 }
1733
1734 /*
1735 * tran_start - transport a SCSI command to the addressed target
1736 * @ap:
1737 * @pkt:
1738 *
1739 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1740 * SCSI command to the addressed target. The SCSI command is described
1741 * entirely within the scsi_pkt structure, which the target driver allocated
1742 * through the HBA driver's tran_init_pkt() entry point. If the command
1743 * involves a data transfer, DMA resources must also have been allocated for
1744 * the scsi_pkt structure.
1745 *
1746 * Return Values :
1747 * TRAN_BUSY - request queue is full, no more free scbs
1748 * TRAN_ACCEPT - pkt has been submitted to the instance
1749 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	/* Set by build_cmd() when it completed the command inline. */
	uchar_t cmd_done = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);
	struct mrsas_cmd *cmd;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/* A dead adapter accepts no further I/O at all. */
	if (instance->deadadapter == 1) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_tran_start: return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesnt take any more IOs"));
		if (pkt) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}

	/* During an online controller reset, ask the caller to retry. */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}

	con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));

	pkt->pkt_reason = CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		/* Complete immediately unless the caller is polling. */
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	/* No command slot available: ask the target driver to retry. */
	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/* Interrupt-driven path: fire and complete from the ISR. */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			cmn_err(CE_WARN, "mr_sas:Firmware BUSY, fw_outstanding(0x%X) > max_fw_cmds(0x%X)",
			    instance->fw_outstanding, instance->max_fw_cmds );
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
		    "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/*
		 * Polled path (FLAG_NOINTR): issue synchronously, then
		 * translate the MFI frame status into pkt fields and
		 * complete the packet right here.
		 */
		struct mrsas_header *hdr = &cmd->frame->hdr;


		instance->func_ptr-> issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* cmd_status lives in DMA memory; read via the handle. */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: scsi done with error"));
			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;

			/* Report check condition so the caller runs ARQ. */
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: device not found error"));
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;

		default:
			/* Any other firmware status maps to target busy. */
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}
1866
1867 /*
1868 * tran_abort - Abort any commands that are currently in transport
1869 * @ap:
1870 * @pkt:
1871 *
1872 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1873 * commands that are currently in transport for a particular target. This entry
1874 * point is called when a target driver calls scsi_abort(). The tran_abort()
1875 * entry point should attempt to abort the command denoted by the pkt
1876 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1877 * abort all outstanding commands in the transport layer for the particular
1878 * target or logical unit.
1879 */
/*ARGSUSED*/
static int
mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	/*
	 * Per-command abort is not supported by the hardware, so this
	 * entry point unconditionally fails.
	 */
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* abort command not supported by H/W */

	return (DDI_FAILURE);
}
1890
1891 /*
1892 * tran_reset - reset either the SCSI bus or target
1893 * @ap:
1894 * @level:
1895 *
1896 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
1897 * the SCSI bus or a particular SCSI target device. This entry point is called
1898 * when a target driver calls scsi_reset(). The tran_reset() entry point must
1899 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
1900 * particular target or logical unit must be reset.
1901 */
1902 /*ARGSUSED*/
1903 static int
1904 mrsas_tran_reset(struct scsi_address *ap, int level)
1905 {
1906 struct mrsas_instance *instance = ADDR2MR(ap);
1907
1908 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1909
1910 if (wait_for_outstanding(instance)) {
1911 con_log(CL_ANN1, (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1912 return (DDI_FAILURE);
1913 } else {
1914 return (DDI_SUCCESS);
1915 }
1916 }
1917
1918 /*
1919 * tran_bus_reset - reset the SCSI bus
1920 * @dip:
1921 * @level:
1922 *
1923 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
1924 * initialized during the HBA driver's attach(). The vector should point to
1925 * an HBA entry point that is to be called when a user initiates a bus reset.
1926 * Implementation is hardware specific. If the HBA driver cannot reset the
1927 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
1928 * or not initialize this vector.
1929 */
1930 /*ARGSUSED*/
1931 static int
1932 mrsas_tran_bus_reset(dev_info_t *dip, int level)
1933 {
1934 int instance_no = ddi_get_instance(dip);
1935
1936 struct mrsas_instance *instance = ddi_get_soft_state(mrsas_state,
1937 instance_no);
1938
1939 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1940
1941 if (wait_for_outstanding(instance)) {
1942 con_log(CL_ANN1, (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1943 return (DDI_FAILURE);
1944 } else {
1945 return (DDI_SUCCESS);
1946 }
1947 }
1948
1949 /*
1950 * tran_getcap - get one of a set of SCSA-defined capabilities
1951 * @ap:
1952 * @cap:
1953 * @whom:
1954 *
1955 * The target driver can request the current setting of the capability for a
1956 * particular target by setting the whom parameter to nonzero. A whom value of
1957 * zero indicates a request for the current setting of the general capability
1958 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
1959 * for undefined capabilities or the current value of the requested capability.
1960 */
1961 /*ARGSUSED*/
1962 static int
1963 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
1964 {
1965 int rval = 0;
1966
1967 struct mrsas_instance *instance = ADDR2MR(ap);
1968
1969 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1970
1971 /* we do allow inquiring about capabilities for other targets */
1972 if (cap == NULL) {
1973 return (-1);
1974 }
1975
1976 switch (scsi_hba_lookup_capstr(cap)) {
1977 case SCSI_CAP_DMA_MAX:
1978 if (instance->tbolt) {
1979 /* Limit to 256k max transfer */
1980 rval = mrsas_tbolt_max_cap_maxxfer;
1981 } else {
1982 /* Limit to 16MB max transfer */
1983 rval = mrsas_max_cap_maxxfer;
1984 }
1985 break;
1986 case SCSI_CAP_MSG_OUT:
1987 rval = 1;
1988 break;
1989 case SCSI_CAP_DISCONNECT:
1990 rval = 0;
1991 break;
1992 case SCSI_CAP_SYNCHRONOUS:
1993 rval = 0;
1994 break;
1995 case SCSI_CAP_WIDE_XFER:
1996 rval = 1;
1997 break;
1998 case SCSI_CAP_TAGGED_QING:
1999 rval = 1;
2000 break;
2001 case SCSI_CAP_UNTAGGED_QING:
2002 rval = 1;
2003 break;
2004 case SCSI_CAP_PARITY:
2005 rval = 1;
2006 break;
2007 case SCSI_CAP_INITIATOR_ID:
2008 rval = instance->init_id;
2009 break;
2010 case SCSI_CAP_ARQ:
2011 rval = 1;
2012 break;
2013 case SCSI_CAP_LINKED_CMDS:
2014 rval = 0;
2015 break;
2016 case SCSI_CAP_RESET_NOTIFICATION:
2017 rval = 1;
2018 break;
2019 case SCSI_CAP_GEOMETRY:
2020 rval = -1;
2021
2022 break;
2023 default:
2024 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2025 scsi_hba_lookup_capstr(cap)));
2026 rval = -1;
2027 break;
2028 }
2029
2030 return (rval);
2031 }
2032
2033 /*
2034 * tran_setcap - set one of a set of SCSA-defined capabilities
2035 * @ap:
2036 * @cap:
2037 * @value:
2038 * @whom:
2039 *
2040 * The target driver might request that the new value be set for a particular
2041 * target by setting the whom parameter to nonzero. A whom value of zero
2042 * means that request is to set the new value for the SCSI bus or for adapter
2043 * hardware in general.
2044 * The tran_setcap() should return the following values as appropriate:
2045 * - -1 for undefined capabilities
2046 * - 0 if the HBA driver cannot set the capability to the requested value
2047 * - 1 if the HBA driver is able to set the capability to the requested value
2048 */
2049 /*ARGSUSED*/
2050 static int
2051 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2052 {
2053 int rval = 1;
2054
2055 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2056
2057 /* We don't allow setting capabilities for other targets */
2058 if (cap == NULL || whom == 0) {
2059 return (-1);
2060 }
2061
2062 switch (scsi_hba_lookup_capstr(cap)) {
2063 case SCSI_CAP_DMA_MAX:
2064 case SCSI_CAP_MSG_OUT:
2065 case SCSI_CAP_PARITY:
2066 case SCSI_CAP_LINKED_CMDS:
2067 case SCSI_CAP_RESET_NOTIFICATION:
2068 case SCSI_CAP_DISCONNECT:
2069 case SCSI_CAP_SYNCHRONOUS:
2070 case SCSI_CAP_UNTAGGED_QING:
2071 case SCSI_CAP_WIDE_XFER:
2072 case SCSI_CAP_INITIATOR_ID:
2073 case SCSI_CAP_ARQ:
2074 /*
2075 * None of these are settable via
2076 * the capability interface.
2077 */
2078 break;
2079 case SCSI_CAP_TAGGED_QING:
2080 rval = 1;
2081 break;
2082 case SCSI_CAP_SECTOR_SIZE:
2083 rval = 1;
2084 break;
2085
2086 case SCSI_CAP_TOTAL_SECTORS:
2087 rval = 1;
2088 break;
2089 default:
2090 rval = -1;
2091 break;
2092 }
2093
2094 return (rval);
2095 }
2096
2097 /*
2098 * tran_destroy_pkt - deallocate scsi_pkt structure
2099 * @ap:
2100 * @pkt:
2101 *
2102 * The tran_destroy_pkt() entry point is the HBA driver function that
2103 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2104 * called when the target driver calls scsi_destroy_pkt(). The
2105 * tran_destroy_pkt() entry point must free any DMA resources that have been
2106 * allocated for the packet. An implicit DMA synchronization occurs if the
2107 * DMA resources are freed and any cached data remains after the completion
2108 * of the transfer.
2109 */
2110 static void
2111 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2112 {
2113 struct scsa_cmd *acmd = PKT2CMD(pkt);
2114
2115 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2116
2117 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2118 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2119
2120 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2121
2122 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2123
2124 acmd->cmd_dmahandle = NULL;
2125 }
2126
2127 /* free the pkt */
2128 scsi_hba_pkt_free(ap, pkt);
2129 }
2130
2131 /*
2132 * tran_dmafree - deallocates DMA resources
2133 * @ap:
2134 * @pkt:
2135 *
2136 * The tran_dmafree() entry point deallocates DMAQ resources that have been
2137 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2138 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2139 * free only DMA resources allocated for a scsi_pkt structure, not the
2140 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2141 * implicitly performed.
2142 */
2143 /*ARGSUSED*/
2144 static void
2145 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2146 {
2147 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2148
2149 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2150
2151 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2152 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2153
2154 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2155
2156 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2157
2158 acmd->cmd_dmahandle = NULL;
2159 }
2160 }
2161
2162 /*
2163 * tran_sync_pkt - synchronize the DMA object allocated
2164 * @ap:
2165 * @pkt:
2166 *
2167 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2168 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2169 * entry point is called when the target driver calls scsi_sync_pkt(). If the
2170 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2171 * must synchronize the CPU's view of the data. If the data transfer direction
2172 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2173 * device's view of the data.
2174 */
2175 /*ARGSUSED*/
2176 static void
2177 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2178 {
2179 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2180
2181 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2182
2183 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2184 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2185 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2186 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2187 }
2188 }
2189
/*ARGSUSED*/
static int
mrsas_tran_quiesce(dev_info_t *dip)
{
	/*
	 * Bus quiesce is not implemented; the unconditional nonzero
	 * return presumably signals "unsupported/failed" to the SCSA
	 * framework -- see tran_quiesce(9E) to confirm.
	 */
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (1);
}
2198
/*ARGSUSED*/
static int
mrsas_tran_unquiesce(dev_info_t *dip)
{
	/*
	 * Bus unquiesce is not implemented; mirrors mrsas_tran_quiesce()
	 * and presumably signals "unsupported/failed" -- see
	 * tran_unquiesce(9E) to confirm.
	 */
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (1);
}
2207
2208
2209 /*
2210 * mrsas_isr(caddr_t)
2211 *
2212 * The Interrupt Service Routine
2213 *
2214 * Collect status for all completed commands and do callback
2215 *
2216 */
2217 static uint_t
2218 mrsas_isr(struct mrsas_instance *instance)
2219 {
2220 int need_softintr;
2221 uint32_t producer;
2222 uint32_t consumer;
2223 uint32_t context;
2224 uint32_t status, value;
2225 int retval;
2226
2227 struct mrsas_cmd *cmd;
2228 struct mrsas_header *hdr;
2229 struct scsi_pkt *pkt;
2230
2231 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2232 ASSERT(instance);
2233 if (instance->tbolt) {
2234 mutex_enter(&instance->chip_mtx);
2235 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2236 !(instance->func_ptr->intr_ack(instance))) {
2237 mutex_exit(&instance->chip_mtx);
2238 return (DDI_INTR_UNCLAIMED);
2239 }
2240 retval = mr_sas_tbolt_process_outstanding_cmd(instance);
2241 mutex_exit(&instance->chip_mtx);
2242 return (retval);
2243 } else {
2244 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2245 !instance->func_ptr->intr_ack(instance)) {
2246 return (DDI_INTR_UNCLAIMED);
2247 }
2248 }
2249
2250 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2251 0, 0, DDI_DMA_SYNC_FORCPU);
2252
2253 #ifdef OCRDEBUG
2254 if (debug_consecutive_timeout_after_ocr_g == 1) {
2255 con_log(CL_ANN1, (CE_NOTE,
2256 "simulating consecutive timeout after ocr"));
2257 return (DDI_INTR_CLAIMED);
2258 }
2259 #endif
2260
2261 mutex_enter(&instance->completed_pool_mtx);
2262 mutex_enter(&instance->cmd_pend_mtx);
2263
2264 producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2265 instance->producer);
2266 consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2267 instance->consumer);
2268
2269 con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
2270 producer, consumer));
2271 if (producer == consumer) {
2272 con_log(CL_ANN, (CE_WARN, "producer == consumer case"));
2273 mutex_exit(&instance->cmd_pend_mtx);
2274 mutex_exit(&instance->completed_pool_mtx);
2275 return (DDI_INTR_CLAIMED);
2276 }
2277
2278 while (consumer != producer) {
2279 context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2280 &instance->reply_queue[consumer]);
2281 cmd = instance->cmd_list[context];
2282
2283 if (cmd->sync_cmd == MRSAS_TRUE) {
2284 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2285 if (hdr) {
2286 mlist_del_init(&cmd->list);
2287 }
2288 } else {
2289 pkt = cmd->pkt;
2290 if (pkt) {
2291 mlist_del_init(&cmd->list);
2292 }
2293 }
2294
2295 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
2296
2297 consumer++;
2298 if (consumer == (instance->max_fw_cmds + 1)) {
2299 consumer = 0;
2300 }
2301 }
2302 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
2303 instance->consumer, consumer);
2304 mutex_exit(&instance->cmd_pend_mtx);
2305 mutex_exit(&instance->completed_pool_mtx);
2306
2307 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2308 0, 0, DDI_DMA_SYNC_FORDEV);
2309
2310 if (instance->softint_running) {
2311 need_softintr = 0;
2312 } else {
2313 need_softintr = 1;
2314 }
2315
2316 if (instance->isr_level == HIGH_LEVEL_INTR) {
2317 if (need_softintr) {
2318 ddi_trigger_softintr(instance->soft_intr_id);
2319 }
2320 } else {
2321 /*
2322 * Not a high-level interrupt, therefore call the soft level
2323 * interrupt explicitly
2324 */
2325 (void) mrsas_softintr(instance);
2326 }
2327
2328 return (DDI_INTR_CLAIMED);
2329 }
2330
2331
2332 /*
2333 * ************************************************************************** *
2334 * *
2335 * libraries *
2336 * *
2337 * ************************************************************************** *
2338 */
2339 /*
2340 * get_mfi_pkt : Get a command from the free pool
2341 * After successful allocation, the caller of this routine
2342 * must clear the frame buffer (memset to zero) before
2343 * using the packet further.
2344 *
2345 * ***** Note *****
2346 * After clearing the frame buffer the context id of the
2347 * frame buffer SHOULD be restored back.
2348 */
2349 static struct mrsas_cmd *
2350 get_mfi_pkt(struct mrsas_instance *instance)
2351 {
2352 mlist_t *head = &instance->cmd_pool_list;
2353 struct mrsas_cmd *cmd = NULL;
2354
2355 mutex_enter(&instance->cmd_pool_mtx);
2356 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2357
2358 if (!mlist_empty(head)) {
2359 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2360 mlist_del_init(head->next);
2361 }
2362 if (cmd != NULL) {
2363 cmd->pkt = NULL;
2364 cmd->retry_count_for_ocr = 0;
2365 cmd->drv_pkt_time = 0;
2366
2367 }
2368 mutex_exit(&instance->cmd_pool_mtx);
2369
2370 return (cmd);
2371 }
2372
2373 static struct mrsas_cmd *
2374 get_mfi_app_pkt(struct mrsas_instance *instance)
2375 {
2376 mlist_t *head = &instance->app_cmd_pool_list;
2377 struct mrsas_cmd *cmd = NULL;
2378
2379 mutex_enter(&instance->app_cmd_pool_mtx);
2380 ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));
2381
2382 if (!mlist_empty(head)) {
2383 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2384 mlist_del_init(head->next);
2385 }
2386 if (cmd != NULL){
2387 cmd->pkt = NULL;
2388 cmd->retry_count_for_ocr = 0;
2389 cmd->drv_pkt_time = 0;
2390 }
2391
2392 mutex_exit(&instance->app_cmd_pool_mtx);
2393
2394 return (cmd);
2395 }
2396 /*
2397 * return_mfi_pkt : Return a cmd to free command pool
2398 */
2399 static void
2400 return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2401 {
2402 mutex_enter(&instance->cmd_pool_mtx);
2403 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2404
2405 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2406
2407 mutex_exit(&instance->cmd_pool_mtx);
2408 }
2409
2410 static void
2411 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2412 {
2413 mutex_enter(&instance->app_cmd_pool_mtx);
2414 ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));
2415
2416 mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2417
2418 mutex_exit(&instance->app_cmd_pool_mtx);
2419 }
/*
 * push_pending_mfi_pkt - move a command onto the pending list and arm
 * the I/O timeout checker if it is not already running.
 *
 * For sync (DCMD) commands the timeout is read from the frame header
 * via the DMA access handle, floored at debug_timeout_g. For
 * packet-based commands debug_timeout_g is used directly. In either
 * case, if no timeout() is scheduled (timeout_id == -1), one is
 * started so io_timeout_checker() runs every second.
 */
void
push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;
	con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
	mutex_enter(&instance->cmd_pend_mtx);
	ASSERT(mutex_owned(&instance->cmd_pend_mtx));
	/* Re-link the command at the tail of the pending list. */
	mlist_del_init(&cmd->list);
	mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
	if (cmd->sync_cmd == MRSAS_TRUE) {
		/* Sync DCMD: take the timeout from the frame header. */
		hdr = (struct mrsas_header *)&cmd->frame->hdr;
		if (hdr) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x "
			    "time %llx",
			    (void *)cmd, cmd->index,
			    gethrtime()));
			/* Wait for specified interval */
			cmd->drv_pkt_time = ddi_get16(
			    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
			/* Never time out sooner than the global floor. */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_pkt(): "
			    "Called IO Timeout Value %x\n",
			    cmd->drv_pkt_time));
		}
		/* Arm the per-second timeout checker if not yet running. */
		if (hdr && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	} else {
		/* Packet I/O: use the global debug timeout directly. */
		pkt = cmd->pkt;
		if (pkt) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x pkt %p, "
			    "time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
		}
		/* Arm the per-second timeout checker if not yet running. */
		if (pkt && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	}

	mutex_exit(&instance->cmd_pend_mtx);

}
2473
/*
 * mrsas_print_pending_cmds - log every command on the pending list.
 *
 * Debug aid (used around OCR/reset): temporarily raises debug_level_g
 * to CL_ANN1 so the con_log() calls below emit, walks cmd_pend_list,
 * and prints a summary line per command plus full details for the
 * first one (detail code 0xDD). Always returns DDI_SUCCESS.
 *
 * NOTE(review): cmd_pend_mtx is dropped between taking `tmp' and using
 * `cmd', so the list must not change concurrently - presumably callers
 * hold the adapter quiesced; confirm before relying on this elsewhere.
 */
int
mrsas_print_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	unsigned int flag = 1;
	struct scsi_pkt *pkt;
	int saved_level;
	int cmd_count = 0;


	/* Force verbose logging for the duration of the dump. */
	saved_level = debug_level_g;
	debug_level_g = CL_ANN1;

	cmn_err(CE_NOTE, "mrsas_print_pending_cmds(): Called\n");

	while (flag) {
		mutex_enter(&instance->cmd_pend_mtx);
		tmp = tmp->next;
		if (tmp == head) {
			/* Wrapped back to the list head: done. */
			mutex_exit(&instance->cmd_pend_mtx);
			flag = 0;
			con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): NO MORE CMDS PENDING....\n"));
			break;
		} else {
			cmd = mlist_entry(tmp, struct mrsas_cmd, list);
			mutex_exit(&instance->cmd_pend_mtx);
			if (cmd) {
				if (cmd->sync_cmd == MRSAS_TRUE) {
					/* Sync DCMD: no scsi_pkt attached. */
					hdr = (struct mrsas_header *)&cmd->frame->hdr;
					if (hdr) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x drv_pkt_time 0x%x (NO-PKT) hdr %p\n",
						    (void *)cmd, cmd->index, cmd->drv_pkt_time, (void *)hdr));
					}
				} else {
					pkt = cmd->pkt;
					if (pkt) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x drv_pkt_time 0x%x pkt %p \n",
						    (void *)cmd, cmd->index, cmd->drv_pkt_time, (void *)pkt));
					}
				}

				/* Full detail for the first command only. */
				if (++cmd_count == 1)
					mrsas_print_cmd_details(instance, cmd, 0xDD);
				else
					mrsas_print_cmd_details(instance, cmd, 1);

			}
		}
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));


	/* Restore the caller's logging verbosity. */
	debug_level_g = saved_level;

	return (DDI_SUCCESS);
}
2535
2536
2537 int
2538 mrsas_complete_pending_cmds(struct mrsas_instance *instance)
2539 {
2540
2541 struct mrsas_cmd *cmd = NULL;
2542 struct scsi_pkt *pkt;
2543 struct mrsas_header *hdr;
2544
2545 struct mlist_head *pos, *next;
2546
2547 con_log(CL_ANN1, (CE_NOTE,
2548 "mrsas_complete_pending_cmds(): Called"));
2549
2550 mutex_enter(&instance->cmd_pend_mtx);
2551 mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
2552 cmd = mlist_entry(pos, struct mrsas_cmd, list);
2553 if (cmd) {
2554 pkt = cmd->pkt;
2555 if (pkt) { /* for IO */
2556 if (((pkt->pkt_flags & FLAG_NOINTR)
2557 == 0) && pkt->pkt_comp) {
2558 pkt->pkt_reason
2559 = CMD_DEV_GONE;
2560 pkt->pkt_statistics
2561 = STAT_DISCON;
2562 con_log(CL_ANN1, (CE_CONT,
2563 "fail and posting to scsa "
2564 "cmd %p index %x"
2565 " pkt %p "
2566 "time : %llx",
2567 (void *)cmd, cmd->index,
2568 (void *)pkt, gethrtime()));
2569 (*pkt->pkt_comp)(pkt);
2570 }
2571 } else { /* for DCMDS */
2572 if (cmd->sync_cmd == MRSAS_TRUE) {
2573 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2574 con_log(CL_ANN1, (CE_CONT,
2575 "posting invalid status to application "
2576 "cmd %p index %x"
2577 " hdr %p "
2578 "time : %llx",
2579 (void *)cmd, cmd->index,
2580 (void *)hdr, gethrtime()));
2581 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2582 complete_cmd_in_sync_mode(instance, cmd);
2583 }
2584 }
2585 mlist_del_init(&cmd->list);
2586 } else {
2587 con_log(CL_ANN1, (CE_CONT,
2588 "mrsas_complete_pending_cmds:"
2589 "NULL command\n"));
2590 }
2591 con_log(CL_ANN1, (CE_CONT,
2592 "mrsas_complete_pending_cmds:"
2593 "looping for more commands\n"));
2594 }
2595 mutex_exit(&instance->cmd_pend_mtx);
2596
2597 con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
2598 return (DDI_SUCCESS);
2599 }
2600
2601 void
2602 mrsas_print_cmd_details(struct mrsas_instance *instance,
2603 struct mrsas_cmd *cmd, int detail )
2604 {
2605 struct scsi_pkt *pkt = cmd->pkt;
2606 Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
2607 MPI2_SCSI_IO_VENDOR_UNIQUE *raidContext;
2608 uint8_t *cdb_p;
2609 char str[100], *strp;
2610 int i, j, len;
2611 int saved_level;
2612
2613
2614 if (detail == 0xDD) {
2615 saved_level = debug_level_g;
2616 debug_level_g = CL_ANN1;
2617 }
2618
2619
2620 if (instance->tbolt) {
2621 con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
2622 (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
2623 }
2624 else {
2625 con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p cmd->index 0x%x timer 0x%x sec\n",
2626 (void *)cmd, cmd->index, cmd->drv_pkt_time));
2627 }
2628
2629 if(pkt) {
2630 con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
2631 pkt->pkt_cdbp[0]));
2632 }else {
2633 con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
2634 }
2635
2636 if((detail==0xDD) && instance->tbolt) {
2637 con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
2638 con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
2639 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->DevHandle),
2640 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->Function),
2641 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->IoFlags),
2642 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->SGLFlags),
2643 ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->DataLength) ));
2644
2645 for(i=0; i < 32; i++)
2646 con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ",i,
2647 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->CDB.CDB32[i]) ));
2648
2649 con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
2650 con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X ldTargetId=0x%X timeoutValue=0x%X"
2651 "regLockFlags=0x%X RAIDFlags=0x%X regLockRowLBA=0x%" PRIx64 " regLockLength=0x%X spanArm=0x%X\n",
2652 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.status),
2653 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.extStatus),
2654 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.ldTargetId),
2655 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.timeoutValue),
2656 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.regLockFlags),
2657 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.RAIDFlags),
2658 ddi_get64(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.regLockRowLBA),
2659 ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.regLockLength),
2660 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.spanArm) ));
2661
2662
2663 }
2664
2665 if (detail == 0xDD) {
2666 debug_level_g = saved_level;
2667 }
2668
2669 return;
2670 }
2671
2672
/*
 * mrsas_issue_pending_cmds - reissue every command on the pending list.
 *
 * Called after online controller reset (OCR). Each pending command has
 * its timeout floored at debug_timeout_g and its OCR retry count
 * bumped; commands that have been retried more than IO_RETRY_COUNT
 * times cause the adapter to be killed and DDI_FAILURE returned.
 * Sync DCMDs are reissued via issue_cmd_in_sync_mode, everything else
 * via issue_cmd. Returns DDI_SUCCESS when the whole list was walked.
 *
 * NOTE(review): cmd_pend_mtx is dropped before the command is used and
 * reissued - presumably safe because OCR has quiesced other activity;
 * confirm before calling from any other context.
 */
int
mrsas_issue_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head->next;
	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
	while (tmp != head) {
		/* Advance the cursor under the lock, then work unlocked. */
		mutex_enter(&instance->cmd_pend_mtx);
		cmd = mlist_entry(tmp, struct mrsas_cmd, list);
		tmp = tmp->next;
		mutex_exit(&instance->cmd_pend_mtx);
		if (cmd) {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds(): "
			    "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
			    (void *)cmd, cmd->index, cmd->drv_pkt_time));

			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g; /* Reset command timeout value */

			cmd->retry_count_for_ocr++;

			cmn_err(CE_CONT, "cmd retry count = %d\n",
			    cmd->retry_count_for_ocr);

			/* Too many OCR retries: give up and kill the HBA. */
			if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
				cmn_err(CE_WARN,
				    "mrsas_issue_pending_cmds(): cmd->retry_count exceeded limit >%d\n",
				    IO_RETRY_COUNT);
				mrsas_print_cmd_details(instance, cmd, 0xDD);

				cmn_err(CE_WARN,
				    "mrsas_issue_pending_cmds():"
				    "Calling KILL Adapter\n");
				if (instance->tbolt)
					(void) mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}

			pkt = cmd->pkt;
			if (pkt) {
				con_log(CL_ANN1, (CE_CONT,
				    "PENDING PKT-CMD ISSUE: cmd %p index %x "
				    "pkt %p time %llx",
				    (void *)cmd, cmd->index,
				    (void *)pkt,
				    gethrtime()));

			} else {
				cmn_err(CE_CONT,
				    "mrsas_issue_pending_cmds(): "
				    "NO-PKT, cmd %p index 0x%x drv_pkt_time 0x%x ",
				    (void *)cmd, cmd->index, cmd->drv_pkt_time);
			}


			/* DCMDs go out synchronously; I/O asynchronously. */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				cmn_err(CE_CONT, "mrsas_issue_pending_cmds(): SYNC_CMD == TRUE \n");

				instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
			} else {
				instance->func_ptr->issue_cmd(cmd, instance);
			}
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds: NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_issue_pending_cmds:"
		    "looping for more commands"));
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2752
2753
2754
2755 /*
2756 * destroy_mfi_frame_pool
2757 */
2758 void
2759 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2760 {
2761 int i;
2762 uint32_t max_cmd = instance->max_fw_cmds;
2763
2764 struct mrsas_cmd *cmd;
2765
2766 /* return all frames to pool */
2767
2768 for (i = 0; i < max_cmd; i++) {
2769
2770 cmd = instance->cmd_list[i];
2771
2772 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2773 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2774
2775 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
2776 }
2777
2778 }
2779
2780 /*
2781 * create_mfi_frame_pool
2782 */
2783 int
2784 create_mfi_frame_pool(struct mrsas_instance *instance)
2785 {
2786 int i = 0;
2787 int cookie_cnt;
2788 uint16_t max_cmd;
2789 uint16_t sge_sz;
2790 uint32_t sgl_sz;
2791 uint32_t tot_frame_size;
2792 struct mrsas_cmd *cmd;
2793 int retval = DDI_SUCCESS;
2794
2795 max_cmd = instance->max_fw_cmds;
2796 sge_sz = sizeof (struct mrsas_sge_ieee);
2797 /* calculated the number of 64byte frames required for SGL */
2798 sgl_sz = sge_sz * instance->max_num_sge;
2799 tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;
2800
2801 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
2802 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
2803
2804 while (i < max_cmd) {
2805 cmd = instance->cmd_list[i];
2806
2807 cmd->frame_dma_obj.size = tot_frame_size;
2808 cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
2809 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2810 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2811 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
2812 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
2813
2814 cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
2815 (uchar_t)DDI_STRUCTURE_LE_ACC);
2816
2817 if (cookie_cnt == -1 || cookie_cnt > 1) {
2818 cmn_err(CE_WARN,
2819 "create_mfi_frame_pool: could not alloc.");
2820 retval = DDI_FAILURE;
2821 goto mrsas_undo_frame_pool;
2822 }
2823
2824 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
2825
2826 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
2827 cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
2828 cmd->frame_phys_addr =
2829 cmd->frame_dma_obj.dma_cookie[0].dmac_address;
2830
2831 cmd->sense = (uint8_t *)(((unsigned long)
2832 cmd->frame_dma_obj.buffer) +
2833 tot_frame_size - SENSE_LENGTH);
2834 cmd->sense_phys_addr =
2835 cmd->frame_dma_obj.dma_cookie[0].dmac_address +
2836 tot_frame_size - SENSE_LENGTH;
2837
2838 if (!cmd->frame || !cmd->sense) {
2839 cmn_err(CE_WARN,
2840 "mr_sas: pci_pool_alloc failed");
2841 retval = ENOMEM;
2842 goto mrsas_undo_frame_pool;
2843 }
2844
2845 ddi_put32(cmd->frame_dma_obj.acc_handle,
2846 &cmd->frame->io.context, cmd->index);
2847 i++;
2848
2849 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
2850 cmd->index, cmd->frame_phys_addr));
2851 }
2852
2853 return (DDI_SUCCESS);
2854
2855 mrsas_undo_frame_pool:
2856 if (i > 0)
2857 destroy_mfi_frame_pool(instance);
2858
2859 return (retval);
2860 }
2861
2862 /*
2863 * free_additional_dma_buffer
2864 */
2865 static void
2866 free_additional_dma_buffer(struct mrsas_instance *instance)
2867 {
2868 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
2869 (void) mrsas_free_dma_obj(instance,
2870 instance->mfi_internal_dma_obj);
2871 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
2872 }
2873
2874 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
2875 (void) mrsas_free_dma_obj(instance,
2876 instance->mfi_evt_detail_obj);
2877 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
2878 }
2879 }
2880
2881 /*
2882 * alloc_additional_dma_buffer
2883 */
2884 static int
2885 alloc_additional_dma_buffer(struct mrsas_instance *instance)
2886 {
2887 uint32_t reply_q_sz;
2888 uint32_t internal_buf_size = PAGESIZE*2;
2889
2890 /* max cmds plus 1 + producer & consumer */
2891 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
2892
2893 instance->mfi_internal_dma_obj.size = internal_buf_size;
2894 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
2895 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2896 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
2897 0xFFFFFFFFU;
2898 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
2899
2900 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
2901 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
2902 cmn_err(CE_WARN,
2903 "mr_sas: could not alloc reply queue");
2904 return (DDI_FAILURE);
2905 }
2906
2907 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
2908
2909 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
2910
2911 instance->producer = (uint32_t *)((unsigned long)
2912 instance->mfi_internal_dma_obj.buffer);
2913 instance->consumer = (uint32_t *)((unsigned long)
2914 instance->mfi_internal_dma_obj.buffer + 4);
2915 instance->reply_queue = (uint32_t *)((unsigned long)
2916 instance->mfi_internal_dma_obj.buffer + 8);
2917 instance->internal_buf = (caddr_t)(((unsigned long)
2918 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
2919 instance->internal_buf_dmac_add =
2920 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
2921 (reply_q_sz + 8);
2922 instance->internal_buf_size = internal_buf_size -
2923 (reply_q_sz + 8);
2924
2925 /* allocate evt_detail */
2926 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
2927 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
2928 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2929 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2930 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
2931 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
2932
2933 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
2934 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
2935 cmn_err(CE_WARN, "alloc_additional_dma_buffer: "
2936 "could not allocate data transfer buffer.");
2937 goto mrsas_undo_internal_buff;
2938 }
2939
2940 bzero(instance->mfi_evt_detail_obj.buffer,
2941 sizeof (struct mrsas_evt_detail));
2942
2943 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
2944
2945 return (DDI_SUCCESS);
2946
2947 mrsas_undo_internal_buff:
2948 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
2949 (void) mrsas_free_dma_obj(instance,
2950 instance->mfi_internal_dma_obj);
2951 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
2952 }
2953
2954 return (DDI_FAILURE);
2955 }
2956
2957
2958 void
2959 mrsas_free_cmd_pool(struct mrsas_instance *instance)
2960 {
2961 int i;
2962 uint32_t max_cmd;
2963 size_t sz;
2964
2965 /* already freed */
2966 if (instance->cmd_list == NULL) {
2967 return;
2968 }
2969
2970 max_cmd = instance->max_fw_cmds;
2971
2972 /* size of cmd_list array */
2973 sz = sizeof (struct mrsas_cmd *) * max_cmd;
2974
2975 /* First free each cmd */
2976 for (i = 0; i < max_cmd; i++) {
2977 if (instance->cmd_list[i] != NULL)
2978 kmem_free(instance->cmd_list[i],sizeof (struct mrsas_cmd));
2979
2980 instance->cmd_list[i] = NULL;
2981 }
2982
2983 /* Now, free cmd_list array */
2984 if (instance->cmd_list != NULL)
2985 kmem_free(instance->cmd_list,sz);
2986
2987 instance->cmd_list = NULL;
2988
2989 INIT_LIST_HEAD(&instance->cmd_pool_list);
2990 INIT_LIST_HEAD(&instance->cmd_pend_list);
2991 if (instance->tbolt) {
2992 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
2993 }
2994 else {
2995 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
2996 }
2997
2998 }
2999
3000
3001 /*
3002 * mrsas_alloc_cmd_pool
3003 */
3004 int
3005 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3006 {
3007 int i;
3008 int count;
3009 uint32_t max_cmd;
3010 uint32_t reserve_cmd;
3011 size_t sz;
3012
3013 struct mrsas_cmd *cmd;
3014
3015 max_cmd = instance->max_fw_cmds;
3016 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3017 "max_cmd %x", max_cmd));
3018
3019
3020 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3021
3022 /*
3023 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3024 * Allocate the dynamic array first and then allocate individual
3025 * commands.
3026 */
3027 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3028 if (instance->cmd_list == NULL) {
3029 con_log(CL_NONE, (CE_WARN,
3030 "Failed to allocate memory for cmd_list"));
3031 return (DDI_FAILURE);
3032 }
3033
3034 /* create a frame pool and assign one frame to each cmd */
3035 for (count = 0; count < max_cmd; count++) {
3036 instance->cmd_list[count] = kmem_zalloc(sizeof (struct mrsas_cmd),
3037 KM_SLEEP);
3038 if (instance->cmd_list[count] == NULL) {
3039 con_log(CL_NONE, (CE_WARN,
3040 "Failed to allocate memory for mrsas_cmd"));
3041 goto mrsas_undo_cmds;
3042 }
3043 }
3044
3045 /* add all the commands to command pool */
3046
3047 INIT_LIST_HEAD(&instance->cmd_pool_list);
3048 INIT_LIST_HEAD(&instance->cmd_pend_list);
3049 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3050
3051 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
3052
3053 for (i = 0; i < reserve_cmd; i++) {
3054 cmd = instance->cmd_list[i];
3055 cmd->index = i;
3056 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3057 }
3058
3059
3060 for (i = reserve_cmd; i < max_cmd; i++) {
3061 cmd = instance->cmd_list[i];
3062 cmd->index = i;
3063 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3064 }
3065
3066 return (DDI_SUCCESS);
3067
3068 mrsas_undo_cmds:
3069 if (count > 0) {
3070 /* free each cmd */
3071 for (i = 0; i < count; i++) {
3072 if (instance->cmd_list[i] != NULL)
3073 kmem_free(instance->cmd_list[i],sizeof (struct mrsas_cmd));
3074 instance->cmd_list[i] = NULL;
3075 }
3076 }
3077
3078 mrsas_undo_cmd_list:
3079 if (instance->cmd_list != NULL)
3080 kmem_free(instance->cmd_list,sz);
3081 instance->cmd_list = NULL;
3082
3083 return (DDI_FAILURE);
3084 }
3085
3086
3087 /*
3088 * free_space_for_mfi
3089 */
3090 static void
3091 free_space_for_mfi(struct mrsas_instance *instance)
3092 {
3093
3094 /* already freed */
3095 if (instance->cmd_list == NULL) {
3096 return;
3097 }
3098
3099 /* Free additional dma buffer */
3100 free_additional_dma_buffer(instance);
3101
3102 /* Free the MFI frame pool */
3103 destroy_mfi_frame_pool(instance);
3104
3105 /* Free all the commands in the cmd_list */
3106 /* Free the cmd_list buffer itself */
3107 mrsas_free_cmd_pool(instance);
3108 }
3109
3110 /*
3111 * alloc_space_for_mfi
3112 */
3113 static int
3114 alloc_space_for_mfi(struct mrsas_instance *instance)
3115 {
3116 /* Allocate command pool ( memory for cmd_list & individual commands )*/
3117 if (mrsas_alloc_cmd_pool(instance)) {
3118 cmn_err(CE_WARN, "error creating cmd pool");
3119 return (DDI_FAILURE);
3120 }
3121
3122 /* Allocate MFI Frame pool */
3123 if (create_mfi_frame_pool(instance)) {
3124 cmn_err(CE_WARN, "error creating frame DMA pool");
3125 goto mfi_undo_cmd_pool;
3126 }
3127
3128 /* Allocate additional DMA buffer */
3129 if (alloc_additional_dma_buffer(instance)) {
3130 cmn_err(CE_WARN, "error creating frame DMA pool");
3131 goto mfi_undo_frame_pool;
3132 }
3133
3134 return (DDI_SUCCESS);
3135
3136 mfi_undo_frame_pool:
3137 destroy_mfi_frame_pool(instance);
3138
3139 mfi_undo_cmd_pool:
3140 mrsas_free_cmd_pool(instance);
3141
3142 return (DDI_FAILURE);
3143 }
3144
3145
3146
3147 /*
3148 * get_ctrl_info
3149 */
3150 static int
3151 get_ctrl_info(struct mrsas_instance *instance,
3152 struct mrsas_ctrl_info *ctrl_info)
3153 {
3154 int ret = 0;
3155
3156 struct mrsas_cmd *cmd;
3157 struct mrsas_dcmd_frame *dcmd;
3158 struct mrsas_ctrl_info *ci;
3159
3160 if(instance->tbolt) {
3161 cmd = get_raid_msg_mfi_pkt(instance);
3162 }
3163 else {
3164 cmd = get_mfi_pkt(instance);
3165 }
3166
3167 if (!cmd) {
3168 cmn_err(CE_WARN,
3169 "Failed to get a cmd from free-pool in get_ctrl_info(). fw_outstanding=0x%X max_fw_cmds=0x%X",
3170 instance->fw_outstanding, instance->max_fw_cmds);
3171 return (DDI_FAILURE);
3172 }
3173
3174 /* Clear the frame buffer and assign back the context id */
3175 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3176 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3177 cmd->index);
3178
3179 dcmd = &cmd->frame->dcmd;
3180
3181 ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3182
3183 if (!ci) {
3184 cmn_err(CE_WARN,
3185 "Failed to alloc mem for ctrl info");
3186 return_mfi_pkt(instance, cmd);
3187 return (DDI_FAILURE);
3188 }
3189
3190 (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3191
3192 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
3193 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3194
3195 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3196 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3197 MFI_CMD_STATUS_POLL_MODE);
3198 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3199 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3200 MFI_FRAME_DIR_READ);
3201 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3202 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3203 sizeof (struct mrsas_ctrl_info));
3204 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3205 MR_DCMD_CTRL_GET_INFO);
3206 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3207 instance->internal_buf_dmac_add);
3208 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3209 sizeof (struct mrsas_ctrl_info));
3210
3211 cmd->frame_count = 1;
3212
3213 if (instance->tbolt) {
3214 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3215 }
3216
3217 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3218 ret = 0;
3219
3220 ctrl_info->max_request_size = ddi_get32(
3221 cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
3222
3223 ctrl_info->ld_present_count = ddi_get16(
3224 cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
3225
3226 ctrl_info->properties.on_off_properties =
3227 ddi_get32(cmd->frame_dma_obj.acc_handle,
3228 &ci->properties.on_off_properties);
3229 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3230 (uint8_t *)(ctrl_info->product_name),
3231 (uint8_t *)(ci->product_name), 80 * sizeof (char),
3232 DDI_DEV_AUTOINCR);
3233 /* should get more members of ci with ddi_get when needed */
3234 } else {
3235 cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed");
3236 ret = -1;
3237 }
3238
3239 if(instance->tbolt) {
3240 return_raid_msg_mfi_pkt(instance, cmd);
3241 }
3242 else {
3243 return_mfi_pkt(instance, cmd);
3244 }
3245
3246 return (ret);
3247 }
3248
3249 /*
3250 * abort_aen_cmd
3251 */
3252 static int
3253 abort_aen_cmd(struct mrsas_instance *instance,
3254 struct mrsas_cmd *cmd_to_abort)
3255 {
3256 int ret = 0;
3257
3258 struct mrsas_cmd *cmd;
3259 struct mrsas_abort_frame *abort_fr;
3260
3261 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3262
3263 if (instance->tbolt) {
3264 cmd = get_raid_msg_mfi_pkt(instance);
3265 } else {
3266 cmd = get_mfi_pkt(instance);
3267 }
3268
3269 if (!cmd) {
3270 cmn_err(CE_WARN,
3271 "Failed to get a cmd from free-pool in abort_aen_cmd(). fw_outstanding=0x%X max_fw_cmds=0x%X",
3272 instance->fw_outstanding, instance->max_fw_cmds);
3273 return (DDI_FAILURE);
3274 }
3275 /* Clear the frame buffer and assign back the context id */
3276 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3277 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3278 cmd->index);
3279
3280 abort_fr = &cmd->frame->abort;
3281
3282 /* prepare and issue the abort frame */
3283 ddi_put8(cmd->frame_dma_obj.acc_handle,
3284 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3285 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3286 MFI_CMD_STATUS_SYNC_MODE);
3287 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3288 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3289 cmd_to_abort->index);
3290 ddi_put32(cmd->frame_dma_obj.acc_handle,
3291 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3292 ddi_put32(cmd->frame_dma_obj.acc_handle,
3293 &abort_fr->abort_mfi_phys_addr_hi, 0);
3294
3295 instance->aen_cmd->abort_aen = 1;
3296
3297 cmd->frame_count = 1;
3298
3299 if (instance->tbolt) {
3300 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3301 }
3302
3303 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3304 con_log(CL_ANN1, (CE_WARN,
3305 "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3306 ret = -1;
3307 } else {
3308 ret = 0;
3309 }
3310
3311 instance->aen_cmd->abort_aen = 1;
3312 instance->aen_cmd = 0;
3313
3314 if (instance->tbolt) {
3315 return_raid_msg_mfi_pkt(instance, cmd);
3316 } else {
3317 return_mfi_pkt(instance, cmd);
3318 }
3319
3320 atomic_add_16(&instance->fw_outstanding, (-1));
3321
3322 return (ret);
3323 }
3324
3325
3326 static int
3327 mrsas_build_init_cmd(struct mrsas_instance *instance, struct mrsas_cmd **cmd_ptr)
3328 {
3329 struct mrsas_cmd *cmd;
3330 struct mrsas_init_frame *init_frame;
3331 struct mrsas_init_queue_info *initq_info;
3332 struct mrsas_drv_ver drv_ver_info;
3333
3334
3335 /*
3336 * Prepare a init frame. Note the init frame points to queue info
3337 * structure. Each frame has SGL allocated after first 64 bytes. For
3338 * this frame - since we don't need any SGL - we use SGL's space as
3339 * queue info structure
3340 */
3341 cmd = *cmd_ptr;
3342
3343
3344 /* Clear the frame buffer and assign back the context id */
3345 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3346 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3347 cmd->index);
3348
3349 init_frame = (struct mrsas_init_frame *)cmd->frame;
3350 initq_info = (struct mrsas_init_queue_info *)
3351 ((unsigned long)init_frame + 64);
3352
3353 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3354 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3355
3356 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3357
3358 ddi_put32(cmd->frame_dma_obj.acc_handle,
3359 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3360
3361 ddi_put32(cmd->frame_dma_obj.acc_handle,
3362 &initq_info->producer_index_phys_addr_hi, 0);
3363 ddi_put32(cmd->frame_dma_obj.acc_handle,
3364 &initq_info->producer_index_phys_addr_lo,
3365 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3366
3367 ddi_put32(cmd->frame_dma_obj.acc_handle,
3368 &initq_info->consumer_index_phys_addr_hi, 0);
3369 ddi_put32(cmd->frame_dma_obj.acc_handle,
3370 &initq_info->consumer_index_phys_addr_lo,
3371 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3372
3373 ddi_put32(cmd->frame_dma_obj.acc_handle,
3374 &initq_info->reply_queue_start_phys_addr_hi, 0);
3375 ddi_put32(cmd->frame_dma_obj.acc_handle,
3376 &initq_info->reply_queue_start_phys_addr_lo,
3377 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3378
3379 ddi_put8(cmd->frame_dma_obj.acc_handle,
3380 &init_frame->cmd, MFI_CMD_OP_INIT);
3381 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3382 MFI_CMD_STATUS_POLL_MODE);
3383 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3384 ddi_put32(cmd->frame_dma_obj.acc_handle,
3385 &init_frame->queue_info_new_phys_addr_lo,
3386 cmd->frame_phys_addr + 64);
3387 ddi_put32(cmd->frame_dma_obj.acc_handle,
3388 &init_frame->queue_info_new_phys_addr_hi, 0);
3389
3390
3391 /* fill driver version information*/
3392 fill_up_drv_ver(&drv_ver_info);
3393
3394 /* allocate the driver version data transfer buffer */
3395 instance->drv_ver_dma_obj.size = sizeof(drv_ver_info.drv_ver);
3396 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
3397 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3398 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3399 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
3400 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
3401
3402 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
3403 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3404 con_log(CL_ANN, (CE_WARN,
3405 "init_mfi : Could not allocate driver version buffer."));
3406 return (DDI_FAILURE);
3407 }
3408 /* copy driver version to dma buffer*/
3409 (void) memset(instance->drv_ver_dma_obj.buffer, 0,sizeof(drv_ver_info.drv_ver));
3410 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
3411 (uint8_t *)drv_ver_info.drv_ver,
3412 (uint8_t *)instance->drv_ver_dma_obj.buffer,
3413 sizeof(drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
3414
3415
3416 /*copy driver version physical address to init frame*/
3417 ddi_put64(cmd->frame_dma_obj.acc_handle,
3418 &init_frame->driverversion, instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
3419
3420 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3421 sizeof (struct mrsas_init_queue_info));
3422
3423 cmd->frame_count = 1;
3424
3425 *cmd_ptr = cmd;
3426
3427 return (DDI_SUCCESS);
3428 }
3429
3430
3431 /*
3432 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3433 */
3434 int
3435 mrsas_init_adapter_ppc (struct mrsas_instance *instance)
3436 {
3437 struct mrsas_cmd *cmd;
3438
3439 /* allocate memory for mfi adapter(cmd pool, individual commands, mfi frames etc */
3440 if (alloc_space_for_mfi(instance) != DDI_SUCCESS){
3441 con_log(CL_ANN, (CE_NOTE,
3442 "Error, failed to allocate memory for MFI adapter"));
3443 return (DDI_FAILURE);
3444 }
3445
3446 /* Build INIT command */
3447 cmd = get_mfi_pkt(instance);
3448
3449 if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS){
3450 con_log(CL_ANN, (CE_NOTE,
3451 "Error, failed to build INIT command"));
3452
3453 goto fail_undo_alloc_mfi_space;
3454 }
3455
3456 //Disalbe interrupt before sending init frame ( see linux driver code)
3457 /* send INIT MFI frame in polled mode */
3458 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3459 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3460 goto fail_fw_init;
3461 }
3462
3463 if (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000) {
3464 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3465 instance->flag_ieee = 1;
3466 }
3467
3468 instance->unroll.alloc_space_mfi = 1;
3469 instance->unroll.verBuff = 1;
3470
3471 return (DDI_SUCCESS);
3472
3473
3474 fail_fw_init:
3475 mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3476
3477 fail_undo_alloc_mfi_space:
3478 return_mfi_pkt(instance, cmd);
3479 free_space_for_mfi(instance);
3480
3481 return (DDI_FAILURE);
3482
3483 }
3484
3485 /*
3486 * mrsas_init_adapter - Initialize adapter.
3487 */
3488 int
3489 mrsas_init_adapter (struct mrsas_instance *instance)
3490 {
3491 struct mrsas_ctrl_info ctrl_info;
3492
3493
3494 /* we expect the FW state to be READY */
3495 if (mfi_state_transition_to_ready(instance)) {
3496 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3497 return (DDI_FAILURE);
3498 }
3499
3500 /* get various operational parameters from status register */
3501 instance->max_num_sge =
3502 (instance->func_ptr->read_fw_status_reg(instance) &
3503 0xFF0000) >> 0x10;
3504 instance->max_num_sge =
3505 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3506 MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3507
3508 /*
3509 * Reduce the max supported cmds by 1. This is to ensure that the
3510 * reply_q_sz (1 more than the max cmd that driver may send)
3511 * does not exceed max cmds that the FW can support
3512 */
3513 instance->max_fw_cmds =
3514 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3515 instance->max_fw_cmds = instance->max_fw_cmds - 1;
3516
3517
3518
3519 /* Initialize adapter */
3520 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3521 con_log(CL_ANN, (CE_WARN, "mr_sas: "
3522 "could not initialize adapter"));
3523 return (DDI_FAILURE);
3524 }
3525
3526 /* gather misc FW related information */
3527 instance->disable_online_ctrl_reset = 0;
3528
3529 if (!get_ctrl_info(instance, &ctrl_info)) {
3530 instance->max_sectors_per_req = ctrl_info.max_request_size;
3531 con_log(CL_ANN1, (CE_NOTE,
3532 "product name %s ld present %d",
3533 ctrl_info.product_name, ctrl_info.ld_present_count));
3534 } else {
3535 instance->max_sectors_per_req = instance->max_num_sge *
3536 PAGESIZE / 512;
3537 }
3538
3539 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG) {
3540 instance->disable_online_ctrl_reset = 1;
3541 con_log(CL_ANN1, (CE_NOTE,
3542 "Disable online control Flag is set\n"));
3543 }
3544 else {
3545 con_log(CL_ANN1, (CE_NOTE,
3546 "Disable online control Flag is not set\n"));
3547 }
3548
3549 return (DDI_SUCCESS);
3550
3551 }
3552
3553
3554
3555 static int
3556 mrsas_issue_init_mfi(struct mrsas_instance *instance)
3557 {
3558 struct mrsas_cmd *cmd;
3559 struct mrsas_init_frame *init_frame;
3560 struct mrsas_init_queue_info *initq_info;
3561
3562 /*
3563 * Prepare a init frame. Note the init frame points to queue info
3564 * structure. Each frame has SGL allocated after first 64 bytes. For
3565 * this frame - since we don't need any SGL - we use SGL's space as
3566 * queue info structure
3567 */
3568 con_log(CL_ANN1, (CE_NOTE,
3569 "mrsas_issue_init_mfi: entry\n"));
3570 cmd = get_mfi_app_pkt(instance);
3571
3572 if (!cmd) {
3573 con_log(CL_ANN1, (CE_WARN,
3574 "mrsas_issue_init_mfi: get_pkt failed\n"));
3575 return (DDI_FAILURE);
3576 }
3577
3578 /* Clear the frame buffer and assign back the context id */
3579 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3580 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3581 cmd->index);
3582
3583 init_frame = (struct mrsas_init_frame *)cmd->frame;
3584 initq_info = (struct mrsas_init_queue_info *)
3585 ((unsigned long)init_frame + 64);
3586
3587 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3588 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3589
3590 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3591
3592 ddi_put32(cmd->frame_dma_obj.acc_handle,
3593 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3594 ddi_put32(cmd->frame_dma_obj.acc_handle,
3595 &initq_info->producer_index_phys_addr_hi, 0);
3596 ddi_put32(cmd->frame_dma_obj.acc_handle,
3597 &initq_info->producer_index_phys_addr_lo,
3598 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3599 ddi_put32(cmd->frame_dma_obj.acc_handle,
3600 &initq_info->consumer_index_phys_addr_hi, 0);
3601 ddi_put32(cmd->frame_dma_obj.acc_handle,
3602 &initq_info->consumer_index_phys_addr_lo,
3603 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3604
3605 ddi_put32(cmd->frame_dma_obj.acc_handle,
3606 &initq_info->reply_queue_start_phys_addr_hi, 0);
3607 ddi_put32(cmd->frame_dma_obj.acc_handle,
3608 &initq_info->reply_queue_start_phys_addr_lo,
3609 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3610
3611 ddi_put8(cmd->frame_dma_obj.acc_handle,
3612 &init_frame->cmd, MFI_CMD_OP_INIT);
3613 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3614 MFI_CMD_STATUS_POLL_MODE);
3615 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3616 ddi_put32(cmd->frame_dma_obj.acc_handle,
3617 &init_frame->queue_info_new_phys_addr_lo,
3618 cmd->frame_phys_addr + 64);
3619 ddi_put32(cmd->frame_dma_obj.acc_handle,
3620 &init_frame->queue_info_new_phys_addr_hi, 0);
3621
3622 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3623 sizeof (struct mrsas_init_queue_info));
3624
3625 cmd->frame_count = 1;
3626
3627 /* issue the init frame in polled mode */
3628 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3629 con_log(CL_ANN1, (CE_WARN,
3630 "mrsas_issue_init_mfi():failed to "
3631 "init firmware"));
3632 return_mfi_app_pkt(instance, cmd);
3633 return (DDI_FAILURE);
3634 }
3635
3636 return_mfi_app_pkt(instance, cmd);
3637 con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));
3638
3639 return (DDI_SUCCESS);
3640 }
3641 /*
3642 * mfi_state_transition_to_ready : Move the FW to READY state
3643 *
3644 * @reg_set : MFI register set
3645 */
3646 int
3647 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3648 {
3649 int i;
3650 uint8_t max_wait;
3651 uint32_t fw_ctrl = 0;
3652 uint32_t fw_state;
3653 uint32_t cur_state;
3654 uint32_t cur_abs_reg_val;
3655 uint32_t prev_abs_reg_val;
3656 uint32_t status;
3657
3658 cur_abs_reg_val =
3659 instance->func_ptr->read_fw_status_reg(instance);
3660 fw_state =
3661 cur_abs_reg_val & MFI_STATE_MASK;
3662 con_log(CL_ANN1, (CE_CONT,
3663 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3664
3665 while (fw_state != MFI_STATE_READY) {
3666 con_log(CL_ANN, (CE_CONT,
3667 "mfi_state_transition_to_ready:FW state%x", fw_state));
3668
3669 switch (fw_state) {
3670 case MFI_STATE_FAULT:
3671 con_log(CL_ANN, (CE_NOTE,
3672 "mr_sas: FW in FAULT state!!"));
3673
3674 return (ENODEV);
3675 case MFI_STATE_WAIT_HANDSHAKE:
3676 /* set the CLR bit in IMR0 */
3677 con_log(CL_ANN1, (CE_NOTE,
3678 "mr_sas: FW waiting for HANDSHAKE"));
3679 /*
3680 * PCI_Hot Plug: MFI F/W requires
3681 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3682 * to be set
3683 */
3684 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3685 if (!instance->tbolt) {
3686 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3687 MFI_INIT_HOTPLUG, instance);
3688 } else {
3689 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3690 MFI_INIT_HOTPLUG, instance);
3691 }
3692 max_wait = (instance->tbolt == 1) ? 180 : 2;
3693 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3694 break;
3695 case MFI_STATE_BOOT_MESSAGE_PENDING:
3696 /* set the CLR bit in IMR0 */
3697 con_log(CL_ANN1, (CE_NOTE,
3698 "mr_sas: FW state boot message pending"));
3699 /*
3700 * PCI_Hot Plug: MFI F/W requires
3701 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3702 * to be set
3703 */
3704 if (!instance->tbolt) {
3705 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3706 } else {
3707 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3708 instance);
3709 }
3710 max_wait = (instance->tbolt == 1) ? 180 : 10;
3711 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3712 break;
3713 case MFI_STATE_OPERATIONAL:
3714 /* bring it to READY state; assuming max wait 2 secs */
3715 instance->func_ptr->disable_intr(instance);
3716 con_log(CL_ANN1, (CE_NOTE,
3717 "mr_sas: FW in OPERATIONAL state"));
3718 /*
3719 * PCI_Hot Plug: MFI F/W requires
3720 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3721 * to be set
3722 */
3723 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3724 if (!instance->tbolt) {
3725 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3726 } else {
3727 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3728 instance);
3729
3730 for (i = 0; i < (10 * 1000); i++) {
3731 status =
3732 RD_RESERVED0_REGISTER(instance);
3733 if (status & 1)
3734 delay(1 *
3735 drv_usectohz(MILLISEC));
3736 else
3737 break;
3738 }
3739
3740 }
3741 max_wait = (instance->tbolt == 1) ? 180 : 10;
3742 cur_state = MFI_STATE_OPERATIONAL;
3743 break;
3744 case MFI_STATE_UNDEFINED:
3745 /* this state should not last for more than 2 seconds */
3746 con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3747
3748 max_wait = (instance->tbolt == 1) ? 180 : 2;
3749 cur_state = MFI_STATE_UNDEFINED;
3750 break;
3751 case MFI_STATE_BB_INIT:
3752 max_wait = (instance->tbolt == 1) ? 180 : 2;
3753 cur_state = MFI_STATE_BB_INIT;
3754 break;
3755 case MFI_STATE_FW_INIT:
3756 max_wait = (instance->tbolt == 1) ? 180 : 2;
3757 cur_state = MFI_STATE_FW_INIT;
3758 break;
3759 case MFI_STATE_FW_INIT_2:
3760 max_wait = 180;
3761 cur_state = MFI_STATE_FW_INIT_2;
3762 break;
3763 case MFI_STATE_DEVICE_SCAN:
3764 max_wait = 180;
3765 cur_state = MFI_STATE_DEVICE_SCAN;
3766 prev_abs_reg_val = cur_abs_reg_val;
3767 con_log(CL_NONE, (CE_NOTE,
3768 "Device scan in progress ...\n"));
3769 break;
3770 case MFI_STATE_FLUSH_CACHE:
3771 max_wait = 180;
3772 cur_state = MFI_STATE_FLUSH_CACHE;
3773 break;
3774 default:
3775 con_log(CL_ANN1, (CE_NOTE,
3776 "mr_sas: Unknown state 0x%x", fw_state));
3777 return (ENODEV);
3778 }
3779
3780 /* the cur_state should not last for more than max_wait secs */
3781 for (i = 0; i < (max_wait * MILLISEC); i++) {
3782 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
3783 cur_abs_reg_val =
3784 instance->func_ptr->read_fw_status_reg(instance);
3785 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3786
3787 if (fw_state == cur_state) {
3788 delay(1 * drv_usectohz(MILLISEC));
3789 } else {
3790 break;
3791 }
3792 }
3793 if (fw_state == MFI_STATE_DEVICE_SCAN) {
3794 if (prev_abs_reg_val != cur_abs_reg_val) {
3795 continue;
3796 }
3797 }
3798
3799 /* return error if fw_state hasn't changed after max_wait */
3800 if (fw_state == cur_state) {
3801 con_log(CL_ANN1, (CE_WARN,
3802 "FW state hasn't changed in %d secs", max_wait));
3803 return (ENODEV);
3804 }
3805 };
3806
3807 if (!instance->tbolt) {
3808 fw_ctrl = RD_IB_DOORBELL(instance);
3809 con_log(CL_ANN1, (CE_CONT,
3810 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
3811 }
3812
3813 #if 0
3814 /*
3815 * Write 0xF to the doorbell register to do the following.
3816 * - Abort all outstanding commands (bit 0).
3817 * - Transition from OPERATIONAL to READY state (bit 1).
3818 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3819 * - Set to release FW to continue running (i.e. BIOS handshake
3820 * (bit 3).
3821 */
3822 if (!instance->tbolt) {
3823 WR_IB_DOORBELL(0xF, instance);
3824 }
3825 #endif
3826
3827 return (DDI_SUCCESS);
3828 }
3829
3830 /*
3831 * get_seq_num
3832 */
3833 static int
3834 get_seq_num(struct mrsas_instance *instance,
3835 struct mrsas_evt_log_info *eli)
3836 {
3837 int ret = DDI_SUCCESS;
3838
3839 dma_obj_t dcmd_dma_obj;
3840 struct mrsas_cmd *cmd;
3841 struct mrsas_dcmd_frame *dcmd;
3842 struct mrsas_evt_log_info *eli_tmp;
3843 if (instance->tbolt) {
3844 cmd = get_raid_msg_mfi_pkt(instance);
3845 } else {
3846 cmd = get_mfi_pkt(instance);
3847 }
3848
3849 if (!cmd) {
3850 cmn_err(CE_WARN,
3851 "Failed to get a cmd from free-pool in get_seq_num(). fw_outstanding=0x%X max_fw_cmds=0x%X",
3852 instance->fw_outstanding, instance->max_fw_cmds);
3853 return (ENOMEM);
3854 }
3855
3856 /* Clear the frame buffer and assign back the context id */
3857 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3858 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3859 cmd->index);
3860
3861 dcmd = &cmd->frame->dcmd;
3862
3863 /* allocate the data transfer buffer */
3864 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
3865 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3866 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3867 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3868 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3869 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3870
3871 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3872 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3873 cmn_err(CE_WARN,
3874 "get_seq_num: could not allocate data transfer buffer.");
3875 return (DDI_FAILURE);
3876 }
3877
3878 (void) memset(dcmd_dma_obj.buffer, 0,
3879 sizeof (struct mrsas_evt_log_info));
3880
3881 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3882
3883 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3884 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3885 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3886 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3887 MFI_FRAME_DIR_READ);
3888 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3889 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3890 sizeof (struct mrsas_evt_log_info));
3891 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3892 MR_DCMD_CTRL_EVENT_GET_INFO);
3893 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3894 sizeof (struct mrsas_evt_log_info));
3895 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3896 dcmd_dma_obj.dma_cookie[0].dmac_address);
3897
3898 cmd->sync_cmd = MRSAS_TRUE;
3899 cmd->frame_count = 1;
3900
3901 if (instance->tbolt) {
3902 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3903 }
3904
3905 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3906 cmn_err(CE_WARN, "get_seq_num: "
3907 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
3908 ret = DDI_FAILURE;
3909 } else {
3910 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
3911 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
3912 &eli_tmp->newest_seq_num);
3913 ret = DDI_SUCCESS;
3914 }
3915
3916 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
3917 ret = DDI_FAILURE;
3918
3919 if (instance->tbolt) {
3920 return_raid_msg_mfi_pkt(instance, cmd);
3921 } else {
3922 return_mfi_pkt(instance, cmd);
3923 }
3924
3925 return (ret);
3926 }
3927
3928 /*
3929 * start_mfi_aen
3930 */
3931 static int
3932 start_mfi_aen(struct mrsas_instance *instance)
3933 {
3934 int ret = 0;
3935
3936 struct mrsas_evt_log_info eli;
3937 union mrsas_evt_class_locale class_locale;
3938
3939 /* get the latest sequence number from FW */
3940 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
3941
3942 if (get_seq_num(instance, &eli)) {
3943 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
3944 return (-1);
3945 }
3946
3947 /* register AEN with FW for latest sequence number plus 1 */
3948 class_locale.members.reserved = 0;
3949 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
3950 class_locale.members.class = MR_EVT_CLASS_INFO;
3951 class_locale.word = LE_32(class_locale.word);
3952 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
3953 class_locale.word);
3954
3955 if (ret) {
3956 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
3957 return (-1);
3958 }
3959
3960
3961 return (ret);
3962 }
3963
3964 /*
3965 * flush_cache
3966 */
3967 static void
3968 flush_cache(struct mrsas_instance *instance)
3969 {
3970 struct mrsas_cmd *cmd = NULL;
3971 struct mrsas_dcmd_frame *dcmd;
3972 uint32_t max_cmd = instance->max_fw_cmds;
3973 if (instance->tbolt) {
3974 cmd = get_raid_msg_mfi_pkt(instance);
3975 } else {
3976 cmd = get_mfi_pkt(instance);
3977 }
3978
3979 if (!cmd) {
3980 cmn_err(CE_WARN,
3981 "Failed to get a cmd from free-pool in flush_cache(). fw_outstanding=0x%X max_fw_cmds=0x%X",
3982 instance->fw_outstanding, instance->max_fw_cmds);
3983 return;
3984 }
3985
3986 /* Clear the frame buffer and assign back the context id */
3987 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3988 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3989 cmd->index);
3990
3991 dcmd = &cmd->frame->dcmd;
3992
3993 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3994
3995 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3996 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
3997 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
3998 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3999 MFI_FRAME_DIR_NONE);
4000 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4001 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4002 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4003 MR_DCMD_CTRL_CACHE_FLUSH);
4004 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4005 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4006
4007 cmd->frame_count = 1;
4008
4009 if (instance->tbolt) {
4010 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4011 }
4012
4013 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4014 con_log(CL_ANN1, (CE_WARN,
4015 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4016 }
4017 con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4018 if (instance->tbolt) {
4019 return_raid_msg_mfi_pkt(instance, cmd);
4020 } else {
4021 return_mfi_pkt(instance, cmd);
4022 }
4023
4024 }
4025
4026 /*
4027 * service_mfi_aen- Completes an AEN command
4028 * @instance: Adapter soft state
4029 * @cmd: Command to be completed
4030 *
4031 */
4032 void
4033 service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
4034 {
4035 uint32_t seq_num;
4036 uint32_t i;
4037 struct mrsas_evt_detail *evt_detail =
4038 (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
4039 int rval = 0;
4040 int tgt = 0;
4041 uint8_t dtype;
4042 #ifdef PDSUPPORT
4043 mrsas_pd_address_t *pd_addr;
4044 #endif
4045 ddi_acc_handle_t acc_handle;
4046
4047 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
4048
4049 acc_handle = cmd->frame_dma_obj.acc_handle;
4050 cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
4051 if (cmd->cmd_status == ENODATA) {
4052 cmd->cmd_status = 0;
4053 }
4054
4055 /*
4056 * log the MFI AEN event to the sysevent queue so that
4057 * application will get noticed
4058 */
4059 if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
4060 NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
4061 int instance_no = ddi_get_instance(instance->dip);
4062 con_log(CL_ANN, (CE_WARN,
4063 "mr_sas%d: Failed to log AEN event", instance_no));
4064 }
4065 /*
4066 * Check for any ld devices that has changed state. i.e. online
4067 * or offline.
4068 */
4069 con_log(CL_ANN1, (CE_CONT,
4070 "AEN: code = %x class = %x locale = %x args = %x",
4071 ddi_get32(acc_handle, &evt_detail->code),
4072 evt_detail->cl.members.class,
4073 ddi_get16(acc_handle, &evt_detail->cl.members.locale),
4074 ddi_get8(acc_handle, &evt_detail->arg_type)));
4075
4076 switch (ddi_get32(acc_handle, &evt_detail->code)) {
4077 case MR_EVT_CFG_CLEARED: {
4078 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
4079 if (instance->mr_ld_list[tgt].dip != NULL) {
4080 mutex_enter(&instance->config_dev_mtx);
4081 instance->mr_ld_list[tgt].flag =
4082 ~MRDRV_TGT_VALID;
4083 mutex_exit(&instance->config_dev_mtx);
4084 rval = mrsas_service_evt(instance, tgt, 0,
4085 MRSAS_EVT_UNCONFIG_TGT, NULL);
4086 con_log(CL_ANN1, (CE_WARN,
4087 "mr_sas: CFG CLEARED AEN rval = %d "
4088 "tgt id = %d", rval, tgt));
4089 }
4090 }
4091 break;
4092 }
4093
4094 case MR_EVT_LD_DELETED: {
4095 tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
4096 mutex_enter(&instance->config_dev_mtx);
4097 instance->mr_ld_list[tgt].flag = ~MRDRV_TGT_VALID;
4098 mutex_exit(&instance->config_dev_mtx);
4099 rval = mrsas_service_evt(instance,
4100 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
4101 MRSAS_EVT_UNCONFIG_TGT, NULL);
4102 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
4103 "tgt id = %d index = %d", rval,
4104 ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4105 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4106 break;
4107 } /* End of MR_EVT_LD_DELETED */
4108
4109 case MR_EVT_LD_CREATED: {
4110 rval = mrsas_service_evt(instance,
4111 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
4112 MRSAS_EVT_CONFIG_TGT, NULL);
4113 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
4114 "tgt id = %d index = %d", rval,
4115 ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4116 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4117 break;
4118 } /* End of MR_EVT_LD_CREATED */
4119
4120 #ifdef PDSUPPORT
4121 case MR_EVT_PD_REMOVED_EXT: {
4122 if (instance->tbolt) {
4123 pd_addr = &evt_detail->args.pd_addr;
4124 dtype = pd_addr->scsi_dev_type;
4125 con_log(CL_DLEVEL1, (CE_NOTE,
4126 " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
4127 " arg_type = %d ", dtype, evt_detail->arg_type));
4128 tgt = ddi_get16(acc_handle, &evt_detail->args.pd.device_id);
4129 mutex_enter(&instance->config_dev_mtx);
4130 instance->mr_tbolt_pd_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
4131 mutex_exit(&instance->config_dev_mtx);
4132 rval = mrsas_service_evt(instance,
4133 ddi_get16(acc_handle, &evt_detail->args.pd.device_id),
4134 1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4135 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
4136 "rval = %d tgt id = %d ", rval,
4137 ddi_get16(acc_handle, &evt_detail->args.pd.device_id)));
4138 break;
4139 }
4140 }/* End of MR_EVT_PD_REMOVED_EXT */
4141
4142 case MR_EVT_PD_INSERTED_EXT: {
4143 if (instance->tbolt) {
4144 rval = mrsas_service_evt(instance,
4145 ddi_get16(acc_handle, &evt_detail->args.pd.device_id),
4146 1, MRSAS_EVT_CONFIG_TGT, NULL);
4147 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:"
4148 "rval = %d tgt id = %d ", rval,
4149 ddi_get16(acc_handle, &evt_detail->args.pd.device_id)));
4150 break;
4151 }
4152 }/* End of MR_EVT_PD_INSERTED_EXT */
4153
4154 case MR_EVT_PD_STATE_CHANGE: {
4155 if (instance->tbolt) {
4156 tgt = ddi_get16(acc_handle, &evt_detail->args.pd.device_id);
4157 if ((evt_detail->args.pd_state.prevState == PD_SYSTEM) &&
4158 (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
4159 mutex_enter(&instance->config_dev_mtx);
4160 instance->mr_tbolt_pd_list[tgt].flag =
4161 (uint8_t)~MRDRV_TGT_VALID;
4162 mutex_exit(&instance->config_dev_mtx);
4163 rval = mrsas_service_evt(instance,
4164 ddi_get16(acc_handle,
4165 &evt_detail->args.pd.device_id),
4166 1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4167 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
4168 "rval = %d tgt id = %d ", rval,
4169 ddi_get16(acc_handle,
4170 &evt_detail->args.pd.device_id)));
4171 break;
4172 }
4173 if ((evt_detail->args.pd_state.prevState
4174 == UNCONFIGURED_GOOD) &&
4175 (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
4176 rval = mrsas_service_evt(instance,
4177 ddi_get16(acc_handle,
4178 &evt_detail->args.pd.device_id),
4179 1, MRSAS_EVT_CONFIG_TGT, NULL);
4180 con_log(CL_ANN1, (CE_WARN,
4181 "mr_sas: PD_INSERTED: rval = %d "
4182 " tgt id = %d ", rval,
4183 ddi_get16(acc_handle,
4184 &evt_detail->args.pd.device_id)));
4185 break;
4186 }
4187 }
4188 }
4189 #endif
4190
4191 } /* End of Main Switch */
4192
4193 /* get copy of seq_num and class/locale for re-registration */
4194 seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
4195 seq_num++;
4196 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
4197 sizeof (struct mrsas_evt_detail));
4198
4199 ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
4200 ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);
4201
4202 instance->aen_seq_num = seq_num;
4203
4204 cmd->frame_count = 1;
4205
4206 cmd->retry_count_for_ocr = 0;
4207 cmd->drv_pkt_time = 0;
4208
4209 /* Issue the aen registration frame */
4210 instance->func_ptr->issue_cmd(cmd, instance);
4211 }
4212
4213 /*
4214 * complete_cmd_in_sync_mode - Completes an internal command
4215 * @instance: Adapter soft state
4216 * @cmd: Command to be completed
4217 *
4218 * The issue_cmd_in_sync_mode() function waits for a command to complete
4219 * after it issues a command. This function wakes up that waiting routine by
4220 * calling wake_up() on the wait queue.
4221 */
4222 static void
4223 complete_cmd_in_sync_mode(struct mrsas_instance *instance,
4224 struct mrsas_cmd *cmd)
4225 {
4226 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
4227 &cmd->frame->io.cmd_status);
4228
4229 cmd->sync_cmd = MRSAS_FALSE;
4230
4231 con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
4232 (void *)cmd));
4233
4234 mutex_enter(&instance->int_cmd_mtx);
4235 if (cmd->cmd_status == ENODATA) {
4236 cmd->cmd_status = 0;
4237 }
4238 cv_broadcast(&instance->int_cmd_cv);
4239 mutex_exit(&instance->int_cmd_mtx);
4240
4241 }
4242
4243 /*
4244 * Call this function inside mrsas_softintr.
4245 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4246 * @instance: Adapter soft state
4247 */
4248
4249 static uint32_t
4250 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4251 {
4252 uint32_t cur_abs_reg_val;
4253 uint32_t fw_state;
4254
4255 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
4256 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4257 if (fw_state == MFI_STATE_FAULT) {
4258 if (instance->disable_online_ctrl_reset == 1) {
4259 cmn_err(CE_WARN,
4260 "mrsas_initiate_ocr_if_fw_is_faulty: "
4261 "FW in Fault state, detected in ISR: "
4262 "FW doesn't support ocr ");
4263
4264 return (ADAPTER_RESET_NOT_REQUIRED);
4265 } else {
4266 con_log(CL_ANN, (CE_NOTE,
4267 "mrsas_initiate_ocr_if_fw_is_faulty: "
4268 "FW in Fault state, detected in ISR: FW supports ocr "));
4269
4270 return (ADAPTER_RESET_REQUIRED);
4271 }
4272 }
4273
4274 return (ADAPTER_RESET_NOT_REQUIRED);
4275 }
4276
4277 /*
4278 * mrsas_softintr - The Software ISR
4279 * @param arg : HBA soft state
4280 *
 * Called directly from the hardware interrupt handler when high-level
 * interrupts are not in use; otherwise triggered as a soft interrupt.
4283 */
static uint_t
mrsas_softintr(struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	struct scsa_cmd *acmd;
	struct mrsas_cmd *cmd;
	struct mlist_head *pos, *next;
	mlist_t process_list;
	struct mrsas_header *hdr;
	struct scsi_arq_status *arqstat;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));

	ASSERT(instance);

	mutex_enter(&instance->completed_pool_mtx);

	/* nothing completed since the interrupt fired: claim and return */
	if (mlist_empty(&instance->completed_pool_list)) {
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_CLAIMED);
	}

	instance->softint_running = 1;

	/*
	 * Move the entire completed-command list onto a private list so
	 * the pool mutex need not be held while each command is processed.
	 */
	INIT_LIST_HEAD(&process_list);
	mlist_splice(&instance->completed_pool_list, &process_list);
	INIT_LIST_HEAD(&instance->completed_pool_list);

	mutex_exit(&instance->completed_pool_mtx);

	/* perform all callbacks first, before releasing the SCBs */
	mlist_for_each_safe(pos, next, &process_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		/* syncronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		hdr = &cmd->frame->hdr;

		/* remove the internal command from the process list */
		mlist_del_init(&cmd->list);

		/*
		 * Dispatch on the MFI opcode.  NOTE(review): only the opcode
		 * below is read via ddi_get8(); later reads of hdr fields
		 * (cmd_status, context) access the frame memory directly --
		 * presumably safe on little-endian x86, confirm for other
		 * platforms.
		 */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
		case MFI_CMD_OP_PD_SCSI:
		case MFI_CMD_OP_LD_SCSI:
		case MFI_CMD_OP_LD_READ:
		case MFI_CMD_OP_LD_WRITE:
			/*
			 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
			 * could have been issued either through an
			 * IO path or an IOCTL path. If it was via IOCTL,
			 * we will send it to internal completion.
			 */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				complete_cmd_in_sync_mode(instance, cmd);
				break;
			}

			/* regular commands */
			acmd = cmd->cmd;
			pkt = CMD2PKT(acmd);

			/* sync data DMA toward the CPU before inspecting it */
			if (acmd->cmd_flags & CFLAG_DMAVALID) {
				if (acmd->cmd_flags & CFLAG_CONSISTENT) {
					(void) ddi_dma_sync(acmd->cmd_dmahandle,
					    acmd->cmd_dma_offset,
					    acmd->cmd_dma_len,
					    DDI_DMA_SYNC_FORCPU);
				}
			}

			/* optimistic defaults; refined by cmd_status below */
			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;
			pkt->pkt_state = STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA | STATE_GOT_STATUS;

			con_log(CL_ANN, (CE_CONT,
			    "CDB[0] = %x completed for %s: size %lx context %x",
			    pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
			    acmd->cmd_dmacount, hdr->context));

			if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
				struct scsi_inquiry *inq;

				if (acmd->cmd_dmacount != 0) {
					bp_mapin(acmd->cmd_buf);
					inq = (struct scsi_inquiry *)
					    acmd->cmd_buf->b_un.b_addr;

					/* don't expose physical drives to OS */
					if (acmd->islogical &&
					    (hdr->cmd_status == MFI_STAT_OK)) {
						display_scsi_inquiry(
						    (caddr_t)inq);
					} else if ((hdr->cmd_status ==
					    MFI_STAT_OK) && inq->inq_dtype ==
					    DTYPE_DIRECT) {

						display_scsi_inquiry(
						    (caddr_t)inq);

						/*
						 * for physical disk: fake a
						 * "not found" status so the
						 * OS does not enumerate it
						 */
						hdr->cmd_status =
						    MFI_STAT_DEVICE_NOT_FOUND;
					}
				}
			}

			/* translate MFI status to SCSA pkt fields */
			switch (hdr->cmd_status) {
			case MFI_STAT_OK:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_CC_IN_PROGRESS:
			case MFI_STAT_LD_RECON_IN_PROGRESS:
				/* background operation; IO still succeeded */
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_INIT_IN_PROGRESS:
				con_log(CL_ANN, (CE_WARN, "Initialization in Progress"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			case MFI_STAT_SCSI_DONE_WITH_ERROR:
				con_log(CL_ANN, (CE_CONT, "scsi_done error"));

				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {

					con_log(CL_ANN, (CE_WARN, "TEST_UNIT_READY fail"));

				} else {
					/*
					 * Build auto-request-sense data from
					 * the sense bytes FW wrote into
					 * cmd->sense.
					 */
					pkt->pkt_state |= STATE_ARQ_DONE;
					arqstat = (void *)(pkt->pkt_scbp);
					arqstat->sts_rqpkt_reason = CMD_CMPLT;
					arqstat->sts_rqpkt_resid = 0;
					arqstat->sts_rqpkt_state |=
					    STATE_GOT_BUS | STATE_GOT_TARGET
					    | STATE_SENT_CMD
					    | STATE_XFERRED_DATA;
					*(uint8_t *)&arqstat->sts_rqpkt_status =
					    STATUS_GOOD;
					ddi_rep_get8(
					    cmd->frame_dma_obj.acc_handle,
					    (uint8_t *)
					    &(arqstat->sts_sensedata),
					    cmd->sense,
					    sizeof (struct scsi_extended_sense),
					    DDI_DEV_AUTOINCR);
				}
				break;
			case MFI_STAT_LD_OFFLINE:
			case MFI_STAT_DEVICE_NOT_FOUND:
				con_log(CL_ANN, (CE_CONT,
				    "mrsas_softintr:device not found error"));
				pkt->pkt_reason = CMD_DEV_GONE;
				pkt->pkt_statistics = STAT_DISCON;
				break;
			case MFI_STAT_LD_LBA_OUT_OF_RANGE:
				/*
				 * Synthesize ILLEGAL REQUEST sense data
				 * locally; FW supplies no sense here.
				 */
				pkt->pkt_state |= STATE_ARQ_DONE;
				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |= STATE_GOT_BUS
				    | STATE_GOT_TARGET | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;

				arqstat->sts_sensedata.es_valid = 1;
				arqstat->sts_sensedata.es_key =
				    KEY_ILLEGAL_REQUEST;
				arqstat->sts_sensedata.es_class =
				    CLASS_EXTENDED_SENSE;

				/*
				 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
				 * ASC: 0x21h; ASCQ: 0x00h;
				 */
				arqstat->sts_sensedata.es_add_code = 0x21;
				arqstat->sts_sensedata.es_qual_code = 0x00;

				break;

			default:
				con_log(CL_ANN, (CE_CONT, "Unknown status!"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			}

			/* one fewer command outstanding at the firmware */
			atomic_add_16(&instance->fw_outstanding, (-1));

			/* Call the callback routine */
			if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
			    pkt->pkt_comp) {

				con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_softintr: "
				    "posting to scsa cmd %p index %x pkt %p "
				    "time %llx", (void *)cmd, cmd->index,
				    (void *)pkt, gethrtime()));
				(*pkt->pkt_comp)(pkt);

			}

			return_mfi_pkt(instance, cmd);
			break;

		case MFI_CMD_OP_SMP:
		case MFI_CMD_OP_STP:
			/* SMP/STP are always issued synchronously */
			complete_cmd_in_sync_mode(instance, cmd);
			break;

		case MFI_CMD_OP_DCMD:
			/* see if got an event notification */
			if (ddi_get32(cmd->frame_dma_obj.acc_handle,
			    &cmd->frame->dcmd.opcode) ==
			    MR_DCMD_CTRL_EVENT_WAIT) {
				if ((instance->aen_cmd == cmd) &&
				    (instance->aen_cmd->abort_aen)) {
					con_log(CL_ANN, (CE_WARN,
					    "mrsas_softintr: "
					    "aborted_aen returned"));
				} else {
					atomic_add_16(&instance->fw_outstanding,
					    (-1));
					service_mfi_aen(instance, cmd);
				}
			} else {
				/* ordinary DCMD: wake its synchronous waiter */
				complete_cmd_in_sync_mode(instance, cmd);
			}

			break;

		case MFI_CMD_OP_ABORT:
			con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
			/*
			 * MFI_CMD_OP_ABORT successfully completed
			 * in the synchronous mode
			 */
			complete_cmd_in_sync_mode(instance, cmd);
			break;

		default:
			/* unknown opcode: still complete the packet if any */
			if (cmd->pkt != NULL) {
				pkt = cmd->pkt;
				if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
				    pkt->pkt_comp) {

					con_log(CL_ANN1, (CE_CONT, "posting to "
					    "scsa cmd %p index %x pkt %p"
					    "time %llx, default ", (void *)cmd,
					    cmd->index, (void *)pkt,
					    gethrtime()));

					(*pkt->pkt_comp)(pkt);

				}
			}
			con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
			break;
		}
	}

	instance->softint_running = 0;

	return (DDI_INTR_CLAIMED);
}
4559
4560 /*
4561 * mrsas_alloc_dma_obj
4562 *
 * Allocate the memory and other resources for a DMA object.
4564 */
4565 int
4566 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
4567 uchar_t endian_flags)
4568 {
4569 int i;
4570 size_t alen = 0;
4571 uint_t cookie_cnt;
4572 struct ddi_device_acc_attr tmp_endian_attr;
4573
4574 tmp_endian_attr = endian_attr;
4575 tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
4576 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4577
4578 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
4579 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
4580 if (i != DDI_SUCCESS) {
4581
4582 switch (i) {
4583 case DDI_DMA_BADATTR :
4584 con_log(CL_ANN, (CE_WARN,
4585 "Failed ddi_dma_alloc_handle- Bad attribute"));
4586 break;
4587 case DDI_DMA_NORESOURCES :
4588 con_log(CL_ANN, (CE_WARN,
4589 "Failed ddi_dma_alloc_handle- No Resources"));
4590 break;
4591 default :
4592 con_log(CL_ANN, (CE_WARN,
4593 "Failed ddi_dma_alloc_handle: "
4594 "unknown status %d", i));
4595 break;
4596 }
4597
4598 return (-1);
4599 }
4600
4601 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
4602 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4603 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4604 alen < obj->size) {
4605
4606 ddi_dma_free_handle(&obj->dma_handle);
4607
4608 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4609
4610 return (-1);
4611 }
4612 if (obj->dma_handle == NULL) {
4613 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4614 return (-1);
4615 }
4616
4617 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4618 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4619 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4620
4621 ddi_dma_mem_free(&obj->acc_handle);
4622 ddi_dma_free_handle(&obj->dma_handle);
4623
4624 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4625
4626 return (-1);
4627 }
4628 if (obj->acc_handle == NULL) {
4629 ddi_dma_mem_free(&obj->acc_handle);
4630 ddi_dma_free_handle(&obj->dma_handle);
4631
4632 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4633 return (-1);
4634 }
4635
4636
4637 return (cookie_cnt);
4638 }
4639
4640 /*
4641 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4642 *
 * De-allocate the memory and other resources for a DMA object, which must
 * have been allocated by a previous call to mrsas_alloc_dma_obj()
4645 */
4646 int
4647 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4648 {
4649
4650 if ( (obj.dma_handle == NULL) || (obj.acc_handle == NULL) ) {
4651 return (DDI_SUCCESS);
4652 }
4653
4654 (void) ddi_dma_unbind_handle(obj.dma_handle);
4655 ddi_dma_mem_free(&obj.acc_handle);
4656 ddi_dma_free_handle(&obj.dma_handle);
4657 obj.acc_handle = NULL;
4658 return (DDI_SUCCESS);
4659 }
4660
4661 /*
4662 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
4663 * int, int (*)())
4664 *
4665 * Allocate dma resources for a new scsi command
4666 */
int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int dma_flags;
	int (*cb)(caddr_t);
	int i;

	ddi_dma_attr_t tmp_dma_attr = mrsas_generic_dma_attr;
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* derive the DMA direction from the buf(9S) flags */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	/* honor the target driver's wait/nowait policy */
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
	if (instance->tbolt) {
		/*
		 * OCR-RESET FIX: cap the per-command transfer size on
		 * Thunderbolt-class controllers (limit to 256K).
		 */
		tmp_dma_attr.dma_attr_count_max = (U64)mrsas_tbolt_max_cap_maxxfer;
		tmp_dma_attr.dma_attr_maxxfer = (U64)mrsas_tbolt_max_cap_maxxfer;
	}

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);

		case DDI_DMA_NORESOURCES:
			/* errno 0: retryable resource shortage */
			bioerror(bp, 0);
			return (DDI_FAILURE);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "impossible result (0x%x)", i));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* buffer did not fit in one window: set up windowed DMA */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
			goto no_dma_cookies;
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
			goto no_dma_cookies;
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		/* whole buffer mapped: exactly one window */
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

get_dma_cookies:
		i = 0;
		acmd->cmd_dmacount = 0;
		/* collect cookies up to the controller's SGE limit */
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		/* any unmapped remainder becomes the packet residual */
		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (DDI_SUCCESS);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "impossible result (0x%x)", i));
		break;
	}

no_dma_cookies:
	/* common failure path: tear down the handle and invalidate DMA */
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (DDI_FAILURE);
}
4813
4814 /*
4815 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
4816 *
4817 * move dma resources to next dma window
4818 *
4819 */
int
mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int i = 0;

	struct scsa_cmd *acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
	 */
	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
		/* single fully-consumed window: nothing left to move to */
		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
			return (DDI_SUCCESS);
		}

		/* at last window, cannot move */
		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
			return (DDI_FAILURE);
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {
			return (DDI_FAILURE);
		}

		acmd->cmd_cookie = 0;
	} else {
		/* still more cookies in this window - get the next one */
		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[0]);
	}

	/* get remaining cookies in this window, up to our maximum */
	for (;;) {
		/*
		 * NOTE(review): cmd_dmacount is not reset here, so it keeps
		 * the running total started in mrsas_dma_alloc(); pkt_resid
		 * below relies on that accumulation -- confirm intentional.
		 */
		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
		acmd->cmd_cookie++;

		if (i == instance->max_num_sge ||
		    acmd->cmd_cookie == acmd->cmd_ncookies) {
			break;
		}

		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[i]);
	}

	acmd->cmd_cookiecnt = i;

	/* bytes not yet transferred become the packet residual */
	if (bp->b_bcount >= acmd->cmd_dmacount) {
		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
	} else {
		pkt->pkt_resid = 0;
	}

	return (DDI_SUCCESS);
}
4880
4881 /*
4882 * build_cmd
4883 */
/*
 * build_cmd - Translate a SCSA packet into an MFI command frame
 * @instance:	Adapter soft state
 * @ap:		SCSI address (identifies logical vs. physical target)
 * @pkt:	The SCSI packet to translate
 * @cmd_done:	Out: set to 1 if the command was completed inline here
 *		(e.g. emulated MODE SENSE) and no frame should be issued
 *
 * Returns the prepared mrsas_cmd, or NULL if no command packet was
 * available or the request was completed inline (*cmd_done == 1).
 */
static struct mrsas_cmd *
build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint16_t flags = 0;
	uint32_t i;
	uint32_t context;
	uint32_t sge_bytes;
	uint32_t tmp_data_xfer_len;
	ddi_acc_handle_t acc_handle;
	struct mrsas_cmd *cmd;
	struct mrsas_sge64 *mfi_sgl;
	struct mrsas_sge_ieee *mfi_sgl_ieee;
	struct scsa_cmd *acmd = PKT2CMD(pkt);
	struct mrsas_pthru_frame *pthru;
	struct mrsas_io_frame *ldio;

	/* find out if this is logical or physical drive command. */
	acmd->islogical = MRDRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);
	*cmd_done = 0;

	/* get the command packet */
	if (!(cmd = get_mfi_pkt(instance))) {
		cmn_err(CE_WARN,
		    "Failed to get a cmd from free-pool in build_cmd(). fw_outstanding=0x%X max_fw_cmds=0x%X",
		    instance->fw_outstanding, instance->max_fw_cmds);
		return (NULL);
	}

	acc_handle = cmd->frame_dma_obj.acc_handle;

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);

	cmd->pkt = pkt;
	cmd->cmd = acmd;

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		flags = MFI_FRAME_DIR_WRITE;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		/*
		 * NOTE(review): "& ~CFLAG_DMASEND" tests whether ANY other
		 * flag bit is set, not the absence of CFLAG_DMASEND --
		 * presumably any mapped non-write command is treated as a
		 * read; confirm against CFLAG_DMAVALID usage.
		 */
		flags = MFI_FRAME_DIR_READ;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		flags = MFI_FRAME_DIR_NONE;
	}

	if (instance->flag_ieee) {
		flags |= MFI_FRAME_IEEE;
	}
	flags |= MFI_FRAME_SGL64;

	switch (pkt->pkt_cdbp[0]) {

	/*
	 * case SCMD_SYNCHRONIZE_CACHE:
	 * 	flush_cache(instance);
	 * 	return_mfi_pkt(instance, cmd);
	 * 	*cmd_done = 1;
	 *
	 * 	return (NULL);
	 */

	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:
		if (acmd->islogical) {
			ldio = (struct mrsas_io_frame *)cmd->frame;

			/*
			 * preare the Logical IO frame:
			 * 2nd bit is zero for all read cmds
			 */
			ddi_put8(acc_handle, &ldio->cmd,
			    (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
			    : MFI_CMD_OP_LD_READ);
			ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
			ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
			ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
			ddi_put16(acc_handle, &ldio->timeout, 0);
			ddi_put8(acc_handle, &ldio->reserved_0, 0);
			ddi_put16(acc_handle, &ldio->pad_0, 0);
			ddi_put16(acc_handle, &ldio->flags, flags);

			/* Initialize sense Information */
			bzero(cmd->sense, SENSE_LENGTH);
			ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
			ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
			ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
			    cmd->sense_phys_addr);
			ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
			/* control byte; 6-byte CDBs have none at offset 1 */
			ddi_put8(acc_handle, &ldio->access_byte,
			    (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
			ddi_put8(acc_handle, &ldio->sge_count,
			    acmd->cmd_cookiecnt);
			if (instance->flag_ieee) {
				mfi_sgl_ieee =
				    (struct mrsas_sge_ieee *)&ldio->sgl;
			} else {
				mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl;
			}

			context = ddi_get32(acc_handle, &ldio->context);

			/*
			 * Decode LBA and transfer length from the CDB;
			 * each CDB group encodes them at different offsets
			 * and widths (big-endian on the wire).
			 */
			if (acmd->cmd_cdblen == CDB_GROUP0) { /* 6-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    (uint16_t)(pkt->pkt_cdbp[4])));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[3])) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
				    << 16)));
			} else if (acmd->cmd_cdblen == CDB_GROUP1) { /* 10-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint16_t)(pkt->pkt_cdbp[8])) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			} else if (acmd->cmd_cdblen == CDB_GROUP5) { /* 12-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			} else if (acmd->cmd_cdblen == CDB_GROUP4) { /* 16-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint32_t)(pkt->pkt_cdbp[13])) |
				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_hi, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			}

			break;
		}
		/* fall through For all non-rd/wr cmds */
	default:

		/* certain MODE SENSE pages are emulated by the driver */
		switch (pkt->pkt_cdbp[0]) {
		case SCMD_MODE_SENSE:
		case SCMD_MODE_SENSE_G1: {
			union scsi_cdb	*cdbp;
			uint16_t	page_code;

			cdbp = (void *)pkt->pkt_cdbp;
			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
			switch (page_code) {
			case 0x3:
			case 0x4:
				/* complete inline; no frame goes to FW */
				(void) mrsas_mode_sense_build(pkt);
				return_mfi_pkt(instance, cmd);
				*cmd_done = 1;
				return (NULL);
			}
			break;
		}
		default:
			break;
		}

		pthru = (struct mrsas_pthru_frame *)cmd->frame;

		/* prepare the DCDB frame */
		ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
		    MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
		ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
		ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
		ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
		ddi_put8(acc_handle, &pthru->lun, 0);
		ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
		ddi_put16(acc_handle, &pthru->timeout, 0);
		ddi_put16(acc_handle, &pthru->flags, flags);
		/* total transfer length is the sum of all DMA cookies */
		tmp_data_xfer_len = 0;
		for (i = 0; i < acmd->cmd_cookiecnt; i++) {
			tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
		}
		ddi_put32(acc_handle, &pthru->data_xfer_len,
		    tmp_data_xfer_len);
		ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
		if (instance->flag_ieee) {
			mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
		} else {
			mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl;
		}

		bzero(cmd->sense, SENSE_LENGTH);
		ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
		ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
		ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
		    cmd->sense_phys_addr);

		context = ddi_get32(acc_handle, &pthru->context);
		ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
		    (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);

		break;
	}
#ifdef lint
	context = context;
#endif
	/* prepare the scatter-gather list for the firmware */
	if (instance->flag_ieee) {
		for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
			ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
			    acmd->cmd_dmacookies[i].dmac_laddress);
			ddi_put32(acc_handle, &mfi_sgl_ieee->length,
			    acmd->cmd_dmacookies[i].dmac_size);
		}
		sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
	} else {
		for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
			ddi_put64(acc_handle, &mfi_sgl->phys_addr,
			    acmd->cmd_dmacookies[i].dmac_laddress);
			ddi_put32(acc_handle, &mfi_sgl->length,
			    acmd->cmd_dmacookies[i].dmac_size);
		}
		sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
	}

	/*
	 * Frame count: one header frame plus however many additional
	 * MRMFI_FRAME_SIZE frames the SGL spills into, capped at 8.
	 */
	cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
	    ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;

	if (cmd->frame_count >= 8) {
		cmd->frame_count = 8;
	}

	return (cmd);
}
5152
5153 /*
5154 * wait_for_outstanding - Wait for all outstanding cmds
5155 * @instance: Adapter soft state
5156 *
 * This function polls the adapter up to 90 times at 1-millisecond intervals
 * (drv_usecwait(MILLISEC) per check) for the FW to complete all outstanding
 * commands. Returns non-zero if one or more IOs are still pending afterward.
5160 */
5161 static int
5162 wait_for_outstanding(struct mrsas_instance *instance)
5163 {
5164 int i;
5165 uint32_t wait_time = 90;
5166
5167 for (i = 0; i < wait_time; i++) {
5168 if (!instance->fw_outstanding) {
5169 break;
5170 }
5171
5172 drv_usecwait(MILLISEC); /* wait for 1000 usecs */;
5173 }
5174
5175 if (instance->fw_outstanding) {
5176 return (1);
5177 }
5178
5179 return (0);
5180 }
5181
5182
5183 /*
5184 * issue_mfi_pthru
5185 */
5186 static int
5187 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5188 struct mrsas_cmd *cmd, int mode)
5189 {
5190 void *ubuf;
5191 uint32_t kphys_addr = 0;
5192 uint32_t xferlen = 0;
5193 uint32_t new_xfer_length =0;
5194 uint_t model;
5195 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5196 dma_obj_t pthru_dma_obj;
5197 struct mrsas_pthru_frame *kpthru;
5198 struct mrsas_pthru_frame *pthru;
5199 int i;
5200 pthru = &cmd->frame->pthru;
5201 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5202
5203 if (instance->adapterresetinprogress) {
5204 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, "
5205 "returning mfi_pkt and setting TRAN_BUSY\n"));
5206 return (DDI_FAILURE);
5207 }
5208 model = ddi_model_convert_from(mode & FMODELS);
5209 if (model == DDI_MODEL_ILP32) {
5210 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5211
5212 xferlen = kpthru->sgl.sge32[0].length;
5213
5214 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5215 } else {
5216 #ifdef _ILP32
5217 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5218 xferlen = kpthru->sgl.sge32[0].length;
5219 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5220 #else
5221 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5222 xferlen = kpthru->sgl.sge64[0].length;
5223 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5224 #endif
5225 }
5226
5227 if (xferlen) {
5228 /* means IOCTL requires DMA */
5229 /* allocate the data transfer buffer */
5230 //pthru_dma_obj.size = xferlen;
5231 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen,new_xfer_length,PAGESIZE);
5232 pthru_dma_obj.size = new_xfer_length;
5233 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5234 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5235 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5236 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5237 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5238
5239 /* allocate kernel buffer for DMA */
5240 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5241 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5242 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5243 "could not allocate data transfer buffer."));
5244 return (DDI_FAILURE);
5245 }
5246 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5247
5248 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5249 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5250 for (i = 0; i < xferlen; i++) {
5251 if (ddi_copyin((uint8_t *)ubuf+i,
5252 (uint8_t *)pthru_dma_obj.buffer+i,
5253 1, mode)) {
5254 con_log(CL_ANN, (CE_WARN,
5255 "issue_mfi_pthru : "
5256 "copy from user space failed"));
5257 return (DDI_FAILURE);
5258 }
5259 }
5260 }
5261
5262 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5263 }
5264
5265 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5266 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5267 ddi_put8(acc_handle, &pthru->cmd_status, 0);
5268 ddi_put8(acc_handle, &pthru->scsi_status, 0);
5269 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5270 ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5271 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5272 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5273 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5274 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5275
5276 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5277 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
5278 /*ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5279
5280 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5281 pthru->cdb_len, DDI_DEV_AUTOINCR);
5282
5283 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5284 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5285 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5286
5287 cmd->sync_cmd = MRSAS_TRUE;
5288 cmd->frame_count = 1;
5289
5290 if (instance->tbolt) {
5291 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5292 }
5293
5294 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5295 con_log(CL_ANN, (CE_WARN,
5296 "issue_mfi_pthru: fw_ioctl failed"));
5297 } else {
5298 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
5299 for (i = 0; i < xferlen; i++) {
5300 if (ddi_copyout(
5301 (uint8_t *)pthru_dma_obj.buffer+i,
5302 (uint8_t *)ubuf+i, 1, mode)) {
5303 con_log(CL_ANN, (CE_WARN,
5304 "issue_mfi_pthru : "
5305 "copy to user space failed"));
5306 return (DDI_FAILURE);
5307 }
5308 }
5309 }
5310 }
5311
5312 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5313 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5314
5315 con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5316 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5317
5318 if (kpthru->sense_len) {
5319 uint sense_len = SENSE_LENGTH;
5320 void *sense_ubuf = (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5321 if (kpthru->sense_len <= SENSE_LENGTH) {
5322 sense_len = kpthru->sense_len;
5323 }
5324
5325 for (i = 0; i < sense_len; i++) {
5326 if (ddi_copyout(
5327 (uint8_t *)cmd->sense+i,
5328 (uint8_t *)sense_ubuf+i, 1, mode)) {
5329 con_log(CL_ANN, (CE_WARN,
5330 "issue_mfi_pthru : "
5331 "copy to user space failed"));
5332 }
5333 con_log(CL_DLEVEL1, (CE_WARN,
5334 "Copying Sense info sense_buff[%d] = 0x%X\n",i,*((uint8_t *)cmd->sense+i)));
5335 }
5336 }
5337 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5338 DDI_DMA_SYNC_FORDEV);
5339
5340 if (xferlen) {
5341 /* free kernel buffer */
5342 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5343 return (DDI_FAILURE);
5344 }
5345
5346 return (DDI_SUCCESS);
5347 }
5348
5349 /*
5350 * issue_mfi_dcmd
5351 */
5352 static int
5353 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5354 struct mrsas_cmd *cmd, int mode)
5355 {
5356 void *ubuf;
5357 uint32_t kphys_addr = 0;
5358 uint32_t xferlen = 0;
5359 uint32_t new_xfer_length = 0;
5360 uint32_t model;
5361 dma_obj_t dcmd_dma_obj;
5362 struct mrsas_dcmd_frame *kdcmd;
5363 struct mrsas_dcmd_frame *dcmd;
5364 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5365 int i;
5366 dcmd = &cmd->frame->dcmd;
5367 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5368
5369 if (instance->adapterresetinprogress) {
5370 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5371 "returning mfi_pkt and setting TRAN_BUSY\n"));
5372 return (DDI_FAILURE);
5373 }
5374 model = ddi_model_convert_from(mode & FMODELS);
5375 if (model == DDI_MODEL_ILP32) {
5376 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5377
5378 xferlen = kdcmd->sgl.sge32[0].length;
5379
5380 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5381 } else {
5382 #ifdef _ILP32
5383 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5384 xferlen = kdcmd->sgl.sge32[0].length;
5385 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5386 #else
5387 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5388 xferlen = kdcmd->sgl.sge64[0].length;
5389 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5390 #endif
5391 }
5392 if (xferlen) {
5393 /* means IOCTL requires DMA */
5394 /* allocate the data transfer buffer */
5395 //dcmd_dma_obj.size = xferlen;
5396 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen,new_xfer_length,PAGESIZE);
5397 dcmd_dma_obj.size = new_xfer_length;
5398 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5399 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5400 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5401 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5402 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5403
5404 /* allocate kernel buffer for DMA */
5405 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
5406 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5407 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
5408 "could not allocate data transfer buffer."));
5409 return (DDI_FAILURE);
5410 }
5411 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5412
5413 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5414 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5415 for (i = 0; i < xferlen; i++) {
5416 if (ddi_copyin((uint8_t *)ubuf + i,
5417 (uint8_t *)dcmd_dma_obj.buffer + i,
5418 1, mode)) {
5419 con_log(CL_ANN, (CE_WARN,
5420 "issue_mfi_dcmd : "
5421 "copy from user space failed"));
5422 return (DDI_FAILURE);
5423 }
5424 }
5425 }
5426
5427 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5428 }
5429
5430 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5431 ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5432 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5433 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5434 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5435 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5436
5437 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5438 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5439
5440 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5441 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5442 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5443
5444 cmd->sync_cmd = MRSAS_TRUE;
5445 cmd->frame_count = 1;
5446
5447 if (instance->tbolt) {
5448 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5449 }
5450
5451 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5452 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5453 } else {
5454 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5455 for (i = 0; i < xferlen; i++) {
5456 if (ddi_copyout(
5457 (uint8_t *)dcmd_dma_obj.buffer + i,
5458 (uint8_t *)ubuf + i,
5459 1, mode)) {
5460 con_log(CL_ANN, (CE_WARN,
5461 "issue_mfi_dcmd : "
5462 "copy to user space failed"));
5463 return (DDI_FAILURE);
5464 }
5465 }
5466 }
5467 }
5468
5469 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5470 con_log(CL_ANN, (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5471
5472 if (xferlen) {
5473 /* free kernel buffer */
5474 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5475 return (DDI_FAILURE);
5476 }
5477
5478 return (DDI_SUCCESS);
5479 }
5480
5481 /*
5482 * issue_mfi_smp
5483 */
5484 static int
5485 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5486 struct mrsas_cmd *cmd, int mode)
5487 {
5488 void *request_ubuf;
5489 void *response_ubuf;
5490 uint32_t request_xferlen = 0;
5491 uint32_t response_xferlen = 0;
5492 uint32_t new_xfer_length1 = 0;
5493 uint32_t new_xfer_length2 = 0;
5494 uint_t model;
5495 dma_obj_t request_dma_obj;
5496 dma_obj_t response_dma_obj;
5497 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5498 struct mrsas_smp_frame *ksmp;
5499 struct mrsas_smp_frame *smp;
5500 struct mrsas_sge32 *sge32;
5501 #ifndef _ILP32
5502 struct mrsas_sge64 *sge64;
5503 #endif
5504 int i;
5505 uint64_t tmp_sas_addr;
5506
5507 smp = &cmd->frame->smp;
5508 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5509
5510 if (instance->adapterresetinprogress) {
5511 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5512 "returning mfi_pkt and setting TRAN_BUSY\n"));
5513 return (DDI_FAILURE);
5514 }
5515 model = ddi_model_convert_from(mode & FMODELS);
5516 if (model == DDI_MODEL_ILP32) {
5517 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5518
5519 sge32 = &ksmp->sgl[0].sge32[0];
5520 response_xferlen = sge32[0].length;
5521 request_xferlen = sge32[1].length;
5522 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5523 "response_xferlen = %x, request_xferlen = %x",
5524 response_xferlen, request_xferlen));
5525
5526 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5527 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5528 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5529 "response_ubuf = %p, request_ubuf = %p",
5530 response_ubuf, request_ubuf));
5531 } else {
5532 #ifdef _ILP32
5533 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5534
5535 sge32 = &ksmp->sgl[0].sge32[0];
5536 response_xferlen = sge32[0].length;
5537 request_xferlen = sge32[1].length;
5538 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5539 "response_xferlen = %x, request_xferlen = %x",
5540 response_xferlen, request_xferlen));
5541
5542 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5543 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5544 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5545 "response_ubuf = %p, request_ubuf = %p",
5546 response_ubuf, request_ubuf));
5547 #else
5548 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5549
5550 sge64 = &ksmp->sgl[0].sge64[0];
5551 response_xferlen = sge64[0].length;
5552 request_xferlen = sge64[1].length;
5553
5554 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
5555 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
5556 #endif
5557 }
5558 if (request_xferlen) {
5559 /* means IOCTL requires DMA */
5560 /* allocate the data transfer buffer */
5561 //request_dma_obj.size = request_xferlen;
5562 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,new_xfer_length1,PAGESIZE);
5563 request_dma_obj.size = new_xfer_length1;
5564 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5565 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5566 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5567 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5568 request_dma_obj.dma_attr.dma_attr_align = 1;
5569
5570 /* allocate kernel buffer for DMA */
5571 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5572 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5573 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5574 "could not allocate data transfer buffer."));
5575 return (DDI_FAILURE);
5576 }
5577 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5578
5579 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5580 for (i = 0; i < request_xferlen; i++) {
5581 if (ddi_copyin((uint8_t *)request_ubuf + i,
5582 (uint8_t *)request_dma_obj.buffer + i,
5583 1, mode)) {
5584 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5585 "copy from user space failed"));
5586 return (DDI_FAILURE);
5587 }
5588 }
5589 }
5590
5591 if (response_xferlen) {
5592 /* means IOCTL requires DMA */
5593 /* allocate the data transfer buffer */
5594 //response_dma_obj.size = response_xferlen;
5595 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,new_xfer_length2,PAGESIZE);
5596 response_dma_obj.size = new_xfer_length2;
5597 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5598 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5599 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5600 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5601 response_dma_obj.dma_attr.dma_attr_align = 1;
5602
5603 /* allocate kernel buffer for DMA */
5604 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5605 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5606 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5607 "could not allocate data transfer buffer."));
5608 return (DDI_FAILURE);
5609 }
5610 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5611
5612 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5613 for (i = 0; i < response_xferlen; i++) {
5614 if (ddi_copyin((uint8_t *)response_ubuf + i,
5615 (uint8_t *)response_dma_obj.buffer + i,
5616 1, mode)) {
5617 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5618 "copy from user space failed"));
5619 return (DDI_FAILURE);
5620 }
5621 }
5622 }
5623
5624 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5625 ddi_put8(acc_handle, &smp->cmd_status, 0);
5626 ddi_put8(acc_handle, &smp->connection_status, 0);
5627 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5628 /* smp->context = ksmp->context; */
5629 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5630 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5631
5632 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5633 sizeof (uint64_t));
5634 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5635
5636 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5637
5638 model = ddi_model_convert_from(mode & FMODELS);
5639 if (model == DDI_MODEL_ILP32) {
5640 con_log(CL_ANN1, (CE_CONT,
5641 "issue_mfi_smp: DDI_MODEL_ILP32"));
5642
5643 sge32 = &smp->sgl[0].sge32[0];
5644 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5645 ddi_put32(acc_handle, &sge32[0].phys_addr,
5646 response_dma_obj.dma_cookie[0].dmac_address);
5647 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5648 ddi_put32(acc_handle, &sge32[1].phys_addr,
5649 request_dma_obj.dma_cookie[0].dmac_address);
5650 } else {
5651 #ifdef _ILP32
5652 con_log(CL_ANN1, (CE_CONT,
5653 "issue_mfi_smp: DDI_MODEL_ILP32"));
5654 sge32 = &smp->sgl[0].sge32[0];
5655 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5656 ddi_put32(acc_handle, &sge32[0].phys_addr,
5657 response_dma_obj.dma_cookie[0].dmac_address);
5658 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5659 ddi_put32(acc_handle, &sge32[1].phys_addr,
5660 request_dma_obj.dma_cookie[0].dmac_address);
5661 #else
5662 con_log(CL_ANN1, (CE_CONT,
5663 "issue_mfi_smp: DDI_MODEL_LP64"));
5664 sge64 = &smp->sgl[0].sge64[0];
5665 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5666 ddi_put64(acc_handle, &sge64[0].phys_addr,
5667 response_dma_obj.dma_cookie[0].dmac_address);
5668 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5669 ddi_put64(acc_handle, &sge64[1].phys_addr,
5670 request_dma_obj.dma_cookie[0].dmac_address);
5671 #endif
5672 }
5673 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5674 "smp->response_xferlen = %d, smp->request_xferlen = %d "
5675 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5676 ddi_get32(acc_handle, &sge32[1].length),
5677 ddi_get32(acc_handle, &smp->data_xfer_len)));
5678
5679 cmd->sync_cmd = MRSAS_TRUE;
5680 cmd->frame_count = 1;
5681
5682 if (instance->tbolt) {
5683 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5684 }
5685
5686 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5687 con_log(CL_ANN, (CE_WARN,
5688 "issue_mfi_smp: fw_ioctl failed"));
5689 } else {
5690 con_log(CL_ANN1, (CE_CONT,
5691 "issue_mfi_smp: copy to user space"));
5692
5693 if (request_xferlen) {
5694 for (i = 0; i < request_xferlen; i++) {
5695 if (ddi_copyout(
5696 (uint8_t *)request_dma_obj.buffer +
5697 i, (uint8_t *)request_ubuf + i,
5698 1, mode)) {
5699 con_log(CL_ANN, (CE_WARN,
5700 "issue_mfi_smp : copy to user space"
5701 " failed"));
5702 return (DDI_FAILURE);
5703 }
5704 }
5705 }
5706
5707 if (response_xferlen) {
5708 for (i = 0; i < response_xferlen; i++) {
5709 if (ddi_copyout(
5710 (uint8_t *)response_dma_obj.buffer
5711 + i, (uint8_t *)response_ubuf
5712 + i, 1, mode)) {
5713 con_log(CL_ANN, (CE_WARN,
5714 "issue_mfi_smp : copy to "
5715 "user space failed"));
5716 return (DDI_FAILURE);
5717 }
5718 }
5719 }
5720 }
5721
5722 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
5723 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
5724 ksmp->cmd_status));
5725
5726 if (request_xferlen) {
5727 /* free kernel buffer */
5728 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
5729 DDI_SUCCESS)
5730 return (DDI_FAILURE);
5731 }
5732
5733 if (response_xferlen) {
5734 /* free kernel buffer */
5735 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
5736 DDI_SUCCESS)
5737 return (DDI_FAILURE);
5738 }
5739
5740 return (DDI_SUCCESS);
5741 }
5742
5743 /*
5744 * issue_mfi_stp
5745 */
5746 static int
5747 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5748 struct mrsas_cmd *cmd, int mode)
5749 {
5750 void *fis_ubuf;
5751 void *data_ubuf;
5752 uint32_t fis_xferlen = 0;
5753 uint32_t new_xfer_length1 = 0;
5754 uint32_t new_xfer_length2 = 0;
5755 uint32_t data_xferlen = 0;
5756 uint_t model;
5757 dma_obj_t fis_dma_obj;
5758 dma_obj_t data_dma_obj;
5759 struct mrsas_stp_frame *kstp;
5760 struct mrsas_stp_frame *stp;
5761 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5762 int i;
5763
5764 stp = &cmd->frame->stp;
5765 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
5766
5767 if (instance->adapterresetinprogress) {
5768 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5769 "returning mfi_pkt and setting TRAN_BUSY\n"));
5770 return (DDI_FAILURE);
5771 }
5772 model = ddi_model_convert_from(mode & FMODELS);
5773 if (model == DDI_MODEL_ILP32) {
5774 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5775
5776 fis_xferlen = kstp->sgl.sge32[0].length;
5777 data_xferlen = kstp->sgl.sge32[1].length;
5778
5779 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5780 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5781 }
5782 else
5783 {
5784 #ifdef _ILP32
5785 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5786
5787 fis_xferlen = kstp->sgl.sge32[0].length;
5788 data_xferlen = kstp->sgl.sge32[1].length;
5789
5790 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5791 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5792 #else
5793 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
5794
5795 fis_xferlen = kstp->sgl.sge64[0].length;
5796 data_xferlen = kstp->sgl.sge64[1].length;
5797
5798 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
5799 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
5800 #endif
5801 }
5802
5803
5804 if (fis_xferlen) {
5805 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
5806 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
5807
5808 /* means IOCTL requires DMA */
5809 /* allocate the data transfer buffer */
5810 //fis_dma_obj.size = fis_xferlen;
5811 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,new_xfer_length1,PAGESIZE);
5812 fis_dma_obj.size = new_xfer_length1;
5813 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
5814 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5815 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5816 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
5817 fis_dma_obj.dma_attr.dma_attr_align = 1;
5818
5819 /* allocate kernel buffer for DMA */
5820 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
5821 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5822 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
5823 "could not allocate data transfer buffer."));
5824 return (DDI_FAILURE);
5825 }
5826 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
5827
5828 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5829 for (i = 0; i < fis_xferlen; i++) {
5830 if (ddi_copyin((uint8_t *)fis_ubuf + i,
5831 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
5832 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
5833 "copy from user space failed"));
5834 return (DDI_FAILURE);
5835 }
5836 }
5837 }
5838
5839 if (data_xferlen) {
5840 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
5841 "data_xferlen = %x", data_ubuf, data_xferlen));
5842
5843 /* means IOCTL requires DMA */
5844 /* allocate the data transfer buffer */
5845 //data_dma_obj.size = data_xferlen;
5846 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen,new_xfer_length2,PAGESIZE);
5847 data_dma_obj.size = new_xfer_length2;
5848 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
5849 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5850 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5851 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
5852 data_dma_obj.dma_attr.dma_attr_align = 1;
5853
5854 /* allocate kernel buffer for DMA */
5855 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
5856 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5857 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
5858 "could not allocate data transfer buffer."));
5859 return (DDI_FAILURE);
5860 }
5861 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
5862
5863 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5864 for (i = 0; i < data_xferlen; i++) {
5865 if (ddi_copyin((uint8_t *)data_ubuf + i,
5866 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
5867 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
5868 "copy from user space failed"));
5869 return (DDI_FAILURE);
5870 }
5871 }
5872 }
5873
5874 ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
5875 ddi_put8(acc_handle, &stp->cmd_status, 0);
5876 ddi_put8(acc_handle, &stp->connection_status, 0);
5877 ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
5878 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
5879
5880 ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
5881 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
5882
5883 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
5884 DDI_DEV_AUTOINCR);
5885
5886 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
5887 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
5888 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
5889 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
5890 fis_dma_obj.dma_cookie[0].dmac_address);
5891 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
5892 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
5893 data_dma_obj.dma_cookie[0].dmac_address);
5894
5895 cmd->sync_cmd = MRSAS_TRUE;
5896 cmd->frame_count = 1;
5897
5898 if (instance->tbolt) {
5899 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5900 }
5901
5902 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5903 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
5904 } else {
5905
5906 if (fis_xferlen) {
5907 for (i = 0; i < fis_xferlen; i++) {
5908 if (ddi_copyout(
5909 (uint8_t *)fis_dma_obj.buffer + i,
5910 (uint8_t *)fis_ubuf + i, 1, mode)) {
5911 con_log(CL_ANN, (CE_WARN,
5912 "issue_mfi_stp : copy to "
5913 "user space failed"));
5914 return (DDI_FAILURE);
5915 }
5916 }
5917 }
5918 }
5919 if (data_xferlen) {
5920 for (i = 0; i < data_xferlen; i++) {
5921 if (ddi_copyout(
5922 (uint8_t *)data_dma_obj.buffer + i,
5923 (uint8_t *)data_ubuf + i, 1, mode)) {
5924 con_log(CL_ANN, (CE_WARN,
5925 "issue_mfi_stp : copy to"
5926 " user space failed"));
5927 return (DDI_FAILURE);
5928 }
5929 }
5930 }
5931
5932 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
5933 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
5934 kstp->cmd_status));
5935
5936 if (fis_xferlen) {
5937 /* free kernel buffer */
5938 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
5939 return (DDI_FAILURE);
5940 }
5941
5942 if (data_xferlen) {
5943 /* free kernel buffer */
5944 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
5945 return (DDI_FAILURE);
5946 }
5947
5948 return (DDI_SUCCESS);
5949 }
5950
5951 /*
5952 * fill_up_drv_ver
5953 */
5954 void
5955 fill_up_drv_ver(struct mrsas_drv_ver *dv)
5956 {
5957 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
5958
5959 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
5960 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
5961 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
5962 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
5963 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
5964 strlen(MRSAS_RELDATE));
5965
5966 }
5967
5968 /*
5969 * handle_drv_ioctl
5970 */
5971 static int
5972 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5973 int mode)
5974 {
5975 int i;
5976 int rval = DDI_SUCCESS;
5977 int *props = NULL;
5978 void *ubuf;
5979
5980 uint8_t *pci_conf_buf;
5981 uint32_t xferlen;
5982 uint32_t num_props;
5983 uint_t model;
5984 struct mrsas_dcmd_frame *kdcmd;
5985 struct mrsas_drv_ver dv;
5986 struct mrsas_pci_information pi;
5987
5988 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5989
5990 model = ddi_model_convert_from(mode & FMODELS);
5991 if (model == DDI_MODEL_ILP32) {
5992 con_log(CL_ANN1, (CE_CONT,
5993 "handle_drv_ioctl: DDI_MODEL_ILP32"));
5994
5995 xferlen = kdcmd->sgl.sge32[0].length;
5996
5997 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5998 } else {
5999 #ifdef _ILP32
6000 con_log(CL_ANN1, (CE_CONT,
6001 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6002 xferlen = kdcmd->sgl.sge32[0].length;
6003 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6004 #else
6005 con_log(CL_ANN1, (CE_CONT,
6006 "handle_drv_ioctl: DDI_MODEL_LP64"));
6007 xferlen = kdcmd->sgl.sge64[0].length;
6008 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6009 #endif
6010 }
6011 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6012 "dataBuf=%p size=%d bytes", ubuf, xferlen));
6013
6014 switch (kdcmd->opcode) {
6015 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6016 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6017 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6018
6019 fill_up_drv_ver(&dv);
6020
6021 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6022 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6023 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6024 "copy to user space failed"));
6025 kdcmd->cmd_status = 1;
6026 rval = 1;
6027 } else {
6028 kdcmd->cmd_status = 0;
6029 }
6030 break;
6031 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6032 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
6033 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
6034
6035 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6036 0, "reg", &props, &num_props)) {
6037 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6038 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6039 "ddi_prop_look_int_array failed"));
6040 rval = DDI_FAILURE;
6041 } else {
6042
6043 pi.busNumber = (props[0] >> 16) & 0xFF;
6044 pi.deviceNumber = (props[0] >> 11) & 0x1f;
6045 pi.functionNumber = (props[0] >> 8) & 0x7;
6046 ddi_prop_free((void *)props);
6047 }
6048
6049 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6050
6051 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6052 offsetof(struct mrsas_pci_information, pciHeaderInfo));
6053 i++) {
6054 pci_conf_buf[i] =
6055 pci_config_get8(instance->pci_handle, i);
6056 }
6057
6058 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6059 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6060 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6061 "copy to user space failed"));
6062 kdcmd->cmd_status = 1;
6063 rval = 1;
6064 } else {
6065 kdcmd->cmd_status = 0;
6066 }
6067 break;
6068 default:
6069 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6070 "invalid driver specific IOCTL opcode = 0x%x",
6071 kdcmd->opcode));
6072 kdcmd->cmd_status = 1;
6073 rval = DDI_FAILURE;
6074 break;
6075 }
6076
6077 return (rval);
6078 }
6079
6080 /*
6081 * handle_mfi_ioctl
6082 */
6083 static int
6084 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6085 int mode)
6086 {
6087 int rval = DDI_SUCCESS;
6088
6089 struct mrsas_header *hdr;
6090 struct mrsas_cmd *cmd;
6091
6092 if (instance->tbolt) {
6093 cmd = get_raid_msg_mfi_pkt(instance);
6094 } else {
6095 cmd = get_mfi_pkt(instance);
6096 }
6097 if (!cmd) {
6098 cmn_err(CE_WARN,
6099 "Failed to get a cmd from free-pool in handle_mfi_ioctl(). "
6100 "fw_outstanding=0x%X max_fw_cmds=0x%X",
6101 instance->fw_outstanding, instance->max_fw_cmds);
6102 return (DDI_FAILURE);
6103 }
6104
6105 /* Clear the frame buffer and assign back the context id */
6106 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6107 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6108 cmd->index);
6109
6110 hdr = (struct mrsas_header *)&ioctl->frame[0];
6111
6112 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
6113 case MFI_CMD_OP_DCMD:
6114 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
6115 break;
6116 case MFI_CMD_OP_SMP:
6117 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
6118 break;
6119 case MFI_CMD_OP_STP:
6120 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
6121 break;
6122 case MFI_CMD_OP_LD_SCSI:
6123 case MFI_CMD_OP_PD_SCSI:
6124 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
6125 break;
6126 default:
6127 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
6128 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
6129 rval = DDI_FAILURE;
6130 break;
6131 }
6132
6133 if (instance->tbolt) {
6134 return_raid_msg_mfi_pkt(instance, cmd);
6135 } else {
6136 return_mfi_pkt(instance, cmd);
6137 }
6138
6139 return (rval);
6140 }
6141
6142 /*
6143 * AEN
6144 */
6145 static int
6146 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6147 {
6148 int rval = 0;
6149
6150 rval = register_mfi_aen(instance, instance->aen_seq_num,
6151 aen->class_locale_word);
6152
6153 aen->cmd_status = (uint8_t)rval;
6154
6155 return (rval);
6156 }
6157
/*
 * register_mfi_aen
 *
 * Issue (or re-issue) the MR_DCMD_CTRL_EVENT_WAIT DCMD that asks the
 * firmware to complete a command when an event matching the given
 * class/locale occurs.  'class_locale_word' arrives from user space;
 * 'seq_num' is the event sequence number to wait from.
 *
 * Returns 0 on success, non-zero (abort status or ENOMEM) on failure.
 * On success the issued command is remembered in instance->aen_cmd and
 * completes asynchronously — this function does not wait for it.
 */
static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct mrsas_cmd *cmd, *aen_cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	/*
	 * NOTE(review): the word is byte-swapped to LE and then the
	 * locale half is swapped again; the comparison below is done in
	 * this mixed representation and the same transform is re-applied
	 * before the mbox write — presumably deliberate for the FW's
	 * little-endian layout on big-endian hosts.  Preserve the order.
	 */
	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		/* Decode the class/locale of the AEN already in flight. */
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* Build the superset of old and new subscriptions. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			/* The pending AEN must be aborted before re-issue. */
			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = get_mfi_pkt(instance);
	}

	if (!cmd) {
		cmn_err(CE_WARN,
		    "Failed to get a cmd from free-pool in register_mfi_aen(). "
		    "fw_outstanding=0x%X max_fw_cmds=0x%X",
		    instance->fw_outstanding, instance->max_fw_cmds);
		return (ENOMEM);
	}


	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* The FW writes the matching event record into this buffer. */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
	/* Undo the earlier swaps so mbox.w[1] holds the FW wire format. */
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	curr_aen.word = LE_32(curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
	    curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_detail));

	instance->aen_seq_num = seq_num;


	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16 (&instance->fw_outstanding, 1); */
	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}
6300
6301 void
6302 display_scsi_inquiry(caddr_t scsi_inq)
6303 {
6304 #define MAX_SCSI_DEVICE_CODE 14
6305 int i;
6306 char inquiry_buf[256] = {0};
6307 int len;
6308 const char *const scsi_device_types[] = {
6309 "Direct-Access ",
6310 "Sequential-Access",
6311 "Printer ",
6312 "Processor ",
6313 "WORM ",
6314 "CD-ROM ",
6315 "Scanner ",
6316 "Optical Device ",
6317 "Medium Changer ",
6318 "Communications ",
6319 "Unknown ",
6320 "Unknown ",
6321 "Unknown ",
6322 "Enclosure ",
6323 };
6324
6325 len = 0;
6326
6327 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: ");
6328 for (i = 8; i < 16; i++) {
6329 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6330 scsi_inq[i]);
6331 }
6332
6333 len += snprintf(inquiry_buf + len, 265 - len, " Model: ");
6334
6335 for (i = 16; i < 32; i++) {
6336 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6337 scsi_inq[i]);
6338 }
6339
6340 len += snprintf(inquiry_buf + len, 265 - len, " Rev: ");
6341
6342 for (i = 32; i < 36; i++) {
6343 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6344 scsi_inq[i]);
6345 }
6346
6347 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6348
6349
6350 i = scsi_inq[0] & 0x1f;
6351
6352
6353 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ",
6354 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6355 "Unknown ");
6356
6357
6358 len += snprintf(inquiry_buf + len, 265 - len,
6359 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6360
6361 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6362 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
6363 } else {
6364 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6365 }
6366
6367 con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf));
6368 }
6369
6370 void
6371 io_timeout_checker(void *arg)
6372 {
6373 struct scsi_pkt *pkt;
6374 struct mrsas_instance *instance = arg;
6375 struct mrsas_cmd *cmd = NULL;
6376 struct mrsas_header *hdr;
6377 int time = 0;
6378 int counter = 0;
6379 struct mlist_head *pos, *next;
6380 mlist_t process_list;
6381
6382 if (instance->adapterresetinprogress == 1) {
6383 con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
6384 " reset in progress"));
6385
6386 instance->timeout_id = timeout(io_timeout_checker,
6387 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6388 return;
6389 }
6390
6391 /* See if this check needs to be in the beginning or last in ISR */
6392 if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
6393 cmn_err(CE_WARN, "io_timeout_checker:"
6394 "FW Fault, calling reset adapter");
6395 cmn_err(CE_CONT, "io_timeout_checker: fw_outstanding 0x%X max_fw_cmds 0x%X",
6396 instance->fw_outstanding, instance->max_fw_cmds );
6397 if (instance->adapterresetinprogress == 0) {
6398 instance->adapterresetinprogress = 1;
6399 if (instance->tbolt)
6400 mrsas_tbolt_reset_ppc(instance);
6401 else
6402 mrsas_reset_ppc(instance);
6403 instance->adapterresetinprogress = 0;
6404 }
6405 instance->timeout_id = timeout(io_timeout_checker,
6406 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6407 return;
6408 }
6409
6410 INIT_LIST_HEAD(&process_list);
6411
6412 mutex_enter(&instance->cmd_pend_mtx);
6413 mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
6414 cmd = mlist_entry(pos, struct mrsas_cmd, list);
6415
6416 if (cmd == NULL) {
6417 continue;
6418 }
6419
6420 if (cmd->sync_cmd == MRSAS_TRUE) {
6421 hdr = (struct mrsas_header *)&cmd->frame->hdr;
6422 if (hdr == NULL) {
6423 continue;
6424 }
6425 time = --cmd->drv_pkt_time;
6426 } else {
6427 pkt = cmd->pkt;
6428 if (pkt == NULL) {
6429 continue;
6430 }
6431 time = --cmd->drv_pkt_time;
6432 }
6433 if (time <= 0) {
6434 cmn_err(CE_WARN, "%llx: "
6435 "io_timeout_checker: TIMING OUT: pkt "
6436 ": %p, cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X\n",
6437 gethrtime(), (void *)pkt, (void *)cmd, instance->fw_outstanding, instance->max_fw_cmds);
6438
6439 counter++;
6440 break;
6441 }
6442 }
6443 mutex_exit(&instance->cmd_pend_mtx);
6444
6445 if (counter) {
6446 if (instance->disable_online_ctrl_reset == 1) {
6447 cmn_err(CE_WARN, "mr_sas %d: %s(): OCR is NOT supported by Firmware, KILL adapter!!!",
6448 instance->instance, __func__);
6449
6450 if (instance->tbolt)
6451 (void) mrsas_tbolt_kill_adapter(instance);
6452 else
6453 (void) mrsas_kill_adapter(instance);
6454
6455 return;
6456 } else {
6457 if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
6458 if (instance->adapterresetinprogress == 0) {
6459 if (instance->tbolt)
6460 mrsas_tbolt_reset_ppc(instance);
6461 else
6462 mrsas_reset_ppc(instance);
6463 }
6464 } else {
6465 cmn_err(CE_WARN,
6466 "io_timeout_checker: "
6467 "cmd %p cmd->index %d "
6468 "timed out even after 3 resets: "
6469 "so KILL adapter", (void *)cmd, cmd->index);
6470
6471 mrsas_print_cmd_details(instance, cmd, 0xDD);
6472
6473 if (instance->tbolt)
6474 (void) mrsas_tbolt_kill_adapter(instance);
6475 else
6476 (void) mrsas_kill_adapter(instance);
6477 return;
6478 }
6479 }
6480 }
6481 con_log(CL_ANN, (CE_NOTE, "mrsas: "
6482 "schedule next timeout check: "
6483 "do timeout \n"));
6484 instance->timeout_id =
6485 timeout(io_timeout_checker, (void *)instance,
6486 drv_usectohz(MRSAS_1_SECOND));
6487 }
6488
6489 static uint32_t
6490 read_fw_status_reg_ppc(struct mrsas_instance *instance)
6491 {
6492 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
6493 }
6494
6495 static void
6496 issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
6497 {
6498 struct scsi_pkt *pkt;
6499 atomic_add_16(&instance->fw_outstanding, 1);
6500
6501 pkt = cmd->pkt;
6502 if (pkt) {
6503 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6504 "ISSUED CMD TO FW : called : cmd:"
6505 ": %p instance : %p pkt : %p pkt_time : %x\n",
6506 gethrtime(), (void *)cmd, (void *)instance,
6507 (void *)pkt, cmd->drv_pkt_time));
6508 if (instance->adapterresetinprogress) {
6509 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6510 con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
6511 } else {
6512 push_pending_mfi_pkt(instance, cmd);
6513 }
6514
6515 } else {
6516 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6517 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
6518 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
6519 }
6520
6521 mutex_enter(&instance->reg_write_mtx);
6522 ASSERT(mutex_owned(&instance->reg_write_mtx));
6523 /* Issue the command to the FW */
6524 WR_IB_QPORT((cmd->frame_phys_addr) |
6525 (((cmd->frame_count - 1) << 1) | 1), instance);
6526 mutex_exit(&instance->reg_write_mtx);
6527
6528 }
6529
6530 /*
6531 * issue_cmd_in_sync_mode
6532 */
/*
 * issue_cmd_in_sync_mode_ppc - issue an MFI frame and block until the
 * interrupt path marks it complete (cmd_status changes from ENODATA),
 * signalled via int_cmd_cv.
 *
 * During an OCR the frame is issued fire-and-forget (timer re-armed,
 * no wait) and DDI_SUCCESS is returned immediately.
 *
 * Returns DDI_SUCCESS when the command completed within the wait budget,
 * DDI_FAILURE otherwise.
 */
static int
issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	/* Wait budget: MFI_POLL_TIMEOUT_SECS expressed in 100us units. */
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
	struct mrsas_header *hdr = &cmd->frame->hdr;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));

	if (instance->adapterresetinprogress) {
		/*
		 * Reset path: take the frame's own timeout as the packet
		 * timer, but never less than the global debug timeout.
		 */
		cmd->drv_pkt_time = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
		if (cmd->drv_pkt_time < debug_timeout_g)
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
		    "issue and return in reset case\n"));
		WR_IB_QPORT((cmd->frame_phys_addr) |
		    (((cmd->frame_count - 1) << 1) | 1), instance);

		return (DDI_SUCCESS);
	} else {
		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
		push_pending_mfi_pkt(instance, cmd);
	}

	/* ENODATA acts as the "not yet completed" sentinel. */
	cmd->cmd_status = ENODATA;

	mutex_enter(&instance->reg_write_mtx);
	ASSERT(mutex_owned(&instance->reg_write_mtx));
	/* Issue the command to the FW */
	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
	mutex_exit(&instance->reg_write_mtx);

	/*
	 * Sleep until the completion path resets cmd_status and broadcasts
	 * int_cmd_cv; i counts wakeups, bounded by msecs iterations.
	 */
	mutex_enter(&instance->int_cmd_mtx);
	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}
	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));

	/* Leaving the loop early means the command completed. */
	if (i < (msecs -1)) {
		return (DDI_SUCCESS);
	} else {
		return (DDI_FAILURE);
	}
}
6583
6584 /*
6585 * issue_cmd_in_poll_mode
6586 */
6587 static int
6588 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6589 struct mrsas_cmd *cmd)
6590 {
6591 int i;
6592 uint16_t flags;
6593 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6594 struct mrsas_header *frame_hdr;
6595
6596 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
6597
6598 frame_hdr = (struct mrsas_header *)cmd->frame;
6599 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6600 MFI_CMD_STATUS_POLL_MODE);
6601 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6602 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6603
6604 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6605
6606 /* issue the frame using inbound queue port */
6607 WR_IB_QPORT((cmd->frame_phys_addr) |
6608 (((cmd->frame_count - 1) << 1) | 1), instance);
6609
6610 /* wait for cmd_status to change from 0xFF */
6611 for (i = 0; i < msecs && (
6612 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6613 == MFI_CMD_STATUS_POLL_MODE); i++) {
6614 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6615 }
6616
6617 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6618 == MFI_CMD_STATUS_POLL_MODE) {
6619 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6620 "cmd polling timed out"));
6621 return (DDI_FAILURE);
6622 }
6623
6624 return (DDI_SUCCESS);
6625 }
6626
6627 static void
6628 enable_intr_ppc(struct mrsas_instance *instance)
6629 {
6630 uint32_t mask;
6631
6632 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));
6633
6634 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
6635 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
6636
6637 /* WR_OB_INTR_MASK(~0x80000000, instance); */
6638 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
6639
6640 /* dummy read to force PCI flush */
6641 mask = RD_OB_INTR_MASK(instance);
6642
6643 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
6644 "outbound_intr_mask = 0x%x", mask));
6645 }
6646
6647 static void
6648 disable_intr_ppc(struct mrsas_instance *instance)
6649 {
6650 uint32_t mask;
6651
6652 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));
6653
6654 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
6655 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6656
6657 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
6658 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
6659
6660 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
6661 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6662
6663 /* dummy read to force PCI flush */
6664 mask = RD_OB_INTR_MASK(instance);
6665 #ifdef lint
6666 mask = mask;
6667 #endif
6668 }
6669
6670 static int
6671 intr_ack_ppc(struct mrsas_instance *instance)
6672 {
6673 uint32_t status;
6674 int ret = DDI_INTR_CLAIMED;
6675
6676 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));
6677
6678 /* check if it is our interrupt */
6679 status = RD_OB_INTR_STATUS(instance);
6680
6681 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));
6682
6683 if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
6684 ret = DDI_INTR_UNCLAIMED;
6685 }
6686
6687 if (ret == DDI_INTR_UNCLAIMED) {
6688 return (ret);
6689 }
6690 /* clear the interrupt by writing back the same value */
6691 WR_OB_DOORBELL_CLEAR(status, instance);
6692
6693 /* dummy READ */
6694 status = RD_OB_INTR_STATUS(instance);
6695
6696 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));
6697
6698 return (ret);
6699 }
6700
6701 /*
6702 * Marks HBA as bad. This will be called either when an
6703 * IO packet times out even after 3 FW resets
6704 * or FW is found to be fault even after 3 continuous resets.
6705 */
6706
6707 static int
6708 mrsas_kill_adapter(struct mrsas_instance *instance)
6709 {
6710 if (instance->deadadapter == 1)
6711 return (DDI_FAILURE);
6712
6713 con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
6714 "Writing to doorbell with MFI_STOP_ADP "));
6715 mutex_enter(&instance->ocr_flags_mtx);
6716 instance->deadadapter = 1;
6717 mutex_exit(&instance->ocr_flags_mtx);
6718 instance->func_ptr->disable_intr(instance);
6719 WR_IB_DOORBELL(MFI_STOP_ADP, instance);
6720 (void) mrsas_complete_pending_cmds(instance);
6721 return (DDI_SUCCESS);
6722 }
6723
6724
/*
 * mrsas_reset_ppc - online controller reset (OCR) for 2108-class HBAs.
 *
 * Sequence: set adapterresetinprogress, disable interrupts, unlock the
 * diagnostic register with the magic write sequence, assert the reset bit
 * and wait for it to clear, bring the firmware back to READY state
 * (killing the adapter after MAX_FW_RESET_COUNT consecutive faults),
 * re-init MFI, re-issue pending commands and the AEN command, and finally
 * clear adapterresetinprogress.
 *
 * Returns DDI_SUCCESS when the adapter is back online, DDI_FAILURE when
 * the reset failed or the adapter was/got killed.
 */
static int
mrsas_reset_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	uint32_t retry = 0;
	uint32_t cur_abs_reg_val;
	uint32_t fw_state;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* A dead adapter is never reset again. */
	if (instance->deadadapter == 1) {
		cmn_err(CE_WARN, "mrsas_reset_ppc: "
		    "no more resets as HBA has been marked dead ");
		return (DDI_FAILURE);
	}
	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress "
	    "flag set, time %llx", gethrtime()));

	instance->func_ptr->disable_intr(instance);
retry_reset:
	/* Magic unlock sequence for the diagnostic write-enable register. */
	WR_IB_WRITE_SEQ(0, instance);
	WR_IB_WRITE_SEQ(4, instance);
	WR_IB_WRITE_SEQ(0xb, instance);
	WR_IB_WRITE_SEQ(2, instance);
	WR_IB_WRITE_SEQ(7, instance);
	WR_IB_WRITE_SEQ(0xd, instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
	    "to write sequence register\n"));
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);

	/* Poll (100ms steps, ~10s max) for diag write enable. */
	while (!(status & DIAG_WRITE_ENABLE)) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit "
			    "check retry count %d\n", retry);
			return (DDI_FAILURE);
		}
	}
	/* Kick the reset and wait for the hardware to clear the bit. */
	WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);
	while (status & DIAG_RESET_ADAPTER) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN,
			    "mrsas_reset_ppc: RESET FAILED. KILL adapter called\n.");

			(void) mrsas_kill_adapter(instance);
			return (DDI_FAILURE);
		}
	}
	con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mfi_state_transition_to_ready"));

	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
	if (mfi_state_transition_to_ready(instance) ||
	    debug_fw_faults_after_ocr_g == 1) {
		cur_abs_reg_val =
		    instance->func_ptr->read_fw_status_reg(instance);
		fw_state = cur_abs_reg_val & MFI_STATE_MASK;

#ifdef OCRDEBUG
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_reset_ppc :before fake: FW is not ready "
		    "FW state = 0x%x", fw_state));
		/* Debug knob: pretend the firmware faulted. */
		if (debug_fw_faults_after_ocr_g == 1)
			fw_state = MFI_STATE_FAULT;
#endif

		con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready "
		    "FW state = 0x%x", fw_state));

		if (fw_state == MFI_STATE_FAULT) {
			/* increment the count */
			instance->fw_fault_count_after_ocr++;
			if (instance->fw_fault_count_after_ocr
			    < MAX_FW_RESET_COUNT) {
				cmn_err(CE_WARN, "mrsas_reset_ppc: "
				    "FW is in fault after OCR count %d "
				    "Retry Reset",
				    instance->fw_fault_count_after_ocr);
				goto retry_reset;

			} else {
				/* Still faulted after the retry budget. */
				cmn_err(CE_WARN, "mrsas_reset_ppc: "
				    "Max Reset Count exceeded >%d"
				    "Mark HBA as bad, KILL adapter", MAX_FW_RESET_COUNT);

				(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}
		}
	}
	/* reset the counter as FW is up after OCR */
	instance->fw_fault_count_after_ocr = 0;

	/* Reset the reply-queue producer/consumer indexes. */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer, 0);

	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, 0);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    " after resetting produconsumer chck indexs:"
	    "producer %x consumer %x", *instance->producer,
	    *instance->consumer));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_init_mfi"));
	(void) mrsas_issue_init_mfi(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_issue_init_mfi Done"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_print_pending_cmd\n"));
	(void) mrsas_print_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_print_pending_cmd done\n"));

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_pending_cmds"));
	(void) mrsas_issue_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "issue_pending_cmds done.\n"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling aen registration"));

	/* Re-issue the standing AEN command with fresh timers. */
	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "adpterresetinprogress flag unset"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
	return (DDI_SUCCESS);
}
6880
6881
/*
 * mrsas_add_intrs - allocate and enable one interrupt of the given type
 * (FIXED/MSI/MSI-X) for the instance.
 *
 * Allocates the handle table, allocates a single interrupt (the driver has
 * one ISR), validates its priority, attaches mrsas_isr and enables it
 * (block-enable when the capability allows).
 *
 * Returns DDI_SUCCESS or DDI_FAILURE; on failure every partially acquired
 * resource is released via the goto-cleanup ladder at the bottom.
 */
static int
mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
{

	dev_info_t *dip = instance->dip;
	int	avail, actual, count;
	int	i, flag, ret;

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
	    intr_type));

	/* Get number of interrupts */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
		    "ret %d count %d", ret, count));

		return (DDI_FAILURE);
	}

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));

	/* Get number of available interrupts */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
		    "ret %d avail %d", ret, avail));

		return (DDI_FAILURE);
	}
	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));

	/* Only one interrupt routine. So limit the count to 1 */
	if (count > 1) {
		count = 1;
	}

	/*
	 * Allocate an array of interrupt handlers. Currently we support
	 * only one interrupt. The framework can be extended later.
	 */
	instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
	/*
	 * NOTE(review): kmem_zalloc with KM_SLEEP cannot return NULL, so
	 * the check below is defensive dead code.
	 */
	instance->intr_htable = kmem_zalloc(instance->intr_htable_size, KM_SLEEP);
	if (instance->intr_htable == NULL) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "failed to allocate memory for intr-handle table"));
		instance->intr_htable_size = 0;
		return (DDI_FAILURE);
	}

	/* MSI/MSI-X must be allocated exactly; FIXED may be relaxed. */
	flag = ((intr_type == DDI_INTR_TYPE_MSI) || (intr_type ==
	    DDI_INTR_TYPE_MSIX)) ? DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;

	/* Allocate interrupt */
	ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "avail = %d", avail));
		goto mrsas_free_htable;
	}

	if (actual < count) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "Requested = %d Received = %d", count, actual));
	}
	instance->intr_cnt = actual;

	/*
	 * Get the priority of the interrupt allocated.
	 */
	if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
	    &instance->intr_pri)) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "get priority call failed"));
		goto mrsas_free_handles;
	}

	/*
	 * Test for high level mutex. we don't support them.
	 */
	if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "High level interrupts not supported."));
		goto mrsas_free_handles;
	}

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
	    instance->intr_pri));

	/* Call ddi_intr_add_handler() */
	for (i = 0; i < actual; i++) {
		ret = ddi_intr_add_handler(instance->intr_htable[i],
		    (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
		    (caddr_t)(uintptr_t)i);

		if (ret != DDI_SUCCESS) {
			con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
			    "failed %d", ret));
			goto mrsas_free_handles;
		}

	}

	con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));

	if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
	    &instance->intr_cap)) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
		    ret));
		goto mrsas_free_handlers;
	}

	if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
		con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable"));

		(void) ddi_intr_block_enable(instance->intr_htable,
		    instance->intr_cnt);
	} else {
		con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable"));

		for (i = 0; i < instance->intr_cnt; i++) {
			(void) ddi_intr_enable(instance->intr_htable[i]);
			con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns "
			    "%d", i));
		}
	}

	return (DDI_SUCCESS);

	/* Cleanup ladder: each label undoes one more acquisition stage. */
mrsas_free_handlers:
	for (i = 0; i < actual; i++)
	{
		(void) ddi_intr_remove_handler(instance->intr_htable[i]);
	}

mrsas_free_handles:
	for (i = 0; i < actual; i++)
	{
		(void) ddi_intr_free(instance->intr_htable[i]);
	}

mrsas_free_htable:
	if (instance->intr_htable != NULL)
		kmem_free(instance->intr_htable, instance->intr_htable_size);

	instance->intr_htable =NULL;
	instance->intr_htable_size = 0;

	return (DDI_FAILURE);

}
7035
7036
7037 static void
7038 mrsas_rem_intrs(struct mrsas_instance *instance)
7039 {
7040 int i;
7041
7042 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));
7043
7044 /* Disable all interrupts first */
7045 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7046 (void) ddi_intr_block_disable(instance->intr_htable,
7047 instance->intr_cnt);
7048 } else {
7049 for (i = 0; i < instance->intr_cnt; i++) {
7050 (void) ddi_intr_disable(instance->intr_htable[i]);
7051 }
7052 }
7053
7054 /* Remove all the handlers */
7055
7056 for (i = 0; i < instance->intr_cnt; i++) {
7057 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7058 (void) ddi_intr_free(instance->intr_htable[i]);
7059 }
7060
7061 if (instance->intr_htable != NULL)
7062 kmem_free(instance->intr_htable, instance->intr_htable_size);
7063
7064 instance->intr_htable =NULL;
7065 instance->intr_htable_size = 0;
7066
7067 }
7068
/*
 * mrsas_tran_bus_config - SCSA bus_config entry point.
 *
 * BUS_CONFIG_ONE parses "name@tgt,lun" from arg and configures the single
 * child (LD at lun 0, or a Thunderbolt PD for lun != 0 when PDSUPPORT is
 * built in). BUS_CONFIG_DRIVER/ALL configure every possible child. On
 * success the generic ndi_busop_bus_config() completes the operation.
 *
 * Returns NDI_SUCCESS or NDI_FAILURE.
 */
static int
mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
    ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
{
	struct mrsas_instance *instance;
	int config;
	int rval = NDI_SUCCESS;

	char *ptr = NULL;
	int tgt, lun;

	con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));

	if ((instance = ddi_get_soft_state(mrsas_state,
	    ddi_get_instance(parent))) == NULL) {
		return (NDI_FAILURE);
	}

	/* Hold nexus during bus_config */
	ndi_devi_enter(parent, &config);
	switch (op) {
	case BUS_CONFIG_ONE: {

		/* parse wwid/target name out of name given */
		if ((ptr = strchr((char *)arg, '@')) == NULL) {
			rval = NDI_FAILURE;
			break;
		}
		ptr++;

		if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
			rval = NDI_FAILURE;
			break;
		}

		/* lun 0 is a logical drive; other luns are tbolt PDs. */
		if (lun == 0) {
			rval = mrsas_config_ld(instance, tgt, lun, childp);
		}
#ifdef PDSUPPORT
		else if ( instance->tbolt == 1 && lun != 0) {
			rval = mrsas_tbolt_config_pd(instance,
			    tgt, lun, childp);
		}
#endif
		else {
			rval = NDI_FAILURE;
		}

		break;
	}
	case BUS_CONFIG_DRIVER:
	case BUS_CONFIG_ALL: {

		/*
		 * NOTE(review): the result of mrsas_config_all_devices() is
		 * deliberately discarded below -- configure-all is
		 * best-effort and always proceeds to the generic bus_config.
		 */
		rval = mrsas_config_all_devices(instance);

		rval = NDI_SUCCESS;
		break;
	}
	}

	if (rval == NDI_SUCCESS) {
		rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);

	}
	ndi_devi_exit(parent, config);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
	    rval));
	return (rval);
}
7139
7140 static int
7141 mrsas_config_all_devices(struct mrsas_instance *instance)
7142 {
7143 int rval, tgt;
7144
7145 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7146 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7147
7148 }
7149
7150 #ifdef PDSUPPORT
7151 /* Config PD devices connected to the card */
7152 if(instance->tbolt) {
7153 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7154 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7155 }
7156 }
7157 #endif
7158
7159 rval = NDI_SUCCESS;
7160 return (rval);
7161 }
7162
7163 static int
7164 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7165 {
7166 char devbuf[SCSI_MAXNAMELEN];
7167 char *addr;
7168 char *p, *tp, *lp;
7169 long num;
7170
7171 /* Parse dev name and address */
7172 (void) strcpy(devbuf, devnm);
7173 addr = "";
7174 for (p = devbuf; *p != '\0'; p++) {
7175 if (*p == '@') {
7176 addr = p + 1;
7177 *p = '\0';
7178 } else if (*p == ':') {
7179 *p = '\0';
7180 break;
7181 }
7182 }
7183
7184 /* Parse target and lun */
7185 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7186 if (*p == ',') {
7187 lp = p + 1;
7188 *p = '\0';
7189 break;
7190 }
7191 }
7192 if (tgt && tp) {
7193 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7194 return (DDI_FAILURE); /* Can declare this as constant */
7195 }
7196 *tgt = (int)num;
7197 }
7198 if (lun && lp) {
7199 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7200 return (DDI_FAILURE);
7201 }
7202 *lun = (int)num;
7203 }
7204 return (DDI_SUCCESS); /* Success case */
7205 }
7206
/*
 * mrsas_config_ld - configure (attach) the devinfo node for one logical
 * drive at (tgt, lun).
 *
 * If a child node already exists: return it via ldip when the target is
 * still valid, or schedule an unconfigure of the stale entry and fail.
 * Otherwise probe the device through SCSA and, when it responds, build
 * and online the child node via mrsas_config_scsi_device().
 *
 * Returns NDI_SUCCESS or NDI_FAILURE; *ldip (when non-NULL) receives the
 * child devinfo pointer.
 */
static int
mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
    uint8_t lun, dev_info_t **ldip)
{
	struct scsi_device *sd;
	dev_info_t *child;
	int rval;

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
	    tgt, lun));

	/* Fast path: the child node already exists. */
	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
		if (ldip) {
			*ldip = child;
		}
		if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
			/* Target vanished: queue removal of the stale node. */
			rval = mrsas_service_evt(instance, tgt, 0,
			    MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN,
			    "mr_sas: DELETING STALE ENTRY rval = %d "
			    "tgt id = %d ", rval, tgt));
			return (NDI_FAILURE);
		}
		return (NDI_SUCCESS);
	}

	/*
	 * NOTE(review): kmem_zalloc with KM_SLEEP cannot return NULL; the
	 * check below is defensive dead code.
	 */
	sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
	if (sd == NULL) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_config_ld: failed to allocate mem for scsi_device"));
		return (NDI_FAILURE);
	}
	sd->sd_address.a_hba_tran = instance->tran;
	sd->sd_address.a_target = (uint16_t)tgt;
	sd->sd_address.a_lun = (uint8_t)lun;

	/* Only build a node for devices that answer INQUIRY. */
	if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
		rval = mrsas_config_scsi_device(instance, sd, ldip);
	else
		rval = NDI_FAILURE;

	/* sd_unprobe is blank now. Free buffer manually */
	if (sd->sd_inq) {
		kmem_free(sd->sd_inq, SUN_INQSIZE);
		sd->sd_inq = (struct scsi_inquiry *)NULL;
	}

	kmem_free(sd, sizeof (struct scsi_device));
	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
	    rval));
	return (rval);
}
7259
7260 int
7261 mrsas_config_scsi_device(struct mrsas_instance *instance,
7262 struct scsi_device *sd, dev_info_t **dipp)
7263 {
7264 char *nodename = NULL;
7265 char **compatible = NULL;
7266 int ncompatible = 0;
7267 char *childname;
7268 dev_info_t *ldip = NULL;
7269 int tgt = sd->sd_address.a_target;
7270 int lun = sd->sd_address.a_lun;
7271 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7272 int rval;
7273
7274 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
7275 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7276 NULL, &nodename, &compatible, &ncompatible);
7277
7278 if (nodename == NULL) {
7279 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
7280 "for t%dL%d", tgt, lun));
7281 rval = NDI_FAILURE;
7282 goto finish;
7283 }
7284
7285 childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
7286 con_log(CL_DLEVEL1, (CE_NOTE,
7287 "mr_sas: Childname = %2s nodename = %s", childname, nodename));
7288
7289 /* Create a dev node */
7290 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
7291 con_log(CL_DLEVEL1, (CE_NOTE,
7292 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
7293 if (rval == NDI_SUCCESS) {
7294 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
7295 DDI_PROP_SUCCESS) {
7296 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7297 "property for t%dl%d target", tgt, lun));
7298 rval = NDI_FAILURE;
7299 goto finish;
7300 }
7301 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
7302 DDI_PROP_SUCCESS) {
7303 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7304 "property for t%dl%d lun", tgt, lun));
7305 rval = NDI_FAILURE;
7306 goto finish;
7307 }
7308
7309 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7310 "compatible", compatible, ncompatible) !=
7311 DDI_PROP_SUCCESS) {
7312 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7313 "property for t%dl%d compatible", tgt, lun));
7314 rval = NDI_FAILURE;
7315 goto finish;
7316 }
7317
7318 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7319 if (rval != NDI_SUCCESS) {
7320 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
7321 "t%dl%d", tgt, lun));
7322 ndi_prop_remove_all(ldip);
7323 (void) ndi_devi_free(ldip);
7324 } else {
7325 con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :"
7326 "0 t%dl%d", tgt, lun));
7327 }
7328
7329 }
7330 finish:
7331 if (dipp) {
7332 *dipp = ldip;
7333 }
7334
7335 con_log(CL_DLEVEL1, (CE_NOTE,
7336 "mr_sas: config_scsi_device rval = %d t%dL%d",
7337 rval, tgt, lun));
7338 scsi_hba_nodename_compatible_free(nodename, compatible);
7339 return (rval);
7340 }
7341
7342 /*ARGSUSED*/
7343 int
7344 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7345 uint64_t wwn)
7346 {
7347 struct mrsas_eventinfo *mrevt = NULL;
7348
7349 con_log(CL_ANN1, (CE_NOTE,
7350 "mrsas_service_evt called for t%dl%d event = %d",
7351 tgt, lun, event));
7352
7353 if ((instance->taskq == NULL) || (mrevt =
7354 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7355 return (ENOMEM);
7356 }
7357
7358 mrevt->instance = instance;
7359 mrevt->tgt = tgt;
7360 mrevt->lun = lun;
7361 mrevt->event = event;
7362 mrevt->wwn = wwn;
7363
7364 if ((ddi_taskq_dispatch(instance->taskq,
7365 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7366 DDI_SUCCESS) {
7367 con_log(CL_ANN1, (CE_NOTE,
7368 "mr_sas: Event task failed for t%dl%d event = %d",
7369 tgt, lun, event));
7370 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7371 return (DDI_FAILURE);
7372 }
7373
7374 return (DDI_SUCCESS);
7375 }
7376
7377 static void
7378 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
7379 {
7380 struct mrsas_instance *instance = mrevt->instance;
7381 dev_info_t *dip, *pdip;
7382 int circ1 = 0;
7383 char *devname;
7384
7385 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
7386 " tgt %d lun %d event %d",
7387 mrevt->tgt, mrevt->lun, mrevt->event));
7388
7389 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
7390 mutex_enter(&instance->config_dev_mtx);
7391 dip = instance->mr_ld_list[mrevt->tgt].dip;
7392 mutex_exit(&instance->config_dev_mtx);
7393 }
7394
7395 #ifdef PDSUPPORT
7396 else {
7397 mutex_enter(&instance->config_dev_mtx);
7398 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7399 mutex_exit(&instance->config_dev_mtx);
7400 }
7401 #endif
7402
7403 ndi_devi_enter(instance->dip, &circ1);
7404 switch (mrevt->event) {
7405 case MRSAS_EVT_CONFIG_TGT:
7406 if (dip == NULL) {
7407
7408 if (mrevt->lun == 0) {
7409 (void) mrsas_config_ld(instance, mrevt->tgt,
7410 0, NULL);
7411 }
7412 #ifdef PDSUPPORT
7413 else if (instance->tbolt) {
7414 (void) mrsas_tbolt_config_pd(instance,
7415 mrevt->tgt,
7416 1, NULL);
7417 }
7418 #endif
7419 con_log(CL_ANN1, (CE_NOTE,
7420 "mr_sas: EVT_CONFIG_TGT called:"
7421 " for tgt %d lun %d event %d",
7422 mrevt->tgt, mrevt->lun, mrevt->event));
7423
7424 } else {
7425 con_log(CL_ANN1, (CE_NOTE,
7426 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7427 " for tgt %d lun %d event %d",
7428 mrevt->tgt, mrevt->lun, mrevt->event));
7429 }
7430 break;
7431 case MRSAS_EVT_UNCONFIG_TGT:
7432 if (dip) {
7433 if (i_ddi_devi_attached(dip)) {
7434
7435 pdip = ddi_get_parent(dip);
7436
7437 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
7438 (void) ddi_deviname(dip, devname);
7439
7440 (void) devfs_clean(pdip, devname + 1,
7441 DV_CLEAN_FORCE);
7442 kmem_free(devname, MAXNAMELEN + 1);
7443 }
7444 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7445 con_log(CL_ANN1, (CE_NOTE,
7446 "mr_sas: EVT_UNCONFIG_TGT called:"
7447 " for tgt %d lun %d event %d",
7448 mrevt->tgt, mrevt->lun, mrevt->event));
7449 } else {
7450 con_log(CL_ANN1, (CE_NOTE,
7451 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7452 " for tgt %d lun %d event %d",
7453 mrevt->tgt, mrevt->lun, mrevt->event));
7454 }
7455 break;
7456 }
7457 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7458 ndi_devi_exit(instance->dip, circ1);
7459 }
7460
7461
7462 int
7463 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7464 {
7465 union scsi_cdb *cdbp;
7466 uint16_t page_code;
7467 struct scsa_cmd *acmd;
7468 struct buf *bp;
7469 struct mode_header *modehdrp;
7470
7471 cdbp = (void *)pkt->pkt_cdbp;
7472 page_code = cdbp->cdb_un.sg.scsi[0];
7473 acmd = PKT2CMD(pkt);
7474 bp = acmd->cmd_buf;
7475 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) {
7476 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7477 /* ADD pkt statistics as Command failed. */
7478 return (NULL);
7479 }
7480
7481 bp_mapin(bp);
7482 bzero(bp->b_un.b_addr, bp->b_bcount);
7483
7484 switch (page_code) {
7485 case 0x3: {
7486 struct mode_format *page3p = NULL;
7487 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7488 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7489
7490 page3p = (void *)((caddr_t)modehdrp +
7491 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7492 page3p->mode_page.code = 0x3;
7493 page3p->mode_page.length =
7494 (uchar_t)(sizeof (struct mode_format));
7495 page3p->data_bytes_sect = 512;
7496 page3p->sect_track = 63;
7497 break;
7498 }
7499 case 0x4: {
7500 struct mode_geometry *page4p = NULL;
7501 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7502 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7503
7504 page4p = (void *)((caddr_t)modehdrp +
7505 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7506 page4p->mode_page.code = 0x4;
7507 page4p->mode_page.length =
7508 (uchar_t)(sizeof (struct mode_geometry));
7509 page4p->heads = 255;
7510 page4p->rpm = 10000;
7511 break;
7512 }
7513 default:
7514 break;
7515 }
7516 return (NULL);
7517 }
7518