1 /*
2 * mr_sas.c: source for mr_sas driver
3 *
4 * Solaris MegaRAID device driver for SAS2.0 controllers
5 * Copyright (c) 2008-2012, LSI Logic Corporation.
6 * All rights reserved.
7 *
8 * Version:
9 * Author:
10 * Swaminathan K S
11 * Arun Chandrashekhar
12 * Manju R
13 * Rasheed
14 * Shakeel Bukhari
15 */
16
17 #include <sys/types.h>
18 #include <sys/param.h>
19 #include <sys/file.h>
20 #include <sys/errno.h>
21 #include <sys/open.h>
22 #include <sys/cred.h>
23 #include <sys/modctl.h>
24 #include <sys/conf.h>
25 #include <sys/devops.h>
26 #include <sys/cmn_err.h>
27 #include <sys/kmem.h>
28 #include <sys/stat.h>
29 #include <sys/mkdev.h>
30 #include <sys/pci.h>
31 #include <sys/scsi/scsi.h>
32 #include <sys/ddi.h>
33 #include <sys/sunddi.h>
34 #include <sys/atomic.h>
35 #include <sys/signal.h>
36 #include <sys/byteorder.h>
37 #include <sys/sdt.h>
38 #include <sys/fs/dv_node.h> /* devfs_clean */
39
40 #include "mr_sas.h"
41
42 /*
43 * Local static data
44 */
45 static void *mrsas_state = NULL;
46 static volatile boolean_t mrsas_relaxed_ordering = 0;
47 volatile int debug_level_g = CL_NONE;
48
49 static volatile int msi_enable = 1;
50
51 /* Default Timeout value to issue online controller reset */
52 volatile int debug_timeout_g = 0xF0; //0xB4;
53 /* Simulate consecutive firmware fault */
54 static volatile int debug_fw_faults_after_ocr_g = 0;
55 #ifdef OCRDEBUG
56 /* Simulate three consecutive timeout for an IO */
57 static volatile int debug_consecutive_timeout_after_ocr_g = 0;
58 #endif
59
60 /* Enable OCR on firmware fault */
61 static volatile int debug_support_ocr_isr_g = 0;
62 #pragma weak scsi_hba_open
63 #pragma weak scsi_hba_close
64 #pragma weak scsi_hba_ioctl
65
66
67
68 static struct mrsas_function_template mrsas_function_template_ppc = {
69 .read_fw_status_reg = read_fw_status_reg_ppc,
70 .issue_cmd = issue_cmd_ppc,
71 .issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
72 .issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
73 .enable_intr = enable_intr_ppc,
74 .disable_intr = disable_intr_ppc,
75 .intr_ack = intr_ack_ppc,
76 .init_adapter = mrsas_init_adapter_ppc
77 // .reset_adapter = mrsas_reset_adapter_ppc
78 };
79
80
81 static struct mrsas_function_template mrsas_function_template_fusion = {
82 .read_fw_status_reg = tbolt_read_fw_status_reg,
83 .issue_cmd = tbolt_issue_cmd,
84 .issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
85 .issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
86 .enable_intr = tbolt_enable_intr,
87 .disable_intr = tbolt_disable_intr,
88 .intr_ack = tbolt_intr_ack,
89 .init_adapter = mrsas_init_adapter_tbolt
90 // .reset_adapter = mrsas_reset_adapter_tbolt
91 };
92
93
94 ddi_dma_attr_t mrsas_generic_dma_attr = {
95 DMA_ATTR_V0, /* dma_attr_version */
96 0, /* low DMA address range */
97 0xFFFFFFFFU, /* high DMA address range */
98 0xFFFFFFFFU, /* DMA counter register */
99 8, /* DMA address alignment */
100 0x07, /* DMA burstsizes */
101 1, /* min DMA size */
102 0xFFFFFFFFU, /* max DMA size */
103 0xFFFFFFFFU, /* segment boundary */
104 MRSAS_MAX_SGE_CNT, /* dma_attr_sglen */
105 512, /* granularity of device */
106 0 /* bus specific DMA flags */
107 };
108
109 int32_t mrsas_max_cap_maxxfer = 0x1000000;
110
111 //Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG, Limit size to 256K
112 uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
113
114 /*
115 * cb_ops contains base level routines
116 */
117 static struct cb_ops mrsas_cb_ops = {
118 mrsas_open, /* open */
119 mrsas_close, /* close */
120 nodev, /* strategy */
121 nodev, /* print */
122 nodev, /* dump */
123 nodev, /* read */
124 nodev, /* write */
125 mrsas_ioctl, /* ioctl */
126 nodev, /* devmap */
127 nodev, /* mmap */
128 nodev, /* segmap */
129 nochpoll, /* poll */
130 nodev, /* cb_prop_op */
131 0, /* streamtab */
132 D_NEW | D_HOTPLUG, /* cb_flag */
133 CB_REV, /* cb_rev */
134 nodev, /* cb_aread */
135 nodev /* cb_awrite */
136 };
137
138 /*
139 * dev_ops contains configuration routines
140 */
141 static struct dev_ops mrsas_ops = {
142 DEVO_REV, /* rev, */
143 0, /* refcnt */
144 mrsas_getinfo, /* getinfo */
145 nulldev, /* identify */
146 nulldev, /* probe */
147 mrsas_attach, /* attach */
148 mrsas_detach, /* detach */
149 #if defined(__SunOS_5_11)
150 nodev,
151 #else
152 mrsas_reset, /* reset */
153 #endif /* defined(__SunOS_5_11) */
154 &mrsas_cb_ops, /* char/block ops */
155 NULL, /* bus ops */
156 NULL, /* power */
157 #ifdef __SunOS_5_11
158 mrsas_quiesce /* quiesce */
159 #endif /*__SunOS_5_11 */
160
161 };
162
163 char _depends_on[] = "misc/scsi";
164
165 static struct modldrv modldrv = {
166 &mod_driverops, /* module type - driver */
167 MRSAS_VERSION,
168 &mrsas_ops, /* driver ops */
169 };
170
171 static struct modlinkage modlinkage = {
172 MODREV_1, /* ml_rev - must be MODREV_1 */
173 &modldrv, /* ml_linkage */
174 NULL /* end of driver linkage */
175 };
176
177 static struct ddi_device_acc_attr endian_attr = {
178 DDI_DEVICE_ATTR_V1,
179 DDI_STRUCTURE_LE_ACC,
180 DDI_STRICTORDER_ACC,
181 DDI_DEFAULT_ACC
182 };
183
184
185 unsigned int enable_fp = 1;
186
187
188 /*
189 * ************************************************************************** *
190 * *
191 * common entry points - for loadable kernel modules *
192 * *
193 * ************************************************************************** *
194 */
195
196 /*
197 * _init - initialize a loadable module
198 * @void
199 *
200 * The driver should perform any one-time resource allocation or data
201 * initialization during driver loading in _init(). For example, the driver
202 * should initialize any mutexes global to the driver in this routine.
203 * The driver should not, however, use _init() to allocate or initialize
204 * anything that has to do with a particular instance of the device.
205 * Per-instance initialization must be done in attach().
206 */
207 int
208 _init(void)
209 {
210 int ret;
211
212 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
213
214 ret = ddi_soft_state_init(&mrsas_state,
215 sizeof (struct mrsas_instance), 0);
216
217 if (ret != DDI_SUCCESS) {
218 cmn_err(CE_WARN, "mr_sas: could not init state");
219 return (ret);
220 }
221
222 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
223 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
224 ddi_soft_state_fini(&mrsas_state);
225 return (ret);
226 }
227
228 ret = mod_install(&modlinkage);
229
230 if (ret != DDI_SUCCESS) {
231 cmn_err(CE_WARN, "mr_sas: mod_install failed");
232 scsi_hba_fini(&modlinkage);
233 ddi_soft_state_fini(&mrsas_state);
234 }
235
236 return (ret);
237 }
238
239 /*
240 * _info - returns information about a loadable module.
241 * @void
242 *
243 * _info() is called to return module information. This is a typical entry
244 * point that does predefined role. It simply calls mod_info().
245 */
246 int
247 _info(struct modinfo *modinfop)
248 {
249 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
250
251 return (mod_info(&modlinkage, modinfop));
252 }
253
254 /*
255 * _fini - prepare a loadable module for unloading
256 * @void
257 *
258 * In _fini(), the driver should release any resources that were allocated in
259 * _init(). The driver must remove itself from the system module list.
260 */
261 int
262 _fini(void)
263 {
264 int ret;
265
266 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
267
268 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS)
269 {
270 con_log(CL_ANN1, (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
271 return (ret);
272 }
273
274 scsi_hba_fini(&modlinkage);
275 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
276
277 ddi_soft_state_fini(&mrsas_state);
278 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
279
280 return (ret);
281 }
282
283
284 /*
285 * ************************************************************************** *
286 * *
287 * common entry points - for autoconfiguration *
288 * *
289 * ************************************************************************** *
290 */
291 /*
292 * attach - adds a device to the system as part of initialization
293 * @dip:
294 * @cmd:
295 *
296 * The kernel calls a driver's attach() entry point to attach an instance of
297 * a device (for MegaRAID, it is instance of a controller) or to resume
298 * operation for an instance of a device that has been suspended or has been
299 * shut down by the power management framework
300 * The attach() entry point typically includes the following types of
301 * processing:
302 * - allocate a soft-state structure for the device instance (for MegaRAID,
303 * controller instance)
304 * - initialize per-instance mutexes
305 * - initialize condition variables
306 * - register the device's interrupts (for MegaRAID, controller's interrupts)
307 * - map the registers and memory of the device instance (for MegaRAID,
308 * controller instance)
309 * - create minor device nodes for the device instance (for MegaRAID,
310 * controller instance)
311 * - report that the device instance (for MegaRAID, controller instance) has
312 * attached
313 */
314 #if __SunOS_5_11
315 #define DDI_PM_RESUME DDI_PM_RESUME_OBSOLETE
316 #define DDI_PM_SUSPEND DDI_PM_SUSPEND_OBSOLETE
317 #endif // __SunOS_5_11
318 static int
319 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
320 {
321 int instance_no;
322 int nregs;
323 int i = 0;
324 uint8_t irq;
325 uint16_t vendor_id;
326 uint16_t device_id;
327 uint16_t subsysvid;
328 uint16_t subsysid;
329 uint16_t command;
330 off_t reglength = 0;
331 int intr_types = 0;
332 char *data;
333
334 scsi_hba_tran_t *tran;
335 ddi_dma_attr_t tran_dma_attr;
336 struct mrsas_instance *instance;
337
338 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
339
340 /* CONSTCOND */
341 ASSERT(NO_COMPETING_THREADS);
342
343 instance_no = ddi_get_instance(dip);
344
345 /*
346 * check to see whether this device is in a DMA-capable slot.
347 */
348 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
349 cmn_err(CE_WARN,
350 "mr_sas%d: Device in slave-only slot, unused",
351 instance_no);
352 return (DDI_FAILURE);
353 }
354
355 switch (cmd) {
356 case DDI_ATTACH:
357
358 /* allocate the soft state for the instance */
359 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
360 != DDI_SUCCESS) {
361 cmn_err(CE_WARN,
362 "mr_sas%d: Failed to allocate soft state",
363 instance_no);
364
365 return (DDI_FAILURE);
366 }
367
368 instance = (struct mrsas_instance *)ddi_get_soft_state
369 (mrsas_state, instance_no);
370
371 if (instance == NULL) {
372 cmn_err(CE_WARN,
373 "mr_sas%d: Bad soft state", instance_no);
374
375 ddi_soft_state_free(mrsas_state, instance_no);
376
377 return (DDI_FAILURE);
378 }
379
380 bzero((caddr_t)instance,
381 sizeof (struct mrsas_instance));
382
383 instance->unroll.softs = 1;
384
385 /* Setup the PCI configuration space handles */
386 if (pci_config_setup(dip, &instance->pci_handle) !=
387 DDI_SUCCESS) {
388 cmn_err(CE_WARN,
389 "mr_sas%d: pci config setup failed ",
390 instance_no);
391
392 ddi_soft_state_free(mrsas_state, instance_no);
393 return (DDI_FAILURE);
394 }
395 if (instance->pci_handle == NULL) {
396 cmn_err(CE_WARN,
397 "mr_sas%d: pci config setup failed ",
398 instance_no);
399 ddi_soft_state_free(mrsas_state, instance_no);
400 return (DDI_FAILURE);
401 }
402
403
404
405 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
406 cmn_err(CE_WARN,
407 "mr_sas: failed to get registers.");
408
409 pci_config_teardown(&instance->pci_handle);
410 ddi_soft_state_free(mrsas_state, instance_no);
411 return (DDI_FAILURE);
412 }
413
414 vendor_id = pci_config_get16(instance->pci_handle,
415 PCI_CONF_VENID);
416 device_id = pci_config_get16(instance->pci_handle,
417 PCI_CONF_DEVID);
418
419 subsysvid = pci_config_get16(instance->pci_handle,
420 PCI_CONF_SUBVENID);
421 subsysid = pci_config_get16(instance->pci_handle,
422 PCI_CONF_SUBSYSID);
423
424 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
425 (pci_config_get16(instance->pci_handle,
426 PCI_CONF_COMM) | PCI_COMM_ME));
427 irq = pci_config_get8(instance->pci_handle,
428 PCI_CONF_ILINE);
429
430 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
431 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
432 instance_no, vendor_id, device_id, subsysvid,
433 subsysid, irq, MRSAS_VERSION));
434
435 /* enable bus-mastering */
436 command = pci_config_get16(instance->pci_handle,
437 PCI_CONF_COMM);
438
439 if (!(command & PCI_COMM_ME)) {
440 command |= PCI_COMM_ME;
441
442 pci_config_put16(instance->pci_handle,
443 PCI_CONF_COMM, command);
444
445 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
446 "enable bus-mastering", instance_no));
447 } else {
448 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
449 "bus-mastering already set", instance_no));
450 }
451
452 /* initialize function pointers */
453 switch(device_id) {
454 case PCI_DEVICE_ID_LSI_TBOLT:
455 case PCI_DEVICE_ID_LSI_INVADER:
456 con_log(CL_ANN, (CE_NOTE,
457 "mr_sas: 2208 T.B. device detected"));
458
459 instance->func_ptr = &mrsas_function_template_fusion;
460 instance->tbolt = 1;
461 break;
462
463 case PCI_DEVICE_ID_LSI_2108VDE:
464 case PCI_DEVICE_ID_LSI_2108V:
465 con_log(CL_ANN, (CE_NOTE,
466 "mr_sas: 2108 Liberator device detected"));
467
468 instance->func_ptr = &mrsas_function_template_ppc;
469 break;
470
471 default:
472 cmn_err(CE_WARN,
473 "mr_sas: Invalid device detected");
474
475 pci_config_teardown(&instance->pci_handle);
476 ddi_soft_state_free(mrsas_state, instance_no);
477 return (DDI_FAILURE);
478
479 }
480
481 instance->baseaddress = pci_config_get32(
482 instance->pci_handle, PCI_CONF_BASE0);
483 instance->baseaddress &= 0x0fffc;
484
485 instance->dip = dip;
486 instance->vendor_id = vendor_id;
487 instance->device_id = device_id;
488 instance->subsysvid = subsysvid;
489 instance->subsysid = subsysid;
490 instance->instance = instance_no;
491
492
493 /* Setup register map */
494 if ((ddi_dev_regsize(instance->dip,
495 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) ||
496 reglength < MINIMUM_MFI_MEM_SZ) {
497 goto fail_attach;
498 }
499 if (reglength > DEFAULT_MFI_MEM_SZ) {
500 reglength = DEFAULT_MFI_MEM_SZ;
501 con_log(CL_DLEVEL1, (CE_NOTE,
502 "mr_sas: register length to map is "
503 "0x%lx bytes", reglength));
504 }
505 if (ddi_regs_map_setup(instance->dip,
506 REGISTER_SET_IO_2108, &instance->regmap, 0,
507 reglength, &endian_attr, &instance->regmap_handle)
508 != DDI_SUCCESS) {
509 cmn_err(CE_WARN,
510 "mr_sas: couldn't map control registers");
511 goto fail_attach;
512 }
513 if (instance->regmap_handle == NULL) {
514 cmn_err(CE_WARN,
515 "mr_sas: couldn't map control registers");
516 goto fail_attach;
517 }
518
519 instance->unroll.regs = 1;
520
521 /*
522 * Disable Interrupt Now.
523 * Setup Software interrupt
524 */
525 instance->func_ptr->disable_intr(instance);
526
527 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
528 "mrsas-enable-msi", &data) == DDI_SUCCESS) {
529 if (strncmp(data, "no", 3) == 0) {
530 msi_enable = 0;
531 con_log(CL_ANN1, (CE_WARN,
532 "msi_enable = %d disabled",
533 msi_enable));
534 }
535 ddi_prop_free(data);
536 }
537
538 con_log(CL_DLEVEL1, (CE_NOTE, "msi_enable = %d", msi_enable));
539
540 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
541 "mrsas-enable-fp", &data) == DDI_SUCCESS) {
542 if (strncmp(data, "no", 3) == 0) {
543 enable_fp = 0;
544 cmn_err(CE_NOTE,
545 "enable_fp = %d, Fast-Path disabled.\n",
546 enable_fp);
547 }
548
549 ddi_prop_free(data);
550 }
551
552 cmn_err(CE_NOTE, "enable_fp = %d\n", enable_fp);
553
554 /* Check for all supported interrupt types */
555 if (ddi_intr_get_supported_types(
556 dip, &intr_types) != DDI_SUCCESS) {
557 cmn_err(CE_WARN,
558 "ddi_intr_get_supported_types() failed");
559 goto fail_attach;
560 }
561
562 con_log(CL_DLEVEL1, (CE_NOTE,
563 "ddi_intr_get_supported_types() ret: 0x%x",
564 intr_types));
565
566 /* Initialize and Setup Interrupt handler */
567 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
568 if (mrsas_add_intrs(instance,
569 DDI_INTR_TYPE_MSIX) != DDI_SUCCESS) {
570 cmn_err(CE_WARN,
571 "MSIX interrupt query failed");
572 goto fail_attach;
573 }
574 instance->intr_type = DDI_INTR_TYPE_MSIX;
575 } else if (msi_enable && (intr_types &
576 DDI_INTR_TYPE_MSI)) {
577 if (mrsas_add_intrs(instance,
578 DDI_INTR_TYPE_MSI) != DDI_SUCCESS) {
579 cmn_err(CE_WARN,
580 "MSI interrupt query failed");
581 goto fail_attach;
582 }
583 instance->intr_type = DDI_INTR_TYPE_MSI;
584 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
585 msi_enable = 0;
586 if (mrsas_add_intrs(instance,
587 DDI_INTR_TYPE_FIXED) != DDI_SUCCESS) {
588 cmn_err(CE_WARN,
589 "FIXED interrupt query failed");
590 goto fail_attach;
591 }
592 instance->intr_type = DDI_INTR_TYPE_FIXED;
593 } else {
594 cmn_err(CE_WARN, "Device cannot "
595 "suppport either FIXED or MSI/X "
596 "interrupts");
597 goto fail_attach;
598 }
599
600 instance->unroll.intr = 1;
601
602
603 /* setup the mfi based low level driver */
604 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
605 cmn_err(CE_WARN, "mr_sas: "
606 "could not initialize the low level driver");
607
608 goto fail_attach;
609 }
610
611 /* Initialize all Mutex */
612 INIT_LIST_HEAD(&instance->completed_pool_list);
613 mutex_init(&instance->completed_pool_mtx,
614 "completed_pool_mtx", MUTEX_DRIVER,
615 DDI_INTR_PRI(instance->intr_pri));
616
617 mutex_init(&instance->sync_map_mtx,
618 "sync_map_mtx", MUTEX_DRIVER,
619 DDI_INTR_PRI(instance->intr_pri));
620
621 mutex_init(&instance->app_cmd_pool_mtx,
622 "app_cmd_pool_mtx", MUTEX_DRIVER,
623 DDI_INTR_PRI(instance->intr_pri));
624
625 mutex_init(&instance->config_dev_mtx, "config_dev_mtx",
626 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
627
628 mutex_init(&instance->cmd_pend_mtx, "cmd_pend_mtx",
629 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
630
631 mutex_init(&instance->ocr_flags_mtx, "ocr_flags_mtx",
632 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
633
634 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
635 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
636 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
637
638 mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx",
639 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
640
641 mutex_init(&instance->reg_write_mtx,"reg_write_mtx",
642 MUTEX_DRIVER,DDI_INTR_PRI(instance->intr_pri));
643
644 if (instance->tbolt) {
645 mutex_init(&instance->cmd_app_pool_mtx,
646 "cmd_app_pool_mtx", MUTEX_DRIVER,
647 DDI_INTR_PRI(instance->intr_pri));
648
649 mutex_init(&instance->chip_mtx,
650 "chip_mtx", MUTEX_DRIVER,
651 DDI_INTR_PRI(instance->intr_pri));
652
653 }
654
655 instance->unroll.mutexs = 1;
656
657 instance->timeout_id = (timeout_id_t)-1;
658
659 /* Register our soft-isr for highlevel interrupts. */
660 instance->isr_level = instance->intr_pri;
661 if (!(instance->tbolt)) {
662 if (instance->isr_level == HIGH_LEVEL_INTR) {
663 if (ddi_add_softintr(dip,
664 DDI_SOFTINT_HIGH,
665 &instance->soft_intr_id,
666 NULL, NULL, mrsas_softintr,
667 (caddr_t)instance) !=
668 DDI_SUCCESS) {
669 cmn_err(CE_WARN,
670 "Software ISR "
671 "did not register");
672
673 goto fail_attach;
674 }
675
676 instance->unroll.soft_isr = 1;
677
678 }
679 }
680
681 instance->softint_running = 0;
682
683 /* Allocate a transport structure */
684 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
685
686 if (tran == NULL) {
687 cmn_err(CE_WARN,
688 "scsi_hba_tran_alloc failed");
689 goto fail_attach;
690 }
691
692 instance->tran = tran;
693 instance->unroll.tran = 1;
694
695 tran->tran_hba_private = instance;
696 tran->tran_tgt_init = mrsas_tran_tgt_init;
697 tran->tran_tgt_probe = scsi_hba_probe;
698 tran->tran_tgt_free = mrsas_tran_tgt_free;
699 if (instance->tbolt) {
700 tran->tran_init_pkt =
701 mrsas_tbolt_tran_init_pkt;
702 tran->tran_start =
703 mrsas_tbolt_tran_start;
704 } else {
705 tran->tran_init_pkt = mrsas_tran_init_pkt;
706 tran->tran_start = mrsas_tran_start;
707 }
708 tran->tran_abort = mrsas_tran_abort;
709 tran->tran_reset = mrsas_tran_reset;
710 tran->tran_getcap = mrsas_tran_getcap;
711 tran->tran_setcap = mrsas_tran_setcap;
712 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
713 tran->tran_dmafree = mrsas_tran_dmafree;
714 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
715 tran->tran_quiesce = mrsas_tran_quiesce;
716 tran->tran_unquiesce = mrsas_tran_unquiesce;
717 tran->tran_bus_config = mrsas_tran_bus_config;
718
719 if (mrsas_relaxed_ordering)
720 mrsas_generic_dma_attr.dma_attr_flags |=
721 DDI_DMA_RELAXED_ORDERING;
722
723
724 tran_dma_attr = mrsas_generic_dma_attr;
725 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
726
727 /* Attach this instance of the hba */
728 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
729 != DDI_SUCCESS) {
730 cmn_err(CE_WARN,
731 "scsi_hba_attach failed");
732
733 goto fail_attach;
734 }
735 instance->unroll.tranSetup = 1;
736 con_log(CL_ANN1, (CE_CONT,
737 "scsi_hba_attach_setup() done."));
738
739
740 /* create devctl node for cfgadm command */
741 if (ddi_create_minor_node(dip, "devctl",
742 S_IFCHR, INST2DEVCTL(instance_no),
743 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
744 cmn_err(CE_WARN,
745 "mr_sas: failed to create devctl node.");
746
747 goto fail_attach;
748 }
749
750 instance->unroll.devctl = 1;
751
752 /* create scsi node for cfgadm command */
753 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
754 INST2SCSI(instance_no),
755 DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
756 DDI_FAILURE) {
757 cmn_err(CE_WARN,
758 "mr_sas: failed to create scsi node.");
759
760 goto fail_attach;
761 }
762
763 instance->unroll.scsictl = 1;
764
765 (void) sprintf(instance->iocnode, "%d:lsirdctl",
766 instance_no);
767
768 /*
769 * Create a node for applications
770 * for issuing ioctl to the driver.
771 */
772 if (ddi_create_minor_node(dip, instance->iocnode,
773 S_IFCHR, INST2LSIRDCTL(instance_no),
774 DDI_PSEUDO, 0) == DDI_FAILURE) {
775 cmn_err(CE_WARN,
776 "mr_sas: failed to create ioctl node.");
777
778 goto fail_attach;
779 }
780
781 instance->unroll.ioctl = 1;
782
783 /* Create a taskq to handle dr events */
784 if ((instance->taskq = ddi_taskq_create(dip,
785 "mrsas_dr_taskq", 1,
786 TASKQ_DEFAULTPRI, 0)) == NULL) {
787 cmn_err(CE_WARN,
788 "mr_sas: failed to create taskq ");
789 instance->taskq = NULL;
790 goto fail_attach;
791 }
792 instance->unroll.taskq = 1;
793 con_log(CL_ANN1, (CE_CONT,
794 "ddi_taskq_create() done."));
795
796 /* enable interrupt */
797 instance->func_ptr->enable_intr(instance);
798
799 /* initiate AEN */
800 if (start_mfi_aen(instance)) {
801 cmn_err(CE_WARN,
802 "mr_sas: failed to initiate AEN.");
803 goto fail_attach;
804 }
805 instance->unroll.aenPend = 1;
806 con_log(CL_ANN1, (CE_CONT,
807 "AEN started for instance %d.", instance_no));
808
809 /* Finally! We are on the air. */
810 ddi_report_dev(dip);
811
812 instance->mr_ld_list =
813 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
814 KM_SLEEP);
815 if (instance->mr_ld_list == NULL) {
816 cmn_err(CE_WARN,
817 "mr_sas attach(): failed to allocate ld_list array");
818 goto fail_attach;
819 }
820 instance->unroll.ldlist_buff = 1;
821
822 #ifdef PDSUPPORT
823 if(instance->tbolt) {
824 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
825 instance->mr_tbolt_pd_list =
826 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance)
827 * sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
828 ASSERT(instance->mr_tbolt_pd_list);
829 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
830 instance->mr_tbolt_pd_list[i].lun_type =
831 MRSAS_TBOLT_PD_LUN;
832 instance->mr_tbolt_pd_list[i].dev_id =
833 (uint8_t)i;
834 }
835
836 instance->unroll.pdlist_buff = 1;
837 }
838 #endif
839 break;
840 case DDI_PM_RESUME:
841 con_log(CL_ANN, (CE_NOTE,
842 "mr_sas: DDI_PM_RESUME"));
843 break;
844 case DDI_RESUME:
845 con_log(CL_ANN, (CE_NOTE,
846 "mr_sas: DDI_RESUME"));
847 break;
848 default:
849 con_log(CL_ANN, (CE_WARN,
850 "mr_sas: invalid attach cmd=%x", cmd));
851 return (DDI_FAILURE);
852 }
853
854
855 cmn_err(CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d", instance_no);
856 return (DDI_SUCCESS);
857
858 fail_attach:
859
860 mrsas_undo_resources(dip, instance);
861
862 pci_config_teardown(&instance->pci_handle);
863 ddi_soft_state_free(mrsas_state, instance_no);
864
865 con_log(CL_ANN, (CE_WARN,
866 "mr_sas: return failure from mrsas_attach"));
867
868 cmn_err(CE_WARN, "mrsas_attach() return FAILURE instance_num %d", instance_no);
869
870 return (DDI_FAILURE);
871 }
872
873 /*
874 * getinfo - gets device information
875 * @dip:
876 * @cmd:
877 * @arg:
878 * @resultp:
879 *
880 * The system calls getinfo() to obtain configuration information that only
881 * the driver knows. The mapping of minor numbers to device instance is
882 * entirely under the control of the driver. The system sometimes needs to ask
883 * the driver which device a particular dev_t represents.
884 * Given the device number return the devinfo pointer from the scsi_device
885 * structure.
886 */
887 /*ARGSUSED*/
888 static int
889 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
890 {
891 int rval;
892 int mrsas_minor = getminor((dev_t)arg);
893
894 struct mrsas_instance *instance;
895
896 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
897
898 switch (cmd) {
899 case DDI_INFO_DEVT2DEVINFO:
900 instance = (struct mrsas_instance *)
901 ddi_get_soft_state(mrsas_state,
902 MINOR2INST(mrsas_minor));
903
904 if (instance == NULL) {
905 *resultp = NULL;
906 rval = DDI_FAILURE;
907 } else {
908 *resultp = instance->dip;
909 rval = DDI_SUCCESS;
910 }
911 break;
912 case DDI_INFO_DEVT2INSTANCE:
913 *resultp = (void *)(intptr_t)
914 (MINOR2INST(getminor((dev_t)arg)));
915 rval = DDI_SUCCESS;
916 break;
917 default:
918 *resultp = NULL;
919 rval = DDI_FAILURE;
920 }
921
922 return (rval);
923 }
924
925 /*
926 * detach - detaches a device from the system
927 * @dip: pointer to the device's dev_info structure
928 * @cmd: type of detach
929 *
930 * A driver's detach() entry point is called to detach an instance of a device
931 * that is bound to the driver. The entry point is called with the instance of
932 * the device node to be detached and with DDI_DETACH, which is specified as
933 * the cmd argument to the entry point.
934 * This routine is called during driver unload. We free all the allocated
935 * resources and call the corresponding LLD so that it can also release all
936 * its resources.
937 */
938 static int
939 mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
940 {
941 int instance_no;
942
943 struct mrsas_instance *instance;
944
945 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
946
947
948 /* CONSTCOND */
949 ASSERT(NO_COMPETING_THREADS);
950
951 instance_no = ddi_get_instance(dip);
952
953 instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
954 instance_no);
955
956 if (!instance) {
957 cmn_err(CE_WARN,
958 "mr_sas:%d could not get instance in detach",
959 instance_no);
960
961 return (DDI_FAILURE);
962 }
963
964 con_log(CL_ANN, (CE_NOTE,
965 "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
966 instance_no, instance->vendor_id, instance->device_id,
967 instance->subsysvid, instance->subsysid));
968
969 switch (cmd) {
970 case DDI_DETACH:
971 con_log(CL_ANN, (CE_NOTE,
972 "mrsas_detach: DDI_DETACH"));
973
974 mutex_enter(&instance->config_dev_mtx);
975 if (instance->timeout_id != (timeout_id_t)-1) {
976 mutex_exit(&instance->config_dev_mtx);
977 (void) untimeout(instance->timeout_id);
978 instance->timeout_id = (timeout_id_t)-1;
979 mutex_enter(&instance->config_dev_mtx);
980 instance->unroll.timer = 0;
981 }
982 mutex_exit(&instance->config_dev_mtx);
983
984 if(instance->unroll.tranSetup == 1) {
985 if (scsi_hba_detach(dip) != DDI_SUCCESS) {
986 cmn_err(CE_WARN,
987 "mr_sas2%d: failed to detach", instance_no);
988 return (DDI_FAILURE);
989 }
990 instance->unroll.tranSetup = 0;
991 con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
992 }
993
994 flush_cache(instance);
995
996 mrsas_undo_resources(dip, instance);
997
998 pci_config_teardown(&instance->pci_handle);
999 ddi_soft_state_free(mrsas_state, instance_no);
1000 break;
1001
1002 case DDI_PM_SUSPEND:
1003 con_log(CL_ANN, (CE_NOTE,
1004 "mrsas_detach: DDI_PM_SUSPEND"));
1005
1006 break;
1007 case DDI_SUSPEND:
1008 con_log(CL_ANN, (CE_NOTE,
1009 "mrsas_detach: DDI_SUSPEND"));
1010
1011 break;
1012 default:
1013 con_log(CL_ANN, (CE_WARN,
1014 "invalid detach command:0x%x", cmd));
1015 return (DDI_FAILURE);
1016 }
1017
1018 return (DDI_SUCCESS);
1019 }
1020
1021
1022 static int
1023 mrsas_undo_resources (dev_info_t *dip, struct mrsas_instance *instance)
1024 {
1025 int instance_no;
1026
1027 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1028
1029
1030 instance_no = ddi_get_instance(dip);
1031
1032
1033 if(instance->unroll.ioctl == 1) {
1034 ddi_remove_minor_node(dip, instance->iocnode);
1035 instance->unroll.ioctl = 0;
1036 }
1037
1038 if(instance->unroll.scsictl == 1) {
1039 ddi_remove_minor_node(dip, "scsi");
1040 instance->unroll.scsictl = 0;
1041 }
1042
1043 if(instance->unroll.devctl == 1) {
1044 ddi_remove_minor_node(dip, "devctl");
1045 instance->unroll.devctl = 0;
1046 }
1047
1048 if(instance->unroll.tranSetup == 1) {
1049 if (scsi_hba_detach(dip) != DDI_SUCCESS) {
1050 cmn_err(CE_WARN,
1051 "mr_sas2%d: failed to detach", instance_no);
1052 return (DDI_FAILURE);
1053 }
1054 instance->unroll.tranSetup = 0;
1055 con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
1056 }
1057
1058 if(instance->unroll.tran == 1) {
1059 scsi_hba_tran_free(instance->tran);
1060 instance->unroll.tran = 0;
1061 con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done."));
1062 }
1063
1064 if(instance->unroll.syncCmd == 1) {
1065 if(instance->tbolt) {
1066 if (abort_syncmap_cmd(instance, instance->map_update_cmd))
1067 cmn_err(CE_WARN, "mrsas_detach: "
1068 "failed to abort previous syncmap command");
1069
1070 instance->unroll.syncCmd = 0;
1071 con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
1072 }
1073 }
1074
1075 if(instance->unroll.aenPend == 1) {
1076 if (abort_aen_cmd(instance, instance->aen_cmd))
1077 cmn_err(CE_WARN, "mrsas_detach: "
1078 "failed to abort prevous AEN command");
1079
1080 instance->unroll.aenPend = 0;
1081 con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
1082 /*This means the controller is fully initialzed and running */
1083 // shutdown_controller();Shutdown should be a last command to controller.
1084 }
1085
1086
1087 if(instance->unroll.timer == 1) {
1088 if (instance->timeout_id != (timeout_id_t)-1) {
1089 (void) untimeout(instance->timeout_id);
1090 instance->timeout_id = (timeout_id_t)-1;
1091
1092 instance->unroll.timer = 0;
1093 }
1094 }
1095
1096 instance->func_ptr->disable_intr(instance);
1097
1098
1099 if(instance->unroll.mutexs == 1) {
1100 mutex_destroy(&instance->cmd_pool_mtx);
1101 mutex_destroy(&instance->app_cmd_pool_mtx);
1102 mutex_destroy(&instance->cmd_pend_mtx);
1103 mutex_destroy(&instance->completed_pool_mtx);
1104 mutex_destroy(&instance->sync_map_mtx);
1105 mutex_destroy(&instance->int_cmd_mtx);
1106 cv_destroy(&instance->int_cmd_cv);
1107 mutex_destroy(&instance->config_dev_mtx);
1108 mutex_destroy(&instance->ocr_flags_mtx);
1109 mutex_destroy(&instance->reg_write_mtx);
1110
1111 if (instance->tbolt) {
1112 mutex_destroy(&instance->cmd_app_pool_mtx);
1113 mutex_destroy(&instance->chip_mtx);
1114 }
1115
1116 instance->unroll.mutexs = 0;
1117 con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done."));
1118 }
1119
1120
1121 if (instance->unroll.soft_isr == 1) {
1122 ddi_remove_softintr(instance->soft_intr_id);
1123 instance->unroll.soft_isr = 0;
1124 }
1125
1126 if(instance->unroll.intr == 1) {
1127 mrsas_rem_intrs(instance);
1128 instance->unroll.intr = 0;
1129 }
1130
1131
1132 if(instance->unroll.taskq == 1) {
1133 if (instance->taskq) {
1134 ddi_taskq_destroy(instance->taskq);
1135 instance->unroll.taskq = 0;
1136 }
1137
1138 }
1139
1140 /*free dma memory allocated for
1141 cmds/frames/queues/driver version etc */
1142 if(instance->unroll.verBuff == 1) {
1143 mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1144 instance->unroll.verBuff = 0;
1145 }
1146
1147 if(instance->unroll.pdlist_buff == 1) {
1148 if (instance->mr_tbolt_pd_list != NULL)
1149 kmem_free(instance->mr_tbolt_pd_list,
1150 MRSAS_TBOLT_GET_PD_MAX(instance) * sizeof (struct mrsas_tbolt_pd));
1151
1152 instance->mr_tbolt_pd_list = NULL;
1153 instance->unroll.pdlist_buff = 0;
1154 }
1155
1156 if(instance->unroll.ldlist_buff == 1) {
1157 if (instance->mr_ld_list != NULL)
1158 kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
1159 * sizeof (struct mrsas_ld));
1160
1161 instance->mr_ld_list = NULL;
1162 instance->unroll.ldlist_buff = 0;
1163 }
1164
1165 if (instance->tbolt) {
1166 if(instance->unroll.alloc_space_mpi2 == 1) {
1167 free_space_for_mpi2(instance);
1168 instance->unroll.alloc_space_mpi2 = 0;
1169 }
1170 } else {
1171 if(instance->unroll.alloc_space_mfi == 1) {
1172 free_space_for_mfi(instance);
1173 instance->unroll.alloc_space_mfi = 0;
1174 }
1175 }
1176
1177 if(instance->unroll.regs == 1) {
1178 ddi_regs_map_free(&instance->regmap_handle);
1179 instance->unroll.regs = 0;
1180 con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done."));
1181 }
1182
1183 return (DDI_SUCCESS);
1184 }
1185
1186
1187
1188 /*
1189 * ************************************************************************** *
1190 * *
1191 * common entry points - for character driver types *
1192 * *
1193 * ************************************************************************** *
1194 */
1195 /*
1196 * open - gets access to a device
1197 * @dev:
1198 * @openflags:
1199 * @otyp:
1200 * @credp:
1201 *
1202 * Access to a device by one or more application programs is controlled
1203 * through the open() and close() entry points. The primary function of
1204 * open() is to verify that the open request is allowed.
1205 */
1206 static int
1207 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1208 {
1209 int rval = 0;
1210
1211 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1212
1213 /* Check root permissions */
1214 if (drv_priv(credp) != 0) {
1215 con_log(CL_ANN, (CE_WARN,
1216 "mr_sas: Non-root ioctl access denied!"));
1217 return (EPERM);
1218 }
1219
1220 /* Verify we are being opened as a character device */
1221 if (otyp != OTYP_CHR) {
1222 con_log(CL_ANN, (CE_WARN,
1223 "mr_sas: ioctl node must be a char node"));
1224 return (EINVAL);
1225 }
1226
1227 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1228 == NULL) {
1229 return (ENXIO);
1230 }
1231
1232 if (scsi_hba_open) {
1233 rval = scsi_hba_open(dev, openflags, otyp, credp);
1234 }
1235
1236 return (rval);
1237 }
1238
1239 /*
1240 * close - gives up access to a device
1241 * @dev:
1242 * @openflags:
1243 * @otyp:
1244 * @credp:
1245 *
1246 * close() should perform any cleanup necessary to finish using the minor
1247 * device, and prepare the device (and driver) to be opened again.
1248 */
1249 static int
1250 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1251 {
1252 int rval = 0;
1253
1254 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1255
1256 /* no need for locks! */
1257
1258 if (scsi_hba_close) {
1259 rval = scsi_hba_close(dev, openflags, otyp, credp);
1260 }
1261
1262 return (rval);
1263 }
1264
1265 /*
1266 * ioctl - performs a range of I/O commands for character drivers
1267 * @dev:
1268 * @cmd:
1269 * @arg:
1270 * @mode:
1271 * @credp:
1272 * @rvalp:
1273 *
1274 * ioctl() routine must make sure that user data is copied into or out of the
1275 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1276 * and ddi_copyout(), as appropriate.
1277 * This is a wrapper routine to serialize access to the actual ioctl routine.
1278 * ioctl() should return 0 on success, or the appropriate error number. The
1279 * driver may also set the value returned to the calling process through rvalp.
1280 */
1281
1282 static int
1283 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1284 int *rvalp)
1285 {
1286 int rval = 0;
1287
1288 struct mrsas_instance *instance;
1289 struct mrsas_ioctl *ioctl;
1290 struct mrsas_aen aen;
1291 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1292
1293 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1294
1295 if (instance == NULL) {
1296 /* invalid minor number */
1297 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1298 return (ENXIO);
1299 }
1300
1301 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1302 KM_SLEEP);
1303 if (ioctl == NULL) {
1304 /* Failed to allocate memory for ioctl */
1305 con_log(CL_ANN, (CE_WARN, "mr_sas_ioctl: failed to allocate memory for ioctl"));
1306 return (ENXIO);
1307 }
1308
1309 switch ((uint_t)cmd) {
1310 case MRSAS_IOCTL_FIRMWARE:
1311 if (ddi_copyin((void *)arg, ioctl,
1312 sizeof (struct mrsas_ioctl), mode)) {
1313 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1314 "ERROR IOCTL copyin"));
1315 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1316 return (EFAULT);
1317 }
1318
1319 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1320 rval = handle_drv_ioctl(instance, ioctl, mode);
1321 } else {
1322 rval = handle_mfi_ioctl(instance, ioctl, mode);
1323 }
1324
1325 if (ddi_copyout((void *)ioctl, (void *)arg,
1326 (sizeof (struct mrsas_ioctl) - 1), mode)) {
1327 con_log(CL_ANN, (CE_WARN,
1328 "mrsas_ioctl: copy_to_user failed"));
1329 rval = 1;
1330 }
1331
1332 break;
1333 case MRSAS_IOCTL_AEN:
1334 con_log(CL_ANN, (CE_NOTE,
1335 "mrsas_ioctl: IOCTL Register AEN.\n"));
1336
1337 if (ddi_copyin((void *) arg, &aen,
1338 sizeof (struct mrsas_aen), mode)) {
1339 con_log(CL_ANN, (CE_WARN,
1340 "mrsas_ioctl: ERROR AEN copyin"));
1341 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1342 return (EFAULT);
1343 }
1344
1345 rval = handle_mfi_aen(instance, &aen);
1346
1347 if (ddi_copyout((void *) &aen, (void *)arg,
1348 sizeof (struct mrsas_aen), mode)) {
1349 con_log(CL_ANN, (CE_WARN,
1350 "mrsas_ioctl: copy_to_user failed"));
1351 rval = 1;
1352 }
1353
1354 break;
1355 default:
1356 rval = scsi_hba_ioctl(dev, cmd, arg,
1357 mode, credp, rvalp);
1358
1359 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
1360 "scsi_hba_ioctl called, ret = %x.", rval));
1361 }
1362
1363 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1364 return (rval);
1365 }
1366
1367 /*
1368 * ************************************************************************** *
1369 * *
1370 * common entry points - for block driver types *
1371 * *
1372 * ************************************************************************** *
1373 */
1374 /*
1375 * reset - TBD
1376 * @dip:
1377 * @cmd:
1378 *
1379 * TBD
1380 */
1381 /*ARGSUSED*/
1382 static int
1383 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1384 {
1385 int instance_no;
1386
1387 struct mrsas_instance *instance;
1388
1389 instance_no = ddi_get_instance(dip);
1390 instance = (struct mrsas_instance *)ddi_get_soft_state
1391 (mrsas_state, instance_no);
1392
1393 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1394
1395 if (!instance) {
1396 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1397 "in reset", instance_no));
1398 return (DDI_FAILURE);
1399 }
1400
1401 instance->func_ptr->disable_intr(instance);
1402
1403 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1404 instance_no));
1405
1406 flush_cache(instance);
1407
1408 return (DDI_SUCCESS);
1409 }
1410
1411
1412 /*ARGSUSED*/
1413 int
1414 mrsas_quiesce(dev_info_t *dip)
1415 {
1416 int instance_no;
1417
1418 struct mrsas_instance *instance;
1419
1420 instance_no = ddi_get_instance(dip);
1421 instance = (struct mrsas_instance *)ddi_get_soft_state
1422 (mrsas_state, instance_no);
1423
1424 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1425
1426 if (!instance) {
1427 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
1428 "in quiesce", instance_no));
1429 return (DDI_FAILURE);
1430 }
1431 if (instance->deadadapter || instance->adapterresetinprogress) {
1432 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
1433 "healthy state", instance_no));
1434 return (DDI_FAILURE);
1435 }
1436
1437 if (abort_aen_cmd(instance, instance->aen_cmd)) {
1438 con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
1439 "failed to abort prevous AEN command QUIESCE"));
1440 }
1441
1442 if (instance->tbolt) {
1443 if (abort_syncmap_cmd(instance,
1444 instance->map_update_cmd)) {
1445 cmn_err(CE_WARN,
1446 "mrsas_detach: failed to abort "
1447 "previous syncmap command");
1448 return (DDI_FAILURE);
1449 }
1450 }
1451
1452 instance->func_ptr->disable_intr(instance);
1453
1454 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1455 instance_no));
1456
1457 flush_cache(instance);
1458
1459 if (wait_for_outstanding(instance)) {
1460 con_log(CL_ANN1, (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1461 return (DDI_FAILURE);
1462 }
1463 return (DDI_SUCCESS);
1464 }
1465
1466 /*
1467 * ************************************************************************** *
1468 * *
1469 * entry points (SCSI HBA) *
1470 * *
1471 * ************************************************************************** *
1472 */
1473 /*
1474 * tran_tgt_init - initialize a target device instance
1475 * @hba_dip:
1476 * @tgt_dip:
1477 * @tran:
1478 * @sd:
1479 *
1480 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1481 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1482 * the device's address as valid and supportable for that particular HBA.
1483 * By returning DDI_FAILURE, the instance of the target driver for that device
1484 * is not probed or attached.
1485 */
1486 /*ARGSUSED*/
1487 static int
1488 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1489 scsi_hba_tran_t *tran, struct scsi_device *sd)
1490 {
1491 struct mrsas_instance *instance;
1492 uint16_t tgt = sd->sd_address.a_target;
1493 uint8_t lun = sd->sd_address.a_lun;
1494 dev_info_t *child = NULL;
1495
1496 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
1497 tgt, lun));
1498
1499 instance = ADDR2MR(&sd->sd_address);
1500
1501 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
1502 /*
1503 * If no persistent node exists, we don't allow .conf node
1504 * to be created.
1505 */
1506 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
1507 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init find child ="
1508 " %p t = %d l = %d", (void *)child, tgt, lun));
1509 if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
1510 DDI_SUCCESS)
1511 /* Create this .conf node */
1512 return (DDI_SUCCESS);
1513 }
1514 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
1515 "DDI_FAILURE t = %d l = %d", tgt, lun));
1516 return (DDI_FAILURE);
1517
1518 }
1519
1520 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1521 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1522
1523 if (tgt < MRDRV_MAX_LD && lun == 0) {
1524 if (instance->mr_ld_list[tgt].dip == NULL &&
1525 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1526 mutex_enter(&instance->config_dev_mtx);
1527 instance->mr_ld_list[tgt].dip = tgt_dip;
1528 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1529 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1530 mutex_exit(&instance->config_dev_mtx);
1531 }
1532 }
1533
1534 #ifdef PDSUPPORT
1535 else if(instance->tbolt) {
1536 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1537 mutex_enter(&instance->config_dev_mtx);
1538 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1539 instance->mr_tbolt_pd_list[tgt].flag =
1540 MRDRV_TGT_VALID;
1541 mutex_exit(&instance->config_dev_mtx);
1542 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1543 "t%xl%x", tgt, lun));
1544 }
1545 }
1546 #endif
1547
1548 return (DDI_SUCCESS);
1549 }
1550
1551 /*ARGSUSED*/
1552 static void
1553 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1554 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1555 {
1556 struct mrsas_instance *instance;
1557 int tgt = sd->sd_address.a_target;
1558 int lun = sd->sd_address.a_lun;
1559
1560 instance = ADDR2MR(&sd->sd_address);
1561
1562 con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));
1563
1564 if (tgt < MRDRV_MAX_LD && lun == 0) {
1565 if (instance->mr_ld_list[tgt].dip == tgt_dip) {
1566 mutex_enter(&instance->config_dev_mtx);
1567 instance->mr_ld_list[tgt].dip = NULL;
1568 mutex_exit(&instance->config_dev_mtx);
1569 }
1570 }
1571
1572 #ifdef PDSUPPORT
1573 else if(instance->tbolt) {
1574 mutex_enter(&instance->config_dev_mtx);
1575 instance->mr_tbolt_pd_list[tgt].dip = NULL;
1576 mutex_exit(&instance->config_dev_mtx);
1577 con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
1578 "for tgt:%x", tgt));
1579 }
1580 #endif
1581
1582 }
1583
1584 dev_info_t *
1585 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1586 {
1587 dev_info_t *child = NULL;
1588 char addr[SCSI_MAXNAMELEN];
1589 char tmp[MAXNAMELEN];
1590
1591 (void) sprintf(addr, "%x,%x", tgt, lun);
1592 for (child = ddi_get_child(instance->dip); child;
1593 child = ddi_get_next_sibling(child)) {
1594
1595 if (ndi_dev_is_persistent_node(child) == 0) {
1596 continue;
1597 }
1598
1599 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1600 DDI_SUCCESS) {
1601 continue;
1602 }
1603
1604 if (strcmp(addr, tmp) == 0) {
1605 break;
1606 }
1607 }
1608 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1609 (void *)child));
1610 return (child);
1611 }
1612
1613 /*
1614 * mrsas_name_node -
1615 * @dip:
1616 * @name:
1617 * @len:
1618 */
1619 static int
1620 mrsas_name_node(dev_info_t *dip, char *name, int len)
1621 {
1622 int tgt, lun;
1623
1624 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1625 DDI_PROP_DONTPASS, "target", -1);
1626 con_log(CL_DLEVEL2, (CE_NOTE,
1627 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1628 if (tgt == -1) {
1629 return (DDI_FAILURE);
1630 }
1631 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1632 "lun", -1);
1633 con_log(CL_DLEVEL2,
1634 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1635 if (lun == -1) {
1636 return (DDI_FAILURE);
1637 }
1638 (void) snprintf(name, len, "%x,%x", tgt, lun);
1639 return (DDI_SUCCESS);
1640 }
1641
1642 /*
1643 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1644 * @ap:
1645 * @pkt:
1646 * @bp:
1647 * @cmdlen:
1648 * @statuslen:
1649 * @tgtlen:
1650 * @flags:
1651 * @callback:
1652 *
1653 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1654 * structure and DMA resources for a target driver request. The
1655 * tran_init_pkt() entry point is called when the target driver calls the
1656 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1657 * is a request to perform one or more of three possible services:
1658 * - allocation and initialization of a scsi_pkt structure
1659 * - allocation of DMA resources for data transfer
1660 * - reallocation of DMA resources for the next portion of the data transfer
1661 */
1662 static struct scsi_pkt *
1663 mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1664 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1665 int flags, int (*callback)(), caddr_t arg)
1666 {
1667 struct scsa_cmd *acmd;
1668 struct mrsas_instance *instance;
1669 struct scsi_pkt *new_pkt;
1670
1671 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1672
1673 instance = ADDR2MR(ap);
1674
1675 /* step #1 : pkt allocation */
1676 if (pkt == NULL) {
1677 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1678 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1679 if (pkt == NULL) {
1680 return (NULL);
1681 }
1682
1683 acmd = PKT2CMD(pkt);
1684
1685 /*
1686 * Initialize the new pkt - we redundantly initialize
1687 * all the fields for illustrative purposes.
1688 */
1689 acmd->cmd_pkt = pkt;
1690 acmd->cmd_flags = 0;
1691 acmd->cmd_scblen = statuslen;
1692 acmd->cmd_cdblen = cmdlen;
1693 acmd->cmd_dmahandle = NULL;
1694 acmd->cmd_ncookies = 0;
1695 acmd->cmd_cookie = 0;
1696 acmd->cmd_cookiecnt = 0;
1697 acmd->cmd_nwin = 0;
1698
1699 pkt->pkt_address = *ap;
1700 pkt->pkt_comp = (void (*)())NULL;
1701 pkt->pkt_flags = 0;
1702 pkt->pkt_time = 0;
1703 pkt->pkt_resid = 0;
1704 pkt->pkt_state = 0;
1705 pkt->pkt_statistics = 0;
1706 pkt->pkt_reason = 0;
1707 new_pkt = pkt;
1708 } else {
1709 acmd = PKT2CMD(pkt);
1710 new_pkt = NULL;
1711 }
1712
1713 /* step #2 : dma allocation/move */
1714 if (bp && bp->b_bcount != 0) {
1715 if (acmd->cmd_dmahandle == NULL) {
1716 if (mrsas_dma_alloc(instance, pkt, bp, flags,
1717 callback) == DDI_FAILURE) {
1718 if (new_pkt) {
1719 scsi_hba_pkt_free(ap, new_pkt);
1720 }
1721 return ((struct scsi_pkt *)NULL);
1722 }
1723 } else {
1724 if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1725 return ((struct scsi_pkt *)NULL);
1726 }
1727 }
1728 }
1729
1730 return (pkt);
1731 }
1732
1733 /*
1734 * tran_start - transport a SCSI command to the addressed target
1735 * @ap:
1736 * @pkt:
1737 *
1738 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1739 * SCSI command to the addressed target. The SCSI command is described
1740 * entirely within the scsi_pkt structure, which the target driver allocated
1741 * through the HBA driver's tran_init_pkt() entry point. If the command
1742 * involves a data transfer, DMA resources must also have been allocated for
1743 * the scsi_pkt structure.
1744 *
1745 * Return Values :
1746 * TRAN_BUSY - request queue is full, no more free scbs
1747 * TRAN_ACCEPT - pkt has been submitted to the instance
1748 */
1749 static int
1750 mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
1751 {
1752 uchar_t cmd_done = 0;
1753
1754 struct mrsas_instance *instance = ADDR2MR(ap);
1755 struct mrsas_cmd *cmd;
1756
1757 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1758 if (instance->deadadapter == 1) {
1759 con_log(CL_ANN1, (CE_WARN,
1760 "mrsas_tran_start: return TRAN_FATAL_ERROR "
1761 "for IO, as the HBA doesnt take any more IOs"));
1762 if (pkt) {
1763 pkt->pkt_reason = CMD_DEV_GONE;
1764 pkt->pkt_statistics = STAT_DISCON;
1765 }
1766 return (TRAN_FATAL_ERROR);
1767 }
1768
1769 if (instance->adapterresetinprogress) {
1770 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, "
1771 "returning mfi_pkt and setting TRAN_BUSY\n"));
1772 return (TRAN_BUSY);
1773 }
1774
1775 con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
1776 __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));
1777
1778 pkt->pkt_reason = CMD_CMPLT;
1779 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1780
1781 cmd = build_cmd(instance, ap, pkt, &cmd_done);
1782
1783 /*
1784 * Check if the command is already completed by the mrsas_build_cmd()
1785 * routine. In which case the busy_flag would be clear and scb will be
1786 * NULL and appropriate reason provided in pkt_reason field
1787 */
1788 if (cmd_done) {
1789 pkt->pkt_reason = CMD_CMPLT;
1790 pkt->pkt_scbp[0] = STATUS_GOOD;
1791 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1792 | STATE_SENT_CMD;
1793 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1794 (*pkt->pkt_comp)(pkt);
1795 }
1796
1797 return (TRAN_ACCEPT);
1798 }
1799
1800 if (cmd == NULL) {
1801 return (TRAN_BUSY);
1802 }
1803
1804 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1805 if (instance->fw_outstanding > instance->max_fw_cmds) {
1806 cmn_err(CE_WARN, "mr_sas:Firmware BUSY, fw_outstanding(0x%X) > max_fw_cmds(0x%X)",
1807 instance->fw_outstanding, instance->max_fw_cmds );
1808 return_mfi_pkt(instance, cmd);
1809 return (TRAN_BUSY);
1810 }
1811
1812 /* Synchronize the Cmd frame for the controller */
1813 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1814 DDI_DMA_SYNC_FORDEV);
1815 con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
1816 "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
1817 instance->func_ptr->issue_cmd(cmd, instance);
1818
1819 } else {
1820 struct mrsas_header *hdr = &cmd->frame->hdr;
1821
1822
1823 instance->func_ptr-> issue_cmd_in_poll_mode(instance, cmd);
1824
1825 pkt->pkt_reason = CMD_CMPLT;
1826 pkt->pkt_statistics = 0;
1827 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1828
1829 switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
1830 &hdr->cmd_status)) {
1831 case MFI_STAT_OK:
1832 pkt->pkt_scbp[0] = STATUS_GOOD;
1833 break;
1834
1835 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1836 con_log(CL_ANN, (CE_CONT,
1837 "mrsas_tran_start: scsi done with error"));
1838 pkt->pkt_reason = CMD_CMPLT;
1839 pkt->pkt_statistics = 0;
1840
1841 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
1842 break;
1843
1844 case MFI_STAT_DEVICE_NOT_FOUND:
1845 con_log(CL_ANN, (CE_CONT,
1846 "mrsas_tran_start: device not found error"));
1847 pkt->pkt_reason = CMD_DEV_GONE;
1848 pkt->pkt_statistics = STAT_DISCON;
1849 break;
1850
1851 default:
1852 ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
1853 }
1854
1855 return_mfi_pkt(instance, cmd);
1856
1857 if (pkt->pkt_comp) {
1858 (*pkt->pkt_comp)(pkt);
1859 }
1860
1861 }
1862
1863 return (TRAN_ACCEPT);
1864 }
1865
1866 /*
1867 * tran_abort - Abort any commands that are currently in transport
1868 * @ap:
1869 * @pkt:
1870 *
1871 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
1872 * commands that are currently in transport for a particular target. This entry
1873 * point is called when a target driver calls scsi_abort(). The tran_abort()
1874 * entry point should attempt to abort the command denoted by the pkt
1875 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
1876 * abort all outstanding commands in the transport layer for the particular
1877 * target or logical unit.
1878 */
1879 /*ARGSUSED*/
1880 static int
1881 mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1882 {
1883 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1884
1885 /* abort command not supported by H/W */
1886
1887 return (DDI_FAILURE);
1888 }
1889
1890 /*
1891 * tran_reset - reset either the SCSI bus or target
1892 * @ap:
1893 * @level:
1894 *
1895 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
1896 * the SCSI bus or a particular SCSI target device. This entry point is called
1897 * when a target driver calls scsi_reset(). The tran_reset() entry point must
1898 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
1899 * particular target or logical unit must be reset.
1900 */
1901 /*ARGSUSED*/
1902 static int
1903 mrsas_tran_reset(struct scsi_address *ap, int level)
1904 {
1905 struct mrsas_instance *instance = ADDR2MR(ap);
1906
1907 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1908
1909 if (wait_for_outstanding(instance)) {
1910 con_log(CL_ANN1, (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1911 return (DDI_FAILURE);
1912 } else {
1913 return (DDI_SUCCESS);
1914 }
1915 }
1916
1917 /*
1918 * tran_bus_reset - reset the SCSI bus
1919 * @dip:
1920 * @level:
1921 *
1922 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
1923 * initialized during the HBA driver's attach(). The vector should point to
1924 * an HBA entry point that is to be called when a user initiates a bus reset.
1925 * Implementation is hardware specific. If the HBA driver cannot reset the
1926 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
1927 * or not initialize this vector.
1928 */
1929 /*ARGSUSED*/
1930 static int
1931 mrsas_tran_bus_reset(dev_info_t *dip, int level)
1932 {
1933 int instance_no = ddi_get_instance(dip);
1934
1935 struct mrsas_instance *instance = ddi_get_soft_state(mrsas_state,
1936 instance_no);
1937
1938 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1939
1940 if (wait_for_outstanding(instance)) {
1941 con_log(CL_ANN1, (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1942 return (DDI_FAILURE);
1943 } else {
1944 return (DDI_SUCCESS);
1945 }
1946 }
1947
1948 /*
1949 * tran_getcap - get one of a set of SCSA-defined capabilities
1950 * @ap:
1951 * @cap:
1952 * @whom:
1953 *
1954 * The target driver can request the current setting of the capability for a
1955 * particular target by setting the whom parameter to nonzero. A whom value of
1956 * zero indicates a request for the current setting of the general capability
1957 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
1958 * for undefined capabilities or the current value of the requested capability.
1959 */
1960 /*ARGSUSED*/
1961 static int
1962 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
1963 {
1964 int rval = 0;
1965
1966 struct mrsas_instance *instance = ADDR2MR(ap);
1967
1968 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1969
1970 /* we do allow inquiring about capabilities for other targets */
1971 if (cap == NULL) {
1972 return (-1);
1973 }
1974
1975 switch (scsi_hba_lookup_capstr(cap)) {
1976 case SCSI_CAP_DMA_MAX:
1977 if (instance->tbolt) {
1978 /* Limit to 256k max transfer */
1979 rval = mrsas_tbolt_max_cap_maxxfer;
1980 } else {
1981 /* Limit to 16MB max transfer */
1982 rval = mrsas_max_cap_maxxfer;
1983 }
1984 break;
1985 case SCSI_CAP_MSG_OUT:
1986 rval = 1;
1987 break;
1988 case SCSI_CAP_DISCONNECT:
1989 rval = 0;
1990 break;
1991 case SCSI_CAP_SYNCHRONOUS:
1992 rval = 0;
1993 break;
1994 case SCSI_CAP_WIDE_XFER:
1995 rval = 1;
1996 break;
1997 case SCSI_CAP_TAGGED_QING:
1998 rval = 1;
1999 break;
2000 case SCSI_CAP_UNTAGGED_QING:
2001 rval = 1;
2002 break;
2003 case SCSI_CAP_PARITY:
2004 rval = 1;
2005 break;
2006 case SCSI_CAP_INITIATOR_ID:
2007 rval = instance->init_id;
2008 break;
2009 case SCSI_CAP_ARQ:
2010 rval = 1;
2011 break;
2012 case SCSI_CAP_LINKED_CMDS:
2013 rval = 0;
2014 break;
2015 case SCSI_CAP_RESET_NOTIFICATION:
2016 rval = 1;
2017 break;
2018 case SCSI_CAP_GEOMETRY:
2019 rval = -1;
2020
2021 break;
2022 default:
2023 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2024 scsi_hba_lookup_capstr(cap)));
2025 rval = -1;
2026 break;
2027 }
2028
2029 return (rval);
2030 }
2031
2032 /*
2033 * tran_setcap - set one of a set of SCSA-defined capabilities
2034 * @ap:
2035 * @cap:
2036 * @value:
2037 * @whom:
2038 *
2039 * The target driver might request that the new value be set for a particular
2040 * target by setting the whom parameter to nonzero. A whom value of zero
2041 * means that request is to set the new value for the SCSI bus or for adapter
2042 * hardware in general.
2043 * The tran_setcap() should return the following values as appropriate:
2044 * - -1 for undefined capabilities
2045 * - 0 if the HBA driver cannot set the capability to the requested value
2046 * - 1 if the HBA driver is able to set the capability to the requested value
2047 */
2048 /*ARGSUSED*/
2049 static int
2050 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2051 {
2052 int rval = 1;
2053
2054 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2055
2056 /* We don't allow setting capabilities for other targets */
2057 if (cap == NULL || whom == 0) {
2058 return (-1);
2059 }
2060
2061 switch (scsi_hba_lookup_capstr(cap)) {
2062 case SCSI_CAP_DMA_MAX:
2063 case SCSI_CAP_MSG_OUT:
2064 case SCSI_CAP_PARITY:
2065 case SCSI_CAP_LINKED_CMDS:
2066 case SCSI_CAP_RESET_NOTIFICATION:
2067 case SCSI_CAP_DISCONNECT:
2068 case SCSI_CAP_SYNCHRONOUS:
2069 case SCSI_CAP_UNTAGGED_QING:
2070 case SCSI_CAP_WIDE_XFER:
2071 case SCSI_CAP_INITIATOR_ID:
2072 case SCSI_CAP_ARQ:
2073 /*
2074 * None of these are settable via
2075 * the capability interface.
2076 */
2077 break;
2078 case SCSI_CAP_TAGGED_QING:
2079 rval = 1;
2080 break;
2081 case SCSI_CAP_SECTOR_SIZE:
2082 rval = 1;
2083 break;
2084
2085 case SCSI_CAP_TOTAL_SECTORS:
2086 rval = 1;
2087 break;
2088 default:
2089 rval = -1;
2090 break;
2091 }
2092
2093 return (rval);
2094 }
2095
2096 /*
2097 * tran_destroy_pkt - deallocate scsi_pkt structure
2098 * @ap:
2099 * @pkt:
2100 *
2101 * The tran_destroy_pkt() entry point is the HBA driver function that
2102 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2103 * called when the target driver calls scsi_destroy_pkt(). The
2104 * tran_destroy_pkt() entry point must free any DMA resources that have been
2105 * allocated for the packet. An implicit DMA synchronization occurs if the
2106 * DMA resources are freed and any cached data remains after the completion
2107 * of the transfer.
2108 */
2109 static void
2110 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2111 {
2112 struct scsa_cmd *acmd = PKT2CMD(pkt);
2113
2114 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2115
2116 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2117 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2118
2119 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2120
2121 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2122
2123 acmd->cmd_dmahandle = NULL;
2124 }
2125
2126 /* free the pkt */
2127 scsi_hba_pkt_free(ap, pkt);
2128 }
2129
2130 /*
2131 * tran_dmafree - deallocates DMA resources
2132 * @ap:
2133 * @pkt:
2134 *
2135 * The tran_dmafree() entry point deallocates DMAQ resources that have been
2136 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2137 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2138 * free only DMA resources allocated for a scsi_pkt structure, not the
2139 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2140 * implicitly performed.
2141 */
2142 /*ARGSUSED*/
2143 static void
2144 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2145 {
2146 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2147
2148 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2149
2150 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2151 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2152
2153 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2154
2155 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2156
2157 acmd->cmd_dmahandle = NULL;
2158 }
2159 }
2160
2161 /*
2162 * tran_sync_pkt - synchronize the DMA object allocated
2163 * @ap:
2164 * @pkt:
2165 *
2166 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2167 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2168 * entry point is called when the target driver calls scsi_sync_pkt(). If the
2169 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2170 * must synchronize the CPU's view of the data. If the data transfer direction
2171 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2172 * device's view of the data.
2173 */
2174 /*ARGSUSED*/
2175 static void
2176 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2177 {
2178 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2179
2180 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2181
2182 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2183 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2184 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2185 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2186 }
2187 }
2188
2189 /*ARGSUSED*/
2190 static int
2191 mrsas_tran_quiesce(dev_info_t *dip)
2192 {
2193 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2194
2195 return (1);
2196 }
2197
2198 /*ARGSUSED*/
2199 static int
2200 mrsas_tran_unquiesce(dev_info_t *dip)
2201 {
2202 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2203
2204 return (1);
2205 }
2206
2207
2208 /*
2209 * mrsas_isr(caddr_t)
2210 *
2211 * The Interrupt Service Routine
2212 *
2213 * Collect status for all completed commands and do callback
2214 *
2215 */
2216 static uint_t
2217 mrsas_isr(struct mrsas_instance *instance)
2218 {
2219 int need_softintr;
2220 uint32_t producer;
2221 uint32_t consumer;
2222 uint32_t context;
2223 uint32_t status, value;
2224 int retval;
2225
2226 struct mrsas_cmd *cmd;
2227 struct mrsas_header *hdr;
2228 struct scsi_pkt *pkt;
2229
2230 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2231 ASSERT(instance);
2232 if (instance->tbolt) {
2233 mutex_enter(&instance->chip_mtx);
2234 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2235 !(instance->func_ptr->intr_ack(instance))) {
2236 mutex_exit(&instance->chip_mtx);
2237 return (DDI_INTR_UNCLAIMED);
2238 }
2239 retval = mr_sas_tbolt_process_outstanding_cmd(instance);
2240 mutex_exit(&instance->chip_mtx);
2241 return (retval);
2242 } else {
2243 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2244 !instance->func_ptr->intr_ack(instance)) {
2245 return (DDI_INTR_UNCLAIMED);
2246 }
2247 }
2248
2249 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2250 0, 0, DDI_DMA_SYNC_FORCPU);
2251
2252 #ifdef OCRDEBUG
2253 if (debug_consecutive_timeout_after_ocr_g == 1) {
2254 con_log(CL_ANN1, (CE_NOTE,
2255 "simulating consecutive timeout after ocr"));
2256 return (DDI_INTR_CLAIMED);
2257 }
2258 #endif
2259
2260 mutex_enter(&instance->completed_pool_mtx);
2261 mutex_enter(&instance->cmd_pend_mtx);
2262
2263 producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2264 instance->producer);
2265 consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2266 instance->consumer);
2267
2268 con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
2269 producer, consumer));
2270 if (producer == consumer) {
2271 con_log(CL_ANN, (CE_WARN, "producer == consumer case"));
2272 mutex_exit(&instance->cmd_pend_mtx);
2273 mutex_exit(&instance->completed_pool_mtx);
2274 return (DDI_INTR_CLAIMED);
2275 }
2276
2277 while (consumer != producer) {
2278 context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2279 &instance->reply_queue[consumer]);
2280 cmd = instance->cmd_list[context];
2281
2282 if (cmd->sync_cmd == MRSAS_TRUE) {
2283 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2284 if (hdr) {
2285 mlist_del_init(&cmd->list);
2286 }
2287 } else {
2288 pkt = cmd->pkt;
2289 if (pkt) {
2290 mlist_del_init(&cmd->list);
2291 }
2292 }
2293
2294 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
2295
2296 consumer++;
2297 if (consumer == (instance->max_fw_cmds + 1)) {
2298 consumer = 0;
2299 }
2300 }
2301 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
2302 instance->consumer, consumer);
2303 mutex_exit(&instance->cmd_pend_mtx);
2304 mutex_exit(&instance->completed_pool_mtx);
2305
2306 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2307 0, 0, DDI_DMA_SYNC_FORDEV);
2308
2309 if (instance->softint_running) {
2310 need_softintr = 0;
2311 } else {
2312 need_softintr = 1;
2313 }
2314
2315 if (instance->isr_level == HIGH_LEVEL_INTR) {
2316 if (need_softintr) {
2317 ddi_trigger_softintr(instance->soft_intr_id);
2318 }
2319 } else {
2320 /*
2321 * Not a high-level interrupt, therefore call the soft level
2322 * interrupt explicitly
2323 */
2324 (void) mrsas_softintr(instance);
2325 }
2326
2327 return (DDI_INTR_CLAIMED);
2328 }
2329
2330
2331 /*
2332 * ************************************************************************** *
2333 * *
2334 * libraries *
2335 * *
2336 * ************************************************************************** *
2337 */
2338 /*
2339 * get_mfi_pkt : Get a command from the free pool
2340 * After successful allocation, the caller of this routine
2341 * must clear the frame buffer (memset to zero) before
2342 * using the packet further.
2343 *
2344 * ***** Note *****
2345 * After clearing the frame buffer the context id of the
2346 * frame buffer SHOULD be restored back.
2347 */
2348 static struct mrsas_cmd *
2349 get_mfi_pkt(struct mrsas_instance *instance)
2350 {
2351 mlist_t *head = &instance->cmd_pool_list;
2352 struct mrsas_cmd *cmd = NULL;
2353
2354 mutex_enter(&instance->cmd_pool_mtx);
2355 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2356
2357 if (!mlist_empty(head)) {
2358 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2359 mlist_del_init(head->next);
2360 }
2361 if (cmd != NULL) {
2362 cmd->pkt = NULL;
2363 cmd->retry_count_for_ocr = 0;
2364 cmd->drv_pkt_time = 0;
2365
2366 }
2367 mutex_exit(&instance->cmd_pool_mtx);
2368
2369 return (cmd);
2370 }
2371
2372 static struct mrsas_cmd *
2373 get_mfi_app_pkt(struct mrsas_instance *instance)
2374 {
2375 mlist_t *head = &instance->app_cmd_pool_list;
2376 struct mrsas_cmd *cmd = NULL;
2377
2378 mutex_enter(&instance->app_cmd_pool_mtx);
2379 ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));
2380
2381 if (!mlist_empty(head)) {
2382 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2383 mlist_del_init(head->next);
2384 }
2385 if (cmd != NULL){
2386 cmd->pkt = NULL;
2387 cmd->retry_count_for_ocr = 0;
2388 cmd->drv_pkt_time = 0;
2389 }
2390
2391 mutex_exit(&instance->app_cmd_pool_mtx);
2392
2393 return (cmd);
2394 }
2395 /*
2396 * return_mfi_pkt : Return a cmd to free command pool
2397 */
2398 static void
2399 return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2400 {
2401 mutex_enter(&instance->cmd_pool_mtx);
2402 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2403
2404 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2405
2406 mutex_exit(&instance->cmd_pool_mtx);
2407 }
2408
2409 static void
2410 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2411 {
2412 mutex_enter(&instance->app_cmd_pool_mtx);
2413 ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));
2414
2415 mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2416
2417 mutex_exit(&instance->app_cmd_pool_mtx);
2418 }
2419 void
2420 push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2421 {
2422 struct scsi_pkt *pkt;
2423 struct mrsas_header *hdr;
2424 con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
2425 mutex_enter(&instance->cmd_pend_mtx);
2426 ASSERT(mutex_owned(&instance->cmd_pend_mtx));
2427 mlist_del_init(&cmd->list);
2428 mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
2429 if (cmd->sync_cmd == MRSAS_TRUE) {
2430 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2431 if (hdr) {
2432 con_log(CL_ANN1, (CE_CONT,
2433 "push_pending_mfi_pkt: "
2434 "cmd %p index %x "
2435 "time %llx",
2436 (void *)cmd, cmd->index,
2437 gethrtime()));
2438 /* Wait for specified interval */
2439 cmd->drv_pkt_time = ddi_get16(
2440 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2441 if (cmd->drv_pkt_time < debug_timeout_g)
2442 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2443 con_log(CL_ANN1, (CE_CONT,
2444 "push_pending_pkt(): "
2445 "Called IO Timeout Value %x\n",
2446 cmd->drv_pkt_time));
2447 }
2448 if (hdr && instance->timeout_id == (timeout_id_t)-1) {
2449 instance->timeout_id = timeout(io_timeout_checker,
2450 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
2451 }
2452 } else {
2453 pkt = cmd->pkt;
2454 if (pkt) {
2455 con_log(CL_ANN1, (CE_CONT,
2456 "push_pending_mfi_pkt: "
2457 "cmd %p index %x pkt %p, "
2458 "time %llx",
2459 (void *)cmd, cmd->index, (void *)pkt,
2460 gethrtime()));
2461 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2462 }
2463 if (pkt && instance->timeout_id == (timeout_id_t)-1) {
2464 instance->timeout_id = timeout(io_timeout_checker,
2465 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
2466 }
2467 }
2468
2469 mutex_exit(&instance->cmd_pend_mtx);
2470
2471 }
2472
2473 int
2474 mrsas_print_pending_cmds(struct mrsas_instance *instance)
2475 {
2476 mlist_t *head = &instance->cmd_pend_list;
2477 mlist_t *tmp = head;
2478 struct mrsas_cmd *cmd = NULL;
2479 struct mrsas_header *hdr;
2480 unsigned int flag = 1;
2481 struct scsi_pkt *pkt;
2482 int saved_level;
2483 int cmd_count = 0;
2484
2485
2486 saved_level = debug_level_g;
2487 debug_level_g = CL_ANN1;
2488
2489 cmn_err(CE_NOTE, "mrsas_print_pending_cmds(): Called\n");
2490
2491 while (flag) {
2492 mutex_enter(&instance->cmd_pend_mtx);
2493 tmp = tmp->next;
2494 if (tmp == head) {
2495 mutex_exit(&instance->cmd_pend_mtx);
2496 flag = 0;
2497 con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): NO MORE CMDS PENDING....\n"));
2498 break;
2499 } else {
2500 cmd = mlist_entry(tmp, struct mrsas_cmd, list);
2501 mutex_exit(&instance->cmd_pend_mtx);
2502 if (cmd) {
2503 if (cmd->sync_cmd == MRSAS_TRUE) {
2504 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2505 if (hdr) {
2506 con_log(CL_ANN1, (CE_CONT,
2507 "print: cmd %p index 0x%x drv_pkt_time 0x%x (NO-PKT) hdr %p\n",
2508 (void *)cmd, cmd->index, cmd->drv_pkt_time, (void *)hdr));
2509 }
2510 } else {
2511 pkt = cmd->pkt;
2512 if (pkt) {
2513 con_log(CL_ANN1, (CE_CONT,
2514 "print: cmd %p index 0x%x drv_pkt_time 0x%x pkt %p \n",
2515 (void *)cmd, cmd->index, cmd->drv_pkt_time, (void *)pkt));
2516 }
2517 }
2518
2519 if (++cmd_count == 1)
2520 mrsas_print_cmd_details(instance, cmd, 0xDD);
2521 else
2522 mrsas_print_cmd_details(instance, cmd, 1);
2523
2524 }
2525 }
2526 }
2527 con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));
2528
2529
2530 debug_level_g = saved_level;
2531
2532 return (DDI_SUCCESS);
2533 }
2534
2535
2536 int
2537 mrsas_complete_pending_cmds(struct mrsas_instance *instance)
2538 {
2539
2540 struct mrsas_cmd *cmd = NULL;
2541 struct scsi_pkt *pkt;
2542 struct mrsas_header *hdr;
2543
2544 struct mlist_head *pos, *next;
2545
2546 con_log(CL_ANN1, (CE_NOTE,
2547 "mrsas_complete_pending_cmds(): Called"));
2548
2549 mutex_enter(&instance->cmd_pend_mtx);
2550 mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
2551 cmd = mlist_entry(pos, struct mrsas_cmd, list);
2552 if (cmd) {
2553 pkt = cmd->pkt;
2554 if (pkt) { /* for IO */
2555 if (((pkt->pkt_flags & FLAG_NOINTR)
2556 == 0) && pkt->pkt_comp) {
2557 pkt->pkt_reason
2558 = CMD_DEV_GONE;
2559 pkt->pkt_statistics
2560 = STAT_DISCON;
2561 con_log(CL_ANN1, (CE_CONT,
2562 "fail and posting to scsa "
2563 "cmd %p index %x"
2564 " pkt %p "
2565 "time : %llx",
2566 (void *)cmd, cmd->index,
2567 (void *)pkt, gethrtime()));
2568 (*pkt->pkt_comp)(pkt);
2569 }
2570 } else { /* for DCMDS */
2571 if (cmd->sync_cmd == MRSAS_TRUE) {
2572 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2573 con_log(CL_ANN1, (CE_CONT,
2574 "posting invalid status to application "
2575 "cmd %p index %x"
2576 " hdr %p "
2577 "time : %llx",
2578 (void *)cmd, cmd->index,
2579 (void *)hdr, gethrtime()));
2580 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2581 complete_cmd_in_sync_mode(instance, cmd);
2582 }
2583 }
2584 mlist_del_init(&cmd->list);
2585 } else {
2586 con_log(CL_ANN1, (CE_CONT,
2587 "mrsas_complete_pending_cmds:"
2588 "NULL command\n"));
2589 }
2590 con_log(CL_ANN1, (CE_CONT,
2591 "mrsas_complete_pending_cmds:"
2592 "looping for more commands\n"));
2593 }
2594 mutex_exit(&instance->cmd_pend_mtx);
2595
2596 con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
2597 return (DDI_SUCCESS);
2598 }
2599
2600 void
2601 mrsas_print_cmd_details(struct mrsas_instance *instance,
2602 struct mrsas_cmd *cmd, int detail )
2603 {
2604 struct scsi_pkt *pkt = cmd->pkt;
2605 Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
2606 MPI2_SCSI_IO_VENDOR_UNIQUE *raidContext;
2607 uint8_t *cdb_p;
2608 char str[100], *strp;
2609 int i, j, len;
2610 int saved_level;
2611
2612
2613 if (detail == 0xDD) {
2614 saved_level = debug_level_g;
2615 debug_level_g = CL_ANN1;
2616 }
2617
2618
2619 if (instance->tbolt) {
2620 con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
2621 (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
2622 }
2623 else {
2624 con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p cmd->index 0x%x timer 0x%x sec\n",
2625 (void *)cmd, cmd->index, cmd->drv_pkt_time));
2626 }
2627
2628 if(pkt) {
2629 con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
2630 pkt->pkt_cdbp[0]));
2631 }else {
2632 con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
2633 }
2634
2635 if((detail==0xDD) && instance->tbolt) {
2636 con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
2637 con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
2638 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->DevHandle),
2639 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->Function),
2640 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->IoFlags),
2641 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->SGLFlags),
2642 ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->DataLength) ));
2643
2644 for(i=0; i < 32; i++)
2645 con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ",i,
2646 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->CDB.CDB32[i]) ));
2647
2648 con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
2649 con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X ldTargetId=0x%X timeoutValue=0x%X"
2650 "regLockFlags=0x%X RAIDFlags=0x%X regLockRowLBA=0x%lX regLockLength=0x%X spanArm=0x%X\n",
2651 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.status),
2652 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.extStatus),
2653 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.ldTargetId),
2654 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.timeoutValue),
2655 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.regLockFlags),
2656 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.RAIDFlags),
2657 ddi_get64(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.regLockRowLBA),
2658 ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.regLockLength),
2659 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_io->RaidContext.spanArm) ));
2660
2661
2662 }
2663
2664 if (detail == 0xDD) {
2665 debug_level_g = saved_level;
2666 }
2667
2668 return;
2669 }
2670
2671
2672 int
2673 mrsas_issue_pending_cmds(struct mrsas_instance *instance)
2674 {
2675 mlist_t *head = &instance->cmd_pend_list;
2676 mlist_t *tmp = head->next;
2677 struct mrsas_cmd *cmd = NULL;
2678 struct scsi_pkt *pkt;
2679
2680 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
2681 while (tmp != head) {
2682 mutex_enter(&instance->cmd_pend_mtx);
2683 cmd = mlist_entry(tmp, struct mrsas_cmd, list);
2684 tmp = tmp->next;
2685 mutex_exit(&instance->cmd_pend_mtx);
2686 if (cmd) {
2687 con_log(CL_ANN1, (CE_CONT,
2688 "mrsas_issue_pending_cmds(): "
2689 "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
2690 (void *)cmd, cmd->index, cmd->drv_pkt_time));
2691
2692 if (cmd->drv_pkt_time < debug_timeout_g)
2693 cmd->drv_pkt_time = (uint16_t)debug_timeout_g; /* Reset command timeout value */
2694
2695 cmd->retry_count_for_ocr++;
2696
2697 cmn_err(CE_CONT, "cmd retry count = %d\n",
2698 cmd->retry_count_for_ocr);
2699
2700 if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
2701 cmn_err(CE_WARN,
2702 "mrsas_issue_pending_cmds(): cmd->retry_count exceeded limit >%d\n",
2703 IO_RETRY_COUNT);
2704 mrsas_print_cmd_details(instance, cmd, 0xDD);
2705
2706 cmn_err(CE_WARN,
2707 "mrsas_issue_pending_cmds():"
2708 "Calling KILL Adapter\n");
2709 if (instance->tbolt)
2710 (void) mrsas_tbolt_kill_adapter(instance);
2711 else
2712 (void) mrsas_kill_adapter(instance);
2713 return (DDI_FAILURE);
2714 }
2715
2716 pkt = cmd->pkt;
2717 if (pkt) {
2718 con_log(CL_ANN1, (CE_CONT,
2719 "PENDING PKT-CMD ISSUE: cmd %p index %x "
2720 "pkt %p time %llx",
2721 (void *)cmd, cmd->index,
2722 (void *)pkt,
2723 gethrtime()));
2724
2725 } else {
2726 cmn_err(CE_CONT,
2727 "mrsas_issue_pending_cmds(): "
2728 "NO-PKT, cmd %p index 0x%x drv_pkt_time 0x%x ",
2729 (void *)cmd, cmd->index, cmd->drv_pkt_time);
2730 }
2731
2732
2733 if (cmd->sync_cmd == MRSAS_TRUE) {
2734 cmn_err(CE_CONT, "mrsas_issue_pending_cmds(): SYNC_CMD == TRUE \n");
2735
2736 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
2737 } else {
2738 instance->func_ptr->issue_cmd(cmd, instance);
2739 }
2740 } else {
2741 con_log(CL_ANN1, (CE_CONT,
2742 "mrsas_issue_pending_cmds: NULL command\n"));
2743 }
2744 con_log(CL_ANN1, (CE_CONT,
2745 "mrsas_issue_pending_cmds:"
2746 "looping for more commands"));
2747 }
2748 con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
2749 return (DDI_SUCCESS);
2750 }
2751
2752
2753
2754 /*
2755 * destroy_mfi_frame_pool
2756 */
2757 void
2758 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2759 {
2760 int i;
2761 uint32_t max_cmd = instance->max_fw_cmds;
2762
2763 struct mrsas_cmd *cmd;
2764
2765 /* return all frames to pool */
2766
2767 for (i = 0; i < max_cmd; i++) {
2768
2769 cmd = instance->cmd_list[i];
2770
2771 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2772 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2773
2774 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
2775 }
2776
2777 }
2778
2779 /*
2780 * create_mfi_frame_pool
2781 */
2782 int
2783 create_mfi_frame_pool(struct mrsas_instance *instance)
2784 {
2785 int i = 0;
2786 int cookie_cnt;
2787 uint16_t max_cmd;
2788 uint16_t sge_sz;
2789 uint32_t sgl_sz;
2790 uint32_t tot_frame_size;
2791 struct mrsas_cmd *cmd;
2792 int retval = DDI_SUCCESS;
2793
2794 max_cmd = instance->max_fw_cmds;
2795 sge_sz = sizeof (struct mrsas_sge_ieee);
2796 /* calculated the number of 64byte frames required for SGL */
2797 sgl_sz = sge_sz * instance->max_num_sge;
2798 tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;
2799
2800 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
2801 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
2802
2803 while (i < max_cmd) {
2804 cmd = instance->cmd_list[i];
2805
2806 cmd->frame_dma_obj.size = tot_frame_size;
2807 cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
2808 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2809 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2810 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
2811 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
2812
2813 cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
2814 (uchar_t)DDI_STRUCTURE_LE_ACC);
2815
2816 if (cookie_cnt == -1 || cookie_cnt > 1) {
2817 cmn_err(CE_WARN,
2818 "create_mfi_frame_pool: could not alloc.");
2819 retval = DDI_FAILURE;
2820 goto mrsas_undo_frame_pool;
2821 }
2822
2823 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
2824
2825 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
2826 cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
2827 cmd->frame_phys_addr =
2828 cmd->frame_dma_obj.dma_cookie[0].dmac_address;
2829
2830 cmd->sense = (uint8_t *)(((unsigned long)
2831 cmd->frame_dma_obj.buffer) +
2832 tot_frame_size - SENSE_LENGTH);
2833 cmd->sense_phys_addr =
2834 cmd->frame_dma_obj.dma_cookie[0].dmac_address +
2835 tot_frame_size - SENSE_LENGTH;
2836
2837 if (!cmd->frame || !cmd->sense) {
2838 cmn_err(CE_WARN,
2839 "mr_sas: pci_pool_alloc failed");
2840 retval = ENOMEM;
2841 goto mrsas_undo_frame_pool;
2842 }
2843
2844 ddi_put32(cmd->frame_dma_obj.acc_handle,
2845 &cmd->frame->io.context, cmd->index);
2846 i++;
2847
2848 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
2849 cmd->index, cmd->frame_phys_addr));
2850 }
2851
2852 return (DDI_SUCCESS);
2853
2854 mrsas_undo_frame_pool:
2855 if (i > 0)
2856 destroy_mfi_frame_pool(instance);
2857
2858 return (retval);
2859 }
2860
2861 /*
2862 * free_additional_dma_buffer
2863 */
2864 static void
2865 free_additional_dma_buffer(struct mrsas_instance *instance)
2866 {
2867 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
2868 (void) mrsas_free_dma_obj(instance,
2869 instance->mfi_internal_dma_obj);
2870 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
2871 }
2872
2873 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
2874 (void) mrsas_free_dma_obj(instance,
2875 instance->mfi_evt_detail_obj);
2876 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
2877 }
2878 }
2879
2880 /*
2881 * alloc_additional_dma_buffer
2882 */
2883 static int
2884 alloc_additional_dma_buffer(struct mrsas_instance *instance)
2885 {
2886 uint32_t reply_q_sz;
2887 uint32_t internal_buf_size = PAGESIZE*2;
2888
2889 /* max cmds plus 1 + producer & consumer */
2890 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
2891
2892 instance->mfi_internal_dma_obj.size = internal_buf_size;
2893 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
2894 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2895 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
2896 0xFFFFFFFFU;
2897 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
2898
2899 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
2900 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
2901 cmn_err(CE_WARN,
2902 "mr_sas: could not alloc reply queue");
2903 return (DDI_FAILURE);
2904 }
2905
2906 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
2907
2908 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
2909
2910 instance->producer = (uint32_t *)((unsigned long)
2911 instance->mfi_internal_dma_obj.buffer);
2912 instance->consumer = (uint32_t *)((unsigned long)
2913 instance->mfi_internal_dma_obj.buffer + 4);
2914 instance->reply_queue = (uint32_t *)((unsigned long)
2915 instance->mfi_internal_dma_obj.buffer + 8);
2916 instance->internal_buf = (caddr_t)(((unsigned long)
2917 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
2918 instance->internal_buf_dmac_add =
2919 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
2920 (reply_q_sz + 8);
2921 instance->internal_buf_size = internal_buf_size -
2922 (reply_q_sz + 8);
2923
2924 /* allocate evt_detail */
2925 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
2926 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
2927 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2928 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2929 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
2930 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
2931
2932 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
2933 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
2934 cmn_err(CE_WARN, "alloc_additional_dma_buffer: "
2935 "could not allocate data transfer buffer.");
2936 goto mrsas_undo_internal_buff;
2937 }
2938
2939 bzero(instance->mfi_evt_detail_obj.buffer,
2940 sizeof (struct mrsas_evt_detail));
2941
2942 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
2943
2944 return (DDI_SUCCESS);
2945
2946 mrsas_undo_internal_buff:
2947 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
2948 (void) mrsas_free_dma_obj(instance,
2949 instance->mfi_internal_dma_obj);
2950 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
2951 }
2952
2953 return (DDI_FAILURE);
2954 }
2955
2956
2957 void
2958 mrsas_free_cmd_pool(struct mrsas_instance *instance)
2959 {
2960 int i;
2961 uint32_t max_cmd;
2962 size_t sz;
2963
2964 /* already freed */
2965 if (instance->cmd_list == NULL) {
2966 return;
2967 }
2968
2969 max_cmd = instance->max_fw_cmds;
2970
2971 /* size of cmd_list array */
2972 sz = sizeof (struct mrsas_cmd *) * max_cmd;
2973
2974 /* First free each cmd */
2975 for (i = 0; i < max_cmd; i++) {
2976 if (instance->cmd_list[i] != NULL)
2977 kmem_free(instance->cmd_list[i],sizeof (struct mrsas_cmd));
2978
2979 instance->cmd_list[i] = NULL;
2980 }
2981
2982 /* Now, free cmd_list array */
2983 if (instance->cmd_list != NULL)
2984 kmem_free(instance->cmd_list,sz);
2985
2986 instance->cmd_list = NULL;
2987
2988 INIT_LIST_HEAD(&instance->cmd_pool_list);
2989 INIT_LIST_HEAD(&instance->cmd_pend_list);
2990 if (instance->tbolt) {
2991 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
2992 }
2993 else {
2994 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
2995 }
2996
2997 }
2998
2999
3000 /*
3001 * mrsas_alloc_cmd_pool
3002 */
3003 int
3004 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3005 {
3006 int i;
3007 int count;
3008 uint32_t max_cmd;
3009 uint32_t reserve_cmd;
3010 size_t sz;
3011
3012 struct mrsas_cmd *cmd;
3013
3014 max_cmd = instance->max_fw_cmds;
3015 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3016 "max_cmd %x", max_cmd));
3017
3018
3019 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3020
3021 /*
3022 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3023 * Allocate the dynamic array first and then allocate individual
3024 * commands.
3025 */
3026 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3027 if (instance->cmd_list == NULL) {
3028 con_log(CL_NONE, (CE_WARN,
3029 "Failed to allocate memory for cmd_list"));
3030 return (DDI_FAILURE);
3031 }
3032
3033 /* create a frame pool and assign one frame to each cmd */
3034 for (count = 0; count < max_cmd; count++) {
3035 instance->cmd_list[count] = kmem_zalloc(sizeof (struct mrsas_cmd),
3036 KM_SLEEP);
3037 if (instance->cmd_list[count] == NULL) {
3038 con_log(CL_NONE, (CE_WARN,
3039 "Failed to allocate memory for mrsas_cmd"));
3040 goto mrsas_undo_cmds;
3041 }
3042 }
3043
3044 /* add all the commands to command pool */
3045
3046 INIT_LIST_HEAD(&instance->cmd_pool_list);
3047 INIT_LIST_HEAD(&instance->cmd_pend_list);
3048 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3049
3050 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
3051
3052 for (i = 0; i < reserve_cmd; i++) {
3053 cmd = instance->cmd_list[i];
3054 cmd->index = i;
3055 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3056 }
3057
3058
3059 for (i = reserve_cmd; i < max_cmd; i++) {
3060 cmd = instance->cmd_list[i];
3061 cmd->index = i;
3062 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3063 }
3064
3065 return (DDI_SUCCESS);
3066
3067 mrsas_undo_cmds:
3068 if (count > 0) {
3069 /* free each cmd */
3070 for (i = 0; i < count; i++) {
3071 if (instance->cmd_list[i] != NULL)
3072 kmem_free(instance->cmd_list[i],sizeof (struct mrsas_cmd));
3073 instance->cmd_list[i] = NULL;
3074 }
3075 }
3076
3077 mrsas_undo_cmd_list:
3078 if (instance->cmd_list != NULL)
3079 kmem_free(instance->cmd_list,sz);
3080 instance->cmd_list = NULL;
3081
3082 return (DDI_FAILURE);
3083 }
3084
3085
3086 /*
3087 * free_space_for_mfi
3088 */
3089 static void
3090 free_space_for_mfi(struct mrsas_instance *instance)
3091 {
3092
3093 /* already freed */
3094 if (instance->cmd_list == NULL) {
3095 return;
3096 }
3097
3098 /* Free additional dma buffer */
3099 free_additional_dma_buffer(instance);
3100
3101 /* Free the MFI frame pool */
3102 destroy_mfi_frame_pool(instance);
3103
3104 /* Free all the commands in the cmd_list */
3105 /* Free the cmd_list buffer itself */
3106 mrsas_free_cmd_pool(instance);
3107 }
3108
3109 /*
3110 * alloc_space_for_mfi
3111 */
3112 static int
3113 alloc_space_for_mfi(struct mrsas_instance *instance)
3114 {
3115 /* Allocate command pool ( memory for cmd_list & individual commands )*/
3116 if (mrsas_alloc_cmd_pool(instance)) {
3117 cmn_err(CE_WARN, "error creating cmd pool");
3118 return (DDI_FAILURE);
3119 }
3120
3121 /* Allocate MFI Frame pool */
3122 if (create_mfi_frame_pool(instance)) {
3123 cmn_err(CE_WARN, "error creating frame DMA pool");
3124 goto mfi_undo_cmd_pool;
3125 }
3126
3127 /* Allocate additional DMA buffer */
3128 if (alloc_additional_dma_buffer(instance)) {
3129 cmn_err(CE_WARN, "error creating frame DMA pool");
3130 goto mfi_undo_frame_pool;
3131 }
3132
3133 return (DDI_SUCCESS);
3134
3135 mfi_undo_frame_pool:
3136 destroy_mfi_frame_pool(instance);
3137
3138 mfi_undo_cmd_pool:
3139 mrsas_free_cmd_pool(instance);
3140
3141 return (DDI_FAILURE);
3142 }
3143
3144
3145
3146 /*
3147 * get_ctrl_info
3148 */
3149 static int
3150 get_ctrl_info(struct mrsas_instance *instance,
3151 struct mrsas_ctrl_info *ctrl_info)
3152 {
3153 int ret = 0;
3154
3155 struct mrsas_cmd *cmd;
3156 struct mrsas_dcmd_frame *dcmd;
3157 struct mrsas_ctrl_info *ci;
3158
3159 if(instance->tbolt) {
3160 cmd = get_raid_msg_mfi_pkt(instance);
3161 }
3162 else {
3163 cmd = get_mfi_pkt(instance);
3164 }
3165
3166 if (!cmd) {
3167 cmn_err(CE_WARN,
3168 "Failed to get a cmd from free-pool in get_ctrl_info(). fw_outstanding=0x%X max_fw_cmds=0x%X",
3169 instance->fw_outstanding, instance->max_fw_cmds);
3170 return (DDI_FAILURE);
3171 }
3172
3173 /* Clear the frame buffer and assign back the context id */
3174 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3175 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3176 cmd->index);
3177
3178 dcmd = &cmd->frame->dcmd;
3179
3180 ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3181
3182 if (!ci) {
3183 cmn_err(CE_WARN,
3184 "Failed to alloc mem for ctrl info");
3185 return_mfi_pkt(instance, cmd);
3186 return (DDI_FAILURE);
3187 }
3188
3189 (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3190
3191 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
3192 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3193
3194 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3195 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3196 MFI_CMD_STATUS_POLL_MODE);
3197 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3198 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3199 MFI_FRAME_DIR_READ);
3200 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3201 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3202 sizeof (struct mrsas_ctrl_info));
3203 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3204 MR_DCMD_CTRL_GET_INFO);
3205 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3206 instance->internal_buf_dmac_add);
3207 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3208 sizeof (struct mrsas_ctrl_info));
3209
3210 cmd->frame_count = 1;
3211
3212 if (instance->tbolt) {
3213 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3214 }
3215
3216 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3217 ret = 0;
3218
3219 ctrl_info->max_request_size = ddi_get32(
3220 cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
3221
3222 ctrl_info->ld_present_count = ddi_get16(
3223 cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
3224
3225 ctrl_info->properties.on_off_properties =
3226 ddi_get32(cmd->frame_dma_obj.acc_handle,
3227 &ci->properties.on_off_properties);
3228 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3229 (uint8_t *)(ctrl_info->product_name),
3230 (uint8_t *)(ci->product_name), 80 * sizeof (char),
3231 DDI_DEV_AUTOINCR);
3232 /* should get more members of ci with ddi_get when needed */
3233 } else {
3234 cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed");
3235 ret = -1;
3236 }
3237
3238 if(instance->tbolt) {
3239 return_raid_msg_mfi_pkt(instance, cmd);
3240 }
3241 else {
3242 return_mfi_pkt(instance, cmd);
3243 }
3244
3245 return (ret);
3246 }
3247
3248 /*
3249 * abort_aen_cmd
3250 */
3251 static int
3252 abort_aen_cmd(struct mrsas_instance *instance,
3253 struct mrsas_cmd *cmd_to_abort)
3254 {
3255 int ret = 0;
3256
3257 struct mrsas_cmd *cmd;
3258 struct mrsas_abort_frame *abort_fr;
3259
3260 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3261
3262 if (instance->tbolt) {
3263 cmd = get_raid_msg_mfi_pkt(instance);
3264 } else {
3265 cmd = get_mfi_pkt(instance);
3266 }
3267
3268 if (!cmd) {
3269 cmn_err(CE_WARN,
3270 "Failed to get a cmd from free-pool in abort_aen_cmd(). fw_outstanding=0x%X max_fw_cmds=0x%X",
3271 instance->fw_outstanding, instance->max_fw_cmds);
3272 return (DDI_FAILURE);
3273 }
3274 /* Clear the frame buffer and assign back the context id */
3275 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3276 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3277 cmd->index);
3278
3279 abort_fr = &cmd->frame->abort;
3280
3281 /* prepare and issue the abort frame */
3282 ddi_put8(cmd->frame_dma_obj.acc_handle,
3283 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3284 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3285 MFI_CMD_STATUS_SYNC_MODE);
3286 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3287 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3288 cmd_to_abort->index);
3289 ddi_put32(cmd->frame_dma_obj.acc_handle,
3290 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3291 ddi_put32(cmd->frame_dma_obj.acc_handle,
3292 &abort_fr->abort_mfi_phys_addr_hi, 0);
3293
3294 instance->aen_cmd->abort_aen = 1;
3295
3296 cmd->frame_count = 1;
3297
3298 if (instance->tbolt) {
3299 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3300 }
3301
3302 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3303 con_log(CL_ANN1, (CE_WARN,
3304 "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3305 ret = -1;
3306 } else {
3307 ret = 0;
3308 }
3309
3310 instance->aen_cmd->abort_aen = 1;
3311 instance->aen_cmd = 0;
3312
3313 if (instance->tbolt) {
3314 return_raid_msg_mfi_pkt(instance, cmd);
3315 } else {
3316 return_mfi_pkt(instance, cmd);
3317 }
3318
3319 atomic_add_16(&instance->fw_outstanding, (-1));
3320
3321 return (ret);
3322 }
3323
3324
3325 static int
3326 mrsas_build_init_cmd(struct mrsas_instance *instance, struct mrsas_cmd **cmd_ptr)
3327 {
3328 struct mrsas_cmd *cmd;
3329 struct mrsas_init_frame *init_frame;
3330 struct mrsas_init_queue_info *initq_info;
3331 struct mrsas_drv_ver drv_ver_info;
3332
3333
3334 /*
3335 * Prepare a init frame. Note the init frame points to queue info
3336 * structure. Each frame has SGL allocated after first 64 bytes. For
3337 * this frame - since we don't need any SGL - we use SGL's space as
3338 * queue info structure
3339 */
3340 cmd = *cmd_ptr;
3341
3342
3343 /* Clear the frame buffer and assign back the context id */
3344 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3345 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3346 cmd->index);
3347
3348 init_frame = (struct mrsas_init_frame *)cmd->frame;
3349 initq_info = (struct mrsas_init_queue_info *)
3350 ((unsigned long)init_frame + 64);
3351
3352 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3353 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3354
3355 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3356
3357 ddi_put32(cmd->frame_dma_obj.acc_handle,
3358 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3359
3360 ddi_put32(cmd->frame_dma_obj.acc_handle,
3361 &initq_info->producer_index_phys_addr_hi, 0);
3362 ddi_put32(cmd->frame_dma_obj.acc_handle,
3363 &initq_info->producer_index_phys_addr_lo,
3364 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3365
3366 ddi_put32(cmd->frame_dma_obj.acc_handle,
3367 &initq_info->consumer_index_phys_addr_hi, 0);
3368 ddi_put32(cmd->frame_dma_obj.acc_handle,
3369 &initq_info->consumer_index_phys_addr_lo,
3370 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3371
3372 ddi_put32(cmd->frame_dma_obj.acc_handle,
3373 &initq_info->reply_queue_start_phys_addr_hi, 0);
3374 ddi_put32(cmd->frame_dma_obj.acc_handle,
3375 &initq_info->reply_queue_start_phys_addr_lo,
3376 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3377
3378 ddi_put8(cmd->frame_dma_obj.acc_handle,
3379 &init_frame->cmd, MFI_CMD_OP_INIT);
3380 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3381 MFI_CMD_STATUS_POLL_MODE);
3382 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3383 ddi_put32(cmd->frame_dma_obj.acc_handle,
3384 &init_frame->queue_info_new_phys_addr_lo,
3385 cmd->frame_phys_addr + 64);
3386 ddi_put32(cmd->frame_dma_obj.acc_handle,
3387 &init_frame->queue_info_new_phys_addr_hi, 0);
3388
3389
3390 /* fill driver version information*/
3391 fill_up_drv_ver(&drv_ver_info);
3392
3393 /* allocate the driver version data transfer buffer */
3394 instance->drv_ver_dma_obj.size = sizeof(drv_ver_info.drv_ver);
3395 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
3396 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3397 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3398 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
3399 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
3400
3401 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
3402 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3403 con_log(CL_ANN, (CE_WARN,
3404 "init_mfi : Could not allocate driver version buffer."));
3405 return (DDI_FAILURE);
3406 }
3407 /* copy driver version to dma buffer*/
3408 (void) memset(instance->drv_ver_dma_obj.buffer, 0,sizeof(drv_ver_info.drv_ver));
3409 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
3410 (uint8_t *)drv_ver_info.drv_ver,
3411 (uint8_t *)instance->drv_ver_dma_obj.buffer,
3412 sizeof(drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
3413
3414
3415 /*copy driver version physical address to init frame*/
3416 ddi_put64(cmd->frame_dma_obj.acc_handle,
3417 &init_frame->driverversion, instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
3418
3419 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3420 sizeof (struct mrsas_init_queue_info));
3421
3422 cmd->frame_count = 1;
3423
3424 *cmd_ptr = cmd;
3425
3426 return (DDI_SUCCESS);
3427 }
3428
3429
3430 /*
3431 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3432 */
3433 int
3434 mrsas_init_adapter_ppc (struct mrsas_instance *instance)
3435 {
3436 struct mrsas_cmd *cmd;
3437
3438 /* allocate memory for mfi adapter(cmd pool, individual commands, mfi frames etc */
3439 if (alloc_space_for_mfi(instance) != DDI_SUCCESS){
3440 con_log(CL_ANN, (CE_NOTE,
3441 "Error, failed to allocate memory for MFI adapter"));
3442 return (DDI_FAILURE);
3443 }
3444
3445 /* Build INIT command */
3446 cmd = get_mfi_pkt(instance);
3447
3448 if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS){
3449 con_log(CL_ANN, (CE_NOTE,
3450 "Error, failed to build INIT command"));
3451
3452 goto fail_undo_alloc_mfi_space;
3453 }
3454
3455 //Disalbe interrupt before sending init frame ( see linux driver code)
3456 /* send INIT MFI frame in polled mode */
3457 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3458 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3459 goto fail_fw_init;
3460 }
3461
3462 if (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000) {
3463 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3464 instance->flag_ieee = 1;
3465 }
3466
3467 instance->unroll.alloc_space_mfi = 1;
3468 instance->unroll.verBuff = 1;
3469
3470 return (DDI_SUCCESS);
3471
3472
3473 fail_fw_init:
3474 mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3475
3476 fail_undo_alloc_mfi_space:
3477 return_mfi_pkt(instance, cmd);
3478 free_space_for_mfi(instance);
3479
3480 return (DDI_FAILURE);
3481
3482 }
3483
3484 /*
3485 * mrsas_init_adapter - Initialize adapter.
3486 */
3487 int
3488 mrsas_init_adapter (struct mrsas_instance *instance)
3489 {
3490 struct mrsas_ctrl_info ctrl_info;
3491
3492
3493 /* we expect the FW state to be READY */
3494 if (mfi_state_transition_to_ready(instance)) {
3495 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3496 return (DDI_FAILURE);
3497 }
3498
3499 /* get various operational parameters from status register */
3500 instance->max_num_sge =
3501 (instance->func_ptr->read_fw_status_reg(instance) &
3502 0xFF0000) >> 0x10;
3503 instance->max_num_sge =
3504 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3505 MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3506
3507 /*
3508 * Reduce the max supported cmds by 1. This is to ensure that the
3509 * reply_q_sz (1 more than the max cmd that driver may send)
3510 * does not exceed max cmds that the FW can support
3511 */
3512 instance->max_fw_cmds =
3513 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3514 instance->max_fw_cmds = instance->max_fw_cmds - 1;
3515
3516
3517
3518 /* Initialize adapter */
3519 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3520 con_log(CL_ANN, (CE_WARN, "mr_sas: "
3521 "could not initialize adapter"));
3522 return (DDI_FAILURE);
3523 }
3524
3525 /* gather misc FW related information */
3526 instance->disable_online_ctrl_reset = 0;
3527
3528 if (!get_ctrl_info(instance, &ctrl_info)) {
3529 instance->max_sectors_per_req = ctrl_info.max_request_size;
3530 con_log(CL_ANN1, (CE_NOTE,
3531 "product name %s ld present %d",
3532 ctrl_info.product_name, ctrl_info.ld_present_count));
3533 } else {
3534 instance->max_sectors_per_req = instance->max_num_sge *
3535 PAGESIZE / 512;
3536 }
3537
3538 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG) {
3539 instance->disable_online_ctrl_reset = 1;
3540 con_log(CL_ANN1, (CE_NOTE,
3541 "Disable online control Flag is set\n"));
3542 }
3543 else {
3544 con_log(CL_ANN1, (CE_NOTE,
3545 "Disable online control Flag is not set\n"));
3546 }
3547
3548 return (DDI_SUCCESS);
3549
3550 }
3551
3552
3553
3554 static int
3555 mrsas_issue_init_mfi(struct mrsas_instance *instance)
3556 {
3557 struct mrsas_cmd *cmd;
3558 struct mrsas_init_frame *init_frame;
3559 struct mrsas_init_queue_info *initq_info;
3560
3561 /*
3562 * Prepare a init frame. Note the init frame points to queue info
3563 * structure. Each frame has SGL allocated after first 64 bytes. For
3564 * this frame - since we don't need any SGL - we use SGL's space as
3565 * queue info structure
3566 */
3567 con_log(CL_ANN1, (CE_NOTE,
3568 "mrsas_issue_init_mfi: entry\n"));
3569 cmd = get_mfi_app_pkt(instance);
3570
3571 if (!cmd) {
3572 con_log(CL_ANN1, (CE_WARN,
3573 "mrsas_issue_init_mfi: get_pkt failed\n"));
3574 return (DDI_FAILURE);
3575 }
3576
3577 /* Clear the frame buffer and assign back the context id */
3578 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3579 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3580 cmd->index);
3581
3582 init_frame = (struct mrsas_init_frame *)cmd->frame;
3583 initq_info = (struct mrsas_init_queue_info *)
3584 ((unsigned long)init_frame + 64);
3585
3586 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3587 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3588
3589 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3590
3591 ddi_put32(cmd->frame_dma_obj.acc_handle,
3592 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3593 ddi_put32(cmd->frame_dma_obj.acc_handle,
3594 &initq_info->producer_index_phys_addr_hi, 0);
3595 ddi_put32(cmd->frame_dma_obj.acc_handle,
3596 &initq_info->producer_index_phys_addr_lo,
3597 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3598 ddi_put32(cmd->frame_dma_obj.acc_handle,
3599 &initq_info->consumer_index_phys_addr_hi, 0);
3600 ddi_put32(cmd->frame_dma_obj.acc_handle,
3601 &initq_info->consumer_index_phys_addr_lo,
3602 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3603
3604 ddi_put32(cmd->frame_dma_obj.acc_handle,
3605 &initq_info->reply_queue_start_phys_addr_hi, 0);
3606 ddi_put32(cmd->frame_dma_obj.acc_handle,
3607 &initq_info->reply_queue_start_phys_addr_lo,
3608 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3609
3610 ddi_put8(cmd->frame_dma_obj.acc_handle,
3611 &init_frame->cmd, MFI_CMD_OP_INIT);
3612 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3613 MFI_CMD_STATUS_POLL_MODE);
3614 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3615 ddi_put32(cmd->frame_dma_obj.acc_handle,
3616 &init_frame->queue_info_new_phys_addr_lo,
3617 cmd->frame_phys_addr + 64);
3618 ddi_put32(cmd->frame_dma_obj.acc_handle,
3619 &init_frame->queue_info_new_phys_addr_hi, 0);
3620
3621 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3622 sizeof (struct mrsas_init_queue_info));
3623
3624 cmd->frame_count = 1;
3625
3626 /* issue the init frame in polled mode */
3627 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3628 con_log(CL_ANN1, (CE_WARN,
3629 "mrsas_issue_init_mfi():failed to "
3630 "init firmware"));
3631 return_mfi_app_pkt(instance, cmd);
3632 return (DDI_FAILURE);
3633 }
3634
3635 return_mfi_app_pkt(instance, cmd);
3636 con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));
3637
3638 return (DDI_SUCCESS);
3639 }
3640 /*
3641 * mfi_state_transition_to_ready : Move the FW to READY state
3642 *
3643 * @reg_set : MFI register set
3644 */
3645 int
3646 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3647 {
3648 int i;
3649 uint8_t max_wait;
3650 uint32_t fw_ctrl = 0;
3651 uint32_t fw_state;
3652 uint32_t cur_state;
3653 uint32_t cur_abs_reg_val;
3654 uint32_t prev_abs_reg_val;
3655 uint32_t status;
3656
3657 cur_abs_reg_val =
3658 instance->func_ptr->read_fw_status_reg(instance);
3659 fw_state =
3660 cur_abs_reg_val & MFI_STATE_MASK;
3661 con_log(CL_ANN1, (CE_CONT,
3662 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3663
3664 while (fw_state != MFI_STATE_READY) {
3665 con_log(CL_ANN, (CE_CONT,
3666 "mfi_state_transition_to_ready:FW state%x", fw_state));
3667
3668 switch (fw_state) {
3669 case MFI_STATE_FAULT:
3670 con_log(CL_ANN, (CE_NOTE,
3671 "mr_sas: FW in FAULT state!!"));
3672
3673 return (ENODEV);
3674 case MFI_STATE_WAIT_HANDSHAKE:
3675 /* set the CLR bit in IMR0 */
3676 con_log(CL_ANN1, (CE_NOTE,
3677 "mr_sas: FW waiting for HANDSHAKE"));
3678 /*
3679 * PCI_Hot Plug: MFI F/W requires
3680 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3681 * to be set
3682 */
3683 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3684 if (!instance->tbolt) {
3685 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3686 MFI_INIT_HOTPLUG, instance);
3687 } else {
3688 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3689 MFI_INIT_HOTPLUG, instance);
3690 }
3691 max_wait = (instance->tbolt == 1) ? 180 : 2;
3692 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3693 break;
3694 case MFI_STATE_BOOT_MESSAGE_PENDING:
3695 /* set the CLR bit in IMR0 */
3696 con_log(CL_ANN1, (CE_NOTE,
3697 "mr_sas: FW state boot message pending"));
3698 /*
3699 * PCI_Hot Plug: MFI F/W requires
3700 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3701 * to be set
3702 */
3703 if (!instance->tbolt) {
3704 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3705 } else {
3706 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3707 instance);
3708 }
3709 max_wait = (instance->tbolt == 1) ? 180 : 10;
3710 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3711 break;
3712 case MFI_STATE_OPERATIONAL:
3713 /* bring it to READY state; assuming max wait 2 secs */
3714 instance->func_ptr->disable_intr(instance);
3715 con_log(CL_ANN1, (CE_NOTE,
3716 "mr_sas: FW in OPERATIONAL state"));
3717 /*
3718 * PCI_Hot Plug: MFI F/W requires
3719 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3720 * to be set
3721 */
3722 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3723 if (!instance->tbolt) {
3724 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3725 } else {
3726 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3727 instance);
3728
3729 for (i = 0; i < (10 * 1000); i++) {
3730 status =
3731 RD_RESERVED0_REGISTER(instance);
3732 if (status & 1)
3733 delay(1 *
3734 drv_usectohz(MILLISEC));
3735 else
3736 break;
3737 }
3738
3739 }
3740 max_wait = (instance->tbolt == 1) ? 180 : 10;
3741 cur_state = MFI_STATE_OPERATIONAL;
3742 break;
3743 case MFI_STATE_UNDEFINED:
3744 /* this state should not last for more than 2 seconds */
3745 con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3746
3747 max_wait = (instance->tbolt == 1) ? 180 : 2;
3748 cur_state = MFI_STATE_UNDEFINED;
3749 break;
3750 case MFI_STATE_BB_INIT:
3751 max_wait = (instance->tbolt == 1) ? 180 : 2;
3752 cur_state = MFI_STATE_BB_INIT;
3753 break;
3754 case MFI_STATE_FW_INIT:
3755 max_wait = (instance->tbolt == 1) ? 180 : 2;
3756 cur_state = MFI_STATE_FW_INIT;
3757 break;
3758 case MFI_STATE_FW_INIT_2:
3759 max_wait = 180;
3760 cur_state = MFI_STATE_FW_INIT_2;
3761 break;
3762 case MFI_STATE_DEVICE_SCAN:
3763 max_wait = 180;
3764 cur_state = MFI_STATE_DEVICE_SCAN;
3765 prev_abs_reg_val = cur_abs_reg_val;
3766 con_log(CL_NONE, (CE_NOTE,
3767 "Device scan in progress ...\n"));
3768 break;
3769 case MFI_STATE_FLUSH_CACHE:
3770 max_wait = 180;
3771 cur_state = MFI_STATE_FLUSH_CACHE;
3772 break;
3773 default:
3774 con_log(CL_ANN1, (CE_NOTE,
3775 "mr_sas: Unknown state 0x%x", fw_state));
3776 return (ENODEV);
3777 }
3778
3779 /* the cur_state should not last for more than max_wait secs */
3780 for (i = 0; i < (max_wait * MILLISEC); i++) {
3781 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
3782 cur_abs_reg_val =
3783 instance->func_ptr->read_fw_status_reg(instance);
3784 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3785
3786 if (fw_state == cur_state) {
3787 delay(1 * drv_usectohz(MILLISEC));
3788 } else {
3789 break;
3790 }
3791 }
3792 if (fw_state == MFI_STATE_DEVICE_SCAN) {
3793 if (prev_abs_reg_val != cur_abs_reg_val) {
3794 continue;
3795 }
3796 }
3797
3798 /* return error if fw_state hasn't changed after max_wait */
3799 if (fw_state == cur_state) {
3800 con_log(CL_ANN1, (CE_WARN,
3801 "FW state hasn't changed in %d secs", max_wait));
3802 return (ENODEV);
3803 }
3804 };
3805
3806 if (!instance->tbolt) {
3807 fw_ctrl = RD_IB_DOORBELL(instance);
3808 con_log(CL_ANN1, (CE_CONT,
3809 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
3810 }
3811
3812 #if 0
3813 /*
3814 * Write 0xF to the doorbell register to do the following.
3815 * - Abort all outstanding commands (bit 0).
3816 * - Transition from OPERATIONAL to READY state (bit 1).
3817 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3818 * - Set to release FW to continue running (i.e. BIOS handshake
3819 * (bit 3).
3820 */
3821 if (!instance->tbolt) {
3822 WR_IB_DOORBELL(0xF, instance);
3823 }
3824 #endif
3825
3826 return (DDI_SUCCESS);
3827 }
3828
3829 /*
3830 * get_seq_num
3831 */
3832 static int
3833 get_seq_num(struct mrsas_instance *instance,
3834 struct mrsas_evt_log_info *eli)
3835 {
3836 int ret = DDI_SUCCESS;
3837
3838 dma_obj_t dcmd_dma_obj;
3839 struct mrsas_cmd *cmd;
3840 struct mrsas_dcmd_frame *dcmd;
3841 struct mrsas_evt_log_info *eli_tmp;
3842 if (instance->tbolt) {
3843 cmd = get_raid_msg_mfi_pkt(instance);
3844 } else {
3845 cmd = get_mfi_pkt(instance);
3846 }
3847
3848 if (!cmd) {
3849 cmn_err(CE_WARN,
3850 "Failed to get a cmd from free-pool in get_seq_num(). fw_outstanding=0x%X max_fw_cmds=0x%X",
3851 instance->fw_outstanding, instance->max_fw_cmds);
3852 return (ENOMEM);
3853 }
3854
3855 /* Clear the frame buffer and assign back the context id */
3856 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3857 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3858 cmd->index);
3859
3860 dcmd = &cmd->frame->dcmd;
3861
3862 /* allocate the data transfer buffer */
3863 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
3864 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3865 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3866 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3867 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3868 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3869
3870 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3871 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3872 cmn_err(CE_WARN,
3873 "get_seq_num: could not allocate data transfer buffer.");
3874 return (DDI_FAILURE);
3875 }
3876
3877 (void) memset(dcmd_dma_obj.buffer, 0,
3878 sizeof (struct mrsas_evt_log_info));
3879
3880 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3881
3882 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3883 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3884 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3885 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3886 MFI_FRAME_DIR_READ);
3887 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3888 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3889 sizeof (struct mrsas_evt_log_info));
3890 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3891 MR_DCMD_CTRL_EVENT_GET_INFO);
3892 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3893 sizeof (struct mrsas_evt_log_info));
3894 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3895 dcmd_dma_obj.dma_cookie[0].dmac_address);
3896
3897 cmd->sync_cmd = MRSAS_TRUE;
3898 cmd->frame_count = 1;
3899
3900 if (instance->tbolt) {
3901 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3902 }
3903
3904 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
3905 cmn_err(CE_WARN, "get_seq_num: "
3906 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
3907 ret = DDI_FAILURE;
3908 } else {
3909 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
3910 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
3911 &eli_tmp->newest_seq_num);
3912 ret = DDI_SUCCESS;
3913 }
3914
3915 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
3916 ret = DDI_FAILURE;
3917
3918 if (instance->tbolt) {
3919 return_raid_msg_mfi_pkt(instance, cmd);
3920 } else {
3921 return_mfi_pkt(instance, cmd);
3922 }
3923
3924 return (ret);
3925 }
3926
3927 /*
3928 * start_mfi_aen
3929 */
3930 static int
3931 start_mfi_aen(struct mrsas_instance *instance)
3932 {
3933 int ret = 0;
3934
3935 struct mrsas_evt_log_info eli;
3936 union mrsas_evt_class_locale class_locale;
3937
3938 /* get the latest sequence number from FW */
3939 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
3940
3941 if (get_seq_num(instance, &eli)) {
3942 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
3943 return (-1);
3944 }
3945
3946 /* register AEN with FW for latest sequence number plus 1 */
3947 class_locale.members.reserved = 0;
3948 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
3949 class_locale.members.class = MR_EVT_CLASS_INFO;
3950 class_locale.word = LE_32(class_locale.word);
3951 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
3952 class_locale.word);
3953
3954 if (ret) {
3955 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
3956 return (-1);
3957 }
3958
3959
3960 return (ret);
3961 }
3962
3963 /*
3964 * flush_cache
3965 */
3966 static void
3967 flush_cache(struct mrsas_instance *instance)
3968 {
3969 struct mrsas_cmd *cmd = NULL;
3970 struct mrsas_dcmd_frame *dcmd;
3971 uint32_t max_cmd = instance->max_fw_cmds;
3972 if (instance->tbolt) {
3973 cmd = get_raid_msg_mfi_pkt(instance);
3974 } else {
3975 cmd = get_mfi_pkt(instance);
3976 }
3977
3978 if (!cmd) {
3979 cmn_err(CE_WARN,
3980 "Failed to get a cmd from free-pool in flush_cache(). fw_outstanding=0x%X max_fw_cmds=0x%X",
3981 instance->fw_outstanding, instance->max_fw_cmds);
3982 return;
3983 }
3984
3985 /* Clear the frame buffer and assign back the context id */
3986 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3987 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3988 cmd->index);
3989
3990 dcmd = &cmd->frame->dcmd;
3991
3992 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3993
3994 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3995 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
3996 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
3997 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3998 MFI_FRAME_DIR_NONE);
3999 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4000 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4001 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4002 MR_DCMD_CTRL_CACHE_FLUSH);
4003 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4004 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4005
4006 cmd->frame_count = 1;
4007
4008 if (instance->tbolt) {
4009 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4010 }
4011
4012 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4013 con_log(CL_ANN1, (CE_WARN,
4014 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4015 }
4016 con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4017 if (instance->tbolt) {
4018 return_raid_msg_mfi_pkt(instance, cmd);
4019 } else {
4020 return_mfi_pkt(instance, cmd);
4021 }
4022
4023 }
4024
4025 /*
4026 * service_mfi_aen- Completes an AEN command
4027 * @instance: Adapter soft state
4028 * @cmd: Command to be completed
4029 *
4030 */
4031 void
4032 service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
4033 {
4034 uint32_t seq_num;
4035 uint32_t i;
4036 struct mrsas_evt_detail *evt_detail =
4037 (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
4038 int rval = 0;
4039 int tgt = 0;
4040 uint8_t dtype;
4041 #ifdef PDSUPPORT
4042 mrsas_pd_address_t *pd_addr;
4043 #endif
4044 ddi_acc_handle_t acc_handle;
4045
4046 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
4047
4048 acc_handle = cmd->frame_dma_obj.acc_handle;
4049 cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
4050 if (cmd->cmd_status == ENODATA) {
4051 cmd->cmd_status = 0;
4052 }
4053
4054 /*
4055 * log the MFI AEN event to the sysevent queue so that
4056 * application will get noticed
4057 */
4058 if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
4059 NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
4060 int instance_no = ddi_get_instance(instance->dip);
4061 con_log(CL_ANN, (CE_WARN,
4062 "mr_sas%d: Failed to log AEN event", instance_no));
4063 }
4064 /*
4065 * Check for any ld devices that has changed state. i.e. online
4066 * or offline.
4067 */
4068 con_log(CL_ANN1, (CE_CONT,
4069 "AEN: code = %x class = %x locale = %x args = %x",
4070 ddi_get32(acc_handle, &evt_detail->code),
4071 evt_detail->cl.members.class,
4072 ddi_get16(acc_handle, &evt_detail->cl.members.locale),
4073 ddi_get8(acc_handle, &evt_detail->arg_type)));
4074
4075 switch (ddi_get32(acc_handle, &evt_detail->code)) {
4076 case MR_EVT_CFG_CLEARED: {
4077 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
4078 if (instance->mr_ld_list[tgt].dip != NULL) {
4079 mutex_enter(&instance->config_dev_mtx);
4080 instance->mr_ld_list[tgt].flag =
4081 ~MRDRV_TGT_VALID;
4082 mutex_exit(&instance->config_dev_mtx);
4083 rval = mrsas_service_evt(instance, tgt, 0,
4084 MRSAS_EVT_UNCONFIG_TGT, NULL);
4085 con_log(CL_ANN1, (CE_WARN,
4086 "mr_sas: CFG CLEARED AEN rval = %d "
4087 "tgt id = %d", rval, tgt));
4088 }
4089 }
4090 break;
4091 }
4092
4093 case MR_EVT_LD_DELETED: {
4094 tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
4095 mutex_enter(&instance->config_dev_mtx);
4096 instance->mr_ld_list[tgt].flag = ~MRDRV_TGT_VALID;
4097 mutex_exit(&instance->config_dev_mtx);
4098 rval = mrsas_service_evt(instance,
4099 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
4100 MRSAS_EVT_UNCONFIG_TGT, NULL);
4101 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
4102 "tgt id = %d index = %d", rval,
4103 ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4104 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4105 break;
4106 } /* End of MR_EVT_LD_DELETED */
4107
4108 case MR_EVT_LD_CREATED: {
4109 rval = mrsas_service_evt(instance,
4110 ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
4111 MRSAS_EVT_CONFIG_TGT, NULL);
4112 con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
4113 "tgt id = %d index = %d", rval,
4114 ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
4115 ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
4116 break;
4117 } /* End of MR_EVT_LD_CREATED */
4118
4119 #ifdef PDSUPPORT
4120 case MR_EVT_PD_REMOVED_EXT: {
4121 if (instance->tbolt) {
4122 pd_addr = &evt_detail->args.pd_addr;
4123 dtype = pd_addr->scsi_dev_type;
4124 con_log(CL_DLEVEL1, (CE_NOTE,
4125 " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
4126 " arg_type = %d ", dtype, evt_detail->arg_type));
4127 tgt = ddi_get16(acc_handle, &evt_detail->args.pd.device_id);
4128 mutex_enter(&instance->config_dev_mtx);
4129 instance->mr_tbolt_pd_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
4130 mutex_exit(&instance->config_dev_mtx);
4131 rval = mrsas_service_evt(instance,
4132 ddi_get16(acc_handle, &evt_detail->args.pd.device_id),
4133 1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4134 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
4135 "rval = %d tgt id = %d ", rval,
4136 ddi_get16(acc_handle, &evt_detail->args.pd.device_id)));
4137 break;
4138 }
4139 }/* End of MR_EVT_PD_REMOVED_EXT */
4140
4141 case MR_EVT_PD_INSERTED_EXT: {
4142 if (instance->tbolt) {
4143 rval = mrsas_service_evt(instance,
4144 ddi_get16(acc_handle, &evt_detail->args.pd.device_id),
4145 1, MRSAS_EVT_CONFIG_TGT, NULL);
4146 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:"
4147 "rval = %d tgt id = %d ", rval,
4148 ddi_get16(acc_handle, &evt_detail->args.pd.device_id)));
4149 break;
4150 }
4151 }/* End of MR_EVT_PD_INSERTED_EXT */
4152
4153 case MR_EVT_PD_STATE_CHANGE: {
4154 if (instance->tbolt) {
4155 tgt = ddi_get16(acc_handle, &evt_detail->args.pd.device_id);
4156 if ((evt_detail->args.pd_state.prevState == PD_SYSTEM) &&
4157 (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
4158 mutex_enter(&instance->config_dev_mtx);
4159 instance->mr_tbolt_pd_list[tgt].flag =
4160 (uint8_t)~MRDRV_TGT_VALID;
4161 mutex_exit(&instance->config_dev_mtx);
4162 rval = mrsas_service_evt(instance,
4163 ddi_get16(acc_handle,
4164 &evt_detail->args.pd.device_id),
4165 1, MRSAS_EVT_UNCONFIG_TGT, NULL);
4166 con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
4167 "rval = %d tgt id = %d ", rval,
4168 ddi_get16(acc_handle,
4169 &evt_detail->args.pd.device_id)));
4170 break;
4171 }
4172 if ((evt_detail->args.pd_state.prevState
4173 == UNCONFIGURED_GOOD) &&
4174 (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
4175 rval = mrsas_service_evt(instance,
4176 ddi_get16(acc_handle,
4177 &evt_detail->args.pd.device_id),
4178 1, MRSAS_EVT_CONFIG_TGT, NULL);
4179 con_log(CL_ANN1, (CE_WARN,
4180 "mr_sas: PD_INSERTED: rval = %d "
4181 " tgt id = %d ", rval,
4182 ddi_get16(acc_handle,
4183 &evt_detail->args.pd.device_id)));
4184 break;
4185 }
4186 }
4187 }
4188 #endif
4189
4190 } /* End of Main Switch */
4191
4192 /* get copy of seq_num and class/locale for re-registration */
4193 seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
4194 seq_num++;
4195 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
4196 sizeof (struct mrsas_evt_detail));
4197
4198 ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
4199 ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);
4200
4201 instance->aen_seq_num = seq_num;
4202
4203 cmd->frame_count = 1;
4204
4205 cmd->retry_count_for_ocr = 0;
4206 cmd->drv_pkt_time = 0;
4207
4208 /* Issue the aen registration frame */
4209 instance->func_ptr->issue_cmd(cmd, instance);
4210 }
4211
4212 /*
4213 * complete_cmd_in_sync_mode - Completes an internal command
4214 * @instance: Adapter soft state
4215 * @cmd: Command to be completed
4216 *
4217 * The issue_cmd_in_sync_mode() function waits for a command to complete
4218 * after it issues a command. This function wakes up that waiting routine by
4219 * calling wake_up() on the wait queue.
4220 */
4221 static void
4222 complete_cmd_in_sync_mode(struct mrsas_instance *instance,
4223 struct mrsas_cmd *cmd)
4224 {
4225 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
4226 &cmd->frame->io.cmd_status);
4227
4228 cmd->sync_cmd = MRSAS_FALSE;
4229
4230 con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
4231 (void *)cmd));
4232
4233 mutex_enter(&instance->int_cmd_mtx);
4234 if (cmd->cmd_status == ENODATA) {
4235 cmd->cmd_status = 0;
4236 }
4237 cv_broadcast(&instance->int_cmd_cv);
4238 mutex_exit(&instance->int_cmd_mtx);
4239
4240 }
4241
4242 /*
4243 * Call this function inside mrsas_softintr.
4244 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4245 * @instance: Adapter soft state
4246 */
4247
4248 static uint32_t
4249 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4250 {
4251 uint32_t cur_abs_reg_val;
4252 uint32_t fw_state;
4253
4254 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
4255 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4256 if (fw_state == MFI_STATE_FAULT) {
4257 if (instance->disable_online_ctrl_reset == 1) {
4258 cmn_err(CE_WARN,
4259 "mrsas_initiate_ocr_if_fw_is_faulty: "
4260 "FW in Fault state, detected in ISR: "
4261 "FW doesn't support ocr ");
4262
4263 return (ADAPTER_RESET_NOT_REQUIRED);
4264 } else {
4265 con_log(CL_ANN, (CE_NOTE,
4266 "mrsas_initiate_ocr_if_fw_is_faulty: "
4267 "FW in Fault state, detected in ISR: FW supports ocr "));
4268
4269 return (ADAPTER_RESET_REQUIRED);
4270 }
4271 }
4272
4273 return (ADAPTER_RESET_NOT_REQUIRED);
4274 }
4275
4276 /*
4277 * mrsas_softintr - The Software ISR
4278 * @param arg : HBA soft state
4279 *
4280 * called from high-level interrupt if hi-level interrupt are not there,
4281 * otherwise triggered as a soft interrupt
4282 */
4283 static uint_t
4284 mrsas_softintr(struct mrsas_instance *instance)
4285 {
4286 struct scsi_pkt *pkt;
4287 struct scsa_cmd *acmd;
4288 struct mrsas_cmd *cmd;
4289 struct mlist_head *pos, *next;
4290 mlist_t process_list;
4291 struct mrsas_header *hdr;
4292 struct scsi_arq_status *arqstat;
4293
4294 con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));
4295
4296 ASSERT(instance);
4297
4298 mutex_enter(&instance->completed_pool_mtx);
4299
4300 if (mlist_empty(&instance->completed_pool_list)) {
4301 mutex_exit(&instance->completed_pool_mtx);
4302 return (DDI_INTR_CLAIMED);
4303 }
4304
4305 instance->softint_running = 1;
4306
4307 INIT_LIST_HEAD(&process_list);
4308 mlist_splice(&instance->completed_pool_list, &process_list);
4309 INIT_LIST_HEAD(&instance->completed_pool_list);
4310
4311 mutex_exit(&instance->completed_pool_mtx);
4312
4313 /* perform all callbacks first, before releasing the SCBs */
4314 mlist_for_each_safe(pos, next, &process_list) {
4315 cmd = mlist_entry(pos, struct mrsas_cmd, list);
4316
4317 /* syncronize the Cmd frame for the controller */
4318 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
4319 0, 0, DDI_DMA_SYNC_FORCPU);
4320
4321 hdr = &cmd->frame->hdr;
4322
4323 /* remove the internal command from the process list */
4324 mlist_del_init(&cmd->list);
4325
4326 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
4327 case MFI_CMD_OP_PD_SCSI:
4328 case MFI_CMD_OP_LD_SCSI:
4329 case MFI_CMD_OP_LD_READ:
4330 case MFI_CMD_OP_LD_WRITE:
4331 /*
4332 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
4333 * could have been issued either through an
4334 * IO path or an IOCTL path. If it was via IOCTL,
4335 * we will send it to internal completion.
4336 */
4337 if (cmd->sync_cmd == MRSAS_TRUE) {
4338 complete_cmd_in_sync_mode(instance, cmd);
4339 break;
4340 }
4341
4342 /* regular commands */
4343 acmd = cmd->cmd;
4344 pkt = CMD2PKT(acmd);
4345
4346 if (acmd->cmd_flags & CFLAG_DMAVALID) {
4347 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
4348 (void) ddi_dma_sync(acmd->cmd_dmahandle,
4349 acmd->cmd_dma_offset,
4350 acmd->cmd_dma_len,
4351 DDI_DMA_SYNC_FORCPU);
4352 }
4353 }
4354
4355 pkt->pkt_reason = CMD_CMPLT;
4356 pkt->pkt_statistics = 0;
4357 pkt->pkt_state = STATE_GOT_BUS
4358 | STATE_GOT_TARGET | STATE_SENT_CMD
4359 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
4360
4361 con_log(CL_ANN, (CE_CONT,
4362 "CDB[0] = %x completed for %s: size %lx context %x",
4363 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
4364 acmd->cmd_dmacount, hdr->context));
4365
4366 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
4367 struct scsi_inquiry *inq;
4368
4369 if (acmd->cmd_dmacount != 0) {
4370 bp_mapin(acmd->cmd_buf);
4371 inq = (struct scsi_inquiry *)
4372 acmd->cmd_buf->b_un.b_addr;
4373
4374 /* don't expose physical drives to OS */
4375 if (acmd->islogical &&
4376 (hdr->cmd_status == MFI_STAT_OK)) {
4377 display_scsi_inquiry(
4378 (caddr_t)inq);
4379 } else if ((hdr->cmd_status ==
4380 MFI_STAT_OK) && inq->inq_dtype ==
4381 DTYPE_DIRECT) {
4382
4383 display_scsi_inquiry(
4384 (caddr_t)inq);
4385
4386 /* for physical disk */
4387 hdr->cmd_status =
4388 MFI_STAT_DEVICE_NOT_FOUND;
4389 }
4390 }
4391 }
4392
4393 switch (hdr->cmd_status) {
4394 case MFI_STAT_OK:
4395 pkt->pkt_scbp[0] = STATUS_GOOD;
4396 break;
4397 case MFI_STAT_LD_CC_IN_PROGRESS:
4398 case MFI_STAT_LD_RECON_IN_PROGRESS:
4399 pkt->pkt_scbp[0] = STATUS_GOOD;
4400 break;
4401 case MFI_STAT_LD_INIT_IN_PROGRESS:
4402 con_log(CL_ANN, (CE_WARN, "Initialization in Progress"));
4403 pkt->pkt_reason = CMD_TRAN_ERR;
4404
4405 break;
4406 case MFI_STAT_SCSI_DONE_WITH_ERROR:
4407 con_log(CL_ANN, (CE_CONT, "scsi_done error"));
4408
4409 pkt->pkt_reason = CMD_CMPLT;
4410 ((struct scsi_status *)
4411 pkt->pkt_scbp)->sts_chk = 1;
4412
4413 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
4414
4415 con_log(CL_ANN, (CE_WARN, "TEST_UNIT_READY fail"));
4416
4417 } else {
4418 pkt->pkt_state |= STATE_ARQ_DONE;
4419 arqstat = (void *)(pkt->pkt_scbp);
4420 arqstat->sts_rqpkt_reason = CMD_CMPLT;
4421 arqstat->sts_rqpkt_resid = 0;
4422 arqstat->sts_rqpkt_state |=
4423 STATE_GOT_BUS | STATE_GOT_TARGET
4424 | STATE_SENT_CMD
4425 | STATE_XFERRED_DATA;
4426 *(uint8_t *)&arqstat->sts_rqpkt_status =
4427 STATUS_GOOD;
4428 ddi_rep_get8(
4429 cmd->frame_dma_obj.acc_handle,
4430 (uint8_t *)
4431 &(arqstat->sts_sensedata),
4432 cmd->sense,
4433 sizeof (struct scsi_extended_sense),
4434 DDI_DEV_AUTOINCR);
4435 }
4436 break;
4437 case MFI_STAT_LD_OFFLINE:
4438 case MFI_STAT_DEVICE_NOT_FOUND:
4439 con_log(CL_ANN, (CE_CONT,
4440 "mrsas_softintr:device not found error"));
4441 pkt->pkt_reason = CMD_DEV_GONE;
4442 pkt->pkt_statistics = STAT_DISCON;
4443 break;
4444 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
4445 pkt->pkt_state |= STATE_ARQ_DONE;
4446 pkt->pkt_reason = CMD_CMPLT;
4447 ((struct scsi_status *)
4448 pkt->pkt_scbp)->sts_chk = 1;
4449
4450 arqstat = (void *)(pkt->pkt_scbp);
4451 arqstat->sts_rqpkt_reason = CMD_CMPLT;
4452 arqstat->sts_rqpkt_resid = 0;
4453 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
4454 | STATE_GOT_TARGET | STATE_SENT_CMD
4455 | STATE_XFERRED_DATA;
4456 *(uint8_t *)&arqstat->sts_rqpkt_status =
4457 STATUS_GOOD;
4458
4459 arqstat->sts_sensedata.es_valid = 1;
4460 arqstat->sts_sensedata.es_key =
4461 KEY_ILLEGAL_REQUEST;
4462 arqstat->sts_sensedata.es_class =
4463 CLASS_EXTENDED_SENSE;
4464
4465 /*
4466 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
4467 * ASC: 0x21h; ASCQ: 0x00h;
4468 */
4469 arqstat->sts_sensedata.es_add_code = 0x21;
4470 arqstat->sts_sensedata.es_qual_code = 0x00;
4471
4472 break;
4473
4474 default:
4475 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
4476 pkt->pkt_reason = CMD_TRAN_ERR;
4477
4478 break;
4479 }
4480
4481 atomic_add_16(&instance->fw_outstanding, (-1));
4482
4483 /* Call the callback routine */
4484 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4485 pkt->pkt_comp) {
4486
4487 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_softintr: "
4488 "posting to scsa cmd %p index %x pkt %p "
4489 "time %llx", (void *)cmd, cmd->index,
4490 (void *)pkt, gethrtime()));
4491 (*pkt->pkt_comp)(pkt);
4492
4493 }
4494
4495 return_mfi_pkt(instance, cmd);
4496 break;
4497
4498 case MFI_CMD_OP_SMP:
4499 case MFI_CMD_OP_STP:
4500 complete_cmd_in_sync_mode(instance, cmd);
4501 break;
4502
4503 case MFI_CMD_OP_DCMD:
4504 /* see if got an event notification */
4505 if (ddi_get32(cmd->frame_dma_obj.acc_handle,
4506 &cmd->frame->dcmd.opcode) ==
4507 MR_DCMD_CTRL_EVENT_WAIT) {
4508 if ((instance->aen_cmd == cmd) &&
4509 (instance->aen_cmd->abort_aen)) {
4510 con_log(CL_ANN, (CE_WARN,
4511 "mrsas_softintr: "
4512 "aborted_aen returned"));
4513 } else {
4514 atomic_add_16(&instance->fw_outstanding,
4515 (-1));
4516 service_mfi_aen(instance, cmd);
4517 }
4518 } else {
4519 complete_cmd_in_sync_mode(instance, cmd);
4520 }
4521
4522 break;
4523
4524 case MFI_CMD_OP_ABORT:
4525 con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
4526 /*
4527 * MFI_CMD_OP_ABORT successfully completed
4528 * in the synchronous mode
4529 */
4530 complete_cmd_in_sync_mode(instance, cmd);
4531 break;
4532
4533 default:
4534 if (cmd->pkt != NULL) {
4535 pkt = cmd->pkt;
4536 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4537 pkt->pkt_comp) {
4538
4539 con_log(CL_ANN1, (CE_CONT, "posting to "
4540 "scsa cmd %p index %x pkt %p"
4541 "time %llx, default ", (void *)cmd,
4542 cmd->index, (void *)pkt,
4543 gethrtime()));
4544
4545 (*pkt->pkt_comp)(pkt);
4546
4547 }
4548 }
4549 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
4550 break;
4551 }
4552 }
4553
4554 instance->softint_running = 0;
4555
4556 return (DDI_INTR_CLAIMED);
4557 }
4558
4559 /*
4560 * mrsas_alloc_dma_obj
4561 *
4562 * Allocate the memory and other resources for an dma object.
4563 */
4564 int
4565 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
4566 uchar_t endian_flags)
4567 {
4568 int i;
4569 size_t alen = 0;
4570 uint_t cookie_cnt;
4571 struct ddi_device_acc_attr tmp_endian_attr;
4572
4573 tmp_endian_attr = endian_attr;
4574 tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
4575 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4576
4577 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
4578 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
4579 if (i != DDI_SUCCESS) {
4580
4581 switch (i) {
4582 case DDI_DMA_BADATTR :
4583 con_log(CL_ANN, (CE_WARN,
4584 "Failed ddi_dma_alloc_handle- Bad attribute"));
4585 break;
4586 case DDI_DMA_NORESOURCES :
4587 con_log(CL_ANN, (CE_WARN,
4588 "Failed ddi_dma_alloc_handle- No Resources"));
4589 break;
4590 default :
4591 con_log(CL_ANN, (CE_WARN,
4592 "Failed ddi_dma_alloc_handle: "
4593 "unknown status %d", i));
4594 break;
4595 }
4596
4597 return (-1);
4598 }
4599
4600 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
4601 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4602 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4603 alen < obj->size) {
4604
4605 ddi_dma_free_handle(&obj->dma_handle);
4606
4607 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4608
4609 return (-1);
4610 }
4611 if (obj->dma_handle == NULL) {
4612 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4613 return (-1);
4614 }
4615
4616 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4617 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4618 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4619
4620 ddi_dma_mem_free(&obj->acc_handle);
4621 ddi_dma_free_handle(&obj->dma_handle);
4622
4623 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4624
4625 return (-1);
4626 }
4627 if (obj->acc_handle == NULL) {
4628 ddi_dma_mem_free(&obj->acc_handle);
4629 ddi_dma_free_handle(&obj->dma_handle);
4630
4631 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4632 return (-1);
4633 }
4634
4635
4636 return (cookie_cnt);
4637 }
4638
4639 /*
4640 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4641 *
4642 * De-allocate the memory and other resources for an dma object, which must
4643 * have been alloated by a previous call to mrsas_alloc_dma_obj()
4644 */
4645 int
4646 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4647 {
4648
4649 if ( (obj.dma_handle == NULL) || (obj.acc_handle == NULL) ) {
4650 return (DDI_SUCCESS);
4651 }
4652
4653 (void) ddi_dma_unbind_handle(obj.dma_handle);
4654 ddi_dma_mem_free(&obj.acc_handle);
4655 ddi_dma_free_handle(&obj.dma_handle);
4656 obj.acc_handle = NULL;
4657 return (DDI_SUCCESS);
4658 }
4659
4660 /*
4661 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
4662 * int, int (*)())
4663 *
4664 * Allocate dma resources for a new scsi command
4665 */
4666 int
4667 mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
4668 struct buf *bp, int flags, int (*callback)())
4669 {
4670 int dma_flags;
4671 int (*cb)(caddr_t);
4672 int i;
4673
4674 ddi_dma_attr_t tmp_dma_attr = mrsas_generic_dma_attr;
4675 struct scsa_cmd *acmd = PKT2CMD(pkt);
4676
4677 acmd->cmd_buf = bp;
4678
4679 if (bp->b_flags & B_READ) {
4680 acmd->cmd_flags &= ~CFLAG_DMASEND;
4681 dma_flags = DDI_DMA_READ;
4682 } else {
4683 acmd->cmd_flags |= CFLAG_DMASEND;
4684 dma_flags = DDI_DMA_WRITE;
4685 }
4686
4687 if (flags & PKT_CONSISTENT) {
4688 acmd->cmd_flags |= CFLAG_CONSISTENT;
4689 dma_flags |= DDI_DMA_CONSISTENT;
4690 }
4691
4692 if (flags & PKT_DMA_PARTIAL) {
4693 dma_flags |= DDI_DMA_PARTIAL;
4694 }
4695
4696 dma_flags |= DDI_DMA_REDZONE;
4697
4698 cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
4699
4700 tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
4701 tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
4702 if (instance->tbolt) {
4703 //OCR-RESET FIX
4704 tmp_dma_attr.dma_attr_count_max = (U64)mrsas_tbolt_max_cap_maxxfer; //limit to 256K
4705 tmp_dma_attr.dma_attr_maxxfer = (U64)mrsas_tbolt_max_cap_maxxfer; //limit to 256K
4706 }
4707
4708
4709 if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
4710 cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
4711 switch (i) {
4712 case DDI_DMA_BADATTR:
4713 bioerror(bp, EFAULT);
4714 return (DDI_FAILURE);
4715
4716 case DDI_DMA_NORESOURCES:
4717 bioerror(bp, 0);
4718 return (DDI_FAILURE);
4719
4720 default:
4721 con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
4722 "impossible result (0x%x)", i));
4723 bioerror(bp, EFAULT);
4724 return (DDI_FAILURE);
4725 }
4726 }
4727
4728 i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
4729 cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);
4730
4731 switch (i) {
4732 case DDI_DMA_PARTIAL_MAP:
4733 if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
4734 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
4735 "DDI_DMA_PARTIAL_MAP impossible"));
4736 goto no_dma_cookies;
4737 }
4738
4739 if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
4740 DDI_FAILURE) {
4741 con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
4742 goto no_dma_cookies;
4743 }
4744
4745 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
4746 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
4747 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
4748 DDI_FAILURE) {
4749
4750 con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
4751 goto no_dma_cookies;
4752 }
4753
4754 goto get_dma_cookies;
4755 case DDI_DMA_MAPPED:
4756 acmd->cmd_nwin = 1;
4757 acmd->cmd_dma_len = 0;
4758 acmd->cmd_dma_offset = 0;
4759
4760 get_dma_cookies:
4761 i = 0;
4762 acmd->cmd_dmacount = 0;
4763 for (;;) {
4764 acmd->cmd_dmacount +=
4765 acmd->cmd_dmacookies[i++].dmac_size;
4766
4767 if (i == instance->max_num_sge ||
4768 i == acmd->cmd_ncookies)
4769 break;
4770
4771 ddi_dma_nextcookie(acmd->cmd_dmahandle,
4772 &acmd->cmd_dmacookies[i]);
4773 }
4774
4775 acmd->cmd_cookie = i;
4776 acmd->cmd_cookiecnt = i;
4777
4778 acmd->cmd_flags |= CFLAG_DMAVALID;
4779
4780 if (bp->b_bcount >= acmd->cmd_dmacount) {
4781 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
4782 } else {
4783 pkt->pkt_resid = 0;
4784 }
4785
4786 return (DDI_SUCCESS);
4787 case DDI_DMA_NORESOURCES:
4788 bioerror(bp, 0);
4789 break;
4790 case DDI_DMA_NOMAPPING:
4791 bioerror(bp, EFAULT);
4792 break;
4793 case DDI_DMA_TOOBIG:
4794 bioerror(bp, EINVAL);
4795 break;
4796 case DDI_DMA_INUSE:
4797 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
4798 " DDI_DMA_INUSE impossible"));
4799 break;
4800 default:
4801 con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
4802 "impossible result (0x%x)", i));
4803 break;
4804 }
4805
4806 no_dma_cookies:
4807 ddi_dma_free_handle(&acmd->cmd_dmahandle);
4808 acmd->cmd_dmahandle = NULL;
4809 acmd->cmd_flags &= ~CFLAG_DMAVALID;
4810 return (DDI_FAILURE);
4811 }
4812
4813 /*
4814 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
4815 *
4816 * move dma resources to next dma window
4817 *
4818 */
4819 int
4820 mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
4821 struct buf *bp)
4822 {
4823 int i = 0;
4824
4825 struct scsa_cmd *acmd = PKT2CMD(pkt);
4826
4827 /*
4828 * If there are no more cookies remaining in this window,
4829 * must move to the next window first.
4830 */
4831 if (acmd->cmd_cookie == acmd->cmd_ncookies) {
4832 if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
4833 return (DDI_SUCCESS);
4834 }
4835
4836 /* at last window, cannot move */
4837 if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
4838 return (DDI_FAILURE);
4839 }
4840
4841 if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
4842 &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
4843 &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
4844 DDI_FAILURE) {
4845 return (DDI_FAILURE);
4846 }
4847
4848 acmd->cmd_cookie = 0;
4849 } else {
4850 /* still more cookies in this window - get the next one */
4851 ddi_dma_nextcookie(acmd->cmd_dmahandle,
4852 &acmd->cmd_dmacookies[0]);
4853 }
4854
4855 /* get remaining cookies in this window, up to our maximum */
4856 for (;;) {
4857 acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
4858 acmd->cmd_cookie++;
4859
4860 if (i == instance->max_num_sge ||
4861 acmd->cmd_cookie == acmd->cmd_ncookies) {
4862 break;
4863 }
4864
4865 ddi_dma_nextcookie(acmd->cmd_dmahandle,
4866 &acmd->cmd_dmacookies[i]);
4867 }
4868
4869 acmd->cmd_cookiecnt = i;
4870
4871 if (bp->b_bcount >= acmd->cmd_dmacount) {
4872 pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
4873 } else {
4874 pkt->pkt_resid = 0;
4875 }
4876
4877 return (DDI_SUCCESS);
4878 }
4879
4880 /*
4881 * build_cmd
4882 */
4883 static struct mrsas_cmd *
4884 build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
4885 struct scsi_pkt *pkt, uchar_t *cmd_done)
4886 {
4887 uint16_t flags = 0;
4888 uint32_t i;
4889 uint32_t context;
4890 uint32_t sge_bytes;
4891 uint32_t tmp_data_xfer_len;
4892 ddi_acc_handle_t acc_handle;
4893 struct mrsas_cmd *cmd;
4894 struct mrsas_sge64 *mfi_sgl;
4895 struct mrsas_sge_ieee *mfi_sgl_ieee;
4896 struct scsa_cmd *acmd = PKT2CMD(pkt);
4897 struct mrsas_pthru_frame *pthru;
4898 struct mrsas_io_frame *ldio;
4899
4900 /* find out if this is logical or physical drive command. */
4901 acmd->islogical = MRDRV_IS_LOGICAL(ap);
4902 acmd->device_id = MAP_DEVICE_ID(instance, ap);
4903 *cmd_done = 0;
4904
4905 /* get the command packet */
4906 if (!(cmd = get_mfi_pkt(instance))) {
4907 cmn_err(CE_WARN,
4908 "Failed to get a cmd from free-pool in build_cmd(). fw_outstanding=0x%X max_fw_cmds=0x%X",
4909 instance->fw_outstanding, instance->max_fw_cmds);
4910 return (NULL);
4911 }
4912
4913 acc_handle = cmd->frame_dma_obj.acc_handle;
4914
4915 /* Clear the frame buffer and assign back the context id */
4916 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4917 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);
4918
4919 cmd->pkt = pkt;
4920 cmd->cmd = acmd;
4921
4922 /* lets get the command directions */
4923 if (acmd->cmd_flags & CFLAG_DMASEND) {
4924 flags = MFI_FRAME_DIR_WRITE;
4925
4926 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
4927 (void) ddi_dma_sync(acmd->cmd_dmahandle,
4928 acmd->cmd_dma_offset, acmd->cmd_dma_len,
4929 DDI_DMA_SYNC_FORDEV);
4930 }
4931 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
4932 flags = MFI_FRAME_DIR_READ;
4933
4934 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
4935 (void) ddi_dma_sync(acmd->cmd_dmahandle,
4936 acmd->cmd_dma_offset, acmd->cmd_dma_len,
4937 DDI_DMA_SYNC_FORCPU);
4938 }
4939 } else {
4940 flags = MFI_FRAME_DIR_NONE;
4941 }
4942
4943 if (instance->flag_ieee) {
4944 flags |= MFI_FRAME_IEEE;
4945 }
4946 flags |= MFI_FRAME_SGL64;
4947
4948 switch (pkt->pkt_cdbp[0]) {
4949
4950 /*
4951 * case SCMD_SYNCHRONIZE_CACHE:
4952 * flush_cache(instance);
4953 * return_mfi_pkt(instance, cmd);
4954 * *cmd_done = 1;
4955 *
4956 * return (NULL);
4957 */
4958
4959 case SCMD_READ:
4960 case SCMD_WRITE:
4961 case SCMD_READ_G1:
4962 case SCMD_WRITE_G1:
4963 case SCMD_READ_G4:
4964 case SCMD_WRITE_G4:
4965 case SCMD_READ_G5:
4966 case SCMD_WRITE_G5:
4967 if (acmd->islogical) {
4968 ldio = (struct mrsas_io_frame *)cmd->frame;
4969
4970 /*
4971 * preare the Logical IO frame:
4972 * 2nd bit is zero for all read cmds
4973 */
4974 ddi_put8(acc_handle, &ldio->cmd,
4975 (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
4976 : MFI_CMD_OP_LD_READ);
4977 ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
4978 ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
4979 ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
4980 ddi_put16(acc_handle, &ldio->timeout, 0);
4981 ddi_put8(acc_handle, &ldio->reserved_0, 0);
4982 ddi_put16(acc_handle, &ldio->pad_0, 0);
4983 ddi_put16(acc_handle, &ldio->flags, flags);
4984
4985 /* Initialize sense Information */
4986 bzero(cmd->sense, SENSE_LENGTH);
4987 ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
4988 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
4989 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
4990 cmd->sense_phys_addr);
4991 ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
4992 ddi_put8(acc_handle, &ldio->access_byte,
4993 (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
4994 ddi_put8(acc_handle, &ldio->sge_count,
4995 acmd->cmd_cookiecnt);
4996 if (instance->flag_ieee) {
4997 mfi_sgl_ieee =
4998 (struct mrsas_sge_ieee *)&ldio->sgl;
4999 } else {
5000 mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl;
5001 }
5002
5003 context = ddi_get32(acc_handle, &ldio->context);
5004
5005 if (acmd->cmd_cdblen == CDB_GROUP0) { /* 6-byte cdb */
5006 ddi_put32(acc_handle, &ldio->lba_count, (
5007 (uint16_t)(pkt->pkt_cdbp[4])));
5008
5009 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5010 ((uint32_t)(pkt->pkt_cdbp[3])) |
5011 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
5012 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
5013 << 16)));
5014 } else if (acmd->cmd_cdblen == CDB_GROUP1) { /* 10-byte cdb */
5015 ddi_put32(acc_handle, &ldio->lba_count, (
5016 ((uint16_t)(pkt->pkt_cdbp[8])) |
5017 ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));
5018
5019 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5020 ((uint32_t)(pkt->pkt_cdbp[5])) |
5021 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5022 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5023 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5024 } else if (acmd->cmd_cdblen == CDB_GROUP5) { /* 12-byte cdb */
5025 ddi_put32(acc_handle, &ldio->lba_count, (
5026 ((uint32_t)(pkt->pkt_cdbp[9])) |
5027 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5028 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5029 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5030
5031 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5032 ((uint32_t)(pkt->pkt_cdbp[5])) |
5033 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5034 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5035 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5036 } else if (acmd->cmd_cdblen == CDB_GROUP4) { /* 16-byte cdb */
5037 ddi_put32(acc_handle, &ldio->lba_count, (
5038 ((uint32_t)(pkt->pkt_cdbp[13])) |
5039 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
5040 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
5041 ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));
5042
5043 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5044 ((uint32_t)(pkt->pkt_cdbp[9])) |
5045 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5046 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5047 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5048
5049 ddi_put32(acc_handle, &ldio->start_lba_hi, (
5050 ((uint32_t)(pkt->pkt_cdbp[5])) |
5051 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5052 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5053 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5054 }
5055
5056 break;
5057 }
5058 /* fall through For all non-rd/wr cmds */
5059 default:
5060
5061 switch (pkt->pkt_cdbp[0]) {
5062 case SCMD_MODE_SENSE:
5063 case SCMD_MODE_SENSE_G1: {
5064 union scsi_cdb *cdbp;
5065 uint16_t page_code;
5066
5067 cdbp = (void *)pkt->pkt_cdbp;
5068 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
5069 switch (page_code) {
5070 case 0x3:
5071 case 0x4:
5072 (void) mrsas_mode_sense_build(pkt);
5073 return_mfi_pkt(instance, cmd);
5074 *cmd_done = 1;
5075 return (NULL);
5076 }
5077 break;
5078 }
5079 default:
5080 break;
5081 }
5082
5083 pthru = (struct mrsas_pthru_frame *)cmd->frame;
5084
5085 /* prepare the DCDB frame */
5086 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
5087 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
5088 ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
5089 ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
5090 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
5091 ddi_put8(acc_handle, &pthru->lun, 0);
5092 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
5093 ddi_put16(acc_handle, &pthru->timeout, 0);
5094 ddi_put16(acc_handle, &pthru->flags, flags);
5095 tmp_data_xfer_len = 0;
5096 for (i = 0; i < acmd->cmd_cookiecnt; i++) {
5097 tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
5098 }
5099 ddi_put32(acc_handle, &pthru->data_xfer_len,
5100 tmp_data_xfer_len);
5101 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
5102 if (instance->flag_ieee) {
5103 mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
5104 } else {
5105 mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl;
5106 }
5107
5108 bzero(cmd->sense, SENSE_LENGTH);
5109 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5110 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5111 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
5112 cmd->sense_phys_addr);
5113
5114 context = ddi_get32(acc_handle, &pthru->context);
5115 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
5116 (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
5117
5118 break;
5119 }
5120 #ifdef lint
5121 context = context;
5122 #endif
5123 /* prepare the scatter-gather list for the firmware */
5124 if (instance->flag_ieee) {
5125 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
5126 ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
5127 acmd->cmd_dmacookies[i].dmac_laddress);
5128 ddi_put32(acc_handle, &mfi_sgl_ieee->length,
5129 acmd->cmd_dmacookies[i].dmac_size);
5130 }
5131 sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
5132 } else {
5133 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
5134 ddi_put64(acc_handle, &mfi_sgl->phys_addr,
5135 acmd->cmd_dmacookies[i].dmac_laddress);
5136 ddi_put32(acc_handle, &mfi_sgl->length,
5137 acmd->cmd_dmacookies[i].dmac_size);
5138 }
5139 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
5140 }
5141
5142 cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
5143 ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;
5144
5145 if (cmd->frame_count >= 8) {
5146 cmd->frame_count = 8;
5147 }
5148
5149 return (cmd);
5150 }
5151
5152 /*
5153 * wait_for_outstanding - Wait for all outstanding cmds
5154 * @instance: Adapter soft state
5155 *
5156 * This function waits for upto MRDRV_RESET_WAIT_TIME seconds for FW to
5157 * complete all its outstanding commands. Returns error if one or more IOs
5158 * are pending after this time period.
5159 */
5160 static int
5161 wait_for_outstanding(struct mrsas_instance *instance)
5162 {
5163 int i;
5164 uint32_t wait_time = 90;
5165
5166 for (i = 0; i < wait_time; i++) {
5167 if (!instance->fw_outstanding) {
5168 break;
5169 }
5170
5171 drv_usecwait(MILLISEC); /* wait for 1000 usecs */;
5172 }
5173
5174 if (instance->fw_outstanding) {
5175 return (1);
5176 }
5177
5178 return (0);
5179 }
5180
5181
5182 /*
5183 * issue_mfi_pthru
5184 */
5185 static int
5186 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5187 struct mrsas_cmd *cmd, int mode)
5188 {
5189 void *ubuf;
5190 uint32_t kphys_addr = 0;
5191 uint32_t xferlen = 0;
5192 uint32_t new_xfer_length =0;
5193 uint_t model;
5194 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5195 dma_obj_t pthru_dma_obj;
5196 struct mrsas_pthru_frame *kpthru;
5197 struct mrsas_pthru_frame *pthru;
5198 int i;
5199 pthru = &cmd->frame->pthru;
5200 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5201
5202 if (instance->adapterresetinprogress) {
5203 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, "
5204 "returning mfi_pkt and setting TRAN_BUSY\n"));
5205 return (DDI_FAILURE);
5206 }
5207 model = ddi_model_convert_from(mode & FMODELS);
5208 if (model == DDI_MODEL_ILP32) {
5209 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5210
5211 xferlen = kpthru->sgl.sge32[0].length;
5212
5213 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5214 } else {
5215 #ifdef _ILP32
5216 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5217 xferlen = kpthru->sgl.sge32[0].length;
5218 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5219 #else
5220 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5221 xferlen = kpthru->sgl.sge64[0].length;
5222 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5223 #endif
5224 }
5225
5226 if (xferlen) {
5227 /* means IOCTL requires DMA */
5228 /* allocate the data transfer buffer */
5229 //pthru_dma_obj.size = xferlen;
5230 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen,new_xfer_length,PAGESIZE);
5231 pthru_dma_obj.size = new_xfer_length;
5232 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5233 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5234 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5235 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5236 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5237
5238 /* allocate kernel buffer for DMA */
5239 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5240 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5241 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5242 "could not allocate data transfer buffer."));
5243 return (DDI_FAILURE);
5244 }
5245 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5246
5247 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5248 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5249 for (i = 0; i < xferlen; i++) {
5250 if (ddi_copyin((uint8_t *)ubuf+i,
5251 (uint8_t *)pthru_dma_obj.buffer+i,
5252 1, mode)) {
5253 con_log(CL_ANN, (CE_WARN,
5254 "issue_mfi_pthru : "
5255 "copy from user space failed"));
5256 return (DDI_FAILURE);
5257 }
5258 }
5259 }
5260
5261 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5262 }
5263
5264 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5265 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5266 ddi_put8(acc_handle, &pthru->cmd_status, 0);
5267 ddi_put8(acc_handle, &pthru->scsi_status, 0);
5268 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5269 ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5270 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5271 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5272 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5273 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5274
5275 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5276 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
5277 /*ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5278
5279 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5280 pthru->cdb_len, DDI_DEV_AUTOINCR);
5281
5282 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5283 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5284 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5285
5286 cmd->sync_cmd = MRSAS_TRUE;
5287 cmd->frame_count = 1;
5288
5289 if (instance->tbolt) {
5290 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5291 }
5292
5293 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5294 con_log(CL_ANN, (CE_WARN,
5295 "issue_mfi_pthru: fw_ioctl failed"));
5296 } else {
5297 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
5298 for (i = 0; i < xferlen; i++) {
5299 if (ddi_copyout(
5300 (uint8_t *)pthru_dma_obj.buffer+i,
5301 (uint8_t *)ubuf+i, 1, mode)) {
5302 con_log(CL_ANN, (CE_WARN,
5303 "issue_mfi_pthru : "
5304 "copy to user space failed"));
5305 return (DDI_FAILURE);
5306 }
5307 }
5308 }
5309 }
5310
5311 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5312 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5313
5314 con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5315 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5316
5317 if (kpthru->sense_len) {
5318 uint sense_len = SENSE_LENGTH;
5319 void *sense_ubuf = (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5320 if (kpthru->sense_len <= SENSE_LENGTH) {
5321 sense_len = kpthru->sense_len;
5322 }
5323
5324 for (i = 0; i < sense_len; i++) {
5325 if (ddi_copyout(
5326 (uint8_t *)cmd->sense+i,
5327 (uint8_t *)sense_ubuf+i, 1, mode)) {
5328 con_log(CL_ANN, (CE_WARN,
5329 "issue_mfi_pthru : "
5330 "copy to user space failed"));
5331 }
5332 con_log(CL_DLEVEL1, (CE_WARN,
5333 "Copying Sense info sense_buff[%d] = 0x%X\n",i,*((uint8_t *)cmd->sense+i)));
5334 }
5335 }
5336 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5337 DDI_DMA_SYNC_FORDEV);
5338
5339 if (xferlen) {
5340 /* free kernel buffer */
5341 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5342 return (DDI_FAILURE);
5343 }
5344
5345 return (DDI_SUCCESS);
5346 }
5347
5348 /*
5349 * issue_mfi_dcmd
5350 */
5351 static int
5352 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5353 struct mrsas_cmd *cmd, int mode)
5354 {
5355 void *ubuf;
5356 uint32_t kphys_addr = 0;
5357 uint32_t xferlen = 0;
5358 uint32_t new_xfer_length = 0;
5359 uint32_t model;
5360 dma_obj_t dcmd_dma_obj;
5361 struct mrsas_dcmd_frame *kdcmd;
5362 struct mrsas_dcmd_frame *dcmd;
5363 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5364 int i;
5365 dcmd = &cmd->frame->dcmd;
5366 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5367
5368 if (instance->adapterresetinprogress) {
5369 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5370 "returning mfi_pkt and setting TRAN_BUSY\n"));
5371 return (DDI_FAILURE);
5372 }
5373 model = ddi_model_convert_from(mode & FMODELS);
5374 if (model == DDI_MODEL_ILP32) {
5375 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5376
5377 xferlen = kdcmd->sgl.sge32[0].length;
5378
5379 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5380 } else {
5381 #ifdef _ILP32
5382 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5383 xferlen = kdcmd->sgl.sge32[0].length;
5384 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5385 #else
5386 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5387 xferlen = kdcmd->sgl.sge64[0].length;
5388 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5389 #endif
5390 }
5391 if (xferlen) {
5392 /* means IOCTL requires DMA */
5393 /* allocate the data transfer buffer */
5394 //dcmd_dma_obj.size = xferlen;
5395 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen,new_xfer_length,PAGESIZE);
5396 dcmd_dma_obj.size = new_xfer_length;
5397 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5398 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5399 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5400 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5401 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5402
5403 /* allocate kernel buffer for DMA */
5404 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
5405 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5406 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: "
5407 "could not allocate data transfer buffer."));
5408 return (DDI_FAILURE);
5409 }
5410 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5411
5412 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5413 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5414 for (i = 0; i < xferlen; i++) {
5415 if (ddi_copyin((uint8_t *)ubuf + i,
5416 (uint8_t *)dcmd_dma_obj.buffer + i,
5417 1, mode)) {
5418 con_log(CL_ANN, (CE_WARN,
5419 "issue_mfi_dcmd : "
5420 "copy from user space failed"));
5421 return (DDI_FAILURE);
5422 }
5423 }
5424 }
5425
5426 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5427 }
5428
5429 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5430 ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5431 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5432 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5433 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5434 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5435
5436 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5437 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5438
5439 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5440 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5441 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5442
5443 cmd->sync_cmd = MRSAS_TRUE;
5444 cmd->frame_count = 1;
5445
5446 if (instance->tbolt) {
5447 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5448 }
5449
5450 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5451 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5452 } else {
5453 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5454 for (i = 0; i < xferlen; i++) {
5455 if (ddi_copyout(
5456 (uint8_t *)dcmd_dma_obj.buffer + i,
5457 (uint8_t *)ubuf + i,
5458 1, mode)) {
5459 con_log(CL_ANN, (CE_WARN,
5460 "issue_mfi_dcmd : "
5461 "copy to user space failed"));
5462 return (DDI_FAILURE);
5463 }
5464 }
5465 }
5466 }
5467
5468 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5469 con_log(CL_ANN, (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5470
5471 if (xferlen) {
5472 /* free kernel buffer */
5473 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5474 return (DDI_FAILURE);
5475 }
5476
5477 return (DDI_SUCCESS);
5478 }
5479
5480 /*
5481 * issue_mfi_smp
5482 */
5483 static int
5484 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5485 struct mrsas_cmd *cmd, int mode)
5486 {
5487 void *request_ubuf;
5488 void *response_ubuf;
5489 uint32_t request_xferlen = 0;
5490 uint32_t response_xferlen = 0;
5491 uint32_t new_xfer_length1 = 0;
5492 uint32_t new_xfer_length2 = 0;
5493 uint_t model;
5494 dma_obj_t request_dma_obj;
5495 dma_obj_t response_dma_obj;
5496 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5497 struct mrsas_smp_frame *ksmp;
5498 struct mrsas_smp_frame *smp;
5499 struct mrsas_sge32 *sge32;
5500 #ifndef _ILP32
5501 struct mrsas_sge64 *sge64;
5502 #endif
5503 int i;
5504 uint64_t tmp_sas_addr;
5505
5506 smp = &cmd->frame->smp;
5507 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5508
5509 if (instance->adapterresetinprogress) {
5510 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5511 "returning mfi_pkt and setting TRAN_BUSY\n"));
5512 return (DDI_FAILURE);
5513 }
5514 model = ddi_model_convert_from(mode & FMODELS);
5515 if (model == DDI_MODEL_ILP32) {
5516 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5517
5518 sge32 = &ksmp->sgl[0].sge32[0];
5519 response_xferlen = sge32[0].length;
5520 request_xferlen = sge32[1].length;
5521 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5522 "response_xferlen = %x, request_xferlen = %x",
5523 response_xferlen, request_xferlen));
5524
5525 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5526 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5527 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5528 "response_ubuf = %p, request_ubuf = %p",
5529 response_ubuf, request_ubuf));
5530 } else {
5531 #ifdef _ILP32
5532 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5533
5534 sge32 = &ksmp->sgl[0].sge32[0];
5535 response_xferlen = sge32[0].length;
5536 request_xferlen = sge32[1].length;
5537 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5538 "response_xferlen = %x, request_xferlen = %x",
5539 response_xferlen, request_xferlen));
5540
5541 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5542 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5543 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5544 "response_ubuf = %p, request_ubuf = %p",
5545 response_ubuf, request_ubuf));
5546 #else
5547 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5548
5549 sge64 = &ksmp->sgl[0].sge64[0];
5550 response_xferlen = sge64[0].length;
5551 request_xferlen = sge64[1].length;
5552
5553 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
5554 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
5555 #endif
5556 }
5557 if (request_xferlen) {
5558 /* means IOCTL requires DMA */
5559 /* allocate the data transfer buffer */
5560 //request_dma_obj.size = request_xferlen;
5561 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,new_xfer_length1,PAGESIZE);
5562 request_dma_obj.size = new_xfer_length1;
5563 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5564 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5565 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5566 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5567 request_dma_obj.dma_attr.dma_attr_align = 1;
5568
5569 /* allocate kernel buffer for DMA */
5570 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5571 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5572 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5573 "could not allocate data transfer buffer."));
5574 return (DDI_FAILURE);
5575 }
5576 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5577
5578 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5579 for (i = 0; i < request_xferlen; i++) {
5580 if (ddi_copyin((uint8_t *)request_ubuf + i,
5581 (uint8_t *)request_dma_obj.buffer + i,
5582 1, mode)) {
5583 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5584 "copy from user space failed"));
5585 return (DDI_FAILURE);
5586 }
5587 }
5588 }
5589
5590 if (response_xferlen) {
5591 /* means IOCTL requires DMA */
5592 /* allocate the data transfer buffer */
5593 //response_dma_obj.size = response_xferlen;
5594 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,new_xfer_length2,PAGESIZE);
5595 response_dma_obj.size = new_xfer_length2;
5596 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5597 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5598 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5599 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5600 response_dma_obj.dma_attr.dma_attr_align = 1;
5601
5602 /* allocate kernel buffer for DMA */
5603 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5604 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5605 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5606 "could not allocate data transfer buffer."));
5607 return (DDI_FAILURE);
5608 }
5609 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5610
5611 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5612 for (i = 0; i < response_xferlen; i++) {
5613 if (ddi_copyin((uint8_t *)response_ubuf + i,
5614 (uint8_t *)response_dma_obj.buffer + i,
5615 1, mode)) {
5616 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5617 "copy from user space failed"));
5618 return (DDI_FAILURE);
5619 }
5620 }
5621 }
5622
5623 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5624 ddi_put8(acc_handle, &smp->cmd_status, 0);
5625 ddi_put8(acc_handle, &smp->connection_status, 0);
5626 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5627 /* smp->context = ksmp->context; */
5628 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5629 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5630
5631 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5632 sizeof (uint64_t));
5633 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5634
5635 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5636
5637 model = ddi_model_convert_from(mode & FMODELS);
5638 if (model == DDI_MODEL_ILP32) {
5639 con_log(CL_ANN1, (CE_CONT,
5640 "issue_mfi_smp: DDI_MODEL_ILP32"));
5641
5642 sge32 = &smp->sgl[0].sge32[0];
5643 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5644 ddi_put32(acc_handle, &sge32[0].phys_addr,
5645 response_dma_obj.dma_cookie[0].dmac_address);
5646 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5647 ddi_put32(acc_handle, &sge32[1].phys_addr,
5648 request_dma_obj.dma_cookie[0].dmac_address);
5649 } else {
5650 #ifdef _ILP32
5651 con_log(CL_ANN1, (CE_CONT,
5652 "issue_mfi_smp: DDI_MODEL_ILP32"));
5653 sge32 = &smp->sgl[0].sge32[0];
5654 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5655 ddi_put32(acc_handle, &sge32[0].phys_addr,
5656 response_dma_obj.dma_cookie[0].dmac_address);
5657 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5658 ddi_put32(acc_handle, &sge32[1].phys_addr,
5659 request_dma_obj.dma_cookie[0].dmac_address);
5660 #else
5661 con_log(CL_ANN1, (CE_CONT,
5662 "issue_mfi_smp: DDI_MODEL_LP64"));
5663 sge64 = &smp->sgl[0].sge64[0];
5664 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5665 ddi_put64(acc_handle, &sge64[0].phys_addr,
5666 response_dma_obj.dma_cookie[0].dmac_address);
5667 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5668 ddi_put64(acc_handle, &sge64[1].phys_addr,
5669 request_dma_obj.dma_cookie[0].dmac_address);
5670 #endif
5671 }
5672 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5673 "smp->response_xferlen = %d, smp->request_xferlen = %d "
5674 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5675 ddi_get32(acc_handle, &sge32[1].length),
5676 ddi_get32(acc_handle, &smp->data_xfer_len)));
5677
5678 cmd->sync_cmd = MRSAS_TRUE;
5679 cmd->frame_count = 1;
5680
5681 if (instance->tbolt) {
5682 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5683 }
5684
5685 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5686 con_log(CL_ANN, (CE_WARN,
5687 "issue_mfi_smp: fw_ioctl failed"));
5688 } else {
5689 con_log(CL_ANN1, (CE_CONT,
5690 "issue_mfi_smp: copy to user space"));
5691
5692 if (request_xferlen) {
5693 for (i = 0; i < request_xferlen; i++) {
5694 if (ddi_copyout(
5695 (uint8_t *)request_dma_obj.buffer +
5696 i, (uint8_t *)request_ubuf + i,
5697 1, mode)) {
5698 con_log(CL_ANN, (CE_WARN,
5699 "issue_mfi_smp : copy to user space"
5700 " failed"));
5701 return (DDI_FAILURE);
5702 }
5703 }
5704 }
5705
5706 if (response_xferlen) {
5707 for (i = 0; i < response_xferlen; i++) {
5708 if (ddi_copyout(
5709 (uint8_t *)response_dma_obj.buffer
5710 + i, (uint8_t *)response_ubuf
5711 + i, 1, mode)) {
5712 con_log(CL_ANN, (CE_WARN,
5713 "issue_mfi_smp : copy to "
5714 "user space failed"));
5715 return (DDI_FAILURE);
5716 }
5717 }
5718 }
5719 }
5720
5721 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
5722 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
5723 ksmp->cmd_status));
5724
5725 if (request_xferlen) {
5726 /* free kernel buffer */
5727 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
5728 DDI_SUCCESS)
5729 return (DDI_FAILURE);
5730 }
5731
5732 if (response_xferlen) {
5733 /* free kernel buffer */
5734 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
5735 DDI_SUCCESS)
5736 return (DDI_FAILURE);
5737 }
5738
5739 return (DDI_SUCCESS);
5740 }
5741
5742 /*
5743 * issue_mfi_stp
5744 */
5745 static int
5746 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5747 struct mrsas_cmd *cmd, int mode)
5748 {
5749 void *fis_ubuf;
5750 void *data_ubuf;
5751 uint32_t fis_xferlen = 0;
5752 uint32_t new_xfer_length1 = 0;
5753 uint32_t new_xfer_length2 = 0;
5754 uint32_t data_xferlen = 0;
5755 uint_t model;
5756 dma_obj_t fis_dma_obj;
5757 dma_obj_t data_dma_obj;
5758 struct mrsas_stp_frame *kstp;
5759 struct mrsas_stp_frame *stp;
5760 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5761 int i;
5762
5763 stp = &cmd->frame->stp;
5764 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
5765
5766 if (instance->adapterresetinprogress) {
5767 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5768 "returning mfi_pkt and setting TRAN_BUSY\n"));
5769 return (DDI_FAILURE);
5770 }
5771 model = ddi_model_convert_from(mode & FMODELS);
5772 if (model == DDI_MODEL_ILP32) {
5773 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5774
5775 fis_xferlen = kstp->sgl.sge32[0].length;
5776 data_xferlen = kstp->sgl.sge32[1].length;
5777
5778 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5779 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5780 }
5781 else
5782 {
5783 #ifdef _ILP32
5784 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5785
5786 fis_xferlen = kstp->sgl.sge32[0].length;
5787 data_xferlen = kstp->sgl.sge32[1].length;
5788
5789 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5790 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5791 #else
5792 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
5793
5794 fis_xferlen = kstp->sgl.sge64[0].length;
5795 data_xferlen = kstp->sgl.sge64[1].length;
5796
5797 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
5798 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
5799 #endif
5800 }
5801
5802
5803 if (fis_xferlen) {
5804 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
5805 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
5806
5807 /* means IOCTL requires DMA */
5808 /* allocate the data transfer buffer */
5809 //fis_dma_obj.size = fis_xferlen;
5810 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,new_xfer_length1,PAGESIZE);
5811 fis_dma_obj.size = new_xfer_length1;
5812 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
5813 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5814 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5815 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
5816 fis_dma_obj.dma_attr.dma_attr_align = 1;
5817
5818 /* allocate kernel buffer for DMA */
5819 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
5820 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5821 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
5822 "could not allocate data transfer buffer."));
5823 return (DDI_FAILURE);
5824 }
5825 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
5826
5827 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5828 for (i = 0; i < fis_xferlen; i++) {
5829 if (ddi_copyin((uint8_t *)fis_ubuf + i,
5830 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
5831 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
5832 "copy from user space failed"));
5833 return (DDI_FAILURE);
5834 }
5835 }
5836 }
5837
5838 if (data_xferlen) {
5839 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
5840 "data_xferlen = %x", data_ubuf, data_xferlen));
5841
5842 /* means IOCTL requires DMA */
5843 /* allocate the data transfer buffer */
5844 //data_dma_obj.size = data_xferlen;
5845 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen,new_xfer_length2,PAGESIZE);
5846 data_dma_obj.size = new_xfer_length2;
5847 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
5848 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5849 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5850 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
5851 data_dma_obj.dma_attr.dma_attr_align = 1;
5852
5853 /* allocate kernel buffer for DMA */
5854 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
5855 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5856 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
5857 "could not allocate data transfer buffer."));
5858 return (DDI_FAILURE);
5859 }
5860 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
5861
5862 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5863 for (i = 0; i < data_xferlen; i++) {
5864 if (ddi_copyin((uint8_t *)data_ubuf + i,
5865 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
5866 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
5867 "copy from user space failed"));
5868 return (DDI_FAILURE);
5869 }
5870 }
5871 }
5872
5873 ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
5874 ddi_put8(acc_handle, &stp->cmd_status, 0);
5875 ddi_put8(acc_handle, &stp->connection_status, 0);
5876 ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
5877 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
5878
5879 ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
5880 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
5881
5882 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
5883 DDI_DEV_AUTOINCR);
5884
5885 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
5886 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
5887 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
5888 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
5889 fis_dma_obj.dma_cookie[0].dmac_address);
5890 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
5891 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
5892 data_dma_obj.dma_cookie[0].dmac_address);
5893
5894 cmd->sync_cmd = MRSAS_TRUE;
5895 cmd->frame_count = 1;
5896
5897 if (instance->tbolt) {
5898 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5899 }
5900
5901 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5902 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
5903 } else {
5904
5905 if (fis_xferlen) {
5906 for (i = 0; i < fis_xferlen; i++) {
5907 if (ddi_copyout(
5908 (uint8_t *)fis_dma_obj.buffer + i,
5909 (uint8_t *)fis_ubuf + i, 1, mode)) {
5910 con_log(CL_ANN, (CE_WARN,
5911 "issue_mfi_stp : copy to "
5912 "user space failed"));
5913 return (DDI_FAILURE);
5914 }
5915 }
5916 }
5917 }
5918 if (data_xferlen) {
5919 for (i = 0; i < data_xferlen; i++) {
5920 if (ddi_copyout(
5921 (uint8_t *)data_dma_obj.buffer + i,
5922 (uint8_t *)data_ubuf + i, 1, mode)) {
5923 con_log(CL_ANN, (CE_WARN,
5924 "issue_mfi_stp : copy to"
5925 " user space failed"));
5926 return (DDI_FAILURE);
5927 }
5928 }
5929 }
5930
5931 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
5932 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
5933 kstp->cmd_status));
5934
5935 if (fis_xferlen) {
5936 /* free kernel buffer */
5937 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
5938 return (DDI_FAILURE);
5939 }
5940
5941 if (data_xferlen) {
5942 /* free kernel buffer */
5943 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
5944 return (DDI_FAILURE);
5945 }
5946
5947 return (DDI_SUCCESS);
5948 }
5949
5950 /*
5951 * fill_up_drv_ver
5952 */
5953 void
5954 fill_up_drv_ver(struct mrsas_drv_ver *dv)
5955 {
5956 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
5957
5958 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
5959 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
5960 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
5961 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
5962 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
5963 strlen(MRSAS_RELDATE));
5964
5965 }
5966
5967 /*
5968 * handle_drv_ioctl
5969 */
5970 static int
5971 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5972 int mode)
5973 {
5974 int i;
5975 int rval = DDI_SUCCESS;
5976 int *props = NULL;
5977 void *ubuf;
5978
5979 uint8_t *pci_conf_buf;
5980 uint32_t xferlen;
5981 uint32_t num_props;
5982 uint_t model;
5983 struct mrsas_dcmd_frame *kdcmd;
5984 struct mrsas_drv_ver dv;
5985 struct mrsas_pci_information pi;
5986
5987 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5988
5989 model = ddi_model_convert_from(mode & FMODELS);
5990 if (model == DDI_MODEL_ILP32) {
5991 con_log(CL_ANN1, (CE_CONT,
5992 "handle_drv_ioctl: DDI_MODEL_ILP32"));
5993
5994 xferlen = kdcmd->sgl.sge32[0].length;
5995
5996 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5997 } else {
5998 #ifdef _ILP32
5999 con_log(CL_ANN1, (CE_CONT,
6000 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6001 xferlen = kdcmd->sgl.sge32[0].length;
6002 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6003 #else
6004 con_log(CL_ANN1, (CE_CONT,
6005 "handle_drv_ioctl: DDI_MODEL_LP64"));
6006 xferlen = kdcmd->sgl.sge64[0].length;
6007 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6008 #endif
6009 }
6010 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6011 "dataBuf=%p size=%d bytes", ubuf, xferlen));
6012
6013 switch (kdcmd->opcode) {
6014 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6015 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6016 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6017
6018 fill_up_drv_ver(&dv);
6019
6020 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6021 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6022 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6023 "copy to user space failed"));
6024 kdcmd->cmd_status = 1;
6025 rval = 1;
6026 } else {
6027 kdcmd->cmd_status = 0;
6028 }
6029 break;
6030 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6031 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
6032 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
6033
6034 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6035 0, "reg", &props, &num_props)) {
6036 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6037 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6038 "ddi_prop_look_int_array failed"));
6039 rval = DDI_FAILURE;
6040 } else {
6041
6042 pi.busNumber = (props[0] >> 16) & 0xFF;
6043 pi.deviceNumber = (props[0] >> 11) & 0x1f;
6044 pi.functionNumber = (props[0] >> 8) & 0x7;
6045 ddi_prop_free((void *)props);
6046 }
6047
6048 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6049
6050 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6051 offsetof(struct mrsas_pci_information, pciHeaderInfo));
6052 i++) {
6053 pci_conf_buf[i] =
6054 pci_config_get8(instance->pci_handle, i);
6055 }
6056
6057 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6058 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6059 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6060 "copy to user space failed"));
6061 kdcmd->cmd_status = 1;
6062 rval = 1;
6063 } else {
6064 kdcmd->cmd_status = 0;
6065 }
6066 break;
6067 default:
6068 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6069 "invalid driver specific IOCTL opcode = 0x%x",
6070 kdcmd->opcode));
6071 kdcmd->cmd_status = 1;
6072 rval = DDI_FAILURE;
6073 break;
6074 }
6075
6076 return (rval);
6077 }
6078
6079 /*
6080 * handle_mfi_ioctl
6081 */
6082 static int
6083 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6084 int mode)
6085 {
6086 int rval = DDI_SUCCESS;
6087
6088 struct mrsas_header *hdr;
6089 struct mrsas_cmd *cmd;
6090
6091 if (instance->tbolt) {
6092 cmd = get_raid_msg_mfi_pkt(instance);
6093 } else {
6094 cmd = get_mfi_pkt(instance);
6095 }
6096 if (!cmd) {
6097 cmn_err(CE_WARN,
6098 "Failed to get a cmd from free-pool in handle_mfi_ioctl(). "
6099 "fw_outstanding=0x%X max_fw_cmds=0x%X",
6100 instance->fw_outstanding, instance->max_fw_cmds);
6101 return (DDI_FAILURE);
6102 }
6103
6104 /* Clear the frame buffer and assign back the context id */
6105 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6106 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6107 cmd->index);
6108
6109 hdr = (struct mrsas_header *)&ioctl->frame[0];
6110
6111 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
6112 case MFI_CMD_OP_DCMD:
6113 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
6114 break;
6115 case MFI_CMD_OP_SMP:
6116 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
6117 break;
6118 case MFI_CMD_OP_STP:
6119 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
6120 break;
6121 case MFI_CMD_OP_LD_SCSI:
6122 case MFI_CMD_OP_PD_SCSI:
6123 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
6124 break;
6125 default:
6126 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
6127 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
6128 rval = DDI_FAILURE;
6129 break;
6130 }
6131
6132 if (instance->tbolt) {
6133 return_raid_msg_mfi_pkt(instance, cmd);
6134 } else {
6135 return_mfi_pkt(instance, cmd);
6136 }
6137
6138 return (rval);
6139 }
6140
6141 /*
6142 * AEN
6143 */
6144 static int
6145 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6146 {
6147 int rval = 0;
6148
6149 rval = register_mfi_aen(instance, instance->aen_seq_num,
6150 aen->class_locale_word);
6151
6152 aen->cmd_status = (uint8_t)rval;
6153
6154 return (rval);
6155 }
6156
6157 static int
6158 register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
6159 uint32_t class_locale_word)
6160 {
6161 int ret_val;
6162
6163 struct mrsas_cmd *cmd, *aen_cmd;
6164 struct mrsas_dcmd_frame *dcmd;
6165 union mrsas_evt_class_locale curr_aen;
6166 union mrsas_evt_class_locale prev_aen;
6167
6168 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
6169 /*
6170 * If there an AEN pending already (aen_cmd), check if the
6171 * class_locale of that pending AEN is inclusive of the new
6172 * AEN request we currently have. If it is, then we don't have
6173 * to do anything. In other words, whichever events the current
6174 * AEN request is subscribing to, have already been subscribed
6175 * to.
6176 *
6177 * If the old_cmd is _not_ inclusive, then we have to abort
6178 * that command, form a class_locale that is superset of both
6179 * old and current and re-issue to the FW
6180 */
6181
6182 curr_aen.word = LE_32(class_locale_word);
6183 curr_aen.members.locale = LE_16(curr_aen.members.locale);
6184 aen_cmd = instance->aen_cmd;
6185 if (aen_cmd) {
6186 prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
6187 &aen_cmd->frame->dcmd.mbox.w[1]);
6188 prev_aen.word = LE_32(prev_aen.word);
6189 prev_aen.members.locale = LE_16(prev_aen.members.locale);
6190 /*
6191 * A class whose enum value is smaller is inclusive of all
6192 * higher values. If a PROGRESS (= -1) was previously
6193 * registered, then a new registration requests for higher
6194 * classes need not be sent to FW. They are automatically
6195 * included.
6196 *
6197 * Locale numbers don't have such hierarchy. They are bitmap
6198 * values
6199 */
6200 if ((prev_aen.members.class <= curr_aen.members.class) &&
6201 !((prev_aen.members.locale & curr_aen.members.locale) ^
6202 curr_aen.members.locale)) {
6203 /*
6204 * Previously issued event registration includes
6205 * current request. Nothing to do.
6206 */
6207
6208 return (0);
6209 } else {
6210 curr_aen.members.locale |= prev_aen.members.locale;
6211
6212 if (prev_aen.members.class < curr_aen.members.class)
6213 curr_aen.members.class = prev_aen.members.class;
6214
6215 ret_val = abort_aen_cmd(instance, aen_cmd);
6216
6217 if (ret_val) {
6218 con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
6219 "failed to abort prevous AEN command"));
6220
6221 return (ret_val);
6222 }
6223 }
6224 } else {
6225 curr_aen.word = LE_32(class_locale_word);
6226 curr_aen.members.locale = LE_16(curr_aen.members.locale);
6227 }
6228
6229 if (instance->tbolt) {
6230 cmd = get_raid_msg_mfi_pkt(instance);
6231 } else {
6232 cmd = get_mfi_pkt(instance);
6233 }
6234
6235 if (!cmd) {
6236 cmn_err(CE_WARN,
6237 "Failed to get a cmd from free-pool in register_mfi_aen(). "
6238 "fw_outstanding=0x%X max_fw_cmds=0x%X",
6239 instance->fw_outstanding, instance->max_fw_cmds);
6240 return (ENOMEM);
6241 }
6242
6243
6244 /* Clear the frame buffer and assign back the context id */
6245 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6246 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6247 cmd->index);
6248
6249 dcmd = &cmd->frame->dcmd;
6250
6251 /* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
6252 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
6253
6254 (void) memset(instance->mfi_evt_detail_obj.buffer, 0,
6255 sizeof (struct mrsas_evt_detail));
6256
6257 /* Prepare DCMD for aen registration */
6258 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
6259 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
6260 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
6261 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
6262 MFI_FRAME_DIR_READ);
6263 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
6264 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
6265 sizeof (struct mrsas_evt_detail));
6266 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
6267 MR_DCMD_CTRL_EVENT_WAIT);
6268 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
6269 curr_aen.members.locale = LE_16(curr_aen.members.locale);
6270 curr_aen.word = LE_32(curr_aen.word);
6271 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
6272 curr_aen.word);
6273 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
6274 instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
6275 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
6276 sizeof (struct mrsas_evt_detail));
6277
6278 instance->aen_seq_num = seq_num;
6279
6280
6281 /*
6282 * Store reference to the cmd used to register for AEN. When an
6283 * application wants us to register for AEN, we have to abort this
6284 * cmd and re-register with a new EVENT LOCALE supplied by that app
6285 */
6286 instance->aen_cmd = cmd;
6287
6288 cmd->frame_count = 1;
6289
6290 /* Issue the aen registration frame */
6291 /* atomic_add_16 (&instance->fw_outstanding, 1); */
6292 if (instance->tbolt) {
6293 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6294 }
6295 instance->func_ptr->issue_cmd(cmd, instance);
6296
6297 return (0);
6298 }
6299
6300 void
6301 display_scsi_inquiry(caddr_t scsi_inq)
6302 {
6303 #define MAX_SCSI_DEVICE_CODE 14
6304 int i;
6305 char inquiry_buf[256] = {0};
6306 int len;
6307 const char *const scsi_device_types[] = {
6308 "Direct-Access ",
6309 "Sequential-Access",
6310 "Printer ",
6311 "Processor ",
6312 "WORM ",
6313 "CD-ROM ",
6314 "Scanner ",
6315 "Optical Device ",
6316 "Medium Changer ",
6317 "Communications ",
6318 "Unknown ",
6319 "Unknown ",
6320 "Unknown ",
6321 "Enclosure ",
6322 };
6323
6324 len = 0;
6325
6326 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: ");
6327 for (i = 8; i < 16; i++) {
6328 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6329 scsi_inq[i]);
6330 }
6331
6332 len += snprintf(inquiry_buf + len, 265 - len, " Model: ");
6333
6334 for (i = 16; i < 32; i++) {
6335 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6336 scsi_inq[i]);
6337 }
6338
6339 len += snprintf(inquiry_buf + len, 265 - len, " Rev: ");
6340
6341 for (i = 32; i < 36; i++) {
6342 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6343 scsi_inq[i]);
6344 }
6345
6346 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6347
6348
6349 i = scsi_inq[0] & 0x1f;
6350
6351
6352 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ",
6353 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6354 "Unknown ");
6355
6356
6357 len += snprintf(inquiry_buf + len, 265 - len,
6358 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6359
6360 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6361 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
6362 } else {
6363 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6364 }
6365
6366 con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf));
6367 }
6368
6369 void
6370 io_timeout_checker(void *arg)
6371 {
6372 struct scsi_pkt *pkt;
6373 struct mrsas_instance *instance = arg;
6374 struct mrsas_cmd *cmd = NULL;
6375 struct mrsas_header *hdr;
6376 int time = 0;
6377 int counter = 0;
6378 struct mlist_head *pos, *next;
6379 mlist_t process_list;
6380
6381 if (instance->adapterresetinprogress == 1) {
6382 con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
6383 " reset in progress"));
6384
6385 instance->timeout_id = timeout(io_timeout_checker,
6386 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6387 return;
6388 }
6389
6390 /* See if this check needs to be in the beginning or last in ISR */
6391 if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
6392 cmn_err(CE_WARN, "io_timeout_checker:"
6393 "FW Fault, calling reset adapter");
6394 cmn_err(CE_CONT, "io_timeout_checker: fw_outstanding 0x%X max_fw_cmds 0x%X",
6395 instance->fw_outstanding, instance->max_fw_cmds );
6396 if (instance->adapterresetinprogress == 0) {
6397 instance->adapterresetinprogress = 1;
6398 if (instance->tbolt)
6399 mrsas_tbolt_reset_ppc(instance);
6400 else
6401 mrsas_reset_ppc(instance);
6402 instance->adapterresetinprogress = 0;
6403 }
6404 instance->timeout_id = timeout(io_timeout_checker,
6405 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6406 return;
6407 }
6408
6409 INIT_LIST_HEAD(&process_list);
6410
6411 mutex_enter(&instance->cmd_pend_mtx);
6412 mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
6413 cmd = mlist_entry(pos, struct mrsas_cmd, list);
6414
6415 if (cmd == NULL) {
6416 continue;
6417 }
6418
6419 if (cmd->sync_cmd == MRSAS_TRUE) {
6420 hdr = (struct mrsas_header *)&cmd->frame->hdr;
6421 if (hdr == NULL) {
6422 continue;
6423 }
6424 time = --cmd->drv_pkt_time;
6425 } else {
6426 pkt = cmd->pkt;
6427 if (pkt == NULL) {
6428 continue;
6429 }
6430 time = --cmd->drv_pkt_time;
6431 }
6432 if (time <= 0) {
6433 cmn_err(CE_WARN, "%llx: "
6434 "io_timeout_checker: TIMING OUT: pkt "
6435 ": %p, cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X\n",
6436 gethrtime(), (void *)pkt, (void *)cmd, instance->fw_outstanding, instance->max_fw_cmds);
6437
6438 counter++;
6439 break;
6440 }
6441 }
6442 mutex_exit(&instance->cmd_pend_mtx);
6443
6444 if (counter) {
6445 if (instance->disable_online_ctrl_reset == 1) {
6446 cmn_err(CE_WARN, "mr_sas %d: %s(): OCR is NOT supported by Firmware, KILL adapter!!!",
6447 instance->instance, __func__);
6448
6449 if (instance->tbolt)
6450 (void) mrsas_tbolt_kill_adapter(instance);
6451 else
6452 (void) mrsas_kill_adapter(instance);
6453
6454 return;
6455 } else {
6456 if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
6457 if (instance->adapterresetinprogress == 0) {
6458 if (instance->tbolt)
6459 mrsas_tbolt_reset_ppc(instance);
6460 else
6461 mrsas_reset_ppc(instance);
6462 }
6463 } else {
6464 cmn_err(CE_WARN,
6465 "io_timeout_checker: "
6466 "cmd %p cmd->index %d "
6467 "timed out even after 3 resets: "
6468 "so KILL adapter", (void *)cmd, cmd->index);
6469
6470 mrsas_print_cmd_details(instance, cmd, 0xDD);
6471
6472 if (instance->tbolt)
6473 (void) mrsas_tbolt_kill_adapter(instance);
6474 else
6475 (void) mrsas_kill_adapter(instance);
6476 return;
6477 }
6478 }
6479 }
6480 con_log(CL_ANN, (CE_NOTE, "mrsas: "
6481 "schedule next timeout check: "
6482 "do timeout \n"));
6483 instance->timeout_id =
6484 timeout(io_timeout_checker, (void *)instance,
6485 drv_usectohz(MRSAS_1_SECOND));
6486 }
6487
6488 static uint32_t
6489 read_fw_status_reg_ppc(struct mrsas_instance *instance)
6490 {
6491 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
6492 }
6493
6494 static void
6495 issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
6496 {
6497 struct scsi_pkt *pkt;
6498 atomic_add_16(&instance->fw_outstanding, 1);
6499
6500 pkt = cmd->pkt;
6501 if (pkt) {
6502 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6503 "ISSUED CMD TO FW : called : cmd:"
6504 ": %p instance : %p pkt : %p pkt_time : %x\n",
6505 gethrtime(), (void *)cmd, (void *)instance,
6506 (void *)pkt, cmd->drv_pkt_time));
6507 if (instance->adapterresetinprogress) {
6508 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6509 con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
6510 } else {
6511 push_pending_mfi_pkt(instance, cmd);
6512 }
6513
6514 } else {
6515 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6516 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
6517 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
6518 }
6519
6520 mutex_enter(&instance->reg_write_mtx);
6521 ASSERT(mutex_owned(&instance->reg_write_mtx));
6522 /* Issue the command to the FW */
6523 WR_IB_QPORT((cmd->frame_phys_addr) |
6524 (((cmd->frame_count - 1) << 1) | 1), instance);
6525 mutex_exit(&instance->reg_write_mtx);
6526
6527 }
6528
6529 /*
6530 * issue_cmd_in_sync_mode
6531 */
6532 static int
6533 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
6534 struct mrsas_cmd *cmd)
6535 {
6536 int i;
6537 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
6538 struct mrsas_header *hdr = &cmd->frame->hdr;
6539
6540 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));
6541
6542 if (instance->adapterresetinprogress) {
6543 cmd->drv_pkt_time = ddi_get16(
6544 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
6545 if (cmd->drv_pkt_time < debug_timeout_g)
6546 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6547
6548 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
6549 "issue and return in reset case\n"));
6550 WR_IB_QPORT((cmd->frame_phys_addr) |
6551 (((cmd->frame_count - 1) << 1) | 1), instance);
6552
6553 return (DDI_SUCCESS);
6554 } else {
6555 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
6556 push_pending_mfi_pkt(instance, cmd);
6557 }
6558
6559 cmd->cmd_status = ENODATA;
6560
6561 mutex_enter(&instance->reg_write_mtx);
6562 ASSERT(mutex_owned(&instance->reg_write_mtx));
6563 /* Issue the command to the FW */
6564 WR_IB_QPORT((cmd->frame_phys_addr) |
6565 (((cmd->frame_count - 1) << 1) | 1), instance);
6566 mutex_exit(&instance->reg_write_mtx);
6567
6568 mutex_enter(&instance->int_cmd_mtx);
6569 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
6570 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
6571 }
6572 mutex_exit(&instance->int_cmd_mtx);
6573
6574 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));
6575
6576 if (i < (msecs -1)) {
6577 return (DDI_SUCCESS);
6578 } else {
6579 return (DDI_FAILURE);
6580 }
6581 }
6582
6583 /*
6584 * issue_cmd_in_poll_mode
6585 */
6586 static int
6587 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6588 struct mrsas_cmd *cmd)
6589 {
6590 int i;
6591 uint16_t flags;
6592 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6593 struct mrsas_header *frame_hdr;
6594
6595 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
6596
6597 frame_hdr = (struct mrsas_header *)cmd->frame;
6598 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6599 MFI_CMD_STATUS_POLL_MODE);
6600 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6601 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6602
6603 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6604
6605 /* issue the frame using inbound queue port */
6606 WR_IB_QPORT((cmd->frame_phys_addr) |
6607 (((cmd->frame_count - 1) << 1) | 1), instance);
6608
6609 /* wait for cmd_status to change from 0xFF */
6610 for (i = 0; i < msecs && (
6611 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6612 == MFI_CMD_STATUS_POLL_MODE); i++) {
6613 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6614 }
6615
6616 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6617 == MFI_CMD_STATUS_POLL_MODE) {
6618 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6619 "cmd polling timed out"));
6620 return (DDI_FAILURE);
6621 }
6622
6623 return (DDI_SUCCESS);
6624 }
6625
6626 static void
6627 enable_intr_ppc(struct mrsas_instance *instance)
6628 {
6629 uint32_t mask;
6630
6631 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));
6632
6633 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
6634 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
6635
6636 /* WR_OB_INTR_MASK(~0x80000000, instance); */
6637 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
6638
6639 /* dummy read to force PCI flush */
6640 mask = RD_OB_INTR_MASK(instance);
6641
6642 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
6643 "outbound_intr_mask = 0x%x", mask));
6644 }
6645
6646 static void
6647 disable_intr_ppc(struct mrsas_instance *instance)
6648 {
6649 uint32_t mask;
6650
6651 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));
6652
6653 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
6654 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6655
6656 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
6657 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
6658
6659 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
6660 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6661
6662 /* dummy read to force PCI flush */
6663 mask = RD_OB_INTR_MASK(instance);
6664 #ifdef lint
6665 mask = mask;
6666 #endif
6667 }
6668
6669 static int
6670 intr_ack_ppc(struct mrsas_instance *instance)
6671 {
6672 uint32_t status;
6673 int ret = DDI_INTR_CLAIMED;
6674
6675 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));
6676
6677 /* check if it is our interrupt */
6678 status = RD_OB_INTR_STATUS(instance);
6679
6680 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));
6681
6682 if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
6683 ret = DDI_INTR_UNCLAIMED;
6684 }
6685
6686 if (ret == DDI_INTR_UNCLAIMED) {
6687 return (ret);
6688 }
6689 /* clear the interrupt by writing back the same value */
6690 WR_OB_DOORBELL_CLEAR(status, instance);
6691
6692 /* dummy READ */
6693 status = RD_OB_INTR_STATUS(instance);
6694
6695 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));
6696
6697 return (ret);
6698 }
6699
6700 /*
6701 * Marks HBA as bad. This will be called either when an
6702 * IO packet times out even after 3 FW resets
6703 * or FW is found to be fault even after 3 continuous resets.
6704 */
6705
6706 static int
6707 mrsas_kill_adapter(struct mrsas_instance *instance)
6708 {
6709 if (instance->deadadapter == 1)
6710 return (DDI_FAILURE);
6711
6712 con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
6713 "Writing to doorbell with MFI_STOP_ADP "));
6714 mutex_enter(&instance->ocr_flags_mtx);
6715 instance->deadadapter = 1;
6716 mutex_exit(&instance->ocr_flags_mtx);
6717 instance->func_ptr->disable_intr(instance);
6718 WR_IB_DOORBELL(MFI_STOP_ADP, instance);
6719 (void) mrsas_complete_pending_cmds(instance);
6720 return (DDI_SUCCESS);
6721 }
6722
6723
6724 static int
6725 mrsas_reset_ppc(struct mrsas_instance *instance)
6726 {
6727 uint32_t status;
6728 uint32_t retry = 0;
6729 uint32_t cur_abs_reg_val;
6730 uint32_t fw_state;
6731
6732 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
6733
6734 if (instance->deadadapter == 1) {
6735 cmn_err(CE_WARN, "mrsas_reset_ppc: "
6736 "no more resets as HBA has been marked dead ");
6737 return (DDI_FAILURE);
6738 }
6739 mutex_enter(&instance->ocr_flags_mtx);
6740 instance->adapterresetinprogress = 1;
6741 mutex_exit(&instance->ocr_flags_mtx);
6742 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress "
6743 "flag set, time %llx", gethrtime()));
6744
6745 instance->func_ptr->disable_intr(instance);
6746 retry_reset:
6747 WR_IB_WRITE_SEQ(0, instance);
6748 WR_IB_WRITE_SEQ(4, instance);
6749 WR_IB_WRITE_SEQ(0xb, instance);
6750 WR_IB_WRITE_SEQ(2, instance);
6751 WR_IB_WRITE_SEQ(7, instance);
6752 WR_IB_WRITE_SEQ(0xd, instance);
6753 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
6754 "to write sequence register\n"));
6755 delay(100 * drv_usectohz(MILLISEC));
6756 status = RD_OB_DRWE(instance);
6757
6758 while (!(status & DIAG_WRITE_ENABLE)) {
6759 delay(100 * drv_usectohz(MILLISEC));
6760 status = RD_OB_DRWE(instance);
6761 if (retry++ == 100) {
6762 cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit "
6763 "check retry count %d\n", retry);
6764 return (DDI_FAILURE);
6765 }
6766 }
6767 WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
6768 delay(100 * drv_usectohz(MILLISEC));
6769 status = RD_OB_DRWE(instance);
6770 while (status & DIAG_RESET_ADAPTER) {
6771 delay(100 * drv_usectohz(MILLISEC));
6772 status = RD_OB_DRWE(instance);
6773 if (retry++ == 100) {
6774 cmn_err(CE_WARN,
6775 "mrsas_reset_ppc: RESET FAILED. KILL adapter called\n.");
6776
6777 (void) mrsas_kill_adapter(instance);
6778 return (DDI_FAILURE);
6779 }
6780 }
6781 con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
6782 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6783 "Calling mfi_state_transition_to_ready"));
6784
6785 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
6786 if (mfi_state_transition_to_ready(instance) ||
6787 debug_fw_faults_after_ocr_g == 1) {
6788 cur_abs_reg_val =
6789 instance->func_ptr->read_fw_status_reg(instance);
6790 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
6791
6792 #ifdef OCRDEBUG
6793 con_log(CL_ANN1, (CE_NOTE,
6794 "mrsas_reset_ppc :before fake: FW is not ready "
6795 "FW state = 0x%x", fw_state));
6796 if (debug_fw_faults_after_ocr_g == 1)
6797 fw_state = MFI_STATE_FAULT;
6798 #endif
6799
6800 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready "
6801 "FW state = 0x%x", fw_state));
6802
6803 if (fw_state == MFI_STATE_FAULT) {
6804 /* increment the count */
6805 instance->fw_fault_count_after_ocr++;
6806 if (instance->fw_fault_count_after_ocr
6807 < MAX_FW_RESET_COUNT) {
6808 cmn_err(CE_WARN, "mrsas_reset_ppc: "
6809 "FW is in fault after OCR count %d "
6810 "Retry Reset",
6811 instance->fw_fault_count_after_ocr);
6812 goto retry_reset;
6813
6814 } else {
6815 cmn_err(CE_WARN, "mrsas_reset_ppc: "
6816 "Max Reset Count exceeded >%d"
6817 "Mark HBA as bad, KILL adapter", MAX_FW_RESET_COUNT);
6818
6819 (void) mrsas_kill_adapter(instance);
6820 return (DDI_FAILURE);
6821 }
6822 }
6823 }
6824 /* reset the counter as FW is up after OCR */
6825 instance->fw_fault_count_after_ocr = 0;
6826
6827
6828 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
6829 instance->producer, 0);
6830
6831 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
6832 instance->consumer, 0);
6833
6834 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6835 " after resetting produconsumer chck indexs:"
6836 "producer %x consumer %x", *instance->producer,
6837 *instance->consumer));
6838
6839 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6840 "Calling mrsas_issue_init_mfi"));
6841 (void) mrsas_issue_init_mfi(instance);
6842 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6843 "mrsas_issue_init_mfi Done"));
6844
6845 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6846 "Calling mrsas_print_pending_cmd\n"));
6847 (void) mrsas_print_pending_cmds(instance);
6848 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6849 "mrsas_print_pending_cmd done\n"));
6850
6851 instance->func_ptr->enable_intr(instance);
6852 instance->fw_outstanding = 0;
6853
6854 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6855 "Calling mrsas_issue_pending_cmds"));
6856 (void) mrsas_issue_pending_cmds(instance);
6857 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6858 "issue_pending_cmds done.\n"));
6859
6860 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6861 "Calling aen registration"));
6862
6863
6864 instance->aen_cmd->retry_count_for_ocr = 0;
6865 instance->aen_cmd->drv_pkt_time = 0;
6866
6867 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
6868 con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));
6869
6870 mutex_enter(&instance->ocr_flags_mtx);
6871 instance->adapterresetinprogress = 0;
6872 mutex_exit(&instance->ocr_flags_mtx);
6873 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6874 "adpterresetinprogress flag unset"));
6875
6876 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
6877 return (DDI_SUCCESS);
6878 }
6879
6880
6881 static int
6882 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
6883 {
6884
6885 dev_info_t *dip = instance->dip;
6886 int avail, actual, count;
6887 int i, flag, ret;
6888
6889 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
6890 intr_type));
6891
6892 /* Get number of interrupts */
6893 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
6894 if ((ret != DDI_SUCCESS) || (count == 0)) {
6895 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
6896 "ret %d count %d", ret, count));
6897
6898 return (DDI_FAILURE);
6899 }
6900
6901 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));
6902
6903 /* Get number of available interrupts */
6904 ret = ddi_intr_get_navail(dip, intr_type, &avail);
6905 if ((ret != DDI_SUCCESS) || (avail == 0)) {
6906 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
6907 "ret %d avail %d", ret, avail));
6908
6909 return (DDI_FAILURE);
6910 }
6911 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));
6912
6913 /* Only one interrupt routine. So limit the count to 1 */
6914 if (count > 1) {
6915 count = 1;
6916 }
6917
6918 /*
6919 * Allocate an array of interrupt handlers. Currently we support
6920 * only one interrupt. The framework can be extended later.
6921 */
6922 instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
6923 instance->intr_htable = kmem_zalloc(instance->intr_htable_size, KM_SLEEP);
6924 if (instance->intr_htable == NULL) {
6925 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
6926 "failed to allocate memory for intr-handle table"));
6927 instance->intr_htable_size = 0;
6928 return (DDI_FAILURE);
6929 }
6930
6931 flag = ((intr_type == DDI_INTR_TYPE_MSI) || (intr_type ==
6932 DDI_INTR_TYPE_MSIX)) ? DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;
6933
6934 /* Allocate interrupt */
6935 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
6936 count, &actual, flag);
6937
6938 if ((ret != DDI_SUCCESS) || (actual == 0)) {
6939 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
6940 "avail = %d", avail));
6941 goto mrsas_free_htable;
6942 }
6943
6944 if (actual < count) {
6945 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
6946 "Requested = %d Received = %d", count, actual));
6947 }
6948 instance->intr_cnt = actual;
6949
6950 /*
6951 * Get the priority of the interrupt allocated.
6952 */
6953 if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
6954 &instance->intr_pri)) != DDI_SUCCESS) {
6955 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
6956 "get priority call failed"));
6957 goto mrsas_free_handles;
6958 }
6959
6960 /*
6961 * Test for high level mutex. we don't support them.
6962 */
6963 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
6964 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
6965 "High level interrupts not supported."));
6966 goto mrsas_free_handles;
6967 }
6968
6969 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
6970 instance->intr_pri));
6971
6972 /* Call ddi_intr_add_handler() */
6973 for (i = 0; i < actual; i++) {
6974 ret = ddi_intr_add_handler(instance->intr_htable[i],
6975 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
6976 (caddr_t)(uintptr_t)i);
6977
6978 if (ret != DDI_SUCCESS) {
6979 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
6980 "failed %d", ret));
6981 goto mrsas_free_handles;
6982 }
6983
6984 }
6985
6986 con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));
6987
6988 if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
6989 &instance->intr_cap)) != DDI_SUCCESS) {
6990 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
6991 ret));
6992 goto mrsas_free_handlers;
6993 }
6994
6995 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
6996 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable"));
6997
6998 (void) ddi_intr_block_enable(instance->intr_htable,
6999 instance->intr_cnt);
7000 } else {
7001 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable"));
7002
7003 for (i = 0; i < instance->intr_cnt; i++) {
7004 (void) ddi_intr_enable(instance->intr_htable[i]);
7005 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns "
7006 "%d", i));
7007 }
7008 }
7009
7010 return (DDI_SUCCESS);
7011
7012 mrsas_free_handlers:
7013 for (i = 0; i < actual; i++)
7014 {
7015 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7016 }
7017
7018 mrsas_free_handles:
7019 for (i = 0; i < actual; i++)
7020 {
7021 (void) ddi_intr_free(instance->intr_htable[i]);
7022 }
7023
7024 mrsas_free_htable:
7025 if (instance->intr_htable != NULL)
7026 kmem_free(instance->intr_htable, instance->intr_htable_size);
7027
7028 instance->intr_htable =NULL;
7029 instance->intr_htable_size = 0;
7030
7031 return (DDI_FAILURE);
7032
7033 }
7034
7035
7036 static void
7037 mrsas_rem_intrs(struct mrsas_instance *instance)
7038 {
7039 int i;
7040
7041 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));
7042
7043 /* Disable all interrupts first */
7044 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7045 (void) ddi_intr_block_disable(instance->intr_htable,
7046 instance->intr_cnt);
7047 } else {
7048 for (i = 0; i < instance->intr_cnt; i++) {
7049 (void) ddi_intr_disable(instance->intr_htable[i]);
7050 }
7051 }
7052
7053 /* Remove all the handlers */
7054
7055 for (i = 0; i < instance->intr_cnt; i++) {
7056 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7057 (void) ddi_intr_free(instance->intr_htable[i]);
7058 }
7059
7060 if (instance->intr_htable != NULL)
7061 kmem_free(instance->intr_htable, instance->intr_htable_size);
7062
7063 instance->intr_htable =NULL;
7064 instance->intr_htable_size = 0;
7065
7066 }
7067
7068 static int
7069 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
7070 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
7071 {
7072 struct mrsas_instance *instance;
7073 int config;
7074 int rval = NDI_SUCCESS;
7075
7076 char *ptr = NULL;
7077 int tgt, lun;
7078
7079 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));
7080
7081 if ((instance = ddi_get_soft_state(mrsas_state,
7082 ddi_get_instance(parent))) == NULL) {
7083 return (NDI_FAILURE);
7084 }
7085
7086 /* Hold nexus during bus_config */
7087 ndi_devi_enter(parent, &config);
7088 switch (op) {
7089 case BUS_CONFIG_ONE: {
7090
7091 /* parse wwid/target name out of name given */
7092 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7093 rval = NDI_FAILURE;
7094 break;
7095 }
7096 ptr++;
7097
7098 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7099 rval = NDI_FAILURE;
7100 break;
7101 }
7102
7103 if (lun == 0) {
7104 rval = mrsas_config_ld(instance, tgt, lun, childp);
7105 }
7106 #ifdef PDSUPPORT
7107 else if ( instance->tbolt == 1 && lun != 0) {
7108 rval = mrsas_tbolt_config_pd(instance,
7109 tgt, lun, childp);
7110 }
7111 #endif
7112 else {
7113 rval = NDI_FAILURE;
7114 }
7115
7116 break;
7117 }
7118 case BUS_CONFIG_DRIVER:
7119 case BUS_CONFIG_ALL: {
7120
7121 rval = mrsas_config_all_devices(instance);
7122
7123 rval = NDI_SUCCESS;
7124 break;
7125 }
7126 }
7127
7128 if (rval == NDI_SUCCESS) {
7129 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7130
7131 }
7132 ndi_devi_exit(parent, config);
7133
7134 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7135 rval));
7136 return (rval);
7137 }
7138
7139 static int
7140 mrsas_config_all_devices(struct mrsas_instance *instance)
7141 {
7142 int rval, tgt;
7143
7144 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7145 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7146
7147 }
7148
7149 #ifdef PDSUPPORT
7150 /* Config PD devices connected to the card */
7151 if(instance->tbolt) {
7152 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7153 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7154 }
7155 }
7156 #endif
7157
7158 rval = NDI_SUCCESS;
7159 return (rval);
7160 }
7161
7162 static int
7163 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7164 {
7165 char devbuf[SCSI_MAXNAMELEN];
7166 char *addr;
7167 char *p, *tp, *lp;
7168 long num;
7169
7170 /* Parse dev name and address */
7171 (void) strcpy(devbuf, devnm);
7172 addr = "";
7173 for (p = devbuf; *p != '\0'; p++) {
7174 if (*p == '@') {
7175 addr = p + 1;
7176 *p = '\0';
7177 } else if (*p == ':') {
7178 *p = '\0';
7179 break;
7180 }
7181 }
7182
7183 /* Parse target and lun */
7184 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7185 if (*p == ',') {
7186 lp = p + 1;
7187 *p = '\0';
7188 break;
7189 }
7190 }
7191 if (tgt && tp) {
7192 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7193 return (DDI_FAILURE); /* Can declare this as constant */
7194 }
7195 *tgt = (int)num;
7196 }
7197 if (lun && lp) {
7198 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7199 return (DDI_FAILURE);
7200 }
7201 *lun = (int)num;
7202 }
7203 return (DDI_SUCCESS); /* Success case */
7204 }
7205
7206 static int
7207 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
7208 uint8_t lun, dev_info_t **ldip)
7209 {
7210 struct scsi_device *sd;
7211 dev_info_t *child;
7212 int rval;
7213
7214 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
7215 tgt, lun));
7216
7217 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
7218 if (ldip) {
7219 *ldip = child;
7220 }
7221 if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
7222 rval = mrsas_service_evt(instance, tgt, 0,
7223 MRSAS_EVT_UNCONFIG_TGT, NULL);
7224 con_log(CL_ANN1, (CE_WARN,
7225 "mr_sas: DELETING STALE ENTRY rval = %d "
7226 "tgt id = %d ", rval, tgt));
7227 return (NDI_FAILURE);
7228 }
7229 return (NDI_SUCCESS);
7230 }
7231
7232 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
7233 if (sd == NULL) {
7234 con_log(CL_ANN1, (CE_WARN,
7235 "mrsas_config_ld: failed to allocate mem for scsi_device"));
7236 return (NDI_FAILURE);
7237 }
7238 sd->sd_address.a_hba_tran = instance->tran;
7239 sd->sd_address.a_target = (uint16_t)tgt;
7240 sd->sd_address.a_lun = (uint8_t)lun;
7241
7242 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
7243 rval = mrsas_config_scsi_device(instance, sd, ldip);
7244 else
7245 rval = NDI_FAILURE;
7246
7247 /* sd_unprobe is blank now. Free buffer manually */
7248 if (sd->sd_inq) {
7249 kmem_free(sd->sd_inq, SUN_INQSIZE);
7250 sd->sd_inq = (struct scsi_inquiry *)NULL;
7251 }
7252
7253 kmem_free(sd, sizeof (struct scsi_device));
7254 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
7255 rval));
7256 return (rval);
7257 }
7258
7259 int
7260 mrsas_config_scsi_device(struct mrsas_instance *instance,
7261 struct scsi_device *sd, dev_info_t **dipp)
7262 {
7263 char *nodename = NULL;
7264 char **compatible = NULL;
7265 int ncompatible = 0;
7266 char *childname;
7267 dev_info_t *ldip = NULL;
7268 int tgt = sd->sd_address.a_target;
7269 int lun = sd->sd_address.a_lun;
7270 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7271 int rval;
7272
7273 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
7274 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7275 NULL, &nodename, &compatible, &ncompatible);
7276
7277 if (nodename == NULL) {
7278 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
7279 "for t%dL%d", tgt, lun));
7280 rval = NDI_FAILURE;
7281 goto finish;
7282 }
7283
7284 childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
7285 con_log(CL_DLEVEL1, (CE_NOTE,
7286 "mr_sas: Childname = %2s nodename = %s", childname, nodename));
7287
7288 /* Create a dev node */
7289 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
7290 con_log(CL_DLEVEL1, (CE_NOTE,
7291 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
7292 if (rval == NDI_SUCCESS) {
7293 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
7294 DDI_PROP_SUCCESS) {
7295 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7296 "property for t%dl%d target", tgt, lun));
7297 rval = NDI_FAILURE;
7298 goto finish;
7299 }
7300 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
7301 DDI_PROP_SUCCESS) {
7302 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7303 "property for t%dl%d lun", tgt, lun));
7304 rval = NDI_FAILURE;
7305 goto finish;
7306 }
7307
7308 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7309 "compatible", compatible, ncompatible) !=
7310 DDI_PROP_SUCCESS) {
7311 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7312 "property for t%dl%d compatible", tgt, lun));
7313 rval = NDI_FAILURE;
7314 goto finish;
7315 }
7316
7317 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7318 if (rval != NDI_SUCCESS) {
7319 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
7320 "t%dl%d", tgt, lun));
7321 ndi_prop_remove_all(ldip);
7322 (void) ndi_devi_free(ldip);
7323 } else {
7324 con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :"
7325 "0 t%dl%d", tgt, lun));
7326 }
7327
7328 }
7329 finish:
7330 if (dipp) {
7331 *dipp = ldip;
7332 }
7333
7334 con_log(CL_DLEVEL1, (CE_NOTE,
7335 "mr_sas: config_scsi_device rval = %d t%dL%d",
7336 rval, tgt, lun));
7337 scsi_hba_nodename_compatible_free(nodename, compatible);
7338 return (rval);
7339 }
7340
7341 /*ARGSUSED*/
7342 int
7343 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7344 uint64_t wwn)
7345 {
7346 struct mrsas_eventinfo *mrevt = NULL;
7347
7348 con_log(CL_ANN1, (CE_NOTE,
7349 "mrsas_service_evt called for t%dl%d event = %d",
7350 tgt, lun, event));
7351
7352 if ((instance->taskq == NULL) || (mrevt =
7353 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7354 return (ENOMEM);
7355 }
7356
7357 mrevt->instance = instance;
7358 mrevt->tgt = tgt;
7359 mrevt->lun = lun;
7360 mrevt->event = event;
7361 mrevt->wwn = wwn;
7362
7363 if ((ddi_taskq_dispatch(instance->taskq,
7364 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7365 DDI_SUCCESS) {
7366 con_log(CL_ANN1, (CE_NOTE,
7367 "mr_sas: Event task failed for t%dl%d event = %d",
7368 tgt, lun, event));
7369 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7370 return (DDI_FAILURE);
7371 }
7372
7373 return (DDI_SUCCESS);
7374 }
7375
7376 static void
7377 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
7378 {
7379 struct mrsas_instance *instance = mrevt->instance;
7380 dev_info_t *dip, *pdip;
7381 int circ1 = 0;
7382 char *devname;
7383
7384 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
7385 " tgt %d lun %d event %d",
7386 mrevt->tgt, mrevt->lun, mrevt->event));
7387
7388 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
7389 mutex_enter(&instance->config_dev_mtx);
7390 dip = instance->mr_ld_list[mrevt->tgt].dip;
7391 mutex_exit(&instance->config_dev_mtx);
7392 }
7393
7394 #ifdef PDSUPPORT
7395 else {
7396 mutex_enter(&instance->config_dev_mtx);
7397 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7398 mutex_exit(&instance->config_dev_mtx);
7399 }
7400 #endif
7401
7402 ndi_devi_enter(instance->dip, &circ1);
7403 switch (mrevt->event) {
7404 case MRSAS_EVT_CONFIG_TGT:
7405 if (dip == NULL) {
7406
7407 if (mrevt->lun == 0) {
7408 (void) mrsas_config_ld(instance, mrevt->tgt,
7409 0, NULL);
7410 }
7411 #ifdef PDSUPPORT
7412 else if (instance->tbolt) {
7413 (void) mrsas_tbolt_config_pd(instance,
7414 mrevt->tgt,
7415 1, NULL);
7416 }
7417 #endif
7418 con_log(CL_ANN1, (CE_NOTE,
7419 "mr_sas: EVT_CONFIG_TGT called:"
7420 " for tgt %d lun %d event %d",
7421 mrevt->tgt, mrevt->lun, mrevt->event));
7422
7423 } else {
7424 con_log(CL_ANN1, (CE_NOTE,
7425 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7426 " for tgt %d lun %d event %d",
7427 mrevt->tgt, mrevt->lun, mrevt->event));
7428 }
7429 break;
7430 case MRSAS_EVT_UNCONFIG_TGT:
7431 if (dip) {
7432 if (i_ddi_devi_attached(dip)) {
7433
7434 pdip = ddi_get_parent(dip);
7435
7436 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
7437 (void) ddi_deviname(dip, devname);
7438
7439 (void) devfs_clean(pdip, devname + 1,
7440 DV_CLEAN_FORCE);
7441 kmem_free(devname, MAXNAMELEN + 1);
7442 }
7443 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7444 con_log(CL_ANN1, (CE_NOTE,
7445 "mr_sas: EVT_UNCONFIG_TGT called:"
7446 " for tgt %d lun %d event %d",
7447 mrevt->tgt, mrevt->lun, mrevt->event));
7448 } else {
7449 con_log(CL_ANN1, (CE_NOTE,
7450 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7451 " for tgt %d lun %d event %d",
7452 mrevt->tgt, mrevt->lun, mrevt->event));
7453 }
7454 break;
7455 }
7456 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7457 ndi_devi_exit(instance->dip, circ1);
7458 }
7459
7460
7461 int
7462 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7463 {
7464 union scsi_cdb *cdbp;
7465 uint16_t page_code;
7466 struct scsa_cmd *acmd;
7467 struct buf *bp;
7468 struct mode_header *modehdrp;
7469
7470 cdbp = (void *)pkt->pkt_cdbp;
7471 page_code = cdbp->cdb_un.sg.scsi[0];
7472 acmd = PKT2CMD(pkt);
7473 bp = acmd->cmd_buf;
7474 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) {
7475 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7476 /* ADD pkt statistics as Command failed. */
7477 return (NULL);
7478 }
7479
7480 bp_mapin(bp);
7481 bzero(bp->b_un.b_addr, bp->b_bcount);
7482
7483 switch (page_code) {
7484 case 0x3: {
7485 struct mode_format *page3p = NULL;
7486 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7487 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7488
7489 page3p = (void *)((caddr_t)modehdrp +
7490 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7491 page3p->mode_page.code = 0x3;
7492 page3p->mode_page.length =
7493 (uchar_t)(sizeof (struct mode_format));
7494 page3p->data_bytes_sect = 512;
7495 page3p->sect_track = 63;
7496 break;
7497 }
7498 case 0x4: {
7499 struct mode_geometry *page4p = NULL;
7500 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7501 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7502
7503 page4p = (void *)((caddr_t)modehdrp +
7504 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7505 page4p->mode_page.code = 0x4;
7506 page4p->mode_page.length =
7507 (uchar_t)(sizeof (struct mode_geometry));
7508 page4p->heads = 255;
7509 page4p->rpm = 10000;
7510 break;
7511 }
7512 default:
7513 break;
7514 }
7515 return (NULL);
7516 }
7517