1 /*
2 * mr_sas.c: source for mr_sas driver
3 *
4 * Solaris MegaRAID device driver for SAS2.0 controllers
5 * Copyright (c) 2008-2012, LSI Logic Corporation.
6 * All rights reserved.
7 *
8 * Version:
9 * Author:
10 * Swaminathan K S
11 * Arun Chandrashekhar
12 * Manju R
13 * Rasheed
14 * Shakeel Bukhari
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright notice,
20 * this list of conditions and the following disclaimer.
21 *
22 * 2. Redistributions in binary form must reproduce the above copyright notice,
23 * this list of conditions and the following disclaimer in the documentation
24 * and/or other materials provided with the distribution.
25 *
26 * 3. Neither the name of the author nor the names of its contributors may be
27 * used to endorse or promote products derived from this software without
28 * specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
37 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
41 * DAMAGE.
42 */
43
44 /*
45 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
46 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
47 * Copyright 2012 Nexenta System, Inc. All rights reserved.
48 */
49
50 #include <sys/types.h>
51 #include <sys/param.h>
52 #include <sys/file.h>
53 #include <sys/errno.h>
54 #include <sys/open.h>
55 #include <sys/cred.h>
56 #include <sys/modctl.h>
57 #include <sys/conf.h>
58 #include <sys/devops.h>
59 #include <sys/cmn_err.h>
60 #include <sys/kmem.h>
61 #include <sys/stat.h>
62 #include <sys/mkdev.h>
63 #include <sys/pci.h>
64 #include <sys/scsi/scsi.h>
65 #include <sys/ddi.h>
66 #include <sys/sunddi.h>
67 #include <sys/atomic.h>
68 #include <sys/signal.h>
69 #include <sys/byteorder.h>
70 #include <sys/sdt.h>
71 #include <sys/fs/dv_node.h> /* devfs_clean */
72
73 #include "mr_sas.h"
74
75 /*
76 * FMA header files
77 */
78 #include <sys/ddifm.h>
79 #include <sys/fm/protocol.h>
80 #include <sys/fm/util.h>
81 #include <sys/fm/io/ddi.h>
82
83 /*
84 * Local static data
85 */
/* Soft-state anchor for all mrsas_instance structures (see _init/_fini). */
static void *mrsas_state = NULL;
/* When B_TRUE, set DDI_DMA_RELAXED_ORDERING on the generic DMA attributes. */
static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
/* Global console-log verbosity for con_log(); CL_NONE silences debug output. */
volatile int debug_level_g = CL_NONE;
/* Tunables: prefer MSI/MSI-X interrupts and CTIO; may be overridden by the */
/* "mrsas-enable-msi" / "mrsas-enable-ctio" properties in mrsas_attach(). */
static volatile int msi_enable = 1;
static volatile int ctio_enable = 1;

/* Default Timeout value to issue online controller reset */
volatile int debug_timeout_g = 0xF0; /* 0xB4; */
/* Simulate consecutive firmware fault */
static volatile int debug_fw_faults_after_ocr_g = 0;
#ifdef OCRDEBUG
/* Simulate three consecutive timeout for an IO */
static volatile int debug_consecutive_timeout_after_ocr_g = 0;
#endif

#if 0
/* Enable OCR on firmware fault */
static volatile int debug_support_ocr_isr_g = 0;
#endif
/*
 * Weak bindings so the module loads even if the scsi_hba framework does
 * not export these entry points; cb_ops falls back accordingly.
 */
#pragma weak scsi_hba_open
#pragma weak scsi_hba_close
#pragma weak scsi_hba_ioctl
108
109 /* Local static prototypes. */
110 static int mrsas_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
111 static int mrsas_attach(dev_info_t *, ddi_attach_cmd_t);
112 #ifdef __sparc
113 static int mrsas_reset(dev_info_t *, ddi_reset_cmd_t);
114 #else
115 static int mrsas_quiesce(dev_info_t *);
116 #endif
117 static int mrsas_detach(dev_info_t *, ddi_detach_cmd_t);
118 static int mrsas_open(dev_t *, int, int, cred_t *);
119 static int mrsas_close(dev_t, int, int, cred_t *);
120 static int mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
121
122 static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
123 scsi_hba_tran_t *, struct scsi_device *);
124 static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
125 struct scsi_pkt *, struct buf *, int, int, int, int,
126 int (*)(), caddr_t);
127 static int mrsas_tran_start(struct scsi_address *,
128 register struct scsi_pkt *);
129 static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
130 static int mrsas_tran_reset(struct scsi_address *, int);
131 #if 0
132 static int mrsas_tran_bus_reset(dev_info_t *, int);
133 #endif
134 static int mrsas_tran_getcap(struct scsi_address *, char *, int);
135 static int mrsas_tran_setcap(struct scsi_address *, char *, int, int);
136 static void mrsas_tran_destroy_pkt(struct scsi_address *,
137 struct scsi_pkt *);
138 static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
139 static void mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
140 static int mrsas_tran_quiesce(dev_info_t *dip);
141 static int mrsas_tran_unquiesce(dev_info_t *dip);
142 static uint_t mrsas_isr();
143 static uint_t mrsas_softintr();
144 static void mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);
145 static struct mrsas_cmd *get_mfi_pkt(struct mrsas_instance *);
146 static void return_mfi_pkt(struct mrsas_instance *,
147 struct mrsas_cmd *);
148
149 static void free_space_for_mfi(struct mrsas_instance *);
150 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
151 static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
152 static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
153 struct mrsas_cmd *);
154 static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
155 struct mrsas_cmd *);
156 static void enable_intr_ppc(struct mrsas_instance *);
157 static void disable_intr_ppc(struct mrsas_instance *);
158 static int intr_ack_ppc(struct mrsas_instance *);
159 static void flush_cache(struct mrsas_instance *instance);
160 void display_scsi_inquiry(caddr_t);
161 static int start_mfi_aen(struct mrsas_instance *instance);
162 static int handle_drv_ioctl(struct mrsas_instance *instance,
163 struct mrsas_ioctl *ioctl, int mode);
164 static int handle_mfi_ioctl(struct mrsas_instance *instance,
165 struct mrsas_ioctl *ioctl, int mode);
166 static int handle_mfi_aen(struct mrsas_instance *instance,
167 struct mrsas_aen *aen);
168 static struct mrsas_cmd *build_cmd(struct mrsas_instance *,
169 struct scsi_address *, struct scsi_pkt *, uchar_t *);
170 static int alloc_additional_dma_buffer(struct mrsas_instance *);
171 static void complete_cmd_in_sync_mode(struct mrsas_instance *,
172 struct mrsas_cmd *);
173 static int mrsas_kill_adapter(struct mrsas_instance *);
174 static int mrsas_issue_init_mfi(struct mrsas_instance *);
175 static int mrsas_reset_ppc(struct mrsas_instance *);
176 static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *);
177 static int wait_for_outstanding(struct mrsas_instance *instance);
178 static int register_mfi_aen(struct mrsas_instance *instance,
179 uint32_t seq_num, uint32_t class_locale_word);
180 static int issue_mfi_pthru(struct mrsas_instance *instance, struct
181 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
182 static int issue_mfi_dcmd(struct mrsas_instance *instance, struct
183 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
184 static int issue_mfi_smp(struct mrsas_instance *instance, struct
185 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
186 static int issue_mfi_stp(struct mrsas_instance *instance, struct
187 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
188 static int abort_aen_cmd(struct mrsas_instance *instance,
189 struct mrsas_cmd *cmd_to_abort);
190
191 static void mrsas_rem_intrs(struct mrsas_instance *instance);
192 static int mrsas_add_intrs(struct mrsas_instance *instance, int intr_type);
193
194 static void mrsas_tran_tgt_free(dev_info_t *, dev_info_t *,
195 scsi_hba_tran_t *, struct scsi_device *);
196 static int mrsas_tran_bus_config(dev_info_t *, uint_t,
197 ddi_bus_config_op_t, void *, dev_info_t **);
198 static int mrsas_parse_devname(char *, int *, int *);
199 static int mrsas_config_all_devices(struct mrsas_instance *);
200 static int mrsas_config_ld(struct mrsas_instance *, uint16_t,
201 uint8_t, dev_info_t **);
202 static int mrsas_name_node(dev_info_t *, char *, int);
203 static void mrsas_issue_evt_taskq(struct mrsas_eventinfo *);
204 static void free_additional_dma_buffer(struct mrsas_instance *);
205 static void io_timeout_checker(void *);
206 static void mrsas_fm_init(struct mrsas_instance *);
207 static void mrsas_fm_fini(struct mrsas_instance *);
208
/*
 * Controller operations for MFI-style (e.g. 2108 "Liberator") adapters.
 * mrsas_attach() points instance->func_ptr here based on the PCI device ID.
 */
static struct mrsas_function_template mrsas_function_template_ppc = {
	.read_fw_status_reg = read_fw_status_reg_ppc,
	.issue_cmd = issue_cmd_ppc,
	.issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
	.issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
	.enable_intr = enable_intr_ppc,
	.disable_intr = disable_intr_ppc,
	.intr_ack = intr_ack_ppc,
	.init_adapter = mrsas_init_adapter_ppc
	/* .reset_adapter = mrsas_reset_adapter_ppc */
};
220
221
/*
 * Controller operations for Fusion/Thunderbolt (2208) adapters; selected in
 * mrsas_attach() when the PCI device ID is TBOLT or INVADER (tbolt == 1).
 */
static struct mrsas_function_template mrsas_function_template_fusion = {
	.read_fw_status_reg = tbolt_read_fw_status_reg,
	.issue_cmd = tbolt_issue_cmd,
	.issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
	.issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
	.enable_intr = tbolt_enable_intr,
	.disable_intr = tbolt_disable_intr,
	.intr_ack = tbolt_intr_ack,
	.init_adapter = mrsas_init_adapter_tbolt
	/* .reset_adapter = mrsas_reset_adapter_tbolt */
};
233
234
/*
 * Baseline DMA attributes; mrsas_attach() copies this into tran_dma_attr
 * (patching dma_attr_sgllen to the controller's max_num_sge) and may OR in
 * DDI_DMA_RELAXED_ORDERING when mrsas_relaxed_ordering is set.
 */
ddi_dma_attr_t mrsas_generic_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xFFFFFFFFU,		/* high DMA address range */
	0xFFFFFFFFU,		/* DMA counter register */
	8,			/* DMA address alignment */
	0x07,			/* DMA burstsizes */
	1,			/* min DMA size */
	0xFFFFFFFFU,		/* max DMA size */
	0xFFFFFFFFU,		/* segment boundary */
	MRSAS_MAX_SGE_CNT,	/* dma_attr_sglen */
	512,			/* granularity of device */
	0			/* bus specific DMA flags */
};

/* Upper bound reported for a single transfer on MFI-class controllers. */
int32_t mrsas_max_cap_maxxfer = 0x1000000;

/*
 * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG,
 * Limit size to 256K
 */
uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
257
258 /*
259 * cb_ops contains base level routines
260 */
/*
 * cb_ops contains base level routines.
 * The driver exposes only a character interface (open/close/ioctl) for the
 * devctl, scsi, and lsirdctl minor nodes created in mrsas_attach(); all
 * other entry points are nodev.
 */
static struct cb_ops mrsas_cb_ops = {
	mrsas_open,		/* open */
	mrsas_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mrsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	nodev,			/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
281
282 /*
283 * dev_ops contains configuration routines
284 */
/*
 * dev_ops contains configuration routines.
 * reset() is provided only on SPARC; on other platforms quiesce() is
 * supplied instead (fast-reboot support), mirroring the prototypes above.
 */
static struct dev_ops mrsas_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	mrsas_getinfo,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mrsas_attach,		/* attach */
	mrsas_detach,		/* detach */
#ifdef	__sparc
	mrsas_reset,		/* reset */
#else	/* __sparc */
	nodev,
#endif	/* __sparc */
	&mrsas_cb_ops,		/* char/block ops */
	NULL,			/* bus ops */
	NULL,			/* power */
#ifdef	__sparc
	ddi_quiesce_not_needed
#else	/* __sparc */
	mrsas_quiesce		/* quiesce */
#endif	/* __sparc */

};
308
/* Driver module descriptor handed to the module framework. */
static struct modldrv modldrv = {
	&mod_driverops,		/* module type - driver */
	MRSAS_VERSION,
	&mrsas_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,	/* ml_rev - must be MODREV_1 */
	&modldrv,	/* ml_linkage */
	NULL		/* end of driver linkage */
};

/*
 * Default register-access attributes: little-endian, strictly ordered
 * accesses; used for the control-register mapping in mrsas_attach().
 */
static struct ddi_device_acc_attr endian_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};


/*
 * Fast-Path I/O enable; may be cleared via the "mrsas-enable-fp" property
 * in mrsas_attach().
 */
unsigned int enable_fp = 1;
330
331
332 /*
333 * ************************************************************************** *
334 * *
335 * common entry points - for loadable kernel modules *
336 * *
337 * ************************************************************************** *
338 */
339
340 /*
341 * _init - initialize a loadable module
342 * @void
343 *
344 * The driver should perform any one-time resource allocation or data
345 * initialization during driver loading in _init(). For example, the driver
346 * should initialize any mutexes global to the driver in this routine.
347 * The driver should not, however, use _init() to allocate or initialize
348 * anything that has to do with a particular instance of the device.
349 * Per-instance initialization must be done in attach().
350 */
351 int
352 _init(void)
353 {
354 int ret;
355
356 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
357
358 ret = ddi_soft_state_init(&mrsas_state,
359 sizeof (struct mrsas_instance), 0);
360
361 if (ret != DDI_SUCCESS) {
362 cmn_err(CE_WARN, "mr_sas: could not init state");
363 return (ret);
364 }
365
366 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
367 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
368 ddi_soft_state_fini(&mrsas_state);
369 return (ret);
370 }
371
372 ret = mod_install(&modlinkage);
373
374 if (ret != DDI_SUCCESS) {
375 cmn_err(CE_WARN, "mr_sas: mod_install failed");
376 scsi_hba_fini(&modlinkage);
377 ddi_soft_state_fini(&mrsas_state);
378 }
379
380 return (ret);
381 }
382
383 /*
384 * _info - returns information about a loadable module.
385 * @void
386 *
387 * _info() is called to return module information. This is a typical entry
388 * point that does predefined role. It simply calls mod_info().
389 */
390 int
391 _info(struct modinfo *modinfop)
392 {
393 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
394
395 return (mod_info(&modlinkage, modinfop));
396 }
397
398 /*
399 * _fini - prepare a loadable module for unloading
400 * @void
401 *
402 * In _fini(), the driver should release any resources that were allocated in
403 * _init(). The driver must remove itself from the system module list.
404 */
405 int
406 _fini(void)
407 {
408 int ret;
409
410 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
411
412 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) {
413 con_log(CL_ANN1,
414 (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
415 return (ret);
416 }
417
418 scsi_hba_fini(&modlinkage);
419 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
420
421 ddi_soft_state_fini(&mrsas_state);
422 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
423
424 return (ret);
425 }
426
427
428 /*
429 * ************************************************************************** *
430 * *
431 * common entry points - for autoconfiguration *
432 * *
433 * ************************************************************************** *
434 */
435 /*
436 * attach - adds a device to the system as part of initialization
437 * @dip:
438 * @cmd:
439 *
440 * The kernel calls a driver's attach() entry point to attach an instance of
441 * a device (for MegaRAID, it is instance of a controller) or to resume
442 * operation for an instance of a device that has been suspended or has been
443 * shut down by the power management framework
444 * The attach() entry point typically includes the following types of
445 * processing:
446 * - allocate a soft-state structure for the device instance (for MegaRAID,
447 * controller instance)
448 * - initialize per-instance mutexes
449 * - initialize condition variables
450 * - register the device's interrupts (for MegaRAID, controller's interrupts)
451 * - map the registers and memory of the device instance (for MegaRAID,
452 * controller instance)
453 * - create minor device nodes for the device instance (for MegaRAID,
454 * controller instance)
455 * - report that the device instance (for MegaRAID, controller instance) has
456 * attached
457 */
458 static int
459 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
460 {
461 int instance_no;
462 int nregs;
463 int i = 0;
464 uint8_t irq;
465 uint16_t vendor_id;
466 uint16_t device_id;
467 uint16_t subsysvid;
468 uint16_t subsysid;
469 uint16_t command;
470 off_t reglength = 0;
471 int intr_types = 0;
472 char *data;
473
474 scsi_hba_tran_t *tran;
475 ddi_dma_attr_t tran_dma_attr;
476 struct mrsas_instance *instance;
477
478 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
479
480 /* CONSTCOND */
481 ASSERT(NO_COMPETING_THREADS);
482
483 instance_no = ddi_get_instance(dip);
484
485 /*
486 * check to see whether this device is in a DMA-capable slot.
487 */
488 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
489 cmn_err(CE_WARN,
490 "mr_sas%d: Device in slave-only slot, unused",
491 instance_no);
492 return (DDI_FAILURE);
493 }
494
495 switch (cmd) {
496 case DDI_ATTACH:
497 /* allocate the soft state for the instance */
498 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
499 != DDI_SUCCESS) {
500 cmn_err(CE_WARN,
501 "mr_sas%d: Failed to allocate soft state",
502 instance_no);
503 return (DDI_FAILURE);
504 }
505
506 instance = (struct mrsas_instance *)ddi_get_soft_state
507 (mrsas_state, instance_no);
508
509 if (instance == NULL) {
510 cmn_err(CE_WARN,
511 "mr_sas%d: Bad soft state", instance_no);
512 ddi_soft_state_free(mrsas_state, instance_no);
513 return (DDI_FAILURE);
514 }
515
516 bzero(instance, sizeof (struct mrsas_instance));
517
518 instance->unroll.softs = 1;
519
520 /* Setup the PCI configuration space handles */
521 if (pci_config_setup(dip, &instance->pci_handle) !=
522 DDI_SUCCESS) {
523 cmn_err(CE_WARN,
524 "mr_sas%d: pci config setup failed ",
525 instance_no);
526
527 ddi_soft_state_free(mrsas_state, instance_no);
528 return (DDI_FAILURE);
529 }
530 if (instance->pci_handle == NULL) {
531 cmn_err(CE_WARN,
532 "mr_sas%d: pci config setup failed ",
533 instance_no);
534 ddi_soft_state_free(mrsas_state, instance_no);
535 return (DDI_FAILURE);
536 }
537
538
539
540 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
541 cmn_err(CE_WARN,
542 "mr_sas: failed to get registers.");
543
544 pci_config_teardown(&instance->pci_handle);
545 ddi_soft_state_free(mrsas_state, instance_no);
546 return (DDI_FAILURE);
547 }
548
549 vendor_id = pci_config_get16(instance->pci_handle,
550 PCI_CONF_VENID);
551 device_id = pci_config_get16(instance->pci_handle,
552 PCI_CONF_DEVID);
553
554 subsysvid = pci_config_get16(instance->pci_handle,
555 PCI_CONF_SUBVENID);
556 subsysid = pci_config_get16(instance->pci_handle,
557 PCI_CONF_SUBSYSID);
558
559 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
560 (pci_config_get16(instance->pci_handle,
561 PCI_CONF_COMM) | PCI_COMM_ME));
562 irq = pci_config_get8(instance->pci_handle,
563 PCI_CONF_ILINE);
564
565 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
566 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
567 instance_no, vendor_id, device_id, subsysvid,
568 subsysid, irq, MRSAS_VERSION));
569
570 /* enable bus-mastering */
571 command = pci_config_get16(instance->pci_handle,
572 PCI_CONF_COMM);
573
574 if (!(command & PCI_COMM_ME)) {
575 command |= PCI_COMM_ME;
576
577 pci_config_put16(instance->pci_handle,
578 PCI_CONF_COMM, command);
579
580 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
581 "enable bus-mastering", instance_no));
582 } else {
583 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
584 "bus-mastering already set", instance_no));
585 }
586
587 /* initialize function pointers */
588 switch (device_id) {
589 case PCI_DEVICE_ID_LSI_TBOLT:
590 case PCI_DEVICE_ID_LSI_INVADER:
591 con_log(CL_ANN, (CE_NOTE,
592 "mr_sas: 2208 T.B. device detected"));
593
594 instance->func_ptr =
595 &mrsas_function_template_fusion;
596 instance->tbolt = 1;
597 break;
598
599 case PCI_DEVICE_ID_LSI_2108VDE:
600 case PCI_DEVICE_ID_LSI_2108V:
601 con_log(CL_ANN, (CE_NOTE,
602 "mr_sas: 2108 Liberator device detected"));
603
604 instance->func_ptr =
605 &mrsas_function_template_ppc;
606 break;
607
608 default:
609 cmn_err(CE_WARN,
610 "mr_sas: Invalid device detected");
611
612 pci_config_teardown(&instance->pci_handle);
613 ddi_soft_state_free(mrsas_state, instance_no);
614 return (DDI_FAILURE);
615 }
616
617 instance->baseaddress = pci_config_get32(
618 instance->pci_handle, PCI_CONF_BASE0);
619 instance->baseaddress &= 0x0fffc;
620
621 instance->dip = dip;
622 instance->vendor_id = vendor_id;
623 instance->device_id = device_id;
624 instance->subsysvid = subsysvid;
625 instance->subsysid = subsysid;
626 instance->instance = instance_no;
627
628 /* Initialize FMA */
629 instance->fm_capabilities = ddi_prop_get_int(
630 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
631 "fm-capable", DDI_FM_EREPORT_CAPABLE |
632 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
633 | DDI_FM_ERRCB_CAPABLE);
634
635 mrsas_fm_init(instance);
636
637 /* Setup register map */
638 if ((ddi_dev_regsize(instance->dip,
639 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) ||
640 reglength < MINIMUM_MFI_MEM_SZ) {
641 goto fail_attach;
642 }
643 if (reglength > DEFAULT_MFI_MEM_SZ) {
644 reglength = DEFAULT_MFI_MEM_SZ;
645 con_log(CL_DLEVEL1, (CE_NOTE,
646 "mr_sas: register length to map is 0x%lx bytes",
647 reglength));
648 }
649 if (ddi_regs_map_setup(instance->dip,
650 REGISTER_SET_IO_2108, &instance->regmap, 0,
651 reglength, &endian_attr, &instance->regmap_handle)
652 != DDI_SUCCESS) {
653 cmn_err(CE_WARN,
654 "mr_sas: couldn't map control registers");
655 goto fail_attach;
656 }
657 if (instance->regmap_handle == NULL) {
658 cmn_err(CE_WARN,
659 "mr_sas: couldn't map control registers");
660 goto fail_attach;
661 }
662
663 instance->unroll.regs = 1;
664
665 /*
666 * Disable Interrupt Now.
667 * Setup Software interrupt
668 */
669 instance->func_ptr->disable_intr(instance);
670
671 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
672 "mrsas-enable-msi", &data) == DDI_SUCCESS) {
673 if (strncmp(data, "no", 3) == 0) {
674 msi_enable = 0;
675 con_log(CL_ANN1, (CE_WARN,
676 "msi_enable = %d disabled", msi_enable));
677 }
678 ddi_prop_free(data);
679 }
680
681 con_log(CL_DLEVEL1, (CE_NOTE, "msi_enable = %d", msi_enable));
682
683 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
684 "mrsas-enable-fp", &data) == DDI_SUCCESS) {
685 if (strncmp(data, "no", 3) == 0) {
686 enable_fp = 0;
687 cmn_err(CE_NOTE,
688 "enable_fp = %d, Fast-Path disabled.\n",
689 enable_fp);
690 }
691
692 ddi_prop_free(data);
693 }
694
695 cmn_err(CE_NOTE, "enable_fp = %d\n", enable_fp);
696
697 /* Check for all supported interrupt types */
698 if (ddi_intr_get_supported_types(
699 dip, &intr_types) != DDI_SUCCESS) {
700 cmn_err(CE_WARN,
701 "ddi_intr_get_supported_types() failed");
702 goto fail_attach;
703 }
704
705 con_log(CL_DLEVEL1, (CE_NOTE,
706 "ddi_intr_get_supported_types() ret: 0x%x", intr_types));
707
708 /* Initialize and Setup Interrupt handler */
709 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
710 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) !=
711 DDI_SUCCESS) {
712 cmn_err(CE_WARN,
713 "MSIX interrupt query failed");
714 goto fail_attach;
715 }
716 instance->intr_type = DDI_INTR_TYPE_MSIX;
717 } else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) {
718 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) !=
719 DDI_SUCCESS) {
720 cmn_err(CE_WARN,
721 "MSI interrupt query failed");
722 goto fail_attach;
723 }
724 instance->intr_type = DDI_INTR_TYPE_MSI;
725 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
726 msi_enable = 0;
727 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) !=
728 DDI_SUCCESS) {
729 cmn_err(CE_WARN,
730 "FIXED interrupt query failed");
731 goto fail_attach;
732 }
733 instance->intr_type = DDI_INTR_TYPE_FIXED;
734 } else {
735 cmn_err(CE_WARN, "Device cannot "
736 "suppport either FIXED or MSI/X "
737 "interrupts");
738 goto fail_attach;
739 }
740
741 instance->unroll.intr = 1;
742
743 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
744 "mrsas-enable-ctio", &data) == DDI_SUCCESS) {
745 if (strncmp(data, "no", 3) == 0) {
746 ctio_enable = 0;
747 con_log(CL_ANN1, (CE_WARN,
748 "ctio_enable = %d disabled", ctio_enable));
749 }
750 ddi_prop_free(data);
751 }
752
753 con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d", ctio_enable));
754
755 /* setup the mfi based low level driver */
756 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
757 cmn_err(CE_WARN, "mr_sas: "
758 "could not initialize the low level driver");
759
760 goto fail_attach;
761 }
762
763 /* Initialize all Mutex */
764 INIT_LIST_HEAD(&instance->completed_pool_list);
765 mutex_init(&instance->completed_pool_mtx,
766 "completed_pool_mtx", MUTEX_DRIVER,
767 DDI_INTR_PRI(instance->intr_pri));
768
769 mutex_init(&instance->sync_map_mtx,
770 "sync_map_mtx", MUTEX_DRIVER,
771 DDI_INTR_PRI(instance->intr_pri));
772
773 mutex_init(&instance->app_cmd_pool_mtx,
774 "app_cmd_pool_mtx", MUTEX_DRIVER,
775 DDI_INTR_PRI(instance->intr_pri));
776
777 mutex_init(&instance->config_dev_mtx, "config_dev_mtx",
778 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
779
780 mutex_init(&instance->cmd_pend_mtx, "cmd_pend_mtx",
781 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
782
783 mutex_init(&instance->ocr_flags_mtx, "ocr_flags_mtx",
784 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
785
786 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
787 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
788 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
789
790 mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx",
791 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
792
793 mutex_init(&instance->reg_write_mtx, "reg_write_mtx",
794 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
795
796 if (instance->tbolt) {
797 mutex_init(&instance->cmd_app_pool_mtx,
798 "cmd_app_pool_mtx", MUTEX_DRIVER,
799 DDI_INTR_PRI(instance->intr_pri));
800
801 mutex_init(&instance->chip_mtx,
802 "chip_mtx", MUTEX_DRIVER,
803 DDI_INTR_PRI(instance->intr_pri));
804
805 }
806
807 instance->unroll.mutexs = 1;
808
809 instance->timeout_id = (timeout_id_t)-1;
810
811 /* Register our soft-isr for highlevel interrupts. */
812 instance->isr_level = instance->intr_pri;
813 if (!(instance->tbolt)) {
814 if (instance->isr_level == HIGH_LEVEL_INTR) {
815 if (ddi_add_softintr(dip,
816 DDI_SOFTINT_HIGH,
817 &instance->soft_intr_id, NULL, NULL,
818 mrsas_softintr, (caddr_t)instance) !=
819 DDI_SUCCESS) {
820 cmn_err(CE_WARN,
821 "Software ISR did not register");
822
823 goto fail_attach;
824 }
825
826 instance->unroll.soft_isr = 1;
827
828 }
829 }
830
831 instance->softint_running = 0;
832
833 /* Allocate a transport structure */
834 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
835
836 if (tran == NULL) {
837 cmn_err(CE_WARN,
838 "scsi_hba_tran_alloc failed");
839 goto fail_attach;
840 }
841
842 instance->tran = tran;
843 instance->unroll.tran = 1;
844
845 tran->tran_hba_private = instance;
846 tran->tran_tgt_init = mrsas_tran_tgt_init;
847 tran->tran_tgt_probe = scsi_hba_probe;
848 tran->tran_tgt_free = mrsas_tran_tgt_free;
849 if (instance->tbolt) {
850 tran->tran_init_pkt =
851 mrsas_tbolt_tran_init_pkt;
852 tran->tran_start =
853 mrsas_tbolt_tran_start;
854 } else {
855 tran->tran_init_pkt = mrsas_tran_init_pkt;
856 tran->tran_start = mrsas_tran_start;
857 }
858 tran->tran_abort = mrsas_tran_abort;
859 tran->tran_reset = mrsas_tran_reset;
860 tran->tran_getcap = mrsas_tran_getcap;
861 tran->tran_setcap = mrsas_tran_setcap;
862 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
863 tran->tran_dmafree = mrsas_tran_dmafree;
864 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
865 tran->tran_quiesce = mrsas_tran_quiesce;
866 tran->tran_unquiesce = mrsas_tran_unquiesce;
867 tran->tran_bus_config = mrsas_tran_bus_config;
868
869 if (mrsas_relaxed_ordering)
870 mrsas_generic_dma_attr.dma_attr_flags |=
871 DDI_DMA_RELAXED_ORDERING;
872
873
874 tran_dma_attr = mrsas_generic_dma_attr;
875 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
876
877 /* Attach this instance of the hba */
878 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
879 != DDI_SUCCESS) {
880 cmn_err(CE_WARN,
881 "scsi_hba_attach failed");
882
883 goto fail_attach;
884 }
885 instance->unroll.tranSetup = 1;
886 con_log(CL_ANN1,
887 (CE_CONT, "scsi_hba_attach_setup() done."));
888
889 /* create devctl node for cfgadm command */
890 if (ddi_create_minor_node(dip, "devctl",
891 S_IFCHR, INST2DEVCTL(instance_no),
892 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
893 cmn_err(CE_WARN,
894 "mr_sas: failed to create devctl node.");
895
896 goto fail_attach;
897 }
898
899 instance->unroll.devctl = 1;
900
901 /* create scsi node for cfgadm command */
902 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
903 INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
904 DDI_FAILURE) {
905 cmn_err(CE_WARN,
906 "mr_sas: failed to create scsi node.");
907
908 goto fail_attach;
909 }
910
911 instance->unroll.scsictl = 1;
912
913 (void) sprintf(instance->iocnode, "%d:lsirdctl",
914 instance_no);
915
916 /*
917 * Create a node for applications
918 * for issuing ioctl to the driver.
919 */
920 if (ddi_create_minor_node(dip, instance->iocnode,
921 S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) ==
922 DDI_FAILURE) {
923 cmn_err(CE_WARN,
924 "mr_sas: failed to create ioctl node.");
925
926 goto fail_attach;
927 }
928
929 instance->unroll.ioctl = 1;
930
931 /* Create a taskq to handle dr events */
932 if ((instance->taskq = ddi_taskq_create(dip,
933 "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
934 cmn_err(CE_WARN,
935 "mr_sas: failed to create taskq ");
936 instance->taskq = NULL;
937 goto fail_attach;
938 }
939 instance->unroll.taskq = 1;
940 con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done."));
941
942 /* enable interrupt */
943 instance->func_ptr->enable_intr(instance);
944
945 /* initiate AEN */
946 if (start_mfi_aen(instance)) {
947 cmn_err(CE_WARN,
948 "mr_sas: failed to initiate AEN.");
949 goto fail_attach;
950 }
951 instance->unroll.aenPend = 1;
952 con_log(CL_ANN1,
953 (CE_CONT, "AEN started for instance %d.", instance_no));
954
955 /* Finally! We are on the air. */
956 ddi_report_dev(dip);
957
958 /* FMA handle checking. */
959 if (mrsas_check_acc_handle(instance->regmap_handle) !=
960 DDI_SUCCESS) {
961 goto fail_attach;
962 }
963 if (mrsas_check_acc_handle(instance->pci_handle) !=
964 DDI_SUCCESS) {
965 goto fail_attach;
966 }
967
968 instance->mr_ld_list =
969 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
970 KM_SLEEP);
971 if (instance->mr_ld_list == NULL) {
972 cmn_err(CE_WARN, "mr_sas attach(): "
973 "failed to allocate ld_list array");
974 goto fail_attach;
975 }
976 instance->unroll.ldlist_buff = 1;
977
978 #ifdef PDSUPPORT
979 if (instance->tbolt) {
980 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
981 instance->mr_tbolt_pd_list =
982 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
983 sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
984 ASSERT(instance->mr_tbolt_pd_list);
985 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
986 instance->mr_tbolt_pd_list[i].lun_type =
987 MRSAS_TBOLT_PD_LUN;
988 instance->mr_tbolt_pd_list[i].dev_id =
989 (uint8_t)i;
990 }
991
992 instance->unroll.pdlist_buff = 1;
993 }
994 #endif
995 break;
996 case DDI_PM_RESUME:
997 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
998 break;
999 case DDI_RESUME:
1000 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME"));
1001 break;
1002 default:
1003 con_log(CL_ANN,
1004 (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd));
1005 return (DDI_FAILURE);
1006 }
1007
1008
1009 cmn_err(CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d",
1010 instance_no);
1011 return (DDI_SUCCESS);
1012
1013 fail_attach:
1014
1015 mrsas_undo_resources(dip, instance);
1016
1017 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
1018 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
1019
1020 mrsas_fm_fini(instance);
1021
1022 pci_config_teardown(&instance->pci_handle);
1023 ddi_soft_state_free(mrsas_state, instance_no);
1024
1025 con_log(CL_ANN, (CE_WARN, "mr_sas: return failure from mrsas_attach"));
1026
1027 cmn_err(CE_WARN, "mrsas_attach() return FAILURE instance_num %d",
1028 instance_no);
1029
1030 return (DDI_FAILURE);
1031 }
1032
1033 /*
1034 * getinfo - gets device information
1035 * @dip:
1036 * @cmd:
1037 * @arg:
1038 * @resultp:
1039 *
1040 * The system calls getinfo() to obtain configuration information that only
1041 * the driver knows. The mapping of minor numbers to device instance is
1042 * entirely under the control of the driver. The system sometimes needs to ask
1043 * the driver which device a particular dev_t represents.
1044 * Given the device number return the devinfo pointer from the scsi_device
1045 * structure.
1046 */
1047 /*ARGSUSED*/
1048 static int
1049 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1050 {
1051 int rval;
1052 int mrsas_minor = getminor((dev_t)arg);
1053
1054 struct mrsas_instance *instance;
1055
1056 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1057
1058 switch (cmd) {
1059 case DDI_INFO_DEVT2DEVINFO:
1060 instance = (struct mrsas_instance *)
1061 ddi_get_soft_state(mrsas_state,
1062 MINOR2INST(mrsas_minor));
1063
1064 if (instance == NULL) {
1065 *resultp = NULL;
1066 rval = DDI_FAILURE;
1067 } else {
1068 *resultp = instance->dip;
1069 rval = DDI_SUCCESS;
1070 }
1071 break;
1072 case DDI_INFO_DEVT2INSTANCE:
1073 *resultp = (void *)(intptr_t)
1074 (MINOR2INST(getminor((dev_t)arg)));
1075 rval = DDI_SUCCESS;
1076 break;
1077 default:
1078 *resultp = NULL;
1079 rval = DDI_FAILURE;
1080 }
1081
1082 return (rval);
1083 }
1084
1085 /*
1086 * detach - detaches a device from the system
1087 * @dip: pointer to the device's dev_info structure
1088 * @cmd: type of detach
1089 *
1090 * A driver's detach() entry point is called to detach an instance of a device
1091 * that is bound to the driver. The entry point is called with the instance of
1092 * the device node to be detached and with DDI_DETACH, which is specified as
1093 * the cmd argument to the entry point.
1094 * This routine is called during driver unload. We free all the allocated
1095 * resources and call the corresponding LLD so that it can also release all
1096 * its resources.
1097 */
static int
mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance_no;

	struct mrsas_instance *instance;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
	    instance_no);

	if (!instance) {
		cmn_err(CE_WARN,
		    "mr_sas:%d could not get instance in detach",
		    instance_no);

		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE,
	    "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
	    instance_no, instance->vendor_id, instance->device_id,
	    instance->subsysvid, instance->subsysid));

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_DETACH"));

		/*
		 * Cancel the pending config timeout before teardown.
		 * NOTE(review): config_dev_mtx is dropped around
		 * untimeout(), presumably so an executing timeout handler
		 * that needs the same mutex cannot deadlock against us —
		 * confirm against the handler's locking.
		 */
		mutex_enter(&instance->config_dev_mtx);
		if (instance->timeout_id != (timeout_id_t)-1) {
			mutex_exit(&instance->config_dev_mtx);
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;
			mutex_enter(&instance->config_dev_mtx);
			instance->unroll.timer = 0;
		}
		mutex_exit(&instance->config_dev_mtx);

		/* Unregister from SCSA first; fail detach if that fails. */
		if (instance->unroll.tranSetup == 1) {
			if (scsi_hba_detach(dip) != DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "mr_sas2%d: failed to detach",
				    instance_no);
				return (DDI_FAILURE);
			}
			instance->unroll.tranSetup = 0;
			con_log(CL_ANN1,
			    (CE_CONT, "scsi_hba_dettach() done."));
		}

		/* Push any dirty controller cache out to the disks. */
		flush_cache(instance);

		/* Release everything attach() acquired, in reverse order. */
		mrsas_undo_resources(dip, instance);

		mrsas_fm_fini(instance);

		pci_config_teardown(&instance->pci_handle);
		ddi_soft_state_free(mrsas_state, instance_no);
		break;

	case DDI_PM_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_PM_SUSPEND"));

		break;
	case DDI_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_SUSPEND"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1184
1185
/*
 * mrsas_undo_resources - release resources acquired by mrsas_attach()
 * @dip:	the device info node
 * @instance:	per-controller soft state
 *
 * Tears down driver state roughly in the reverse order of allocation.
 * Every resource is guarded by its own instance->unroll flag, so this
 * routine is safe to call both from a partially completed attach
 * (the fail_attach path) and from a full DDI_DETACH.
 */
static void
mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance)
{
	int instance_no;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	instance_no = ddi_get_instance(dip);


	/* Remove the ioctl, scsi and devctl minor nodes. */
	if (instance->unroll.ioctl == 1) {
		ddi_remove_minor_node(dip, instance->iocnode);
		instance->unroll.ioctl = 0;
	}

	if (instance->unroll.scsictl == 1) {
		ddi_remove_minor_node(dip, "scsi");
		instance->unroll.scsictl = 0;
	}

	if (instance->unroll.devctl == 1) {
		ddi_remove_minor_node(dip, "devctl");
		instance->unroll.devctl = 0;
	}

	/*
	 * Unregister from SCSA.  If the framework refuses (e.g. targets
	 * still open), abandon the teardown rather than freeing resources
	 * the framework may still reference.
	 */
	if (instance->unroll.tranSetup == 1) {
		if (scsi_hba_detach(dip) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "mr_sas2%d: failed to detach", instance_no);
			return;	/* DDI_FAILURE */
		}
		instance->unroll.tranSetup = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
	}

	if (instance->unroll.tran == 1) {
		scsi_hba_tran_free(instance->tran);
		instance->unroll.tran = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done."));
	}

	/* Abort the pending map-sync command (Thunderbolt only). */
	if (instance->unroll.syncCmd == 1) {
		if (instance->tbolt) {
			if (abort_syncmap_cmd(instance,
			    instance->map_update_cmd)) {
				cmn_err(CE_WARN, "mrsas_detach: "
				    "failed to abort previous syncmap command");
			}

			instance->unroll.syncCmd = 0;
			con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
		}
	}

	/* Abort the long-running AEN (async event) command. */
	if (instance->unroll.aenPend == 1) {
		if (abort_aen_cmd(instance, instance->aen_cmd))
			cmn_err(CE_WARN, "mrsas_detach: "
			    "failed to abort prevous AEN command");

		instance->unroll.aenPend = 0;
		con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
		/* This means the controller is fully initialized and running */
		/* Shutdown should be a last command to controller. */
		/* shutdown_controller(); */
	}


	/* Cancel the config timeout if it is still armed. */
	if (instance->unroll.timer == 1) {
		if (instance->timeout_id != (timeout_id_t)-1) {
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;

			instance->unroll.timer = 0;
		}
	}

	/* Quiet the hardware before destroying locks and handlers. */
	instance->func_ptr->disable_intr(instance);


	if (instance->unroll.mutexs == 1) {
		mutex_destroy(&instance->cmd_pool_mtx);
		mutex_destroy(&instance->app_cmd_pool_mtx);
		mutex_destroy(&instance->cmd_pend_mtx);
		mutex_destroy(&instance->completed_pool_mtx);
		mutex_destroy(&instance->sync_map_mtx);
		mutex_destroy(&instance->int_cmd_mtx);
		cv_destroy(&instance->int_cmd_cv);
		mutex_destroy(&instance->config_dev_mtx);
		mutex_destroy(&instance->ocr_flags_mtx);
		mutex_destroy(&instance->reg_write_mtx);

		if (instance->tbolt) {
			mutex_destroy(&instance->cmd_app_pool_mtx);
			mutex_destroy(&instance->chip_mtx);
		}

		instance->unroll.mutexs = 0;
		con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done."));
	}


	if (instance->unroll.soft_isr == 1) {
		ddi_remove_softintr(instance->soft_intr_id);
		instance->unroll.soft_isr = 0;
	}

	if (instance->unroll.intr == 1) {
		mrsas_rem_intrs(instance);
		instance->unroll.intr = 0;
	}


	if (instance->unroll.taskq == 1) {
		if (instance->taskq) {
			ddi_taskq_destroy(instance->taskq);
			instance->unroll.taskq = 0;
		}

	}

	/*
	 * free dma memory allocated for
	 * cmds/frames/queues/driver version etc
	 */
	if (instance->unroll.verBuff == 1) {
		(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
		instance->unroll.verBuff = 0;
	}

	/* Free the physical-device list (Thunderbolt controllers). */
	if (instance->unroll.pdlist_buff == 1) {
		if (instance->mr_tbolt_pd_list != NULL) {
			kmem_free(instance->mr_tbolt_pd_list,
			    MRSAS_TBOLT_GET_PD_MAX(instance) *
			    sizeof (struct mrsas_tbolt_pd));
		}

		instance->mr_tbolt_pd_list = NULL;
		instance->unroll.pdlist_buff = 0;
	}

	/* Free the logical-drive list. */
	if (instance->unroll.ldlist_buff == 1) {
		if (instance->mr_ld_list != NULL) {
			kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
			    * sizeof (struct mrsas_ld));
		}

		instance->mr_ld_list = NULL;
		instance->unroll.ldlist_buff = 0;
	}

	/* Free the command/frame pools of the matching chip generation. */
	if (instance->tbolt) {
		if (instance->unroll.alloc_space_mpi2 == 1) {
			free_space_for_mpi2(instance);
			instance->unroll.alloc_space_mpi2 = 0;
		}
	} else {
		if (instance->unroll.alloc_space_mfi == 1) {
			free_space_for_mfi(instance);
			instance->unroll.alloc_space_mfi = 0;
		}
	}

	/* Finally, unmap the controller register window. */
	if (instance->unroll.regs == 1) {
		ddi_regs_map_free(&instance->regmap_handle);
		instance->unroll.regs = 0;
		con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done."));
	}
}
1355
1356
1357
1358 /*
1359 * ************************************************************************** *
1360 * *
1361 * common entry points - for character driver types *
1362 * *
1363 * ************************************************************************** *
1364 */
1365 /*
1366 * open - gets access to a device
1367 * @dev:
1368 * @openflags:
1369 * @otyp:
1370 * @credp:
1371 *
1372 * Access to a device by one or more application programs is controlled
1373 * through the open() and close() entry points. The primary function of
1374 * open() is to verify that the open request is allowed.
1375 */
1376 static int
1377 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1378 {
1379 int rval = 0;
1380
1381 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1382
1383 /* Check root permissions */
1384 if (drv_priv(credp) != 0) {
1385 con_log(CL_ANN, (CE_WARN,
1386 "mr_sas: Non-root ioctl access denied!"));
1387 return (EPERM);
1388 }
1389
1390 /* Verify we are being opened as a character device */
1391 if (otyp != OTYP_CHR) {
1392 con_log(CL_ANN, (CE_WARN,
1393 "mr_sas: ioctl node must be a char node"));
1394 return (EINVAL);
1395 }
1396
1397 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1398 == NULL) {
1399 return (ENXIO);
1400 }
1401
1402 if (scsi_hba_open) {
1403 rval = scsi_hba_open(dev, openflags, otyp, credp);
1404 }
1405
1406 return (rval);
1407 }
1408
1409 /*
1410 * close - gives up access to a device
1411 * @dev:
1412 * @openflags:
1413 * @otyp:
1414 * @credp:
1415 *
1416 * close() should perform any cleanup necessary to finish using the minor
1417 * device, and prepare the device (and driver) to be opened again.
1418 */
1419 static int
1420 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1421 {
1422 int rval = 0;
1423
1424 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1425
1426 /* no need for locks! */
1427
1428 if (scsi_hba_close) {
1429 rval = scsi_hba_close(dev, openflags, otyp, credp);
1430 }
1431
1432 return (rval);
1433 }
1434
1435 /*
1436 * ioctl - performs a range of I/O commands for character drivers
1437 * @dev:
1438 * @cmd:
1439 * @arg:
1440 * @mode:
1441 * @credp:
1442 * @rvalp:
1443 *
1444 * ioctl() routine must make sure that user data is copied into or out of the
1445 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1446 * and ddi_copyout(), as appropriate.
1447 * This is a wrapper routine to serialize access to the actual ioctl routine.
1448 * ioctl() should return 0 on success, or the appropriate error number. The
1449 * driver may also set the value returned to the calling process through rvalp.
1450 */
1451
1452 static int
1453 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1454 int *rvalp)
1455 {
1456 int rval = 0;
1457
1458 struct mrsas_instance *instance;
1459 struct mrsas_ioctl *ioctl;
1460 struct mrsas_aen aen;
1461 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1462
1463 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1464
1465 if (instance == NULL) {
1466 /* invalid minor number */
1467 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1468 return (ENXIO);
1469 }
1470
1471 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1472 KM_SLEEP);
1473 if (ioctl == NULL) {
1474 /* Failed to allocate memory for ioctl */
1475 con_log(CL_ANN, (CE_WARN, "mr_sas_ioctl: "
1476 "failed to allocate memory for ioctl"));
1477 return (ENXIO);
1478 }
1479
1480 switch ((uint_t)cmd) {
1481 case MRSAS_IOCTL_FIRMWARE:
1482 if (ddi_copyin((void *)arg, ioctl,
1483 sizeof (struct mrsas_ioctl), mode)) {
1484 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1485 "ERROR IOCTL copyin"));
1486 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1487 return (EFAULT);
1488 }
1489
1490 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1491 rval = handle_drv_ioctl(instance, ioctl, mode);
1492 } else {
1493 rval = handle_mfi_ioctl(instance, ioctl, mode);
1494 }
1495
1496 if (ddi_copyout((void *)ioctl, (void *)arg,
1497 (sizeof (struct mrsas_ioctl) - 1), mode)) {
1498 con_log(CL_ANN, (CE_WARN,
1499 "mrsas_ioctl: copy_to_user failed"));
1500 rval = 1;
1501 }
1502
1503 break;
1504 case MRSAS_IOCTL_AEN:
1505 con_log(CL_ANN,
1506 (CE_NOTE, "mrsas_ioctl: IOCTL Register AEN.\n"));
1507
1508 if (ddi_copyin((void *) arg, &aen,
1509 sizeof (struct mrsas_aen), mode)) {
1510 con_log(CL_ANN, (CE_WARN,
1511 "mrsas_ioctl: ERROR AEN copyin"));
1512 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1513 return (EFAULT);
1514 }
1515
1516 rval = handle_mfi_aen(instance, &aen);
1517
1518 if (ddi_copyout((void *) &aen, (void *)arg,
1519 sizeof (struct mrsas_aen), mode)) {
1520 con_log(CL_ANN, (CE_WARN,
1521 "mrsas_ioctl: copy_to_user failed"));
1522 rval = 1;
1523 }
1524
1525 break;
1526 default:
1527 rval = scsi_hba_ioctl(dev, cmd, arg,
1528 mode, credp, rvalp);
1529
1530 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
1531 "scsi_hba_ioctl called, ret = %x.", rval));
1532 }
1533
1534 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1535 return (rval);
1536 }
1537
1538 /*
1539 * ************************************************************************** *
1540 * *
1541 * common entry points - for block driver types *
1542 * *
1543 * ************************************************************************** *
1544 */
1545 #ifdef __sparc
1546 /*
1547 * reset - TBD
1548 * @dip:
1549 * @cmd:
1550 *
1551 * TBD
1552 */
1553 /*ARGSUSED*/
1554 static int
1555 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1556 {
1557 int instance_no;
1558
1559 struct mrsas_instance *instance;
1560
1561 instance_no = ddi_get_instance(dip);
1562 instance = (struct mrsas_instance *)ddi_get_soft_state
1563 (mrsas_state, instance_no);
1564
1565 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1566
1567 if (!instance) {
1568 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1569 "in reset", instance_no));
1570 return (DDI_FAILURE);
1571 }
1572
1573 instance->func_ptr->disable_intr(instance);
1574
1575 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1576 instance_no));
1577
1578 flush_cache(instance);
1579
1580 return (DDI_SUCCESS);
1581 }
1582 #else /* __sparc */
1583 /*ARGSUSED*/
1584 static int
1585 mrsas_quiesce(dev_info_t *dip)
1586 {
1587 int instance_no;
1588
1589 struct mrsas_instance *instance;
1590
1591 instance_no = ddi_get_instance(dip);
1592 instance = (struct mrsas_instance *)ddi_get_soft_state
1593 (mrsas_state, instance_no);
1594
1595 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1596
1597 if (!instance) {
1598 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
1599 "in quiesce", instance_no));
1600 return (DDI_FAILURE);
1601 }
1602 if (instance->deadadapter || instance->adapterresetinprogress) {
1603 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
1604 "healthy state", instance_no));
1605 return (DDI_FAILURE);
1606 }
1607
1608 if (abort_aen_cmd(instance, instance->aen_cmd)) {
1609 con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
1610 "failed to abort prevous AEN command QUIESCE"));
1611 }
1612
1613 if (instance->tbolt) {
1614 if (abort_syncmap_cmd(instance,
1615 instance->map_update_cmd)) {
1616 cmn_err(CE_WARN,
1617 "mrsas_detach: failed to abort "
1618 "previous syncmap command");
1619 return (DDI_FAILURE);
1620 }
1621 }
1622
1623 instance->func_ptr->disable_intr(instance);
1624
1625 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1626 instance_no));
1627
1628 flush_cache(instance);
1629
1630 if (wait_for_outstanding(instance)) {
1631 con_log(CL_ANN1,
1632 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1633 return (DDI_FAILURE);
1634 }
1635 return (DDI_SUCCESS);
1636 }
1637 #endif /* __sparc */
1638
1639 /*
1640 * ************************************************************************** *
1641 * *
1642 * entry points (SCSI HBA) *
1643 * *
1644 * ************************************************************************** *
1645 */
1646 /*
1647 * tran_tgt_init - initialize a target device instance
1648 * @hba_dip:
1649 * @tgt_dip:
1650 * @tran:
1651 * @sd:
1652 *
1653 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1654 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1655 * the device's address as valid and supportable for that particular HBA.
1656 * By returning DDI_FAILURE, the instance of the target driver for that device
1657 * is not probed or attached.
1658 */
/*ARGSUSED*/
static int
mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	struct mrsas_instance *instance;
	uint16_t tgt = sd->sd_address.a_target;
	uint8_t lun = sd->sd_address.a_lun;
	dev_info_t *child = NULL;

	con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
	    tgt, lun));

	instance = ADDR2MR(&sd->sd_address);

	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
		/*
		 * If no persistent node exists, we don't allow .conf node
		 * to be created.
		 */
		if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
			con_log(CL_DLEVEL2,
			    (CE_NOTE, "mrsas_tgt_init find child ="
			    " %p t = %d l = %d", (void *)child, tgt, lun));
			if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
			    DDI_SUCCESS)
				/* Create this .conf node */
				return (DDI_SUCCESS);
		}
		con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
		    "DDI_FAILURE t = %d l = %d", tgt, lun));
		return (DDI_FAILURE);

	}

	/*
	 * NOTE(review): this debug print indexes mr_ld_list[tgt] before
	 * the "tgt < MRDRV_MAX_LD" check below — out-of-range if tgt is
	 * ever >= MRDRV_MAX_LD; confirm callers bound the target number.
	 */
	con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
	    (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));

	/* Track LUN 0 of an in-range target claimed by "sd" as an LD. */
	if (tgt < MRDRV_MAX_LD && lun == 0) {
		if (instance->mr_ld_list[tgt].dip == NULL &&
		    strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_ld_list[tgt].dip = tgt_dip;
			instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
			instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
			mutex_exit(&instance->config_dev_mtx);
		}
	}

#ifdef PDSUPPORT
	/*
	 * NOTE(review): the physical-device path indexes
	 * mr_tbolt_pd_list[tgt] with no range check on tgt — confirm
	 * the framework guarantees tgt < mr_tbolt_pd_max here.
	 */
	else if (instance->tbolt) {
		if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
			instance->mr_tbolt_pd_list[tgt].flag =
			    MRDRV_TGT_VALID;
			mutex_exit(&instance->config_dev_mtx);
			con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
			    "t%xl%x", tgt, lun));
		}
	}
#endif

	return (DDI_SUCCESS);
}
1724
/*ARGSUSED*/
static void
mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	struct mrsas_instance *instance;
	int tgt = sd->sd_address.a_target;
	int lun = sd->sd_address.a_lun;

	instance = ADDR2MR(&sd->sd_address);

	con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));

	/* Drop the cached LD dip only if it is the node being freed. */
	if (tgt < MRDRV_MAX_LD && lun == 0) {
		if (instance->mr_ld_list[tgt].dip == tgt_dip) {
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_ld_list[tgt].dip = NULL;
			mutex_exit(&instance->config_dev_mtx);
		}
	}

#ifdef PDSUPPORT
	/*
	 * NOTE(review): the PD path clears mr_tbolt_pd_list[tgt].dip
	 * without a range check on tgt and without verifying the entry
	 * actually matches tgt_dip — confirm this is intended.
	 */
	else if (instance->tbolt) {
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_tbolt_pd_list[tgt].dip = NULL;
		mutex_exit(&instance->config_dev_mtx);
		con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
		    "for tgt:%x", tgt));
	}
#endif

}
1757
1758 dev_info_t *
1759 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1760 {
1761 dev_info_t *child = NULL;
1762 char addr[SCSI_MAXNAMELEN];
1763 char tmp[MAXNAMELEN];
1764
1765 (void) sprintf(addr, "%x,%x", tgt, lun);
1766 for (child = ddi_get_child(instance->dip); child;
1767 child = ddi_get_next_sibling(child)) {
1768
1769 /* XXX KEBE ASKS - why was this added?! */
1770 if (ndi_dev_is_persistent_node(child) == 0) {
1771 continue;
1772 }
1773
1774 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1775 DDI_SUCCESS) {
1776 continue;
1777 }
1778
1779 if (strcmp(addr, tmp) == 0) {
1780 break;
1781 }
1782 }
1783 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1784 (void *)child));
1785 return (child);
1786 }
1787
1788 /*
1789 * mrsas_name_node -
1790 * @dip:
1791 * @name:
1792 * @len:
1793 */
1794 static int
1795 mrsas_name_node(dev_info_t *dip, char *name, int len)
1796 {
1797 int tgt, lun;
1798
1799 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1800 DDI_PROP_DONTPASS, "target", -1);
1801 con_log(CL_DLEVEL2, (CE_NOTE,
1802 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1803 if (tgt == -1) {
1804 return (DDI_FAILURE);
1805 }
1806 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1807 "lun", -1);
1808 con_log(CL_DLEVEL2,
1809 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1810 if (lun == -1) {
1811 return (DDI_FAILURE);
1812 }
1813 (void) snprintf(name, len, "%x,%x", tgt, lun);
1814 return (DDI_SUCCESS);
1815 }
1816
1817 /*
1818 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1819 * @ap:
1820 * @pkt:
1821 * @bp:
1822 * @cmdlen:
1823 * @statuslen:
1824 * @tgtlen:
1825 * @flags:
1826 * @callback:
1827 *
1828 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1829 * structure and DMA resources for a target driver request. The
1830 * tran_init_pkt() entry point is called when the target driver calls the
1831 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1832 * is a request to perform one or more of three possible services:
1833 * - allocation and initialization of a scsi_pkt structure
1834 * - allocation of DMA resources for data transfer
1835 * - reallocation of DMA resources for the next portion of the data transfer
1836 */
1837 static struct scsi_pkt *
1838 mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1839 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1840 int flags, int (*callback)(), caddr_t arg)
1841 {
1842 struct scsa_cmd *acmd;
1843 struct mrsas_instance *instance;
1844 struct scsi_pkt *new_pkt;
1845
1846 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1847
1848 instance = ADDR2MR(ap);
1849
1850 /* step #1 : pkt allocation */
1851 if (pkt == NULL) {
1852 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1853 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1854 if (pkt == NULL) {
1855 return (NULL);
1856 }
1857
1858 acmd = PKT2CMD(pkt);
1859
1860 /*
1861 * Initialize the new pkt - we redundantly initialize
1862 * all the fields for illustrative purposes.
1863 */
1864 acmd->cmd_pkt = pkt;
1865 acmd->cmd_flags = 0;
1866 acmd->cmd_scblen = statuslen;
1867 acmd->cmd_cdblen = cmdlen;
1868 acmd->cmd_dmahandle = NULL;
1869 acmd->cmd_ncookies = 0;
1870 acmd->cmd_cookie = 0;
1871 acmd->cmd_cookiecnt = 0;
1872 acmd->cmd_nwin = 0;
1873
1874 pkt->pkt_address = *ap;
1875 pkt->pkt_comp = (void (*)())NULL;
1876 pkt->pkt_flags = 0;
1877 pkt->pkt_time = 0;
1878 pkt->pkt_resid = 0;
1879 pkt->pkt_state = 0;
1880 pkt->pkt_statistics = 0;
1881 pkt->pkt_reason = 0;
1882 new_pkt = pkt;
1883 } else {
1884 acmd = PKT2CMD(pkt);
1885 new_pkt = NULL;
1886 }
1887
1888 /* step #2 : dma allocation/move */
1889 if (bp && bp->b_bcount != 0) {
1890 if (acmd->cmd_dmahandle == NULL) {
1891 if (mrsas_dma_alloc(instance, pkt, bp, flags,
1892 callback) == DDI_FAILURE) {
1893 if (new_pkt) {
1894 scsi_hba_pkt_free(ap, new_pkt);
1895 }
1896 return ((struct scsi_pkt *)NULL);
1897 }
1898 } else {
1899 if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1900 return ((struct scsi_pkt *)NULL);
1901 }
1902 }
1903 }
1904
1905 return (pkt);
1906 }
1907
1908 /*
1909 * tran_start - transport a SCSI command to the addressed target
1910 * @ap:
1911 * @pkt:
1912 *
1913 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1914 * SCSI command to the addressed target. The SCSI command is described
1915 * entirely within the scsi_pkt structure, which the target driver allocated
1916 * through the HBA driver's tran_init_pkt() entry point. If the command
1917 * involves a data transfer, DMA resources must also have been allocated for
1918 * the scsi_pkt structure.
1919 *
1920 * Return Values :
1921 * TRAN_BUSY - request queue is full, no more free scbs
1922 * TRAN_ACCEPT - pkt has been submitted to the instance
1923 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t cmd_done = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);
	struct mrsas_cmd *cmd;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/* A dead adapter accepts no further I/O. */
	if (instance->deadadapter == 1) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_tran_start: return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesnt take any more IOs"));
		if (pkt) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}

	/* While an adapter reset is in progress, ask the target to retry. */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}

	con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));

	pkt->pkt_reason = CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		/* Complete inline unless the caller asked for polled I/O. */
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	/* build_cmd() could not produce a command: ask caller to retry. */
	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/* Interrupt-driven path: hand the command to firmware. */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
			DTRACE_PROBE2(start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
		    "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/* Polled (FLAG_NOINTR) path: issue and complete inline. */
		struct mrsas_header *hdr = &cmd->frame->hdr;

		/* cmd->sync_cmd = MRSAS_TRUE; */ /* KEBE asks, inherit? */

		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Map firmware completion status to SCSA packet status. */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: scsi done with error"));
			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;

			/* Signal check-condition so the target retrieves
			 * sense data. */
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: device not found error"));
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;

		default:
			/* Any other status is reported as device busy. */
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		(void) mrsas_common_check(instance, cmd);
		DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
		    uint8_t, hdr->cmd_status);
		return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}
2046
2047 /*
2048 * tran_abort - Abort any commands that are currently in transport
2049 * @ap:
2050 * @pkt:
2051 *
2052 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
2053 * commands that are currently in transport for a particular target. This entry
2054 * point is called when a target driver calls scsi_abort(). The tran_abort()
2055 * entry point should attempt to abort the command denoted by the pkt
2056 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
2057 * abort all outstanding commands in the transport layer for the particular
2058 * target or logical unit.
2059 */
2060 /*ARGSUSED*/
2061 static int
2062 mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
2063 {
2064 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2065
2066 /* abort command not supported by H/W */
2067
2068 return (DDI_FAILURE);
2069 }
2070
2071 /*
2072 * tran_reset - reset either the SCSI bus or target
2073 * @ap:
2074 * @level:
2075 *
2076 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
2077 * the SCSI bus or a particular SCSI target device. This entry point is called
2078 * when a target driver calls scsi_reset(). The tran_reset() entry point must
2079 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
2080 * particular target or logical unit must be reset.
2081 */
2082 /*ARGSUSED*/
2083 static int
2084 mrsas_tran_reset(struct scsi_address *ap, int level)
2085 {
2086 struct mrsas_instance *instance = ADDR2MR(ap);
2087
2088 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2089
2090 if (wait_for_outstanding(instance)) {
2091 con_log(CL_ANN1,
2092 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
2093 return (DDI_FAILURE);
2094 } else {
2095 return (DDI_SUCCESS);
2096 }
2097 }
2098
#if 0
/*
 * tran_bus_reset - reset the SCSI bus
 * @dip:
 * @level:
 *
 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
 * initialized during the HBA driver's attach(). The vector should point to
 * an HBA entry point that is to be called when a user initiates a bus reset.
 * Implementation is hardware specific. If the HBA driver cannot reset the
 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
 * or not initialize this vector.
 *
 * NOTE: compiled out (#if 0); this vector is not installed by the driver
 * and the code below is retained only for reference.
 */
/*ARGSUSED*/
static int
mrsas_tran_bus_reset(dev_info_t *dip, int level)
{
	int instance_no = ddi_get_instance(dip);

	struct mrsas_instance *instance = ddi_get_soft_state(mrsas_state,
	    instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* Succeed only if all outstanding commands drain. */
	if (wait_for_outstanding(instance)) {
		con_log(CL_ANN1,
		    (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
		return (DDI_FAILURE);
	} else {
		return (DDI_SUCCESS);
	}
}
#endif
2132
2133 /*
2134 * tran_getcap - get one of a set of SCSA-defined capabilities
2135 * @ap:
2136 * @cap:
2137 * @whom:
2138 *
2139 * The target driver can request the current setting of the capability for a
2140 * particular target by setting the whom parameter to nonzero. A whom value of
2141 * zero indicates a request for the current setting of the general capability
2142 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
2143 * for undefined capabilities or the current value of the requested capability.
2144 */
2145 /*ARGSUSED*/
2146 static int
2147 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
2148 {
2149 int rval = 0;
2150
2151 struct mrsas_instance *instance = ADDR2MR(ap);
2152
2153 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2154
2155 /* we do allow inquiring about capabilities for other targets */
2156 if (cap == NULL) {
2157 return (-1);
2158 }
2159
2160 switch (scsi_hba_lookup_capstr(cap)) {
2161 case SCSI_CAP_DMA_MAX:
2162 if (instance->tbolt) {
2163 /* Limit to 256k max transfer */
2164 rval = mrsas_tbolt_max_cap_maxxfer;
2165 } else {
2166 /* Limit to 16MB max transfer */
2167 rval = mrsas_max_cap_maxxfer;
2168 }
2169 break;
2170 case SCSI_CAP_MSG_OUT:
2171 rval = 1;
2172 break;
2173 case SCSI_CAP_DISCONNECT:
2174 rval = 0;
2175 break;
2176 case SCSI_CAP_SYNCHRONOUS:
2177 rval = 0;
2178 break;
2179 case SCSI_CAP_WIDE_XFER:
2180 rval = 1;
2181 break;
2182 case SCSI_CAP_TAGGED_QING:
2183 rval = 1;
2184 break;
2185 case SCSI_CAP_UNTAGGED_QING:
2186 rval = 1;
2187 break;
2188 case SCSI_CAP_PARITY:
2189 rval = 1;
2190 break;
2191 case SCSI_CAP_INITIATOR_ID:
2192 rval = instance->init_id;
2193 break;
2194 case SCSI_CAP_ARQ:
2195 rval = 1;
2196 break;
2197 case SCSI_CAP_LINKED_CMDS:
2198 rval = 0;
2199 break;
2200 case SCSI_CAP_RESET_NOTIFICATION:
2201 rval = 1;
2202 break;
2203 case SCSI_CAP_GEOMETRY:
2204 rval = -1;
2205
2206 break;
2207 default:
2208 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2209 scsi_hba_lookup_capstr(cap)));
2210 rval = -1;
2211 break;
2212 }
2213
2214 return (rval);
2215 }
2216
2217 /*
2218 * tran_setcap - set one of a set of SCSA-defined capabilities
2219 * @ap:
2220 * @cap:
2221 * @value:
2222 * @whom:
2223 *
2224 * The target driver might request that the new value be set for a particular
2225 * target by setting the whom parameter to nonzero. A whom value of zero
2226 * means that request is to set the new value for the SCSI bus or for adapter
2227 * hardware in general.
2228 * The tran_setcap() should return the following values as appropriate:
2229 * - -1 for undefined capabilities
2230 * - 0 if the HBA driver cannot set the capability to the requested value
2231 * - 1 if the HBA driver is able to set the capability to the requested value
2232 */
2233 /*ARGSUSED*/
2234 static int
2235 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2236 {
2237 int rval = 1;
2238
2239 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2240
2241 /* We don't allow setting capabilities for other targets */
2242 if (cap == NULL || whom == 0) {
2243 return (-1);
2244 }
2245
2246 switch (scsi_hba_lookup_capstr(cap)) {
2247 case SCSI_CAP_DMA_MAX:
2248 case SCSI_CAP_MSG_OUT:
2249 case SCSI_CAP_PARITY:
2250 case SCSI_CAP_LINKED_CMDS:
2251 case SCSI_CAP_RESET_NOTIFICATION:
2252 case SCSI_CAP_DISCONNECT:
2253 case SCSI_CAP_SYNCHRONOUS:
2254 case SCSI_CAP_UNTAGGED_QING:
2255 case SCSI_CAP_WIDE_XFER:
2256 case SCSI_CAP_INITIATOR_ID:
2257 case SCSI_CAP_ARQ:
2258 /*
2259 * None of these are settable via
2260 * the capability interface.
2261 */
2262 break;
2263 case SCSI_CAP_TAGGED_QING:
2264 rval = 1;
2265 break;
2266 case SCSI_CAP_SECTOR_SIZE:
2267 rval = 1;
2268 break;
2269
2270 case SCSI_CAP_TOTAL_SECTORS:
2271 rval = 1;
2272 break;
2273 default:
2274 rval = -1;
2275 break;
2276 }
2277
2278 return (rval);
2279 }
2280
2281 /*
2282 * tran_destroy_pkt - deallocate scsi_pkt structure
2283 * @ap:
2284 * @pkt:
2285 *
2286 * The tran_destroy_pkt() entry point is the HBA driver function that
2287 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2288 * called when the target driver calls scsi_destroy_pkt(). The
2289 * tran_destroy_pkt() entry point must free any DMA resources that have been
2290 * allocated for the packet. An implicit DMA synchronization occurs if the
2291 * DMA resources are freed and any cached data remains after the completion
2292 * of the transfer.
2293 */
2294 static void
2295 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2296 {
2297 struct scsa_cmd *acmd = PKT2CMD(pkt);
2298
2299 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2300
2301 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2302 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2303
2304 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2305
2306 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2307
2308 acmd->cmd_dmahandle = NULL;
2309 }
2310
2311 /* free the pkt */
2312 scsi_hba_pkt_free(ap, pkt);
2313 }
2314
2315 /*
2316 * tran_dmafree - deallocates DMA resources
2317 * @ap:
2318 * @pkt:
2319 *
 * The tran_dmafree() entry point deallocates DMA resources that have been
2321 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2322 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2323 * free only DMA resources allocated for a scsi_pkt structure, not the
2324 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2325 * implicitly performed.
2326 */
2327 /*ARGSUSED*/
2328 static void
2329 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2330 {
2331 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2332
2333 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2334
2335 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2336 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2337
2338 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2339
2340 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2341
2342 acmd->cmd_dmahandle = NULL;
2343 }
2344 }
2345
2346 /*
2347 * tran_sync_pkt - synchronize the DMA object allocated
2348 * @ap:
2349 * @pkt:
2350 *
2351 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2352 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2353 * entry point is called when the target driver calls scsi_sync_pkt(). If the
2354 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2355 * must synchronize the CPU's view of the data. If the data transfer direction
2356 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2357 * device's view of the data.
2358 */
2359 /*ARGSUSED*/
2360 static void
2361 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2362 {
2363 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2364
2365 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2366
2367 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2368 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2369 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2370 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2371 }
2372 }
2373
/*
 * tran_quiesce stub.
 * NOTE(review): always returns 1; by SCSA convention 0 means success, so
 * quiesce is effectively reported as unsupported/failed -- confirm this
 * is intentional.
 */
/*ARGSUSED*/
static int
mrsas_tran_quiesce(dev_info_t *dip)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (1);
}
2382
/*
 * tran_unquiesce stub.
 * NOTE(review): always returns 1 (nonzero = failure by SCSA convention),
 * matching mrsas_tran_quiesce() -- confirm intentional.
 */
/*ARGSUSED*/
static int
mrsas_tran_unquiesce(dev_info_t *dip)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (1);
}
2391
2392
2393 /*
2394 * mrsas_isr(caddr_t)
2395 *
2396 * The Interrupt Service Routine
2397 *
2398 * Collect status for all completed commands and do callback
2399 *
2400 */
2401 static uint_t
2402 mrsas_isr(struct mrsas_instance *instance)
2403 {
2404 int need_softintr;
2405 uint32_t producer;
2406 uint32_t consumer;
2407 uint32_t context;
2408 int retval;
2409
2410 struct mrsas_cmd *cmd;
2411 struct mrsas_header *hdr;
2412 struct scsi_pkt *pkt;
2413
2414 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2415 ASSERT(instance);
2416 if (instance->tbolt) {
2417 mutex_enter(&instance->chip_mtx);
2418 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2419 !(instance->func_ptr->intr_ack(instance))) {
2420 mutex_exit(&instance->chip_mtx);
2421 return (DDI_INTR_UNCLAIMED);
2422 }
2423 retval = mr_sas_tbolt_process_outstanding_cmd(instance);
2424 mutex_exit(&instance->chip_mtx);
2425 return (retval);
2426 } else {
2427 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2428 !instance->func_ptr->intr_ack(instance)) {
2429 return (DDI_INTR_UNCLAIMED);
2430 }
2431 }
2432
2433 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2434 0, 0, DDI_DMA_SYNC_FORCPU);
2435
2436 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2437 != DDI_SUCCESS) {
2438 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2439 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2440 con_log(CL_ANN1, (CE_WARN,
2441 "mr_sas_isr(): FMA check, returning DDI_INTR_UNCLAIMED"));
2442 return (DDI_INTR_CLAIMED);
2443 }
2444 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2445
2446 #ifdef OCRDEBUG
2447 if (debug_consecutive_timeout_after_ocr_g == 1) {
2448 con_log(CL_ANN1, (CE_NOTE,
2449 "simulating consecutive timeout after ocr"));
2450 return (DDI_INTR_CLAIMED);
2451 }
2452 #endif
2453
2454 mutex_enter(&instance->completed_pool_mtx);
2455 mutex_enter(&instance->cmd_pend_mtx);
2456
2457 producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2458 instance->producer);
2459 consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2460 instance->consumer);
2461
2462 con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
2463 producer, consumer));
2464 if (producer == consumer) {
2465 con_log(CL_ANN, (CE_WARN, "producer == consumer case"));
2466 DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
2467 uint32_t, consumer);
2468 mutex_exit(&instance->cmd_pend_mtx);
2469 mutex_exit(&instance->completed_pool_mtx);
2470 return (DDI_INTR_CLAIMED);
2471 }
2472
2473 while (consumer != producer) {
2474 context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2475 &instance->reply_queue[consumer]);
2476 cmd = instance->cmd_list[context];
2477
2478 if (cmd->sync_cmd == MRSAS_TRUE) {
2479 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2480 if (hdr) {
2481 mlist_del_init(&cmd->list);
2482 }
2483 } else {
2484 pkt = cmd->pkt;
2485 if (pkt) {
2486 mlist_del_init(&cmd->list);
2487 }
2488 }
2489
2490 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
2491
2492 consumer++;
2493 if (consumer == (instance->max_fw_cmds + 1)) {
2494 consumer = 0;
2495 }
2496 }
2497 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
2498 instance->consumer, consumer);
2499 mutex_exit(&instance->cmd_pend_mtx);
2500 mutex_exit(&instance->completed_pool_mtx);
2501
2502 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2503 0, 0, DDI_DMA_SYNC_FORDEV);
2504
2505 if (instance->softint_running) {
2506 need_softintr = 0;
2507 } else {
2508 need_softintr = 1;
2509 }
2510
2511 if (instance->isr_level == HIGH_LEVEL_INTR) {
2512 if (need_softintr) {
2513 ddi_trigger_softintr(instance->soft_intr_id);
2514 }
2515 } else {
2516 /*
2517 * Not a high-level interrupt, therefore call the soft level
2518 * interrupt explicitly
2519 */
2520 (void) mrsas_softintr(instance);
2521 }
2522
2523 return (DDI_INTR_CLAIMED);
2524 }
2525
2526
2527 /*
2528 * ************************************************************************** *
2529 * *
2530 * libraries *
2531 * *
2532 * ************************************************************************** *
2533 */
2534 /*
2535 * get_mfi_pkt : Get a command from the free pool
2536 * After successful allocation, the caller of this routine
2537 * must clear the frame buffer (memset to zero) before
2538 * using the packet further.
2539 *
2540 * ***** Note *****
2541 * After clearing the frame buffer the context id of the
2542 * frame buffer SHOULD be restored back.
2543 */
2544 static struct mrsas_cmd *
2545 get_mfi_pkt(struct mrsas_instance *instance)
2546 {
2547 mlist_t *head = &instance->cmd_pool_list;
2548 struct mrsas_cmd *cmd = NULL;
2549
2550 mutex_enter(&instance->cmd_pool_mtx);
2551 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2552
2553 if (!mlist_empty(head)) {
2554 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2555 mlist_del_init(head->next);
2556 }
2557 if (cmd != NULL) {
2558 cmd->pkt = NULL;
2559 cmd->retry_count_for_ocr = 0;
2560 cmd->drv_pkt_time = 0;
2561
2562 }
2563 mutex_exit(&instance->cmd_pool_mtx);
2564
2565 return (cmd);
2566 }
2567
2568 static struct mrsas_cmd *
2569 get_mfi_app_pkt(struct mrsas_instance *instance)
2570 {
2571 mlist_t *head = &instance->app_cmd_pool_list;
2572 struct mrsas_cmd *cmd = NULL;
2573
2574 mutex_enter(&instance->app_cmd_pool_mtx);
2575 ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));
2576
2577 if (!mlist_empty(head)) {
2578 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2579 mlist_del_init(head->next);
2580 }
2581 if (cmd != NULL) {
2582 cmd->pkt = NULL;
2583 cmd->retry_count_for_ocr = 0;
2584 cmd->drv_pkt_time = 0;
2585 }
2586
2587 mutex_exit(&instance->app_cmd_pool_mtx);
2588
2589 return (cmd);
2590 }
2591 /*
2592 * return_mfi_pkt : Return a cmd to free command pool
2593 */
static void
return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->cmd_pool_mtx));
	/* use mlist_add_tail for debug assistance (FIFO reuse order) */
	mlist_add_tail(&cmd->list, &instance->cmd_pool_list);

	mutex_exit(&instance->cmd_pool_mtx);
}
2604
/*
 * return_mfi_app_pkt : Return an application command to its free pool.
 */
static void
return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->app_cmd_pool_mtx);
	ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));

	/*
	 * NOTE(review): mlist_add() pushes at the head (LIFO), unlike
	 * return_mfi_pkt() which appends at the tail -- presumably
	 * intentional; confirm.
	 */
	mlist_add(&cmd->list, &instance->app_cmd_pool_list);

	mutex_exit(&instance->app_cmd_pool_mtx);
}
/*
 * push_pending_mfi_pkt - move an issued command onto the pending list and
 * arm the one-second io_timeout_checker() watchdog if it is not already
 * running (timeout_id == (timeout_id_t)-1 marks "not armed").
 *
 * For sync (DCMD/internal) commands the per-command timeout is read from
 * the MFI frame header; for regular I/O it comes from debug_timeout_g.
 * In both cases debug_timeout_g acts as a lower bound.
 */
void
push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;
	con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
	mutex_enter(&instance->cmd_pend_mtx);
	ASSERT(mutex_owned(&instance->cmd_pend_mtx));
	/* re-link the command at the tail of the pending list */
	mlist_del_init(&cmd->list);
	mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
	if (cmd->sync_cmd == MRSAS_TRUE) {
		hdr = (struct mrsas_header *)&cmd->frame->hdr;
		if (hdr) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x "
			    "time %llx",
			    (void *)cmd, cmd->index,
			    gethrtime()));
			/* Wait for specified interval */
			cmd->drv_pkt_time = ddi_get16(
			    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
			/* enforce the driver-wide minimum timeout */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_pkt(): "
			    "Called IO Timeout Value %x\n",
			    cmd->drv_pkt_time));
		}
		/* start the watchdog when the first command goes pending */
		if (hdr && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	} else {
		pkt = cmd->pkt;
		if (pkt) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x pkt %p, "
			    "time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
		}
		/* start the watchdog when the first command goes pending */
		if (pkt && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	}

	mutex_exit(&instance->cmd_pend_mtx);

}
2668
/*
 * mrsas_print_pending_cmds - debug dump of every command on the pending
 * list.  Temporarily forces debug_level_g to CL_ANN1 so the con_log()
 * calls below actually emit, and restores the saved level before
 * returning.  The first command gets a full detail dump (0xDD), the rest
 * a short one.  Always returns DDI_SUCCESS.
 */
int
mrsas_print_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	unsigned int flag = 1;
	struct scsi_pkt *pkt;
	int saved_level;	/* debug level restored on exit */
	int cmd_count = 0;


	saved_level = debug_level_g;
	debug_level_g = CL_ANN1;

	cmn_err(CE_NOTE, "mrsas_print_pending_cmds(): Called\n");

	while (flag) {
		/*
		 * NOTE(review): cmd_pend_mtx is dropped between
		 * iterations while tmp/cmd are still referenced; the walk
		 * assumes the list is not reshaped concurrently --
		 * confirm callers quiesce the list first.
		 */
		mutex_enter(&instance->cmd_pend_mtx);
		tmp = tmp->next;
		if (tmp == head) {
			mutex_exit(&instance->cmd_pend_mtx);
			flag = 0;
			con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():"
			    " NO MORE CMDS PENDING....\n"));
			break;
		} else {
			cmd = mlist_entry(tmp, struct mrsas_cmd, list);
			mutex_exit(&instance->cmd_pend_mtx);
			if (cmd) {
				if (cmd->sync_cmd == MRSAS_TRUE) {
					/* internal/DCMD command: no pkt */
					hdr = (struct mrsas_header *)
					    &cmd->frame->hdr;
					if (hdr) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x (NO-PKT)"
						    " hdr %p\n", (void *)cmd,
						    cmd->index,
						    cmd->drv_pkt_time,
						    (void *)hdr));
					}
				} else {
					pkt = cmd->pkt;
					if (pkt) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x pkt %p \n",
						    (void *)cmd, cmd->index,
						    cmd->drv_pkt_time, (void *)pkt));
					}
				}

				/* full detail for the first command only */
				if (++cmd_count == 1) {
					mrsas_print_cmd_details(instance, cmd,
					    0xDD);
				} else {
					mrsas_print_cmd_details(instance, cmd,
					    1);
				}

			}
		}
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));


	debug_level_g = saved_level;

	return (DDI_SUCCESS);
}
2741
2742
/*
 * mrsas_complete_pending_cmds - fail every command still on the pending
 * list back to its originator.  I/O packets are completed with
 * CMD_DEV_GONE/STAT_DISCON via their pkt_comp callback; sync (DCMD)
 * commands get MFI_STAT_INVALID_STATUS and are woken through
 * complete_cmd_in_sync_mode().  Called with the adapter presumed dead or
 * being reset.  Always returns DDI_SUCCESS.
 */
int
mrsas_complete_pending_cmds(struct mrsas_instance *instance)
{

	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;

	struct mlist_head *pos, *next;

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_complete_pending_cmds(): Called"));

	mutex_enter(&instance->cmd_pend_mtx);
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);
		if (cmd) {
			pkt = cmd->pkt;
			if (pkt) { /* for IO */
				/* only complete packets that expect a callback */
				if (((pkt->pkt_flags & FLAG_NOINTR)
				    == 0) && pkt->pkt_comp) {
					pkt->pkt_reason
					    = CMD_DEV_GONE;
					pkt->pkt_statistics
					    = STAT_DISCON;
					con_log(CL_ANN1, (CE_CONT,
					    "fail and posting to scsa "
					    "cmd %p index %x"
					    " pkt %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)pkt, gethrtime()));
					(*pkt->pkt_comp)(pkt);
				}
			} else { /* for DCMDS */
				if (cmd->sync_cmd == MRSAS_TRUE) {
					hdr = (struct mrsas_header *)&cmd->frame->hdr;
					con_log(CL_ANN1, (CE_CONT,
					    "posting invalid status to application "
					    "cmd %p index %x"
					    " hdr %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)hdr, gethrtime()));
					/*
					 * NOTE(review): direct store into the
					 * DMA frame, bypassing the access
					 * handle (ddi_put8) -- confirm.
					 */
					hdr->cmd_status = MFI_STAT_INVALID_STATUS;
					complete_cmd_in_sync_mode(instance, cmd);
				}
			}
			/* remove the command from the pending list */
			mlist_del_init(&cmd->list);
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_complete_pending_cmds:"
			    "NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_complete_pending_cmds:"
		    "looping for more commands\n"));
	}
	mutex_exit(&instance->cmd_pend_mtx);

	con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2806
/*
 * mrsas_print_cmd_details - debug dump of one command.
 *
 * detail == 0xDD requests a full dump: the global debug level is forced
 * to CL_ANN1 for the duration (saved_level is only written and restored
 * on that path), and on Thunderbolt controllers the raw
 * RAID_SCSI_IO_REQUEST frame and RAID context are printed as well.
 */
void
mrsas_print_cmd_details(struct mrsas_instance *instance, struct mrsas_cmd *cmd,
    int detail)
{
	struct scsi_pkt *pkt = cmd->pkt;
	Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
	int i;
	int saved_level;	/* valid only when detail == 0xDD */
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	if (detail == 0xDD) {
		saved_level = debug_level_g;
		debug_level_g = CL_ANN1;
	}


	if (instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
	} else {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->drv_pkt_time));
	}

	if (pkt) {
		con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
		    pkt->pkt_cdbp[0]));
	} else {
		con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
	}

	if ((detail == 0xDD) && instance->tbolt) {
		/* dump the MPI2 request frame through the DMA access handle */
		con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
		con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X "
		    "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
		    ddi_get16(acc_handle, &scsi_io->DevHandle),
		    ddi_get8(acc_handle, &scsi_io->Function),
		    ddi_get16(acc_handle, &scsi_io->IoFlags),
		    ddi_get16(acc_handle, &scsi_io->SGLFlags),
		    ddi_get32(acc_handle, &scsi_io->DataLength)));

		for (i = 0; i < 32; i++) {
			con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ", i,
			    ddi_get8(acc_handle, &scsi_io->CDB.CDB32[i])));
		}

		con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
		con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X "
		    "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X "
		    "RAIDFlags=0x%X regLockRowLBA=0x%" PRIu64
		    " regLockLength=0x%X spanArm=0x%X\n",
		    ddi_get8(acc_handle, &scsi_io->RaidContext.status),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.extStatus),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.ldTargetId),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.timeoutValue),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.regLockFlags),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.RAIDFlags),
		    ddi_get64(acc_handle, &scsi_io->RaidContext.regLockRowLBA),
		    ddi_get32(acc_handle, &scsi_io->RaidContext.regLockLength),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.spanArm)));
	}

	if (detail == 0xDD) {
		debug_level_g = saved_level;
	}
}
2876
2877
/*
 * mrsas_issue_pending_cmds - re-issue every command on the pending list
 * after an online controller reset (OCR).
 *
 * Each command's timeout is restored to at least debug_timeout_g and its
 * OCR retry counter is bumped; once a command exceeds IO_RETRY_COUNT
 * retries, the adapter is killed (tbolt or legacy variant) and
 * DDI_FAILURE is returned.  Otherwise each command is re-submitted via
 * the sync or async issue path and DDI_SUCCESS is returned.
 */
int
mrsas_issue_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head->next;
	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
	while (tmp != head) {
		/*
		 * NOTE(review): the lock is held only to fetch the entry
		 * and advance the cursor; the command is issued unlocked
		 * -- confirm the list cannot change underneath.
		 */
		mutex_enter(&instance->cmd_pend_mtx);
		cmd = mlist_entry(tmp, struct mrsas_cmd, list);
		tmp = tmp->next;
		mutex_exit(&instance->cmd_pend_mtx);
		if (cmd) {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds(): "
			    "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
			    (void *)cmd, cmd->index, cmd->drv_pkt_time));

			/* Reset command timeout value */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

			cmd->retry_count_for_ocr++;

			cmn_err(CE_CONT, "cmd retry count = %d\n",
			    cmd->retry_count_for_ocr);

			/* too many OCR retries: give up on the adapter */
			if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
				cmn_err(CE_WARN, "mrsas_issue_pending_cmds(): "
				    "cmd->retry_count exceeded limit >%d\n",
				    IO_RETRY_COUNT);
				mrsas_print_cmd_details(instance, cmd, 0xDD);

				cmn_err(CE_WARN,
				    "mrsas_issue_pending_cmds():"
				    "Calling KILL Adapter\n");
				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}

			pkt = cmd->pkt;
			if (pkt) {
				con_log(CL_ANN1, (CE_CONT,
				    "PENDING PKT-CMD ISSUE: cmd %p index %x "
				    "pkt %p time %llx",
				    (void *)cmd, cmd->index,
				    (void *)pkt,
				    gethrtime()));

			} else {
				cmn_err(CE_CONT,
				    "mrsas_issue_pending_cmds(): NO-PKT, "
				    "cmd %p index 0x%x drv_pkt_time 0x%x ",
				    (void *)cmd, cmd->index, cmd->drv_pkt_time);
			}


			/* re-issue along the original submission path */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				cmn_err(CE_CONT, "mrsas_issue_pending_cmds(): "
				    "SYNC_CMD == TRUE \n");
				instance->func_ptr->issue_cmd_in_sync_mode(
				    instance, cmd);
			} else {
				instance->func_ptr->issue_cmd(cmd, instance);
			}
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds: NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_issue_pending_cmds:"
		    "looping for more commands"));
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2959
2960
2961
2962 /*
2963 * destroy_mfi_frame_pool
2964 */
2965 void
2966 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2967 {
2968 int i;
2969 uint32_t max_cmd = instance->max_fw_cmds;
2970
2971 struct mrsas_cmd *cmd;
2972
2973 /* return all frames to pool */
2974
2975 for (i = 0; i < max_cmd; i++) {
2976
2977 cmd = instance->cmd_list[i];
2978
2979 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2980 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2981
2982 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
2983 }
2984
2985 }
2986
2987 /*
2988 * create_mfi_frame_pool
2989 */
2990 int
2991 create_mfi_frame_pool(struct mrsas_instance *instance)
2992 {
2993 int i = 0;
2994 int cookie_cnt;
2995 uint16_t max_cmd;
2996 uint16_t sge_sz;
2997 uint32_t sgl_sz;
2998 uint32_t tot_frame_size;
2999 struct mrsas_cmd *cmd;
3000 int retval = DDI_SUCCESS;
3001
3002 max_cmd = instance->max_fw_cmds;
3003 sge_sz = sizeof (struct mrsas_sge_ieee);
3004 /* calculated the number of 64byte frames required for SGL */
3005 sgl_sz = sge_sz * instance->max_num_sge;
3006 tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;
3007
3008 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
3009 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
3010
3011 while (i < max_cmd) {
3012 cmd = instance->cmd_list[i];
3013
3014 cmd->frame_dma_obj.size = tot_frame_size;
3015 cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
3016 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3017 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3018 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
3019 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
3020
3021 cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
3022 (uchar_t)DDI_STRUCTURE_LE_ACC);
3023
3024 if (cookie_cnt == -1 || cookie_cnt > 1) {
3025 cmn_err(CE_WARN,
3026 "create_mfi_frame_pool: could not alloc.");
3027 retval = DDI_FAILURE;
3028 goto mrsas_undo_frame_pool;
3029 }
3030
3031 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
3032
3033 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
3034 cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
3035 cmd->frame_phys_addr =
3036 cmd->frame_dma_obj.dma_cookie[0].dmac_address;
3037
3038 cmd->sense = (uint8_t *)(((unsigned long)
3039 cmd->frame_dma_obj.buffer) +
3040 tot_frame_size - SENSE_LENGTH);
3041 cmd->sense_phys_addr =
3042 cmd->frame_dma_obj.dma_cookie[0].dmac_address +
3043 tot_frame_size - SENSE_LENGTH;
3044
3045 if (!cmd->frame || !cmd->sense) {
3046 cmn_err(CE_WARN,
3047 "mr_sas: pci_pool_alloc failed");
3048 retval = ENOMEM;
3049 goto mrsas_undo_frame_pool;
3050 }
3051
3052 ddi_put32(cmd->frame_dma_obj.acc_handle,
3053 &cmd->frame->io.context, cmd->index);
3054 i++;
3055
3056 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
3057 cmd->index, cmd->frame_phys_addr));
3058 }
3059
3060 return (DDI_SUCCESS);
3061
3062 mrsas_undo_frame_pool:
3063 if (i > 0)
3064 destroy_mfi_frame_pool(instance);
3065
3066 return (retval);
3067 }
3068
3069 /*
3070 * free_additional_dma_buffer
3071 */
/*
 * Release the two auxiliary DMA objects created by
 * alloc_additional_dma_buffer(): the internal buffer that carries the
 * producer/consumer indices plus the reply queue, and the event-detail
 * buffer.  Each is freed only if still marked allocated.
 */
static void
free_additional_dma_buffer(struct mrsas_instance *instance)
{
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}
}
3087
3088 /*
3089 * alloc_additional_dma_buffer
3090 */
/*
 * alloc_additional_dma_buffer - set up the shared internal DMA area and
 * the event-detail buffer.
 *
 * The internal object (2 pages) is laid out as:
 *   [0..3]   producer index
 *   [4..7]   consumer index
 *   [8..8+reply_q_sz-8)  reply queue (max_fw_cmds + 1 + 2 uint32 slots,
 *            counted from offset 0)
 *   [reply_q_sz+8 ..]    internal scratch buffer
 *
 * NOTE(review): internal_buf_size assumes PAGESIZE*2 > reply_q_sz + 8;
 * holds for sane max_fw_cmds but is not checked here -- confirm.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE after unwinding the internal
 * object if the event-detail allocation fails.
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t reply_q_sz;
	uint32_t internal_buf_size = PAGESIZE*2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* carve the single allocation into its four regions (see above) */
	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    (reply_q_sz + 8);
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto mrsas_undo_internal_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);

mrsas_undo_internal_buff:
	/* unwind the internal buffer allocated above */
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}
3163
3164
3165 void
3166 mrsas_free_cmd_pool(struct mrsas_instance *instance)
3167 {
3168 int i;
3169 uint32_t max_cmd;
3170 size_t sz;
3171
3172 /* already freed */
3173 if (instance->cmd_list == NULL) {
3174 return;
3175 }
3176
3177 max_cmd = instance->max_fw_cmds;
3178
3179 /* size of cmd_list array */
3180 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3181
3182 /* First free each cmd */
3183 for (i = 0; i < max_cmd; i++) {
3184 if (instance->cmd_list[i] != NULL) {
3185 kmem_free(instance->cmd_list[i],
3186 sizeof (struct mrsas_cmd));
3187 }
3188
3189 instance->cmd_list[i] = NULL;
3190 }
3191
3192 /* Now, free cmd_list array */
3193 if (instance->cmd_list != NULL)
3194 kmem_free(instance->cmd_list, sz);
3195
3196 instance->cmd_list = NULL;
3197
3198 INIT_LIST_HEAD(&instance->cmd_pool_list);
3199 INIT_LIST_HEAD(&instance->cmd_pend_list);
3200 if (instance->tbolt) {
3201 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
3202 } else {
3203 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3204 }
3205
3206 }
3207
3208
3209 /*
3210 * mrsas_alloc_cmd_pool
3211 */
3212 int
3213 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3214 {
3215 int i;
3216 int count;
3217 uint32_t max_cmd;
3218 uint32_t reserve_cmd;
3219 size_t sz;
3220
3221 struct mrsas_cmd *cmd;
3222
3223 max_cmd = instance->max_fw_cmds;
3224 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3225 "max_cmd %x", max_cmd));
3226
3227
3228 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3229
3230 /*
3231 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3232 * Allocate the dynamic array first and then allocate individual
3233 * commands.
3234 */
3235 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3236 if (instance->cmd_list == NULL) {
3237 con_log(CL_NONE, (CE_WARN,
3238 "Failed to allocate memory for cmd_list"));
3239 return (DDI_FAILURE);
3240 }
3241
3242 /* create a frame pool and assign one frame to each cmd */
3243 for (count = 0; count < max_cmd; count++) {
3244 instance->cmd_list[count] =
3245 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3246 if (instance->cmd_list[count] == NULL) {
3247 con_log(CL_NONE, (CE_WARN,
3248 "Failed to allocate memory for mrsas_cmd"));
3249 goto mrsas_undo_cmds;
3250 }
3251 }
3252
3253 /* add all the commands to command pool */
3254
3255 INIT_LIST_HEAD(&instance->cmd_pool_list);
3256 INIT_LIST_HEAD(&instance->cmd_pend_list);
3257 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3258
3259 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
3260
3261 for (i = 0; i < reserve_cmd; i++) {
3262 cmd = instance->cmd_list[i];
3263 cmd->index = i;
3264 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3265 }
3266
3267
3268 for (i = reserve_cmd; i < max_cmd; i++) {
3269 cmd = instance->cmd_list[i];
3270 cmd->index = i;
3271 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3272 }
3273
3274 return (DDI_SUCCESS);
3275
3276 mrsas_undo_cmds:
3277 if (count > 0) {
3278 /* free each cmd */
3279 for (i = 0; i < count; i++) {
3280 if (instance->cmd_list[i] != NULL) {
3281 kmem_free(instance->cmd_list[i],
3282 sizeof (struct mrsas_cmd));
3283 }
3284 instance->cmd_list[i] = NULL;
3285 }
3286 }
3287
3288 mrsas_undo_cmd_list:
3289 if (instance->cmd_list != NULL)
3290 kmem_free(instance->cmd_list, sz);
3291 instance->cmd_list = NULL;
3292
3293 return (DDI_FAILURE);
3294 }
3295
3296
3297 /*
3298 * free_space_for_mfi
3299 */
3300 static void
3301 free_space_for_mfi(struct mrsas_instance *instance)
3302 {
3303
3304 /* already freed */
3305 if (instance->cmd_list == NULL) {
3306 return;
3307 }
3308
3309 /* Free additional dma buffer */
3310 free_additional_dma_buffer(instance);
3311
3312 /* Free the MFI frame pool */
3313 destroy_mfi_frame_pool(instance);
3314
3315 /* Free all the commands in the cmd_list */
3316 /* Free the cmd_list buffer itself */
3317 mrsas_free_cmd_pool(instance);
3318 }
3319
3320 /*
3321 * alloc_space_for_mfi
3322 */
3323 static int
3324 alloc_space_for_mfi(struct mrsas_instance *instance)
3325 {
3326 /* Allocate command pool (memory for cmd_list & individual commands) */
3327 if (mrsas_alloc_cmd_pool(instance)) {
3328 cmn_err(CE_WARN, "error creating cmd pool");
3329 return (DDI_FAILURE);
3330 }
3331
3332 /* Allocate MFI Frame pool */
3333 if (create_mfi_frame_pool(instance)) {
3334 cmn_err(CE_WARN, "error creating frame DMA pool");
3335 goto mfi_undo_cmd_pool;
3336 }
3337
3338 /* Allocate additional DMA buffer */
3339 if (alloc_additional_dma_buffer(instance)) {
3340 cmn_err(CE_WARN, "error creating frame DMA pool");
3341 goto mfi_undo_frame_pool;
3342 }
3343
3344 return (DDI_SUCCESS);
3345
3346 mfi_undo_frame_pool:
3347 destroy_mfi_frame_pool(instance);
3348
3349 mfi_undo_cmd_pool:
3350 mrsas_free_cmd_pool(instance);
3351
3352 return (DDI_FAILURE);
3353 }
3354
3355
3356
3357 /*
3358 * get_ctrl_info
3359 */
3360 static int
3361 get_ctrl_info(struct mrsas_instance *instance,
3362 struct mrsas_ctrl_info *ctrl_info)
3363 {
3364 int ret = 0;
3365
3366 struct mrsas_cmd *cmd;
3367 struct mrsas_dcmd_frame *dcmd;
3368 struct mrsas_ctrl_info *ci;
3369
3370 if (instance->tbolt) {
3371 cmd = get_raid_msg_mfi_pkt(instance);
3372 } else {
3373 cmd = get_mfi_pkt(instance);
3374 }
3375
3376 if (!cmd) {
3377 con_log(CL_ANN, (CE_WARN,
3378 "Failed to get a cmd for ctrl info"));
3379 DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
3380 uint16_t, instance->max_fw_cmds);
3381 return (DDI_FAILURE);
3382 }
3383
3384 /* Clear the frame buffer and assign back the context id */
3385 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3386 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3387 cmd->index);
3388
3389 dcmd = &cmd->frame->dcmd;
3390
3391 ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3392
3393 if (!ci) {
3394 cmn_err(CE_WARN,
3395 "Failed to alloc mem for ctrl info");
3396 return_mfi_pkt(instance, cmd);
3397 return (DDI_FAILURE);
3398 }
3399
3400 (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3401
3402 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
3403 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3404
3405 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3406 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3407 MFI_CMD_STATUS_POLL_MODE);
3408 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3409 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3410 MFI_FRAME_DIR_READ);
3411 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3412 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3413 sizeof (struct mrsas_ctrl_info));
3414 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3415 MR_DCMD_CTRL_GET_INFO);
3416 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3417 instance->internal_buf_dmac_add);
3418 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3419 sizeof (struct mrsas_ctrl_info));
3420
3421 cmd->frame_count = 1;
3422
3423 if (instance->tbolt) {
3424 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3425 }
3426
3427 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3428 ret = 0;
3429
3430 ctrl_info->max_request_size = ddi_get32(
3431 cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
3432
3433 ctrl_info->ld_present_count = ddi_get16(
3434 cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
3435
3436 ctrl_info->properties.on_off_properties = ddi_get32(
3437 cmd->frame_dma_obj.acc_handle,
3438 &ci->properties.on_off_properties);
3439 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3440 (uint8_t *)(ctrl_info->product_name),
3441 (uint8_t *)(ci->product_name), 80 * sizeof (char),
3442 DDI_DEV_AUTOINCR);
3443 /* should get more members of ci with ddi_get when needed */
3444 } else {
3445 cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed");
3446 ret = -1;
3447 }
3448
3449 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3450 ret = -1;
3451 }
3452 if (instance->tbolt) {
3453 return_raid_msg_mfi_pkt(instance, cmd);
3454 } else {
3455 return_mfi_pkt(instance, cmd);
3456 }
3457
3458 return (ret);
3459 }
3460
3461 /*
3462 * abort_aen_cmd
3463 */
3464 static int
3465 abort_aen_cmd(struct mrsas_instance *instance,
3466 struct mrsas_cmd *cmd_to_abort)
3467 {
3468 int ret = 0;
3469
3470 struct mrsas_cmd *cmd;
3471 struct mrsas_abort_frame *abort_fr;
3472
3473 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3474
3475 if (instance->tbolt) {
3476 cmd = get_raid_msg_mfi_pkt(instance);
3477 } else {
3478 cmd = get_mfi_pkt(instance);
3479 }
3480
3481 if (!cmd) {
3482 con_log(CL_ANN1, (CE_WARN,
3483 "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
3484 DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
3485 uint16_t, instance->max_fw_cmds);
3486 return (DDI_FAILURE);
3487 }
3488
3489 /* Clear the frame buffer and assign back the context id */
3490 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3491 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3492 cmd->index);
3493
3494 abort_fr = &cmd->frame->abort;
3495
3496 /* prepare and issue the abort frame */
3497 ddi_put8(cmd->frame_dma_obj.acc_handle,
3498 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3499 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3500 MFI_CMD_STATUS_SYNC_MODE);
3501 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3502 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3503 cmd_to_abort->index);
3504 ddi_put32(cmd->frame_dma_obj.acc_handle,
3505 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3506 ddi_put32(cmd->frame_dma_obj.acc_handle,
3507 &abort_fr->abort_mfi_phys_addr_hi, 0);
3508
3509 instance->aen_cmd->abort_aen = 1;
3510
3511 /* cmd->sync_cmd = MRSAS_TRUE; */ /* KEBE ASKS, inherit? */
3512 cmd->frame_count = 1;
3513
3514 if (instance->tbolt) {
3515 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3516 }
3517
3518 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3519 con_log(CL_ANN1, (CE_WARN,
3520 "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3521 ret = -1;
3522 } else {
3523 ret = 0;
3524 }
3525
3526 instance->aen_cmd->abort_aen = 1;
3527 instance->aen_cmd = 0;
3528
3529 if (instance->tbolt) {
3530 return_raid_msg_mfi_pkt(instance, cmd);
3531 } else {
3532 return_mfi_pkt(instance, cmd);
3533 }
3534
3535 atomic_add_16(&instance->fw_outstanding, (-1));
3536
3537 return (ret);
3538 }
3539
3540
/*
 * mrsas_build_init_cmd - fill in an MFI_CMD_OP_INIT frame for the command
 * in *cmd_ptr.  The queue-info structure is embedded in the frame's SGL
 * area (64 bytes into the frame), and the driver-version string is staged
 * in a freshly allocated DMA buffer whose physical address is placed in
 * the frame.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the driver-version DMA buffer
 * cannot be allocated.  On success, instance->drv_ver_dma_obj is owned by
 * the caller and must eventually be freed with mrsas_free_dma_obj().
 */
static int
mrsas_build_init_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd **cmd_ptr)
{
	struct mrsas_cmd *cmd;
	struct mrsas_init_frame *init_frame;
	struct mrsas_init_queue_info *initq_info;
	struct mrsas_drv_ver drv_ver_info;


	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	cmd = *cmd_ptr;


	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	/* queue info occupies the SGL space, 64 bytes into the frame */
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	/* the reply queue holds one more slot than max outstanding cmds */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);

	/*
	 * mfi_internal_dma_obj layout: producer index at offset 0,
	 * consumer index at offset 4, reply queue starting at offset 8.
	 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	/* physical address of the queue info embedded at frame + 64 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);


	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "init_mfi : Could not allocate driver version buffer."));
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	(void) memset(instance->drv_ver_dma_obj.buffer, 0,
	    sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);


	/* copy driver version physical address to init frame */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &init_frame->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	*cmd_ptr = cmd;

	return (DDI_SUCCESS);
}
3646
3647
3648 /*
3649 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3650 */
3651 int
3652 mrsas_init_adapter_ppc(struct mrsas_instance *instance)
3653 {
3654 struct mrsas_cmd *cmd;
3655
3656 /*
3657 * allocate memory for mfi adapter(cmd pool, individual commands, mfi
3658 * frames etc
3659 */
3660 if (alloc_space_for_mfi(instance) != DDI_SUCCESS) {
3661 con_log(CL_ANN, (CE_NOTE,
3662 "Error, failed to allocate memory for MFI adapter"));
3663 return (DDI_FAILURE);
3664 }
3665
3666 /* Build INIT command */
3667 cmd = get_mfi_pkt(instance);
3668
3669 if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
3670 con_log(CL_ANN,
3671 (CE_NOTE, "Error, failed to build INIT command"));
3672
3673 goto fail_undo_alloc_mfi_space;
3674 }
3675
3676 /*
3677 * Disable interrupt before sending init frame ( see linux driver code)
3678 * send INIT MFI frame in polled mode
3679 */
3680 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3681 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3682 goto fail_fw_init;
3683 }
3684
3685 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
3686 goto fail_fw_init;
3687 /* return_mfi_pkt(instance, cmd); */ /* XXX KEBE ASKS, inherit? */
3688
3689 if (ctio_enable &&
3690 (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
3691 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3692 instance->flag_ieee = 1;
3693 } else {
3694 instance->flag_ieee = 0;
3695 }
3696
3697 instance->unroll.alloc_space_mfi = 1;
3698 instance->unroll.verBuff = 1;
3699
3700 return (DDI_SUCCESS);
3701
3702
3703 fail_fw_init:
3704 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3705
3706 fail_undo_alloc_mfi_space:
3707 return_mfi_pkt(instance, cmd);
3708 free_space_for_mfi(instance);
3709
3710 return (DDI_FAILURE);
3711
3712 }
3713
3714 /*
3715 * mrsas_init_adapter - Initialize adapter.
3716 */
3717 int
3718 mrsas_init_adapter(struct mrsas_instance *instance)
3719 {
3720 struct mrsas_ctrl_info ctrl_info;
3721
3722
3723 /* we expect the FW state to be READY */
3724 if (mfi_state_transition_to_ready(instance)) {
3725 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3726 return (DDI_FAILURE);
3727 }
3728
3729 /* get various operational parameters from status register */
3730 instance->max_num_sge =
3731 (instance->func_ptr->read_fw_status_reg(instance) &
3732 0xFF0000) >> 0x10;
3733 instance->max_num_sge =
3734 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3735 MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3736
3737 /*
3738 * Reduce the max supported cmds by 1. This is to ensure that the
3739 * reply_q_sz (1 more than the max cmd that driver may send)
3740 * does not exceed max cmds that the FW can support
3741 */
3742 instance->max_fw_cmds =
3743 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3744 instance->max_fw_cmds = instance->max_fw_cmds - 1;
3745
3746
3747
3748 /* Initialize adapter */
3749 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3750 con_log(CL_ANN,
3751 (CE_WARN, "mr_sas: could not initialize adapter"));
3752 return (DDI_FAILURE);
3753 }
3754
3755 /* gather misc FW related information */
3756 instance->disable_online_ctrl_reset = 0;
3757
3758 if (!get_ctrl_info(instance, &ctrl_info)) {
3759 instance->max_sectors_per_req = ctrl_info.max_request_size;
3760 con_log(CL_ANN1, (CE_NOTE,
3761 "product name %s ld present %d",
3762 ctrl_info.product_name, ctrl_info.ld_present_count));
3763 } else {
3764 instance->max_sectors_per_req = instance->max_num_sge *
3765 PAGESIZE / 512;
3766 }
3767
3768 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG) {
3769 instance->disable_online_ctrl_reset = 1;
3770 con_log(CL_ANN1,
3771 (CE_NOTE, "Disable online control Flag is set\n"));
3772 } else {
3773 con_log(CL_ANN1,
3774 (CE_NOTE, "Disable online control Flag is not set\n"));
3775 }
3776
3777 return (DDI_SUCCESS);
3778
3779 }
3780
3781
3782
3783 static int
3784 mrsas_issue_init_mfi(struct mrsas_instance *instance)
3785 {
3786 struct mrsas_cmd *cmd;
3787 struct mrsas_init_frame *init_frame;
3788 struct mrsas_init_queue_info *initq_info;
3789
3790 /*
3791 * Prepare a init frame. Note the init frame points to queue info
3792 * structure. Each frame has SGL allocated after first 64 bytes. For
3793 * this frame - since we don't need any SGL - we use SGL's space as
3794 * queue info structure
3795 */
3796 con_log(CL_ANN1, (CE_NOTE,
3797 "mrsas_issue_init_mfi: entry\n"));
3798 cmd = get_mfi_app_pkt(instance);
3799
3800 if (!cmd) {
3801 con_log(CL_ANN1, (CE_WARN,
3802 "mrsas_issue_init_mfi: get_pkt failed\n"));
3803 return (DDI_FAILURE);
3804 }
3805
3806 /* Clear the frame buffer and assign back the context id */
3807 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3808 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3809 cmd->index);
3810
3811 init_frame = (struct mrsas_init_frame *)cmd->frame;
3812 initq_info = (struct mrsas_init_queue_info *)
3813 ((unsigned long)init_frame + 64);
3814
3815 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3816 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3817
3818 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3819
3820 ddi_put32(cmd->frame_dma_obj.acc_handle,
3821 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3822 ddi_put32(cmd->frame_dma_obj.acc_handle,
3823 &initq_info->producer_index_phys_addr_hi, 0);
3824 ddi_put32(cmd->frame_dma_obj.acc_handle,
3825 &initq_info->producer_index_phys_addr_lo,
3826 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3827 ddi_put32(cmd->frame_dma_obj.acc_handle,
3828 &initq_info->consumer_index_phys_addr_hi, 0);
3829 ddi_put32(cmd->frame_dma_obj.acc_handle,
3830 &initq_info->consumer_index_phys_addr_lo,
3831 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3832
3833 ddi_put32(cmd->frame_dma_obj.acc_handle,
3834 &initq_info->reply_queue_start_phys_addr_hi, 0);
3835 ddi_put32(cmd->frame_dma_obj.acc_handle,
3836 &initq_info->reply_queue_start_phys_addr_lo,
3837 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3838
3839 ddi_put8(cmd->frame_dma_obj.acc_handle,
3840 &init_frame->cmd, MFI_CMD_OP_INIT);
3841 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3842 MFI_CMD_STATUS_POLL_MODE);
3843 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3844 ddi_put32(cmd->frame_dma_obj.acc_handle,
3845 &init_frame->queue_info_new_phys_addr_lo,
3846 cmd->frame_phys_addr + 64);
3847 ddi_put32(cmd->frame_dma_obj.acc_handle,
3848 &init_frame->queue_info_new_phys_addr_hi, 0);
3849
3850 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3851 sizeof (struct mrsas_init_queue_info));
3852
3853 cmd->frame_count = 1;
3854
3855 /* issue the init frame in polled mode */
3856 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3857 con_log(CL_ANN1, (CE_WARN,
3858 "mrsas_issue_init_mfi():failed to "
3859 "init firmware"));
3860 return_mfi_app_pkt(instance, cmd);
3861 return (DDI_FAILURE);
3862 }
3863
3864 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3865 return_mfi_pkt(instance, cmd);
3866 return (DDI_FAILURE);
3867 }
3868
3869 return_mfi_app_pkt(instance, cmd);
3870 con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));
3871
3872 return (DDI_SUCCESS);
3873 }
3874 /*
3875 * mfi_state_transition_to_ready : Move the FW to READY state
3876 *
3877 * @reg_set : MFI register set
3878 */
3879 int
3880 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3881 {
3882 int i;
3883 uint8_t max_wait;
3884 uint32_t fw_ctrl = 0;
3885 uint32_t fw_state;
3886 uint32_t cur_state;
3887 uint32_t cur_abs_reg_val;
3888 uint32_t prev_abs_reg_val;
3889 uint32_t status;
3890
3891 cur_abs_reg_val =
3892 instance->func_ptr->read_fw_status_reg(instance);
3893 fw_state =
3894 cur_abs_reg_val & MFI_STATE_MASK;
3895 con_log(CL_ANN1, (CE_CONT,
3896 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3897
3898 while (fw_state != MFI_STATE_READY) {
3899 con_log(CL_ANN, (CE_CONT,
3900 "mfi_state_transition_to_ready:FW state%x", fw_state));
3901
3902 switch (fw_state) {
3903 case MFI_STATE_FAULT:
3904 con_log(CL_ANN, (CE_NOTE,
3905 "mr_sas: FW in FAULT state!!"));
3906
3907 return (ENODEV);
3908 case MFI_STATE_WAIT_HANDSHAKE:
3909 /* set the CLR bit in IMR0 */
3910 con_log(CL_ANN1, (CE_NOTE,
3911 "mr_sas: FW waiting for HANDSHAKE"));
3912 /*
3913 * PCI_Hot Plug: MFI F/W requires
3914 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3915 * to be set
3916 */
3917 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3918 if (!instance->tbolt) {
3919 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3920 MFI_INIT_HOTPLUG, instance);
3921 } else {
3922 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3923 MFI_INIT_HOTPLUG, instance);
3924 }
3925 max_wait = (instance->tbolt == 1) ? 180 : 2;
3926 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3927 break;
3928 case MFI_STATE_BOOT_MESSAGE_PENDING:
3929 /* set the CLR bit in IMR0 */
3930 con_log(CL_ANN1, (CE_NOTE,
3931 "mr_sas: FW state boot message pending"));
3932 /*
3933 * PCI_Hot Plug: MFI F/W requires
3934 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3935 * to be set
3936 */
3937 if (!instance->tbolt) {
3938 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3939 } else {
3940 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3941 instance);
3942 }
3943 max_wait = (instance->tbolt == 1) ? 180 : 10;
3944 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3945 break;
3946 case MFI_STATE_OPERATIONAL:
3947 /* bring it to READY state; assuming max wait 2 secs */
3948 instance->func_ptr->disable_intr(instance);
3949 con_log(CL_ANN1, (CE_NOTE,
3950 "mr_sas: FW in OPERATIONAL state"));
3951 /*
3952 * PCI_Hot Plug: MFI F/W requires
3953 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3954 * to be set
3955 */
3956 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3957 if (!instance->tbolt) {
3958 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3959 } else {
3960 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3961 instance);
3962
3963 for (i = 0; i < (10 * 1000); i++) {
3964 status =
3965 RD_RESERVED0_REGISTER(instance);
3966 if (status & 1) {
3967 delay(1 *
3968 drv_usectohz(MILLISEC));
3969 } else {
3970 break;
3971 }
3972 }
3973
3974 }
3975 max_wait = (instance->tbolt == 1) ? 180 : 10;
3976 cur_state = MFI_STATE_OPERATIONAL;
3977 break;
3978 case MFI_STATE_UNDEFINED:
3979 /* this state should not last for more than 2 seconds */
3980 con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3981
3982 max_wait = (instance->tbolt == 1) ? 180 : 2;
3983 cur_state = MFI_STATE_UNDEFINED;
3984 break;
3985 case MFI_STATE_BB_INIT:
3986 max_wait = (instance->tbolt == 1) ? 180 : 2;
3987 cur_state = MFI_STATE_BB_INIT;
3988 break;
3989 case MFI_STATE_FW_INIT:
3990 max_wait = (instance->tbolt == 1) ? 180 : 2;
3991 cur_state = MFI_STATE_FW_INIT;
3992 break;
3993 case MFI_STATE_FW_INIT_2:
3994 max_wait = 180;
3995 cur_state = MFI_STATE_FW_INIT_2;
3996 break;
3997 case MFI_STATE_DEVICE_SCAN:
3998 max_wait = 180;
3999 cur_state = MFI_STATE_DEVICE_SCAN;
4000 prev_abs_reg_val = cur_abs_reg_val;
4001 con_log(CL_NONE, (CE_NOTE,
4002 "Device scan in progress ...\n"));
4003 break;
4004 case MFI_STATE_FLUSH_CACHE:
4005 max_wait = 180;
4006 cur_state = MFI_STATE_FLUSH_CACHE;
4007 break;
4008 default:
4009 con_log(CL_ANN1, (CE_NOTE,
4010 "mr_sas: Unknown state 0x%x", fw_state));
4011 return (ENODEV);
4012 }
4013
4014 /* the cur_state should not last for more than max_wait secs */
4015 for (i = 0; i < (max_wait * MILLISEC); i++) {
4016 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
4017 cur_abs_reg_val =
4018 instance->func_ptr->read_fw_status_reg(instance);
4019 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4020
4021 if (fw_state == cur_state) {
4022 delay(1 * drv_usectohz(MILLISEC));
4023 } else {
4024 break;
4025 }
4026 }
4027 if (fw_state == MFI_STATE_DEVICE_SCAN) {
4028 if (prev_abs_reg_val != cur_abs_reg_val) {
4029 continue;
4030 }
4031 }
4032
4033 /* return error if fw_state hasn't changed after max_wait */
4034 if (fw_state == cur_state) {
4035 con_log(CL_ANN1, (CE_WARN,
4036 "FW state hasn't changed in %d secs", max_wait));
4037 return (ENODEV);
4038 }
4039 };
4040
4041 if (!instance->tbolt) {
4042 fw_ctrl = RD_IB_DOORBELL(instance);
4043 con_log(CL_ANN1, (CE_CONT,
4044 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
4045 }
4046
4047 #if 0 /* XXX KEBE ASKS, remove and use like pre-2208? */
4048 /*
4049 * Write 0xF to the doorbell register to do the following.
4050 * - Abort all outstanding commands (bit 0).
4051 * - Transition from OPERATIONAL to READY state (bit 1).
4052 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
4053 * - Set to release FW to continue running (i.e. BIOS handshake
4054 * (bit 3).
4055 */
4056 if (!instance->tbolt) {
4057 WR_IB_DOORBELL(0xF, instance);
4058 }
4059 #endif
4060 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
4061 return (ENODEV);
4062 }
4063
4064 return (DDI_SUCCESS);
4065 }
4066
4067 /*
4068 * get_seq_num
4069 */
4070 static int
4071 get_seq_num(struct mrsas_instance *instance,
4072 struct mrsas_evt_log_info *eli)
4073 {
4074 int ret = DDI_SUCCESS;
4075
4076 dma_obj_t dcmd_dma_obj;
4077 struct mrsas_cmd *cmd;
4078 struct mrsas_dcmd_frame *dcmd;
4079 struct mrsas_evt_log_info *eli_tmp;
4080 if (instance->tbolt) {
4081 cmd = get_raid_msg_mfi_pkt(instance);
4082 } else {
4083 cmd = get_mfi_pkt(instance);
4084 }
4085
4086 if (!cmd) {
4087 cmn_err(CE_WARN, "mr_sas: failed to get a cmd");
4088 DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
4089 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4090 return (ENOMEM);
4091 }
4092
4093 /* Clear the frame buffer and assign back the context id */
4094 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4095 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4096 cmd->index);
4097
4098 dcmd = &cmd->frame->dcmd;
4099
4100 /* allocate the data transfer buffer */
4101 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
4102 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
4103 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4104 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4105 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
4106 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
4107
4108 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
4109 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
4110 cmn_err(CE_WARN,
4111 "get_seq_num: could not allocate data transfer buffer.");
4112 return (DDI_FAILURE);
4113 }
4114
4115 (void) memset(dcmd_dma_obj.buffer, 0,
4116 sizeof (struct mrsas_evt_log_info));
4117
4118 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4119
4120 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4121 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
4122 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
4123 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4124 MFI_FRAME_DIR_READ);
4125 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4126 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
4127 sizeof (struct mrsas_evt_log_info));
4128 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4129 MR_DCMD_CTRL_EVENT_GET_INFO);
4130 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
4131 sizeof (struct mrsas_evt_log_info));
4132 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
4133 dcmd_dma_obj.dma_cookie[0].dmac_address);
4134
4135 cmd->sync_cmd = MRSAS_TRUE;
4136 cmd->frame_count = 1;
4137
4138 if (instance->tbolt) {
4139 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4140 }
4141
4142 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4143 cmn_err(CE_WARN, "get_seq_num: "
4144 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
4145 ret = DDI_FAILURE;
4146 } else {
4147 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
4148 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
4149 &eli_tmp->newest_seq_num);
4150 ret = DDI_SUCCESS;
4151 }
4152
4153 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
4154 ret = DDI_FAILURE;
4155
4156 if (instance->tbolt) {
4157 return_raid_msg_mfi_pkt(instance, cmd);
4158 } else {
4159 return_mfi_pkt(instance, cmd);
4160 }
4161
4162 return (ret);
4163 }
4164
4165 /*
4166 * start_mfi_aen
4167 */
4168 static int
4169 start_mfi_aen(struct mrsas_instance *instance)
4170 {
4171 int ret = 0;
4172
4173 struct mrsas_evt_log_info eli;
4174 union mrsas_evt_class_locale class_locale;
4175
4176 /* get the latest sequence number from FW */
4177 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
4178
4179 if (get_seq_num(instance, &eli)) {
4180 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
4181 return (-1);
4182 }
4183
4184 /* register AEN with FW for latest sequence number plus 1 */
4185 class_locale.members.reserved = 0;
4186 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
4187 class_locale.members.class = MR_EVT_CLASS_INFO;
4188 class_locale.word = LE_32(class_locale.word);
4189 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
4190 class_locale.word);
4191
4192 if (ret) {
4193 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
4194 return (-1);
4195 }
4196
4197
4198 return (ret);
4199 }
4200
4201 /*
4202 * flush_cache
4203 */
4204 static void
4205 flush_cache(struct mrsas_instance *instance)
4206 {
4207 struct mrsas_cmd *cmd = NULL;
4208 struct mrsas_dcmd_frame *dcmd;
4209 if (instance->tbolt) {
4210 cmd = get_raid_msg_mfi_pkt(instance);
4211 } else {
4212 cmd = get_mfi_pkt(instance);
4213 }
4214
4215 if (!cmd) {
4216 con_log(CL_ANN1, (CE_WARN,
4217 "flush_cache():Failed to get a cmd for flush_cache"));
4218 DTRACE_PROBE2(flush_cache_err, uint16_t,
4219 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4220 return;
4221 }
4222
4223 /* Clear the frame buffer and assign back the context id */
4224 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4225 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4226 cmd->index);
4227
4228 dcmd = &cmd->frame->dcmd;
4229
4230 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4231
4232 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4233 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
4234 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
4235 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4236 MFI_FRAME_DIR_NONE);
4237 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4238 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4239 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4240 MR_DCMD_CTRL_CACHE_FLUSH);
4241 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4242 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4243
4244 cmd->frame_count = 1;
4245
4246 if (instance->tbolt) {
4247 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4248 }
4249
4250 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4251 con_log(CL_ANN1, (CE_WARN,
4252 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4253 }
4254 con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4255 if (instance->tbolt) {
4256 return_raid_msg_mfi_pkt(instance, cmd);
4257 } else {
4258 return_mfi_pkt(instance, cmd);
4259 }
4260
4261 }
4262
4263 /*
4264 * service_mfi_aen- Completes an AEN command
4265 * @instance: Adapter soft state
4266 * @cmd: Command to be completed
4267 *
4268 */
void
service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	uint32_t seq_num;
	/* Event details are delivered into the pre-allocated AEN DMA buffer. */
	struct mrsas_evt_detail *evt_detail =
	    (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
	int rval = 0;
	int tgt = 0;
	uint8_t dtype;
#ifdef PDSUPPORT
	mrsas_pd_address_t *pd_addr;
#endif
	ddi_acc_handle_t acc_handle;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	acc_handle = cmd->frame_dma_obj.acc_handle;
	cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
	/* Firmware reports ENODATA for a benign "no event" completion. */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas%d: Failed to log AEN event", instance_no));
	}
	/*
	 * Check for any ld devices that has changed state. i.e. online
	 * or offline.
	 */
	con_log(CL_ANN1, (CE_CONT,
	    "AEN: code = %x class = %x locale = %x args = %x",
	    ddi_get32(acc_handle, &evt_detail->code),
	    evt_detail->cl.members.class,
	    ddi_get16(acc_handle, &evt_detail->cl.members.locale),
	    ddi_get8(acc_handle, &evt_detail->arg_type)));

	switch (ddi_get32(acc_handle, &evt_detail->code)) {
	case MR_EVT_CFG_CLEARED: {
		/* Whole config gone: unconfigure every valid LD target. */
		for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
			if (instance->mr_ld_list[tgt].dip != NULL) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_ld_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance, tgt, 0,
				    MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: CFG CLEARED AEN rval = %d "
				    "tgt id = %d", rval, tgt));
			}
		}
		break;
	}

	case MR_EVT_LD_DELETED: {
		/* Mark the LD invalid, then queue an unconfig taskq event. */
		tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_ld_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
		mutex_exit(&instance->config_dev_mtx);
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_UNCONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_DELETED */

	case MR_EVT_LD_CREATED: {
		/* New LD: queue a config taskq event for the target. */
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_CONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_CREATED */

#ifdef PDSUPPORT
	/* Physical-disk hotplug events; only handled on tbolt controllers. */
	case MR_EVT_PD_REMOVED_EXT: {
		if (instance->tbolt) {
			pd_addr = &evt_detail->args.pd_addr;
			dtype = pd_addr->scsi_dev_type;
			con_log(CL_DLEVEL1, (CE_NOTE,
			    " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
			    " arg_type = %d ", dtype, evt_detail->arg_type));
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_tbolt_pd_list[tgt].flag =
			    (uint8_t)~MRDRV_TGT_VALID;
			mutex_exit(&instance->config_dev_mtx);
			rval = mrsas_service_evt(instance, ddi_get16(
			    acc_handle, &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_REMOVED_EXT */

	case MR_EVT_PD_INSERTED_EXT: {
		/*
		 * NOTE(review): "PD_INSERTEDi_EXT" in the log message below
		 * looks like a typo for "PD_INSERTED_EXT" - left unchanged
		 * here since it is a runtime string.
		 */
		if (instance->tbolt) {
			rval = mrsas_service_evt(instance,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_CONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_INSERTED_EXT */

	case MR_EVT_PD_STATE_CHANGE: {
		/*
		 * A PD leaving PD_SYSTEM is treated as a removal; a PD
		 * entering PD_SYSTEM from UNCONFIGURED_GOOD as an insertion.
		 */
		if (instance->tbolt) {
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			if ((evt_detail->args.pd_state.prevState ==
			    PD_SYSTEM) &&
			    (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_tbolt_pd_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
				    "rval = %d tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
			if ((evt_detail->args.pd_state.prevState
			    == UNCONFIGURED_GOOD) &&
			    (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_CONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: PD_INSERTED: rval = %d "
				    " tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
		}
		break;
	}
#endif

	} /* End of Main Switch */

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
	seq_num++;
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Re-arm the AEN frame to wait for the next event. */
	ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
	ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	/* Reset OCR/timeout bookkeeping before reissuing the command. */
	cmd->retry_count_for_ocr = 0;
	cmd->drv_pkt_time = 0;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}
4456
4457 /*
4458 * complete_cmd_in_sync_mode - Completes an internal command
4459 * @instance: Adapter soft state
4460 * @cmd: Command to be completed
4461 *
4462 * The issue_cmd_in_sync_mode() function waits for a command to complete
4463 * after it issues a command. This function wakes up that waiting routine by
4464 * calling wake_up() on the wait queue.
4465 */
4466 static void
4467 complete_cmd_in_sync_mode(struct mrsas_instance *instance,
4468 struct mrsas_cmd *cmd)
4469 {
4470 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
4471 &cmd->frame->io.cmd_status);
4472
4473 cmd->sync_cmd = MRSAS_FALSE;
4474
4475 con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
4476 (void *)cmd));
4477
4478 mutex_enter(&instance->int_cmd_mtx);
4479 if (cmd->cmd_status == ENODATA) {
4480 cmd->cmd_status = 0;
4481 }
4482 cv_broadcast(&instance->int_cmd_cv);
4483 mutex_exit(&instance->int_cmd_mtx);
4484
4485 }
4486
4487 /*
4488 * Call this function inside mrsas_softintr.
4489 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4490 * @instance: Adapter soft state
4491 */
4492
4493 static uint32_t
4494 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4495 {
4496 uint32_t cur_abs_reg_val;
4497 uint32_t fw_state;
4498
4499 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
4500 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4501 if (fw_state == MFI_STATE_FAULT) {
4502 if (instance->disable_online_ctrl_reset == 1) {
4503 cmn_err(CE_WARN,
4504 "mrsas_initiate_ocr_if_fw_is_faulty: "
4505 "FW in Fault state, detected in ISR: "
4506 "FW doesn't support ocr ");
4507
4508 return (ADAPTER_RESET_NOT_REQUIRED);
4509 } else {
4510 con_log(CL_ANN, (CE_NOTE,
4511 "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault "
4512 "state, detected in ISR: FW supports ocr "));
4513
4514 return (ADAPTER_RESET_REQUIRED);
4515 }
4516 }
4517
4518 return (ADAPTER_RESET_NOT_REQUIRED);
4519 }
4520
4521 /*
4522 * mrsas_softintr - The Software ISR
4523 * @param arg : HBA soft state
4524 *
4525 * called from high-level interrupt if hi-level interrupt are not there,
4526 * otherwise triggered as a soft interrupt
4527 */
static uint_t
mrsas_softintr(struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	struct scsa_cmd *acmd;
	struct mrsas_cmd *cmd;
	struct mlist_head *pos, *next;
	mlist_t process_list;
	struct mrsas_header *hdr;
	struct scsi_arq_status *arqstat;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));

	ASSERT(instance);

	mutex_enter(&instance->completed_pool_mtx);

	/* Nothing was completed since the last run; claim and leave. */
	if (mlist_empty(&instance->completed_pool_list)) {
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_CLAIMED);
	}

	instance->softint_running = 1;

	/*
	 * Splice the completed list onto a private list so the pool
	 * mutex is not held while each command is processed.
	 */
	INIT_LIST_HEAD(&process_list);
	mlist_splice(&instance->completed_pool_list, &process_list);
	INIT_LIST_HEAD(&instance->completed_pool_list);

	mutex_exit(&instance->completed_pool_mtx);

	/* perform all callbacks first, before releasing the SCBs */
	mlist_for_each_safe(pos, next, &process_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		/* synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
		    DDI_SUCCESS) {
			mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
			con_log(CL_ANN1, (CE_WARN,
			    "mrsas_softintr: "
			    "FMA check reports DMA handle failure"));
			/*
			 * NOTE(review): this early return leaves
			 * softint_running set and abandons any commands
			 * still on process_list - confirm intended.
			 */
			return (DDI_INTR_CLAIMED);
		}

		hdr = &cmd->frame->hdr;

		/* remove the internal command from the process list */
		mlist_del_init(&cmd->list);

		/* Dispatch on the MFI opcode of the completed frame. */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
		case MFI_CMD_OP_PD_SCSI:
		case MFI_CMD_OP_LD_SCSI:
		case MFI_CMD_OP_LD_READ:
		case MFI_CMD_OP_LD_WRITE:
			/*
			 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
			 * could have been issued either through an
			 * IO path or an IOCTL path. If it was via IOCTL,
			 * we will send it to internal completion.
			 */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				complete_cmd_in_sync_mode(instance, cmd);
				break;
			}

			/* regular commands */
			acmd = cmd->cmd;
			pkt = CMD2PKT(acmd);

			/* Sync user data back to the CPU for consistent IO. */
			if (acmd->cmd_flags & CFLAG_DMAVALID) {
				if (acmd->cmd_flags & CFLAG_CONSISTENT) {
					(void) ddi_dma_sync(acmd->cmd_dmahandle,
					    acmd->cmd_dma_offset,
					    acmd->cmd_dma_len,
					    DDI_DMA_SYNC_FORCPU);
				}
			}

			/* Assume full success; the status switch may amend. */
			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;
			pkt->pkt_state = STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA | STATE_GOT_STATUS;

			con_log(CL_ANN, (CE_CONT,
			    "CDB[0] = %x completed for %s: size %lx context %x",
			    pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
			    acmd->cmd_dmacount, hdr->context));
			DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
			    uint_t, acmd->cmd_cdblen, ulong_t,
			    acmd->cmd_dmacount);

			if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
				struct scsi_inquiry *inq;

				if (acmd->cmd_dmacount != 0) {
					bp_mapin(acmd->cmd_buf);
					inq = (struct scsi_inquiry *)
					    acmd->cmd_buf->b_un.b_addr;

					/* don't expose physical drives to OS */
					if (acmd->islogical &&
					    (hdr->cmd_status == MFI_STAT_OK)) {
						display_scsi_inquiry(
						    (caddr_t)inq);
					} else if ((hdr->cmd_status ==
					    MFI_STAT_OK) && inq->inq_dtype ==
					    DTYPE_DIRECT) {

						display_scsi_inquiry(
						    (caddr_t)inq);

						/* for physical disk */
						hdr->cmd_status =
						    MFI_STAT_DEVICE_NOT_FOUND;
					}
				}
			}

			DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
			    uint8_t, hdr->cmd_status);

			/*
			 * Map the MFI completion status onto the SCSA
			 * pkt_reason / pkt_scbp fields.
			 * NOTE(review): hdr->cmd_status is read directly
			 * here rather than via ddi_get8 as elsewhere -
			 * confirm the frame access attributes allow it.
			 */
			switch (hdr->cmd_status) {
			case MFI_STAT_OK:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_CC_IN_PROGRESS:
			case MFI_STAT_LD_RECON_IN_PROGRESS:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_INIT_IN_PROGRESS:
				con_log(CL_ANN,
				    (CE_WARN, "Initialization in Progress"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			case MFI_STAT_SCSI_DONE_WITH_ERROR:
				con_log(CL_ANN, (CE_CONT, "scsi_done error"));

				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
					con_log(CL_ANN,
					    (CE_WARN, "TEST_UNIT_READY fail"));
				} else {
					/* Build auto request-sense data. */
					pkt->pkt_state |= STATE_ARQ_DONE;
					arqstat = (void *)(pkt->pkt_scbp);
					arqstat->sts_rqpkt_reason = CMD_CMPLT;
					arqstat->sts_rqpkt_resid = 0;
					arqstat->sts_rqpkt_state |=
					    STATE_GOT_BUS | STATE_GOT_TARGET
					    | STATE_SENT_CMD
					    | STATE_XFERRED_DATA;
					*(uint8_t *)&arqstat->sts_rqpkt_status =
					    STATUS_GOOD;
					ddi_rep_get8(
					    cmd->frame_dma_obj.acc_handle,
					    (uint8_t *)
					    &(arqstat->sts_sensedata),
					    cmd->sense,
					    sizeof (struct scsi_extended_sense),
					    DDI_DEV_AUTOINCR);
				}
				break;
			case MFI_STAT_LD_OFFLINE:
			case MFI_STAT_DEVICE_NOT_FOUND:
				con_log(CL_ANN, (CE_CONT,
				    "mrsas_softintr:device not found error"));
				pkt->pkt_reason = CMD_DEV_GONE;
				pkt->pkt_statistics = STAT_DISCON;
				break;
			case MFI_STAT_LD_LBA_OUT_OF_RANGE:
				/* Synthesize ILLEGAL REQUEST sense data. */
				pkt->pkt_state |= STATE_ARQ_DONE;
				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |= STATE_GOT_BUS
				    | STATE_GOT_TARGET | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;

				arqstat->sts_sensedata.es_valid = 1;
				arqstat->sts_sensedata.es_key =
				    KEY_ILLEGAL_REQUEST;
				arqstat->sts_sensedata.es_class =
				    CLASS_EXTENDED_SENSE;

				/*
				 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
				 * ASC: 0x21h; ASCQ: 0x00h;
				 */
				arqstat->sts_sensedata.es_add_code = 0x21;
				arqstat->sts_sensedata.es_qual_code = 0x00;

				break;

			default:
				con_log(CL_ANN, (CE_CONT, "Unknown status!"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			}

			/* One fewer command outstanding at the firmware. */
			atomic_add_16(&instance->fw_outstanding, (-1));

			(void) mrsas_common_check(instance, cmd);

			if (acmd->cmd_dmahandle) {
				if (mrsas_check_dma_handle(
				    acmd->cmd_dmahandle) != DDI_SUCCESS) {
					ddi_fm_service_impact(instance->dip,
					    DDI_SERVICE_UNAFFECTED);
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics = 0;
				}
			}

			/* Call the callback routine */
			if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
			    pkt->pkt_comp) {

				con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_softintr: "
				    "posting to scsa cmd %p index %x pkt %p "
				    "time %llx", (void *)cmd, cmd->index,
				    (void *)pkt, gethrtime()));
				(*pkt->pkt_comp)(pkt);

			}

			return_mfi_pkt(instance, cmd);
			break;

		case MFI_CMD_OP_SMP:
		case MFI_CMD_OP_STP:
			complete_cmd_in_sync_mode(instance, cmd);
			break;

		case MFI_CMD_OP_DCMD:
			/* see if got an event notification */
			if (ddi_get32(cmd->frame_dma_obj.acc_handle,
			    &cmd->frame->dcmd.opcode) ==
			    MR_DCMD_CTRL_EVENT_WAIT) {
				if ((instance->aen_cmd == cmd) &&
				    (instance->aen_cmd->abort_aen)) {
					con_log(CL_ANN, (CE_WARN,
					    "mrsas_softintr: "
					    "aborted_aen returned"));
				} else {
					/* Handle the event and re-arm AEN. */
					atomic_add_16(&instance->fw_outstanding,
					    (-1));
					service_mfi_aen(instance, cmd);
				}
			} else {
				complete_cmd_in_sync_mode(instance, cmd);
			}

			break;

		case MFI_CMD_OP_ABORT:
			con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
			/*
			 * MFI_CMD_OP_ABORT successfully completed
			 * in the synchronous mode
			 */
			complete_cmd_in_sync_mode(instance, cmd);
			break;

		default:
			/* Unknown opcode: report to FMA and fail the pkt. */
			mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

			if (cmd->pkt != NULL) {
				pkt = cmd->pkt;
				if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
				    pkt->pkt_comp) {

					con_log(CL_ANN1, (CE_CONT, "posting to "
					    "scsa cmd %p index %x pkt %p"
					    "time %llx, default ", (void *)cmd,
					    cmd->index, (void *)pkt,
					    gethrtime()));

					(*pkt->pkt_comp)(pkt);

				}
			}
			con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
			break;
		}
	}

	instance->softint_running = 0;

	return (DDI_INTR_CLAIMED);
}
4834
4835 /*
4836 * mrsas_alloc_dma_obj
4837 *
4838 * Allocate the memory and other resources for an dma object.
4839 */
4840 int
4841 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
4842 uchar_t endian_flags)
4843 {
4844 int i;
4845 size_t alen = 0;
4846 uint_t cookie_cnt;
4847 struct ddi_device_acc_attr tmp_endian_attr;
4848
4849 tmp_endian_attr = endian_attr;
4850 tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
4851 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4852
4853 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
4854 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
4855 if (i != DDI_SUCCESS) {
4856
4857 switch (i) {
4858 case DDI_DMA_BADATTR :
4859 con_log(CL_ANN, (CE_WARN,
4860 "Failed ddi_dma_alloc_handle- Bad attribute"));
4861 break;
4862 case DDI_DMA_NORESOURCES :
4863 con_log(CL_ANN, (CE_WARN,
4864 "Failed ddi_dma_alloc_handle- No Resources"));
4865 break;
4866 default :
4867 con_log(CL_ANN, (CE_WARN,
4868 "Failed ddi_dma_alloc_handle: "
4869 "unknown status %d", i));
4870 break;
4871 }
4872
4873 return (-1);
4874 }
4875
4876 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
4877 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4878 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4879 alen < obj->size) {
4880
4881 ddi_dma_free_handle(&obj->dma_handle);
4882
4883 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4884
4885 return (-1);
4886 }
4887 if (obj->dma_handle == NULL) {
4888 /* XXX KEBE ASKS --> fm_service_impact()? */
4889 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4890 return (-1);
4891 }
4892
4893 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4894 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4895 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4896
4897 ddi_dma_mem_free(&obj->acc_handle);
4898 ddi_dma_free_handle(&obj->dma_handle);
4899
4900 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4901
4902 return (-1);
4903 }
4904 if (obj->acc_handle == NULL) {
4905 /* XXX KEBE ASKS --> fm_service_impact()? */
4906 ddi_dma_mem_free(&obj->acc_handle);
4907 ddi_dma_free_handle(&obj->dma_handle);
4908
4909 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4910 return (-1);
4911 }
4912
4913 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
4914 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4915 return (-1);
4916 }
4917
4918 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
4919 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4920 return (-1);
4921 }
4922
4923 return (cookie_cnt);
4924 }
4925
4926 /*
4927 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4928 *
4929 * De-allocate the memory and other resources for an dma object, which must
4930 * have been alloated by a previous call to mrsas_alloc_dma_obj()
4931 */
4932 /* ARGSUSED */
4933 int
4934 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4935 {
4936
4937 if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) {
4938 return (DDI_SUCCESS);
4939 }
4940
4941 /*
4942 * NOTE: These check-handle functions fail if *_handle == NULL, but
4943 * this function succeeds because of the previous check.
4944 */
4945 if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
4946 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4947 return (DDI_FAILURE);
4948 }
4949
4950 if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
4951 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4952 return (DDI_FAILURE);
4953 }
4954
4955 (void) ddi_dma_unbind_handle(obj.dma_handle);
4956 ddi_dma_mem_free(&obj.acc_handle);
4957 ddi_dma_free_handle(&obj.dma_handle);
4958 obj.acc_handle = NULL;
4959 return (DDI_SUCCESS);
4960 }
4961
4962 /*
4963 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
4964 * int, int (*)())
4965 *
4966 * Allocate dma resources for a new scsi command
4967 */
int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int dma_flags;
	int (*cb)(caddr_t);
	int i;

	ddi_dma_attr_t tmp_dma_attr = mrsas_generic_dma_attr;
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* Transfer direction decides the DMA mapping direction. */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	/* NULL_FUNC means the caller cannot block on resources. */
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/* Cap the S/G list at what one MFI frame can describe. */
	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
	if (instance->tbolt) {
		/* OCR-RESET FIX */
		tmp_dma_attr.dma_attr_count_max =
		    (U64)mrsas_tbolt_max_cap_maxxfer; /* limit to 256K */
		tmp_dma_attr.dma_attr_maxxfer =
		    (U64)mrsas_tbolt_max_cap_maxxfer; /* limit to 256K */
	}

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);

		case DDI_DMA_NORESOURCES:
			bioerror(bp, 0);
			return (DDI_FAILURE);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "impossible result (0x%x)", i));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* Partial mapping: set up the first DMA window. */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
			goto no_dma_cookies;
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
			goto no_dma_cookies;
		}

		/* Fall into the shared cookie-harvesting code below. */
		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		/* Whole buffer mapped in a single window. */
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

	get_dma_cookies:
		/*
		 * Collect cookies for this window, up to the adapter's
		 * S/G limit, accumulating the mapped byte count.
		 */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		/* Residual is whatever part of the buffer is not mapped. */
		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (DDI_SUCCESS);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "impossible result (0x%x)", i));
		break;
	}

no_dma_cookies:
	/* Bind failed (or bogus state): release the handle and fail. */
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (DDI_FAILURE);
}
5115
5116 /*
5117 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
5118 *
5119 * move dma resources to next dma window
5120 *
5121 */
int
mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int i = 0;

	struct scsa_cmd *acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
	 */
	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
		/* Single fully-consumed window: nothing left to move. */
		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
			return (DDI_SUCCESS);
		}

		/* at last window, cannot move */
		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
			return (DDI_FAILURE);
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {
			return (DDI_FAILURE);
		}

		acmd->cmd_cookie = 0;
	} else {
		/* still more cookies in this window - get the next one */
		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[0]);
	}

	/* get remaining cookies in this window, up to our maximum */
	for (;;) {
		/*
		 * NOTE(review): cmd_dmacount accumulates across windows
		 * without being reset here, so it tracks the running total
		 * of bytes mapped so far - confirm that is the intent for
		 * the pkt_resid computation below.
		 */
		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
		acmd->cmd_cookie++;

		if (i == instance->max_num_sge ||
		    acmd->cmd_cookie == acmd->cmd_ncookies) {
			break;
		}

		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[i]);
	}

	acmd->cmd_cookiecnt = i;

	/* Residual is whatever part of the buffer is not yet mapped. */
	if (bp->b_bcount >= acmd->cmd_dmacount) {
		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
	} else {
		pkt->pkt_resid = 0;
	}

	return (DDI_SUCCESS);
}
5182
5183 /*
5184 * build_cmd
5185 */
5186 static struct mrsas_cmd *
5187 build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
5188 struct scsi_pkt *pkt, uchar_t *cmd_done)
5189 {
5190 uint16_t flags = 0;
5191 uint32_t i;
5192 uint32_t context;
5193 uint32_t sge_bytes;
5194 uint32_t tmp_data_xfer_len;
5195 ddi_acc_handle_t acc_handle;
5196 struct mrsas_cmd *cmd;
5197 struct mrsas_sge64 *mfi_sgl;
5198 struct mrsas_sge_ieee *mfi_sgl_ieee;
5199 struct scsa_cmd *acmd = PKT2CMD(pkt);
5200 struct mrsas_pthru_frame *pthru;
5201 struct mrsas_io_frame *ldio;
5202
5203 /* find out if this is logical or physical drive command. */
5204 acmd->islogical = MRDRV_IS_LOGICAL(ap);
5205 acmd->device_id = MAP_DEVICE_ID(instance, ap);
5206 *cmd_done = 0;
5207
5208 /* get the command packet */
5209 if (!(cmd = get_mfi_pkt(instance))) {
5210 DTRACE_PROBE2(build_cmd_mfi_err, uint16_t,
5211 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
5212 return (NULL);
5213 }
5214
5215 acc_handle = cmd->frame_dma_obj.acc_handle;
5216
5217 /* Clear the frame buffer and assign back the context id */
5218 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
5219 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);
5220
5221 cmd->pkt = pkt;
5222 cmd->cmd = acmd;
5223 DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0],
5224 ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len);
5225
5226 /* lets get the command directions */
5227 if (acmd->cmd_flags & CFLAG_DMASEND) {
5228 flags = MFI_FRAME_DIR_WRITE;
5229
5230 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
5231 (void) ddi_dma_sync(acmd->cmd_dmahandle,
5232 acmd->cmd_dma_offset, acmd->cmd_dma_len,
5233 DDI_DMA_SYNC_FORDEV);
5234 }
5235 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
5236 flags = MFI_FRAME_DIR_READ;
5237
5238 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
5239 (void) ddi_dma_sync(acmd->cmd_dmahandle,
5240 acmd->cmd_dma_offset, acmd->cmd_dma_len,
5241 DDI_DMA_SYNC_FORCPU);
5242 }
5243 } else {
5244 flags = MFI_FRAME_DIR_NONE;
5245 }
5246
5247 if (instance->flag_ieee) {
5248 flags |= MFI_FRAME_IEEE;
5249 }
5250 flags |= MFI_FRAME_SGL64;
5251
5252 switch (pkt->pkt_cdbp[0]) {
5253
5254 /*
5255 * case SCMD_SYNCHRONIZE_CACHE:
5256 * flush_cache(instance);
5257 * return_mfi_pkt(instance, cmd);
5258 * *cmd_done = 1;
5259 *
5260 * return (NULL);
5261 */
5262
5263 case SCMD_READ:
5264 case SCMD_WRITE:
5265 case SCMD_READ_G1:
5266 case SCMD_WRITE_G1:
5267 case SCMD_READ_G4:
5268 case SCMD_WRITE_G4:
5269 case SCMD_READ_G5:
5270 case SCMD_WRITE_G5:
5271 if (acmd->islogical) {
5272 ldio = (struct mrsas_io_frame *)cmd->frame;
5273
5274 /*
			 * prepare the Logical IO frame:
5276 * 2nd bit is zero for all read cmds
5277 */
5278 ddi_put8(acc_handle, &ldio->cmd,
5279 (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
5280 : MFI_CMD_OP_LD_READ);
5281 ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
5282 ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
5283 ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
5284 ddi_put16(acc_handle, &ldio->timeout, 0);
5285 ddi_put8(acc_handle, &ldio->reserved_0, 0);
5286 ddi_put16(acc_handle, &ldio->pad_0, 0);
5287 ddi_put16(acc_handle, &ldio->flags, flags);
5288
5289 /* Initialize sense Information */
5290 bzero(cmd->sense, SENSE_LENGTH);
5291 ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
5292 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
5293 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
5294 cmd->sense_phys_addr);
5295 ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
5296 ddi_put8(acc_handle, &ldio->access_byte,
5297 (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
5298 ddi_put8(acc_handle, &ldio->sge_count,
5299 acmd->cmd_cookiecnt);
5300 if (instance->flag_ieee) {
5301 mfi_sgl_ieee =
5302 (struct mrsas_sge_ieee *)&ldio->sgl;
5303 } else {
5304 mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl;
5305 }
5306
5307 context = ddi_get32(acc_handle, &ldio->context);
5308
5309 if (acmd->cmd_cdblen == CDB_GROUP0) {
5310 /* 6-byte cdb */
5311 ddi_put32(acc_handle, &ldio->lba_count, (
5312 (uint16_t)(pkt->pkt_cdbp[4])));
5313
5314 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5315 ((uint32_t)(pkt->pkt_cdbp[3])) |
5316 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
5317 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
5318 << 16)));
5319 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
5320 /* 10-byte cdb */
5321 ddi_put32(acc_handle, &ldio->lba_count, (
5322 ((uint16_t)(pkt->pkt_cdbp[8])) |
5323 ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));
5324
5325 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5326 ((uint32_t)(pkt->pkt_cdbp[5])) |
5327 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5328 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5329 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5330 } else if (acmd->cmd_cdblen == CDB_GROUP5) {
5331 /* 12-byte cdb */
5332 ddi_put32(acc_handle, &ldio->lba_count, (
5333 ((uint32_t)(pkt->pkt_cdbp[9])) |
5334 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5335 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5336 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5337
5338 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5339 ((uint32_t)(pkt->pkt_cdbp[5])) |
5340 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5341 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5342 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5343 } else if (acmd->cmd_cdblen == CDB_GROUP4) {
5344 /* 16-byte cdb */
5345 ddi_put32(acc_handle, &ldio->lba_count, (
5346 ((uint32_t)(pkt->pkt_cdbp[13])) |
5347 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
5348 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
5349 ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));
5350
5351 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5352 ((uint32_t)(pkt->pkt_cdbp[9])) |
5353 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5354 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5355 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5356
5357 ddi_put32(acc_handle, &ldio->start_lba_hi, (
5358 ((uint32_t)(pkt->pkt_cdbp[5])) |
5359 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5360 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5361 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5362 }
5363
5364 break;
5365 }
5366 /* fall through For all non-rd/wr cmds */
5367 default:
5368
5369 switch (pkt->pkt_cdbp[0]) {
5370 case SCMD_MODE_SENSE:
5371 case SCMD_MODE_SENSE_G1: {
5372 union scsi_cdb *cdbp;
5373 uint16_t page_code;
5374
5375 cdbp = (void *)pkt->pkt_cdbp;
5376 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
5377 switch (page_code) {
5378 case 0x3:
5379 case 0x4:
5380 (void) mrsas_mode_sense_build(pkt);
5381 return_mfi_pkt(instance, cmd);
5382 *cmd_done = 1;
5383 return (NULL);
5384 }
5385 break;
5386 }
5387 default:
5388 break;
5389 }
5390
5391 pthru = (struct mrsas_pthru_frame *)cmd->frame;
5392
5393 /* prepare the DCDB frame */
5394 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
5395 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
5396 ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
5397 ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
5398 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
5399 ddi_put8(acc_handle, &pthru->lun, 0);
5400 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
5401 ddi_put16(acc_handle, &pthru->timeout, 0);
5402 ddi_put16(acc_handle, &pthru->flags, flags);
5403 tmp_data_xfer_len = 0;
5404 for (i = 0; i < acmd->cmd_cookiecnt; i++) {
5405 tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
5406 }
5407 ddi_put32(acc_handle, &pthru->data_xfer_len,
5408 tmp_data_xfer_len);
5409 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
5410 if (instance->flag_ieee) {
5411 mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
5412 } else {
5413 mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl;
5414 }
5415
5416 bzero(cmd->sense, SENSE_LENGTH);
5417 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5418 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5419 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
5420 cmd->sense_phys_addr);
5421
5422 context = ddi_get32(acc_handle, &pthru->context);
5423 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
5424 (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
5425
5426 break;
5427 }
5428 #ifdef lint
5429 context = context;
5430 #endif
5431 /* prepare the scatter-gather list for the firmware */
5432 if (instance->flag_ieee) {
5433 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
5434 ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
5435 acmd->cmd_dmacookies[i].dmac_laddress);
5436 ddi_put32(acc_handle, &mfi_sgl_ieee->length,
5437 acmd->cmd_dmacookies[i].dmac_size);
5438 }
5439 sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
5440 } else {
5441 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
5442 ddi_put64(acc_handle, &mfi_sgl->phys_addr,
5443 acmd->cmd_dmacookies[i].dmac_laddress);
5444 ddi_put32(acc_handle, &mfi_sgl->length,
5445 acmd->cmd_dmacookies[i].dmac_size);
5446 }
5447 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
5448 }
5449
5450 cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
5451 ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;
5452
5453 if (cmd->frame_count >= 8) {
5454 cmd->frame_count = 8;
5455 }
5456
5457 return (cmd);
5458 }
5459
5460 #ifndef __sparc
5461 /*
5462 * wait_for_outstanding - Wait for all outstanding cmds
5463 * @instance: Adapter soft state
5464 *
5465 * This function waits for upto MRDRV_RESET_WAIT_TIME seconds for FW to
5466 * complete all its outstanding commands. Returns error if one or more IOs
5467 * are pending after this time period.
5468 */
5469 static int
5470 wait_for_outstanding(struct mrsas_instance *instance)
5471 {
5472 int i;
5473 uint32_t wait_time = 90;
5474
5475 for (i = 0; i < wait_time; i++) {
5476 if (!instance->fw_outstanding) {
5477 break;
5478 }
5479
5480 drv_usecwait(MILLISEC); /* wait for 1000 usecs */;
5481 }
5482
5483 if (instance->fw_outstanding) {
5484 return (1);
5485 }
5486
5487 return (0);
5488 }
5489 #endif /* __sparc */
5490
5491 /*
5492 * issue_mfi_pthru
5493 */
5494 static int
5495 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5496 struct mrsas_cmd *cmd, int mode)
5497 {
5498 void *ubuf;
5499 uint32_t kphys_addr = 0;
5500 uint32_t xferlen = 0;
5501 uint32_t new_xfer_length = 0;
5502 uint_t model;
5503 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5504 dma_obj_t pthru_dma_obj;
5505 struct mrsas_pthru_frame *kpthru;
5506 struct mrsas_pthru_frame *pthru;
5507 int i;
5508 pthru = &cmd->frame->pthru;
5509 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5510
5511 if (instance->adapterresetinprogress) {
5512 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, "
5513 "returning mfi_pkt and setting TRAN_BUSY\n"));
5514 return (DDI_FAILURE);
5515 }
5516 model = ddi_model_convert_from(mode & FMODELS);
5517 if (model == DDI_MODEL_ILP32) {
5518 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5519
5520 xferlen = kpthru->sgl.sge32[0].length;
5521
5522 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5523 } else {
5524 #ifdef _ILP32
5525 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5526 xferlen = kpthru->sgl.sge32[0].length;
5527 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5528 #else
5529 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5530 xferlen = kpthru->sgl.sge64[0].length;
5531 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5532 #endif
5533 }
5534
5535 if (xferlen) {
5536 /* means IOCTL requires DMA */
5537 /* allocate the data transfer buffer */
5538 /* pthru_dma_obj.size = xferlen; */
5539 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5540 PAGESIZE);
5541 pthru_dma_obj.size = new_xfer_length;
5542 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5543 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5544 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5545 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5546 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5547
5548 /* allocate kernel buffer for DMA */
5549 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5550 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5551 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5552 "could not allocate data transfer buffer."));
5553 return (DDI_FAILURE);
5554 }
5555 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5556
5557 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5558 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5559 for (i = 0; i < xferlen; i++) {
5560 if (ddi_copyin((uint8_t *)ubuf+i,
5561 (uint8_t *)pthru_dma_obj.buffer+i,
5562 1, mode)) {
5563 con_log(CL_ANN, (CE_WARN,
5564 "issue_mfi_pthru : "
5565 "copy from user space failed"));
5566 return (DDI_FAILURE);
5567 }
5568 }
5569 }
5570
5571 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5572 }
5573
5574 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5575 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5576 ddi_put8(acc_handle, &pthru->cmd_status, 0);
5577 ddi_put8(acc_handle, &pthru->scsi_status, 0);
5578 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5579 ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5580 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5581 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5582 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5583 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5584
5585 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5586 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
5587 /* ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5588
5589 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5590 pthru->cdb_len, DDI_DEV_AUTOINCR);
5591
5592 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5593 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5594 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5595
5596 cmd->sync_cmd = MRSAS_TRUE;
5597 cmd->frame_count = 1;
5598
5599 if (instance->tbolt) {
5600 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5601 }
5602
5603 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5604 con_log(CL_ANN, (CE_WARN,
5605 "issue_mfi_pthru: fw_ioctl failed"));
5606 } else {
5607 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
5608 for (i = 0; i < xferlen; i++) {
5609 if (ddi_copyout(
5610 (uint8_t *)pthru_dma_obj.buffer+i,
5611 (uint8_t *)ubuf+i, 1, mode)) {
5612 con_log(CL_ANN, (CE_WARN,
5613 "issue_mfi_pthru : "
5614 "copy to user space failed"));
5615 return (DDI_FAILURE);
5616 }
5617 }
5618 }
5619 }
5620
5621 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5622 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5623
5624 con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5625 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5626 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t,
5627 kpthru->cmd_status, uint8_t, kpthru->scsi_status);
5628
5629 if (kpthru->sense_len) {
5630 uint_t sense_len = SENSE_LENGTH;
5631 void *sense_ubuf =
5632 (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5633 if (kpthru->sense_len <= SENSE_LENGTH) {
5634 sense_len = kpthru->sense_len;
5635 }
5636
5637 for (i = 0; i < sense_len; i++) {
5638 if (ddi_copyout(
5639 (uint8_t *)cmd->sense+i,
5640 (uint8_t *)sense_ubuf+i, 1, mode)) {
5641 con_log(CL_ANN, (CE_WARN,
5642 "issue_mfi_pthru : "
5643 "copy to user space failed"));
5644 }
5645 con_log(CL_DLEVEL1, (CE_WARN,
5646 "Copying Sense info sense_buff[%d] = 0x%X\n",
5647 i, *((uint8_t *)cmd->sense + i)));
5648 }
5649 }
5650 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5651 DDI_DMA_SYNC_FORDEV);
5652
5653 if (xferlen) {
5654 /* free kernel buffer */
5655 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5656 return (DDI_FAILURE);
5657 }
5658
5659 return (DDI_SUCCESS);
5660 }
5661
5662 /*
5663 * issue_mfi_dcmd
5664 */
5665 static int
5666 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5667 struct mrsas_cmd *cmd, int mode)
5668 {
5669 void *ubuf;
5670 uint32_t kphys_addr = 0;
5671 uint32_t xferlen = 0;
5672 uint32_t new_xfer_length = 0;
5673 uint32_t model;
5674 dma_obj_t dcmd_dma_obj;
5675 struct mrsas_dcmd_frame *kdcmd;
5676 struct mrsas_dcmd_frame *dcmd;
5677 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5678 int i;
5679 dcmd = &cmd->frame->dcmd;
5680 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5681
5682 if (instance->adapterresetinprogress) {
5683 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5684 "returning mfi_pkt and setting TRAN_BUSY\n"));
5685 return (DDI_FAILURE);
5686 }
5687 model = ddi_model_convert_from(mode & FMODELS);
5688 if (model == DDI_MODEL_ILP32) {
5689 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5690
5691 xferlen = kdcmd->sgl.sge32[0].length;
5692
5693 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5694 } else {
5695 #ifdef _ILP32
5696 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5697 xferlen = kdcmd->sgl.sge32[0].length;
5698 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5699 #else
5700 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5701 xferlen = kdcmd->sgl.sge64[0].length;
5702 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5703 #endif
5704 }
5705 if (xferlen) {
5706 /* means IOCTL requires DMA */
5707 /* allocate the data transfer buffer */
5708 /* dcmd_dma_obj.size = xferlen; */
5709 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5710 PAGESIZE);
5711 dcmd_dma_obj.size = new_xfer_length;
5712 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5713 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5714 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5715 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5716 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5717
5718 /* allocate kernel buffer for DMA */
5719 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
5720 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5721 con_log(CL_ANN,
5722 (CE_WARN, "issue_mfi_dcmd: could not "
5723 "allocate data transfer buffer."));
5724 return (DDI_FAILURE);
5725 }
5726 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5727
5728 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5729 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5730 for (i = 0; i < xferlen; i++) {
5731 if (ddi_copyin((uint8_t *)ubuf + i,
5732 (uint8_t *)dcmd_dma_obj.buffer + i,
5733 1, mode)) {
5734 con_log(CL_ANN, (CE_WARN,
5735 "issue_mfi_dcmd : "
5736 "copy from user space failed"));
5737 return (DDI_FAILURE);
5738 }
5739 }
5740 }
5741
5742 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5743 }
5744
5745 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5746 ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5747 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5748 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5749 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5750 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5751
5752 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5753 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5754
5755 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5756 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5757 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5758
5759 cmd->sync_cmd = MRSAS_TRUE;
5760 cmd->frame_count = 1;
5761
5762 if (instance->tbolt) {
5763 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5764 }
5765
5766 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5767 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5768 } else {
5769 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5770 for (i = 0; i < xferlen; i++) {
5771 if (ddi_copyout(
5772 (uint8_t *)dcmd_dma_obj.buffer + i,
5773 (uint8_t *)ubuf + i,
5774 1, mode)) {
5775 con_log(CL_ANN, (CE_WARN,
5776 "issue_mfi_dcmd : "
5777 "copy to user space failed"));
5778 return (DDI_FAILURE);
5779 }
5780 }
5781 }
5782 }
5783
5784 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5785 con_log(CL_ANN,
5786 (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5787 DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t,
5788 kdcmd->cmd, uint8_t, kdcmd->cmd_status);
5789
5790 if (xferlen) {
5791 /* free kernel buffer */
5792 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5793 return (DDI_FAILURE);
5794 }
5795
5796 return (DDI_SUCCESS);
5797 }
5798
5799 /*
5800 * issue_mfi_smp
5801 */
5802 static int
5803 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5804 struct mrsas_cmd *cmd, int mode)
5805 {
5806 void *request_ubuf;
5807 void *response_ubuf;
5808 uint32_t request_xferlen = 0;
5809 uint32_t response_xferlen = 0;
5810 uint32_t new_xfer_length1 = 0;
5811 uint32_t new_xfer_length2 = 0;
5812 uint_t model;
5813 dma_obj_t request_dma_obj;
5814 dma_obj_t response_dma_obj;
5815 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5816 struct mrsas_smp_frame *ksmp;
5817 struct mrsas_smp_frame *smp;
5818 struct mrsas_sge32 *sge32;
5819 #ifndef _ILP32
5820 struct mrsas_sge64 *sge64;
5821 #endif
5822 int i;
5823 uint64_t tmp_sas_addr;
5824
5825 smp = &cmd->frame->smp;
5826 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5827
5828 if (instance->adapterresetinprogress) {
5829 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5830 "returning mfi_pkt and setting TRAN_BUSY\n"));
5831 return (DDI_FAILURE);
5832 }
5833 model = ddi_model_convert_from(mode & FMODELS);
5834 if (model == DDI_MODEL_ILP32) {
5835 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5836
5837 sge32 = &ksmp->sgl[0].sge32[0];
5838 response_xferlen = sge32[0].length;
5839 request_xferlen = sge32[1].length;
5840 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5841 "response_xferlen = %x, request_xferlen = %x",
5842 response_xferlen, request_xferlen));
5843
5844 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5845 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5846 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5847 "response_ubuf = %p, request_ubuf = %p",
5848 response_ubuf, request_ubuf));
5849 } else {
5850 #ifdef _ILP32
5851 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5852
5853 sge32 = &ksmp->sgl[0].sge32[0];
5854 response_xferlen = sge32[0].length;
5855 request_xferlen = sge32[1].length;
5856 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5857 "response_xferlen = %x, request_xferlen = %x",
5858 response_xferlen, request_xferlen));
5859
5860 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5861 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5862 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5863 "response_ubuf = %p, request_ubuf = %p",
5864 response_ubuf, request_ubuf));
5865 #else
5866 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5867
5868 sge64 = &ksmp->sgl[0].sge64[0];
5869 response_xferlen = sge64[0].length;
5870 request_xferlen = sge64[1].length;
5871
5872 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
5873 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
5874 #endif
5875 }
5876 if (request_xferlen) {
5877 /* means IOCTL requires DMA */
5878 /* allocate the data transfer buffer */
5879 /* request_dma_obj.size = request_xferlen; */
5880 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,
5881 new_xfer_length1, PAGESIZE);
5882 request_dma_obj.size = new_xfer_length1;
5883 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5884 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5885 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5886 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5887 request_dma_obj.dma_attr.dma_attr_align = 1;
5888
5889 /* allocate kernel buffer for DMA */
5890 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5891 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5892 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5893 "could not allocate data transfer buffer."));
5894 return (DDI_FAILURE);
5895 }
5896 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5897
5898 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5899 for (i = 0; i < request_xferlen; i++) {
5900 if (ddi_copyin((uint8_t *)request_ubuf + i,
5901 (uint8_t *)request_dma_obj.buffer + i,
5902 1, mode)) {
5903 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5904 "copy from user space failed"));
5905 return (DDI_FAILURE);
5906 }
5907 }
5908 }
5909
5910 if (response_xferlen) {
5911 /* means IOCTL requires DMA */
5912 /* allocate the data transfer buffer */
5913 /* response_dma_obj.size = response_xferlen; */
5914 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,
5915 new_xfer_length2, PAGESIZE);
5916 response_dma_obj.size = new_xfer_length2;
5917 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5918 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5919 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5920 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5921 response_dma_obj.dma_attr.dma_attr_align = 1;
5922
5923 /* allocate kernel buffer for DMA */
5924 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5925 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5926 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5927 "could not allocate data transfer buffer."));
5928 return (DDI_FAILURE);
5929 }
5930 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5931
5932 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5933 for (i = 0; i < response_xferlen; i++) {
5934 if (ddi_copyin((uint8_t *)response_ubuf + i,
5935 (uint8_t *)response_dma_obj.buffer + i,
5936 1, mode)) {
5937 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5938 "copy from user space failed"));
5939 return (DDI_FAILURE);
5940 }
5941 }
5942 }
5943
5944 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5945 ddi_put8(acc_handle, &smp->cmd_status, 0);
5946 ddi_put8(acc_handle, &smp->connection_status, 0);
5947 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5948 /* smp->context = ksmp->context; */
5949 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5950 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5951
5952 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5953 sizeof (uint64_t));
5954 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5955
5956 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5957
5958 model = ddi_model_convert_from(mode & FMODELS);
5959 if (model == DDI_MODEL_ILP32) {
5960 con_log(CL_ANN1, (CE_CONT,
5961 "issue_mfi_smp: DDI_MODEL_ILP32"));
5962
5963 sge32 = &smp->sgl[0].sge32[0];
5964 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5965 ddi_put32(acc_handle, &sge32[0].phys_addr,
5966 response_dma_obj.dma_cookie[0].dmac_address);
5967 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5968 ddi_put32(acc_handle, &sge32[1].phys_addr,
5969 request_dma_obj.dma_cookie[0].dmac_address);
5970 } else {
5971 #ifdef _ILP32
5972 con_log(CL_ANN1, (CE_CONT,
5973 "issue_mfi_smp: DDI_MODEL_ILP32"));
5974 sge32 = &smp->sgl[0].sge32[0];
5975 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5976 ddi_put32(acc_handle, &sge32[0].phys_addr,
5977 response_dma_obj.dma_cookie[0].dmac_address);
5978 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5979 ddi_put32(acc_handle, &sge32[1].phys_addr,
5980 request_dma_obj.dma_cookie[0].dmac_address);
5981 #else
5982 con_log(CL_ANN1, (CE_CONT,
5983 "issue_mfi_smp: DDI_MODEL_LP64"));
5984 sge64 = &smp->sgl[0].sge64[0];
5985 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5986 ddi_put64(acc_handle, &sge64[0].phys_addr,
5987 response_dma_obj.dma_cookie[0].dmac_address);
5988 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5989 ddi_put64(acc_handle, &sge64[1].phys_addr,
5990 request_dma_obj.dma_cookie[0].dmac_address);
5991 #endif
5992 }
5993 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5994 "smp->response_xferlen = %d, smp->request_xferlen = %d "
5995 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5996 ddi_get32(acc_handle, &sge32[1].length),
5997 ddi_get32(acc_handle, &smp->data_xfer_len)));
5998
5999 cmd->sync_cmd = MRSAS_TRUE;
6000 cmd->frame_count = 1;
6001
6002 if (instance->tbolt) {
6003 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6004 }
6005
6006 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
6007 con_log(CL_ANN, (CE_WARN,
6008 "issue_mfi_smp: fw_ioctl failed"));
6009 } else {
6010 con_log(CL_ANN1, (CE_CONT,
6011 "issue_mfi_smp: copy to user space"));
6012
6013 if (request_xferlen) {
6014 for (i = 0; i < request_xferlen; i++) {
6015 if (ddi_copyout(
6016 (uint8_t *)request_dma_obj.buffer +
6017 i, (uint8_t *)request_ubuf + i,
6018 1, mode)) {
6019 con_log(CL_ANN, (CE_WARN,
6020 "issue_mfi_smp : copy to user space"
6021 " failed"));
6022 return (DDI_FAILURE);
6023 }
6024 }
6025 }
6026
6027 if (response_xferlen) {
6028 for (i = 0; i < response_xferlen; i++) {
6029 if (ddi_copyout(
6030 (uint8_t *)response_dma_obj.buffer
6031 + i, (uint8_t *)response_ubuf
6032 + i, 1, mode)) {
6033 con_log(CL_ANN, (CE_WARN,
6034 "issue_mfi_smp : copy to "
6035 "user space failed"));
6036 return (DDI_FAILURE);
6037 }
6038 }
6039 }
6040 }
6041
6042 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
6043 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
6044 ksmp->cmd_status));
6045 DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status);
6046
6047 if (request_xferlen) {
6048 /* free kernel buffer */
6049 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
6050 DDI_SUCCESS)
6051 return (DDI_FAILURE);
6052 }
6053
6054 if (response_xferlen) {
6055 /* free kernel buffer */
6056 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
6057 DDI_SUCCESS)
6058 return (DDI_FAILURE);
6059 }
6060
6061 return (DDI_SUCCESS);
6062 }
6063
6064 /*
6065 * issue_mfi_stp
6066 */
6067 static int
6068 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6069 struct mrsas_cmd *cmd, int mode)
6070 {
6071 void *fis_ubuf;
6072 void *data_ubuf;
6073 uint32_t fis_xferlen = 0;
6074 uint32_t new_xfer_length1 = 0;
6075 uint32_t new_xfer_length2 = 0;
6076 uint32_t data_xferlen = 0;
6077 uint_t model;
6078 dma_obj_t fis_dma_obj;
6079 dma_obj_t data_dma_obj;
6080 struct mrsas_stp_frame *kstp;
6081 struct mrsas_stp_frame *stp;
6082 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
6083 int i;
6084
6085 stp = &cmd->frame->stp;
6086 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
6087
6088 if (instance->adapterresetinprogress) {
6089 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
6090 "returning mfi_pkt and setting TRAN_BUSY\n"));
6091 return (DDI_FAILURE);
6092 }
6093 model = ddi_model_convert_from(mode & FMODELS);
6094 if (model == DDI_MODEL_ILP32) {
6095 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
6096
6097 fis_xferlen = kstp->sgl.sge32[0].length;
6098 data_xferlen = kstp->sgl.sge32[1].length;
6099
6100 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
6101 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
6102 } else {
6103 #ifdef _ILP32
6104 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
6105
6106 fis_xferlen = kstp->sgl.sge32[0].length;
6107 data_xferlen = kstp->sgl.sge32[1].length;
6108
6109 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
6110 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
6111 #else
6112 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
6113
6114 fis_xferlen = kstp->sgl.sge64[0].length;
6115 data_xferlen = kstp->sgl.sge64[1].length;
6116
6117 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
6118 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
6119 #endif
6120 }
6121
6122
6123 if (fis_xferlen) {
6124 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
6125 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
6126
6127 /* means IOCTL requires DMA */
6128 /* allocate the data transfer buffer */
6129 /* fis_dma_obj.size = fis_xferlen; */
6130 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,
6131 new_xfer_length1, PAGESIZE);
6132 fis_dma_obj.size = new_xfer_length1;
6133 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
6134 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6135 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6136 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
6137 fis_dma_obj.dma_attr.dma_attr_align = 1;
6138
6139 /* allocate kernel buffer for DMA */
6140 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
6141 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6142 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
6143 "could not allocate data transfer buffer."));
6144 return (DDI_FAILURE);
6145 }
6146 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
6147
6148 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6149 for (i = 0; i < fis_xferlen; i++) {
6150 if (ddi_copyin((uint8_t *)fis_ubuf + i,
6151 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
6152 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6153 "copy from user space failed"));
6154 return (DDI_FAILURE);
6155 }
6156 }
6157 }
6158
6159 if (data_xferlen) {
6160 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
6161 "data_xferlen = %x", data_ubuf, data_xferlen));
6162
6163 /* means IOCTL requires DMA */
6164 /* allocate the data transfer buffer */
6165 /* data_dma_obj.size = data_xferlen; */
6166 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen, new_xfer_length2,
6167 PAGESIZE);
6168 data_dma_obj.size = new_xfer_length2;
6169 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
6170 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6171 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6172 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
6173 data_dma_obj.dma_attr.dma_attr_align = 1;
6174
6175 /* allocate kernel buffer for DMA */
6176 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
6177 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6178 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6179 "could not allocate data transfer buffer."));
6180 return (DDI_FAILURE);
6181 }
6182 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
6183
6184 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6185 for (i = 0; i < data_xferlen; i++) {
6186 if (ddi_copyin((uint8_t *)data_ubuf + i,
6187 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
6188 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6189 "copy from user space failed"));
6190 return (DDI_FAILURE);
6191 }
6192 }
6193 }
6194
6195 ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
6196 ddi_put8(acc_handle, &stp->cmd_status, 0);
6197 ddi_put8(acc_handle, &stp->connection_status, 0);
6198 ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
6199 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
6200
6201 ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
6202 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
6203
6204 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
6205 DDI_DEV_AUTOINCR);
6206
6207 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
6208 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
6209 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
6210 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
6211 fis_dma_obj.dma_cookie[0].dmac_address);
6212 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
6213 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
6214 data_dma_obj.dma_cookie[0].dmac_address);
6215
6216 cmd->sync_cmd = MRSAS_TRUE;
6217 cmd->frame_count = 1;
6218
6219 if (instance->tbolt) {
6220 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6221 }
6222
6223 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
6224 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
6225 } else {
6226
6227 if (fis_xferlen) {
6228 for (i = 0; i < fis_xferlen; i++) {
6229 if (ddi_copyout(
6230 (uint8_t *)fis_dma_obj.buffer + i,
6231 (uint8_t *)fis_ubuf + i, 1, mode)) {
6232 con_log(CL_ANN, (CE_WARN,
6233 "issue_mfi_stp : copy to "
6234 "user space failed"));
6235 return (DDI_FAILURE);
6236 }
6237 }
6238 }
6239 }
6240 if (data_xferlen) {
6241 for (i = 0; i < data_xferlen; i++) {
6242 if (ddi_copyout(
6243 (uint8_t *)data_dma_obj.buffer + i,
6244 (uint8_t *)data_ubuf + i, 1, mode)) {
6245 con_log(CL_ANN, (CE_WARN,
6246 "issue_mfi_stp : copy to"
6247 " user space failed"));
6248 return (DDI_FAILURE);
6249 }
6250 }
6251 }
6252
6253 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
6254 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
6255 kstp->cmd_status));
6256 DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status);
6257
6258 if (fis_xferlen) {
6259 /* free kernel buffer */
6260 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
6261 return (DDI_FAILURE);
6262 }
6263
6264 if (data_xferlen) {
6265 /* free kernel buffer */
6266 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
6267 return (DDI_FAILURE);
6268 }
6269
6270 return (DDI_SUCCESS);
6271 }
6272
6273 /*
6274 * fill_up_drv_ver
6275 */
6276 void
6277 fill_up_drv_ver(struct mrsas_drv_ver *dv)
6278 {
6279 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
6280
6281 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
6282 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
6283 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
6284 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
6285 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
6286 strlen(MRSAS_RELDATE));
6287
6288 }
6289
6290 /*
6291 * handle_drv_ioctl
6292 */
6293 static int
6294 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6295 int mode)
6296 {
6297 int i;
6298 int rval = DDI_SUCCESS;
6299 int *props = NULL;
6300 void *ubuf;
6301
6302 uint8_t *pci_conf_buf;
6303 uint32_t xferlen;
6304 uint32_t num_props;
6305 uint_t model;
6306 struct mrsas_dcmd_frame *kdcmd;
6307 struct mrsas_drv_ver dv;
6308 struct mrsas_pci_information pi;
6309
6310 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
6311
6312 model = ddi_model_convert_from(mode & FMODELS);
6313 if (model == DDI_MODEL_ILP32) {
6314 con_log(CL_ANN1, (CE_CONT,
6315 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6316
6317 xferlen = kdcmd->sgl.sge32[0].length;
6318
6319 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6320 } else {
6321 #ifdef _ILP32
6322 con_log(CL_ANN1, (CE_CONT,
6323 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6324 xferlen = kdcmd->sgl.sge32[0].length;
6325 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6326 #else
6327 con_log(CL_ANN1, (CE_CONT,
6328 "handle_drv_ioctl: DDI_MODEL_LP64"));
6329 xferlen = kdcmd->sgl.sge64[0].length;
6330 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6331 #endif
6332 }
6333 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6334 "dataBuf=%p size=%d bytes", ubuf, xferlen));
6335
6336 switch (kdcmd->opcode) {
6337 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6338 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6339 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6340
6341 fill_up_drv_ver(&dv);
6342
6343 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6344 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6345 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6346 "copy to user space failed"));
6347 kdcmd->cmd_status = 1;
6348 rval = 1;
6349 } else {
6350 kdcmd->cmd_status = 0;
6351 }
6352 break;
6353 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6354 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
6355 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
6356
6357 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6358 0, "reg", &props, &num_props)) {
6359 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6360 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6361 "ddi_prop_look_int_array failed"));
6362 rval = DDI_FAILURE;
6363 } else {
6364
6365 pi.busNumber = (props[0] >> 16) & 0xFF;
6366 pi.deviceNumber = (props[0] >> 11) & 0x1f;
6367 pi.functionNumber = (props[0] >> 8) & 0x7;
6368 ddi_prop_free((void *)props);
6369 }
6370
6371 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6372
6373 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6374 offsetof(struct mrsas_pci_information, pciHeaderInfo));
6375 i++) {
6376 pci_conf_buf[i] =
6377 pci_config_get8(instance->pci_handle, i);
6378 }
6379
6380 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6381 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6382 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6383 "copy to user space failed"));
6384 kdcmd->cmd_status = 1;
6385 rval = 1;
6386 } else {
6387 kdcmd->cmd_status = 0;
6388 }
6389 break;
6390 default:
6391 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6392 "invalid driver specific IOCTL opcode = 0x%x",
6393 kdcmd->opcode));
6394 kdcmd->cmd_status = 1;
6395 rval = DDI_FAILURE;
6396 break;
6397 }
6398
6399 return (rval);
6400 }
6401
6402 /*
6403 * handle_mfi_ioctl
6404 */
6405 static int
6406 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6407 int mode)
6408 {
6409 int rval = DDI_SUCCESS;
6410
6411 struct mrsas_header *hdr;
6412 struct mrsas_cmd *cmd;
6413
6414 if (instance->tbolt) {
6415 cmd = get_raid_msg_mfi_pkt(instance);
6416 } else {
6417 cmd = get_mfi_pkt(instance);
6418 }
6419 if (!cmd) {
6420 con_log(CL_ANN, (CE_WARN, "mr_sas: "
6421 "failed to get a cmd packet"));
6422 DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
6423 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
6424 return (DDI_FAILURE);
6425 }
6426
6427 /* Clear the frame buffer and assign back the context id */
6428 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6429 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6430 cmd->index);
6431
6432 hdr = (struct mrsas_header *)&ioctl->frame[0];
6433
6434 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
6435 case MFI_CMD_OP_DCMD:
6436 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
6437 break;
6438 case MFI_CMD_OP_SMP:
6439 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
6440 break;
6441 case MFI_CMD_OP_STP:
6442 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
6443 break;
6444 case MFI_CMD_OP_LD_SCSI:
6445 case MFI_CMD_OP_PD_SCSI:
6446 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
6447 break;
6448 default:
6449 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
6450 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
6451 rval = DDI_FAILURE;
6452 break;
6453 }
6454
6455 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
6456 rval = DDI_FAILURE;
6457
6458 if (instance->tbolt) {
6459 return_raid_msg_mfi_pkt(instance, cmd);
6460 } else {
6461 return_mfi_pkt(instance, cmd);
6462 }
6463
6464 return (rval);
6465 }
6466
6467 /*
6468 * AEN
6469 */
6470 static int
6471 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6472 {
6473 int rval = 0;
6474
6475 rval = register_mfi_aen(instance, instance->aen_seq_num,
6476 aen->class_locale_word);
6477
6478 aen->cmd_status = (uint8_t)rval;
6479
6480 return (rval);
6481 }
6482
/*
 * register_mfi_aen
 *
 * Register with the firmware for asynchronous event notification,
 * starting at seq_num with the supplied class/locale filter.  If an AEN
 * command is already outstanding and its filter covers the new request,
 * nothing is done; otherwise the old command is aborted and a new
 * MR_DCMD_CTRL_EVENT_WAIT is issued with the merged filter.
 *
 * Returns 0 on success, non-zero (errno-style or abort status) on
 * failure.
 */
static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct mrsas_cmd *cmd, *aen_cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	/* Convert the wire (little-endian) filter to host order. */
	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		/* Read the filter the outstanding AEN was issued with. */
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* Merge: union of locales, lower (wider) class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		/* No AEN outstanding; use the requested filter as-is. */
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = get_mfi_pkt(instance);
	}

	if (!cmd) {
		DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (ENOMEM);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
	/* Swap the merged filter back to wire order for mbox.w[1]. */
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	curr_aen.word = LE_32(curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
	    curr_aen.word);
	/* Event details land in the preallocated evt_detail DMA buffer. */
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_detail));

	instance->aen_seq_num = seq_num;


	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16 (&instance->fw_outstanding, 1); */
	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}
6622
6623 void
6624 display_scsi_inquiry(caddr_t scsi_inq)
6625 {
6626 #define MAX_SCSI_DEVICE_CODE 14
6627 int i;
6628 char inquiry_buf[256] = {0};
6629 int len;
6630 const char *const scsi_device_types[] = {
6631 "Direct-Access ",
6632 "Sequential-Access",
6633 "Printer ",
6634 "Processor ",
6635 "WORM ",
6636 "CD-ROM ",
6637 "Scanner ",
6638 "Optical Device ",
6639 "Medium Changer ",
6640 "Communications ",
6641 "Unknown ",
6642 "Unknown ",
6643 "Unknown ",
6644 "Enclosure ",
6645 };
6646
6647 len = 0;
6648
6649 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: ");
6650 for (i = 8; i < 16; i++) {
6651 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6652 scsi_inq[i]);
6653 }
6654
6655 len += snprintf(inquiry_buf + len, 265 - len, " Model: ");
6656
6657 for (i = 16; i < 32; i++) {
6658 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6659 scsi_inq[i]);
6660 }
6661
6662 len += snprintf(inquiry_buf + len, 265 - len, " Rev: ");
6663
6664 for (i = 32; i < 36; i++) {
6665 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6666 scsi_inq[i]);
6667 }
6668
6669 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6670
6671
6672 i = scsi_inq[0] & 0x1f;
6673
6674
6675 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ",
6676 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6677 "Unknown ");
6678
6679
6680 len += snprintf(inquiry_buf + len, 265 - len,
6681 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6682
6683 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6684 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
6685 } else {
6686 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6687 }
6688
6689 con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf));
6690 }
6691
/*
 * io_timeout_checker
 *
 * Periodic (1 Hz) watchdog.  Skips a cycle while an adapter reset is in
 * progress; triggers online controller reset (OCR) if the firmware is
 * found faulted; otherwise decrements every pending command's
 * drv_pkt_time and, on the first expired command, either resets the
 * adapter (up to IO_RETRY_COUNT attempts) or kills it.  Reschedules
 * itself unless the adapter was killed.
 */
static void
io_timeout_checker(void *arg)
{
	struct scsi_pkt *pkt;
	struct mrsas_instance *instance = arg;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	int time = 0;
	int counter = 0;	/* non-zero once a timed-out command is found */
	struct mlist_head *pos, *next;
	mlist_t process_list;

	/* Do not age commands while an OCR is underway; try again later. */
	if (instance->adapterresetinprogress == 1) {
		con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
		    " reset in progress"));

		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		return;
	}

	/* See if this check needs to be in the beginning or last in ISR */
	if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
		cmn_err(CE_WARN, "io_timeout_checker: "
		    "FW Fault, calling reset adapter");
		cmn_err(CE_CONT, "io_timeout_checker: "
		    "fw_outstanding 0x%X max_fw_cmds 0x%X",
		    instance->fw_outstanding, instance->max_fw_cmds);
		if (instance->adapterresetinprogress == 0) {
			instance->adapterresetinprogress = 1;
			if (instance->tbolt)
				(void) mrsas_tbolt_reset_ppc(instance);
			else
				(void) mrsas_reset_ppc(instance);
			instance->adapterresetinprogress = 0;
		}
		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		return;
	}

	/* NOTE(review): process_list is initialized but never used here. */
	INIT_LIST_HEAD(&process_list);

	/* Age every pending command; stop at the first one that expired. */
	mutex_enter(&instance->cmd_pend_mtx);
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		if (cmd == NULL) {
			continue;
		}

		if (cmd->sync_cmd == MRSAS_TRUE) {
			/* Internal (sync) command: timed via its frame. */
			hdr = (struct mrsas_header *)&cmd->frame->hdr;
			if (hdr == NULL) {
				continue;
			}
			time = --cmd->drv_pkt_time;
		} else {
			/* Regular I/O: timed via its scsi_pkt. */
			pkt = cmd->pkt;
			if (pkt == NULL) {
				continue;
			}
			time = --cmd->drv_pkt_time;
		}
		if (time <= 0) {
			cmn_err(CE_WARN, "%llx: "
			    "io_timeout_checker: TIMING OUT: pkt: %p, "
			    "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X\n",
			    gethrtime(), (void *)pkt, (void *)cmd,
			    instance->fw_outstanding, instance->max_fw_cmds);

			counter++;
			break;
		}
	}
	mutex_exit(&instance->cmd_pend_mtx);

	if (counter) {
		/* cmd still points at the command that timed out above. */
		if (instance->disable_online_ctrl_reset == 1) {
			cmn_err(CE_WARN, "mr_sas %d: %s(): OCR is NOT "
			    "supported by Firmware, KILL adapter!!!",
			    instance->instance, __func__);

			if (instance->tbolt)
				mrsas_tbolt_kill_adapter(instance);
			else
				(void) mrsas_kill_adapter(instance);

			return;
		} else {
			if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
				if (instance->adapterresetinprogress == 0) {
					if (instance->tbolt) {
						(void) mrsas_tbolt_reset_ppc(
						    instance);
					} else {
						(void) mrsas_reset_ppc(
						    instance);
					}
				}
			} else {
				cmn_err(CE_WARN,
				    "io_timeout_checker: "
				    "cmd %p cmd->index %d "
				    "timed out even after 3 resets: "
				    "so KILL adapter", (void *)cmd, cmd->index);

				mrsas_print_cmd_details(instance, cmd, 0xDD);

				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return;
			}
		}
	}
	con_log(CL_ANN, (CE_NOTE, "mrsas: "
	    "schedule next timeout check: "
	    "do timeout \n"));
	instance->timeout_id =
	    timeout(io_timeout_checker, (void *)instance,
	    drv_usectohz(MRSAS_1_SECOND));
}
6816
6817 static uint32_t
6818 read_fw_status_reg_ppc(struct mrsas_instance *instance)
6819 {
6820 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
6821 }
6822
/*
 * issue_cmd_ppc
 *
 * Fire-and-forget submission of an MFI frame through the inbound queue
 * port; the low bits of the queued value encode the extra-frame count.
 * Commands carrying a scsi_pkt are normally tracked on the pending list
 * for the timeout checker, except during an adapter reset, when the
 * reset path owns the pending list and only the packet timer is
 * refreshed.
 */
static void
issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	/* One more command is now outstanding in the firmware. */
	atomic_add_16(&instance->fw_outstanding, 1);

	pkt = cmd->pkt;
	if (pkt) {
		con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd:"
		    ": %p instance : %p pkt : %p pkt_time : %x\n",
		    gethrtime(), (void *)cmd, (void *)instance,
		    (void *)pkt, cmd->drv_pkt_time));
		if (instance->adapterresetinprogress) {
			/* Reset path: just refresh the packet timer. */
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
		} else {
			/* Track the command for the I/O timeout checker. */
			push_pending_mfi_pkt(instance, cmd);
		}

	} else {
		con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
	}

	/* Serialize the queue-port write against other register writers. */
	mutex_enter(&instance->reg_write_mtx);
	ASSERT(mutex_owned(&instance->reg_write_mtx));
	/* Issue the command to the FW */
	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
	mutex_exit(&instance->reg_write_mtx);

}
6857
6858 /*
6859 * issue_cmd_in_sync_mode
6860 */
6861 static int
6862 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
6863 struct mrsas_cmd *cmd)
6864 {
6865 int i;
6866 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
6867 struct mrsas_header *hdr = &cmd->frame->hdr;
6868
6869 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));
6870
6871 if (instance->adapterresetinprogress) {
6872 cmd->drv_pkt_time = ddi_get16(
6873 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
6874 if (cmd->drv_pkt_time < debug_timeout_g)
6875 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6876
6877 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
6878 "issue and return in reset case\n"));
6879 WR_IB_QPORT((cmd->frame_phys_addr) |
6880 (((cmd->frame_count - 1) << 1) | 1), instance);
6881
6882 return (DDI_SUCCESS);
6883 } else {
6884 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
6885 push_pending_mfi_pkt(instance, cmd);
6886 }
6887
6888 cmd->cmd_status = ENODATA;
6889
6890 mutex_enter(&instance->reg_write_mtx);
6891 ASSERT(mutex_owned(&instance->reg_write_mtx));
6892 /* Issue the command to the FW */
6893 WR_IB_QPORT((cmd->frame_phys_addr) |
6894 (((cmd->frame_count - 1) << 1) | 1), instance);
6895 mutex_exit(&instance->reg_write_mtx);
6896
6897 mutex_enter(&instance->int_cmd_mtx);
6898 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
6899 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
6900 }
6901 mutex_exit(&instance->int_cmd_mtx);
6902
6903 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));
6904
6905 if (i < (msecs -1)) {
6906 return (DDI_SUCCESS);
6907 } else {
6908 return (DDI_FAILURE);
6909 }
6910 }
6911
6912 /*
6913 * issue_cmd_in_poll_mode
6914 */
6915 static int
6916 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6917 struct mrsas_cmd *cmd)
6918 {
6919 int i;
6920 uint16_t flags;
6921 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6922 struct mrsas_header *frame_hdr;
6923
6924 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
6925
6926 frame_hdr = (struct mrsas_header *)cmd->frame;
6927 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6928 MFI_CMD_STATUS_POLL_MODE);
6929 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6930 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6931
6932 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6933
6934 /* issue the frame using inbound queue port */
6935 WR_IB_QPORT((cmd->frame_phys_addr) |
6936 (((cmd->frame_count - 1) << 1) | 1), instance);
6937
6938 /* wait for cmd_status to change from 0xFF */
6939 for (i = 0; i < msecs && (
6940 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6941 == MFI_CMD_STATUS_POLL_MODE); i++) {
6942 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6943 }
6944
6945 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6946 == MFI_CMD_STATUS_POLL_MODE) {
6947 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6948 "cmd polling timed out"));
6949 return (DDI_FAILURE);
6950 }
6951
6952 return (DDI_SUCCESS);
6953 }
6954
/*
 * enable_intr_ppc
 *
 * Unmask the 2108 reply-message interrupt: first clear any stale
 * doorbell state, then clear the mask bit.  The trailing mask read
 * forces the posted PCI writes to complete.
 */
static void
enable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));

	/* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
	WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);

	/* WR_OB_INTR_MASK(~0x80000000, instance); */
	WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
	    "outbound_intr_mask = 0x%x", mask));
}
6974
/*
 * disable_intr_ppc
 *
 * Mask all outbound interrupts on the controller.  The trailing mask
 * read forces the posted PCI write to complete.
 */
static void
disable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
	WR_OB_INTR_MASK(OB_INTR_MASK, instance);

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);
#ifdef lint
	mask = mask;
#endif
}
6997
/*
 * intr_ack_ppc
 *
 * Decide whether the pending interrupt belongs to this instance and,
 * if so, acknowledge it by writing the status back to the doorbell
 * clear register (followed by a dummy read to flush the write).
 *
 * Returns DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED.
 */
static int
intr_ack_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	int ret = DDI_INTR_CLAIMED;

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));

	/* check if it is our interrupt */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));

	/* Not the 2108 reply-message bit: not ours. */
	if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
		ret = DDI_INTR_UNCLAIMED;
	}

	/* A faulted register map also means we must not claim it. */
	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		ret = DDI_INTR_UNCLAIMED;
	}

	if (ret == DDI_INTR_UNCLAIMED) {
		return (ret);
	}
	/* clear the interrupt by writing back the same value */
	WR_OB_DOORBELL_CLEAR(status, instance);

	/* dummy READ */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));

	return (ret);
}
7033
7034 /*
7035 * Marks HBA as bad. This will be called either when an
7036 * IO packet times out even after 3 FW resets
7037 * or FW is found to be fault even after 3 continuous resets.
7038 */
7039
7040 static int
7041 mrsas_kill_adapter(struct mrsas_instance *instance)
7042 {
7043 if (instance->deadadapter == 1)
7044 return (DDI_FAILURE);
7045
7046 con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
7047 "Writing to doorbell with MFI_STOP_ADP "));
7048 mutex_enter(&instance->ocr_flags_mtx);
7049 instance->deadadapter = 1;
7050 mutex_exit(&instance->ocr_flags_mtx);
7051 instance->func_ptr->disable_intr(instance);
7052 WR_IB_DOORBELL(MFI_STOP_ADP, instance);
7053 (void) mrsas_complete_pending_cmds(instance);
7054 return (DDI_SUCCESS);
7055 }
7056
7057
/*
 * mrsas_reset_ppc
 *
 * Online controller reset (OCR) for 2108-class controllers: unlock the
 * diagnostic register with the magic write sequence, trigger the reset,
 * wait for the firmware to return to READY (killing the HBA if it
 * faults MAX_FW_RESET_COUNT times), then re-initialize the MFI layer,
 * re-issue all pending commands and re-register for AEN.
 */
static int
mrsas_reset_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	uint32_t retry = 0;
	uint32_t cur_abs_reg_val;
	uint32_t fw_state;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (instance->deadadapter == 1) {
		cmn_err(CE_WARN, "mrsas_reset_ppc: "
		    "no more resets as HBA has been marked dead ");
		return (DDI_FAILURE);
	}
	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress "
	    "flag set, time %llx", gethrtime()));

	instance->func_ptr->disable_intr(instance);
retry_reset:
	/* Magic unlock sequence for the diagnostic write-enable register. */
	WR_IB_WRITE_SEQ(0, instance);
	WR_IB_WRITE_SEQ(4, instance);
	WR_IB_WRITE_SEQ(0xb, instance);
	WR_IB_WRITE_SEQ(2, instance);
	WR_IB_WRITE_SEQ(7, instance);
	WR_IB_WRITE_SEQ(0xd, instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
	    "to write sequence register\n"));
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);

	/* Wait (100 ms per poll) for diagnostic write enable to latch. */
	while (!(status & DIAG_WRITE_ENABLE)) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit "
			    "check retry count %d\n", retry);
			return (DDI_FAILURE);
		}
	}
	/* Trigger the adapter reset; the bit self-clears when done. */
	WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);
	/* NOTE(review): retry is shared with the loop above, not reset. */
	while (status & DIAG_RESET_ADAPTER) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN, "mrsas_reset_ppc: "
			    "RESET FAILED. KILL adapter called\n.");

			(void) mrsas_kill_adapter(instance);
			return (DDI_FAILURE);
		}
	}
	con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mfi_state_transition_to_ready"));

	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
	if (mfi_state_transition_to_ready(instance) ||
	    debug_fw_faults_after_ocr_g == 1) {
		cur_abs_reg_val =
		    instance->func_ptr->read_fw_status_reg(instance);
		fw_state = cur_abs_reg_val & MFI_STATE_MASK;

#ifdef OCRDEBUG
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_reset_ppc :before fake: FW is not ready "
		    "FW state = 0x%x", fw_state));
		if (debug_fw_faults_after_ocr_g == 1)
			fw_state = MFI_STATE_FAULT;
#endif

		con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready "
		    "FW state = 0x%x", fw_state));

		if (fw_state == MFI_STATE_FAULT) {
			/* increment the count */
			instance->fw_fault_count_after_ocr++;
			if (instance->fw_fault_count_after_ocr
			    < MAX_FW_RESET_COUNT) {
				cmn_err(CE_WARN, "mrsas_reset_ppc: "
				    "FW is in fault after OCR count %d "
				    "Retry Reset",
				    instance->fw_fault_count_after_ocr);
				goto retry_reset;

			} else {
				cmn_err(CE_WARN, "mrsas_reset_ppc: "
				    "Max Reset Count exceeded >%d"
				    "Mark HBA as bad, KILL adapter",
				    MAX_FW_RESET_COUNT);

				(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}
		}
	}
	/* reset the counter as FW is up after OCR */
	instance->fw_fault_count_after_ocr = 0;


	/* Re-arm the legacy producer/consumer reply queue indices. */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer, 0);

	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, 0);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    " after resetting produconsumer chck indexs:"
	    "producer %x consumer %x", *instance->producer,
	    *instance->consumer));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_init_mfi"));
	(void) mrsas_issue_init_mfi(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_issue_init_mfi Done"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_print_pending_cmd\n"));
	(void) mrsas_print_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_print_pending_cmd done\n"));

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_pending_cmds"));
	(void) mrsas_issue_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "issue_pending_cmds done.\n"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling aen registration"));


	/* Re-issue the outstanding AEN registration with fresh timers. */
	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "adpterresetinprogress flag unset"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
	return (DDI_SUCCESS);
}
7214
7215 /*
7216 * FMA functions.
7217 */
7218 int
7219 mrsas_common_check(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
7220 {
7221 int ret = DDI_SUCCESS;
7222
7223 if (cmd != NULL &&
7224 mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
7225 DDI_SUCCESS) {
7226 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7227 if (cmd->pkt != NULL) {
7228 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7229 cmd->pkt->pkt_statistics = 0;
7230 }
7231 ret = DDI_FAILURE;
7232 }
7233 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
7234 != DDI_SUCCESS) {
7235 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7236 if (cmd != NULL && cmd->pkt != NULL) {
7237 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7238 cmd->pkt->pkt_statistics = 0;
7239 }
7240 ret = DDI_FAILURE;
7241 }
7242 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
7243 DDI_SUCCESS) {
7244 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7245 if (cmd != NULL && cmd->pkt != NULL) {
7246 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7247 cmd->pkt->pkt_statistics = 0;
7248 }
7249 ret = DDI_FAILURE;
7250 }
7251 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
7252 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7253
7254 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
7255
7256 if (cmd != NULL && cmd->pkt != NULL) {
7257 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7258 cmd->pkt->pkt_statistics = 0;
7259 }
7260 ret = DDI_FAILURE;
7261 }
7262
7263 return (ret);
7264 }
7265
/*
 * mrsas_fm_error_cb
 *
 * FMA error callback registered via ddi_fm_handler_register(): post the
 * PCI ereport and let the framework's own disposition stand.
 */
/*ARGSUSED*/
static int
mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
7277
/*
 * mrsas_fm_init
 *
 * Register with the I/O Fault Services framework according to the
 * capabilities requested in instance->fm_capabilities, adjusting the
 * driver's access and DMA attributes for FMA-aware error reporting
 * (or back to the defaults when no capability is requested).
 */
static void
mrsas_fm_init(struct mrsas_instance *instance)
{
	/* Need to change iblock to priority for new MSI intr */
	ddi_iblock_cookie_t fm_ibc;

	/* Only register with IO Fault Services if we have some capability */
	if (instance->fm_capabilities) {
		/* Adjust access and dma attributes for FMA */
		endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;

		/*
		 * Register capabilities with IO Fault Services.
		 * fm_capabilities will be updated to indicate
		 * capabilities actually supported (not requested.)
		 */

		ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);

		/*
		 * Initialize pci ereport capabilities if ereport
		 * capable (should always be.)
		 */

		if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			pci_ereport_setup(instance->dip);
		}

		/*
		 * Register error callback if error callback capable.
		 */
		if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
			ddi_fm_handler_register(instance->dip,
			    mrsas_fm_error_cb, (void*) instance);
		}
	} else {
		/* No FMA: run with the default, non-error-checking attrs. */
		endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
		mrsas_generic_dma_attr.dma_attr_flags = 0;
	}
}
7320
7321 static void
7322 mrsas_fm_fini(struct mrsas_instance *instance)
7323 {
7324 /* Only unregister FMA capabilities if registered */
7325 if (instance->fm_capabilities) {
7326 /*
7327 * Un-register error callback if error callback capable.
7328 */
7329 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7330 ddi_fm_handler_unregister(instance->dip);
7331 }
7332
7333 /*
7334 * Release any resources allocated by pci_ereport_setup()
7335 */
7336 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7337 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7338 pci_ereport_teardown(instance->dip);
7339 }
7340
7341 /* Unregister from IO Fault Services */
7342 ddi_fm_fini(instance->dip);
7343
7344 /* Adjust access and dma attributes for FMA */
7345 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7346 mrsas_generic_dma_attr.dma_attr_flags = 0;
7347 }
7348 }
7349
7350 int
7351 mrsas_check_acc_handle(ddi_acc_handle_t handle)
7352 {
7353 ddi_fm_error_t de;
7354
7355 if (handle == NULL) {
7356 return (DDI_FAILURE);
7357 }
7358
7359 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7360
7361 return (de.fme_status);
7362 }
7363
7364 int
7365 mrsas_check_dma_handle(ddi_dma_handle_t handle)
7366 {
7367 ddi_fm_error_t de;
7368
7369 if (handle == NULL) {
7370 return (DDI_FAILURE);
7371 }
7372
7373 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7374
7375 return (de.fme_status);
7376 }
7377
7378 void
7379 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail)
7380 {
7381 uint64_t ena;
7382 char buf[FM_MAX_CLASS];
7383
7384 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7385 ena = fm_ena_generate(0, FM_ENA_FMT1);
7386 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
7387 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
7388 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7389 }
7390 }
7391
7392 static int
7393 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
7394 {
7395
7396 dev_info_t *dip = instance->dip;
7397 int avail, actual, count;
7398 int i, flag, ret;
7399
7400 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
7401 intr_type));
7402
7403 /* Get number of interrupts */
7404 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
7405 if ((ret != DDI_SUCCESS) || (count == 0)) {
7406 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
7407 "ret %d count %d", ret, count));
7408
7409 return (DDI_FAILURE);
7410 }
7411
7412 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));
7413
7414 /* Get number of available interrupts */
7415 ret = ddi_intr_get_navail(dip, intr_type, &avail);
7416 if ((ret != DDI_SUCCESS) || (avail == 0)) {
7417 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
7418 "ret %d avail %d", ret, avail));
7419
7420 return (DDI_FAILURE);
7421 }
7422 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));
7423
7424 /* Only one interrupt routine. So limit the count to 1 */
7425 if (count > 1) {
7426 count = 1;
7427 }
7428
7429 /*
7430 * Allocate an array of interrupt handlers. Currently we support
7431 * only one interrupt. The framework can be extended later.
7432 */
7433 instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
7434 instance->intr_htable = kmem_zalloc(instance->intr_htable_size,
7435 KM_SLEEP);
7436 if (instance->intr_htable == NULL) {
7437 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7438 "failed to allocate memory for intr-handle table"));
7439 instance->intr_htable_size = 0;
7440 return (DDI_FAILURE);
7441 }
7442
7443 flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
7444 (intr_type == DDI_INTR_TYPE_MSIX)) ?
7445 DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;
7446
7447 /* Allocate interrupt */
7448 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
7449 count, &actual, flag);
7450
7451 if ((ret != DDI_SUCCESS) || (actual == 0)) {
7452 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7453 "avail = %d", avail));
7454 goto mrsas_free_htable;
7455 }
7456
7457 if (actual < count) {
7458 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7459 "Requested = %d Received = %d", count, actual));
7460 }
7461 instance->intr_cnt = actual;
7462
7463 /*
7464 * Get the priority of the interrupt allocated.
7465 */
7466 if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
7467 &instance->intr_pri)) != DDI_SUCCESS) {
7468 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7469 "get priority call failed"));
7470 goto mrsas_free_handles;
7471 }
7472
7473 /*
7474 * Test for high level mutex. we don't support them.
7475 */
7476 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
7477 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7478 "High level interrupts not supported."));
7479 goto mrsas_free_handles;
7480 }
7481
7482 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
7483 instance->intr_pri));
7484
7485 /* Call ddi_intr_add_handler() */
7486 for (i = 0; i < actual; i++) {
7487 ret = ddi_intr_add_handler(instance->intr_htable[i],
7488 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
7489 (caddr_t)(uintptr_t)i);
7490
7491 if (ret != DDI_SUCCESS) {
7492 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
7493 "failed %d", ret));
7494 goto mrsas_free_handles;
7495 }
7496
7497 }
7498
7499 con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));
7500
7501 if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
7502 &instance->intr_cap)) != DDI_SUCCESS) {
7503 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
7504 ret));
7505 goto mrsas_free_handlers;
7506 }
7507
7508 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7509 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable"));
7510
7511 (void) ddi_intr_block_enable(instance->intr_htable,
7512 instance->intr_cnt);
7513 } else {
7514 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable"));
7515
7516 for (i = 0; i < instance->intr_cnt; i++) {
7517 (void) ddi_intr_enable(instance->intr_htable[i]);
7518 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns "
7519 "%d", i));
7520 }
7521 }
7522
7523 return (DDI_SUCCESS);
7524
7525 mrsas_free_handlers:
7526 for (i = 0; i < actual; i++)
7527 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7528
7529 mrsas_free_handles:
7530 for (i = 0; i < actual; i++)
7531 (void) ddi_intr_free(instance->intr_htable[i]);
7532
7533 mrsas_free_htable:
7534 if (instance->intr_htable != NULL)
7535 kmem_free(instance->intr_htable, instance->intr_htable_size);
7536
7537 instance->intr_htable = NULL;
7538 instance->intr_htable_size = 0;
7539
7540 return (DDI_FAILURE);
7541
7542 }
7543
7544
7545 static void
7546 mrsas_rem_intrs(struct mrsas_instance *instance)
7547 {
7548 int i;
7549
7550 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));
7551
7552 /* Disable all interrupts first */
7553 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7554 (void) ddi_intr_block_disable(instance->intr_htable,
7555 instance->intr_cnt);
7556 } else {
7557 for (i = 0; i < instance->intr_cnt; i++) {
7558 (void) ddi_intr_disable(instance->intr_htable[i]);
7559 }
7560 }
7561
7562 /* Remove all the handlers */
7563
7564 for (i = 0; i < instance->intr_cnt; i++) {
7565 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7566 (void) ddi_intr_free(instance->intr_htable[i]);
7567 }
7568
7569 if (instance->intr_htable != NULL)
7570 kmem_free(instance->intr_htable, instance->intr_htable_size);
7571
7572 instance->intr_htable = NULL;
7573 instance->intr_htable_size = 0;
7574
7575 }
7576
7577 static int
7578 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
7579 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
7580 {
7581 struct mrsas_instance *instance;
7582 int config;
7583 int rval = NDI_SUCCESS;
7584
7585 char *ptr = NULL;
7586 int tgt, lun;
7587
7588 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));
7589
7590 if ((instance = ddi_get_soft_state(mrsas_state,
7591 ddi_get_instance(parent))) == NULL) {
7592 return (NDI_FAILURE);
7593 }
7594
7595 /* Hold nexus during bus_config */
7596 ndi_devi_enter(parent, &config);
7597 switch (op) {
7598 case BUS_CONFIG_ONE: {
7599
7600 /* parse wwid/target name out of name given */
7601 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7602 rval = NDI_FAILURE;
7603 break;
7604 }
7605 ptr++;
7606
7607 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7608 rval = NDI_FAILURE;
7609 break;
7610 }
7611
7612 if (lun == 0) {
7613 rval = mrsas_config_ld(instance, tgt, lun, childp);
7614 #ifdef PDSUPPORT
7615 } else if (instance->tbolt == 1 && lun != 0) {
7616 rval = mrsas_tbolt_config_pd(instance,
7617 tgt, lun, childp);
7618 #endif
7619 } else {
7620 rval = NDI_FAILURE;
7621 }
7622
7623 break;
7624 }
7625 case BUS_CONFIG_DRIVER:
7626 case BUS_CONFIG_ALL: {
7627
7628 rval = mrsas_config_all_devices(instance);
7629
7630 rval = NDI_SUCCESS;
7631 break;
7632 }
7633 }
7634
7635 if (rval == NDI_SUCCESS) {
7636 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7637
7638 }
7639 ndi_devi_exit(parent, config);
7640
7641 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7642 rval));
7643 return (rval);
7644 }
7645
7646 static int
7647 mrsas_config_all_devices(struct mrsas_instance *instance)
7648 {
7649 int rval, tgt;
7650
7651 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7652 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7653
7654 }
7655
7656 #ifdef PDSUPPORT
7657 /* Config PD devices connected to the card */
7658 if (instance->tbolt) {
7659 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7660 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7661 }
7662 }
7663 #endif
7664
7665 rval = NDI_SUCCESS;
7666 return (rval);
7667 }
7668
7669 static int
7670 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7671 {
7672 char devbuf[SCSI_MAXNAMELEN];
7673 char *addr;
7674 char *p, *tp, *lp;
7675 long num;
7676
7677 /* Parse dev name and address */
7678 (void) strcpy(devbuf, devnm);
7679 addr = "";
7680 for (p = devbuf; *p != '\0'; p++) {
7681 if (*p == '@') {
7682 addr = p + 1;
7683 *p = '\0';
7684 } else if (*p == ':') {
7685 *p = '\0';
7686 break;
7687 }
7688 }
7689
7690 /* Parse target and lun */
7691 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7692 if (*p == ',') {
7693 lp = p + 1;
7694 *p = '\0';
7695 break;
7696 }
7697 }
7698 if (tgt && tp) {
7699 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7700 return (DDI_FAILURE); /* Can declare this as constant */
7701 }
7702 *tgt = (int)num;
7703 }
7704 if (lun && lp) {
7705 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7706 return (DDI_FAILURE);
7707 }
7708 *lun = (int)num;
7709 }
7710 return (DDI_SUCCESS); /* Success case */
7711 }
7712
7713 static int
7714 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
7715 uint8_t lun, dev_info_t **ldip)
7716 {
7717 struct scsi_device *sd;
7718 dev_info_t *child;
7719 int rval;
7720
7721 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
7722 tgt, lun));
7723
7724 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
7725 if (ldip) {
7726 *ldip = child;
7727 }
7728 if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
7729 rval = mrsas_service_evt(instance, tgt, 0,
7730 MRSAS_EVT_UNCONFIG_TGT, NULL);
7731 con_log(CL_ANN1, (CE_WARN,
7732 "mr_sas: DELETING STALE ENTRY rval = %d "
7733 "tgt id = %d ", rval, tgt));
7734 return (NDI_FAILURE);
7735 }
7736 return (NDI_SUCCESS);
7737 }
7738
7739 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
7740 if (sd == NULL) {
7741 con_log(CL_ANN1, (CE_WARN, "mrsas_config_ld: "
7742 "failed to allocate mem for scsi_device"));
7743 return (NDI_FAILURE);
7744 }
7745 sd->sd_address.a_hba_tran = instance->tran;
7746 sd->sd_address.a_target = (uint16_t)tgt;
7747 sd->sd_address.a_lun = (uint8_t)lun;
7748
7749 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
7750 rval = mrsas_config_scsi_device(instance, sd, ldip);
7751 else
7752 rval = NDI_FAILURE;
7753
7754 /* sd_unprobe is blank now. Free buffer manually */
7755 if (sd->sd_inq) {
7756 kmem_free(sd->sd_inq, SUN_INQSIZE);
7757 sd->sd_inq = (struct scsi_inquiry *)NULL;
7758 }
7759
7760 kmem_free(sd, sizeof (struct scsi_device));
7761 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
7762 rval));
7763 return (rval);
7764 }
7765
/*
 * mrsas_config_scsi_device - create and online a child devinfo node for
 * the probed SCSI device 'sd'.
 *
 * Derives the node name and "compatible" forms from the inquiry data,
 * allocates the node, attaches the "target", "lun" and "compatible"
 * properties, and attempts to online it.  When dipp is non-NULL, *dipp
 * receives the allocated node (possibly NULL on early failure).  Returns
 * an NDI_* status.
 *
 * NOTE(review): when a property update fails after ndi_devi_alloc()
 * succeeded, the "finish" path does not free the allocated node -- verify
 * whether this leaks the devinfo node, or whether a caller relies on
 * *dipp staying valid.
 */
int
mrsas_config_scsi_device(struct mrsas_instance *instance,
    struct scsi_device *sd, dev_info_t **dipp)
{
	char *nodename = NULL;
	char **compatible = NULL;
	int ncompatible = 0;
	char *childname;
	dev_info_t *ldip = NULL;
	int tgt = sd->sd_address.a_target;
	int lun = sd->sd_address.a_lun;
	/* Peripheral device type from the inquiry data. */
	int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
	int rval;

	con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
	/* Map the inquiry data to a node name and compatible-forms list. */
	scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
	    NULL, &nodename, &compatible, &ncompatible);

	if (nodename == NULL) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
		    "for t%dL%d", tgt, lun));
		rval = NDI_FAILURE;
		goto finish;
	}

	/* Direct-access devices bind to "sd"; others keep the derived name. */
	childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
	con_log(CL_DLEVEL1, (CE_NOTE,
	    "mr_sas: Childname = %2s nodename = %s", childname, nodename));

	/* Create a dev node */
	rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
	con_log(CL_DLEVEL1, (CE_NOTE,
	    "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
	if (rval == NDI_SUCCESS) {
		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
		    DDI_PROP_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
			    "property for t%dl%d target", tgt, lun));
			rval = NDI_FAILURE;
			goto finish;
		}
		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
		    DDI_PROP_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
			    "property for t%dl%d lun", tgt, lun));
			rval = NDI_FAILURE;
			goto finish;
		}

		if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
		    "compatible", compatible, ncompatible) !=
		    DDI_PROP_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
			    "property for t%dl%d compatible", tgt, lun));
			rval = NDI_FAILURE;
			goto finish;
		}

		rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
		if (rval != NDI_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
			    "t%dl%d", tgt, lun));
			/* Online failed: undo the allocation. */
			ndi_prop_remove_all(ldip);
			(void) ndi_devi_free(ldip);
		} else {
			con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :"
			    "0 t%dl%d", tgt, lun));
		}

	}
finish:
	if (dipp) {
		*dipp = ldip;
	}

	con_log(CL_DLEVEL1, (CE_NOTE,
	    "mr_sas: config_scsi_device rval = %d t%dL%d",
	    rval, tgt, lun));
	/* Release the name/compatible strings from nodename_compatible_get. */
	scsi_hba_nodename_compatible_free(nodename, compatible);
	return (rval);
}
7847
7848 /*ARGSUSED*/
7849 int
7850 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7851 uint64_t wwn)
7852 {
7853 struct mrsas_eventinfo *mrevt = NULL;
7854
7855 con_log(CL_ANN1, (CE_NOTE,
7856 "mrsas_service_evt called for t%dl%d event = %d",
7857 tgt, lun, event));
7858
7859 if ((instance->taskq == NULL) || (mrevt =
7860 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7861 return (ENOMEM);
7862 }
7863
7864 mrevt->instance = instance;
7865 mrevt->tgt = tgt;
7866 mrevt->lun = lun;
7867 mrevt->event = event;
7868 mrevt->wwn = wwn;
7869
7870 if ((ddi_taskq_dispatch(instance->taskq,
7871 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7872 DDI_SUCCESS) {
7873 con_log(CL_ANN1, (CE_NOTE,
7874 "mr_sas: Event task failed for t%dl%d event = %d",
7875 tgt, lun, event));
7876 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7877 return (DDI_FAILURE);
7878 }
7879 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event);
7880 return (DDI_SUCCESS);
7881 }
7882
7883 static void
7884 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
7885 {
7886 struct mrsas_instance *instance = mrevt->instance;
7887 dev_info_t *dip, *pdip;
7888 int circ1 = 0;
7889 char *devname;
7890
7891 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
7892 " tgt %d lun %d event %d",
7893 mrevt->tgt, mrevt->lun, mrevt->event));
7894
7895 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
7896 mutex_enter(&instance->config_dev_mtx);
7897 dip = instance->mr_ld_list[mrevt->tgt].dip;
7898 mutex_exit(&instance->config_dev_mtx);
7899 #ifdef PDSUPPORT
7900 } else {
7901 mutex_enter(&instance->config_dev_mtx);
7902 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7903 mutex_exit(&instance->config_dev_mtx);
7904 #endif
7905 }
7906
7907
7908 ndi_devi_enter(instance->dip, &circ1);
7909 switch (mrevt->event) {
7910 case MRSAS_EVT_CONFIG_TGT:
7911 if (dip == NULL) {
7912
7913 if (mrevt->lun == 0) {
7914 (void) mrsas_config_ld(instance, mrevt->tgt,
7915 0, NULL);
7916 #ifdef PDSUPPORT
7917 } else if (instance->tbolt) {
7918 (void) mrsas_tbolt_config_pd(instance,
7919 mrevt->tgt,
7920 1, NULL);
7921 #endif
7922 }
7923 con_log(CL_ANN1, (CE_NOTE,
7924 "mr_sas: EVT_CONFIG_TGT called:"
7925 " for tgt %d lun %d event %d",
7926 mrevt->tgt, mrevt->lun, mrevt->event));
7927
7928 } else {
7929 con_log(CL_ANN1, (CE_NOTE,
7930 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7931 " for tgt %d lun %d event %d",
7932 mrevt->tgt, mrevt->lun, mrevt->event));
7933 }
7934 break;
7935 case MRSAS_EVT_UNCONFIG_TGT:
7936 if (dip) {
7937 if (i_ddi_devi_attached(dip)) {
7938
7939 pdip = ddi_get_parent(dip);
7940
7941 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
7942 (void) ddi_deviname(dip, devname);
7943
7944 (void) devfs_clean(pdip, devname + 1,
7945 DV_CLEAN_FORCE);
7946 kmem_free(devname, MAXNAMELEN + 1);
7947 }
7948 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7949 con_log(CL_ANN1, (CE_NOTE,
7950 "mr_sas: EVT_UNCONFIG_TGT called:"
7951 " for tgt %d lun %d event %d",
7952 mrevt->tgt, mrevt->lun, mrevt->event));
7953 } else {
7954 con_log(CL_ANN1, (CE_NOTE,
7955 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7956 " for tgt %d lun %d event %d",
7957 mrevt->tgt, mrevt->lun, mrevt->event));
7958 }
7959 break;
7960 }
7961 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7962 ndi_devi_exit(instance->dip, circ1);
7963 }
7964
7965
7966 int
7967 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7968 {
7969 union scsi_cdb *cdbp;
7970 uint16_t page_code;
7971 struct scsa_cmd *acmd;
7972 struct buf *bp;
7973 struct mode_header *modehdrp;
7974
7975 cdbp = (void *)pkt->pkt_cdbp;
7976 page_code = cdbp->cdb_un.sg.scsi[0];
7977 acmd = PKT2CMD(pkt);
7978 bp = acmd->cmd_buf;
7979 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) {
7980 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7981 /* ADD pkt statistics as Command failed. */
7982 return (NULL);
7983 }
7984
7985 bp_mapin(bp);
7986 bzero(bp->b_un.b_addr, bp->b_bcount);
7987
7988 switch (page_code) {
7989 case 0x3: {
7990 struct mode_format *page3p = NULL;
7991 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7992 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7993
7994 page3p = (void *)((caddr_t)modehdrp +
7995 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7996 page3p->mode_page.code = 0x3;
7997 page3p->mode_page.length =
7998 (uchar_t)(sizeof (struct mode_format));
7999 page3p->data_bytes_sect = 512;
8000 page3p->sect_track = 63;
8001 break;
8002 }
8003 case 0x4: {
8004 struct mode_geometry *page4p = NULL;
8005 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
8006 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
8007
8008 page4p = (void *)((caddr_t)modehdrp +
8009 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
8010 page4p->mode_page.code = 0x4;
8011 page4p->mode_page.length =
8012 (uchar_t)(sizeof (struct mode_geometry));
8013 page4p->heads = 255;
8014 page4p->rpm = 10000;
8015 break;
8016 }
8017 default:
8018 break;
8019 }
8020 return (NULL);
8021 }