1 /*
2 * mr_sas.c: source for mr_sas driver
3 *
4 * Solaris MegaRAID device driver for SAS2.0 controllers
5 * Copyright (c) 2008-2012, LSI Logic Corporation.
6 * All rights reserved.
7 *
8 * Version:
9 * Author:
10 * Swaminathan K S
11 * Arun Chandrashekhar
12 * Manju R
13 * Rasheed
14 * Shakeel Bukhari
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright notice,
20 * this list of conditions and the following disclaimer.
21 *
22 * 2. Redistributions in binary form must reproduce the above copyright notice,
23 * this list of conditions and the following disclaimer in the documentation
24 * and/or other materials provided with the distribution.
25 *
26 * 3. Neither the name of the author nor the names of its contributors may be
27 * used to endorse or promote products derived from this software without
28 * specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
37 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
41 * DAMAGE.
42 */
43
44 /*
45 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
46 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
47 * Copyright 2012 Nexenta System, Inc. All rights reserved.
48 */
49
50 #include <sys/types.h>
51 #include <sys/param.h>
52 #include <sys/file.h>
53 #include <sys/errno.h>
54 #include <sys/open.h>
55 #include <sys/cred.h>
56 #include <sys/modctl.h>
57 #include <sys/conf.h>
58 #include <sys/devops.h>
59 #include <sys/cmn_err.h>
60 #include <sys/kmem.h>
61 #include <sys/stat.h>
62 #include <sys/mkdev.h>
63 #include <sys/pci.h>
64 #include <sys/scsi/scsi.h>
65 #include <sys/ddi.h>
66 #include <sys/sunddi.h>
67 #include <sys/atomic.h>
68 #include <sys/signal.h>
69 #include <sys/byteorder.h>
70 #include <sys/sdt.h>
71 #include <sys/fs/dv_node.h> /* devfs_clean */
72
73 #include "mr_sas.h"
74
75 /*
76 * FMA header files
77 */
78 #include <sys/ddifm.h>
79 #include <sys/fm/protocol.h>
80 #include <sys/fm/util.h>
81 #include <sys/fm/io/ddi.h>
82
83 /*
84 * Local static data
85 */
86 static void *mrsas_state = NULL;
87 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
88 volatile int debug_level_g = CL_NONE;
89 static volatile int msi_enable = 1;
90 static volatile int ctio_enable = 1;
91
92 /* Default Timeout value to issue online controller reset */
93 volatile int debug_timeout_g = 0xF0; /* 0xB4; */
94 /* Simulate consecutive firmware fault */
95 static volatile int debug_fw_faults_after_ocr_g = 0;
96 #ifdef OCRDEBUG
97 /* Simulate three consecutive timeout for an IO */
98 static volatile int debug_consecutive_timeout_after_ocr_g = 0;
99 #endif
100
101 #if 0
102 /* Enable OCR on firmware fault */
103 static volatile int debug_support_ocr_isr_g = 0;
104 #endif
105 #pragma weak scsi_hba_open
106 #pragma weak scsi_hba_close
107 #pragma weak scsi_hba_ioctl
108
/* Local static prototypes. */

/* DDI/DKI autoconfiguration and cb_ops entry points. */
static int mrsas_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
static int mrsas_attach(dev_info_t *, ddi_attach_cmd_t);
#ifdef __sparc
static int mrsas_reset(dev_info_t *, ddi_reset_cmd_t);
#else
static int mrsas_quiesce(dev_info_t *);
#endif
static int mrsas_detach(dev_info_t *, ddi_detach_cmd_t);
static int mrsas_open(dev_t *, int, int, cred_t *);
static int mrsas_close(dev_t, int, int, cred_t *);
static int mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

/* SCSA (scsi_hba_tran_t) entry points. */
static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
    struct scsi_pkt *, struct buf *, int, int, int, int,
    int (*)(), caddr_t);
static int mrsas_tran_start(struct scsi_address *,
    register struct scsi_pkt *);
static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
static int mrsas_tran_reset(struct scsi_address *, int);
#if 0
static int mrsas_tran_bus_reset(dev_info_t *, int);
#endif
static int mrsas_tran_getcap(struct scsi_address *, char *, int);
static int mrsas_tran_setcap(struct scsi_address *, char *, int, int);
static void mrsas_tran_destroy_pkt(struct scsi_address *,
    struct scsi_pkt *);
static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
static void mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
static int mrsas_tran_quiesce(dev_info_t *dip);
static int mrsas_tran_unquiesce(dev_info_t *dip);
/*
 * NOTE(review): old-style unprototyped declarations; actual signatures
 * take a caddr_t argument (see ddi_add_softintr usage in attach).
 */
static uint_t mrsas_isr();
static uint_t mrsas_softintr();
static void mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);

/* MFI command-packet pool management. */
static struct mrsas_cmd *get_mfi_pkt(struct mrsas_instance *);
static void return_mfi_pkt(struct mrsas_instance *,
    struct mrsas_cmd *);

static void free_space_for_mfi(struct mrsas_instance *);

/* 2108 ("ppc") register-level operations (see mrsas_function_template_ppc). */
static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
    struct mrsas_cmd *);
static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
    struct mrsas_cmd *);
static void enable_intr_ppc(struct mrsas_instance *);
static void disable_intr_ppc(struct mrsas_instance *);
static int intr_ack_ppc(struct mrsas_instance *);
static void flush_cache(struct mrsas_instance *instance);
void display_scsi_inquiry(caddr_t);

/* AEN (asynchronous event notification) and ioctl plumbing. */
static int start_mfi_aen(struct mrsas_instance *instance);
static int handle_drv_ioctl(struct mrsas_instance *instance,
    struct mrsas_ioctl *ioctl, int mode);
static int handle_mfi_ioctl(struct mrsas_instance *instance,
    struct mrsas_ioctl *ioctl, int mode);
static int handle_mfi_aen(struct mrsas_instance *instance,
    struct mrsas_aen *aen);
static struct mrsas_cmd *build_cmd(struct mrsas_instance *,
    struct scsi_address *, struct scsi_pkt *, uchar_t *);
static int alloc_additional_dma_buffer(struct mrsas_instance *);
static void complete_cmd_in_sync_mode(struct mrsas_instance *,
    struct mrsas_cmd *);

/* Online controller reset (OCR) and firmware-fault recovery. */
static int mrsas_kill_adapter(struct mrsas_instance *);
static int mrsas_issue_init_mfi(struct mrsas_instance *);
static int mrsas_reset_ppc(struct mrsas_instance *);
static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *);
static int wait_for_outstanding(struct mrsas_instance *instance);
static int register_mfi_aen(struct mrsas_instance *instance,
    uint32_t seq_num, uint32_t class_locale_word);

/* MFI pass-through issue paths for the various ioctl frame types. */
static int issue_mfi_pthru(struct mrsas_instance *instance, struct
    mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
static int issue_mfi_dcmd(struct mrsas_instance *instance, struct
    mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
static int issue_mfi_smp(struct mrsas_instance *instance, struct
    mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
static int issue_mfi_stp(struct mrsas_instance *instance, struct
    mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
static int abort_aen_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd_to_abort);

/* Interrupt registration helpers. */
static void mrsas_rem_intrs(struct mrsas_instance *instance);
static int mrsas_add_intrs(struct mrsas_instance *instance, int intr_type);

/* Bus-config / device-enumeration helpers. */
static void mrsas_tran_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static int mrsas_tran_bus_config(dev_info_t *, uint_t,
    ddi_bus_config_op_t, void *, dev_info_t **);
static int mrsas_parse_devname(char *, int *, int *);
static int mrsas_config_all_devices(struct mrsas_instance *);
static int mrsas_config_ld(struct mrsas_instance *, uint16_t,
    uint8_t, dev_info_t **);
static int mrsas_name_node(dev_info_t *, char *, int);
static void mrsas_issue_evt_taskq(struct mrsas_eventinfo *);
static void free_additional_dma_buffer(struct mrsas_instance *);
static void io_timeout_checker(void *);
static void mrsas_fm_init(struct mrsas_instance *);
static void mrsas_fm_fini(struct mrsas_instance *);
208
/*
 * Register-level operation vector for 2108 ("Liberator"/ppc) controllers.
 * Selected in mrsas_attach() based on PCI device ID.
 */
static struct mrsas_function_template mrsas_function_template_ppc = {
	.read_fw_status_reg = read_fw_status_reg_ppc,
	.issue_cmd = issue_cmd_ppc,
	.issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
	.issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
	.enable_intr = enable_intr_ppc,
	.disable_intr = disable_intr_ppc,
	.intr_ack = intr_ack_ppc,
	.init_adapter = mrsas_init_adapter_ppc
};
219
220
/*
 * Operation vector for 2208 Thunderbolt/Invader ("fusion") controllers;
 * the tbolt_* implementations live in the fusion-specific source file.
 */
static struct mrsas_function_template mrsas_function_template_fusion = {
	.read_fw_status_reg = tbolt_read_fw_status_reg,
	.issue_cmd = tbolt_issue_cmd,
	.issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
	.issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
	.enable_intr = tbolt_enable_intr,
	.disable_intr = tbolt_disable_intr,
	.intr_ack = tbolt_intr_ack,
	.init_adapter = mrsas_init_adapter_tbolt
	/* .reset_adapter = mrsas_reset_adapter_tbolt */
};
232
233
/*
 * Baseline DMA attributes: 32-bit addressing, 8-byte alignment, 512-byte
 * granularity.  attach() copies this and overrides dma_attr_sgllen with
 * the controller's reported max SGE count; DDI_DMA_RELAXED_ORDERING may
 * be OR'ed into dma_attr_flags (see mrsas_relaxed_ordering).
 */
ddi_dma_attr_t mrsas_generic_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xFFFFFFFFU,		/* high DMA address range */
	0xFFFFFFFFU,		/* DMA counter register */
	8,			/* DMA address alignment */
	0x07,			/* DMA burstsizes */
	1,			/* min DMA size */
	0xFFFFFFFFU,		/* max DMA size */
	0xFFFFFFFFU,		/* segment boundary */
	MRSAS_MAX_SGE_CNT,	/* dma_attr_sglen */
	512,			/* granularity of device */
	0			/* bus specific DMA flags */
};
248
/* Maximum transfer size reported for 2108 controllers: 16 MB. */
int32_t mrsas_max_cap_maxxfer = 0x1000000;

/*
 * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG,
 * Limit size to 256K
 */
uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
256
257 /*
258 * cb_ops contains base level routines
259 */
/*
 * cb_ops contains base level routines
 *
 * Only open/close/ioctl are implemented; the minor nodes created in
 * attach() (devctl, scsi, lsirdctl) are management interfaces, so all
 * data-path entry points are nodev.
 */
static struct cb_ops mrsas_cb_ops = {
	mrsas_open,		/* open */
	mrsas_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mrsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	nodev,			/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
280
281 /*
282 * dev_ops contains configuration routines
283 */
/*
 * dev_ops contains configuration routines
 *
 * reset(9E) is only meaningful on SPARC; elsewhere quiesce(9E) is
 * supplied for fast-reboot support.
 */
static struct dev_ops mrsas_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	mrsas_getinfo,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mrsas_attach,		/* attach */
	mrsas_detach,		/* detach */
#ifdef	__sparc
	mrsas_reset,		/* reset */
#else	/* __sparc */
	nodev,
#endif	/* __sparc */
	&mrsas_cb_ops,		/* char/block ops */
	NULL,			/* bus ops */
	NULL,			/* power */
#ifdef	__sparc
	ddi_quiesce_not_needed
#else	/* __sparc */
	mrsas_quiesce		/* quiesce */
#endif	/* __sparc */
};
306
/* Loadable-module descriptor linking this driver into modctl. */
static struct modldrv modldrv = {
	&mod_driverops,		/* module type - driver */
	MRSAS_VERSION,
	&mrsas_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev - must be MODREV_1 */
	&modldrv,		/* ml_linkage */
	NULL			/* end of driver linkage */
};

/*
 * Access attributes for register/DMA mappings: controller registers are
 * little-endian and accesses must not be reordered.
 */
static struct ddi_device_acc_attr endian_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/* Use the LSI Fast Path for the 2208 (tbolt) commands. */
unsigned int enable_fp = 1;
328
329
330 /*
331 * ************************************************************************** *
332 * *
333 * common entry points - for loadable kernel modules *
334 * *
335 * ************************************************************************** *
336 */
337
338 /*
339 * _init - initialize a loadable module
340 * @void
341 *
342 * The driver should perform any one-time resource allocation or data
343 * initialization during driver loading in _init(). For example, the driver
344 * should initialize any mutexes global to the driver in this routine.
345 * The driver should not, however, use _init() to allocate or initialize
346 * anything that has to do with a particular instance of the device.
347 * Per-instance initialization must be done in attach().
348 */
349 int
350 _init(void)
351 {
352 int ret;
353
354 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
355
356 ret = ddi_soft_state_init(&mrsas_state,
357 sizeof (struct mrsas_instance), 0);
358
359 if (ret != DDI_SUCCESS) {
360 cmn_err(CE_WARN, "mr_sas: could not init state");
361 return (ret);
362 }
363
364 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
365 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
366 ddi_soft_state_fini(&mrsas_state);
367 return (ret);
368 }
369
370 ret = mod_install(&modlinkage);
371
372 if (ret != DDI_SUCCESS) {
373 cmn_err(CE_WARN, "mr_sas: mod_install failed");
374 scsi_hba_fini(&modlinkage);
375 ddi_soft_state_fini(&mrsas_state);
376 }
377
378 return (ret);
379 }
380
381 /*
382 * _info - returns information about a loadable module.
383 * @void
384 *
385 * _info() is called to return module information. This is a typical entry
386 * point that does predefined role. It simply calls mod_info().
387 */
388 int
389 _info(struct modinfo *modinfop)
390 {
391 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
392
393 return (mod_info(&modlinkage, modinfop));
394 }
395
396 /*
397 * _fini - prepare a loadable module for unloading
398 * @void
399 *
400 * In _fini(), the driver should release any resources that were allocated in
401 * _init(). The driver must remove itself from the system module list.
402 */
403 int
404 _fini(void)
405 {
406 int ret;
407
408 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
409
410 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) {
411 con_log(CL_ANN1,
412 (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
413 return (ret);
414 }
415
416 scsi_hba_fini(&modlinkage);
417 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
418
419 ddi_soft_state_fini(&mrsas_state);
420 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
421
422 return (ret);
423 }
424
425
426 /*
427 * ************************************************************************** *
428 * *
429 * common entry points - for autoconfiguration *
430 * *
431 * ************************************************************************** *
432 */
433 /*
434 * attach - adds a device to the system as part of initialization
435 * @dip:
436 * @cmd:
437 *
438 * The kernel calls a driver's attach() entry point to attach an instance of
439 * a device (for MegaRAID, it is instance of a controller) or to resume
440 * operation for an instance of a device that has been suspended or has been
441 * shut down by the power management framework
442 * The attach() entry point typically includes the following types of
443 * processing:
444 * - allocate a soft-state structure for the device instance (for MegaRAID,
445 * controller instance)
446 * - initialize per-instance mutexes
447 * - initialize condition variables
448 * - register the device's interrupts (for MegaRAID, controller's interrupts)
449 * - map the registers and memory of the device instance (for MegaRAID,
450 * controller instance)
451 * - create minor device nodes for the device instance (for MegaRAID,
452 * controller instance)
453 * - report that the device instance (for MegaRAID, controller instance) has
454 * attached
455 */
456 static int
457 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
458 {
459 int instance_no;
460 int nregs;
461 int i = 0;
462 uint8_t irq;
463 uint16_t vendor_id;
464 uint16_t device_id;
465 uint16_t subsysvid;
466 uint16_t subsysid;
467 uint16_t command;
468 off_t reglength = 0;
469 int intr_types = 0;
470 char *data;
471
472 scsi_hba_tran_t *tran;
473 ddi_dma_attr_t tran_dma_attr;
474 struct mrsas_instance *instance;
475
476 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
477
478 /* CONSTCOND */
479 ASSERT(NO_COMPETING_THREADS);
480
481 instance_no = ddi_get_instance(dip);
482
483 /*
484 * check to see whether this device is in a DMA-capable slot.
485 */
486 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
487 cmn_err(CE_WARN,
488 "mr_sas%d: Device in slave-only slot, unused",
489 instance_no);
490 return (DDI_FAILURE);
491 }
492
493 switch (cmd) {
494 case DDI_ATTACH:
495 /* allocate the soft state for the instance */
496 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
497 != DDI_SUCCESS) {
498 cmn_err(CE_WARN,
499 "mr_sas%d: Failed to allocate soft state",
500 instance_no);
501 return (DDI_FAILURE);
502 }
503
504 instance = (struct mrsas_instance *)ddi_get_soft_state
505 (mrsas_state, instance_no);
506
507 if (instance == NULL) {
508 cmn_err(CE_WARN,
509 "mr_sas%d: Bad soft state", instance_no);
510 ddi_soft_state_free(mrsas_state, instance_no);
511 return (DDI_FAILURE);
512 }
513
514 instance->unroll.softs = 1;
515
516 /* Setup the PCI configuration space handles */
517 if (pci_config_setup(dip, &instance->pci_handle) !=
518 DDI_SUCCESS) {
519 cmn_err(CE_WARN,
520 "mr_sas%d: pci config setup failed ",
521 instance_no);
522
523 ddi_soft_state_free(mrsas_state, instance_no);
524 return (DDI_FAILURE);
525 }
526
527 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
528 cmn_err(CE_WARN,
529 "mr_sas: failed to get registers.");
530
531 pci_config_teardown(&instance->pci_handle);
532 ddi_soft_state_free(mrsas_state, instance_no);
533 return (DDI_FAILURE);
534 }
535
536 vendor_id = pci_config_get16(instance->pci_handle,
537 PCI_CONF_VENID);
538 device_id = pci_config_get16(instance->pci_handle,
539 PCI_CONF_DEVID);
540
541 subsysvid = pci_config_get16(instance->pci_handle,
542 PCI_CONF_SUBVENID);
543 subsysid = pci_config_get16(instance->pci_handle,
544 PCI_CONF_SUBSYSID);
545
546 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
547 (pci_config_get16(instance->pci_handle,
548 PCI_CONF_COMM) | PCI_COMM_ME));
549 irq = pci_config_get8(instance->pci_handle,
550 PCI_CONF_ILINE);
551
552 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
553 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
554 instance_no, vendor_id, device_id, subsysvid,
555 subsysid, irq, MRSAS_VERSION));
556
557 /* enable bus-mastering */
558 command = pci_config_get16(instance->pci_handle,
559 PCI_CONF_COMM);
560
561 if (!(command & PCI_COMM_ME)) {
562 command |= PCI_COMM_ME;
563
564 pci_config_put16(instance->pci_handle,
565 PCI_CONF_COMM, command);
566
567 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
568 "enable bus-mastering", instance_no));
569 } else {
570 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
571 "bus-mastering already set", instance_no));
572 }
573
574 /* initialize function pointers */
575 switch (device_id) {
576 case PCI_DEVICE_ID_LSI_TBOLT:
577 case PCI_DEVICE_ID_LSI_INVADER:
578 con_log(CL_ANN, (CE_NOTE,
579 "mr_sas: 2208 T.B. device detected"));
580
581 instance->func_ptr =
582 &mrsas_function_template_fusion;
583 instance->tbolt = 1;
584 break;
585
586 case PCI_DEVICE_ID_LSI_2108VDE:
587 case PCI_DEVICE_ID_LSI_2108V:
588 con_log(CL_ANN, (CE_NOTE,
589 "mr_sas: 2108 Liberator device detected"));
590
591 instance->func_ptr =
592 &mrsas_function_template_ppc;
593 break;
594
595 default:
596 cmn_err(CE_WARN,
597 "mr_sas: Invalid device detected");
598
599 pci_config_teardown(&instance->pci_handle);
600 ddi_soft_state_free(mrsas_state, instance_no);
601 return (DDI_FAILURE);
602 }
603
604 instance->baseaddress = pci_config_get32(
605 instance->pci_handle, PCI_CONF_BASE0);
606 instance->baseaddress &= 0x0fffc;
607
608 instance->dip = dip;
609 instance->vendor_id = vendor_id;
610 instance->device_id = device_id;
611 instance->subsysvid = subsysvid;
612 instance->subsysid = subsysid;
613 instance->instance = instance_no;
614
615 /* Initialize FMA */
616 instance->fm_capabilities = ddi_prop_get_int(
617 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
618 "fm-capable", DDI_FM_EREPORT_CAPABLE |
619 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
620 | DDI_FM_ERRCB_CAPABLE);
621
622 mrsas_fm_init(instance);
623
624 /* Setup register map */
625 if ((ddi_dev_regsize(instance->dip,
626 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) ||
627 reglength < MINIMUM_MFI_MEM_SZ) {
628 goto fail_attach;
629 }
630 if (reglength > DEFAULT_MFI_MEM_SZ) {
631 reglength = DEFAULT_MFI_MEM_SZ;
632 con_log(CL_DLEVEL1, (CE_NOTE,
633 "mr_sas: register length to map is 0x%lx bytes",
634 reglength));
635 }
636 if (ddi_regs_map_setup(instance->dip,
637 REGISTER_SET_IO_2108, &instance->regmap, 0,
638 reglength, &endian_attr, &instance->regmap_handle)
639 != DDI_SUCCESS) {
640 cmn_err(CE_WARN,
641 "mr_sas: couldn't map control registers");
642 goto fail_attach;
643 }
644
645 instance->unroll.regs = 1;
646
647 /*
648 * Disable Interrupt Now.
649 * Setup Software interrupt
650 */
651 instance->func_ptr->disable_intr(instance);
652
653 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
654 "mrsas-enable-msi", &data) == DDI_SUCCESS) {
655 if (strncmp(data, "no", 3) == 0) {
656 msi_enable = 0;
657 con_log(CL_ANN1, (CE_WARN,
658 "msi_enable = %d disabled", msi_enable));
659 }
660 ddi_prop_free(data);
661 }
662
663 con_log(CL_DLEVEL1, (CE_NOTE, "msi_enable = %d", msi_enable));
664
665 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
666 "mrsas-enable-fp", &data) == DDI_SUCCESS) {
667 if (strncmp(data, "no", 3) == 0) {
668 enable_fp = 0;
669 cmn_err(CE_NOTE,
670 "enable_fp = %d, Fast-Path disabled.\n",
671 enable_fp);
672 }
673
674 ddi_prop_free(data);
675 }
676
677 cmn_err(CE_NOTE, "enable_fp = %d\n", enable_fp);
678
679 /* Check for all supported interrupt types */
680 if (ddi_intr_get_supported_types(
681 dip, &intr_types) != DDI_SUCCESS) {
682 cmn_err(CE_WARN,
683 "ddi_intr_get_supported_types() failed");
684 goto fail_attach;
685 }
686
687 con_log(CL_DLEVEL1, (CE_NOTE,
688 "ddi_intr_get_supported_types() ret: 0x%x", intr_types));
689
690 /* Initialize and Setup Interrupt handler */
691 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
692 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) !=
693 DDI_SUCCESS) {
694 cmn_err(CE_WARN,
695 "MSIX interrupt query failed");
696 goto fail_attach;
697 }
698 instance->intr_type = DDI_INTR_TYPE_MSIX;
699 } else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) {
700 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) !=
701 DDI_SUCCESS) {
702 cmn_err(CE_WARN,
703 "MSI interrupt query failed");
704 goto fail_attach;
705 }
706 instance->intr_type = DDI_INTR_TYPE_MSI;
707 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
708 msi_enable = 0;
709 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) !=
710 DDI_SUCCESS) {
711 cmn_err(CE_WARN,
712 "FIXED interrupt query failed");
713 goto fail_attach;
714 }
715 instance->intr_type = DDI_INTR_TYPE_FIXED;
716 } else {
717 cmn_err(CE_WARN, "Device cannot "
718 "suppport either FIXED or MSI/X "
719 "interrupts");
720 goto fail_attach;
721 }
722
723 instance->unroll.intr = 1;
724
725 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
726 "mrsas-enable-ctio", &data) == DDI_SUCCESS) {
727 if (strncmp(data, "no", 3) == 0) {
728 ctio_enable = 0;
729 con_log(CL_ANN1, (CE_WARN,
730 "ctio_enable = %d disabled", ctio_enable));
731 }
732 ddi_prop_free(data);
733 }
734
735 con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d", ctio_enable));
736
737 /* setup the mfi based low level driver */
738 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
739 cmn_err(CE_WARN, "mr_sas: "
740 "could not initialize the low level driver");
741
742 goto fail_attach;
743 }
744
745 /* Initialize all Mutex */
746 INIT_LIST_HEAD(&instance->completed_pool_list);
747 mutex_init(&instance->completed_pool_mtx,
748 "completed_pool_mtx", MUTEX_DRIVER,
749 DDI_INTR_PRI(instance->intr_pri));
750
751 mutex_init(&instance->sync_map_mtx,
752 "sync_map_mtx", MUTEX_DRIVER,
753 DDI_INTR_PRI(instance->intr_pri));
754
755 mutex_init(&instance->app_cmd_pool_mtx,
756 "app_cmd_pool_mtx", MUTEX_DRIVER,
757 DDI_INTR_PRI(instance->intr_pri));
758
759 mutex_init(&instance->config_dev_mtx, "config_dev_mtx",
760 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
761
762 mutex_init(&instance->cmd_pend_mtx, "cmd_pend_mtx",
763 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
764
765 mutex_init(&instance->ocr_flags_mtx, "ocr_flags_mtx",
766 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
767
768 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
769 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
770 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
771
772 mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx",
773 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
774
775 mutex_init(&instance->reg_write_mtx, "reg_write_mtx",
776 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
777
778 if (instance->tbolt) {
779 mutex_init(&instance->cmd_app_pool_mtx,
780 "cmd_app_pool_mtx", MUTEX_DRIVER,
781 DDI_INTR_PRI(instance->intr_pri));
782
783 mutex_init(&instance->chip_mtx,
784 "chip_mtx", MUTEX_DRIVER,
785 DDI_INTR_PRI(instance->intr_pri));
786
787 }
788
789 instance->unroll.mutexs = 1;
790
791 instance->timeout_id = (timeout_id_t)-1;
792
793 /* Register our soft-isr for highlevel interrupts. */
794 instance->isr_level = instance->intr_pri;
795 if (!(instance->tbolt)) {
796 if (instance->isr_level == HIGH_LEVEL_INTR) {
797 if (ddi_add_softintr(dip,
798 DDI_SOFTINT_HIGH,
799 &instance->soft_intr_id, NULL, NULL,
800 mrsas_softintr, (caddr_t)instance) !=
801 DDI_SUCCESS) {
802 cmn_err(CE_WARN,
803 "Software ISR did not register");
804
805 goto fail_attach;
806 }
807
808 instance->unroll.soft_isr = 1;
809
810 }
811 }
812
813 instance->softint_running = 0;
814
815 /* Allocate a transport structure */
816 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
817
818 if (tran == NULL) {
819 cmn_err(CE_WARN,
820 "scsi_hba_tran_alloc failed");
821 goto fail_attach;
822 }
823
824 instance->tran = tran;
825 instance->unroll.tran = 1;
826
827 tran->tran_hba_private = instance;
828 tran->tran_tgt_init = mrsas_tran_tgt_init;
829 tran->tran_tgt_probe = scsi_hba_probe;
830 tran->tran_tgt_free = mrsas_tran_tgt_free;
831 if (instance->tbolt) {
832 tran->tran_init_pkt =
833 mrsas_tbolt_tran_init_pkt;
834 tran->tran_start =
835 mrsas_tbolt_tran_start;
836 } else {
837 tran->tran_init_pkt = mrsas_tran_init_pkt;
838 tran->tran_start = mrsas_tran_start;
839 }
840 tran->tran_abort = mrsas_tran_abort;
841 tran->tran_reset = mrsas_tran_reset;
842 tran->tran_getcap = mrsas_tran_getcap;
843 tran->tran_setcap = mrsas_tran_setcap;
844 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
845 tran->tran_dmafree = mrsas_tran_dmafree;
846 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
847 tran->tran_quiesce = mrsas_tran_quiesce;
848 tran->tran_unquiesce = mrsas_tran_unquiesce;
849 tran->tran_bus_config = mrsas_tran_bus_config;
850
851 if (mrsas_relaxed_ordering)
852 mrsas_generic_dma_attr.dma_attr_flags |=
853 DDI_DMA_RELAXED_ORDERING;
854
855
856 tran_dma_attr = mrsas_generic_dma_attr;
857 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
858
859 /* Attach this instance of the hba */
860 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
861 != DDI_SUCCESS) {
862 cmn_err(CE_WARN,
863 "scsi_hba_attach failed");
864
865 goto fail_attach;
866 }
867 instance->unroll.tranSetup = 1;
868 con_log(CL_ANN1,
869 (CE_CONT, "scsi_hba_attach_setup() done."));
870
871 /* create devctl node for cfgadm command */
872 if (ddi_create_minor_node(dip, "devctl",
873 S_IFCHR, INST2DEVCTL(instance_no),
874 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
875 cmn_err(CE_WARN,
876 "mr_sas: failed to create devctl node.");
877
878 goto fail_attach;
879 }
880
881 instance->unroll.devctl = 1;
882
883 /* create scsi node for cfgadm command */
884 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
885 INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
886 DDI_FAILURE) {
887 cmn_err(CE_WARN,
888 "mr_sas: failed to create scsi node.");
889
890 goto fail_attach;
891 }
892
893 instance->unroll.scsictl = 1;
894
895 (void) sprintf(instance->iocnode, "%d:lsirdctl",
896 instance_no);
897
898 /*
899 * Create a node for applications
900 * for issuing ioctl to the driver.
901 */
902 if (ddi_create_minor_node(dip, instance->iocnode,
903 S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) ==
904 DDI_FAILURE) {
905 cmn_err(CE_WARN,
906 "mr_sas: failed to create ioctl node.");
907
908 goto fail_attach;
909 }
910
911 instance->unroll.ioctl = 1;
912
913 /* Create a taskq to handle dr events */
914 if ((instance->taskq = ddi_taskq_create(dip,
915 "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
916 cmn_err(CE_WARN,
917 "mr_sas: failed to create taskq ");
918 instance->taskq = NULL;
919 goto fail_attach;
920 }
921 instance->unroll.taskq = 1;
922 con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done."));
923
924 /* enable interrupt */
925 instance->func_ptr->enable_intr(instance);
926
927 /* initiate AEN */
928 if (start_mfi_aen(instance)) {
929 cmn_err(CE_WARN,
930 "mr_sas: failed to initiate AEN.");
931 goto fail_attach;
932 }
933 instance->unroll.aenPend = 1;
934 con_log(CL_ANN1,
935 (CE_CONT, "AEN started for instance %d.", instance_no));
936
937 /* Finally! We are on the air. */
938 ddi_report_dev(dip);
939
940 /* FMA handle checking. */
941 if (mrsas_check_acc_handle(instance->regmap_handle) !=
942 DDI_SUCCESS) {
943 goto fail_attach;
944 }
945 if (mrsas_check_acc_handle(instance->pci_handle) !=
946 DDI_SUCCESS) {
947 goto fail_attach;
948 }
949
950 instance->mr_ld_list =
951 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
952 KM_SLEEP);
953 if (instance->mr_ld_list == NULL) {
954 cmn_err(CE_WARN, "mr_sas attach(): "
955 "failed to allocate ld_list array");
956 goto fail_attach;
957 }
958 instance->unroll.ldlist_buff = 1;
959
960 #ifdef PDSUPPORT
961 if (instance->tbolt) {
962 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
963 instance->mr_tbolt_pd_list =
964 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
965 sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
966 ASSERT(instance->mr_tbolt_pd_list);
967 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
968 instance->mr_tbolt_pd_list[i].lun_type =
969 MRSAS_TBOLT_PD_LUN;
970 instance->mr_tbolt_pd_list[i].dev_id =
971 (uint8_t)i;
972 }
973
974 instance->unroll.pdlist_buff = 1;
975 }
976 #endif
977 break;
978 case DDI_PM_RESUME:
979 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
980 break;
981 case DDI_RESUME:
982 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME"));
983 break;
984 default:
985 con_log(CL_ANN,
986 (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd));
987 return (DDI_FAILURE);
988 }
989
990
991 cmn_err(CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d",
992 instance_no);
993 return (DDI_SUCCESS);
994
995 fail_attach:
996
997 mrsas_undo_resources(dip, instance);
998
999 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
1000 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
1001
1002 mrsas_fm_fini(instance);
1003
1004 pci_config_teardown(&instance->pci_handle);
1005 ddi_soft_state_free(mrsas_state, instance_no);
1006
1007 con_log(CL_ANN, (CE_WARN, "mr_sas: return failure from mrsas_attach"));
1008
1009 cmn_err(CE_WARN, "mrsas_attach() return FAILURE instance_num %d",
1010 instance_no);
1011
1012 return (DDI_FAILURE);
1013 }
1014
1015 /*
1016 * getinfo - gets device information
1017 * @dip:
1018 * @cmd:
1019 * @arg:
1020 * @resultp:
1021 *
1022 * The system calls getinfo() to obtain configuration information that only
1023 * the driver knows. The mapping of minor numbers to device instance is
1024 * entirely under the control of the driver. The system sometimes needs to ask
1025 * the driver which device a particular dev_t represents.
1026 * Given the device number return the devinfo pointer from the scsi_device
1027 * structure.
1028 */
1029 /*ARGSUSED*/
1030 static int
1031 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1032 {
1033 int rval;
1034 int mrsas_minor = getminor((dev_t)arg);
1035
1036 struct mrsas_instance *instance;
1037
1038 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1039
1040 switch (cmd) {
1041 case DDI_INFO_DEVT2DEVINFO:
1042 instance = (struct mrsas_instance *)
1043 ddi_get_soft_state(mrsas_state,
1044 MINOR2INST(mrsas_minor));
1045
1046 if (instance == NULL) {
1047 *resultp = NULL;
1048 rval = DDI_FAILURE;
1049 } else {
1050 *resultp = instance->dip;
1051 rval = DDI_SUCCESS;
1052 }
1053 break;
1054 case DDI_INFO_DEVT2INSTANCE:
1055 *resultp = (void *)(intptr_t)
1056 (MINOR2INST(getminor((dev_t)arg)));
1057 rval = DDI_SUCCESS;
1058 break;
1059 default:
1060 *resultp = NULL;
1061 rval = DDI_FAILURE;
1062 }
1063
1064 return (rval);
1065 }
1066
1067 /*
1068 * detach - detaches a device from the system
1069 * @dip: pointer to the device's dev_info structure
1070 * @cmd: type of detach
1071 *
1072 * A driver's detach() entry point is called to detach an instance of a device
1073 * that is bound to the driver. The entry point is called with the instance of
1074 * the device node to be detached and with DDI_DETACH, which is specified as
1075 * the cmd argument to the entry point.
1076 * This routine is called during driver unload. We free all the allocated
1077 * resources and call the corresponding LLD so that it can also release all
1078 * its resources.
1079 */
static int
mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance_no;

	struct mrsas_instance *instance;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	/* Look up the per-instance soft state created at attach time. */
	instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
	    instance_no);

	if (!instance) {
		cmn_err(CE_WARN,
		    "mr_sas:%d could not get instance in detach",
		    instance_no);

		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE,
	    "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
	    instance_no, instance->vendor_id, instance->device_id,
	    instance->subsysvid, instance->subsysid));

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_DETACH"));

		/*
		 * Cancel the periodic config timeout first so it cannot
		 * fire during teardown.  The mutex is dropped across
		 * untimeout() — presumably because the timeout handler
		 * itself takes config_dev_mtx and untimeout() waits for
		 * a running handler (NOTE(review): confirm against the
		 * timeout handler's locking).
		 */
		mutex_enter(&instance->config_dev_mtx);
		if (instance->timeout_id != (timeout_id_t)-1) {
			mutex_exit(&instance->config_dev_mtx);
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;
			mutex_enter(&instance->config_dev_mtx);
			instance->unroll.timer = 0;
		}
		mutex_exit(&instance->config_dev_mtx);

		/*
		 * Detach from SCSA before tearing anything else down;
		 * failure here aborts the whole detach, leaving the
		 * instance attached.
		 */
		if (instance->unroll.tranSetup == 1) {
			if (scsi_hba_detach(dip) != DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "mr_sas2%d: failed to detach",
				    instance_no);
				return (DDI_FAILURE);
			}
			instance->unroll.tranSetup = 0;
			con_log(CL_ANN1,
			    (CE_CONT, "scsi_hba_dettach() done."));
		}

		/* Ask the firmware to flush its disk cache to media. */
		flush_cache(instance);

		/* Release everything attach() set up, in reverse order. */
		mrsas_undo_resources(dip, instance);

		mrsas_fm_fini(instance);

		pci_config_teardown(&instance->pci_handle);
		ddi_soft_state_free(mrsas_state, instance_no);
		break;

	case DDI_PM_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_PM_SUSPEND"));

		break;
	case DDI_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_SUSPEND"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1166
1167
/*
 * mrsas_undo_resources - release everything attach() acquired.
 *
 * Each instance->unroll.* flag records one resource acquired during
 * attach; this routine checks each flag and frees the corresponding
 * resource, clearing the flag as it goes.  It is shared by the
 * fail_attach path (partial teardown of whatever got set up before the
 * failure) and the DDI_DETACH path (full teardown), which is why every
 * step is individually guarded.
 */
static void
mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance)
{
	int instance_no;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	instance_no = ddi_get_instance(dip);


	/* Remove the driver-private ioctl minor node. */
	if (instance->unroll.ioctl == 1) {
		ddi_remove_minor_node(dip, instance->iocnode);
		instance->unroll.ioctl = 0;
	}

	if (instance->unroll.scsictl == 1) {
		ddi_remove_minor_node(dip, "scsi");
		instance->unroll.scsictl = 0;
	}

	if (instance->unroll.devctl == 1) {
		ddi_remove_minor_node(dip, "devctl");
		instance->unroll.devctl = 0;
	}

	/*
	 * Detach from the SCSA framework.  On failure we bail out
	 * entirely, leaving the remaining resources allocated — the
	 * instance cannot safely be torn down while SCSA still
	 * references it.
	 */
	if (instance->unroll.tranSetup == 1) {
		if (scsi_hba_detach(dip) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "mr_sas2%d: failed to detach", instance_no);
			return;	/* DDI_FAILURE */
		}
		instance->unroll.tranSetup = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
	}

	if (instance->unroll.tran == 1) {
		scsi_hba_tran_free(instance->tran);
		instance->unroll.tran = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done."));
	}

	/* Abort the outstanding RAID-map sync command (Thunderbolt only). */
	if (instance->unroll.syncCmd == 1) {
		if (instance->tbolt) {
			if (abort_syncmap_cmd(instance,
			    instance->map_update_cmd)) {
				cmn_err(CE_WARN, "mrsas_detach: "
				    "failed to abort previous syncmap command");
			}

			instance->unroll.syncCmd = 0;
			con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
		}
	}

	/* Abort the outstanding async-event-notification command. */
	if (instance->unroll.aenPend == 1) {
		if (abort_aen_cmd(instance, instance->aen_cmd))
			cmn_err(CE_WARN, "mrsas_detach: "
			    "failed to abort prevous AEN command");

		instance->unroll.aenPend = 0;
		con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
		/* This means the controller is fully initialized and running */
		/* Shutdown should be a last command to controller. */
		/* shutdown_controller(); */
	}


	/*
	 * Cancel the periodic timer if mrsas_detach() has not already
	 * done so (the fail_attach path reaches here with it armed).
	 */
	if (instance->unroll.timer == 1) {
		if (instance->timeout_id != (timeout_id_t)-1) {
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;

			instance->unroll.timer = 0;
		}
	}

	/*
	 * Mask controller interrupts before the ISR and its resources
	 * are torn down below.
	 */
	instance->func_ptr->disable_intr(instance);


	if (instance->unroll.mutexs == 1) {
		mutex_destroy(&instance->cmd_pool_mtx);
		mutex_destroy(&instance->app_cmd_pool_mtx);
		mutex_destroy(&instance->cmd_pend_mtx);
		mutex_destroy(&instance->completed_pool_mtx);
		mutex_destroy(&instance->sync_map_mtx);
		mutex_destroy(&instance->int_cmd_mtx);
		cv_destroy(&instance->int_cmd_cv);
		mutex_destroy(&instance->config_dev_mtx);
		mutex_destroy(&instance->ocr_flags_mtx);
		mutex_destroy(&instance->reg_write_mtx);

		/* Thunderbolt controllers have two extra locks. */
		if (instance->tbolt) {
			mutex_destroy(&instance->cmd_app_pool_mtx);
			mutex_destroy(&instance->chip_mtx);
		}

		instance->unroll.mutexs = 0;
		con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done."));
	}


	if (instance->unroll.soft_isr == 1) {
		ddi_remove_softintr(instance->soft_intr_id);
		instance->unroll.soft_isr = 0;
	}

	if (instance->unroll.intr == 1) {
		mrsas_rem_intrs(instance);
		instance->unroll.intr = 0;
	}


	if (instance->unroll.taskq == 1) {
		if (instance->taskq) {
			ddi_taskq_destroy(instance->taskq);
			instance->unroll.taskq = 0;
		}

	}

	/*
	 * free dma memory allocated for
	 * cmds/frames/queues/driver version etc
	 */
	if (instance->unroll.verBuff == 1) {
		(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
		instance->unroll.verBuff = 0;
	}

	/* Free the physical-device list (Thunderbolt / PDSUPPORT). */
	if (instance->unroll.pdlist_buff == 1) {
		if (instance->mr_tbolt_pd_list != NULL) {
			kmem_free(instance->mr_tbolt_pd_list,
			    MRSAS_TBOLT_GET_PD_MAX(instance) *
			    sizeof (struct mrsas_tbolt_pd));
		}

		instance->mr_tbolt_pd_list = NULL;
		instance->unroll.pdlist_buff = 0;
	}

	/* Free the logical-drive list. */
	if (instance->unroll.ldlist_buff == 1) {
		if (instance->mr_ld_list != NULL) {
			kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
			    * sizeof (struct mrsas_ld));
		}

		instance->mr_ld_list = NULL;
		instance->unroll.ldlist_buff = 0;
	}

	/* MPI2 (Thunderbolt) and MFI controllers use separate pools. */
	if (instance->tbolt) {
		if (instance->unroll.alloc_space_mpi2 == 1) {
			free_space_for_mpi2(instance);
			instance->unroll.alloc_space_mpi2 = 0;
		}
	} else {
		if (instance->unroll.alloc_space_mfi == 1) {
			free_space_for_mfi(instance);
			instance->unroll.alloc_space_mfi = 0;
		}
	}

	/* Unmap the controller register window last. */
	if (instance->unroll.regs == 1) {
		ddi_regs_map_free(&instance->regmap_handle);
		instance->unroll.regs = 0;
		con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done."));
	}
}
1337
1338
1339
1340 /*
1341 * ************************************************************************** *
1342 * *
1343 * common entry points - for character driver types *
1344 * *
1345 * ************************************************************************** *
1346 */
1347 /*
1348 * open - gets access to a device
1349 * @dev:
1350 * @openflags:
1351 * @otyp:
1352 * @credp:
1353 *
1354 * Access to a device by one or more application programs is controlled
1355 * through the open() and close() entry points. The primary function of
1356 * open() is to verify that the open request is allowed.
1357 */
1358 static int
1359 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1360 {
1361 int rval = 0;
1362
1363 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1364
1365 /* Check root permissions */
1366 if (drv_priv(credp) != 0) {
1367 con_log(CL_ANN, (CE_WARN,
1368 "mr_sas: Non-root ioctl access denied!"));
1369 return (EPERM);
1370 }
1371
1372 /* Verify we are being opened as a character device */
1373 if (otyp != OTYP_CHR) {
1374 con_log(CL_ANN, (CE_WARN,
1375 "mr_sas: ioctl node must be a char node"));
1376 return (EINVAL);
1377 }
1378
1379 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1380 == NULL) {
1381 return (ENXIO);
1382 }
1383
1384 if (scsi_hba_open) {
1385 rval = scsi_hba_open(dev, openflags, otyp, credp);
1386 }
1387
1388 return (rval);
1389 }
1390
1391 /*
1392 * close - gives up access to a device
1393 * @dev:
1394 * @openflags:
1395 * @otyp:
1396 * @credp:
1397 *
1398 * close() should perform any cleanup necessary to finish using the minor
1399 * device, and prepare the device (and driver) to be opened again.
1400 */
1401 static int
1402 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1403 {
1404 int rval = 0;
1405
1406 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1407
1408 /* no need for locks! */
1409
1410 if (scsi_hba_close) {
1411 rval = scsi_hba_close(dev, openflags, otyp, credp);
1412 }
1413
1414 return (rval);
1415 }
1416
1417 /*
1418 * ioctl - performs a range of I/O commands for character drivers
1419 * @dev:
1420 * @cmd:
1421 * @arg:
1422 * @mode:
1423 * @credp:
1424 * @rvalp:
1425 *
1426 * ioctl() routine must make sure that user data is copied into or out of the
1427 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1428 * and ddi_copyout(), as appropriate.
1429 * This is a wrapper routine to serialize access to the actual ioctl routine.
1430 * ioctl() should return 0 on success, or the appropriate error number. The
1431 * driver may also set the value returned to the calling process through rvalp.
1432 */
1433
1434 static int
1435 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1436 int *rvalp)
1437 {
1438 int rval = 0;
1439
1440 struct mrsas_instance *instance;
1441 struct mrsas_ioctl *ioctl;
1442 struct mrsas_aen aen;
1443 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1444
1445 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1446
1447 if (instance == NULL) {
1448 /* invalid minor number */
1449 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1450 return (ENXIO);
1451 }
1452
1453 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1454 KM_SLEEP);
1455 if (ioctl == NULL) {
1456 /* Failed to allocate memory for ioctl */
1457 con_log(CL_ANN, (CE_WARN, "mr_sas_ioctl: "
1458 "failed to allocate memory for ioctl"));
1459 return (ENOMEM);
1460 }
1461
1462 switch ((uint_t)cmd) {
1463 case MRSAS_IOCTL_FIRMWARE:
1464 if (ddi_copyin((void *)arg, ioctl,
1465 sizeof (struct mrsas_ioctl), mode)) {
1466 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1467 "ERROR IOCTL copyin"));
1468 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1469 return (EFAULT);
1470 }
1471
1472 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1473 rval = handle_drv_ioctl(instance, ioctl, mode);
1474 } else {
1475 rval = handle_mfi_ioctl(instance, ioctl, mode);
1476 }
1477
1478 if (ddi_copyout((void *)ioctl, (void *)arg,
1479 (sizeof (struct mrsas_ioctl) - 1), mode)) {
1480 con_log(CL_ANN, (CE_WARN,
1481 "mrsas_ioctl: copy_to_user failed"));
1482 rval = 1;
1483 }
1484
1485 break;
1486 case MRSAS_IOCTL_AEN:
1487 con_log(CL_ANN,
1488 (CE_NOTE, "mrsas_ioctl: IOCTL Register AEN.\n"));
1489
1490 if (ddi_copyin((void *) arg, &aen,
1491 sizeof (struct mrsas_aen), mode)) {
1492 con_log(CL_ANN, (CE_WARN,
1493 "mrsas_ioctl: ERROR AEN copyin"));
1494 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1495 return (EFAULT);
1496 }
1497
1498 rval = handle_mfi_aen(instance, &aen);
1499
1500 if (ddi_copyout((void *) &aen, (void *)arg,
1501 sizeof (struct mrsas_aen), mode)) {
1502 con_log(CL_ANN, (CE_WARN,
1503 "mrsas_ioctl: copy_to_user failed"));
1504 rval = 1;
1505 }
1506
1507 break;
1508 default:
1509 rval = scsi_hba_ioctl(dev, cmd, arg,
1510 mode, credp, rvalp);
1511
1512 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
1513 "scsi_hba_ioctl called, ret = %x.", rval));
1514 }
1515
1516 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1517 return (rval);
1518 }
1519
1520 /*
1521 * ************************************************************************** *
1522 * *
1523 * common entry points - for block driver types *
1524 * *
1525 * ************************************************************************** *
1526 */
1527 #ifdef __sparc
1528 /*
1529 * reset - TBD
1530 * @dip:
1531 * @cmd:
1532 *
1533 * TBD
1534 */
1535 /*ARGSUSED*/
1536 static int
1537 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1538 {
1539 int instance_no;
1540
1541 struct mrsas_instance *instance;
1542
1543 instance_no = ddi_get_instance(dip);
1544 instance = (struct mrsas_instance *)ddi_get_soft_state
1545 (mrsas_state, instance_no);
1546
1547 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1548
1549 if (!instance) {
1550 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1551 "in reset", instance_no));
1552 return (DDI_FAILURE);
1553 }
1554
1555 instance->func_ptr->disable_intr(instance);
1556
1557 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1558 instance_no));
1559
1560 flush_cache(instance);
1561
1562 return (DDI_SUCCESS);
1563 }
1564 #else /* __sparc */
1565 /*ARGSUSED*/
/*
 * mrsas_quiesce - quiesce the controller for fast reboot (non-SPARC).
 *
 * Aborts the outstanding AEN and (on Thunderbolt) syncmap commands,
 * masks interrupts, flushes the firmware disk cache, and waits for all
 * outstanding I/O to drain.  Returns DDI_FAILURE if the adapter is
 * dead, mid-reset, or cannot be drained.
 */
/*ARGSUSED*/
static int
mrsas_quiesce(dev_info_t *dip)
{
	int instance_no;

	struct mrsas_instance *instance;

	instance_no = ddi_get_instance(dip);
	instance = (struct mrsas_instance *)ddi_get_soft_state
	    (mrsas_state, instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (!instance) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
		    "in quiesce", instance_no));
		return (DDI_FAILURE);
	}
	/* Refuse to quiesce a dead adapter or one undergoing OCR. */
	if (instance->deadadapter || instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
		    "healthy state", instance_no));
		return (DDI_FAILURE);
	}

	/* Cancel the long-running AEN command; failure is non-fatal. */
	if (abort_aen_cmd(instance, instance->aen_cmd)) {
		con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
		    "failed to abort prevous AEN command QUIESCE"));
	}

	/* Thunderbolt: the RAID-map sync command must also be aborted. */
	if (instance->tbolt) {
		if (abort_syncmap_cmd(instance,
		    instance->map_update_cmd)) {
			cmn_err(CE_WARN,
			    "mrsas_detach: failed to abort "
			    "previous syncmap command");
			return (DDI_FAILURE);
		}
	}

	instance->func_ptr->disable_intr(instance);

	con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
	    instance_no));

	/* Flush dirty firmware cache to media before the reboot. */
	flush_cache(instance);

	/* Drain all in-flight commands; fail the quiesce if any remain. */
	if (wait_for_outstanding(instance)) {
		con_log(CL_ANN1,
		    (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
1619 #endif /* __sparc */
1620
1621 /*
1622 * ************************************************************************** *
1623 * *
1624 * entry points (SCSI HBA) *
1625 * *
1626 * ************************************************************************** *
1627 */
1628 /*
1629 * tran_tgt_init - initialize a target device instance
1630 * @hba_dip:
1631 * @tgt_dip:
1632 * @tran:
1633 * @sd:
1634 *
1635 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1636 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1637 * the device's address as valid and supportable for that particular HBA.
1638 * By returning DDI_FAILURE, the instance of the target driver for that device
1639 * is not probed or attached.
1640 */
1641 /*ARGSUSED*/
1642 static int
1643 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1644 scsi_hba_tran_t *tran, struct scsi_device *sd)
1645 {
1646 struct mrsas_instance *instance;
1647 uint16_t tgt = sd->sd_address.a_target;
1648 uint8_t lun = sd->sd_address.a_lun;
1649 dev_info_t *child = NULL;
1650
1651 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
1652 tgt, lun));
1653
1654 instance = ADDR2MR(&sd->sd_address);
1655
1656 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
1657 /*
1658 * If no persistent node exists, we don't allow .conf node
1659 * to be created.
1660 */
1661 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
1662 con_log(CL_DLEVEL2,
1663 (CE_NOTE, "mrsas_tgt_init find child ="
1664 " %p t = %d l = %d", (void *)child, tgt, lun));
1665 if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
1666 DDI_SUCCESS)
1667 /* Create this .conf node */
1668 return (DDI_SUCCESS);
1669 }
1670 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
1671 "DDI_FAILURE t = %d l = %d", tgt, lun));
1672 return (DDI_FAILURE);
1673
1674 }
1675
1676 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1677 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1678
1679 if (tgt < MRDRV_MAX_LD && lun == 0) {
1680 if (instance->mr_ld_list[tgt].dip == NULL &&
1681 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1682 mutex_enter(&instance->config_dev_mtx);
1683 instance->mr_ld_list[tgt].dip = tgt_dip;
1684 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1685 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1686 mutex_exit(&instance->config_dev_mtx);
1687 }
1688 }
1689
1690 #ifdef PDSUPPORT
1691 else if (instance->tbolt) {
1692 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1693 mutex_enter(&instance->config_dev_mtx);
1694 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1695 instance->mr_tbolt_pd_list[tgt].flag =
1696 MRDRV_TGT_VALID;
1697 mutex_exit(&instance->config_dev_mtx);
1698 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1699 "t%xl%x", tgt, lun));
1700 }
1701 }
1702 #endif
1703
1704 return (DDI_SUCCESS);
1705 }
1706
1707 /*ARGSUSED*/
1708 static void
1709 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1710 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1711 {
1712 struct mrsas_instance *instance;
1713 int tgt = sd->sd_address.a_target;
1714 int lun = sd->sd_address.a_lun;
1715
1716 instance = ADDR2MR(&sd->sd_address);
1717
1718 con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));
1719
1720 if (tgt < MRDRV_MAX_LD && lun == 0) {
1721 if (instance->mr_ld_list[tgt].dip == tgt_dip) {
1722 mutex_enter(&instance->config_dev_mtx);
1723 instance->mr_ld_list[tgt].dip = NULL;
1724 mutex_exit(&instance->config_dev_mtx);
1725 }
1726 }
1727
1728 #ifdef PDSUPPORT
1729 else if (instance->tbolt) {
1730 mutex_enter(&instance->config_dev_mtx);
1731 instance->mr_tbolt_pd_list[tgt].dip = NULL;
1732 mutex_exit(&instance->config_dev_mtx);
1733 con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
1734 "for tgt:%x", tgt));
1735 }
1736 #endif
1737
1738 }
1739
1740 dev_info_t *
1741 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1742 {
1743 dev_info_t *child = NULL;
1744 char addr[SCSI_MAXNAMELEN];
1745 char tmp[MAXNAMELEN];
1746
1747 (void) sprintf(addr, "%x,%x", tgt, lun);
1748 for (child = ddi_get_child(instance->dip); child;
1749 child = ddi_get_next_sibling(child)) {
1750
1751 /* XXX KEBE ASKS - why was this added?! */
1752 if (ndi_dev_is_persistent_node(child) == 0) {
1753 continue;
1754 }
1755
1756 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1757 DDI_SUCCESS) {
1758 continue;
1759 }
1760
1761 if (strcmp(addr, tmp) == 0) {
1762 break;
1763 }
1764 }
1765 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1766 (void *)child));
1767 return (child);
1768 }
1769
1770 /*
1771 * mrsas_name_node -
1772 * @dip:
1773 * @name:
1774 * @len:
1775 */
1776 static int
1777 mrsas_name_node(dev_info_t *dip, char *name, int len)
1778 {
1779 int tgt, lun;
1780
1781 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1782 DDI_PROP_DONTPASS, "target", -1);
1783 con_log(CL_DLEVEL2, (CE_NOTE,
1784 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1785 if (tgt == -1) {
1786 return (DDI_FAILURE);
1787 }
1788 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1789 "lun", -1);
1790 con_log(CL_DLEVEL2,
1791 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1792 if (lun == -1) {
1793 return (DDI_FAILURE);
1794 }
1795 (void) snprintf(name, len, "%x,%x", tgt, lun);
1796 return (DDI_SUCCESS);
1797 }
1798
1799 /*
1800 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1801 * @ap:
1802 * @pkt:
1803 * @bp:
1804 * @cmdlen:
1805 * @statuslen:
1806 * @tgtlen:
1807 * @flags:
1808 * @callback:
1809 *
1810 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1811 * structure and DMA resources for a target driver request. The
1812 * tran_init_pkt() entry point is called when the target driver calls the
1813 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1814 * is a request to perform one or more of three possible services:
1815 * - allocation and initialization of a scsi_pkt structure
1816 * - allocation of DMA resources for data transfer
1817 * - reallocation of DMA resources for the next portion of the data transfer
1818 */
1819 static struct scsi_pkt *
1820 mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1821 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1822 int flags, int (*callback)(), caddr_t arg)
1823 {
1824 struct scsa_cmd *acmd;
1825 struct mrsas_instance *instance;
1826 struct scsi_pkt *new_pkt;
1827
1828 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1829
1830 instance = ADDR2MR(ap);
1831
1832 /* step #1 : pkt allocation */
1833 if (pkt == NULL) {
1834 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1835 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1836 if (pkt == NULL) {
1837 return (NULL);
1838 }
1839
1840 acmd = PKT2CMD(pkt);
1841
1842 /*
1843 * Initialize the new pkt - we redundantly initialize
1844 * all the fields for illustrative purposes.
1845 */
1846 acmd->cmd_pkt = pkt;
1847 acmd->cmd_flags = 0;
1848 acmd->cmd_scblen = statuslen;
1849 acmd->cmd_cdblen = cmdlen;
1850 acmd->cmd_dmahandle = NULL;
1851 acmd->cmd_ncookies = 0;
1852 acmd->cmd_cookie = 0;
1853 acmd->cmd_cookiecnt = 0;
1854 acmd->cmd_nwin = 0;
1855
1856 pkt->pkt_address = *ap;
1857 pkt->pkt_comp = (void (*)())NULL;
1858 pkt->pkt_flags = 0;
1859 pkt->pkt_time = 0;
1860 pkt->pkt_resid = 0;
1861 pkt->pkt_state = 0;
1862 pkt->pkt_statistics = 0;
1863 pkt->pkt_reason = 0;
1864 new_pkt = pkt;
1865 } else {
1866 acmd = PKT2CMD(pkt);
1867 new_pkt = NULL;
1868 }
1869
1870 /* step #2 : dma allocation/move */
1871 if (bp && bp->b_bcount != 0) {
1872 if (acmd->cmd_dmahandle == NULL) {
1873 if (mrsas_dma_alloc(instance, pkt, bp, flags,
1874 callback) == DDI_FAILURE) {
1875 if (new_pkt) {
1876 scsi_hba_pkt_free(ap, new_pkt);
1877 }
1878 return ((struct scsi_pkt *)NULL);
1879 }
1880 } else {
1881 if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1882 return ((struct scsi_pkt *)NULL);
1883 }
1884 }
1885 }
1886
1887 return (pkt);
1888 }
1889
1890 /*
1891 * tran_start - transport a SCSI command to the addressed target
1892 * @ap:
1893 * @pkt:
1894 *
1895 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1896 * SCSI command to the addressed target. The SCSI command is described
1897 * entirely within the scsi_pkt structure, which the target driver allocated
1898 * through the HBA driver's tran_init_pkt() entry point. If the command
1899 * involves a data transfer, DMA resources must also have been allocated for
1900 * the scsi_pkt structure.
1901 *
1902 * Return Values :
1903 * TRAN_BUSY - request queue is full, no more free scbs
1904 * TRAN_ACCEPT - pkt has been submitted to the instance
1905 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t cmd_done = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);
	struct mrsas_cmd *cmd;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/* A dead adapter accepts no further I/O: fail fast. */
	if (instance->deadadapter == 1) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_tran_start: return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesnt take any more IOs"));
		if (pkt) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}

	/* During online controller reset, push the I/O back to SCSA. */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}

	con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));

	pkt->pkt_reason = CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	/* Translate the scsi_pkt into an MFI command frame. */
	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		/* Complete inline unless the caller wants polled mode. */
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	/* No frame available: ask SCSA to retry later. */
	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/* Interrupt-driven path: throttle against the firmware. */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
			DTRACE_PROBE2(start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
		    "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/* Polled (FLAG_NOINTR) path: complete before returning. */
		struct mrsas_header *hdr = &cmd->frame->hdr;

		/* cmd->sync_cmd = MRSAS_TRUE; */ /* KEBE asks, inherit? */

		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Map the firmware completion status onto the scsi_pkt. */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: scsi done with error"));
			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;

			/* CHECK CONDITION: caller should fetch sense data. */
			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: device not found error"));
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;

		default:
			/* Any other firmware status is reported as BUSY. */
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		/* FMA check, then return the frame to the free pool. */
		(void) mrsas_common_check(instance, cmd);
		DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
		    uint8_t, hdr->cmd_status);
		return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}
2028
2029 /*
2030 * tran_abort - Abort any commands that are currently in transport
2031 * @ap:
2032 * @pkt:
2033 *
2034 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
2035 * commands that are currently in transport for a particular target. This entry
2036 * point is called when a target driver calls scsi_abort(). The tran_abort()
2037 * entry point should attempt to abort the command denoted by the pkt
2038 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
2039 * abort all outstanding commands in the transport layer for the particular
2040 * target or logical unit.
2041 */
2042 /*ARGSUSED*/
2043 static int
2044 mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
2045 {
2046 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2047
2048 /* abort command not supported by H/W */
2049
2050 return (DDI_FAILURE);
2051 }
2052
2053 /*
2054 * tran_reset - reset either the SCSI bus or target
2055 * @ap:
2056 * @level:
2057 *
2058 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
2059 * the SCSI bus or a particular SCSI target device. This entry point is called
2060 * when a target driver calls scsi_reset(). The tran_reset() entry point must
2061 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
2062 * particular target or logical unit must be reset.
2063 */
2064 /*ARGSUSED*/
2065 static int
2066 mrsas_tran_reset(struct scsi_address *ap, int level)
2067 {
2068 struct mrsas_instance *instance = ADDR2MR(ap);
2069
2070 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2071
2072 if (wait_for_outstanding(instance)) {
2073 con_log(CL_ANN1,
2074 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
2075 return (DDI_FAILURE);
2076 } else {
2077 return (DDI_SUCCESS);
2078 }
2079 }
2080
2081 #if 0
2082 /*
2083 * tran_bus_reset - reset the SCSI bus
2084 * @dip:
2085 * @level:
2086 *
2087 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
2088 * initialized during the HBA driver's attach(). The vector should point to
2089 * an HBA entry point that is to be called when a user initiates a bus reset.
2090 * Implementation is hardware specific. If the HBA driver cannot reset the
2091 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
2092 * or not initialize this vector.
2093 */
2094 /*ARGSUSED*/
/*
 * NOTE: this function sits inside an #if 0 block and is not compiled.
 * It mirrors mrsas_tran_reset(): drain all outstanding commands and
 * report success/failure, with no actual hardware bus reset.
 */
static int
mrsas_tran_bus_reset(dev_info_t *dip, int level)
{
	int instance_no = ddi_get_instance(dip);

	struct mrsas_instance *instance = ddi_get_soft_state(mrsas_state,
	    instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* Succeed only if every outstanding command drains. */
	if (wait_for_outstanding(instance)) {
		con_log(CL_ANN1,
		    (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
		return (DDI_FAILURE);
	} else {
		return (DDI_SUCCESS);
	}
}
2113 #endif
2114
2115 /*
2116 * tran_getcap - get one of a set of SCSA-defined capabilities
2117 * @ap:
2118 * @cap:
2119 * @whom:
2120 *
2121 * The target driver can request the current setting of the capability for a
2122 * particular target by setting the whom parameter to nonzero. A whom value of
2123 * zero indicates a request for the current setting of the general capability
2124 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
2125 * for undefined capabilities or the current value of the requested capability.
2126 */
2127 /*ARGSUSED*/
2128 static int
2129 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
2130 {
2131 int rval = 0;
2132
2133 struct mrsas_instance *instance = ADDR2MR(ap);
2134
2135 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2136
2137 /* we do allow inquiring about capabilities for other targets */
2138 if (cap == NULL) {
2139 return (-1);
2140 }
2141
2142 switch (scsi_hba_lookup_capstr(cap)) {
2143 case SCSI_CAP_DMA_MAX:
2144 if (instance->tbolt) {
2145 /* Limit to 256k max transfer */
2146 rval = mrsas_tbolt_max_cap_maxxfer;
2147 } else {
2148 /* Limit to 16MB max transfer */
2149 rval = mrsas_max_cap_maxxfer;
2150 }
2151 break;
2152 case SCSI_CAP_MSG_OUT:
2153 rval = 1;
2154 break;
2155 case SCSI_CAP_DISCONNECT:
2156 rval = 0;
2157 break;
2158 case SCSI_CAP_SYNCHRONOUS:
2159 rval = 0;
2160 break;
2161 case SCSI_CAP_WIDE_XFER:
2162 rval = 1;
2163 break;
2164 case SCSI_CAP_TAGGED_QING:
2165 rval = 1;
2166 break;
2167 case SCSI_CAP_UNTAGGED_QING:
2168 rval = 1;
2169 break;
2170 case SCSI_CAP_PARITY:
2171 rval = 1;
2172 break;
2173 case SCSI_CAP_INITIATOR_ID:
2174 rval = instance->init_id;
2175 break;
2176 case SCSI_CAP_ARQ:
2177 rval = 1;
2178 break;
2179 case SCSI_CAP_LINKED_CMDS:
2180 rval = 0;
2181 break;
2182 case SCSI_CAP_RESET_NOTIFICATION:
2183 rval = 1;
2184 break;
2185 case SCSI_CAP_GEOMETRY:
2186 rval = -1;
2187
2188 break;
2189 default:
2190 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2191 scsi_hba_lookup_capstr(cap)));
2192 rval = -1;
2193 break;
2194 }
2195
2196 return (rval);
2197 }
2198
2199 /*
2200 * tran_setcap - set one of a set of SCSA-defined capabilities
2201 * @ap:
2202 * @cap:
2203 * @value:
2204 * @whom:
2205 *
2206 * The target driver might request that the new value be set for a particular
2207 * target by setting the whom parameter to nonzero. A whom value of zero
2208 * means that request is to set the new value for the SCSI bus or for adapter
2209 * hardware in general.
2210 * The tran_setcap() should return the following values as appropriate:
2211 * - -1 for undefined capabilities
2212 * - 0 if the HBA driver cannot set the capability to the requested value
2213 * - 1 if the HBA driver is able to set the capability to the requested value
2214 */
2215 /*ARGSUSED*/
2216 static int
2217 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2218 {
2219 int rval = 1;
2220
2221 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2222
2223 /* We don't allow setting capabilities for other targets */
2224 if (cap == NULL || whom == 0) {
2225 return (-1);
2226 }
2227
2228 switch (scsi_hba_lookup_capstr(cap)) {
2229 case SCSI_CAP_DMA_MAX:
2230 case SCSI_CAP_MSG_OUT:
2231 case SCSI_CAP_PARITY:
2232 case SCSI_CAP_LINKED_CMDS:
2233 case SCSI_CAP_RESET_NOTIFICATION:
2234 case SCSI_CAP_DISCONNECT:
2235 case SCSI_CAP_SYNCHRONOUS:
2236 case SCSI_CAP_UNTAGGED_QING:
2237 case SCSI_CAP_WIDE_XFER:
2238 case SCSI_CAP_INITIATOR_ID:
2239 case SCSI_CAP_ARQ:
2240 /*
2241 * None of these are settable via
2242 * the capability interface.
2243 */
2244 break;
2245 case SCSI_CAP_TAGGED_QING:
2246 rval = 1;
2247 break;
2248 case SCSI_CAP_SECTOR_SIZE:
2249 rval = 1;
2250 break;
2251
2252 case SCSI_CAP_TOTAL_SECTORS:
2253 rval = 1;
2254 break;
2255 default:
2256 rval = -1;
2257 break;
2258 }
2259
2260 return (rval);
2261 }
2262
2263 /*
2264 * tran_destroy_pkt - deallocate scsi_pkt structure
2265 * @ap:
2266 * @pkt:
2267 *
2268 * The tran_destroy_pkt() entry point is the HBA driver function that
2269 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2270 * called when the target driver calls scsi_destroy_pkt(). The
2271 * tran_destroy_pkt() entry point must free any DMA resources that have been
2272 * allocated for the packet. An implicit DMA synchronization occurs if the
2273 * DMA resources are freed and any cached data remains after the completion
2274 * of the transfer.
2275 */
2276 static void
2277 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2278 {
2279 struct scsa_cmd *acmd = PKT2CMD(pkt);
2280
2281 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2282
2283 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2284 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2285
2286 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2287
2288 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2289
2290 acmd->cmd_dmahandle = NULL;
2291 }
2292
2293 /* free the pkt */
2294 scsi_hba_pkt_free(ap, pkt);
2295 }
2296
2297 /*
2298 * tran_dmafree - deallocates DMA resources
2299 * @ap:
2300 * @pkt:
2301 *
2302 * The tran_dmafree() entry point deallocates DMAQ resources that have been
2303 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2304 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2305 * free only DMA resources allocated for a scsi_pkt structure, not the
2306 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2307 * implicitly performed.
2308 */
2309 /*ARGSUSED*/
2310 static void
2311 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2312 {
2313 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2314
2315 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2316
2317 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2318 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2319
2320 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2321
2322 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2323
2324 acmd->cmd_dmahandle = NULL;
2325 }
2326 }
2327
2328 /*
2329 * tran_sync_pkt - synchronize the DMA object allocated
2330 * @ap:
2331 * @pkt:
2332 *
2333 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2334 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2335 * entry point is called when the target driver calls scsi_sync_pkt(). If the
2336 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2337 * must synchronize the CPU's view of the data. If the data transfer direction
2338 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2339 * device's view of the data.
2340 */
2341 /*ARGSUSED*/
2342 static void
2343 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2344 {
2345 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2346
2347 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2348
2349 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2350 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2351 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2352 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2353 }
2354 }
2355
2356 /*ARGSUSED*/
2357 static int
2358 mrsas_tran_quiesce(dev_info_t *dip)
2359 {
2360 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2361
2362 return (1);
2363 }
2364
2365 /*ARGSUSED*/
2366 static int
2367 mrsas_tran_unquiesce(dev_info_t *dip)
2368 {
2369 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2370
2371 return (1);
2372 }
2373
2374
2375 /*
2376 * mrsas_isr(caddr_t)
2377 *
2378 * The Interrupt Service Routine
2379 *
2380 * Collect status for all completed commands and do callback
2381 *
2382 */
2383 static uint_t
2384 mrsas_isr(struct mrsas_instance *instance)
2385 {
2386 int need_softintr;
2387 uint32_t producer;
2388 uint32_t consumer;
2389 uint32_t context;
2390 int retval;
2391
2392 struct mrsas_cmd *cmd;
2393 struct mrsas_header *hdr;
2394 struct scsi_pkt *pkt;
2395
2396 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2397 ASSERT(instance);
2398 if (instance->tbolt) {
2399 mutex_enter(&instance->chip_mtx);
2400 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2401 !(instance->func_ptr->intr_ack(instance))) {
2402 mutex_exit(&instance->chip_mtx);
2403 return (DDI_INTR_UNCLAIMED);
2404 }
2405 retval = mr_sas_tbolt_process_outstanding_cmd(instance);
2406 mutex_exit(&instance->chip_mtx);
2407 return (retval);
2408 } else {
2409 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2410 !instance->func_ptr->intr_ack(instance)) {
2411 return (DDI_INTR_UNCLAIMED);
2412 }
2413 }
2414
2415 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2416 0, 0, DDI_DMA_SYNC_FORCPU);
2417
2418 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2419 != DDI_SUCCESS) {
2420 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2421 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2422 con_log(CL_ANN1, (CE_WARN,
2423 "mr_sas_isr(): FMA check, returning DDI_INTR_UNCLAIMED"));
2424 return (DDI_INTR_CLAIMED);
2425 }
2426 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2427
2428 #ifdef OCRDEBUG
2429 if (debug_consecutive_timeout_after_ocr_g == 1) {
2430 con_log(CL_ANN1, (CE_NOTE,
2431 "simulating consecutive timeout after ocr"));
2432 return (DDI_INTR_CLAIMED);
2433 }
2434 #endif
2435
2436 mutex_enter(&instance->completed_pool_mtx);
2437 mutex_enter(&instance->cmd_pend_mtx);
2438
2439 producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2440 instance->producer);
2441 consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2442 instance->consumer);
2443
2444 con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
2445 producer, consumer));
2446 if (producer == consumer) {
2447 con_log(CL_ANN, (CE_WARN, "producer == consumer case"));
2448 DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
2449 uint32_t, consumer);
2450 mutex_exit(&instance->cmd_pend_mtx);
2451 mutex_exit(&instance->completed_pool_mtx);
2452 return (DDI_INTR_CLAIMED);
2453 }
2454
2455 while (consumer != producer) {
2456 context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2457 &instance->reply_queue[consumer]);
2458 cmd = instance->cmd_list[context];
2459
2460 if (cmd->sync_cmd == MRSAS_TRUE) {
2461 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2462 if (hdr) {
2463 mlist_del_init(&cmd->list);
2464 }
2465 } else {
2466 pkt = cmd->pkt;
2467 if (pkt) {
2468 mlist_del_init(&cmd->list);
2469 }
2470 }
2471
2472 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
2473
2474 consumer++;
2475 if (consumer == (instance->max_fw_cmds + 1)) {
2476 consumer = 0;
2477 }
2478 }
2479 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
2480 instance->consumer, consumer);
2481 mutex_exit(&instance->cmd_pend_mtx);
2482 mutex_exit(&instance->completed_pool_mtx);
2483
2484 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2485 0, 0, DDI_DMA_SYNC_FORDEV);
2486
2487 if (instance->softint_running) {
2488 need_softintr = 0;
2489 } else {
2490 need_softintr = 1;
2491 }
2492
2493 if (instance->isr_level == HIGH_LEVEL_INTR) {
2494 if (need_softintr) {
2495 ddi_trigger_softintr(instance->soft_intr_id);
2496 }
2497 } else {
2498 /*
2499 * Not a high-level interrupt, therefore call the soft level
2500 * interrupt explicitly
2501 */
2502 (void) mrsas_softintr(instance);
2503 }
2504
2505 return (DDI_INTR_CLAIMED);
2506 }
2507
2508
2509 /*
2510 * ************************************************************************** *
2511 * *
2512 * libraries *
2513 * *
2514 * ************************************************************************** *
2515 */
2516 /*
2517 * get_mfi_pkt : Get a command from the free pool
2518 * After successful allocation, the caller of this routine
2519 * must clear the frame buffer (memset to zero) before
2520 * using the packet further.
2521 *
2522 * ***** Note *****
2523 * After clearing the frame buffer the context id of the
2524 * frame buffer SHOULD be restored back.
2525 */
2526 static struct mrsas_cmd *
2527 get_mfi_pkt(struct mrsas_instance *instance)
2528 {
2529 mlist_t *head = &instance->cmd_pool_list;
2530 struct mrsas_cmd *cmd = NULL;
2531
2532 mutex_enter(&instance->cmd_pool_mtx);
2533
2534 if (!mlist_empty(head)) {
2535 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2536 mlist_del_init(head->next);
2537 }
2538 if (cmd != NULL) {
2539 cmd->pkt = NULL;
2540 cmd->retry_count_for_ocr = 0;
2541 cmd->drv_pkt_time = 0;
2542
2543 }
2544 mutex_exit(&instance->cmd_pool_mtx);
2545
2546 return (cmd);
2547 }
2548
2549 static struct mrsas_cmd *
2550 get_mfi_app_pkt(struct mrsas_instance *instance)
2551 {
2552 mlist_t *head = &instance->app_cmd_pool_list;
2553 struct mrsas_cmd *cmd = NULL;
2554
2555 mutex_enter(&instance->app_cmd_pool_mtx);
2556
2557 if (!mlist_empty(head)) {
2558 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2559 mlist_del_init(head->next);
2560 }
2561 if (cmd != NULL) {
2562 cmd->pkt = NULL;
2563 cmd->retry_count_for_ocr = 0;
2564 cmd->drv_pkt_time = 0;
2565 }
2566
2567 mutex_exit(&instance->app_cmd_pool_mtx);
2568
2569 return (cmd);
2570 }
2571 /*
2572 * return_mfi_pkt : Return a cmd to free command pool
2573 */
2574 static void
2575 return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2576 {
2577 mutex_enter(&instance->cmd_pool_mtx);
2578 /* use mlist_add_tail for debug assistance */
2579 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2580
2581 mutex_exit(&instance->cmd_pool_mtx);
2582 }
2583
2584 static void
2585 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2586 {
2587 mutex_enter(&instance->app_cmd_pool_mtx);
2588
2589 mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2590
2591 mutex_exit(&instance->app_cmd_pool_mtx);
2592 }
/*
 * push_pending_mfi_pkt: move a command onto the pending list and make
 * sure the io_timeout_checker() timeout is armed so the command's
 * drv_pkt_time countdown is serviced once per second.
 */
void
push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;
	con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
	mutex_enter(&instance->cmd_pend_mtx);
	mlist_del_init(&cmd->list);
	mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
	if (cmd->sync_cmd == MRSAS_TRUE) {
		/* internal/DCMD command: timeout comes from the MFI frame */
		hdr = (struct mrsas_header *)&cmd->frame->hdr;
		/*
		 * NOTE(review): hdr is the address of a member of
		 * cmd->frame, so the checks below are non-NULL whenever
		 * cmd->frame is non-NULL -- confirm what they guard.
		 */
		if (hdr) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x "
			    "time %llx",
			    (void *)cmd, cmd->index,
			    gethrtime()));
			/* Wait for specified interval */
			cmd->drv_pkt_time = ddi_get16(
			    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
			/* never wait less than the global minimum */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_pkt(): "
			    "Called IO Timeout Value %x\n",
			    cmd->drv_pkt_time));
		}
		/* arm the 1-second checker if it is not already running */
		if (hdr && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	} else {
		/* SCSA I/O command: use the global default timeout */
		pkt = cmd->pkt;
		if (pkt) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x pkt %p, "
			    "time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
		}
		/* arm the 1-second checker if it is not already running */
		if (pkt && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	}

	mutex_exit(&instance->cmd_pend_mtx);

}
2645
/*
 * mrsas_print_pending_cmds: dump every command currently on the pending
 * list, for OCR (online controller reset) diagnostics.  The debug level
 * is temporarily forced to CL_ANN1 so the con_log() calls below are
 * actually emitted, and restored on exit.  Always returns DDI_SUCCESS.
 */
int
mrsas_print_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	unsigned int flag = 1;
	struct scsi_pkt *pkt;
	int saved_level;
	int cmd_count = 0;

	saved_level = debug_level_g;
	debug_level_g = CL_ANN1;

	cmn_err(CE_NOTE, "mrsas_print_pending_cmds(): Called\n");

	while (flag) {
		/*
		 * NOTE(review): cmd_pend_mtx is dropped between
		 * iterations while 'tmp' still points into the list;
		 * this assumes the list is not changing concurrently
		 * during OCR -- confirm against the callers.
		 */
		mutex_enter(&instance->cmd_pend_mtx);
		tmp = tmp->next;
		if (tmp == head) {
			/* wrapped back to the head: list exhausted */
			mutex_exit(&instance->cmd_pend_mtx);
			flag = 0;
			con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():"
			    " NO MORE CMDS PENDING....\n"));
			break;
		} else {
			cmd = mlist_entry(tmp, struct mrsas_cmd, list);
			mutex_exit(&instance->cmd_pend_mtx);
			if (cmd) {
				if (cmd->sync_cmd == MRSAS_TRUE) {
					/* internal/DCMD command: no pkt */
					hdr = (struct mrsas_header *)
					    &cmd->frame->hdr;
					if (hdr) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x (NO-PKT)"
						    " hdr %p\n", (void *)cmd,
						    cmd->index,
						    cmd->drv_pkt_time,
						    (void *)hdr));
					}
				} else {
					pkt = cmd->pkt;
					if (pkt) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x pkt %p \n",
						    (void *)cmd, cmd->index,
						    cmd->drv_pkt_time, (void *)pkt));
					}
				}

				/* full detail (0xDD) for the first cmd only */
				if (++cmd_count == 1) {
					mrsas_print_cmd_details(instance, cmd,
					    0xDD);
				} else {
					mrsas_print_cmd_details(instance, cmd,
					    1);
				}

			}
		}
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));


	debug_level_g = saved_level;

	return (DDI_SUCCESS);
}
2717
2718
/*
 * mrsas_complete_pending_cmds: fail every command still on the pending
 * list back to its originator.  SCSA I/O packets are completed with
 * CMD_DEV_GONE; internal sync commands (DCMDs) get MFI_STAT_INVALID_STATUS
 * and their waiter is woken.  Used when the adapter is being taken down.
 * Always returns DDI_SUCCESS.
 */
int
mrsas_complete_pending_cmds(struct mrsas_instance *instance)
{

	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;

	struct mlist_head *pos, *next;

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_complete_pending_cmds(): Called"));

	mutex_enter(&instance->cmd_pend_mtx);
	/* safe iteration: each cmd is unlinked inside the loop */
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);
		if (cmd) {
			pkt = cmd->pkt;
			if (pkt) { /* for IO */
				if (((pkt->pkt_flags & FLAG_NOINTR)
				    == 0) && pkt->pkt_comp) {
					pkt->pkt_reason
					    = CMD_DEV_GONE;
					pkt->pkt_statistics
					    = STAT_DISCON;
					con_log(CL_ANN1, (CE_CONT,
					    "fail and posting to scsa "
					    "cmd %p index %x"
					    " pkt %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)pkt, gethrtime()));
					/*
					 * NOTE(review): completion callback
					 * is invoked with cmd_pend_mtx held.
					 */
					(*pkt->pkt_comp)(pkt);
				}
			} else { /* for DCMDS */
				if (cmd->sync_cmd == MRSAS_TRUE) {
					hdr = (struct mrsas_header *)&cmd->frame->hdr;
					con_log(CL_ANN1, (CE_CONT,
					    "posting invalid status to application "
					    "cmd %p index %x"
					    " hdr %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)hdr, gethrtime()));
					hdr->cmd_status = MFI_STAT_INVALID_STATUS;
					/* wake the thread waiting on this cmd */
					complete_cmd_in_sync_mode(instance, cmd);
				}
			}
			mlist_del_init(&cmd->list);
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_complete_pending_cmds:"
			    "NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_complete_pending_cmds:"
		    "looping for more commands\n"));
	}
	mutex_exit(&instance->cmd_pend_mtx);

	con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2782
/*
 * mrsas_print_cmd_details: log diagnostic detail for one command.
 * detail == 0xDD selects full dump (forces debug level to CL_ANN1 for
 * the duration and, on Thunderbolt, also dumps the RAID SCSI IO request
 * and RAID context); any other value logs only the summary lines.
 */
void
mrsas_print_cmd_details(struct mrsas_instance *instance, struct mrsas_cmd *cmd,
    int detail)
{
	struct scsi_pkt *pkt = cmd->pkt;
	Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
	int i;
	int saved_level;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	/* saved_level is only set/restored when detail == 0xDD */
	if (detail == 0xDD) {
		saved_level = debug_level_g;
		debug_level_g = CL_ANN1;
	}


	if (instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
	} else {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->drv_pkt_time));
	}

	if (pkt) {
		con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
		    pkt->pkt_cdbp[0]));
	} else {
		con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
	}

	if ((detail == 0xDD) && instance->tbolt) {
		/* full dump of the MPI2 request frame, via DMA access fns */
		con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
		con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X "
		    "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
		    ddi_get16(acc_handle, &scsi_io->DevHandle),
		    ddi_get8(acc_handle, &scsi_io->Function),
		    ddi_get16(acc_handle, &scsi_io->IoFlags),
		    ddi_get16(acc_handle, &scsi_io->SGLFlags),
		    ddi_get32(acc_handle, &scsi_io->DataLength)));

		for (i = 0; i < 32; i++) {
			con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ", i,
			    ddi_get8(acc_handle, &scsi_io->CDB.CDB32[i])));
		}

		con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
		con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X "
		    "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X "
		    "RAIDFlags=0x%X regLockRowLBA=0x%" PRIu64
		    " regLockLength=0x%X spanArm=0x%X\n",
		    ddi_get8(acc_handle, &scsi_io->RaidContext.status),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.extStatus),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.ldTargetId),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.timeoutValue),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.regLockFlags),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.RAIDFlags),
		    ddi_get64(acc_handle, &scsi_io->RaidContext.regLockRowLBA),
		    ddi_get32(acc_handle, &scsi_io->RaidContext.regLockLength),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.spanArm)));
	}

	if (detail == 0xDD) {
		debug_level_g = saved_level;
	}
}
2852
2853
/*
 * mrsas_issue_pending_cmds: after an online controller reset, re-issue
 * every command still on the pending list.  Each command's timeout is
 * refreshed and its OCR retry count bumped; if a command has already
 * been retried more than IO_RETRY_COUNT times the adapter is killed and
 * DDI_FAILURE is returned.  Otherwise returns DDI_SUCCESS.
 */
int
mrsas_issue_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head->next;
	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
	while (tmp != head) {
		/*
		 * NOTE(review): the mutex is dropped before the command
		 * is re-issued; 'tmp' was advanced under the lock, but
		 * this still assumes no concurrent list mutation during
		 * OCR -- confirm against callers.
		 */
		mutex_enter(&instance->cmd_pend_mtx);
		cmd = mlist_entry(tmp, struct mrsas_cmd, list);
		tmp = tmp->next;
		mutex_exit(&instance->cmd_pend_mtx);
		if (cmd) {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds(): "
			    "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
			    (void *)cmd, cmd->index, cmd->drv_pkt_time));

			/* Reset command timeout value */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

			cmd->retry_count_for_ocr++;

			cmn_err(CE_CONT, "cmd retry count = %d\n",
			    cmd->retry_count_for_ocr);

			/* too many OCR retries: give up and kill the HBA */
			if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
				cmn_err(CE_WARN, "mrsas_issue_pending_cmds(): "
				    "cmd->retry_count exceeded limit >%d\n",
				    IO_RETRY_COUNT);
				mrsas_print_cmd_details(instance, cmd, 0xDD);

				cmn_err(CE_WARN,
				    "mrsas_issue_pending_cmds():"
				    "Calling KILL Adapter\n");
				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}

			pkt = cmd->pkt;
			if (pkt) {
				con_log(CL_ANN1, (CE_CONT,
				    "PENDING PKT-CMD ISSUE: cmd %p index %x "
				    "pkt %p time %llx",
				    (void *)cmd, cmd->index,
				    (void *)pkt,
				    gethrtime()));

			} else {
				cmn_err(CE_CONT,
				    "mrsas_issue_pending_cmds(): NO-PKT, "
				    "cmd %p index 0x%x drv_pkt_time 0x%x ",
				    (void *)cmd, cmd->index, cmd->drv_pkt_time);
			}


			/* sync (DCMD) commands go down the waiting path */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				cmn_err(CE_CONT, "mrsas_issue_pending_cmds(): "
				    "SYNC_CMD == TRUE \n");
				instance->func_ptr->issue_cmd_in_sync_mode(
				    instance, cmd);
			} else {
				instance->func_ptr->issue_cmd(cmd, instance);
			}
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds: NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_issue_pending_cmds:"
		    "looping for more commands"));
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2935
2936
2937
2938 /*
2939 * destroy_mfi_frame_pool
2940 */
2941 void
2942 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2943 {
2944 int i;
2945 uint32_t max_cmd = instance->max_fw_cmds;
2946
2947 struct mrsas_cmd *cmd;
2948
2949 /* return all frames to pool */
2950
2951 for (i = 0; i < max_cmd; i++) {
2952
2953 cmd = instance->cmd_list[i];
2954
2955 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2956 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2957
2958 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
2959 }
2960
2961 }
2962
2963 /*
2964 * create_mfi_frame_pool
2965 */
2966 int
2967 create_mfi_frame_pool(struct mrsas_instance *instance)
2968 {
2969 int i = 0;
2970 int cookie_cnt;
2971 uint16_t max_cmd;
2972 uint16_t sge_sz;
2973 uint32_t sgl_sz;
2974 uint32_t tot_frame_size;
2975 struct mrsas_cmd *cmd;
2976 int retval = DDI_SUCCESS;
2977
2978 max_cmd = instance->max_fw_cmds;
2979 sge_sz = sizeof (struct mrsas_sge_ieee);
2980 /* calculated the number of 64byte frames required for SGL */
2981 sgl_sz = sge_sz * instance->max_num_sge;
2982 tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;
2983
2984 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
2985 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
2986
2987 while (i < max_cmd) {
2988 cmd = instance->cmd_list[i];
2989
2990 cmd->frame_dma_obj.size = tot_frame_size;
2991 cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
2992 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2993 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2994 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
2995 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
2996
2997 cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
2998 (uchar_t)DDI_STRUCTURE_LE_ACC);
2999
3000 if (cookie_cnt == -1 || cookie_cnt > 1) {
3001 cmn_err(CE_WARN,
3002 "create_mfi_frame_pool: could not alloc.");
3003 retval = DDI_FAILURE;
3004 goto mrsas_undo_frame_pool;
3005 }
3006
3007 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
3008
3009 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
3010 cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
3011 cmd->frame_phys_addr =
3012 cmd->frame_dma_obj.dma_cookie[0].dmac_address;
3013
3014 cmd->sense = (uint8_t *)(((unsigned long)
3015 cmd->frame_dma_obj.buffer) +
3016 tot_frame_size - SENSE_LENGTH);
3017 cmd->sense_phys_addr =
3018 cmd->frame_dma_obj.dma_cookie[0].dmac_address +
3019 tot_frame_size - SENSE_LENGTH;
3020
3021 if (!cmd->frame || !cmd->sense) {
3022 cmn_err(CE_WARN,
3023 "mr_sas: pci_pool_alloc failed");
3024 retval = ENOMEM;
3025 goto mrsas_undo_frame_pool;
3026 }
3027
3028 ddi_put32(cmd->frame_dma_obj.acc_handle,
3029 &cmd->frame->io.context, cmd->index);
3030 i++;
3031
3032 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
3033 cmd->index, cmd->frame_phys_addr));
3034 }
3035
3036 return (DDI_SUCCESS);
3037
3038 mrsas_undo_frame_pool:
3039 if (i > 0)
3040 destroy_mfi_frame_pool(instance);
3041
3042 return (retval);
3043 }
3044
3045 /*
3046 * free_additional_dma_buffer
3047 */
3048 static void
3049 free_additional_dma_buffer(struct mrsas_instance *instance)
3050 {
3051 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
3052 (void) mrsas_free_dma_obj(instance,
3053 instance->mfi_internal_dma_obj);
3054 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
3055 }
3056
3057 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
3058 (void) mrsas_free_dma_obj(instance,
3059 instance->mfi_evt_detail_obj);
3060 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
3061 }
3062 }
3063
3064 /*
3065 * alloc_additional_dma_buffer
3066 */
3067 static int
3068 alloc_additional_dma_buffer(struct mrsas_instance *instance)
3069 {
3070 uint32_t reply_q_sz;
3071 uint32_t internal_buf_size = PAGESIZE*2;
3072
3073 /* max cmds plus 1 + producer & consumer */
3074 reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);
3075
3076 instance->mfi_internal_dma_obj.size = internal_buf_size;
3077 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
3078 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3079 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
3080 0xFFFFFFFFU;
3081 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
3082
3083 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
3084 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3085 cmn_err(CE_WARN,
3086 "mr_sas: could not alloc reply queue");
3087 return (DDI_FAILURE);
3088 }
3089
3090 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
3091
3092 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
3093
3094 instance->producer = (uint32_t *)((unsigned long)
3095 instance->mfi_internal_dma_obj.buffer);
3096 instance->consumer = (uint32_t *)((unsigned long)
3097 instance->mfi_internal_dma_obj.buffer + 4);
3098 instance->reply_queue = (uint32_t *)((unsigned long)
3099 instance->mfi_internal_dma_obj.buffer + 8);
3100 instance->internal_buf = (caddr_t)(((unsigned long)
3101 instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
3102 instance->internal_buf_dmac_add =
3103 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
3104 (reply_q_sz + 8);
3105 instance->internal_buf_size = internal_buf_size -
3106 (reply_q_sz + 8);
3107
3108 /* allocate evt_detail */
3109 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
3110 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
3111 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3112 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3113 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
3114 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;
3115
3116 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
3117 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
3118 cmn_err(CE_WARN, "alloc_additional_dma_buffer: "
3119 "could not allocate data transfer buffer.");
3120 goto mrsas_undo_internal_buff;
3121 }
3122
3123 bzero(instance->mfi_evt_detail_obj.buffer,
3124 sizeof (struct mrsas_evt_detail));
3125
3126 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
3127
3128 return (DDI_SUCCESS);
3129
3130 mrsas_undo_internal_buff:
3131 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
3132 (void) mrsas_free_dma_obj(instance,
3133 instance->mfi_internal_dma_obj);
3134 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
3135 }
3136
3137 return (DDI_FAILURE);
3138 }
3139
3140
3141 void
3142 mrsas_free_cmd_pool(struct mrsas_instance *instance)
3143 {
3144 int i;
3145 uint32_t max_cmd;
3146 size_t sz;
3147
3148 /* already freed */
3149 if (instance->cmd_list == NULL) {
3150 return;
3151 }
3152
3153 max_cmd = instance->max_fw_cmds;
3154
3155 /* size of cmd_list array */
3156 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3157
3158 /* First free each cmd */
3159 for (i = 0; i < max_cmd; i++) {
3160 if (instance->cmd_list[i] != NULL) {
3161 kmem_free(instance->cmd_list[i],
3162 sizeof (struct mrsas_cmd));
3163 }
3164
3165 instance->cmd_list[i] = NULL;
3166 }
3167
3168 /* Now, free cmd_list array */
3169 if (instance->cmd_list != NULL)
3170 kmem_free(instance->cmd_list, sz);
3171
3172 instance->cmd_list = NULL;
3173
3174 INIT_LIST_HEAD(&instance->cmd_pool_list);
3175 INIT_LIST_HEAD(&instance->cmd_pend_list);
3176 if (instance->tbolt) {
3177 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
3178 } else {
3179 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3180 }
3181
3182 }
3183
3184
3185 /*
3186 * mrsas_alloc_cmd_pool
3187 */
3188 int
3189 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3190 {
3191 int i;
3192 int count;
3193 uint32_t max_cmd;
3194 uint32_t reserve_cmd;
3195 size_t sz;
3196
3197 struct mrsas_cmd *cmd;
3198
3199 max_cmd = instance->max_fw_cmds;
3200 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3201 "max_cmd %x", max_cmd));
3202
3203
3204 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3205
3206 /*
3207 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3208 * Allocate the dynamic array first and then allocate individual
3209 * commands.
3210 */
3211 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3212 if (instance->cmd_list == NULL) {
3213 con_log(CL_NONE, (CE_WARN,
3214 "Failed to allocate memory for cmd_list"));
3215 return (DDI_FAILURE);
3216 }
3217
3218 /* create a frame pool and assign one frame to each cmd */
3219 for (count = 0; count < max_cmd; count++) {
3220 instance->cmd_list[count] =
3221 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3222 if (instance->cmd_list[count] == NULL) {
3223 con_log(CL_NONE, (CE_WARN,
3224 "Failed to allocate memory for mrsas_cmd"));
3225 goto mrsas_undo_cmds;
3226 }
3227 }
3228
3229 /* add all the commands to command pool */
3230
3231 INIT_LIST_HEAD(&instance->cmd_pool_list);
3232 INIT_LIST_HEAD(&instance->cmd_pend_list);
3233 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3234
3235 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
3236
3237 for (i = 0; i < reserve_cmd; i++) {
3238 cmd = instance->cmd_list[i];
3239 cmd->index = i;
3240 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3241 }
3242
3243
3244 for (i = reserve_cmd; i < max_cmd; i++) {
3245 cmd = instance->cmd_list[i];
3246 cmd->index = i;
3247 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3248 }
3249
3250 return (DDI_SUCCESS);
3251
3252 mrsas_undo_cmds:
3253 if (count > 0) {
3254 /* free each cmd */
3255 for (i = 0; i < count; i++) {
3256 if (instance->cmd_list[i] != NULL) {
3257 kmem_free(instance->cmd_list[i],
3258 sizeof (struct mrsas_cmd));
3259 }
3260 instance->cmd_list[i] = NULL;
3261 }
3262 }
3263
3264 mrsas_undo_cmd_list:
3265 if (instance->cmd_list != NULL)
3266 kmem_free(instance->cmd_list, sz);
3267 instance->cmd_list = NULL;
3268
3269 return (DDI_FAILURE);
3270 }
3271
3272
3273 /*
3274 * free_space_for_mfi
3275 */
3276 static void
3277 free_space_for_mfi(struct mrsas_instance *instance)
3278 {
3279
3280 /* already freed */
3281 if (instance->cmd_list == NULL) {
3282 return;
3283 }
3284
3285 /* Free additional dma buffer */
3286 free_additional_dma_buffer(instance);
3287
3288 /* Free the MFI frame pool */
3289 destroy_mfi_frame_pool(instance);
3290
3291 /* Free all the commands in the cmd_list */
3292 /* Free the cmd_list buffer itself */
3293 mrsas_free_cmd_pool(instance);
3294 }
3295
3296 /*
3297 * alloc_space_for_mfi
3298 */
3299 static int
3300 alloc_space_for_mfi(struct mrsas_instance *instance)
3301 {
3302 /* Allocate command pool (memory for cmd_list & individual commands) */
3303 if (mrsas_alloc_cmd_pool(instance)) {
3304 cmn_err(CE_WARN, "error creating cmd pool");
3305 return (DDI_FAILURE);
3306 }
3307
3308 /* Allocate MFI Frame pool */
3309 if (create_mfi_frame_pool(instance)) {
3310 cmn_err(CE_WARN, "error creating frame DMA pool");
3311 goto mfi_undo_cmd_pool;
3312 }
3313
3314 /* Allocate additional DMA buffer */
3315 if (alloc_additional_dma_buffer(instance)) {
3316 cmn_err(CE_WARN, "error creating frame DMA pool");
3317 goto mfi_undo_frame_pool;
3318 }
3319
3320 return (DDI_SUCCESS);
3321
3322 mfi_undo_frame_pool:
3323 destroy_mfi_frame_pool(instance);
3324
3325 mfi_undo_cmd_pool:
3326 mrsas_free_cmd_pool(instance);
3327
3328 return (DDI_FAILURE);
3329 }
3330
3331
3332
3333 /*
3334 * get_ctrl_info
3335 */
3336 static int
3337 get_ctrl_info(struct mrsas_instance *instance,
3338 struct mrsas_ctrl_info *ctrl_info)
3339 {
3340 int ret = 0;
3341
3342 struct mrsas_cmd *cmd;
3343 struct mrsas_dcmd_frame *dcmd;
3344 struct mrsas_ctrl_info *ci;
3345
3346 if (instance->tbolt) {
3347 cmd = get_raid_msg_mfi_pkt(instance);
3348 } else {
3349 cmd = get_mfi_pkt(instance);
3350 }
3351
3352 if (!cmd) {
3353 con_log(CL_ANN, (CE_WARN,
3354 "Failed to get a cmd for ctrl info"));
3355 DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
3356 uint16_t, instance->max_fw_cmds);
3357 return (DDI_FAILURE);
3358 }
3359
3360 /* Clear the frame buffer and assign back the context id */
3361 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3362 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3363 cmd->index);
3364
3365 dcmd = &cmd->frame->dcmd;
3366
3367 ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3368
3369 if (!ci) {
3370 cmn_err(CE_WARN,
3371 "Failed to alloc mem for ctrl info");
3372 return_mfi_pkt(instance, cmd);
3373 return (DDI_FAILURE);
3374 }
3375
3376 (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3377
3378 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
3379 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3380
3381 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3382 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3383 MFI_CMD_STATUS_POLL_MODE);
3384 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3385 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3386 MFI_FRAME_DIR_READ);
3387 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3388 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3389 sizeof (struct mrsas_ctrl_info));
3390 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3391 MR_DCMD_CTRL_GET_INFO);
3392 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3393 instance->internal_buf_dmac_add);
3394 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3395 sizeof (struct mrsas_ctrl_info));
3396
3397 cmd->frame_count = 1;
3398
3399 if (instance->tbolt) {
3400 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3401 }
3402
3403 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3404 ret = 0;
3405
3406 ctrl_info->max_request_size = ddi_get32(
3407 cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
3408
3409 ctrl_info->ld_present_count = ddi_get16(
3410 cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
3411
3412 ctrl_info->properties.on_off_properties = ddi_get32(
3413 cmd->frame_dma_obj.acc_handle,
3414 &ci->properties.on_off_properties);
3415 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3416 (uint8_t *)(ctrl_info->product_name),
3417 (uint8_t *)(ci->product_name), 80 * sizeof (char),
3418 DDI_DEV_AUTOINCR);
3419 /* should get more members of ci with ddi_get when needed */
3420 } else {
3421 cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed");
3422 ret = -1;
3423 }
3424
3425 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3426 ret = -1;
3427 }
3428 if (instance->tbolt) {
3429 return_raid_msg_mfi_pkt(instance, cmd);
3430 } else {
3431 return_mfi_pkt(instance, cmd);
3432 }
3433
3434 return (ret);
3435 }
3436
3437 /*
3438 * abort_aen_cmd
3439 */
3440 static int
3441 abort_aen_cmd(struct mrsas_instance *instance,
3442 struct mrsas_cmd *cmd_to_abort)
3443 {
3444 int ret = 0;
3445
3446 struct mrsas_cmd *cmd;
3447 struct mrsas_abort_frame *abort_fr;
3448
3449 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3450
3451 if (instance->tbolt) {
3452 cmd = get_raid_msg_mfi_pkt(instance);
3453 } else {
3454 cmd = get_mfi_pkt(instance);
3455 }
3456
3457 if (!cmd) {
3458 con_log(CL_ANN1, (CE_WARN,
3459 "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
3460 DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
3461 uint16_t, instance->max_fw_cmds);
3462 return (DDI_FAILURE);
3463 }
3464
3465 /* Clear the frame buffer and assign back the context id */
3466 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3467 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3468 cmd->index);
3469
3470 abort_fr = &cmd->frame->abort;
3471
3472 /* prepare and issue the abort frame */
3473 ddi_put8(cmd->frame_dma_obj.acc_handle,
3474 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3475 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3476 MFI_CMD_STATUS_SYNC_MODE);
3477 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3478 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3479 cmd_to_abort->index);
3480 ddi_put32(cmd->frame_dma_obj.acc_handle,
3481 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3482 ddi_put32(cmd->frame_dma_obj.acc_handle,
3483 &abort_fr->abort_mfi_phys_addr_hi, 0);
3484
3485 instance->aen_cmd->abort_aen = 1;
3486
3487 /* cmd->sync_cmd = MRSAS_TRUE; */ /* KEBE ASKS, inherit? */
3488 cmd->frame_count = 1;
3489
3490 if (instance->tbolt) {
3491 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3492 }
3493
3494 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3495 con_log(CL_ANN1, (CE_WARN,
3496 "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3497 ret = -1;
3498 } else {
3499 ret = 0;
3500 }
3501
3502 instance->aen_cmd->abort_aen = 1;
3503 instance->aen_cmd = 0;
3504
3505 if (instance->tbolt) {
3506 return_raid_msg_mfi_pkt(instance, cmd);
3507 } else {
3508 return_mfi_pkt(instance, cmd);
3509 }
3510
3511 atomic_add_16(&instance->fw_outstanding, (-1));
3512
3513 return (ret);
3514 }
3515
3516
/*
 * mrsas_build_init_cmd - build an MFI INIT frame in *cmd_ptr.
 *
 * Fills in the INIT frame and its queue-info structure (carried in the
 * frame's SGL area, 64 bytes in), allocates a DMA buffer holding the
 * driver version string and points the frame at it.  On success
 * *cmd_ptr is ready to be issued in polled mode.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the driver-version DMA buffer
 * could not be allocated (the frame itself is still written in that
 * case, but the command must not be issued).
 */
static int
mrsas_build_init_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd **cmd_ptr)
{
	struct mrsas_cmd *cmd;
	struct mrsas_init_frame *init_frame;
	struct mrsas_init_queue_info *initq_info;
	struct mrsas_drv_ver drv_ver_info;


	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	cmd = *cmd_ptr;


	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	/* queue info overlays the SGL area, 64 bytes into the frame */
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	/* reply queue holds one entry more than the max outstanding cmds */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);

	/* producer index lives at offset 0 of the internal DMA buffer */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);

	/* consumer index follows at offset 4 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	/* the reply queue itself starts at offset 8 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	/* point the frame at the queue info placed 64 bytes in */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);


	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "init_mfi : Could not allocate driver version buffer."));
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	(void) memset(instance->drv_ver_dma_obj.buffer, 0,
	    sizeof (drv_ver_info.drv_ver));
	/*
	 * NOTE(review): the frame's acc_handle is used to write into
	 * drv_ver_dma_obj.buffer - confirm the two handles share access
	 * attributes, or this should use drv_ver_dma_obj.acc_handle.
	 */
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);


	/* copy driver version physical address to init frame */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &init_frame->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	*cmd_ptr = cmd;

	return (DDI_SUCCESS);
}
3622
3623
3624 /*
3625 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3626 */
3627 int
3628 mrsas_init_adapter_ppc(struct mrsas_instance *instance)
3629 {
3630 struct mrsas_cmd *cmd;
3631
3632 /*
3633 * allocate memory for mfi adapter(cmd pool, individual commands, mfi
3634 * frames etc
3635 */
3636 if (alloc_space_for_mfi(instance) != DDI_SUCCESS) {
3637 con_log(CL_ANN, (CE_NOTE,
3638 "Error, failed to allocate memory for MFI adapter"));
3639 return (DDI_FAILURE);
3640 }
3641
3642 /* Build INIT command */
3643 cmd = get_mfi_pkt(instance);
3644
3645 if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
3646 con_log(CL_ANN,
3647 (CE_NOTE, "Error, failed to build INIT command"));
3648
3649 goto fail_undo_alloc_mfi_space;
3650 }
3651
3652 /*
3653 * Disable interrupt before sending init frame ( see linux driver code)
3654 * send INIT MFI frame in polled mode
3655 */
3656 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3657 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3658 goto fail_fw_init;
3659 }
3660
3661 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
3662 goto fail_fw_init;
3663 return_mfi_pkt(instance, cmd);
3664
3665 if (ctio_enable &&
3666 (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
3667 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3668 instance->flag_ieee = 1;
3669 } else {
3670 instance->flag_ieee = 0;
3671 }
3672
3673 instance->unroll.alloc_space_mfi = 1;
3674 instance->unroll.verBuff = 1;
3675
3676 return (DDI_SUCCESS);
3677
3678
3679 fail_fw_init:
3680 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3681
3682 fail_undo_alloc_mfi_space:
3683 return_mfi_pkt(instance, cmd);
3684 free_space_for_mfi(instance);
3685
3686 return (DDI_FAILURE);
3687
3688 }
3689
3690 /*
3691 * mrsas_init_adapter - Initialize adapter.
3692 */
3693 int
3694 mrsas_init_adapter(struct mrsas_instance *instance)
3695 {
3696 struct mrsas_ctrl_info ctrl_info;
3697
3698
3699 /* we expect the FW state to be READY */
3700 if (mfi_state_transition_to_ready(instance)) {
3701 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3702 return (DDI_FAILURE);
3703 }
3704
3705 /* get various operational parameters from status register */
3706 instance->max_num_sge =
3707 (instance->func_ptr->read_fw_status_reg(instance) &
3708 0xFF0000) >> 0x10;
3709 instance->max_num_sge =
3710 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3711 MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3712
3713 /*
3714 * Reduce the max supported cmds by 1. This is to ensure that the
3715 * reply_q_sz (1 more than the max cmd that driver may send)
3716 * does not exceed max cmds that the FW can support
3717 */
3718 instance->max_fw_cmds =
3719 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3720 instance->max_fw_cmds = instance->max_fw_cmds - 1;
3721
3722
3723
3724 /* Initialize adapter */
3725 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3726 con_log(CL_ANN,
3727 (CE_WARN, "mr_sas: could not initialize adapter"));
3728 return (DDI_FAILURE);
3729 }
3730
3731 /* gather misc FW related information */
3732 instance->disable_online_ctrl_reset = 0;
3733
3734 if (!get_ctrl_info(instance, &ctrl_info)) {
3735 instance->max_sectors_per_req = ctrl_info.max_request_size;
3736 con_log(CL_ANN1, (CE_NOTE,
3737 "product name %s ld present %d",
3738 ctrl_info.product_name, ctrl_info.ld_present_count));
3739 } else {
3740 instance->max_sectors_per_req = instance->max_num_sge *
3741 PAGESIZE / 512;
3742 }
3743
3744 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG) {
3745 instance->disable_online_ctrl_reset = 1;
3746 con_log(CL_ANN1,
3747 (CE_NOTE, "Disable online control Flag is set\n"));
3748 } else {
3749 con_log(CL_ANN1,
3750 (CE_NOTE, "Disable online control Flag is not set\n"));
3751 }
3752
3753 return (DDI_SUCCESS);
3754
3755 }
3756
3757
3758
3759 static int
3760 mrsas_issue_init_mfi(struct mrsas_instance *instance)
3761 {
3762 struct mrsas_cmd *cmd;
3763 struct mrsas_init_frame *init_frame;
3764 struct mrsas_init_queue_info *initq_info;
3765
3766 /*
3767 * Prepare a init frame. Note the init frame points to queue info
3768 * structure. Each frame has SGL allocated after first 64 bytes. For
3769 * this frame - since we don't need any SGL - we use SGL's space as
3770 * queue info structure
3771 */
3772 con_log(CL_ANN1, (CE_NOTE,
3773 "mrsas_issue_init_mfi: entry\n"));
3774 cmd = get_mfi_app_pkt(instance);
3775
3776 if (!cmd) {
3777 con_log(CL_ANN1, (CE_WARN,
3778 "mrsas_issue_init_mfi: get_pkt failed\n"));
3779 return (DDI_FAILURE);
3780 }
3781
3782 /* Clear the frame buffer and assign back the context id */
3783 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3784 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3785 cmd->index);
3786
3787 init_frame = (struct mrsas_init_frame *)cmd->frame;
3788 initq_info = (struct mrsas_init_queue_info *)
3789 ((unsigned long)init_frame + 64);
3790
3791 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3792 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3793
3794 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3795
3796 ddi_put32(cmd->frame_dma_obj.acc_handle,
3797 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3798 ddi_put32(cmd->frame_dma_obj.acc_handle,
3799 &initq_info->producer_index_phys_addr_hi, 0);
3800 ddi_put32(cmd->frame_dma_obj.acc_handle,
3801 &initq_info->producer_index_phys_addr_lo,
3802 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3803 ddi_put32(cmd->frame_dma_obj.acc_handle,
3804 &initq_info->consumer_index_phys_addr_hi, 0);
3805 ddi_put32(cmd->frame_dma_obj.acc_handle,
3806 &initq_info->consumer_index_phys_addr_lo,
3807 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3808
3809 ddi_put32(cmd->frame_dma_obj.acc_handle,
3810 &initq_info->reply_queue_start_phys_addr_hi, 0);
3811 ddi_put32(cmd->frame_dma_obj.acc_handle,
3812 &initq_info->reply_queue_start_phys_addr_lo,
3813 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3814
3815 ddi_put8(cmd->frame_dma_obj.acc_handle,
3816 &init_frame->cmd, MFI_CMD_OP_INIT);
3817 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3818 MFI_CMD_STATUS_POLL_MODE);
3819 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3820 ddi_put32(cmd->frame_dma_obj.acc_handle,
3821 &init_frame->queue_info_new_phys_addr_lo,
3822 cmd->frame_phys_addr + 64);
3823 ddi_put32(cmd->frame_dma_obj.acc_handle,
3824 &init_frame->queue_info_new_phys_addr_hi, 0);
3825
3826 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3827 sizeof (struct mrsas_init_queue_info));
3828
3829 cmd->frame_count = 1;
3830
3831 /* issue the init frame in polled mode */
3832 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3833 con_log(CL_ANN1, (CE_WARN,
3834 "mrsas_issue_init_mfi():failed to "
3835 "init firmware"));
3836 return_mfi_app_pkt(instance, cmd);
3837 return (DDI_FAILURE);
3838 }
3839
3840 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3841 return_mfi_pkt(instance, cmd);
3842 return (DDI_FAILURE);
3843 }
3844
3845 return_mfi_app_pkt(instance, cmd);
3846 con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));
3847
3848 return (DDI_SUCCESS);
3849 }
3850 /*
3851 * mfi_state_transition_to_ready : Move the FW to READY state
3852 *
3853 * @reg_set : MFI register set
3854 */
3855 int
3856 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3857 {
3858 int i;
3859 uint8_t max_wait;
3860 uint32_t fw_ctrl = 0;
3861 uint32_t fw_state;
3862 uint32_t cur_state;
3863 uint32_t cur_abs_reg_val;
3864 uint32_t prev_abs_reg_val;
3865 uint32_t status;
3866
3867 cur_abs_reg_val =
3868 instance->func_ptr->read_fw_status_reg(instance);
3869 fw_state =
3870 cur_abs_reg_val & MFI_STATE_MASK;
3871 con_log(CL_ANN1, (CE_CONT,
3872 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3873
3874 while (fw_state != MFI_STATE_READY) {
3875 con_log(CL_ANN, (CE_CONT,
3876 "mfi_state_transition_to_ready:FW state%x", fw_state));
3877
3878 switch (fw_state) {
3879 case MFI_STATE_FAULT:
3880 con_log(CL_ANN, (CE_NOTE,
3881 "mr_sas: FW in FAULT state!!"));
3882
3883 return (ENODEV);
3884 case MFI_STATE_WAIT_HANDSHAKE:
3885 /* set the CLR bit in IMR0 */
3886 con_log(CL_ANN1, (CE_NOTE,
3887 "mr_sas: FW waiting for HANDSHAKE"));
3888 /*
3889 * PCI_Hot Plug: MFI F/W requires
3890 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3891 * to be set
3892 */
3893 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3894 if (!instance->tbolt) {
3895 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3896 MFI_INIT_HOTPLUG, instance);
3897 } else {
3898 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3899 MFI_INIT_HOTPLUG, instance);
3900 }
3901 max_wait = (instance->tbolt == 1) ? 180 : 2;
3902 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3903 break;
3904 case MFI_STATE_BOOT_MESSAGE_PENDING:
3905 /* set the CLR bit in IMR0 */
3906 con_log(CL_ANN1, (CE_NOTE,
3907 "mr_sas: FW state boot message pending"));
3908 /*
3909 * PCI_Hot Plug: MFI F/W requires
3910 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3911 * to be set
3912 */
3913 if (!instance->tbolt) {
3914 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3915 } else {
3916 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3917 instance);
3918 }
3919 max_wait = (instance->tbolt == 1) ? 180 : 10;
3920 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3921 break;
3922 case MFI_STATE_OPERATIONAL:
3923 /* bring it to READY state; assuming max wait 2 secs */
3924 instance->func_ptr->disable_intr(instance);
3925 con_log(CL_ANN1, (CE_NOTE,
3926 "mr_sas: FW in OPERATIONAL state"));
3927 /*
3928 * PCI_Hot Plug: MFI F/W requires
3929 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3930 * to be set
3931 */
3932 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3933 if (!instance->tbolt) {
3934 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3935 } else {
3936 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3937 instance);
3938
3939 for (i = 0; i < (10 * 1000); i++) {
3940 status =
3941 RD_RESERVED0_REGISTER(instance);
3942 if (status & 1) {
3943 delay(1 *
3944 drv_usectohz(MILLISEC));
3945 } else {
3946 break;
3947 }
3948 }
3949
3950 }
3951 max_wait = (instance->tbolt == 1) ? 180 : 10;
3952 cur_state = MFI_STATE_OPERATIONAL;
3953 break;
3954 case MFI_STATE_UNDEFINED:
3955 /* this state should not last for more than 2 seconds */
3956 con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3957
3958 max_wait = (instance->tbolt == 1) ? 180 : 2;
3959 cur_state = MFI_STATE_UNDEFINED;
3960 break;
3961 case MFI_STATE_BB_INIT:
3962 max_wait = (instance->tbolt == 1) ? 180 : 2;
3963 cur_state = MFI_STATE_BB_INIT;
3964 break;
3965 case MFI_STATE_FW_INIT:
3966 max_wait = (instance->tbolt == 1) ? 180 : 2;
3967 cur_state = MFI_STATE_FW_INIT;
3968 break;
3969 case MFI_STATE_FW_INIT_2:
3970 max_wait = 180;
3971 cur_state = MFI_STATE_FW_INIT_2;
3972 break;
3973 case MFI_STATE_DEVICE_SCAN:
3974 max_wait = 180;
3975 cur_state = MFI_STATE_DEVICE_SCAN;
3976 prev_abs_reg_val = cur_abs_reg_val;
3977 con_log(CL_NONE, (CE_NOTE,
3978 "Device scan in progress ...\n"));
3979 break;
3980 case MFI_STATE_FLUSH_CACHE:
3981 max_wait = 180;
3982 cur_state = MFI_STATE_FLUSH_CACHE;
3983 break;
3984 default:
3985 con_log(CL_ANN1, (CE_NOTE,
3986 "mr_sas: Unknown state 0x%x", fw_state));
3987 return (ENODEV);
3988 }
3989
3990 /* the cur_state should not last for more than max_wait secs */
3991 for (i = 0; i < (max_wait * MILLISEC); i++) {
3992 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
3993 cur_abs_reg_val =
3994 instance->func_ptr->read_fw_status_reg(instance);
3995 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3996
3997 if (fw_state == cur_state) {
3998 delay(1 * drv_usectohz(MILLISEC));
3999 } else {
4000 break;
4001 }
4002 }
4003 if (fw_state == MFI_STATE_DEVICE_SCAN) {
4004 if (prev_abs_reg_val != cur_abs_reg_val) {
4005 continue;
4006 }
4007 }
4008
4009 /* return error if fw_state hasn't changed after max_wait */
4010 if (fw_state == cur_state) {
4011 con_log(CL_ANN1, (CE_WARN,
4012 "FW state hasn't changed in %d secs", max_wait));
4013 return (ENODEV);
4014 }
4015 };
4016
4017 if (!instance->tbolt) {
4018 fw_ctrl = RD_IB_DOORBELL(instance);
4019 con_log(CL_ANN1, (CE_CONT,
4020 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
4021 }
4022
4023 #if 0 /* XXX KEBE ASKS, remove and use like pre-2208? */
4024 /*
4025 * Write 0xF to the doorbell register to do the following.
4026 * - Abort all outstanding commands (bit 0).
4027 * - Transition from OPERATIONAL to READY state (bit 1).
4028 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
4029 * - Set to release FW to continue running (i.e. BIOS handshake
4030 * (bit 3).
4031 */
4032 if (!instance->tbolt) {
4033 WR_IB_DOORBELL(0xF, instance);
4034 }
4035 #endif
4036 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
4037 return (ENODEV);
4038 }
4039
4040 return (DDI_SUCCESS);
4041 }
4042
4043 /*
4044 * get_seq_num
4045 */
4046 static int
4047 get_seq_num(struct mrsas_instance *instance,
4048 struct mrsas_evt_log_info *eli)
4049 {
4050 int ret = DDI_SUCCESS;
4051
4052 dma_obj_t dcmd_dma_obj;
4053 struct mrsas_cmd *cmd;
4054 struct mrsas_dcmd_frame *dcmd;
4055 struct mrsas_evt_log_info *eli_tmp;
4056 if (instance->tbolt) {
4057 cmd = get_raid_msg_mfi_pkt(instance);
4058 } else {
4059 cmd = get_mfi_pkt(instance);
4060 }
4061
4062 if (!cmd) {
4063 cmn_err(CE_WARN, "mr_sas: failed to get a cmd");
4064 DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
4065 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4066 return (ENOMEM);
4067 }
4068
4069 /* Clear the frame buffer and assign back the context id */
4070 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4071 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4072 cmd->index);
4073
4074 dcmd = &cmd->frame->dcmd;
4075
4076 /* allocate the data transfer buffer */
4077 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
4078 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
4079 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4080 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4081 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
4082 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
4083
4084 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
4085 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
4086 cmn_err(CE_WARN,
4087 "get_seq_num: could not allocate data transfer buffer.");
4088 return (DDI_FAILURE);
4089 }
4090
4091 (void) memset(dcmd_dma_obj.buffer, 0,
4092 sizeof (struct mrsas_evt_log_info));
4093
4094 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4095
4096 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4097 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
4098 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
4099 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4100 MFI_FRAME_DIR_READ);
4101 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4102 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
4103 sizeof (struct mrsas_evt_log_info));
4104 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4105 MR_DCMD_CTRL_EVENT_GET_INFO);
4106 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
4107 sizeof (struct mrsas_evt_log_info));
4108 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
4109 dcmd_dma_obj.dma_cookie[0].dmac_address);
4110
4111 cmd->sync_cmd = MRSAS_TRUE;
4112 cmd->frame_count = 1;
4113
4114 if (instance->tbolt) {
4115 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4116 }
4117
4118 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4119 cmn_err(CE_WARN, "get_seq_num: "
4120 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
4121 ret = DDI_FAILURE;
4122 } else {
4123 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
4124 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
4125 &eli_tmp->newest_seq_num);
4126 ret = DDI_SUCCESS;
4127 }
4128
4129 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
4130 ret = DDI_FAILURE;
4131
4132 if (instance->tbolt) {
4133 return_raid_msg_mfi_pkt(instance, cmd);
4134 } else {
4135 return_mfi_pkt(instance, cmd);
4136 }
4137
4138 return (ret);
4139 }
4140
4141 /*
4142 * start_mfi_aen
4143 */
4144 static int
4145 start_mfi_aen(struct mrsas_instance *instance)
4146 {
4147 int ret = 0;
4148
4149 struct mrsas_evt_log_info eli;
4150 union mrsas_evt_class_locale class_locale;
4151
4152 /* get the latest sequence number from FW */
4153 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
4154
4155 if (get_seq_num(instance, &eli)) {
4156 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
4157 return (-1);
4158 }
4159
4160 /* register AEN with FW for latest sequence number plus 1 */
4161 class_locale.members.reserved = 0;
4162 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
4163 class_locale.members.class = MR_EVT_CLASS_INFO;
4164 class_locale.word = LE_32(class_locale.word);
4165 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
4166 class_locale.word);
4167
4168 if (ret) {
4169 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
4170 return (-1);
4171 }
4172
4173
4174 return (ret);
4175 }
4176
4177 /*
4178 * flush_cache
4179 */
4180 static void
4181 flush_cache(struct mrsas_instance *instance)
4182 {
4183 struct mrsas_cmd *cmd = NULL;
4184 struct mrsas_dcmd_frame *dcmd;
4185 if (instance->tbolt) {
4186 cmd = get_raid_msg_mfi_pkt(instance);
4187 } else {
4188 cmd = get_mfi_pkt(instance);
4189 }
4190
4191 if (!cmd) {
4192 con_log(CL_ANN1, (CE_WARN,
4193 "flush_cache():Failed to get a cmd for flush_cache"));
4194 DTRACE_PROBE2(flush_cache_err, uint16_t,
4195 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4196 return;
4197 }
4198
4199 /* Clear the frame buffer and assign back the context id */
4200 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4201 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4202 cmd->index);
4203
4204 dcmd = &cmd->frame->dcmd;
4205
4206 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4207
4208 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4209 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
4210 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
4211 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4212 MFI_FRAME_DIR_NONE);
4213 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4214 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4215 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4216 MR_DCMD_CTRL_CACHE_FLUSH);
4217 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4218 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4219
4220 cmd->frame_count = 1;
4221
4222 if (instance->tbolt) {
4223 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4224 }
4225
4226 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4227 con_log(CL_ANN1, (CE_WARN,
4228 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4229 }
4230 con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4231 if (instance->tbolt) {
4232 return_raid_msg_mfi_pkt(instance, cmd);
4233 } else {
4234 return_mfi_pkt(instance, cmd);
4235 }
4236
4237 }
4238
4239 /*
4240 * service_mfi_aen- Completes an AEN command
4241 * @instance: Adapter soft state
4242 * @cmd: Command to be completed
4243 *
4244 */
void
service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	uint32_t seq_num;
	/* Event details are delivered into the pre-allocated AEN DMA buffer. */
	struct mrsas_evt_detail *evt_detail =
	    (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
	int rval = 0;
	int tgt = 0;
	uint8_t dtype;	/* only consumed in the PDSUPPORT branches below */
#ifdef PDSUPPORT
	mrsas_pd_address_t *pd_addr;
#endif
	ddi_acc_handle_t acc_handle;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	acc_handle = cmd->frame_dma_obj.acc_handle;
	/* ENODATA from the firmware is treated as success for AEN frames. */
	cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas%d: Failed to log AEN event", instance_no));
	}
	/*
	 * Check for any ld devices that has changed state. i.e. online
	 * or offline.
	 */
	con_log(CL_ANN1, (CE_CONT,
	    "AEN: code = %x class = %x locale = %x args = %x",
	    ddi_get32(acc_handle, &evt_detail->code),
	    evt_detail->cl.members.class,
	    ddi_get16(acc_handle, &evt_detail->cl.members.locale),
	    ddi_get8(acc_handle, &evt_detail->arg_type)));

	/* Dispatch on the AEN event code and fire hotplug service events. */
	switch (ddi_get32(acc_handle, &evt_detail->code)) {
	case MR_EVT_CFG_CLEARED: {
		/*
		 * Whole configuration was cleared: invalidate and unconfigure
		 * every LD target that currently has a devinfo node.
		 */
		for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
			if (instance->mr_ld_list[tgt].dip != NULL) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_ld_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance, tgt, 0,
				    MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: CFG CLEARED AEN rval = %d "
				    "tgt id = %d", rval, tgt));
			}
		}
		break;
	}

	case MR_EVT_LD_DELETED: {
		/* Mark the LD invalid, then queue an unconfigure event. */
		tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_ld_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
		mutex_exit(&instance->config_dev_mtx);
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_UNCONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_DELETED */

	case MR_EVT_LD_CREATED: {
		/* New LD: queue a configure event for its target id. */
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_CONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_CREATED */

#ifdef PDSUPPORT
	/* Physical-disk hotplug events are handled only on Thunderbolt. */
	case MR_EVT_PD_REMOVED_EXT: {
		if (instance->tbolt) {
			pd_addr = &evt_detail->args.pd_addr;
			dtype = pd_addr->scsi_dev_type;
			con_log(CL_DLEVEL1, (CE_NOTE,
			    " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
			    " arg_type = %d ", dtype, evt_detail->arg_type));
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_tbolt_pd_list[tgt].flag =
			    (uint8_t)~MRDRV_TGT_VALID;
			mutex_exit(&instance->config_dev_mtx);
			rval = mrsas_service_evt(instance, ddi_get16(
			    acc_handle, &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_REMOVED_EXT */

	case MR_EVT_PD_INSERTED_EXT: {
		if (instance->tbolt) {
			rval = mrsas_service_evt(instance,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_CONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_INSERTED_EXT */

	case MR_EVT_PD_STATE_CHANGE: {
		/*
		 * A PD leaving PD_SYSTEM is treated as a removal; a PD
		 * entering PD_SYSTEM from UNCONFIGURED_GOOD as an insertion.
		 * All other transitions are ignored.
		 */
		if (instance->tbolt) {
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			if ((evt_detail->args.pd_state.prevState ==
			    PD_SYSTEM) &&
			    (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_tbolt_pd_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
				    "rval = %d tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
			if ((evt_detail->args.pd_state.prevState
			    == UNCONFIGURED_GOOD) &&
			    (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_CONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: PD_INSERTED: rval = %d "
				    " tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
		}
		break;
	}
#endif

	} /* End of Main Switch */

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
	seq_num++;
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Re-arm the AEN frame: clear status, request the next sequence. */
	ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
	ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	/* Reset OCR/timeout bookkeeping before re-issuing the AEN command. */
	cmd->retry_count_for_ocr = 0;
	cmd->drv_pkt_time = 0;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}
4432
4433 /*
4434 * complete_cmd_in_sync_mode - Completes an internal command
4435 * @instance: Adapter soft state
4436 * @cmd: Command to be completed
4437 *
4438 * The issue_cmd_in_sync_mode() function waits for a command to complete
4439 * after it issues a command. This function wakes up that waiting routine by
4440 * calling wake_up() on the wait queue.
4441 */
4442 static void
4443 complete_cmd_in_sync_mode(struct mrsas_instance *instance,
4444 struct mrsas_cmd *cmd)
4445 {
4446 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
4447 &cmd->frame->io.cmd_status);
4448
4449 cmd->sync_cmd = MRSAS_FALSE;
4450
4451 con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
4452 (void *)cmd));
4453
4454 mutex_enter(&instance->int_cmd_mtx);
4455 if (cmd->cmd_status == ENODATA) {
4456 cmd->cmd_status = 0;
4457 }
4458 cv_broadcast(&instance->int_cmd_cv);
4459 mutex_exit(&instance->int_cmd_mtx);
4460
4461 }
4462
4463 /*
4464 * Call this function inside mrsas_softintr.
4465 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4466 * @instance: Adapter soft state
4467 */
4468
4469 static uint32_t
4470 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4471 {
4472 uint32_t cur_abs_reg_val;
4473 uint32_t fw_state;
4474
4475 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
4476 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4477 if (fw_state == MFI_STATE_FAULT) {
4478 if (instance->disable_online_ctrl_reset == 1) {
4479 cmn_err(CE_WARN,
4480 "mrsas_initiate_ocr_if_fw_is_faulty: "
4481 "FW in Fault state, detected in ISR: "
4482 "FW doesn't support ocr ");
4483
4484 return (ADAPTER_RESET_NOT_REQUIRED);
4485 } else {
4486 con_log(CL_ANN, (CE_NOTE,
4487 "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault "
4488 "state, detected in ISR: FW supports ocr "));
4489
4490 return (ADAPTER_RESET_REQUIRED);
4491 }
4492 }
4493
4494 return (ADAPTER_RESET_NOT_REQUIRED);
4495 }
4496
4497 /*
4498 * mrsas_softintr - The Software ISR
4499 * @param arg : HBA soft state
4500 *
4501 * called from high-level interrupt if hi-level interrupt are not there,
4502 * otherwise triggered as a soft interrupt
4503 */
static uint_t
mrsas_softintr(struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	struct scsa_cmd *acmd;
	struct mrsas_cmd *cmd;
	struct mlist_head *pos, *next;
	mlist_t process_list;
	struct mrsas_header *hdr;
	struct scsi_arq_status *arqstat;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));

	ASSERT(instance);

	mutex_enter(&instance->completed_pool_mtx);

	if (mlist_empty(&instance->completed_pool_list)) {
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_CLAIMED);
	}

	instance->softint_running = 1;

	/*
	 * Move the whole completed-command list onto a private list so the
	 * pool mutex can be dropped while the completions are processed.
	 */
	INIT_LIST_HEAD(&process_list);
	mlist_splice(&instance->completed_pool_list, &process_list);
	INIT_LIST_HEAD(&instance->completed_pool_list);

	mutex_exit(&instance->completed_pool_mtx);

	/* perform all callbacks first, before releasing the SCBs */
	mlist_for_each_safe(pos, next, &process_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		/* syncronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		/*
		 * NOTE(review): on an FMA DMA-handle failure this returns
		 * immediately, leaving softint_running set to 1 and the
		 * remaining commands on process_list unprocessed — confirm
		 * this is intentional for the fatal-service-lost case.
		 */
		if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
		    DDI_SUCCESS) {
			mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
			con_log(CL_ANN1, (CE_WARN,
			    "mrsas_softintr: "
			    "FMA check reports DMA handle failure"));
			return (DDI_INTR_CLAIMED);
		}

		hdr = &cmd->frame->hdr;

		/* remove the internal command from the process list */
		mlist_del_init(&cmd->list);

		/* Dispatch on the MFI opcode in the completed frame. */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
		case MFI_CMD_OP_PD_SCSI:
		case MFI_CMD_OP_LD_SCSI:
		case MFI_CMD_OP_LD_READ:
		case MFI_CMD_OP_LD_WRITE:
			/*
			 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
			 * could have been issued either through an
			 * IO path or an IOCTL path. If it was via IOCTL,
			 * we will send it to internal completion.
			 */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				complete_cmd_in_sync_mode(instance, cmd);
				break;
			}

			/* regular commands */
			acmd = cmd->cmd;
			pkt = CMD2PKT(acmd);

			/* Sync data DMA back to the CPU for consistent bufs. */
			if (acmd->cmd_flags & CFLAG_DMAVALID) {
				if (acmd->cmd_flags & CFLAG_CONSISTENT) {
					(void) ddi_dma_sync(acmd->cmd_dmahandle,
					    acmd->cmd_dma_offset,
					    acmd->cmd_dma_len,
					    DDI_DMA_SYNC_FORCPU);
				}
			}

			/* Assume full, successful transport by default. */
			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;
			pkt->pkt_state = STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA | STATE_GOT_STATUS;

			con_log(CL_ANN, (CE_CONT,
			    "CDB[0] = %x completed for %s: size %lx context %x",
			    pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
			    acmd->cmd_dmacount, hdr->context));
			DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
			    uint_t, acmd->cmd_cdblen, ulong_t,
			    acmd->cmd_dmacount);

			if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
				struct scsi_inquiry *inq;

				if (acmd->cmd_dmacount != 0) {
					bp_mapin(acmd->cmd_buf);
					inq = (struct scsi_inquiry *)
					    acmd->cmd_buf->b_un.b_addr;

					/* don't expose physical drives to OS */
					if (acmd->islogical &&
					    (hdr->cmd_status == MFI_STAT_OK)) {
						display_scsi_inquiry(
						    (caddr_t)inq);
					} else if ((hdr->cmd_status ==
					    MFI_STAT_OK) && inq->inq_dtype ==
					    DTYPE_DIRECT) {

						display_scsi_inquiry(
						    (caddr_t)inq);

						/* for physical disk */
						hdr->cmd_status =
						    MFI_STAT_DEVICE_NOT_FOUND;
					}
				}
			}

			DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
			    uint8_t, hdr->cmd_status);

			/*
			 * Translate the MFI status into SCSA packet state.
			 * NOTE(review): hdr->cmd_status is read directly from
			 * DMA memory here, unlike the ddi_get8() accesses
			 * used elsewhere — verify this is safe on all
			 * supported platforms.
			 */
			switch (hdr->cmd_status) {
			case MFI_STAT_OK:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_CC_IN_PROGRESS:
			case MFI_STAT_LD_RECON_IN_PROGRESS:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_INIT_IN_PROGRESS:
				con_log(CL_ANN,
				    (CE_WARN, "Initialization in Progress"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			case MFI_STAT_SCSI_DONE_WITH_ERROR:
				con_log(CL_ANN, (CE_CONT, "scsi_done error"));

				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
					con_log(CL_ANN,
					    (CE_WARN, "TEST_UNIT_READY fail"));
				} else {
					/*
					 * Fabricate auto request sense from
					 * the sense buffer the FW filled in.
					 */
					pkt->pkt_state |= STATE_ARQ_DONE;
					arqstat = (void *)(pkt->pkt_scbp);
					arqstat->sts_rqpkt_reason = CMD_CMPLT;
					arqstat->sts_rqpkt_resid = 0;
					arqstat->sts_rqpkt_state |=
					    STATE_GOT_BUS | STATE_GOT_TARGET
					    | STATE_SENT_CMD
					    | STATE_XFERRED_DATA;
					*(uint8_t *)&arqstat->sts_rqpkt_status =
					    STATUS_GOOD;
					ddi_rep_get8(
					    cmd->frame_dma_obj.acc_handle,
					    (uint8_t *)
					    &(arqstat->sts_sensedata),
					    cmd->sense,
					    sizeof (struct scsi_extended_sense),
					    DDI_DEV_AUTOINCR);
				}
				break;
			case MFI_STAT_LD_OFFLINE:
			case MFI_STAT_DEVICE_NOT_FOUND:
				con_log(CL_ANN, (CE_CONT,
				    "mrsas_softintr:device not found error"));
				pkt->pkt_reason = CMD_DEV_GONE;
				pkt->pkt_statistics = STAT_DISCON;
				break;
			case MFI_STAT_LD_LBA_OUT_OF_RANGE:
				/*
				 * Report CHECK CONDITION with synthesized
				 * ILLEGAL REQUEST sense data (ASC/ASCQ
				 * 0x21/0x00, LBA out of range).
				 */
				pkt->pkt_state |= STATE_ARQ_DONE;
				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |= STATE_GOT_BUS
				    | STATE_GOT_TARGET | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;

				arqstat->sts_sensedata.es_valid = 1;
				arqstat->sts_sensedata.es_key =
				    KEY_ILLEGAL_REQUEST;
				arqstat->sts_sensedata.es_class =
				    CLASS_EXTENDED_SENSE;

				/*
				 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
				 * ASC: 0x21h; ASCQ: 0x00h;
				 */
				arqstat->sts_sensedata.es_add_code = 0x21;
				arqstat->sts_sensedata.es_qual_code = 0x00;

				break;

			default:
				con_log(CL_ANN, (CE_CONT, "Unknown status!"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			}

			/* One fewer command outstanding at the firmware. */
			atomic_add_16(&instance->fw_outstanding, (-1));

			(void) mrsas_common_check(instance, cmd);

			/* FMA check of the data DMA handle, if one exists. */
			if (acmd->cmd_dmahandle) {
				if (mrsas_check_dma_handle(
				    acmd->cmd_dmahandle) != DDI_SUCCESS) {
					ddi_fm_service_impact(instance->dip,
					    DDI_SERVICE_UNAFFECTED);
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics = 0;
				}
			}

			/* Call the callback routine */
			if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
			    pkt->pkt_comp) {

				con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_softintr: "
				    "posting to scsa cmd %p index %x pkt %p "
				    "time %llx", (void *)cmd, cmd->index,
				    (void *)pkt, gethrtime()));
				(*pkt->pkt_comp)(pkt);

			}

			return_mfi_pkt(instance, cmd);
			break;

		case MFI_CMD_OP_SMP:
		case MFI_CMD_OP_STP:
			/* SMP/STP are always issued internally. */
			complete_cmd_in_sync_mode(instance, cmd);
			break;

		case MFI_CMD_OP_DCMD:
			/* see if got an event notification */
			if (ddi_get32(cmd->frame_dma_obj.acc_handle,
			    &cmd->frame->dcmd.opcode) ==
			    MR_DCMD_CTRL_EVENT_WAIT) {
				if ((instance->aen_cmd == cmd) &&
				    (instance->aen_cmd->abort_aen)) {
					con_log(CL_ANN, (CE_WARN,
					    "mrsas_softintr: "
					    "aborted_aen returned"));
				} else {
					/* Process and re-arm the AEN. */
					atomic_add_16(&instance->fw_outstanding,
					    (-1));
					service_mfi_aen(instance, cmd);
				}
			} else {
				complete_cmd_in_sync_mode(instance, cmd);
			}

			break;

		case MFI_CMD_OP_ABORT:
			con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
			/*
			 * MFI_CMD_OP_ABORT successfully completed
			 * in the synchronous mode
			 */
			complete_cmd_in_sync_mode(instance, cmd);
			break;

		default:
			/* Unexpected opcode: report an FM event and, if a
			 * packet is attached, still complete it to SCSA. */
			mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

			if (cmd->pkt != NULL) {
				pkt = cmd->pkt;
				if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
				    pkt->pkt_comp) {

					con_log(CL_ANN1, (CE_CONT, "posting to "
					    "scsa cmd %p index %x pkt %p"
					    "time %llx, default ", (void *)cmd,
					    cmd->index, (void *)pkt,
					    gethrtime()));

					(*pkt->pkt_comp)(pkt);

				}
			}
			con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
			break;
		}
	}

	instance->softint_running = 0;

	return (DDI_INTR_CLAIMED);
}
4810
4811 /*
4812 * mrsas_alloc_dma_obj
4813 *
4814 * Allocate the memory and other resources for an dma object.
4815 */
4816 int
4817 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
4818 uchar_t endian_flags)
4819 {
4820 int i;
4821 size_t alen = 0;
4822 uint_t cookie_cnt;
4823 struct ddi_device_acc_attr tmp_endian_attr;
4824
4825 tmp_endian_attr = endian_attr;
4826 tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
4827 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4828
4829 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
4830 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
4831 if (i != DDI_SUCCESS) {
4832
4833 switch (i) {
4834 case DDI_DMA_BADATTR :
4835 con_log(CL_ANN, (CE_WARN,
4836 "Failed ddi_dma_alloc_handle- Bad attribute"));
4837 break;
4838 case DDI_DMA_NORESOURCES :
4839 con_log(CL_ANN, (CE_WARN,
4840 "Failed ddi_dma_alloc_handle- No Resources"));
4841 break;
4842 default :
4843 con_log(CL_ANN, (CE_WARN,
4844 "Failed ddi_dma_alloc_handle: "
4845 "unknown status %d", i));
4846 break;
4847 }
4848
4849 return (-1);
4850 }
4851
4852 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
4853 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4854 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4855 alen < obj->size) {
4856
4857 ddi_dma_free_handle(&obj->dma_handle);
4858
4859 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4860
4861 return (-1);
4862 }
4863
4864 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4865 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4866 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4867
4868 ddi_dma_mem_free(&obj->acc_handle);
4869 ddi_dma_free_handle(&obj->dma_handle);
4870
4871 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4872
4873 return (-1);
4874 }
4875
4876 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
4877 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4878 return (-1);
4879 }
4880
4881 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
4882 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4883 return (-1);
4884 }
4885
4886 return (cookie_cnt);
4887 }
4888
4889 /*
4890 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4891 *
4892 * De-allocate the memory and other resources for an dma object, which must
4893 * have been alloated by a previous call to mrsas_alloc_dma_obj()
4894 */
4895 /* ARGSUSED */
4896 int
4897 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4898 {
4899
4900 if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) {
4901 return (DDI_SUCCESS);
4902 }
4903
4904 /*
4905 * NOTE: These check-handle functions fail if *_handle == NULL, but
4906 * this function succeeds because of the previous check.
4907 */
4908 if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
4909 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4910 return (DDI_FAILURE);
4911 }
4912
4913 if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
4914 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4915 return (DDI_FAILURE);
4916 }
4917
4918 (void) ddi_dma_unbind_handle(obj.dma_handle);
4919 ddi_dma_mem_free(&obj.acc_handle);
4920 ddi_dma_free_handle(&obj.dma_handle);
4921 obj.acc_handle = NULL;
4922 return (DDI_SUCCESS);
4923 }
4924
4925 /*
4926 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
4927 * int, int (*)())
4928 *
4929 * Allocate dma resources for a new scsi command
4930 */
int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int dma_flags;
	int (*cb)(caddr_t);
	int i;

	ddi_dma_attr_t tmp_dma_attr = mrsas_generic_dma_attr;
	struct scsa_cmd *acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* Direction of the transfer follows the buf's read/write flag. */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	/* NULL_FUNC means "don't wait" per the SCSA callback convention. */
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	/*
	 * Tailor the generic attributes to this request: SGL length from
	 * the instance, and full 64-bit addressing (presumably safe for
	 * all supported controllers — TODO confirm).
	 */
	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
	if (instance->tbolt) {
		/* OCR-RESET FIX */
		tmp_dma_attr.dma_attr_count_max =
		    (U64)mrsas_tbolt_max_cap_maxxfer; /* limit to 256K */
		tmp_dma_attr.dma_attr_maxxfer =
		    (U64)mrsas_tbolt_max_cap_maxxfer; /* limit to 256K */
	}

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);

		case DDI_DMA_NORESOURCES:
			/* errno 0: resource shortage, caller may retry. */
			bioerror(bp, 0);
			return (DDI_FAILURE);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "impossible result (0x%x)", i));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		}
	}

	/* Bind the buf; may map fully, partially, or fail. */
	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* Partial mapping: position on the first DMA window. */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
			goto no_dma_cookies;
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
			goto no_dma_cookies;
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		/* Fully mapped: exactly one window covering everything. */
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

	get_dma_cookies:
		/*
		 * Collect cookies into the command, up to the controller's
		 * SGL limit; total mapped bytes accumulate in cmd_dmacount.
		 */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		/* Residual is whatever part of the buf was not mapped. */
		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (DDI_SUCCESS);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "impossible result (0x%x)", i));
		break;
	}

no_dma_cookies:
	/* Common failure path: release the handle and mark DMA invalid. */
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (DDI_FAILURE);
}
5078
5079 /*
5080 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
5081 *
5082 * move dma resources to next dma window
5083 *
5084 */
int
mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int i = 0;

	struct scsa_cmd *acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
	 */
	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
		/* Single fully-consumed window: nothing left to move to. */
		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
			return (DDI_SUCCESS);
		}

		/* at last window, cannot move */
		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
			return (DDI_FAILURE);
		}

		/* Activate the next window; its first cookie lands in [0]. */
		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {
			return (DDI_FAILURE);
		}

		acmd->cmd_cookie = 0;
	} else {
		/* still more cookies in this window - get the next one */
		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[0]);
	}

	/* get remaining cookies in this window, up to our maximum */
	for (;;) {
		/*
		 * cmd_dmacount keeps accumulating across windows; the
		 * pkt_resid computation below relies on this running total.
		 */
		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
		acmd->cmd_cookie++;

		if (i == instance->max_num_sge ||
		    acmd->cmd_cookie == acmd->cmd_ncookies) {
			break;
		}

		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[i]);
	}

	acmd->cmd_cookiecnt = i;

	/* Residual is the portion of the buf not yet covered by cookies. */
	if (bp->b_bcount >= acmd->cmd_dmacount) {
		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
	} else {
		pkt->pkt_resid = 0;
	}

	return (DDI_SUCCESS);
}
5145
5146 /*
5147 * build_cmd
5148 */
5149 static struct mrsas_cmd *
5150 build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
5151 struct scsi_pkt *pkt, uchar_t *cmd_done)
5152 {
5153 uint16_t flags = 0;
5154 uint32_t i;
5155 uint32_t context;
5156 uint32_t sge_bytes;
5157 uint32_t tmp_data_xfer_len;
5158 ddi_acc_handle_t acc_handle;
5159 struct mrsas_cmd *cmd;
5160 struct mrsas_sge64 *mfi_sgl;
5161 struct mrsas_sge_ieee *mfi_sgl_ieee;
5162 struct scsa_cmd *acmd = PKT2CMD(pkt);
5163 struct mrsas_pthru_frame *pthru;
5164 struct mrsas_io_frame *ldio;
5165
5166 /* find out if this is logical or physical drive command. */
5167 acmd->islogical = MRDRV_IS_LOGICAL(ap);
5168 acmd->device_id = MAP_DEVICE_ID(instance, ap);
5169 *cmd_done = 0;
5170
5171 /* get the command packet */
5172 if (!(cmd = get_mfi_pkt(instance))) {
5173 DTRACE_PROBE2(build_cmd_mfi_err, uint16_t,
5174 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
5175 return (NULL);
5176 }
5177
5178 acc_handle = cmd->frame_dma_obj.acc_handle;
5179
5180 /* Clear the frame buffer and assign back the context id */
5181 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
5182 ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);
5183
5184 cmd->pkt = pkt;
5185 cmd->cmd = acmd;
5186 DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0],
5187 ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len);
5188
5189 /* lets get the command directions */
5190 if (acmd->cmd_flags & CFLAG_DMASEND) {
5191 flags = MFI_FRAME_DIR_WRITE;
5192
5193 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
5194 (void) ddi_dma_sync(acmd->cmd_dmahandle,
5195 acmd->cmd_dma_offset, acmd->cmd_dma_len,
5196 DDI_DMA_SYNC_FORDEV);
5197 }
5198 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
5199 flags = MFI_FRAME_DIR_READ;
5200
5201 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
5202 (void) ddi_dma_sync(acmd->cmd_dmahandle,
5203 acmd->cmd_dma_offset, acmd->cmd_dma_len,
5204 DDI_DMA_SYNC_FORCPU);
5205 }
5206 } else {
5207 flags = MFI_FRAME_DIR_NONE;
5208 }
5209
5210 if (instance->flag_ieee) {
5211 flags |= MFI_FRAME_IEEE;
5212 }
5213 flags |= MFI_FRAME_SGL64;
5214
5215 switch (pkt->pkt_cdbp[0]) {
5216
5217 /*
5218 * case SCMD_SYNCHRONIZE_CACHE:
5219 * flush_cache(instance);
5220 * return_mfi_pkt(instance, cmd);
5221 * *cmd_done = 1;
5222 *
5223 * return (NULL);
5224 */
5225
5226 case SCMD_READ:
5227 case SCMD_WRITE:
5228 case SCMD_READ_G1:
5229 case SCMD_WRITE_G1:
5230 case SCMD_READ_G4:
5231 case SCMD_WRITE_G4:
5232 case SCMD_READ_G5:
5233 case SCMD_WRITE_G5:
5234 if (acmd->islogical) {
5235 ldio = (struct mrsas_io_frame *)cmd->frame;
5236
5237 /*
5238 * preare the Logical IO frame:
5239 * 2nd bit is zero for all read cmds
5240 */
5241 ddi_put8(acc_handle, &ldio->cmd,
5242 (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
5243 : MFI_CMD_OP_LD_READ);
5244 ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
5245 ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
5246 ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
5247 ddi_put16(acc_handle, &ldio->timeout, 0);
5248 ddi_put8(acc_handle, &ldio->reserved_0, 0);
5249 ddi_put16(acc_handle, &ldio->pad_0, 0);
5250 ddi_put16(acc_handle, &ldio->flags, flags);
5251
5252 /* Initialize sense Information */
5253 bzero(cmd->sense, SENSE_LENGTH);
5254 ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
5255 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
5256 ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
5257 cmd->sense_phys_addr);
5258 ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
5259 ddi_put8(acc_handle, &ldio->access_byte,
5260 (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
5261 ddi_put8(acc_handle, &ldio->sge_count,
5262 acmd->cmd_cookiecnt);
5263 if (instance->flag_ieee) {
5264 mfi_sgl_ieee =
5265 (struct mrsas_sge_ieee *)&ldio->sgl;
5266 } else {
5267 mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl;
5268 }
5269
5270 context = ddi_get32(acc_handle, &ldio->context);
5271
5272 if (acmd->cmd_cdblen == CDB_GROUP0) {
5273 /* 6-byte cdb */
5274 ddi_put32(acc_handle, &ldio->lba_count, (
5275 (uint16_t)(pkt->pkt_cdbp[4])));
5276
5277 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5278 ((uint32_t)(pkt->pkt_cdbp[3])) |
5279 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
5280 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
5281 << 16)));
5282 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
5283 /* 10-byte cdb */
5284 ddi_put32(acc_handle, &ldio->lba_count, (
5285 ((uint16_t)(pkt->pkt_cdbp[8])) |
5286 ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));
5287
5288 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5289 ((uint32_t)(pkt->pkt_cdbp[5])) |
5290 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5291 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5292 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5293 } else if (acmd->cmd_cdblen == CDB_GROUP5) {
5294 /* 12-byte cdb */
5295 ddi_put32(acc_handle, &ldio->lba_count, (
5296 ((uint32_t)(pkt->pkt_cdbp[9])) |
5297 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5298 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5299 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5300
5301 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5302 ((uint32_t)(pkt->pkt_cdbp[5])) |
5303 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5304 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5305 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5306 } else if (acmd->cmd_cdblen == CDB_GROUP4) {
5307 /* 16-byte cdb */
5308 ddi_put32(acc_handle, &ldio->lba_count, (
5309 ((uint32_t)(pkt->pkt_cdbp[13])) |
5310 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
5311 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
5312 ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));
5313
5314 ddi_put32(acc_handle, &ldio->start_lba_lo, (
5315 ((uint32_t)(pkt->pkt_cdbp[9])) |
5316 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
5317 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
5318 ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));
5319
5320 ddi_put32(acc_handle, &ldio->start_lba_hi, (
5321 ((uint32_t)(pkt->pkt_cdbp[5])) |
5322 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
5323 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
5324 ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
5325 }
5326
5327 break;
5328 }
5329 /* fall through For all non-rd/wr cmds */
5330 default:
5331
5332 switch (pkt->pkt_cdbp[0]) {
5333 case SCMD_MODE_SENSE:
5334 case SCMD_MODE_SENSE_G1: {
5335 union scsi_cdb *cdbp;
5336 uint16_t page_code;
5337
5338 cdbp = (void *)pkt->pkt_cdbp;
5339 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
5340 switch (page_code) {
5341 case 0x3:
5342 case 0x4:
5343 (void) mrsas_mode_sense_build(pkt);
5344 return_mfi_pkt(instance, cmd);
5345 *cmd_done = 1;
5346 return (NULL);
5347 }
5348 break;
5349 }
5350 default:
5351 break;
5352 }
5353
5354 pthru = (struct mrsas_pthru_frame *)cmd->frame;
5355
5356 /* prepare the DCDB frame */
5357 ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
5358 MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
5359 ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
5360 ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
5361 ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
5362 ddi_put8(acc_handle, &pthru->lun, 0);
5363 ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
5364 ddi_put16(acc_handle, &pthru->timeout, 0);
5365 ddi_put16(acc_handle, &pthru->flags, flags);
5366 tmp_data_xfer_len = 0;
5367 for (i = 0; i < acmd->cmd_cookiecnt; i++) {
5368 tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
5369 }
5370 ddi_put32(acc_handle, &pthru->data_xfer_len,
5371 tmp_data_xfer_len);
5372 ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
5373 if (instance->flag_ieee) {
5374 mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
5375 } else {
5376 mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl;
5377 }
5378
5379 bzero(cmd->sense, SENSE_LENGTH);
5380 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5381 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5382 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
5383 cmd->sense_phys_addr);
5384
5385 context = ddi_get32(acc_handle, &pthru->context);
5386 ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
5387 (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
5388
5389 break;
5390 }
5391 #ifdef lint
5392 context = context;
5393 #endif
5394 /* prepare the scatter-gather list for the firmware */
5395 if (instance->flag_ieee) {
5396 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
5397 ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
5398 acmd->cmd_dmacookies[i].dmac_laddress);
5399 ddi_put32(acc_handle, &mfi_sgl_ieee->length,
5400 acmd->cmd_dmacookies[i].dmac_size);
5401 }
5402 sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
5403 } else {
5404 for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
5405 ddi_put64(acc_handle, &mfi_sgl->phys_addr,
5406 acmd->cmd_dmacookies[i].dmac_laddress);
5407 ddi_put32(acc_handle, &mfi_sgl->length,
5408 acmd->cmd_dmacookies[i].dmac_size);
5409 }
5410 sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
5411 }
5412
5413 cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
5414 ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;
5415
5416 if (cmd->frame_count >= 8) {
5417 cmd->frame_count = 8;
5418 }
5419
5420 return (cmd);
5421 }
5422
5423 #ifndef __sparc
5424 /*
5425 * wait_for_outstanding - Wait for all outstanding cmds
5426 * @instance: Adapter soft state
5427 *
5428 * This function waits for upto MRDRV_RESET_WAIT_TIME seconds for FW to
5429 * complete all its outstanding commands. Returns error if one or more IOs
5430 * are pending after this time period.
5431 */
5432 static int
5433 wait_for_outstanding(struct mrsas_instance *instance)
5434 {
5435 int i;
5436 uint32_t wait_time = 90;
5437
5438 for (i = 0; i < wait_time; i++) {
5439 if (!instance->fw_outstanding) {
5440 break;
5441 }
5442
5443 drv_usecwait(MILLISEC); /* wait for 1000 usecs */;
5444 }
5445
5446 if (instance->fw_outstanding) {
5447 return (1);
5448 }
5449
5450 return (0);
5451 }
5452 #endif /* __sparc */
5453
5454 /*
5455 * issue_mfi_pthru
5456 */
5457 static int
5458 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5459 struct mrsas_cmd *cmd, int mode)
5460 {
5461 void *ubuf;
5462 uint32_t kphys_addr = 0;
5463 uint32_t xferlen = 0;
5464 uint32_t new_xfer_length = 0;
5465 uint_t model;
5466 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5467 dma_obj_t pthru_dma_obj;
5468 struct mrsas_pthru_frame *kpthru;
5469 struct mrsas_pthru_frame *pthru;
5470 int i;
5471 pthru = &cmd->frame->pthru;
5472 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5473
5474 if (instance->adapterresetinprogress) {
5475 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, "
5476 "returning mfi_pkt and setting TRAN_BUSY\n"));
5477 return (DDI_FAILURE);
5478 }
5479 model = ddi_model_convert_from(mode & FMODELS);
5480 if (model == DDI_MODEL_ILP32) {
5481 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5482
5483 xferlen = kpthru->sgl.sge32[0].length;
5484
5485 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5486 } else {
5487 #ifdef _ILP32
5488 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5489 xferlen = kpthru->sgl.sge32[0].length;
5490 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5491 #else
5492 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5493 xferlen = kpthru->sgl.sge64[0].length;
5494 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5495 #endif
5496 }
5497
5498 if (xferlen) {
5499 /* means IOCTL requires DMA */
5500 /* allocate the data transfer buffer */
5501 /* pthru_dma_obj.size = xferlen; */
5502 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5503 PAGESIZE);
5504 pthru_dma_obj.size = new_xfer_length;
5505 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5506 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5507 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5508 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5509 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5510
5511 /* allocate kernel buffer for DMA */
5512 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5513 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5514 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5515 "could not allocate data transfer buffer."));
5516 return (DDI_FAILURE);
5517 }
5518 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5519
5520 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5521 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5522 for (i = 0; i < xferlen; i++) {
5523 if (ddi_copyin((uint8_t *)ubuf+i,
5524 (uint8_t *)pthru_dma_obj.buffer+i,
5525 1, mode)) {
5526 con_log(CL_ANN, (CE_WARN,
5527 "issue_mfi_pthru : "
5528 "copy from user space failed"));
5529 return (DDI_FAILURE);
5530 }
5531 }
5532 }
5533
5534 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5535 }
5536
5537 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5538 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5539 ddi_put8(acc_handle, &pthru->cmd_status, 0);
5540 ddi_put8(acc_handle, &pthru->scsi_status, 0);
5541 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5542 ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5543 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5544 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5545 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5546 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5547
5548 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5549 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
5550 /* ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5551
5552 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5553 pthru->cdb_len, DDI_DEV_AUTOINCR);
5554
5555 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5556 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5557 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5558
5559 cmd->sync_cmd = MRSAS_TRUE;
5560 cmd->frame_count = 1;
5561
5562 if (instance->tbolt) {
5563 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5564 }
5565
5566 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5567 con_log(CL_ANN, (CE_WARN,
5568 "issue_mfi_pthru: fw_ioctl failed"));
5569 } else {
5570 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
5571 for (i = 0; i < xferlen; i++) {
5572 if (ddi_copyout(
5573 (uint8_t *)pthru_dma_obj.buffer+i,
5574 (uint8_t *)ubuf+i, 1, mode)) {
5575 con_log(CL_ANN, (CE_WARN,
5576 "issue_mfi_pthru : "
5577 "copy to user space failed"));
5578 return (DDI_FAILURE);
5579 }
5580 }
5581 }
5582 }
5583
5584 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5585 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5586
5587 con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5588 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5589 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t,
5590 kpthru->cmd_status, uint8_t, kpthru->scsi_status);
5591
5592 if (kpthru->sense_len) {
5593 uint_t sense_len = SENSE_LENGTH;
5594 void *sense_ubuf =
5595 (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5596 if (kpthru->sense_len <= SENSE_LENGTH) {
5597 sense_len = kpthru->sense_len;
5598 }
5599
5600 for (i = 0; i < sense_len; i++) {
5601 if (ddi_copyout(
5602 (uint8_t *)cmd->sense+i,
5603 (uint8_t *)sense_ubuf+i, 1, mode)) {
5604 con_log(CL_ANN, (CE_WARN,
5605 "issue_mfi_pthru : "
5606 "copy to user space failed"));
5607 }
5608 con_log(CL_DLEVEL1, (CE_WARN,
5609 "Copying Sense info sense_buff[%d] = 0x%X\n",
5610 i, *((uint8_t *)cmd->sense + i)));
5611 }
5612 }
5613 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5614 DDI_DMA_SYNC_FORDEV);
5615
5616 if (xferlen) {
5617 /* free kernel buffer */
5618 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5619 return (DDI_FAILURE);
5620 }
5621
5622 return (DDI_SUCCESS);
5623 }
5624
5625 /*
5626 * issue_mfi_dcmd
5627 */
5628 static int
5629 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5630 struct mrsas_cmd *cmd, int mode)
5631 {
5632 void *ubuf;
5633 uint32_t kphys_addr = 0;
5634 uint32_t xferlen = 0;
5635 uint32_t new_xfer_length = 0;
5636 uint32_t model;
5637 dma_obj_t dcmd_dma_obj;
5638 struct mrsas_dcmd_frame *kdcmd;
5639 struct mrsas_dcmd_frame *dcmd;
5640 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5641 int i;
5642 dcmd = &cmd->frame->dcmd;
5643 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5644
5645 if (instance->adapterresetinprogress) {
5646 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5647 "returning mfi_pkt and setting TRAN_BUSY\n"));
5648 return (DDI_FAILURE);
5649 }
5650 model = ddi_model_convert_from(mode & FMODELS);
5651 if (model == DDI_MODEL_ILP32) {
5652 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5653
5654 xferlen = kdcmd->sgl.sge32[0].length;
5655
5656 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5657 } else {
5658 #ifdef _ILP32
5659 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5660 xferlen = kdcmd->sgl.sge32[0].length;
5661 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5662 #else
5663 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5664 xferlen = kdcmd->sgl.sge64[0].length;
5665 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5666 #endif
5667 }
5668 if (xferlen) {
5669 /* means IOCTL requires DMA */
5670 /* allocate the data transfer buffer */
5671 /* dcmd_dma_obj.size = xferlen; */
5672 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5673 PAGESIZE);
5674 dcmd_dma_obj.size = new_xfer_length;
5675 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5676 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5677 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5678 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5679 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5680
5681 /* allocate kernel buffer for DMA */
5682 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
5683 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5684 con_log(CL_ANN,
5685 (CE_WARN, "issue_mfi_dcmd: could not "
5686 "allocate data transfer buffer."));
5687 return (DDI_FAILURE);
5688 }
5689 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5690
5691 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5692 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5693 for (i = 0; i < xferlen; i++) {
5694 if (ddi_copyin((uint8_t *)ubuf + i,
5695 (uint8_t *)dcmd_dma_obj.buffer + i,
5696 1, mode)) {
5697 con_log(CL_ANN, (CE_WARN,
5698 "issue_mfi_dcmd : "
5699 "copy from user space failed"));
5700 return (DDI_FAILURE);
5701 }
5702 }
5703 }
5704
5705 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5706 }
5707
5708 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5709 ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5710 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5711 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5712 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5713 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5714
5715 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5716 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5717
5718 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5719 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5720 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5721
5722 cmd->sync_cmd = MRSAS_TRUE;
5723 cmd->frame_count = 1;
5724
5725 if (instance->tbolt) {
5726 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5727 }
5728
5729 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5730 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5731 } else {
5732 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5733 for (i = 0; i < xferlen; i++) {
5734 if (ddi_copyout(
5735 (uint8_t *)dcmd_dma_obj.buffer + i,
5736 (uint8_t *)ubuf + i,
5737 1, mode)) {
5738 con_log(CL_ANN, (CE_WARN,
5739 "issue_mfi_dcmd : "
5740 "copy to user space failed"));
5741 return (DDI_FAILURE);
5742 }
5743 }
5744 }
5745 }
5746
5747 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5748 con_log(CL_ANN,
5749 (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5750 DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t,
5751 kdcmd->cmd, uint8_t, kdcmd->cmd_status);
5752
5753 if (xferlen) {
5754 /* free kernel buffer */
5755 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5756 return (DDI_FAILURE);
5757 }
5758
5759 return (DDI_SUCCESS);
5760 }
5761
5762 /*
5763 * issue_mfi_smp
5764 */
5765 static int
5766 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5767 struct mrsas_cmd *cmd, int mode)
5768 {
5769 void *request_ubuf;
5770 void *response_ubuf;
5771 uint32_t request_xferlen = 0;
5772 uint32_t response_xferlen = 0;
5773 uint32_t new_xfer_length1 = 0;
5774 uint32_t new_xfer_length2 = 0;
5775 uint_t model;
5776 dma_obj_t request_dma_obj;
5777 dma_obj_t response_dma_obj;
5778 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5779 struct mrsas_smp_frame *ksmp;
5780 struct mrsas_smp_frame *smp;
5781 struct mrsas_sge32 *sge32;
5782 #ifndef _ILP32
5783 struct mrsas_sge64 *sge64;
5784 #endif
5785 int i;
5786 uint64_t tmp_sas_addr;
5787
5788 smp = &cmd->frame->smp;
5789 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5790
5791 if (instance->adapterresetinprogress) {
5792 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5793 "returning mfi_pkt and setting TRAN_BUSY\n"));
5794 return (DDI_FAILURE);
5795 }
5796 model = ddi_model_convert_from(mode & FMODELS);
5797 if (model == DDI_MODEL_ILP32) {
5798 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5799
5800 sge32 = &ksmp->sgl[0].sge32[0];
5801 response_xferlen = sge32[0].length;
5802 request_xferlen = sge32[1].length;
5803 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5804 "response_xferlen = %x, request_xferlen = %x",
5805 response_xferlen, request_xferlen));
5806
5807 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5808 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5809 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5810 "response_ubuf = %p, request_ubuf = %p",
5811 response_ubuf, request_ubuf));
5812 } else {
5813 #ifdef _ILP32
5814 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5815
5816 sge32 = &ksmp->sgl[0].sge32[0];
5817 response_xferlen = sge32[0].length;
5818 request_xferlen = sge32[1].length;
5819 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5820 "response_xferlen = %x, request_xferlen = %x",
5821 response_xferlen, request_xferlen));
5822
5823 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5824 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5825 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5826 "response_ubuf = %p, request_ubuf = %p",
5827 response_ubuf, request_ubuf));
5828 #else
5829 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5830
5831 sge64 = &ksmp->sgl[0].sge64[0];
5832 response_xferlen = sge64[0].length;
5833 request_xferlen = sge64[1].length;
5834
5835 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
5836 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
5837 #endif
5838 }
5839 if (request_xferlen) {
5840 /* means IOCTL requires DMA */
5841 /* allocate the data transfer buffer */
5842 /* request_dma_obj.size = request_xferlen; */
5843 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,
5844 new_xfer_length1, PAGESIZE);
5845 request_dma_obj.size = new_xfer_length1;
5846 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5847 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5848 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5849 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5850 request_dma_obj.dma_attr.dma_attr_align = 1;
5851
5852 /* allocate kernel buffer for DMA */
5853 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5854 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5855 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5856 "could not allocate data transfer buffer."));
5857 return (DDI_FAILURE);
5858 }
5859 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5860
5861 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5862 for (i = 0; i < request_xferlen; i++) {
5863 if (ddi_copyin((uint8_t *)request_ubuf + i,
5864 (uint8_t *)request_dma_obj.buffer + i,
5865 1, mode)) {
5866 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5867 "copy from user space failed"));
5868 return (DDI_FAILURE);
5869 }
5870 }
5871 }
5872
5873 if (response_xferlen) {
5874 /* means IOCTL requires DMA */
5875 /* allocate the data transfer buffer */
5876 /* response_dma_obj.size = response_xferlen; */
5877 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,
5878 new_xfer_length2, PAGESIZE);
5879 response_dma_obj.size = new_xfer_length2;
5880 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5881 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5882 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5883 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5884 response_dma_obj.dma_attr.dma_attr_align = 1;
5885
5886 /* allocate kernel buffer for DMA */
5887 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5888 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5889 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5890 "could not allocate data transfer buffer."));
5891 return (DDI_FAILURE);
5892 }
5893 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5894
5895 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5896 for (i = 0; i < response_xferlen; i++) {
5897 if (ddi_copyin((uint8_t *)response_ubuf + i,
5898 (uint8_t *)response_dma_obj.buffer + i,
5899 1, mode)) {
5900 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5901 "copy from user space failed"));
5902 return (DDI_FAILURE);
5903 }
5904 }
5905 }
5906
5907 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5908 ddi_put8(acc_handle, &smp->cmd_status, 0);
5909 ddi_put8(acc_handle, &smp->connection_status, 0);
5910 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5911 /* smp->context = ksmp->context; */
5912 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5913 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5914
5915 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5916 sizeof (uint64_t));
5917 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5918
5919 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5920
5921 model = ddi_model_convert_from(mode & FMODELS);
5922 if (model == DDI_MODEL_ILP32) {
5923 con_log(CL_ANN1, (CE_CONT,
5924 "issue_mfi_smp: DDI_MODEL_ILP32"));
5925
5926 sge32 = &smp->sgl[0].sge32[0];
5927 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5928 ddi_put32(acc_handle, &sge32[0].phys_addr,
5929 response_dma_obj.dma_cookie[0].dmac_address);
5930 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5931 ddi_put32(acc_handle, &sge32[1].phys_addr,
5932 request_dma_obj.dma_cookie[0].dmac_address);
5933 } else {
5934 #ifdef _ILP32
5935 con_log(CL_ANN1, (CE_CONT,
5936 "issue_mfi_smp: DDI_MODEL_ILP32"));
5937 sge32 = &smp->sgl[0].sge32[0];
5938 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5939 ddi_put32(acc_handle, &sge32[0].phys_addr,
5940 response_dma_obj.dma_cookie[0].dmac_address);
5941 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5942 ddi_put32(acc_handle, &sge32[1].phys_addr,
5943 request_dma_obj.dma_cookie[0].dmac_address);
5944 #else
5945 con_log(CL_ANN1, (CE_CONT,
5946 "issue_mfi_smp: DDI_MODEL_LP64"));
5947 sge64 = &smp->sgl[0].sge64[0];
5948 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5949 ddi_put64(acc_handle, &sge64[0].phys_addr,
5950 response_dma_obj.dma_cookie[0].dmac_address);
5951 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5952 ddi_put64(acc_handle, &sge64[1].phys_addr,
5953 request_dma_obj.dma_cookie[0].dmac_address);
5954 #endif
5955 }
5956 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5957 "smp->response_xferlen = %d, smp->request_xferlen = %d "
5958 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5959 ddi_get32(acc_handle, &sge32[1].length),
5960 ddi_get32(acc_handle, &smp->data_xfer_len)));
5961
5962 cmd->sync_cmd = MRSAS_TRUE;
5963 cmd->frame_count = 1;
5964
5965 if (instance->tbolt) {
5966 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5967 }
5968
5969 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5970 con_log(CL_ANN, (CE_WARN,
5971 "issue_mfi_smp: fw_ioctl failed"));
5972 } else {
5973 con_log(CL_ANN1, (CE_CONT,
5974 "issue_mfi_smp: copy to user space"));
5975
5976 if (request_xferlen) {
5977 for (i = 0; i < request_xferlen; i++) {
5978 if (ddi_copyout(
5979 (uint8_t *)request_dma_obj.buffer +
5980 i, (uint8_t *)request_ubuf + i,
5981 1, mode)) {
5982 con_log(CL_ANN, (CE_WARN,
5983 "issue_mfi_smp : copy to user space"
5984 " failed"));
5985 return (DDI_FAILURE);
5986 }
5987 }
5988 }
5989
5990 if (response_xferlen) {
5991 for (i = 0; i < response_xferlen; i++) {
5992 if (ddi_copyout(
5993 (uint8_t *)response_dma_obj.buffer
5994 + i, (uint8_t *)response_ubuf
5995 + i, 1, mode)) {
5996 con_log(CL_ANN, (CE_WARN,
5997 "issue_mfi_smp : copy to "
5998 "user space failed"));
5999 return (DDI_FAILURE);
6000 }
6001 }
6002 }
6003 }
6004
6005 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
6006 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
6007 ksmp->cmd_status));
6008 DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status);
6009
6010 if (request_xferlen) {
6011 /* free kernel buffer */
6012 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
6013 DDI_SUCCESS)
6014 return (DDI_FAILURE);
6015 }
6016
6017 if (response_xferlen) {
6018 /* free kernel buffer */
6019 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
6020 DDI_SUCCESS)
6021 return (DDI_FAILURE);
6022 }
6023
6024 return (DDI_SUCCESS);
6025 }
6026
6027 /*
6028 * issue_mfi_stp
6029 */
6030 static int
6031 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6032 struct mrsas_cmd *cmd, int mode)
6033 {
6034 void *fis_ubuf;
6035 void *data_ubuf;
6036 uint32_t fis_xferlen = 0;
6037 uint32_t new_xfer_length1 = 0;
6038 uint32_t new_xfer_length2 = 0;
6039 uint32_t data_xferlen = 0;
6040 uint_t model;
6041 dma_obj_t fis_dma_obj;
6042 dma_obj_t data_dma_obj;
6043 struct mrsas_stp_frame *kstp;
6044 struct mrsas_stp_frame *stp;
6045 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
6046 int i;
6047
6048 stp = &cmd->frame->stp;
6049 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
6050
6051 if (instance->adapterresetinprogress) {
6052 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
6053 "returning mfi_pkt and setting TRAN_BUSY\n"));
6054 return (DDI_FAILURE);
6055 }
6056 model = ddi_model_convert_from(mode & FMODELS);
6057 if (model == DDI_MODEL_ILP32) {
6058 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
6059
6060 fis_xferlen = kstp->sgl.sge32[0].length;
6061 data_xferlen = kstp->sgl.sge32[1].length;
6062
6063 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
6064 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
6065 } else {
6066 #ifdef _ILP32
6067 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
6068
6069 fis_xferlen = kstp->sgl.sge32[0].length;
6070 data_xferlen = kstp->sgl.sge32[1].length;
6071
6072 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
6073 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
6074 #else
6075 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
6076
6077 fis_xferlen = kstp->sgl.sge64[0].length;
6078 data_xferlen = kstp->sgl.sge64[1].length;
6079
6080 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
6081 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
6082 #endif
6083 }
6084
6085
6086 if (fis_xferlen) {
6087 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
6088 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
6089
6090 /* means IOCTL requires DMA */
6091 /* allocate the data transfer buffer */
6092 /* fis_dma_obj.size = fis_xferlen; */
6093 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,
6094 new_xfer_length1, PAGESIZE);
6095 fis_dma_obj.size = new_xfer_length1;
6096 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
6097 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6098 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6099 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
6100 fis_dma_obj.dma_attr.dma_attr_align = 1;
6101
6102 /* allocate kernel buffer for DMA */
6103 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
6104 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6105 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
6106 "could not allocate data transfer buffer."));
6107 return (DDI_FAILURE);
6108 }
6109 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
6110
6111 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6112 for (i = 0; i < fis_xferlen; i++) {
6113 if (ddi_copyin((uint8_t *)fis_ubuf + i,
6114 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
6115 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6116 "copy from user space failed"));
6117 return (DDI_FAILURE);
6118 }
6119 }
6120 }
6121
6122 if (data_xferlen) {
6123 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
6124 "data_xferlen = %x", data_ubuf, data_xferlen));
6125
6126 /* means IOCTL requires DMA */
6127 /* allocate the data transfer buffer */
6128 /* data_dma_obj.size = data_xferlen; */
6129 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen, new_xfer_length2,
6130 PAGESIZE);
6131 data_dma_obj.size = new_xfer_length2;
6132 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
6133 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6134 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6135 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
6136 data_dma_obj.dma_attr.dma_attr_align = 1;
6137
6138 /* allocate kernel buffer for DMA */
6139 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
6140 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6141 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6142 "could not allocate data transfer buffer."));
6143 return (DDI_FAILURE);
6144 }
6145 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
6146
6147 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6148 for (i = 0; i < data_xferlen; i++) {
6149 if (ddi_copyin((uint8_t *)data_ubuf + i,
6150 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
6151 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6152 "copy from user space failed"));
6153 return (DDI_FAILURE);
6154 }
6155 }
6156 }
6157
6158 ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
6159 ddi_put8(acc_handle, &stp->cmd_status, 0);
6160 ddi_put8(acc_handle, &stp->connection_status, 0);
6161 ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
6162 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
6163
6164 ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
6165 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
6166
6167 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
6168 DDI_DEV_AUTOINCR);
6169
6170 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
6171 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
6172 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
6173 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
6174 fis_dma_obj.dma_cookie[0].dmac_address);
6175 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
6176 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
6177 data_dma_obj.dma_cookie[0].dmac_address);
6178
6179 cmd->sync_cmd = MRSAS_TRUE;
6180 cmd->frame_count = 1;
6181
6182 if (instance->tbolt) {
6183 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6184 }
6185
6186 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
6187 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
6188 } else {
6189
6190 if (fis_xferlen) {
6191 for (i = 0; i < fis_xferlen; i++) {
6192 if (ddi_copyout(
6193 (uint8_t *)fis_dma_obj.buffer + i,
6194 (uint8_t *)fis_ubuf + i, 1, mode)) {
6195 con_log(CL_ANN, (CE_WARN,
6196 "issue_mfi_stp : copy to "
6197 "user space failed"));
6198 return (DDI_FAILURE);
6199 }
6200 }
6201 }
6202 }
6203 if (data_xferlen) {
6204 for (i = 0; i < data_xferlen; i++) {
6205 if (ddi_copyout(
6206 (uint8_t *)data_dma_obj.buffer + i,
6207 (uint8_t *)data_ubuf + i, 1, mode)) {
6208 con_log(CL_ANN, (CE_WARN,
6209 "issue_mfi_stp : copy to"
6210 " user space failed"));
6211 return (DDI_FAILURE);
6212 }
6213 }
6214 }
6215
6216 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
6217 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
6218 kstp->cmd_status));
6219 DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status);
6220
6221 if (fis_xferlen) {
6222 /* free kernel buffer */
6223 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
6224 return (DDI_FAILURE);
6225 }
6226
6227 if (data_xferlen) {
6228 /* free kernel buffer */
6229 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
6230 return (DDI_FAILURE);
6231 }
6232
6233 return (DDI_SUCCESS);
6234 }
6235
6236 /*
6237 * fill_up_drv_ver
6238 */
6239 void
6240 fill_up_drv_ver(struct mrsas_drv_ver *dv)
6241 {
6242 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
6243
6244 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
6245 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
6246 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
6247 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
6248 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
6249 strlen(MRSAS_RELDATE));
6250
6251 }
6252
6253 /*
6254 * handle_drv_ioctl
6255 */
6256 static int
6257 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6258 int mode)
6259 {
6260 int i;
6261 int rval = DDI_SUCCESS;
6262 int *props = NULL;
6263 void *ubuf;
6264
6265 uint8_t *pci_conf_buf;
6266 uint32_t xferlen;
6267 uint32_t num_props;
6268 uint_t model;
6269 struct mrsas_dcmd_frame *kdcmd;
6270 struct mrsas_drv_ver dv;
6271 struct mrsas_pci_information pi;
6272
6273 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
6274
6275 model = ddi_model_convert_from(mode & FMODELS);
6276 if (model == DDI_MODEL_ILP32) {
6277 con_log(CL_ANN1, (CE_CONT,
6278 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6279
6280 xferlen = kdcmd->sgl.sge32[0].length;
6281
6282 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6283 } else {
6284 #ifdef _ILP32
6285 con_log(CL_ANN1, (CE_CONT,
6286 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6287 xferlen = kdcmd->sgl.sge32[0].length;
6288 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6289 #else
6290 con_log(CL_ANN1, (CE_CONT,
6291 "handle_drv_ioctl: DDI_MODEL_LP64"));
6292 xferlen = kdcmd->sgl.sge64[0].length;
6293 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6294 #endif
6295 }
6296 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6297 "dataBuf=%p size=%d bytes", ubuf, xferlen));
6298
6299 switch (kdcmd->opcode) {
6300 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6301 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6302 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6303
6304 fill_up_drv_ver(&dv);
6305
6306 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6307 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6308 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6309 "copy to user space failed"));
6310 kdcmd->cmd_status = 1;
6311 rval = 1;
6312 } else {
6313 kdcmd->cmd_status = 0;
6314 }
6315 break;
6316 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6317 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
6318 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
6319
6320 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6321 0, "reg", &props, &num_props)) {
6322 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6323 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6324 "ddi_prop_look_int_array failed"));
6325 rval = DDI_FAILURE;
6326 } else {
6327
6328 pi.busNumber = (props[0] >> 16) & 0xFF;
6329 pi.deviceNumber = (props[0] >> 11) & 0x1f;
6330 pi.functionNumber = (props[0] >> 8) & 0x7;
6331 ddi_prop_free((void *)props);
6332 }
6333
6334 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6335
6336 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6337 offsetof(struct mrsas_pci_information, pciHeaderInfo));
6338 i++) {
6339 pci_conf_buf[i] =
6340 pci_config_get8(instance->pci_handle, i);
6341 }
6342
6343 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6344 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6345 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6346 "copy to user space failed"));
6347 kdcmd->cmd_status = 1;
6348 rval = 1;
6349 } else {
6350 kdcmd->cmd_status = 0;
6351 }
6352 break;
6353 default:
6354 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6355 "invalid driver specific IOCTL opcode = 0x%x",
6356 kdcmd->opcode));
6357 kdcmd->cmd_status = 1;
6358 rval = DDI_FAILURE;
6359 break;
6360 }
6361
6362 return (rval);
6363 }
6364
6365 /*
6366 * handle_mfi_ioctl
6367 */
6368 static int
6369 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6370 int mode)
6371 {
6372 int rval = DDI_SUCCESS;
6373
6374 struct mrsas_header *hdr;
6375 struct mrsas_cmd *cmd;
6376
6377 if (instance->tbolt) {
6378 cmd = get_raid_msg_mfi_pkt(instance);
6379 } else {
6380 cmd = get_mfi_pkt(instance);
6381 }
6382 if (!cmd) {
6383 con_log(CL_ANN, (CE_WARN, "mr_sas: "
6384 "failed to get a cmd packet"));
6385 DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
6386 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
6387 return (DDI_FAILURE);
6388 }
6389
6390 /* Clear the frame buffer and assign back the context id */
6391 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6392 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6393 cmd->index);
6394
6395 hdr = (struct mrsas_header *)&ioctl->frame[0];
6396
6397 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
6398 case MFI_CMD_OP_DCMD:
6399 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
6400 break;
6401 case MFI_CMD_OP_SMP:
6402 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
6403 break;
6404 case MFI_CMD_OP_STP:
6405 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
6406 break;
6407 case MFI_CMD_OP_LD_SCSI:
6408 case MFI_CMD_OP_PD_SCSI:
6409 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
6410 break;
6411 default:
6412 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
6413 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
6414 rval = DDI_FAILURE;
6415 break;
6416 }
6417
6418 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
6419 rval = DDI_FAILURE;
6420
6421 if (instance->tbolt) {
6422 return_raid_msg_mfi_pkt(instance, cmd);
6423 } else {
6424 return_mfi_pkt(instance, cmd);
6425 }
6426
6427 return (rval);
6428 }
6429
6430 /*
6431 * AEN
6432 */
6433 static int
6434 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6435 {
6436 int rval = 0;
6437
6438 rval = register_mfi_aen(instance, instance->aen_seq_num,
6439 aen->class_locale_word);
6440
6441 aen->cmd_status = (uint8_t)rval;
6442
6443 return (rval);
6444 }
6445
/*
 * register_mfi_aen
 *
 * Register (or re-register) with the firmware for Asynchronous Event
 * Notification, subscribing to the event class/locale encoded in
 * class_locale_word and starting from sequence number seq_num.  If a
 * registration is already outstanding and is a superset of this one,
 * nothing is done; otherwise the old command is aborted and a combined
 * class/locale registration is issued.  Returns 0 on success, ENOMEM
 * when no command packet is available, or the abort error code.
 */
static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct mrsas_cmd *cmd, *aen_cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	/*
	 * NOTE(review): the word is swapped here and the locale field is
	 * swapped again below before it is written to the mailbox.  On
	 * little-endian hosts LE_16/LE_32 are identity operations; the
	 * double swap should be confirmed on big-endian builds.
	 */
	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		/* Fetch the class/locale of the registration in flight. */
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* Build the superset of old and new subscriptions. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			/* The pending AEN must be aborted before re-issue. */
			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		/* No AEN pending: recompute from the caller's word. */
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = get_mfi_pkt(instance);
	}

	if (!cmd) {
		DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (ENOMEM);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* The event-detail buffer is the DMA target of the WAIT DCMD. */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	/* mbox.w[0] = starting sequence number, mbox.w[1] = class/locale. */
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	curr_aen.word = LE_32(curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
	    curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_detail));

	instance->aen_seq_num = seq_num;


	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16 (&instance->fw_outstanding, 1); */
	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}
6585
6586 void
6587 display_scsi_inquiry(caddr_t scsi_inq)
6588 {
6589 #define MAX_SCSI_DEVICE_CODE 14
6590 int i;
6591 char inquiry_buf[256] = {0};
6592 int len;
6593 const char *const scsi_device_types[] = {
6594 "Direct-Access ",
6595 "Sequential-Access",
6596 "Printer ",
6597 "Processor ",
6598 "WORM ",
6599 "CD-ROM ",
6600 "Scanner ",
6601 "Optical Device ",
6602 "Medium Changer ",
6603 "Communications ",
6604 "Unknown ",
6605 "Unknown ",
6606 "Unknown ",
6607 "Enclosure ",
6608 };
6609
6610 len = 0;
6611
6612 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: ");
6613 for (i = 8; i < 16; i++) {
6614 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6615 scsi_inq[i]);
6616 }
6617
6618 len += snprintf(inquiry_buf + len, 265 - len, " Model: ");
6619
6620 for (i = 16; i < 32; i++) {
6621 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6622 scsi_inq[i]);
6623 }
6624
6625 len += snprintf(inquiry_buf + len, 265 - len, " Rev: ");
6626
6627 for (i = 32; i < 36; i++) {
6628 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6629 scsi_inq[i]);
6630 }
6631
6632 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6633
6634
6635 i = scsi_inq[0] & 0x1f;
6636
6637
6638 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ",
6639 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6640 "Unknown ");
6641
6642
6643 len += snprintf(inquiry_buf + len, 265 - len,
6644 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6645
6646 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6647 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
6648 } else {
6649 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6650 }
6651
6652 con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf));
6653 }
6654
6655 static void
6656 io_timeout_checker(void *arg)
6657 {
6658 struct scsi_pkt *pkt;
6659 struct mrsas_instance *instance = arg;
6660 struct mrsas_cmd *cmd = NULL;
6661 struct mrsas_header *hdr;
6662 int time = 0;
6663 int counter = 0;
6664 struct mlist_head *pos, *next;
6665 mlist_t process_list;
6666
6667 if (instance->adapterresetinprogress == 1) {
6668 con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
6669 " reset in progress"));
6670
6671 instance->timeout_id = timeout(io_timeout_checker,
6672 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6673 return;
6674 }
6675
6676 /* See if this check needs to be in the beginning or last in ISR */
6677 if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
6678 cmn_err(CE_WARN, "io_timeout_checker: "
6679 "FW Fault, calling reset adapter");
6680 cmn_err(CE_CONT, "io_timeout_checker: "
6681 "fw_outstanding 0x%X max_fw_cmds 0x%X",
6682 instance->fw_outstanding, instance->max_fw_cmds);
6683 if (instance->adapterresetinprogress == 0) {
6684 instance->adapterresetinprogress = 1;
6685 if (instance->tbolt)
6686 (void) mrsas_tbolt_reset_ppc(instance);
6687 else
6688 (void) mrsas_reset_ppc(instance);
6689 instance->adapterresetinprogress = 0;
6690 }
6691 instance->timeout_id = timeout(io_timeout_checker,
6692 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6693 return;
6694 }
6695
6696 INIT_LIST_HEAD(&process_list);
6697
6698 mutex_enter(&instance->cmd_pend_mtx);
6699 mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
6700 cmd = mlist_entry(pos, struct mrsas_cmd, list);
6701
6702 if (cmd == NULL) {
6703 continue;
6704 }
6705
6706 if (cmd->sync_cmd == MRSAS_TRUE) {
6707 hdr = (struct mrsas_header *)&cmd->frame->hdr;
6708 if (hdr == NULL) {
6709 continue;
6710 }
6711 time = --cmd->drv_pkt_time;
6712 } else {
6713 pkt = cmd->pkt;
6714 if (pkt == NULL) {
6715 continue;
6716 }
6717 time = --cmd->drv_pkt_time;
6718 }
6719 if (time <= 0) {
6720 cmn_err(CE_WARN, "%llx: "
6721 "io_timeout_checker: TIMING OUT: pkt: %p, "
6722 "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X\n",
6723 gethrtime(), (void *)pkt, (void *)cmd,
6724 instance->fw_outstanding, instance->max_fw_cmds);
6725
6726 counter++;
6727 break;
6728 }
6729 }
6730 mutex_exit(&instance->cmd_pend_mtx);
6731
6732 if (counter) {
6733 if (instance->disable_online_ctrl_reset == 1) {
6734 cmn_err(CE_WARN, "mr_sas %d: %s(): OCR is NOT "
6735 "supported by Firmware, KILL adapter!!!",
6736 instance->instance, __func__);
6737
6738 if (instance->tbolt)
6739 mrsas_tbolt_kill_adapter(instance);
6740 else
6741 (void) mrsas_kill_adapter(instance);
6742
6743 return;
6744 } else {
6745 if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
6746 if (instance->adapterresetinprogress == 0) {
6747 if (instance->tbolt) {
6748 (void) mrsas_tbolt_reset_ppc(
6749 instance);
6750 } else {
6751 (void) mrsas_reset_ppc(
6752 instance);
6753 }
6754 }
6755 } else {
6756 cmn_err(CE_WARN,
6757 "io_timeout_checker: "
6758 "cmd %p cmd->index %d "
6759 "timed out even after 3 resets: "
6760 "so KILL adapter", (void *)cmd, cmd->index);
6761
6762 mrsas_print_cmd_details(instance, cmd, 0xDD);
6763
6764 if (instance->tbolt)
6765 mrsas_tbolt_kill_adapter(instance);
6766 else
6767 (void) mrsas_kill_adapter(instance);
6768 return;
6769 }
6770 }
6771 }
6772 con_log(CL_ANN, (CE_NOTE, "mrsas: "
6773 "schedule next timeout check: "
6774 "do timeout \n"));
6775 instance->timeout_id =
6776 timeout(io_timeout_checker, (void *)instance,
6777 drv_usectohz(MRSAS_1_SECOND));
6778 }
6779
6780 static uint32_t
6781 read_fw_status_reg_ppc(struct mrsas_instance *instance)
6782 {
6783 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
6784 }
6785
6786 static void
6787 issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
6788 {
6789 struct scsi_pkt *pkt;
6790 atomic_add_16(&instance->fw_outstanding, 1);
6791
6792 pkt = cmd->pkt;
6793 if (pkt) {
6794 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6795 "ISSUED CMD TO FW : called : cmd:"
6796 ": %p instance : %p pkt : %p pkt_time : %x\n",
6797 gethrtime(), (void *)cmd, (void *)instance,
6798 (void *)pkt, cmd->drv_pkt_time));
6799 if (instance->adapterresetinprogress) {
6800 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6801 con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
6802 } else {
6803 push_pending_mfi_pkt(instance, cmd);
6804 }
6805
6806 } else {
6807 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6808 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
6809 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
6810 }
6811
6812 mutex_enter(&instance->reg_write_mtx);
6813 /* Issue the command to the FW */
6814 WR_IB_QPORT((cmd->frame_phys_addr) |
6815 (((cmd->frame_count - 1) << 1) | 1), instance);
6816 mutex_exit(&instance->reg_write_mtx);
6817
6818 }
6819
6820 /*
6821 * issue_cmd_in_sync_mode
6822 */
6823 static int
6824 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
6825 struct mrsas_cmd *cmd)
6826 {
6827 int i;
6828 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
6829 struct mrsas_header *hdr = &cmd->frame->hdr;
6830
6831 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));
6832
6833 if (instance->adapterresetinprogress) {
6834 cmd->drv_pkt_time = ddi_get16(
6835 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
6836 if (cmd->drv_pkt_time < debug_timeout_g)
6837 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6838
6839 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
6840 "issue and return in reset case\n"));
6841 WR_IB_QPORT((cmd->frame_phys_addr) |
6842 (((cmd->frame_count - 1) << 1) | 1), instance);
6843
6844 return (DDI_SUCCESS);
6845 } else {
6846 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
6847 push_pending_mfi_pkt(instance, cmd);
6848 }
6849
6850 cmd->cmd_status = ENODATA;
6851
6852 mutex_enter(&instance->reg_write_mtx);
6853 /* Issue the command to the FW */
6854 WR_IB_QPORT((cmd->frame_phys_addr) |
6855 (((cmd->frame_count - 1) << 1) | 1), instance);
6856 mutex_exit(&instance->reg_write_mtx);
6857
6858 mutex_enter(&instance->int_cmd_mtx);
6859 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
6860 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
6861 }
6862 mutex_exit(&instance->int_cmd_mtx);
6863
6864 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));
6865
6866 if (i < (msecs -1)) {
6867 return (DDI_SUCCESS);
6868 } else {
6869 return (DDI_FAILURE);
6870 }
6871 }
6872
6873 /*
6874 * issue_cmd_in_poll_mode
6875 */
6876 static int
6877 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6878 struct mrsas_cmd *cmd)
6879 {
6880 int i;
6881 uint16_t flags;
6882 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6883 struct mrsas_header *frame_hdr;
6884
6885 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
6886
6887 frame_hdr = (struct mrsas_header *)cmd->frame;
6888 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6889 MFI_CMD_STATUS_POLL_MODE);
6890 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6891 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6892
6893 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6894
6895 /* issue the frame using inbound queue port */
6896 WR_IB_QPORT((cmd->frame_phys_addr) |
6897 (((cmd->frame_count - 1) << 1) | 1), instance);
6898
6899 /* wait for cmd_status to change from 0xFF */
6900 for (i = 0; i < msecs && (
6901 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6902 == MFI_CMD_STATUS_POLL_MODE); i++) {
6903 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6904 }
6905
6906 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6907 == MFI_CMD_STATUS_POLL_MODE) {
6908 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6909 "cmd polling timed out"));
6910 return (DDI_FAILURE);
6911 }
6912
6913 return (DDI_SUCCESS);
6914 }
6915
6916 static void
6917 enable_intr_ppc(struct mrsas_instance *instance)
6918 {
6919 uint32_t mask;
6920
6921 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));
6922
6923 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
6924 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
6925
6926 /* WR_OB_INTR_MASK(~0x80000000, instance); */
6927 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
6928
6929 /* dummy read to force PCI flush */
6930 mask = RD_OB_INTR_MASK(instance);
6931
6932 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
6933 "outbound_intr_mask = 0x%x", mask));
6934 }
6935
6936 static void
6937 disable_intr_ppc(struct mrsas_instance *instance)
6938 {
6939 uint32_t mask;
6940
6941 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));
6942
6943 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
6944 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6945
6946 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
6947 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
6948
6949 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
6950 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6951
6952 /* dummy read to force PCI flush */
6953 mask = RD_OB_INTR_MASK(instance);
6954 #ifdef lint
6955 mask = mask;
6956 #endif
6957 }
6958
6959 static int
6960 intr_ack_ppc(struct mrsas_instance *instance)
6961 {
6962 uint32_t status;
6963 int ret = DDI_INTR_CLAIMED;
6964
6965 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));
6966
6967 /* check if it is our interrupt */
6968 status = RD_OB_INTR_STATUS(instance);
6969
6970 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));
6971
6972 if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
6973 ret = DDI_INTR_UNCLAIMED;
6974 }
6975
6976 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
6977 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
6978 ret = DDI_INTR_UNCLAIMED;
6979 }
6980
6981 if (ret == DDI_INTR_UNCLAIMED) {
6982 return (ret);
6983 }
6984 /* clear the interrupt by writing back the same value */
6985 WR_OB_DOORBELL_CLEAR(status, instance);
6986
6987 /* dummy READ */
6988 status = RD_OB_INTR_STATUS(instance);
6989
6990 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));
6991
6992 return (ret);
6993 }
6994
6995 /*
6996 * Marks HBA as bad. This will be called either when an
6997 * IO packet times out even after 3 FW resets
6998 * or FW is found to be fault even after 3 continuous resets.
6999 */
7000
7001 static int
7002 mrsas_kill_adapter(struct mrsas_instance *instance)
7003 {
7004 if (instance->deadadapter == 1)
7005 return (DDI_FAILURE);
7006
7007 con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
7008 "Writing to doorbell with MFI_STOP_ADP "));
7009 mutex_enter(&instance->ocr_flags_mtx);
7010 instance->deadadapter = 1;
7011 mutex_exit(&instance->ocr_flags_mtx);
7012 instance->func_ptr->disable_intr(instance);
7013 WR_IB_DOORBELL(MFI_STOP_ADP, instance);
7014 (void) mrsas_complete_pending_cmds(instance);
7015 return (DDI_SUCCESS);
7016 }
7017
7018
/*
 * mrsas_reset_ppc
 *
 * Online Controller Reset (OCR) for 2108-class controllers: write the
 * magic unlock sequence, assert the diagnostic reset, wait for the
 * firmware to return to a ready state, re-initialize the MFI layer and
 * re-issue every command that was outstanding when the reset began.
 * If the firmware faults again MAX_FW_RESET_COUNT times in a row the
 * adapter is killed.  Returns DDI_SUCCESS on a completed reset,
 * DDI_FAILURE otherwise.
 */
static int
mrsas_reset_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	uint32_t retry = 0;
	uint32_t cur_abs_reg_val;
	uint32_t fw_state;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* A dead adapter is never reset again. */
	if (instance->deadadapter == 1) {
		cmn_err(CE_WARN, "mrsas_reset_ppc: "
		    "no more resets as HBA has been marked dead ");
		return (DDI_FAILURE);
	}
	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress "
	    "flag set, time %llx", gethrtime()));

	instance->func_ptr->disable_intr(instance);
retry_reset:
	/* Magic unlock sequence enabling diagnostic register writes. */
	WR_IB_WRITE_SEQ(0, instance);
	WR_IB_WRITE_SEQ(4, instance);
	WR_IB_WRITE_SEQ(0xb, instance);
	WR_IB_WRITE_SEQ(2, instance);
	WR_IB_WRITE_SEQ(7, instance);
	WR_IB_WRITE_SEQ(0xd, instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
	    "to write sequence register\n"));
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);

	/*
	 * Poll (100ms interval) for diag-write-enable.  Note that
	 * 'retry' is shared with the reset-completion loop below, so
	 * both loops draw on one combined retry budget of 100.
	 */
	while (!(status & DIAG_WRITE_ENABLE)) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit "
			    "check retry count %d\n", retry);
			return (DDI_FAILURE);
		}
	}
	/* Trigger the diagnostic reset and wait for it to self-clear. */
	WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);
	while (status & DIAG_RESET_ADAPTER) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN, "mrsas_reset_ppc: "
			    "RESET FAILED. KILL adapter called\n.");

			(void) mrsas_kill_adapter(instance);
			return (DDI_FAILURE);
		}
	}
	con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mfi_state_transition_to_ready"));

	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
	if (mfi_state_transition_to_ready(instance) ||
	    debug_fw_faults_after_ocr_g == 1) {
		cur_abs_reg_val =
		    instance->func_ptr->read_fw_status_reg(instance);
		fw_state = cur_abs_reg_val & MFI_STATE_MASK;

#ifdef OCRDEBUG
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_reset_ppc :before fake: FW is not ready "
		    "FW state = 0x%x", fw_state));
		if (debug_fw_faults_after_ocr_g == 1)
			fw_state = MFI_STATE_FAULT;
#endif

		con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready "
		    "FW state = 0x%x", fw_state));

		if (fw_state == MFI_STATE_FAULT) {
			/* increment the count */
			instance->fw_fault_count_after_ocr++;
			if (instance->fw_fault_count_after_ocr
			    < MAX_FW_RESET_COUNT) {
				cmn_err(CE_WARN, "mrsas_reset_ppc: "
				    "FW is in fault after OCR count %d "
				    "Retry Reset",
				    instance->fw_fault_count_after_ocr);
				goto retry_reset;

			} else {
				/* Reset budget exhausted; give up. */
				cmn_err(CE_WARN, "mrsas_reset_ppc: "
				    "Max Reset Count exceeded >%d"
				    "Mark HBA as bad, KILL adapter",
				    MAX_FW_RESET_COUNT);

				(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}
		}
	}
	/* reset the counter as FW is up after OCR */
	instance->fw_fault_count_after_ocr = 0;

	/* Rewind the reply-queue producer/consumer indices. */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer, 0);

	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, 0);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    " after resetting produconsumer chck indexs:"
	    "producer %x consumer %x", *instance->producer,
	    *instance->consumer));

	/* Re-run MFI initialization against the freshly reset firmware. */
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_init_mfi"));
	(void) mrsas_issue_init_mfi(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_issue_init_mfi Done"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_print_pending_cmd\n"));
	(void) mrsas_print_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_print_pending_cmd done\n"));

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	/* Re-issue everything that was outstanding before the reset. */
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_pending_cmds"));
	(void) mrsas_issue_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "issue_pending_cmds done.\n"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling aen registration"));

	/* Re-arm the standing AEN command with fresh timers. */
	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "adpterresetinprogress flag unset"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
	return (DDI_SUCCESS);
}
7175
7176 /*
7177 * FMA functions.
7178 */
7179 int
7180 mrsas_common_check(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
7181 {
7182 int ret = DDI_SUCCESS;
7183
7184 if (cmd != NULL &&
7185 mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
7186 DDI_SUCCESS) {
7187 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7188 if (cmd->pkt != NULL) {
7189 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7190 cmd->pkt->pkt_statistics = 0;
7191 }
7192 ret = DDI_FAILURE;
7193 }
7194 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
7195 != DDI_SUCCESS) {
7196 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7197 if (cmd != NULL && cmd->pkt != NULL) {
7198 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7199 cmd->pkt->pkt_statistics = 0;
7200 }
7201 ret = DDI_FAILURE;
7202 }
7203 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
7204 DDI_SUCCESS) {
7205 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7206 if (cmd != NULL && cmd->pkt != NULL) {
7207 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7208 cmd->pkt->pkt_statistics = 0;
7209 }
7210 ret = DDI_FAILURE;
7211 }
7212 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
7213 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7214
7215 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
7216
7217 if (cmd != NULL && cmd->pkt != NULL) {
7218 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7219 cmd->pkt->pkt_statistics = 0;
7220 }
7221 ret = DDI_FAILURE;
7222 }
7223
7224 return (ret);
7225 }
7226
/*
 * mrsas_fm_error_cb - FMA error-handler callback registered through
 * ddi_fm_handler_register() in mrsas_fm_init().  Posts the PCI ereport
 * and returns the framework's own status unchanged.
 */
/*ARGSUSED*/
static int
mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
7238
7239 static void
7240 mrsas_fm_init(struct mrsas_instance *instance)
7241 {
7242 /* Need to change iblock to priority for new MSI intr */
7243 ddi_iblock_cookie_t fm_ibc;
7244
7245 /* Only register with IO Fault Services if we have some capability */
7246 if (instance->fm_capabilities) {
7247 /* Adjust access and dma attributes for FMA */
7248 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7249 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7250
7251 /*
7252 * Register capabilities with IO Fault Services.
7253 * fm_capabilities will be updated to indicate
7254 * capabilities actually supported (not requested.)
7255 */
7256
7257 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
7258
7259 /*
7260 * Initialize pci ereport capabilities if ereport
7261 * capable (should always be.)
7262 */
7263
7264 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7265 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7266 pci_ereport_setup(instance->dip);
7267 }
7268
7269 /*
7270 * Register error callback if error callback capable.
7271 */
7272 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7273 ddi_fm_handler_register(instance->dip,
7274 mrsas_fm_error_cb, (void*) instance);
7275 }
7276 } else {
7277 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7278 mrsas_generic_dma_attr.dma_attr_flags = 0;
7279 }
7280 }
7281
7282 static void
7283 mrsas_fm_fini(struct mrsas_instance *instance)
7284 {
7285 /* Only unregister FMA capabilities if registered */
7286 if (instance->fm_capabilities) {
7287 /*
7288 * Un-register error callback if error callback capable.
7289 */
7290 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7291 ddi_fm_handler_unregister(instance->dip);
7292 }
7293
7294 /*
7295 * Release any resources allocated by pci_ereport_setup()
7296 */
7297 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7298 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7299 pci_ereport_teardown(instance->dip);
7300 }
7301
7302 /* Unregister from IO Fault Services */
7303 ddi_fm_fini(instance->dip);
7304
7305 /* Adjust access and dma attributes for FMA */
7306 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7307 mrsas_generic_dma_attr.dma_attr_flags = 0;
7308 }
7309 }
7310
7311 int
7312 mrsas_check_acc_handle(ddi_acc_handle_t handle)
7313 {
7314 ddi_fm_error_t de;
7315
7316 if (handle == NULL) {
7317 return (DDI_FAILURE);
7318 }
7319
7320 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7321
7322 return (de.fme_status);
7323 }
7324
7325 int
7326 mrsas_check_dma_handle(ddi_dma_handle_t handle)
7327 {
7328 ddi_fm_error_t de;
7329
7330 if (handle == NULL) {
7331 return (DDI_FAILURE);
7332 }
7333
7334 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7335
7336 return (de.fme_status);
7337 }
7338
7339 void
7340 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail)
7341 {
7342 uint64_t ena;
7343 char buf[FM_MAX_CLASS];
7344
7345 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7346 ena = fm_ena_generate(0, FM_ENA_FMT1);
7347 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
7348 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
7349 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7350 }
7351 }
7352
7353 static int
7354 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
7355 {
7356
7357 dev_info_t *dip = instance->dip;
7358 int avail, actual, count;
7359 int i, flag, ret;
7360
7361 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
7362 intr_type));
7363
7364 /* Get number of interrupts */
7365 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
7366 if ((ret != DDI_SUCCESS) || (count == 0)) {
7367 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
7368 "ret %d count %d", ret, count));
7369
7370 return (DDI_FAILURE);
7371 }
7372
7373 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));
7374
7375 /* Get number of available interrupts */
7376 ret = ddi_intr_get_navail(dip, intr_type, &avail);
7377 if ((ret != DDI_SUCCESS) || (avail == 0)) {
7378 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
7379 "ret %d avail %d", ret, avail));
7380
7381 return (DDI_FAILURE);
7382 }
7383 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));
7384
7385 /* Only one interrupt routine. So limit the count to 1 */
7386 if (count > 1) {
7387 count = 1;
7388 }
7389
7390 /*
7391 * Allocate an array of interrupt handlers. Currently we support
7392 * only one interrupt. The framework can be extended later.
7393 */
7394 instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
7395 instance->intr_htable = kmem_zalloc(instance->intr_htable_size,
7396 KM_SLEEP);
7397 if (instance->intr_htable == NULL) {
7398 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7399 "failed to allocate memory for intr-handle table"));
7400 instance->intr_htable_size = 0;
7401 return (DDI_FAILURE);
7402 }
7403
7404 flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
7405 (intr_type == DDI_INTR_TYPE_MSIX)) ?
7406 DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;
7407
7408 /* Allocate interrupt */
7409 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
7410 count, &actual, flag);
7411
7412 if ((ret != DDI_SUCCESS) || (actual == 0)) {
7413 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7414 "avail = %d", avail));
7415 goto mrsas_free_htable;
7416 }
7417
7418 if (actual < count) {
7419 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7420 "Requested = %d Received = %d", count, actual));
7421 }
7422 instance->intr_cnt = actual;
7423
7424 /*
7425 * Get the priority of the interrupt allocated.
7426 */
7427 if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
7428 &instance->intr_pri)) != DDI_SUCCESS) {
7429 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7430 "get priority call failed"));
7431 goto mrsas_free_handles;
7432 }
7433
7434 /*
7435 * Test for high level mutex. we don't support them.
7436 */
7437 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
7438 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7439 "High level interrupts not supported."));
7440 goto mrsas_free_handles;
7441 }
7442
7443 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
7444 instance->intr_pri));
7445
7446 /* Call ddi_intr_add_handler() */
7447 for (i = 0; i < actual; i++) {
7448 ret = ddi_intr_add_handler(instance->intr_htable[i],
7449 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
7450 (caddr_t)(uintptr_t)i);
7451
7452 if (ret != DDI_SUCCESS) {
7453 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
7454 "failed %d", ret));
7455 goto mrsas_free_handles;
7456 }
7457
7458 }
7459
7460 con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));
7461
7462 if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
7463 &instance->intr_cap)) != DDI_SUCCESS) {
7464 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
7465 ret));
7466 goto mrsas_free_handlers;
7467 }
7468
7469 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7470 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable"));
7471
7472 (void) ddi_intr_block_enable(instance->intr_htable,
7473 instance->intr_cnt);
7474 } else {
7475 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable"));
7476
7477 for (i = 0; i < instance->intr_cnt; i++) {
7478 (void) ddi_intr_enable(instance->intr_htable[i]);
7479 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns "
7480 "%d", i));
7481 }
7482 }
7483
7484 return (DDI_SUCCESS);
7485
7486 mrsas_free_handlers:
7487 for (i = 0; i < actual; i++)
7488 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7489
7490 mrsas_free_handles:
7491 for (i = 0; i < actual; i++)
7492 (void) ddi_intr_free(instance->intr_htable[i]);
7493
7494 mrsas_free_htable:
7495 if (instance->intr_htable != NULL)
7496 kmem_free(instance->intr_htable, instance->intr_htable_size);
7497
7498 instance->intr_htable = NULL;
7499 instance->intr_htable_size = 0;
7500
7501 return (DDI_FAILURE);
7502
7503 }
7504
7505
7506 static void
7507 mrsas_rem_intrs(struct mrsas_instance *instance)
7508 {
7509 int i;
7510
7511 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));
7512
7513 /* Disable all interrupts first */
7514 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7515 (void) ddi_intr_block_disable(instance->intr_htable,
7516 instance->intr_cnt);
7517 } else {
7518 for (i = 0; i < instance->intr_cnt; i++) {
7519 (void) ddi_intr_disable(instance->intr_htable[i]);
7520 }
7521 }
7522
7523 /* Remove all the handlers */
7524
7525 for (i = 0; i < instance->intr_cnt; i++) {
7526 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7527 (void) ddi_intr_free(instance->intr_htable[i]);
7528 }
7529
7530 if (instance->intr_htable != NULL)
7531 kmem_free(instance->intr_htable, instance->intr_htable_size);
7532
7533 instance->intr_htable = NULL;
7534 instance->intr_htable_size = 0;
7535
7536 }
7537
7538 static int
7539 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
7540 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
7541 {
7542 struct mrsas_instance *instance;
7543 int config;
7544 int rval = NDI_SUCCESS;
7545
7546 char *ptr = NULL;
7547 int tgt, lun;
7548
7549 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));
7550
7551 if ((instance = ddi_get_soft_state(mrsas_state,
7552 ddi_get_instance(parent))) == NULL) {
7553 return (NDI_FAILURE);
7554 }
7555
7556 /* Hold nexus during bus_config */
7557 ndi_devi_enter(parent, &config);
7558 switch (op) {
7559 case BUS_CONFIG_ONE: {
7560
7561 /* parse wwid/target name out of name given */
7562 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7563 rval = NDI_FAILURE;
7564 break;
7565 }
7566 ptr++;
7567
7568 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7569 rval = NDI_FAILURE;
7570 break;
7571 }
7572
7573 if (lun == 0) {
7574 rval = mrsas_config_ld(instance, tgt, lun, childp);
7575 #ifdef PDSUPPORT
7576 } else if (instance->tbolt == 1 && lun != 0) {
7577 rval = mrsas_tbolt_config_pd(instance,
7578 tgt, lun, childp);
7579 #endif
7580 } else {
7581 rval = NDI_FAILURE;
7582 }
7583
7584 break;
7585 }
7586 case BUS_CONFIG_DRIVER:
7587 case BUS_CONFIG_ALL: {
7588
7589 rval = mrsas_config_all_devices(instance);
7590
7591 rval = NDI_SUCCESS;
7592 break;
7593 }
7594 }
7595
7596 if (rval == NDI_SUCCESS) {
7597 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7598
7599 }
7600 ndi_devi_exit(parent, config);
7601
7602 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7603 rval));
7604 return (rval);
7605 }
7606
7607 static int
7608 mrsas_config_all_devices(struct mrsas_instance *instance)
7609 {
7610 int rval, tgt;
7611
7612 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7613 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7614
7615 }
7616
7617 #ifdef PDSUPPORT
7618 /* Config PD devices connected to the card */
7619 if (instance->tbolt) {
7620 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7621 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7622 }
7623 }
7624 #endif
7625
7626 rval = NDI_SUCCESS;
7627 return (rval);
7628 }
7629
7630 static int
7631 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7632 {
7633 char devbuf[SCSI_MAXNAMELEN];
7634 char *addr;
7635 char *p, *tp, *lp;
7636 long num;
7637
7638 /* Parse dev name and address */
7639 (void) strcpy(devbuf, devnm);
7640 addr = "";
7641 for (p = devbuf; *p != '\0'; p++) {
7642 if (*p == '@') {
7643 addr = p + 1;
7644 *p = '\0';
7645 } else if (*p == ':') {
7646 *p = '\0';
7647 break;
7648 }
7649 }
7650
7651 /* Parse target and lun */
7652 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7653 if (*p == ',') {
7654 lp = p + 1;
7655 *p = '\0';
7656 break;
7657 }
7658 }
7659 if (tgt && tp) {
7660 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7661 return (DDI_FAILURE); /* Can declare this as constant */
7662 }
7663 *tgt = (int)num;
7664 }
7665 if (lun && lp) {
7666 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7667 return (DDI_FAILURE);
7668 }
7669 *lun = (int)num;
7670 }
7671 return (DDI_SUCCESS); /* Success case */
7672 }
7673
7674 static int
7675 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
7676 uint8_t lun, dev_info_t **ldip)
7677 {
7678 struct scsi_device *sd;
7679 dev_info_t *child;
7680 int rval;
7681
7682 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
7683 tgt, lun));
7684
7685 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
7686 if (ldip) {
7687 *ldip = child;
7688 }
7689 if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
7690 rval = mrsas_service_evt(instance, tgt, 0,
7691 MRSAS_EVT_UNCONFIG_TGT, NULL);
7692 con_log(CL_ANN1, (CE_WARN,
7693 "mr_sas: DELETING STALE ENTRY rval = %d "
7694 "tgt id = %d ", rval, tgt));
7695 return (NDI_FAILURE);
7696 }
7697 return (NDI_SUCCESS);
7698 }
7699
7700 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
7701 if (sd == NULL) {
7702 con_log(CL_ANN1, (CE_WARN, "mrsas_config_ld: "
7703 "failed to allocate mem for scsi_device"));
7704 return (NDI_FAILURE);
7705 }
7706 sd->sd_address.a_hba_tran = instance->tran;
7707 sd->sd_address.a_target = (uint16_t)tgt;
7708 sd->sd_address.a_lun = (uint8_t)lun;
7709
7710 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
7711 rval = mrsas_config_scsi_device(instance, sd, ldip);
7712 else
7713 rval = NDI_FAILURE;
7714
7715 /* sd_unprobe is blank now. Free buffer manually */
7716 if (sd->sd_inq) {
7717 kmem_free(sd->sd_inq, SUN_INQSIZE);
7718 sd->sd_inq = (struct scsi_inquiry *)NULL;
7719 }
7720
7721 kmem_free(sd, sizeof (struct scsi_device));
7722 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
7723 rval));
7724 return (rval);
7725 }
7726
7727 int
7728 mrsas_config_scsi_device(struct mrsas_instance *instance,
7729 struct scsi_device *sd, dev_info_t **dipp)
7730 {
7731 char *nodename = NULL;
7732 char **compatible = NULL;
7733 int ncompatible = 0;
7734 char *childname;
7735 dev_info_t *ldip = NULL;
7736 int tgt = sd->sd_address.a_target;
7737 int lun = sd->sd_address.a_lun;
7738 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7739 int rval;
7740
7741 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
7742 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7743 NULL, &nodename, &compatible, &ncompatible);
7744
7745 if (nodename == NULL) {
7746 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
7747 "for t%dL%d", tgt, lun));
7748 rval = NDI_FAILURE;
7749 goto finish;
7750 }
7751
7752 childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
7753 con_log(CL_DLEVEL1, (CE_NOTE,
7754 "mr_sas: Childname = %2s nodename = %s", childname, nodename));
7755
7756 /* Create a dev node */
7757 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
7758 con_log(CL_DLEVEL1, (CE_NOTE,
7759 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
7760 if (rval == NDI_SUCCESS) {
7761 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
7762 DDI_PROP_SUCCESS) {
7763 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7764 "property for t%dl%d target", tgt, lun));
7765 rval = NDI_FAILURE;
7766 goto finish;
7767 }
7768 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
7769 DDI_PROP_SUCCESS) {
7770 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7771 "property for t%dl%d lun", tgt, lun));
7772 rval = NDI_FAILURE;
7773 goto finish;
7774 }
7775
7776 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7777 "compatible", compatible, ncompatible) !=
7778 DDI_PROP_SUCCESS) {
7779 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7780 "property for t%dl%d compatible", tgt, lun));
7781 rval = NDI_FAILURE;
7782 goto finish;
7783 }
7784
7785 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7786 if (rval != NDI_SUCCESS) {
7787 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
7788 "t%dl%d", tgt, lun));
7789 ndi_prop_remove_all(ldip);
7790 (void) ndi_devi_free(ldip);
7791 } else {
7792 con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :"
7793 "0 t%dl%d", tgt, lun));
7794 }
7795
7796 }
7797 finish:
7798 if (dipp) {
7799 *dipp = ldip;
7800 }
7801
7802 con_log(CL_DLEVEL1, (CE_NOTE,
7803 "mr_sas: config_scsi_device rval = %d t%dL%d",
7804 rval, tgt, lun));
7805 scsi_hba_nodename_compatible_free(nodename, compatible);
7806 return (rval);
7807 }
7808
7809 /*ARGSUSED*/
7810 int
7811 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7812 uint64_t wwn)
7813 {
7814 struct mrsas_eventinfo *mrevt = NULL;
7815
7816 con_log(CL_ANN1, (CE_NOTE,
7817 "mrsas_service_evt called for t%dl%d event = %d",
7818 tgt, lun, event));
7819
7820 if ((instance->taskq == NULL) || (mrevt =
7821 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7822 return (ENOMEM);
7823 }
7824
7825 mrevt->instance = instance;
7826 mrevt->tgt = tgt;
7827 mrevt->lun = lun;
7828 mrevt->event = event;
7829 mrevt->wwn = wwn;
7830
7831 if ((ddi_taskq_dispatch(instance->taskq,
7832 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7833 DDI_SUCCESS) {
7834 con_log(CL_ANN1, (CE_NOTE,
7835 "mr_sas: Event task failed for t%dl%d event = %d",
7836 tgt, lun, event));
7837 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7838 return (DDI_FAILURE);
7839 }
7840 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event);
7841 return (DDI_SUCCESS);
7842 }
7843
7844 static void
7845 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
7846 {
7847 struct mrsas_instance *instance = mrevt->instance;
7848 dev_info_t *dip, *pdip;
7849 int circ1 = 0;
7850 char *devname;
7851
7852 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
7853 " tgt %d lun %d event %d",
7854 mrevt->tgt, mrevt->lun, mrevt->event));
7855
7856 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
7857 mutex_enter(&instance->config_dev_mtx);
7858 dip = instance->mr_ld_list[mrevt->tgt].dip;
7859 mutex_exit(&instance->config_dev_mtx);
7860 #ifdef PDSUPPORT
7861 } else {
7862 mutex_enter(&instance->config_dev_mtx);
7863 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7864 mutex_exit(&instance->config_dev_mtx);
7865 #endif
7866 }
7867
7868
7869 ndi_devi_enter(instance->dip, &circ1);
7870 switch (mrevt->event) {
7871 case MRSAS_EVT_CONFIG_TGT:
7872 if (dip == NULL) {
7873
7874 if (mrevt->lun == 0) {
7875 (void) mrsas_config_ld(instance, mrevt->tgt,
7876 0, NULL);
7877 #ifdef PDSUPPORT
7878 } else if (instance->tbolt) {
7879 (void) mrsas_tbolt_config_pd(instance,
7880 mrevt->tgt,
7881 1, NULL);
7882 #endif
7883 }
7884 con_log(CL_ANN1, (CE_NOTE,
7885 "mr_sas: EVT_CONFIG_TGT called:"
7886 " for tgt %d lun %d event %d",
7887 mrevt->tgt, mrevt->lun, mrevt->event));
7888
7889 } else {
7890 con_log(CL_ANN1, (CE_NOTE,
7891 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7892 " for tgt %d lun %d event %d",
7893 mrevt->tgt, mrevt->lun, mrevt->event));
7894 }
7895 break;
7896 case MRSAS_EVT_UNCONFIG_TGT:
7897 if (dip) {
7898 if (i_ddi_devi_attached(dip)) {
7899
7900 pdip = ddi_get_parent(dip);
7901
7902 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
7903 (void) ddi_deviname(dip, devname);
7904
7905 (void) devfs_clean(pdip, devname + 1,
7906 DV_CLEAN_FORCE);
7907 kmem_free(devname, MAXNAMELEN + 1);
7908 }
7909 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7910 con_log(CL_ANN1, (CE_NOTE,
7911 "mr_sas: EVT_UNCONFIG_TGT called:"
7912 " for tgt %d lun %d event %d",
7913 mrevt->tgt, mrevt->lun, mrevt->event));
7914 } else {
7915 con_log(CL_ANN1, (CE_NOTE,
7916 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7917 " for tgt %d lun %d event %d",
7918 mrevt->tgt, mrevt->lun, mrevt->event));
7919 }
7920 break;
7921 }
7922 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7923 ndi_devi_exit(instance->dip, circ1);
7924 }
7925
7926
7927 int
7928 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7929 {
7930 union scsi_cdb *cdbp;
7931 uint16_t page_code;
7932 struct scsa_cmd *acmd;
7933 struct buf *bp;
7934 struct mode_header *modehdrp;
7935
7936 cdbp = (void *)pkt->pkt_cdbp;
7937 page_code = cdbp->cdb_un.sg.scsi[0];
7938 acmd = PKT2CMD(pkt);
7939 bp = acmd->cmd_buf;
7940 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) {
7941 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7942 /* ADD pkt statistics as Command failed. */
7943 return (NULL);
7944 }
7945
7946 bp_mapin(bp);
7947 bzero(bp->b_un.b_addr, bp->b_bcount);
7948
7949 switch (page_code) {
7950 case 0x3: {
7951 struct mode_format *page3p = NULL;
7952 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7953 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7954
7955 page3p = (void *)((caddr_t)modehdrp +
7956 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7957 page3p->mode_page.code = 0x3;
7958 page3p->mode_page.length =
7959 (uchar_t)(sizeof (struct mode_format));
7960 page3p->data_bytes_sect = 512;
7961 page3p->sect_track = 63;
7962 break;
7963 }
7964 case 0x4: {
7965 struct mode_geometry *page4p = NULL;
7966 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7967 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7968
7969 page4p = (void *)((caddr_t)modehdrp +
7970 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7971 page4p->mode_page.code = 0x4;
7972 page4p->mode_page.length =
7973 (uchar_t)(sizeof (struct mode_geometry));
7974 page4p->heads = 255;
7975 page4p->rpm = 10000;
7976 break;
7977 }
7978 default:
7979 break;
7980 }
7981 return (NULL);
7982 }