Print this page
*** NO COMMENTS ***
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/mr_sas/mr_sas.c
+++ new/usr/src/uts/common/io/mr_sas/mr_sas.c
1 1 /*
2 2 * mr_sas.c: source for mr_sas driver
3 3 *
4 4 * Solaris MegaRAID device driver for SAS2.0 controllers
5 5 * Copyright (c) 2008-2012, LSI Logic Corporation.
6 6 * All rights reserved.
7 7 *
8 8 * Version:
9 9 * Author:
10 10 * Swaminathan K S
11 11 * Arun Chandrashekhar
12 12 * Manju R
13 13 * Rasheed
14 14 * Shakeel Bukhari
15 15 *
16 16 * Redistribution and use in source and binary forms, with or without
17 17 * modification, are permitted provided that the following conditions are met:
18 18 *
19 19 * 1. Redistributions of source code must retain the above copyright notice,
20 20 * this list of conditions and the following disclaimer.
21 21 *
22 22 * 2. Redistributions in binary form must reproduce the above copyright notice,
23 23 * this list of conditions and the following disclaimer in the documentation
24 24 * and/or other materials provided with the distribution.
25 25 *
26 26 * 3. Neither the name of the author nor the names of its contributors may be
27 27 * used to endorse or promote products derived from this software without
28 28 * specific prior written permission.
29 29 *
30 30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 34 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
37 37 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
41 41 * DAMAGE.
42 42 */
43 43
44 44 /*
45 45 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
46 46 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 47 47 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
48 48 */
49 49
50 50 #include <sys/types.h>
51 51 #include <sys/param.h>
52 52 #include <sys/file.h>
53 53 #include <sys/errno.h>
54 54 #include <sys/open.h>
55 55 #include <sys/cred.h>
56 56 #include <sys/modctl.h>
57 57 #include <sys/conf.h>
58 58 #include <sys/devops.h>
59 59 #include <sys/cmn_err.h>
60 60 #include <sys/kmem.h>
61 61 #include <sys/stat.h>
62 62 #include <sys/mkdev.h>
63 63 #include <sys/pci.h>
64 64 #include <sys/scsi/scsi.h>
65 65 #include <sys/ddi.h>
66 66 #include <sys/sunddi.h>
67 67 #include <sys/atomic.h>
68 68 #include <sys/signal.h>
69 69 #include <sys/byteorder.h>
70 70 #include <sys/sdt.h>
71 71 #include <sys/fs/dv_node.h> /* devfs_clean */
72 72
73 73 #include "mr_sas.h"
74 74
75 75 /*
76 76 * FMA header files
77 77 */
78 78 #include <sys/ddifm.h>
79 79 #include <sys/fm/protocol.h>
80 80 #include <sys/fm/util.h>
81 81 #include <sys/fm/io/ddi.h>
82 82
83 83 /*
84 84 * Local static data
85 85 */
86 86 static void *mrsas_state = NULL;
87 87 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
88 88 volatile int debug_level_g = CL_NONE;
89 89 static volatile int msi_enable = 1;
90 90 static volatile int ctio_enable = 1;
91 91
92 92 /* Default Timeout value to issue online controller reset */
93 93 volatile int debug_timeout_g = 0xF0; /* 0xB4; */
94 94 /* Simulate consecutive firmware fault */
95 95 static volatile int debug_fw_faults_after_ocr_g = 0;
96 96 #ifdef OCRDEBUG
97 97 /* Simulate three consecutive timeout for an IO */
98 98 static volatile int debug_consecutive_timeout_after_ocr_g = 0;
99 99 #endif
100 100
101 101 #pragma weak scsi_hba_open
102 102 #pragma weak scsi_hba_close
103 103 #pragma weak scsi_hba_ioctl
104 104
105 105 /* Local static prototypes. */
106 106 static int mrsas_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
107 107 static int mrsas_attach(dev_info_t *, ddi_attach_cmd_t);
108 108 #ifdef __sparc
109 109 static int mrsas_reset(dev_info_t *, ddi_reset_cmd_t);
110 110 #else
111 111 static int mrsas_quiesce(dev_info_t *);
112 112 #endif
113 113 static int mrsas_detach(dev_info_t *, ddi_detach_cmd_t);
114 114 static int mrsas_open(dev_t *, int, int, cred_t *);
115 115 static int mrsas_close(dev_t, int, int, cred_t *);
116 116 static int mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
117 117
118 118 static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
119 119 scsi_hba_tran_t *, struct scsi_device *);
120 120 static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
121 121 struct scsi_pkt *, struct buf *, int, int, int, int,
122 122 int (*)(), caddr_t);
123 123 static int mrsas_tran_start(struct scsi_address *,
124 124 register struct scsi_pkt *);
125 125 static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
126 126 static int mrsas_tran_reset(struct scsi_address *, int);
127 127 static int mrsas_tran_getcap(struct scsi_address *, char *, int);
128 128 static int mrsas_tran_setcap(struct scsi_address *, char *, int, int);
129 129 static void mrsas_tran_destroy_pkt(struct scsi_address *,
130 130 struct scsi_pkt *);
131 131 static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
132 132 static void mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
133 133 static int mrsas_tran_quiesce(dev_info_t *dip);
134 134 static int mrsas_tran_unquiesce(dev_info_t *dip);
135 135 static uint_t mrsas_isr();
136 136 static uint_t mrsas_softintr();
137 137 static void mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);
138 138 static struct mrsas_cmd *get_mfi_pkt(struct mrsas_instance *);
139 139 static void return_mfi_pkt(struct mrsas_instance *,
140 140 struct mrsas_cmd *);
141 141
142 142 static void free_space_for_mfi(struct mrsas_instance *);
143 143 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
144 144 static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
145 145 static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
146 146 struct mrsas_cmd *);
147 147 static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
148 148 struct mrsas_cmd *);
149 149 static void enable_intr_ppc(struct mrsas_instance *);
150 150 static void disable_intr_ppc(struct mrsas_instance *);
151 151 static int intr_ack_ppc(struct mrsas_instance *);
152 152 static void flush_cache(struct mrsas_instance *instance);
153 153 void display_scsi_inquiry(caddr_t);
154 154 static int start_mfi_aen(struct mrsas_instance *instance);
155 155 static int handle_drv_ioctl(struct mrsas_instance *instance,
156 156 struct mrsas_ioctl *ioctl, int mode);
157 157 static int handle_mfi_ioctl(struct mrsas_instance *instance,
158 158 struct mrsas_ioctl *ioctl, int mode);
159 159 static int handle_mfi_aen(struct mrsas_instance *instance,
160 160 struct mrsas_aen *aen);
161 161 static struct mrsas_cmd *build_cmd(struct mrsas_instance *,
162 162 struct scsi_address *, struct scsi_pkt *, uchar_t *);
163 163 static int alloc_additional_dma_buffer(struct mrsas_instance *);
164 164 static void complete_cmd_in_sync_mode(struct mrsas_instance *,
165 165 struct mrsas_cmd *);
166 166 static int mrsas_kill_adapter(struct mrsas_instance *);
167 167 static int mrsas_issue_init_mfi(struct mrsas_instance *);
168 168 static int mrsas_reset_ppc(struct mrsas_instance *);
169 169 static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *);
170 170 static int wait_for_outstanding(struct mrsas_instance *instance);
171 171 static int register_mfi_aen(struct mrsas_instance *instance,
172 172 uint32_t seq_num, uint32_t class_locale_word);
173 173 static int issue_mfi_pthru(struct mrsas_instance *instance, struct
174 174 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
175 175 static int issue_mfi_dcmd(struct mrsas_instance *instance, struct
176 176 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
177 177 static int issue_mfi_smp(struct mrsas_instance *instance, struct
178 178 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
179 179 static int issue_mfi_stp(struct mrsas_instance *instance, struct
180 180 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
181 181 static int abort_aen_cmd(struct mrsas_instance *instance,
182 182 struct mrsas_cmd *cmd_to_abort);
183 183
184 184 static void mrsas_rem_intrs(struct mrsas_instance *instance);
185 185 static int mrsas_add_intrs(struct mrsas_instance *instance, int intr_type);
186 186
187 187 static void mrsas_tran_tgt_free(dev_info_t *, dev_info_t *,
188 188 scsi_hba_tran_t *, struct scsi_device *);
189 189 static int mrsas_tran_bus_config(dev_info_t *, uint_t,
190 190 ddi_bus_config_op_t, void *, dev_info_t **);
191 191 static int mrsas_parse_devname(char *, int *, int *);
192 192 static int mrsas_config_all_devices(struct mrsas_instance *);
193 193 static int mrsas_config_ld(struct mrsas_instance *, uint16_t,
194 194 uint8_t, dev_info_t **);
195 195 static int mrsas_name_node(dev_info_t *, char *, int);
196 196 static void mrsas_issue_evt_taskq(struct mrsas_eventinfo *);
197 197 static void free_additional_dma_buffer(struct mrsas_instance *);
198 198 static void io_timeout_checker(void *);
199 199 static void mrsas_fm_init(struct mrsas_instance *);
200 200 static void mrsas_fm_fini(struct mrsas_instance *);
201 201
202 202 static struct mrsas_function_template mrsas_function_template_ppc = {
203 203 .read_fw_status_reg = read_fw_status_reg_ppc,
204 204 .issue_cmd = issue_cmd_ppc,
205 205 .issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
206 206 .issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
207 207 .enable_intr = enable_intr_ppc,
208 208 .disable_intr = disable_intr_ppc,
209 209 .intr_ack = intr_ack_ppc,
210 210 .init_adapter = mrsas_init_adapter_ppc
211 211 };
212 212
213 213
214 214 static struct mrsas_function_template mrsas_function_template_fusion = {
215 215 .read_fw_status_reg = tbolt_read_fw_status_reg,
216 216 .issue_cmd = tbolt_issue_cmd,
217 217 .issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
218 218 .issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
219 219 .enable_intr = tbolt_enable_intr,
220 220 .disable_intr = tbolt_disable_intr,
221 221 .intr_ack = tbolt_intr_ack,
222 222 .init_adapter = mrsas_init_adapter_tbolt
223 223 };
224 224
225 225
226 226 ddi_dma_attr_t mrsas_generic_dma_attr = {
227 227 DMA_ATTR_V0, /* dma_attr_version */
228 228 0, /* low DMA address range */
229 229 0xFFFFFFFFU, /* high DMA address range */
230 230 0xFFFFFFFFU, /* DMA counter register */
231 231 8, /* DMA address alignment */
232 232 0x07, /* DMA burstsizes */
233 233 1, /* min DMA size */
234 234 0xFFFFFFFFU, /* max DMA size */
235 235 0xFFFFFFFFU, /* segment boundary */
236 236 MRSAS_MAX_SGE_CNT, /* dma_attr_sglen */
237 237 512, /* granularity of device */
238 238 0 /* bus specific DMA flags */
239 239 };
240 240
241 241 int32_t mrsas_max_cap_maxxfer = 0x1000000;
242 242
243 243 /*
244 244 * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG,
245 245 * Limit size to 256K
246 246 */
247 247 uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
248 248
249 249 /*
250 250 * cb_ops contains base level routines
251 251 */
252 252 static struct cb_ops mrsas_cb_ops = {
253 253 mrsas_open, /* open */
254 254 mrsas_close, /* close */
255 255 nodev, /* strategy */
256 256 nodev, /* print */
257 257 nodev, /* dump */
258 258 nodev, /* read */
259 259 nodev, /* write */
260 260 mrsas_ioctl, /* ioctl */
261 261 nodev, /* devmap */
262 262 nodev, /* mmap */
263 263 nodev, /* segmap */
264 264 nochpoll, /* poll */
265 265 nodev, /* cb_prop_op */
266 266 0, /* streamtab */
267 267 D_NEW | D_HOTPLUG, /* cb_flag */
268 268 CB_REV, /* cb_rev */
269 269 nodev, /* cb_aread */
270 270 nodev /* cb_awrite */
271 271 };
272 272
273 273 /*
274 274 * dev_ops contains configuration routines
275 275 */
276 276 static struct dev_ops mrsas_ops = {
277 277 DEVO_REV, /* rev, */
278 278 0, /* refcnt */
279 279 mrsas_getinfo, /* getinfo */
280 280 nulldev, /* identify */
281 281 nulldev, /* probe */
282 282 mrsas_attach, /* attach */
283 283 mrsas_detach, /* detach */
284 284 #ifdef __sparc
285 285 mrsas_reset, /* reset */
286 286 #else /* __sparc */
287 287 nodev,
288 288 #endif /* __sparc */
289 289 &mrsas_cb_ops, /* char/block ops */
290 290 NULL, /* bus ops */
291 291 NULL, /* power */
292 292 #ifdef __sparc
293 293 ddi_quiesce_not_needed
294 294 #else /* __sparc */
295 295 mrsas_quiesce /* quiesce */
296 296 #endif /* __sparc */
297 297 };
298 298
299 299 static struct modldrv modldrv = {
300 300 &mod_driverops, /* module type - driver */
301 301 MRSAS_VERSION,
302 302 &mrsas_ops, /* driver ops */
303 303 };
304 304
305 305 static struct modlinkage modlinkage = {
306 306 MODREV_1, /* ml_rev - must be MODREV_1 */
307 307 &modldrv, /* ml_linkage */
308 308 NULL /* end of driver linkage */
309 309 };
310 310
311 311 static struct ddi_device_acc_attr endian_attr = {
312 312 DDI_DEVICE_ATTR_V1,
313 313 DDI_STRUCTURE_LE_ACC,
314 314 DDI_STRICTORDER_ACC,
315 315 DDI_DEFAULT_ACC
316 316 };
317 317
318 318 /* Use the LSI Fast Path for the 2208 (tbolt) commands. */
319 319 unsigned int enable_fp = 1;
320 320
321 321
322 322 /*
323 323 * ************************************************************************** *
324 324 * *
325 325 * common entry points - for loadable kernel modules *
326 326 * *
327 327 * ************************************************************************** *
328 328 */
329 329
330 330 /*
331 331 * _init - initialize a loadable module
332 332 * @void
333 333 *
334 334 * The driver should perform any one-time resource allocation or data
335 335 * initialization during driver loading in _init(). For example, the driver
336 336 * should initialize any mutexes global to the driver in this routine.
337 337 * The driver should not, however, use _init() to allocate or initialize
338 338 * anything that has to do with a particular instance of the device.
339 339 * Per-instance initialization must be done in attach().
340 340 */
341 341 int
342 342 _init(void)
343 343 {
344 344 int ret;
345 345
346 346 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
347 347
348 348 ret = ddi_soft_state_init(&mrsas_state,
349 349 sizeof (struct mrsas_instance), 0);
350 350
351 351 if (ret != DDI_SUCCESS) {
352 352 cmn_err(CE_WARN, "mr_sas: could not init state");
353 353 return (ret);
354 354 }
355 355
356 356 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
357 357 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
358 358 ddi_soft_state_fini(&mrsas_state);
359 359 return (ret);
360 360 }
361 361
362 362 ret = mod_install(&modlinkage);
363 363
364 364 if (ret != DDI_SUCCESS) {
365 365 cmn_err(CE_WARN, "mr_sas: mod_install failed");
366 366 scsi_hba_fini(&modlinkage);
367 367 ddi_soft_state_fini(&mrsas_state);
368 368 }
369 369
370 370 return (ret);
371 371 }
372 372
373 373 /*
374 374 * _info - returns information about a loadable module.
375 375 * @void
376 376 *
377 377 * _info() is called to return module information. This is a typical entry
378 378 * point that does predefined role. It simply calls mod_info().
379 379 */
380 380 int
381 381 _info(struct modinfo *modinfop)
382 382 {
383 383 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
384 384
385 385 return (mod_info(&modlinkage, modinfop));
386 386 }
387 387
388 388 /*
389 389 * _fini - prepare a loadable module for unloading
390 390 * @void
391 391 *
392 392 * In _fini(), the driver should release any resources that were allocated in
393 393 * _init(). The driver must remove itself from the system module list.
394 394 */
395 395 int
396 396 _fini(void)
397 397 {
398 398 int ret;
399 399
400 400 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
401 401
402 402 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) {
403 403 con_log(CL_ANN1,
404 404 (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
405 405 return (ret);
406 406 }
407 407
408 408 scsi_hba_fini(&modlinkage);
409 409 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
410 410
411 411 ddi_soft_state_fini(&mrsas_state);
412 412 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
413 413
414 414 return (ret);
415 415 }
416 416
417 417
418 418 /*
419 419 * ************************************************************************** *
420 420 * *
421 421 * common entry points - for autoconfiguration *
422 422 * *
423 423 * ************************************************************************** *
424 424 */
425 425 /*
426 426 * attach - adds a device to the system as part of initialization
427 427 * @dip:
428 428 * @cmd:
429 429 *
430 430 * The kernel calls a driver's attach() entry point to attach an instance of
431 431 * a device (for MegaRAID, it is instance of a controller) or to resume
432 432 * operation for an instance of a device that has been suspended or has been
433 433 * shut down by the power management framework
434 434 * The attach() entry point typically includes the following types of
435 435 * processing:
436 436 * - allocate a soft-state structure for the device instance (for MegaRAID,
437 437 * controller instance)
438 438 * - initialize per-instance mutexes
439 439 * - initialize condition variables
440 440 * - register the device's interrupts (for MegaRAID, controller's interrupts)
441 441 * - map the registers and memory of the device instance (for MegaRAID,
442 442 * controller instance)
443 443 * - create minor device nodes for the device instance (for MegaRAID,
444 444 * controller instance)
445 445 * - report that the device instance (for MegaRAID, controller instance) has
446 446 * attached
447 447 */
448 448 static int
449 449 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
450 450 {
451 451 int instance_no;
452 452 int nregs;
453 453 int i = 0;
454 454 uint8_t irq;
455 455 uint16_t vendor_id;
456 456 uint16_t device_id;
457 457 uint16_t subsysvid;
458 458 uint16_t subsysid;
459 459 uint16_t command;
460 460 off_t reglength = 0;
461 461 int intr_types = 0;
462 462 char *data;
463 463
464 464 scsi_hba_tran_t *tran;
465 465 ddi_dma_attr_t tran_dma_attr;
466 466 struct mrsas_instance *instance;
467 467
468 468 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
469 469
470 470 /* CONSTCOND */
471 471 ASSERT(NO_COMPETING_THREADS);
472 472
473 473 instance_no = ddi_get_instance(dip);
474 474
475 475 /*
476 476 * check to see whether this device is in a DMA-capable slot.
477 477 */
478 478 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
479 479 cmn_err(CE_WARN,
480 480 "mr_sas%d: Device in slave-only slot, unused",
481 481 instance_no);
482 482 return (DDI_FAILURE);
483 483 }
484 484
485 485 switch (cmd) {
486 486 case DDI_ATTACH:
487 487 /* allocate the soft state for the instance */
488 488 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
489 489 != DDI_SUCCESS) {
490 490 cmn_err(CE_WARN,
491 491 "mr_sas%d: Failed to allocate soft state",
492 492 instance_no);
493 493 return (DDI_FAILURE);
494 494 }
495 495
496 496 instance = (struct mrsas_instance *)ddi_get_soft_state
497 497 (mrsas_state, instance_no);
498 498
499 499 if (instance == NULL) {
500 500 cmn_err(CE_WARN,
501 501 "mr_sas%d: Bad soft state", instance_no);
502 502 ddi_soft_state_free(mrsas_state, instance_no);
503 503 return (DDI_FAILURE);
504 504 }
505 505
506 506 instance->unroll.softs = 1;
507 507
508 508 /* Setup the PCI configuration space handles */
509 509 if (pci_config_setup(dip, &instance->pci_handle) !=
510 510 DDI_SUCCESS) {
511 511 cmn_err(CE_WARN,
512 512 "mr_sas%d: pci config setup failed ",
513 513 instance_no);
514 514
515 515 ddi_soft_state_free(mrsas_state, instance_no);
516 516 return (DDI_FAILURE);
517 517 }
518 518
519 519 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
520 520 cmn_err(CE_WARN,
521 521 "mr_sas: failed to get registers.");
522 522
523 523 pci_config_teardown(&instance->pci_handle);
524 524 ddi_soft_state_free(mrsas_state, instance_no);
525 525 return (DDI_FAILURE);
526 526 }
527 527
528 528 vendor_id = pci_config_get16(instance->pci_handle,
529 529 PCI_CONF_VENID);
530 530 device_id = pci_config_get16(instance->pci_handle,
531 531 PCI_CONF_DEVID);
532 532
533 533 subsysvid = pci_config_get16(instance->pci_handle,
534 534 PCI_CONF_SUBVENID);
535 535 subsysid = pci_config_get16(instance->pci_handle,
536 536 PCI_CONF_SUBSYSID);
537 537
538 538 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
539 539 (pci_config_get16(instance->pci_handle,
540 540 PCI_CONF_COMM) | PCI_COMM_ME));
541 541 irq = pci_config_get8(instance->pci_handle,
542 542 PCI_CONF_ILINE);
543 543
544 544 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
545 545 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
546 546 instance_no, vendor_id, device_id, subsysvid,
547 547 subsysid, irq, MRSAS_VERSION));
548 548
549 549 /* enable bus-mastering */
550 550 command = pci_config_get16(instance->pci_handle,
551 551 PCI_CONF_COMM);
552 552
553 553 if (!(command & PCI_COMM_ME)) {
554 554 command |= PCI_COMM_ME;
555 555
556 556 pci_config_put16(instance->pci_handle,
557 557 PCI_CONF_COMM, command);
558 558
559 559 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
560 560 "enable bus-mastering", instance_no));
561 561 } else {
562 562 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
563 563 "bus-mastering already set", instance_no));
564 564 }
565 565
566 566 /* initialize function pointers */
567 567 switch (device_id) {
568 568 case PCI_DEVICE_ID_LSI_TBOLT:
569 569 case PCI_DEVICE_ID_LSI_INVADER:
570 570 con_log(CL_ANN, (CE_NOTE,
571 571 "mr_sas: 2208 T.B. device detected"));
572 572
573 573 instance->func_ptr =
574 574 &mrsas_function_template_fusion;
575 575 instance->tbolt = 1;
576 576 break;
577 577
578 578 case PCI_DEVICE_ID_LSI_2108VDE:
579 579 case PCI_DEVICE_ID_LSI_2108V:
580 580 con_log(CL_ANN, (CE_NOTE,
581 581 "mr_sas: 2108 Liberator device detected"));
582 582
583 583 instance->func_ptr =
584 584 &mrsas_function_template_ppc;
585 585 break;
586 586
587 587 default:
588 588 cmn_err(CE_WARN,
589 589 "mr_sas: Invalid device detected");
590 590
591 591 pci_config_teardown(&instance->pci_handle);
592 592 ddi_soft_state_free(mrsas_state, instance_no);
593 593 return (DDI_FAILURE);
594 594 }
595 595
596 596 instance->baseaddress = pci_config_get32(
597 597 instance->pci_handle, PCI_CONF_BASE0);
598 598 instance->baseaddress &= 0x0fffc;
599 599
600 600 instance->dip = dip;
601 601 instance->vendor_id = vendor_id;
602 602 instance->device_id = device_id;
603 603 instance->subsysvid = subsysvid;
604 604 instance->subsysid = subsysid;
605 605 instance->instance = instance_no;
606 606
607 607 /* Initialize FMA */
608 608 instance->fm_capabilities = ddi_prop_get_int(
609 609 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
610 610 "fm-capable", DDI_FM_EREPORT_CAPABLE |
611 611 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
612 612 | DDI_FM_ERRCB_CAPABLE);
613 613
614 614 mrsas_fm_init(instance);
615 615
616 616 /* Setup register map */
617 617 if ((ddi_dev_regsize(instance->dip,
 618 618 	    REGISTER_SET_IO_2108, &reglength) != DDI_SUCCESS) ||
619 619 reglength < MINIMUM_MFI_MEM_SZ) {
620 620 goto fail_attach;
621 621 }
622 622 if (reglength > DEFAULT_MFI_MEM_SZ) {
623 623 reglength = DEFAULT_MFI_MEM_SZ;
624 624 con_log(CL_DLEVEL1, (CE_NOTE,
625 625 "mr_sas: register length to map is 0x%lx bytes",
626 626 reglength));
627 627 }
628 628 if (ddi_regs_map_setup(instance->dip,
629 629 REGISTER_SET_IO_2108, &instance->regmap, 0,
630 630 reglength, &endian_attr, &instance->regmap_handle)
631 631 != DDI_SUCCESS) {
632 632 cmn_err(CE_WARN,
633 633 "mr_sas: couldn't map control registers");
634 634 goto fail_attach;
635 635 }
636 636
637 637 instance->unroll.regs = 1;
638 638
639 639 /*
640 640 * Disable Interrupt Now.
641 641 * Setup Software interrupt
642 642 */
643 643 instance->func_ptr->disable_intr(instance);
644 644
645 645 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
646 646 "mrsas-enable-msi", &data) == DDI_SUCCESS) {
647 647 if (strncmp(data, "no", 3) == 0) {
648 648 msi_enable = 0;
649 649 con_log(CL_ANN1, (CE_WARN,
650 650 "msi_enable = %d disabled", msi_enable));
651 651 }
652 652 ddi_prop_free(data);
653 653 }
654 654
655 655 con_log(CL_DLEVEL1, (CE_NOTE, "msi_enable = %d", msi_enable));
656 656
657 657 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
658 658 "mrsas-enable-fp", &data) == DDI_SUCCESS) {
659 659 if (strncmp(data, "no", 3) == 0) {
660 660 enable_fp = 0;
661 661 cmn_err(CE_NOTE,
662 662 "enable_fp = %d, Fast-Path disabled.\n",
663 663 enable_fp);
664 664 }
665 665
666 666 ddi_prop_free(data);
667 667 }
668 668
669 669 con_log(CL_DLEVEL1, (CE_NOTE, "enable_fp = %d\n", enable_fp));
670 670
671 671 /* Check for all supported interrupt types */
672 672 if (ddi_intr_get_supported_types(
673 673 dip, &intr_types) != DDI_SUCCESS) {
674 674 cmn_err(CE_WARN,
675 675 "ddi_intr_get_supported_types() failed");
676 676 goto fail_attach;
677 677 }
678 678
679 679 con_log(CL_DLEVEL1, (CE_NOTE,
680 680 "ddi_intr_get_supported_types() ret: 0x%x", intr_types));
681 681
682 682 /* Initialize and Setup Interrupt handler */
683 683 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
684 684 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) !=
685 685 DDI_SUCCESS) {
686 686 cmn_err(CE_WARN,
687 687 "MSIX interrupt query failed");
688 688 goto fail_attach;
689 689 }
690 690 instance->intr_type = DDI_INTR_TYPE_MSIX;
691 691 } else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) {
692 692 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) !=
693 693 DDI_SUCCESS) {
694 694 cmn_err(CE_WARN,
695 695 "MSI interrupt query failed");
696 696 goto fail_attach;
697 697 }
698 698 instance->intr_type = DDI_INTR_TYPE_MSI;
699 699 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
700 700 msi_enable = 0;
701 701 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) !=
702 702 DDI_SUCCESS) {
703 703 cmn_err(CE_WARN,
704 704 "FIXED interrupt query failed");
705 705 goto fail_attach;
706 706 }
707 707 instance->intr_type = DDI_INTR_TYPE_FIXED;
708 708 } else {
709 709 cmn_err(CE_WARN, "Device cannot "
710 710 "suppport either FIXED or MSI/X "
711 711 "interrupts");
712 712 goto fail_attach;
713 713 }
714 714
715 715 instance->unroll.intr = 1;
716 716
717 717 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
718 718 "mrsas-enable-ctio", &data) == DDI_SUCCESS) {
719 719 if (strncmp(data, "no", 3) == 0) {
720 720 ctio_enable = 0;
721 721 con_log(CL_ANN1, (CE_WARN,
722 722 "ctio_enable = %d disabled", ctio_enable));
723 723 }
724 724 ddi_prop_free(data);
725 725 }
726 726
727 727 con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d", ctio_enable));
728 728
|
↓ open down ↓ |
728 lines elided |
↑ open up ↑ |
729 729 /* setup the mfi based low level driver */
730 730 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
731 731 cmn_err(CE_WARN, "mr_sas: "
732 732 "could not initialize the low level driver");
733 733
734 734 goto fail_attach;
735 735 }
736 736
737 737 /* Initialize all Mutex */
738 738 INIT_LIST_HEAD(&instance->completed_pool_list);
739 - mutex_init(&instance->completed_pool_mtx,
740 - "completed_pool_mtx", MUTEX_DRIVER,
741 - DDI_INTR_PRI(instance->intr_pri));
739 + mutex_init(&instance->completed_pool_mtx, NULL,
740 + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
742 741
743 - mutex_init(&instance->sync_map_mtx,
744 - "sync_map_mtx", MUTEX_DRIVER,
745 - DDI_INTR_PRI(instance->intr_pri));
742 + mutex_init(&instance->sync_map_mtx, NULL,
743 + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
746 744
747 - mutex_init(&instance->app_cmd_pool_mtx,
748 - "app_cmd_pool_mtx", MUTEX_DRIVER,
749 - DDI_INTR_PRI(instance->intr_pri));
745 + mutex_init(&instance->app_cmd_pool_mtx, NULL,
746 + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
750 747
751 - mutex_init(&instance->config_dev_mtx, "config_dev_mtx",
748 + mutex_init(&instance->config_dev_mtx, NULL,
752 749 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
753 750
754 - mutex_init(&instance->cmd_pend_mtx, "cmd_pend_mtx",
751 + mutex_init(&instance->cmd_pend_mtx, NULL,
755 752 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
756 753
757 - mutex_init(&instance->ocr_flags_mtx, "ocr_flags_mtx",
754 + mutex_init(&instance->ocr_flags_mtx, NULL,
758 755 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
759 756
760 - mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
757 + mutex_init(&instance->int_cmd_mtx, NULL,
761 758 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
762 759 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
763 760
764 - mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx",
761 + mutex_init(&instance->cmd_pool_mtx, NULL,
765 762 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
766 763
767 - mutex_init(&instance->reg_write_mtx, "reg_write_mtx",
764 + mutex_init(&instance->reg_write_mtx, NULL,
768 765 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
769 766
770 767 if (instance->tbolt) {
771 - mutex_init(&instance->cmd_app_pool_mtx,
772 - "cmd_app_pool_mtx", MUTEX_DRIVER,
773 - DDI_INTR_PRI(instance->intr_pri));
768 + mutex_init(&instance->cmd_app_pool_mtx, NULL,
769 + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
774 770
775 - mutex_init(&instance->chip_mtx,
776 - "chip_mtx", MUTEX_DRIVER,
777 - DDI_INTR_PRI(instance->intr_pri));
771 + mutex_init(&instance->chip_mtx, NULL,
772 + MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
778 773
779 774 }
780 775
781 776 instance->unroll.mutexs = 1;
782 777
783 778 instance->timeout_id = (timeout_id_t)-1;
784 779
785 780 /* Register our soft-isr for highlevel interrupts. */
786 781 instance->isr_level = instance->intr_pri;
787 782 if (!(instance->tbolt)) {
788 783 if (instance->isr_level == HIGH_LEVEL_INTR) {
789 784 if (ddi_add_softintr(dip,
790 785 DDI_SOFTINT_HIGH,
791 786 &instance->soft_intr_id, NULL, NULL,
792 787 mrsas_softintr, (caddr_t)instance) !=
793 788 DDI_SUCCESS) {
794 789 cmn_err(CE_WARN,
795 790 "Software ISR did not register");
796 791
797 792 goto fail_attach;
798 793 }
799 794
800 795 instance->unroll.soft_isr = 1;
801 796
802 797 }
803 798 }
804 799
805 800 instance->softint_running = 0;
806 801
807 802 /* Allocate a transport structure */
808 803 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
809 804
810 805 if (tran == NULL) {
811 806 cmn_err(CE_WARN,
812 807 "scsi_hba_tran_alloc failed");
813 808 goto fail_attach;
814 809 }
815 810
816 811 instance->tran = tran;
817 812 instance->unroll.tran = 1;
818 813
819 814 tran->tran_hba_private = instance;
820 815 tran->tran_tgt_init = mrsas_tran_tgt_init;
821 816 tran->tran_tgt_probe = scsi_hba_probe;
822 817 tran->tran_tgt_free = mrsas_tran_tgt_free;
823 818 if (instance->tbolt) {
824 819 tran->tran_init_pkt =
825 820 mrsas_tbolt_tran_init_pkt;
826 821 tran->tran_start =
827 822 mrsas_tbolt_tran_start;
828 823 } else {
829 824 tran->tran_init_pkt = mrsas_tran_init_pkt;
830 825 tran->tran_start = mrsas_tran_start;
831 826 }
832 827 tran->tran_abort = mrsas_tran_abort;
833 828 tran->tran_reset = mrsas_tran_reset;
834 829 tran->tran_getcap = mrsas_tran_getcap;
835 830 tran->tran_setcap = mrsas_tran_setcap;
836 831 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
837 832 tran->tran_dmafree = mrsas_tran_dmafree;
838 833 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
839 834 tran->tran_quiesce = mrsas_tran_quiesce;
840 835 tran->tran_unquiesce = mrsas_tran_unquiesce;
841 836 tran->tran_bus_config = mrsas_tran_bus_config;
842 837
843 838 if (mrsas_relaxed_ordering)
844 839 mrsas_generic_dma_attr.dma_attr_flags |=
845 840 DDI_DMA_RELAXED_ORDERING;
846 841
847 842
848 843 tran_dma_attr = mrsas_generic_dma_attr;
849 844 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
850 845
851 846 /* Attach this instance of the hba */
852 847 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
853 848 != DDI_SUCCESS) {
854 849 cmn_err(CE_WARN,
855 850 "scsi_hba_attach failed");
856 851
857 852 goto fail_attach;
858 853 }
859 854 instance->unroll.tranSetup = 1;
860 855 con_log(CL_ANN1,
861 856 (CE_CONT, "scsi_hba_attach_setup() done."));
862 857
863 858 /* create devctl node for cfgadm command */
864 859 if (ddi_create_minor_node(dip, "devctl",
865 860 S_IFCHR, INST2DEVCTL(instance_no),
866 861 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
867 862 cmn_err(CE_WARN,
868 863 "mr_sas: failed to create devctl node.");
869 864
870 865 goto fail_attach;
871 866 }
872 867
873 868 instance->unroll.devctl = 1;
874 869
875 870 /* create scsi node for cfgadm command */
876 871 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
877 872 INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
878 873 DDI_FAILURE) {
879 874 cmn_err(CE_WARN,
880 875 "mr_sas: failed to create scsi node.");
881 876
882 877 goto fail_attach;
883 878 }
884 879
885 880 instance->unroll.scsictl = 1;
886 881
887 882 (void) sprintf(instance->iocnode, "%d:lsirdctl",
888 883 instance_no);
889 884
890 885 /*
891 886 * Create a node for applications
892 887 * for issuing ioctl to the driver.
893 888 */
894 889 if (ddi_create_minor_node(dip, instance->iocnode,
895 890 S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) ==
896 891 DDI_FAILURE) {
897 892 cmn_err(CE_WARN,
898 893 "mr_sas: failed to create ioctl node.");
899 894
900 895 goto fail_attach;
901 896 }
902 897
903 898 instance->unroll.ioctl = 1;
904 899
905 900 /* Create a taskq to handle dr events */
906 901 if ((instance->taskq = ddi_taskq_create(dip,
907 902 "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
908 903 cmn_err(CE_WARN,
909 904 "mr_sas: failed to create taskq ");
910 905 instance->taskq = NULL;
911 906 goto fail_attach;
912 907 }
913 908 instance->unroll.taskq = 1;
914 909 con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done."));
915 910
916 911 /* enable interrupt */
917 912 instance->func_ptr->enable_intr(instance);
918 913
919 914 /* initiate AEN */
920 915 if (start_mfi_aen(instance)) {
921 916 cmn_err(CE_WARN,
922 917 "mr_sas: failed to initiate AEN.");
923 918 goto fail_attach;
924 919 }
925 920 instance->unroll.aenPend = 1;
926 921 con_log(CL_ANN1,
927 922 (CE_CONT, "AEN started for instance %d.", instance_no));
928 923
929 924 /* Finally! We are on the air. */
930 925 ddi_report_dev(dip);
931 926
932 927 /* FMA handle checking. */
933 928 if (mrsas_check_acc_handle(instance->regmap_handle) !=
934 929 DDI_SUCCESS) {
|
↓ open down ↓ |
147 lines elided |
↑ open up ↑ |
935 930 goto fail_attach;
936 931 }
937 932 if (mrsas_check_acc_handle(instance->pci_handle) !=
938 933 DDI_SUCCESS) {
939 934 goto fail_attach;
940 935 }
941 936
942 937 instance->mr_ld_list =
943 938 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
944 939 KM_SLEEP);
945 - if (instance->mr_ld_list == NULL) {
946 - cmn_err(CE_WARN, "mr_sas attach(): "
947 - "failed to allocate ld_list array");
948 - goto fail_attach;
949 - }
950 940 instance->unroll.ldlist_buff = 1;
951 941
952 942 #ifdef PDSUPPORT
953 943 if (instance->tbolt) {
954 944 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
955 945 instance->mr_tbolt_pd_list =
956 946 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
957 947 sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
958 948 ASSERT(instance->mr_tbolt_pd_list);
959 949 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
960 950 instance->mr_tbolt_pd_list[i].lun_type =
961 951 MRSAS_TBOLT_PD_LUN;
962 952 instance->mr_tbolt_pd_list[i].dev_id =
963 953 (uint8_t)i;
964 954 }
965 955
966 956 instance->unroll.pdlist_buff = 1;
967 957 }
968 958 #endif
969 959 break;
970 960 case DDI_PM_RESUME:
971 961 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
972 962 break;
973 963 case DDI_RESUME:
974 964 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME"));
975 965 break;
976 966 default:
977 967 con_log(CL_ANN,
978 968 (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd));
979 969 return (DDI_FAILURE);
980 970 }
981 971
982 972
983 973 con_log(CL_DLEVEL1,
984 974 (CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d",
985 975 instance_no));
986 976 return (DDI_SUCCESS);
987 977
988 978 fail_attach:
989 979
990 980 mrsas_undo_resources(dip, instance);
991 981
992 982 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
993 983 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
994 984
995 985 mrsas_fm_fini(instance);
996 986
997 987 pci_config_teardown(&instance->pci_handle);
998 988 ddi_soft_state_free(mrsas_state, instance_no);
999 989
1000 990 con_log(CL_ANN, (CE_WARN, "mr_sas: return failure from mrsas_attach"));
1001 991
1002 992 cmn_err(CE_WARN, "mrsas_attach() return FAILURE instance_num %d",
1003 993 instance_no);
1004 994
1005 995 return (DDI_FAILURE);
1006 996 }
1007 997
1008 998 /*
1009 999 * getinfo - gets device information
1010 1000 * @dip:
1011 1001 * @cmd:
1012 1002 * @arg:
1013 1003 * @resultp:
1014 1004 *
1015 1005 * The system calls getinfo() to obtain configuration information that only
1016 1006 * the driver knows. The mapping of minor numbers to device instance is
1017 1007 * entirely under the control of the driver. The system sometimes needs to ask
1018 1008 * the driver which device a particular dev_t represents.
1019 1009 * Given the device number return the devinfo pointer from the scsi_device
1020 1010 * structure.
1021 1011 */
1022 1012 /*ARGSUSED*/
1023 1013 static int
1024 1014 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1025 1015 {
1026 1016 int rval;
1027 1017 int mrsas_minor = getminor((dev_t)arg);
1028 1018
1029 1019 struct mrsas_instance *instance;
1030 1020
1031 1021 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1032 1022
1033 1023 switch (cmd) {
1034 1024 case DDI_INFO_DEVT2DEVINFO:
1035 1025 instance = (struct mrsas_instance *)
1036 1026 ddi_get_soft_state(mrsas_state,
1037 1027 MINOR2INST(mrsas_minor));
1038 1028
1039 1029 if (instance == NULL) {
1040 1030 *resultp = NULL;
1041 1031 rval = DDI_FAILURE;
1042 1032 } else {
1043 1033 *resultp = instance->dip;
1044 1034 rval = DDI_SUCCESS;
1045 1035 }
1046 1036 break;
1047 1037 case DDI_INFO_DEVT2INSTANCE:
1048 1038 *resultp = (void *)(intptr_t)
1049 1039 (MINOR2INST(getminor((dev_t)arg)));
1050 1040 rval = DDI_SUCCESS;
1051 1041 break;
1052 1042 default:
1053 1043 *resultp = NULL;
1054 1044 rval = DDI_FAILURE;
1055 1045 }
1056 1046
1057 1047 return (rval);
1058 1048 }
1059 1049
1060 1050 /*
1061 1051 * detach - detaches a device from the system
1062 1052 * @dip: pointer to the device's dev_info structure
1063 1053 * @cmd: type of detach
1064 1054 *
1065 1055 * A driver's detach() entry point is called to detach an instance of a device
1066 1056 * that is bound to the driver. The entry point is called with the instance of
1067 1057 * the device node to be detached and with DDI_DETACH, which is specified as
1068 1058 * the cmd argument to the entry point.
1069 1059 * This routine is called during driver unload. We free all the allocated
1070 1060 * resources and call the corresponding LLD so that it can also release all
1071 1061 * its resources.
1072 1062 */
static int
mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance_no;

	struct mrsas_instance *instance;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
	    instance_no);

	if (!instance) {
		cmn_err(CE_WARN,
		    "mr_sas:%d could not get instance in detach",
		    instance_no);

		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE,
	    "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
	    instance_no, instance->vendor_id, instance->device_id,
	    instance->subsysvid, instance->subsysid));

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_DETACH"));

		/*
		 * Cancel the config-monitor timeout first.  The mutex is
		 * deliberately dropped across untimeout(9F): untimeout()
		 * waits for an in-flight handler to finish, and holding
		 * config_dev_mtx here could deadlock if the handler also
		 * takes it (NOTE(review): presumed reason — confirm against
		 * the timeout handler).
		 */
		mutex_enter(&instance->config_dev_mtx);
		if (instance->timeout_id != (timeout_id_t)-1) {
			mutex_exit(&instance->config_dev_mtx);
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;
			mutex_enter(&instance->config_dev_mtx);
			instance->unroll.timer = 0;
		}
		mutex_exit(&instance->config_dev_mtx);

		/*
		 * Detach from SCSA before tearing anything else down; a
		 * failure here means targets are still busy and the whole
		 * detach must be refused.
		 */
		if (instance->unroll.tranSetup == 1) {
			if (scsi_hba_detach(dip) != DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "mr_sas2%d: failed to detach",
				    instance_no);
				return (DDI_FAILURE);
			}
			instance->unroll.tranSetup = 0;
			con_log(CL_ANN1,
			    (CE_CONT, "scsi_hba_dettach() done."));
		}

		/* Flush controller cache, then release everything that */
		/* attach acquired, in reverse order (see unroll flags). */
		flush_cache(instance);

		mrsas_undo_resources(dip, instance);

		mrsas_fm_fini(instance);

		pci_config_teardown(&instance->pci_handle);
		ddi_soft_state_free(mrsas_state, instance_no);
		break;

	case DDI_PM_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_PM_SUSPEND"));

		break;
	case DDI_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_SUSPEND"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1159 1149
1160 1150
/*
 * mrsas_undo_resources - release everything mrsas_attach() acquired.
 *
 * Each resource acquired during attach sets a flag in instance->unroll;
 * this routine tests each flag and frees the corresponding resource,
 * roughly in the reverse order of acquisition, clearing the flag as it
 * goes.  It is called both from the DDI_DETACH path and from the attach
 * failure path (fail_attach), so partially-initialized instances are
 * handled by construction.
 */
static void
mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance)
{
	int instance_no;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	instance_no = ddi_get_instance(dip);


	/* Remove the pseudo ioctl, scsi and devctl minor nodes. */
	if (instance->unroll.ioctl == 1) {
		ddi_remove_minor_node(dip, instance->iocnode);
		instance->unroll.ioctl = 0;
	}

	if (instance->unroll.scsictl == 1) {
		ddi_remove_minor_node(dip, "scsi");
		instance->unroll.scsictl = 0;
	}

	if (instance->unroll.devctl == 1) {
		ddi_remove_minor_node(dip, "devctl");
		instance->unroll.devctl = 0;
	}

	/*
	 * Detach from SCSA.  On failure we bail out entirely, leaving
	 * the remaining resources allocated (NOTE(review): this leaks
	 * everything below on the attach-failure path — confirm whether
	 * that is acceptable, since callers ignore the implied failure).
	 */
	if (instance->unroll.tranSetup == 1) {
		if (scsi_hba_detach(dip) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "mr_sas2%d: failed to detach", instance_no);
			return;	/* DDI_FAILURE */
		}
		instance->unroll.tranSetup = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
	}

	if (instance->unroll.tran == 1) {
		scsi_hba_tran_free(instance->tran);
		instance->unroll.tran = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done."));
	}

	/* Abort the long-running firmware commands (map sync, AEN). */
	if (instance->unroll.syncCmd == 1) {
		if (instance->tbolt) {
			if (abort_syncmap_cmd(instance,
			    instance->map_update_cmd)) {
				cmn_err(CE_WARN, "mrsas_detach: "
				    "failed to abort previous syncmap command");
			}

			instance->unroll.syncCmd = 0;
			con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
		}
	}

	if (instance->unroll.aenPend == 1) {
		if (abort_aen_cmd(instance, instance->aen_cmd))
			cmn_err(CE_WARN, "mrsas_detach: "
			    "failed to abort prevous AEN command");

		instance->unroll.aenPend = 0;
		con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
		/* This means the controller is fully initialized and running */
		/* Shutdown should be a last command to controller. */
		/* shutdown_controller(); */
	}


	if (instance->unroll.timer == 1) {
		if (instance->timeout_id != (timeout_id_t)-1) {
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;

			instance->unroll.timer = 0;
		}
	}

	/* Quiesce the hardware before destroying locks and handlers. */
	instance->func_ptr->disable_intr(instance);


	if (instance->unroll.mutexs == 1) {
		mutex_destroy(&instance->cmd_pool_mtx);
		mutex_destroy(&instance->app_cmd_pool_mtx);
		mutex_destroy(&instance->cmd_pend_mtx);
		mutex_destroy(&instance->completed_pool_mtx);
		mutex_destroy(&instance->sync_map_mtx);
		mutex_destroy(&instance->int_cmd_mtx);
		cv_destroy(&instance->int_cmd_cv);
		mutex_destroy(&instance->config_dev_mtx);
		mutex_destroy(&instance->ocr_flags_mtx);
		mutex_destroy(&instance->reg_write_mtx);

		/* These two exist only on Thunderbolt-class controllers. */
		if (instance->tbolt) {
			mutex_destroy(&instance->cmd_app_pool_mtx);
			mutex_destroy(&instance->chip_mtx);
		}

		instance->unroll.mutexs = 0;
		con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done."));
	}


	if (instance->unroll.soft_isr == 1) {
		ddi_remove_softintr(instance->soft_intr_id);
		instance->unroll.soft_isr = 0;
	}

	if (instance->unroll.intr == 1) {
		mrsas_rem_intrs(instance);
		instance->unroll.intr = 0;
	}


	if (instance->unroll.taskq == 1) {
		if (instance->taskq) {
			ddi_taskq_destroy(instance->taskq);
			instance->unroll.taskq = 0;
		}

	}

	/*
	 * free dma memory allocated for
	 * cmds/frames/queues/driver version etc
	 */
	if (instance->unroll.verBuff == 1) {
		(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
		instance->unroll.verBuff = 0;
	}

	if (instance->unroll.pdlist_buff == 1) {
		if (instance->mr_tbolt_pd_list != NULL) {
			kmem_free(instance->mr_tbolt_pd_list,
			    MRSAS_TBOLT_GET_PD_MAX(instance) *
			    sizeof (struct mrsas_tbolt_pd));
		}

		instance->mr_tbolt_pd_list = NULL;
		instance->unroll.pdlist_buff = 0;
	}

	if (instance->unroll.ldlist_buff == 1) {
		if (instance->mr_ld_list != NULL) {
			kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
			    * sizeof (struct mrsas_ld));
		}

		instance->mr_ld_list = NULL;
		instance->unroll.ldlist_buff = 0;
	}

	/* MPI2 (Thunderbolt) and MFI command spaces are mutually exclusive. */
	if (instance->tbolt) {
		if (instance->unroll.alloc_space_mpi2 == 1) {
			free_space_for_mpi2(instance);
			instance->unroll.alloc_space_mpi2 = 0;
		}
	} else {
		if (instance->unroll.alloc_space_mfi == 1) {
			free_space_for_mfi(instance);
			instance->unroll.alloc_space_mfi = 0;
		}
	}

	if (instance->unroll.regs == 1) {
		ddi_regs_map_free(&instance->regmap_handle);
		instance->unroll.regs = 0;
		con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done."));
	}
}
1330 1320
1331 1321
1332 1322
1333 1323 /*
1334 1324 * ************************************************************************** *
1335 1325 * *
1336 1326 * common entry points - for character driver types *
1337 1327 * *
1338 1328 * ************************************************************************** *
1339 1329 */
1340 1330 /*
1341 1331 * open - gets access to a device
1342 1332 * @dev:
1343 1333 * @openflags:
1344 1334 * @otyp:
1345 1335 * @credp:
1346 1336 *
1347 1337 * Access to a device by one or more application programs is controlled
1348 1338 * through the open() and close() entry points. The primary function of
1349 1339 * open() is to verify that the open request is allowed.
1350 1340 */
1351 1341 static int
1352 1342 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1353 1343 {
1354 1344 int rval = 0;
1355 1345
1356 1346 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1357 1347
1358 1348 /* Check root permissions */
1359 1349 if (drv_priv(credp) != 0) {
1360 1350 con_log(CL_ANN, (CE_WARN,
1361 1351 "mr_sas: Non-root ioctl access denied!"));
1362 1352 return (EPERM);
1363 1353 }
1364 1354
1365 1355 /* Verify we are being opened as a character device */
1366 1356 if (otyp != OTYP_CHR) {
1367 1357 con_log(CL_ANN, (CE_WARN,
1368 1358 "mr_sas: ioctl node must be a char node"));
1369 1359 return (EINVAL);
1370 1360 }
1371 1361
1372 1362 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1373 1363 == NULL) {
1374 1364 return (ENXIO);
1375 1365 }
1376 1366
1377 1367 if (scsi_hba_open) {
1378 1368 rval = scsi_hba_open(dev, openflags, otyp, credp);
1379 1369 }
1380 1370
1381 1371 return (rval);
1382 1372 }
1383 1373
1384 1374 /*
1385 1375 * close - gives up access to a device
1386 1376 * @dev:
1387 1377 * @openflags:
1388 1378 * @otyp:
1389 1379 * @credp:
1390 1380 *
1391 1381 * close() should perform any cleanup necessary to finish using the minor
1392 1382 * device, and prepare the device (and driver) to be opened again.
1393 1383 */
1394 1384 static int
1395 1385 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1396 1386 {
1397 1387 int rval = 0;
1398 1388
1399 1389 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1400 1390
1401 1391 /* no need for locks! */
1402 1392
1403 1393 if (scsi_hba_close) {
1404 1394 rval = scsi_hba_close(dev, openflags, otyp, credp);
1405 1395 }
1406 1396
1407 1397 return (rval);
1408 1398 }
1409 1399
1410 1400 /*
1411 1401 * ioctl - performs a range of I/O commands for character drivers
1412 1402 * @dev:
1413 1403 * @cmd:
1414 1404 * @arg:
1415 1405 * @mode:
1416 1406 * @credp:
1417 1407 * @rvalp:
1418 1408 *
1419 1409 * ioctl() routine must make sure that user data is copied into or out of the
1420 1410 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1421 1411 * and ddi_copyout(), as appropriate.
1422 1412 * This is a wrapper routine to serialize access to the actual ioctl routine.
1423 1413 * ioctl() should return 0 on success, or the appropriate error number. The
1424 1414 * driver may also set the value returned to the calling process through rvalp.
1425 1415 */
1426 1416
static int
mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	int rval = 0;

	struct mrsas_instance *instance;
	struct mrsas_ioctl *ioctl;
	struct mrsas_aen aen;
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));

	if (instance == NULL) {
		/* invalid minor number */
		con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
		return (ENXIO);
	}

	/* KM_SLEEP never returns NULL, hence the ASSERT instead of a check. */
	ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
	    KM_SLEEP);
	ASSERT(ioctl);

	switch ((uint_t)cmd) {
	case MRSAS_IOCTL_FIRMWARE:
		/* Pull the full ioctl frame in from userland. */
		if (ddi_copyin((void *)arg, ioctl,
		    sizeof (struct mrsas_ioctl), mode)) {
			con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
			    "ERROR IOCTL copyin"));
			kmem_free(ioctl, sizeof (struct mrsas_ioctl));
			return (EFAULT);
		}

		/* Driver-private controls vs. pass-through MFI commands. */
		if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
			rval = handle_drv_ioctl(instance, ioctl, mode);
		} else {
			rval = handle_mfi_ioctl(instance, ioctl, mode);
		}

		/*
		 * NOTE(review): the "- 1" presumably excludes the first byte
		 * of a trailing variable-length area in struct mrsas_ioctl;
		 * confirm against the struct definition.  Also note that a
		 * failed copyout yields rval = 1 (EPERM) rather than EFAULT,
		 * matching the historical behavior of this driver.
		 */
		if (ddi_copyout((void *)ioctl, (void *)arg,
		    (sizeof (struct mrsas_ioctl) - 1), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "mrsas_ioctl: copy_to_user failed"));
			rval = 1;
		}

		break;
	case MRSAS_IOCTL_AEN:
		/* Register/deliver an asynchronous event notification. */
		if (ddi_copyin((void *) arg, &aen,
		    sizeof (struct mrsas_aen), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "mrsas_ioctl: ERROR AEN copyin"));
			kmem_free(ioctl, sizeof (struct mrsas_ioctl));
			return (EFAULT);
		}

		rval = handle_mfi_aen(instance, &aen);

		if (ddi_copyout((void *) &aen, (void *)arg,
		    sizeof (struct mrsas_aen), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "mrsas_ioctl: copy_to_user failed"));
			rval = 1;
		}

		break;
	default:
		/* Anything else is a generic SCSA HBA ioctl. */
		rval = scsi_hba_ioctl(dev, cmd, arg,
		    mode, credp, rvalp);

		con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
		    "scsi_hba_ioctl called, ret = %x.", rval));
	}

	kmem_free(ioctl, sizeof (struct mrsas_ioctl));
	return (rval);
}
1512 1494
1513 1495 /*
1514 1496 * ************************************************************************** *
1515 1497 * *
1516 1498 * common entry points - for block driver types *
1517 1499 * *
1518 1500 * ************************************************************************** *
1519 1501 */
1520 1502 #ifdef __sparc
1521 1503 /*
1522 1504 * reset - TBD
1523 1505 * @dip:
1524 1506 * @cmd:
1525 1507 *
1526 1508 * TBD
1527 1509 */
1528 1510 /*ARGSUSED*/
1529 1511 static int
1530 1512 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1531 1513 {
1532 1514 int instance_no;
1533 1515
1534 1516 struct mrsas_instance *instance;
1535 1517
1536 1518 instance_no = ddi_get_instance(dip);
1537 1519 instance = (struct mrsas_instance *)ddi_get_soft_state
1538 1520 (mrsas_state, instance_no);
1539 1521
1540 1522 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1541 1523
1542 1524 if (!instance) {
1543 1525 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1544 1526 "in reset", instance_no));
1545 1527 return (DDI_FAILURE);
1546 1528 }
1547 1529
1548 1530 instance->func_ptr->disable_intr(instance);
1549 1531
1550 1532 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1551 1533 instance_no));
1552 1534
1553 1535 flush_cache(instance);
1554 1536
1555 1537 return (DDI_SUCCESS);
1556 1538 }
1557 1539 #else /* __sparc */
1558 1540 /*ARGSUSED*/
1559 1541 static int
1560 1542 mrsas_quiesce(dev_info_t *dip)
1561 1543 {
1562 1544 int instance_no;
1563 1545
1564 1546 struct mrsas_instance *instance;
1565 1547
1566 1548 instance_no = ddi_get_instance(dip);
1567 1549 instance = (struct mrsas_instance *)ddi_get_soft_state
1568 1550 (mrsas_state, instance_no);
1569 1551
1570 1552 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1571 1553
1572 1554 if (!instance) {
1573 1555 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
1574 1556 "in quiesce", instance_no));
1575 1557 return (DDI_FAILURE);
1576 1558 }
1577 1559 if (instance->deadadapter || instance->adapterresetinprogress) {
1578 1560 con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
1579 1561 "healthy state", instance_no));
1580 1562 return (DDI_FAILURE);
1581 1563 }
1582 1564
1583 1565 if (abort_aen_cmd(instance, instance->aen_cmd)) {
1584 1566 con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
1585 1567 "failed to abort prevous AEN command QUIESCE"));
1586 1568 }
1587 1569
1588 1570 if (instance->tbolt) {
1589 1571 if (abort_syncmap_cmd(instance,
1590 1572 instance->map_update_cmd)) {
1591 1573 cmn_err(CE_WARN,
1592 1574 "mrsas_detach: failed to abort "
1593 1575 "previous syncmap command");
1594 1576 return (DDI_FAILURE);
1595 1577 }
1596 1578 }
1597 1579
1598 1580 instance->func_ptr->disable_intr(instance);
1599 1581
1600 1582 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1601 1583 instance_no));
1602 1584
1603 1585 flush_cache(instance);
1604 1586
1605 1587 if (wait_for_outstanding(instance)) {
1606 1588 con_log(CL_ANN1,
1607 1589 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
1608 1590 return (DDI_FAILURE);
1609 1591 }
1610 1592 return (DDI_SUCCESS);
1611 1593 }
1612 1594 #endif /* __sparc */
1613 1595
1614 1596 /*
1615 1597 * ************************************************************************** *
1616 1598 * *
1617 1599 * entry points (SCSI HBA) *
1618 1600 * *
1619 1601 * ************************************************************************** *
1620 1602 */
1621 1603 /*
1622 1604 * tran_tgt_init - initialize a target device instance
1623 1605 * @hba_dip:
1624 1606 * @tgt_dip:
1625 1607 * @tran:
1626 1608 * @sd:
1627 1609 *
1628 1610 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1629 1611 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1630 1612 * the device's address as valid and supportable for that particular HBA.
1631 1613 * By returning DDI_FAILURE, the instance of the target driver for that device
1632 1614 * is not probed or attached.
1633 1615 */
1634 1616 /*ARGSUSED*/
1635 1617 static int
1636 1618 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1637 1619 scsi_hba_tran_t *tran, struct scsi_device *sd)
1638 1620 {
1639 1621 struct mrsas_instance *instance;
1640 1622 uint16_t tgt = sd->sd_address.a_target;
1641 1623 uint8_t lun = sd->sd_address.a_lun;
1642 1624 dev_info_t *child = NULL;
1643 1625
1644 1626 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
1645 1627 tgt, lun));
1646 1628
1647 1629 instance = ADDR2MR(&sd->sd_address);
1648 1630
1649 1631 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
1650 1632 /*
1651 1633 * If no persistent node exists, we don't allow .conf node
1652 1634 * to be created.
1653 1635 */
1654 1636 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
1655 1637 con_log(CL_DLEVEL2,
1656 1638 (CE_NOTE, "mrsas_tgt_init find child ="
1657 1639 " %p t = %d l = %d", (void *)child, tgt, lun));
1658 1640 if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
1659 1641 DDI_SUCCESS)
1660 1642 /* Create this .conf node */
1661 1643 return (DDI_SUCCESS);
1662 1644 }
1663 1645 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
1664 1646 "DDI_FAILURE t = %d l = %d", tgt, lun));
1665 1647 return (DDI_FAILURE);
1666 1648
1667 1649 }
1668 1650
1669 1651 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1670 1652 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1671 1653
1672 1654 if (tgt < MRDRV_MAX_LD && lun == 0) {
1673 1655 if (instance->mr_ld_list[tgt].dip == NULL &&
1674 1656 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1675 1657 mutex_enter(&instance->config_dev_mtx);
1676 1658 instance->mr_ld_list[tgt].dip = tgt_dip;
1677 1659 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1678 1660 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1679 1661 mutex_exit(&instance->config_dev_mtx);
1680 1662 }
1681 1663 }
1682 1664
1683 1665 #ifdef PDSUPPORT
1684 1666 else if (instance->tbolt) {
1685 1667 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1686 1668 mutex_enter(&instance->config_dev_mtx);
1687 1669 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1688 1670 instance->mr_tbolt_pd_list[tgt].flag =
1689 1671 MRDRV_TGT_VALID;
1690 1672 mutex_exit(&instance->config_dev_mtx);
1691 1673 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1692 1674 "t%xl%x", tgt, lun));
1693 1675 }
1694 1676 }
1695 1677 #endif
1696 1678
1697 1679 return (DDI_SUCCESS);
1698 1680 }
1699 1681
1700 1682 /*ARGSUSED*/
1701 1683 static void
1702 1684 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1703 1685 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1704 1686 {
1705 1687 struct mrsas_instance *instance;
1706 1688 int tgt = sd->sd_address.a_target;
1707 1689 int lun = sd->sd_address.a_lun;
1708 1690
1709 1691 instance = ADDR2MR(&sd->sd_address);
1710 1692
1711 1693 con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));
1712 1694
1713 1695 if (tgt < MRDRV_MAX_LD && lun == 0) {
1714 1696 if (instance->mr_ld_list[tgt].dip == tgt_dip) {
1715 1697 mutex_enter(&instance->config_dev_mtx);
1716 1698 instance->mr_ld_list[tgt].dip = NULL;
1717 1699 mutex_exit(&instance->config_dev_mtx);
1718 1700 }
1719 1701 }
1720 1702
1721 1703 #ifdef PDSUPPORT
1722 1704 else if (instance->tbolt) {
1723 1705 mutex_enter(&instance->config_dev_mtx);
1724 1706 instance->mr_tbolt_pd_list[tgt].dip = NULL;
1725 1707 mutex_exit(&instance->config_dev_mtx);
1726 1708 con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
1727 1709 "for tgt:%x", tgt));
1728 1710 }
1729 1711 #endif
1730 1712
1731 1713 }
1732 1714
1733 1715 dev_info_t *
1734 1716 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1735 1717 {
1736 1718 dev_info_t *child = NULL;
1737 1719 char addr[SCSI_MAXNAMELEN];
1738 1720 char tmp[MAXNAMELEN];
1739 1721
1740 1722 (void) sprintf(addr, "%x,%x", tgt, lun);
1741 1723 for (child = ddi_get_child(instance->dip); child;
1742 1724 child = ddi_get_next_sibling(child)) {
1743 1725
1744 1726 if (ndi_dev_is_persistent_node(child) == 0) {
1745 1727 continue;
1746 1728 }
1747 1729
1748 1730 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1749 1731 DDI_SUCCESS) {
1750 1732 continue;
1751 1733 }
1752 1734
1753 1735 if (strcmp(addr, tmp) == 0) {
1754 1736 break;
1755 1737 }
1756 1738 }
1757 1739 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1758 1740 (void *)child));
1759 1741 return (child);
1760 1742 }
1761 1743
1762 1744 /*
1763 1745 * mrsas_name_node -
1764 1746 * @dip:
1765 1747 * @name:
1766 1748 * @len:
1767 1749 */
1768 1750 static int
1769 1751 mrsas_name_node(dev_info_t *dip, char *name, int len)
1770 1752 {
1771 1753 int tgt, lun;
1772 1754
1773 1755 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1774 1756 DDI_PROP_DONTPASS, "target", -1);
1775 1757 con_log(CL_DLEVEL2, (CE_NOTE,
1776 1758 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1777 1759 if (tgt == -1) {
1778 1760 return (DDI_FAILURE);
1779 1761 }
1780 1762 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1781 1763 "lun", -1);
1782 1764 con_log(CL_DLEVEL2,
1783 1765 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1784 1766 if (lun == -1) {
1785 1767 return (DDI_FAILURE);
1786 1768 }
1787 1769 (void) snprintf(name, len, "%x,%x", tgt, lun);
1788 1770 return (DDI_SUCCESS);
1789 1771 }
1790 1772
1791 1773 /*
1792 1774 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1793 1775 * @ap:
1794 1776 * @pkt:
1795 1777 * @bp:
1796 1778 * @cmdlen:
1797 1779 * @statuslen:
1798 1780 * @tgtlen:
1799 1781 * @flags:
1800 1782 * @callback:
1801 1783 *
1802 1784 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1803 1785 * structure and DMA resources for a target driver request. The
1804 1786 * tran_init_pkt() entry point is called when the target driver calls the
1805 1787 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1806 1788 * is a request to perform one or more of three possible services:
1807 1789 * - allocation and initialization of a scsi_pkt structure
1808 1790 * - allocation of DMA resources for data transfer
1809 1791 * - reallocation of DMA resources for the next portion of the data transfer
1810 1792 */
1811 1793 static struct scsi_pkt *
1812 1794 mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
1813 1795 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1814 1796 int flags, int (*callback)(), caddr_t arg)
1815 1797 {
1816 1798 struct scsa_cmd *acmd;
1817 1799 struct mrsas_instance *instance;
1818 1800 struct scsi_pkt *new_pkt;
1819 1801
1820 1802 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1821 1803
1822 1804 instance = ADDR2MR(ap);
1823 1805
1824 1806 /* step #1 : pkt allocation */
1825 1807 if (pkt == NULL) {
1826 1808 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1827 1809 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1828 1810 if (pkt == NULL) {
1829 1811 return (NULL);
1830 1812 }
1831 1813
1832 1814 acmd = PKT2CMD(pkt);
1833 1815
1834 1816 /*
1835 1817 * Initialize the new pkt - we redundantly initialize
1836 1818 * all the fields for illustrative purposes.
1837 1819 */
1838 1820 acmd->cmd_pkt = pkt;
1839 1821 acmd->cmd_flags = 0;
1840 1822 acmd->cmd_scblen = statuslen;
1841 1823 acmd->cmd_cdblen = cmdlen;
1842 1824 acmd->cmd_dmahandle = NULL;
1843 1825 acmd->cmd_ncookies = 0;
1844 1826 acmd->cmd_cookie = 0;
1845 1827 acmd->cmd_cookiecnt = 0;
1846 1828 acmd->cmd_nwin = 0;
1847 1829
1848 1830 pkt->pkt_address = *ap;
1849 1831 pkt->pkt_comp = (void (*)())NULL;
1850 1832 pkt->pkt_flags = 0;
1851 1833 pkt->pkt_time = 0;
1852 1834 pkt->pkt_resid = 0;
1853 1835 pkt->pkt_state = 0;
1854 1836 pkt->pkt_statistics = 0;
1855 1837 pkt->pkt_reason = 0;
1856 1838 new_pkt = pkt;
1857 1839 } else {
1858 1840 acmd = PKT2CMD(pkt);
1859 1841 new_pkt = NULL;
1860 1842 }
1861 1843
1862 1844 /* step #2 : dma allocation/move */
1863 1845 if (bp && bp->b_bcount != 0) {
1864 1846 if (acmd->cmd_dmahandle == NULL) {
1865 1847 if (mrsas_dma_alloc(instance, pkt, bp, flags,
1866 1848 callback) == DDI_FAILURE) {
1867 1849 if (new_pkt) {
1868 1850 scsi_hba_pkt_free(ap, new_pkt);
1869 1851 }
1870 1852 return ((struct scsi_pkt *)NULL);
1871 1853 }
1872 1854 } else {
1873 1855 if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1874 1856 return ((struct scsi_pkt *)NULL);
1875 1857 }
1876 1858 }
1877 1859 }
1878 1860
1879 1861 return (pkt);
1880 1862 }
1881 1863
/*
 * tran_start - transport a SCSI command to the addressed target
 * @ap:
 * @pkt:
 *
 * The tran_start() entry point for a SCSI HBA driver is called to transport a
 * SCSI command to the addressed target. The SCSI command is described
 * entirely within the scsi_pkt structure, which the target driver allocated
 * through the HBA driver's tran_init_pkt() entry point. If the command
 * involves a data transfer, DMA resources must also have been allocated for
 * the scsi_pkt structure.
 *
 * Return Values :
 *	TRAN_BUSY - request queue is full, no more free scbs
 *	TRAN_ACCEPT - pkt has been submitted to the instance
 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t		cmd_done = 0;

	struct mrsas_instance	*instance = ADDR2MR(ap);
	struct mrsas_cmd	*cmd;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/*
	 * A dead adapter accepts no further I/O: mark the packet as
	 * device-gone and report a fatal transport error.
	 */
	if (instance->deadadapter == 1) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_tran_start: return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesnt take any more IOs"));
		if (pkt) {
			pkt->pkt_reason		= CMD_DEV_GONE;
			pkt->pkt_statistics	= STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}

	/* While an online controller reset is running, push back the I/O. */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}

	con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));

	pkt->pkt_reason	= CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	/* Translate the scsi_pkt into a firmware command frame. */
	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		/* Fake a successful completion without touching the HW. */
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	/* No free command frame was available: ask the target to retry. */
	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/* Interrupt-driven path: throttle on outstanding FW cmds. */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
			DTRACE_PROBE2(start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
		    "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/* Polled path: issue, spin for completion, translate status. */
		struct mrsas_header *hdr = &cmd->frame->hdr;

		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason		= CMD_CMPLT;
		pkt->pkt_statistics	= 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Map the firmware completion status onto SCSA fields. */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: scsi done with error"));
			pkt->pkt_reason	= CMD_CMPLT;
			pkt->pkt_statistics = 0;

			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: device not found error"));
			pkt->pkt_reason	= CMD_DEV_GONE;
			pkt->pkt_statistics  = STAT_DISCON;
			break;

		default:
			/* Anything else is reported as a busy target. */
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		(void) mrsas_common_check(instance, cmd);
		DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
		    uint8_t, hdr->cmd_status);
		return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}
2018 2000
/*
 * tran_abort - Abort any commands that are currently in transport
 * @ap:
 * @pkt:
 *
 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
 * commands that are currently in transport for a particular target. This entry
 * point is called when a target driver calls scsi_abort(). The tran_abort()
 * entry point should attempt to abort the command denoted by the pkt
 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
 * abort all outstanding commands in the transport layer for the particular
 * target or logical unit.
 */
/*ARGSUSED*/
static int
mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* abort command not supported by H/W; always report failure */

	return (DDI_FAILURE);
}
2042 2024
2043 2025 /*
2044 2026 * tran_reset - reset either the SCSI bus or target
2045 2027 * @ap:
2046 2028 * @level:
2047 2029 *
2048 2030 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
2049 2031 * the SCSI bus or a particular SCSI target device. This entry point is called
2050 2032 * when a target driver calls scsi_reset(). The tran_reset() entry point must
2051 2033 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
2052 2034 * particular target or logical unit must be reset.
2053 2035 */
2054 2036 /*ARGSUSED*/
2055 2037 static int
2056 2038 mrsas_tran_reset(struct scsi_address *ap, int level)
2057 2039 {
2058 2040 struct mrsas_instance *instance = ADDR2MR(ap);
2059 2041
2060 2042 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2061 2043
2062 2044 if (wait_for_outstanding(instance)) {
2063 2045 con_log(CL_ANN1,
2064 2046 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
2065 2047 return (DDI_FAILURE);
2066 2048 } else {
2067 2049 return (DDI_SUCCESS);
2068 2050 }
2069 2051 }
2070 2052
#if 0	/* Compiled out: bus reset is not wired into the tran vector. */
/*
 * tran_bus_reset - reset the SCSI bus
 * @dip:
 * @level:
 *
 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
 * initialized during the HBA driver's attach(). The vector should point to
 * an HBA entry point that is to be called when a user initiates a bus reset.
 * Implementation is hardware specific. If the HBA driver cannot reset the
 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
 * or not initialize this vector.
 */
/*ARGSUSED*/
static int
mrsas_tran_bus_reset(dev_info_t *dip, int level)
{
	int	instance_no = ddi_get_instance(dip);

	struct mrsas_instance	*instance = ddi_get_soft_state(mrsas_state,
	    instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* Treat "all outstanding commands drained" as a successful reset. */
	if (wait_for_outstanding(instance)) {
		con_log(CL_ANN1,
		    (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
		return (DDI_FAILURE);
	} else {
		return (DDI_SUCCESS);
	}
}
#endif
2104 2086
2105 2087 /*
2106 2088 * tran_getcap - get one of a set of SCSA-defined capabilities
2107 2089 * @ap:
2108 2090 * @cap:
2109 2091 * @whom:
2110 2092 *
2111 2093 * The target driver can request the current setting of the capability for a
2112 2094 * particular target by setting the whom parameter to nonzero. A whom value of
2113 2095 * zero indicates a request for the current setting of the general capability
2114 2096 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
2115 2097 * for undefined capabilities or the current value of the requested capability.
2116 2098 */
2117 2099 /*ARGSUSED*/
2118 2100 static int
2119 2101 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
2120 2102 {
2121 2103 int rval = 0;
2122 2104
2123 2105 struct mrsas_instance *instance = ADDR2MR(ap);
2124 2106
2125 2107 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2126 2108
2127 2109 /* we do allow inquiring about capabilities for other targets */
2128 2110 if (cap == NULL) {
2129 2111 return (-1);
2130 2112 }
2131 2113
2132 2114 switch (scsi_hba_lookup_capstr(cap)) {
2133 2115 case SCSI_CAP_DMA_MAX:
2134 2116 if (instance->tbolt) {
2135 2117 /* Limit to 256k max transfer */
2136 2118 rval = mrsas_tbolt_max_cap_maxxfer;
2137 2119 } else {
2138 2120 /* Limit to 16MB max transfer */
2139 2121 rval = mrsas_max_cap_maxxfer;
2140 2122 }
2141 2123 break;
2142 2124 case SCSI_CAP_MSG_OUT:
2143 2125 rval = 1;
2144 2126 break;
2145 2127 case SCSI_CAP_DISCONNECT:
2146 2128 rval = 0;
2147 2129 break;
2148 2130 case SCSI_CAP_SYNCHRONOUS:
2149 2131 rval = 0;
2150 2132 break;
2151 2133 case SCSI_CAP_WIDE_XFER:
2152 2134 rval = 1;
2153 2135 break;
2154 2136 case SCSI_CAP_TAGGED_QING:
2155 2137 rval = 1;
2156 2138 break;
2157 2139 case SCSI_CAP_UNTAGGED_QING:
2158 2140 rval = 1;
2159 2141 break;
2160 2142 case SCSI_CAP_PARITY:
2161 2143 rval = 1;
2162 2144 break;
2163 2145 case SCSI_CAP_INITIATOR_ID:
2164 2146 rval = instance->init_id;
2165 2147 break;
2166 2148 case SCSI_CAP_ARQ:
2167 2149 rval = 1;
2168 2150 break;
2169 2151 case SCSI_CAP_LINKED_CMDS:
2170 2152 rval = 0;
2171 2153 break;
2172 2154 case SCSI_CAP_RESET_NOTIFICATION:
2173 2155 rval = 1;
2174 2156 break;
2175 2157 case SCSI_CAP_GEOMETRY:
2176 2158 rval = -1;
2177 2159
2178 2160 break;
2179 2161 default:
2180 2162 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2181 2163 scsi_hba_lookup_capstr(cap)));
2182 2164 rval = -1;
2183 2165 break;
2184 2166 }
2185 2167
2186 2168 return (rval);
2187 2169 }
2188 2170
2189 2171 /*
2190 2172 * tran_setcap - set one of a set of SCSA-defined capabilities
2191 2173 * @ap:
2192 2174 * @cap:
2193 2175 * @value:
2194 2176 * @whom:
2195 2177 *
2196 2178 * The target driver might request that the new value be set for a particular
2197 2179 * target by setting the whom parameter to nonzero. A whom value of zero
2198 2180 * means that request is to set the new value for the SCSI bus or for adapter
2199 2181 * hardware in general.
2200 2182 * The tran_setcap() should return the following values as appropriate:
2201 2183 * - -1 for undefined capabilities
2202 2184 * - 0 if the HBA driver cannot set the capability to the requested value
2203 2185 * - 1 if the HBA driver is able to set the capability to the requested value
2204 2186 */
2205 2187 /*ARGSUSED*/
2206 2188 static int
2207 2189 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2208 2190 {
2209 2191 int rval = 1;
2210 2192
2211 2193 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2212 2194
2213 2195 /* We don't allow setting capabilities for other targets */
2214 2196 if (cap == NULL || whom == 0) {
2215 2197 return (-1);
2216 2198 }
2217 2199
2218 2200 switch (scsi_hba_lookup_capstr(cap)) {
2219 2201 case SCSI_CAP_DMA_MAX:
2220 2202 case SCSI_CAP_MSG_OUT:
2221 2203 case SCSI_CAP_PARITY:
2222 2204 case SCSI_CAP_LINKED_CMDS:
2223 2205 case SCSI_CAP_RESET_NOTIFICATION:
2224 2206 case SCSI_CAP_DISCONNECT:
2225 2207 case SCSI_CAP_SYNCHRONOUS:
2226 2208 case SCSI_CAP_UNTAGGED_QING:
2227 2209 case SCSI_CAP_WIDE_XFER:
2228 2210 case SCSI_CAP_INITIATOR_ID:
2229 2211 case SCSI_CAP_ARQ:
2230 2212 /*
2231 2213 * None of these are settable via
2232 2214 * the capability interface.
2233 2215 */
2234 2216 break;
2235 2217 case SCSI_CAP_TAGGED_QING:
2236 2218 rval = 1;
2237 2219 break;
2238 2220 case SCSI_CAP_SECTOR_SIZE:
2239 2221 rval = 1;
2240 2222 break;
2241 2223
2242 2224 case SCSI_CAP_TOTAL_SECTORS:
2243 2225 rval = 1;
2244 2226 break;
2245 2227 default:
2246 2228 rval = -1;
2247 2229 break;
2248 2230 }
2249 2231
2250 2232 return (rval);
2251 2233 }
2252 2234
2253 2235 /*
2254 2236 * tran_destroy_pkt - deallocate scsi_pkt structure
2255 2237 * @ap:
2256 2238 * @pkt:
2257 2239 *
2258 2240 * The tran_destroy_pkt() entry point is the HBA driver function that
2259 2241 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2260 2242 * called when the target driver calls scsi_destroy_pkt(). The
2261 2243 * tran_destroy_pkt() entry point must free any DMA resources that have been
2262 2244 * allocated for the packet. An implicit DMA synchronization occurs if the
2263 2245 * DMA resources are freed and any cached data remains after the completion
2264 2246 * of the transfer.
2265 2247 */
2266 2248 static void
2267 2249 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2268 2250 {
2269 2251 struct scsa_cmd *acmd = PKT2CMD(pkt);
2270 2252
2271 2253 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2272 2254
2273 2255 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2274 2256 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2275 2257
2276 2258 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2277 2259
2278 2260 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2279 2261
2280 2262 acmd->cmd_dmahandle = NULL;
2281 2263 }
2282 2264
2283 2265 /* free the pkt */
2284 2266 scsi_hba_pkt_free(ap, pkt);
2285 2267 }
2286 2268
2287 2269 /*
2288 2270 * tran_dmafree - deallocates DMA resources
2289 2271 * @ap:
2290 2272 * @pkt:
2291 2273 *
2292 2274 * The tran_dmafree() entry point deallocates DMAQ resources that have been
2293 2275 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2294 2276 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2295 2277 * free only DMA resources allocated for a scsi_pkt structure, not the
2296 2278 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2297 2279 * implicitly performed.
2298 2280 */
2299 2281 /*ARGSUSED*/
2300 2282 static void
2301 2283 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2302 2284 {
2303 2285 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2304 2286
2305 2287 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2306 2288
2307 2289 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2308 2290 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2309 2291
2310 2292 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2311 2293
2312 2294 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2313 2295
2314 2296 acmd->cmd_dmahandle = NULL;
2315 2297 }
2316 2298 }
2317 2299
2318 2300 /*
2319 2301 * tran_sync_pkt - synchronize the DMA object allocated
2320 2302 * @ap:
2321 2303 * @pkt:
2322 2304 *
2323 2305 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2324 2306 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2325 2307 * entry point is called when the target driver calls scsi_sync_pkt(). If the
2326 2308 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2327 2309 * must synchronize the CPU's view of the data. If the data transfer direction
2328 2310 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2329 2311 * device's view of the data.
2330 2312 */
2331 2313 /*ARGSUSED*/
2332 2314 static void
2333 2315 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2334 2316 {
2335 2317 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2336 2318
2337 2319 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2338 2320
2339 2321 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2340 2322 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2341 2323 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2342 2324 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2343 2325 }
2344 2326 }
2345 2327
/*
 * tran_quiesce stub: quiesce is not implemented; the nonzero return
 * reports the request as unsuccessful to the framework.
 */
/*ARGSUSED*/
static int
mrsas_tran_quiesce(dev_info_t *dip)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (1);
}
2354 2336
/*
 * tran_unquiesce stub: unquiesce is not implemented; the nonzero return
 * reports the request as unsuccessful to the framework.
 */
/*ARGSUSED*/
static int
mrsas_tran_unquiesce(dev_info_t *dip)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (1);
}
2363 2345
2364 2346
2365 2347 /*
2366 2348 * mrsas_isr(caddr_t)
2367 2349 *
2368 2350 * The Interrupt Service Routine
2369 2351 *
2370 2352 * Collect status for all completed commands and do callback
2371 2353 *
2372 2354 */
2373 2355 static uint_t
2374 2356 mrsas_isr(struct mrsas_instance *instance)
2375 2357 {
2376 2358 int need_softintr;
2377 2359 uint32_t producer;
2378 2360 uint32_t consumer;
2379 2361 uint32_t context;
2380 2362 int retval;
2381 2363
2382 2364 struct mrsas_cmd *cmd;
2383 2365 struct mrsas_header *hdr;
2384 2366 struct scsi_pkt *pkt;
2385 2367
2386 2368 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2387 2369 ASSERT(instance);
2388 2370 if (instance->tbolt) {
2389 2371 mutex_enter(&instance->chip_mtx);
2390 2372 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2391 2373 !(instance->func_ptr->intr_ack(instance))) {
2392 2374 mutex_exit(&instance->chip_mtx);
2393 2375 return (DDI_INTR_UNCLAIMED);
2394 2376 }
2395 2377 retval = mr_sas_tbolt_process_outstanding_cmd(instance);
2396 2378 mutex_exit(&instance->chip_mtx);
2397 2379 return (retval);
2398 2380 } else {
2399 2381 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2400 2382 !instance->func_ptr->intr_ack(instance)) {
2401 2383 return (DDI_INTR_UNCLAIMED);
2402 2384 }
2403 2385 }
2404 2386
2405 2387 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2406 2388 0, 0, DDI_DMA_SYNC_FORCPU);
2407 2389
2408 2390 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2409 2391 != DDI_SUCCESS) {
2410 2392 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2411 2393 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2412 2394 con_log(CL_ANN1, (CE_WARN,
2413 2395 "mr_sas_isr(): FMA check, returning DDI_INTR_UNCLAIMED"));
2414 2396 return (DDI_INTR_CLAIMED);
2415 2397 }
2416 2398 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2417 2399
2418 2400 #ifdef OCRDEBUG
2419 2401 if (debug_consecutive_timeout_after_ocr_g == 1) {
2420 2402 con_log(CL_ANN1, (CE_NOTE,
2421 2403 "simulating consecutive timeout after ocr"));
2422 2404 return (DDI_INTR_CLAIMED);
2423 2405 }
2424 2406 #endif
2425 2407
2426 2408 mutex_enter(&instance->completed_pool_mtx);
2427 2409 mutex_enter(&instance->cmd_pend_mtx);
2428 2410
2429 2411 producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2430 2412 instance->producer);
2431 2413 consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2432 2414 instance->consumer);
2433 2415
2434 2416 con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
2435 2417 producer, consumer));
2436 2418 if (producer == consumer) {
2437 2419 con_log(CL_ANN, (CE_WARN, "producer == consumer case"));
2438 2420 DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
2439 2421 uint32_t, consumer);
2440 2422 mutex_exit(&instance->cmd_pend_mtx);
2441 2423 mutex_exit(&instance->completed_pool_mtx);
2442 2424 return (DDI_INTR_CLAIMED);
2443 2425 }
2444 2426
2445 2427 while (consumer != producer) {
2446 2428 context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2447 2429 &instance->reply_queue[consumer]);
2448 2430 cmd = instance->cmd_list[context];
2449 2431
2450 2432 if (cmd->sync_cmd == MRSAS_TRUE) {
2451 2433 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2452 2434 if (hdr) {
2453 2435 mlist_del_init(&cmd->list);
2454 2436 }
2455 2437 } else {
2456 2438 pkt = cmd->pkt;
2457 2439 if (pkt) {
2458 2440 mlist_del_init(&cmd->list);
2459 2441 }
2460 2442 }
2461 2443
2462 2444 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
2463 2445
2464 2446 consumer++;
2465 2447 if (consumer == (instance->max_fw_cmds + 1)) {
2466 2448 consumer = 0;
2467 2449 }
2468 2450 }
2469 2451 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
2470 2452 instance->consumer, consumer);
2471 2453 mutex_exit(&instance->cmd_pend_mtx);
2472 2454 mutex_exit(&instance->completed_pool_mtx);
2473 2455
2474 2456 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2475 2457 0, 0, DDI_DMA_SYNC_FORDEV);
2476 2458
2477 2459 if (instance->softint_running) {
2478 2460 need_softintr = 0;
2479 2461 } else {
2480 2462 need_softintr = 1;
2481 2463 }
2482 2464
2483 2465 if (instance->isr_level == HIGH_LEVEL_INTR) {
2484 2466 if (need_softintr) {
2485 2467 ddi_trigger_softintr(instance->soft_intr_id);
2486 2468 }
2487 2469 } else {
2488 2470 /*
2489 2471 * Not a high-level interrupt, therefore call the soft level
2490 2472 * interrupt explicitly
2491 2473 */
2492 2474 (void) mrsas_softintr(instance);
2493 2475 }
2494 2476
2495 2477 return (DDI_INTR_CLAIMED);
2496 2478 }
2497 2479
2498 2480
2499 2481 /*
2500 2482 * ************************************************************************** *
2501 2483 * *
2502 2484 * libraries *
2503 2485 * *
2504 2486 * ************************************************************************** *
2505 2487 */
2506 2488 /*
2507 2489 * get_mfi_pkt : Get a command from the free pool
2508 2490 * After successful allocation, the caller of this routine
2509 2491 * must clear the frame buffer (memset to zero) before
2510 2492 * using the packet further.
2511 2493 *
2512 2494 * ***** Note *****
2513 2495 * After clearing the frame buffer the context id of the
2514 2496 * frame buffer SHOULD be restored back.
2515 2497 */
2516 2498 static struct mrsas_cmd *
2517 2499 get_mfi_pkt(struct mrsas_instance *instance)
2518 2500 {
2519 2501 mlist_t *head = &instance->cmd_pool_list;
2520 2502 struct mrsas_cmd *cmd = NULL;
2521 2503
2522 2504 mutex_enter(&instance->cmd_pool_mtx);
2523 2505
2524 2506 if (!mlist_empty(head)) {
2525 2507 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2526 2508 mlist_del_init(head->next);
2527 2509 }
2528 2510 if (cmd != NULL) {
2529 2511 cmd->pkt = NULL;
2530 2512 cmd->retry_count_for_ocr = 0;
2531 2513 cmd->drv_pkt_time = 0;
2532 2514
2533 2515 }
2534 2516 mutex_exit(&instance->cmd_pool_mtx);
2535 2517
2536 2518 return (cmd);
2537 2519 }
2538 2520
2539 2521 static struct mrsas_cmd *
2540 2522 get_mfi_app_pkt(struct mrsas_instance *instance)
2541 2523 {
2542 2524 mlist_t *head = &instance->app_cmd_pool_list;
2543 2525 struct mrsas_cmd *cmd = NULL;
2544 2526
2545 2527 mutex_enter(&instance->app_cmd_pool_mtx);
2546 2528
2547 2529 if (!mlist_empty(head)) {
2548 2530 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2549 2531 mlist_del_init(head->next);
2550 2532 }
2551 2533 if (cmd != NULL) {
2552 2534 cmd->pkt = NULL;
2553 2535 cmd->retry_count_for_ocr = 0;
2554 2536 cmd->drv_pkt_time = 0;
2555 2537 }
2556 2538
2557 2539 mutex_exit(&instance->app_cmd_pool_mtx);
2558 2540
2559 2541 return (cmd);
2560 2542 }
/*
 * return_mfi_pkt : Return a cmd to free command pool
 */
static void
return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->cmd_pool_mtx);
	/* use mlist_add_tail for debug assistance */
	mlist_add_tail(&cmd->list, &instance->cmd_pool_list);

	mutex_exit(&instance->cmd_pool_mtx);
}
2573 2555
/*
 * return_mfi_app_pkt : Return a cmd to the application (ioctl) free pool.
 *
 * NOTE(review): inserts at the list head (mlist_add) whereas
 * return_mfi_pkt() appends at the tail — confirm the asymmetry is
 * intentional before unifying.
 */
static void
return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	mutex_enter(&instance->app_cmd_pool_mtx);

	mlist_add(&cmd->list, &instance->app_cmd_pool_list);

	mutex_exit(&instance->app_cmd_pool_mtx);
}
/*
 * push_pending_mfi_pkt - move a command onto the pending list and arm
 * the per-command timeout used by io_timeout_checker().
 *
 * For sync (DCMD) commands the timeout is read from the frame header but
 * never allowed below debug_timeout_g; for SCSA I/O it is debug_timeout_g.
 * The io_timeout_checker() timeout() callback is started lazily here when
 * no timer is currently armed (timeout_id == (timeout_id_t)-1).
 */
void
push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;
	con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
	mutex_enter(&instance->cmd_pend_mtx);
	/* Re-link the command at the tail of the pending list. */
	mlist_del_init(&cmd->list);
	mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
	if (cmd->sync_cmd == MRSAS_TRUE) {
		hdr = (struct mrsas_header *)&cmd->frame->hdr;
		if (hdr) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x "
			    "time %llx",
			    (void *)cmd, cmd->index,
			    gethrtime()));
			/* Wait for specified interval  */
			cmd->drv_pkt_time = ddi_get16(
			    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
			/* Enforce the driver-wide minimum timeout. */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_pkt(): "
			    "Called IO Timeout Value %x\n",
			    cmd->drv_pkt_time));
		}
		/* Arm the 1-second timeout checker if not already running. */
		if (hdr && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	} else {
		pkt = cmd->pkt;
		if (pkt) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x pkt %p, "
			    "time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
		}
		/* Arm the 1-second timeout checker if not already running. */
		if (pkt && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	}

	mutex_exit(&instance->cmd_pend_mtx);

}
2635 2617
/*
 * mrsas_print_pending_cmds - debug dump of every command currently on the
 * pending list.
 *
 * Temporarily raises the global debug level to CL_ANN1 so the dump is
 * emitted, then restores it. The pending-list lock is taken only around
 * each list-pointer step, not across the whole walk — acceptable for a
 * best-effort diagnostic, since entries may move while logging.
 */
int
mrsas_print_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	unsigned int flag = 1;
	struct scsi_pkt *pkt;
	int saved_level;
	int cmd_count = 0;

	/* Force verbose logging for the duration of the dump. */
	saved_level = debug_level_g;
	debug_level_g = CL_ANN1;

	cmn_err(CE_NOTE, "mrsas_print_pending_cmds(): Called\n");

	while (flag) {
		mutex_enter(&instance->cmd_pend_mtx);
		tmp	=	tmp->next;
		if (tmp == head) {
			/* Wrapped back to the list head: walk complete. */
			mutex_exit(&instance->cmd_pend_mtx);
			flag = 0;
			con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():"
			    " NO MORE CMDS PENDING....\n"));
			break;
		} else {
			cmd = mlist_entry(tmp, struct mrsas_cmd, list);
			mutex_exit(&instance->cmd_pend_mtx);
			if (cmd) {
				if (cmd->sync_cmd == MRSAS_TRUE) {
					/* DCMD: no scsi_pkt attached. */
					hdr = (struct mrsas_header *)
					    &cmd->frame->hdr;
					if (hdr) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x (NO-PKT)"
						    " hdr %p\n", (void *)cmd,
						    cmd->index,
						    cmd->drv_pkt_time,
						    (void *)hdr));
					}
				} else {
					pkt = cmd->pkt;
					if (pkt) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x pkt %p \n",
						    (void *)cmd, cmd->index,
						    cmd->drv_pkt_time, (void *)pkt));
					}
				}

				/* Full detail (0xDD) only for the first cmd. */
				if (++cmd_count == 1) {
					mrsas_print_cmd_details(instance, cmd,
					    0xDD);
				} else {
					mrsas_print_cmd_details(instance, cmd,
					    1);
				}

			}
		}
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));


	/* Restore the caller's debug level. */
	debug_level_g = saved_level;

	return (DDI_SUCCESS);
}
2707 2689
2708 2690
/*
 * mrsas_complete_pending_cmds - fail every command still on the pending
 * list (used when the adapter is being declared unusable).
 *
 * SCSA I/O packets are completed back to the target driver with
 * CMD_DEV_GONE/STAT_DISCON; sync (DCMD) commands are completed to the
 * waiting application with MFI_STAT_INVALID_STATUS.
 */
int
mrsas_complete_pending_cmds(struct mrsas_instance *instance)
{

	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;

	struct mlist_head *pos, *next;

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_complete_pending_cmds(): Called"));

	mutex_enter(&instance->cmd_pend_mtx);
	/* Safe iteration: each entry is unlinked as it is completed. */
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);
		if (cmd) {
			pkt = cmd->pkt;
			if (pkt) { /* for IO */
				if (((pkt->pkt_flags & FLAG_NOINTR)
				    == 0) && pkt->pkt_comp) {
					pkt->pkt_reason
					    = CMD_DEV_GONE;
					pkt->pkt_statistics
					    = STAT_DISCON;
					con_log(CL_ANN1, (CE_CONT,
					    "fail and posting to scsa "
					    "cmd %p index %x"
					    " pkt %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)pkt, gethrtime()));
					(*pkt->pkt_comp)(pkt);
				}
			} else { /* for DCMDS */
				if (cmd->sync_cmd == MRSAS_TRUE) {
					hdr = (struct mrsas_header *)&cmd->frame->hdr;
					con_log(CL_ANN1, (CE_CONT,
					    "posting invalid status to application "
					    "cmd %p index %x"
					    " hdr %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)hdr, gethrtime()));
					/*
					 * NOTE(review): cmd_status is stored
					 * directly rather than via ddi_put8 —
					 * confirm frame access consistency.
					 */
					hdr->cmd_status = MFI_STAT_INVALID_STATUS;
					complete_cmd_in_sync_mode(instance, cmd);
				}
			}
			mlist_del_init(&cmd->list);
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_complete_pending_cmds:"
			    "NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_complete_pending_cmds:"
		    "looping for more commands\n"));
	}
	mutex_exit(&instance->cmd_pend_mtx);

	con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2772 2754
/*
 * mrsas_print_cmd_details - dump diagnostic state for one command.
 *
 * detail == 0xDD temporarily raises the global debug level to CL_ANN1 so
 * every con_log() below actually prints, and additionally dumps the MPI2
 * RAID SCSI IO request (CDB bytes and RAID context) on Thunderbolt
 * controllers.  Any other detail value prints only the basic summary at
 * the current debug level.
 */
void
mrsas_print_cmd_details(struct mrsas_instance *instance, struct mrsas_cmd *cmd,
    int detail)
{
	struct scsi_pkt *pkt = cmd->pkt;
	Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
	int i;
	int saved_level;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	/*
	 * saved_level is written only here and read only in the matching
	 * (detail == 0xDD) block at the end, so it is never used
	 * uninitialized.
	 */
	if (detail == 0xDD) {
		saved_level = debug_level_g;
		debug_level_g = CL_ANN1;
	}


	if (instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
	} else {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->drv_pkt_time));
	}

	if (pkt) {
		con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
		    pkt->pkt_cdbp[0]));
	} else {
		con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
	}

	/* Full MPI2 frame dump: Thunderbolt path, detail == 0xDD only. */
	if ((detail == 0xDD) && instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
		con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X "
		    "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
		    ddi_get16(acc_handle, &scsi_io->DevHandle),
		    ddi_get8(acc_handle, &scsi_io->Function),
		    ddi_get16(acc_handle, &scsi_io->IoFlags),
		    ddi_get16(acc_handle, &scsi_io->SGLFlags),
		    ddi_get32(acc_handle, &scsi_io->DataLength)));

		for (i = 0; i < 32; i++) {
			con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ", i,
			    ddi_get8(acc_handle, &scsi_io->CDB.CDB32[i])));
		}

		con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
		con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X "
		    "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X "
		    "RAIDFlags=0x%X regLockRowLBA=0x%" PRIu64
		    " regLockLength=0x%X spanArm=0x%X\n",
		    ddi_get8(acc_handle, &scsi_io->RaidContext.status),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.extStatus),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.ldTargetId),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.timeoutValue),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.regLockFlags),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.RAIDFlags),
		    ddi_get64(acc_handle, &scsi_io->RaidContext.regLockRowLBA),
		    ddi_get32(acc_handle, &scsi_io->RaidContext.regLockLength),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.spanArm)));
	}

	/* Restore the debug level raised at entry. */
	if (detail == 0xDD) {
		debug_level_g = saved_level;
	}
}
2842 2824
2843 2825
/*
 * mrsas_issue_pending_cmds - re-issue every command on the pending list.
 *
 * For each pending command: raise its timeout to at least debug_timeout_g,
 * count the retry, and re-issue it (sync mode for sync_cmd DCMDs, normal
 * issue otherwise).  If any command's retry_count_for_ocr exceeds
 * IO_RETRY_COUNT, the adapter is killed and DDI_FAILURE is returned;
 * otherwise DDI_SUCCESS.
 *
 * NOTE(review): cmd_pend_mtx is held only while reading the current node
 * and advancing tmp; the loop condition (tmp != head) and all uses of cmd
 * happen outside the lock.  Presumably safe because this runs
 * single-threaded during controller reset - confirm.
 */
int
mrsas_issue_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head->next;
	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
	while (tmp != head) {
		mutex_enter(&instance->cmd_pend_mtx);
		cmd = mlist_entry(tmp, struct mrsas_cmd, list);
		tmp = tmp->next;
		mutex_exit(&instance->cmd_pend_mtx);
		if (cmd) {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds(): "
			    "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
			    (void *)cmd, cmd->index, cmd->drv_pkt_time));

			/* Reset command timeout value */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

			cmd->retry_count_for_ocr++;

			cmn_err(CE_CONT, "cmd retry count = %d\n",
			    cmd->retry_count_for_ocr);

			/* Too many reset retries: give up and kill the HBA. */
			if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
				cmn_err(CE_WARN, "mrsas_issue_pending_cmds(): "
				    "cmd->retry_count exceeded limit >%d\n",
				    IO_RETRY_COUNT);
				mrsas_print_cmd_details(instance, cmd, 0xDD);

				cmn_err(CE_WARN,
				    "mrsas_issue_pending_cmds():"
				    "Calling KILL Adapter\n");
				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}

			pkt = cmd->pkt;
			if (pkt) {
				con_log(CL_ANN1, (CE_CONT,
				    "PENDING PKT-CMD ISSUE: cmd %p index %x "
				    "pkt %p time %llx",
				    (void *)cmd, cmd->index,
				    (void *)pkt,
				    gethrtime()));

			} else {
				cmn_err(CE_CONT,
				    "mrsas_issue_pending_cmds(): NO-PKT, "
				    "cmd %p index 0x%x drv_pkt_time 0x%x ",
				    (void *)cmd, cmd->index, cmd->drv_pkt_time);
			}


			/* Sync DCMDs must go back out in sync mode. */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				cmn_err(CE_CONT, "mrsas_issue_pending_cmds(): "
				    "SYNC_CMD == TRUE \n");
				instance->func_ptr->issue_cmd_in_sync_mode(
				    instance, cmd);
			} else {
				instance->func_ptr->issue_cmd(cmd, instance);
			}
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds: NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_issue_pending_cmds:"
		    "looping for more commands"));
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2925 2907
2926 2908
2927 2909
2928 2910 /*
2929 2911 * destroy_mfi_frame_pool
2930 2912 */
2931 2913 void
2932 2914 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2933 2915 {
2934 2916 int i;
2935 2917 uint32_t max_cmd = instance->max_fw_cmds;
2936 2918
2937 2919 struct mrsas_cmd *cmd;
2938 2920
2939 2921 /* return all frames to pool */
2940 2922
2941 2923 for (i = 0; i < max_cmd; i++) {
2942 2924
2943 2925 cmd = instance->cmd_list[i];
2944 2926
2945 2927 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2946 2928 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2947 2929
2948 2930 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
2949 2931 }
2950 2932
2951 2933 }
2952 2934
/*
 * create_mfi_frame_pool - allocate one DMA frame buffer per command.
 *
 * Each command gets a single physically-contiguous, 64-byte-aligned DMA
 * buffer sized MRMFI_FRAME_SIZE + SGL space (max_num_sge IEEE SGEs) +
 * SENSE_LENGTH, with the sense area placed in the last SENSE_LENGTH
 * bytes.  On any allocation failure the partially-built pool is torn
 * down via destroy_mfi_frame_pool().
 *
 * NOTE(review): failure paths return either DDI_FAILURE or ENOMEM;
 * callers only test for non-zero, so both behave as failure - confirm
 * before unifying.
 */
int
create_mfi_frame_pool(struct mrsas_instance *instance)
{
	int i = 0;
	int cookie_cnt;
	uint16_t max_cmd;
	uint16_t sge_sz;
	uint32_t sgl_sz;
	uint32_t tot_frame_size;
	struct mrsas_cmd *cmd;
	int retval = DDI_SUCCESS;

	max_cmd = instance->max_fw_cmds;
	sge_sz = sizeof (struct mrsas_sge_ieee);
	/* calculated the number of 64byte frames required for SGL */
	sgl_sz = sge_sz * instance->max_num_sge;
	tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
	    "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));

	while (i < max_cmd) {
		cmd = instance->cmd_list[i];

		/* One cookie, 64-byte aligned, 32-bit addressable. */
		cmd->frame_dma_obj.size = tot_frame_size;
		cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
		cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
		cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
		cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;

		cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
		    (uchar_t)DDI_STRUCTURE_LE_ACC);

		/* The whole frame must land in exactly one DMA cookie. */
		if (cookie_cnt == -1 || cookie_cnt > 1) {
			cmn_err(CE_WARN,
			    "create_mfi_frame_pool: could not alloc.");
			retval = DDI_FAILURE;
			goto mrsas_undo_frame_pool;
		}

		bzero(cmd->frame_dma_obj.buffer, tot_frame_size);

		cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
		cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
		cmd->frame_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address;

		/* Sense buffer occupies the last SENSE_LENGTH bytes. */
		cmd->sense = (uint8_t *)(((unsigned long)
		    cmd->frame_dma_obj.buffer) +
		    tot_frame_size - SENSE_LENGTH);
		cmd->sense_phys_addr =
		    cmd->frame_dma_obj.dma_cookie[0].dmac_address +
		    tot_frame_size - SENSE_LENGTH;

		if (!cmd->frame || !cmd->sense) {
			cmn_err(CE_WARN,
			    "mr_sas: pci_pool_alloc failed");
			retval = ENOMEM;
			goto mrsas_undo_frame_pool;
		}

		/* Stamp the command index into the frame's context field. */
		ddi_put32(cmd->frame_dma_obj.acc_handle,
		    &cmd->frame->io.context, cmd->index);
		i++;

		con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
		    cmd->index, cmd->frame_phys_addr));
	}

	return (DDI_SUCCESS);

mrsas_undo_frame_pool:
	/* Free whatever was allocated before the failure. */
	if (i > 0)
		destroy_mfi_frame_pool(instance);

	return (retval);
}
3034 3016
3035 3017 /*
3036 3018 * free_additional_dma_buffer
3037 3019 */
3038 3020 static void
3039 3021 free_additional_dma_buffer(struct mrsas_instance *instance)
3040 3022 {
3041 3023 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
3042 3024 (void) mrsas_free_dma_obj(instance,
3043 3025 instance->mfi_internal_dma_obj);
3044 3026 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
3045 3027 }
3046 3028
3047 3029 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
3048 3030 (void) mrsas_free_dma_obj(instance,
3049 3031 instance->mfi_evt_detail_obj);
3050 3032 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
3051 3033 }
3052 3034 }
3053 3035
/*
 * alloc_additional_dma_buffer - allocate the shared internal DMA buffer
 * and the AEN event-detail DMA buffer.
 *
 * Layout of the two-page internal buffer:
 *   [0..3]           producer index
 *   [4..7]           consumer index
 *   [8..]            reply queue (max_fw_cmds + 1 + 2 uint32_t entries)
 *   [reply_q_sz+8..] internal_buf, used for DCMD data transfers
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE (after undoing the internal buffer
 * if the event-detail allocation is what failed).
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t reply_q_sz;
	uint32_t internal_buf_size = PAGESIZE*2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	/* Single-cookie, 32-bit addressable allocation. */
	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* Carve the buffer into its fixed-offset regions (layout above). */
	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    (reply_q_sz + 8);
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto mrsas_undo_internal_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);

mrsas_undo_internal_buff:
	/* Unwind: release the internal buffer allocated above. */
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}
3129 3111
3130 3112
3131 3113 void
3132 3114 mrsas_free_cmd_pool(struct mrsas_instance *instance)
3133 3115 {
3134 3116 int i;
3135 3117 uint32_t max_cmd;
3136 3118 size_t sz;
3137 3119
3138 3120 /* already freed */
3139 3121 if (instance->cmd_list == NULL) {
3140 3122 return;
3141 3123 }
3142 3124
3143 3125 max_cmd = instance->max_fw_cmds;
3144 3126
3145 3127 /* size of cmd_list array */
3146 3128 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3147 3129
3148 3130 /* First free each cmd */
3149 3131 for (i = 0; i < max_cmd; i++) {
3150 3132 if (instance->cmd_list[i] != NULL) {
3151 3133 kmem_free(instance->cmd_list[i],
3152 3134 sizeof (struct mrsas_cmd));
3153 3135 }
3154 3136
3155 3137 instance->cmd_list[i] = NULL;
3156 3138 }
3157 3139
3158 3140 /* Now, free cmd_list array */
3159 3141 if (instance->cmd_list != NULL)
3160 3142 kmem_free(instance->cmd_list, sz);
3161 3143
3162 3144 instance->cmd_list = NULL;
3163 3145
3164 3146 INIT_LIST_HEAD(&instance->cmd_pool_list);
3165 3147 INIT_LIST_HEAD(&instance->cmd_pend_list);
3166 3148 if (instance->tbolt) {
3167 3149 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
3168 3150 } else {
3169 3151 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3170 3152 }
3171 3153
3172 3154 }
3173 3155
3174 3156
3175 3157 /*
3176 3158 * mrsas_alloc_cmd_pool
3177 3159 */
3178 3160 int
3179 3161 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3180 3162 {
3181 3163 int i;
3182 3164 int count;
3183 3165 uint32_t max_cmd;
3184 3166 uint32_t reserve_cmd;
3185 3167 size_t sz;
3186 3168
3187 3169 struct mrsas_cmd *cmd;
3188 3170
3189 3171 max_cmd = instance->max_fw_cmds;
3190 3172 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3191 3173 "max_cmd %x", max_cmd));
|
↓ open down ↓ |
1699 lines elided |
↑ open up ↑ |
3192 3174
3193 3175
3194 3176 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3195 3177
3196 3178 /*
3197 3179 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3198 3180 * Allocate the dynamic array first and then allocate individual
3199 3181 * commands.
3200 3182 */
3201 3183 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3202 - if (instance->cmd_list == NULL) {
3203 - con_log(CL_NONE, (CE_WARN,
3204 - "Failed to allocate memory for cmd_list"));
3205 - return (DDI_FAILURE);
3206 - }
3184 + ASSERT(instance->cmd_list);
3207 3185
3208 3186 /* create a frame pool and assign one frame to each cmd */
3209 3187 for (count = 0; count < max_cmd; count++) {
3210 3188 instance->cmd_list[count] =
3211 3189 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3212 - if (instance->cmd_list[count] == NULL) {
3213 - con_log(CL_NONE, (CE_WARN,
3214 - "Failed to allocate memory for mrsas_cmd"));
3215 - goto mrsas_undo_cmds;
3216 - }
3190 + ASSERT(instance->cmd_list[count]);
3217 3191 }
3218 3192
3219 3193 /* add all the commands to command pool */
3220 3194
3221 3195 INIT_LIST_HEAD(&instance->cmd_pool_list);
3222 3196 INIT_LIST_HEAD(&instance->cmd_pend_list);
3223 3197 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3224 3198
3225 3199 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
3226 3200
3227 3201 for (i = 0; i < reserve_cmd; i++) {
3228 3202 cmd = instance->cmd_list[i];
3229 3203 cmd->index = i;
3230 3204 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3231 3205 }
3232 3206
3233 3207
3234 3208 for (i = reserve_cmd; i < max_cmd; i++) {
3235 3209 cmd = instance->cmd_list[i];
3236 3210 cmd->index = i;
3237 3211 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3238 3212 }
3239 3213
3240 3214 return (DDI_SUCCESS);
3241 3215
3242 3216 mrsas_undo_cmds:
3243 3217 if (count > 0) {
3244 3218 /* free each cmd */
3245 3219 for (i = 0; i < count; i++) {
3246 3220 if (instance->cmd_list[i] != NULL) {
3247 3221 kmem_free(instance->cmd_list[i],
3248 3222 sizeof (struct mrsas_cmd));
3249 3223 }
3250 3224 instance->cmd_list[i] = NULL;
3251 3225 }
3252 3226 }
3253 3227
3254 3228 mrsas_undo_cmd_list:
3255 3229 if (instance->cmd_list != NULL)
3256 3230 kmem_free(instance->cmd_list, sz);
3257 3231 instance->cmd_list = NULL;
3258 3232
3259 3233 return (DDI_FAILURE);
3260 3234 }
3261 3235
3262 3236
3263 3237 /*
3264 3238 * free_space_for_mfi
3265 3239 */
3266 3240 static void
3267 3241 free_space_for_mfi(struct mrsas_instance *instance)
3268 3242 {
3269 3243
3270 3244 /* already freed */
3271 3245 if (instance->cmd_list == NULL) {
3272 3246 return;
3273 3247 }
3274 3248
3275 3249 /* Free additional dma buffer */
3276 3250 free_additional_dma_buffer(instance);
3277 3251
3278 3252 /* Free the MFI frame pool */
3279 3253 destroy_mfi_frame_pool(instance);
3280 3254
3281 3255 /* Free all the commands in the cmd_list */
3282 3256 /* Free the cmd_list buffer itself */
3283 3257 mrsas_free_cmd_pool(instance);
3284 3258 }
3285 3259
3286 3260 /*
3287 3261 * alloc_space_for_mfi
3288 3262 */
3289 3263 static int
3290 3264 alloc_space_for_mfi(struct mrsas_instance *instance)
3291 3265 {
3292 3266 /* Allocate command pool (memory for cmd_list & individual commands) */
3293 3267 if (mrsas_alloc_cmd_pool(instance)) {
3294 3268 cmn_err(CE_WARN, "error creating cmd pool");
3295 3269 return (DDI_FAILURE);
3296 3270 }
3297 3271
3298 3272 /* Allocate MFI Frame pool */
3299 3273 if (create_mfi_frame_pool(instance)) {
3300 3274 cmn_err(CE_WARN, "error creating frame DMA pool");
3301 3275 goto mfi_undo_cmd_pool;
3302 3276 }
3303 3277
3304 3278 /* Allocate additional DMA buffer */
3305 3279 if (alloc_additional_dma_buffer(instance)) {
3306 3280 cmn_err(CE_WARN, "error creating frame DMA pool");
3307 3281 goto mfi_undo_frame_pool;
3308 3282 }
3309 3283
3310 3284 return (DDI_SUCCESS);
3311 3285
3312 3286 mfi_undo_frame_pool:
3313 3287 destroy_mfi_frame_pool(instance);
3314 3288
3315 3289 mfi_undo_cmd_pool:
3316 3290 mrsas_free_cmd_pool(instance);
3317 3291
3318 3292 return (DDI_FAILURE);
3319 3293 }
3320 3294
3321 3295
3322 3296
3323 3297 /*
3324 3298 * get_ctrl_info
3325 3299 */
3326 3300 static int
3327 3301 get_ctrl_info(struct mrsas_instance *instance,
3328 3302 struct mrsas_ctrl_info *ctrl_info)
3329 3303 {
3330 3304 int ret = 0;
3331 3305
3332 3306 struct mrsas_cmd *cmd;
3333 3307 struct mrsas_dcmd_frame *dcmd;
3334 3308 struct mrsas_ctrl_info *ci;
3335 3309
3336 3310 if (instance->tbolt) {
3337 3311 cmd = get_raid_msg_mfi_pkt(instance);
3338 3312 } else {
3339 3313 cmd = get_mfi_pkt(instance);
3340 3314 }
3341 3315
3342 3316 if (!cmd) {
3343 3317 con_log(CL_ANN, (CE_WARN,
3344 3318 "Failed to get a cmd for ctrl info"));
3345 3319 DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
3346 3320 uint16_t, instance->max_fw_cmds);
3347 3321 return (DDI_FAILURE);
3348 3322 }
3349 3323
3350 3324 /* Clear the frame buffer and assign back the context id */
3351 3325 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3352 3326 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3353 3327 cmd->index);
3354 3328
3355 3329 dcmd = &cmd->frame->dcmd;
3356 3330
3357 3331 ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3358 3332
3359 3333 if (!ci) {
3360 3334 cmn_err(CE_WARN,
3361 3335 "Failed to alloc mem for ctrl info");
3362 3336 return_mfi_pkt(instance, cmd);
3363 3337 return (DDI_FAILURE);
3364 3338 }
3365 3339
3366 3340 (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3367 3341
3368 3342 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
3369 3343 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3370 3344
3371 3345 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3372 3346 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3373 3347 MFI_CMD_STATUS_POLL_MODE);
3374 3348 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3375 3349 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3376 3350 MFI_FRAME_DIR_READ);
3377 3351 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3378 3352 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3379 3353 sizeof (struct mrsas_ctrl_info));
3380 3354 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3381 3355 MR_DCMD_CTRL_GET_INFO);
3382 3356 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3383 3357 instance->internal_buf_dmac_add);
3384 3358 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3385 3359 sizeof (struct mrsas_ctrl_info));
3386 3360
3387 3361 cmd->frame_count = 1;
3388 3362
3389 3363 if (instance->tbolt) {
3390 3364 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3391 3365 }
3392 3366
3393 3367 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3394 3368 ret = 0;
3395 3369
3396 3370 ctrl_info->max_request_size = ddi_get32(
3397 3371 cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
3398 3372
3399 3373 ctrl_info->ld_present_count = ddi_get16(
3400 3374 cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
3401 3375
3402 3376 ctrl_info->properties.on_off_properties = ddi_get32(
3403 3377 cmd->frame_dma_obj.acc_handle,
3404 3378 &ci->properties.on_off_properties);
3405 3379 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3406 3380 (uint8_t *)(ctrl_info->product_name),
3407 3381 (uint8_t *)(ci->product_name), 80 * sizeof (char),
3408 3382 DDI_DEV_AUTOINCR);
3409 3383 /* should get more members of ci with ddi_get when needed */
3410 3384 } else {
3411 3385 cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed");
3412 3386 ret = -1;
3413 3387 }
3414 3388
3415 3389 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3416 3390 ret = -1;
3417 3391 }
3418 3392 if (instance->tbolt) {
3419 3393 return_raid_msg_mfi_pkt(instance, cmd);
3420 3394 } else {
3421 3395 return_mfi_pkt(instance, cmd);
3422 3396 }
3423 3397
3424 3398 return (ret);
3425 3399 }
3426 3400
/*
 * abort_aen_cmd - issue an MFI ABORT frame for the outstanding AEN command.
 *
 * Builds an abort frame referencing cmd_to_abort's context id and frame
 * physical address, issues it in polled mode, then clears
 * instance->aen_cmd and decrements fw_outstanding for the aborted AEN.
 * Returns DDI_FAILURE when no packet is available, otherwise 0 on
 * success or -1 when the polled issue fails.
 */
static int
abort_aen_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd_to_abort)
{
	int ret = 0;

	struct mrsas_cmd *cmd;
	struct mrsas_abort_frame *abort_fr;

	con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));

	/* Thunderbolt controllers use the RAID-message MFI packet pool. */
	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = get_mfi_pkt(instance);
	}

	if (!cmd) {
		con_log(CL_ANN1, (CE_WARN,
		    "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
		DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	abort_fr = &cmd->frame->abort;

	/* prepare and issue the abort frame */
	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->cmd, MFI_CMD_OP_ABORT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
	    MFI_CMD_STATUS_SYNC_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
	    cmd_to_abort->index);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &abort_fr->abort_mfi_phys_addr_hi, 0);

	/* Flag the AEN command as being aborted before issuing the frame. */
	instance->aen_cmd->abort_aen = 1;

	cmd->frame_count = 1;

	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
		ret = -1;
	} else {
		ret = 0;
	}

	/*
	 * NOTE(review): abort_aen was already set to 1 above; this second
	 * assignment appears redundant - confirm before removing.
	 */
	instance->aen_cmd->abort_aen = 1;
	instance->aen_cmd = 0;

	if (instance->tbolt) {
		return_raid_msg_mfi_pkt(instance, cmd);
	} else {
		return_mfi_pkt(instance, cmd);
	}

	/* The aborted AEN command is no longer outstanding in firmware. */
	atomic_add_16(&instance->fw_outstanding, (-1));

	return (ret);
}
3504 3478
3505 3479
/*
 * mrsas_build_init_cmd - build an MFI INIT frame in *cmd_ptr's frame buffer.
 *
 * Since INIT needs no SGL, the frame's SGL area (64 bytes into the frame)
 * is reused as the mrsas_init_queue_info structure.  The queue info
 * carries the physical addresses of the producer index, consumer index
 * and reply queue inside the internal DMA buffer (offsets 0, 4 and 8 -
 * see alloc_additional_dma_buffer).  A driver-version DMA buffer is also
 * allocated here and its physical address placed in the frame.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the driver-version DMA buffer
 * cannot be allocated.
 */
static int
mrsas_build_init_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd **cmd_ptr)
{
	struct mrsas_cmd *cmd;
	struct mrsas_init_frame *init_frame;
	struct mrsas_init_queue_info *initq_info;
	struct mrsas_drv_ver drv_ver_info;


	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	cmd = *cmd_ptr;


	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	/* Reply queue has one slot per firmware command plus one. */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);

	/* Producer index lives at offset 0 of the internal DMA buffer. */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);

	/* Consumer index at offset 4. */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	/* Reply queue itself starts at offset 8. */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	/* Queue info was placed 64 bytes into this same frame (above). */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);


	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "init_mfi : Could not allocate driver version buffer."));
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	(void) memset(instance->drv_ver_dma_obj.buffer, 0,
	    sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);


	/* copy driver version physical address to init frame */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &init_frame->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	*cmd_ptr = cmd;

	return (DDI_SUCCESS);
}
3611 3585
3612 3586
3613 3587 /*
3614 3588 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3615 3589 */
3616 3590 int
3617 3591 mrsas_init_adapter_ppc(struct mrsas_instance *instance)
3618 3592 {
3619 3593 struct mrsas_cmd *cmd;
3620 3594
3621 3595 /*
3622 3596 * allocate memory for mfi adapter(cmd pool, individual commands, mfi
3623 3597 * frames etc
3624 3598 */
3625 3599 if (alloc_space_for_mfi(instance) != DDI_SUCCESS) {
3626 3600 con_log(CL_ANN, (CE_NOTE,
3627 3601 "Error, failed to allocate memory for MFI adapter"));
3628 3602 return (DDI_FAILURE);
3629 3603 }
3630 3604
3631 3605 /* Build INIT command */
3632 3606 cmd = get_mfi_pkt(instance);
3633 3607
3634 3608 if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
3635 3609 con_log(CL_ANN,
3636 3610 (CE_NOTE, "Error, failed to build INIT command"));
3637 3611
3638 3612 goto fail_undo_alloc_mfi_space;
3639 3613 }
3640 3614
3641 3615 /*
3642 3616 * Disable interrupt before sending init frame ( see linux driver code)
3643 3617 * send INIT MFI frame in polled mode
3644 3618 */
3645 3619 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3646 3620 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3647 3621 goto fail_fw_init;
3648 3622 }
3649 3623
3650 3624 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
3651 3625 goto fail_fw_init;
3652 3626 return_mfi_pkt(instance, cmd);
3653 3627
3654 3628 if (ctio_enable &&
3655 3629 (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
3656 3630 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3657 3631 instance->flag_ieee = 1;
3658 3632 } else {
3659 3633 instance->flag_ieee = 0;
3660 3634 }
3661 3635
3662 3636 instance->unroll.alloc_space_mfi = 1;
3663 3637 instance->unroll.verBuff = 1;
3664 3638
3665 3639 return (DDI_SUCCESS);
3666 3640
3667 3641
3668 3642 fail_fw_init:
3669 3643 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3670 3644
3671 3645 fail_undo_alloc_mfi_space:
3672 3646 return_mfi_pkt(instance, cmd);
3673 3647 free_space_for_mfi(instance);
3674 3648
3675 3649 return (DDI_FAILURE);
3676 3650
3677 3651 }
3678 3652
3679 3653 /*
3680 3654 * mrsas_init_adapter - Initialize adapter.
3681 3655 */
3682 3656 int
3683 3657 mrsas_init_adapter(struct mrsas_instance *instance)
3684 3658 {
3685 3659 struct mrsas_ctrl_info ctrl_info;
3686 3660
3687 3661
3688 3662 /* we expect the FW state to be READY */
3689 3663 if (mfi_state_transition_to_ready(instance)) {
3690 3664 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3691 3665 return (DDI_FAILURE);
3692 3666 }
3693 3667
3694 3668 /* get various operational parameters from status register */
3695 3669 instance->max_num_sge =
3696 3670 (instance->func_ptr->read_fw_status_reg(instance) &
3697 3671 0xFF0000) >> 0x10;
3698 3672 instance->max_num_sge =
3699 3673 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3700 3674 MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3701 3675
3702 3676 /*
3703 3677 * Reduce the max supported cmds by 1. This is to ensure that the
3704 3678 * reply_q_sz (1 more than the max cmd that driver may send)
3705 3679 * does not exceed max cmds that the FW can support
3706 3680 */
3707 3681 instance->max_fw_cmds =
3708 3682 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3709 3683 instance->max_fw_cmds = instance->max_fw_cmds - 1;
3710 3684
3711 3685
3712 3686
3713 3687 /* Initialize adapter */
3714 3688 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3715 3689 con_log(CL_ANN,
3716 3690 (CE_WARN, "mr_sas: could not initialize adapter"));
3717 3691 return (DDI_FAILURE);
3718 3692 }
3719 3693
3720 3694 /* gather misc FW related information */
3721 3695 instance->disable_online_ctrl_reset = 0;
3722 3696
|
↓ open down ↓ |
496 lines elided |
↑ open up ↑ |
3723 3697 if (!get_ctrl_info(instance, &ctrl_info)) {
3724 3698 instance->max_sectors_per_req = ctrl_info.max_request_size;
3725 3699 con_log(CL_ANN1, (CE_NOTE,
3726 3700 "product name %s ld present %d",
3727 3701 ctrl_info.product_name, ctrl_info.ld_present_count));
3728 3702 } else {
3729 3703 instance->max_sectors_per_req = instance->max_num_sge *
3730 3704 PAGESIZE / 512;
3731 3705 }
3732 3706
3733 - if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG) {
3707 + if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG)
3734 3708 instance->disable_online_ctrl_reset = 1;
3735 - con_log(CL_ANN1,
3736 - (CE_NOTE, "Disable online control Flag is set\n"));
3737 - } else {
3738 - con_log(CL_ANN1,
3739 - (CE_NOTE, "Disable online control Flag is not set\n"));
3740 - }
3741 3709
3742 3710 return (DDI_SUCCESS);
3743 3711
3744 3712 }
3745 3713
3746 3714
3747 3715
3748 3716 static int
3749 3717 mrsas_issue_init_mfi(struct mrsas_instance *instance)
3750 3718 {
3751 3719 struct mrsas_cmd *cmd;
3752 3720 struct mrsas_init_frame *init_frame;
3753 3721 struct mrsas_init_queue_info *initq_info;
3754 3722
3755 3723 /*
3756 3724 * Prepare a init frame. Note the init frame points to queue info
3757 3725 * structure. Each frame has SGL allocated after first 64 bytes. For
3758 3726 * this frame - since we don't need any SGL - we use SGL's space as
3759 3727 * queue info structure
3760 3728 */
3761 3729 con_log(CL_ANN1, (CE_NOTE,
3762 3730 "mrsas_issue_init_mfi: entry\n"));
3763 3731 cmd = get_mfi_app_pkt(instance);
3764 3732
3765 3733 if (!cmd) {
3766 3734 con_log(CL_ANN1, (CE_WARN,
3767 3735 "mrsas_issue_init_mfi: get_pkt failed\n"));
3768 3736 return (DDI_FAILURE);
3769 3737 }
3770 3738
3771 3739 /* Clear the frame buffer and assign back the context id */
3772 3740 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3773 3741 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3774 3742 cmd->index);
3775 3743
3776 3744 init_frame = (struct mrsas_init_frame *)cmd->frame;
3777 3745 initq_info = (struct mrsas_init_queue_info *)
3778 3746 ((unsigned long)init_frame + 64);
3779 3747
3780 3748 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3781 3749 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3782 3750
3783 3751 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3784 3752
3785 3753 ddi_put32(cmd->frame_dma_obj.acc_handle,
3786 3754 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3787 3755 ddi_put32(cmd->frame_dma_obj.acc_handle,
3788 3756 &initq_info->producer_index_phys_addr_hi, 0);
3789 3757 ddi_put32(cmd->frame_dma_obj.acc_handle,
3790 3758 &initq_info->producer_index_phys_addr_lo,
3791 3759 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3792 3760 ddi_put32(cmd->frame_dma_obj.acc_handle,
3793 3761 &initq_info->consumer_index_phys_addr_hi, 0);
3794 3762 ddi_put32(cmd->frame_dma_obj.acc_handle,
3795 3763 &initq_info->consumer_index_phys_addr_lo,
3796 3764 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3797 3765
3798 3766 ddi_put32(cmd->frame_dma_obj.acc_handle,
3799 3767 &initq_info->reply_queue_start_phys_addr_hi, 0);
3800 3768 ddi_put32(cmd->frame_dma_obj.acc_handle,
3801 3769 &initq_info->reply_queue_start_phys_addr_lo,
3802 3770 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3803 3771
3804 3772 ddi_put8(cmd->frame_dma_obj.acc_handle,
3805 3773 &init_frame->cmd, MFI_CMD_OP_INIT);
3806 3774 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3807 3775 MFI_CMD_STATUS_POLL_MODE);
3808 3776 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3809 3777 ddi_put32(cmd->frame_dma_obj.acc_handle,
3810 3778 &init_frame->queue_info_new_phys_addr_lo,
3811 3779 cmd->frame_phys_addr + 64);
3812 3780 ddi_put32(cmd->frame_dma_obj.acc_handle,
3813 3781 &init_frame->queue_info_new_phys_addr_hi, 0);
3814 3782
3815 3783 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3816 3784 sizeof (struct mrsas_init_queue_info));
3817 3785
3818 3786 cmd->frame_count = 1;
3819 3787
3820 3788 /* issue the init frame in polled mode */
3821 3789 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3822 3790 con_log(CL_ANN1, (CE_WARN,
3823 3791 "mrsas_issue_init_mfi():failed to "
3824 3792 "init firmware"));
3825 3793 return_mfi_app_pkt(instance, cmd);
3826 3794 return (DDI_FAILURE);
3827 3795 }
3828 3796
3829 3797 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3830 3798 return_mfi_pkt(instance, cmd);
3831 3799 return (DDI_FAILURE);
3832 3800 }
3833 3801
3834 3802 return_mfi_app_pkt(instance, cmd);
3835 3803 con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));
3836 3804
3837 3805 return (DDI_SUCCESS);
3838 3806 }
3839 3807 /*
3840 3808 * mfi_state_transition_to_ready : Move the FW to READY state
3841 3809 *
3842 3810 * @reg_set : MFI register set
3843 3811 */
3844 3812 int
3845 3813 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3846 3814 {
3847 3815 int i;
3848 3816 uint8_t max_wait;
3849 3817 uint32_t fw_ctrl = 0;
3850 3818 uint32_t fw_state;
3851 3819 uint32_t cur_state;
3852 3820 uint32_t cur_abs_reg_val;
3853 3821 uint32_t prev_abs_reg_val;
3854 3822 uint32_t status;
3855 3823
3856 3824 cur_abs_reg_val =
3857 3825 instance->func_ptr->read_fw_status_reg(instance);
3858 3826 fw_state =
3859 3827 cur_abs_reg_val & MFI_STATE_MASK;
3860 3828 con_log(CL_ANN1, (CE_CONT,
3861 3829 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3862 3830
3863 3831 while (fw_state != MFI_STATE_READY) {
3864 3832 con_log(CL_ANN, (CE_CONT,
3865 3833 "mfi_state_transition_to_ready:FW state%x", fw_state));
3866 3834
3867 3835 switch (fw_state) {
3868 3836 case MFI_STATE_FAULT:
3869 3837 con_log(CL_ANN, (CE_NOTE,
3870 3838 "mr_sas: FW in FAULT state!!"));
3871 3839
3872 3840 return (ENODEV);
3873 3841 case MFI_STATE_WAIT_HANDSHAKE:
3874 3842 /* set the CLR bit in IMR0 */
3875 3843 con_log(CL_ANN1, (CE_NOTE,
3876 3844 "mr_sas: FW waiting for HANDSHAKE"));
3877 3845 /*
3878 3846 * PCI_Hot Plug: MFI F/W requires
3879 3847 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3880 3848 * to be set
3881 3849 */
3882 3850 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3883 3851 if (!instance->tbolt) {
3884 3852 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3885 3853 MFI_INIT_HOTPLUG, instance);
3886 3854 } else {
3887 3855 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3888 3856 MFI_INIT_HOTPLUG, instance);
3889 3857 }
3890 3858 max_wait = (instance->tbolt == 1) ? 180 : 2;
3891 3859 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3892 3860 break;
3893 3861 case MFI_STATE_BOOT_MESSAGE_PENDING:
3894 3862 /* set the CLR bit in IMR0 */
3895 3863 con_log(CL_ANN1, (CE_NOTE,
3896 3864 "mr_sas: FW state boot message pending"));
3897 3865 /*
3898 3866 * PCI_Hot Plug: MFI F/W requires
3899 3867 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3900 3868 * to be set
3901 3869 */
3902 3870 if (!instance->tbolt) {
3903 3871 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3904 3872 } else {
3905 3873 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3906 3874 instance);
3907 3875 }
3908 3876 max_wait = (instance->tbolt == 1) ? 180 : 10;
3909 3877 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3910 3878 break;
3911 3879 case MFI_STATE_OPERATIONAL:
3912 3880 /* bring it to READY state; assuming max wait 2 secs */
3913 3881 instance->func_ptr->disable_intr(instance);
3914 3882 con_log(CL_ANN1, (CE_NOTE,
3915 3883 "mr_sas: FW in OPERATIONAL state"));
3916 3884 /*
3917 3885 * PCI_Hot Plug: MFI F/W requires
3918 3886 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3919 3887 * to be set
3920 3888 */
3921 3889 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3922 3890 if (!instance->tbolt) {
3923 3891 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3924 3892 } else {
3925 3893 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3926 3894 instance);
3927 3895
3928 3896 for (i = 0; i < (10 * 1000); i++) {
3929 3897 status =
3930 3898 RD_RESERVED0_REGISTER(instance);
3931 3899 if (status & 1) {
3932 3900 delay(1 *
3933 3901 drv_usectohz(MILLISEC));
3934 3902 } else {
3935 3903 break;
3936 3904 }
3937 3905 }
3938 3906
3939 3907 }
3940 3908 max_wait = (instance->tbolt == 1) ? 180 : 10;
3941 3909 cur_state = MFI_STATE_OPERATIONAL;
3942 3910 break;
3943 3911 case MFI_STATE_UNDEFINED:
3944 3912 /* this state should not last for more than 2 seconds */
3945 3913 con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3946 3914
3947 3915 max_wait = (instance->tbolt == 1) ? 180 : 2;
3948 3916 cur_state = MFI_STATE_UNDEFINED;
3949 3917 break;
3950 3918 case MFI_STATE_BB_INIT:
3951 3919 max_wait = (instance->tbolt == 1) ? 180 : 2;
3952 3920 cur_state = MFI_STATE_BB_INIT;
3953 3921 break;
3954 3922 case MFI_STATE_FW_INIT:
3955 3923 max_wait = (instance->tbolt == 1) ? 180 : 2;
3956 3924 cur_state = MFI_STATE_FW_INIT;
3957 3925 break;
3958 3926 case MFI_STATE_FW_INIT_2:
3959 3927 max_wait = 180;
3960 3928 cur_state = MFI_STATE_FW_INIT_2;
3961 3929 break;
3962 3930 case MFI_STATE_DEVICE_SCAN:
3963 3931 max_wait = 180;
3964 3932 cur_state = MFI_STATE_DEVICE_SCAN;
3965 3933 prev_abs_reg_val = cur_abs_reg_val;
3966 3934 con_log(CL_NONE, (CE_NOTE,
3967 3935 "Device scan in progress ...\n"));
3968 3936 break;
3969 3937 case MFI_STATE_FLUSH_CACHE:
3970 3938 max_wait = 180;
3971 3939 cur_state = MFI_STATE_FLUSH_CACHE;
3972 3940 break;
3973 3941 default:
3974 3942 con_log(CL_ANN1, (CE_NOTE,
3975 3943 "mr_sas: Unknown state 0x%x", fw_state));
3976 3944 return (ENODEV);
3977 3945 }
3978 3946
3979 3947 /* the cur_state should not last for more than max_wait secs */
3980 3948 for (i = 0; i < (max_wait * MILLISEC); i++) {
3981 3949 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
3982 3950 cur_abs_reg_val =
3983 3951 instance->func_ptr->read_fw_status_reg(instance);
3984 3952 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3985 3953
3986 3954 if (fw_state == cur_state) {
3987 3955 delay(1 * drv_usectohz(MILLISEC));
3988 3956 } else {
3989 3957 break;
3990 3958 }
3991 3959 }
3992 3960 if (fw_state == MFI_STATE_DEVICE_SCAN) {
3993 3961 if (prev_abs_reg_val != cur_abs_reg_val) {
3994 3962 continue;
3995 3963 }
3996 3964 }
3997 3965
3998 3966 /* return error if fw_state hasn't changed after max_wait */
3999 3967 if (fw_state == cur_state) {
4000 3968 con_log(CL_ANN1, (CE_WARN,
4001 3969 "FW state hasn't changed in %d secs", max_wait));
4002 3970 return (ENODEV);
4003 3971 }
4004 3972 };
4005 3973
4006 3974 if (!instance->tbolt) {
4007 3975 fw_ctrl = RD_IB_DOORBELL(instance);
4008 3976 con_log(CL_ANN1, (CE_CONT,
4009 3977 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
4010 3978
4011 3979 /*
4012 3980 * Write 0xF to the doorbell register to do the following.
|
↓ open down ↓ |
262 lines elided |
↑ open up ↑ |
4013 3981 * - Abort all outstanding commands (bit 0).
4014 3982 * - Transition from OPERATIONAL to READY state (bit 1).
4015 3983 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
4016 3984 * - Set to release FW to continue running (i.e. BIOS handshake
4017 3985 * (bit 3).
4018 3986 */
4019 3987 WR_IB_DOORBELL(0xF, instance);
4020 3988 }
4021 3989
4022 3990 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
4023 - return (ENODEV);
3991 + return (EIO);
4024 3992 }
4025 3993
4026 3994 return (DDI_SUCCESS);
4027 3995 }
4028 3996
4029 3997 /*
4030 3998 * get_seq_num
4031 3999 */
4032 4000 static int
4033 4001 get_seq_num(struct mrsas_instance *instance,
4034 4002 struct mrsas_evt_log_info *eli)
4035 4003 {
4036 4004 int ret = DDI_SUCCESS;
4037 4005
4038 4006 dma_obj_t dcmd_dma_obj;
4039 4007 struct mrsas_cmd *cmd;
4040 4008 struct mrsas_dcmd_frame *dcmd;
4041 4009 struct mrsas_evt_log_info *eli_tmp;
4042 4010 if (instance->tbolt) {
4043 4011 cmd = get_raid_msg_mfi_pkt(instance);
4044 4012 } else {
4045 4013 cmd = get_mfi_pkt(instance);
4046 4014 }
4047 4015
4048 4016 if (!cmd) {
4049 4017 cmn_err(CE_WARN, "mr_sas: failed to get a cmd");
4050 4018 DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
4051 4019 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4052 4020 return (ENOMEM);
4053 4021 }
4054 4022
4055 4023 /* Clear the frame buffer and assign back the context id */
4056 4024 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4057 4025 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4058 4026 cmd->index);
4059 4027
4060 4028 dcmd = &cmd->frame->dcmd;
4061 4029
4062 4030 /* allocate the data transfer buffer */
4063 4031 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
4064 4032 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
4065 4033 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4066 4034 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4067 4035 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
4068 4036 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
4069 4037
4070 4038 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
4071 4039 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
4072 4040 cmn_err(CE_WARN,
4073 4041 "get_seq_num: could not allocate data transfer buffer.");
4074 4042 return (DDI_FAILURE);
4075 4043 }
4076 4044
4077 4045 (void) memset(dcmd_dma_obj.buffer, 0,
4078 4046 sizeof (struct mrsas_evt_log_info));
4079 4047
4080 4048 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4081 4049
4082 4050 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4083 4051 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
4084 4052 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
4085 4053 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4086 4054 MFI_FRAME_DIR_READ);
4087 4055 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4088 4056 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
4089 4057 sizeof (struct mrsas_evt_log_info));
4090 4058 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4091 4059 MR_DCMD_CTRL_EVENT_GET_INFO);
4092 4060 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
4093 4061 sizeof (struct mrsas_evt_log_info));
4094 4062 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
4095 4063 dcmd_dma_obj.dma_cookie[0].dmac_address);
4096 4064
4097 4065 cmd->sync_cmd = MRSAS_TRUE;
4098 4066 cmd->frame_count = 1;
4099 4067
4100 4068 if (instance->tbolt) {
4101 4069 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4102 4070 }
4103 4071
4104 4072 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4105 4073 cmn_err(CE_WARN, "get_seq_num: "
4106 4074 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
4107 4075 ret = DDI_FAILURE;
4108 4076 } else {
4109 4077 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
4110 4078 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
4111 4079 &eli_tmp->newest_seq_num);
4112 4080 ret = DDI_SUCCESS;
4113 4081 }
4114 4082
4115 4083 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
4116 4084 ret = DDI_FAILURE;
4117 4085
4118 4086 if (instance->tbolt) {
4119 4087 return_raid_msg_mfi_pkt(instance, cmd);
4120 4088 } else {
4121 4089 return_mfi_pkt(instance, cmd);
4122 4090 }
4123 4091
4124 4092 return (ret);
4125 4093 }
4126 4094
4127 4095 /*
4128 4096 * start_mfi_aen
4129 4097 */
4130 4098 static int
4131 4099 start_mfi_aen(struct mrsas_instance *instance)
4132 4100 {
4133 4101 int ret = 0;
4134 4102
4135 4103 struct mrsas_evt_log_info eli;
4136 4104 union mrsas_evt_class_locale class_locale;
4137 4105
4138 4106 /* get the latest sequence number from FW */
4139 4107 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
4140 4108
4141 4109 if (get_seq_num(instance, &eli)) {
4142 4110 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
4143 4111 return (-1);
4144 4112 }
4145 4113
4146 4114 /* register AEN with FW for latest sequence number plus 1 */
4147 4115 class_locale.members.reserved = 0;
4148 4116 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
4149 4117 class_locale.members.class = MR_EVT_CLASS_INFO;
4150 4118 class_locale.word = LE_32(class_locale.word);
4151 4119 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
4152 4120 class_locale.word);
4153 4121
4154 4122 if (ret) {
4155 4123 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
4156 4124 return (-1);
4157 4125 }
4158 4126
4159 4127
4160 4128 return (ret);
4161 4129 }
4162 4130
4163 4131 /*
4164 4132 * flush_cache
4165 4133 */
4166 4134 static void
4167 4135 flush_cache(struct mrsas_instance *instance)
4168 4136 {
4169 4137 struct mrsas_cmd *cmd = NULL;
4170 4138 struct mrsas_dcmd_frame *dcmd;
4171 4139 if (instance->tbolt) {
4172 4140 cmd = get_raid_msg_mfi_pkt(instance);
4173 4141 } else {
4174 4142 cmd = get_mfi_pkt(instance);
4175 4143 }
4176 4144
4177 4145 if (!cmd) {
4178 4146 con_log(CL_ANN1, (CE_WARN,
4179 4147 "flush_cache():Failed to get a cmd for flush_cache"));
4180 4148 DTRACE_PROBE2(flush_cache_err, uint16_t,
4181 4149 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4182 4150 return;
4183 4151 }
4184 4152
4185 4153 /* Clear the frame buffer and assign back the context id */
4186 4154 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4187 4155 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4188 4156 cmd->index);
4189 4157
4190 4158 dcmd = &cmd->frame->dcmd;
4191 4159
4192 4160 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4193 4161
4194 4162 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4195 4163 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
4196 4164 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
4197 4165 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4198 4166 MFI_FRAME_DIR_NONE);
4199 4167 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4200 4168 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4201 4169 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4202 4170 MR_DCMD_CTRL_CACHE_FLUSH);
4203 4171 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4204 4172 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4205 4173
4206 4174 cmd->frame_count = 1;
4207 4175
4208 4176 if (instance->tbolt) {
4209 4177 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4210 4178 }
4211 4179
4212 4180 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4213 4181 con_log(CL_ANN1, (CE_WARN,
4214 4182 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4215 4183 }
4216 4184 con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4217 4185 if (instance->tbolt) {
4218 4186 return_raid_msg_mfi_pkt(instance, cmd);
4219 4187 } else {
4220 4188 return_mfi_pkt(instance, cmd);
4221 4189 }
4222 4190
4223 4191 }
4224 4192
/*
 * service_mfi_aen-	Completes an AEN command
 * @instance:			Adapter soft state
 * @cmd:			Command to be completed
 *
 * Runs when the outstanding AEN command completes: logs a sysevent,
 * dispatches (un)configure work for the LD/PD named by the event, then
 * immediately re-arms the same command frame with the next sequence
 * number and re-issues it, so there is always one AEN registered.
 */
void
service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	uint32_t seq_num;
	/* event payload lives in the pre-allocated AEN DMA buffer */
	struct mrsas_evt_detail *evt_detail =
	    (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
	int rval = 0;
	int tgt = 0;
	uint8_t dtype;
#ifdef PDSUPPORT
	mrsas_pd_address_t *pd_addr;
#endif
	ddi_acc_handle_t acc_handle;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	acc_handle = cmd->frame_dma_obj.acc_handle;
	cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
	/* ENODATA from FW simply means "no event yet" - treat as success */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int	instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas%d: Failed to log AEN event", instance_no));
	}
	/*
	 * Check for any ld devices that has changed state. i.e. online
	 * or offline.
	 */
	con_log(CL_ANN1, (CE_CONT,
	    "AEN: code = %x class = %x locale = %x args = %x",
	    ddi_get32(acc_handle, &evt_detail->code),
	    evt_detail->cl.members.class,
	    ddi_get16(acc_handle, &evt_detail->cl.members.locale),
	    ddi_get8(acc_handle, &evt_detail->arg_type)));

	switch (ddi_get32(acc_handle, &evt_detail->code)) {
	case MR_EVT_CFG_CLEARED: {
		/* whole config wiped: unconfigure every attached LD */
		for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
			if (instance->mr_ld_list[tgt].dip != NULL) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_ld_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance, tgt, 0,
				    MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: CFG CLEARED AEN rval = %d "
				    "tgt id = %d", rval, tgt));
			}
		}
		break;
	}

	case MR_EVT_LD_DELETED: {
		/* mark the target invalid before queuing the unconfigure */
		tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_ld_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
		mutex_exit(&instance->config_dev_mtx);
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_UNCONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_DELETED */

	case MR_EVT_LD_CREATED: {
		/* queue a configure for the newly created logical drive */
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_CONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_CREATED */

#ifdef PDSUPPORT
	/* physical-disk events are only acted on for tbolt controllers */
	case MR_EVT_PD_REMOVED_EXT: {
		if (instance->tbolt) {
			pd_addr = &evt_detail->args.pd_addr;
			dtype = pd_addr->scsi_dev_type;
			con_log(CL_DLEVEL1, (CE_NOTE,
			    " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
			    " arg_type = %d ", dtype, evt_detail->arg_type));
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_tbolt_pd_list[tgt].flag =
			    (uint8_t)~MRDRV_TGT_VALID;
			mutex_exit(&instance->config_dev_mtx);
			rval = mrsas_service_evt(instance, ddi_get16(
			    acc_handle, &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_REMOVED_EXT */

	case MR_EVT_PD_INSERTED_EXT: {
		if (instance->tbolt) {
			rval = mrsas_service_evt(instance,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_CONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_INSERTED_EXT */

	case MR_EVT_PD_STATE_CHANGE: {
		if (instance->tbolt) {
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			/* leaving PD_SYSTEM: treat like a removal */
			if ((evt_detail->args.pd_state.prevState ==
			    PD_SYSTEM) &&
			    (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_tbolt_pd_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
				    "rval = %d tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
			/* entering PD_SYSTEM: treat like an insertion */
			if ((evt_detail->args.pd_state.prevState
			    == UNCONFIGURED_GOOD) &&
			    (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_CONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: PD_INSERTED: rval = %d "
				    " tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
		}
		break;
	}
#endif

	} /* End of Main Switch */

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
	seq_num++;
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
	ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	/* fresh registration: reset OCR retry accounting for this cmd */
	cmd->retry_count_for_ocr = 0;
	cmd->drv_pkt_time = 0;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}
4418 4386
4419 4387 /*
4420 4388 * complete_cmd_in_sync_mode - Completes an internal command
4421 4389 * @instance: Adapter soft state
4422 4390 * @cmd: Command to be completed
4423 4391 *
4424 4392 * The issue_cmd_in_sync_mode() function waits for a command to complete
4425 4393 * after it issues a command. This function wakes up that waiting routine by
4426 4394 * calling wake_up() on the wait queue.
4427 4395 */
4428 4396 static void
4429 4397 complete_cmd_in_sync_mode(struct mrsas_instance *instance,
4430 4398 struct mrsas_cmd *cmd)
4431 4399 {
4432 4400 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
4433 4401 &cmd->frame->io.cmd_status);
4434 4402
4435 4403 cmd->sync_cmd = MRSAS_FALSE;
4436 4404
4437 4405 con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
4438 4406 (void *)cmd));
4439 4407
4440 4408 mutex_enter(&instance->int_cmd_mtx);
4441 4409 if (cmd->cmd_status == ENODATA) {
4442 4410 cmd->cmd_status = 0;
4443 4411 }
4444 4412 cv_broadcast(&instance->int_cmd_cv);
4445 4413 mutex_exit(&instance->int_cmd_mtx);
4446 4414
4447 4415 }
4448 4416
4449 4417 /*
4450 4418 * Call this function inside mrsas_softintr.
4451 4419 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4452 4420 * @instance: Adapter soft state
4453 4421 */
4454 4422
4455 4423 static uint32_t
4456 4424 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4457 4425 {
4458 4426 uint32_t cur_abs_reg_val;
4459 4427 uint32_t fw_state;
4460 4428
4461 4429 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
4462 4430 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4463 4431 if (fw_state == MFI_STATE_FAULT) {
4464 4432 if (instance->disable_online_ctrl_reset == 1) {
4465 4433 cmn_err(CE_WARN,
4466 4434 "mrsas_initiate_ocr_if_fw_is_faulty: "
4467 4435 "FW in Fault state, detected in ISR: "
4468 4436 "FW doesn't support ocr ");
4469 4437
4470 4438 return (ADAPTER_RESET_NOT_REQUIRED);
4471 4439 } else {
4472 4440 con_log(CL_ANN, (CE_NOTE,
4473 4441 "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault "
4474 4442 "state, detected in ISR: FW supports ocr "));
4475 4443
4476 4444 return (ADAPTER_RESET_REQUIRED);
4477 4445 }
4478 4446 }
4479 4447
4480 4448 return (ADAPTER_RESET_NOT_REQUIRED);
4481 4449 }
4482 4450
/*
 * mrsas_softintr - The Software ISR
 * @param instance : HBA soft state
 *
 * called from high-level interrupt if hi-level interrupt are not there,
 * otherwise triggered as a soft interrupt
 *
 * Drains the completed-command pool: the list is spliced onto a private
 * list under completed_pool_mtx, then each command is dispatched on its
 * MFI opcode — SCSI/LD I/O completions are translated into scsi_pkt
 * state and the SCSA callback is invoked; DCMD/SMP/STP/ABORT commands
 * are completed in synchronous mode.  Always returns DDI_INTR_CLAIMED.
 */
static uint_t
mrsas_softintr(struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	struct scsa_cmd *acmd;
	struct mrsas_cmd *cmd;
	struct mlist_head *pos, *next;
	mlist_t process_list;
	struct mrsas_header *hdr;
	struct scsi_arq_status *arqstat;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));

	ASSERT(instance);

	mutex_enter(&instance->completed_pool_mtx);

	if (mlist_empty(&instance->completed_pool_list)) {
		mutex_exit(&instance->completed_pool_mtx);
		return (DDI_INTR_CLAIMED);
	}

	instance->softint_running = 1;

	/*
	 * Move the whole completed list onto a private list while holding
	 * the mutex, then drop the lock so new completions can continue to
	 * be queued while this batch is processed.
	 */
	INIT_LIST_HEAD(&process_list);
	mlist_splice(&instance->completed_pool_list, &process_list);
	INIT_LIST_HEAD(&instance->completed_pool_list);

	mutex_exit(&instance->completed_pool_mtx);

	/* perform all callbacks first, before releasing the SCBs */
	mlist_for_each_safe(pos, next, &process_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		/* synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
		    0, 0, DDI_DMA_SYNC_FORCPU);

		if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
		    DDI_SUCCESS) {
			mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
			con_log(CL_ANN1, (CE_WARN,
			    "mrsas_softintr: "
			    "FMA check reports DMA handle failure"));
			/*
			 * NOTE(review): this early return leaves
			 * softint_running set and abandons any commands
			 * still on process_list — confirm this is the
			 * intended behavior on a lost-service fault.
			 */
			return (DDI_INTR_CLAIMED);
		}

		hdr = &cmd->frame->hdr;

		/* remove the internal command from the process list */
		mlist_del_init(&cmd->list);

		switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
		case MFI_CMD_OP_PD_SCSI:
		case MFI_CMD_OP_LD_SCSI:
		case MFI_CMD_OP_LD_READ:
		case MFI_CMD_OP_LD_WRITE:
			/*
			 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
			 * could have been issued either through an
			 * IO path or an IOCTL path. If it was via IOCTL,
			 * we will send it to internal completion.
			 */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				complete_cmd_in_sync_mode(instance, cmd);
				break;
			}

			/* regular commands */
			acmd = cmd->cmd;
			pkt = CMD2PKT(acmd);

			/* make any DMA'd-in data visible to the CPU */
			if (acmd->cmd_flags & CFLAG_DMAVALID) {
				if (acmd->cmd_flags & CFLAG_CONSISTENT) {
					(void) ddi_dma_sync(acmd->cmd_dmahandle,
					    acmd->cmd_dma_offset,
					    acmd->cmd_dma_len,
					    DDI_DMA_SYNC_FORCPU);
				}
			}

			/* assume full success; error cases adjust below */
			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;
			pkt->pkt_state = STATE_GOT_BUS
			    | STATE_GOT_TARGET | STATE_SENT_CMD
			    | STATE_XFERRED_DATA | STATE_GOT_STATUS;

			con_log(CL_ANN, (CE_CONT,
			    "CDB[0] = %x completed for %s: size %lx context %x",
			    pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
			    acmd->cmd_dmacount, hdr->context));
			DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
			    uint_t, acmd->cmd_cdblen, ulong_t,
			    acmd->cmd_dmacount);

			if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
				struct scsi_inquiry *inq;

				if (acmd->cmd_dmacount != 0) {
					bp_mapin(acmd->cmd_buf);
					inq = (struct scsi_inquiry *)
					    acmd->cmd_buf->b_un.b_addr;

					/* don't expose physical drives to OS */
					if (acmd->islogical &&
					    (hdr->cmd_status == MFI_STAT_OK)) {
						display_scsi_inquiry(
						    (caddr_t)inq);
					} else if ((hdr->cmd_status ==
					    MFI_STAT_OK) && inq->inq_dtype ==
					    DTYPE_DIRECT) {

						display_scsi_inquiry(
						    (caddr_t)inq);

						/*
						 * Physical direct-access disk:
						 * report it as not found so the
						 * OS never attaches to it.
						 */
						hdr->cmd_status =
						    MFI_STAT_DEVICE_NOT_FOUND;
					}
				}
			}

			DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
			    uint8_t, hdr->cmd_status);

			/* translate MFI completion status into SCSA state */
			switch (hdr->cmd_status) {
			case MFI_STAT_OK:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_CC_IN_PROGRESS:
			case MFI_STAT_LD_RECON_IN_PROGRESS:
				pkt->pkt_scbp[0] = STATUS_GOOD;
				break;
			case MFI_STAT_LD_INIT_IN_PROGRESS:
				con_log(CL_ANN,
				    (CE_WARN, "Initialization in Progress"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			case MFI_STAT_SCSI_DONE_WITH_ERROR:
				con_log(CL_ANN, (CE_CONT, "scsi_done error"));

				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
					con_log(CL_ANN,
					    (CE_WARN, "TEST_UNIT_READY fail"));
				} else {
					/*
					 * Build auto-request-sense data from
					 * the firmware's sense buffer.
					 */
					pkt->pkt_state |= STATE_ARQ_DONE;
					arqstat = (void *)(pkt->pkt_scbp);
					arqstat->sts_rqpkt_reason = CMD_CMPLT;
					arqstat->sts_rqpkt_resid = 0;
					arqstat->sts_rqpkt_state |=
					    STATE_GOT_BUS | STATE_GOT_TARGET
					    | STATE_SENT_CMD
					    | STATE_XFERRED_DATA;
					*(uint8_t *)&arqstat->sts_rqpkt_status =
					    STATUS_GOOD;
					ddi_rep_get8(
					    cmd->frame_dma_obj.acc_handle,
					    (uint8_t *)
					    &(arqstat->sts_sensedata),
					    cmd->sense,
					    sizeof (struct scsi_extended_sense),
					    DDI_DEV_AUTOINCR);
				}
				break;
			case MFI_STAT_LD_OFFLINE:
			case MFI_STAT_DEVICE_NOT_FOUND:
				con_log(CL_ANN, (CE_CONT,
				    "mrsas_softintr:device not found error"));
				pkt->pkt_reason = CMD_DEV_GONE;
				pkt->pkt_statistics = STAT_DISCON;
				break;
			case MFI_STAT_LD_LBA_OUT_OF_RANGE:
				pkt->pkt_state |= STATE_ARQ_DONE;
				pkt->pkt_reason = CMD_CMPLT;
				((struct scsi_status *)
				    pkt->pkt_scbp)->sts_chk = 1;

				/* synthesize ILLEGAL REQUEST sense data */
				arqstat = (void *)(pkt->pkt_scbp);
				arqstat->sts_rqpkt_reason = CMD_CMPLT;
				arqstat->sts_rqpkt_resid = 0;
				arqstat->sts_rqpkt_state |= STATE_GOT_BUS
				    | STATE_GOT_TARGET | STATE_SENT_CMD
				    | STATE_XFERRED_DATA;
				*(uint8_t *)&arqstat->sts_rqpkt_status =
				    STATUS_GOOD;

				arqstat->sts_sensedata.es_valid = 1;
				arqstat->sts_sensedata.es_key =
				    KEY_ILLEGAL_REQUEST;
				arqstat->sts_sensedata.es_class =
				    CLASS_EXTENDED_SENSE;

				/*
				 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
				 * ASC: 0x21h; ASCQ: 0x00h;
				 */
				arqstat->sts_sensedata.es_add_code = 0x21;
				arqstat->sts_sensedata.es_qual_code = 0x00;

				break;

			default:
				con_log(CL_ANN, (CE_CONT, "Unknown status!"));
				pkt->pkt_reason = CMD_TRAN_ERR;

				break;
			}

			atomic_add_16(&instance->fw_outstanding, (-1));

			(void) mrsas_common_check(instance, cmd);

			/* demote to transport error on a bad data handle */
			if (acmd->cmd_dmahandle) {
				if (mrsas_check_dma_handle(
				    acmd->cmd_dmahandle) != DDI_SUCCESS) {
					ddi_fm_service_impact(instance->dip,
					    DDI_SERVICE_UNAFFECTED);
					pkt->pkt_reason = CMD_TRAN_ERR;
					pkt->pkt_statistics = 0;
				}
			}

			/* Call the callback routine */
			if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
			    pkt->pkt_comp) {

				con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_softintr: "
				    "posting to scsa cmd %p index %x pkt %p "
				    "time %llx", (void *)cmd, cmd->index,
				    (void *)pkt, gethrtime()));
				(*pkt->pkt_comp)(pkt);

			}

			return_mfi_pkt(instance, cmd);
			break;

		case MFI_CMD_OP_SMP:
		case MFI_CMD_OP_STP:
			complete_cmd_in_sync_mode(instance, cmd);
			break;

		case MFI_CMD_OP_DCMD:
			/* see if got an event notification */
			if (ddi_get32(cmd->frame_dma_obj.acc_handle,
			    &cmd->frame->dcmd.opcode) ==
			    MR_DCMD_CTRL_EVENT_WAIT) {
				if ((instance->aen_cmd == cmd) &&
				    (instance->aen_cmd->abort_aen)) {
					con_log(CL_ANN, (CE_WARN,
					    "mrsas_softintr: "
					    "aborted_aen returned"));
				} else {
					atomic_add_16(&instance->fw_outstanding,
					    (-1));
					service_mfi_aen(instance, cmd);
				}
			} else {
				complete_cmd_in_sync_mode(instance, cmd);
			}

			break;

		case MFI_CMD_OP_ABORT:
			con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
			/*
			 * MFI_CMD_OP_ABORT successfully completed
			 * in the synchronous mode
			 */
			complete_cmd_in_sync_mode(instance, cmd);
			break;

		default:
			mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
			ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

			/* still post the completion so the target isn't hung */
			if (cmd->pkt != NULL) {
				pkt = cmd->pkt;
				if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
				    pkt->pkt_comp) {

					con_log(CL_ANN1, (CE_CONT, "posting to "
					    "scsa cmd %p index %x pkt %p"
					    "time %llx, default ", (void *)cmd,
					    cmd->index, (void *)pkt,
					    gethrtime()));

					(*pkt->pkt_comp)(pkt);

				}
			}
			con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
			break;
		}
	}

	instance->softint_running = 0;

	return (DDI_INTR_CLAIMED);
}
4796 4764
4797 4765 /*
4798 4766 * mrsas_alloc_dma_obj
4799 4767 *
4800 4768 * Allocate the memory and other resources for an dma object.
4801 4769 */
4802 4770 int
4803 4771 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
4804 4772 uchar_t endian_flags)
4805 4773 {
4806 4774 int i;
4807 4775 size_t alen = 0;
4808 4776 uint_t cookie_cnt;
4809 4777 struct ddi_device_acc_attr tmp_endian_attr;
4810 4778
4811 4779 tmp_endian_attr = endian_attr;
4812 4780 tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
4813 4781 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4814 4782
4815 4783 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
4816 4784 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
4817 4785 if (i != DDI_SUCCESS) {
4818 4786
4819 4787 switch (i) {
4820 4788 case DDI_DMA_BADATTR :
4821 4789 con_log(CL_ANN, (CE_WARN,
4822 4790 "Failed ddi_dma_alloc_handle- Bad attribute"));
4823 4791 break;
4824 4792 case DDI_DMA_NORESOURCES :
4825 4793 con_log(CL_ANN, (CE_WARN,
4826 4794 "Failed ddi_dma_alloc_handle- No Resources"));
4827 4795 break;
4828 4796 default :
4829 4797 con_log(CL_ANN, (CE_WARN,
4830 4798 "Failed ddi_dma_alloc_handle: "
4831 4799 "unknown status %d", i));
4832 4800 break;
4833 4801 }
4834 4802
4835 4803 return (-1);
4836 4804 }
4837 4805
4838 4806 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
4839 4807 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4840 4808 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4841 4809 alen < obj->size) {
4842 4810
4843 4811 ddi_dma_free_handle(&obj->dma_handle);
4844 4812
4845 4813 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4846 4814
4847 4815 return (-1);
4848 4816 }
4849 4817
4850 4818 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4851 4819 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4852 4820 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4853 4821
4854 4822 ddi_dma_mem_free(&obj->acc_handle);
4855 4823 ddi_dma_free_handle(&obj->dma_handle);
4856 4824
4857 4825 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4858 4826
4859 4827 return (-1);
4860 4828 }
4861 4829
4862 4830 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
4863 4831 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4864 4832 return (-1);
4865 4833 }
4866 4834
4867 4835 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
4868 4836 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4869 4837 return (-1);
4870 4838 }
|
↓ open down ↓ |
837 lines elided |
↑ open up ↑ |
4871 4839
4872 4840 return (cookie_cnt);
4873 4841 }
4874 4842
4875 4843 /*
4876 4844 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4877 4845 *
4878 4846 * De-allocate the memory and other resources for an dma object, which must
4879 4847 * have been alloated by a previous call to mrsas_alloc_dma_obj()
4880 4848 */
4881 -/* ARGSUSED */
4882 4849 int
4883 4850 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4884 4851 {
4885 4852
4886 4853 if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) {
4887 4854 return (DDI_SUCCESS);
4888 4855 }
4889 4856
4890 4857 /*
4891 4858 * NOTE: These check-handle functions fail if *_handle == NULL, but
4892 4859 * this function succeeds because of the previous check.
4893 4860 */
4894 4861 if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
4895 4862 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4896 4863 return (DDI_FAILURE);
4897 4864 }
4898 4865
4899 4866 if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
4900 4867 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4901 4868 return (DDI_FAILURE);
4902 4869 }
4903 4870
4904 4871 (void) ddi_dma_unbind_handle(obj.dma_handle);
4905 4872 ddi_dma_mem_free(&obj.acc_handle);
4906 4873 ddi_dma_free_handle(&obj.dma_handle);
4907 4874 obj.acc_handle = NULL;
4908 4875 return (DDI_SUCCESS);
4909 4876 }
4910 4877
/*
 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 * int, int (*)())
 *
 * Allocate dma resources for a new scsi command
 *
 * Allocates a DMA handle sized for this instance's SGL limits, binds the
 * buf, and records the cookie list and byte count in the scsa_cmd.  With
 * PKT_DMA_PARTIAL, only the first window is set up here; mrsas_dma_move()
 * advances to later windows.  Returns DDI_SUCCESS or DDI_FAILURE (with
 * bioerror() set on the buf).
 */
int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int	dma_flags;
	int	(*cb)(caddr_t);
	int	i;

	ddi_dma_attr_t	tmp_dma_attr = mrsas_generic_dma_attr;
	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* direction of the transfer, from the buf's read/write flag */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	/* honor the target driver's choice of sleeping vs. failing */
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
	if (instance->tbolt) {
		/* OCR-RESET FIX */
		tmp_dma_attr.dma_attr_count_max =
		    (U64)mrsas_tbolt_max_cap_maxxfer;	/* limit to 256K */
		tmp_dma_attr.dma_attr_maxxfer =
		    (U64)mrsas_tbolt_max_cap_maxxfer;	/* limit to 256K */
	}

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);

		case DDI_DMA_NORESOURCES:
			/* errno 0: retryable resource shortage */
			bioerror(bp, 0);
			return (DDI_FAILURE);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "impossible result (0x%x)", i));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* buffer exceeds one window; set up the first window */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
			goto no_dma_cookies;
		}

		/*
		 * assumes acmd->cmd_curwin was zeroed when the scsa_cmd
		 * was initialized — TODO confirm against the pkt ctor
		 */
		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
			goto no_dma_cookies;
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		/* whole buffer fits in one window */
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

	get_dma_cookies:
		/*
		 * Walk the cookie list (capped at max_num_sge) and total
		 * the mapped bytes; the first cookie was already returned
		 * by the bind/getwin call above.
		 */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		/* residual = bytes of the buf not mapped by this window */
		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (DDI_SUCCESS);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "impossible result (0x%x)", i));
		break;
	}

no_dma_cookies:
	/* bind failed (or impossible state): release the handle */
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (DDI_FAILURE);
}
5064 5031
/*
 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 * Continues a PKT_DMA_PARTIAL transfer set up by mrsas_dma_alloc():
 * either advances to the next DMA window or resumes with the next
 * cookie of the current window, then loads up to max_num_sge cookies
 * into acmd->cmd_dmacookies[] and updates pkt_resid.  Returns
 * DDI_SUCCESS or DDI_FAILURE.
 */
int
mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int i = 0;

	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
	 */
	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
		/* single fully-consumed window: nothing left to move */
		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
			return (DDI_SUCCESS);
		}

		/* at last window, cannot move */
		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
			return (DDI_FAILURE);
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {
			return (DDI_FAILURE);
		}

		acmd->cmd_cookie = 0;
	} else {
		/* still more cookies in this window - get the next one */
		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[0]);
	}

	/*
	 * get remaining cookies in this window, up to our maximum
	 *
	 * Note: cmd_dmacount keeps accumulating across calls (it is not
	 * reset here), so pkt_resid below shrinks as windows complete.
	 */
	for (;;) {
		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
		acmd->cmd_cookie++;

		if (i == instance->max_num_sge ||
		    acmd->cmd_cookie == acmd->cmd_ncookies) {
			break;
		}

		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[i]);
	}

	acmd->cmd_cookiecnt = i;

	if (bp->b_bcount >= acmd->cmd_dmacount) {
		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
	} else {
		pkt->pkt_resid = 0;
	}

	return (DDI_SUCCESS);
}
5131 5098
/*
 * build_cmd
 *
 * Translate a SCSA packet into an MFI command frame.  Read/write CDBs
 * aimed at a logical drive become MFI_CMD_OP_LD_READ/WRITE I/O frames;
 * everything else becomes a pass-through (DCDB) frame.  MODE SENSE for
 * pages 0x3/0x4 is emulated in the driver and completes immediately
 * (*cmd_done set to 1, NULL returned).  Returns the prepared mrsas_cmd,
 * or NULL if no packet is available or the command was completed here.
 */
static struct mrsas_cmd *
build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint16_t	flags = 0;
	uint32_t	i;
	uint32_t	context;
	uint32_t	sge_bytes;
	uint32_t	tmp_data_xfer_len;
	ddi_acc_handle_t acc_handle;
	struct mrsas_cmd		*cmd;
	struct mrsas_sge64		*mfi_sgl;
	struct mrsas_sge_ieee		*mfi_sgl_ieee;
	struct scsa_cmd			*acmd = PKT2CMD(pkt);
	struct mrsas_pthru_frame	*pthru;
	struct mrsas_io_frame		*ldio;

	/* find out if this is logical or physical drive command. */
	acmd->islogical = MRDRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);
	*cmd_done = 0;

	/* get the command packet */
	if (!(cmd = get_mfi_pkt(instance))) {
		DTRACE_PROBE2(build_cmd_mfi_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (NULL);
	}

	acc_handle = cmd->frame_dma_obj.acc_handle;

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);

	cmd->pkt = pkt;
	cmd->cmd = acmd;
	DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0],
	    ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len);

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		flags = MFI_FRAME_DIR_WRITE;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		/*
		 * NOTE(review): "& ~CFLAG_DMASEND" tests whether ANY other
		 * flag bit is set, not a specific read flag — presumably
		 * intentional (any flagged, non-send command is a read),
		 * but confirm; DIR_NONE is only reached when cmd_flags is
		 * exactly 0 or CFLAG_DMASEND alone.
		 */
		flags = MFI_FRAME_DIR_READ;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		flags = MFI_FRAME_DIR_NONE;
	}

	if (instance->flag_ieee) {
		flags |= MFI_FRAME_IEEE;
	}
	flags |= MFI_FRAME_SGL64;

	switch (pkt->pkt_cdbp[0]) {

	/*
	 * case SCMD_SYNCHRONIZE_CACHE:
	 * 	flush_cache(instance);
	 * 	return_mfi_pkt(instance, cmd);
	 * 	*cmd_done = 1;
	 *
	 * 	return (NULL);
	 */

	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:
		if (acmd->islogical) {
			ldio = (struct mrsas_io_frame *)cmd->frame;

			/*
			 * preare the Logical IO frame:
			 * 2nd bit is zero for all read cmds
			 */
			ddi_put8(acc_handle, &ldio->cmd,
			    (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
			    : MFI_CMD_OP_LD_READ);
			ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
			ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
			ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
			ddi_put16(acc_handle, &ldio->timeout, 0);
			ddi_put8(acc_handle, &ldio->reserved_0, 0);
			ddi_put16(acc_handle, &ldio->pad_0, 0);
			ddi_put16(acc_handle, &ldio->flags, flags);

			/* Initialize sense Information */
			bzero(cmd->sense, SENSE_LENGTH);
			ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
			ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
			ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
			    cmd->sense_phys_addr);
			ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
			ddi_put8(acc_handle, &ldio->access_byte,
			    (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
			ddi_put8(acc_handle, &ldio->sge_count,
			    acmd->cmd_cookiecnt);
			if (instance->flag_ieee) {
				mfi_sgl_ieee =
				    (struct mrsas_sge_ieee *)&ldio->sgl;
			} else {
				mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl;
			}

			context = ddi_get32(acc_handle, &ldio->context);

			/*
			 * Decode LBA and transfer length from the CDB,
			 * by CDB group (6/10/12/16-byte formats).
			 */
			if (acmd->cmd_cdblen == CDB_GROUP0) {
				/* 6-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    (uint16_t)(pkt->pkt_cdbp[4])));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[3])) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
				    << 16)));
			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
				/* 10-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint16_t)(pkt->pkt_cdbp[8])) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
				/* 12-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
				/* 16-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint32_t)(pkt->pkt_cdbp[13])) |
				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_hi, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			}

			break;
		}
		/* fall through For all non-rd/wr cmds */
	default:

		switch (pkt->pkt_cdbp[0]) {
		case SCMD_MODE_SENSE:
		case SCMD_MODE_SENSE_G1: {
			union scsi_cdb	*cdbp;
			uint16_t	page_code;

			cdbp = (void *)pkt->pkt_cdbp;
			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
			switch (page_code) {
			case 0x3:
			case 0x4:
				/* emulated in the driver; done right here */
				(void) mrsas_mode_sense_build(pkt);
				return_mfi_pkt(instance, cmd);
				*cmd_done = 1;
				return (NULL);
			}
			break;
		}
		default:
			break;
		}

		pthru = (struct mrsas_pthru_frame *)cmd->frame;

		/* prepare the DCDB frame */
		ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
		    MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
		ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
		ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
		ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
		ddi_put8(acc_handle, &pthru->lun, 0);
		ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
		ddi_put16(acc_handle, &pthru->timeout, 0);
		ddi_put16(acc_handle, &pthru->flags, flags);
		/* total transfer length = sum of all cookie sizes */
		tmp_data_xfer_len = 0;
		for (i = 0; i < acmd->cmd_cookiecnt; i++) {
			tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
		}
		ddi_put32(acc_handle, &pthru->data_xfer_len,
		    tmp_data_xfer_len);
		ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
		if (instance->flag_ieee) {
			mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
		} else {
			mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl;
		}

		bzero(cmd->sense, SENSE_LENGTH);
		ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
		ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
		ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
		    cmd->sense_phys_addr);

		context = ddi_get32(acc_handle, &pthru->context);
		ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
		    (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);

		break;
	}
#ifdef lint
	context = context;
#endif
	/* prepare the scatter-gather list for the firmware */
	if (instance->flag_ieee) {
		for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
			ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
			    acmd->cmd_dmacookies[i].dmac_laddress);
			ddi_put32(acc_handle, &mfi_sgl_ieee->length,
			    acmd->cmd_dmacookies[i].dmac_size);
		}
		sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
	} else {
		for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
			ddi_put64(acc_handle, &mfi_sgl->phys_addr,
			    acmd->cmd_dmacookies[i].dmac_laddress);
			ddi_put32(acc_handle, &mfi_sgl->length,
			    acmd->cmd_dmacookies[i].dmac_size);
		}
		sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
	}

	/* header frame + however many extra frames the SGL spills into */
	cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
	    ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;

	/* firmware frame-count field is capped at 8 */
	if (cmd->frame_count >= 8) {
		cmd->frame_count = 8;
	}

	return (cmd);
}
5408 5375
5409 5376 #ifndef __sparc
5410 5377 /*
5411 5378 * wait_for_outstanding - Wait for all outstanding cmds
5412 5379 * @instance: Adapter soft state
5413 5380 *
5414 5381 * This function waits for upto MRDRV_RESET_WAIT_TIME seconds for FW to
5415 5382 * complete all its outstanding commands. Returns error if one or more IOs
5416 5383 * are pending after this time period.
5417 5384 */
5418 5385 static int
5419 5386 wait_for_outstanding(struct mrsas_instance *instance)
5420 5387 {
5421 5388 int i;
5422 5389 uint32_t wait_time = 90;
5423 5390
5424 5391 for (i = 0; i < wait_time; i++) {
5425 5392 if (!instance->fw_outstanding) {
5426 5393 break;
5427 5394 }
5428 5395
5429 5396 drv_usecwait(MILLISEC); /* wait for 1000 usecs */;
5430 5397 }
5431 5398
5432 5399 if (instance->fw_outstanding) {
5433 5400 return (1);
5434 5401 }
5435 5402
5436 5403 return (0);
5437 5404 }
5438 5405 #endif /* __sparc */
5439 5406
5440 5407 /*
5441 5408 * issue_mfi_pthru
5442 5409 */
5443 5410 static int
5444 5411 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5445 5412 struct mrsas_cmd *cmd, int mode)
5446 5413 {
5447 5414 void *ubuf;
5448 5415 uint32_t kphys_addr = 0;
5449 5416 uint32_t xferlen = 0;
5450 5417 uint32_t new_xfer_length = 0;
5451 5418 uint_t model;
5452 5419 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5453 5420 dma_obj_t pthru_dma_obj;
5454 5421 struct mrsas_pthru_frame *kpthru;
5455 5422 struct mrsas_pthru_frame *pthru;
5456 5423 int i;
5457 5424 pthru = &cmd->frame->pthru;
5458 5425 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5459 5426
5460 5427 if (instance->adapterresetinprogress) {
5461 5428 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, "
5462 5429 "returning mfi_pkt and setting TRAN_BUSY\n"));
5463 5430 return (DDI_FAILURE);
5464 5431 }
5465 5432 model = ddi_model_convert_from(mode & FMODELS);
5466 5433 if (model == DDI_MODEL_ILP32) {
5467 5434 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5468 5435
5469 5436 xferlen = kpthru->sgl.sge32[0].length;
5470 5437
5471 5438 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5472 5439 } else {
5473 5440 #ifdef _ILP32
5474 5441 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5475 5442 xferlen = kpthru->sgl.sge32[0].length;
5476 5443 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5477 5444 #else
5478 5445 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5479 5446 xferlen = kpthru->sgl.sge64[0].length;
5480 5447 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5481 5448 #endif
5482 5449 }
5483 5450
5484 5451 if (xferlen) {
5485 5452 /* means IOCTL requires DMA */
5486 5453 /* allocate the data transfer buffer */
5487 5454 /* pthru_dma_obj.size = xferlen; */
5488 5455 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5489 5456 PAGESIZE);
5490 5457 pthru_dma_obj.size = new_xfer_length;
5491 5458 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5492 5459 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5493 5460 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5494 5461 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5495 5462 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5496 5463
5497 5464 /* allocate kernel buffer for DMA */
5498 5465 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5499 5466 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5500 5467 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5501 5468 "could not allocate data transfer buffer."));
5502 5469 return (DDI_FAILURE);
5503 5470 }
5504 5471 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5505 5472
5506 5473 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5507 5474 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5508 5475 for (i = 0; i < xferlen; i++) {
5509 5476 if (ddi_copyin((uint8_t *)ubuf+i,
5510 5477 (uint8_t *)pthru_dma_obj.buffer+i,
5511 5478 1, mode)) {
5512 5479 con_log(CL_ANN, (CE_WARN,
5513 5480 "issue_mfi_pthru : "
5514 5481 "copy from user space failed"));
5515 5482 return (DDI_FAILURE);
5516 5483 }
5517 5484 }
5518 5485 }
5519 5486
5520 5487 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5521 5488 }
5522 5489
5523 5490 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5524 5491 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5525 5492 ddi_put8(acc_handle, &pthru->cmd_status, 0);
5526 5493 ddi_put8(acc_handle, &pthru->scsi_status, 0);
5527 5494 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5528 5495 ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5529 5496 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5530 5497 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5531 5498 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5532 5499 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5533 5500
5534 5501 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5535 5502 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
5536 5503 /* ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5537 5504
5538 5505 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5539 5506 pthru->cdb_len, DDI_DEV_AUTOINCR);
5540 5507
5541 5508 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5542 5509 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5543 5510 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5544 5511
5545 5512 cmd->sync_cmd = MRSAS_TRUE;
5546 5513 cmd->frame_count = 1;
5547 5514
5548 5515 if (instance->tbolt) {
5549 5516 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5550 5517 }
5551 5518
5552 5519 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5553 5520 con_log(CL_ANN, (CE_WARN,
5554 5521 "issue_mfi_pthru: fw_ioctl failed"));
5555 5522 } else {
5556 5523 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
5557 5524 for (i = 0; i < xferlen; i++) {
5558 5525 if (ddi_copyout(
5559 5526 (uint8_t *)pthru_dma_obj.buffer+i,
5560 5527 (uint8_t *)ubuf+i, 1, mode)) {
5561 5528 con_log(CL_ANN, (CE_WARN,
5562 5529 "issue_mfi_pthru : "
5563 5530 "copy to user space failed"));
5564 5531 return (DDI_FAILURE);
5565 5532 }
5566 5533 }
5567 5534 }
5568 5535 }
5569 5536
5570 5537 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5571 5538 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5572 5539
5573 5540 con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5574 5541 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5575 5542 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t,
5576 5543 kpthru->cmd_status, uint8_t, kpthru->scsi_status);
5577 5544
5578 5545 if (kpthru->sense_len) {
5579 5546 uint_t sense_len = SENSE_LENGTH;
5580 5547 void *sense_ubuf =
5581 5548 (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5582 5549 if (kpthru->sense_len <= SENSE_LENGTH) {
5583 5550 sense_len = kpthru->sense_len;
5584 5551 }
|
↓ open down ↓ |
693 lines elided |
↑ open up ↑ |
5585 5552
5586 5553 for (i = 0; i < sense_len; i++) {
5587 5554 if (ddi_copyout(
5588 5555 (uint8_t *)cmd->sense+i,
5589 5556 (uint8_t *)sense_ubuf+i, 1, mode)) {
5590 5557 con_log(CL_ANN, (CE_WARN,
5591 5558 "issue_mfi_pthru : "
5592 5559 "copy to user space failed"));
5593 5560 }
5594 5561 con_log(CL_DLEVEL1, (CE_WARN,
5595 - "Copying Sense info sense_buff[%d] = 0x%X\n",
5562 + "Copying Sense info sense_buff[%d] = 0x%X",
5596 5563 i, *((uint8_t *)cmd->sense + i)));
5597 5564 }
5598 5565 }
5599 5566 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5600 5567 DDI_DMA_SYNC_FORDEV);
5601 5568
5602 5569 if (xferlen) {
5603 5570 /* free kernel buffer */
5604 5571 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5605 5572 return (DDI_FAILURE);
5606 5573 }
5607 5574
5608 5575 return (DDI_SUCCESS);
5609 5576 }
5610 5577
5611 5578 /*
5612 5579 * issue_mfi_dcmd
5613 5580 */
5614 5581 static int
5615 5582 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5616 5583 struct mrsas_cmd *cmd, int mode)
5617 5584 {
5618 5585 void *ubuf;
5619 5586 uint32_t kphys_addr = 0;
5620 5587 uint32_t xferlen = 0;
5621 5588 uint32_t new_xfer_length = 0;
|
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
5622 5589 uint32_t model;
5623 5590 dma_obj_t dcmd_dma_obj;
5624 5591 struct mrsas_dcmd_frame *kdcmd;
5625 5592 struct mrsas_dcmd_frame *dcmd;
5626 5593 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5627 5594 int i;
5628 5595 dcmd = &cmd->frame->dcmd;
5629 5596 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5630 5597
5631 5598 if (instance->adapterresetinprogress) {
5632 - con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5633 - "returning mfi_pkt and setting TRAN_BUSY\n"));
5599 + con_log(CL_ANN1, (CE_NOTE, "Reset flag set, "
5600 + "returning mfi_pkt and setting TRAN_BUSY"));
5634 5601 return (DDI_FAILURE);
5635 5602 }
5636 5603 model = ddi_model_convert_from(mode & FMODELS);
5637 5604 if (model == DDI_MODEL_ILP32) {
5638 5605 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5639 5606
5640 5607 xferlen = kdcmd->sgl.sge32[0].length;
5641 5608
5642 5609 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5643 5610 } else {
5644 5611 #ifdef _ILP32
5645 5612 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5646 5613 xferlen = kdcmd->sgl.sge32[0].length;
5647 5614 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5648 5615 #else
5649 5616 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5650 5617 xferlen = kdcmd->sgl.sge64[0].length;
5651 5618 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5652 5619 #endif
5653 5620 }
5654 5621 if (xferlen) {
5655 5622 /* means IOCTL requires DMA */
5656 5623 /* allocate the data transfer buffer */
5657 5624 /* dcmd_dma_obj.size = xferlen; */
5658 5625 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5659 5626 PAGESIZE);
5660 5627 dcmd_dma_obj.size = new_xfer_length;
5661 5628 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5662 5629 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5663 5630 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5664 5631 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5665 5632 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5666 5633
5667 5634 /* allocate kernel buffer for DMA */
5668 5635 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
5669 5636 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5670 5637 con_log(CL_ANN,
5671 5638 (CE_WARN, "issue_mfi_dcmd: could not "
5672 5639 "allocate data transfer buffer."));
5673 5640 return (DDI_FAILURE);
5674 5641 }
5675 5642 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5676 5643
5677 5644 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5678 5645 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5679 5646 for (i = 0; i < xferlen; i++) {
5680 5647 if (ddi_copyin((uint8_t *)ubuf + i,
5681 5648 (uint8_t *)dcmd_dma_obj.buffer + i,
5682 5649 1, mode)) {
5683 5650 con_log(CL_ANN, (CE_WARN,
5684 5651 "issue_mfi_dcmd : "
5685 5652 "copy from user space failed"));
5686 5653 return (DDI_FAILURE);
5687 5654 }
5688 5655 }
5689 5656 }
5690 5657
5691 5658 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5692 5659 }
5693 5660
5694 5661 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5695 5662 ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5696 5663 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5697 5664 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5698 5665 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5699 5666 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5700 5667
5701 5668 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5702 5669 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5703 5670
5704 5671 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5705 5672 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5706 5673 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5707 5674
5708 5675 cmd->sync_cmd = MRSAS_TRUE;
5709 5676 cmd->frame_count = 1;
5710 5677
5711 5678 if (instance->tbolt) {
5712 5679 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5713 5680 }
5714 5681
5715 5682 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5716 5683 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5717 5684 } else {
5718 5685 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5719 5686 for (i = 0; i < xferlen; i++) {
5720 5687 if (ddi_copyout(
5721 5688 (uint8_t *)dcmd_dma_obj.buffer + i,
5722 5689 (uint8_t *)ubuf + i,
5723 5690 1, mode)) {
5724 5691 con_log(CL_ANN, (CE_WARN,
5725 5692 "issue_mfi_dcmd : "
5726 5693 "copy to user space failed"));
5727 5694 return (DDI_FAILURE);
5728 5695 }
5729 5696 }
5730 5697 }
5731 5698 }
5732 5699
5733 5700 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5734 5701 con_log(CL_ANN,
5735 5702 (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5736 5703 DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t,
5737 5704 kdcmd->cmd, uint8_t, kdcmd->cmd_status);
5738 5705
5739 5706 if (xferlen) {
5740 5707 /* free kernel buffer */
5741 5708 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5742 5709 return (DDI_FAILURE);
5743 5710 }
5744 5711
5745 5712 return (DDI_SUCCESS);
5746 5713 }
5747 5714
5748 5715 /*
5749 5716 * issue_mfi_smp
5750 5717 */
5751 5718 static int
5752 5719 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5753 5720 struct mrsas_cmd *cmd, int mode)
5754 5721 {
5755 5722 void *request_ubuf;
5756 5723 void *response_ubuf;
5757 5724 uint32_t request_xferlen = 0;
5758 5725 uint32_t response_xferlen = 0;
5759 5726 uint32_t new_xfer_length1 = 0;
5760 5727 uint32_t new_xfer_length2 = 0;
5761 5728 uint_t model;
5762 5729 dma_obj_t request_dma_obj;
5763 5730 dma_obj_t response_dma_obj;
5764 5731 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5765 5732 struct mrsas_smp_frame *ksmp;
5766 5733 struct mrsas_smp_frame *smp;
5767 5734 struct mrsas_sge32 *sge32;
5768 5735 #ifndef _ILP32
5769 5736 struct mrsas_sge64 *sge64;
5770 5737 #endif
5771 5738 int i;
5772 5739 uint64_t tmp_sas_addr;
5773 5740
5774 5741 smp = &cmd->frame->smp;
5775 5742 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5776 5743
5777 5744 if (instance->adapterresetinprogress) {
5778 5745 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5779 5746 "returning mfi_pkt and setting TRAN_BUSY\n"));
5780 5747 return (DDI_FAILURE);
5781 5748 }
5782 5749 model = ddi_model_convert_from(mode & FMODELS);
5783 5750 if (model == DDI_MODEL_ILP32) {
5784 5751 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5785 5752
5786 5753 sge32 = &ksmp->sgl[0].sge32[0];
5787 5754 response_xferlen = sge32[0].length;
5788 5755 request_xferlen = sge32[1].length;
5789 5756 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5790 5757 "response_xferlen = %x, request_xferlen = %x",
5791 5758 response_xferlen, request_xferlen));
5792 5759
5793 5760 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5794 5761 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5795 5762 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5796 5763 "response_ubuf = %p, request_ubuf = %p",
5797 5764 response_ubuf, request_ubuf));
5798 5765 } else {
5799 5766 #ifdef _ILP32
5800 5767 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5801 5768
5802 5769 sge32 = &ksmp->sgl[0].sge32[0];
5803 5770 response_xferlen = sge32[0].length;
5804 5771 request_xferlen = sge32[1].length;
5805 5772 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5806 5773 "response_xferlen = %x, request_xferlen = %x",
5807 5774 response_xferlen, request_xferlen));
5808 5775
5809 5776 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5810 5777 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5811 5778 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5812 5779 "response_ubuf = %p, request_ubuf = %p",
5813 5780 response_ubuf, request_ubuf));
5814 5781 #else
5815 5782 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5816 5783
5817 5784 sge64 = &ksmp->sgl[0].sge64[0];
5818 5785 response_xferlen = sge64[0].length;
5819 5786 request_xferlen = sge64[1].length;
5820 5787
5821 5788 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
5822 5789 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
5823 5790 #endif
5824 5791 }
5825 5792 if (request_xferlen) {
5826 5793 /* means IOCTL requires DMA */
5827 5794 /* allocate the data transfer buffer */
5828 5795 /* request_dma_obj.size = request_xferlen; */
5829 5796 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,
5830 5797 new_xfer_length1, PAGESIZE);
5831 5798 request_dma_obj.size = new_xfer_length1;
5832 5799 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5833 5800 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5834 5801 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5835 5802 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5836 5803 request_dma_obj.dma_attr.dma_attr_align = 1;
5837 5804
5838 5805 /* allocate kernel buffer for DMA */
5839 5806 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5840 5807 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5841 5808 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5842 5809 "could not allocate data transfer buffer."));
5843 5810 return (DDI_FAILURE);
5844 5811 }
5845 5812 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5846 5813
5847 5814 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5848 5815 for (i = 0; i < request_xferlen; i++) {
5849 5816 if (ddi_copyin((uint8_t *)request_ubuf + i,
5850 5817 (uint8_t *)request_dma_obj.buffer + i,
5851 5818 1, mode)) {
5852 5819 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5853 5820 "copy from user space failed"));
5854 5821 return (DDI_FAILURE);
5855 5822 }
5856 5823 }
5857 5824 }
5858 5825
5859 5826 if (response_xferlen) {
5860 5827 /* means IOCTL requires DMA */
5861 5828 /* allocate the data transfer buffer */
5862 5829 /* response_dma_obj.size = response_xferlen; */
5863 5830 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,
5864 5831 new_xfer_length2, PAGESIZE);
5865 5832 response_dma_obj.size = new_xfer_length2;
5866 5833 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5867 5834 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5868 5835 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5869 5836 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5870 5837 response_dma_obj.dma_attr.dma_attr_align = 1;
5871 5838
5872 5839 /* allocate kernel buffer for DMA */
5873 5840 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5874 5841 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5875 5842 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5876 5843 "could not allocate data transfer buffer."));
5877 5844 return (DDI_FAILURE);
5878 5845 }
5879 5846 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5880 5847
5881 5848 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5882 5849 for (i = 0; i < response_xferlen; i++) {
5883 5850 if (ddi_copyin((uint8_t *)response_ubuf + i,
5884 5851 (uint8_t *)response_dma_obj.buffer + i,
5885 5852 1, mode)) {
5886 5853 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5887 5854 "copy from user space failed"));
5888 5855 return (DDI_FAILURE);
5889 5856 }
5890 5857 }
5891 5858 }
5892 5859
5893 5860 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5894 5861 ddi_put8(acc_handle, &smp->cmd_status, 0);
5895 5862 ddi_put8(acc_handle, &smp->connection_status, 0);
5896 5863 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5897 5864 /* smp->context = ksmp->context; */
5898 5865 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5899 5866 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5900 5867
5901 5868 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5902 5869 sizeof (uint64_t));
5903 5870 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5904 5871
5905 5872 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5906 5873
5907 5874 model = ddi_model_convert_from(mode & FMODELS);
5908 5875 if (model == DDI_MODEL_ILP32) {
5909 5876 con_log(CL_ANN1, (CE_CONT,
5910 5877 "issue_mfi_smp: DDI_MODEL_ILP32"));
5911 5878
5912 5879 sge32 = &smp->sgl[0].sge32[0];
5913 5880 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5914 5881 ddi_put32(acc_handle, &sge32[0].phys_addr,
5915 5882 response_dma_obj.dma_cookie[0].dmac_address);
5916 5883 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5917 5884 ddi_put32(acc_handle, &sge32[1].phys_addr,
5918 5885 request_dma_obj.dma_cookie[0].dmac_address);
5919 5886 } else {
5920 5887 #ifdef _ILP32
5921 5888 con_log(CL_ANN1, (CE_CONT,
5922 5889 "issue_mfi_smp: DDI_MODEL_ILP32"));
5923 5890 sge32 = &smp->sgl[0].sge32[0];
5924 5891 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5925 5892 ddi_put32(acc_handle, &sge32[0].phys_addr,
5926 5893 response_dma_obj.dma_cookie[0].dmac_address);
5927 5894 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5928 5895 ddi_put32(acc_handle, &sge32[1].phys_addr,
5929 5896 request_dma_obj.dma_cookie[0].dmac_address);
5930 5897 #else
5931 5898 con_log(CL_ANN1, (CE_CONT,
5932 5899 "issue_mfi_smp: DDI_MODEL_LP64"));
5933 5900 sge64 = &smp->sgl[0].sge64[0];
5934 5901 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5935 5902 ddi_put64(acc_handle, &sge64[0].phys_addr,
5936 5903 response_dma_obj.dma_cookie[0].dmac_address);
5937 5904 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5938 5905 ddi_put64(acc_handle, &sge64[1].phys_addr,
5939 5906 request_dma_obj.dma_cookie[0].dmac_address);
5940 5907 #endif
5941 5908 }
5942 5909 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5943 5910 "smp->response_xferlen = %d, smp->request_xferlen = %d "
5944 5911 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5945 5912 ddi_get32(acc_handle, &sge32[1].length),
5946 5913 ddi_get32(acc_handle, &smp->data_xfer_len)));
5947 5914
5948 5915 cmd->sync_cmd = MRSAS_TRUE;
5949 5916 cmd->frame_count = 1;
5950 5917
5951 5918 if (instance->tbolt) {
5952 5919 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5953 5920 }
5954 5921
5955 5922 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5956 5923 con_log(CL_ANN, (CE_WARN,
5957 5924 "issue_mfi_smp: fw_ioctl failed"));
5958 5925 } else {
5959 5926 con_log(CL_ANN1, (CE_CONT,
5960 5927 "issue_mfi_smp: copy to user space"));
5961 5928
5962 5929 if (request_xferlen) {
5963 5930 for (i = 0; i < request_xferlen; i++) {
5964 5931 if (ddi_copyout(
5965 5932 (uint8_t *)request_dma_obj.buffer +
5966 5933 i, (uint8_t *)request_ubuf + i,
5967 5934 1, mode)) {
5968 5935 con_log(CL_ANN, (CE_WARN,
5969 5936 "issue_mfi_smp : copy to user space"
5970 5937 " failed"));
5971 5938 return (DDI_FAILURE);
5972 5939 }
5973 5940 }
5974 5941 }
5975 5942
5976 5943 if (response_xferlen) {
5977 5944 for (i = 0; i < response_xferlen; i++) {
5978 5945 if (ddi_copyout(
5979 5946 (uint8_t *)response_dma_obj.buffer
5980 5947 + i, (uint8_t *)response_ubuf
5981 5948 + i, 1, mode)) {
5982 5949 con_log(CL_ANN, (CE_WARN,
5983 5950 "issue_mfi_smp : copy to "
5984 5951 "user space failed"));
5985 5952 return (DDI_FAILURE);
5986 5953 }
5987 5954 }
5988 5955 }
5989 5956 }
5990 5957
5991 5958 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
5992 5959 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
5993 5960 ksmp->cmd_status));
5994 5961 DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status);
5995 5962
5996 5963 if (request_xferlen) {
5997 5964 /* free kernel buffer */
5998 5965 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
5999 5966 DDI_SUCCESS)
6000 5967 return (DDI_FAILURE);
6001 5968 }
6002 5969
6003 5970 if (response_xferlen) {
6004 5971 /* free kernel buffer */
6005 5972 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
6006 5973 DDI_SUCCESS)
6007 5974 return (DDI_FAILURE);
6008 5975 }
6009 5976
6010 5977 return (DDI_SUCCESS);
6011 5978 }
6012 5979
6013 5980 /*
6014 5981 * issue_mfi_stp
6015 5982 */
6016 5983 static int
6017 5984 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6018 5985 struct mrsas_cmd *cmd, int mode)
6019 5986 {
6020 5987 void *fis_ubuf;
6021 5988 void *data_ubuf;
6022 5989 uint32_t fis_xferlen = 0;
6023 5990 uint32_t new_xfer_length1 = 0;
6024 5991 uint32_t new_xfer_length2 = 0;
6025 5992 uint32_t data_xferlen = 0;
6026 5993 uint_t model;
6027 5994 dma_obj_t fis_dma_obj;
6028 5995 dma_obj_t data_dma_obj;
6029 5996 struct mrsas_stp_frame *kstp;
6030 5997 struct mrsas_stp_frame *stp;
6031 5998 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
6032 5999 int i;
6033 6000
6034 6001 stp = &cmd->frame->stp;
6035 6002 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
6036 6003
6037 6004 if (instance->adapterresetinprogress) {
6038 6005 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
6039 6006 "returning mfi_pkt and setting TRAN_BUSY\n"));
6040 6007 return (DDI_FAILURE);
6041 6008 }
6042 6009 model = ddi_model_convert_from(mode & FMODELS);
6043 6010 if (model == DDI_MODEL_ILP32) {
6044 6011 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
6045 6012
6046 6013 fis_xferlen = kstp->sgl.sge32[0].length;
6047 6014 data_xferlen = kstp->sgl.sge32[1].length;
6048 6015
6049 6016 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
6050 6017 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
6051 6018 } else {
6052 6019 #ifdef _ILP32
6053 6020 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
6054 6021
6055 6022 fis_xferlen = kstp->sgl.sge32[0].length;
6056 6023 data_xferlen = kstp->sgl.sge32[1].length;
6057 6024
6058 6025 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
6059 6026 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
6060 6027 #else
6061 6028 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
6062 6029
6063 6030 fis_xferlen = kstp->sgl.sge64[0].length;
6064 6031 data_xferlen = kstp->sgl.sge64[1].length;
6065 6032
6066 6033 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
6067 6034 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
6068 6035 #endif
6069 6036 }
6070 6037
6071 6038
6072 6039 if (fis_xferlen) {
6073 6040 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
6074 6041 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
6075 6042
6076 6043 /* means IOCTL requires DMA */
6077 6044 /* allocate the data transfer buffer */
6078 6045 /* fis_dma_obj.size = fis_xferlen; */
6079 6046 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,
6080 6047 new_xfer_length1, PAGESIZE);
6081 6048 fis_dma_obj.size = new_xfer_length1;
6082 6049 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
6083 6050 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6084 6051 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6085 6052 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
6086 6053 fis_dma_obj.dma_attr.dma_attr_align = 1;
6087 6054
6088 6055 /* allocate kernel buffer for DMA */
6089 6056 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
6090 6057 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6091 6058 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
6092 6059 "could not allocate data transfer buffer."));
6093 6060 return (DDI_FAILURE);
6094 6061 }
6095 6062 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
6096 6063
6097 6064 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6098 6065 for (i = 0; i < fis_xferlen; i++) {
6099 6066 if (ddi_copyin((uint8_t *)fis_ubuf + i,
6100 6067 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
6101 6068 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6102 6069 "copy from user space failed"));
6103 6070 return (DDI_FAILURE);
6104 6071 }
6105 6072 }
6106 6073 }
6107 6074
6108 6075 if (data_xferlen) {
6109 6076 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
6110 6077 "data_xferlen = %x", data_ubuf, data_xferlen));
6111 6078
6112 6079 /* means IOCTL requires DMA */
6113 6080 /* allocate the data transfer buffer */
6114 6081 /* data_dma_obj.size = data_xferlen; */
6115 6082 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen, new_xfer_length2,
6116 6083 PAGESIZE);
6117 6084 data_dma_obj.size = new_xfer_length2;
6118 6085 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
6119 6086 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6120 6087 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6121 6088 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
6122 6089 data_dma_obj.dma_attr.dma_attr_align = 1;
6123 6090
6124 6091 /* allocate kernel buffer for DMA */
6125 6092 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
6126 6093 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6127 6094 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6128 6095 "could not allocate data transfer buffer."));
6129 6096 return (DDI_FAILURE);
6130 6097 }
6131 6098 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
6132 6099
6133 6100 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6134 6101 for (i = 0; i < data_xferlen; i++) {
6135 6102 if (ddi_copyin((uint8_t *)data_ubuf + i,
6136 6103 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
6137 6104 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6138 6105 "copy from user space failed"));
6139 6106 return (DDI_FAILURE);
6140 6107 }
6141 6108 }
6142 6109 }
6143 6110
6144 6111 ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
6145 6112 ddi_put8(acc_handle, &stp->cmd_status, 0);
6146 6113 ddi_put8(acc_handle, &stp->connection_status, 0);
6147 6114 ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
6148 6115 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
6149 6116
6150 6117 ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
6151 6118 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
6152 6119
6153 6120 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
6154 6121 DDI_DEV_AUTOINCR);
6155 6122
6156 6123 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
6157 6124 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
6158 6125 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
6159 6126 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
6160 6127 fis_dma_obj.dma_cookie[0].dmac_address);
6161 6128 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
6162 6129 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
6163 6130 data_dma_obj.dma_cookie[0].dmac_address);
6164 6131
6165 6132 cmd->sync_cmd = MRSAS_TRUE;
6166 6133 cmd->frame_count = 1;
6167 6134
6168 6135 if (instance->tbolt) {
6169 6136 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6170 6137 }
6171 6138
6172 6139 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
6173 6140 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
6174 6141 } else {
6175 6142
6176 6143 if (fis_xferlen) {
6177 6144 for (i = 0; i < fis_xferlen; i++) {
6178 6145 if (ddi_copyout(
6179 6146 (uint8_t *)fis_dma_obj.buffer + i,
6180 6147 (uint8_t *)fis_ubuf + i, 1, mode)) {
6181 6148 con_log(CL_ANN, (CE_WARN,
6182 6149 "issue_mfi_stp : copy to "
6183 6150 "user space failed"));
6184 6151 return (DDI_FAILURE);
6185 6152 }
6186 6153 }
6187 6154 }
6188 6155 }
6189 6156 if (data_xferlen) {
6190 6157 for (i = 0; i < data_xferlen; i++) {
6191 6158 if (ddi_copyout(
6192 6159 (uint8_t *)data_dma_obj.buffer + i,
6193 6160 (uint8_t *)data_ubuf + i, 1, mode)) {
6194 6161 con_log(CL_ANN, (CE_WARN,
6195 6162 "issue_mfi_stp : copy to"
6196 6163 " user space failed"));
6197 6164 return (DDI_FAILURE);
6198 6165 }
6199 6166 }
6200 6167 }
6201 6168
6202 6169 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
6203 6170 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
6204 6171 kstp->cmd_status));
6205 6172 DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status);
6206 6173
6207 6174 if (fis_xferlen) {
6208 6175 /* free kernel buffer */
6209 6176 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
6210 6177 return (DDI_FAILURE);
6211 6178 }
6212 6179
6213 6180 if (data_xferlen) {
6214 6181 /* free kernel buffer */
6215 6182 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
6216 6183 return (DDI_FAILURE);
6217 6184 }
6218 6185
6219 6186 return (DDI_SUCCESS);
6220 6187 }
6221 6188
6222 6189 /*
6223 6190 * fill_up_drv_ver
6224 6191 */
6225 6192 void
6226 6193 fill_up_drv_ver(struct mrsas_drv_ver *dv)
6227 6194 {
6228 6195 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
6229 6196
6230 6197 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
6231 6198 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
6232 6199 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
6233 6200 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
6234 6201 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
6235 6202 strlen(MRSAS_RELDATE));
6236 6203
6237 6204 }
6238 6205
6239 6206 /*
6240 6207 * handle_drv_ioctl
6241 6208 */
6242 6209 static int
6243 6210 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6244 6211 int mode)
6245 6212 {
6246 6213 int i;
6247 6214 int rval = DDI_SUCCESS;
6248 6215 int *props = NULL;
6249 6216 void *ubuf;
6250 6217
6251 6218 uint8_t *pci_conf_buf;
6252 6219 uint32_t xferlen;
6253 6220 uint32_t num_props;
6254 6221 uint_t model;
6255 6222 struct mrsas_dcmd_frame *kdcmd;
6256 6223 struct mrsas_drv_ver dv;
6257 6224 struct mrsas_pci_information pi;
6258 6225
6259 6226 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
6260 6227
6261 6228 model = ddi_model_convert_from(mode & FMODELS);
6262 6229 if (model == DDI_MODEL_ILP32) {
6263 6230 con_log(CL_ANN1, (CE_CONT,
6264 6231 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6265 6232
6266 6233 xferlen = kdcmd->sgl.sge32[0].length;
6267 6234
6268 6235 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6269 6236 } else {
6270 6237 #ifdef _ILP32
6271 6238 con_log(CL_ANN1, (CE_CONT,
6272 6239 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6273 6240 xferlen = kdcmd->sgl.sge32[0].length;
6274 6241 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6275 6242 #else
6276 6243 con_log(CL_ANN1, (CE_CONT,
6277 6244 "handle_drv_ioctl: DDI_MODEL_LP64"));
6278 6245 xferlen = kdcmd->sgl.sge64[0].length;
6279 6246 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6280 6247 #endif
6281 6248 }
6282 6249 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6283 6250 "dataBuf=%p size=%d bytes", ubuf, xferlen));
6284 6251
6285 6252 switch (kdcmd->opcode) {
6286 6253 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6287 6254 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6288 6255 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6289 6256
6290 6257 fill_up_drv_ver(&dv);
6291 6258
6292 6259 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6293 6260 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6294 6261 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6295 6262 "copy to user space failed"));
6296 6263 kdcmd->cmd_status = 1;
6297 6264 rval = 1;
6298 6265 } else {
6299 6266 kdcmd->cmd_status = 0;
6300 6267 }
6301 6268 break;
6302 6269 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6303 6270 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
6304 6271 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
6305 6272
6306 6273 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6307 6274 0, "reg", &props, &num_props)) {
6308 6275 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6309 6276 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6310 6277 "ddi_prop_look_int_array failed"));
6311 6278 rval = DDI_FAILURE;
6312 6279 } else {
6313 6280
6314 6281 pi.busNumber = (props[0] >> 16) & 0xFF;
6315 6282 pi.deviceNumber = (props[0] >> 11) & 0x1f;
6316 6283 pi.functionNumber = (props[0] >> 8) & 0x7;
6317 6284 ddi_prop_free((void *)props);
6318 6285 }
6319 6286
6320 6287 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6321 6288
6322 6289 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6323 6290 offsetof(struct mrsas_pci_information, pciHeaderInfo));
6324 6291 i++) {
6325 6292 pci_conf_buf[i] =
6326 6293 pci_config_get8(instance->pci_handle, i);
6327 6294 }
6328 6295
6329 6296 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6330 6297 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6331 6298 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6332 6299 "copy to user space failed"));
6333 6300 kdcmd->cmd_status = 1;
6334 6301 rval = 1;
6335 6302 } else {
6336 6303 kdcmd->cmd_status = 0;
6337 6304 }
6338 6305 break;
6339 6306 default:
6340 6307 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6341 6308 "invalid driver specific IOCTL opcode = 0x%x",
6342 6309 kdcmd->opcode));
6343 6310 kdcmd->cmd_status = 1;
6344 6311 rval = DDI_FAILURE;
6345 6312 break;
6346 6313 }
6347 6314
6348 6315 return (rval);
6349 6316 }
6350 6317
6351 6318 /*
6352 6319 * handle_mfi_ioctl
6353 6320 */
6354 6321 static int
6355 6322 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6356 6323 int mode)
6357 6324 {
6358 6325 int rval = DDI_SUCCESS;
6359 6326
6360 6327 struct mrsas_header *hdr;
6361 6328 struct mrsas_cmd *cmd;
6362 6329
6363 6330 if (instance->tbolt) {
6364 6331 cmd = get_raid_msg_mfi_pkt(instance);
6365 6332 } else {
6366 6333 cmd = get_mfi_pkt(instance);
6367 6334 }
6368 6335 if (!cmd) {
6369 6336 con_log(CL_ANN, (CE_WARN, "mr_sas: "
6370 6337 "failed to get a cmd packet"));
6371 6338 DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
6372 6339 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
6373 6340 return (DDI_FAILURE);
6374 6341 }
6375 6342
6376 6343 /* Clear the frame buffer and assign back the context id */
6377 6344 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6378 6345 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6379 6346 cmd->index);
6380 6347
6381 6348 hdr = (struct mrsas_header *)&ioctl->frame[0];
6382 6349
6383 6350 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
6384 6351 case MFI_CMD_OP_DCMD:
6385 6352 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
6386 6353 break;
6387 6354 case MFI_CMD_OP_SMP:
6388 6355 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
6389 6356 break;
6390 6357 case MFI_CMD_OP_STP:
6391 6358 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
6392 6359 break;
6393 6360 case MFI_CMD_OP_LD_SCSI:
6394 6361 case MFI_CMD_OP_PD_SCSI:
6395 6362 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
6396 6363 break;
6397 6364 default:
6398 6365 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
6399 6366 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
6400 6367 rval = DDI_FAILURE;
6401 6368 break;
6402 6369 }
6403 6370
6404 6371 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
6405 6372 rval = DDI_FAILURE;
6406 6373
6407 6374 if (instance->tbolt) {
6408 6375 return_raid_msg_mfi_pkt(instance, cmd);
6409 6376 } else {
6410 6377 return_mfi_pkt(instance, cmd);
6411 6378 }
6412 6379
6413 6380 return (rval);
6414 6381 }
6415 6382
6416 6383 /*
6417 6384 * AEN
6418 6385 */
6419 6386 static int
6420 6387 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6421 6388 {
6422 6389 int rval = 0;
6423 6390
6424 6391 rval = register_mfi_aen(instance, instance->aen_seq_num,
6425 6392 aen->class_locale_word);
6426 6393
6427 6394 aen->cmd_status = (uint8_t)rval;
6428 6395
6429 6396 return (rval);
6430 6397 }
6431 6398
/*
 * register_mfi_aen
 *
 * Register with the firmware for asynchronous event notification at
 * the given sequence number and class/locale.  If a registration is
 * already outstanding and its class/locale covers the new request,
 * nothing is done; otherwise the old command is aborted and a new
 * EVENT_WAIT DCMD covering the superset of both is issued.  The issued
 * command is remembered in instance->aen_cmd so a later request can
 * abort it.
 *
 * Returns 0 on success, ENOMEM if no command packet is available, or
 * the failure code of abort_aen_cmd().
 */
static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct mrsas_cmd *cmd, *aen_cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		/* Recover the class/locale the pending command asked for. */
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* Form the superset: all locales, smallest class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = get_mfi_pkt(instance);
	}

	if (!cmd) {
		DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (ENOMEM);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* The firmware DMAs event details into this preallocated buffer. */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
	/*
	 * NOTE(review): locale and word are byte-swapped again here,
	 * undoing the swaps done on entry before the value is stored in
	 * mbox.w[1].  On little-endian hosts LE_16/LE_32 are no-ops;
	 * verify the intended net effect on big-endian platforms.
	 */
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	curr_aen.word = LE_32(curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
	    curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_detail));

	instance->aen_seq_num = seq_num;


	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16 (&instance->fw_outstanding, 1); */
	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}
6571 6538
6572 6539 void
6573 6540 display_scsi_inquiry(caddr_t scsi_inq)
6574 6541 {
6575 6542 #define MAX_SCSI_DEVICE_CODE 14
6576 6543 int i;
6577 6544 char inquiry_buf[256] = {0};
6578 6545 int len;
6579 6546 const char *const scsi_device_types[] = {
6580 6547 "Direct-Access ",
6581 6548 "Sequential-Access",
6582 6549 "Printer ",
6583 6550 "Processor ",
6584 6551 "WORM ",
6585 6552 "CD-ROM ",
6586 6553 "Scanner ",
6587 6554 "Optical Device ",
6588 6555 "Medium Changer ",
6589 6556 "Communications ",
6590 6557 "Unknown ",
6591 6558 "Unknown ",
6592 6559 "Unknown ",
6593 6560 "Enclosure ",
6594 6561 };
6595 6562
6596 6563 len = 0;
6597 6564
6598 6565 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: ");
6599 6566 for (i = 8; i < 16; i++) {
6600 6567 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6601 6568 scsi_inq[i]);
6602 6569 }
6603 6570
6604 6571 len += snprintf(inquiry_buf + len, 265 - len, " Model: ");
6605 6572
6606 6573 for (i = 16; i < 32; i++) {
6607 6574 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6608 6575 scsi_inq[i]);
6609 6576 }
6610 6577
6611 6578 len += snprintf(inquiry_buf + len, 265 - len, " Rev: ");
6612 6579
6613 6580 for (i = 32; i < 36; i++) {
6614 6581 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6615 6582 scsi_inq[i]);
6616 6583 }
6617 6584
6618 6585 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6619 6586
6620 6587
6621 6588 i = scsi_inq[0] & 0x1f;
6622 6589
6623 6590
6624 6591 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ",
6625 6592 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6626 6593 "Unknown ");
6627 6594
6628 6595
6629 6596 len += snprintf(inquiry_buf + len, 265 - len,
6630 6597 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6631 6598
6632 6599 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6633 6600 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
6634 6601 } else {
6635 6602 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6636 6603 }
6637 6604
6638 6605 con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf));
6639 6606 }
6640 6607
6641 6608 static void
6642 6609 io_timeout_checker(void *arg)
6643 6610 {
6644 6611 struct scsi_pkt *pkt;
6645 6612 struct mrsas_instance *instance = arg;
6646 6613 struct mrsas_cmd *cmd = NULL;
6647 6614 struct mrsas_header *hdr;
6648 6615 int time = 0;
6649 6616 int counter = 0;
6650 6617 struct mlist_head *pos, *next;
6651 6618 mlist_t process_list;
6652 6619
6653 6620 if (instance->adapterresetinprogress == 1) {
6654 6621 con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
6655 6622 " reset in progress"));
6656 6623
6657 6624 instance->timeout_id = timeout(io_timeout_checker,
6658 6625 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6659 6626 return;
6660 6627 }
6661 6628
6662 6629 /* See if this check needs to be in the beginning or last in ISR */
6663 6630 if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
6664 6631 cmn_err(CE_WARN, "io_timeout_checker: "
6665 6632 "FW Fault, calling reset adapter");
6666 6633 cmn_err(CE_CONT, "io_timeout_checker: "
6667 6634 "fw_outstanding 0x%X max_fw_cmds 0x%X",
6668 6635 instance->fw_outstanding, instance->max_fw_cmds);
6669 6636 if (instance->adapterresetinprogress == 0) {
6670 6637 instance->adapterresetinprogress = 1;
6671 6638 if (instance->tbolt)
6672 6639 (void) mrsas_tbolt_reset_ppc(instance);
6673 6640 else
6674 6641 (void) mrsas_reset_ppc(instance);
6675 6642 instance->adapterresetinprogress = 0;
6676 6643 }
6677 6644 instance->timeout_id = timeout(io_timeout_checker,
6678 6645 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6679 6646 return;
6680 6647 }
6681 6648
6682 6649 INIT_LIST_HEAD(&process_list);
6683 6650
6684 6651 mutex_enter(&instance->cmd_pend_mtx);
6685 6652 mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
6686 6653 cmd = mlist_entry(pos, struct mrsas_cmd, list);
6687 6654
6688 6655 if (cmd == NULL) {
6689 6656 continue;
6690 6657 }
6691 6658
6692 6659 if (cmd->sync_cmd == MRSAS_TRUE) {
6693 6660 hdr = (struct mrsas_header *)&cmd->frame->hdr;
6694 6661 if (hdr == NULL) {
6695 6662 continue;
6696 6663 }
6697 6664 time = --cmd->drv_pkt_time;
6698 6665 } else {
6699 6666 pkt = cmd->pkt;
6700 6667 if (pkt == NULL) {
6701 6668 continue;
6702 6669 }
6703 6670 time = --cmd->drv_pkt_time;
6704 6671 }
6705 6672 if (time <= 0) {
6706 6673 cmn_err(CE_WARN, "%llx: "
6707 6674 "io_timeout_checker: TIMING OUT: pkt: %p, "
6708 6675 "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X\n",
6709 6676 gethrtime(), (void *)pkt, (void *)cmd,
6710 6677 instance->fw_outstanding, instance->max_fw_cmds);
6711 6678
6712 6679 counter++;
6713 6680 break;
6714 6681 }
6715 6682 }
6716 6683 mutex_exit(&instance->cmd_pend_mtx);
6717 6684
6718 6685 if (counter) {
6719 6686 if (instance->disable_online_ctrl_reset == 1) {
6720 6687 cmn_err(CE_WARN, "mr_sas %d: %s(): OCR is NOT "
6721 6688 "supported by Firmware, KILL adapter!!!",
6722 6689 instance->instance, __func__);
6723 6690
6724 6691 if (instance->tbolt)
6725 6692 mrsas_tbolt_kill_adapter(instance);
6726 6693 else
6727 6694 (void) mrsas_kill_adapter(instance);
6728 6695
6729 6696 return;
6730 6697 } else {
6731 6698 if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
6732 6699 if (instance->adapterresetinprogress == 0) {
6733 6700 if (instance->tbolt) {
6734 6701 (void) mrsas_tbolt_reset_ppc(
6735 6702 instance);
6736 6703 } else {
6737 6704 (void) mrsas_reset_ppc(
6738 6705 instance);
6739 6706 }
6740 6707 }
6741 6708 } else {
6742 6709 cmn_err(CE_WARN,
6743 6710 "io_timeout_checker: "
6744 6711 "cmd %p cmd->index %d "
6745 6712 "timed out even after 3 resets: "
6746 6713 "so KILL adapter", (void *)cmd, cmd->index);
6747 6714
6748 6715 mrsas_print_cmd_details(instance, cmd, 0xDD);
6749 6716
6750 6717 if (instance->tbolt)
6751 6718 mrsas_tbolt_kill_adapter(instance);
6752 6719 else
6753 6720 (void) mrsas_kill_adapter(instance);
6754 6721 return;
6755 6722 }
6756 6723 }
6757 6724 }
6758 6725 con_log(CL_ANN, (CE_NOTE, "mrsas: "
6759 6726 "schedule next timeout check: "
6760 6727 "do timeout \n"));
6761 6728 instance->timeout_id =
6762 6729 timeout(io_timeout_checker, (void *)instance,
6763 6730 drv_usectohz(MRSAS_1_SECOND));
6764 6731 }
6765 6732
/*
 * read_fw_status_reg_ppc
 *
 * Return the firmware status word, which the firmware posts in
 * outbound scratch pad register 0.
 */
static uint32_t
read_fw_status_reg_ppc(struct mrsas_instance *instance)
{
	uint32_t fw_status;

	fw_status = (uint32_t)RD_OB_SCRATCH_PAD_0(instance);
	return (fw_status);
}
6771 6738
/*
 * issue_cmd_ppc
 *
 * Fire-and-forget submission of a command frame to the firmware via
 * the inbound queue port.  Bumps fw_outstanding, and (unless a reset
 * is in progress, in which case only the packet timer is refreshed)
 * records the command on the pending list so io_timeout_checker can
 * age it.  The queue-port write encodes frame_count-1 in bits 3:1 and
 * sets bit 0, per the 2108 inbound queue protocol.
 */
static void
issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	atomic_add_16(&instance->fw_outstanding, 1);

	pkt = cmd->pkt;
	if (pkt) {
		con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd:"
		    ": %p instance : %p pkt : %p pkt_time : %x\n",
		    gethrtime(), (void *)cmd, (void *)instance,
		    (void *)pkt, cmd->drv_pkt_time));
		if (instance->adapterresetinprogress) {
			/* during OCR, just restart the watchdog clock */
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
		} else {
			push_pending_mfi_pkt(instance, cmd);
		}

	} else {
		con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
	}

	/* serialize queue-port writes across CPUs */
	mutex_enter(&instance->reg_write_mtx);
	/* Issue the command to the FW */
	WR_IB_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
	mutex_exit(&instance->reg_write_mtx);

}
6805 6772
6806 6773 /*
6807 6774 * issue_cmd_in_sync_mode
6808 6775 */
6809 6776 static int
6810 6777 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
6811 6778 struct mrsas_cmd *cmd)
6812 6779 {
6813 6780 int i;
6814 6781 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
6815 6782 struct mrsas_header *hdr = &cmd->frame->hdr;
6816 6783
6817 6784 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));
6818 6785
6819 6786 if (instance->adapterresetinprogress) {
6820 6787 cmd->drv_pkt_time = ddi_get16(
6821 6788 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
6822 6789 if (cmd->drv_pkt_time < debug_timeout_g)
6823 6790 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6824 6791
6825 6792 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
6826 6793 "issue and return in reset case\n"));
6827 6794 WR_IB_QPORT((cmd->frame_phys_addr) |
6828 6795 (((cmd->frame_count - 1) << 1) | 1), instance);
6829 6796
6830 6797 return (DDI_SUCCESS);
6831 6798 } else {
6832 6799 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
6833 6800 push_pending_mfi_pkt(instance, cmd);
6834 6801 }
6835 6802
6836 6803 cmd->cmd_status = ENODATA;
6837 6804
6838 6805 mutex_enter(&instance->reg_write_mtx);
6839 6806 /* Issue the command to the FW */
6840 6807 WR_IB_QPORT((cmd->frame_phys_addr) |
6841 6808 (((cmd->frame_count - 1) << 1) | 1), instance);
6842 6809 mutex_exit(&instance->reg_write_mtx);
6843 6810
6844 6811 mutex_enter(&instance->int_cmd_mtx);
6845 6812 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
6846 6813 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
6847 6814 }
6848 6815 mutex_exit(&instance->int_cmd_mtx);
6849 6816
6850 6817 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));
6851 6818
6852 6819 if (i < (msecs -1)) {
6853 6820 return (DDI_SUCCESS);
6854 6821 } else {
6855 6822 return (DDI_FAILURE);
6856 6823 }
6857 6824 }
6858 6825
6859 6826 /*
6860 6827 * issue_cmd_in_poll_mode
6861 6828 */
6862 6829 static int
6863 6830 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6864 6831 struct mrsas_cmd *cmd)
6865 6832 {
6866 6833 int i;
6867 6834 uint16_t flags;
6868 6835 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6869 6836 struct mrsas_header *frame_hdr;
6870 6837
6871 6838 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
6872 6839
6873 6840 frame_hdr = (struct mrsas_header *)cmd->frame;
6874 6841 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6875 6842 MFI_CMD_STATUS_POLL_MODE);
6876 6843 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6877 6844 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6878 6845
6879 6846 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6880 6847
6881 6848 /* issue the frame using inbound queue port */
6882 6849 WR_IB_QPORT((cmd->frame_phys_addr) |
6883 6850 (((cmd->frame_count - 1) << 1) | 1), instance);
6884 6851
6885 6852 /* wait for cmd_status to change from 0xFF */
6886 6853 for (i = 0; i < msecs && (
6887 6854 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6888 6855 == MFI_CMD_STATUS_POLL_MODE); i++) {
6889 6856 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6890 6857 }
6891 6858
6892 6859 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6893 6860 == MFI_CMD_STATUS_POLL_MODE) {
6894 6861 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6895 6862 "cmd polling timed out"));
6896 6863 return (DDI_FAILURE);
6897 6864 }
6898 6865
6899 6866 return (DDI_SUCCESS);
6900 6867 }
6901 6868
/*
 * enable_intr_ppc
 *
 * Enable reply-message interrupts on a 2108-class controller: clear
 * any latched doorbell bits, then unmask only the reply-message
 * interrupt in the outbound interrupt mask register.  A final read of
 * the mask register forces the posted PCI writes to complete.
 */
static void
enable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));

	/* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
	WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);

	/* WR_OB_INTR_MASK(~0x80000000, instance); */
	WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
	    "outbound_intr_mask = 0x%x", mask));
}
6921 6888
/*
 * disable_intr_ppc
 *
 * Mask all outbound interrupts from the controller by writing the
 * full OB_INTR_MASK, then read the mask register back to flush the
 * posted PCI write.  The read result is unused (lint-silenced).
 */
static void
disable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
	WR_OB_INTR_MASK(OB_INTR_MASK, instance);

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);
#ifdef lint
	mask = mask;
#endif
}
6944 6911
/*
 * intr_ack_ppc
 *
 * Claim-or-reject an interrupt for this instance.  The interrupt is
 * unclaimed when the outbound status register does not show the
 * reply-message bit, or when the register access handle has faulted
 * (in which case an FMA service-lost impact is reported).  A claimed
 * interrupt is acknowledged by writing the status back to the
 * doorbell-clear register, followed by a dummy read to flush it.
 *
 * Returns DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED.
 */
static int
intr_ack_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	int ret = DDI_INTR_CLAIMED;

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));

	/* check if it is our interrupt */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));

	if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
		ret = DDI_INTR_UNCLAIMED;
	}

	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		ret = DDI_INTR_UNCLAIMED;
	}

	if (ret == DDI_INTR_UNCLAIMED) {
		return (ret);
	}
	/* clear the interrupt by writing back the same value */
	WR_OB_DOORBELL_CLEAR(status, instance);

	/* dummy READ */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));

	return (ret);
}
6980 6947
6981 6948 /*
6982 6949 * Marks HBA as bad. This will be called either when an
6983 6950 * IO packet times out even after 3 FW resets
6984 6951 * or FW is found to be fault even after 3 continuous resets.
6985 6952 */
6986 6953
6987 6954 static int
6988 6955 mrsas_kill_adapter(struct mrsas_instance *instance)
6989 6956 {
6990 6957 if (instance->deadadapter == 1)
6991 6958 return (DDI_FAILURE);
6992 6959
6993 6960 con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
6994 6961 "Writing to doorbell with MFI_STOP_ADP "));
6995 6962 mutex_enter(&instance->ocr_flags_mtx);
6996 6963 instance->deadadapter = 1;
6997 6964 mutex_exit(&instance->ocr_flags_mtx);
6998 6965 instance->func_ptr->disable_intr(instance);
6999 6966 WR_IB_DOORBELL(MFI_STOP_ADP, instance);
7000 6967 (void) mrsas_complete_pending_cmds(instance);
7001 6968 return (DDI_SUCCESS);
7002 6969 }
7003 6970
7004 6971
/*
 * mrsas_reset_ppc
 *
 * Online controller reset (OCR) for 2108-class HBAs.  With interrupts
 * disabled and adapterresetinprogress set, the diagnostic write-enable
 * magic sequence is written, DRWE is polled, and DIAG_RESET_ADAPTER is
 * asserted and polled clear (each poll bounded by ~100 retries of
 * 100 ms; failure kills the adapter).  If the firmware comes back
 * faulted, the whole sequence is retried up to MAX_FW_RESET_COUNT
 * times before the adapter is killed.  On success the reply-queue
 * producer/consumer indices are reset, INIT is re-issued, pending
 * commands are replayed, and the AEN command is re-registered.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE when the HBA is (or ends up)
 * dead.
 */
static int
mrsas_reset_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	uint32_t retry = 0;
	uint32_t cur_abs_reg_val;
	uint32_t fw_state;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (instance->deadadapter == 1) {
		cmn_err(CE_WARN, "mrsas_reset_ppc: "
		    "no more resets as HBA has been marked dead ");
		return (DDI_FAILURE);
	}
	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress "
	    "flag set, time %llx", gethrtime()));

	instance->func_ptr->disable_intr(instance);
retry_reset:
	/* magic unlock sequence for the diagnostic register */
	WR_IB_WRITE_SEQ(0, instance);
	WR_IB_WRITE_SEQ(4, instance);
	WR_IB_WRITE_SEQ(0xb, instance);
	WR_IB_WRITE_SEQ(2, instance);
	WR_IB_WRITE_SEQ(7, instance);
	WR_IB_WRITE_SEQ(0xd, instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
	    "to write sequence register\n"));
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);

	/* wait for the controller to grant diagnostic write enable */
	while (!(status & DIAG_WRITE_ENABLE)) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit "
			    "check retry count %d", retry);
			return (DDI_FAILURE);
		}
	}
	WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_OB_DRWE(instance);
	/*
	 * NOTE(review): retry is carried over from the previous loop
	 * rather than reset to 0 here — the second poll gets whatever
	 * budget the first one left over; confirm that is intended.
	 */
	while (status & DIAG_RESET_ADAPTER) {
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_OB_DRWE(instance);
		if (retry++ == 100) {
			cmn_err(CE_WARN, "mrsas_reset_ppc: "
			    "RESET FAILED. KILL adapter called.");

			(void) mrsas_kill_adapter(instance);
			return (DDI_FAILURE);
		}
	}
	con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mfi_state_transition_to_ready"));

	/* Mark HBA as bad, if FW is fault after 3 continuous resets */
	if (mfi_state_transition_to_ready(instance) ||
	    debug_fw_faults_after_ocr_g == 1) {
		cur_abs_reg_val =
		    instance->func_ptr->read_fw_status_reg(instance);
		fw_state = cur_abs_reg_val & MFI_STATE_MASK;

#ifdef OCRDEBUG
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_reset_ppc :before fake: FW is not ready "
		    "FW state = 0x%x", fw_state));
		if (debug_fw_faults_after_ocr_g == 1)
			fw_state = MFI_STATE_FAULT;
#endif

		con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready "
		    "FW state = 0x%x", fw_state));

		if (fw_state == MFI_STATE_FAULT) {
			/* increment the count */
			instance->fw_fault_count_after_ocr++;
			if (instance->fw_fault_count_after_ocr
			    < MAX_FW_RESET_COUNT) {
				cmn_err(CE_WARN, "mrsas_reset_ppc: "
				    "FW is in fault after OCR count %d "
				    "Retry Reset",
				    instance->fw_fault_count_after_ocr);
				goto retry_reset;

			} else {
				cmn_err(CE_WARN, "mrsas_reset_ppc: "
				    "Max Reset Count exceeded >%d"
				    "Mark HBA as bad, KILL adapter",
				    MAX_FW_RESET_COUNT);

				(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}
		}
	}
	/* reset the counter as FW is up after OCR */
	instance->fw_fault_count_after_ocr = 0;


	/* reset the reply-queue indices to match the re-initialized FW */
	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->producer, 0);

	ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
	    instance->consumer, 0);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    " after resetting produconsumer chck indexs:"
	    "producer %x consumer %x", *instance->producer,
	    *instance->consumer));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_init_mfi"));
	(void) mrsas_issue_init_mfi(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_issue_init_mfi Done"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_print_pending_cmd\n"));
	(void) mrsas_print_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "mrsas_print_pending_cmd done\n"));

	instance->func_ptr->enable_intr(instance);
	instance->fw_outstanding = 0;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling mrsas_issue_pending_cmds"));
	(void) mrsas_issue_pending_cmds(instance);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "issue_pending_cmds done.\n"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "Calling aen registration"));


	/* re-arm the outstanding AEN registration with a fresh timer */
	instance->aen_cmd->retry_count_for_ocr = 0;
	instance->aen_cmd->drv_pkt_time = 0;

	instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
	con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));

	mutex_enter(&instance->ocr_flags_mtx);
	instance->adapterresetinprogress = 0;
	mutex_exit(&instance->ocr_flags_mtx);
	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
	    "adpterresetinprogress flag unset"));

	con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
	return (DDI_SUCCESS);
}
7161 7128
7162 7129 /*
7163 7130 * FMA functions.
7164 7131 */
7165 7132 int
7166 7133 mrsas_common_check(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
7167 7134 {
7168 7135 int ret = DDI_SUCCESS;
7169 7136
7170 7137 if (cmd != NULL &&
7171 7138 mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
7172 7139 DDI_SUCCESS) {
7173 7140 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7174 7141 if (cmd->pkt != NULL) {
7175 7142 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7176 7143 cmd->pkt->pkt_statistics = 0;
7177 7144 }
7178 7145 ret = DDI_FAILURE;
7179 7146 }
7180 7147 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
7181 7148 != DDI_SUCCESS) {
7182 7149 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7183 7150 if (cmd != NULL && cmd->pkt != NULL) {
7184 7151 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7185 7152 cmd->pkt->pkt_statistics = 0;
7186 7153 }
7187 7154 ret = DDI_FAILURE;
7188 7155 }
7189 7156 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
7190 7157 DDI_SUCCESS) {
7191 7158 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7192 7159 if (cmd != NULL && cmd->pkt != NULL) {
7193 7160 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7194 7161 cmd->pkt->pkt_statistics = 0;
7195 7162 }
7196 7163 ret = DDI_FAILURE;
7197 7164 }
7198 7165 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
7199 7166 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7200 7167
7201 7168 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
7202 7169
7203 7170 if (cmd != NULL && cmd->pkt != NULL) {
7204 7171 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7205 7172 cmd->pkt->pkt_statistics = 0;
7206 7173 }
7207 7174 ret = DDI_FAILURE;
7208 7175 }
7209 7176
7210 7177 return (ret);
7211 7178 }
7212 7179
7213 7180 /*ARGSUSED*/
7214 7181 static int
7215 7182 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
7216 7183 {
7217 7184 /*
7218 7185 * as the driver can always deal with an error in any dma or
7219 7186 * access handle, we can just return the fme_status value.
7220 7187 */
7221 7188 pci_ereport_post(dip, err, NULL);
7222 7189 return (err->fme_status);
7223 7190 }
7224 7191
7225 7192 static void
7226 7193 mrsas_fm_init(struct mrsas_instance *instance)
7227 7194 {
7228 7195 /* Need to change iblock to priority for new MSI intr */
7229 7196 ddi_iblock_cookie_t fm_ibc;
7230 7197
7231 7198 /* Only register with IO Fault Services if we have some capability */
7232 7199 if (instance->fm_capabilities) {
7233 7200 /* Adjust access and dma attributes for FMA */
7234 7201 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7235 7202 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7236 7203
7237 7204 /*
7238 7205 * Register capabilities with IO Fault Services.
7239 7206 * fm_capabilities will be updated to indicate
7240 7207 * capabilities actually supported (not requested.)
7241 7208 */
7242 7209
7243 7210 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
7244 7211
7245 7212 /*
7246 7213 * Initialize pci ereport capabilities if ereport
7247 7214 * capable (should always be.)
7248 7215 */
7249 7216
7250 7217 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7251 7218 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7252 7219 pci_ereport_setup(instance->dip);
7253 7220 }
7254 7221
7255 7222 /*
7256 7223 * Register error callback if error callback capable.
7257 7224 */
7258 7225 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7259 7226 ddi_fm_handler_register(instance->dip,
7260 7227 mrsas_fm_error_cb, (void*) instance);
7261 7228 }
7262 7229 } else {
7263 7230 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7264 7231 mrsas_generic_dma_attr.dma_attr_flags = 0;
7265 7232 }
7266 7233 }
7267 7234
7268 7235 static void
7269 7236 mrsas_fm_fini(struct mrsas_instance *instance)
7270 7237 {
7271 7238 /* Only unregister FMA capabilities if registered */
7272 7239 if (instance->fm_capabilities) {
7273 7240 /*
7274 7241 * Un-register error callback if error callback capable.
7275 7242 */
7276 7243 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7277 7244 ddi_fm_handler_unregister(instance->dip);
7278 7245 }
7279 7246
7280 7247 /*
7281 7248 * Release any resources allocated by pci_ereport_setup()
7282 7249 */
7283 7250 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7284 7251 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7285 7252 pci_ereport_teardown(instance->dip);
7286 7253 }
7287 7254
7288 7255 /* Unregister from IO Fault Services */
7289 7256 ddi_fm_fini(instance->dip);
7290 7257
7291 7258 /* Adjust access and dma attributes for FMA */
7292 7259 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7293 7260 mrsas_generic_dma_attr.dma_attr_flags = 0;
7294 7261 }
7295 7262 }
7296 7263
7297 7264 int
7298 7265 mrsas_check_acc_handle(ddi_acc_handle_t handle)
7299 7266 {
7300 7267 ddi_fm_error_t de;
7301 7268
7302 7269 if (handle == NULL) {
7303 7270 return (DDI_FAILURE);
7304 7271 }
7305 7272
7306 7273 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7307 7274
7308 7275 return (de.fme_status);
7309 7276 }
7310 7277
7311 7278 int
7312 7279 mrsas_check_dma_handle(ddi_dma_handle_t handle)
7313 7280 {
7314 7281 ddi_fm_error_t de;
7315 7282
7316 7283 if (handle == NULL) {
7317 7284 return (DDI_FAILURE);
7318 7285 }
7319 7286
7320 7287 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7321 7288
7322 7289 return (de.fme_status);
7323 7290 }
7324 7291
7325 7292 void
7326 7293 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail)
7327 7294 {
7328 7295 uint64_t ena;
7329 7296 char buf[FM_MAX_CLASS];
7330 7297
7331 7298 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7332 7299 ena = fm_ena_generate(0, FM_ENA_FMT1);
7333 7300 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
7334 7301 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
7335 7302 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7336 7303 }
7337 7304 }
7338 7305
/*
 * mrsas_add_intrs - allocate and install the driver's interrupt handler
 * for the given interrupt type (FIXED, MSI or MSI-X).
 *
 * The driver has a single ISR (mrsas_isr), so at most one vector is
 * allocated even when the device advertises more.  On success the
 * handler is registered and enabled (block-enable when the framework
 * supports it) and DDI_SUCCESS is returned.  On any failure the
 * partially acquired resources are released through the goto-cleanup
 * ladder at the bottom and DDI_FAILURE is returned.
 */
static int
mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
{

	dev_info_t *dip = instance->dip;
	int	avail, actual, count;
	int	i, flag, ret;

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
	    intr_type));

	/* Get number of interrupts the device supports for this type. */
	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
	if ((ret != DDI_SUCCESS) || (count == 0)) {
		con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
		    "ret %d count %d", ret, count));

		return (DDI_FAILURE);
	}

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));

	/* Get number of interrupts currently available for allocation. */
	ret = ddi_intr_get_navail(dip, intr_type, &avail);
	if ((ret != DDI_SUCCESS) || (avail == 0)) {
		con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
		    "ret %d avail %d", ret, avail));

		return (DDI_FAILURE);
	}
	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));

	/* Only one interrupt routine. So limit the count to 1 */
	if (count > 1) {
		count = 1;
	}

	/*
	 * Allocate an array of interrupt handlers. Currently we support
	 * only one interrupt. The framework can be extended later.
	 */
	instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
	instance->intr_htable = kmem_zalloc(instance->intr_htable_size,
	    KM_SLEEP);
	/* KM_SLEEP allocations never return NULL; the ASSERT documents it. */
	ASSERT(instance->intr_htable);

	/* MSI/MSI-X must be allocated strictly; FIXED may be relaxed. */
	flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
	    (intr_type == DDI_INTR_TYPE_MSIX)) ?
	    DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;

	/* Allocate the interrupt vector(s). */
	ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
	    count, &actual, flag);

	if ((ret != DDI_SUCCESS) || (actual == 0)) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "avail = %d", avail));
		goto mrsas_free_htable;
	}

	if (actual < count) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "Requested = %d Received = %d", count, actual));
	}
	instance->intr_cnt = actual;

	/*
	 * Get the priority of the interrupt allocated.
	 */
	if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
	    &instance->intr_pri)) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "get priority call failed"));
		goto mrsas_free_handles;
	}

	/*
	 * Test for high level mutex. we don't support them.
	 */
	if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
		con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
		    "High level interrupts not supported."));
		goto mrsas_free_handles;
	}

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
	    instance->intr_pri));

	/* Attach mrsas_isr to every allocated vector. */
	for (i = 0; i < actual; i++) {
		ret = ddi_intr_add_handler(instance->intr_htable[i],
		    (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
		    (caddr_t)(uintptr_t)i);

		if (ret != DDI_SUCCESS) {
			/*
			 * NOTE(review): jumping to mrsas_free_handles here
			 * frees the handles without first removing the
			 * handlers already added for indices < i — looks
			 * like it should go through mrsas_free_handlers;
			 * confirm against ddi_intr_free(9F) requirements.
			 */
			con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
			    "failed %d", ret));
			goto mrsas_free_handles;
		}

	}

	con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));

	/* Query capabilities to learn whether block-enable is supported. */
	if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
	    &instance->intr_cap)) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
		    ret));
		goto mrsas_free_handlers;
	}

	if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
		con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable"));

		(void) ddi_intr_block_enable(instance->intr_htable,
		    instance->intr_cnt);
	} else {
		con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable"));

		for (i = 0; i < instance->intr_cnt; i++) {
			(void) ddi_intr_enable(instance->intr_htable[i]);
			con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns "
			    "%d", i));
		}
	}

	return (DDI_SUCCESS);

	/* Error unwind: labels fall through from most- to least-acquired. */
mrsas_free_handlers:
	for (i = 0; i < actual; i++)
		(void) ddi_intr_remove_handler(instance->intr_htable[i]);

mrsas_free_handles:
	for (i = 0; i < actual; i++)
		(void) ddi_intr_free(instance->intr_htable[i]);

mrsas_free_htable:
	if (instance->intr_htable != NULL)
		kmem_free(instance->intr_htable, instance->intr_htable_size);

	instance->intr_htable = NULL;
	instance->intr_htable_size = 0;

	return (DDI_FAILURE);

}
7490 7452
7491 7453
7492 7454 static void
7493 7455 mrsas_rem_intrs(struct mrsas_instance *instance)
7494 7456 {
7495 7457 int i;
7496 7458
7497 7459 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));
7498 7460
7499 7461 /* Disable all interrupts first */
7500 7462 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7501 7463 (void) ddi_intr_block_disable(instance->intr_htable,
7502 7464 instance->intr_cnt);
7503 7465 } else {
7504 7466 for (i = 0; i < instance->intr_cnt; i++) {
7505 7467 (void) ddi_intr_disable(instance->intr_htable[i]);
7506 7468 }
7507 7469 }
7508 7470
7509 7471 /* Remove all the handlers */
7510 7472
7511 7473 for (i = 0; i < instance->intr_cnt; i++) {
7512 7474 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7513 7475 (void) ddi_intr_free(instance->intr_htable[i]);
7514 7476 }
7515 7477
7516 7478 if (instance->intr_htable != NULL)
7517 7479 kmem_free(instance->intr_htable, instance->intr_htable_size);
7518 7480
7519 7481 instance->intr_htable = NULL;
7520 7482 instance->intr_htable_size = 0;
7521 7483
7522 7484 }
7523 7485
7524 7486 static int
7525 7487 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
7526 7488 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
7527 7489 {
7528 7490 struct mrsas_instance *instance;
7529 7491 int config;
7530 7492 int rval = NDI_SUCCESS;
7531 7493
7532 7494 char *ptr = NULL;
7533 7495 int tgt, lun;
7534 7496
7535 7497 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));
7536 7498
7537 7499 if ((instance = ddi_get_soft_state(mrsas_state,
7538 7500 ddi_get_instance(parent))) == NULL) {
7539 7501 return (NDI_FAILURE);
7540 7502 }
7541 7503
7542 7504 /* Hold nexus during bus_config */
7543 7505 ndi_devi_enter(parent, &config);
7544 7506 switch (op) {
7545 7507 case BUS_CONFIG_ONE: {
7546 7508
7547 7509 /* parse wwid/target name out of name given */
7548 7510 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7549 7511 rval = NDI_FAILURE;
7550 7512 break;
7551 7513 }
7552 7514 ptr++;
7553 7515
7554 7516 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7555 7517 rval = NDI_FAILURE;
7556 7518 break;
7557 7519 }
7558 7520
7559 7521 if (lun == 0) {
7560 7522 rval = mrsas_config_ld(instance, tgt, lun, childp);
7561 7523 #ifdef PDSUPPORT
7562 7524 } else if (instance->tbolt == 1 && lun != 0) {
7563 7525 rval = mrsas_tbolt_config_pd(instance,
7564 7526 tgt, lun, childp);
7565 7527 #endif
7566 7528 } else {
7567 7529 rval = NDI_FAILURE;
7568 7530 }
7569 7531
7570 7532 break;
7571 7533 }
7572 7534 case BUS_CONFIG_DRIVER:
7573 7535 case BUS_CONFIG_ALL: {
7574 7536
7575 7537 rval = mrsas_config_all_devices(instance);
7576 7538
7577 7539 rval = NDI_SUCCESS;
7578 7540 break;
7579 7541 }
7580 7542 }
7581 7543
7582 7544 if (rval == NDI_SUCCESS) {
7583 7545 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7584 7546
7585 7547 }
7586 7548 ndi_devi_exit(parent, config);
7587 7549
7588 7550 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7589 7551 rval));
7590 7552 return (rval);
7591 7553 }
7592 7554
7593 7555 static int
7594 7556 mrsas_config_all_devices(struct mrsas_instance *instance)
7595 7557 {
7596 7558 int rval, tgt;
7597 7559
7598 7560 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7599 7561 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7600 7562
7601 7563 }
7602 7564
7603 7565 #ifdef PDSUPPORT
7604 7566 /* Config PD devices connected to the card */
7605 7567 if (instance->tbolt) {
7606 7568 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7607 7569 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7608 7570 }
7609 7571 }
7610 7572 #endif
7611 7573
7612 7574 rval = NDI_SUCCESS;
7613 7575 return (rval);
7614 7576 }
7615 7577
7616 7578 static int
7617 7579 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7618 7580 {
7619 7581 char devbuf[SCSI_MAXNAMELEN];
7620 7582 char *addr;
7621 7583 char *p, *tp, *lp;
7622 7584 long num;
7623 7585
7624 7586 /* Parse dev name and address */
7625 7587 (void) strcpy(devbuf, devnm);
7626 7588 addr = "";
7627 7589 for (p = devbuf; *p != '\0'; p++) {
7628 7590 if (*p == '@') {
7629 7591 addr = p + 1;
7630 7592 *p = '\0';
7631 7593 } else if (*p == ':') {
7632 7594 *p = '\0';
7633 7595 break;
7634 7596 }
7635 7597 }
7636 7598
7637 7599 /* Parse target and lun */
7638 7600 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7639 7601 if (*p == ',') {
7640 7602 lp = p + 1;
7641 7603 *p = '\0';
7642 7604 break;
7643 7605 }
7644 7606 }
7645 7607 if (tgt && tp) {
7646 7608 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7647 7609 return (DDI_FAILURE); /* Can declare this as constant */
7648 7610 }
7649 7611 *tgt = (int)num;
7650 7612 }
7651 7613 if (lun && lp) {
7652 7614 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7653 7615 return (DDI_FAILURE);
7654 7616 }
7655 7617 *lun = (int)num;
7656 7618 }
7657 7619 return (DDI_SUCCESS); /* Success case */
7658 7620 }
7659 7621
/*
 * mrsas_config_ld - configure the child dev_info node for logical
 * drive <tgt, lun>.
 *
 * If a node already exists it is reused (returned through *ldip when
 * ldip is non-NULL); a node whose target entry is no longer valid is
 * instead scheduled for unconfigure via the event taskq and NDI_FAILURE
 * is returned.  Otherwise the device is probed and, when it responds,
 * a new node is created by mrsas_config_scsi_device().  Returns an
 * NDI_* status.
 */
static int
mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
    uint8_t lun, dev_info_t **ldip)
{
	struct scsi_device *sd;
	dev_info_t *child;
	int rval;

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
	    tgt, lun));

	/* Fast path: a node for this address already exists. */
	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
		if (ldip) {
			*ldip = child;
		}
		/* Stale node for a departed target: schedule its removal. */
		if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
			rval = mrsas_service_evt(instance, tgt, 0,
			    MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN,
			    "mr_sas: DELETING STALE ENTRY rval = %d "
			    "tgt id = %d ", rval, tgt));
			return (NDI_FAILURE);
		}
		return (NDI_SUCCESS);
	}

	/* KM_SLEEP allocations cannot fail, so no NULL check is needed. */
	sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
	sd->sd_address.a_hba_tran = instance->tran;
	sd->sd_address.a_target = (uint16_t)tgt;
	sd->sd_address.a_lun = (uint8_t)lun;

	/* Probe the address; only create a node for a responding device. */
	if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
		rval = mrsas_config_scsi_device(instance, sd, ldip);
	else
		rval = NDI_FAILURE;

	/* sd_unprobe is blank now. Free buffer manually */
	if (sd->sd_inq) {
		kmem_free(sd->sd_inq, SUN_INQSIZE);
		sd->sd_inq = (struct scsi_inquiry *)NULL;
	}

	kmem_free(sd, sizeof (struct scsi_device));
	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
	    rval));
	return (rval);
}
7712 7669
/*
 * mrsas_config_scsi_device - create, decorate and online a child
 * dev_info node for the already-probed scsi_device *sd.
 *
 * The node name and "compatible" forms are derived from the inquiry
 * data; "target", "lun" and "compatible" properties are attached
 * before the node is onlined.  On return *dipp (when non-NULL) is set
 * to the node (NULL if none was created).  Returns an NDI_* status.
 */
int
mrsas_config_scsi_device(struct mrsas_instance *instance,
    struct scsi_device *sd, dev_info_t **dipp)
{
	char *nodename = NULL;
	char **compatible = NULL;
	int ncompatible = 0;
	char *childname;
	dev_info_t *ldip = NULL;
	int tgt = sd->sd_address.a_target;
	int lun = sd->sd_address.a_lun;
	int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
	int rval;

	con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
	/* Derive node name and compatible forms from the inquiry data. */
	scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
	    NULL, &nodename, &compatible, &ncompatible);

	if (nodename == NULL) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
		    "for t%dL%d", tgt, lun));
		rval = NDI_FAILURE;
		goto finish;
	}

	/* Direct-access devices bind to "sd"; everything else by nodename. */
	childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
	/* NOTE(review): "%2s" looks like a typo for "%s" — confirm. */
	con_log(CL_DLEVEL1, (CE_NOTE,
	    "mr_sas: Childname = %2s nodename = %s", childname, nodename));

	/* Create a dev node */
	rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
	con_log(CL_DLEVEL1, (CE_NOTE,
	    "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
	if (rval == NDI_SUCCESS) {
		/*
		 * NOTE(review): if any property update below fails, the
		 * freshly allocated node is not ndi_devi_free()d before the
		 * goto — looks like a devinfo node leak; confirm intent.
		 */
		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
		    DDI_PROP_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
			    "property for t%dl%d target", tgt, lun));
			rval = NDI_FAILURE;
			goto finish;
		}
		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
		    DDI_PROP_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
			    "property for t%dl%d lun", tgt, lun));
			rval = NDI_FAILURE;
			goto finish;
		}

		if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
		    "compatible", compatible, ncompatible) !=
		    DDI_PROP_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
			    "property for t%dl%d compatible", tgt, lun));
			rval = NDI_FAILURE;
			goto finish;
		}

		/* Properties in place: bring the node online. */
		rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
		if (rval != NDI_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
			    "t%dl%d", tgt, lun));
			ndi_prop_remove_all(ldip);
			(void) ndi_devi_free(ldip);
		} else {
			con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :"
			    "0 t%dl%d", tgt, lun));
		}

	}
finish:
	if (dipp) {
		*dipp = ldip;
	}

	con_log(CL_DLEVEL1, (CE_NOTE,
	    "mr_sas: config_scsi_device rval = %d t%dL%d",
	    rval, tgt, lun));
	scsi_hba_nodename_compatible_free(nodename, compatible);
	return (rval);
}
7794 7751
7795 7752 /*ARGSUSED*/
7796 7753 int
7797 7754 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7798 7755 uint64_t wwn)
7799 7756 {
7800 7757 struct mrsas_eventinfo *mrevt = NULL;
7801 7758
7802 7759 con_log(CL_ANN1, (CE_NOTE,
7803 7760 "mrsas_service_evt called for t%dl%d event = %d",
7804 7761 tgt, lun, event));
7805 7762
7806 7763 if ((instance->taskq == NULL) || (mrevt =
7807 7764 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7808 7765 return (ENOMEM);
7809 7766 }
7810 7767
7811 7768 mrevt->instance = instance;
7812 7769 mrevt->tgt = tgt;
7813 7770 mrevt->lun = lun;
7814 7771 mrevt->event = event;
7815 7772 mrevt->wwn = wwn;
7816 7773
7817 7774 if ((ddi_taskq_dispatch(instance->taskq,
7818 7775 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7819 7776 DDI_SUCCESS) {
7820 7777 con_log(CL_ANN1, (CE_NOTE,
7821 7778 "mr_sas: Event task failed for t%dl%d event = %d",
7822 7779 tgt, lun, event));
7823 7780 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7824 7781 return (DDI_FAILURE);
7825 7782 }
7826 7783 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event);
7827 7784 return (DDI_SUCCESS);
7828 7785 }
7829 7786
7830 7787 static void
7831 7788 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
7832 7789 {
7833 7790 struct mrsas_instance *instance = mrevt->instance;
7834 7791 dev_info_t *dip, *pdip;
7835 7792 int circ1 = 0;
7836 7793 char *devname;
7837 7794
7838 7795 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
7839 7796 " tgt %d lun %d event %d",
7840 7797 mrevt->tgt, mrevt->lun, mrevt->event));
7841 7798
7842 7799 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
7843 7800 mutex_enter(&instance->config_dev_mtx);
7844 7801 dip = instance->mr_ld_list[mrevt->tgt].dip;
7845 7802 mutex_exit(&instance->config_dev_mtx);
7846 7803 #ifdef PDSUPPORT
7847 7804 } else {
7848 7805 mutex_enter(&instance->config_dev_mtx);
7849 7806 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7850 7807 mutex_exit(&instance->config_dev_mtx);
7851 7808 #endif
7852 7809 }
7853 7810
7854 7811
7855 7812 ndi_devi_enter(instance->dip, &circ1);
7856 7813 switch (mrevt->event) {
7857 7814 case MRSAS_EVT_CONFIG_TGT:
7858 7815 if (dip == NULL) {
7859 7816
7860 7817 if (mrevt->lun == 0) {
7861 7818 (void) mrsas_config_ld(instance, mrevt->tgt,
7862 7819 0, NULL);
7863 7820 #ifdef PDSUPPORT
7864 7821 } else if (instance->tbolt) {
7865 7822 (void) mrsas_tbolt_config_pd(instance,
7866 7823 mrevt->tgt,
7867 7824 1, NULL);
7868 7825 #endif
7869 7826 }
7870 7827 con_log(CL_ANN1, (CE_NOTE,
7871 7828 "mr_sas: EVT_CONFIG_TGT called:"
7872 7829 " for tgt %d lun %d event %d",
7873 7830 mrevt->tgt, mrevt->lun, mrevt->event));
7874 7831
7875 7832 } else {
7876 7833 con_log(CL_ANN1, (CE_NOTE,
7877 7834 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7878 7835 " for tgt %d lun %d event %d",
7879 7836 mrevt->tgt, mrevt->lun, mrevt->event));
7880 7837 }
7881 7838 break;
7882 7839 case MRSAS_EVT_UNCONFIG_TGT:
7883 7840 if (dip) {
7884 7841 if (i_ddi_devi_attached(dip)) {
7885 7842
7886 7843 pdip = ddi_get_parent(dip);
7887 7844
7888 7845 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
7889 7846 (void) ddi_deviname(dip, devname);
7890 7847
7891 7848 (void) devfs_clean(pdip, devname + 1,
7892 7849 DV_CLEAN_FORCE);
7893 7850 kmem_free(devname, MAXNAMELEN + 1);
7894 7851 }
7895 7852 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7896 7853 con_log(CL_ANN1, (CE_NOTE,
7897 7854 "mr_sas: EVT_UNCONFIG_TGT called:"
7898 7855 " for tgt %d lun %d event %d",
7899 7856 mrevt->tgt, mrevt->lun, mrevt->event));
7900 7857 } else {
7901 7858 con_log(CL_ANN1, (CE_NOTE,
7902 7859 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7903 7860 " for tgt %d lun %d event %d",
7904 7861 mrevt->tgt, mrevt->lun, mrevt->event));
7905 7862 }
7906 7863 break;
7907 7864 }
7908 7865 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7909 7866 ndi_devi_exit(instance->dip, circ1);
7910 7867 }
7911 7868
7912 7869
7913 7870 int
7914 7871 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7915 7872 {
7916 7873 union scsi_cdb *cdbp;
7917 7874 uint16_t page_code;
7918 7875 struct scsa_cmd *acmd;
7919 7876 struct buf *bp;
7920 7877 struct mode_header *modehdrp;
7921 7878
7922 7879 cdbp = (void *)pkt->pkt_cdbp;
7923 7880 page_code = cdbp->cdb_un.sg.scsi[0];
7924 7881 acmd = PKT2CMD(pkt);
7925 7882 bp = acmd->cmd_buf;
7926 7883 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) {
7927 7884 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7928 7885 /* ADD pkt statistics as Command failed. */
7929 7886 return (NULL);
7930 7887 }
7931 7888
7932 7889 bp_mapin(bp);
7933 7890 bzero(bp->b_un.b_addr, bp->b_bcount);
7934 7891
7935 7892 switch (page_code) {
7936 7893 case 0x3: {
7937 7894 struct mode_format *page3p = NULL;
7938 7895 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7939 7896 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7940 7897
7941 7898 page3p = (void *)((caddr_t)modehdrp +
7942 7899 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7943 7900 page3p->mode_page.code = 0x3;
7944 7901 page3p->mode_page.length =
7945 7902 (uchar_t)(sizeof (struct mode_format));
7946 7903 page3p->data_bytes_sect = 512;
7947 7904 page3p->sect_track = 63;
7948 7905 break;
7949 7906 }
7950 7907 case 0x4: {
7951 7908 struct mode_geometry *page4p = NULL;
7952 7909 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7953 7910 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7954 7911
7955 7912 page4p = (void *)((caddr_t)modehdrp +
7956 7913 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7957 7914 page4p->mode_page.code = 0x4;
7958 7915 page4p->mode_page.length =
7959 7916 (uchar_t)(sizeof (struct mode_geometry));
7960 7917 page4p->heads = 255;
7961 7918 page4p->rpm = 10000;
7962 7919 break;
7963 7920 }
7964 7921 default:
7965 7922 break;
7966 7923 }
7967 7924 return (NULL);
7968 7925 }
|
↓ open down ↓ |
267 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX