Print this page
Code review comments
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/mr_sas/mr_sas.c
+++ new/usr/src/uts/common/io/mr_sas/mr_sas.c
1 1 /*
2 2 * mr_sas.c: source for mr_sas driver
3 3 *
4 4 * Solaris MegaRAID device driver for SAS2.0 controllers
5 5 * Copyright (c) 2008-2012, LSI Logic Corporation.
6 6 * All rights reserved.
7 7 *
8 8 * Version:
9 9 * Author:
10 10 * Swaminathan K S
11 11 * Arun Chandrashekhar
12 12 * Manju R
13 13 * Rasheed
14 14 * Shakeel Bukhari
15 15 *
16 16 * Redistribution and use in source and binary forms, with or without
17 17 * modification, are permitted provided that the following conditions are met:
18 18 *
19 19 * 1. Redistributions of source code must retain the above copyright notice,
20 20 * this list of conditions and the following disclaimer.
21 21 *
22 22 * 2. Redistributions in binary form must reproduce the above copyright notice,
23 23 * this list of conditions and the following disclaimer in the documentation
24 24 * and/or other materials provided with the distribution.
25 25 *
26 26 * 3. Neither the name of the author nor the names of its contributors may be
27 27 * used to endorse or promote products derived from this software without
28 28 * specific prior written permission.
29 29 *
30 30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 34 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
37 37 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
41 41 * DAMAGE.
42 42 */
43 43
44 44 /*
45 45 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
46 46 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
47 47 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
48 48 */
49 49
50 50 #include <sys/types.h>
51 51 #include <sys/param.h>
52 52 #include <sys/file.h>
53 53 #include <sys/errno.h>
54 54 #include <sys/open.h>
55 55 #include <sys/cred.h>
56 56 #include <sys/modctl.h>
57 57 #include <sys/conf.h>
58 58 #include <sys/devops.h>
59 59 #include <sys/cmn_err.h>
60 60 #include <sys/kmem.h>
61 61 #include <sys/stat.h>
62 62 #include <sys/mkdev.h>
63 63 #include <sys/pci.h>
64 64 #include <sys/scsi/scsi.h>
65 65 #include <sys/ddi.h>
66 66 #include <sys/sunddi.h>
67 67 #include <sys/atomic.h>
68 68 #include <sys/signal.h>
69 69 #include <sys/byteorder.h>
70 70 #include <sys/sdt.h>
71 71 #include <sys/fs/dv_node.h> /* devfs_clean */
72 72
73 73 #include "mr_sas.h"
74 74
75 75 /*
76 76 * FMA header files
77 77 */
78 78 #include <sys/ddifm.h>
79 79 #include <sys/fm/protocol.h>
80 80 #include <sys/fm/util.h>
81 81 #include <sys/fm/io/ddi.h>
82 82
/*
 * Local static data
 */
/* Per-controller soft-state anchor; initialized in _init(). */
static void *mrsas_state = NULL;
/* When set, OR DDI_DMA_RELAXED_ORDERING into the generic DMA attributes. */
static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
/* con_log() verbosity; CL_NONE suppresses debug output. */
volatile int debug_level_g = CL_NONE;
/* Prefer MSI/MSI-X interrupts; cleared via the "mrsas-enable-msi" property. */
static volatile int msi_enable = 1;
/* Tunable cleared via the "mrsas-enable-ctio" property at attach time. */
static volatile int ctio_enable = 1;

/* Default Timeout value to issue online controller reset */
volatile int debug_timeout_g = 0xF0;	/* 0xB4; */
/* Simulate consecutive firmware fault */
static volatile int debug_fw_faults_after_ocr_g = 0;
#ifdef OCRDEBUG
/* Simulate three consecutive timeout for an IO */
static volatile int debug_consecutive_timeout_after_ocr_g = 0;
#endif

#if 0
/* Enable OCR on firmware fault */
static volatile int debug_support_ocr_isr_g = 0;
#endif
/*
 * NOTE(review): weak bindings presumably let the module load even when
 * the framework does not export these SCSI HBA entry points -- confirm.
 */
#pragma	weak	scsi_hba_open
#pragma	weak	scsi_hba_close
#pragma	weak	scsi_hba_ioctl
108 108
109 109 /* Local static prototypes. */
110 110 static int mrsas_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
111 111 static int mrsas_attach(dev_info_t *, ddi_attach_cmd_t);
112 112 #ifdef __sparc
113 113 static int mrsas_reset(dev_info_t *, ddi_reset_cmd_t);
114 114 #else
115 115 static int mrsas_quiesce(dev_info_t *);
116 116 #endif
117 117 static int mrsas_detach(dev_info_t *, ddi_detach_cmd_t);
118 118 static int mrsas_open(dev_t *, int, int, cred_t *);
119 119 static int mrsas_close(dev_t, int, int, cred_t *);
120 120 static int mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
121 121
122 122 static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
123 123 scsi_hba_tran_t *, struct scsi_device *);
124 124 static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
125 125 struct scsi_pkt *, struct buf *, int, int, int, int,
126 126 int (*)(), caddr_t);
127 127 static int mrsas_tran_start(struct scsi_address *,
128 128 register struct scsi_pkt *);
129 129 static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
130 130 static int mrsas_tran_reset(struct scsi_address *, int);
131 131 #if 0
132 132 static int mrsas_tran_bus_reset(dev_info_t *, int);
133 133 #endif
134 134 static int mrsas_tran_getcap(struct scsi_address *, char *, int);
135 135 static int mrsas_tran_setcap(struct scsi_address *, char *, int, int);
136 136 static void mrsas_tran_destroy_pkt(struct scsi_address *,
137 137 struct scsi_pkt *);
138 138 static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
139 139 static void mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
140 140 static int mrsas_tran_quiesce(dev_info_t *dip);
141 141 static int mrsas_tran_unquiesce(dev_info_t *dip);
142 142 static uint_t mrsas_isr();
143 143 static uint_t mrsas_softintr();
144 144 static void mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);
145 145 static struct mrsas_cmd *get_mfi_pkt(struct mrsas_instance *);
146 146 static void return_mfi_pkt(struct mrsas_instance *,
147 147 struct mrsas_cmd *);
148 148
149 149 static void free_space_for_mfi(struct mrsas_instance *);
150 150 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
151 151 static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
152 152 static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
153 153 struct mrsas_cmd *);
154 154 static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
155 155 struct mrsas_cmd *);
156 156 static void enable_intr_ppc(struct mrsas_instance *);
157 157 static void disable_intr_ppc(struct mrsas_instance *);
158 158 static int intr_ack_ppc(struct mrsas_instance *);
159 159 static void flush_cache(struct mrsas_instance *instance);
160 160 void display_scsi_inquiry(caddr_t);
161 161 static int start_mfi_aen(struct mrsas_instance *instance);
162 162 static int handle_drv_ioctl(struct mrsas_instance *instance,
163 163 struct mrsas_ioctl *ioctl, int mode);
164 164 static int handle_mfi_ioctl(struct mrsas_instance *instance,
165 165 struct mrsas_ioctl *ioctl, int mode);
166 166 static int handle_mfi_aen(struct mrsas_instance *instance,
167 167 struct mrsas_aen *aen);
168 168 static struct mrsas_cmd *build_cmd(struct mrsas_instance *,
169 169 struct scsi_address *, struct scsi_pkt *, uchar_t *);
170 170 static int alloc_additional_dma_buffer(struct mrsas_instance *);
171 171 static void complete_cmd_in_sync_mode(struct mrsas_instance *,
172 172 struct mrsas_cmd *);
173 173 static int mrsas_kill_adapter(struct mrsas_instance *);
174 174 static int mrsas_issue_init_mfi(struct mrsas_instance *);
175 175 static int mrsas_reset_ppc(struct mrsas_instance *);
176 176 static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *);
177 177 static int wait_for_outstanding(struct mrsas_instance *instance);
178 178 static int register_mfi_aen(struct mrsas_instance *instance,
179 179 uint32_t seq_num, uint32_t class_locale_word);
180 180 static int issue_mfi_pthru(struct mrsas_instance *instance, struct
181 181 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
182 182 static int issue_mfi_dcmd(struct mrsas_instance *instance, struct
183 183 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
184 184 static int issue_mfi_smp(struct mrsas_instance *instance, struct
185 185 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
186 186 static int issue_mfi_stp(struct mrsas_instance *instance, struct
187 187 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
188 188 static int abort_aen_cmd(struct mrsas_instance *instance,
189 189 struct mrsas_cmd *cmd_to_abort);
190 190
191 191 static void mrsas_rem_intrs(struct mrsas_instance *instance);
192 192 static int mrsas_add_intrs(struct mrsas_instance *instance, int intr_type);
193 193
194 194 static void mrsas_tran_tgt_free(dev_info_t *, dev_info_t *,
195 195 scsi_hba_tran_t *, struct scsi_device *);
196 196 static int mrsas_tran_bus_config(dev_info_t *, uint_t,
197 197 ddi_bus_config_op_t, void *, dev_info_t **);
198 198 static int mrsas_parse_devname(char *, int *, int *);
199 199 static int mrsas_config_all_devices(struct mrsas_instance *);
200 200 static int mrsas_config_ld(struct mrsas_instance *, uint16_t,
201 201 uint8_t, dev_info_t **);
202 202 static int mrsas_name_node(dev_info_t *, char *, int);
203 203 static void mrsas_issue_evt_taskq(struct mrsas_eventinfo *);
204 204 static void free_additional_dma_buffer(struct mrsas_instance *);
205 205 static void io_timeout_checker(void *);
206 206 static void mrsas_fm_init(struct mrsas_instance *);
207 207 static void mrsas_fm_fini(struct mrsas_instance *);
|
↓ open down ↓ |
207 lines elided |
↑ open up ↑ |
208 208
/*
 * Register-access/command-issue vector for the MFI-style register set;
 * selected in mrsas_attach() for the 2108VDE/2108V ("Liberator")
 * device IDs.
 */
static struct mrsas_function_template mrsas_function_template_ppc = {
	.read_fw_status_reg = read_fw_status_reg_ppc,
	.issue_cmd = issue_cmd_ppc,
	.issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
	.issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
	.enable_intr = enable_intr_ppc,
	.disable_intr = disable_intr_ppc,
	.intr_ack = intr_ack_ppc,
	.init_adapter = mrsas_init_adapter_ppc
};
220 219
221 220
/*
 * Register-access/command-issue vector for the Fusion/Thunderbolt
 * generation; selected in mrsas_attach() for the TBOLT/INVADER (2208)
 * device IDs.
 */
static struct mrsas_function_template mrsas_function_template_fusion = {
	.read_fw_status_reg = tbolt_read_fw_status_reg,
	.issue_cmd = tbolt_issue_cmd,
	.issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
	.issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
	.enable_intr = tbolt_enable_intr,
	.disable_intr = tbolt_disable_intr,
	.intr_ack = tbolt_intr_ack,
	.init_adapter = mrsas_init_adapter_tbolt
	/* .reset_adapter = mrsas_reset_adapter_tbolt */
};
233 232
234 233
/*
 * Baseline DMA attributes.  mrsas_attach() copies this template and
 * specializes it per instance: dma_attr_sgllen is overridden with the
 * controller's max_num_sge, and DDI_DMA_RELAXED_ORDERING may be OR-ed
 * into dma_attr_flags when mrsas_relaxed_ordering is set.
 */
ddi_dma_attr_t mrsas_generic_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xFFFFFFFFU,		/* high DMA address range */
	0xFFFFFFFFU,		/* DMA counter register */
	8,			/* DMA address alignment */
	0x07,			/* DMA burstsizes */
	1,			/* min DMA size */
	0xFFFFFFFFU,		/* max DMA size */
	0xFFFFFFFFU,		/* segment boundary */
	MRSAS_MAX_SGE_CNT,	/* dma_attr_sglen */
	512,			/* granularity of device */
	0			/* bus specific DMA flags */
};
249 248
/* Maximum transfer size cap (16 MB) for the non-Thunderbolt path. */
int32_t mrsas_max_cap_maxxfer = 0x1000000;

/*
 * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG,
 * Limit size to 256K
 */
uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
257 256
/*
 * cb_ops contains base level routines: only the character-device entry
 * points (open/close/ioctl) of the management interface are supplied;
 * all other operations are unsupported (nodev/nochpoll).
 */
static struct cb_ops mrsas_cb_ops = {
	mrsas_open,		/* open */
	mrsas_close,		/* close */
	nodev,			/* strategy */
	nodev,			/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	mrsas_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	nodev,			/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
281 280
/*
 * dev_ops contains configuration routines.  SPARC uses the legacy
 * reset() entry and needs no quiesce; other platforms supply
 * mrsas_quiesce() for fast-reboot support instead.
 */
static struct dev_ops mrsas_ops = {
	DEVO_REV,		/* rev, */
	0,			/* refcnt */
	mrsas_getinfo,		/* getinfo */
	nulldev,		/* identify */
	nulldev,		/* probe */
	mrsas_attach,		/* attach */
	mrsas_detach,		/* detach */
#ifdef	__sparc
	mrsas_reset,		/* reset */
#else	/* __sparc */
	nodev,
#endif	/* __sparc */
	&mrsas_cb_ops,		/* char/block ops */
	NULL,			/* bus ops */
	NULL,			/* power */
#ifdef	__sparc
	ddi_quiesce_not_needed
#else	/* __sparc */
	mrsas_quiesce		/* quiesce */
#endif	/* __sparc */
};
308 306
/* Module linkage information: this is a device driver module. */
static struct modldrv modldrv = {
	&mod_driverops,		/* module type - driver */
	MRSAS_VERSION,
	&mrsas_ops,		/* driver ops */
};
314 312
/* Ties the driver module into the kernel's loadable-module framework. */
static struct modlinkage modlinkage = {
	MODREV_1,		/* ml_rev - must be MODREV_1 */
	&modldrv,		/* ml_linkage */
	NULL			/* end of driver linkage */
};
320 318
/*
 * Register/DMA access attributes: the controller's structures are
 * little-endian and accesses must be strictly ordered.
 */
static struct ddi_device_acc_attr endian_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};
327 325
328 -
/*
 * Use the LSI Fast Path for the 2208 (tbolt) commands.
 * Cleared at attach time when the "mrsas-enable-fp" property is "no".
 */
unsigned int enable_fp = 1;
330 328
331 329
332 330 /*
333 331 * ************************************************************************** *
334 332 * *
335 333 * common entry points - for loadable kernel modules *
336 334 * *
337 335 * ************************************************************************** *
338 336 */
339 337
340 338 /*
341 339 * _init - initialize a loadable module
342 340 * @void
343 341 *
344 342 * The driver should perform any one-time resource allocation or data
345 343 * initialization during driver loading in _init(). For example, the driver
346 344 * should initialize any mutexes global to the driver in this routine.
347 345 * The driver should not, however, use _init() to allocate or initialize
348 346 * anything that has to do with a particular instance of the device.
349 347 * Per-instance initialization must be done in attach().
350 348 */
351 349 int
352 350 _init(void)
353 351 {
354 352 int ret;
355 353
356 354 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
357 355
358 356 ret = ddi_soft_state_init(&mrsas_state,
359 357 sizeof (struct mrsas_instance), 0);
360 358
361 359 if (ret != DDI_SUCCESS) {
362 360 cmn_err(CE_WARN, "mr_sas: could not init state");
363 361 return (ret);
364 362 }
365 363
366 364 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
367 365 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
368 366 ddi_soft_state_fini(&mrsas_state);
369 367 return (ret);
370 368 }
371 369
372 370 ret = mod_install(&modlinkage);
373 371
374 372 if (ret != DDI_SUCCESS) {
375 373 cmn_err(CE_WARN, "mr_sas: mod_install failed");
376 374 scsi_hba_fini(&modlinkage);
377 375 ddi_soft_state_fini(&mrsas_state);
378 376 }
379 377
380 378 return (ret);
381 379 }
382 380
/*
 * _info - returns information about a loadable module.
 * @modinfop:	module information structure filled in by mod_info()
 *
 * _info() is called to return module information.  This entry point
 * simply forwards to mod_info() for the driver's modlinkage.
 */
int
_info(struct modinfo *modinfop)
{
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	return (mod_info(&modlinkage, modinfop));
}
397 395
398 396 /*
399 397 * _fini - prepare a loadable module for unloading
400 398 * @void
401 399 *
402 400 * In _fini(), the driver should release any resources that were allocated in
403 401 * _init(). The driver must remove itself from the system module list.
404 402 */
405 403 int
406 404 _fini(void)
407 405 {
408 406 int ret;
409 407
410 408 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
411 409
412 410 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) {
413 411 con_log(CL_ANN1,
414 412 (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
415 413 return (ret);
416 414 }
417 415
418 416 scsi_hba_fini(&modlinkage);
419 417 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
420 418
421 419 ddi_soft_state_fini(&mrsas_state);
422 420 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
423 421
424 422 return (ret);
425 423 }
426 424
427 425
428 426 /*
429 427 * ************************************************************************** *
430 428 * *
431 429 * common entry points - for autoconfiguration *
432 430 * *
433 431 * ************************************************************************** *
434 432 */
435 433 /*
436 434 * attach - adds a device to the system as part of initialization
437 435 * @dip:
438 436 * @cmd:
439 437 *
440 438 * The kernel calls a driver's attach() entry point to attach an instance of
441 439 * a device (for MegaRAID, it is instance of a controller) or to resume
442 440 * operation for an instance of a device that has been suspended or has been
443 441 * shut down by the power management framework
444 442 * The attach() entry point typically includes the following types of
445 443 * processing:
446 444 * - allocate a soft-state structure for the device instance (for MegaRAID,
447 445 * controller instance)
448 446 * - initialize per-instance mutexes
449 447 * - initialize condition variables
450 448 * - register the device's interrupts (for MegaRAID, controller's interrupts)
451 449 * - map the registers and memory of the device instance (for MegaRAID,
452 450 * controller instance)
453 451 * - create minor device nodes for the device instance (for MegaRAID,
454 452 * controller instance)
455 453 * - report that the device instance (for MegaRAID, controller instance) has
456 454 * attached
457 455 */
458 456 static int
459 457 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
460 458 {
461 459 int instance_no;
462 460 int nregs;
463 461 int i = 0;
464 462 uint8_t irq;
465 463 uint16_t vendor_id;
466 464 uint16_t device_id;
467 465 uint16_t subsysvid;
468 466 uint16_t subsysid;
469 467 uint16_t command;
470 468 off_t reglength = 0;
471 469 int intr_types = 0;
472 470 char *data;
473 471
474 472 scsi_hba_tran_t *tran;
475 473 ddi_dma_attr_t tran_dma_attr;
476 474 struct mrsas_instance *instance;
477 475
478 476 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
479 477
480 478 /* CONSTCOND */
481 479 ASSERT(NO_COMPETING_THREADS);
482 480
483 481 instance_no = ddi_get_instance(dip);
484 482
485 483 /*
486 484 * check to see whether this device is in a DMA-capable slot.
487 485 */
488 486 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
489 487 cmn_err(CE_WARN,
490 488 "mr_sas%d: Device in slave-only slot, unused",
491 489 instance_no);
492 490 return (DDI_FAILURE);
493 491 }
494 492
495 493 switch (cmd) {
496 494 case DDI_ATTACH:
497 495 /* allocate the soft state for the instance */
498 496 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
499 497 != DDI_SUCCESS) {
500 498 cmn_err(CE_WARN,
501 499 "mr_sas%d: Failed to allocate soft state",
502 500 instance_no);
503 501 return (DDI_FAILURE);
504 502 }
505 503
|
↓ open down ↓ |
167 lines elided |
↑ open up ↑ |
506 504 instance = (struct mrsas_instance *)ddi_get_soft_state
507 505 (mrsas_state, instance_no);
508 506
509 507 if (instance == NULL) {
510 508 cmn_err(CE_WARN,
511 509 "mr_sas%d: Bad soft state", instance_no);
512 510 ddi_soft_state_free(mrsas_state, instance_no);
513 511 return (DDI_FAILURE);
514 512 }
515 513
516 - bzero(instance, sizeof (struct mrsas_instance));
517 -
518 514 instance->unroll.softs = 1;
519 515
520 516 /* Setup the PCI configuration space handles */
521 517 if (pci_config_setup(dip, &instance->pci_handle) !=
522 518 DDI_SUCCESS) {
523 519 cmn_err(CE_WARN,
524 520 "mr_sas%d: pci config setup failed ",
525 521 instance_no);
526 522
527 523 ddi_soft_state_free(mrsas_state, instance_no);
528 524 return (DDI_FAILURE);
529 525 }
530 - if (instance->pci_handle == NULL) {
531 - cmn_err(CE_WARN,
532 - "mr_sas%d: pci config setup failed ",
533 - instance_no);
534 - ddi_soft_state_free(mrsas_state, instance_no);
535 - return (DDI_FAILURE);
536 - }
537 526
538 -
539 -
540 527 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
541 528 cmn_err(CE_WARN,
542 529 "mr_sas: failed to get registers.");
543 530
544 531 pci_config_teardown(&instance->pci_handle);
545 532 ddi_soft_state_free(mrsas_state, instance_no);
546 533 return (DDI_FAILURE);
547 534 }
548 535
549 536 vendor_id = pci_config_get16(instance->pci_handle,
550 537 PCI_CONF_VENID);
551 538 device_id = pci_config_get16(instance->pci_handle,
552 539 PCI_CONF_DEVID);
553 540
554 541 subsysvid = pci_config_get16(instance->pci_handle,
555 542 PCI_CONF_SUBVENID);
556 543 subsysid = pci_config_get16(instance->pci_handle,
557 544 PCI_CONF_SUBSYSID);
558 545
559 546 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
560 547 (pci_config_get16(instance->pci_handle,
561 548 PCI_CONF_COMM) | PCI_COMM_ME));
562 549 irq = pci_config_get8(instance->pci_handle,
563 550 PCI_CONF_ILINE);
564 551
565 552 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
566 553 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
567 554 instance_no, vendor_id, device_id, subsysvid,
568 555 subsysid, irq, MRSAS_VERSION));
569 556
570 557 /* enable bus-mastering */
571 558 command = pci_config_get16(instance->pci_handle,
572 559 PCI_CONF_COMM);
573 560
574 561 if (!(command & PCI_COMM_ME)) {
575 562 command |= PCI_COMM_ME;
576 563
577 564 pci_config_put16(instance->pci_handle,
578 565 PCI_CONF_COMM, command);
579 566
580 567 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
581 568 "enable bus-mastering", instance_no));
582 569 } else {
583 570 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
584 571 "bus-mastering already set", instance_no));
585 572 }
586 573
587 574 /* initialize function pointers */
588 575 switch (device_id) {
589 576 case PCI_DEVICE_ID_LSI_TBOLT:
590 577 case PCI_DEVICE_ID_LSI_INVADER:
591 578 con_log(CL_ANN, (CE_NOTE,
592 579 "mr_sas: 2208 T.B. device detected"));
593 580
594 581 instance->func_ptr =
595 582 &mrsas_function_template_fusion;
596 583 instance->tbolt = 1;
597 584 break;
598 585
599 586 case PCI_DEVICE_ID_LSI_2108VDE:
600 587 case PCI_DEVICE_ID_LSI_2108V:
601 588 con_log(CL_ANN, (CE_NOTE,
602 589 "mr_sas: 2108 Liberator device detected"));
603 590
604 591 instance->func_ptr =
605 592 &mrsas_function_template_ppc;
606 593 break;
607 594
608 595 default:
609 596 cmn_err(CE_WARN,
610 597 "mr_sas: Invalid device detected");
611 598
612 599 pci_config_teardown(&instance->pci_handle);
613 600 ddi_soft_state_free(mrsas_state, instance_no);
614 601 return (DDI_FAILURE);
615 602 }
616 603
617 604 instance->baseaddress = pci_config_get32(
618 605 instance->pci_handle, PCI_CONF_BASE0);
619 606 instance->baseaddress &= 0x0fffc;
620 607
621 608 instance->dip = dip;
622 609 instance->vendor_id = vendor_id;
623 610 instance->device_id = device_id;
624 611 instance->subsysvid = subsysvid;
625 612 instance->subsysid = subsysid;
626 613 instance->instance = instance_no;
627 614
628 615 /* Initialize FMA */
629 616 instance->fm_capabilities = ddi_prop_get_int(
630 617 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
631 618 "fm-capable", DDI_FM_EREPORT_CAPABLE |
632 619 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
633 620 | DDI_FM_ERRCB_CAPABLE);
634 621
635 622 mrsas_fm_init(instance);
636 623
637 624 /* Setup register map */
638 625 if ((ddi_dev_regsize(instance->dip,
639 626 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) ||
640 627 reglength < MINIMUM_MFI_MEM_SZ) {
641 628 goto fail_attach;
642 629 }
643 630 if (reglength > DEFAULT_MFI_MEM_SZ) {
644 631 reglength = DEFAULT_MFI_MEM_SZ;
645 632 con_log(CL_DLEVEL1, (CE_NOTE,
646 633 "mr_sas: register length to map is 0x%lx bytes",
|
↓ open down ↓ |
97 lines elided |
↑ open up ↑ |
647 634 reglength));
648 635 }
649 636 if (ddi_regs_map_setup(instance->dip,
650 637 REGISTER_SET_IO_2108, &instance->regmap, 0,
651 638 reglength, &endian_attr, &instance->regmap_handle)
652 639 != DDI_SUCCESS) {
653 640 cmn_err(CE_WARN,
654 641 "mr_sas: couldn't map control registers");
655 642 goto fail_attach;
656 643 }
657 - if (instance->regmap_handle == NULL) {
658 - cmn_err(CE_WARN,
659 - "mr_sas: couldn't map control registers");
660 - goto fail_attach;
661 - }
662 644
663 645 instance->unroll.regs = 1;
664 646
665 647 /*
666 648 * Disable Interrupt Now.
667 649 * Setup Software interrupt
668 650 */
669 651 instance->func_ptr->disable_intr(instance);
670 652
671 653 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
672 654 "mrsas-enable-msi", &data) == DDI_SUCCESS) {
673 655 if (strncmp(data, "no", 3) == 0) {
674 656 msi_enable = 0;
675 657 con_log(CL_ANN1, (CE_WARN,
676 658 "msi_enable = %d disabled", msi_enable));
677 659 }
678 660 ddi_prop_free(data);
679 661 }
680 662
681 663 con_log(CL_DLEVEL1, (CE_NOTE, "msi_enable = %d", msi_enable));
682 664
683 665 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
684 666 "mrsas-enable-fp", &data) == DDI_SUCCESS) {
685 667 if (strncmp(data, "no", 3) == 0) {
686 668 enable_fp = 0;
687 669 cmn_err(CE_NOTE,
688 670 "enable_fp = %d, Fast-Path disabled.\n",
689 671 enable_fp);
690 672 }
691 673
692 674 ddi_prop_free(data);
693 675 }
694 676
695 677 cmn_err(CE_NOTE, "enable_fp = %d\n", enable_fp);
696 678
697 679 /* Check for all supported interrupt types */
698 680 if (ddi_intr_get_supported_types(
699 681 dip, &intr_types) != DDI_SUCCESS) {
700 682 cmn_err(CE_WARN,
701 683 "ddi_intr_get_supported_types() failed");
702 684 goto fail_attach;
703 685 }
704 686
705 687 con_log(CL_DLEVEL1, (CE_NOTE,
706 688 "ddi_intr_get_supported_types() ret: 0x%x", intr_types));
707 689
708 690 /* Initialize and Setup Interrupt handler */
709 691 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
710 692 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) !=
711 693 DDI_SUCCESS) {
712 694 cmn_err(CE_WARN,
713 695 "MSIX interrupt query failed");
714 696 goto fail_attach;
715 697 }
716 698 instance->intr_type = DDI_INTR_TYPE_MSIX;
717 699 } else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) {
718 700 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) !=
719 701 DDI_SUCCESS) {
720 702 cmn_err(CE_WARN,
721 703 "MSI interrupt query failed");
722 704 goto fail_attach;
723 705 }
724 706 instance->intr_type = DDI_INTR_TYPE_MSI;
725 707 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
726 708 msi_enable = 0;
727 709 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) !=
728 710 DDI_SUCCESS) {
729 711 cmn_err(CE_WARN,
730 712 "FIXED interrupt query failed");
731 713 goto fail_attach;
732 714 }
733 715 instance->intr_type = DDI_INTR_TYPE_FIXED;
734 716 } else {
735 717 cmn_err(CE_WARN, "Device cannot "
736 718 "suppport either FIXED or MSI/X "
737 719 "interrupts");
738 720 goto fail_attach;
739 721 }
740 722
741 723 instance->unroll.intr = 1;
742 724
743 725 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
744 726 "mrsas-enable-ctio", &data) == DDI_SUCCESS) {
745 727 if (strncmp(data, "no", 3) == 0) {
746 728 ctio_enable = 0;
747 729 con_log(CL_ANN1, (CE_WARN,
748 730 "ctio_enable = %d disabled", ctio_enable));
749 731 }
750 732 ddi_prop_free(data);
751 733 }
752 734
753 735 con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d", ctio_enable));
754 736
755 737 /* setup the mfi based low level driver */
756 738 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
757 739 cmn_err(CE_WARN, "mr_sas: "
758 740 "could not initialize the low level driver");
759 741
760 742 goto fail_attach;
761 743 }
762 744
763 745 /* Initialize all Mutex */
764 746 INIT_LIST_HEAD(&instance->completed_pool_list);
765 747 mutex_init(&instance->completed_pool_mtx,
766 748 "completed_pool_mtx", MUTEX_DRIVER,
767 749 DDI_INTR_PRI(instance->intr_pri));
768 750
769 751 mutex_init(&instance->sync_map_mtx,
770 752 "sync_map_mtx", MUTEX_DRIVER,
771 753 DDI_INTR_PRI(instance->intr_pri));
772 754
773 755 mutex_init(&instance->app_cmd_pool_mtx,
774 756 "app_cmd_pool_mtx", MUTEX_DRIVER,
775 757 DDI_INTR_PRI(instance->intr_pri));
776 758
777 759 mutex_init(&instance->config_dev_mtx, "config_dev_mtx",
778 760 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
779 761
780 762 mutex_init(&instance->cmd_pend_mtx, "cmd_pend_mtx",
781 763 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
782 764
783 765 mutex_init(&instance->ocr_flags_mtx, "ocr_flags_mtx",
784 766 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
785 767
786 768 mutex_init(&instance->int_cmd_mtx, "int_cmd_mtx",
787 769 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
788 770 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
789 771
790 772 mutex_init(&instance->cmd_pool_mtx, "cmd_pool_mtx",
791 773 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
792 774
793 775 mutex_init(&instance->reg_write_mtx, "reg_write_mtx",
794 776 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
795 777
796 778 if (instance->tbolt) {
797 779 mutex_init(&instance->cmd_app_pool_mtx,
798 780 "cmd_app_pool_mtx", MUTEX_DRIVER,
799 781 DDI_INTR_PRI(instance->intr_pri));
800 782
801 783 mutex_init(&instance->chip_mtx,
802 784 "chip_mtx", MUTEX_DRIVER,
803 785 DDI_INTR_PRI(instance->intr_pri));
804 786
805 787 }
806 788
807 789 instance->unroll.mutexs = 1;
808 790
809 791 instance->timeout_id = (timeout_id_t)-1;
810 792
811 793 /* Register our soft-isr for highlevel interrupts. */
812 794 instance->isr_level = instance->intr_pri;
813 795 if (!(instance->tbolt)) {
814 796 if (instance->isr_level == HIGH_LEVEL_INTR) {
815 797 if (ddi_add_softintr(dip,
816 798 DDI_SOFTINT_HIGH,
817 799 &instance->soft_intr_id, NULL, NULL,
818 800 mrsas_softintr, (caddr_t)instance) !=
819 801 DDI_SUCCESS) {
820 802 cmn_err(CE_WARN,
821 803 "Software ISR did not register");
822 804
823 805 goto fail_attach;
824 806 }
825 807
826 808 instance->unroll.soft_isr = 1;
827 809
828 810 }
829 811 }
830 812
831 813 instance->softint_running = 0;
832 814
833 815 /* Allocate a transport structure */
834 816 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
835 817
836 818 if (tran == NULL) {
837 819 cmn_err(CE_WARN,
838 820 "scsi_hba_tran_alloc failed");
839 821 goto fail_attach;
840 822 }
841 823
842 824 instance->tran = tran;
843 825 instance->unroll.tran = 1;
844 826
845 827 tran->tran_hba_private = instance;
846 828 tran->tran_tgt_init = mrsas_tran_tgt_init;
847 829 tran->tran_tgt_probe = scsi_hba_probe;
848 830 tran->tran_tgt_free = mrsas_tran_tgt_free;
849 831 if (instance->tbolt) {
850 832 tran->tran_init_pkt =
851 833 mrsas_tbolt_tran_init_pkt;
852 834 tran->tran_start =
853 835 mrsas_tbolt_tran_start;
854 836 } else {
855 837 tran->tran_init_pkt = mrsas_tran_init_pkt;
856 838 tran->tran_start = mrsas_tran_start;
857 839 }
858 840 tran->tran_abort = mrsas_tran_abort;
859 841 tran->tran_reset = mrsas_tran_reset;
860 842 tran->tran_getcap = mrsas_tran_getcap;
861 843 tran->tran_setcap = mrsas_tran_setcap;
862 844 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
863 845 tran->tran_dmafree = mrsas_tran_dmafree;
864 846 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
865 847 tran->tran_quiesce = mrsas_tran_quiesce;
866 848 tran->tran_unquiesce = mrsas_tran_unquiesce;
867 849 tran->tran_bus_config = mrsas_tran_bus_config;
868 850
869 851 if (mrsas_relaxed_ordering)
870 852 mrsas_generic_dma_attr.dma_attr_flags |=
871 853 DDI_DMA_RELAXED_ORDERING;
872 854
873 855
874 856 tran_dma_attr = mrsas_generic_dma_attr;
875 857 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
876 858
877 859 /* Attach this instance of the hba */
878 860 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
879 861 != DDI_SUCCESS) {
880 862 cmn_err(CE_WARN,
881 863 "scsi_hba_attach failed");
882 864
883 865 goto fail_attach;
884 866 }
885 867 instance->unroll.tranSetup = 1;
886 868 con_log(CL_ANN1,
887 869 (CE_CONT, "scsi_hba_attach_setup() done."));
888 870
889 871 /* create devctl node for cfgadm command */
890 872 if (ddi_create_minor_node(dip, "devctl",
891 873 S_IFCHR, INST2DEVCTL(instance_no),
892 874 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
893 875 cmn_err(CE_WARN,
894 876 "mr_sas: failed to create devctl node.");
895 877
896 878 goto fail_attach;
897 879 }
898 880
899 881 instance->unroll.devctl = 1;
900 882
901 883 /* create scsi node for cfgadm command */
902 884 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
903 885 INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
904 886 DDI_FAILURE) {
905 887 cmn_err(CE_WARN,
906 888 "mr_sas: failed to create scsi node.");
907 889
908 890 goto fail_attach;
909 891 }
910 892
911 893 instance->unroll.scsictl = 1;
912 894
913 895 (void) sprintf(instance->iocnode, "%d:lsirdctl",
914 896 instance_no);
915 897
916 898 /*
917 899 * Create a node for applications
918 900 * for issuing ioctl to the driver.
919 901 */
920 902 if (ddi_create_minor_node(dip, instance->iocnode,
921 903 S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) ==
922 904 DDI_FAILURE) {
923 905 cmn_err(CE_WARN,
924 906 "mr_sas: failed to create ioctl node.");
925 907
926 908 goto fail_attach;
927 909 }
928 910
929 911 instance->unroll.ioctl = 1;
930 912
931 913 /* Create a taskq to handle dr events */
932 914 if ((instance->taskq = ddi_taskq_create(dip,
933 915 "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
934 916 cmn_err(CE_WARN,
935 917 "mr_sas: failed to create taskq ");
936 918 instance->taskq = NULL;
937 919 goto fail_attach;
938 920 }
939 921 instance->unroll.taskq = 1;
940 922 con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done."));
941 923
942 924 /* enable interrupt */
943 925 instance->func_ptr->enable_intr(instance);
944 926
945 927 /* initiate AEN */
946 928 if (start_mfi_aen(instance)) {
947 929 cmn_err(CE_WARN,
948 930 "mr_sas: failed to initiate AEN.");
949 931 goto fail_attach;
950 932 }
951 933 instance->unroll.aenPend = 1;
952 934 con_log(CL_ANN1,
953 935 (CE_CONT, "AEN started for instance %d.", instance_no));
954 936
955 937 /* Finally! We are on the air. */
956 938 ddi_report_dev(dip);
957 939
958 940 /* FMA handle checking. */
959 941 if (mrsas_check_acc_handle(instance->regmap_handle) !=
960 942 DDI_SUCCESS) {
961 943 goto fail_attach;
962 944 }
963 945 if (mrsas_check_acc_handle(instance->pci_handle) !=
964 946 DDI_SUCCESS) {
965 947 goto fail_attach;
966 948 }
967 949
968 950 instance->mr_ld_list =
969 951 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
970 952 KM_SLEEP);
971 953 if (instance->mr_ld_list == NULL) {
972 954 cmn_err(CE_WARN, "mr_sas attach(): "
973 955 "failed to allocate ld_list array");
974 956 goto fail_attach;
975 957 }
976 958 instance->unroll.ldlist_buff = 1;
977 959
978 960 #ifdef PDSUPPORT
979 961 if (instance->tbolt) {
980 962 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
981 963 instance->mr_tbolt_pd_list =
982 964 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
983 965 sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
984 966 ASSERT(instance->mr_tbolt_pd_list);
985 967 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
986 968 instance->mr_tbolt_pd_list[i].lun_type =
987 969 MRSAS_TBOLT_PD_LUN;
988 970 instance->mr_tbolt_pd_list[i].dev_id =
989 971 (uint8_t)i;
990 972 }
991 973
992 974 instance->unroll.pdlist_buff = 1;
993 975 }
994 976 #endif
995 977 break;
996 978 case DDI_PM_RESUME:
997 979 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
998 980 break;
999 981 case DDI_RESUME:
1000 982 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME"));
1001 983 break;
1002 984 default:
1003 985 con_log(CL_ANN,
1004 986 (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd));
1005 987 return (DDI_FAILURE);
1006 988 }
1007 989
1008 990
1009 991 cmn_err(CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d",
1010 992 instance_no);
1011 993 return (DDI_SUCCESS);
1012 994
1013 995 fail_attach:
1014 996
1015 997 mrsas_undo_resources(dip, instance);
1016 998
1017 999 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
1018 1000 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
1019 1001
1020 1002 mrsas_fm_fini(instance);
1021 1003
1022 1004 pci_config_teardown(&instance->pci_handle);
1023 1005 ddi_soft_state_free(mrsas_state, instance_no);
1024 1006
1025 1007 con_log(CL_ANN, (CE_WARN, "mr_sas: return failure from mrsas_attach"));
1026 1008
1027 1009 cmn_err(CE_WARN, "mrsas_attach() return FAILURE instance_num %d",
1028 1010 instance_no);
1029 1011
1030 1012 return (DDI_FAILURE);
1031 1013 }
1032 1014
1033 1015 /*
1034 1016 * getinfo - gets device information
1035 1017 * @dip:
1036 1018 * @cmd:
1037 1019 * @arg:
1038 1020 * @resultp:
1039 1021 *
1040 1022 * The system calls getinfo() to obtain configuration information that only
1041 1023 * the driver knows. The mapping of minor numbers to device instance is
1042 1024 * entirely under the control of the driver. The system sometimes needs to ask
1043 1025 * the driver which device a particular dev_t represents.
1044 1026 * Given the device number return the devinfo pointer from the scsi_device
1045 1027 * structure.
1046 1028 */
1047 1029 /*ARGSUSED*/
1048 1030 static int
1049 1031 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1050 1032 {
1051 1033 int rval;
1052 1034 int mrsas_minor = getminor((dev_t)arg);
1053 1035
1054 1036 struct mrsas_instance *instance;
1055 1037
1056 1038 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1057 1039
1058 1040 switch (cmd) {
1059 1041 case DDI_INFO_DEVT2DEVINFO:
1060 1042 instance = (struct mrsas_instance *)
1061 1043 ddi_get_soft_state(mrsas_state,
1062 1044 MINOR2INST(mrsas_minor));
1063 1045
1064 1046 if (instance == NULL) {
1065 1047 *resultp = NULL;
1066 1048 rval = DDI_FAILURE;
1067 1049 } else {
1068 1050 *resultp = instance->dip;
1069 1051 rval = DDI_SUCCESS;
1070 1052 }
1071 1053 break;
1072 1054 case DDI_INFO_DEVT2INSTANCE:
1073 1055 *resultp = (void *)(intptr_t)
1074 1056 (MINOR2INST(getminor((dev_t)arg)));
1075 1057 rval = DDI_SUCCESS;
1076 1058 break;
1077 1059 default:
1078 1060 *resultp = NULL;
1079 1061 rval = DDI_FAILURE;
1080 1062 }
1081 1063
1082 1064 return (rval);
1083 1065 }
1084 1066
1085 1067 /*
1086 1068 * detach - detaches a device from the system
1087 1069 * @dip: pointer to the device's dev_info structure
1088 1070 * @cmd: type of detach
1089 1071 *
1090 1072 * A driver's detach() entry point is called to detach an instance of a device
1091 1073 * that is bound to the driver. The entry point is called with the instance of
1092 1074 * the device node to be detached and with DDI_DETACH, which is specified as
1093 1075 * the cmd argument to the entry point.
1094 1076 * This routine is called during driver unload. We free all the allocated
1095 1077 * resources and call the corresponding LLD so that it can also release all
1096 1078 * its resources.
1097 1079 */
static int
mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance_no;

	struct mrsas_instance *instance;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
	    instance_no);

	if (!instance) {
		cmn_err(CE_WARN,
		    "mr_sas:%d could not get instance in detach",
		    instance_no);

		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE,
	    "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
	    instance_no, instance->vendor_id, instance->device_id,
	    instance->subsysvid, instance->subsysid));

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_DETACH"));

		/*
		 * Cancel the periodic config-device timer first so it cannot
		 * rearm or race with the teardown below.  The mutex is
		 * dropped across untimeout() — NOTE(review): presumably the
		 * timeout handler takes config_dev_mtx, so holding it here
		 * could deadlock against an in-flight handler; confirm.
		 */
		mutex_enter(&instance->config_dev_mtx);
		if (instance->timeout_id != (timeout_id_t)-1) {
			mutex_exit(&instance->config_dev_mtx);
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;
			mutex_enter(&instance->config_dev_mtx);
			instance->unroll.timer = 0;
		}
		mutex_exit(&instance->config_dev_mtx);

		/*
		 * Detach from the SCSA framework before undoing the rest of
		 * the attach resources; a failure here aborts the whole
		 * detach so the instance stays usable.
		 */
		if (instance->unroll.tranSetup == 1) {
			if (scsi_hba_detach(dip) != DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "mr_sas2%d: failed to detach",
				    instance_no);
				return (DDI_FAILURE);
			}
			instance->unroll.tranSetup = 0;
			con_log(CL_ANN1,
			    (CE_CONT, "scsi_hba_dettach() done."));
		}

		/* Ask the firmware to flush its caches before we go away. */
		flush_cache(instance);

		/* Unwind everything attach set up, in reverse order. */
		mrsas_undo_resources(dip, instance);

		mrsas_fm_fini(instance);

		pci_config_teardown(&instance->pci_handle);
		ddi_soft_state_free(mrsas_state, instance_no);
		break;

	case DDI_PM_SUSPEND:
		/* Power-management suspend: nothing to tear down. */
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_PM_SUSPEND"));

		break;
	case DDI_SUSPEND:
		/* System suspend: nothing to tear down. */
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_SUSPEND"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1184 1166
1185 1167
static void
mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance)
{
	int instance_no;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	instance_no = ddi_get_instance(dip);


	/*
	 * Unwind attach-time resources in roughly the reverse order of
	 * their creation.  Each instance->unroll.* flag records that the
	 * corresponding resource was successfully set up; it is cleared
	 * here once the resource is released so this routine is safe to
	 * call from any partially-attached state.
	 */
	if (instance->unroll.ioctl == 1) {
		ddi_remove_minor_node(dip, instance->iocnode);
		instance->unroll.ioctl = 0;
	}

	if (instance->unroll.scsictl == 1) {
		ddi_remove_minor_node(dip, "scsi");
		instance->unroll.scsictl = 0;
	}

	if (instance->unroll.devctl == 1) {
		ddi_remove_minor_node(dip, "devctl");
		instance->unroll.devctl = 0;
	}

	/*
	 * NOTE(review): if scsi_hba_detach() fails we return immediately,
	 * leaving every resource below still allocated — confirm this
	 * leak-on-failure behavior is intentional.
	 */
	if (instance->unroll.tranSetup == 1) {
		if (scsi_hba_detach(dip) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "mr_sas2%d: failed to detach", instance_no);
			return;	/* DDI_FAILURE */
		}
		instance->unroll.tranSetup = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
	}

	if (instance->unroll.tran == 1) {
		scsi_hba_tran_free(instance->tran);
		instance->unroll.tran = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done."));
	}

	/* Abort the outstanding LD-map sync command (Thunderbolt only). */
	if (instance->unroll.syncCmd == 1) {
		if (instance->tbolt) {
			if (abort_syncmap_cmd(instance,
			    instance->map_update_cmd)) {
				cmn_err(CE_WARN, "mrsas_detach: "
				    "failed to abort previous syncmap command");
			}

			instance->unroll.syncCmd = 0;
			con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
		}
	}

	/* Abort the outstanding async-event-notification command. */
	if (instance->unroll.aenPend == 1) {
		if (abort_aen_cmd(instance, instance->aen_cmd))
			cmn_err(CE_WARN, "mrsas_detach: "
			    "failed to abort prevous AEN command");

		instance->unroll.aenPend = 0;
		con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
		/* This means the controller is fully initialized and running */
		/* Shutdown should be a last command to controller. */
		/* shutdown_controller(); */
	}


	if (instance->unroll.timer == 1) {
		if (instance->timeout_id != (timeout_id_t)-1) {
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;

			instance->unroll.timer = 0;
		}
	}

	/* Quiesce the hardware before destroying the locks it uses. */
	instance->func_ptr->disable_intr(instance);


	if (instance->unroll.mutexs == 1) {
		mutex_destroy(&instance->cmd_pool_mtx);
		mutex_destroy(&instance->app_cmd_pool_mtx);
		mutex_destroy(&instance->cmd_pend_mtx);
		mutex_destroy(&instance->completed_pool_mtx);
		mutex_destroy(&instance->sync_map_mtx);
		mutex_destroy(&instance->int_cmd_mtx);
		cv_destroy(&instance->int_cmd_cv);
		mutex_destroy(&instance->config_dev_mtx);
		mutex_destroy(&instance->ocr_flags_mtx);
		mutex_destroy(&instance->reg_write_mtx);

		if (instance->tbolt) {
			mutex_destroy(&instance->cmd_app_pool_mtx);
			mutex_destroy(&instance->chip_mtx);
		}

		instance->unroll.mutexs = 0;
		con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done."));
	}


	if (instance->unroll.soft_isr == 1) {
		ddi_remove_softintr(instance->soft_intr_id);
		instance->unroll.soft_isr = 0;
	}

	if (instance->unroll.intr == 1) {
		mrsas_rem_intrs(instance);
		instance->unroll.intr = 0;
	}


	if (instance->unroll.taskq == 1) {
		if (instance->taskq) {
			ddi_taskq_destroy(instance->taskq);
			instance->unroll.taskq = 0;
		}

	}

	/*
	 * free dma memory allocated for
	 * cmds/frames/queues/driver version etc
	 */
	if (instance->unroll.verBuff == 1) {
		(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
		instance->unroll.verBuff = 0;
	}

	if (instance->unroll.pdlist_buff == 1) {
		if (instance->mr_tbolt_pd_list != NULL) {
			kmem_free(instance->mr_tbolt_pd_list,
			    MRSAS_TBOLT_GET_PD_MAX(instance) *
			    sizeof (struct mrsas_tbolt_pd));
		}

		instance->mr_tbolt_pd_list = NULL;
		instance->unroll.pdlist_buff = 0;
	}

	if (instance->unroll.ldlist_buff == 1) {
		if (instance->mr_ld_list != NULL) {
			kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
			    * sizeof (struct mrsas_ld));
		}

		instance->mr_ld_list = NULL;
		instance->unroll.ldlist_buff = 0;
	}

	/* Free the MFI/MPI2 frame pools, per controller generation. */
	if (instance->tbolt) {
		if (instance->unroll.alloc_space_mpi2 == 1) {
			free_space_for_mpi2(instance);
			instance->unroll.alloc_space_mpi2 = 0;
		}
	} else {
		if (instance->unroll.alloc_space_mfi == 1) {
			free_space_for_mfi(instance);
			instance->unroll.alloc_space_mfi = 0;
		}
	}

	if (instance->unroll.regs == 1) {
		ddi_regs_map_free(&instance->regmap_handle);
		instance->unroll.regs = 0;
		con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done."));
	}
}
1355 1337
1356 1338
1357 1339
1358 1340 /*
1359 1341 * ************************************************************************** *
1360 1342 * *
1361 1343 * common entry points - for character driver types *
1362 1344 * *
1363 1345 * ************************************************************************** *
1364 1346 */
1365 1347 /*
1366 1348 * open - gets access to a device
1367 1349 * @dev:
1368 1350 * @openflags:
1369 1351 * @otyp:
1370 1352 * @credp:
1371 1353 *
1372 1354 * Access to a device by one or more application programs is controlled
1373 1355 * through the open() and close() entry points. The primary function of
1374 1356 * open() is to verify that the open request is allowed.
1375 1357 */
1376 1358 static int
1377 1359 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1378 1360 {
1379 1361 int rval = 0;
1380 1362
1381 1363 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1382 1364
1383 1365 /* Check root permissions */
1384 1366 if (drv_priv(credp) != 0) {
1385 1367 con_log(CL_ANN, (CE_WARN,
1386 1368 "mr_sas: Non-root ioctl access denied!"));
1387 1369 return (EPERM);
1388 1370 }
1389 1371
1390 1372 /* Verify we are being opened as a character device */
1391 1373 if (otyp != OTYP_CHR) {
1392 1374 con_log(CL_ANN, (CE_WARN,
1393 1375 "mr_sas: ioctl node must be a char node"));
1394 1376 return (EINVAL);
1395 1377 }
1396 1378
1397 1379 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1398 1380 == NULL) {
1399 1381 return (ENXIO);
1400 1382 }
1401 1383
1402 1384 if (scsi_hba_open) {
1403 1385 rval = scsi_hba_open(dev, openflags, otyp, credp);
1404 1386 }
1405 1387
1406 1388 return (rval);
1407 1389 }
1408 1390
1409 1391 /*
1410 1392 * close - gives up access to a device
1411 1393 * @dev:
1412 1394 * @openflags:
1413 1395 * @otyp:
1414 1396 * @credp:
1415 1397 *
1416 1398 * close() should perform any cleanup necessary to finish using the minor
1417 1399 * device, and prepare the device (and driver) to be opened again.
1418 1400 */
1419 1401 static int
1420 1402 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1421 1403 {
1422 1404 int rval = 0;
1423 1405
1424 1406 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1425 1407
1426 1408 /* no need for locks! */
1427 1409
1428 1410 if (scsi_hba_close) {
1429 1411 rval = scsi_hba_close(dev, openflags, otyp, credp);
1430 1412 }
1431 1413
1432 1414 return (rval);
1433 1415 }
1434 1416
1435 1417 /*
1436 1418 * ioctl - performs a range of I/O commands for character drivers
1437 1419 * @dev:
1438 1420 * @cmd:
1439 1421 * @arg:
1440 1422 * @mode:
1441 1423 * @credp:
1442 1424 * @rvalp:
1443 1425 *
1444 1426 * ioctl() routine must make sure that user data is copied into or out of the
1445 1427 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1446 1428 * and ddi_copyout(), as appropriate.
1447 1429 * This is a wrapper routine to serialize access to the actual ioctl routine.
1448 1430 * ioctl() should return 0 on success, or the appropriate error number. The
1449 1431 * driver may also set the value returned to the calling process through rvalp.
1450 1432 */
1451 1433
1452 1434 static int
1453 1435 mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1454 1436 int *rvalp)
1455 1437 {
1456 1438 int rval = 0;
1457 1439
1458 1440 struct mrsas_instance *instance;
1459 1441 struct mrsas_ioctl *ioctl;
1460 1442 struct mrsas_aen aen;
1461 1443 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1462 1444
1463 1445 instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));
1464 1446
1465 1447 if (instance == NULL) {
1466 1448 /* invalid minor number */
|
↓ open down ↓ |
190 lines elided |
↑ open up ↑ |
1467 1449 con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
1468 1450 return (ENXIO);
1469 1451 }
1470 1452
1471 1453 ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
1472 1454 KM_SLEEP);
1473 1455 if (ioctl == NULL) {
1474 1456 /* Failed to allocate memory for ioctl */
1475 1457 con_log(CL_ANN, (CE_WARN, "mr_sas_ioctl: "
1476 1458 "failed to allocate memory for ioctl"));
1477 - return (ENXIO);
1459 + return (ENOMEM);
1478 1460 }
1479 1461
1480 1462 switch ((uint_t)cmd) {
1481 1463 case MRSAS_IOCTL_FIRMWARE:
1482 1464 if (ddi_copyin((void *)arg, ioctl,
1483 1465 sizeof (struct mrsas_ioctl), mode)) {
1484 1466 con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
1485 1467 "ERROR IOCTL copyin"));
1486 1468 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1487 1469 return (EFAULT);
1488 1470 }
1489 1471
1490 1472 if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
1491 1473 rval = handle_drv_ioctl(instance, ioctl, mode);
1492 1474 } else {
1493 1475 rval = handle_mfi_ioctl(instance, ioctl, mode);
1494 1476 }
1495 1477
1496 1478 if (ddi_copyout((void *)ioctl, (void *)arg,
1497 1479 (sizeof (struct mrsas_ioctl) - 1), mode)) {
1498 1480 con_log(CL_ANN, (CE_WARN,
1499 1481 "mrsas_ioctl: copy_to_user failed"));
1500 1482 rval = 1;
1501 1483 }
1502 1484
1503 1485 break;
1504 1486 case MRSAS_IOCTL_AEN:
1505 1487 con_log(CL_ANN,
1506 1488 (CE_NOTE, "mrsas_ioctl: IOCTL Register AEN.\n"));
1507 1489
1508 1490 if (ddi_copyin((void *) arg, &aen,
1509 1491 sizeof (struct mrsas_aen), mode)) {
1510 1492 con_log(CL_ANN, (CE_WARN,
1511 1493 "mrsas_ioctl: ERROR AEN copyin"));
1512 1494 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1513 1495 return (EFAULT);
1514 1496 }
1515 1497
1516 1498 rval = handle_mfi_aen(instance, &aen);
1517 1499
1518 1500 if (ddi_copyout((void *) &aen, (void *)arg,
1519 1501 sizeof (struct mrsas_aen), mode)) {
1520 1502 con_log(CL_ANN, (CE_WARN,
1521 1503 "mrsas_ioctl: copy_to_user failed"));
1522 1504 rval = 1;
1523 1505 }
1524 1506
1525 1507 break;
1526 1508 default:
1527 1509 rval = scsi_hba_ioctl(dev, cmd, arg,
1528 1510 mode, credp, rvalp);
1529 1511
1530 1512 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
1531 1513 "scsi_hba_ioctl called, ret = %x.", rval));
1532 1514 }
1533 1515
1534 1516 kmem_free(ioctl, sizeof (struct mrsas_ioctl));
1535 1517 return (rval);
1536 1518 }
1537 1519
1538 1520 /*
1539 1521 * ************************************************************************** *
1540 1522 * *
1541 1523 * common entry points - for block driver types *
1542 1524 * *
1543 1525 * ************************************************************************** *
1544 1526 */
1545 1527 #ifdef __sparc
1546 1528 /*
1547 1529 * reset - TBD
1548 1530 * @dip:
1549 1531 * @cmd:
1550 1532 *
1551 1533 * TBD
1552 1534 */
1553 1535 /*ARGSUSED*/
1554 1536 static int
1555 1537 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1556 1538 {
1557 1539 int instance_no;
1558 1540
1559 1541 struct mrsas_instance *instance;
1560 1542
1561 1543 instance_no = ddi_get_instance(dip);
1562 1544 instance = (struct mrsas_instance *)ddi_get_soft_state
1563 1545 (mrsas_state, instance_no);
1564 1546
1565 1547 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1566 1548
1567 1549 if (!instance) {
1568 1550 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1569 1551 "in reset", instance_no));
1570 1552 return (DDI_FAILURE);
1571 1553 }
1572 1554
1573 1555 instance->func_ptr->disable_intr(instance);
1574 1556
1575 1557 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1576 1558 instance_no));
1577 1559
1578 1560 flush_cache(instance);
1579 1561
1580 1562 return (DDI_SUCCESS);
1581 1563 }
1582 1564 #else /* __sparc */
/*ARGSUSED*/
static int
mrsas_quiesce(dev_info_t *dip)
{
	int instance_no;

	struct mrsas_instance *instance;

	instance_no = ddi_get_instance(dip);
	instance = (struct mrsas_instance *)ddi_get_soft_state
	    (mrsas_state, instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (!instance) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
		    "in quiesce", instance_no));
		return (DDI_FAILURE);
	}
	/* Refuse to quiesce a dead adapter or one mid-reset. */
	if (instance->deadadapter || instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
		    "healthy state", instance_no));
		return (DDI_FAILURE);
	}

	/* Cancel the outstanding async-event-notification command. */
	if (abort_aen_cmd(instance, instance->aen_cmd)) {
		con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
		    "failed to abort prevous AEN command QUIESCE"));
	}

	/* Thunderbolt controllers also carry a pending map-sync command. */
	if (instance->tbolt) {
		if (abort_syncmap_cmd(instance,
		    instance->map_update_cmd)) {
			cmn_err(CE_WARN,
			    "mrsas_detach: failed to abort "
			    "previous syncmap command");
			return (DDI_FAILURE);
		}
	}

	instance->func_ptr->disable_intr(instance);

	con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
	    instance_no));

	flush_cache(instance);

	/* Quiesce fails if commands are still in flight on the controller. */
	if (wait_for_outstanding(instance)) {
		con_log(CL_ANN1,
		    (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
1637 1619 #endif /* __sparc */
1638 1620
1639 1621 /*
1640 1622 * ************************************************************************** *
1641 1623 * *
1642 1624 * entry points (SCSI HBA) *
1643 1625 * *
1644 1626 * ************************************************************************** *
1645 1627 */
1646 1628 /*
1647 1629 * tran_tgt_init - initialize a target device instance
1648 1630 * @hba_dip:
1649 1631 * @tgt_dip:
1650 1632 * @tran:
1651 1633 * @sd:
1652 1634 *
1653 1635 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1654 1636 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1655 1637 * the device's address as valid and supportable for that particular HBA.
1656 1638 * By returning DDI_FAILURE, the instance of the target driver for that device
1657 1639 * is not probed or attached.
1658 1640 */
1659 1641 /*ARGSUSED*/
1660 1642 static int
1661 1643 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1662 1644 scsi_hba_tran_t *tran, struct scsi_device *sd)
1663 1645 {
1664 1646 struct mrsas_instance *instance;
1665 1647 uint16_t tgt = sd->sd_address.a_target;
1666 1648 uint8_t lun = sd->sd_address.a_lun;
1667 1649 dev_info_t *child = NULL;
1668 1650
1669 1651 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
1670 1652 tgt, lun));
1671 1653
1672 1654 instance = ADDR2MR(&sd->sd_address);
1673 1655
1674 1656 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
1675 1657 /*
1676 1658 * If no persistent node exists, we don't allow .conf node
1677 1659 * to be created.
1678 1660 */
1679 1661 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
1680 1662 con_log(CL_DLEVEL2,
1681 1663 (CE_NOTE, "mrsas_tgt_init find child ="
1682 1664 " %p t = %d l = %d", (void *)child, tgt, lun));
1683 1665 if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
1684 1666 DDI_SUCCESS)
1685 1667 /* Create this .conf node */
1686 1668 return (DDI_SUCCESS);
1687 1669 }
1688 1670 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
1689 1671 "DDI_FAILURE t = %d l = %d", tgt, lun));
1690 1672 return (DDI_FAILURE);
1691 1673
1692 1674 }
1693 1675
1694 1676 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1695 1677 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1696 1678
1697 1679 if (tgt < MRDRV_MAX_LD && lun == 0) {
1698 1680 if (instance->mr_ld_list[tgt].dip == NULL &&
1699 1681 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1700 1682 mutex_enter(&instance->config_dev_mtx);
1701 1683 instance->mr_ld_list[tgt].dip = tgt_dip;
1702 1684 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1703 1685 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1704 1686 mutex_exit(&instance->config_dev_mtx);
1705 1687 }
1706 1688 }
1707 1689
1708 1690 #ifdef PDSUPPORT
1709 1691 else if (instance->tbolt) {
1710 1692 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1711 1693 mutex_enter(&instance->config_dev_mtx);
1712 1694 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1713 1695 instance->mr_tbolt_pd_list[tgt].flag =
1714 1696 MRDRV_TGT_VALID;
1715 1697 mutex_exit(&instance->config_dev_mtx);
1716 1698 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1717 1699 "t%xl%x", tgt, lun));
1718 1700 }
1719 1701 }
1720 1702 #endif
1721 1703
1722 1704 return (DDI_SUCCESS);
1723 1705 }
1724 1706
1725 1707 /*ARGSUSED*/
1726 1708 static void
1727 1709 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1728 1710 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1729 1711 {
1730 1712 struct mrsas_instance *instance;
1731 1713 int tgt = sd->sd_address.a_target;
1732 1714 int lun = sd->sd_address.a_lun;
1733 1715
1734 1716 instance = ADDR2MR(&sd->sd_address);
1735 1717
1736 1718 con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));
1737 1719
1738 1720 if (tgt < MRDRV_MAX_LD && lun == 0) {
1739 1721 if (instance->mr_ld_list[tgt].dip == tgt_dip) {
1740 1722 mutex_enter(&instance->config_dev_mtx);
1741 1723 instance->mr_ld_list[tgt].dip = NULL;
1742 1724 mutex_exit(&instance->config_dev_mtx);
1743 1725 }
1744 1726 }
1745 1727
1746 1728 #ifdef PDSUPPORT
1747 1729 else if (instance->tbolt) {
1748 1730 mutex_enter(&instance->config_dev_mtx);
1749 1731 instance->mr_tbolt_pd_list[tgt].dip = NULL;
1750 1732 mutex_exit(&instance->config_dev_mtx);
1751 1733 con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
1752 1734 "for tgt:%x", tgt));
1753 1735 }
1754 1736 #endif
1755 1737
1756 1738 }
1757 1739
1758 1740 dev_info_t *
1759 1741 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1760 1742 {
1761 1743 dev_info_t *child = NULL;
1762 1744 char addr[SCSI_MAXNAMELEN];
1763 1745 char tmp[MAXNAMELEN];
1764 1746
1765 1747 (void) sprintf(addr, "%x,%x", tgt, lun);
1766 1748 for (child = ddi_get_child(instance->dip); child;
1767 1749 child = ddi_get_next_sibling(child)) {
1768 1750
1769 1751 /* XXX KEBE ASKS - why was this added?! */
1770 1752 if (ndi_dev_is_persistent_node(child) == 0) {
1771 1753 continue;
1772 1754 }
1773 1755
1774 1756 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1775 1757 DDI_SUCCESS) {
1776 1758 continue;
1777 1759 }
1778 1760
1779 1761 if (strcmp(addr, tmp) == 0) {
1780 1762 break;
1781 1763 }
1782 1764 }
1783 1765 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1784 1766 (void *)child));
1785 1767 return (child);
1786 1768 }
1787 1769
1788 1770 /*
1789 1771 * mrsas_name_node -
1790 1772 * @dip:
1791 1773 * @name:
1792 1774 * @len:
1793 1775 */
1794 1776 static int
1795 1777 mrsas_name_node(dev_info_t *dip, char *name, int len)
1796 1778 {
1797 1779 int tgt, lun;
1798 1780
1799 1781 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1800 1782 DDI_PROP_DONTPASS, "target", -1);
1801 1783 con_log(CL_DLEVEL2, (CE_NOTE,
1802 1784 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1803 1785 if (tgt == -1) {
1804 1786 return (DDI_FAILURE);
1805 1787 }
1806 1788 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1807 1789 "lun", -1);
1808 1790 con_log(CL_DLEVEL2,
1809 1791 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1810 1792 if (lun == -1) {
1811 1793 return (DDI_FAILURE);
1812 1794 }
1813 1795 (void) snprintf(name, len, "%x,%x", tgt, lun);
1814 1796 return (DDI_SUCCESS);
1815 1797 }
1816 1798
/*
 * tran_init_pkt - allocate & initialize a scsi_pkt structure
 * @ap: target address
 * @pkt: existing packet to reuse, or NULL to allocate a fresh one
 * @bp: buf(9S) describing the data transfer, or NULL for non-data commands
 * @cmdlen: CDB length in bytes
 * @statuslen: SCSI status/ARQ area length
 * @tgtlen: target-private area length
 * @flags: scsi_init_pkt() flags (PKT_CONSISTENT, PKT_DMA_PARTIAL, ...)
 * @callback: resource-wait callback (SLEEP_FUNC/NULL_FUNC)
 *
 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
 * structure and DMA resources for a target driver request. The
 * tran_init_pkt() entry point is called when the target driver calls the
 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
 * is a request to perform one or more of three possible services:
 * - allocation and initialization of a scsi_pkt structure
 * - allocation of DMA resources for data transfer
 * - reallocation of DMA resources for the next portion of the data transfer
 */
static struct scsi_pkt *
mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	struct scsa_cmd *acmd;
	struct mrsas_instance *instance;
	struct scsi_pkt *new_pkt;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ADDR2MR(ap);

	/* step #1 : pkt allocation */
	if (pkt == NULL) {
		/* scsa_cmd is carried as the HBA-private area of the pkt */
		pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (struct scsa_cmd), callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		acmd = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		acmd->cmd_pkt = pkt;
		acmd->cmd_flags = 0;
		acmd->cmd_scblen = statuslen;
		acmd->cmd_cdblen = cmdlen;
		acmd->cmd_dmahandle = NULL;
		acmd->cmd_ncookies = 0;
		acmd->cmd_cookie = 0;
		acmd->cmd_cookiecnt = 0;
		acmd->cmd_nwin = 0;

		pkt->pkt_address = *ap;
		pkt->pkt_comp = (void (*)())NULL;
		pkt->pkt_flags = 0;
		pkt->pkt_time = 0;
		pkt->pkt_resid = 0;
		pkt->pkt_state = 0;
		pkt->pkt_statistics = 0;
		pkt->pkt_reason = 0;
		/* remember we allocated it, so it can be freed on DMA failure */
		new_pkt = pkt;
	} else {
		/* caller-supplied pkt: never free it on failure below */
		acmd = PKT2CMD(pkt);
		new_pkt = NULL;
	}

	/* step #2 : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		if (acmd->cmd_dmahandle == NULL) {
			/* first call for this transfer: bind DMA resources */
			if (mrsas_dma_alloc(instance, pkt, bp, flags,
			    callback) == DDI_FAILURE) {
				if (new_pkt) {
					scsi_hba_pkt_free(ap, new_pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		} else {
			/* subsequent call: advance to the next DMA window */
			if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
				return ((struct scsi_pkt *)NULL);
			}
		}
	}

	return (pkt);
}
1907 1889
/*
 * tran_start - transport a SCSI command to the addressed target
 * @ap: target address
 * @pkt: packet previously built via mrsas_tran_init_pkt()
 *
 * The tran_start() entry point for a SCSI HBA driver is called to transport a
 * SCSI command to the addressed target. The SCSI command is described
 * entirely within the scsi_pkt structure, which the target driver allocated
 * through the HBA driver's tran_init_pkt() entry point. If the command
 * involves a data transfer, DMA resources must also have been allocated for
 * the scsi_pkt structure.
 *
 * Return Values :
 *	TRAN_BUSY - request queue is full, no more free scbs
 *	TRAN_ACCEPT - pkt has been submitted to the instance
 *	TRAN_FATAL_ERROR - the adapter is dead and takes no more I/O
 */
static int
mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
{
	uchar_t cmd_done = 0;

	struct mrsas_instance *instance = ADDR2MR(ap);
	struct mrsas_cmd *cmd;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/* Adapter declared dead: fail every request immediately. */
	if (instance->deadadapter == 1) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_tran_start: return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesnt take any more IOs"));
		if (pkt) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}

	/* Adapter reset underway: ask SCSA to retry later. */
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}

	con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
	    __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));

	pkt->pkt_reason = CMD_CMPLT;
	*pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */

	cmd = build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will be
	 * NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	/* No free command frame was available. */
	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		/* Interrupt-driven path: queue to firmware and return. */
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
			DTRACE_PROBE2(start_tran_err,
			    uint16_t, instance->fw_outstanding,
			    uint16_t, instance->max_fw_cmds);
			return_mfi_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
		con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
		    "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
		instance->func_ptr->issue_cmd(cmd, instance);

	} else {
		/* FLAG_NOINTR: poll the command to completion right here. */
		struct mrsas_header *hdr = &cmd->frame->hdr;

		/* cmd->sync_cmd = MRSAS_TRUE; */ /* KEBE asks, inherit? */

		instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_statistics = 0;
		pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;

		/* Map the firmware completion status into pkt fields. */
		switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
		    &hdr->cmd_status)) {
		case MFI_STAT_OK:
			pkt->pkt_scbp[0] = STATUS_GOOD;
			break;

		case MFI_STAT_SCSI_DONE_WITH_ERROR:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: scsi done with error"));
			pkt->pkt_reason = CMD_CMPLT;
			pkt->pkt_statistics = 0;

			((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
			break;

		case MFI_STAT_DEVICE_NOT_FOUND:
			con_log(CL_ANN, (CE_CONT,
			    "mrsas_tran_start: device not found error"));
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
			break;

		default:
			/* any other firmware status is reported as busy */
			((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
		}

		(void) mrsas_common_check(instance, cmd);
		DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
		    uint8_t, hdr->cmd_status);
		return_mfi_pkt(instance, cmd);

		if (pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

	}

	return (TRAN_ACCEPT);
}
2046 2028
2047 2029 /*
2048 2030 * tran_abort - Abort any commands that are currently in transport
2049 2031 * @ap:
2050 2032 * @pkt:
2051 2033 *
2052 2034 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
2053 2035 * commands that are currently in transport for a particular target. This entry
2054 2036 * point is called when a target driver calls scsi_abort(). The tran_abort()
2055 2037 * entry point should attempt to abort the command denoted by the pkt
2056 2038 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
2057 2039 * abort all outstanding commands in the transport layer for the particular
2058 2040 * target or logical unit.
2059 2041 */
2060 2042 /*ARGSUSED*/
2061 2043 static int
2062 2044 mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
2063 2045 {
2064 2046 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2065 2047
2066 2048 /* abort command not supported by H/W */
2067 2049
2068 2050 return (DDI_FAILURE);
2069 2051 }
2070 2052
2071 2053 /*
2072 2054 * tran_reset - reset either the SCSI bus or target
2073 2055 * @ap:
2074 2056 * @level:
2075 2057 *
2076 2058 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
2077 2059 * the SCSI bus or a particular SCSI target device. This entry point is called
2078 2060 * when a target driver calls scsi_reset(). The tran_reset() entry point must
2079 2061 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
2080 2062 * particular target or logical unit must be reset.
2081 2063 */
2082 2064 /*ARGSUSED*/
2083 2065 static int
2084 2066 mrsas_tran_reset(struct scsi_address *ap, int level)
2085 2067 {
2086 2068 struct mrsas_instance *instance = ADDR2MR(ap);
2087 2069
2088 2070 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2089 2071
2090 2072 if (wait_for_outstanding(instance)) {
2091 2073 con_log(CL_ANN1,
2092 2074 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
2093 2075 return (DDI_FAILURE);
2094 2076 } else {
2095 2077 return (DDI_SUCCESS);
2096 2078 }
2097 2079 }
2098 2080
#if 0
/*
 * tran_bus_reset - reset the SCSI bus
 * @dip: devinfo node of the HBA
 * @level: reset level (unused)
 *
 * NOTE: this entry point is compiled out (#if 0) and is not wired into the
 * scsi_hba_tran vector; kept only for reference.
 *
 * The tran_bus_reset() vector in the scsi_hba_tran structure should be
 * initialized during the HBA driver's attach(). The vector should point to
 * an HBA entry point that is to be called when a user initiates a bus reset.
 * Implementation is hardware specific. If the HBA driver cannot reset the
 * SCSI bus without affecting the targets, the driver should fail RESET_BUS
 * or not initialize this vector.
 */
/*ARGSUSED*/
static int
mrsas_tran_bus_reset(dev_info_t *dip, int level)
{
	int instance_no = ddi_get_instance(dip);

	struct mrsas_instance *instance = ddi_get_soft_state(mrsas_state,
	    instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	/* Like tran_reset, just drain outstanding I/O. */
	if (wait_for_outstanding(instance)) {
		con_log(CL_ANN1,
		    (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
		return (DDI_FAILURE);
	} else {
		return (DDI_SUCCESS);
	}
}
#endif
2132 2114
2133 2115 /*
2134 2116 * tran_getcap - get one of a set of SCSA-defined capabilities
2135 2117 * @ap:
2136 2118 * @cap:
2137 2119 * @whom:
2138 2120 *
2139 2121 * The target driver can request the current setting of the capability for a
2140 2122 * particular target by setting the whom parameter to nonzero. A whom value of
2141 2123 * zero indicates a request for the current setting of the general capability
2142 2124 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
2143 2125 * for undefined capabilities or the current value of the requested capability.
2144 2126 */
2145 2127 /*ARGSUSED*/
2146 2128 static int
2147 2129 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
2148 2130 {
2149 2131 int rval = 0;
2150 2132
2151 2133 struct mrsas_instance *instance = ADDR2MR(ap);
2152 2134
2153 2135 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2154 2136
2155 2137 /* we do allow inquiring about capabilities for other targets */
2156 2138 if (cap == NULL) {
2157 2139 return (-1);
2158 2140 }
2159 2141
2160 2142 switch (scsi_hba_lookup_capstr(cap)) {
2161 2143 case SCSI_CAP_DMA_MAX:
2162 2144 if (instance->tbolt) {
2163 2145 /* Limit to 256k max transfer */
2164 2146 rval = mrsas_tbolt_max_cap_maxxfer;
2165 2147 } else {
2166 2148 /* Limit to 16MB max transfer */
2167 2149 rval = mrsas_max_cap_maxxfer;
2168 2150 }
2169 2151 break;
2170 2152 case SCSI_CAP_MSG_OUT:
2171 2153 rval = 1;
2172 2154 break;
2173 2155 case SCSI_CAP_DISCONNECT:
2174 2156 rval = 0;
2175 2157 break;
2176 2158 case SCSI_CAP_SYNCHRONOUS:
2177 2159 rval = 0;
2178 2160 break;
2179 2161 case SCSI_CAP_WIDE_XFER:
2180 2162 rval = 1;
2181 2163 break;
2182 2164 case SCSI_CAP_TAGGED_QING:
2183 2165 rval = 1;
2184 2166 break;
2185 2167 case SCSI_CAP_UNTAGGED_QING:
2186 2168 rval = 1;
2187 2169 break;
2188 2170 case SCSI_CAP_PARITY:
2189 2171 rval = 1;
2190 2172 break;
2191 2173 case SCSI_CAP_INITIATOR_ID:
2192 2174 rval = instance->init_id;
2193 2175 break;
2194 2176 case SCSI_CAP_ARQ:
2195 2177 rval = 1;
2196 2178 break;
2197 2179 case SCSI_CAP_LINKED_CMDS:
2198 2180 rval = 0;
2199 2181 break;
2200 2182 case SCSI_CAP_RESET_NOTIFICATION:
2201 2183 rval = 1;
2202 2184 break;
2203 2185 case SCSI_CAP_GEOMETRY:
2204 2186 rval = -1;
2205 2187
2206 2188 break;
2207 2189 default:
2208 2190 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2209 2191 scsi_hba_lookup_capstr(cap)));
2210 2192 rval = -1;
2211 2193 break;
2212 2194 }
2213 2195
2214 2196 return (rval);
2215 2197 }
2216 2198
2217 2199 /*
2218 2200 * tran_setcap - set one of a set of SCSA-defined capabilities
2219 2201 * @ap:
2220 2202 * @cap:
2221 2203 * @value:
2222 2204 * @whom:
2223 2205 *
2224 2206 * The target driver might request that the new value be set for a particular
2225 2207 * target by setting the whom parameter to nonzero. A whom value of zero
2226 2208 * means that request is to set the new value for the SCSI bus or for adapter
2227 2209 * hardware in general.
2228 2210 * The tran_setcap() should return the following values as appropriate:
2229 2211 * - -1 for undefined capabilities
2230 2212 * - 0 if the HBA driver cannot set the capability to the requested value
2231 2213 * - 1 if the HBA driver is able to set the capability to the requested value
2232 2214 */
2233 2215 /*ARGSUSED*/
2234 2216 static int
2235 2217 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2236 2218 {
2237 2219 int rval = 1;
2238 2220
2239 2221 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2240 2222
2241 2223 /* We don't allow setting capabilities for other targets */
2242 2224 if (cap == NULL || whom == 0) {
2243 2225 return (-1);
2244 2226 }
2245 2227
2246 2228 switch (scsi_hba_lookup_capstr(cap)) {
2247 2229 case SCSI_CAP_DMA_MAX:
2248 2230 case SCSI_CAP_MSG_OUT:
2249 2231 case SCSI_CAP_PARITY:
2250 2232 case SCSI_CAP_LINKED_CMDS:
2251 2233 case SCSI_CAP_RESET_NOTIFICATION:
2252 2234 case SCSI_CAP_DISCONNECT:
2253 2235 case SCSI_CAP_SYNCHRONOUS:
2254 2236 case SCSI_CAP_UNTAGGED_QING:
2255 2237 case SCSI_CAP_WIDE_XFER:
2256 2238 case SCSI_CAP_INITIATOR_ID:
2257 2239 case SCSI_CAP_ARQ:
2258 2240 /*
2259 2241 * None of these are settable via
2260 2242 * the capability interface.
2261 2243 */
2262 2244 break;
2263 2245 case SCSI_CAP_TAGGED_QING:
2264 2246 rval = 1;
2265 2247 break;
2266 2248 case SCSI_CAP_SECTOR_SIZE:
2267 2249 rval = 1;
2268 2250 break;
2269 2251
2270 2252 case SCSI_CAP_TOTAL_SECTORS:
2271 2253 rval = 1;
2272 2254 break;
2273 2255 default:
2274 2256 rval = -1;
2275 2257 break;
2276 2258 }
2277 2259
2278 2260 return (rval);
2279 2261 }
2280 2262
2281 2263 /*
2282 2264 * tran_destroy_pkt - deallocate scsi_pkt structure
2283 2265 * @ap:
2284 2266 * @pkt:
2285 2267 *
2286 2268 * The tran_destroy_pkt() entry point is the HBA driver function that
2287 2269 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2288 2270 * called when the target driver calls scsi_destroy_pkt(). The
2289 2271 * tran_destroy_pkt() entry point must free any DMA resources that have been
2290 2272 * allocated for the packet. An implicit DMA synchronization occurs if the
2291 2273 * DMA resources are freed and any cached data remains after the completion
2292 2274 * of the transfer.
2293 2275 */
2294 2276 static void
2295 2277 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2296 2278 {
2297 2279 struct scsa_cmd *acmd = PKT2CMD(pkt);
2298 2280
2299 2281 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2300 2282
2301 2283 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2302 2284 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2303 2285
2304 2286 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2305 2287
2306 2288 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2307 2289
2308 2290 acmd->cmd_dmahandle = NULL;
2309 2291 }
2310 2292
2311 2293 /* free the pkt */
2312 2294 scsi_hba_pkt_free(ap, pkt);
2313 2295 }
2314 2296
2315 2297 /*
2316 2298 * tran_dmafree - deallocates DMA resources
2317 2299 * @ap:
2318 2300 * @pkt:
2319 2301 *
2320 2302 * The tran_dmafree() entry point deallocates DMAQ resources that have been
2321 2303 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2322 2304 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2323 2305 * free only DMA resources allocated for a scsi_pkt structure, not the
2324 2306 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2325 2307 * implicitly performed.
2326 2308 */
2327 2309 /*ARGSUSED*/
2328 2310 static void
2329 2311 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2330 2312 {
2331 2313 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2332 2314
2333 2315 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2334 2316
2335 2317 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2336 2318 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2337 2319
2338 2320 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2339 2321
2340 2322 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2341 2323
2342 2324 acmd->cmd_dmahandle = NULL;
2343 2325 }
2344 2326 }
2345 2327
2346 2328 /*
2347 2329 * tran_sync_pkt - synchronize the DMA object allocated
2348 2330 * @ap:
2349 2331 * @pkt:
2350 2332 *
2351 2333 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2352 2334 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2353 2335 * entry point is called when the target driver calls scsi_sync_pkt(). If the
2354 2336 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2355 2337 * must synchronize the CPU's view of the data. If the data transfer direction
2356 2338 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2357 2339 * device's view of the data.
2358 2340 */
2359 2341 /*ARGSUSED*/
2360 2342 static void
2361 2343 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2362 2344 {
2363 2345 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2364 2346
2365 2347 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2366 2348
2367 2349 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2368 2350 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2369 2351 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2370 2352 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2371 2353 }
2372 2354 }
2373 2355
2374 2356 /*ARGSUSED*/
2375 2357 static int
2376 2358 mrsas_tran_quiesce(dev_info_t *dip)
2377 2359 {
2378 2360 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2379 2361
2380 2362 return (1);
2381 2363 }
2382 2364
2383 2365 /*ARGSUSED*/
2384 2366 static int
2385 2367 mrsas_tran_unquiesce(dev_info_t *dip)
2386 2368 {
2387 2369 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2388 2370
2389 2371 return (1);
2390 2372 }
2391 2373
2392 2374
2393 2375 /*
2394 2376 * mrsas_isr(caddr_t)
2395 2377 *
2396 2378 * The Interrupt Service Routine
2397 2379 *
2398 2380 * Collect status for all completed commands and do callback
2399 2381 *
2400 2382 */
2401 2383 static uint_t
2402 2384 mrsas_isr(struct mrsas_instance *instance)
2403 2385 {
2404 2386 int need_softintr;
2405 2387 uint32_t producer;
2406 2388 uint32_t consumer;
2407 2389 uint32_t context;
2408 2390 int retval;
2409 2391
2410 2392 struct mrsas_cmd *cmd;
2411 2393 struct mrsas_header *hdr;
2412 2394 struct scsi_pkt *pkt;
2413 2395
2414 2396 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2415 2397 ASSERT(instance);
2416 2398 if (instance->tbolt) {
2417 2399 mutex_enter(&instance->chip_mtx);
2418 2400 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2419 2401 !(instance->func_ptr->intr_ack(instance))) {
2420 2402 mutex_exit(&instance->chip_mtx);
2421 2403 return (DDI_INTR_UNCLAIMED);
2422 2404 }
2423 2405 retval = mr_sas_tbolt_process_outstanding_cmd(instance);
2424 2406 mutex_exit(&instance->chip_mtx);
2425 2407 return (retval);
2426 2408 } else {
2427 2409 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2428 2410 !instance->func_ptr->intr_ack(instance)) {
2429 2411 return (DDI_INTR_UNCLAIMED);
2430 2412 }
2431 2413 }
2432 2414
2433 2415 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2434 2416 0, 0, DDI_DMA_SYNC_FORCPU);
2435 2417
2436 2418 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2437 2419 != DDI_SUCCESS) {
2438 2420 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2439 2421 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2440 2422 con_log(CL_ANN1, (CE_WARN,
2441 2423 "mr_sas_isr(): FMA check, returning DDI_INTR_UNCLAIMED"));
2442 2424 return (DDI_INTR_CLAIMED);
2443 2425 }
2444 2426 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2445 2427
2446 2428 #ifdef OCRDEBUG
2447 2429 if (debug_consecutive_timeout_after_ocr_g == 1) {
2448 2430 con_log(CL_ANN1, (CE_NOTE,
2449 2431 "simulating consecutive timeout after ocr"));
2450 2432 return (DDI_INTR_CLAIMED);
2451 2433 }
2452 2434 #endif
2453 2435
2454 2436 mutex_enter(&instance->completed_pool_mtx);
2455 2437 mutex_enter(&instance->cmd_pend_mtx);
2456 2438
2457 2439 producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2458 2440 instance->producer);
2459 2441 consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2460 2442 instance->consumer);
2461 2443
2462 2444 con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
2463 2445 producer, consumer));
2464 2446 if (producer == consumer) {
2465 2447 con_log(CL_ANN, (CE_WARN, "producer == consumer case"));
2466 2448 DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
2467 2449 uint32_t, consumer);
2468 2450 mutex_exit(&instance->cmd_pend_mtx);
2469 2451 mutex_exit(&instance->completed_pool_mtx);
2470 2452 return (DDI_INTR_CLAIMED);
2471 2453 }
2472 2454
2473 2455 while (consumer != producer) {
2474 2456 context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2475 2457 &instance->reply_queue[consumer]);
2476 2458 cmd = instance->cmd_list[context];
2477 2459
2478 2460 if (cmd->sync_cmd == MRSAS_TRUE) {
2479 2461 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2480 2462 if (hdr) {
2481 2463 mlist_del_init(&cmd->list);
2482 2464 }
2483 2465 } else {
2484 2466 pkt = cmd->pkt;
2485 2467 if (pkt) {
2486 2468 mlist_del_init(&cmd->list);
2487 2469 }
2488 2470 }
2489 2471
2490 2472 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
2491 2473
2492 2474 consumer++;
2493 2475 if (consumer == (instance->max_fw_cmds + 1)) {
2494 2476 consumer = 0;
2495 2477 }
2496 2478 }
2497 2479 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
2498 2480 instance->consumer, consumer);
2499 2481 mutex_exit(&instance->cmd_pend_mtx);
2500 2482 mutex_exit(&instance->completed_pool_mtx);
2501 2483
2502 2484 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2503 2485 0, 0, DDI_DMA_SYNC_FORDEV);
2504 2486
2505 2487 if (instance->softint_running) {
2506 2488 need_softintr = 0;
2507 2489 } else {
2508 2490 need_softintr = 1;
2509 2491 }
2510 2492
2511 2493 if (instance->isr_level == HIGH_LEVEL_INTR) {
2512 2494 if (need_softintr) {
2513 2495 ddi_trigger_softintr(instance->soft_intr_id);
2514 2496 }
2515 2497 } else {
2516 2498 /*
2517 2499 * Not a high-level interrupt, therefore call the soft level
2518 2500 * interrupt explicitly
2519 2501 */
2520 2502 (void) mrsas_softintr(instance);
2521 2503 }
2522 2504
2523 2505 return (DDI_INTR_CLAIMED);
2524 2506 }
2525 2507
2526 2508
2527 2509 /*
2528 2510 * ************************************************************************** *
2529 2511 * *
2530 2512 * libraries *
2531 2513 * *
2532 2514 * ************************************************************************** *
2533 2515 */
2534 2516 /*
2535 2517 * get_mfi_pkt : Get a command from the free pool
2536 2518 * After successful allocation, the caller of this routine
2537 2519 * must clear the frame buffer (memset to zero) before
2538 2520 * using the packet further.
2539 2521 *
2540 2522 * ***** Note *****
|
↓ open down ↓ |
1053 lines elided |
↑ open up ↑ |
2541 2523 * After clearing the frame buffer the context id of the
2542 2524 * frame buffer SHOULD be restored back.
2543 2525 */
2544 2526 static struct mrsas_cmd *
2545 2527 get_mfi_pkt(struct mrsas_instance *instance)
2546 2528 {
2547 2529 mlist_t *head = &instance->cmd_pool_list;
2548 2530 struct mrsas_cmd *cmd = NULL;
2549 2531
2550 2532 mutex_enter(&instance->cmd_pool_mtx);
2551 - ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2552 2533
2553 2534 if (!mlist_empty(head)) {
2554 2535 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2555 2536 mlist_del_init(head->next);
2556 2537 }
2557 2538 if (cmd != NULL) {
2558 2539 cmd->pkt = NULL;
2559 2540 cmd->retry_count_for_ocr = 0;
2560 2541 cmd->drv_pkt_time = 0;
2561 2542
2562 2543 }
2563 2544 mutex_exit(&instance->cmd_pool_mtx);
2564 2545
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
2565 2546 return (cmd);
2566 2547 }
2567 2548
2568 2549 static struct mrsas_cmd *
2569 2550 get_mfi_app_pkt(struct mrsas_instance *instance)
2570 2551 {
2571 2552 mlist_t *head = &instance->app_cmd_pool_list;
2572 2553 struct mrsas_cmd *cmd = NULL;
2573 2554
2574 2555 mutex_enter(&instance->app_cmd_pool_mtx);
2575 - ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));
2576 2556
2577 2557 if (!mlist_empty(head)) {
2578 2558 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2579 2559 mlist_del_init(head->next);
2580 2560 }
2581 2561 if (cmd != NULL) {
2582 2562 cmd->pkt = NULL;
2583 2563 cmd->retry_count_for_ocr = 0;
2584 2564 cmd->drv_pkt_time = 0;
2585 2565 }
2586 2566
2587 2567 mutex_exit(&instance->app_cmd_pool_mtx);
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
2588 2568
2589 2569 return (cmd);
2590 2570 }
2591 2571 /*
2592 2572 * return_mfi_pkt : Return a cmd to free command pool
2593 2573 */
2594 2574 static void
2595 2575 return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2596 2576 {
2597 2577 mutex_enter(&instance->cmd_pool_mtx);
2598 - ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2599 2578 /* use mlist_add_tail for debug assistance */
2600 2579 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2601 2580
2602 2581 mutex_exit(&instance->cmd_pool_mtx);
2603 2582 }
2604 2583
2605 2584 static void
2606 2585 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2607 2586 {
2608 2587 mutex_enter(&instance->app_cmd_pool_mtx);
2609 - ASSERT(mutex_owned(&instance->app_cmd_pool_mtx));
2610 2588
2611 2589 mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2612 2590
2613 2591 mutex_exit(&instance->app_cmd_pool_mtx);
2614 2592 }
/*
 * push_pending_mfi_pkt - move a command onto the pending list and arm the
 * I/O timeout watchdog.
 * @instance: adapter soft state
 * @cmd: command being submitted to firmware
 *
 * The command's drv_pkt_time is taken from the frame header timeout for
 * sync (DCMD) commands, clamped up to debug_timeout_g; plain I/O commands
 * always get debug_timeout_g. The io_timeout_checker() timeout is started
 * only if one is not already running (timeout_id == (timeout_id_t)-1).
 */
void
push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;
	con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
	mutex_enter(&instance->cmd_pend_mtx);
	/* re-link the command at the tail of the pending list */
	mlist_del_init(&cmd->list);
	mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
	if (cmd->sync_cmd == MRSAS_TRUE) {
		/* NOTE(review): hdr = &cmd->frame->hdr can never be NULL;
		 * the if (hdr) checks below look like dead guards. */
		hdr = (struct mrsas_header *)&cmd->frame->hdr;
		if (hdr) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x "
			    "time %llx",
			    (void *)cmd, cmd->index,
			    gethrtime()));
			/* Wait for specified interval */
			cmd->drv_pkt_time = ddi_get16(
			    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_pkt(): "
			    "Called IO Timeout Value %x\n",
			    cmd->drv_pkt_time));
		}
		/* start the 1-second watchdog if not already armed */
		if (hdr && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	} else {
		pkt = cmd->pkt;
		if (pkt) {
			con_log(CL_ANN1, (CE_CONT,
			    "push_pending_mfi_pkt: "
			    "cmd %p index %x pkt %p, "
			    "time %llx",
			    (void *)cmd, cmd->index, (void *)pkt,
			    gethrtime()));
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
		}
		/* start the 1-second watchdog if not already armed */
		if (pkt && instance->timeout_id == (timeout_id_t)-1) {
			instance->timeout_id = timeout(io_timeout_checker,
			    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		}
	}

	mutex_exit(&instance->cmd_pend_mtx);

}
2668 2645
/*
 * mrsas_print_pending_cmds - debug helper that dumps every command on the
 * pending list via con_log/mrsas_print_cmd_details.
 * @instance: adapter soft state
 *
 * Temporarily raises debug_level_g to CL_ANN1 so the dump is visible, and
 * restores it afterwards. The first command gets a verbose dump (0xDD),
 * subsequent ones a brief one. Always returns DDI_SUCCESS.
 *
 * NOTE(review): cmd_pend_mtx is dropped between iterations while `tmp`
 * still points into the list; this looks racy if the list mutates
 * concurrently — presumably only called when the adapter is quiesced
 * (e.g. during OCR). Confirm against callers.
 */
int
mrsas_print_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	unsigned int flag = 1;
	struct scsi_pkt *pkt;
	int saved_level;
	int cmd_count = 0;

	/* force debug output on for the duration of the dump */
	saved_level = debug_level_g;
	debug_level_g = CL_ANN1;

	cmn_err(CE_NOTE, "mrsas_print_pending_cmds(): Called\n");

	while (flag) {
		mutex_enter(&instance->cmd_pend_mtx);
		tmp = tmp->next;
		if (tmp == head) {
			/* wrapped back to the list head: done */
			mutex_exit(&instance->cmd_pend_mtx);
			flag = 0;
			con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():"
			    " NO MORE CMDS PENDING....\n"));
			break;
		} else {
			cmd = mlist_entry(tmp, struct mrsas_cmd, list);
			mutex_exit(&instance->cmd_pend_mtx);
			if (cmd) {
				if (cmd->sync_cmd == MRSAS_TRUE) {
					/* DCMD: no scsi_pkt attached */
					hdr = (struct mrsas_header *)
					    &cmd->frame->hdr;
					if (hdr) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x (NO-PKT)"
						    " hdr %p\n", (void *)cmd,
						    cmd->index,
						    cmd->drv_pkt_time,
						    (void *)hdr));
					}
				} else {
					pkt = cmd->pkt;
					if (pkt) {
						con_log(CL_ANN1, (CE_CONT,
						    "print: cmd %p index 0x%x "
						    "drv_pkt_time 0x%x pkt %p \n",
						    (void *)cmd, cmd->index,
						    cmd->drv_pkt_time, (void *)pkt));
					}
				}

				/* verbose detail for the first command only */
				if (++cmd_count == 1) {
					mrsas_print_cmd_details(instance, cmd,
					    0xDD);
				} else {
					mrsas_print_cmd_details(instance, cmd,
					    1);
				}

			}
		}
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));


	debug_level_g = saved_level;

	return (DDI_SUCCESS);
}
2741 2717
2742 2718
/*
 * mrsas_complete_pending_cmds - fail back every command still on the
 * pending list (used when the adapter is being torn down/reset).
 * @instance: adapter soft state
 *
 * I/O packets are completed to SCSA with CMD_DEV_GONE/STAT_DISCON;
 * sync (DCMD) commands are completed to their waiters with
 * MFI_STAT_INVALID_STATUS via complete_cmd_in_sync_mode(). Each command
 * is unlinked from the pending list. Always returns DDI_SUCCESS.
 *
 * NOTE(review): pkt_comp callbacks are invoked while cmd_pend_mtx is
 * held — presumably safe given SCSA completion context; confirm no
 * completion path re-enters this driver and reacquires cmd_pend_mtx.
 */
int
mrsas_complete_pending_cmds(struct mrsas_instance *instance)
{

	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;
	struct mrsas_header *hdr;

	struct mlist_head *pos, *next;

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_complete_pending_cmds(): Called"));

	mutex_enter(&instance->cmd_pend_mtx);
	/* safe iteration: entries are unlinked while walking */
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);
		if (cmd) {
			pkt = cmd->pkt;
			if (pkt) { /* for IO */
				if (((pkt->pkt_flags & FLAG_NOINTR)
				    == 0) && pkt->pkt_comp) {
					pkt->pkt_reason
					    = CMD_DEV_GONE;
					pkt->pkt_statistics
					    = STAT_DISCON;
					con_log(CL_ANN1, (CE_CONT,
					    "fail and posting to scsa "
					    "cmd %p index %x"
					    " pkt %p "
					    "time : %llx",
					    (void *)cmd, cmd->index,
					    (void *)pkt, gethrtime()));
					(*pkt->pkt_comp)(pkt);
				}
			} else { /* for DCMDS */
				if (cmd->sync_cmd == MRSAS_TRUE) {
				hdr = (struct mrsas_header *)&cmd->frame->hdr;
				con_log(CL_ANN1, (CE_CONT,
				    "posting invalid status to application "
				    "cmd %p index %x"
				    " hdr %p "
				    "time : %llx",
				    (void *)cmd, cmd->index,
				    (void *)hdr, gethrtime()));
				hdr->cmd_status = MFI_STAT_INVALID_STATUS;
				complete_cmd_in_sync_mode(instance, cmd);
				}
			}
			mlist_del_init(&cmd->list);
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_complete_pending_cmds:"
			    "NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_complete_pending_cmds:"
		    "looping for more commands\n"));
	}
	mutex_exit(&instance->cmd_pend_mtx);

	con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2806 2782
/*
 * mrsas_print_cmd_details - dump diagnostic state for one command.
 *
 * detail == 0xDD requests a verbose dump: debug_level_g is temporarily
 * forced to CL_ANN1 (and restored on exit), and on Thunderbolt (2208)
 * controllers the raw RAID_SCSI_IO_REQUEST frame plus its RAID context
 * are decoded endian-safely via ddi_get*() through the MPI2 frame pool
 * access handle.  Any other detail value prints only the summary lines.
 */
void
mrsas_print_cmd_details(struct mrsas_instance *instance, struct mrsas_cmd *cmd,
    int detail)
{
	struct scsi_pkt *pkt = cmd->pkt;
	Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
	int i;
	int saved_level;	/* only assigned/restored when detail == 0xDD */
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	if (detail == 0xDD) {
		saved_level = debug_level_g;
		debug_level_g = CL_ANN1;
	}


	if (instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
	} else {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->drv_pkt_time));
	}

	if (pkt) {
		con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
		    pkt->pkt_cdbp[0]));
	} else {
		con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
	}

	/* verbose + Thunderbolt: decode the hardware IO request frame */
	if ((detail == 0xDD) && instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
		con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X "
		    "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
		    ddi_get16(acc_handle, &scsi_io->DevHandle),
		    ddi_get8(acc_handle, &scsi_io->Function),
		    ddi_get16(acc_handle, &scsi_io->IoFlags),
		    ddi_get16(acc_handle, &scsi_io->SGLFlags),
		    ddi_get32(acc_handle, &scsi_io->DataLength)));

		/* full 32-byte CDB area, even if the CDB itself is shorter */
		for (i = 0; i < 32; i++) {
			con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ", i,
			    ddi_get8(acc_handle, &scsi_io->CDB.CDB32[i])));
		}

		con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
		con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X "
		    "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X "
		    "RAIDFlags=0x%X regLockRowLBA=0x%" PRIu64
		    " regLockLength=0x%X spanArm=0x%X\n",
		    ddi_get8(acc_handle, &scsi_io->RaidContext.status),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.extStatus),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.ldTargetId),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.timeoutValue),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.regLockFlags),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.RAIDFlags),
		    ddi_get64(acc_handle, &scsi_io->RaidContext.regLockRowLBA),
		    ddi_get32(acc_handle, &scsi_io->RaidContext.regLockLength),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.spanArm)));
	}

	/* undo the temporary debug-level bump */
	if (detail == 0xDD) {
		debug_level_g = saved_level;
	}
}
2876 2852
2877 2853
/*
 * mrsas_issue_pending_cmds - re-issue every command on the pending list
 * (used after online controller reset / OCR recovery).
 *
 * For each pending command: the timeout is topped up to at least
 * debug_timeout_g, the per-command OCR retry counter is bumped, and the
 * command is re-sent to the firmware -- synchronous DCMDs through
 * issue_cmd_in_sync_mode, everything else through issue_cmd.  If any
 * command has already been retried more than IO_RETRY_COUNT times the
 * adapter is declared dead (mrsas_kill_adapter / mrsas_tbolt_kill_adapter)
 * and DDI_FAILURE is returned; otherwise DDI_SUCCESS.
 *
 * NOTE(review): cmd_pend_mtx is dropped between fetching a command and
 * issuing it, and the initial head->next read happens unlocked -- looks
 * intentional (issue paths may block), but confirm no other thread can
 * unlink entries concurrently during OCR.
 */
int
mrsas_issue_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head->next;
	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
	while (tmp != head) {
		/* fetch the entry and advance under the lock */
		mutex_enter(&instance->cmd_pend_mtx);
		cmd = mlist_entry(tmp, struct mrsas_cmd, list);
		tmp = tmp->next;
		mutex_exit(&instance->cmd_pend_mtx);
		if (cmd) {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds(): "
			    "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
			    (void *)cmd, cmd->index, cmd->drv_pkt_time));

			/* Reset command timeout value */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

			cmd->retry_count_for_ocr++;

			cmn_err(CE_CONT, "cmd retry count = %d\n",
			    cmd->retry_count_for_ocr);

			/* too many OCR retries: give up on the adapter */
			if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
				cmn_err(CE_WARN, "mrsas_issue_pending_cmds(): "
				    "cmd->retry_count exceeded limit >%d\n",
				    IO_RETRY_COUNT);
				mrsas_print_cmd_details(instance, cmd, 0xDD);

				cmn_err(CE_WARN,
				    "mrsas_issue_pending_cmds():"
				    "Calling KILL Adapter\n");
				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}

			pkt = cmd->pkt;
			if (pkt) {
				con_log(CL_ANN1, (CE_CONT,
				    "PENDING PKT-CMD ISSUE: cmd %p index %x "
				    "pkt %p time %llx",
				    (void *)cmd, cmd->index,
				    (void *)pkt,
				    gethrtime()));

			} else {
				cmn_err(CE_CONT,
				    "mrsas_issue_pending_cmds(): NO-PKT, "
				    "cmd %p index 0x%x drv_pkt_time 0x%x ",
				    (void *)cmd, cmd->index, cmd->drv_pkt_time);
			}


			if (cmd->sync_cmd == MRSAS_TRUE) {
				cmn_err(CE_CONT, "mrsas_issue_pending_cmds(): "
				    "SYNC_CMD == TRUE \n");
				instance->func_ptr->issue_cmd_in_sync_mode(
				    instance, cmd);
			} else {
				instance->func_ptr->issue_cmd(cmd, instance);
			}
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds: NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_issue_pending_cmds:"
		    "looping for more commands"));
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2959 2935
2960 2936
2961 2937
2962 2938 /*
2963 2939 * destroy_mfi_frame_pool
2964 2940 */
2965 2941 void
2966 2942 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2967 2943 {
2968 2944 int i;
2969 2945 uint32_t max_cmd = instance->max_fw_cmds;
2970 2946
2971 2947 struct mrsas_cmd *cmd;
2972 2948
2973 2949 /* return all frames to pool */
2974 2950
2975 2951 for (i = 0; i < max_cmd; i++) {
2976 2952
2977 2953 cmd = instance->cmd_list[i];
2978 2954
2979 2955 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2980 2956 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2981 2957
2982 2958 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
2983 2959 }
2984 2960
2985 2961 }
2986 2962
2987 2963 /*
2988 2964 * create_mfi_frame_pool
2989 2965 */
2990 2966 int
2991 2967 create_mfi_frame_pool(struct mrsas_instance *instance)
2992 2968 {
2993 2969 int i = 0;
2994 2970 int cookie_cnt;
2995 2971 uint16_t max_cmd;
2996 2972 uint16_t sge_sz;
2997 2973 uint32_t sgl_sz;
2998 2974 uint32_t tot_frame_size;
2999 2975 struct mrsas_cmd *cmd;
3000 2976 int retval = DDI_SUCCESS;
3001 2977
3002 2978 max_cmd = instance->max_fw_cmds;
3003 2979 sge_sz = sizeof (struct mrsas_sge_ieee);
3004 2980 /* calculated the number of 64byte frames required for SGL */
3005 2981 sgl_sz = sge_sz * instance->max_num_sge;
3006 2982 tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;
3007 2983
3008 2984 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
3009 2985 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
3010 2986
3011 2987 while (i < max_cmd) {
3012 2988 cmd = instance->cmd_list[i];
3013 2989
3014 2990 cmd->frame_dma_obj.size = tot_frame_size;
3015 2991 cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
3016 2992 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
3017 2993 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
3018 2994 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
3019 2995 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
3020 2996
3021 2997 cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
3022 2998 (uchar_t)DDI_STRUCTURE_LE_ACC);
3023 2999
3024 3000 if (cookie_cnt == -1 || cookie_cnt > 1) {
3025 3001 cmn_err(CE_WARN,
3026 3002 "create_mfi_frame_pool: could not alloc.");
3027 3003 retval = DDI_FAILURE;
3028 3004 goto mrsas_undo_frame_pool;
3029 3005 }
3030 3006
3031 3007 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
3032 3008
3033 3009 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
3034 3010 cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
3035 3011 cmd->frame_phys_addr =
3036 3012 cmd->frame_dma_obj.dma_cookie[0].dmac_address;
3037 3013
3038 3014 cmd->sense = (uint8_t *)(((unsigned long)
3039 3015 cmd->frame_dma_obj.buffer) +
3040 3016 tot_frame_size - SENSE_LENGTH);
3041 3017 cmd->sense_phys_addr =
3042 3018 cmd->frame_dma_obj.dma_cookie[0].dmac_address +
3043 3019 tot_frame_size - SENSE_LENGTH;
3044 3020
3045 3021 if (!cmd->frame || !cmd->sense) {
3046 3022 cmn_err(CE_WARN,
3047 3023 "mr_sas: pci_pool_alloc failed");
3048 3024 retval = ENOMEM;
3049 3025 goto mrsas_undo_frame_pool;
3050 3026 }
3051 3027
3052 3028 ddi_put32(cmd->frame_dma_obj.acc_handle,
3053 3029 &cmd->frame->io.context, cmd->index);
3054 3030 i++;
3055 3031
3056 3032 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
3057 3033 cmd->index, cmd->frame_phys_addr));
3058 3034 }
3059 3035
3060 3036 return (DDI_SUCCESS);
3061 3037
3062 3038 mrsas_undo_frame_pool:
3063 3039 if (i > 0)
3064 3040 destroy_mfi_frame_pool(instance);
3065 3041
3066 3042 return (retval);
3067 3043 }
3068 3044
/*
 * free_additional_dma_buffer - release the buffers allocated by
 * alloc_additional_dma_buffer(): the combined reply-queue/internal buffer
 * and the event-detail buffer.  Each is freed only if currently marked
 * allocated, and its status is reset so a second call is a no-op.
 */
static void
free_additional_dma_buffer(struct mrsas_instance *instance)
{
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}
}
3087 3063
/*
 * alloc_additional_dma_buffer - allocate the shared MFI support buffers.
 *
 * A single two-page DMA allocation is carved up as:
 *   [producer index (4)] [consumer index (4)] [reply queue] [internal buf]
 * where the reply-queue region is sized for max_fw_cmds + 1 entries (plus
 * the producer/consumer words counted in reply_q_sz), and everything after
 * reply_q_sz + 8 bytes is the general-purpose internal buffer used for
 * DCMD data transfers.  A second, separate DMA buffer is allocated for
 * asynchronous event details.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE after freeing the internal buffer
 * if the event-detail allocation fails.
 *
 * NOTE(review): internal_buf starts at offset reply_q_sz + 8, which
 * appears to leave the 8 bytes double-counted as slack -- presumably
 * intentional padding; confirm against the firmware queue layout.
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t reply_q_sz;
	uint32_t internal_buf_size = PAGESIZE*2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	/* single-cookie, 32-bit addressable allocation */
	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* carve the allocation into its fixed-offset regions */
	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    (reply_q_sz + 8);
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto mrsas_undo_internal_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);

mrsas_undo_internal_buff:
	/* unwind the internal buffer allocated above */
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}
3163 3139
3164 3140
3165 3141 void
3166 3142 mrsas_free_cmd_pool(struct mrsas_instance *instance)
3167 3143 {
3168 3144 int i;
3169 3145 uint32_t max_cmd;
3170 3146 size_t sz;
3171 3147
3172 3148 /* already freed */
3173 3149 if (instance->cmd_list == NULL) {
3174 3150 return;
3175 3151 }
3176 3152
3177 3153 max_cmd = instance->max_fw_cmds;
3178 3154
3179 3155 /* size of cmd_list array */
3180 3156 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3181 3157
3182 3158 /* First free each cmd */
3183 3159 for (i = 0; i < max_cmd; i++) {
3184 3160 if (instance->cmd_list[i] != NULL) {
3185 3161 kmem_free(instance->cmd_list[i],
3186 3162 sizeof (struct mrsas_cmd));
3187 3163 }
3188 3164
3189 3165 instance->cmd_list[i] = NULL;
3190 3166 }
3191 3167
3192 3168 /* Now, free cmd_list array */
3193 3169 if (instance->cmd_list != NULL)
3194 3170 kmem_free(instance->cmd_list, sz);
3195 3171
3196 3172 instance->cmd_list = NULL;
3197 3173
3198 3174 INIT_LIST_HEAD(&instance->cmd_pool_list);
3199 3175 INIT_LIST_HEAD(&instance->cmd_pend_list);
3200 3176 if (instance->tbolt) {
3201 3177 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
3202 3178 } else {
3203 3179 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3204 3180 }
3205 3181
3206 3182 }
3207 3183
3208 3184
3209 3185 /*
3210 3186 * mrsas_alloc_cmd_pool
3211 3187 */
3212 3188 int
3213 3189 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3214 3190 {
3215 3191 int i;
3216 3192 int count;
3217 3193 uint32_t max_cmd;
3218 3194 uint32_t reserve_cmd;
3219 3195 size_t sz;
3220 3196
3221 3197 struct mrsas_cmd *cmd;
3222 3198
3223 3199 max_cmd = instance->max_fw_cmds;
3224 3200 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3225 3201 "max_cmd %x", max_cmd));
3226 3202
3227 3203
3228 3204 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3229 3205
3230 3206 /*
3231 3207 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3232 3208 * Allocate the dynamic array first and then allocate individual
3233 3209 * commands.
3234 3210 */
3235 3211 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3236 3212 if (instance->cmd_list == NULL) {
3237 3213 con_log(CL_NONE, (CE_WARN,
3238 3214 "Failed to allocate memory for cmd_list"));
3239 3215 return (DDI_FAILURE);
3240 3216 }
3241 3217
3242 3218 /* create a frame pool and assign one frame to each cmd */
3243 3219 for (count = 0; count < max_cmd; count++) {
3244 3220 instance->cmd_list[count] =
3245 3221 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3246 3222 if (instance->cmd_list[count] == NULL) {
3247 3223 con_log(CL_NONE, (CE_WARN,
3248 3224 "Failed to allocate memory for mrsas_cmd"));
3249 3225 goto mrsas_undo_cmds;
3250 3226 }
3251 3227 }
3252 3228
3253 3229 /* add all the commands to command pool */
3254 3230
3255 3231 INIT_LIST_HEAD(&instance->cmd_pool_list);
3256 3232 INIT_LIST_HEAD(&instance->cmd_pend_list);
3257 3233 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3258 3234
3259 3235 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
3260 3236
3261 3237 for (i = 0; i < reserve_cmd; i++) {
3262 3238 cmd = instance->cmd_list[i];
3263 3239 cmd->index = i;
3264 3240 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3265 3241 }
3266 3242
3267 3243
3268 3244 for (i = reserve_cmd; i < max_cmd; i++) {
3269 3245 cmd = instance->cmd_list[i];
3270 3246 cmd->index = i;
3271 3247 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3272 3248 }
3273 3249
3274 3250 return (DDI_SUCCESS);
3275 3251
3276 3252 mrsas_undo_cmds:
3277 3253 if (count > 0) {
3278 3254 /* free each cmd */
3279 3255 for (i = 0; i < count; i++) {
3280 3256 if (instance->cmd_list[i] != NULL) {
3281 3257 kmem_free(instance->cmd_list[i],
3282 3258 sizeof (struct mrsas_cmd));
3283 3259 }
3284 3260 instance->cmd_list[i] = NULL;
3285 3261 }
3286 3262 }
3287 3263
3288 3264 mrsas_undo_cmd_list:
3289 3265 if (instance->cmd_list != NULL)
3290 3266 kmem_free(instance->cmd_list, sz);
3291 3267 instance->cmd_list = NULL;
3292 3268
3293 3269 return (DDI_FAILURE);
3294 3270 }
3295 3271
3296 3272
/*
 * free_space_for_mfi - tear down everything alloc_space_for_mfi() set up.
 *
 * Release order is the reverse of allocation: additional DMA buffers
 * first, then the per-command MFI frame pool, and finally the command
 * pool (which also frees the cmd_list array and NULLs instance->cmd_list,
 * making a repeat call a no-op).
 */
static void
free_space_for_mfi(struct mrsas_instance *instance)
{

	/* already freed */
	if (instance->cmd_list == NULL) {
		return;
	}

	/* Free additional dma buffer */
	free_additional_dma_buffer(instance);

	/* Free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* Free all the commands in the cmd_list */
	/* Free the cmd_list buffer itself */
	mrsas_free_cmd_pool(instance);
}
3319 3295
3320 3296 /*
3321 3297 * alloc_space_for_mfi
3322 3298 */
3323 3299 static int
3324 3300 alloc_space_for_mfi(struct mrsas_instance *instance)
3325 3301 {
3326 3302 /* Allocate command pool (memory for cmd_list & individual commands) */
3327 3303 if (mrsas_alloc_cmd_pool(instance)) {
3328 3304 cmn_err(CE_WARN, "error creating cmd pool");
3329 3305 return (DDI_FAILURE);
3330 3306 }
3331 3307
3332 3308 /* Allocate MFI Frame pool */
3333 3309 if (create_mfi_frame_pool(instance)) {
3334 3310 cmn_err(CE_WARN, "error creating frame DMA pool");
3335 3311 goto mfi_undo_cmd_pool;
3336 3312 }
3337 3313
3338 3314 /* Allocate additional DMA buffer */
3339 3315 if (alloc_additional_dma_buffer(instance)) {
3340 3316 cmn_err(CE_WARN, "error creating frame DMA pool");
3341 3317 goto mfi_undo_frame_pool;
3342 3318 }
3343 3319
3344 3320 return (DDI_SUCCESS);
3345 3321
3346 3322 mfi_undo_frame_pool:
3347 3323 destroy_mfi_frame_pool(instance);
3348 3324
3349 3325 mfi_undo_cmd_pool:
3350 3326 mrsas_free_cmd_pool(instance);
3351 3327
3352 3328 return (DDI_FAILURE);
3353 3329 }
3354 3330
3355 3331
3356 3332
3357 3333 /*
3358 3334 * get_ctrl_info
3359 3335 */
3360 3336 static int
3361 3337 get_ctrl_info(struct mrsas_instance *instance,
3362 3338 struct mrsas_ctrl_info *ctrl_info)
3363 3339 {
3364 3340 int ret = 0;
3365 3341
3366 3342 struct mrsas_cmd *cmd;
3367 3343 struct mrsas_dcmd_frame *dcmd;
3368 3344 struct mrsas_ctrl_info *ci;
3369 3345
3370 3346 if (instance->tbolt) {
3371 3347 cmd = get_raid_msg_mfi_pkt(instance);
3372 3348 } else {
3373 3349 cmd = get_mfi_pkt(instance);
3374 3350 }
3375 3351
3376 3352 if (!cmd) {
3377 3353 con_log(CL_ANN, (CE_WARN,
3378 3354 "Failed to get a cmd for ctrl info"));
3379 3355 DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
3380 3356 uint16_t, instance->max_fw_cmds);
3381 3357 return (DDI_FAILURE);
3382 3358 }
3383 3359
3384 3360 /* Clear the frame buffer and assign back the context id */
3385 3361 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3386 3362 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3387 3363 cmd->index);
3388 3364
3389 3365 dcmd = &cmd->frame->dcmd;
3390 3366
3391 3367 ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3392 3368
3393 3369 if (!ci) {
3394 3370 cmn_err(CE_WARN,
3395 3371 "Failed to alloc mem for ctrl info");
3396 3372 return_mfi_pkt(instance, cmd);
3397 3373 return (DDI_FAILURE);
3398 3374 }
3399 3375
3400 3376 (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3401 3377
3402 3378 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
3403 3379 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3404 3380
3405 3381 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3406 3382 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3407 3383 MFI_CMD_STATUS_POLL_MODE);
3408 3384 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3409 3385 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3410 3386 MFI_FRAME_DIR_READ);
3411 3387 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3412 3388 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3413 3389 sizeof (struct mrsas_ctrl_info));
3414 3390 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3415 3391 MR_DCMD_CTRL_GET_INFO);
3416 3392 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3417 3393 instance->internal_buf_dmac_add);
3418 3394 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3419 3395 sizeof (struct mrsas_ctrl_info));
3420 3396
3421 3397 cmd->frame_count = 1;
3422 3398
3423 3399 if (instance->tbolt) {
3424 3400 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3425 3401 }
3426 3402
3427 3403 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3428 3404 ret = 0;
3429 3405
3430 3406 ctrl_info->max_request_size = ddi_get32(
3431 3407 cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
3432 3408
3433 3409 ctrl_info->ld_present_count = ddi_get16(
3434 3410 cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
3435 3411
3436 3412 ctrl_info->properties.on_off_properties = ddi_get32(
3437 3413 cmd->frame_dma_obj.acc_handle,
3438 3414 &ci->properties.on_off_properties);
3439 3415 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3440 3416 (uint8_t *)(ctrl_info->product_name),
3441 3417 (uint8_t *)(ci->product_name), 80 * sizeof (char),
3442 3418 DDI_DEV_AUTOINCR);
3443 3419 /* should get more members of ci with ddi_get when needed */
3444 3420 } else {
3445 3421 cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed");
3446 3422 ret = -1;
3447 3423 }
3448 3424
3449 3425 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3450 3426 ret = -1;
3451 3427 }
3452 3428 if (instance->tbolt) {
3453 3429 return_raid_msg_mfi_pkt(instance, cmd);
3454 3430 } else {
3455 3431 return_mfi_pkt(instance, cmd);
3456 3432 }
3457 3433
3458 3434 return (ret);
3459 3435 }
3460 3436
3461 3437 /*
3462 3438 * abort_aen_cmd
3463 3439 */
3464 3440 static int
3465 3441 abort_aen_cmd(struct mrsas_instance *instance,
3466 3442 struct mrsas_cmd *cmd_to_abort)
3467 3443 {
3468 3444 int ret = 0;
3469 3445
3470 3446 struct mrsas_cmd *cmd;
3471 3447 struct mrsas_abort_frame *abort_fr;
3472 3448
3473 3449 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3474 3450
3475 3451 if (instance->tbolt) {
3476 3452 cmd = get_raid_msg_mfi_pkt(instance);
3477 3453 } else {
3478 3454 cmd = get_mfi_pkt(instance);
3479 3455 }
3480 3456
3481 3457 if (!cmd) {
3482 3458 con_log(CL_ANN1, (CE_WARN,
3483 3459 "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
3484 3460 DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
3485 3461 uint16_t, instance->max_fw_cmds);
3486 3462 return (DDI_FAILURE);
3487 3463 }
3488 3464
3489 3465 /* Clear the frame buffer and assign back the context id */
3490 3466 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3491 3467 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3492 3468 cmd->index);
3493 3469
3494 3470 abort_fr = &cmd->frame->abort;
3495 3471
3496 3472 /* prepare and issue the abort frame */
3497 3473 ddi_put8(cmd->frame_dma_obj.acc_handle,
3498 3474 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3499 3475 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3500 3476 MFI_CMD_STATUS_SYNC_MODE);
3501 3477 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3502 3478 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3503 3479 cmd_to_abort->index);
3504 3480 ddi_put32(cmd->frame_dma_obj.acc_handle,
3505 3481 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3506 3482 ddi_put32(cmd->frame_dma_obj.acc_handle,
3507 3483 &abort_fr->abort_mfi_phys_addr_hi, 0);
3508 3484
3509 3485 instance->aen_cmd->abort_aen = 1;
3510 3486
3511 3487 /* cmd->sync_cmd = MRSAS_TRUE; */ /* KEBE ASKS, inherit? */
3512 3488 cmd->frame_count = 1;
3513 3489
3514 3490 if (instance->tbolt) {
3515 3491 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3516 3492 }
3517 3493
3518 3494 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3519 3495 con_log(CL_ANN1, (CE_WARN,
3520 3496 "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3521 3497 ret = -1;
3522 3498 } else {
3523 3499 ret = 0;
3524 3500 }
3525 3501
3526 3502 instance->aen_cmd->abort_aen = 1;
3527 3503 instance->aen_cmd = 0;
3528 3504
3529 3505 if (instance->tbolt) {
3530 3506 return_raid_msg_mfi_pkt(instance, cmd);
3531 3507 } else {
3532 3508 return_mfi_pkt(instance, cmd);
3533 3509 }
3534 3510
3535 3511 atomic_add_16(&instance->fw_outstanding, (-1));
3536 3512
3537 3513 return (ret);
3538 3514 }
3539 3515
3540 3516
/*
 * mrsas_build_init_cmd - build an MFI_CMD_OP_INIT frame in *cmd_ptr.
 *
 * The init frame carries the physical addresses of the producer index,
 * consumer index and reply queue (all inside mfi_internal_dma_obj), plus
 * the driver version string in a freshly-allocated DMA buffer
 * (instance->drv_ver_dma_obj, released later via the unroll machinery).
 * All hardware-visible fields are written endian-safely with ddi_put*().
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the driver-version DMA buffer
 * cannot be allocated (the frame itself needs no extra allocation).
 */
static int
mrsas_build_init_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd **cmd_ptr)
{
	struct mrsas_cmd *cmd;
	struct mrsas_init_frame *init_frame;
	struct mrsas_init_queue_info *initq_info;
	struct mrsas_drv_ver drv_ver_info;


	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	cmd = *cmd_ptr;


	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	/* queue info lives in the SGL area, 64 bytes into the frame */
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);

	/* producer index is at offset 0 of the internal DMA buffer */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);

	/* consumer index at offset 4 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	/* reply queue starts at offset 8 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	/* point the frame at the queue-info block built above */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);


	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "init_mfi : Could not allocate driver version buffer."));
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	(void) memset(instance->drv_ver_dma_obj.buffer, 0,
	    sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);


	/* copy driver version physical address to init frame */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &init_frame->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	*cmd_ptr = cmd;

	return (DDI_SUCCESS);
}
3646 3622
3647 3623
3648 3624 /*
3649 3625 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
3650 3626 */
3651 3627 int
3652 3628 mrsas_init_adapter_ppc(struct mrsas_instance *instance)
3653 3629 {
3654 3630 struct mrsas_cmd *cmd;
3655 3631
3656 3632 /*
3657 3633 * allocate memory for mfi adapter(cmd pool, individual commands, mfi
3658 3634 * frames etc
3659 3635 */
3660 3636 if (alloc_space_for_mfi(instance) != DDI_SUCCESS) {
3661 3637 con_log(CL_ANN, (CE_NOTE,
3662 3638 "Error, failed to allocate memory for MFI adapter"));
3663 3639 return (DDI_FAILURE);
3664 3640 }
3665 3641
3666 3642 /* Build INIT command */
3667 3643 cmd = get_mfi_pkt(instance);
3668 3644
3669 3645 if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
3670 3646 con_log(CL_ANN,
3671 3647 (CE_NOTE, "Error, failed to build INIT command"));
3672 3648
3673 3649 goto fail_undo_alloc_mfi_space;
3674 3650 }
3675 3651
3676 3652 /*
|
↓ open down ↓ |
985 lines elided |
↑ open up ↑ |
3677 3653 * Disable interrupt before sending init frame ( see linux driver code)
3678 3654 * send INIT MFI frame in polled mode
3679 3655 */
3680 3656 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3681 3657 con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
3682 3658 goto fail_fw_init;
3683 3659 }
3684 3660
3685 3661 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
3686 3662 goto fail_fw_init;
3687 - /* return_mfi_pkt(instance, cmd); */ /* XXX KEBE ASKS, inherit? */
3663 + return_mfi_pkt(instance, cmd);
3688 3664
3689 3665 if (ctio_enable &&
3690 3666 (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
3691 3667 con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
3692 3668 instance->flag_ieee = 1;
3693 3669 } else {
3694 3670 instance->flag_ieee = 0;
3695 3671 }
3696 3672
3697 3673 instance->unroll.alloc_space_mfi = 1;
3698 3674 instance->unroll.verBuff = 1;
3699 3675
3700 3676 return (DDI_SUCCESS);
3701 3677
3702 3678
3703 3679 fail_fw_init:
3704 3680 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
3705 3681
3706 3682 fail_undo_alloc_mfi_space:
3707 3683 return_mfi_pkt(instance, cmd);
3708 3684 free_space_for_mfi(instance);
3709 3685
3710 3686 return (DDI_FAILURE);
3711 3687
3712 3688 }
3713 3689
3714 3690 /*
3715 3691 * mrsas_init_adapter - Initialize adapter.
3716 3692 */
3717 3693 int
3718 3694 mrsas_init_adapter(struct mrsas_instance *instance)
3719 3695 {
3720 3696 struct mrsas_ctrl_info ctrl_info;
3721 3697
3722 3698
3723 3699 /* we expect the FW state to be READY */
3724 3700 if (mfi_state_transition_to_ready(instance)) {
3725 3701 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3726 3702 return (DDI_FAILURE);
3727 3703 }
3728 3704
3729 3705 /* get various operational parameters from status register */
3730 3706 instance->max_num_sge =
3731 3707 (instance->func_ptr->read_fw_status_reg(instance) &
3732 3708 0xFF0000) >> 0x10;
3733 3709 instance->max_num_sge =
3734 3710 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3735 3711 MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3736 3712
3737 3713 /*
3738 3714 * Reduce the max supported cmds by 1. This is to ensure that the
3739 3715 * reply_q_sz (1 more than the max cmd that driver may send)
3740 3716 * does not exceed max cmds that the FW can support
3741 3717 */
3742 3718 instance->max_fw_cmds =
3743 3719 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3744 3720 instance->max_fw_cmds = instance->max_fw_cmds - 1;
3745 3721
3746 3722
3747 3723
3748 3724 /* Initialize adapter */
3749 3725 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3750 3726 con_log(CL_ANN,
3751 3727 (CE_WARN, "mr_sas: could not initialize adapter"));
3752 3728 return (DDI_FAILURE);
3753 3729 }
3754 3730
3755 3731 /* gather misc FW related information */
3756 3732 instance->disable_online_ctrl_reset = 0;
3757 3733
3758 3734 if (!get_ctrl_info(instance, &ctrl_info)) {
3759 3735 instance->max_sectors_per_req = ctrl_info.max_request_size;
3760 3736 con_log(CL_ANN1, (CE_NOTE,
3761 3737 "product name %s ld present %d",
3762 3738 ctrl_info.product_name, ctrl_info.ld_present_count));
3763 3739 } else {
3764 3740 instance->max_sectors_per_req = instance->max_num_sge *
3765 3741 PAGESIZE / 512;
3766 3742 }
3767 3743
3768 3744 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG) {
3769 3745 instance->disable_online_ctrl_reset = 1;
3770 3746 con_log(CL_ANN1,
3771 3747 (CE_NOTE, "Disable online control Flag is set\n"));
3772 3748 } else {
3773 3749 con_log(CL_ANN1,
3774 3750 (CE_NOTE, "Disable online control Flag is not set\n"));
3775 3751 }
3776 3752
3777 3753 return (DDI_SUCCESS);
3778 3754
3779 3755 }
3780 3756
3781 3757
3782 3758
3783 3759 static int
3784 3760 mrsas_issue_init_mfi(struct mrsas_instance *instance)
3785 3761 {
3786 3762 struct mrsas_cmd *cmd;
3787 3763 struct mrsas_init_frame *init_frame;
3788 3764 struct mrsas_init_queue_info *initq_info;
3789 3765
3790 3766 /*
3791 3767 * Prepare a init frame. Note the init frame points to queue info
3792 3768 * structure. Each frame has SGL allocated after first 64 bytes. For
3793 3769 * this frame - since we don't need any SGL - we use SGL's space as
3794 3770 * queue info structure
3795 3771 */
3796 3772 con_log(CL_ANN1, (CE_NOTE,
3797 3773 "mrsas_issue_init_mfi: entry\n"));
3798 3774 cmd = get_mfi_app_pkt(instance);
3799 3775
3800 3776 if (!cmd) {
3801 3777 con_log(CL_ANN1, (CE_WARN,
3802 3778 "mrsas_issue_init_mfi: get_pkt failed\n"));
3803 3779 return (DDI_FAILURE);
3804 3780 }
3805 3781
3806 3782 /* Clear the frame buffer and assign back the context id */
3807 3783 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3808 3784 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3809 3785 cmd->index);
3810 3786
3811 3787 init_frame = (struct mrsas_init_frame *)cmd->frame;
3812 3788 initq_info = (struct mrsas_init_queue_info *)
3813 3789 ((unsigned long)init_frame + 64);
3814 3790
3815 3791 (void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
3816 3792 (void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));
3817 3793
3818 3794 ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);
3819 3795
3820 3796 ddi_put32(cmd->frame_dma_obj.acc_handle,
3821 3797 &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
3822 3798 ddi_put32(cmd->frame_dma_obj.acc_handle,
3823 3799 &initq_info->producer_index_phys_addr_hi, 0);
3824 3800 ddi_put32(cmd->frame_dma_obj.acc_handle,
3825 3801 &initq_info->producer_index_phys_addr_lo,
3826 3802 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
3827 3803 ddi_put32(cmd->frame_dma_obj.acc_handle,
3828 3804 &initq_info->consumer_index_phys_addr_hi, 0);
3829 3805 ddi_put32(cmd->frame_dma_obj.acc_handle,
3830 3806 &initq_info->consumer_index_phys_addr_lo,
3831 3807 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);
3832 3808
3833 3809 ddi_put32(cmd->frame_dma_obj.acc_handle,
3834 3810 &initq_info->reply_queue_start_phys_addr_hi, 0);
3835 3811 ddi_put32(cmd->frame_dma_obj.acc_handle,
3836 3812 &initq_info->reply_queue_start_phys_addr_lo,
3837 3813 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);
3838 3814
3839 3815 ddi_put8(cmd->frame_dma_obj.acc_handle,
3840 3816 &init_frame->cmd, MFI_CMD_OP_INIT);
3841 3817 ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
3842 3818 MFI_CMD_STATUS_POLL_MODE);
3843 3819 ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
3844 3820 ddi_put32(cmd->frame_dma_obj.acc_handle,
3845 3821 &init_frame->queue_info_new_phys_addr_lo,
3846 3822 cmd->frame_phys_addr + 64);
3847 3823 ddi_put32(cmd->frame_dma_obj.acc_handle,
3848 3824 &init_frame->queue_info_new_phys_addr_hi, 0);
3849 3825
3850 3826 ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
3851 3827 sizeof (struct mrsas_init_queue_info));
3852 3828
3853 3829 cmd->frame_count = 1;
3854 3830
3855 3831 /* issue the init frame in polled mode */
3856 3832 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3857 3833 con_log(CL_ANN1, (CE_WARN,
3858 3834 "mrsas_issue_init_mfi():failed to "
3859 3835 "init firmware"));
3860 3836 return_mfi_app_pkt(instance, cmd);
3861 3837 return (DDI_FAILURE);
3862 3838 }
3863 3839
3864 3840 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3865 3841 return_mfi_pkt(instance, cmd);
3866 3842 return (DDI_FAILURE);
3867 3843 }
3868 3844
3869 3845 return_mfi_app_pkt(instance, cmd);
3870 3846 con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));
3871 3847
3872 3848 return (DDI_SUCCESS);
3873 3849 }
3874 3850 /*
3875 3851 * mfi_state_transition_to_ready : Move the FW to READY state
3876 3852 *
3877 3853 * @reg_set : MFI register set
3878 3854 */
3879 3855 int
3880 3856 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3881 3857 {
3882 3858 int i;
3883 3859 uint8_t max_wait;
3884 3860 uint32_t fw_ctrl = 0;
3885 3861 uint32_t fw_state;
3886 3862 uint32_t cur_state;
3887 3863 uint32_t cur_abs_reg_val;
3888 3864 uint32_t prev_abs_reg_val;
3889 3865 uint32_t status;
3890 3866
3891 3867 cur_abs_reg_val =
3892 3868 instance->func_ptr->read_fw_status_reg(instance);
3893 3869 fw_state =
3894 3870 cur_abs_reg_val & MFI_STATE_MASK;
3895 3871 con_log(CL_ANN1, (CE_CONT,
3896 3872 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3897 3873
3898 3874 while (fw_state != MFI_STATE_READY) {
3899 3875 con_log(CL_ANN, (CE_CONT,
3900 3876 "mfi_state_transition_to_ready:FW state%x", fw_state));
3901 3877
3902 3878 switch (fw_state) {
3903 3879 case MFI_STATE_FAULT:
3904 3880 con_log(CL_ANN, (CE_NOTE,
3905 3881 "mr_sas: FW in FAULT state!!"));
3906 3882
3907 3883 return (ENODEV);
3908 3884 case MFI_STATE_WAIT_HANDSHAKE:
3909 3885 /* set the CLR bit in IMR0 */
3910 3886 con_log(CL_ANN1, (CE_NOTE,
3911 3887 "mr_sas: FW waiting for HANDSHAKE"));
3912 3888 /*
3913 3889 * PCI_Hot Plug: MFI F/W requires
3914 3890 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3915 3891 * to be set
3916 3892 */
3917 3893 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3918 3894 if (!instance->tbolt) {
3919 3895 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3920 3896 MFI_INIT_HOTPLUG, instance);
3921 3897 } else {
3922 3898 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3923 3899 MFI_INIT_HOTPLUG, instance);
3924 3900 }
3925 3901 max_wait = (instance->tbolt == 1) ? 180 : 2;
3926 3902 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3927 3903 break;
3928 3904 case MFI_STATE_BOOT_MESSAGE_PENDING:
3929 3905 /* set the CLR bit in IMR0 */
3930 3906 con_log(CL_ANN1, (CE_NOTE,
3931 3907 "mr_sas: FW state boot message pending"));
3932 3908 /*
3933 3909 * PCI_Hot Plug: MFI F/W requires
3934 3910 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3935 3911 * to be set
3936 3912 */
3937 3913 if (!instance->tbolt) {
3938 3914 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3939 3915 } else {
3940 3916 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3941 3917 instance);
3942 3918 }
3943 3919 max_wait = (instance->tbolt == 1) ? 180 : 10;
3944 3920 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3945 3921 break;
3946 3922 case MFI_STATE_OPERATIONAL:
3947 3923 /* bring it to READY state; assuming max wait 2 secs */
3948 3924 instance->func_ptr->disable_intr(instance);
3949 3925 con_log(CL_ANN1, (CE_NOTE,
3950 3926 "mr_sas: FW in OPERATIONAL state"));
3951 3927 /*
3952 3928 * PCI_Hot Plug: MFI F/W requires
3953 3929 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3954 3930 * to be set
3955 3931 */
3956 3932 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3957 3933 if (!instance->tbolt) {
3958 3934 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3959 3935 } else {
3960 3936 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3961 3937 instance);
3962 3938
3963 3939 for (i = 0; i < (10 * 1000); i++) {
3964 3940 status =
3965 3941 RD_RESERVED0_REGISTER(instance);
3966 3942 if (status & 1) {
3967 3943 delay(1 *
3968 3944 drv_usectohz(MILLISEC));
3969 3945 } else {
3970 3946 break;
3971 3947 }
3972 3948 }
3973 3949
3974 3950 }
3975 3951 max_wait = (instance->tbolt == 1) ? 180 : 10;
3976 3952 cur_state = MFI_STATE_OPERATIONAL;
3977 3953 break;
3978 3954 case MFI_STATE_UNDEFINED:
3979 3955 /* this state should not last for more than 2 seconds */
3980 3956 con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3981 3957
3982 3958 max_wait = (instance->tbolt == 1) ? 180 : 2;
3983 3959 cur_state = MFI_STATE_UNDEFINED;
3984 3960 break;
3985 3961 case MFI_STATE_BB_INIT:
3986 3962 max_wait = (instance->tbolt == 1) ? 180 : 2;
3987 3963 cur_state = MFI_STATE_BB_INIT;
3988 3964 break;
3989 3965 case MFI_STATE_FW_INIT:
3990 3966 max_wait = (instance->tbolt == 1) ? 180 : 2;
3991 3967 cur_state = MFI_STATE_FW_INIT;
3992 3968 break;
3993 3969 case MFI_STATE_FW_INIT_2:
3994 3970 max_wait = 180;
3995 3971 cur_state = MFI_STATE_FW_INIT_2;
3996 3972 break;
3997 3973 case MFI_STATE_DEVICE_SCAN:
3998 3974 max_wait = 180;
3999 3975 cur_state = MFI_STATE_DEVICE_SCAN;
4000 3976 prev_abs_reg_val = cur_abs_reg_val;
4001 3977 con_log(CL_NONE, (CE_NOTE,
4002 3978 "Device scan in progress ...\n"));
4003 3979 break;
4004 3980 case MFI_STATE_FLUSH_CACHE:
4005 3981 max_wait = 180;
4006 3982 cur_state = MFI_STATE_FLUSH_CACHE;
4007 3983 break;
4008 3984 default:
4009 3985 con_log(CL_ANN1, (CE_NOTE,
4010 3986 "mr_sas: Unknown state 0x%x", fw_state));
4011 3987 return (ENODEV);
4012 3988 }
4013 3989
4014 3990 /* the cur_state should not last for more than max_wait secs */
4015 3991 for (i = 0; i < (max_wait * MILLISEC); i++) {
4016 3992 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
4017 3993 cur_abs_reg_val =
4018 3994 instance->func_ptr->read_fw_status_reg(instance);
4019 3995 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4020 3996
4021 3997 if (fw_state == cur_state) {
4022 3998 delay(1 * drv_usectohz(MILLISEC));
4023 3999 } else {
4024 4000 break;
4025 4001 }
4026 4002 }
4027 4003 if (fw_state == MFI_STATE_DEVICE_SCAN) {
4028 4004 if (prev_abs_reg_val != cur_abs_reg_val) {
4029 4005 continue;
4030 4006 }
4031 4007 }
4032 4008
4033 4009 /* return error if fw_state hasn't changed after max_wait */
4034 4010 if (fw_state == cur_state) {
4035 4011 con_log(CL_ANN1, (CE_WARN,
4036 4012 "FW state hasn't changed in %d secs", max_wait));
4037 4013 return (ENODEV);
4038 4014 }
4039 4015 };
4040 4016
4041 4017 if (!instance->tbolt) {
4042 4018 fw_ctrl = RD_IB_DOORBELL(instance);
4043 4019 con_log(CL_ANN1, (CE_CONT,
4044 4020 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
4045 4021 }
4046 4022
4047 4023 #if 0 /* XXX KEBE ASKS, remove and use like pre-2208? */
4048 4024 /*
4049 4025 * Write 0xF to the doorbell register to do the following.
4050 4026 * - Abort all outstanding commands (bit 0).
4051 4027 * - Transition from OPERATIONAL to READY state (bit 1).
4052 4028 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
4053 4029 * - Set to release FW to continue running (i.e. BIOS handshake
4054 4030 * (bit 3).
4055 4031 */
4056 4032 if (!instance->tbolt) {
4057 4033 WR_IB_DOORBELL(0xF, instance);
4058 4034 }
4059 4035 #endif
4060 4036 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
4061 4037 return (ENODEV);
4062 4038 }
4063 4039
4064 4040 return (DDI_SUCCESS);
4065 4041 }
4066 4042
4067 4043 /*
4068 4044 * get_seq_num
4069 4045 */
4070 4046 static int
4071 4047 get_seq_num(struct mrsas_instance *instance,
4072 4048 struct mrsas_evt_log_info *eli)
4073 4049 {
4074 4050 int ret = DDI_SUCCESS;
4075 4051
4076 4052 dma_obj_t dcmd_dma_obj;
4077 4053 struct mrsas_cmd *cmd;
4078 4054 struct mrsas_dcmd_frame *dcmd;
4079 4055 struct mrsas_evt_log_info *eli_tmp;
4080 4056 if (instance->tbolt) {
4081 4057 cmd = get_raid_msg_mfi_pkt(instance);
4082 4058 } else {
4083 4059 cmd = get_mfi_pkt(instance);
4084 4060 }
4085 4061
4086 4062 if (!cmd) {
4087 4063 cmn_err(CE_WARN, "mr_sas: failed to get a cmd");
4088 4064 DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
4089 4065 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4090 4066 return (ENOMEM);
4091 4067 }
4092 4068
4093 4069 /* Clear the frame buffer and assign back the context id */
4094 4070 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4095 4071 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4096 4072 cmd->index);
4097 4073
4098 4074 dcmd = &cmd->frame->dcmd;
4099 4075
4100 4076 /* allocate the data transfer buffer */
4101 4077 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
4102 4078 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
4103 4079 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4104 4080 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4105 4081 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
4106 4082 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
4107 4083
4108 4084 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
4109 4085 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
4110 4086 cmn_err(CE_WARN,
4111 4087 "get_seq_num: could not allocate data transfer buffer.");
4112 4088 return (DDI_FAILURE);
4113 4089 }
4114 4090
4115 4091 (void) memset(dcmd_dma_obj.buffer, 0,
4116 4092 sizeof (struct mrsas_evt_log_info));
4117 4093
4118 4094 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4119 4095
4120 4096 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4121 4097 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
4122 4098 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
4123 4099 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4124 4100 MFI_FRAME_DIR_READ);
4125 4101 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4126 4102 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
4127 4103 sizeof (struct mrsas_evt_log_info));
4128 4104 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4129 4105 MR_DCMD_CTRL_EVENT_GET_INFO);
4130 4106 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
4131 4107 sizeof (struct mrsas_evt_log_info));
4132 4108 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
4133 4109 dcmd_dma_obj.dma_cookie[0].dmac_address);
4134 4110
4135 4111 cmd->sync_cmd = MRSAS_TRUE;
4136 4112 cmd->frame_count = 1;
4137 4113
4138 4114 if (instance->tbolt) {
4139 4115 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4140 4116 }
4141 4117
4142 4118 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4143 4119 cmn_err(CE_WARN, "get_seq_num: "
4144 4120 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
4145 4121 ret = DDI_FAILURE;
4146 4122 } else {
4147 4123 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
4148 4124 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
4149 4125 &eli_tmp->newest_seq_num);
4150 4126 ret = DDI_SUCCESS;
4151 4127 }
4152 4128
4153 4129 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
4154 4130 ret = DDI_FAILURE;
4155 4131
4156 4132 if (instance->tbolt) {
4157 4133 return_raid_msg_mfi_pkt(instance, cmd);
4158 4134 } else {
4159 4135 return_mfi_pkt(instance, cmd);
4160 4136 }
4161 4137
4162 4138 return (ret);
4163 4139 }
4164 4140
4165 4141 /*
4166 4142 * start_mfi_aen
4167 4143 */
4168 4144 static int
4169 4145 start_mfi_aen(struct mrsas_instance *instance)
4170 4146 {
4171 4147 int ret = 0;
4172 4148
4173 4149 struct mrsas_evt_log_info eli;
4174 4150 union mrsas_evt_class_locale class_locale;
4175 4151
4176 4152 /* get the latest sequence number from FW */
4177 4153 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
4178 4154
4179 4155 if (get_seq_num(instance, &eli)) {
4180 4156 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
4181 4157 return (-1);
4182 4158 }
4183 4159
4184 4160 /* register AEN with FW for latest sequence number plus 1 */
4185 4161 class_locale.members.reserved = 0;
4186 4162 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
4187 4163 class_locale.members.class = MR_EVT_CLASS_INFO;
4188 4164 class_locale.word = LE_32(class_locale.word);
4189 4165 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
4190 4166 class_locale.word);
4191 4167
4192 4168 if (ret) {
4193 4169 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
4194 4170 return (-1);
4195 4171 }
4196 4172
4197 4173
4198 4174 return (ret);
4199 4175 }
4200 4176
4201 4177 /*
4202 4178 * flush_cache
4203 4179 */
4204 4180 static void
4205 4181 flush_cache(struct mrsas_instance *instance)
4206 4182 {
4207 4183 struct mrsas_cmd *cmd = NULL;
4208 4184 struct mrsas_dcmd_frame *dcmd;
4209 4185 if (instance->tbolt) {
4210 4186 cmd = get_raid_msg_mfi_pkt(instance);
4211 4187 } else {
4212 4188 cmd = get_mfi_pkt(instance);
4213 4189 }
4214 4190
4215 4191 if (!cmd) {
4216 4192 con_log(CL_ANN1, (CE_WARN,
4217 4193 "flush_cache():Failed to get a cmd for flush_cache"));
4218 4194 DTRACE_PROBE2(flush_cache_err, uint16_t,
4219 4195 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
4220 4196 return;
4221 4197 }
4222 4198
4223 4199 /* Clear the frame buffer and assign back the context id */
4224 4200 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
4225 4201 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
4226 4202 cmd->index);
4227 4203
4228 4204 dcmd = &cmd->frame->dcmd;
4229 4205
4230 4206 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4231 4207
4232 4208 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4233 4209 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
4234 4210 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
4235 4211 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4236 4212 MFI_FRAME_DIR_NONE);
4237 4213 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4238 4214 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
4239 4215 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4240 4216 MR_DCMD_CTRL_CACHE_FLUSH);
4241 4217 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
4242 4218 MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);
4243 4219
4244 4220 cmd->frame_count = 1;
4245 4221
4246 4222 if (instance->tbolt) {
4247 4223 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4248 4224 }
4249 4225
4250 4226 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
4251 4227 con_log(CL_ANN1, (CE_WARN,
4252 4228 "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
4253 4229 }
4254 4230 con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
4255 4231 if (instance->tbolt) {
4256 4232 return_raid_msg_mfi_pkt(instance, cmd);
4257 4233 } else {
4258 4234 return_mfi_pkt(instance, cmd);
4259 4235 }
4260 4236
4261 4237 }
4262 4238
/*
 * service_mfi_aen - Completes an AEN command
 * @instance: Adapter soft state
 * @cmd: Command to be completed
 *
 * Called when an AEN (asynchronous event notification) command completes.
 * Logs a sysevent, dispatches (un)configure work for LD/PD state changes
 * via mrsas_service_evt(), then re-arms the same command for the next
 * sequence number and re-issues it.
 */
void
service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	uint32_t seq_num;
	struct mrsas_evt_detail *evt_detail =
	    (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
	int rval = 0;
	int tgt = 0;
	uint8_t dtype;
#ifdef PDSUPPORT
	mrsas_pd_address_t *pd_addr;
#endif
	ddi_acc_handle_t acc_handle;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	acc_handle = cmd->frame_dma_obj.acc_handle;
	cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
	/* ENODATA from firmware is treated as success here */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int	instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas%d: Failed to log AEN event", instance_no));
	}
	/*
	 * Check for any ld devices that has changed state. i.e. online
	 * or offline.
	 */
	con_log(CL_ANN1, (CE_CONT,
	    "AEN: code = %x class = %x locale = %x args = %x",
	    ddi_get32(acc_handle, &evt_detail->code),
	    evt_detail->cl.members.class,
	    ddi_get16(acc_handle, &evt_detail->cl.members.locale),
	    ddi_get8(acc_handle, &evt_detail->arg_type)));

	switch (ddi_get32(acc_handle, &evt_detail->code)) {
	case MR_EVT_CFG_CLEARED: {
		/* configuration wiped: unconfigure every attached LD */
		for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
			if (instance->mr_ld_list[tgt].dip != NULL) {
				mutex_enter(&instance->config_dev_mtx);
				/* mark target invalid under config lock */
				instance->mr_ld_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance, tgt, 0,
				    MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: CFG CLEARED AEN rval = %d "
				    "tgt id = %d", rval, tgt));
			}
		}
		break;
	}

	case MR_EVT_LD_DELETED: {
		tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_ld_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
		mutex_exit(&instance->config_dev_mtx);
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_UNCONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_DELETED */

	case MR_EVT_LD_CREATED: {
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_CONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_CREATED */

#ifdef PDSUPPORT
	/* physical-disk events are only handled on Thunderbolt parts */
	case MR_EVT_PD_REMOVED_EXT: {
		if (instance->tbolt) {
			pd_addr = &evt_detail->args.pd_addr;
			dtype = pd_addr->scsi_dev_type;
			con_log(CL_DLEVEL1, (CE_NOTE,
			    " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
			    " arg_type = %d ", dtype, evt_detail->arg_type));
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_tbolt_pd_list[tgt].flag =
			    (uint8_t)~MRDRV_TGT_VALID;
			mutex_exit(&instance->config_dev_mtx);
			rval = mrsas_service_evt(instance, ddi_get16(
			    acc_handle, &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_REMOVED_EXT */

	case MR_EVT_PD_INSERTED_EXT: {
		if (instance->tbolt) {
			rval = mrsas_service_evt(instance,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_CONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_INSERTED_EXT */

	case MR_EVT_PD_STATE_CHANGE: {
		if (instance->tbolt) {
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			/* PD_SYSTEM -> other: treat as a removal */
			if ((evt_detail->args.pd_state.prevState ==
			    PD_SYSTEM) &&
			    (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_tbolt_pd_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
				    "rval = %d tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
			/* UNCONFIGURED_GOOD -> PD_SYSTEM: an insertion */
			if ((evt_detail->args.pd_state.prevState
			    == UNCONFIGURED_GOOD) &&
			    (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_CONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: PD_INSERTED: rval = %d "
				    " tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
		}
		break;
	}
#endif

	} /* End of Main Switch */

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
	seq_num++;
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
	ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	/* reset OCR bookkeeping before re-issuing the AEN command */
	cmd->retry_count_for_ocr = 0;
	cmd->drv_pkt_time = 0;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}
4456 4432
4457 4433 /*
4458 4434 * complete_cmd_in_sync_mode - Completes an internal command
4459 4435 * @instance: Adapter soft state
4460 4436 * @cmd: Command to be completed
4461 4437 *
4462 4438 * The issue_cmd_in_sync_mode() function waits for a command to complete
4463 4439 * after it issues a command. This function wakes up that waiting routine by
4464 4440 * calling wake_up() on the wait queue.
4465 4441 */
4466 4442 static void
4467 4443 complete_cmd_in_sync_mode(struct mrsas_instance *instance,
4468 4444 struct mrsas_cmd *cmd)
4469 4445 {
4470 4446 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
4471 4447 &cmd->frame->io.cmd_status);
4472 4448
4473 4449 cmd->sync_cmd = MRSAS_FALSE;
4474 4450
4475 4451 con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
4476 4452 (void *)cmd));
4477 4453
4478 4454 mutex_enter(&instance->int_cmd_mtx);
4479 4455 if (cmd->cmd_status == ENODATA) {
4480 4456 cmd->cmd_status = 0;
4481 4457 }
4482 4458 cv_broadcast(&instance->int_cmd_cv);
4483 4459 mutex_exit(&instance->int_cmd_mtx);
4484 4460
4485 4461 }
4486 4462
4487 4463 /*
4488 4464 * Call this function inside mrsas_softintr.
4489 4465 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4490 4466 * @instance: Adapter soft state
4491 4467 */
4492 4468
4493 4469 static uint32_t
4494 4470 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4495 4471 {
4496 4472 uint32_t cur_abs_reg_val;
4497 4473 uint32_t fw_state;
4498 4474
4499 4475 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
4500 4476 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4501 4477 if (fw_state == MFI_STATE_FAULT) {
4502 4478 if (instance->disable_online_ctrl_reset == 1) {
4503 4479 cmn_err(CE_WARN,
4504 4480 "mrsas_initiate_ocr_if_fw_is_faulty: "
4505 4481 "FW in Fault state, detected in ISR: "
4506 4482 "FW doesn't support ocr ");
4507 4483
4508 4484 return (ADAPTER_RESET_NOT_REQUIRED);
4509 4485 } else {
4510 4486 con_log(CL_ANN, (CE_NOTE,
4511 4487 "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault "
4512 4488 "state, detected in ISR: FW supports ocr "));
4513 4489
4514 4490 return (ADAPTER_RESET_REQUIRED);
4515 4491 }
4516 4492 }
4517 4493
4518 4494 return (ADAPTER_RESET_NOT_REQUIRED);
4519 4495 }
4520 4496
4521 4497 /*
4522 4498 * mrsas_softintr - The Software ISR
4523 4499 * @param arg : HBA soft state
4524 4500 *
4525 4501 * called from high-level interrupt if hi-level interrupt are not there,
4526 4502 * otherwise triggered as a soft interrupt
4527 4503 */
4528 4504 static uint_t
4529 4505 mrsas_softintr(struct mrsas_instance *instance)
4530 4506 {
4531 4507 struct scsi_pkt *pkt;
4532 4508 struct scsa_cmd *acmd;
4533 4509 struct mrsas_cmd *cmd;
4534 4510 struct mlist_head *pos, *next;
4535 4511 mlist_t process_list;
4536 4512 struct mrsas_header *hdr;
4537 4513 struct scsi_arq_status *arqstat;
4538 4514
4539 4515 con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));
4540 4516
4541 4517 ASSERT(instance);
4542 4518
4543 4519 mutex_enter(&instance->completed_pool_mtx);
4544 4520
4545 4521 if (mlist_empty(&instance->completed_pool_list)) {
4546 4522 mutex_exit(&instance->completed_pool_mtx);
4547 4523 return (DDI_INTR_CLAIMED);
4548 4524 }
4549 4525
4550 4526 instance->softint_running = 1;
4551 4527
4552 4528 INIT_LIST_HEAD(&process_list);
4553 4529 mlist_splice(&instance->completed_pool_list, &process_list);
4554 4530 INIT_LIST_HEAD(&instance->completed_pool_list);
4555 4531
4556 4532 mutex_exit(&instance->completed_pool_mtx);
4557 4533
4558 4534 /* perform all callbacks first, before releasing the SCBs */
4559 4535 mlist_for_each_safe(pos, next, &process_list) {
4560 4536 cmd = mlist_entry(pos, struct mrsas_cmd, list);
4561 4537
4562 4538 /* syncronize the Cmd frame for the controller */
4563 4539 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
4564 4540 0, 0, DDI_DMA_SYNC_FORCPU);
4565 4541
4566 4542 if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
4567 4543 DDI_SUCCESS) {
4568 4544 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
4569 4545 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4570 4546 con_log(CL_ANN1, (CE_WARN,
4571 4547 "mrsas_softintr: "
4572 4548 "FMA check reports DMA handle failure"));
4573 4549 return (DDI_INTR_CLAIMED);
4574 4550 }
4575 4551
4576 4552 hdr = &cmd->frame->hdr;
4577 4553
4578 4554 /* remove the internal command from the process list */
4579 4555 mlist_del_init(&cmd->list);
4580 4556
4581 4557 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
4582 4558 case MFI_CMD_OP_PD_SCSI:
4583 4559 case MFI_CMD_OP_LD_SCSI:
4584 4560 case MFI_CMD_OP_LD_READ:
4585 4561 case MFI_CMD_OP_LD_WRITE:
4586 4562 /*
4587 4563 * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
4588 4564 * could have been issued either through an
4589 4565 * IO path or an IOCTL path. If it was via IOCTL,
4590 4566 * we will send it to internal completion.
4591 4567 */
4592 4568 if (cmd->sync_cmd == MRSAS_TRUE) {
4593 4569 complete_cmd_in_sync_mode(instance, cmd);
4594 4570 break;
4595 4571 }
4596 4572
4597 4573 /* regular commands */
4598 4574 acmd = cmd->cmd;
4599 4575 pkt = CMD2PKT(acmd);
4600 4576
4601 4577 if (acmd->cmd_flags & CFLAG_DMAVALID) {
4602 4578 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
4603 4579 (void) ddi_dma_sync(acmd->cmd_dmahandle,
4604 4580 acmd->cmd_dma_offset,
4605 4581 acmd->cmd_dma_len,
4606 4582 DDI_DMA_SYNC_FORCPU);
4607 4583 }
4608 4584 }
4609 4585
4610 4586 pkt->pkt_reason = CMD_CMPLT;
4611 4587 pkt->pkt_statistics = 0;
4612 4588 pkt->pkt_state = STATE_GOT_BUS
4613 4589 | STATE_GOT_TARGET | STATE_SENT_CMD
4614 4590 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
4615 4591
4616 4592 con_log(CL_ANN, (CE_CONT,
4617 4593 "CDB[0] = %x completed for %s: size %lx context %x",
4618 4594 pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
4619 4595 acmd->cmd_dmacount, hdr->context));
4620 4596 DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
4621 4597 uint_t, acmd->cmd_cdblen, ulong_t,
4622 4598 acmd->cmd_dmacount);
4623 4599
4624 4600 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
4625 4601 struct scsi_inquiry *inq;
4626 4602
4627 4603 if (acmd->cmd_dmacount != 0) {
4628 4604 bp_mapin(acmd->cmd_buf);
4629 4605 inq = (struct scsi_inquiry *)
4630 4606 acmd->cmd_buf->b_un.b_addr;
4631 4607
4632 4608 /* don't expose physical drives to OS */
4633 4609 if (acmd->islogical &&
4634 4610 (hdr->cmd_status == MFI_STAT_OK)) {
4635 4611 display_scsi_inquiry(
4636 4612 (caddr_t)inq);
4637 4613 } else if ((hdr->cmd_status ==
4638 4614 MFI_STAT_OK) && inq->inq_dtype ==
4639 4615 DTYPE_DIRECT) {
4640 4616
4641 4617 display_scsi_inquiry(
4642 4618 (caddr_t)inq);
4643 4619
4644 4620 /* for physical disk */
4645 4621 hdr->cmd_status =
4646 4622 MFI_STAT_DEVICE_NOT_FOUND;
4647 4623 }
4648 4624 }
4649 4625 }
4650 4626
4651 4627 DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
4652 4628 uint8_t, hdr->cmd_status);
4653 4629
4654 4630 switch (hdr->cmd_status) {
4655 4631 case MFI_STAT_OK:
4656 4632 pkt->pkt_scbp[0] = STATUS_GOOD;
4657 4633 break;
4658 4634 case MFI_STAT_LD_CC_IN_PROGRESS:
4659 4635 case MFI_STAT_LD_RECON_IN_PROGRESS:
4660 4636 pkt->pkt_scbp[0] = STATUS_GOOD;
4661 4637 break;
4662 4638 case MFI_STAT_LD_INIT_IN_PROGRESS:
4663 4639 con_log(CL_ANN,
4664 4640 (CE_WARN, "Initialization in Progress"));
4665 4641 pkt->pkt_reason = CMD_TRAN_ERR;
4666 4642
4667 4643 break;
4668 4644 case MFI_STAT_SCSI_DONE_WITH_ERROR:
4669 4645 con_log(CL_ANN, (CE_CONT, "scsi_done error"));
4670 4646
4671 4647 pkt->pkt_reason = CMD_CMPLT;
4672 4648 ((struct scsi_status *)
4673 4649 pkt->pkt_scbp)->sts_chk = 1;
4674 4650
4675 4651 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
4676 4652 con_log(CL_ANN,
4677 4653 (CE_WARN, "TEST_UNIT_READY fail"));
4678 4654 } else {
4679 4655 pkt->pkt_state |= STATE_ARQ_DONE;
4680 4656 arqstat = (void *)(pkt->pkt_scbp);
4681 4657 arqstat->sts_rqpkt_reason = CMD_CMPLT;
4682 4658 arqstat->sts_rqpkt_resid = 0;
4683 4659 arqstat->sts_rqpkt_state |=
4684 4660 STATE_GOT_BUS | STATE_GOT_TARGET
4685 4661 | STATE_SENT_CMD
4686 4662 | STATE_XFERRED_DATA;
4687 4663 *(uint8_t *)&arqstat->sts_rqpkt_status =
4688 4664 STATUS_GOOD;
4689 4665 ddi_rep_get8(
4690 4666 cmd->frame_dma_obj.acc_handle,
4691 4667 (uint8_t *)
4692 4668 &(arqstat->sts_sensedata),
4693 4669 cmd->sense,
4694 4670 sizeof (struct scsi_extended_sense),
4695 4671 DDI_DEV_AUTOINCR);
4696 4672 }
4697 4673 break;
4698 4674 case MFI_STAT_LD_OFFLINE:
4699 4675 case MFI_STAT_DEVICE_NOT_FOUND:
4700 4676 con_log(CL_ANN, (CE_CONT,
4701 4677 "mrsas_softintr:device not found error"));
4702 4678 pkt->pkt_reason = CMD_DEV_GONE;
4703 4679 pkt->pkt_statistics = STAT_DISCON;
4704 4680 break;
4705 4681 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
4706 4682 pkt->pkt_state |= STATE_ARQ_DONE;
4707 4683 pkt->pkt_reason = CMD_CMPLT;
4708 4684 ((struct scsi_status *)
4709 4685 pkt->pkt_scbp)->sts_chk = 1;
4710 4686
4711 4687 arqstat = (void *)(pkt->pkt_scbp);
4712 4688 arqstat->sts_rqpkt_reason = CMD_CMPLT;
4713 4689 arqstat->sts_rqpkt_resid = 0;
4714 4690 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
4715 4691 | STATE_GOT_TARGET | STATE_SENT_CMD
4716 4692 | STATE_XFERRED_DATA;
4717 4693 *(uint8_t *)&arqstat->sts_rqpkt_status =
4718 4694 STATUS_GOOD;
4719 4695
4720 4696 arqstat->sts_sensedata.es_valid = 1;
4721 4697 arqstat->sts_sensedata.es_key =
4722 4698 KEY_ILLEGAL_REQUEST;
4723 4699 arqstat->sts_sensedata.es_class =
4724 4700 CLASS_EXTENDED_SENSE;
4725 4701
4726 4702 /*
4727 4703 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
4728 4704 * ASC: 0x21h; ASCQ: 0x00h;
4729 4705 */
4730 4706 arqstat->sts_sensedata.es_add_code = 0x21;
4731 4707 arqstat->sts_sensedata.es_qual_code = 0x00;
4732 4708
4733 4709 break;
4734 4710
4735 4711 default:
4736 4712 con_log(CL_ANN, (CE_CONT, "Unknown status!"));
4737 4713 pkt->pkt_reason = CMD_TRAN_ERR;
4738 4714
4739 4715 break;
4740 4716 }
4741 4717
4742 4718 atomic_add_16(&instance->fw_outstanding, (-1));
4743 4719
4744 4720 (void) mrsas_common_check(instance, cmd);
4745 4721
4746 4722 if (acmd->cmd_dmahandle) {
4747 4723 if (mrsas_check_dma_handle(
4748 4724 acmd->cmd_dmahandle) != DDI_SUCCESS) {
4749 4725 ddi_fm_service_impact(instance->dip,
4750 4726 DDI_SERVICE_UNAFFECTED);
4751 4727 pkt->pkt_reason = CMD_TRAN_ERR;
4752 4728 pkt->pkt_statistics = 0;
4753 4729 }
4754 4730 }
4755 4731
4756 4732 /* Call the callback routine */
4757 4733 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4758 4734 pkt->pkt_comp) {
4759 4735
4760 4736 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_softintr: "
4761 4737 "posting to scsa cmd %p index %x pkt %p "
4762 4738 "time %llx", (void *)cmd, cmd->index,
4763 4739 (void *)pkt, gethrtime()));
4764 4740 (*pkt->pkt_comp)(pkt);
4765 4741
4766 4742 }
4767 4743
4768 4744 return_mfi_pkt(instance, cmd);
4769 4745 break;
4770 4746
4771 4747 case MFI_CMD_OP_SMP:
4772 4748 case MFI_CMD_OP_STP:
4773 4749 complete_cmd_in_sync_mode(instance, cmd);
4774 4750 break;
4775 4751
4776 4752 case MFI_CMD_OP_DCMD:
4777 4753 /* see if got an event notification */
4778 4754 if (ddi_get32(cmd->frame_dma_obj.acc_handle,
4779 4755 &cmd->frame->dcmd.opcode) ==
4780 4756 MR_DCMD_CTRL_EVENT_WAIT) {
4781 4757 if ((instance->aen_cmd == cmd) &&
4782 4758 (instance->aen_cmd->abort_aen)) {
4783 4759 con_log(CL_ANN, (CE_WARN,
4784 4760 "mrsas_softintr: "
4785 4761 "aborted_aen returned"));
4786 4762 } else {
4787 4763 atomic_add_16(&instance->fw_outstanding,
4788 4764 (-1));
4789 4765 service_mfi_aen(instance, cmd);
4790 4766 }
4791 4767 } else {
4792 4768 complete_cmd_in_sync_mode(instance, cmd);
4793 4769 }
4794 4770
4795 4771 break;
4796 4772
4797 4773 case MFI_CMD_OP_ABORT:
4798 4774 con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
4799 4775 /*
4800 4776 * MFI_CMD_OP_ABORT successfully completed
4801 4777 * in the synchronous mode
4802 4778 */
4803 4779 complete_cmd_in_sync_mode(instance, cmd);
4804 4780 break;
4805 4781
4806 4782 default:
4807 4783 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
4808 4784 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4809 4785
4810 4786 if (cmd->pkt != NULL) {
4811 4787 pkt = cmd->pkt;
4812 4788 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
4813 4789 pkt->pkt_comp) {
4814 4790
4815 4791 con_log(CL_ANN1, (CE_CONT, "posting to "
4816 4792 "scsa cmd %p index %x pkt %p"
4817 4793 "time %llx, default ", (void *)cmd,
4818 4794 cmd->index, (void *)pkt,
4819 4795 gethrtime()));
4820 4796
4821 4797 (*pkt->pkt_comp)(pkt);
4822 4798
4823 4799 }
4824 4800 }
4825 4801 con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
4826 4802 break;
4827 4803 }
4828 4804 }
4829 4805
4830 4806 instance->softint_running = 0;
4831 4807
4832 4808 return (DDI_INTR_CLAIMED);
4833 4809 }
4834 4810
4835 4811 /*
4836 4812 * mrsas_alloc_dma_obj
4837 4813 *
4838 4814 * Allocate the memory and other resources for an dma object.
4839 4815 */
4840 4816 int
4841 4817 mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
4842 4818 uchar_t endian_flags)
4843 4819 {
4844 4820 int i;
4845 4821 size_t alen = 0;
4846 4822 uint_t cookie_cnt;
4847 4823 struct ddi_device_acc_attr tmp_endian_attr;
4848 4824
4849 4825 tmp_endian_attr = endian_attr;
4850 4826 tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
4851 4827 tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
4852 4828
4853 4829 i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
4854 4830 DDI_DMA_SLEEP, NULL, &obj->dma_handle);
4855 4831 if (i != DDI_SUCCESS) {
4856 4832
4857 4833 switch (i) {
4858 4834 case DDI_DMA_BADATTR :
4859 4835 con_log(CL_ANN, (CE_WARN,
4860 4836 "Failed ddi_dma_alloc_handle- Bad attribute"));
4861 4837 break;
4862 4838 case DDI_DMA_NORESOURCES :
4863 4839 con_log(CL_ANN, (CE_WARN,
4864 4840 "Failed ddi_dma_alloc_handle- No Resources"));
4865 4841 break;
4866 4842 default :
4867 4843 con_log(CL_ANN, (CE_WARN,
4868 4844 "Failed ddi_dma_alloc_handle: "
4869 4845 "unknown status %d", i));
4870 4846 break;
4871 4847 }
4872 4848
4873 4849 return (-1);
4874 4850 }
4875 4851
4876 4852 if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
|
↓ open down ↓ |
1179 lines elided |
↑ open up ↑ |
4877 4853 DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
4878 4854 &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
4879 4855 alen < obj->size) {
4880 4856
4881 4857 ddi_dma_free_handle(&obj->dma_handle);
4882 4858
4883 4859 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4884 4860
4885 4861 return (-1);
4886 4862 }
4887 - if (obj->dma_handle == NULL) {
4888 - /* XXX KEBE ASKS --> fm_service_impact()? */
4889 - con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));
4890 - return (-1);
4891 - }
4892 4863
4893 4864 if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
4894 4865 obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
4895 4866 NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {
4896 4867
4897 4868 ddi_dma_mem_free(&obj->acc_handle);
4898 4869 ddi_dma_free_handle(&obj->dma_handle);
4899 4870
4900 4871 con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4901 4872
4902 4873 return (-1);
4903 4874 }
4904 - if (obj->acc_handle == NULL) {
4905 - /* XXX KEBE ASKS --> fm_service_impact()? */
4906 - ddi_dma_mem_free(&obj->acc_handle);
4907 - ddi_dma_free_handle(&obj->dma_handle);
4908 4875
4909 - con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));
4910 - return (-1);
4911 - }
4912 -
4913 4876 if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
4914 4877 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4915 4878 return (-1);
4916 4879 }
4917 4880
4918 4881 if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
4919 4882 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
4920 4883 return (-1);
4921 4884 }
4922 4885
4923 4886 return (cookie_cnt);
4924 4887 }
4925 4888
4926 4889 /*
4927 4890 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
4928 4891 *
4929 4892 * De-allocate the memory and other resources for an dma object, which must
4930 4893 * have been alloated by a previous call to mrsas_alloc_dma_obj()
4931 4894 */
4932 4895 /* ARGSUSED */
4933 4896 int
4934 4897 mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
4935 4898 {
4936 4899
4937 4900 if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) {
4938 4901 return (DDI_SUCCESS);
4939 4902 }
4940 4903
4941 4904 /*
4942 4905 * NOTE: These check-handle functions fail if *_handle == NULL, but
4943 4906 * this function succeeds because of the previous check.
4944 4907 */
4945 4908 if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
4946 4909 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4947 4910 return (DDI_FAILURE);
4948 4911 }
4949 4912
4950 4913 if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
4951 4914 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
4952 4915 return (DDI_FAILURE);
4953 4916 }
4954 4917
4955 4918 (void) ddi_dma_unbind_handle(obj.dma_handle);
4956 4919 ddi_dma_mem_free(&obj.acc_handle);
4957 4920 ddi_dma_free_handle(&obj.dma_handle);
4958 4921 obj.acc_handle = NULL;
4959 4922 return (DDI_SUCCESS);
4960 4923 }
4961 4924
/*
 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 * int, int (*)())
 *
 * Allocate dma resources for a new scsi command
 *
 * Builds the DMA mapping for the command's data buffer: allocates a DMA
 * handle, binds bp to it (honoring PKT_CONSISTENT / PKT_DMA_PARTIAL),
 * collects up to max_num_sge cookies into acmd->cmd_dmacookies[], and
 * sets pkt->pkt_resid to any unmapped remainder.  Returns DDI_SUCCESS or
 * DDI_FAILURE (with bioerror() set and the handle released).
 */
int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
	int	dma_flags;
	int	(*cb)(caddr_t);
	int	i;

	ddi_dma_attr_t	tmp_dma_attr = mrsas_generic_dma_attr;
	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	acmd->cmd_buf = bp;

	/* Direction of the transfer follows the buf's read/write flag. */
	if (bp->b_flags & B_READ) {
		acmd->cmd_flags &= ~CFLAG_DMASEND;
		dma_flags = DDI_DMA_READ;
	} else {
		acmd->cmd_flags |= CFLAG_DMASEND;
		dma_flags = DDI_DMA_WRITE;
	}

	if (flags & PKT_CONSISTENT) {
		acmd->cmd_flags |= CFLAG_CONSISTENT;
		dma_flags |= DDI_DMA_CONSISTENT;
	}

	if (flags & PKT_DMA_PARTIAL) {
		dma_flags |= DDI_DMA_PARTIAL;
	}

	dma_flags |= DDI_DMA_REDZONE;

	/* NULL_FUNC means the caller cannot block waiting for resources. */
	cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

	tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
	tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
	if (instance->tbolt) {
		/* OCR-RESET FIX */
		tmp_dma_attr.dma_attr_count_max =
		    (U64)mrsas_tbolt_max_cap_maxxfer;	/* limit to 256K */
		tmp_dma_attr.dma_attr_maxxfer =
		    (U64)mrsas_tbolt_max_cap_maxxfer;	/* limit to 256K */
	}

	if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
	    cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
		switch (i) {
		case DDI_DMA_BADATTR:
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);

		case DDI_DMA_NORESOURCES:
			bioerror(bp, 0);
			return (DDI_FAILURE);

		default:
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
			    "impossible result (0x%x)", i));
			bioerror(bp, EFAULT);
			return (DDI_FAILURE);
		}
	}

	i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
	    cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

	switch (i) {
	case DDI_DMA_PARTIAL_MAP:
		/* Partial mapping: set up the first DMA window. */
		if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
			    "DDI_DMA_PARTIAL_MAP impossible"));
			goto no_dma_cookies;
		}

		if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
		    DDI_FAILURE) {
			con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
			goto no_dma_cookies;
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {

			con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
			goto no_dma_cookies;
		}

		goto get_dma_cookies;
	case DDI_DMA_MAPPED:
		/* Whole buffer mapped in a single window. */
		acmd->cmd_nwin = 1;
		acmd->cmd_dma_len = 0;
		acmd->cmd_dma_offset = 0;

get_dma_cookies:
		/* Gather cookies, capped at the adapter's SGE limit. */
		i = 0;
		acmd->cmd_dmacount = 0;
		for (;;) {
			acmd->cmd_dmacount +=
			    acmd->cmd_dmacookies[i++].dmac_size;

			if (i == instance->max_num_sge ||
			    i == acmd->cmd_ncookies)
				break;

			ddi_dma_nextcookie(acmd->cmd_dmahandle,
			    &acmd->cmd_dmacookies[i]);
		}

		acmd->cmd_cookie = i;
		acmd->cmd_cookiecnt = i;

		acmd->cmd_flags |= CFLAG_DMAVALID;

		/* Residual = bytes the driver could not map this round. */
		if (bp->b_bcount >= acmd->cmd_dmacount) {
			pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
		} else {
			pkt->pkt_resid = 0;
		}

		return (DDI_SUCCESS);
	case DDI_DMA_NORESOURCES:
		bioerror(bp, 0);
		break;
	case DDI_DMA_NOMAPPING:
		bioerror(bp, EFAULT);
		break;
	case DDI_DMA_TOOBIG:
		bioerror(bp, EINVAL);
		break;
	case DDI_DMA_INUSE:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
		    " DDI_DMA_INUSE impossible"));
		break;
	default:
		con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
		    "impossible result (0x%x)", i));
		break;
	}

no_dma_cookies:
	/* Bind failed (or window setup failed): release the handle. */
	ddi_dma_free_handle(&acmd->cmd_dmahandle);
	acmd->cmd_dmahandle = NULL;
	acmd->cmd_flags &= ~CFLAG_DMAVALID;
	return (DDI_FAILURE);
}
5115 5078
/*
 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 * Advances the partial-mapping state set up by mrsas_dma_alloc(): either
 * continues consuming cookies of the current window or steps to the next
 * window via ddi_dma_getwin(), refilling acmd->cmd_dmacookies[] (up to
 * max_num_sge) and updating pkt->pkt_resid.  Returns DDI_SUCCESS, or
 * DDI_FAILURE when no further window is available.
 */
int
mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
	int	i = 0;

	struct scsa_cmd	*acmd = PKT2CMD(pkt);

	/*
	 * If there are no more cookies remaining in this window,
	 * must move to the next window first.
	 */
	if (acmd->cmd_cookie == acmd->cmd_ncookies) {
		/* Single fully-consumed window: nothing left to move. */
		if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
			return (DDI_SUCCESS);
		}

		/* at last window, cannot move */
		if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
			return (DDI_FAILURE);
		}

		if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
		    &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
		    &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
		    DDI_FAILURE) {
			return (DDI_FAILURE);
		}

		acmd->cmd_cookie = 0;
	} else {
		/* still more cookies in this window - get the next one */
		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[0]);
	}

	/*
	 * get remaining cookies in this window, up to our maximum
	 *
	 * NOTE(review): cmd_dmacount is accumulated here without being
	 * reset for the new window — presumably it is meant as a running
	 * total across the whole transfer (pkt_resid below depends on
	 * that); confirm against the callers.
	 */
	for (;;) {
		acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
		acmd->cmd_cookie++;

		if (i == instance->max_num_sge ||
		    acmd->cmd_cookie == acmd->cmd_ncookies) {
			break;
		}

		ddi_dma_nextcookie(acmd->cmd_dmahandle,
		    &acmd->cmd_dmacookies[i]);
	}

	acmd->cmd_cookiecnt = i;

	if (bp->b_bcount >= acmd->cmd_dmacount) {
		pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
	} else {
		pkt->pkt_resid = 0;
	}

	return (DDI_SUCCESS);
}
5182 5145
/*
 * build_cmd
 *
 * Translate a SCSA packet into an MFI command frame: read/write commands
 * on logical drives become MFI_CMD_OP_LD_READ/WRITE I/O frames with the
 * LBA/count decoded from the CDB; everything else becomes a pass-through
 * (DCDB) frame.  Fills in the scatter-gather list (IEEE or SGL64 format)
 * and the frame count.  Returns the prepared mrsas_cmd, or NULL if no
 * command packet is available or the request was completed internally
 * (in which case *cmd_done is set to 1).
 */
static struct mrsas_cmd *
build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint16_t	flags = 0;
	uint32_t	i;
	uint32_t	context;
	uint32_t	sge_bytes;
	uint32_t	tmp_data_xfer_len;
	ddi_acc_handle_t acc_handle;
	struct mrsas_cmd		*cmd;
	struct mrsas_sge64		*mfi_sgl;
	struct mrsas_sge_ieee		*mfi_sgl_ieee;
	struct scsa_cmd			*acmd = PKT2CMD(pkt);
	struct mrsas_pthru_frame	*pthru;
	struct mrsas_io_frame		*ldio;

	/* find out if this is logical or physical drive command.  */
	acmd->islogical = MRDRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);
	*cmd_done = 0;

	/* get the command packet */
	if (!(cmd = get_mfi_pkt(instance))) {
		DTRACE_PROBE2(build_cmd_mfi_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (NULL);
	}

	acc_handle = cmd->frame_dma_obj.acc_handle;

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);

	cmd->pkt = pkt;
	cmd->cmd = acmd;
	DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0],
	    ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len);

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		flags = MFI_FRAME_DIR_WRITE;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		/*
		 * NOTE(review): "flags & ~CFLAG_DMASEND" is true whenever
		 * ANY bit other than CFLAG_DMASEND is set, which is almost
		 * always — presumably "!(acmd->cmd_flags & CFLAG_DMASEND)"
		 * was intended, making DIR_NONE effectively unreachable.
		 * Left as-is; confirm before changing, since firmware
		 * behavior may depend on the current direction values.
		 */
		flags = MFI_FRAME_DIR_READ;

		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		flags = MFI_FRAME_DIR_NONE;
	}

	if (instance->flag_ieee) {
		flags |= MFI_FRAME_IEEE;
	}
	flags |= MFI_FRAME_SGL64;

	switch (pkt->pkt_cdbp[0]) {

	/*
	 * case SCMD_SYNCHRONIZE_CACHE:
	 *	flush_cache(instance);
	 *	return_mfi_pkt(instance, cmd);
	 *	*cmd_done = 1;
	 *
	 *	return (NULL);
	 */

	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:
		if (acmd->islogical) {
			ldio = (struct mrsas_io_frame *)cmd->frame;

			/*
			 * preare the Logical IO frame:
			 * 2nd bit is zero for all read cmds
			 */
			ddi_put8(acc_handle, &ldio->cmd,
			    (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
			    : MFI_CMD_OP_LD_READ);
			ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
			ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
			ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
			ddi_put16(acc_handle, &ldio->timeout, 0);
			ddi_put8(acc_handle, &ldio->reserved_0, 0);
			ddi_put16(acc_handle, &ldio->pad_0, 0);
			ddi_put16(acc_handle, &ldio->flags, flags);

			/* Initialize sense Information */
			bzero(cmd->sense, SENSE_LENGTH);
			ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
			ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
			ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
			    cmd->sense_phys_addr);
			ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
			ddi_put8(acc_handle, &ldio->access_byte,
			    (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
			ddi_put8(acc_handle, &ldio->sge_count,
			    acmd->cmd_cookiecnt);
			if (instance->flag_ieee) {
				mfi_sgl_ieee =
				    (struct mrsas_sge_ieee *)&ldio->sgl;
			} else {
				mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl;
			}

			context = ddi_get32(acc_handle, &ldio->context);

			/* Decode transfer length and LBA from the CDB. */
			if (acmd->cmd_cdblen == CDB_GROUP0) {
				/* 6-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    (uint16_t)(pkt->pkt_cdbp[4])));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[3])) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
				    << 16)));
			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
				/* 10-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint16_t)(pkt->pkt_cdbp[8])) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
				/* 12-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
				/* 16-byte cdb */
				ddi_put32(acc_handle, &ldio->lba_count, (
				    ((uint32_t)(pkt->pkt_cdbp[13])) |
				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_lo, (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

				ddi_put32(acc_handle, &ldio->start_lba_hi, (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
			}

			break;
		}
		/* fall through For all non-rd/wr cmds */
	default:

		switch (pkt->pkt_cdbp[0]) {
		case SCMD_MODE_SENSE:
		case SCMD_MODE_SENSE_G1: {
			union scsi_cdb	*cdbp;
			uint16_t	page_code;

			cdbp = (void *)pkt->pkt_cdbp;
			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
			switch (page_code) {
			case 0x3:
			case 0x4:
				/* Synthesized internally; no frame needed. */
				(void) mrsas_mode_sense_build(pkt);
				return_mfi_pkt(instance, cmd);
				*cmd_done = 1;
				return (NULL);
			}
			break;
		}
		default:
			break;
		}

		pthru	= (struct mrsas_pthru_frame *)cmd->frame;

		/* prepare the DCDB frame */
		ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
		    MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
		ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
		ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
		ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
		ddi_put8(acc_handle, &pthru->lun, 0);
		ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
		ddi_put16(acc_handle, &pthru->timeout, 0);
		ddi_put16(acc_handle, &pthru->flags, flags);
		/* Total transfer length is the sum of all cookie sizes. */
		tmp_data_xfer_len = 0;
		for (i = 0; i < acmd->cmd_cookiecnt; i++) {
			tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
		}
		ddi_put32(acc_handle, &pthru->data_xfer_len,
		    tmp_data_xfer_len);
		ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
		if (instance->flag_ieee) {
			mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
		} else {
			mfi_sgl	= (struct mrsas_sge64 *)&pthru->sgl;
		}

		bzero(cmd->sense, SENSE_LENGTH);
		ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
		ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
		ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
		    cmd->sense_phys_addr);

		context = ddi_get32(acc_handle, &pthru->context);
		ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
		    (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);

		break;
	}
#ifdef lint
	context = context;
#endif
	/* prepare the scatter-gather list for the firmware */
	if (instance->flag_ieee) {
		for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
			ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
			    acmd->cmd_dmacookies[i].dmac_laddress);
			ddi_put32(acc_handle, &mfi_sgl_ieee->length,
			    acmd->cmd_dmacookies[i].dmac_size);
		}
		sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
	} else {
		for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
			ddi_put64(acc_handle, &mfi_sgl->phys_addr,
			    acmd->cmd_dmacookies[i].dmac_laddress);
			ddi_put32(acc_handle, &mfi_sgl->length,
			    acmd->cmd_dmacookies[i].dmac_size);
		}
		sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
	}

	/* One frame for the header plus enough extra frames for the SGL. */
	cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
	    ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;

	/* Frame count is capped at 8 per MFI command. */
	if (cmd->frame_count >= 8) {
		cmd->frame_count = 8;
	}

	return (cmd);
}
5459 5422
5460 5423 #ifndef __sparc
5461 5424 /*
5462 5425 * wait_for_outstanding - Wait for all outstanding cmds
5463 5426 * @instance: Adapter soft state
5464 5427 *
5465 5428 * This function waits for upto MRDRV_RESET_WAIT_TIME seconds for FW to
5466 5429 * complete all its outstanding commands. Returns error if one or more IOs
5467 5430 * are pending after this time period.
5468 5431 */
5469 5432 static int
5470 5433 wait_for_outstanding(struct mrsas_instance *instance)
5471 5434 {
5472 5435 int i;
5473 5436 uint32_t wait_time = 90;
5474 5437
5475 5438 for (i = 0; i < wait_time; i++) {
5476 5439 if (!instance->fw_outstanding) {
5477 5440 break;
5478 5441 }
5479 5442
5480 5443 drv_usecwait(MILLISEC); /* wait for 1000 usecs */;
5481 5444 }
5482 5445
5483 5446 if (instance->fw_outstanding) {
5484 5447 return (1);
5485 5448 }
5486 5449
5487 5450 return (0);
5488 5451 }
5489 5452 #endif /* __sparc */
5490 5453
|
↓ open down ↓ |
568 lines elided |
↑ open up ↑ |
5491 5454 /*
5492 5455 * issue_mfi_pthru
5493 5456 */
5494 5457 static int
5495 5458 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5496 5459 struct mrsas_cmd *cmd, int mode)
5497 5460 {
5498 5461 void *ubuf;
5499 5462 uint32_t kphys_addr = 0;
5500 5463 uint32_t xferlen = 0;
5501 - uint32_t new_xfer_length = 0;
5464 + uint32_t new_xfer_length = 0;
5502 5465 uint_t model;
5503 5466 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5504 5467 dma_obj_t pthru_dma_obj;
5505 5468 struct mrsas_pthru_frame *kpthru;
5506 5469 struct mrsas_pthru_frame *pthru;
5507 5470 int i;
5508 5471 pthru = &cmd->frame->pthru;
5509 5472 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5510 5473
5511 5474 if (instance->adapterresetinprogress) {
5512 5475 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, "
5513 5476 "returning mfi_pkt and setting TRAN_BUSY\n"));
5514 5477 return (DDI_FAILURE);
5515 5478 }
5516 5479 model = ddi_model_convert_from(mode & FMODELS);
5517 5480 if (model == DDI_MODEL_ILP32) {
5518 5481 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5519 5482
5520 5483 xferlen = kpthru->sgl.sge32[0].length;
5521 5484
5522 5485 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5523 5486 } else {
5524 5487 #ifdef _ILP32
5525 5488 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5526 5489 xferlen = kpthru->sgl.sge32[0].length;
5527 5490 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5528 5491 #else
5529 5492 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5530 5493 xferlen = kpthru->sgl.sge64[0].length;
5531 5494 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5532 5495 #endif
5533 5496 }
5534 5497
5535 5498 if (xferlen) {
5536 5499 /* means IOCTL requires DMA */
5537 5500 /* allocate the data transfer buffer */
5538 5501 /* pthru_dma_obj.size = xferlen; */
5539 5502 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5540 5503 PAGESIZE);
5541 5504 pthru_dma_obj.size = new_xfer_length;
5542 5505 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5543 5506 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5544 5507 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5545 5508 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5546 5509 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5547 5510
5548 5511 /* allocate kernel buffer for DMA */
5549 5512 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5550 5513 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5551 5514 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5552 5515 "could not allocate data transfer buffer."));
5553 5516 return (DDI_FAILURE);
5554 5517 }
5555 5518 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5556 5519
5557 5520 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5558 5521 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5559 5522 for (i = 0; i < xferlen; i++) {
5560 5523 if (ddi_copyin((uint8_t *)ubuf+i,
5561 5524 (uint8_t *)pthru_dma_obj.buffer+i,
5562 5525 1, mode)) {
5563 5526 con_log(CL_ANN, (CE_WARN,
5564 5527 "issue_mfi_pthru : "
5565 5528 "copy from user space failed"));
5566 5529 return (DDI_FAILURE);
5567 5530 }
5568 5531 }
5569 5532 }
5570 5533
5571 5534 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5572 5535 }
5573 5536
5574 5537 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5575 5538 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5576 5539 ddi_put8(acc_handle, &pthru->cmd_status, 0);
5577 5540 ddi_put8(acc_handle, &pthru->scsi_status, 0);
5578 5541 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5579 5542 ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5580 5543 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5581 5544 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5582 5545 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5583 5546 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5584 5547
5585 5548 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5586 5549 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
5587 5550 /* ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5588 5551
5589 5552 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5590 5553 pthru->cdb_len, DDI_DEV_AUTOINCR);
5591 5554
5592 5555 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5593 5556 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5594 5557 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5595 5558
5596 5559 cmd->sync_cmd = MRSAS_TRUE;
5597 5560 cmd->frame_count = 1;
5598 5561
5599 5562 if (instance->tbolt) {
5600 5563 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5601 5564 }
5602 5565
5603 5566 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5604 5567 con_log(CL_ANN, (CE_WARN,
5605 5568 "issue_mfi_pthru: fw_ioctl failed"));
5606 5569 } else {
5607 5570 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
5608 5571 for (i = 0; i < xferlen; i++) {
5609 5572 if (ddi_copyout(
5610 5573 (uint8_t *)pthru_dma_obj.buffer+i,
5611 5574 (uint8_t *)ubuf+i, 1, mode)) {
5612 5575 con_log(CL_ANN, (CE_WARN,
5613 5576 "issue_mfi_pthru : "
5614 5577 "copy to user space failed"));
5615 5578 return (DDI_FAILURE);
5616 5579 }
5617 5580 }
5618 5581 }
5619 5582 }
5620 5583
5621 5584 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5622 5585 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5623 5586
5624 5587 con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5625 5588 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5626 5589 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t,
5627 5590 kpthru->cmd_status, uint8_t, kpthru->scsi_status);
5628 5591
5629 5592 if (kpthru->sense_len) {
5630 5593 uint_t sense_len = SENSE_LENGTH;
5631 5594 void *sense_ubuf =
5632 5595 (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5633 5596 if (kpthru->sense_len <= SENSE_LENGTH) {
5634 5597 sense_len = kpthru->sense_len;
5635 5598 }
5636 5599
5637 5600 for (i = 0; i < sense_len; i++) {
5638 5601 if (ddi_copyout(
5639 5602 (uint8_t *)cmd->sense+i,
5640 5603 (uint8_t *)sense_ubuf+i, 1, mode)) {
5641 5604 con_log(CL_ANN, (CE_WARN,
5642 5605 "issue_mfi_pthru : "
5643 5606 "copy to user space failed"));
5644 5607 }
5645 5608 con_log(CL_DLEVEL1, (CE_WARN,
5646 5609 "Copying Sense info sense_buff[%d] = 0x%X\n",
5647 5610 i, *((uint8_t *)cmd->sense + i)));
5648 5611 }
5649 5612 }
5650 5613 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5651 5614 DDI_DMA_SYNC_FORDEV);
5652 5615
5653 5616 if (xferlen) {
5654 5617 /* free kernel buffer */
5655 5618 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5656 5619 return (DDI_FAILURE);
5657 5620 }
5658 5621
5659 5622 return (DDI_SUCCESS);
5660 5623 }
5661 5624
5662 5625 /*
5663 5626 * issue_mfi_dcmd
5664 5627 */
5665 5628 static int
5666 5629 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5667 5630 struct mrsas_cmd *cmd, int mode)
5668 5631 {
5669 5632 void *ubuf;
5670 5633 uint32_t kphys_addr = 0;
5671 5634 uint32_t xferlen = 0;
5672 5635 uint32_t new_xfer_length = 0;
5673 5636 uint32_t model;
5674 5637 dma_obj_t dcmd_dma_obj;
5675 5638 struct mrsas_dcmd_frame *kdcmd;
5676 5639 struct mrsas_dcmd_frame *dcmd;
5677 5640 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5678 5641 int i;
5679 5642 dcmd = &cmd->frame->dcmd;
5680 5643 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5681 5644
5682 5645 if (instance->adapterresetinprogress) {
5683 5646 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5684 5647 "returning mfi_pkt and setting TRAN_BUSY\n"));
5685 5648 return (DDI_FAILURE);
5686 5649 }
5687 5650 model = ddi_model_convert_from(mode & FMODELS);
5688 5651 if (model == DDI_MODEL_ILP32) {
5689 5652 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5690 5653
5691 5654 xferlen = kdcmd->sgl.sge32[0].length;
5692 5655
5693 5656 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5694 5657 } else {
5695 5658 #ifdef _ILP32
5696 5659 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5697 5660 xferlen = kdcmd->sgl.sge32[0].length;
5698 5661 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5699 5662 #else
5700 5663 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5701 5664 xferlen = kdcmd->sgl.sge64[0].length;
5702 5665 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5703 5666 #endif
5704 5667 }
5705 5668 if (xferlen) {
5706 5669 /* means IOCTL requires DMA */
5707 5670 /* allocate the data transfer buffer */
5708 5671 /* dcmd_dma_obj.size = xferlen; */
5709 5672 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5710 5673 PAGESIZE);
5711 5674 dcmd_dma_obj.size = new_xfer_length;
5712 5675 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5713 5676 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5714 5677 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5715 5678 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5716 5679 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5717 5680
5718 5681 /* allocate kernel buffer for DMA */
5719 5682 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
5720 5683 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5721 5684 con_log(CL_ANN,
5722 5685 (CE_WARN, "issue_mfi_dcmd: could not "
5723 5686 "allocate data transfer buffer."));
5724 5687 return (DDI_FAILURE);
5725 5688 }
5726 5689 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5727 5690
5728 5691 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5729 5692 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5730 5693 for (i = 0; i < xferlen; i++) {
5731 5694 if (ddi_copyin((uint8_t *)ubuf + i,
5732 5695 (uint8_t *)dcmd_dma_obj.buffer + i,
5733 5696 1, mode)) {
5734 5697 con_log(CL_ANN, (CE_WARN,
5735 5698 "issue_mfi_dcmd : "
5736 5699 "copy from user space failed"));
5737 5700 return (DDI_FAILURE);
5738 5701 }
5739 5702 }
5740 5703 }
5741 5704
5742 5705 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5743 5706 }
5744 5707
5745 5708 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5746 5709 ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5747 5710 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5748 5711 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5749 5712 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5750 5713 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5751 5714
5752 5715 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5753 5716 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5754 5717
5755 5718 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5756 5719 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5757 5720 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5758 5721
5759 5722 cmd->sync_cmd = MRSAS_TRUE;
5760 5723 cmd->frame_count = 1;
5761 5724
5762 5725 if (instance->tbolt) {
5763 5726 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5764 5727 }
5765 5728
5766 5729 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5767 5730 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5768 5731 } else {
5769 5732 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5770 5733 for (i = 0; i < xferlen; i++) {
5771 5734 if (ddi_copyout(
5772 5735 (uint8_t *)dcmd_dma_obj.buffer + i,
5773 5736 (uint8_t *)ubuf + i,
5774 5737 1, mode)) {
5775 5738 con_log(CL_ANN, (CE_WARN,
5776 5739 "issue_mfi_dcmd : "
5777 5740 "copy to user space failed"));
5778 5741 return (DDI_FAILURE);
5779 5742 }
5780 5743 }
5781 5744 }
5782 5745 }
5783 5746
5784 5747 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5785 5748 con_log(CL_ANN,
5786 5749 (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5787 5750 DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t,
5788 5751 kdcmd->cmd, uint8_t, kdcmd->cmd_status);
5789 5752
5790 5753 if (xferlen) {
5791 5754 /* free kernel buffer */
5792 5755 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5793 5756 return (DDI_FAILURE);
5794 5757 }
5795 5758
5796 5759 return (DDI_SUCCESS);
5797 5760 }
5798 5761
5799 5762 /*
5800 5763 * issue_mfi_smp
5801 5764 */
5802 5765 static int
5803 5766 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5804 5767 struct mrsas_cmd *cmd, int mode)
5805 5768 {
5806 5769 void *request_ubuf;
5807 5770 void *response_ubuf;
5808 5771 uint32_t request_xferlen = 0;
5809 5772 uint32_t response_xferlen = 0;
5810 5773 uint32_t new_xfer_length1 = 0;
5811 5774 uint32_t new_xfer_length2 = 0;
5812 5775 uint_t model;
5813 5776 dma_obj_t request_dma_obj;
5814 5777 dma_obj_t response_dma_obj;
5815 5778 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5816 5779 struct mrsas_smp_frame *ksmp;
5817 5780 struct mrsas_smp_frame *smp;
5818 5781 struct mrsas_sge32 *sge32;
5819 5782 #ifndef _ILP32
5820 5783 struct mrsas_sge64 *sge64;
5821 5784 #endif
5822 5785 int i;
5823 5786 uint64_t tmp_sas_addr;
5824 5787
5825 5788 smp = &cmd->frame->smp;
5826 5789 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5827 5790
5828 5791 if (instance->adapterresetinprogress) {
5829 5792 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5830 5793 "returning mfi_pkt and setting TRAN_BUSY\n"));
5831 5794 return (DDI_FAILURE);
5832 5795 }
5833 5796 model = ddi_model_convert_from(mode & FMODELS);
5834 5797 if (model == DDI_MODEL_ILP32) {
5835 5798 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5836 5799
5837 5800 sge32 = &ksmp->sgl[0].sge32[0];
5838 5801 response_xferlen = sge32[0].length;
5839 5802 request_xferlen = sge32[1].length;
5840 5803 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5841 5804 "response_xferlen = %x, request_xferlen = %x",
5842 5805 response_xferlen, request_xferlen));
5843 5806
5844 5807 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5845 5808 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5846 5809 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5847 5810 "response_ubuf = %p, request_ubuf = %p",
5848 5811 response_ubuf, request_ubuf));
5849 5812 } else {
5850 5813 #ifdef _ILP32
5851 5814 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5852 5815
5853 5816 sge32 = &ksmp->sgl[0].sge32[0];
5854 5817 response_xferlen = sge32[0].length;
5855 5818 request_xferlen = sge32[1].length;
5856 5819 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5857 5820 "response_xferlen = %x, request_xferlen = %x",
5858 5821 response_xferlen, request_xferlen));
5859 5822
5860 5823 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5861 5824 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5862 5825 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5863 5826 "response_ubuf = %p, request_ubuf = %p",
5864 5827 response_ubuf, request_ubuf));
5865 5828 #else
5866 5829 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5867 5830
5868 5831 sge64 = &ksmp->sgl[0].sge64[0];
5869 5832 response_xferlen = sge64[0].length;
5870 5833 request_xferlen = sge64[1].length;
5871 5834
5872 5835 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
5873 5836 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
5874 5837 #endif
5875 5838 }
5876 5839 if (request_xferlen) {
5877 5840 /* means IOCTL requires DMA */
5878 5841 /* allocate the data transfer buffer */
5879 5842 /* request_dma_obj.size = request_xferlen; */
5880 5843 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,
5881 5844 new_xfer_length1, PAGESIZE);
5882 5845 request_dma_obj.size = new_xfer_length1;
5883 5846 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5884 5847 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5885 5848 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5886 5849 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5887 5850 request_dma_obj.dma_attr.dma_attr_align = 1;
5888 5851
5889 5852 /* allocate kernel buffer for DMA */
5890 5853 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5891 5854 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5892 5855 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5893 5856 "could not allocate data transfer buffer."));
5894 5857 return (DDI_FAILURE);
5895 5858 }
5896 5859 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5897 5860
5898 5861 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5899 5862 for (i = 0; i < request_xferlen; i++) {
5900 5863 if (ddi_copyin((uint8_t *)request_ubuf + i,
5901 5864 (uint8_t *)request_dma_obj.buffer + i,
5902 5865 1, mode)) {
5903 5866 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5904 5867 "copy from user space failed"));
5905 5868 return (DDI_FAILURE);
5906 5869 }
5907 5870 }
5908 5871 }
5909 5872
5910 5873 if (response_xferlen) {
5911 5874 /* means IOCTL requires DMA */
5912 5875 /* allocate the data transfer buffer */
5913 5876 /* response_dma_obj.size = response_xferlen; */
5914 5877 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,
5915 5878 new_xfer_length2, PAGESIZE);
5916 5879 response_dma_obj.size = new_xfer_length2;
5917 5880 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5918 5881 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5919 5882 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5920 5883 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5921 5884 response_dma_obj.dma_attr.dma_attr_align = 1;
5922 5885
5923 5886 /* allocate kernel buffer for DMA */
5924 5887 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5925 5888 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5926 5889 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5927 5890 "could not allocate data transfer buffer."));
5928 5891 return (DDI_FAILURE);
5929 5892 }
5930 5893 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5931 5894
5932 5895 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5933 5896 for (i = 0; i < response_xferlen; i++) {
5934 5897 if (ddi_copyin((uint8_t *)response_ubuf + i,
5935 5898 (uint8_t *)response_dma_obj.buffer + i,
5936 5899 1, mode)) {
5937 5900 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5938 5901 "copy from user space failed"));
5939 5902 return (DDI_FAILURE);
5940 5903 }
5941 5904 }
5942 5905 }
5943 5906
5944 5907 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5945 5908 ddi_put8(acc_handle, &smp->cmd_status, 0);
5946 5909 ddi_put8(acc_handle, &smp->connection_status, 0);
5947 5910 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5948 5911 /* smp->context = ksmp->context; */
5949 5912 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5950 5913 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5951 5914
5952 5915 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5953 5916 sizeof (uint64_t));
5954 5917 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5955 5918
5956 5919 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5957 5920
5958 5921 model = ddi_model_convert_from(mode & FMODELS);
5959 5922 if (model == DDI_MODEL_ILP32) {
5960 5923 con_log(CL_ANN1, (CE_CONT,
5961 5924 "issue_mfi_smp: DDI_MODEL_ILP32"));
5962 5925
5963 5926 sge32 = &smp->sgl[0].sge32[0];
5964 5927 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5965 5928 ddi_put32(acc_handle, &sge32[0].phys_addr,
5966 5929 response_dma_obj.dma_cookie[0].dmac_address);
5967 5930 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5968 5931 ddi_put32(acc_handle, &sge32[1].phys_addr,
5969 5932 request_dma_obj.dma_cookie[0].dmac_address);
5970 5933 } else {
5971 5934 #ifdef _ILP32
5972 5935 con_log(CL_ANN1, (CE_CONT,
5973 5936 "issue_mfi_smp: DDI_MODEL_ILP32"));
5974 5937 sge32 = &smp->sgl[0].sge32[0];
5975 5938 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5976 5939 ddi_put32(acc_handle, &sge32[0].phys_addr,
5977 5940 response_dma_obj.dma_cookie[0].dmac_address);
5978 5941 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5979 5942 ddi_put32(acc_handle, &sge32[1].phys_addr,
5980 5943 request_dma_obj.dma_cookie[0].dmac_address);
5981 5944 #else
5982 5945 con_log(CL_ANN1, (CE_CONT,
5983 5946 "issue_mfi_smp: DDI_MODEL_LP64"));
5984 5947 sge64 = &smp->sgl[0].sge64[0];
5985 5948 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5986 5949 ddi_put64(acc_handle, &sge64[0].phys_addr,
5987 5950 response_dma_obj.dma_cookie[0].dmac_address);
5988 5951 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5989 5952 ddi_put64(acc_handle, &sge64[1].phys_addr,
5990 5953 request_dma_obj.dma_cookie[0].dmac_address);
5991 5954 #endif
5992 5955 }
5993 5956 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5994 5957 "smp->response_xferlen = %d, smp->request_xferlen = %d "
5995 5958 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5996 5959 ddi_get32(acc_handle, &sge32[1].length),
5997 5960 ddi_get32(acc_handle, &smp->data_xfer_len)));
5998 5961
5999 5962 cmd->sync_cmd = MRSAS_TRUE;
6000 5963 cmd->frame_count = 1;
6001 5964
6002 5965 if (instance->tbolt) {
6003 5966 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6004 5967 }
6005 5968
6006 5969 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
6007 5970 con_log(CL_ANN, (CE_WARN,
6008 5971 "issue_mfi_smp: fw_ioctl failed"));
6009 5972 } else {
6010 5973 con_log(CL_ANN1, (CE_CONT,
6011 5974 "issue_mfi_smp: copy to user space"));
6012 5975
6013 5976 if (request_xferlen) {
6014 5977 for (i = 0; i < request_xferlen; i++) {
6015 5978 if (ddi_copyout(
6016 5979 (uint8_t *)request_dma_obj.buffer +
6017 5980 i, (uint8_t *)request_ubuf + i,
6018 5981 1, mode)) {
6019 5982 con_log(CL_ANN, (CE_WARN,
6020 5983 "issue_mfi_smp : copy to user space"
6021 5984 " failed"));
6022 5985 return (DDI_FAILURE);
6023 5986 }
6024 5987 }
6025 5988 }
6026 5989
6027 5990 if (response_xferlen) {
6028 5991 for (i = 0; i < response_xferlen; i++) {
6029 5992 if (ddi_copyout(
6030 5993 (uint8_t *)response_dma_obj.buffer
6031 5994 + i, (uint8_t *)response_ubuf
6032 5995 + i, 1, mode)) {
6033 5996 con_log(CL_ANN, (CE_WARN,
6034 5997 "issue_mfi_smp : copy to "
6035 5998 "user space failed"));
6036 5999 return (DDI_FAILURE);
6037 6000 }
6038 6001 }
6039 6002 }
6040 6003 }
6041 6004
6042 6005 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
6043 6006 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
6044 6007 ksmp->cmd_status));
6045 6008 DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status);
6046 6009
6047 6010 if (request_xferlen) {
6048 6011 /* free kernel buffer */
6049 6012 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
6050 6013 DDI_SUCCESS)
6051 6014 return (DDI_FAILURE);
6052 6015 }
6053 6016
6054 6017 if (response_xferlen) {
6055 6018 /* free kernel buffer */
6056 6019 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
6057 6020 DDI_SUCCESS)
6058 6021 return (DDI_FAILURE);
6059 6022 }
6060 6023
6061 6024 return (DDI_SUCCESS);
6062 6025 }
6063 6026
6064 6027 /*
6065 6028 * issue_mfi_stp
6066 6029 */
6067 6030 static int
6068 6031 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6069 6032 struct mrsas_cmd *cmd, int mode)
6070 6033 {
6071 6034 void *fis_ubuf;
6072 6035 void *data_ubuf;
6073 6036 uint32_t fis_xferlen = 0;
6074 6037 uint32_t new_xfer_length1 = 0;
6075 6038 uint32_t new_xfer_length2 = 0;
6076 6039 uint32_t data_xferlen = 0;
6077 6040 uint_t model;
6078 6041 dma_obj_t fis_dma_obj;
6079 6042 dma_obj_t data_dma_obj;
6080 6043 struct mrsas_stp_frame *kstp;
6081 6044 struct mrsas_stp_frame *stp;
6082 6045 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
6083 6046 int i;
6084 6047
6085 6048 stp = &cmd->frame->stp;
6086 6049 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
6087 6050
6088 6051 if (instance->adapterresetinprogress) {
6089 6052 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
6090 6053 "returning mfi_pkt and setting TRAN_BUSY\n"));
6091 6054 return (DDI_FAILURE);
6092 6055 }
6093 6056 model = ddi_model_convert_from(mode & FMODELS);
6094 6057 if (model == DDI_MODEL_ILP32) {
6095 6058 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
6096 6059
6097 6060 fis_xferlen = kstp->sgl.sge32[0].length;
6098 6061 data_xferlen = kstp->sgl.sge32[1].length;
6099 6062
6100 6063 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
6101 6064 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
6102 6065 } else {
6103 6066 #ifdef _ILP32
6104 6067 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
6105 6068
6106 6069 fis_xferlen = kstp->sgl.sge32[0].length;
6107 6070 data_xferlen = kstp->sgl.sge32[1].length;
6108 6071
6109 6072 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
6110 6073 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
6111 6074 #else
6112 6075 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
6113 6076
6114 6077 fis_xferlen = kstp->sgl.sge64[0].length;
6115 6078 data_xferlen = kstp->sgl.sge64[1].length;
6116 6079
6117 6080 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
6118 6081 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
6119 6082 #endif
6120 6083 }
6121 6084
6122 6085
6123 6086 if (fis_xferlen) {
6124 6087 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
6125 6088 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
6126 6089
6127 6090 /* means IOCTL requires DMA */
6128 6091 /* allocate the data transfer buffer */
6129 6092 /* fis_dma_obj.size = fis_xferlen; */
6130 6093 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,
6131 6094 new_xfer_length1, PAGESIZE);
6132 6095 fis_dma_obj.size = new_xfer_length1;
6133 6096 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
6134 6097 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6135 6098 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6136 6099 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
6137 6100 fis_dma_obj.dma_attr.dma_attr_align = 1;
6138 6101
6139 6102 /* allocate kernel buffer for DMA */
6140 6103 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
6141 6104 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6142 6105 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
6143 6106 "could not allocate data transfer buffer."));
6144 6107 return (DDI_FAILURE);
6145 6108 }
6146 6109 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
6147 6110
6148 6111 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6149 6112 for (i = 0; i < fis_xferlen; i++) {
6150 6113 if (ddi_copyin((uint8_t *)fis_ubuf + i,
6151 6114 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
6152 6115 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6153 6116 "copy from user space failed"));
6154 6117 return (DDI_FAILURE);
6155 6118 }
6156 6119 }
6157 6120 }
6158 6121
6159 6122 if (data_xferlen) {
6160 6123 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
6161 6124 "data_xferlen = %x", data_ubuf, data_xferlen));
6162 6125
6163 6126 /* means IOCTL requires DMA */
6164 6127 /* allocate the data transfer buffer */
6165 6128 /* data_dma_obj.size = data_xferlen; */
6166 6129 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen, new_xfer_length2,
6167 6130 PAGESIZE);
6168 6131 data_dma_obj.size = new_xfer_length2;
6169 6132 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
6170 6133 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6171 6134 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6172 6135 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
6173 6136 data_dma_obj.dma_attr.dma_attr_align = 1;
6174 6137
6175 6138 /* allocate kernel buffer for DMA */
6176 6139 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
6177 6140 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6178 6141 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6179 6142 "could not allocate data transfer buffer."));
6180 6143 return (DDI_FAILURE);
6181 6144 }
6182 6145 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
6183 6146
6184 6147 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6185 6148 for (i = 0; i < data_xferlen; i++) {
6186 6149 if (ddi_copyin((uint8_t *)data_ubuf + i,
6187 6150 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
6188 6151 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6189 6152 "copy from user space failed"));
6190 6153 return (DDI_FAILURE);
6191 6154 }
6192 6155 }
6193 6156 }
6194 6157
6195 6158 ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
6196 6159 ddi_put8(acc_handle, &stp->cmd_status, 0);
6197 6160 ddi_put8(acc_handle, &stp->connection_status, 0);
6198 6161 ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
6199 6162 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
6200 6163
6201 6164 ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
6202 6165 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
6203 6166
6204 6167 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
6205 6168 DDI_DEV_AUTOINCR);
6206 6169
6207 6170 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
6208 6171 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
6209 6172 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
6210 6173 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
6211 6174 fis_dma_obj.dma_cookie[0].dmac_address);
6212 6175 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
6213 6176 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
6214 6177 data_dma_obj.dma_cookie[0].dmac_address);
6215 6178
6216 6179 cmd->sync_cmd = MRSAS_TRUE;
6217 6180 cmd->frame_count = 1;
6218 6181
6219 6182 if (instance->tbolt) {
6220 6183 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6221 6184 }
6222 6185
6223 6186 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
6224 6187 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
6225 6188 } else {
6226 6189
6227 6190 if (fis_xferlen) {
6228 6191 for (i = 0; i < fis_xferlen; i++) {
6229 6192 if (ddi_copyout(
6230 6193 (uint8_t *)fis_dma_obj.buffer + i,
6231 6194 (uint8_t *)fis_ubuf + i, 1, mode)) {
6232 6195 con_log(CL_ANN, (CE_WARN,
6233 6196 "issue_mfi_stp : copy to "
6234 6197 "user space failed"));
6235 6198 return (DDI_FAILURE);
6236 6199 }
6237 6200 }
6238 6201 }
6239 6202 }
6240 6203 if (data_xferlen) {
6241 6204 for (i = 0; i < data_xferlen; i++) {
6242 6205 if (ddi_copyout(
6243 6206 (uint8_t *)data_dma_obj.buffer + i,
6244 6207 (uint8_t *)data_ubuf + i, 1, mode)) {
6245 6208 con_log(CL_ANN, (CE_WARN,
6246 6209 "issue_mfi_stp : copy to"
6247 6210 " user space failed"));
6248 6211 return (DDI_FAILURE);
6249 6212 }
6250 6213 }
6251 6214 }
6252 6215
6253 6216 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
6254 6217 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
6255 6218 kstp->cmd_status));
6256 6219 DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status);
6257 6220
6258 6221 if (fis_xferlen) {
6259 6222 /* free kernel buffer */
6260 6223 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
6261 6224 return (DDI_FAILURE);
6262 6225 }
6263 6226
6264 6227 if (data_xferlen) {
6265 6228 /* free kernel buffer */
6266 6229 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
6267 6230 return (DDI_FAILURE);
6268 6231 }
6269 6232
6270 6233 return (DDI_SUCCESS);
6271 6234 }
6272 6235
6273 6236 /*
6274 6237 * fill_up_drv_ver
6275 6238 */
6276 6239 void
6277 6240 fill_up_drv_ver(struct mrsas_drv_ver *dv)
6278 6241 {
6279 6242 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
6280 6243
6281 6244 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
6282 6245 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
6283 6246 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
6284 6247 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
6285 6248 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
6286 6249 strlen(MRSAS_RELDATE));
6287 6250
6288 6251 }
6289 6252
6290 6253 /*
6291 6254 * handle_drv_ioctl
6292 6255 */
6293 6256 static int
6294 6257 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6295 6258 int mode)
6296 6259 {
6297 6260 int i;
6298 6261 int rval = DDI_SUCCESS;
6299 6262 int *props = NULL;
6300 6263 void *ubuf;
6301 6264
6302 6265 uint8_t *pci_conf_buf;
6303 6266 uint32_t xferlen;
6304 6267 uint32_t num_props;
6305 6268 uint_t model;
6306 6269 struct mrsas_dcmd_frame *kdcmd;
6307 6270 struct mrsas_drv_ver dv;
6308 6271 struct mrsas_pci_information pi;
6309 6272
6310 6273 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
6311 6274
6312 6275 model = ddi_model_convert_from(mode & FMODELS);
6313 6276 if (model == DDI_MODEL_ILP32) {
6314 6277 con_log(CL_ANN1, (CE_CONT,
6315 6278 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6316 6279
6317 6280 xferlen = kdcmd->sgl.sge32[0].length;
6318 6281
6319 6282 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6320 6283 } else {
6321 6284 #ifdef _ILP32
6322 6285 con_log(CL_ANN1, (CE_CONT,
6323 6286 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6324 6287 xferlen = kdcmd->sgl.sge32[0].length;
6325 6288 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6326 6289 #else
6327 6290 con_log(CL_ANN1, (CE_CONT,
6328 6291 "handle_drv_ioctl: DDI_MODEL_LP64"));
6329 6292 xferlen = kdcmd->sgl.sge64[0].length;
6330 6293 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6331 6294 #endif
6332 6295 }
6333 6296 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6334 6297 "dataBuf=%p size=%d bytes", ubuf, xferlen));
6335 6298
6336 6299 switch (kdcmd->opcode) {
6337 6300 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6338 6301 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6339 6302 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6340 6303
6341 6304 fill_up_drv_ver(&dv);
6342 6305
6343 6306 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6344 6307 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6345 6308 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6346 6309 "copy to user space failed"));
6347 6310 kdcmd->cmd_status = 1;
6348 6311 rval = 1;
6349 6312 } else {
6350 6313 kdcmd->cmd_status = 0;
6351 6314 }
6352 6315 break;
6353 6316 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6354 6317 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
6355 6318 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
6356 6319
6357 6320 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6358 6321 0, "reg", &props, &num_props)) {
6359 6322 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6360 6323 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6361 6324 "ddi_prop_look_int_array failed"));
6362 6325 rval = DDI_FAILURE;
6363 6326 } else {
6364 6327
6365 6328 pi.busNumber = (props[0] >> 16) & 0xFF;
6366 6329 pi.deviceNumber = (props[0] >> 11) & 0x1f;
6367 6330 pi.functionNumber = (props[0] >> 8) & 0x7;
6368 6331 ddi_prop_free((void *)props);
6369 6332 }
6370 6333
6371 6334 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6372 6335
6373 6336 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6374 6337 offsetof(struct mrsas_pci_information, pciHeaderInfo));
6375 6338 i++) {
6376 6339 pci_conf_buf[i] =
6377 6340 pci_config_get8(instance->pci_handle, i);
6378 6341 }
6379 6342
6380 6343 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6381 6344 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6382 6345 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6383 6346 "copy to user space failed"));
6384 6347 kdcmd->cmd_status = 1;
6385 6348 rval = 1;
6386 6349 } else {
6387 6350 kdcmd->cmd_status = 0;
6388 6351 }
6389 6352 break;
6390 6353 default:
6391 6354 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6392 6355 "invalid driver specific IOCTL opcode = 0x%x",
6393 6356 kdcmd->opcode));
6394 6357 kdcmd->cmd_status = 1;
6395 6358 rval = DDI_FAILURE;
6396 6359 break;
6397 6360 }
6398 6361
6399 6362 return (rval);
6400 6363 }
6401 6364
6402 6365 /*
6403 6366 * handle_mfi_ioctl
6404 6367 */
6405 6368 static int
6406 6369 handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6407 6370 int mode)
6408 6371 {
6409 6372 int rval = DDI_SUCCESS;
6410 6373
6411 6374 struct mrsas_header *hdr;
6412 6375 struct mrsas_cmd *cmd;
6413 6376
6414 6377 if (instance->tbolt) {
6415 6378 cmd = get_raid_msg_mfi_pkt(instance);
6416 6379 } else {
6417 6380 cmd = get_mfi_pkt(instance);
6418 6381 }
6419 6382 if (!cmd) {
6420 6383 con_log(CL_ANN, (CE_WARN, "mr_sas: "
6421 6384 "failed to get a cmd packet"));
6422 6385 DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
6423 6386 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
6424 6387 return (DDI_FAILURE);
6425 6388 }
6426 6389
6427 6390 /* Clear the frame buffer and assign back the context id */
6428 6391 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
6429 6392 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
6430 6393 cmd->index);
6431 6394
6432 6395 hdr = (struct mrsas_header *)&ioctl->frame[0];
6433 6396
6434 6397 switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
6435 6398 case MFI_CMD_OP_DCMD:
6436 6399 rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
6437 6400 break;
6438 6401 case MFI_CMD_OP_SMP:
6439 6402 rval = issue_mfi_smp(instance, ioctl, cmd, mode);
6440 6403 break;
6441 6404 case MFI_CMD_OP_STP:
6442 6405 rval = issue_mfi_stp(instance, ioctl, cmd, mode);
6443 6406 break;
6444 6407 case MFI_CMD_OP_LD_SCSI:
6445 6408 case MFI_CMD_OP_PD_SCSI:
6446 6409 rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
6447 6410 break;
6448 6411 default:
6449 6412 con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
6450 6413 "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
6451 6414 rval = DDI_FAILURE;
6452 6415 break;
6453 6416 }
6454 6417
6455 6418 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
6456 6419 rval = DDI_FAILURE;
6457 6420
6458 6421 if (instance->tbolt) {
6459 6422 return_raid_msg_mfi_pkt(instance, cmd);
6460 6423 } else {
6461 6424 return_mfi_pkt(instance, cmd);
6462 6425 }
6463 6426
6464 6427 return (rval);
6465 6428 }
6466 6429
6467 6430 /*
6468 6431 * AEN
6469 6432 */
6470 6433 static int
6471 6434 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6472 6435 {
6473 6436 int rval = 0;
6474 6437
6475 6438 rval = register_mfi_aen(instance, instance->aen_seq_num,
6476 6439 aen->class_locale_word);
6477 6440
6478 6441 aen->cmd_status = (uint8_t)rval;
6479 6442
6480 6443 return (rval);
6481 6444 }
6482 6445
/*
 * register_mfi_aen - register with the firmware for asynchronous event
 * notifications (AEN) at or after seq_num, for the event class/locale
 * encoded in class_locale_word.
 *
 * If an AEN registration is already outstanding and it already covers
 * the requested class/locale, nothing is done.  Otherwise the pending
 * registration is aborted and re-issued with the union of the old and
 * new class/locale.  Returns 0 on success, ENOMEM if no command packet
 * is available, or the failure code from abort_aen_cmd().
 */
static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct mrsas_cmd *cmd, *aen_cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	/* Convert the caller's word to host order for field comparison. */
	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		/* Recover the class/locale the pending AEN was issued with. */
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* Build the superset of old and new subscriptions. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		/*
		 * NOTE(review): this re-derives curr_aen exactly as at the
		 * top of the function; it is redundant but harmless.
		 */
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = get_mfi_pkt(instance);
	}

	if (!cmd) {
		DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (ENOMEM);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* Event detail lands in this pre-allocated DMA buffer. */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
	/* Swap back to the wire (little-endian) representation for the FW. */
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	curr_aen.word = LE_32(curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
	    curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_detail));

	instance->aen_seq_num = seq_num;


	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16 (&instance->fw_outstanding, 1); */
	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}
6622 6585
6623 6586 void
6624 6587 display_scsi_inquiry(caddr_t scsi_inq)
6625 6588 {
6626 6589 #define MAX_SCSI_DEVICE_CODE 14
6627 6590 int i;
6628 6591 char inquiry_buf[256] = {0};
6629 6592 int len;
6630 6593 const char *const scsi_device_types[] = {
6631 6594 "Direct-Access ",
6632 6595 "Sequential-Access",
6633 6596 "Printer ",
6634 6597 "Processor ",
6635 6598 "WORM ",
6636 6599 "CD-ROM ",
6637 6600 "Scanner ",
6638 6601 "Optical Device ",
6639 6602 "Medium Changer ",
6640 6603 "Communications ",
6641 6604 "Unknown ",
6642 6605 "Unknown ",
6643 6606 "Unknown ",
6644 6607 "Enclosure ",
6645 6608 };
6646 6609
6647 6610 len = 0;
6648 6611
6649 6612 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: ");
6650 6613 for (i = 8; i < 16; i++) {
6651 6614 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6652 6615 scsi_inq[i]);
6653 6616 }
6654 6617
6655 6618 len += snprintf(inquiry_buf + len, 265 - len, " Model: ");
6656 6619
6657 6620 for (i = 16; i < 32; i++) {
6658 6621 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6659 6622 scsi_inq[i]);
6660 6623 }
6661 6624
6662 6625 len += snprintf(inquiry_buf + len, 265 - len, " Rev: ");
6663 6626
6664 6627 for (i = 32; i < 36; i++) {
6665 6628 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6666 6629 scsi_inq[i]);
6667 6630 }
6668 6631
6669 6632 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6670 6633
6671 6634
6672 6635 i = scsi_inq[0] & 0x1f;
6673 6636
6674 6637
6675 6638 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ",
6676 6639 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6677 6640 "Unknown ");
6678 6641
6679 6642
6680 6643 len += snprintf(inquiry_buf + len, 265 - len,
6681 6644 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6682 6645
6683 6646 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6684 6647 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
6685 6648 } else {
6686 6649 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6687 6650 }
6688 6651
6689 6652 con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf));
6690 6653 }
6691 6654
/*
 * io_timeout_checker - one-second watchdog.  Ages every pending command
 * and, on the first command whose timer expires, either resets (OCR) or
 * kills the adapter depending on firmware capability and retry count.
 * Re-arms itself via timeout(9F) on every path that does not kill the
 * adapter.
 */
static void
io_timeout_checker(void *arg)
{
	struct scsi_pkt *pkt;
	struct mrsas_instance *instance = arg;
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_header *hdr;
	int time = 0;
	int counter = 0;
	struct mlist_head *pos, *next;
	mlist_t process_list;

	/* While an OCR is already running, just re-arm and retry later. */
	if (instance->adapterresetinprogress == 1) {
		con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
		    " reset in progress"));

		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		return;
	}

	/* See if this check needs to be in the beginning or last in ISR */
	if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
		/* FW reported a fault: reset now, then re-arm the timer. */
		cmn_err(CE_WARN, "io_timeout_checker: "
		    "FW Fault, calling reset adapter");
		cmn_err(CE_CONT, "io_timeout_checker: "
		    "fw_outstanding 0x%X max_fw_cmds 0x%X",
		    instance->fw_outstanding, instance->max_fw_cmds);
		if (instance->adapterresetinprogress == 0) {
			instance->adapterresetinprogress = 1;
			if (instance->tbolt)
				(void) mrsas_tbolt_reset_ppc(instance);
			else
				(void) mrsas_reset_ppc(instance);
			instance->adapterresetinprogress = 0;
		}
		instance->timeout_id = timeout(io_timeout_checker,
		    (void *) instance, drv_usectohz(MRSAS_1_SECOND));
		return;
	}

	INIT_LIST_HEAD(&process_list);

	/*
	 * Decrement each pending command's timer by one second.  On the
	 * first expired command we stop scanning; 'cmd' then still points
	 * at the timed-out command for the OCR decision below.
	 */
	mutex_enter(&instance->cmd_pend_mtx);
	mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
		cmd = mlist_entry(pos, struct mrsas_cmd, list);

		if (cmd == NULL) {
			continue;
		}

		if (cmd->sync_cmd == MRSAS_TRUE) {
			hdr = (struct mrsas_header *)&cmd->frame->hdr;
			if (hdr == NULL) {
				continue;
			}
			time = --cmd->drv_pkt_time;
		} else {
			pkt = cmd->pkt;
			if (pkt == NULL) {
				continue;
			}
			time = --cmd->drv_pkt_time;
		}
		if (time <= 0) {
			/*
			 * NOTE(review): when the expired command is a sync
			 * command, 'pkt' was never assigned this iteration,
			 * so the pointer printed below may be stale or
			 * uninitialized.  Debug output only, but worth
			 * confirming/fixing.
			 */
			cmn_err(CE_WARN, "%llx: "
			    "io_timeout_checker: TIMING OUT: pkt: %p, "
			    "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X\n",
			    gethrtime(), (void *)pkt, (void *)cmd,
			    instance->fw_outstanding, instance->max_fw_cmds);

			counter++;
			break;
		}
	}
	mutex_exit(&instance->cmd_pend_mtx);

	if (counter) {
		/* A command timed out: decide between OCR and kill. */
		if (instance->disable_online_ctrl_reset == 1) {
			/* FW cannot do online reset: adapter must die. */
			cmn_err(CE_WARN, "mr_sas %d: %s(): OCR is NOT "
			    "supported by Firmware, KILL adapter!!!",
			    instance->instance, __func__);

			if (instance->tbolt)
				mrsas_tbolt_kill_adapter(instance);
			else
				(void) mrsas_kill_adapter(instance);

			return;
		} else {
			if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
				if (instance->adapterresetinprogress == 0) {
					if (instance->tbolt) {
						(void) mrsas_tbolt_reset_ppc(
						    instance);
					} else {
						(void) mrsas_reset_ppc(
						    instance);
					}
				}
			} else {
				/* Retries exhausted: give up on the HBA. */
				cmn_err(CE_WARN,
				    "io_timeout_checker: "
				    "cmd %p cmd->index %d "
				    "timed out even after 3 resets: "
				    "so KILL adapter", (void *)cmd, cmd->index);

				mrsas_print_cmd_details(instance, cmd, 0xDD);

				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return;
			}
		}
	}
	con_log(CL_ANN, (CE_NOTE, "mrsas: "
	    "schedule next timeout check: "
	    "do timeout \n"));
	instance->timeout_id =
	    timeout(io_timeout_checker, (void *)instance,
	    drv_usectohz(MRSAS_1_SECOND));
}
6816 6779
/*
 * read_fw_status_reg_ppc - return the firmware status word from
 * outbound scratch-pad register 0.
 */
static uint32_t
read_fw_status_reg_ppc(struct mrsas_instance *instance)
{
	uint32_t fw_status;

	fw_status = (uint32_t)RD_OB_SCRATCH_PAD_0(instance);
	return (fw_status);
}
6822 6785
6823 6786 static void
6824 6787 issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
6825 6788 {
6826 6789 struct scsi_pkt *pkt;
6827 6790 atomic_add_16(&instance->fw_outstanding, 1);
6828 6791
6829 6792 pkt = cmd->pkt;
6830 6793 if (pkt) {
6831 6794 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6832 6795 "ISSUED CMD TO FW : called : cmd:"
6833 6796 ": %p instance : %p pkt : %p pkt_time : %x\n",
6834 6797 gethrtime(), (void *)cmd, (void *)instance,
6835 6798 (void *)pkt, cmd->drv_pkt_time));
6836 6799 if (instance->adapterresetinprogress) {
6837 6800 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6838 6801 con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
6839 6802 } else {
|
↓ open down ↓ |
1328 lines elided |
↑ open up ↑ |
6840 6803 push_pending_mfi_pkt(instance, cmd);
6841 6804 }
6842 6805
6843 6806 } else {
6844 6807 con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
6845 6808 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
6846 6809 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
6847 6810 }
6848 6811
6849 6812 mutex_enter(&instance->reg_write_mtx);
6850 - ASSERT(mutex_owned(&instance->reg_write_mtx));
6851 6813 /* Issue the command to the FW */
6852 6814 WR_IB_QPORT((cmd->frame_phys_addr) |
6853 6815 (((cmd->frame_count - 1) << 1) | 1), instance);
6854 6816 mutex_exit(&instance->reg_write_mtx);
6855 6817
6856 6818 }
6857 6819
6858 6820 /*
6859 6821 * issue_cmd_in_sync_mode
6860 6822 */
6861 6823 static int
6862 6824 issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
6863 6825 struct mrsas_cmd *cmd)
6864 6826 {
6865 6827 int i;
6866 6828 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * (10 * MILLISEC);
6867 6829 struct mrsas_header *hdr = &cmd->frame->hdr;
6868 6830
6869 6831 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));
6870 6832
6871 6833 if (instance->adapterresetinprogress) {
6872 6834 cmd->drv_pkt_time = ddi_get16(
6873 6835 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
6874 6836 if (cmd->drv_pkt_time < debug_timeout_g)
6875 6837 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
6876 6838
6877 6839 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
6878 6840 "issue and return in reset case\n"));
6879 6841 WR_IB_QPORT((cmd->frame_phys_addr) |
6880 6842 (((cmd->frame_count - 1) << 1) | 1), instance);
|
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
6881 6843
6882 6844 return (DDI_SUCCESS);
6883 6845 } else {
6884 6846 con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
6885 6847 push_pending_mfi_pkt(instance, cmd);
6886 6848 }
6887 6849
6888 6850 cmd->cmd_status = ENODATA;
6889 6851
6890 6852 mutex_enter(&instance->reg_write_mtx);
6891 - ASSERT(mutex_owned(&instance->reg_write_mtx));
6892 6853 /* Issue the command to the FW */
6893 6854 WR_IB_QPORT((cmd->frame_phys_addr) |
6894 6855 (((cmd->frame_count - 1) << 1) | 1), instance);
6895 6856 mutex_exit(&instance->reg_write_mtx);
6896 6857
6897 6858 mutex_enter(&instance->int_cmd_mtx);
6898 6859 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
6899 6860 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
6900 6861 }
6901 6862 mutex_exit(&instance->int_cmd_mtx);
6902 6863
6903 6864 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));
6904 6865
6905 6866 if (i < (msecs -1)) {
6906 6867 return (DDI_SUCCESS);
6907 6868 } else {
6908 6869 return (DDI_FAILURE);
6909 6870 }
6910 6871 }
6911 6872
6912 6873 /*
6913 6874 * issue_cmd_in_poll_mode
6914 6875 */
6915 6876 static int
6916 6877 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6917 6878 struct mrsas_cmd *cmd)
6918 6879 {
6919 6880 int i;
6920 6881 uint16_t flags;
6921 6882 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6922 6883 struct mrsas_header *frame_hdr;
6923 6884
6924 6885 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
6925 6886
6926 6887 frame_hdr = (struct mrsas_header *)cmd->frame;
6927 6888 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6928 6889 MFI_CMD_STATUS_POLL_MODE);
6929 6890 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6930 6891 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6931 6892
6932 6893 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6933 6894
6934 6895 /* issue the frame using inbound queue port */
6935 6896 WR_IB_QPORT((cmd->frame_phys_addr) |
6936 6897 (((cmd->frame_count - 1) << 1) | 1), instance);
6937 6898
6938 6899 /* wait for cmd_status to change from 0xFF */
6939 6900 for (i = 0; i < msecs && (
6940 6901 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6941 6902 == MFI_CMD_STATUS_POLL_MODE); i++) {
6942 6903 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6943 6904 }
6944 6905
6945 6906 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6946 6907 == MFI_CMD_STATUS_POLL_MODE) {
6947 6908 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6948 6909 "cmd polling timed out"));
6949 6910 return (DDI_FAILURE);
6950 6911 }
6951 6912
6952 6913 return (DDI_SUCCESS);
6953 6914 }
6954 6915
6955 6916 static void
6956 6917 enable_intr_ppc(struct mrsas_instance *instance)
6957 6918 {
6958 6919 uint32_t mask;
6959 6920
6960 6921 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));
6961 6922
6962 6923 /* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
6963 6924 WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);
6964 6925
6965 6926 /* WR_OB_INTR_MASK(~0x80000000, instance); */
6966 6927 WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
6967 6928
6968 6929 /* dummy read to force PCI flush */
6969 6930 mask = RD_OB_INTR_MASK(instance);
6970 6931
6971 6932 con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
6972 6933 "outbound_intr_mask = 0x%x", mask));
6973 6934 }
6974 6935
6975 6936 static void
6976 6937 disable_intr_ppc(struct mrsas_instance *instance)
6977 6938 {
6978 6939 uint32_t mask;
6979 6940
6980 6941 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));
6981 6942
6982 6943 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
6983 6944 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6984 6945
6985 6946 /* WR_OB_INTR_MASK(0xFFFFFFFF, instance); */
6986 6947 WR_OB_INTR_MASK(OB_INTR_MASK, instance);
6987 6948
6988 6949 con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
6989 6950 "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));
6990 6951
6991 6952 /* dummy read to force PCI flush */
6992 6953 mask = RD_OB_INTR_MASK(instance);
6993 6954 #ifdef lint
6994 6955 mask = mask;
6995 6956 #endif
6996 6957 }
6997 6958
6998 6959 static int
6999 6960 intr_ack_ppc(struct mrsas_instance *instance)
7000 6961 {
7001 6962 uint32_t status;
7002 6963 int ret = DDI_INTR_CLAIMED;
7003 6964
7004 6965 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));
7005 6966
7006 6967 /* check if it is our interrupt */
7007 6968 status = RD_OB_INTR_STATUS(instance);
7008 6969
7009 6970 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));
7010 6971
7011 6972 if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
7012 6973 ret = DDI_INTR_UNCLAIMED;
7013 6974 }
7014 6975
7015 6976 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
7016 6977 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
7017 6978 ret = DDI_INTR_UNCLAIMED;
7018 6979 }
7019 6980
7020 6981 if (ret == DDI_INTR_UNCLAIMED) {
7021 6982 return (ret);
7022 6983 }
7023 6984 /* clear the interrupt by writing back the same value */
7024 6985 WR_OB_DOORBELL_CLEAR(status, instance);
7025 6986
7026 6987 /* dummy READ */
7027 6988 status = RD_OB_INTR_STATUS(instance);
7028 6989
7029 6990 con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));
7030 6991
7031 6992 return (ret);
7032 6993 }
7033 6994
7034 6995 /*
7035 6996 * Marks HBA as bad. This will be called either when an
7036 6997 * IO packet times out even after 3 FW resets
7037 6998 * or FW is found to be fault even after 3 continuous resets.
7038 6999 */
7039 7000
7040 7001 static int
7041 7002 mrsas_kill_adapter(struct mrsas_instance *instance)
7042 7003 {
7043 7004 if (instance->deadadapter == 1)
7044 7005 return (DDI_FAILURE);
7045 7006
7046 7007 con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
7047 7008 "Writing to doorbell with MFI_STOP_ADP "));
7048 7009 mutex_enter(&instance->ocr_flags_mtx);
7049 7010 instance->deadadapter = 1;
7050 7011 mutex_exit(&instance->ocr_flags_mtx);
7051 7012 instance->func_ptr->disable_intr(instance);
7052 7013 WR_IB_DOORBELL(MFI_STOP_ADP, instance);
7053 7014 (void) mrsas_complete_pending_cmds(instance);
7054 7015 return (DDI_SUCCESS);
7055 7016 }
7056 7017
7057 7018
7058 7019 static int
7059 7020 mrsas_reset_ppc(struct mrsas_instance *instance)
7060 7021 {
7061 7022 uint32_t status;
7062 7023 uint32_t retry = 0;
7063 7024 uint32_t cur_abs_reg_val;
7064 7025 uint32_t fw_state;
7065 7026
7066 7027 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
7067 7028
7068 7029 if (instance->deadadapter == 1) {
7069 7030 cmn_err(CE_WARN, "mrsas_reset_ppc: "
7070 7031 "no more resets as HBA has been marked dead ");
7071 7032 return (DDI_FAILURE);
7072 7033 }
7073 7034 mutex_enter(&instance->ocr_flags_mtx);
7074 7035 instance->adapterresetinprogress = 1;
7075 7036 mutex_exit(&instance->ocr_flags_mtx);
7076 7037 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress "
7077 7038 "flag set, time %llx", gethrtime()));
7078 7039
7079 7040 instance->func_ptr->disable_intr(instance);
7080 7041 retry_reset:
7081 7042 WR_IB_WRITE_SEQ(0, instance);
7082 7043 WR_IB_WRITE_SEQ(4, instance);
7083 7044 WR_IB_WRITE_SEQ(0xb, instance);
7084 7045 WR_IB_WRITE_SEQ(2, instance);
7085 7046 WR_IB_WRITE_SEQ(7, instance);
7086 7047 WR_IB_WRITE_SEQ(0xd, instance);
7087 7048 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
7088 7049 "to write sequence register\n"));
7089 7050 delay(100 * drv_usectohz(MILLISEC));
7090 7051 status = RD_OB_DRWE(instance);
7091 7052
7092 7053 while (!(status & DIAG_WRITE_ENABLE)) {
7093 7054 delay(100 * drv_usectohz(MILLISEC));
7094 7055 status = RD_OB_DRWE(instance);
7095 7056 if (retry++ == 100) {
7096 7057 cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit "
7097 7058 "check retry count %d\n", retry);
7098 7059 return (DDI_FAILURE);
7099 7060 }
7100 7061 }
7101 7062 WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
7102 7063 delay(100 * drv_usectohz(MILLISEC));
7103 7064 status = RD_OB_DRWE(instance);
7104 7065 while (status & DIAG_RESET_ADAPTER) {
7105 7066 delay(100 * drv_usectohz(MILLISEC));
7106 7067 status = RD_OB_DRWE(instance);
7107 7068 if (retry++ == 100) {
7108 7069 cmn_err(CE_WARN, "mrsas_reset_ppc: "
7109 7070 "RESET FAILED. KILL adapter called\n.");
7110 7071
7111 7072 (void) mrsas_kill_adapter(instance);
7112 7073 return (DDI_FAILURE);
7113 7074 }
7114 7075 }
7115 7076 con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
7116 7077 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7117 7078 "Calling mfi_state_transition_to_ready"));
7118 7079
7119 7080 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
7120 7081 if (mfi_state_transition_to_ready(instance) ||
7121 7082 debug_fw_faults_after_ocr_g == 1) {
7122 7083 cur_abs_reg_val =
7123 7084 instance->func_ptr->read_fw_status_reg(instance);
7124 7085 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
7125 7086
7126 7087 #ifdef OCRDEBUG
7127 7088 con_log(CL_ANN1, (CE_NOTE,
7128 7089 "mrsas_reset_ppc :before fake: FW is not ready "
7129 7090 "FW state = 0x%x", fw_state));
7130 7091 if (debug_fw_faults_after_ocr_g == 1)
7131 7092 fw_state = MFI_STATE_FAULT;
7132 7093 #endif
7133 7094
7134 7095 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready "
7135 7096 "FW state = 0x%x", fw_state));
7136 7097
7137 7098 if (fw_state == MFI_STATE_FAULT) {
7138 7099 /* increment the count */
7139 7100 instance->fw_fault_count_after_ocr++;
7140 7101 if (instance->fw_fault_count_after_ocr
7141 7102 < MAX_FW_RESET_COUNT) {
7142 7103 cmn_err(CE_WARN, "mrsas_reset_ppc: "
7143 7104 "FW is in fault after OCR count %d "
7144 7105 "Retry Reset",
7145 7106 instance->fw_fault_count_after_ocr);
7146 7107 goto retry_reset;
7147 7108
7148 7109 } else {
7149 7110 cmn_err(CE_WARN, "mrsas_reset_ppc: "
7150 7111 "Max Reset Count exceeded >%d"
7151 7112 "Mark HBA as bad, KILL adapter",
7152 7113 MAX_FW_RESET_COUNT);
7153 7114
7154 7115 (void) mrsas_kill_adapter(instance);
7155 7116 return (DDI_FAILURE);
7156 7117 }
7157 7118 }
7158 7119 }
7159 7120 /* reset the counter as FW is up after OCR */
7160 7121 instance->fw_fault_count_after_ocr = 0;
7161 7122
7162 7123
7163 7124 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
7164 7125 instance->producer, 0);
7165 7126
7166 7127 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
7167 7128 instance->consumer, 0);
7168 7129
7169 7130 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7170 7131 " after resetting produconsumer chck indexs:"
7171 7132 "producer %x consumer %x", *instance->producer,
7172 7133 *instance->consumer));
7173 7134
7174 7135 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7175 7136 "Calling mrsas_issue_init_mfi"));
7176 7137 (void) mrsas_issue_init_mfi(instance);
7177 7138 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7178 7139 "mrsas_issue_init_mfi Done"));
7179 7140
7180 7141 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7181 7142 "Calling mrsas_print_pending_cmd\n"));
7182 7143 (void) mrsas_print_pending_cmds(instance);
|
↓ open down ↓ |
281 lines elided |
↑ open up ↑ |
7183 7144 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7184 7145 "mrsas_print_pending_cmd done\n"));
7185 7146
7186 7147 instance->func_ptr->enable_intr(instance);
7187 7148 instance->fw_outstanding = 0;
7188 7149
7189 7150 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7190 7151 "Calling mrsas_issue_pending_cmds"));
7191 7152 (void) mrsas_issue_pending_cmds(instance);
7192 7153 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7193 - "issue_pending_cmds done.\n"));
7154 + "issue_pending_cmds done.\n"));
7194 7155
7195 7156 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7196 7157 "Calling aen registration"));
7197 7158
7198 7159
7199 7160 instance->aen_cmd->retry_count_for_ocr = 0;
7200 7161 instance->aen_cmd->drv_pkt_time = 0;
7201 7162
7202 7163 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
7203 7164 con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));
7204 7165
7205 7166 mutex_enter(&instance->ocr_flags_mtx);
7206 7167 instance->adapterresetinprogress = 0;
7207 7168 mutex_exit(&instance->ocr_flags_mtx);
7208 7169 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7209 7170 "adpterresetinprogress flag unset"));
7210 7171
7211 7172 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
7212 7173 return (DDI_SUCCESS);
7213 7174 }
7214 7175
7215 7176 /*
7216 7177 * FMA functions.
7217 7178 */
7218 7179 int
7219 7180 mrsas_common_check(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
7220 7181 {
7221 7182 int ret = DDI_SUCCESS;
7222 7183
7223 7184 if (cmd != NULL &&
7224 7185 mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
7225 7186 DDI_SUCCESS) {
7226 7187 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7227 7188 if (cmd->pkt != NULL) {
7228 7189 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7229 7190 cmd->pkt->pkt_statistics = 0;
7230 7191 }
7231 7192 ret = DDI_FAILURE;
7232 7193 }
7233 7194 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
7234 7195 != DDI_SUCCESS) {
7235 7196 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7236 7197 if (cmd != NULL && cmd->pkt != NULL) {
7237 7198 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7238 7199 cmd->pkt->pkt_statistics = 0;
7239 7200 }
7240 7201 ret = DDI_FAILURE;
7241 7202 }
7242 7203 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
7243 7204 DDI_SUCCESS) {
7244 7205 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7245 7206 if (cmd != NULL && cmd->pkt != NULL) {
7246 7207 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7247 7208 cmd->pkt->pkt_statistics = 0;
7248 7209 }
7249 7210 ret = DDI_FAILURE;
7250 7211 }
7251 7212 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
7252 7213 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7253 7214
7254 7215 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
7255 7216
7256 7217 if (cmd != NULL && cmd->pkt != NULL) {
7257 7218 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7258 7219 cmd->pkt->pkt_statistics = 0;
7259 7220 }
7260 7221 ret = DDI_FAILURE;
7261 7222 }
7262 7223
7263 7224 return (ret);
7264 7225 }
7265 7226
7266 7227 /*ARGSUSED*/
7267 7228 static int
7268 7229 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
7269 7230 {
7270 7231 /*
7271 7232 * as the driver can always deal with an error in any dma or
7272 7233 * access handle, we can just return the fme_status value.
7273 7234 */
7274 7235 pci_ereport_post(dip, err, NULL);
7275 7236 return (err->fme_status);
7276 7237 }
7277 7238
7278 7239 static void
7279 7240 mrsas_fm_init(struct mrsas_instance *instance)
7280 7241 {
7281 7242 /* Need to change iblock to priority for new MSI intr */
7282 7243 ddi_iblock_cookie_t fm_ibc;
7283 7244
7284 7245 /* Only register with IO Fault Services if we have some capability */
7285 7246 if (instance->fm_capabilities) {
7286 7247 /* Adjust access and dma attributes for FMA */
7287 7248 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7288 7249 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7289 7250
7290 7251 /*
7291 7252 * Register capabilities with IO Fault Services.
7292 7253 * fm_capabilities will be updated to indicate
7293 7254 * capabilities actually supported (not requested.)
7294 7255 */
7295 7256
7296 7257 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
7297 7258
7298 7259 /*
7299 7260 * Initialize pci ereport capabilities if ereport
7300 7261 * capable (should always be.)
7301 7262 */
7302 7263
7303 7264 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7304 7265 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7305 7266 pci_ereport_setup(instance->dip);
7306 7267 }
7307 7268
7308 7269 /*
7309 7270 * Register error callback if error callback capable.
7310 7271 */
7311 7272 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7312 7273 ddi_fm_handler_register(instance->dip,
7313 7274 mrsas_fm_error_cb, (void*) instance);
7314 7275 }
7315 7276 } else {
7316 7277 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7317 7278 mrsas_generic_dma_attr.dma_attr_flags = 0;
7318 7279 }
7319 7280 }
7320 7281
7321 7282 static void
7322 7283 mrsas_fm_fini(struct mrsas_instance *instance)
7323 7284 {
7324 7285 /* Only unregister FMA capabilities if registered */
7325 7286 if (instance->fm_capabilities) {
7326 7287 /*
7327 7288 * Un-register error callback if error callback capable.
7328 7289 */
7329 7290 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7330 7291 ddi_fm_handler_unregister(instance->dip);
7331 7292 }
7332 7293
7333 7294 /*
7334 7295 * Release any resources allocated by pci_ereport_setup()
7335 7296 */
7336 7297 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7337 7298 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7338 7299 pci_ereport_teardown(instance->dip);
7339 7300 }
7340 7301
7341 7302 /* Unregister from IO Fault Services */
7342 7303 ddi_fm_fini(instance->dip);
7343 7304
7344 7305 /* Adjust access and dma attributes for FMA */
7345 7306 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7346 7307 mrsas_generic_dma_attr.dma_attr_flags = 0;
7347 7308 }
7348 7309 }
7349 7310
7350 7311 int
7351 7312 mrsas_check_acc_handle(ddi_acc_handle_t handle)
7352 7313 {
7353 7314 ddi_fm_error_t de;
7354 7315
7355 7316 if (handle == NULL) {
7356 7317 return (DDI_FAILURE);
7357 7318 }
7358 7319
7359 7320 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7360 7321
7361 7322 return (de.fme_status);
7362 7323 }
7363 7324
7364 7325 int
7365 7326 mrsas_check_dma_handle(ddi_dma_handle_t handle)
7366 7327 {
7367 7328 ddi_fm_error_t de;
7368 7329
7369 7330 if (handle == NULL) {
7370 7331 return (DDI_FAILURE);
7371 7332 }
7372 7333
7373 7334 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7374 7335
7375 7336 return (de.fme_status);
7376 7337 }
7377 7338
7378 7339 void
7379 7340 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail)
7380 7341 {
7381 7342 uint64_t ena;
7382 7343 char buf[FM_MAX_CLASS];
7383 7344
7384 7345 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7385 7346 ena = fm_ena_generate(0, FM_ENA_FMT1);
7386 7347 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
7387 7348 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
7388 7349 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7389 7350 }
7390 7351 }
7391 7352
7392 7353 static int
7393 7354 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
7394 7355 {
7395 7356
7396 7357 dev_info_t *dip = instance->dip;
7397 7358 int avail, actual, count;
7398 7359 int i, flag, ret;
7399 7360
7400 7361 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
7401 7362 intr_type));
7402 7363
7403 7364 /* Get number of interrupts */
7404 7365 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
7405 7366 if ((ret != DDI_SUCCESS) || (count == 0)) {
7406 7367 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
7407 7368 "ret %d count %d", ret, count));
7408 7369
7409 7370 return (DDI_FAILURE);
7410 7371 }
7411 7372
7412 7373 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));
7413 7374
7414 7375 /* Get number of available interrupts */
7415 7376 ret = ddi_intr_get_navail(dip, intr_type, &avail);
7416 7377 if ((ret != DDI_SUCCESS) || (avail == 0)) {
7417 7378 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
7418 7379 "ret %d avail %d", ret, avail));
7419 7380
7420 7381 return (DDI_FAILURE);
7421 7382 }
7422 7383 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));
7423 7384
7424 7385 /* Only one interrupt routine. So limit the count to 1 */
7425 7386 if (count > 1) {
7426 7387 count = 1;
7427 7388 }
7428 7389
7429 7390 /*
7430 7391 * Allocate an array of interrupt handlers. Currently we support
7431 7392 * only one interrupt. The framework can be extended later.
7432 7393 */
7433 7394 instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
7434 7395 instance->intr_htable = kmem_zalloc(instance->intr_htable_size,
7435 7396 KM_SLEEP);
7436 7397 if (instance->intr_htable == NULL) {
7437 7398 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7438 7399 "failed to allocate memory for intr-handle table"));
7439 7400 instance->intr_htable_size = 0;
7440 7401 return (DDI_FAILURE);
7441 7402 }
7442 7403
7443 7404 flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
7444 7405 (intr_type == DDI_INTR_TYPE_MSIX)) ?
7445 7406 DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;
7446 7407
7447 7408 /* Allocate interrupt */
7448 7409 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
7449 7410 count, &actual, flag);
7450 7411
7451 7412 if ((ret != DDI_SUCCESS) || (actual == 0)) {
7452 7413 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7453 7414 "avail = %d", avail));
7454 7415 goto mrsas_free_htable;
7455 7416 }
7456 7417
7457 7418 if (actual < count) {
7458 7419 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7459 7420 "Requested = %d Received = %d", count, actual));
7460 7421 }
7461 7422 instance->intr_cnt = actual;
7462 7423
7463 7424 /*
7464 7425 * Get the priority of the interrupt allocated.
7465 7426 */
7466 7427 if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
7467 7428 &instance->intr_pri)) != DDI_SUCCESS) {
7468 7429 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7469 7430 "get priority call failed"));
7470 7431 goto mrsas_free_handles;
7471 7432 }
7472 7433
7473 7434 /*
7474 7435 * Test for high level mutex. we don't support them.
7475 7436 */
7476 7437 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
7477 7438 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7478 7439 "High level interrupts not supported."));
7479 7440 goto mrsas_free_handles;
7480 7441 }
7481 7442
7482 7443 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
7483 7444 instance->intr_pri));
7484 7445
7485 7446 /* Call ddi_intr_add_handler() */
7486 7447 for (i = 0; i < actual; i++) {
7487 7448 ret = ddi_intr_add_handler(instance->intr_htable[i],
7488 7449 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
7489 7450 (caddr_t)(uintptr_t)i);
7490 7451
7491 7452 if (ret != DDI_SUCCESS) {
7492 7453 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
7493 7454 "failed %d", ret));
7494 7455 goto mrsas_free_handles;
7495 7456 }
7496 7457
7497 7458 }
7498 7459
7499 7460 con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));
7500 7461
7501 7462 if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
7502 7463 &instance->intr_cap)) != DDI_SUCCESS) {
7503 7464 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
7504 7465 ret));
7505 7466 goto mrsas_free_handlers;
7506 7467 }
7507 7468
7508 7469 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7509 7470 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable"));
7510 7471
7511 7472 (void) ddi_intr_block_enable(instance->intr_htable,
7512 7473 instance->intr_cnt);
7513 7474 } else {
7514 7475 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable"));
7515 7476
7516 7477 for (i = 0; i < instance->intr_cnt; i++) {
7517 7478 (void) ddi_intr_enable(instance->intr_htable[i]);
7518 7479 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns "
7519 7480 "%d", i));
7520 7481 }
7521 7482 }
7522 7483
7523 7484 return (DDI_SUCCESS);
7524 7485
7525 7486 mrsas_free_handlers:
7526 7487 for (i = 0; i < actual; i++)
7527 7488 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7528 7489
7529 7490 mrsas_free_handles:
7530 7491 for (i = 0; i < actual; i++)
7531 7492 (void) ddi_intr_free(instance->intr_htable[i]);
7532 7493
7533 7494 mrsas_free_htable:
7534 7495 if (instance->intr_htable != NULL)
7535 7496 kmem_free(instance->intr_htable, instance->intr_htable_size);
7536 7497
7537 7498 instance->intr_htable = NULL;
7538 7499 instance->intr_htable_size = 0;
7539 7500
7540 7501 return (DDI_FAILURE);
7541 7502
7542 7503 }
7543 7504
7544 7505
7545 7506 static void
7546 7507 mrsas_rem_intrs(struct mrsas_instance *instance)
7547 7508 {
7548 7509 int i;
7549 7510
7550 7511 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));
7551 7512
7552 7513 /* Disable all interrupts first */
7553 7514 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7554 7515 (void) ddi_intr_block_disable(instance->intr_htable,
7555 7516 instance->intr_cnt);
7556 7517 } else {
7557 7518 for (i = 0; i < instance->intr_cnt; i++) {
7558 7519 (void) ddi_intr_disable(instance->intr_htable[i]);
7559 7520 }
7560 7521 }
7561 7522
7562 7523 /* Remove all the handlers */
7563 7524
7564 7525 for (i = 0; i < instance->intr_cnt; i++) {
7565 7526 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7566 7527 (void) ddi_intr_free(instance->intr_htable[i]);
7567 7528 }
7568 7529
7569 7530 if (instance->intr_htable != NULL)
7570 7531 kmem_free(instance->intr_htable, instance->intr_htable_size);
7571 7532
7572 7533 instance->intr_htable = NULL;
7573 7534 instance->intr_htable_size = 0;
7574 7535
7575 7536 }
7576 7537
7577 7538 static int
7578 7539 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
7579 7540 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
7580 7541 {
7581 7542 struct mrsas_instance *instance;
7582 7543 int config;
7583 7544 int rval = NDI_SUCCESS;
7584 7545
7585 7546 char *ptr = NULL;
7586 7547 int tgt, lun;
7587 7548
7588 7549 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));
7589 7550
7590 7551 if ((instance = ddi_get_soft_state(mrsas_state,
7591 7552 ddi_get_instance(parent))) == NULL) {
7592 7553 return (NDI_FAILURE);
7593 7554 }
7594 7555
7595 7556 /* Hold nexus during bus_config */
7596 7557 ndi_devi_enter(parent, &config);
7597 7558 switch (op) {
7598 7559 case BUS_CONFIG_ONE: {
7599 7560
7600 7561 /* parse wwid/target name out of name given */
7601 7562 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7602 7563 rval = NDI_FAILURE;
7603 7564 break;
7604 7565 }
7605 7566 ptr++;
7606 7567
7607 7568 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7608 7569 rval = NDI_FAILURE;
7609 7570 break;
7610 7571 }
7611 7572
7612 7573 if (lun == 0) {
7613 7574 rval = mrsas_config_ld(instance, tgt, lun, childp);
7614 7575 #ifdef PDSUPPORT
7615 7576 } else if (instance->tbolt == 1 && lun != 0) {
7616 7577 rval = mrsas_tbolt_config_pd(instance,
7617 7578 tgt, lun, childp);
7618 7579 #endif
7619 7580 } else {
7620 7581 rval = NDI_FAILURE;
7621 7582 }
7622 7583
7623 7584 break;
7624 7585 }
7625 7586 case BUS_CONFIG_DRIVER:
7626 7587 case BUS_CONFIG_ALL: {
7627 7588
7628 7589 rval = mrsas_config_all_devices(instance);
7629 7590
7630 7591 rval = NDI_SUCCESS;
7631 7592 break;
7632 7593 }
7633 7594 }
7634 7595
7635 7596 if (rval == NDI_SUCCESS) {
7636 7597 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7637 7598
7638 7599 }
7639 7600 ndi_devi_exit(parent, config);
7640 7601
7641 7602 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7642 7603 rval));
7643 7604 return (rval);
7644 7605 }
7645 7606
7646 7607 static int
7647 7608 mrsas_config_all_devices(struct mrsas_instance *instance)
7648 7609 {
7649 7610 int rval, tgt;
7650 7611
7651 7612 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7652 7613 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7653 7614
7654 7615 }
7655 7616
7656 7617 #ifdef PDSUPPORT
7657 7618 /* Config PD devices connected to the card */
7658 7619 if (instance->tbolt) {
7659 7620 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7660 7621 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7661 7622 }
7662 7623 }
7663 7624 #endif
7664 7625
7665 7626 rval = NDI_SUCCESS;
7666 7627 return (rval);
7667 7628 }
7668 7629
7669 7630 static int
7670 7631 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7671 7632 {
7672 7633 char devbuf[SCSI_MAXNAMELEN];
7673 7634 char *addr;
7674 7635 char *p, *tp, *lp;
7675 7636 long num;
7676 7637
7677 7638 /* Parse dev name and address */
7678 7639 (void) strcpy(devbuf, devnm);
7679 7640 addr = "";
7680 7641 for (p = devbuf; *p != '\0'; p++) {
7681 7642 if (*p == '@') {
7682 7643 addr = p + 1;
7683 7644 *p = '\0';
7684 7645 } else if (*p == ':') {
7685 7646 *p = '\0';
7686 7647 break;
7687 7648 }
7688 7649 }
7689 7650
7690 7651 /* Parse target and lun */
7691 7652 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7692 7653 if (*p == ',') {
7693 7654 lp = p + 1;
7694 7655 *p = '\0';
7695 7656 break;
7696 7657 }
7697 7658 }
7698 7659 if (tgt && tp) {
7699 7660 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7700 7661 return (DDI_FAILURE); /* Can declare this as constant */
7701 7662 }
7702 7663 *tgt = (int)num;
7703 7664 }
7704 7665 if (lun && lp) {
7705 7666 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7706 7667 return (DDI_FAILURE);
7707 7668 }
7708 7669 *lun = (int)num;
7709 7670 }
7710 7671 return (DDI_SUCCESS); /* Success case */
7711 7672 }
7712 7673
/*
 * mrsas_config_ld - configure the child devinfo node for the logical
 * drive at (tgt, lun).
 *
 * If a node already exists it is either reused (returning it via ldip)
 * or, when the LD is no longer valid, queued for unconfiguration.
 * Otherwise the target is probed via scsi_hba_probe() and a node is
 * created by mrsas_config_scsi_device().  Returns an NDI_* status.
 */
static int
mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
    uint8_t lun, dev_info_t **ldip)
{
	struct scsi_device *sd;
	dev_info_t *child;
	int rval;

	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
	    tgt, lun));

	/* Fast path: a devinfo node for this address already exists. */
	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
		if (ldip) {
			*ldip = child;
		}
		/*
		 * The node is stale (LD no longer valid): queue an async
		 * unconfigure event and report failure for this config.
		 */
		if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
			rval = mrsas_service_evt(instance, tgt, 0,
			    MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN,
			    "mr_sas: DELETING STALE ENTRY rval = %d "
			    "tgt id = %d ", rval, tgt));
			return (NDI_FAILURE);
		}
		return (NDI_SUCCESS);
	}

	/* Build a scsi_device describing the address to probe. */
	sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
	if (sd == NULL) {
		/* NOTE(review): unreachable -- KM_SLEEP never returns NULL. */
		con_log(CL_ANN1, (CE_WARN, "mrsas_config_ld: "
		    "failed to allocate mem for scsi_device"));
		return (NDI_FAILURE);
	}
	sd->sd_address.a_hba_tran = instance->tran;
	sd->sd_address.a_target = (uint16_t)tgt;
	sd->sd_address.a_lun = (uint8_t)lun;

	/* Only create a node when the target answers the probe. */
	if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
		rval = mrsas_config_scsi_device(instance, sd, ldip);
	else
		rval = NDI_FAILURE;

	/* sd_unprobe is blank now. Free buffer manually */
	if (sd->sd_inq) {
		kmem_free(sd->sd_inq, SUN_INQSIZE);
		sd->sd_inq = (struct scsi_inquiry *)NULL;
	}

	kmem_free(sd, sizeof (struct scsi_device));
	con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
	    rval));
	return (rval);
}
7765 7726
/*
 * mrsas_config_scsi_device - create and online a devinfo node for a
 * successfully probed SCSI device.
 *
 * Derives a node name and "compatible" list from the INQUIRY data,
 * allocates the child node under instance->dip, attaches the standard
 * "target"/"lun"/"compatible" properties and brings the node online.
 * When dipp is non-NULL it receives the node pointer.  Returns the NDI_*
 * status of the last step attempted.
 */
int
mrsas_config_scsi_device(struct mrsas_instance *instance,
    struct scsi_device *sd, dev_info_t **dipp)
{
	char *nodename = NULL;
	char **compatible = NULL;
	int ncompatible = 0;
	char *childname;
	dev_info_t *ldip = NULL;
	int tgt = sd->sd_address.a_target;
	int lun = sd->sd_address.a_lun;
	/* Peripheral device type from INQUIRY (disk, tape, ...). */
	int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
	int rval;

	con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
	scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
	    NULL, &nodename, &compatible, &ncompatible);

	/* No known node name means no target driver can bind: give up. */
	if (nodename == NULL) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
		    "for t%dL%d", tgt, lun));
		rval = NDI_FAILURE;
		goto finish;
	}

	/* Direct-access devices always bind to "sd". */
	childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
	con_log(CL_DLEVEL1, (CE_NOTE,
	    "mr_sas: Childname = %2s nodename = %s", childname, nodename));

	/* Create a dev node */
	rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
	con_log(CL_DLEVEL1, (CE_NOTE,
	    "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
	if (rval == NDI_SUCCESS) {
		/*
		 * NOTE(review): on the property failures below the function
		 * jumps to "finish" without ndi_devi_free(ldip), leaving the
		 * freshly allocated node behind (and *dipp pointing at it).
		 * Verify whether that leak is intentional.
		 */
		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
		    DDI_PROP_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
			    "property for t%dl%d target", tgt, lun));
			rval = NDI_FAILURE;
			goto finish;
		}
		if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
		    DDI_PROP_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
			    "property for t%dl%d lun", tgt, lun));
			rval = NDI_FAILURE;
			goto finish;
		}

		if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
		    "compatible", compatible, ncompatible) !=
		    DDI_PROP_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
			    "property for t%dl%d compatible", tgt, lun));
			rval = NDI_FAILURE;
			goto finish;
		}

		/* Attach the node; on failure undo the allocation. */
		rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
		if (rval != NDI_SUCCESS) {
			con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
			    "t%dl%d", tgt, lun));
			ndi_prop_remove_all(ldip);
			(void) ndi_devi_free(ldip);
		} else {
			con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :"
			    "0 t%dl%d", tgt, lun));
		}

	}
finish:
	if (dipp) {
		*dipp = ldip;
	}

	con_log(CL_DLEVEL1, (CE_NOTE,
	    "mr_sas: config_scsi_device rval = %d t%dL%d",
	    rval, tgt, lun));
	scsi_hba_nodename_compatible_free(nodename, compatible);
	return (rval);
}
7847 7808
7848 7809 /*ARGSUSED*/
7849 7810 int
7850 7811 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7851 7812 uint64_t wwn)
7852 7813 {
7853 7814 struct mrsas_eventinfo *mrevt = NULL;
7854 7815
7855 7816 con_log(CL_ANN1, (CE_NOTE,
7856 7817 "mrsas_service_evt called for t%dl%d event = %d",
7857 7818 tgt, lun, event));
7858 7819
7859 7820 if ((instance->taskq == NULL) || (mrevt =
7860 7821 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7861 7822 return (ENOMEM);
7862 7823 }
7863 7824
7864 7825 mrevt->instance = instance;
7865 7826 mrevt->tgt = tgt;
7866 7827 mrevt->lun = lun;
7867 7828 mrevt->event = event;
7868 7829 mrevt->wwn = wwn;
7869 7830
7870 7831 if ((ddi_taskq_dispatch(instance->taskq,
7871 7832 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7872 7833 DDI_SUCCESS) {
7873 7834 con_log(CL_ANN1, (CE_NOTE,
7874 7835 "mr_sas: Event task failed for t%dl%d event = %d",
7875 7836 tgt, lun, event));
7876 7837 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7877 7838 return (DDI_FAILURE);
7878 7839 }
7879 7840 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event);
7880 7841 return (DDI_SUCCESS);
7881 7842 }
7882 7843
7883 7844 static void
7884 7845 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
7885 7846 {
7886 7847 struct mrsas_instance *instance = mrevt->instance;
7887 7848 dev_info_t *dip, *pdip;
7888 7849 int circ1 = 0;
7889 7850 char *devname;
7890 7851
7891 7852 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
7892 7853 " tgt %d lun %d event %d",
7893 7854 mrevt->tgt, mrevt->lun, mrevt->event));
7894 7855
7895 7856 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
7896 7857 mutex_enter(&instance->config_dev_mtx);
7897 7858 dip = instance->mr_ld_list[mrevt->tgt].dip;
7898 7859 mutex_exit(&instance->config_dev_mtx);
7899 7860 #ifdef PDSUPPORT
7900 7861 } else {
7901 7862 mutex_enter(&instance->config_dev_mtx);
7902 7863 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7903 7864 mutex_exit(&instance->config_dev_mtx);
7904 7865 #endif
7905 7866 }
7906 7867
7907 7868
7908 7869 ndi_devi_enter(instance->dip, &circ1);
7909 7870 switch (mrevt->event) {
7910 7871 case MRSAS_EVT_CONFIG_TGT:
7911 7872 if (dip == NULL) {
7912 7873
7913 7874 if (mrevt->lun == 0) {
7914 7875 (void) mrsas_config_ld(instance, mrevt->tgt,
7915 7876 0, NULL);
7916 7877 #ifdef PDSUPPORT
7917 7878 } else if (instance->tbolt) {
7918 7879 (void) mrsas_tbolt_config_pd(instance,
7919 7880 mrevt->tgt,
7920 7881 1, NULL);
7921 7882 #endif
7922 7883 }
7923 7884 con_log(CL_ANN1, (CE_NOTE,
7924 7885 "mr_sas: EVT_CONFIG_TGT called:"
7925 7886 " for tgt %d lun %d event %d",
7926 7887 mrevt->tgt, mrevt->lun, mrevt->event));
7927 7888
7928 7889 } else {
7929 7890 con_log(CL_ANN1, (CE_NOTE,
7930 7891 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7931 7892 " for tgt %d lun %d event %d",
7932 7893 mrevt->tgt, mrevt->lun, mrevt->event));
7933 7894 }
7934 7895 break;
7935 7896 case MRSAS_EVT_UNCONFIG_TGT:
7936 7897 if (dip) {
7937 7898 if (i_ddi_devi_attached(dip)) {
7938 7899
7939 7900 pdip = ddi_get_parent(dip);
7940 7901
7941 7902 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
7942 7903 (void) ddi_deviname(dip, devname);
7943 7904
7944 7905 (void) devfs_clean(pdip, devname + 1,
7945 7906 DV_CLEAN_FORCE);
7946 7907 kmem_free(devname, MAXNAMELEN + 1);
7947 7908 }
7948 7909 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7949 7910 con_log(CL_ANN1, (CE_NOTE,
7950 7911 "mr_sas: EVT_UNCONFIG_TGT called:"
7951 7912 " for tgt %d lun %d event %d",
7952 7913 mrevt->tgt, mrevt->lun, mrevt->event));
7953 7914 } else {
7954 7915 con_log(CL_ANN1, (CE_NOTE,
7955 7916 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7956 7917 " for tgt %d lun %d event %d",
7957 7918 mrevt->tgt, mrevt->lun, mrevt->event));
7958 7919 }
7959 7920 break;
7960 7921 }
7961 7922 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7962 7923 ndi_devi_exit(instance->dip, circ1);
7963 7924 }
7964 7925
7965 7926
7966 7927 int
7967 7928 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7968 7929 {
7969 7930 union scsi_cdb *cdbp;
7970 7931 uint16_t page_code;
7971 7932 struct scsa_cmd *acmd;
7972 7933 struct buf *bp;
7973 7934 struct mode_header *modehdrp;
7974 7935
7975 7936 cdbp = (void *)pkt->pkt_cdbp;
7976 7937 page_code = cdbp->cdb_un.sg.scsi[0];
7977 7938 acmd = PKT2CMD(pkt);
7978 7939 bp = acmd->cmd_buf;
7979 7940 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) {
7980 7941 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7981 7942 /* ADD pkt statistics as Command failed. */
7982 7943 return (NULL);
7983 7944 }
7984 7945
7985 7946 bp_mapin(bp);
7986 7947 bzero(bp->b_un.b_addr, bp->b_bcount);
7987 7948
7988 7949 switch (page_code) {
7989 7950 case 0x3: {
7990 7951 struct mode_format *page3p = NULL;
7991 7952 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7992 7953 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7993 7954
7994 7955 page3p = (void *)((caddr_t)modehdrp +
7995 7956 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7996 7957 page3p->mode_page.code = 0x3;
7997 7958 page3p->mode_page.length =
7998 7959 (uchar_t)(sizeof (struct mode_format));
7999 7960 page3p->data_bytes_sect = 512;
8000 7961 page3p->sect_track = 63;
8001 7962 break;
8002 7963 }
8003 7964 case 0x4: {
8004 7965 struct mode_geometry *page4p = NULL;
8005 7966 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
8006 7967 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
8007 7968
8008 7969 page4p = (void *)((caddr_t)modehdrp +
8009 7970 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
8010 7971 page4p->mode_page.code = 0x4;
8011 7972 page4p->mode_page.length =
8012 7973 (uchar_t)(sizeof (struct mode_geometry));
8013 7974 page4p->heads = 255;
8014 7975 page4p->rpm = 10000;
8015 7976 break;
8016 7977 }
8017 7978 default:
8018 7979 break;
8019 7980 }
8020 7981 return (NULL);
8021 7982 }
|
↓ open down ↓ |
818 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX