Print this page
3500 Support LSI SAS2008 (Falcon) Skinny FW for mr_sas(7D)
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/mr_sas/mr_sas.c
+++ new/usr/src/uts/common/io/mr_sas/mr_sas.c
1 1 /*
2 2 * mr_sas.c: source for mr_sas driver
3 3 *
4 4 * Solaris MegaRAID device driver for SAS2.0 controllers
5 5 * Copyright (c) 2008-2012, LSI Logic Corporation.
6 6 * All rights reserved.
7 7 *
8 8 * Version:
9 9 * Author:
10 10 * Swaminathan K S
11 11 * Arun Chandrashekhar
12 12 * Manju R
13 13 * Rasheed
14 14 * Shakeel Bukhari
15 15 *
16 16 * Redistribution and use in source and binary forms, with or without
17 17 * modification, are permitted provided that the following conditions are met:
18 18 *
19 19 * 1. Redistributions of source code must retain the above copyright notice,
20 20 * this list of conditions and the following disclaimer.
21 21 *
22 22 * 2. Redistributions in binary form must reproduce the above copyright notice,
23 23 * this list of conditions and the following disclaimer in the documentation
24 24 * and/or other materials provided with the distribution.
25 25 *
26 26 * 3. Neither the name of the author nor the names of its contributors may be
27 27 * used to endorse or promote products derived from this software without
28 28 * specific prior written permission.
29 29 *
30 30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 31 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 32 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
33 33 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
34 34 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
35 35 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
36 36 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
|
↓ open down ↓ |
36 lines elided |
↑ open up ↑ |
37 37 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
38 38 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
39 39 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
40 40 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
41 41 * DAMAGE.
42 42 */
43 43
44 44 /*
45 45 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
46 46 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
47 - * Copyright 2012 Nexenta System, Inc. All rights reserved.
47 + * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
48 48 */
49 49
50 50 #include <sys/types.h>
51 51 #include <sys/param.h>
52 52 #include <sys/file.h>
53 53 #include <sys/errno.h>
54 54 #include <sys/open.h>
55 55 #include <sys/cred.h>
56 56 #include <sys/modctl.h>
57 57 #include <sys/conf.h>
58 58 #include <sys/devops.h>
59 59 #include <sys/cmn_err.h>
60 60 #include <sys/kmem.h>
61 61 #include <sys/stat.h>
62 62 #include <sys/mkdev.h>
63 63 #include <sys/pci.h>
64 64 #include <sys/scsi/scsi.h>
65 65 #include <sys/ddi.h>
66 66 #include <sys/sunddi.h>
67 67 #include <sys/atomic.h>
68 68 #include <sys/signal.h>
69 69 #include <sys/byteorder.h>
70 70 #include <sys/sdt.h>
71 71 #include <sys/fs/dv_node.h> /* devfs_clean */
72 72
|
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
73 73 #include "mr_sas.h"
74 74
75 75 /*
76 76 * FMA header files
77 77 */
78 78 #include <sys/ddifm.h>
79 79 #include <sys/fm/protocol.h>
80 80 #include <sys/fm/util.h>
81 81 #include <sys/fm/io/ddi.h>
82 82
83 +/* Macros to help Skinny and stock 2108/MFI live together. */
84 +#define WR_IB_PICK_QPORT(addr, instance) \
85 + if ((instance)->skinny) { \
86 + WR_IB_LOW_QPORT((addr), (instance)); \
87 + WR_IB_HIGH_QPORT(0, (instance)); \
88 + } else { \
89 + WR_IB_QPORT((addr), (instance)); \
90 + }
91 +
83 92 /*
84 93 * Local static data
85 94 */
86 95 static void *mrsas_state = NULL;
87 96 static volatile boolean_t mrsas_relaxed_ordering = B_TRUE;
88 97 volatile int debug_level_g = CL_NONE;
89 98 static volatile int msi_enable = 1;
90 99 static volatile int ctio_enable = 1;
91 100
92 101 /* Default Timeout value to issue online controller reset */
93 102 volatile int debug_timeout_g = 0xF0; /* 0xB4; */
94 103 /* Simulate consecutive firmware fault */
95 104 static volatile int debug_fw_faults_after_ocr_g = 0;
96 105 #ifdef OCRDEBUG
97 106 /* Simulate three consecutive timeout for an IO */
98 107 static volatile int debug_consecutive_timeout_after_ocr_g = 0;
99 108 #endif
100 109
101 110 #pragma weak scsi_hba_open
102 111 #pragma weak scsi_hba_close
103 112 #pragma weak scsi_hba_ioctl
104 113
105 114 /* Local static prototypes. */
106 115 static int mrsas_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
107 116 static int mrsas_attach(dev_info_t *, ddi_attach_cmd_t);
108 117 #ifdef __sparc
109 118 static int mrsas_reset(dev_info_t *, ddi_reset_cmd_t);
110 119 #else
111 120 static int mrsas_quiesce(dev_info_t *);
112 121 #endif
113 122 static int mrsas_detach(dev_info_t *, ddi_detach_cmd_t);
114 123 static int mrsas_open(dev_t *, int, int, cred_t *);
115 124 static int mrsas_close(dev_t, int, int, cred_t *);
116 125 static int mrsas_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
117 126
118 127 static int mrsas_tran_tgt_init(dev_info_t *, dev_info_t *,
119 128 scsi_hba_tran_t *, struct scsi_device *);
120 129 static struct scsi_pkt *mrsas_tran_init_pkt(struct scsi_address *, register
121 130 struct scsi_pkt *, struct buf *, int, int, int, int,
122 131 int (*)(), caddr_t);
123 132 static int mrsas_tran_start(struct scsi_address *,
124 133 register struct scsi_pkt *);
125 134 static int mrsas_tran_abort(struct scsi_address *, struct scsi_pkt *);
126 135 static int mrsas_tran_reset(struct scsi_address *, int);
127 136 static int mrsas_tran_getcap(struct scsi_address *, char *, int);
|
↓ open down ↓ |
35 lines elided |
↑ open up ↑ |
128 137 static int mrsas_tran_setcap(struct scsi_address *, char *, int, int);
129 138 static void mrsas_tran_destroy_pkt(struct scsi_address *,
130 139 struct scsi_pkt *);
131 140 static void mrsas_tran_dmafree(struct scsi_address *, struct scsi_pkt *);
132 141 static void mrsas_tran_sync_pkt(struct scsi_address *, struct scsi_pkt *);
133 142 static int mrsas_tran_quiesce(dev_info_t *dip);
134 143 static int mrsas_tran_unquiesce(dev_info_t *dip);
135 144 static uint_t mrsas_isr();
136 145 static uint_t mrsas_softintr();
137 146 static void mrsas_undo_resources(dev_info_t *, struct mrsas_instance *);
138 -static struct mrsas_cmd *get_mfi_pkt(struct mrsas_instance *);
139 -static void return_mfi_pkt(struct mrsas_instance *,
140 - struct mrsas_cmd *);
141 147
142 148 static void free_space_for_mfi(struct mrsas_instance *);
143 149 static uint32_t read_fw_status_reg_ppc(struct mrsas_instance *);
144 150 static void issue_cmd_ppc(struct mrsas_cmd *, struct mrsas_instance *);
145 151 static int issue_cmd_in_poll_mode_ppc(struct mrsas_instance *,
146 152 struct mrsas_cmd *);
147 153 static int issue_cmd_in_sync_mode_ppc(struct mrsas_instance *,
148 154 struct mrsas_cmd *);
149 155 static void enable_intr_ppc(struct mrsas_instance *);
150 156 static void disable_intr_ppc(struct mrsas_instance *);
151 157 static int intr_ack_ppc(struct mrsas_instance *);
152 158 static void flush_cache(struct mrsas_instance *instance);
153 159 void display_scsi_inquiry(caddr_t);
154 160 static int start_mfi_aen(struct mrsas_instance *instance);
155 161 static int handle_drv_ioctl(struct mrsas_instance *instance,
156 162 struct mrsas_ioctl *ioctl, int mode);
157 163 static int handle_mfi_ioctl(struct mrsas_instance *instance,
158 164 struct mrsas_ioctl *ioctl, int mode);
159 165 static int handle_mfi_aen(struct mrsas_instance *instance,
160 166 struct mrsas_aen *aen);
161 167 static struct mrsas_cmd *build_cmd(struct mrsas_instance *,
162 168 struct scsi_address *, struct scsi_pkt *, uchar_t *);
163 169 static int alloc_additional_dma_buffer(struct mrsas_instance *);
164 170 static void complete_cmd_in_sync_mode(struct mrsas_instance *,
165 171 struct mrsas_cmd *);
166 172 static int mrsas_kill_adapter(struct mrsas_instance *);
167 173 static int mrsas_issue_init_mfi(struct mrsas_instance *);
168 174 static int mrsas_reset_ppc(struct mrsas_instance *);
169 175 static uint32_t mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *);
170 176 static int wait_for_outstanding(struct mrsas_instance *instance);
171 177 static int register_mfi_aen(struct mrsas_instance *instance,
172 178 uint32_t seq_num, uint32_t class_locale_word);
173 179 static int issue_mfi_pthru(struct mrsas_instance *instance, struct
174 180 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
175 181 static int issue_mfi_dcmd(struct mrsas_instance *instance, struct
176 182 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
177 183 static int issue_mfi_smp(struct mrsas_instance *instance, struct
178 184 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
179 185 static int issue_mfi_stp(struct mrsas_instance *instance, struct
180 186 mrsas_ioctl *ioctl, struct mrsas_cmd *cmd, int mode);
181 187 static int abort_aen_cmd(struct mrsas_instance *instance,
182 188 struct mrsas_cmd *cmd_to_abort);
183 189
184 190 static void mrsas_rem_intrs(struct mrsas_instance *instance);
185 191 static int mrsas_add_intrs(struct mrsas_instance *instance, int intr_type);
186 192
187 193 static void mrsas_tran_tgt_free(dev_info_t *, dev_info_t *,
188 194 scsi_hba_tran_t *, struct scsi_device *);
189 195 static int mrsas_tran_bus_config(dev_info_t *, uint_t,
190 196 ddi_bus_config_op_t, void *, dev_info_t **);
191 197 static int mrsas_parse_devname(char *, int *, int *);
192 198 static int mrsas_config_all_devices(struct mrsas_instance *);
193 199 static int mrsas_config_ld(struct mrsas_instance *, uint16_t,
194 200 uint8_t, dev_info_t **);
195 201 static int mrsas_name_node(dev_info_t *, char *, int);
196 202 static void mrsas_issue_evt_taskq(struct mrsas_eventinfo *);
197 203 static void free_additional_dma_buffer(struct mrsas_instance *);
198 204 static void io_timeout_checker(void *);
199 205 static void mrsas_fm_init(struct mrsas_instance *);
200 206 static void mrsas_fm_fini(struct mrsas_instance *);
201 207
202 208 static struct mrsas_function_template mrsas_function_template_ppc = {
203 209 .read_fw_status_reg = read_fw_status_reg_ppc,
204 210 .issue_cmd = issue_cmd_ppc,
205 211 .issue_cmd_in_sync_mode = issue_cmd_in_sync_mode_ppc,
206 212 .issue_cmd_in_poll_mode = issue_cmd_in_poll_mode_ppc,
207 213 .enable_intr = enable_intr_ppc,
208 214 .disable_intr = disable_intr_ppc,
209 215 .intr_ack = intr_ack_ppc,
210 216 .init_adapter = mrsas_init_adapter_ppc
211 217 };
212 218
213 219
214 220 static struct mrsas_function_template mrsas_function_template_fusion = {
215 221 .read_fw_status_reg = tbolt_read_fw_status_reg,
216 222 .issue_cmd = tbolt_issue_cmd,
217 223 .issue_cmd_in_sync_mode = tbolt_issue_cmd_in_sync_mode,
218 224 .issue_cmd_in_poll_mode = tbolt_issue_cmd_in_poll_mode,
219 225 .enable_intr = tbolt_enable_intr,
220 226 .disable_intr = tbolt_disable_intr,
221 227 .intr_ack = tbolt_intr_ack,
222 228 .init_adapter = mrsas_init_adapter_tbolt
223 229 };
224 230
225 231
226 232 ddi_dma_attr_t mrsas_generic_dma_attr = {
227 233 DMA_ATTR_V0, /* dma_attr_version */
228 234 0, /* low DMA address range */
229 235 0xFFFFFFFFU, /* high DMA address range */
230 236 0xFFFFFFFFU, /* DMA counter register */
231 237 8, /* DMA address alignment */
232 238 0x07, /* DMA burstsizes */
233 239 1, /* min DMA size */
234 240 0xFFFFFFFFU, /* max DMA size */
235 241 0xFFFFFFFFU, /* segment boundary */
236 242 MRSAS_MAX_SGE_CNT, /* dma_attr_sglen */
237 243 512, /* granularity of device */
238 244 0 /* bus specific DMA flags */
239 245 };
240 246
241 247 int32_t mrsas_max_cap_maxxfer = 0x1000000;
242 248
243 249 /*
244 250 * Fix for: Thunderbolt controller IO timeout when IO write size is 1MEG,
245 251 * Limit size to 256K
246 252 */
247 253 uint32_t mrsas_tbolt_max_cap_maxxfer = (512 * 512);
248 254
249 255 /*
250 256 * cb_ops contains base level routines
251 257 */
252 258 static struct cb_ops mrsas_cb_ops = {
253 259 mrsas_open, /* open */
254 260 mrsas_close, /* close */
255 261 nodev, /* strategy */
256 262 nodev, /* print */
257 263 nodev, /* dump */
258 264 nodev, /* read */
259 265 nodev, /* write */
260 266 mrsas_ioctl, /* ioctl */
261 267 nodev, /* devmap */
262 268 nodev, /* mmap */
263 269 nodev, /* segmap */
264 270 nochpoll, /* poll */
265 271 nodev, /* cb_prop_op */
266 272 0, /* streamtab */
267 273 D_NEW | D_HOTPLUG, /* cb_flag */
268 274 CB_REV, /* cb_rev */
269 275 nodev, /* cb_aread */
270 276 nodev /* cb_awrite */
271 277 };
272 278
273 279 /*
274 280 * dev_ops contains configuration routines
275 281 */
276 282 static struct dev_ops mrsas_ops = {
277 283 DEVO_REV, /* rev, */
278 284 0, /* refcnt */
279 285 mrsas_getinfo, /* getinfo */
280 286 nulldev, /* identify */
281 287 nulldev, /* probe */
282 288 mrsas_attach, /* attach */
283 289 mrsas_detach, /* detach */
284 290 #ifdef __sparc
285 291 mrsas_reset, /* reset */
286 292 #else /* __sparc */
287 293 nodev,
288 294 #endif /* __sparc */
289 295 &mrsas_cb_ops, /* char/block ops */
290 296 NULL, /* bus ops */
291 297 NULL, /* power */
292 298 #ifdef __sparc
293 299 ddi_quiesce_not_needed
294 300 #else /* __sparc */
295 301 mrsas_quiesce /* quiesce */
296 302 #endif /* __sparc */
297 303 };
298 304
299 305 static struct modldrv modldrv = {
300 306 &mod_driverops, /* module type - driver */
301 307 MRSAS_VERSION,
302 308 &mrsas_ops, /* driver ops */
303 309 };
304 310
305 311 static struct modlinkage modlinkage = {
306 312 MODREV_1, /* ml_rev - must be MODREV_1 */
307 313 &modldrv, /* ml_linkage */
308 314 NULL /* end of driver linkage */
309 315 };
310 316
311 317 static struct ddi_device_acc_attr endian_attr = {
312 318 DDI_DEVICE_ATTR_V1,
313 319 DDI_STRUCTURE_LE_ACC,
314 320 DDI_STRICTORDER_ACC,
315 321 DDI_DEFAULT_ACC
316 322 };
317 323
318 324 /* Use the LSI Fast Path for the 2208 (tbolt) commands. */
319 325 unsigned int enable_fp = 1;
320 326
321 327
322 328 /*
323 329 * ************************************************************************** *
324 330 * *
325 331 * common entry points - for loadable kernel modules *
326 332 * *
327 333 * ************************************************************************** *
328 334 */
329 335
330 336 /*
331 337 * _init - initialize a loadable module
332 338 * @void
333 339 *
334 340 * The driver should perform any one-time resource allocation or data
335 341 * initialization during driver loading in _init(). For example, the driver
336 342 * should initialize any mutexes global to the driver in this routine.
337 343 * The driver should not, however, use _init() to allocate or initialize
338 344 * anything that has to do with a particular instance of the device.
339 345 * Per-instance initialization must be done in attach().
340 346 */
341 347 int
342 348 _init(void)
343 349 {
344 350 int ret;
345 351
346 352 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
347 353
348 354 ret = ddi_soft_state_init(&mrsas_state,
349 355 sizeof (struct mrsas_instance), 0);
350 356
351 357 if (ret != DDI_SUCCESS) {
352 358 cmn_err(CE_WARN, "mr_sas: could not init state");
353 359 return (ret);
354 360 }
355 361
356 362 if ((ret = scsi_hba_init(&modlinkage)) != DDI_SUCCESS) {
357 363 cmn_err(CE_WARN, "mr_sas: could not init scsi hba");
358 364 ddi_soft_state_fini(&mrsas_state);
359 365 return (ret);
360 366 }
361 367
362 368 ret = mod_install(&modlinkage);
363 369
364 370 if (ret != DDI_SUCCESS) {
365 371 cmn_err(CE_WARN, "mr_sas: mod_install failed");
366 372 scsi_hba_fini(&modlinkage);
367 373 ddi_soft_state_fini(&mrsas_state);
368 374 }
369 375
370 376 return (ret);
371 377 }
372 378
373 379 /*
374 380 * _info - returns information about a loadable module.
375 381 * @void
376 382 *
377 383 * _info() is called to return module information. This is a typical entry
378 384 * point that does predefined role. It simply calls mod_info().
379 385 */
380 386 int
381 387 _info(struct modinfo *modinfop)
382 388 {
383 389 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
384 390
385 391 return (mod_info(&modlinkage, modinfop));
386 392 }
387 393
388 394 /*
389 395 * _fini - prepare a loadable module for unloading
390 396 * @void
391 397 *
392 398 * In _fini(), the driver should release any resources that were allocated in
393 399 * _init(). The driver must remove itself from the system module list.
394 400 */
395 401 int
396 402 _fini(void)
397 403 {
398 404 int ret;
399 405
400 406 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
401 407
402 408 if ((ret = mod_remove(&modlinkage)) != DDI_SUCCESS) {
403 409 con_log(CL_ANN1,
404 410 (CE_WARN, "_fini: mod_remove() failed, error 0x%X", ret));
405 411 return (ret);
406 412 }
407 413
408 414 scsi_hba_fini(&modlinkage);
409 415 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: scsi_hba_fini() done."));
410 416
411 417 ddi_soft_state_fini(&mrsas_state);
412 418 con_log(CL_DLEVEL1, (CE_NOTE, "_fini: ddi_soft_state_fini() done."));
413 419
414 420 return (ret);
415 421 }
416 422
417 423
418 424 /*
419 425 * ************************************************************************** *
420 426 * *
421 427 * common entry points - for autoconfiguration *
422 428 * *
423 429 * ************************************************************************** *
424 430 */
425 431 /*
426 432 * attach - adds a device to the system as part of initialization
427 433 * @dip:
428 434 * @cmd:
429 435 *
430 436 * The kernel calls a driver's attach() entry point to attach an instance of
431 437 * a device (for MegaRAID, it is instance of a controller) or to resume
432 438 * operation for an instance of a device that has been suspended or has been
433 439 * shut down by the power management framework
434 440 * The attach() entry point typically includes the following types of
435 441 * processing:
436 442 * - allocate a soft-state structure for the device instance (for MegaRAID,
437 443 * controller instance)
438 444 * - initialize per-instance mutexes
439 445 * - initialize condition variables
440 446 * - register the device's interrupts (for MegaRAID, controller's interrupts)
441 447 * - map the registers and memory of the device instance (for MegaRAID,
442 448 * controller instance)
443 449 * - create minor device nodes for the device instance (for MegaRAID,
444 450 * controller instance)
445 451 * - report that the device instance (for MegaRAID, controller instance) has
446 452 * attached
447 453 */
448 454 static int
449 455 mrsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
450 456 {
451 457 int instance_no;
452 458 int nregs;
453 459 int i = 0;
454 460 uint8_t irq;
455 461 uint16_t vendor_id;
456 462 uint16_t device_id;
457 463 uint16_t subsysvid;
458 464 uint16_t subsysid;
459 465 uint16_t command;
460 466 off_t reglength = 0;
461 467 int intr_types = 0;
462 468 char *data;
463 469
464 470 scsi_hba_tran_t *tran;
465 471 ddi_dma_attr_t tran_dma_attr;
466 472 struct mrsas_instance *instance;
467 473
468 474 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
469 475
470 476 /* CONSTCOND */
471 477 ASSERT(NO_COMPETING_THREADS);
472 478
473 479 instance_no = ddi_get_instance(dip);
474 480
475 481 /*
476 482 * check to see whether this device is in a DMA-capable slot.
477 483 */
478 484 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
479 485 cmn_err(CE_WARN,
480 486 "mr_sas%d: Device in slave-only slot, unused",
481 487 instance_no);
482 488 return (DDI_FAILURE);
483 489 }
484 490
485 491 switch (cmd) {
486 492 case DDI_ATTACH:
487 493 /* allocate the soft state for the instance */
488 494 if (ddi_soft_state_zalloc(mrsas_state, instance_no)
489 495 != DDI_SUCCESS) {
490 496 cmn_err(CE_WARN,
491 497 "mr_sas%d: Failed to allocate soft state",
492 498 instance_no);
493 499 return (DDI_FAILURE);
494 500 }
495 501
496 502 instance = (struct mrsas_instance *)ddi_get_soft_state
497 503 (mrsas_state, instance_no);
498 504
499 505 if (instance == NULL) {
500 506 cmn_err(CE_WARN,
501 507 "mr_sas%d: Bad soft state", instance_no);
502 508 ddi_soft_state_free(mrsas_state, instance_no);
503 509 return (DDI_FAILURE);
504 510 }
505 511
506 512 instance->unroll.softs = 1;
507 513
508 514 /* Setup the PCI configuration space handles */
509 515 if (pci_config_setup(dip, &instance->pci_handle) !=
510 516 DDI_SUCCESS) {
511 517 cmn_err(CE_WARN,
512 518 "mr_sas%d: pci config setup failed ",
513 519 instance_no);
514 520
515 521 ddi_soft_state_free(mrsas_state, instance_no);
516 522 return (DDI_FAILURE);
517 523 }
518 524
519 525 if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS) {
520 526 cmn_err(CE_WARN,
521 527 "mr_sas: failed to get registers.");
522 528
523 529 pci_config_teardown(&instance->pci_handle);
524 530 ddi_soft_state_free(mrsas_state, instance_no);
525 531 return (DDI_FAILURE);
526 532 }
527 533
528 534 vendor_id = pci_config_get16(instance->pci_handle,
529 535 PCI_CONF_VENID);
530 536 device_id = pci_config_get16(instance->pci_handle,
531 537 PCI_CONF_DEVID);
532 538
533 539 subsysvid = pci_config_get16(instance->pci_handle,
534 540 PCI_CONF_SUBVENID);
535 541 subsysid = pci_config_get16(instance->pci_handle,
536 542 PCI_CONF_SUBSYSID);
537 543
538 544 pci_config_put16(instance->pci_handle, PCI_CONF_COMM,
539 545 (pci_config_get16(instance->pci_handle,
540 546 PCI_CONF_COMM) | PCI_COMM_ME));
541 547 irq = pci_config_get8(instance->pci_handle,
542 548 PCI_CONF_ILINE);
543 549
544 550 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
545 551 "0x%x:0x%x 0x%x:0x%x, irq:%d drv-ver:%s",
546 552 instance_no, vendor_id, device_id, subsysvid,
547 553 subsysid, irq, MRSAS_VERSION));
548 554
549 555 /* enable bus-mastering */
550 556 command = pci_config_get16(instance->pci_handle,
551 557 PCI_CONF_COMM);
552 558
553 559 if (!(command & PCI_COMM_ME)) {
554 560 command |= PCI_COMM_ME;
555 561
556 562 pci_config_put16(instance->pci_handle,
557 563 PCI_CONF_COMM, command);
558 564
559 565 con_log(CL_ANN, (CE_CONT, "mr_sas%d: "
560 566 "enable bus-mastering", instance_no));
561 567 } else {
562 568 con_log(CL_DLEVEL1, (CE_CONT, "mr_sas%d: "
563 569 "bus-mastering already set", instance_no));
564 570 }
565 571
566 572 /* initialize function pointers */
567 573 switch (device_id) {
|
↓ open down ↓ |
417 lines elided |
↑ open up ↑ |
568 574 case PCI_DEVICE_ID_LSI_TBOLT:
569 575 case PCI_DEVICE_ID_LSI_INVADER:
570 576 con_log(CL_ANN, (CE_NOTE,
571 577 "mr_sas: 2208 T.B. device detected"));
572 578
573 579 instance->func_ptr =
574 580 &mrsas_function_template_fusion;
575 581 instance->tbolt = 1;
576 582 break;
577 583
584 + case PCI_DEVICE_ID_LSI_SKINNY:
585 + case PCI_DEVICE_ID_LSI_SKINNY_NEW:
586 + /*
587 + * FALLTHRU to PPC-style functions, but mark this
588 + * instance as Skinny, because the register set is
589 + * slightly different (See WR_IB_PICK_QPORT), and
590 + * certain other features are available to a Skinny
591 + * HBA.
592 + */
593 + instance->skinny = 1;
594 + /* FALLTHRU */
595 +
578 596 case PCI_DEVICE_ID_LSI_2108VDE:
579 597 case PCI_DEVICE_ID_LSI_2108V:
580 598 con_log(CL_ANN, (CE_NOTE,
581 599 "mr_sas: 2108 Liberator device detected"));
582 600
583 601 instance->func_ptr =
584 602 &mrsas_function_template_ppc;
585 603 break;
586 604
587 605 default:
588 606 cmn_err(CE_WARN,
589 607 "mr_sas: Invalid device detected");
590 608
591 609 pci_config_teardown(&instance->pci_handle);
592 610 ddi_soft_state_free(mrsas_state, instance_no);
593 611 return (DDI_FAILURE);
594 612 }
595 613
596 614 instance->baseaddress = pci_config_get32(
597 615 instance->pci_handle, PCI_CONF_BASE0);
598 616 instance->baseaddress &= 0x0fffc;
599 617
600 618 instance->dip = dip;
601 619 instance->vendor_id = vendor_id;
602 620 instance->device_id = device_id;
603 621 instance->subsysvid = subsysvid;
604 622 instance->subsysid = subsysid;
605 623 instance->instance = instance_no;
606 624
607 625 /* Initialize FMA */
608 626 instance->fm_capabilities = ddi_prop_get_int(
609 627 DDI_DEV_T_ANY, instance->dip, DDI_PROP_DONTPASS,
610 628 "fm-capable", DDI_FM_EREPORT_CAPABLE |
611 629 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE
612 630 | DDI_FM_ERRCB_CAPABLE);
613 631
614 632 mrsas_fm_init(instance);
615 633
616 634 /* Setup register map */
617 635 if ((ddi_dev_regsize(instance->dip,
618 636 REGISTER_SET_IO_2108, ®length) != DDI_SUCCESS) ||
619 637 reglength < MINIMUM_MFI_MEM_SZ) {
620 638 goto fail_attach;
621 639 }
622 640 if (reglength > DEFAULT_MFI_MEM_SZ) {
623 641 reglength = DEFAULT_MFI_MEM_SZ;
624 642 con_log(CL_DLEVEL1, (CE_NOTE,
625 643 "mr_sas: register length to map is 0x%lx bytes",
626 644 reglength));
627 645 }
628 646 if (ddi_regs_map_setup(instance->dip,
629 647 REGISTER_SET_IO_2108, &instance->regmap, 0,
630 648 reglength, &endian_attr, &instance->regmap_handle)
631 649 != DDI_SUCCESS) {
632 650 cmn_err(CE_WARN,
633 651 "mr_sas: couldn't map control registers");
634 652 goto fail_attach;
635 653 }
636 654
637 655 instance->unroll.regs = 1;
638 656
639 657 /*
640 658 * Disable Interrupt Now.
641 659 * Setup Software interrupt
642 660 */
643 661 instance->func_ptr->disable_intr(instance);
644 662
645 663 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
646 664 "mrsas-enable-msi", &data) == DDI_SUCCESS) {
647 665 if (strncmp(data, "no", 3) == 0) {
648 666 msi_enable = 0;
649 667 con_log(CL_ANN1, (CE_WARN,
650 668 "msi_enable = %d disabled", msi_enable));
651 669 }
652 670 ddi_prop_free(data);
653 671 }
654 672
655 673 con_log(CL_DLEVEL1, (CE_NOTE, "msi_enable = %d", msi_enable));
656 674
657 675 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
658 676 "mrsas-enable-fp", &data) == DDI_SUCCESS) {
659 677 if (strncmp(data, "no", 3) == 0) {
660 678 enable_fp = 0;
661 679 cmn_err(CE_NOTE,
662 680 "enable_fp = %d, Fast-Path disabled.\n",
663 681 enable_fp);
664 682 }
665 683
666 684 ddi_prop_free(data);
667 685 }
668 686
669 687 con_log(CL_DLEVEL1, (CE_NOTE, "enable_fp = %d\n", enable_fp));
670 688
671 689 /* Check for all supported interrupt types */
672 690 if (ddi_intr_get_supported_types(
673 691 dip, &intr_types) != DDI_SUCCESS) {
674 692 cmn_err(CE_WARN,
675 693 "ddi_intr_get_supported_types() failed");
676 694 goto fail_attach;
677 695 }
678 696
679 697 con_log(CL_DLEVEL1, (CE_NOTE,
680 698 "ddi_intr_get_supported_types() ret: 0x%x", intr_types));
681 699
682 700 /* Initialize and Setup Interrupt handler */
683 701 if (msi_enable && (intr_types & DDI_INTR_TYPE_MSIX)) {
684 702 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSIX) !=
685 703 DDI_SUCCESS) {
686 704 cmn_err(CE_WARN,
687 705 "MSIX interrupt query failed");
688 706 goto fail_attach;
689 707 }
690 708 instance->intr_type = DDI_INTR_TYPE_MSIX;
691 709 } else if (msi_enable && (intr_types & DDI_INTR_TYPE_MSI)) {
692 710 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_MSI) !=
693 711 DDI_SUCCESS) {
694 712 cmn_err(CE_WARN,
695 713 "MSI interrupt query failed");
696 714 goto fail_attach;
697 715 }
698 716 instance->intr_type = DDI_INTR_TYPE_MSI;
699 717 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
700 718 msi_enable = 0;
701 719 if (mrsas_add_intrs(instance, DDI_INTR_TYPE_FIXED) !=
702 720 DDI_SUCCESS) {
703 721 cmn_err(CE_WARN,
704 722 "FIXED interrupt query failed");
705 723 goto fail_attach;
706 724 }
707 725 instance->intr_type = DDI_INTR_TYPE_FIXED;
708 726 } else {
709 727 cmn_err(CE_WARN, "Device cannot "
710 728 "suppport either FIXED or MSI/X "
711 729 "interrupts");
712 730 goto fail_attach;
713 731 }
714 732
715 733 instance->unroll.intr = 1;
716 734
717 735 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
718 736 "mrsas-enable-ctio", &data) == DDI_SUCCESS) {
719 737 if (strncmp(data, "no", 3) == 0) {
720 738 ctio_enable = 0;
721 739 con_log(CL_ANN1, (CE_WARN,
722 740 "ctio_enable = %d disabled", ctio_enable));
723 741 }
724 742 ddi_prop_free(data);
725 743 }
726 744
727 745 con_log(CL_DLEVEL1, (CE_WARN, "ctio_enable = %d", ctio_enable));
728 746
729 747 /* setup the mfi based low level driver */
730 748 if (mrsas_init_adapter(instance) != DDI_SUCCESS) {
731 749 cmn_err(CE_WARN, "mr_sas: "
732 750 "could not initialize the low level driver");
733 751
734 752 goto fail_attach;
735 753 }
736 754
737 755 /* Initialize all Mutex */
738 756 INIT_LIST_HEAD(&instance->completed_pool_list);
739 757 mutex_init(&instance->completed_pool_mtx, NULL,
740 758 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
741 759
742 760 mutex_init(&instance->sync_map_mtx, NULL,
743 761 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
744 762
745 763 mutex_init(&instance->app_cmd_pool_mtx, NULL,
746 764 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
747 765
748 766 mutex_init(&instance->config_dev_mtx, NULL,
749 767 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
750 768
751 769 mutex_init(&instance->cmd_pend_mtx, NULL,
752 770 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
753 771
754 772 mutex_init(&instance->ocr_flags_mtx, NULL,
755 773 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
756 774
757 775 mutex_init(&instance->int_cmd_mtx, NULL,
758 776 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
759 777 cv_init(&instance->int_cmd_cv, NULL, CV_DRIVER, NULL);
760 778
761 779 mutex_init(&instance->cmd_pool_mtx, NULL,
762 780 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
763 781
764 782 mutex_init(&instance->reg_write_mtx, NULL,
765 783 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
766 784
767 785 if (instance->tbolt) {
768 786 mutex_init(&instance->cmd_app_pool_mtx, NULL,
769 787 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
770 788
771 789 mutex_init(&instance->chip_mtx, NULL,
772 790 MUTEX_DRIVER, DDI_INTR_PRI(instance->intr_pri));
773 791
774 792 }
775 793
776 794 instance->unroll.mutexs = 1;
777 795
778 796 instance->timeout_id = (timeout_id_t)-1;
779 797
780 798 /* Register our soft-isr for highlevel interrupts. */
781 799 instance->isr_level = instance->intr_pri;
782 800 if (!(instance->tbolt)) {
783 801 if (instance->isr_level == HIGH_LEVEL_INTR) {
784 802 if (ddi_add_softintr(dip,
785 803 DDI_SOFTINT_HIGH,
786 804 &instance->soft_intr_id, NULL, NULL,
787 805 mrsas_softintr, (caddr_t)instance) !=
788 806 DDI_SUCCESS) {
789 807 cmn_err(CE_WARN,
790 808 "Software ISR did not register");
791 809
792 810 goto fail_attach;
793 811 }
794 812
795 813 instance->unroll.soft_isr = 1;
796 814
797 815 }
798 816 }
799 817
800 818 instance->softint_running = 0;
801 819
802 820 /* Allocate a transport structure */
803 821 tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
804 822
805 823 if (tran == NULL) {
806 824 cmn_err(CE_WARN,
807 825 "scsi_hba_tran_alloc failed");
|
↓ open down ↓ |
220 lines elided |
↑ open up ↑ |
808 826 goto fail_attach;
809 827 }
810 828
811 829 instance->tran = tran;
812 830 instance->unroll.tran = 1;
813 831
814 832 tran->tran_hba_private = instance;
815 833 tran->tran_tgt_init = mrsas_tran_tgt_init;
816 834 tran->tran_tgt_probe = scsi_hba_probe;
817 835 tran->tran_tgt_free = mrsas_tran_tgt_free;
818 - if (instance->tbolt) {
819 - tran->tran_init_pkt =
820 - mrsas_tbolt_tran_init_pkt;
821 - tran->tran_start =
822 - mrsas_tbolt_tran_start;
823 - } else {
824 - tran->tran_init_pkt = mrsas_tran_init_pkt;
825 - tran->tran_start = mrsas_tran_start;
826 - }
836 + tran->tran_init_pkt = mrsas_tran_init_pkt;
837 + if (instance->tbolt)
838 + tran->tran_start = mrsas_tbolt_tran_start;
839 + else
840 + tran->tran_start = mrsas_tran_start;
827 841 tran->tran_abort = mrsas_tran_abort;
828 842 tran->tran_reset = mrsas_tran_reset;
829 843 tran->tran_getcap = mrsas_tran_getcap;
830 844 tran->tran_setcap = mrsas_tran_setcap;
831 845 tran->tran_destroy_pkt = mrsas_tran_destroy_pkt;
832 846 tran->tran_dmafree = mrsas_tran_dmafree;
833 847 tran->tran_sync_pkt = mrsas_tran_sync_pkt;
834 848 tran->tran_quiesce = mrsas_tran_quiesce;
835 849 tran->tran_unquiesce = mrsas_tran_unquiesce;
836 850 tran->tran_bus_config = mrsas_tran_bus_config;
837 851
838 852 if (mrsas_relaxed_ordering)
839 853 mrsas_generic_dma_attr.dma_attr_flags |=
840 854 DDI_DMA_RELAXED_ORDERING;
841 855
842 856
843 857 tran_dma_attr = mrsas_generic_dma_attr;
844 858 tran_dma_attr.dma_attr_sgllen = instance->max_num_sge;
845 859
846 860 /* Attach this instance of the hba */
847 861 if (scsi_hba_attach_setup(dip, &tran_dma_attr, tran, 0)
848 862 != DDI_SUCCESS) {
849 863 cmn_err(CE_WARN,
850 864 "scsi_hba_attach failed");
851 865
852 866 goto fail_attach;
853 867 }
854 868 instance->unroll.tranSetup = 1;
855 869 con_log(CL_ANN1,
856 870 (CE_CONT, "scsi_hba_attach_setup() done."));
857 871
858 872 /* create devctl node for cfgadm command */
859 873 if (ddi_create_minor_node(dip, "devctl",
860 874 S_IFCHR, INST2DEVCTL(instance_no),
861 875 DDI_NT_SCSI_NEXUS, 0) == DDI_FAILURE) {
862 876 cmn_err(CE_WARN,
863 877 "mr_sas: failed to create devctl node.");
864 878
865 879 goto fail_attach;
866 880 }
867 881
868 882 instance->unroll.devctl = 1;
869 883
870 884 /* create scsi node for cfgadm command */
871 885 if (ddi_create_minor_node(dip, "scsi", S_IFCHR,
872 886 INST2SCSI(instance_no), DDI_NT_SCSI_ATTACHMENT_POINT, 0) ==
873 887 DDI_FAILURE) {
874 888 cmn_err(CE_WARN,
875 889 "mr_sas: failed to create scsi node.");
876 890
877 891 goto fail_attach;
878 892 }
879 893
880 894 instance->unroll.scsictl = 1;
881 895
882 896 (void) sprintf(instance->iocnode, "%d:lsirdctl",
883 897 instance_no);
884 898
885 899 /*
886 900 * Create a node for applications
887 901 * for issuing ioctl to the driver.
888 902 */
889 903 if (ddi_create_minor_node(dip, instance->iocnode,
890 904 S_IFCHR, INST2LSIRDCTL(instance_no), DDI_PSEUDO, 0) ==
891 905 DDI_FAILURE) {
892 906 cmn_err(CE_WARN,
893 907 "mr_sas: failed to create ioctl node.");
894 908
895 909 goto fail_attach;
896 910 }
897 911
898 912 instance->unroll.ioctl = 1;
899 913
900 914 /* Create a taskq to handle dr events */
901 915 if ((instance->taskq = ddi_taskq_create(dip,
902 916 "mrsas_dr_taskq", 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
903 917 cmn_err(CE_WARN,
904 918 "mr_sas: failed to create taskq ");
905 919 instance->taskq = NULL;
906 920 goto fail_attach;
907 921 }
908 922 instance->unroll.taskq = 1;
909 923 con_log(CL_ANN1, (CE_CONT, "ddi_taskq_create() done."));
910 924
911 925 /* enable interrupt */
912 926 instance->func_ptr->enable_intr(instance);
913 927
914 928 /* initiate AEN */
915 929 if (start_mfi_aen(instance)) {
916 930 cmn_err(CE_WARN,
917 931 "mr_sas: failed to initiate AEN.");
918 932 goto fail_attach;
919 933 }
920 934 instance->unroll.aenPend = 1;
921 935 con_log(CL_ANN1,
922 936 (CE_CONT, "AEN started for instance %d.", instance_no));
923 937
924 938 /* Finally! We are on the air. */
925 939 ddi_report_dev(dip);
926 940
927 941 /* FMA handle checking. */
928 942 if (mrsas_check_acc_handle(instance->regmap_handle) !=
929 943 DDI_SUCCESS) {
930 944 goto fail_attach;
931 945 }
932 946 if (mrsas_check_acc_handle(instance->pci_handle) !=
|
↓ open down ↓ |
96 lines elided |
↑ open up ↑ |
933 947 DDI_SUCCESS) {
934 948 goto fail_attach;
935 949 }
936 950
937 951 instance->mr_ld_list =
938 952 kmem_zalloc(MRDRV_MAX_LD * sizeof (struct mrsas_ld),
939 953 KM_SLEEP);
940 954 instance->unroll.ldlist_buff = 1;
941 955
942 956 #ifdef PDSUPPORT
943 - if (instance->tbolt) {
957 + if (instance->tbolt || instance->skinny) {
944 958 instance->mr_tbolt_pd_max = MRSAS_TBOLT_PD_TGT_MAX;
945 959 instance->mr_tbolt_pd_list =
946 960 kmem_zalloc(MRSAS_TBOLT_GET_PD_MAX(instance) *
947 961 sizeof (struct mrsas_tbolt_pd), KM_SLEEP);
948 962 ASSERT(instance->mr_tbolt_pd_list);
949 963 for (i = 0; i < instance->mr_tbolt_pd_max; i++) {
950 964 instance->mr_tbolt_pd_list[i].lun_type =
951 965 MRSAS_TBOLT_PD_LUN;
952 966 instance->mr_tbolt_pd_list[i].dev_id =
953 967 (uint8_t)i;
954 968 }
955 969
956 970 instance->unroll.pdlist_buff = 1;
957 971 }
958 972 #endif
959 973 break;
960 974 case DDI_PM_RESUME:
961 975 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_PM_RESUME"));
962 976 break;
963 977 case DDI_RESUME:
964 978 con_log(CL_ANN, (CE_NOTE, "mr_sas: DDI_RESUME"));
965 979 break;
966 980 default:
967 981 con_log(CL_ANN,
968 982 (CE_WARN, "mr_sas: invalid attach cmd=%x", cmd));
969 983 return (DDI_FAILURE);
970 984 }
971 985
972 986
973 987 con_log(CL_DLEVEL1,
974 988 (CE_NOTE, "mrsas_attach() return SUCCESS instance_num %d",
975 989 instance_no));
976 990 return (DDI_SUCCESS);
977 991
978 992 fail_attach:
979 993
980 994 mrsas_undo_resources(dip, instance);
981 995
982 996 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
983 997 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
984 998
985 999 mrsas_fm_fini(instance);
986 1000
987 1001 pci_config_teardown(&instance->pci_handle);
988 1002 ddi_soft_state_free(mrsas_state, instance_no);
989 1003
990 1004 con_log(CL_ANN, (CE_WARN, "mr_sas: return failure from mrsas_attach"));
991 1005
992 1006 cmn_err(CE_WARN, "mrsas_attach() return FAILURE instance_num %d",
993 1007 instance_no);
994 1008
995 1009 return (DDI_FAILURE);
996 1010 }
997 1011
998 1012 /*
999 1013 * getinfo - gets device information
1000 1014 * @dip:
1001 1015 * @cmd:
1002 1016 * @arg:
1003 1017 * @resultp:
1004 1018 *
1005 1019 * The system calls getinfo() to obtain configuration information that only
1006 1020 * the driver knows. The mapping of minor numbers to device instance is
1007 1021 * entirely under the control of the driver. The system sometimes needs to ask
1008 1022 * the driver which device a particular dev_t represents.
1009 1023 * Given the device number return the devinfo pointer from the scsi_device
1010 1024 * structure.
1011 1025 */
1012 1026 /*ARGSUSED*/
1013 1027 static int
1014 1028 mrsas_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
1015 1029 {
1016 1030 int rval;
1017 1031 int mrsas_minor = getminor((dev_t)arg);
1018 1032
1019 1033 struct mrsas_instance *instance;
1020 1034
1021 1035 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1022 1036
1023 1037 switch (cmd) {
1024 1038 case DDI_INFO_DEVT2DEVINFO:
1025 1039 instance = (struct mrsas_instance *)
1026 1040 ddi_get_soft_state(mrsas_state,
1027 1041 MINOR2INST(mrsas_minor));
1028 1042
1029 1043 if (instance == NULL) {
1030 1044 *resultp = NULL;
1031 1045 rval = DDI_FAILURE;
1032 1046 } else {
1033 1047 *resultp = instance->dip;
1034 1048 rval = DDI_SUCCESS;
1035 1049 }
1036 1050 break;
1037 1051 case DDI_INFO_DEVT2INSTANCE:
1038 1052 *resultp = (void *)(intptr_t)
1039 1053 (MINOR2INST(getminor((dev_t)arg)));
1040 1054 rval = DDI_SUCCESS;
1041 1055 break;
1042 1056 default:
1043 1057 *resultp = NULL;
1044 1058 rval = DDI_FAILURE;
1045 1059 }
1046 1060
1047 1061 return (rval);
1048 1062 }
1049 1063
1050 1064 /*
1051 1065 * detach - detaches a device from the system
1052 1066 * @dip: pointer to the device's dev_info structure
1053 1067 * @cmd: type of detach
1054 1068 *
1055 1069 * A driver's detach() entry point is called to detach an instance of a device
1056 1070 * that is bound to the driver. The entry point is called with the instance of
1057 1071 * the device node to be detached and with DDI_DETACH, which is specified as
1058 1072 * the cmd argument to the entry point.
1059 1073 * This routine is called during driver unload. We free all the allocated
1060 1074 * resources and call the corresponding LLD so that it can also release all
1061 1075 * its resources.
1062 1076 */
static int
mrsas_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int	instance_no;

	struct mrsas_instance	*instance;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	/* CONSTCOND */
	ASSERT(NO_COMPETING_THREADS);

	instance_no = ddi_get_instance(dip);

	instance = (struct mrsas_instance *)ddi_get_soft_state(mrsas_state,
	    instance_no);

	if (!instance) {
		cmn_err(CE_WARN,
		    "mr_sas:%d could not get instance in detach",
		    instance_no);

		return (DDI_FAILURE);
	}

	con_log(CL_ANN, (CE_NOTE,
	    "mr_sas%d: detaching device 0x%4x:0x%4x:0x%4x:0x%4x",
	    instance_no, instance->vendor_id, instance->device_id,
	    instance->subsysvid, instance->subsysid));

	switch (cmd) {
	case DDI_DETACH:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_DETACH"));

		/*
		 * Cancel the periodic config-device timeout if it is armed.
		 * config_dev_mtx is dropped around untimeout() on purpose:
		 * untimeout() can block on a running timeout handler, and
		 * holding the lock across that wait risks a deadlock if
		 * the handler takes the same lock.
		 */
		mutex_enter(&instance->config_dev_mtx);
		if (instance->timeout_id != (timeout_id_t)-1) {
			mutex_exit(&instance->config_dev_mtx);
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;
			mutex_enter(&instance->config_dev_mtx);
			instance->unroll.timer = 0;
		}
		mutex_exit(&instance->config_dev_mtx);

		/*
		 * Detach from the SCSA framework first; if a target is
		 * still busy this fails and the instance must remain
		 * attached, so we bail out before freeing anything.
		 */
		if (instance->unroll.tranSetup == 1) {
			if (scsi_hba_detach(dip) != DDI_SUCCESS) {
				cmn_err(CE_WARN,
				    "mr_sas2%d: failed to detach",
				    instance_no);
				return (DDI_FAILURE);
			}
			instance->unroll.tranSetup = 0;
			con_log(CL_ANN1,
			    (CE_CONT, "scsi_hba_dettach() done."));
		}

		/* Ask the firmware to flush its caches before teardown. */
		flush_cache(instance);

		/* Release everything attach set up (per unroll.* flags). */
		mrsas_undo_resources(dip, instance);

		mrsas_fm_fini(instance);

		pci_config_teardown(&instance->pci_handle);
		ddi_soft_state_free(mrsas_state, instance_no);
		break;

	case DDI_PM_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_PM_SUSPEND"));

		break;
	case DDI_SUSPEND:
		con_log(CL_ANN, (CE_NOTE,
		    "mrsas_detach: DDI_SUSPEND"));

		break;
	default:
		con_log(CL_ANN, (CE_WARN,
		    "invalid detach command:0x%x", cmd));
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
1149 1163
1150 1164
/*
 * mrsas_undo_resources - roll back everything mrsas_attach() acquired.
 *
 * Each acquisition during attach sets a flag in instance->unroll; this
 * routine tears resources down in (roughly) reverse order of setup and
 * clears each flag as it goes, so it is safe to call both from a
 * partially-failed attach and from a full detach.
 */
static void
mrsas_undo_resources(dev_info_t *dip, struct mrsas_instance *instance)
{
	int	instance_no;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));


	/* instance_no is only needed for diagnostic messages below. */
	instance_no = ddi_get_instance(dip);


	/* Remove the minor nodes created for ioctl/cfgadm access. */
	if (instance->unroll.ioctl == 1) {
		ddi_remove_minor_node(dip, instance->iocnode);
		instance->unroll.ioctl = 0;
	}

	if (instance->unroll.scsictl == 1) {
		ddi_remove_minor_node(dip, "scsi");
		instance->unroll.scsictl = 0;
	}

	if (instance->unroll.devctl == 1) {
		ddi_remove_minor_node(dip, "devctl");
		instance->unroll.devctl = 0;
	}

	/*
	 * Detach from SCSA.  If this fails the instance is still in use;
	 * abandon the teardown here rather than free live resources.
	 */
	if (instance->unroll.tranSetup == 1) {
		if (scsi_hba_detach(dip) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "mr_sas2%d: failed to detach", instance_no);
			return;	/* DDI_FAILURE */
		}
		instance->unroll.tranSetup = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_dettach() done."));
	}

	if (instance->unroll.tran == 1) {
		scsi_hba_tran_free(instance->tran);
		instance->unroll.tran = 0;
		con_log(CL_ANN1, (CE_CONT, "scsi_hba_tran_free() done."));
	}

	/* Abort the long-lived RAID-map sync command (Thunderbolt only). */
	if (instance->unroll.syncCmd == 1) {
		if (instance->tbolt) {
			if (abort_syncmap_cmd(instance,
			    instance->map_update_cmd)) {
				cmn_err(CE_WARN, "mrsas_detach: "
				    "failed to abort previous syncmap command");
			}

			instance->unroll.syncCmd = 0;
			con_log(CL_ANN1, (CE_CONT, "sync cmd aborted, done."));
		}
	}

	/* Abort the outstanding async event notification (AEN) command. */
	if (instance->unroll.aenPend == 1) {
		if (abort_aen_cmd(instance, instance->aen_cmd))
			cmn_err(CE_WARN, "mrsas_detach: "
			    "failed to abort prevous AEN command");

		instance->unroll.aenPend = 0;
		con_log(CL_ANN1, (CE_CONT, "aen cmd aborted, done."));
		/* This means the controller is fully initialized and running */
		/* Shutdown should be a last command to controller. */
		/* shutdown_controller(); */
	}


	if (instance->unroll.timer == 1) {
		if (instance->timeout_id != (timeout_id_t)-1) {
			(void) untimeout(instance->timeout_id);
			instance->timeout_id = (timeout_id_t)-1;

			instance->unroll.timer = 0;
		}
	}

	/* Quiet the hardware before removing ISRs and destroying locks. */
	instance->func_ptr->disable_intr(instance);


	if (instance->unroll.mutexs == 1) {
		mutex_destroy(&instance->cmd_pool_mtx);
		mutex_destroy(&instance->app_cmd_pool_mtx);
		mutex_destroy(&instance->cmd_pend_mtx);
		mutex_destroy(&instance->completed_pool_mtx);
		mutex_destroy(&instance->sync_map_mtx);
		mutex_destroy(&instance->int_cmd_mtx);
		cv_destroy(&instance->int_cmd_cv);
		mutex_destroy(&instance->config_dev_mtx);
		mutex_destroy(&instance->ocr_flags_mtx);
		mutex_destroy(&instance->reg_write_mtx);

		/* Thunderbolt adapters carry two extra locks. */
		if (instance->tbolt) {
			mutex_destroy(&instance->cmd_app_pool_mtx);
			mutex_destroy(&instance->chip_mtx);
		}

		instance->unroll.mutexs = 0;
		con_log(CL_ANN1, (CE_CONT, "Destroy mutex & cv, done."));
	}


	if (instance->unroll.soft_isr == 1) {
		ddi_remove_softintr(instance->soft_intr_id);
		instance->unroll.soft_isr = 0;
	}

	if (instance->unroll.intr == 1) {
		mrsas_rem_intrs(instance);
		instance->unroll.intr = 0;
	}


	if (instance->unroll.taskq == 1) {
		if (instance->taskq) {
			ddi_taskq_destroy(instance->taskq);
			instance->unroll.taskq = 0;
		}

	}

	/*
	 * free dma memory allocated for
	 * cmds/frames/queues/driver version etc
	 */
	if (instance->unroll.verBuff == 1) {
		(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
		instance->unroll.verBuff = 0;
	}

	/* Physical-disk target list (PDSUPPORT adapters only). */
	if (instance->unroll.pdlist_buff == 1) {
		if (instance->mr_tbolt_pd_list != NULL) {
			kmem_free(instance->mr_tbolt_pd_list,
			    MRSAS_TBOLT_GET_PD_MAX(instance) *
			    sizeof (struct mrsas_tbolt_pd));
		}

		instance->mr_tbolt_pd_list = NULL;
		instance->unroll.pdlist_buff = 0;
	}

	/* Logical-drive target list. */
	if (instance->unroll.ldlist_buff == 1) {
		if (instance->mr_ld_list != NULL) {
			kmem_free(instance->mr_ld_list, MRDRV_MAX_LD
			    * sizeof (struct mrsas_ld));
		}

		instance->mr_ld_list = NULL;
		instance->unroll.ldlist_buff = 0;
	}

	/* MPI2 (Thunderbolt) and MFI adapters use disjoint frame pools. */
	if (instance->tbolt) {
		if (instance->unroll.alloc_space_mpi2 == 1) {
			free_space_for_mpi2(instance);
			instance->unroll.alloc_space_mpi2 = 0;
		}
	} else {
		if (instance->unroll.alloc_space_mfi == 1) {
			free_space_for_mfi(instance);
			instance->unroll.alloc_space_mfi = 0;
		}
	}

	if (instance->unroll.regs == 1) {
		ddi_regs_map_free(&instance->regmap_handle);
		instance->unroll.regs = 0;
		con_log(CL_ANN1, (CE_CONT, "ddi_regs_map_free() done."));
	}
}
1320 1334
1321 1335
1322 1336
1323 1337 /*
1324 1338 * ************************************************************************** *
1325 1339 * *
1326 1340 * common entry points - for character driver types *
1327 1341 * *
1328 1342 * ************************************************************************** *
1329 1343 */
1330 1344 /*
1331 1345 * open - gets access to a device
1332 1346 * @dev:
1333 1347 * @openflags:
1334 1348 * @otyp:
1335 1349 * @credp:
1336 1350 *
1337 1351 * Access to a device by one or more application programs is controlled
1338 1352 * through the open() and close() entry points. The primary function of
1339 1353 * open() is to verify that the open request is allowed.
1340 1354 */
1341 1355 static int
1342 1356 mrsas_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
1343 1357 {
1344 1358 int rval = 0;
1345 1359
1346 1360 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1347 1361
1348 1362 /* Check root permissions */
1349 1363 if (drv_priv(credp) != 0) {
1350 1364 con_log(CL_ANN, (CE_WARN,
1351 1365 "mr_sas: Non-root ioctl access denied!"));
1352 1366 return (EPERM);
1353 1367 }
1354 1368
1355 1369 /* Verify we are being opened as a character device */
1356 1370 if (otyp != OTYP_CHR) {
1357 1371 con_log(CL_ANN, (CE_WARN,
1358 1372 "mr_sas: ioctl node must be a char node"));
1359 1373 return (EINVAL);
1360 1374 }
1361 1375
1362 1376 if (ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(*dev)))
1363 1377 == NULL) {
1364 1378 return (ENXIO);
1365 1379 }
1366 1380
1367 1381 if (scsi_hba_open) {
1368 1382 rval = scsi_hba_open(dev, openflags, otyp, credp);
1369 1383 }
1370 1384
1371 1385 return (rval);
1372 1386 }
1373 1387
1374 1388 /*
1375 1389 * close - gives up access to a device
1376 1390 * @dev:
1377 1391 * @openflags:
1378 1392 * @otyp:
1379 1393 * @credp:
1380 1394 *
1381 1395 * close() should perform any cleanup necessary to finish using the minor
1382 1396 * device, and prepare the device (and driver) to be opened again.
1383 1397 */
1384 1398 static int
1385 1399 mrsas_close(dev_t dev, int openflags, int otyp, cred_t *credp)
1386 1400 {
1387 1401 int rval = 0;
1388 1402
1389 1403 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1390 1404
1391 1405 /* no need for locks! */
1392 1406
1393 1407 if (scsi_hba_close) {
1394 1408 rval = scsi_hba_close(dev, openflags, otyp, credp);
1395 1409 }
1396 1410
1397 1411 return (rval);
1398 1412 }
1399 1413
1400 1414 /*
1401 1415 * ioctl - performs a range of I/O commands for character drivers
1402 1416 * @dev:
1403 1417 * @cmd:
1404 1418 * @arg:
1405 1419 * @mode:
1406 1420 * @credp:
1407 1421 * @rvalp:
1408 1422 *
1409 1423 * ioctl() routine must make sure that user data is copied into or out of the
1410 1424 * kernel address space explicitly using copyin(), copyout(), ddi_copyin(),
1411 1425 * and ddi_copyout(), as appropriate.
1412 1426 * This is a wrapper routine to serialize access to the actual ioctl routine.
1413 1427 * ioctl() should return 0 on success, or the appropriate error number. The
1414 1428 * driver may also set the value returned to the calling process through rvalp.
1415 1429 */
1416 1430
static int
mrsas_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	int	rval = 0;

	struct mrsas_instance	*instance;
	struct mrsas_ioctl	*ioctl;
	struct mrsas_aen	aen;
	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ddi_get_soft_state(mrsas_state, MINOR2INST(getminor(dev)));

	if (instance == NULL) {
		/* invalid minor number */
		con_log(CL_ANN, (CE_WARN, "mr_sas: adapter not found."));
		return (ENXIO);
	}

	/* Kernel scratch copy of the user ioctl packet; freed on all paths. */
	ioctl = (struct mrsas_ioctl *)kmem_zalloc(sizeof (struct mrsas_ioctl),
	    KM_SLEEP);
	ASSERT(ioctl);

	switch ((uint_t)cmd) {
	case MRSAS_IOCTL_FIRMWARE:
		if (ddi_copyin((void *)arg, ioctl,
		    sizeof (struct mrsas_ioctl), mode)) {
			con_log(CL_ANN, (CE_WARN, "mrsas_ioctl: "
			    "ERROR IOCTL copyin"));
			kmem_free(ioctl, sizeof (struct mrsas_ioctl));
			return (EFAULT);
		}

		/* Driver-private control codes vs. pass-through MFI frames. */
		if (ioctl->control_code == MRSAS_DRIVER_IOCTL_COMMON) {
			rval = handle_drv_ioctl(instance, ioctl, mode);
		} else {
			rval = handle_mfi_ioctl(instance, ioctl, mode);
		}

		/*
		 * NOTE(review): copies sizeof (struct mrsas_ioctl) - 1
		 * bytes back out -- presumably skipping the structure's
		 * trailing payload byte; confirm against the struct
		 * layout.  Also note a failed copyout sets rval = 1
		 * rather than EFAULT -- verify callers expect that.
		 */
		if (ddi_copyout((void *)ioctl, (void *)arg,
		    (sizeof (struct mrsas_ioctl) - 1), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "mrsas_ioctl: copy_to_user failed"));
			rval = 1;
		}

		break;
	case MRSAS_IOCTL_AEN:
		/* AEN registration uses a small stack copy, not 'ioctl'. */
		if (ddi_copyin((void *) arg, &aen,
		    sizeof (struct mrsas_aen), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "mrsas_ioctl: ERROR AEN copyin"));
			kmem_free(ioctl, sizeof (struct mrsas_ioctl));
			return (EFAULT);
		}

		rval = handle_mfi_aen(instance, &aen);

		if (ddi_copyout((void *) &aen, (void *)arg,
		    sizeof (struct mrsas_aen), mode)) {
			con_log(CL_ANN, (CE_WARN,
			    "mrsas_ioctl: copy_to_user failed"));
			rval = 1;
		}

		break;
	default:
		/* Everything else belongs to the common SCSA framework. */
		rval = scsi_hba_ioctl(dev, cmd, arg,
		    mode, credp, rvalp);

		con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_ioctl: "
		    "scsi_hba_ioctl called, ret = %x.", rval));
	}

	kmem_free(ioctl, sizeof (struct mrsas_ioctl));
	return (rval);
}
1494 1508
1495 1509 /*
1496 1510 * ************************************************************************** *
1497 1511 * *
1498 1512 * common entry points - for block driver types *
1499 1513 * *
1500 1514 * ************************************************************************** *
1501 1515 */
1502 1516 #ifdef __sparc
1503 1517 /*
1504 1518 * reset - TBD
1505 1519 * @dip:
1506 1520 * @cmd:
1507 1521 *
1508 1522 * TBD
1509 1523 */
1510 1524 /*ARGSUSED*/
1511 1525 static int
1512 1526 mrsas_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1513 1527 {
1514 1528 int instance_no;
1515 1529
1516 1530 struct mrsas_instance *instance;
1517 1531
1518 1532 instance_no = ddi_get_instance(dip);
1519 1533 instance = (struct mrsas_instance *)ddi_get_soft_state
1520 1534 (mrsas_state, instance_no);
1521 1535
1522 1536 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1523 1537
1524 1538 if (!instance) {
1525 1539 con_log(CL_ANN, (CE_WARN, "mr_sas:%d could not get adapter "
1526 1540 "in reset", instance_no));
1527 1541 return (DDI_FAILURE);
1528 1542 }
1529 1543
1530 1544 instance->func_ptr->disable_intr(instance);
1531 1545
1532 1546 con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
1533 1547 instance_no));
1534 1548
1535 1549 flush_cache(instance);
1536 1550
1537 1551 return (DDI_SUCCESS);
1538 1552 }
1539 1553 #else /* __sparc */
/*ARGSUSED*/
static int
mrsas_quiesce(dev_info_t *dip)
{
	int	instance_no;

	struct mrsas_instance	*instance;

	instance_no = ddi_get_instance(dip);
	instance = (struct mrsas_instance *)ddi_get_soft_state
	    (mrsas_state, instance_no);

	con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	if (!instance) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas:%d could not get adapter "
		    "in quiesce", instance_no));
		return (DDI_FAILURE);
	}
	/* A dead or mid-reset adapter cannot be quiesced reliably. */
	if (instance->deadadapter || instance->adapterresetinprogress) {
		con_log(CL_ANN1, (CE_WARN, "mr_sas:%d adapter is not in "
		    "healthy state", instance_no));
		return (DDI_FAILURE);
	}

	/* Cancel the outstanding AEN so firmware stops posting events. */
	if (abort_aen_cmd(instance, instance->aen_cmd)) {
		con_log(CL_ANN1, (CE_WARN, "mrsas_quiesce: "
		    "failed to abort prevous AEN command QUIESCE"));
	}

	/* Thunderbolt keeps a long-lived map-sync command; abort it too. */
	if (instance->tbolt) {
		if (abort_syncmap_cmd(instance,
		    instance->map_update_cmd)) {
			cmn_err(CE_WARN,
			    "mrsas_detach: failed to abort "
			    "previous syncmap command");
			return (DDI_FAILURE);
		}
	}

	/* Silence interrupts and flush controller caches for fast reboot. */
	instance->func_ptr->disable_intr(instance);

	con_log(CL_ANN1, (CE_CONT, "flushing cache for instance %d",
	    instance_no));

	flush_cache(instance);

	/* Fail the quiesce if firmware still owns outstanding commands. */
	if (wait_for_outstanding(instance)) {
		con_log(CL_ANN1,
		    (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}
1594 1608 #endif /* __sparc */
1595 1609
1596 1610 /*
1597 1611 * ************************************************************************** *
1598 1612 * *
1599 1613 * entry points (SCSI HBA) *
1600 1614 * *
1601 1615 * ************************************************************************** *
1602 1616 */
1603 1617 /*
1604 1618 * tran_tgt_init - initialize a target device instance
1605 1619 * @hba_dip:
1606 1620 * @tgt_dip:
1607 1621 * @tran:
1608 1622 * @sd:
1609 1623 *
1610 1624 * The tran_tgt_init() entry point enables the HBA to allocate and initialize
1611 1625 * any per-target resources. tran_tgt_init() also enables the HBA to qualify
1612 1626 * the device's address as valid and supportable for that particular HBA.
1613 1627 * By returning DDI_FAILURE, the instance of the target driver for that device
1614 1628 * is not probed or attached.
1615 1629 */
1616 1630 /*ARGSUSED*/
1617 1631 static int
1618 1632 mrsas_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1619 1633 scsi_hba_tran_t *tran, struct scsi_device *sd)
1620 1634 {
1621 1635 struct mrsas_instance *instance;
1622 1636 uint16_t tgt = sd->sd_address.a_target;
1623 1637 uint8_t lun = sd->sd_address.a_lun;
1624 1638 dev_info_t *child = NULL;
1625 1639
1626 1640 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init target %d lun %d",
1627 1641 tgt, lun));
1628 1642
1629 1643 instance = ADDR2MR(&sd->sd_address);
1630 1644
1631 1645 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
1632 1646 /*
1633 1647 * If no persistent node exists, we don't allow .conf node
1634 1648 * to be created.
1635 1649 */
1636 1650 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
1637 1651 con_log(CL_DLEVEL2,
1638 1652 (CE_NOTE, "mrsas_tgt_init find child ="
1639 1653 " %p t = %d l = %d", (void *)child, tgt, lun));
1640 1654 if (ndi_merge_node(tgt_dip, mrsas_name_node) !=
1641 1655 DDI_SUCCESS)
1642 1656 /* Create this .conf node */
1643 1657 return (DDI_SUCCESS);
1644 1658 }
1645 1659 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init in ndi_per "
1646 1660 "DDI_FAILURE t = %d l = %d", tgt, lun));
1647 1661 return (DDI_FAILURE);
1648 1662
1649 1663 }
1650 1664
1651 1665 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_tgt_init dev_dip %p tgt_dip %p",
1652 1666 (void *)instance->mr_ld_list[tgt].dip, (void *)tgt_dip));
1653 1667
1654 1668 if (tgt < MRDRV_MAX_LD && lun == 0) {
1655 1669 if (instance->mr_ld_list[tgt].dip == NULL &&
|
↓ open down ↓ |
702 lines elided |
↑ open up ↑ |
1656 1670 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0) {
1657 1671 mutex_enter(&instance->config_dev_mtx);
1658 1672 instance->mr_ld_list[tgt].dip = tgt_dip;
1659 1673 instance->mr_ld_list[tgt].lun_type = MRSAS_LD_LUN;
1660 1674 instance->mr_ld_list[tgt].flag = MRDRV_TGT_VALID;
1661 1675 mutex_exit(&instance->config_dev_mtx);
1662 1676 }
1663 1677 }
1664 1678
1665 1679 #ifdef PDSUPPORT
1666 - else if (instance->tbolt) {
1680 + else if (instance->tbolt || instance->skinny) {
1667 1681 if (instance->mr_tbolt_pd_list[tgt].dip == NULL) {
1668 1682 mutex_enter(&instance->config_dev_mtx);
1669 1683 instance->mr_tbolt_pd_list[tgt].dip = tgt_dip;
1670 1684 instance->mr_tbolt_pd_list[tgt].flag =
1671 1685 MRDRV_TGT_VALID;
1672 1686 mutex_exit(&instance->config_dev_mtx);
1673 1687 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_tgt_init:"
1674 1688 "t%xl%x", tgt, lun));
1675 1689 }
1676 1690 }
1677 1691 #endif
1678 1692
1679 1693 return (DDI_SUCCESS);
1680 1694 }
1681 1695
1682 1696 /*ARGSUSED*/
1683 1697 static void
1684 1698 mrsas_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1685 1699 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1686 1700 {
1687 1701 struct mrsas_instance *instance;
1688 1702 int tgt = sd->sd_address.a_target;
1689 1703 int lun = sd->sd_address.a_lun;
1690 1704
1691 1705 instance = ADDR2MR(&sd->sd_address);
1692 1706
1693 1707 con_log(CL_DLEVEL2, (CE_NOTE, "tgt_free t = %d l = %d", tgt, lun));
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
1694 1708
1695 1709 if (tgt < MRDRV_MAX_LD && lun == 0) {
1696 1710 if (instance->mr_ld_list[tgt].dip == tgt_dip) {
1697 1711 mutex_enter(&instance->config_dev_mtx);
1698 1712 instance->mr_ld_list[tgt].dip = NULL;
1699 1713 mutex_exit(&instance->config_dev_mtx);
1700 1714 }
1701 1715 }
1702 1716
1703 1717 #ifdef PDSUPPORT
1704 - else if (instance->tbolt) {
1718 + else if (instance->tbolt || instance->skinny) {
1705 1719 mutex_enter(&instance->config_dev_mtx);
1706 1720 instance->mr_tbolt_pd_list[tgt].dip = NULL;
1707 1721 mutex_exit(&instance->config_dev_mtx);
1708 1722 con_log(CL_ANN1, (CE_NOTE, "tgt_free: Setting dip = NULL"
1709 1723 "for tgt:%x", tgt));
1710 1724 }
1711 1725 #endif
1712 1726
1713 1727 }
1714 1728
1715 1729 dev_info_t *
1716 1730 mrsas_find_child(struct mrsas_instance *instance, uint16_t tgt, uint8_t lun)
1717 1731 {
1718 1732 dev_info_t *child = NULL;
1719 1733 char addr[SCSI_MAXNAMELEN];
1720 1734 char tmp[MAXNAMELEN];
1721 1735
1722 1736 (void) sprintf(addr, "%x,%x", tgt, lun);
1723 1737 for (child = ddi_get_child(instance->dip); child;
1724 1738 child = ddi_get_next_sibling(child)) {
1725 1739
1726 1740 if (ndi_dev_is_persistent_node(child) == 0) {
1727 1741 continue;
1728 1742 }
1729 1743
1730 1744 if (mrsas_name_node(child, tmp, MAXNAMELEN) !=
1731 1745 DDI_SUCCESS) {
1732 1746 continue;
1733 1747 }
1734 1748
1735 1749 if (strcmp(addr, tmp) == 0) {
1736 1750 break;
1737 1751 }
1738 1752 }
1739 1753 con_log(CL_DLEVEL2, (CE_NOTE, "mrsas_find_child: return child = %p",
1740 1754 (void *)child));
1741 1755 return (child);
1742 1756 }
1743 1757
1744 1758 /*
1745 1759 * mrsas_name_node -
1746 1760 * @dip:
1747 1761 * @name:
1748 1762 * @len:
1749 1763 */
1750 1764 static int
1751 1765 mrsas_name_node(dev_info_t *dip, char *name, int len)
1752 1766 {
1753 1767 int tgt, lun;
1754 1768
1755 1769 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1756 1770 DDI_PROP_DONTPASS, "target", -1);
1757 1771 con_log(CL_DLEVEL2, (CE_NOTE,
1758 1772 "mrsas_name_node: dip %p tgt %d", (void *)dip, tgt));
1759 1773 if (tgt == -1) {
1760 1774 return (DDI_FAILURE);
1761 1775 }
1762 1776 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1763 1777 "lun", -1);
1764 1778 con_log(CL_DLEVEL2,
1765 1779 (CE_NOTE, "mrsas_name_node: tgt %d lun %d", tgt, lun));
1766 1780 if (lun == -1) {
1767 1781 return (DDI_FAILURE);
1768 1782 }
1769 1783 (void) snprintf(name, len, "%x,%x", tgt, lun);
1770 1784 return (DDI_SUCCESS);
1771 1785 }
1772 1786
1773 1787 /*
1774 1788 * tran_init_pkt - allocate & initialize a scsi_pkt structure
1775 1789 * @ap:
1776 1790 * @pkt:
1777 1791 * @bp:
1778 1792 * @cmdlen:
1779 1793 * @statuslen:
1780 1794 * @tgtlen:
1781 1795 * @flags:
1782 1796 * @callback:
1783 1797 *
1784 1798 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1785 1799 * structure and DMA resources for a target driver request. The
1786 1800 * tran_init_pkt() entry point is called when the target driver calls the
1787 1801 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1788 1802 * is a request to perform one or more of three possible services:
1789 1803 * - allocation and initialization of a scsi_pkt structure
1790 1804 * - allocation of DMA resources for data transfer
1791 1805 * - reallocation of DMA resources for the next portion of the data transfer
1792 1806 */
static struct scsi_pkt *
mrsas_tran_init_pkt(struct scsi_address *ap, register struct scsi_pkt *pkt,
    struct buf *bp, int cmdlen, int statuslen, int tgtlen,
    int flags, int (*callback)(), caddr_t arg)
{
	struct scsa_cmd	*acmd;
	struct mrsas_instance	*instance;
	struct scsi_pkt	*new_pkt;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	instance = ADDR2MR(ap);

	/* step #1 : pkt allocation */
	if (pkt == NULL) {
		pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
		    tgtlen, sizeof (struct scsa_cmd), callback, arg);
		if (pkt == NULL) {
			return (NULL);
		}

		acmd = PKT2CMD(pkt);

		/*
		 * Initialize the new pkt - we redundantly initialize
		 * all the fields for illustrative purposes.
		 */
		acmd->cmd_pkt		= pkt;
		acmd->cmd_flags		= 0;
		acmd->cmd_scblen	= statuslen;
		acmd->cmd_cdblen	= cmdlen;
		acmd->cmd_dmahandle	= NULL;
		acmd->cmd_ncookies	= 0;
		acmd->cmd_cookie	= 0;
		acmd->cmd_cookiecnt	= 0;
		acmd->cmd_nwin		= 0;

		pkt->pkt_address	= *ap;
		pkt->pkt_comp		= (void (*)())NULL;
		pkt->pkt_flags		= 0;
		pkt->pkt_time		= 0;
		pkt->pkt_resid		= 0;
		pkt->pkt_state		= 0;
		pkt->pkt_statistics	= 0;
		pkt->pkt_reason		= 0;
		/* Remember we allocated it, so failure paths can free it. */
		new_pkt			= pkt;
	} else {
		/* Caller's existing pkt: only DMA (re)binding is wanted. */
		acmd = PKT2CMD(pkt);
		new_pkt = NULL;
	}

	/* step #2 : dma allocation/move */
	if (bp && bp->b_bcount != 0) {
		if (acmd->cmd_dmahandle == NULL) {
			/* First bind of this buf to DMA resources. */
			if (mrsas_dma_alloc(instance, pkt, bp, flags,
			    callback) == DDI_FAILURE) {
				/* Free only the pkt WE allocated above. */
				if (new_pkt) {
					scsi_hba_pkt_free(ap, new_pkt);
				}
				return ((struct scsi_pkt *)NULL);
			}
		} else {
			/* Advance to the next DMA window of a partial xfer. */
			if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
				return ((struct scsi_pkt *)NULL);
			}
		}
	}

	return (pkt);
}
1863 1877
1864 1878 /*
1865 1879 * tran_start - transport a SCSI command to the addressed target
1866 1880 * @ap:
1867 1881 * @pkt:
1868 1882 *
1869 1883 * The tran_start() entry point for a SCSI HBA driver is called to transport a
1870 1884 * SCSI command to the addressed target. The SCSI command is described
1871 1885 * entirely within the scsi_pkt structure, which the target driver allocated
1872 1886 * through the HBA driver's tran_init_pkt() entry point. If the command
1873 1887 * involves a data transfer, DMA resources must also have been allocated for
1874 1888 * the scsi_pkt structure.
1875 1889 *
1876 1890 * Return Values :
1877 1891 * TRAN_BUSY - request queue is full, no more free scbs
1878 1892 * TRAN_ACCEPT - pkt has been submitted to the instance
1879 1893 */
1880 1894 static int
1881 1895 mrsas_tran_start(struct scsi_address *ap, register struct scsi_pkt *pkt)
1882 1896 {
1883 1897 uchar_t cmd_done = 0;
1884 1898
1885 1899 struct mrsas_instance *instance = ADDR2MR(ap);
1886 1900 struct mrsas_cmd *cmd;
1887 1901
1888 1902 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1889 1903 if (instance->deadadapter == 1) {
1890 1904 con_log(CL_ANN1, (CE_WARN,
1891 1905 "mrsas_tran_start: return TRAN_FATAL_ERROR "
1892 1906 "for IO, as the HBA doesnt take any more IOs"));
1893 1907 if (pkt) {
1894 1908 pkt->pkt_reason = CMD_DEV_GONE;
1895 1909 pkt->pkt_statistics = STAT_DISCON;
1896 1910 }
1897 1911 return (TRAN_FATAL_ERROR);
1898 1912 }
1899 1913
1900 1914 if (instance->adapterresetinprogress) {
1901 1915 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_start: Reset flag set, "
1902 1916 "returning mfi_pkt and setting TRAN_BUSY\n"));
1903 1917 return (TRAN_BUSY);
1904 1918 }
1905 1919
1906 1920 con_log(CL_ANN1, (CE_CONT, "chkpnt:%s:%d:SCSI CDB[0]=0x%x time:%x",
1907 1921 __func__, __LINE__, pkt->pkt_cdbp[0], pkt->pkt_time));
1908 1922
1909 1923 pkt->pkt_reason = CMD_CMPLT;
1910 1924 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1911 1925
1912 1926 cmd = build_cmd(instance, ap, pkt, &cmd_done);
1913 1927
1914 1928 /*
1915 1929 * Check if the command is already completed by the mrsas_build_cmd()
1916 1930 * routine. In which case the busy_flag would be clear and scb will be
1917 1931 * NULL and appropriate reason provided in pkt_reason field
1918 1932 */
1919 1933 if (cmd_done) {
1920 1934 pkt->pkt_reason = CMD_CMPLT;
1921 1935 pkt->pkt_scbp[0] = STATUS_GOOD;
1922 1936 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1923 1937 | STATE_SENT_CMD;
1924 1938 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1925 1939 (*pkt->pkt_comp)(pkt);
1926 1940 }
1927 1941
1928 1942 return (TRAN_ACCEPT);
1929 1943 }
1930 1944
|
↓ open down ↓ |
216 lines elided |
↑ open up ↑ |
1931 1945 if (cmd == NULL) {
1932 1946 return (TRAN_BUSY);
1933 1947 }
1934 1948
1935 1949 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1936 1950 if (instance->fw_outstanding > instance->max_fw_cmds) {
1937 1951 con_log(CL_ANN, (CE_CONT, "mr_sas:Firmware busy"));
1938 1952 DTRACE_PROBE2(start_tran_err,
1939 1953 uint16_t, instance->fw_outstanding,
1940 1954 uint16_t, instance->max_fw_cmds);
1941 - return_mfi_pkt(instance, cmd);
1955 + mrsas_return_mfi_pkt(instance, cmd);
1942 1956 return (TRAN_BUSY);
1943 1957 }
1944 1958
1945 1959 /* Synchronize the Cmd frame for the controller */
1946 1960 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1947 1961 DDI_DMA_SYNC_FORDEV);
1948 1962 con_log(CL_ANN, (CE_CONT, "issue_cmd_ppc: SCSI CDB[0]=0x%x"
1949 1963 "cmd->index:%x\n", pkt->pkt_cdbp[0], cmd->index));
1950 1964 instance->func_ptr->issue_cmd(cmd, instance);
1951 1965
1952 1966 } else {
1953 1967 struct mrsas_header *hdr = &cmd->frame->hdr;
1954 1968
1955 1969 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1956 1970
1957 1971 pkt->pkt_reason = CMD_CMPLT;
1958 1972 pkt->pkt_statistics = 0;
1959 1973 pkt->pkt_state |= STATE_XFERRED_DATA | STATE_GOT_STATUS;
1960 1974
1961 1975 switch (ddi_get8(cmd->frame_dma_obj.acc_handle,
1962 1976 &hdr->cmd_status)) {
1963 1977 case MFI_STAT_OK:
1964 1978 pkt->pkt_scbp[0] = STATUS_GOOD;
1965 1979 break;
1966 1980
1967 1981 case MFI_STAT_SCSI_DONE_WITH_ERROR:
1968 1982 con_log(CL_ANN, (CE_CONT,
1969 1983 "mrsas_tran_start: scsi done with error"));
1970 1984 pkt->pkt_reason = CMD_CMPLT;
1971 1985 pkt->pkt_statistics = 0;
1972 1986
1973 1987 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
1974 1988 break;
1975 1989
1976 1990 case MFI_STAT_DEVICE_NOT_FOUND:
1977 1991 con_log(CL_ANN, (CE_CONT,
1978 1992 "mrsas_tran_start: device not found error"));
1979 1993 pkt->pkt_reason = CMD_DEV_GONE;
|
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
1980 1994 pkt->pkt_statistics = STAT_DISCON;
1981 1995 break;
1982 1996
1983 1997 default:
1984 1998 ((struct scsi_status *)pkt->pkt_scbp)->sts_busy = 1;
1985 1999 }
1986 2000
1987 2001 (void) mrsas_common_check(instance, cmd);
1988 2002 DTRACE_PROBE2(start_nointr_done, uint8_t, hdr->cmd,
1989 2003 uint8_t, hdr->cmd_status);
1990 - return_mfi_pkt(instance, cmd);
2004 + mrsas_return_mfi_pkt(instance, cmd);
1991 2005
1992 2006 if (pkt->pkt_comp) {
1993 2007 (*pkt->pkt_comp)(pkt);
1994 2008 }
1995 2009
1996 2010 }
1997 2011
1998 2012 return (TRAN_ACCEPT);
1999 2013 }
2000 2014
2001 2015 /*
2002 2016 * tran_abort - Abort any commands that are currently in transport
2003 2017 * @ap:
2004 2018 * @pkt:
2005 2019 *
2006 2020 * The tran_abort() entry point for a SCSI HBA driver is called to abort any
2007 2021 * commands that are currently in transport for a particular target. This entry
2008 2022 * point is called when a target driver calls scsi_abort(). The tran_abort()
2009 2023 * entry point should attempt to abort the command denoted by the pkt
2010 2024 * parameter. If the pkt parameter is NULL, tran_abort() should attempt to
2011 2025 * abort all outstanding commands in the transport layer for the particular
2012 2026 * target or logical unit.
2013 2027 */
2014 2028 /*ARGSUSED*/
2015 2029 static int
2016 2030 mrsas_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
2017 2031 {
2018 2032 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2019 2033
2020 2034 /* abort command not supported by H/W */
2021 2035
2022 2036 return (DDI_FAILURE);
2023 2037 }
2024 2038
2025 2039 /*
2026 2040 * tran_reset - reset either the SCSI bus or target
2027 2041 * @ap:
2028 2042 * @level:
2029 2043 *
2030 2044 * The tran_reset() entry point for a SCSI HBA driver is called to reset either
2031 2045 * the SCSI bus or a particular SCSI target device. This entry point is called
2032 2046 * when a target driver calls scsi_reset(). The tran_reset() entry point must
2033 2047 * reset the SCSI bus if level is RESET_ALL. If level is RESET_TARGET, just the
2034 2048 * particular target or logical unit must be reset.
2035 2049 */
2036 2050 /*ARGSUSED*/
2037 2051 static int
2038 2052 mrsas_tran_reset(struct scsi_address *ap, int level)
2039 2053 {
2040 2054 struct mrsas_instance *instance = ADDR2MR(ap);
2041 2055
2042 2056 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2043 2057
2044 2058 if (wait_for_outstanding(instance)) {
2045 2059 con_log(CL_ANN1,
2046 2060 (CE_CONT, "wait_for_outstanding: return FAIL.\n"));
2047 2061 return (DDI_FAILURE);
2048 2062 } else {
2049 2063 return (DDI_SUCCESS);
2050 2064 }
2051 2065 }
2052 2066
2053 2067 /*
2054 2068 * tran_getcap - get one of a set of SCSA-defined capabilities
2055 2069 * @ap:
2056 2070 * @cap:
2057 2071 * @whom:
2058 2072 *
2059 2073 * The target driver can request the current setting of the capability for a
2060 2074 * particular target by setting the whom parameter to nonzero. A whom value of
2061 2075 * zero indicates a request for the current setting of the general capability
2062 2076 * for the SCSI bus or for adapter hardware. The tran_getcap() should return -1
2063 2077 * for undefined capabilities or the current value of the requested capability.
2064 2078 */
2065 2079 /*ARGSUSED*/
2066 2080 static int
2067 2081 mrsas_tran_getcap(struct scsi_address *ap, char *cap, int whom)
2068 2082 {
2069 2083 int rval = 0;
2070 2084
2071 2085 struct mrsas_instance *instance = ADDR2MR(ap);
2072 2086
2073 2087 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2074 2088
2075 2089 /* we do allow inquiring about capabilities for other targets */
2076 2090 if (cap == NULL) {
2077 2091 return (-1);
2078 2092 }
2079 2093
2080 2094 switch (scsi_hba_lookup_capstr(cap)) {
2081 2095 case SCSI_CAP_DMA_MAX:
2082 2096 if (instance->tbolt) {
2083 2097 /* Limit to 256k max transfer */
2084 2098 rval = mrsas_tbolt_max_cap_maxxfer;
2085 2099 } else {
2086 2100 /* Limit to 16MB max transfer */
2087 2101 rval = mrsas_max_cap_maxxfer;
2088 2102 }
2089 2103 break;
2090 2104 case SCSI_CAP_MSG_OUT:
2091 2105 rval = 1;
2092 2106 break;
2093 2107 case SCSI_CAP_DISCONNECT:
2094 2108 rval = 0;
2095 2109 break;
2096 2110 case SCSI_CAP_SYNCHRONOUS:
2097 2111 rval = 0;
2098 2112 break;
2099 2113 case SCSI_CAP_WIDE_XFER:
2100 2114 rval = 1;
2101 2115 break;
2102 2116 case SCSI_CAP_TAGGED_QING:
2103 2117 rval = 1;
2104 2118 break;
2105 2119 case SCSI_CAP_UNTAGGED_QING:
2106 2120 rval = 1;
2107 2121 break;
2108 2122 case SCSI_CAP_PARITY:
2109 2123 rval = 1;
2110 2124 break;
2111 2125 case SCSI_CAP_INITIATOR_ID:
2112 2126 rval = instance->init_id;
2113 2127 break;
2114 2128 case SCSI_CAP_ARQ:
2115 2129 rval = 1;
2116 2130 break;
2117 2131 case SCSI_CAP_LINKED_CMDS:
2118 2132 rval = 0;
2119 2133 break;
2120 2134 case SCSI_CAP_RESET_NOTIFICATION:
2121 2135 rval = 1;
2122 2136 break;
2123 2137 case SCSI_CAP_GEOMETRY:
2124 2138 rval = -1;
2125 2139
2126 2140 break;
2127 2141 default:
2128 2142 con_log(CL_DLEVEL2, (CE_NOTE, "Default cap coming 0x%x",
2129 2143 scsi_hba_lookup_capstr(cap)));
2130 2144 rval = -1;
2131 2145 break;
2132 2146 }
2133 2147
2134 2148 return (rval);
2135 2149 }
2136 2150
2137 2151 /*
2138 2152 * tran_setcap - set one of a set of SCSA-defined capabilities
2139 2153 * @ap:
2140 2154 * @cap:
2141 2155 * @value:
2142 2156 * @whom:
2143 2157 *
2144 2158 * The target driver might request that the new value be set for a particular
2145 2159 * target by setting the whom parameter to nonzero. A whom value of zero
2146 2160 * means that request is to set the new value for the SCSI bus or for adapter
2147 2161 * hardware in general.
2148 2162 * The tran_setcap() should return the following values as appropriate:
2149 2163 * - -1 for undefined capabilities
2150 2164 * - 0 if the HBA driver cannot set the capability to the requested value
2151 2165 * - 1 if the HBA driver is able to set the capability to the requested value
2152 2166 */
2153 2167 /*ARGSUSED*/
2154 2168 static int
2155 2169 mrsas_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
2156 2170 {
2157 2171 int rval = 1;
2158 2172
2159 2173 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2160 2174
2161 2175 /* We don't allow setting capabilities for other targets */
2162 2176 if (cap == NULL || whom == 0) {
2163 2177 return (-1);
2164 2178 }
2165 2179
2166 2180 switch (scsi_hba_lookup_capstr(cap)) {
2167 2181 case SCSI_CAP_DMA_MAX:
2168 2182 case SCSI_CAP_MSG_OUT:
2169 2183 case SCSI_CAP_PARITY:
2170 2184 case SCSI_CAP_LINKED_CMDS:
2171 2185 case SCSI_CAP_RESET_NOTIFICATION:
2172 2186 case SCSI_CAP_DISCONNECT:
2173 2187 case SCSI_CAP_SYNCHRONOUS:
2174 2188 case SCSI_CAP_UNTAGGED_QING:
2175 2189 case SCSI_CAP_WIDE_XFER:
2176 2190 case SCSI_CAP_INITIATOR_ID:
2177 2191 case SCSI_CAP_ARQ:
2178 2192 /*
2179 2193 * None of these are settable via
2180 2194 * the capability interface.
2181 2195 */
2182 2196 break;
2183 2197 case SCSI_CAP_TAGGED_QING:
2184 2198 rval = 1;
2185 2199 break;
2186 2200 case SCSI_CAP_SECTOR_SIZE:
2187 2201 rval = 1;
2188 2202 break;
2189 2203
2190 2204 case SCSI_CAP_TOTAL_SECTORS:
2191 2205 rval = 1;
2192 2206 break;
2193 2207 default:
2194 2208 rval = -1;
2195 2209 break;
2196 2210 }
2197 2211
2198 2212 return (rval);
2199 2213 }
2200 2214
2201 2215 /*
2202 2216 * tran_destroy_pkt - deallocate scsi_pkt structure
2203 2217 * @ap:
2204 2218 * @pkt:
2205 2219 *
2206 2220 * The tran_destroy_pkt() entry point is the HBA driver function that
2207 2221 * deallocates scsi_pkt structures. The tran_destroy_pkt() entry point is
2208 2222 * called when the target driver calls scsi_destroy_pkt(). The
2209 2223 * tran_destroy_pkt() entry point must free any DMA resources that have been
2210 2224 * allocated for the packet. An implicit DMA synchronization occurs if the
2211 2225 * DMA resources are freed and any cached data remains after the completion
2212 2226 * of the transfer.
2213 2227 */
2214 2228 static void
2215 2229 mrsas_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2216 2230 {
2217 2231 struct scsa_cmd *acmd = PKT2CMD(pkt);
2218 2232
2219 2233 con_log(CL_DLEVEL2, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2220 2234
2221 2235 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2222 2236 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2223 2237
2224 2238 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2225 2239
2226 2240 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2227 2241
2228 2242 acmd->cmd_dmahandle = NULL;
2229 2243 }
2230 2244
2231 2245 /* free the pkt */
2232 2246 scsi_hba_pkt_free(ap, pkt);
2233 2247 }
2234 2248
2235 2249 /*
2236 2250 * tran_dmafree - deallocates DMA resources
2237 2251 * @ap:
2238 2252 * @pkt:
2239 2253 *
2240 2254 * The tran_dmafree() entry point deallocates DMAQ resources that have been
2241 2255 * allocated for a scsi_pkt structure. The tran_dmafree() entry point is
2242 2256 * called when the target driver calls scsi_dmafree(). The tran_dmafree() must
2243 2257 * free only DMA resources allocated for a scsi_pkt structure, not the
2244 2258 * scsi_pkt itself. When DMA resources are freed, a DMA synchronization is
2245 2259 * implicitly performed.
2246 2260 */
2247 2261 /*ARGSUSED*/
2248 2262 static void
2249 2263 mrsas_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2250 2264 {
2251 2265 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2252 2266
2253 2267 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2254 2268
2255 2269 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2256 2270 acmd->cmd_flags &= ~CFLAG_DMAVALID;
2257 2271
2258 2272 (void) ddi_dma_unbind_handle(acmd->cmd_dmahandle);
2259 2273
2260 2274 ddi_dma_free_handle(&acmd->cmd_dmahandle);
2261 2275
2262 2276 acmd->cmd_dmahandle = NULL;
2263 2277 }
2264 2278 }
2265 2279
2266 2280 /*
2267 2281 * tran_sync_pkt - synchronize the DMA object allocated
2268 2282 * @ap:
2269 2283 * @pkt:
2270 2284 *
2271 2285 * The tran_sync_pkt() entry point synchronizes the DMA object allocated for
2272 2286 * the scsi_pkt structure before or after a DMA transfer. The tran_sync_pkt()
2273 2287 * entry point is called when the target driver calls scsi_sync_pkt(). If the
2274 2288 * data transfer direction is a DMA read from device to memory, tran_sync_pkt()
2275 2289 * must synchronize the CPU's view of the data. If the data transfer direction
2276 2290 * is a DMA write from memory to device, tran_sync_pkt() must synchronize the
2277 2291 * device's view of the data.
2278 2292 */
2279 2293 /*ARGSUSED*/
2280 2294 static void
2281 2295 mrsas_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2282 2296 {
2283 2297 register struct scsa_cmd *acmd = PKT2CMD(pkt);
2284 2298
2285 2299 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2286 2300
2287 2301 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2288 2302 (void) ddi_dma_sync(acmd->cmd_dmahandle, acmd->cmd_dma_offset,
2289 2303 acmd->cmd_dma_len, (acmd->cmd_flags & CFLAG_DMASEND) ?
2290 2304 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
2291 2305 }
2292 2306 }
2293 2307
2294 2308 /*ARGSUSED*/
2295 2309 static int
2296 2310 mrsas_tran_quiesce(dev_info_t *dip)
2297 2311 {
2298 2312 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2299 2313
2300 2314 return (1);
2301 2315 }
2302 2316
2303 2317 /*ARGSUSED*/
2304 2318 static int
2305 2319 mrsas_tran_unquiesce(dev_info_t *dip)
2306 2320 {
2307 2321 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2308 2322
2309 2323 return (1);
2310 2324 }
2311 2325
2312 2326
2313 2327 /*
2314 2328 * mrsas_isr(caddr_t)
2315 2329 *
2316 2330 * The Interrupt Service Routine
2317 2331 *
2318 2332 * Collect status for all completed commands and do callback
2319 2333 *
2320 2334 */
2321 2335 static uint_t
2322 2336 mrsas_isr(struct mrsas_instance *instance)
2323 2337 {
2324 2338 int need_softintr;
2325 2339 uint32_t producer;
2326 2340 uint32_t consumer;
2327 2341 uint32_t context;
2328 2342 int retval;
2329 2343
2330 2344 struct mrsas_cmd *cmd;
2331 2345 struct mrsas_header *hdr;
2332 2346 struct scsi_pkt *pkt;
2333 2347
2334 2348 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2335 2349 ASSERT(instance);
2336 2350 if (instance->tbolt) {
2337 2351 mutex_enter(&instance->chip_mtx);
2338 2352 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2339 2353 !(instance->func_ptr->intr_ack(instance))) {
2340 2354 mutex_exit(&instance->chip_mtx);
2341 2355 return (DDI_INTR_UNCLAIMED);
2342 2356 }
2343 2357 retval = mr_sas_tbolt_process_outstanding_cmd(instance);
2344 2358 mutex_exit(&instance->chip_mtx);
2345 2359 return (retval);
2346 2360 } else {
2347 2361 if ((instance->intr_type == DDI_INTR_TYPE_FIXED) &&
2348 2362 !instance->func_ptr->intr_ack(instance)) {
2349 2363 return (DDI_INTR_UNCLAIMED);
2350 2364 }
2351 2365 }
2352 2366
2353 2367 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2354 2368 0, 0, DDI_DMA_SYNC_FORCPU);
2355 2369
2356 2370 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2357 2371 != DDI_SUCCESS) {
2358 2372 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2359 2373 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2360 2374 con_log(CL_ANN1, (CE_WARN,
2361 2375 "mr_sas_isr(): FMA check, returning DDI_INTR_UNCLAIMED"));
2362 2376 return (DDI_INTR_CLAIMED);
2363 2377 }
2364 2378 con_log(CL_ANN1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
2365 2379
2366 2380 #ifdef OCRDEBUG
2367 2381 if (debug_consecutive_timeout_after_ocr_g == 1) {
2368 2382 con_log(CL_ANN1, (CE_NOTE,
2369 2383 "simulating consecutive timeout after ocr"));
2370 2384 return (DDI_INTR_CLAIMED);
2371 2385 }
2372 2386 #endif
2373 2387
2374 2388 mutex_enter(&instance->completed_pool_mtx);
2375 2389 mutex_enter(&instance->cmd_pend_mtx);
2376 2390
2377 2391 producer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2378 2392 instance->producer);
2379 2393 consumer = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2380 2394 instance->consumer);
2381 2395
2382 2396 con_log(CL_ANN, (CE_CONT, " producer %x consumer %x ",
2383 2397 producer, consumer));
2384 2398 if (producer == consumer) {
2385 2399 con_log(CL_ANN, (CE_WARN, "producer == consumer case"));
2386 2400 DTRACE_PROBE2(isr_pc_err, uint32_t, producer,
2387 2401 uint32_t, consumer);
2388 2402 mutex_exit(&instance->cmd_pend_mtx);
2389 2403 mutex_exit(&instance->completed_pool_mtx);
2390 2404 return (DDI_INTR_CLAIMED);
2391 2405 }
2392 2406
2393 2407 while (consumer != producer) {
2394 2408 context = ddi_get32(instance->mfi_internal_dma_obj.acc_handle,
2395 2409 &instance->reply_queue[consumer]);
2396 2410 cmd = instance->cmd_list[context];
2397 2411
2398 2412 if (cmd->sync_cmd == MRSAS_TRUE) {
2399 2413 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2400 2414 if (hdr) {
2401 2415 mlist_del_init(&cmd->list);
2402 2416 }
2403 2417 } else {
2404 2418 pkt = cmd->pkt;
2405 2419 if (pkt) {
2406 2420 mlist_del_init(&cmd->list);
2407 2421 }
2408 2422 }
2409 2423
2410 2424 mlist_add_tail(&cmd->list, &instance->completed_pool_list);
2411 2425
2412 2426 consumer++;
2413 2427 if (consumer == (instance->max_fw_cmds + 1)) {
2414 2428 consumer = 0;
2415 2429 }
2416 2430 }
2417 2431 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
2418 2432 instance->consumer, consumer);
2419 2433 mutex_exit(&instance->cmd_pend_mtx);
2420 2434 mutex_exit(&instance->completed_pool_mtx);
2421 2435
2422 2436 (void) ddi_dma_sync(instance->mfi_internal_dma_obj.dma_handle,
2423 2437 0, 0, DDI_DMA_SYNC_FORDEV);
2424 2438
2425 2439 if (instance->softint_running) {
2426 2440 need_softintr = 0;
2427 2441 } else {
2428 2442 need_softintr = 1;
2429 2443 }
2430 2444
2431 2445 if (instance->isr_level == HIGH_LEVEL_INTR) {
2432 2446 if (need_softintr) {
2433 2447 ddi_trigger_softintr(instance->soft_intr_id);
2434 2448 }
2435 2449 } else {
2436 2450 /*
2437 2451 * Not a high-level interrupt, therefore call the soft level
2438 2452 * interrupt explicitly
2439 2453 */
2440 2454 (void) mrsas_softintr(instance);
2441 2455 }
2442 2456
2443 2457 return (DDI_INTR_CLAIMED);
2444 2458 }
2445 2459
2446 2460
2447 2461 /*
2448 2462 * ************************************************************************** *
2449 2463 * *
2450 2464 * libraries *
2451 2465 * *
2452 2466 * ************************************************************************** *
2453 2467 */
|
↓ open down ↓ |
453 lines elided |
↑ open up ↑ |
2454 2468 /*
2455 2469 * get_mfi_pkt : Get a command from the free pool
2456 2470 * After successful allocation, the caller of this routine
2457 2471 * must clear the frame buffer (memset to zero) before
2458 2472 * using the packet further.
2459 2473 *
2460 2474 * ***** Note *****
2461 2475 * After clearing the frame buffer the context id of the
2462 2476 * frame buffer SHOULD be restored back.
2463 2477 */
2464 -static struct mrsas_cmd *
2465 -get_mfi_pkt(struct mrsas_instance *instance)
2478 +struct mrsas_cmd *
2479 +mrsas_get_mfi_pkt(struct mrsas_instance *instance)
2466 2480 {
2467 2481 mlist_t *head = &instance->cmd_pool_list;
2468 2482 struct mrsas_cmd *cmd = NULL;
2469 2483
2470 2484 mutex_enter(&instance->cmd_pool_mtx);
2471 2485
2472 2486 if (!mlist_empty(head)) {
2473 2487 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2474 2488 mlist_del_init(head->next);
2475 2489 }
2476 2490 if (cmd != NULL) {
2477 2491 cmd->pkt = NULL;
2478 2492 cmd->retry_count_for_ocr = 0;
2479 2493 cmd->drv_pkt_time = 0;
2480 2494
2481 2495 }
2482 2496 mutex_exit(&instance->cmd_pool_mtx);
2483 2497
2484 2498 return (cmd);
2485 2499 }
2486 2500
2487 2501 static struct mrsas_cmd *
2488 2502 get_mfi_app_pkt(struct mrsas_instance *instance)
2489 2503 {
2490 2504 mlist_t *head = &instance->app_cmd_pool_list;
2491 2505 struct mrsas_cmd *cmd = NULL;
2492 2506
2493 2507 mutex_enter(&instance->app_cmd_pool_mtx);
2494 2508
2495 2509 if (!mlist_empty(head)) {
2496 2510 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2497 2511 mlist_del_init(head->next);
2498 2512 }
2499 2513 if (cmd != NULL) {
2500 2514 cmd->pkt = NULL;
2501 2515 cmd->retry_count_for_ocr = 0;
|
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
2502 2516 cmd->drv_pkt_time = 0;
2503 2517 }
2504 2518
2505 2519 mutex_exit(&instance->app_cmd_pool_mtx);
2506 2520
2507 2521 return (cmd);
2508 2522 }
2509 2523 /*
2510 2524 * return_mfi_pkt : Return a cmd to free command pool
2511 2525 */
2512 -static void
2513 -return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2526 +void
2527 +mrsas_return_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2514 2528 {
2515 2529 mutex_enter(&instance->cmd_pool_mtx);
2516 2530 /* use mlist_add_tail for debug assistance */
2517 2531 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2518 2532
2519 2533 mutex_exit(&instance->cmd_pool_mtx);
2520 2534 }
2521 2535
2522 2536 static void
2523 2537 return_mfi_app_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2524 2538 {
2525 2539 mutex_enter(&instance->app_cmd_pool_mtx);
2526 2540
2527 2541 mlist_add(&cmd->list, &instance->app_cmd_pool_list);
2528 2542
2529 2543 mutex_exit(&instance->app_cmd_pool_mtx);
2530 2544 }
2531 2545 void
2532 2546 push_pending_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2533 2547 {
2534 2548 struct scsi_pkt *pkt;
2535 2549 struct mrsas_header *hdr;
2536 2550 con_log(CL_DLEVEL2, (CE_NOTE, "push_pending_pkt(): Called\n"));
2537 2551 mutex_enter(&instance->cmd_pend_mtx);
2538 2552 mlist_del_init(&cmd->list);
2539 2553 mlist_add_tail(&cmd->list, &instance->cmd_pend_list);
2540 2554 if (cmd->sync_cmd == MRSAS_TRUE) {
2541 2555 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2542 2556 if (hdr) {
2543 2557 con_log(CL_ANN1, (CE_CONT,
2544 2558 "push_pending_mfi_pkt: "
2545 2559 "cmd %p index %x "
2546 2560 "time %llx",
2547 2561 (void *)cmd, cmd->index,
2548 2562 gethrtime()));
2549 2563 /* Wait for specified interval */
2550 2564 cmd->drv_pkt_time = ddi_get16(
2551 2565 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2552 2566 if (cmd->drv_pkt_time < debug_timeout_g)
2553 2567 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2554 2568 con_log(CL_ANN1, (CE_CONT,
2555 2569 "push_pending_pkt(): "
2556 2570 "Called IO Timeout Value %x\n",
2557 2571 cmd->drv_pkt_time));
2558 2572 }
2559 2573 if (hdr && instance->timeout_id == (timeout_id_t)-1) {
2560 2574 instance->timeout_id = timeout(io_timeout_checker,
2561 2575 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
2562 2576 }
2563 2577 } else {
2564 2578 pkt = cmd->pkt;
2565 2579 if (pkt) {
2566 2580 con_log(CL_ANN1, (CE_CONT,
2567 2581 "push_pending_mfi_pkt: "
2568 2582 "cmd %p index %x pkt %p, "
2569 2583 "time %llx",
2570 2584 (void *)cmd, cmd->index, (void *)pkt,
2571 2585 gethrtime()));
2572 2586 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2573 2587 }
2574 2588 if (pkt && instance->timeout_id == (timeout_id_t)-1) {
2575 2589 instance->timeout_id = timeout(io_timeout_checker,
2576 2590 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
2577 2591 }
2578 2592 }
2579 2593
2580 2594 mutex_exit(&instance->cmd_pend_mtx);
2581 2595
2582 2596 }
2583 2597
2584 2598 int
2585 2599 mrsas_print_pending_cmds(struct mrsas_instance *instance)
2586 2600 {
2587 2601 mlist_t *head = &instance->cmd_pend_list;
2588 2602 mlist_t *tmp = head;
2589 2603 struct mrsas_cmd *cmd = NULL;
2590 2604 struct mrsas_header *hdr;
2591 2605 unsigned int flag = 1;
2592 2606 struct scsi_pkt *pkt;
2593 2607 int saved_level;
2594 2608 int cmd_count = 0;
2595 2609
2596 2610 saved_level = debug_level_g;
2597 2611 debug_level_g = CL_ANN1;
2598 2612
2599 2613 cmn_err(CE_NOTE, "mrsas_print_pending_cmds(): Called\n");
2600 2614
2601 2615 while (flag) {
2602 2616 mutex_enter(&instance->cmd_pend_mtx);
2603 2617 tmp = tmp->next;
2604 2618 if (tmp == head) {
2605 2619 mutex_exit(&instance->cmd_pend_mtx);
2606 2620 flag = 0;
2607 2621 con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds():"
2608 2622 " NO MORE CMDS PENDING....\n"));
2609 2623 break;
2610 2624 } else {
2611 2625 cmd = mlist_entry(tmp, struct mrsas_cmd, list);
2612 2626 mutex_exit(&instance->cmd_pend_mtx);
2613 2627 if (cmd) {
2614 2628 if (cmd->sync_cmd == MRSAS_TRUE) {
2615 2629 hdr = (struct mrsas_header *)
2616 2630 &cmd->frame->hdr;
2617 2631 if (hdr) {
2618 2632 con_log(CL_ANN1, (CE_CONT,
2619 2633 "print: cmd %p index 0x%x "
2620 2634 "drv_pkt_time 0x%x (NO-PKT)"
2621 2635 " hdr %p\n", (void *)cmd,
2622 2636 cmd->index,
2623 2637 cmd->drv_pkt_time,
2624 2638 (void *)hdr));
2625 2639 }
2626 2640 } else {
2627 2641 pkt = cmd->pkt;
2628 2642 if (pkt) {
2629 2643 con_log(CL_ANN1, (CE_CONT,
2630 2644 "print: cmd %p index 0x%x "
2631 2645 "drv_pkt_time 0x%x pkt %p \n",
2632 2646 (void *)cmd, cmd->index,
2633 2647 cmd->drv_pkt_time, (void *)pkt));
2634 2648 }
2635 2649 }
2636 2650
2637 2651 if (++cmd_count == 1) {
2638 2652 mrsas_print_cmd_details(instance, cmd,
2639 2653 0xDD);
2640 2654 } else {
2641 2655 mrsas_print_cmd_details(instance, cmd,
2642 2656 1);
2643 2657 }
2644 2658
2645 2659 }
2646 2660 }
2647 2661 }
2648 2662 con_log(CL_ANN1, (CE_CONT, "mrsas_print_pending_cmds(): Done\n"));
2649 2663
2650 2664
2651 2665 debug_level_g = saved_level;
2652 2666
2653 2667 return (DDI_SUCCESS);
2654 2668 }
2655 2669
2656 2670
2657 2671 int
2658 2672 mrsas_complete_pending_cmds(struct mrsas_instance *instance)
2659 2673 {
2660 2674
2661 2675 struct mrsas_cmd *cmd = NULL;
2662 2676 struct scsi_pkt *pkt;
2663 2677 struct mrsas_header *hdr;
2664 2678
2665 2679 struct mlist_head *pos, *next;
2666 2680
2667 2681 con_log(CL_ANN1, (CE_NOTE,
2668 2682 "mrsas_complete_pending_cmds(): Called"));
2669 2683
2670 2684 mutex_enter(&instance->cmd_pend_mtx);
2671 2685 mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
2672 2686 cmd = mlist_entry(pos, struct mrsas_cmd, list);
2673 2687 if (cmd) {
2674 2688 pkt = cmd->pkt;
2675 2689 if (pkt) { /* for IO */
2676 2690 if (((pkt->pkt_flags & FLAG_NOINTR)
2677 2691 == 0) && pkt->pkt_comp) {
2678 2692 pkt->pkt_reason
2679 2693 = CMD_DEV_GONE;
2680 2694 pkt->pkt_statistics
2681 2695 = STAT_DISCON;
2682 2696 con_log(CL_ANN1, (CE_CONT,
2683 2697 "fail and posting to scsa "
2684 2698 "cmd %p index %x"
2685 2699 " pkt %p "
2686 2700 "time : %llx",
2687 2701 (void *)cmd, cmd->index,
2688 2702 (void *)pkt, gethrtime()));
2689 2703 (*pkt->pkt_comp)(pkt);
2690 2704 }
2691 2705 } else { /* for DCMDS */
2692 2706 if (cmd->sync_cmd == MRSAS_TRUE) {
2693 2707 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2694 2708 con_log(CL_ANN1, (CE_CONT,
2695 2709 "posting invalid status to application "
2696 2710 "cmd %p index %x"
2697 2711 " hdr %p "
2698 2712 "time : %llx",
2699 2713 (void *)cmd, cmd->index,
2700 2714 (void *)hdr, gethrtime()));
2701 2715 hdr->cmd_status = MFI_STAT_INVALID_STATUS;
2702 2716 complete_cmd_in_sync_mode(instance, cmd);
2703 2717 }
2704 2718 }
2705 2719 mlist_del_init(&cmd->list);
2706 2720 } else {
2707 2721 con_log(CL_ANN1, (CE_CONT,
2708 2722 "mrsas_complete_pending_cmds:"
2709 2723 "NULL command\n"));
2710 2724 }
2711 2725 con_log(CL_ANN1, (CE_CONT,
2712 2726 "mrsas_complete_pending_cmds:"
2713 2727 "looping for more commands\n"));
2714 2728 }
2715 2729 mutex_exit(&instance->cmd_pend_mtx);
2716 2730
2717 2731 con_log(CL_ANN1, (CE_CONT, "mrsas_complete_pending_cmds(): DONE\n"));
2718 2732 return (DDI_SUCCESS);
2719 2733 }
2720 2734
/*
 * mrsas_print_cmd_details - dump the interesting fields of an MFI/MPI2
 * command via con_log() for debugging.
 *
 * instance - per-controller soft state the command belongs to.
 * cmd      - command to dump.
 * detail   - 0xDD requests a verbose dump: debug_level_g is temporarily
 *            forced to CL_ANN1 so the CL_ANN1 con_log() calls below
 *            actually print, and the MPI2 request is dumped for
 *            Thunderbolt controllers.
 */
void
mrsas_print_cmd_details(struct mrsas_instance *instance, struct mrsas_cmd *cmd,
    int detail)
{
	struct scsi_pkt *pkt = cmd->pkt;
	Mpi2RaidSCSIIORequest_t *scsi_io = cmd->scsi_io_request;
	int i;
	int saved_level;
	ddi_acc_handle_t acc_handle =
	    instance->mpi2_frame_pool_dma_obj.acc_handle;

	/*
	 * Verbose mode: raise the global debug level for the duration of
	 * this call; restored at the bottom under the same condition, so
	 * saved_level is set iff it is later read.
	 * NOTE(review): debug_level_g is modified without synchronization;
	 * concurrent callers could clobber each other's saved level.
	 */
	if (detail == 0xDD) {
		saved_level = debug_level_g;
		debug_level_g = CL_ANN1;
	}


	/* Thunderbolt (MPI2) commands additionally carry an SMID. */
	if (instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x SMID 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->SMID, cmd->drv_pkt_time));
	} else {
		con_log(CL_ANN1, (CE_CONT, "print_cmd_details: cmd %p "
		    "cmd->index 0x%x timer 0x%x sec\n",
		    (void *)cmd, cmd->index, cmd->drv_pkt_time));
	}

	/* A command without a scsi_pkt is an internal (DCMD) command. */
	if (pkt) {
		con_log(CL_ANN1, (CE_CONT, "scsi_pkt CDB[0]=0x%x",
		    pkt->pkt_cdbp[0]));
	} else {
		con_log(CL_ANN1, (CE_CONT, "NO-PKT"));
	}

	/*
	 * Verbose + Thunderbolt: dump the raw MPI2 RAID SCSI IO request
	 * and its RAID context straight from the DMA frame (hence the
	 * ddi_get* accesses through the frame pool's access handle).
	 */
	if ((detail == 0xDD) && instance->tbolt) {
		con_log(CL_ANN1, (CE_CONT, "RAID_SCSI_IO_REQUEST\n"));
		con_log(CL_ANN1, (CE_CONT, "DevHandle=0x%X Function=0x%X "
		    "IoFlags=0x%X SGLFlags=0x%X DataLength=0x%X\n",
		    ddi_get16(acc_handle, &scsi_io->DevHandle),
		    ddi_get8(acc_handle, &scsi_io->Function),
		    ddi_get16(acc_handle, &scsi_io->IoFlags),
		    ddi_get16(acc_handle, &scsi_io->SGLFlags),
		    ddi_get32(acc_handle, &scsi_io->DataLength)));

		for (i = 0; i < 32; i++) {
			con_log(CL_ANN1, (CE_CONT, "CDB[%d]=0x%x ", i,
			    ddi_get8(acc_handle, &scsi_io->CDB.CDB32[i])));
		}

		con_log(CL_ANN1, (CE_CONT, "RAID-CONTEXT\n"));
		con_log(CL_ANN1, (CE_CONT, "status=0x%X extStatus=0x%X "
		    "ldTargetId=0x%X timeoutValue=0x%X regLockFlags=0x%X "
		    "RAIDFlags=0x%X regLockRowLBA=0x%" PRIu64
		    " regLockLength=0x%X spanArm=0x%X\n",
		    ddi_get8(acc_handle, &scsi_io->RaidContext.status),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.extStatus),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.ldTargetId),
		    ddi_get16(acc_handle, &scsi_io->RaidContext.timeoutValue),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.regLockFlags),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.RAIDFlags),
		    ddi_get64(acc_handle, &scsi_io->RaidContext.regLockRowLBA),
		    ddi_get32(acc_handle, &scsi_io->RaidContext.regLockLength),
		    ddi_get8(acc_handle, &scsi_io->RaidContext.spanArm)));
	}

	/* Restore the caller's debug level (set iff detail == 0xDD above). */
	if (detail == 0xDD) {
		debug_level_g = saved_level;
	}
}
2790 2804
2791 2805
/*
 * mrsas_issue_pending_cmds - re-issue every command parked on
 * instance->cmd_pend_list (used after an online controller reset).
 *
 * Each command's timeout is topped up to at least debug_timeout_g and its
 * OCR retry count bumped; if a command has already been retried more than
 * IO_RETRY_COUNT times the adapter is killed and DDI_FAILURE returned.
 * Commands are re-issued in sync mode (internal DCMDs) or fire-and-forget
 * mode (SCSA packets) via the per-generation func_ptr ops.
 *
 * Returns DDI_SUCCESS when the whole list was re-issued, DDI_FAILURE if
 * the adapter had to be killed.
 *
 * NOTE(review): the initial read of head->next happens before
 * cmd_pend_mtx is taken, and each cmd is used after the mutex is
 * dropped; this relies on no concurrent mutation of the pend list
 * during OCR — confirm against the reset path's locking.
 */
int
mrsas_issue_pending_cmds(struct mrsas_instance *instance)
{
	mlist_t *head = &instance->cmd_pend_list;
	mlist_t *tmp = head->next;
	struct mrsas_cmd *cmd = NULL;
	struct scsi_pkt *pkt;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_pending_cmds(): Called"));
	while (tmp != head) {
		/* Fetch the next element under the pend-list mutex. */
		mutex_enter(&instance->cmd_pend_mtx);
		cmd = mlist_entry(tmp, struct mrsas_cmd, list);
		tmp = tmp->next;
		mutex_exit(&instance->cmd_pend_mtx);
		if (cmd) {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds(): "
			    "Got a cmd: cmd %p index 0x%x drv_pkt_time 0x%x ",
			    (void *)cmd, cmd->index, cmd->drv_pkt_time));

			/* Reset command timeout value */
			if (cmd->drv_pkt_time < debug_timeout_g)
				cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

			cmd->retry_count_for_ocr++;

			cmn_err(CE_CONT, "cmd retry count = %d\n",
			    cmd->retry_count_for_ocr);

			/*
			 * Too many OCR retries for this command: give up
			 * on the controller entirely rather than loop.
			 */
			if (cmd->retry_count_for_ocr > IO_RETRY_COUNT) {
				cmn_err(CE_WARN, "mrsas_issue_pending_cmds(): "
				    "cmd->retry_count exceeded limit >%d\n",
				    IO_RETRY_COUNT);
				mrsas_print_cmd_details(instance, cmd, 0xDD);

				cmn_err(CE_WARN,
				    "mrsas_issue_pending_cmds():"
				    "Calling KILL Adapter\n");
				if (instance->tbolt)
					mrsas_tbolt_kill_adapter(instance);
				else
					(void) mrsas_kill_adapter(instance);
				return (DDI_FAILURE);
			}

			pkt = cmd->pkt;
			if (pkt) {
				con_log(CL_ANN1, (CE_CONT,
				    "PENDING PKT-CMD ISSUE: cmd %p index %x "
				    "pkt %p time %llx",
				    (void *)cmd, cmd->index,
				    (void *)pkt,
				    gethrtime()));

			} else {
				cmn_err(CE_CONT,
				    "mrsas_issue_pending_cmds(): NO-PKT, "
				    "cmd %p index 0x%x drv_pkt_time 0x%x ",
				    (void *)cmd, cmd->index, cmd->drv_pkt_time);
			}


			/* Internal DCMDs go out in sync mode; IO does not. */
			if (cmd->sync_cmd == MRSAS_TRUE) {
				cmn_err(CE_CONT, "mrsas_issue_pending_cmds(): "
				    "SYNC_CMD == TRUE \n");
				instance->func_ptr->issue_cmd_in_sync_mode(
				    instance, cmd);
			} else {
				instance->func_ptr->issue_cmd(cmd, instance);
			}
		} else {
			con_log(CL_ANN1, (CE_CONT,
			    "mrsas_issue_pending_cmds: NULL command\n"));
		}
		con_log(CL_ANN1, (CE_CONT,
		    "mrsas_issue_pending_cmds:"
		    "looping for more commands"));
	}
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_pending_cmds(): DONE\n"));
	return (DDI_SUCCESS);
}
2873 2887
2874 2888
2875 2889
2876 2890 /*
2877 2891 * destroy_mfi_frame_pool
2878 2892 */
2879 2893 void
2880 2894 destroy_mfi_frame_pool(struct mrsas_instance *instance)
2881 2895 {
2882 2896 int i;
2883 2897 uint32_t max_cmd = instance->max_fw_cmds;
2884 2898
2885 2899 struct mrsas_cmd *cmd;
2886 2900
2887 2901 /* return all frames to pool */
2888 2902
2889 2903 for (i = 0; i < max_cmd; i++) {
2890 2904
2891 2905 cmd = instance->cmd_list[i];
2892 2906
2893 2907 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
2894 2908 (void) mrsas_free_dma_obj(instance, cmd->frame_dma_obj);
2895 2909
2896 2910 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
2897 2911 }
2898 2912
2899 2913 }
2900 2914
2901 2915 /*
2902 2916 * create_mfi_frame_pool
2903 2917 */
2904 2918 int
2905 2919 create_mfi_frame_pool(struct mrsas_instance *instance)
2906 2920 {
2907 2921 int i = 0;
2908 2922 int cookie_cnt;
2909 2923 uint16_t max_cmd;
2910 2924 uint16_t sge_sz;
2911 2925 uint32_t sgl_sz;
2912 2926 uint32_t tot_frame_size;
2913 2927 struct mrsas_cmd *cmd;
2914 2928 int retval = DDI_SUCCESS;
2915 2929
2916 2930 max_cmd = instance->max_fw_cmds;
2917 2931 sge_sz = sizeof (struct mrsas_sge_ieee);
2918 2932 /* calculated the number of 64byte frames required for SGL */
2919 2933 sgl_sz = sge_sz * instance->max_num_sge;
2920 2934 tot_frame_size = sgl_sz + MRMFI_FRAME_SIZE + SENSE_LENGTH;
2921 2935
2922 2936 con_log(CL_DLEVEL3, (CE_NOTE, "create_mfi_frame_pool: "
2923 2937 "sgl_sz %x tot_frame_size %x", sgl_sz, tot_frame_size));
2924 2938
2925 2939 while (i < max_cmd) {
2926 2940 cmd = instance->cmd_list[i];
2927 2941
2928 2942 cmd->frame_dma_obj.size = tot_frame_size;
2929 2943 cmd->frame_dma_obj.dma_attr = mrsas_generic_dma_attr;
2930 2944 cmd->frame_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
2931 2945 cmd->frame_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
2932 2946 cmd->frame_dma_obj.dma_attr.dma_attr_sgllen = 1;
2933 2947 cmd->frame_dma_obj.dma_attr.dma_attr_align = 64;
2934 2948
2935 2949 cookie_cnt = mrsas_alloc_dma_obj(instance, &cmd->frame_dma_obj,
2936 2950 (uchar_t)DDI_STRUCTURE_LE_ACC);
2937 2951
2938 2952 if (cookie_cnt == -1 || cookie_cnt > 1) {
2939 2953 cmn_err(CE_WARN,
2940 2954 "create_mfi_frame_pool: could not alloc.");
2941 2955 retval = DDI_FAILURE;
2942 2956 goto mrsas_undo_frame_pool;
2943 2957 }
2944 2958
2945 2959 bzero(cmd->frame_dma_obj.buffer, tot_frame_size);
2946 2960
2947 2961 cmd->frame_dma_obj_status = DMA_OBJ_ALLOCATED;
2948 2962 cmd->frame = (union mrsas_frame *)cmd->frame_dma_obj.buffer;
2949 2963 cmd->frame_phys_addr =
2950 2964 cmd->frame_dma_obj.dma_cookie[0].dmac_address;
2951 2965
2952 2966 cmd->sense = (uint8_t *)(((unsigned long)
2953 2967 cmd->frame_dma_obj.buffer) +
2954 2968 tot_frame_size - SENSE_LENGTH);
2955 2969 cmd->sense_phys_addr =
2956 2970 cmd->frame_dma_obj.dma_cookie[0].dmac_address +
2957 2971 tot_frame_size - SENSE_LENGTH;
2958 2972
2959 2973 if (!cmd->frame || !cmd->sense) {
2960 2974 cmn_err(CE_WARN,
2961 2975 "mr_sas: pci_pool_alloc failed");
2962 2976 retval = ENOMEM;
2963 2977 goto mrsas_undo_frame_pool;
2964 2978 }
2965 2979
2966 2980 ddi_put32(cmd->frame_dma_obj.acc_handle,
2967 2981 &cmd->frame->io.context, cmd->index);
2968 2982 i++;
2969 2983
2970 2984 con_log(CL_DLEVEL3, (CE_NOTE, "[%x]-%x",
2971 2985 cmd->index, cmd->frame_phys_addr));
2972 2986 }
2973 2987
2974 2988 return (DDI_SUCCESS);
2975 2989
2976 2990 mrsas_undo_frame_pool:
2977 2991 if (i > 0)
2978 2992 destroy_mfi_frame_pool(instance);
2979 2993
2980 2994 return (retval);
2981 2995 }
2982 2996
2983 2997 /*
2984 2998 * free_additional_dma_buffer
2985 2999 */
/*
 * free_additional_dma_buffer - release the DMA buffers allocated by
 * alloc_additional_dma_buffer(): the internal buffer (producer/consumer
 * indices + reply queue + scratch area) and the event-detail buffer.
 * Each is freed only if still marked DMA_OBJ_ALLOCATED, so this is safe
 * after a partial allocation.
 */
static void
free_additional_dma_buffer(struct mrsas_instance *instance)
{
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}
}
3001 3015
3002 3016 /*
3003 3017 * alloc_additional_dma_buffer
3004 3018 */
/*
 * alloc_additional_dma_buffer - allocate the MFI controller's shared
 * DMA areas.
 *
 * A single two-page buffer is carved up as:
 *   [0..3]                      producer index
 *   [4..7]                      consumer index
 *   [8..8+reply_q_sz)           reply queue (max_fw_cmds + 1 + 2 words)
 *   [8+reply_q_sz..2*PAGESIZE)  internal scratch buffer (internal_buf)
 * A separate buffer holds the asynchronous event detail structure.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE after freeing the internal buffer
 * if the event-detail allocation fails.
 *
 * NOTE(review): there is no explicit check that reply_q_sz + 8 plus a
 * useful internal_buf still fits in PAGESIZE*2 for large max_fw_cmds —
 * confirm against the controller's maximum command count.
 */
static int
alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t reply_q_sz;
	uint32_t internal_buf_size = PAGESIZE*2;

	/* max cmds plus 1 + producer & consumer */
	reply_q_sz = sizeof (uint32_t) * (instance->max_fw_cmds + 1 + 2);

	/* single-cookie, 32-bit addressable buffer */
	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* carve the buffer up per the layout described above */
	instance->producer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer);
	instance->consumer = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 4);
	instance->reply_queue = (uint32_t *)((unsigned long)
	    instance->mfi_internal_dma_obj.buffer + 8);
	instance->internal_buf = (caddr_t)(((unsigned long)
	    instance->mfi_internal_dma_obj.buffer) + reply_q_sz + 8);
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address +
	    (reply_q_sz + 8);
	instance->internal_buf_size = internal_buf_size -
	    (reply_q_sz + 8);

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN, "alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto mrsas_undo_internal_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	return (DDI_SUCCESS);

mrsas_undo_internal_buff:
	/* unwind: release the internal buffer allocated above */
	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}
3077 3091
3078 3092
3079 3093 void
3080 3094 mrsas_free_cmd_pool(struct mrsas_instance *instance)
3081 3095 {
3082 3096 int i;
3083 3097 uint32_t max_cmd;
3084 3098 size_t sz;
3085 3099
3086 3100 /* already freed */
3087 3101 if (instance->cmd_list == NULL) {
3088 3102 return;
3089 3103 }
3090 3104
3091 3105 max_cmd = instance->max_fw_cmds;
3092 3106
3093 3107 /* size of cmd_list array */
3094 3108 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3095 3109
3096 3110 /* First free each cmd */
3097 3111 for (i = 0; i < max_cmd; i++) {
3098 3112 if (instance->cmd_list[i] != NULL) {
3099 3113 kmem_free(instance->cmd_list[i],
3100 3114 sizeof (struct mrsas_cmd));
3101 3115 }
3102 3116
3103 3117 instance->cmd_list[i] = NULL;
3104 3118 }
3105 3119
3106 3120 /* Now, free cmd_list array */
3107 3121 if (instance->cmd_list != NULL)
3108 3122 kmem_free(instance->cmd_list, sz);
3109 3123
3110 3124 instance->cmd_list = NULL;
3111 3125
3112 3126 INIT_LIST_HEAD(&instance->cmd_pool_list);
3113 3127 INIT_LIST_HEAD(&instance->cmd_pend_list);
3114 3128 if (instance->tbolt) {
3115 3129 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
3116 3130 } else {
3117 3131 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3118 3132 }
3119 3133
3120 3134 }
3121 3135
3122 3136
3123 3137 /*
3124 3138 * mrsas_alloc_cmd_pool
3125 3139 */
3126 3140 int
3127 3141 mrsas_alloc_cmd_pool(struct mrsas_instance *instance)
3128 3142 {
3129 3143 int i;
3130 3144 int count;
3131 3145 uint32_t max_cmd;
3132 3146 uint32_t reserve_cmd;
3133 3147 size_t sz;
3134 3148
3135 3149 struct mrsas_cmd *cmd;
3136 3150
3137 3151 max_cmd = instance->max_fw_cmds;
3138 3152 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
3139 3153 "max_cmd %x", max_cmd));
3140 3154
3141 3155
3142 3156 sz = sizeof (struct mrsas_cmd *) * max_cmd;
3143 3157
3144 3158 /*
3145 3159 * instance->cmd_list is an array of struct mrsas_cmd pointers.
3146 3160 * Allocate the dynamic array first and then allocate individual
3147 3161 * commands.
3148 3162 */
3149 3163 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
3150 3164 ASSERT(instance->cmd_list);
3151 3165
3152 3166 /* create a frame pool and assign one frame to each cmd */
3153 3167 for (count = 0; count < max_cmd; count++) {
3154 3168 instance->cmd_list[count] =
|
↓ open down ↓ |
631 lines elided |
↑ open up ↑ |
3155 3169 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
3156 3170 ASSERT(instance->cmd_list[count]);
3157 3171 }
3158 3172
3159 3173 /* add all the commands to command pool */
3160 3174
3161 3175 INIT_LIST_HEAD(&instance->cmd_pool_list);
3162 3176 INIT_LIST_HEAD(&instance->cmd_pend_list);
3163 3177 INIT_LIST_HEAD(&instance->app_cmd_pool_list);
3164 3178
3165 - reserve_cmd = MRSAS_APP_RESERVED_CMDS;
3179 + /*
3180 + * When max_cmd is lower than MRSAS_APP_RESERVED_CMDS, how do I split
3181 + * into app_cmd and regular cmd? For now, just take
3182 + * max(1/8th of max, 4);
3183 + */
3184 + reserve_cmd = min(MRSAS_APP_RESERVED_CMDS,
3185 + max(max_cmd >> 3, MRSAS_APP_MIN_RESERVED_CMDS));
3166 3186
3167 3187 for (i = 0; i < reserve_cmd; i++) {
3168 3188 cmd = instance->cmd_list[i];
3169 3189 cmd->index = i;
3170 3190 mlist_add_tail(&cmd->list, &instance->app_cmd_pool_list);
3171 3191 }
3172 3192
3173 3193
3174 3194 for (i = reserve_cmd; i < max_cmd; i++) {
3175 3195 cmd = instance->cmd_list[i];
3176 3196 cmd->index = i;
3177 3197 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
3178 3198 }
3179 3199
3180 3200 return (DDI_SUCCESS);
3181 3201
3182 3202 mrsas_undo_cmds:
3183 3203 if (count > 0) {
3184 3204 /* free each cmd */
3185 3205 for (i = 0; i < count; i++) {
3186 3206 if (instance->cmd_list[i] != NULL) {
3187 3207 kmem_free(instance->cmd_list[i],
3188 3208 sizeof (struct mrsas_cmd));
3189 3209 }
3190 3210 instance->cmd_list[i] = NULL;
3191 3211 }
3192 3212 }
3193 3213
3194 3214 mrsas_undo_cmd_list:
3195 3215 if (instance->cmd_list != NULL)
3196 3216 kmem_free(instance->cmd_list, sz);
3197 3217 instance->cmd_list = NULL;
3198 3218
3199 3219 return (DDI_FAILURE);
3200 3220 }
3201 3221
3202 3222
3203 3223 /*
3204 3224 * free_space_for_mfi
3205 3225 */
3206 3226 static void
3207 3227 free_space_for_mfi(struct mrsas_instance *instance)
3208 3228 {
3209 3229
3210 3230 /* already freed */
3211 3231 if (instance->cmd_list == NULL) {
3212 3232 return;
3213 3233 }
3214 3234
3215 3235 /* Free additional dma buffer */
3216 3236 free_additional_dma_buffer(instance);
3217 3237
3218 3238 /* Free the MFI frame pool */
3219 3239 destroy_mfi_frame_pool(instance);
3220 3240
3221 3241 /* Free all the commands in the cmd_list */
3222 3242 /* Free the cmd_list buffer itself */
3223 3243 mrsas_free_cmd_pool(instance);
3224 3244 }
3225 3245
3226 3246 /*
3227 3247 * alloc_space_for_mfi
3228 3248 */
3229 3249 static int
3230 3250 alloc_space_for_mfi(struct mrsas_instance *instance)
3231 3251 {
3232 3252 /* Allocate command pool (memory for cmd_list & individual commands) */
3233 3253 if (mrsas_alloc_cmd_pool(instance)) {
3234 3254 cmn_err(CE_WARN, "error creating cmd pool");
3235 3255 return (DDI_FAILURE);
3236 3256 }
3237 3257
3238 3258 /* Allocate MFI Frame pool */
3239 3259 if (create_mfi_frame_pool(instance)) {
3240 3260 cmn_err(CE_WARN, "error creating frame DMA pool");
3241 3261 goto mfi_undo_cmd_pool;
3242 3262 }
3243 3263
3244 3264 /* Allocate additional DMA buffer */
3245 3265 if (alloc_additional_dma_buffer(instance)) {
3246 3266 cmn_err(CE_WARN, "error creating frame DMA pool");
3247 3267 goto mfi_undo_frame_pool;
3248 3268 }
3249 3269
3250 3270 return (DDI_SUCCESS);
3251 3271
3252 3272 mfi_undo_frame_pool:
3253 3273 destroy_mfi_frame_pool(instance);
3254 3274
3255 3275 mfi_undo_cmd_pool:
3256 3276 mrsas_free_cmd_pool(instance);
3257 3277
3258 3278 return (DDI_FAILURE);
3259 3279 }
3260 3280
3261 3281
3262 3282
3263 3283 /*
3264 3284 * get_ctrl_info
3265 3285 */
3266 3286 static int
3267 3287 get_ctrl_info(struct mrsas_instance *instance,
3268 3288 struct mrsas_ctrl_info *ctrl_info)
|
↓ open down ↓ |
93 lines elided |
↑ open up ↑ |
3269 3289 {
3270 3290 int ret = 0;
3271 3291
3272 3292 struct mrsas_cmd *cmd;
3273 3293 struct mrsas_dcmd_frame *dcmd;
3274 3294 struct mrsas_ctrl_info *ci;
3275 3295
3276 3296 if (instance->tbolt) {
3277 3297 cmd = get_raid_msg_mfi_pkt(instance);
3278 3298 } else {
3279 - cmd = get_mfi_pkt(instance);
3299 + cmd = mrsas_get_mfi_pkt(instance);
3280 3300 }
3281 3301
3282 3302 if (!cmd) {
3283 3303 con_log(CL_ANN, (CE_WARN,
3284 3304 "Failed to get a cmd for ctrl info"));
3285 3305 DTRACE_PROBE2(info_mfi_err, uint16_t, instance->fw_outstanding,
3286 3306 uint16_t, instance->max_fw_cmds);
3287 3307 return (DDI_FAILURE);
3288 3308 }
3289 3309
3290 3310 /* Clear the frame buffer and assign back the context id */
3291 3311 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
3292 3312 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3293 3313 cmd->index);
3294 3314
3295 3315 dcmd = &cmd->frame->dcmd;
3296 3316
3297 3317 ci = (struct mrsas_ctrl_info *)instance->internal_buf;
3298 3318
3299 3319 if (!ci) {
3300 3320 cmn_err(CE_WARN,
3301 3321 "Failed to alloc mem for ctrl info");
3302 - return_mfi_pkt(instance, cmd);
3322 + mrsas_return_mfi_pkt(instance, cmd);
3303 3323 return (DDI_FAILURE);
3304 3324 }
3305 3325
3306 3326 (void) memset(ci, 0, sizeof (struct mrsas_ctrl_info));
3307 3327
3308 3328 /* for( i = 0; i < DCMD_MBOX_SZ; i++ ) dcmd->mbox.b[i] = 0; */
3309 3329 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3310 3330
3311 3331 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3312 3332 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status,
3313 3333 MFI_CMD_STATUS_POLL_MODE);
3314 3334 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3315 3335 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3316 3336 MFI_FRAME_DIR_READ);
3317 3337 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3318 3338 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3319 3339 sizeof (struct mrsas_ctrl_info));
3320 3340 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3321 3341 MR_DCMD_CTRL_GET_INFO);
3322 3342 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3323 3343 instance->internal_buf_dmac_add);
3324 3344 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3325 3345 sizeof (struct mrsas_ctrl_info));
3326 3346
3327 3347 cmd->frame_count = 1;
3328 3348
3329 3349 if (instance->tbolt) {
3330 3350 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3331 3351 }
3332 3352
3333 3353 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3334 3354 ret = 0;
3335 3355
3336 3356 ctrl_info->max_request_size = ddi_get32(
3337 3357 cmd->frame_dma_obj.acc_handle, &ci->max_request_size);
3338 3358
3339 3359 ctrl_info->ld_present_count = ddi_get16(
3340 3360 cmd->frame_dma_obj.acc_handle, &ci->ld_present_count);
3341 3361
3342 3362 ctrl_info->properties.on_off_properties = ddi_get32(
3343 3363 cmd->frame_dma_obj.acc_handle,
3344 3364 &ci->properties.on_off_properties);
3345 3365 ddi_rep_get8(cmd->frame_dma_obj.acc_handle,
3346 3366 (uint8_t *)(ctrl_info->product_name),
3347 3367 (uint8_t *)(ci->product_name), 80 * sizeof (char),
3348 3368 DDI_DEV_AUTOINCR);
3349 3369 /* should get more members of ci with ddi_get when needed */
3350 3370 } else {
|
↓ open down ↓ |
38 lines elided |
↑ open up ↑ |
3351 3371 cmn_err(CE_WARN, "get_ctrl_info: Ctrl info failed");
3352 3372 ret = -1;
3353 3373 }
3354 3374
3355 3375 if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
3356 3376 ret = -1;
3357 3377 }
3358 3378 if (instance->tbolt) {
3359 3379 return_raid_msg_mfi_pkt(instance, cmd);
3360 3380 } else {
3361 - return_mfi_pkt(instance, cmd);
3381 + mrsas_return_mfi_pkt(instance, cmd);
3362 3382 }
3363 3383
3364 3384 return (ret);
3365 3385 }
3366 3386
3367 3387 /*
3368 3388 * abort_aen_cmd
3369 3389 */
3370 3390 static int
3371 3391 abort_aen_cmd(struct mrsas_instance *instance,
3372 3392 struct mrsas_cmd *cmd_to_abort)
3373 3393 {
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
3374 3394 int ret = 0;
3375 3395
3376 3396 struct mrsas_cmd *cmd;
3377 3397 struct mrsas_abort_frame *abort_fr;
3378 3398
3379 3399 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_aen:%d", __LINE__));
3380 3400
3381 3401 if (instance->tbolt) {
3382 3402 cmd = get_raid_msg_mfi_pkt(instance);
3383 3403 } else {
3384 - cmd = get_mfi_pkt(instance);
3404 + cmd = mrsas_get_mfi_pkt(instance);
3385 3405 }
3386 3406
3387 3407 if (!cmd) {
3388 3408 con_log(CL_ANN1, (CE_WARN,
3389 3409 "abort_aen_cmd():Failed to get a cmd for abort_aen_cmd"));
3390 3410 DTRACE_PROBE2(abort_mfi_err, uint16_t, instance->fw_outstanding,
3391 3411 uint16_t, instance->max_fw_cmds);
3392 3412 return (DDI_FAILURE);
3393 3413 }
3394 3414
3395 3415 /* Clear the frame buffer and assign back the context id */
3396 3416 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3397 3417 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3398 3418 cmd->index);
3399 3419
3400 3420 abort_fr = &cmd->frame->abort;
3401 3421
3402 3422 /* prepare and issue the abort frame */
3403 3423 ddi_put8(cmd->frame_dma_obj.acc_handle,
3404 3424 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3405 3425 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3406 3426 MFI_CMD_STATUS_SYNC_MODE);
3407 3427 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3408 3428 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3409 3429 cmd_to_abort->index);
3410 3430 ddi_put32(cmd->frame_dma_obj.acc_handle,
3411 3431 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3412 3432 ddi_put32(cmd->frame_dma_obj.acc_handle,
3413 3433 &abort_fr->abort_mfi_phys_addr_hi, 0);
3414 3434
3415 3435 instance->aen_cmd->abort_aen = 1;
3416 3436
3417 3437 cmd->frame_count = 1;
3418 3438
3419 3439 if (instance->tbolt) {
3420 3440 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3421 3441 }
3422 3442
3423 3443 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3424 3444 con_log(CL_ANN1, (CE_WARN,
3425 3445 "abort_aen_cmd: issue_cmd_in_poll_mode failed"));
3426 3446 ret = -1;
|
↓ open down ↓ |
32 lines elided |
↑ open up ↑ |
3427 3447 } else {
3428 3448 ret = 0;
3429 3449 }
3430 3450
3431 3451 instance->aen_cmd->abort_aen = 1;
3432 3452 instance->aen_cmd = 0;
3433 3453
3434 3454 if (instance->tbolt) {
3435 3455 return_raid_msg_mfi_pkt(instance, cmd);
3436 3456 } else {
3437 - return_mfi_pkt(instance, cmd);
3457 + mrsas_return_mfi_pkt(instance, cmd);
3438 3458 }
3439 3459
3440 3460 atomic_add_16(&instance->fw_outstanding, (-1));
3441 3461
3442 3462 return (ret);
3443 3463 }
3444 3464
3445 3465
/*
 * mrsas_build_init_cmd - build an MFI INIT frame in *cmd_ptr's frame.
 *
 * The init frame points at a queue-info structure that describes the
 * producer/consumer indices and the reply queue laid out in
 * mfi_internal_dma_obj (see alloc_additional_dma_buffer()).  Also
 * allocates drv_ver_dma_obj, fills it with the driver version string,
 * and plants its physical address in the frame so firmware can record
 * the driver version.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if the driver-version DMA buffer
 * could not be allocated (nothing else to undo at that point; the
 * caller owns the cmd).  On success, the caller must eventually free
 * drv_ver_dma_obj (mrsas_init_adapter_ppc does so on its fail path).
 */
static int
mrsas_build_init_cmd(struct mrsas_instance *instance,
    struct mrsas_cmd **cmd_ptr)
{
	struct mrsas_cmd *cmd;
	struct mrsas_init_frame *init_frame;
	struct mrsas_init_queue_info *initq_info;
	struct mrsas_drv_ver drv_ver_info;


	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	cmd = *cmd_ptr;


	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	init_frame = (struct mrsas_init_frame *)cmd->frame;
	/* queue info lives in the SGL area, 64 bytes into the frame */
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);

	/*
	 * Physical addresses of producer index (+0), consumer index (+4)
	 * and reply queue (+8) inside the internal DMA buffer — this
	 * layout must match alloc_additional_dma_buffer().
	 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	/* polled INIT command; queue info follows the 64-byte frame */
	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);


	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN, (CE_WARN,
		    "init_mfi : Could not allocate driver version buffer."));
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	(void) memset(instance->drv_ver_dma_obj.buffer, 0,
	    sizeof (drv_ver_info.drv_ver));
	/*
	 * NOTE(review): this writes into drv_ver_dma_obj.buffer through
	 * the frame pool's access handle rather than
	 * drv_ver_dma_obj.acc_handle — confirm the handles share byte
	 * access semantics.
	 */
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);


	/* copy driver version physical address to init frame */
	ddi_put64(cmd->frame_dma_obj.acc_handle, &init_frame->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	*cmd_ptr = cmd;

	return (DDI_SUCCESS);
}
3551 3571
3552 3572
/*
 * mrsas_init_adapter_ppc - Initialize MFI interface adapter.
 *
 * Allocates the MFI command pool/frames, builds and issues the INIT
 * frame in polled mode, then probes the status register for IEEE SGL
 * support.  On success, marks the MFI pool and driver-version buffer
 * for teardown via the unroll flags.  Returns DDI_SUCCESS/DDI_FAILURE.
 */
int
mrsas_init_adapter_ppc(struct mrsas_instance *instance)
{
	struct mrsas_cmd *cmd;

	/*
	 * allocate memory for mfi adapter(cmd pool, individual commands, mfi
	 * frames etc
	 */
	if (alloc_space_for_mfi(instance) != DDI_SUCCESS) {
		con_log(CL_ANN, (CE_NOTE,
		    "Error, failed to allocate memory for MFI adapter"));
		return (DDI_FAILURE);
	}

	/* Build INIT command */
	cmd = mrsas_get_mfi_pkt(instance);
	if (cmd == NULL) {
		/* No free command packet: record pool pressure for dtrace. */
		DTRACE_PROBE2(init_adapter_mfi_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}

	if (mrsas_build_init_cmd(instance, &cmd) != DDI_SUCCESS) {
		con_log(CL_ANN,
		    (CE_NOTE, "Error, failed to build INIT command"));

		goto fail_undo_alloc_mfi_space;
	}

	/*
	 * Disable interrupt before sending init frame ( see linux driver code)
	 * send INIT MFI frame in polled mode
	 */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN, (CE_WARN, "failed to init firmware"));
		goto fail_fw_init;
	}

	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
		goto fail_fw_init;
	mrsas_return_mfi_pkt(instance, cmd);

	/*
	 * Bit 0x04000000 of the FW status register advertises IEEE SGL
	 * support; honored only when the ctio_enable tunable is set.
	 */
	if (ctio_enable &&
	    (instance->func_ptr->read_fw_status_reg(instance) & 0x04000000)) {
		con_log(CL_ANN, (CE_NOTE, "mr_sas: IEEE SGL's supported"));
		instance->flag_ieee = 1;
	} else {
		instance->flag_ieee = 0;
	}

	/* Skinny (SAS2008) controllers are expected to support IEEE SGLs. */
	ASSERT(!instance->skinny || instance->flag_ieee);

	/* Record what must be unwound on detach/failure later. */
	instance->unroll.alloc_space_mfi = 1;
	instance->unroll.verBuff = 1;

	return (DDI_SUCCESS);


fail_fw_init:
	/* INIT was sent (or checked) and failed: drop driver-version DMA. */
	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);

fail_undo_alloc_mfi_space:
	mrsas_return_mfi_pkt(instance, cmd);
	free_space_for_mfi(instance);

	return (DDI_FAILURE);

}
3618 3645
3619 3646 /*
3620 3647 * mrsas_init_adapter - Initialize adapter.
3621 3648 */
3622 3649 int
3623 3650 mrsas_init_adapter(struct mrsas_instance *instance)
3624 3651 {
3625 3652 struct mrsas_ctrl_info ctrl_info;
3626 3653
3627 3654
3628 3655 /* we expect the FW state to be READY */
3629 3656 if (mfi_state_transition_to_ready(instance)) {
3630 3657 con_log(CL_ANN, (CE_WARN, "mr_sas: F/W is not ready"));
3631 3658 return (DDI_FAILURE);
3632 3659 }
3633 3660
3634 3661 /* get various operational parameters from status register */
3635 3662 instance->max_num_sge =
3636 3663 (instance->func_ptr->read_fw_status_reg(instance) &
3637 3664 0xFF0000) >> 0x10;
3638 3665 instance->max_num_sge =
3639 3666 (instance->max_num_sge > MRSAS_MAX_SGE_CNT) ?
3640 3667 MRSAS_MAX_SGE_CNT : instance->max_num_sge;
3641 3668
3642 3669 /*
3643 3670 * Reduce the max supported cmds by 1. This is to ensure that the
3644 3671 * reply_q_sz (1 more than the max cmd that driver may send)
3645 3672 * does not exceed max cmds that the FW can support
3646 3673 */
3647 3674 instance->max_fw_cmds =
3648 3675 instance->func_ptr->read_fw_status_reg(instance) & 0xFFFF;
3649 3676 instance->max_fw_cmds = instance->max_fw_cmds - 1;
3650 3677
3651 3678
3652 3679
3653 3680 /* Initialize adapter */
3654 3681 if (instance->func_ptr->init_adapter(instance) != DDI_SUCCESS) {
3655 3682 con_log(CL_ANN,
3656 3683 (CE_WARN, "mr_sas: could not initialize adapter"));
3657 3684 return (DDI_FAILURE);
3658 3685 }
3659 3686
3660 3687 /* gather misc FW related information */
3661 3688 instance->disable_online_ctrl_reset = 0;
3662 3689
3663 3690 if (!get_ctrl_info(instance, &ctrl_info)) {
3664 3691 instance->max_sectors_per_req = ctrl_info.max_request_size;
3665 3692 con_log(CL_ANN1, (CE_NOTE,
3666 3693 "product name %s ld present %d",
3667 3694 ctrl_info.product_name, ctrl_info.ld_present_count));
3668 3695 } else {
3669 3696 instance->max_sectors_per_req = instance->max_num_sge *
3670 3697 PAGESIZE / 512;
3671 3698 }
3672 3699
3673 3700 if (ctrl_info.properties.on_off_properties & DISABLE_OCR_PROP_FLAG)
3674 3701 instance->disable_online_ctrl_reset = 1;
3675 3702
3676 3703 return (DDI_SUCCESS);
3677 3704
3678 3705 }
3679 3706
3680 3707
3681 3708
/*
 * mrsas_issue_init_mfi - build and issue an MFI INIT frame in polled mode.
 *
 * Uses an application-pool command packet (get_mfi_app_pkt) so it can be
 * called while the regular pool is in use (e.g. during reset paths).
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
mrsas_issue_init_mfi(struct mrsas_instance *instance)
{
	struct mrsas_cmd *cmd;
	struct mrsas_init_frame *init_frame;
	struct mrsas_init_queue_info *initq_info;

	/*
	 * Prepare a init frame. Note the init frame points to queue info
	 * structure. Each frame has SGL allocated after first 64 bytes. For
	 * this frame - since we don't need any SGL - we use SGL's space as
	 * queue info structure
	 */
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_issue_init_mfi: entry\n"));
	cmd = get_mfi_app_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mfi: get_pkt failed\n"));
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	/* queue info lives in the SGL area, 64 bytes into the frame */
	init_frame = (struct mrsas_init_frame *)cmd->frame;
	initq_info = (struct mrsas_init_queue_info *)
	    ((unsigned long)init_frame + 64);

	(void) memset(init_frame, 0, MRMFI_FRAME_SIZE);
	(void) memset(initq_info, 0, sizeof (struct mrsas_init_queue_info));

	ddi_put32(cmd->frame_dma_obj.acc_handle, &initq_info->init_flags, 0);

	/* reply queue holds one more entry than max outstanding commands */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_entries, instance->max_fw_cmds + 1);
	/*
	 * Producer index, consumer index and reply queue sit consecutively
	 * (offsets 0, 4 and 8) in the internal DMA object; high halves of
	 * the 64-bit addresses are zero (32-bit DMA).
	 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->producer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->consumer_index_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 4);

	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_hi, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &initq_info->reply_queue_start_phys_addr_lo,
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address + 8);

	ddi_put8(cmd->frame_dma_obj.acc_handle,
	    &init_frame->cmd, MFI_CMD_OP_INIT);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &init_frame->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &init_frame->flags, 0);
	/* point the frame at the queue info we placed at frame+64 */
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_lo,
	    cmd->frame_phys_addr + 64);
	ddi_put32(cmd->frame_dma_obj.acc_handle,
	    &init_frame->queue_info_new_phys_addr_hi, 0);

	ddi_put32(cmd->frame_dma_obj.acc_handle, &init_frame->data_xfer_len,
	    sizeof (struct mrsas_init_queue_info));

	cmd->frame_count = 1;

	/* issue the init frame in polled mode */
	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mfi():failed to "
		    "init firmware"));
		return_mfi_app_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS) {
		return_mfi_app_pkt(instance, cmd);
		return (DDI_FAILURE);
	}

	return_mfi_app_pkt(instance, cmd);
	con_log(CL_ANN1, (CE_CONT, "mrsas_issue_init_mfi: Done"));

	return (DDI_SUCCESS);
}
3773 3800 /*
3774 3801 * mfi_state_transition_to_ready : Move the FW to READY state
3775 3802 *
3776 3803 * @reg_set : MFI register set
3777 3804 */
3778 3805 int
3779 3806 mfi_state_transition_to_ready(struct mrsas_instance *instance)
3780 3807 {
3781 3808 int i;
3782 3809 uint8_t max_wait;
3783 3810 uint32_t fw_ctrl = 0;
3784 3811 uint32_t fw_state;
3785 3812 uint32_t cur_state;
3786 3813 uint32_t cur_abs_reg_val;
3787 3814 uint32_t prev_abs_reg_val;
3788 3815 uint32_t status;
3789 3816
3790 3817 cur_abs_reg_val =
3791 3818 instance->func_ptr->read_fw_status_reg(instance);
3792 3819 fw_state =
3793 3820 cur_abs_reg_val & MFI_STATE_MASK;
3794 3821 con_log(CL_ANN1, (CE_CONT,
3795 3822 "mfi_state_transition_to_ready:FW state = 0x%x", fw_state));
3796 3823
3797 3824 while (fw_state != MFI_STATE_READY) {
3798 3825 con_log(CL_ANN, (CE_CONT,
3799 3826 "mfi_state_transition_to_ready:FW state%x", fw_state));
3800 3827
3801 3828 switch (fw_state) {
3802 3829 case MFI_STATE_FAULT:
3803 3830 con_log(CL_ANN, (CE_NOTE,
3804 3831 "mr_sas: FW in FAULT state!!"));
3805 3832
3806 3833 return (ENODEV);
|
↓ open down ↓ |
32 lines elided |
↑ open up ↑ |
3807 3834 case MFI_STATE_WAIT_HANDSHAKE:
3808 3835 /* set the CLR bit in IMR0 */
3809 3836 con_log(CL_ANN1, (CE_NOTE,
3810 3837 "mr_sas: FW waiting for HANDSHAKE"));
3811 3838 /*
3812 3839 * PCI_Hot Plug: MFI F/W requires
3813 3840 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3814 3841 * to be set
3815 3842 */
3816 3843 /* WR_IB_MSG_0(MFI_INIT_CLEAR_HANDSHAKE, instance); */
3817 - if (!instance->tbolt) {
3844 + if (!instance->tbolt && !instance->skinny) {
3818 3845 WR_IB_DOORBELL(MFI_INIT_CLEAR_HANDSHAKE |
3819 3846 MFI_INIT_HOTPLUG, instance);
3820 3847 } else {
3821 3848 WR_RESERVED0_REGISTER(MFI_INIT_CLEAR_HANDSHAKE |
3822 3849 MFI_INIT_HOTPLUG, instance);
3823 3850 }
3824 3851 max_wait = (instance->tbolt == 1) ? 180 : 2;
3825 3852 cur_state = MFI_STATE_WAIT_HANDSHAKE;
3826 3853 break;
3827 3854 case MFI_STATE_BOOT_MESSAGE_PENDING:
3828 3855 /* set the CLR bit in IMR0 */
3829 3856 con_log(CL_ANN1, (CE_NOTE,
3830 3857 "mr_sas: FW state boot message pending"));
3831 3858 /*
3832 3859 * PCI_Hot Plug: MFI F/W requires
3833 3860 * (MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG)
3834 3861 * to be set
3835 3862 */
3836 - if (!instance->tbolt) {
3863 + if (!instance->tbolt && !instance->skinny) {
3837 3864 WR_IB_DOORBELL(MFI_INIT_HOTPLUG, instance);
3838 3865 } else {
3839 3866 WR_RESERVED0_REGISTER(MFI_INIT_HOTPLUG,
3840 3867 instance);
3841 3868 }
3842 3869 max_wait = (instance->tbolt == 1) ? 180 : 10;
3843 3870 cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
3844 3871 break;
3845 3872 case MFI_STATE_OPERATIONAL:
3846 3873 /* bring it to READY state; assuming max wait 2 secs */
3847 3874 instance->func_ptr->disable_intr(instance);
3848 3875 con_log(CL_ANN1, (CE_NOTE,
3849 3876 "mr_sas: FW in OPERATIONAL state"));
3850 3877 /*
3851 3878 * PCI_Hot Plug: MFI F/W requires
3852 3879 * (MFI_INIT_READY | MFI_INIT_MFIMODE | MFI_INIT_ABORT)
3853 3880 * to be set
3854 3881 */
3855 3882 /* WR_IB_DOORBELL(MFI_INIT_READY, instance); */
3856 - if (!instance->tbolt) {
3883 + if (!instance->tbolt && !instance->skinny) {
3857 3884 WR_IB_DOORBELL(MFI_RESET_FLAGS, instance);
3858 3885 } else {
3859 3886 WR_RESERVED0_REGISTER(MFI_RESET_FLAGS,
3860 3887 instance);
3861 3888
3862 3889 for (i = 0; i < (10 * 1000); i++) {
3863 3890 status =
3864 3891 RD_RESERVED0_REGISTER(instance);
3865 3892 if (status & 1) {
3866 3893 delay(1 *
3867 3894 drv_usectohz(MILLISEC));
3868 3895 } else {
3869 3896 break;
3870 3897 }
3871 3898 }
3872 3899
3873 3900 }
3874 3901 max_wait = (instance->tbolt == 1) ? 180 : 10;
3875 3902 cur_state = MFI_STATE_OPERATIONAL;
3876 3903 break;
3877 3904 case MFI_STATE_UNDEFINED:
3878 3905 /* this state should not last for more than 2 seconds */
3879 3906 con_log(CL_ANN1, (CE_NOTE, "FW state undefined"));
3880 3907
3881 3908 max_wait = (instance->tbolt == 1) ? 180 : 2;
3882 3909 cur_state = MFI_STATE_UNDEFINED;
3883 3910 break;
3884 3911 case MFI_STATE_BB_INIT:
3885 3912 max_wait = (instance->tbolt == 1) ? 180 : 2;
3886 3913 cur_state = MFI_STATE_BB_INIT;
3887 3914 break;
3888 3915 case MFI_STATE_FW_INIT:
3889 3916 max_wait = (instance->tbolt == 1) ? 180 : 2;
3890 3917 cur_state = MFI_STATE_FW_INIT;
3891 3918 break;
3892 3919 case MFI_STATE_FW_INIT_2:
3893 3920 max_wait = 180;
3894 3921 cur_state = MFI_STATE_FW_INIT_2;
3895 3922 break;
3896 3923 case MFI_STATE_DEVICE_SCAN:
3897 3924 max_wait = 180;
3898 3925 cur_state = MFI_STATE_DEVICE_SCAN;
3899 3926 prev_abs_reg_val = cur_abs_reg_val;
3900 3927 con_log(CL_NONE, (CE_NOTE,
3901 3928 "Device scan in progress ...\n"));
3902 3929 break;
3903 3930 case MFI_STATE_FLUSH_CACHE:
3904 3931 max_wait = 180;
3905 3932 cur_state = MFI_STATE_FLUSH_CACHE;
3906 3933 break;
3907 3934 default:
3908 3935 con_log(CL_ANN1, (CE_NOTE,
3909 3936 "mr_sas: Unknown state 0x%x", fw_state));
3910 3937 return (ENODEV);
3911 3938 }
3912 3939
3913 3940 /* the cur_state should not last for more than max_wait secs */
3914 3941 for (i = 0; i < (max_wait * MILLISEC); i++) {
3915 3942 /* fw_state = RD_OB_MSG_0(instance) & MFI_STATE_MASK; */
3916 3943 cur_abs_reg_val =
3917 3944 instance->func_ptr->read_fw_status_reg(instance);
3918 3945 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3919 3946
3920 3947 if (fw_state == cur_state) {
3921 3948 delay(1 * drv_usectohz(MILLISEC));
3922 3949 } else {
3923 3950 break;
3924 3951 }
3925 3952 }
3926 3953 if (fw_state == MFI_STATE_DEVICE_SCAN) {
3927 3954 if (prev_abs_reg_val != cur_abs_reg_val) {
3928 3955 continue;
3929 3956 }
|
↓ open down ↓ |
63 lines elided |
↑ open up ↑ |
3930 3957 }
3931 3958
3932 3959 /* return error if fw_state hasn't changed after max_wait */
3933 3960 if (fw_state == cur_state) {
3934 3961 con_log(CL_ANN1, (CE_WARN,
3935 3962 "FW state hasn't changed in %d secs", max_wait));
3936 3963 return (ENODEV);
3937 3964 }
3938 3965 };
3939 3966
3940 - if (!instance->tbolt) {
3967 + /* This may also need to apply to Skinny, but for now, don't worry. */
3968 + if (!instance->tbolt && !instance->skinny) {
3941 3969 fw_ctrl = RD_IB_DOORBELL(instance);
3942 3970 con_log(CL_ANN1, (CE_CONT,
3943 3971 "mfi_state_transition_to_ready:FW ctrl = 0x%x", fw_ctrl));
3944 3972
3945 3973 /*
3946 3974 * Write 0xF to the doorbell register to do the following.
3947 3975 * - Abort all outstanding commands (bit 0).
3948 3976 * - Transition from OPERATIONAL to READY state (bit 1).
3949 3977 * - Discard (possible) low MFA posted in 64-bit mode (bit-2).
3950 3978 * - Set to release FW to continue running (i.e. BIOS handshake
3951 3979 * (bit 3).
3952 3980 */
3953 3981 WR_IB_DOORBELL(0xF, instance);
3954 3982 }
3955 3983
3956 3984 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
3957 3985 return (EIO);
3958 3986 }
3959 3987
3960 3988 return (DDI_SUCCESS);
3961 3989 }
3962 3990
3963 3991 /*
3964 3992 * get_seq_num
3965 3993 */
3966 3994 static int
3967 3995 get_seq_num(struct mrsas_instance *instance,
3968 3996 struct mrsas_evt_log_info *eli)
|
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
3969 3997 {
3970 3998 int ret = DDI_SUCCESS;
3971 3999
3972 4000 dma_obj_t dcmd_dma_obj;
3973 4001 struct mrsas_cmd *cmd;
3974 4002 struct mrsas_dcmd_frame *dcmd;
3975 4003 struct mrsas_evt_log_info *eli_tmp;
3976 4004 if (instance->tbolt) {
3977 4005 cmd = get_raid_msg_mfi_pkt(instance);
3978 4006 } else {
3979 - cmd = get_mfi_pkt(instance);
4007 + cmd = mrsas_get_mfi_pkt(instance);
3980 4008 }
3981 4009
3982 4010 if (!cmd) {
3983 4011 cmn_err(CE_WARN, "mr_sas: failed to get a cmd");
3984 4012 DTRACE_PROBE2(seq_num_mfi_err, uint16_t,
3985 4013 instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
3986 4014 return (ENOMEM);
3987 4015 }
3988 4016
3989 4017 /* Clear the frame buffer and assign back the context id */
3990 4018 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3991 4019 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3992 4020 cmd->index);
3993 4021
3994 4022 dcmd = &cmd->frame->dcmd;
3995 4023
3996 4024 /* allocate the data transfer buffer */
3997 4025 dcmd_dma_obj.size = sizeof (struct mrsas_evt_log_info);
3998 4026 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3999 4027 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
4000 4028 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
4001 4029 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
4002 4030 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
4003 4031
4004 4032 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
4005 4033 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
4006 4034 cmn_err(CE_WARN,
4007 4035 "get_seq_num: could not allocate data transfer buffer.");
4008 4036 return (DDI_FAILURE);
4009 4037 }
4010 4038
4011 4039 (void) memset(dcmd_dma_obj.buffer, 0,
4012 4040 sizeof (struct mrsas_evt_log_info));
4013 4041
4014 4042 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
4015 4043
4016 4044 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
4017 4045 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
4018 4046 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
4019 4047 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
4020 4048 MFI_FRAME_DIR_READ);
4021 4049 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
4022 4050 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
4023 4051 sizeof (struct mrsas_evt_log_info));
4024 4052 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
4025 4053 MR_DCMD_CTRL_EVENT_GET_INFO);
4026 4054 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
4027 4055 sizeof (struct mrsas_evt_log_info));
4028 4056 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
4029 4057 dcmd_dma_obj.dma_cookie[0].dmac_address);
4030 4058
4031 4059 cmd->sync_cmd = MRSAS_TRUE;
4032 4060 cmd->frame_count = 1;
4033 4061
4034 4062 if (instance->tbolt) {
4035 4063 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
4036 4064 }
4037 4065
4038 4066 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
4039 4067 cmn_err(CE_WARN, "get_seq_num: "
4040 4068 "failed to issue MRSAS_DCMD_CTRL_EVENT_GET_INFO");
4041 4069 ret = DDI_FAILURE;
4042 4070 } else {
4043 4071 eli_tmp = (struct mrsas_evt_log_info *)dcmd_dma_obj.buffer;
4044 4072 eli->newest_seq_num = ddi_get32(cmd->frame_dma_obj.acc_handle,
|
↓ open down ↓ |
55 lines elided |
↑ open up ↑ |
4045 4073 &eli_tmp->newest_seq_num);
4046 4074 ret = DDI_SUCCESS;
4047 4075 }
4048 4076
4049 4077 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
4050 4078 ret = DDI_FAILURE;
4051 4079
4052 4080 if (instance->tbolt) {
4053 4081 return_raid_msg_mfi_pkt(instance, cmd);
4054 4082 } else {
4055 - return_mfi_pkt(instance, cmd);
4083 + mrsas_return_mfi_pkt(instance, cmd);
4056 4084 }
4057 4085
4058 4086 return (ret);
4059 4087 }
4060 4088
4061 4089 /*
4062 4090 * start_mfi_aen
4063 4091 */
4064 4092 static int
4065 4093 start_mfi_aen(struct mrsas_instance *instance)
4066 4094 {
4067 4095 int ret = 0;
4068 4096
4069 4097 struct mrsas_evt_log_info eli;
4070 4098 union mrsas_evt_class_locale class_locale;
4071 4099
4072 4100 /* get the latest sequence number from FW */
4073 4101 (void) memset(&eli, 0, sizeof (struct mrsas_evt_log_info));
4074 4102
4075 4103 if (get_seq_num(instance, &eli)) {
4076 4104 cmn_err(CE_WARN, "start_mfi_aen: failed to get seq num");
4077 4105 return (-1);
4078 4106 }
4079 4107
4080 4108 /* register AEN with FW for latest sequence number plus 1 */
4081 4109 class_locale.members.reserved = 0;
4082 4110 class_locale.members.locale = LE_16(MR_EVT_LOCALE_ALL);
4083 4111 class_locale.members.class = MR_EVT_CLASS_INFO;
4084 4112 class_locale.word = LE_32(class_locale.word);
4085 4113 ret = register_mfi_aen(instance, eli.newest_seq_num + 1,
4086 4114 class_locale.word);
4087 4115
4088 4116 if (ret) {
4089 4117 cmn_err(CE_WARN, "start_mfi_aen: aen registration failed");
4090 4118 return (-1);
4091 4119 }
4092 4120
4093 4121
4094 4122 return (ret);
4095 4123 }
4096 4124
/*
 * flush_cache - synchronously flush the controller and disk caches.
 *
 * Issues the MR_DCMD_CTRL_CACHE_FLUSH DCMD (no data transfer) in polled
 * mode.  Failures are only logged; the function returns void.
 */
static void
flush_cache(struct mrsas_instance *instance)
{
	struct mrsas_cmd *cmd = NULL;
	struct mrsas_dcmd_frame *dcmd;
	/* Thunderbolt uses the RAID-message MFI pass-through pool. */
	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = mrsas_get_mfi_pkt(instance);
	}

	if (!cmd) {
		con_log(CL_ANN1, (CE_WARN,
		    "flush_cache():Failed to get a cmd for flush_cache"));
		DTRACE_PROBE2(flush_cache_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return;
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* Build a no-data DCMD; mbox byte 0 selects both cache targets. */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 0);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_NONE);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_CACHE_FLUSH);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.b[0],
	    MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE);

	cmd->frame_count = 1;

	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}

	if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
		con_log(CL_ANN1, (CE_WARN,
		    "flush_cache: failed to issue MFI_DCMD_CTRL_CACHE_FLUSH"));
	}
	con_log(CL_ANN1, (CE_CONT, "flush_cache done"));
	if (instance->tbolt) {
		return_raid_msg_mfi_pkt(instance, cmd);
	} else {
		mrsas_return_mfi_pkt(instance, cmd);
	}

}
4158 4186
/*
 * service_mfi_aen- Completes an AEN command
 * @instance: Adapter soft state
 * @cmd: Command to be completed
 *
 * Logs a sysevent for the AEN, dispatches config/unconfig work for LD
 * and (on tbolt/skinny with PDSUPPORT) PD state changes, then reuses
 * the same command packet to re-register for the next sequence number.
 */
void
service_mfi_aen(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	uint32_t seq_num;
	struct mrsas_evt_detail *evt_detail =
	    (struct mrsas_evt_detail *)instance->mfi_evt_detail_obj.buffer;
	int rval = 0;
	int tgt = 0;
	uint8_t dtype;
#ifdef PDSUPPORT
	mrsas_pd_address_t *pd_addr;
#endif
	ddi_acc_handle_t acc_handle;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

	acc_handle = cmd->frame_dma_obj.acc_handle;
	cmd->cmd_status = ddi_get8(acc_handle, &cmd->frame->io.cmd_status);
	/* an ENODATA completion status is normalized to success */
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}

	/*
	 * log the MFI AEN event to the sysevent queue so that
	 * application will get noticed
	 */
	if (ddi_log_sysevent(instance->dip, DDI_VENDOR_LSI, "LSIMEGA", "SAS",
	    NULL, NULL, DDI_NOSLEEP) != DDI_SUCCESS) {
		int instance_no = ddi_get_instance(instance->dip);
		con_log(CL_ANN, (CE_WARN,
		    "mr_sas%d: Failed to log AEN event", instance_no));
	}
	/*
	 * Check for any ld devices that has changed state. i.e. online
	 * or offline.
	 */
	con_log(CL_ANN1, (CE_CONT,
	    "AEN: code = %x class = %x locale = %x args = %x",
	    ddi_get32(acc_handle, &evt_detail->code),
	    evt_detail->cl.members.class,
	    ddi_get16(acc_handle, &evt_detail->cl.members.locale),
	    ddi_get8(acc_handle, &evt_detail->arg_type)));

	switch (ddi_get32(acc_handle, &evt_detail->code)) {
	case MR_EVT_CFG_CLEARED: {
		/* config wiped: unconfigure every LD that has a devinfo */
		for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
			if (instance->mr_ld_list[tgt].dip != NULL) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_ld_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance, tgt, 0,
				    MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: CFG CLEARED AEN rval = %d "
				    "tgt id = %d", rval, tgt));
			}
		}
		break;
	}

	case MR_EVT_LD_DELETED: {
		tgt = ddi_get16(acc_handle, &evt_detail->args.ld.target_id);
		mutex_enter(&instance->config_dev_mtx);
		instance->mr_ld_list[tgt].flag = (uint8_t)~MRDRV_TGT_VALID;
		mutex_exit(&instance->config_dev_mtx);
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_UNCONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD DELETED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_DELETED */

	case MR_EVT_LD_CREATED: {
		rval = mrsas_service_evt(instance,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id), 0,
		    MRSAS_EVT_CONFIG_TGT, NULL);
		con_log(CL_ANN1, (CE_WARN, "mr_sas: LD CREATED AEN rval = %d "
		    "tgt id = %d index = %d", rval,
		    ddi_get16(acc_handle, &evt_detail->args.ld.target_id),
		    ddi_get8(acc_handle, &evt_detail->args.ld.ld_index)));
		break;
	} /* End of MR_EVT_LD_CREATED */

#ifdef PDSUPPORT
	/* PD events are handled only on Thunderbolt and Skinny controllers */
	case MR_EVT_PD_REMOVED_EXT: {
		if (instance->tbolt || instance->skinny) {
			pd_addr = &evt_detail->args.pd_addr;
			dtype = pd_addr->scsi_dev_type;
			con_log(CL_DLEVEL1, (CE_NOTE,
			    " MR_EVT_PD_REMOVED_EXT: dtype = %x,"
			    " arg_type = %d ", dtype, evt_detail->arg_type));
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			mutex_enter(&instance->config_dev_mtx);
			instance->mr_tbolt_pd_list[tgt].flag =
			    (uint8_t)~MRDRV_TGT_VALID;
			mutex_exit(&instance->config_dev_mtx);
			rval = mrsas_service_evt(instance, ddi_get16(
			    acc_handle, &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_REMOVED_EXT */

	case MR_EVT_PD_INSERTED_EXT: {
		if (instance->tbolt || instance->skinny) {
			rval = mrsas_service_evt(instance,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id),
			    1, MRSAS_EVT_CONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_INSERTEDi_EXT:"
			    "rval = %d tgt id = %d ", rval,
			    ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id)));
		}
		break;
	} /* End of MR_EVT_PD_INSERTED_EXT */

	case MR_EVT_PD_STATE_CHANGE: {
		if (instance->tbolt || instance->skinny) {
			tgt = ddi_get16(acc_handle,
			    &evt_detail->args.pd.device_id);
			/* leaving PD_SYSTEM: treat as a removal */
			if ((evt_detail->args.pd_state.prevState ==
			    PD_SYSTEM) &&
			    (evt_detail->args.pd_state.newState != PD_SYSTEM)) {
				mutex_enter(&instance->config_dev_mtx);
				instance->mr_tbolt_pd_list[tgt].flag =
				    (uint8_t)~MRDRV_TGT_VALID;
				mutex_exit(&instance->config_dev_mtx);
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_UNCONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN, "mr_sas: PD_REMOVED:"
				    "rval = %d tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
			/* UNCONFIGURED_GOOD -> PD_SYSTEM: treat as insert */
			if ((evt_detail->args.pd_state.prevState
			    == UNCONFIGURED_GOOD) &&
			    (evt_detail->args.pd_state.newState == PD_SYSTEM)) {
				rval = mrsas_service_evt(instance,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id),
				    1, MRSAS_EVT_CONFIG_TGT, NULL);
				con_log(CL_ANN1, (CE_WARN,
				    "mr_sas: PD_INSERTED: rval = %d "
				    " tgt id = %d ", rval,
				    ddi_get16(acc_handle,
				    &evt_detail->args.pd.device_id)));
				break;
			}
		}
		break;
	}
#endif

	} /* End of Main Switch */

	/* get copy of seq_num and class/locale for re-registration */
	seq_num = ddi_get32(acc_handle, &evt_detail->seq_num);
	seq_num++;
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	ddi_put8(acc_handle, &cmd->frame->dcmd.cmd_status, 0x0);
	ddi_put32(acc_handle, &cmd->frame->dcmd.mbox.w[0], seq_num);

	instance->aen_seq_num = seq_num;

	cmd->frame_count = 1;

	cmd->retry_count_for_ocr = 0;
	cmd->drv_pkt_time = 0;

	/* Issue the aen registration frame */
	instance->func_ptr->issue_cmd(cmd, instance);
}
4352 4380
/*
 * complete_cmd_in_sync_mode - Completes an internal command
 * @instance: Adapter soft state
 * @cmd: Command to be completed
 *
 * The issue_cmd_in_sync_mode() function waits for a command to complete
 * after it issues a command. This function wakes up that waiting routine by
 * calling wake_up() on the wait queue.
 */
static void
complete_cmd_in_sync_mode(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	/* capture the FW completion status from the frame */
	cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
	    &cmd->frame->io.cmd_status);

	cmd->sync_cmd = MRSAS_FALSE;

	con_log(CL_ANN1, (CE_NOTE, "complete_cmd_in_sync_mode called %p \n",
	    (void *)cmd));

	/*
	 * Normalize ENODATA to success (same handling as service_mfi_aen())
	 * and wake every waiter in issue_cmd_in_sync_mode().
	 */
	mutex_enter(&instance->int_cmd_mtx);
	if (cmd->cmd_status == ENODATA) {
		cmd->cmd_status = 0;
	}
	cv_broadcast(&instance->int_cmd_cv);
	mutex_exit(&instance->int_cmd_mtx);

}
4382 4410
4383 4411 /*
4384 4412 * Call this function inside mrsas_softintr.
4385 4413 * mrsas_initiate_ocr_if_fw_is_faulty - Initiates OCR if FW status is faulty
4386 4414 * @instance: Adapter soft state
4387 4415 */
4388 4416
4389 4417 static uint32_t
4390 4418 mrsas_initiate_ocr_if_fw_is_faulty(struct mrsas_instance *instance)
4391 4419 {
4392 4420 uint32_t cur_abs_reg_val;
4393 4421 uint32_t fw_state;
4394 4422
4395 4423 cur_abs_reg_val = instance->func_ptr->read_fw_status_reg(instance);
4396 4424 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
4397 4425 if (fw_state == MFI_STATE_FAULT) {
4398 4426 if (instance->disable_online_ctrl_reset == 1) {
4399 4427 cmn_err(CE_WARN,
4400 4428 "mrsas_initiate_ocr_if_fw_is_faulty: "
4401 4429 "FW in Fault state, detected in ISR: "
4402 4430 "FW doesn't support ocr ");
4403 4431
4404 4432 return (ADAPTER_RESET_NOT_REQUIRED);
4405 4433 } else {
4406 4434 con_log(CL_ANN, (CE_NOTE,
4407 4435 "mrsas_initiate_ocr_if_fw_is_faulty: FW in Fault "
4408 4436 "state, detected in ISR: FW supports ocr "));
4409 4437
4410 4438 return (ADAPTER_RESET_REQUIRED);
4411 4439 }
4412 4440 }
4413 4441
4414 4442 return (ADAPTER_RESET_NOT_REQUIRED);
4415 4443 }
4416 4444
4417 4445 /*
4418 4446 * mrsas_softintr - The Software ISR
4419 4447 * @param arg : HBA soft state
4420 4448 *
4421 4449 * called from high-level interrupt if hi-level interrupt are not there,
4422 4450 * otherwise triggered as a soft interrupt
4423 4451 */
static uint_t
mrsas_softintr(struct mrsas_instance *instance)
{
    struct scsi_pkt *pkt;
    struct scsa_cmd *acmd;
    struct mrsas_cmd *cmd;
    struct mlist_head *pos, *next;
    mlist_t process_list;
    struct mrsas_header *hdr;
    struct scsi_arq_status *arqstat;

    con_log(CL_ANN1, (CE_NOTE, "mrsas_softintr() called."));

    ASSERT(instance);

    mutex_enter(&instance->completed_pool_mtx);

    if (mlist_empty(&instance->completed_pool_list)) {
        mutex_exit(&instance->completed_pool_mtx);
        return (DDI_INTR_CLAIMED);
    }

    instance->softint_running = 1;

    /*
     * Atomically steal the entire completed list onto a private list so
     * completed_pool_mtx is not held while running SCSA callbacks.
     */
    INIT_LIST_HEAD(&process_list);
    mlist_splice(&instance->completed_pool_list, &process_list);
    INIT_LIST_HEAD(&instance->completed_pool_list);

    mutex_exit(&instance->completed_pool_mtx);

    /* perform all callbacks first, before releasing the SCBs */
    mlist_for_each_safe(pos, next, &process_list) {
        cmd = mlist_entry(pos, struct mrsas_cmd, list);

        /* synchronize the Cmd frame for the controller */
        (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle,
            0, 0, DDI_DMA_SYNC_FORCPU);

        /*
         * NOTE(review): this early return abandons any commands still
         * on process_list and leaves softint_running at 1 — confirm
         * that is acceptable on an FMA DMA-handle failure.
         */
        if (mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
            DDI_SUCCESS) {
            mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
            ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
            con_log(CL_ANN1, (CE_WARN,
                "mrsas_softintr: "
                "FMA check reports DMA handle failure"));
            return (DDI_INTR_CLAIMED);
        }

        hdr = &cmd->frame->hdr;

        /* remove the internal command from the process list */
        mlist_del_init(&cmd->list);

        /* Dispatch on the MFI opcode stored in the frame header. */
        switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
        case MFI_CMD_OP_PD_SCSI:
        case MFI_CMD_OP_LD_SCSI:
        case MFI_CMD_OP_LD_READ:
        case MFI_CMD_OP_LD_WRITE:
            /*
             * MFI_CMD_OP_PD_SCSI and MFI_CMD_OP_LD_SCSI
             * could have been issued either through an
             * IO path or an IOCTL path. If it was via IOCTL,
             * we will send it to internal completion.
             */
            if (cmd->sync_cmd == MRSAS_TRUE) {
                complete_cmd_in_sync_mode(instance, cmd);
                break;
            }

            /* regular commands */
            acmd = cmd->cmd;
            pkt = CMD2PKT(acmd);

            /* Make DMA'd data visible to the CPU for consistent bufs. */
            if (acmd->cmd_flags & CFLAG_DMAVALID) {
                if (acmd->cmd_flags & CFLAG_CONSISTENT) {
                    (void) ddi_dma_sync(acmd->cmd_dmahandle,
                        acmd->cmd_dma_offset,
                        acmd->cmd_dma_len,
                        DDI_DMA_SYNC_FORCPU);
                }
            }

            /* Assume full success; error cases below override this. */
            pkt->pkt_reason = CMD_CMPLT;
            pkt->pkt_statistics = 0;
            pkt->pkt_state = STATE_GOT_BUS
                | STATE_GOT_TARGET | STATE_SENT_CMD
                | STATE_XFERRED_DATA | STATE_GOT_STATUS;

            con_log(CL_ANN, (CE_CONT,
                "CDB[0] = %x completed for %s: size %lx context %x",
                pkt->pkt_cdbp[0], ((acmd->islogical) ? "LD" : "PD"),
                acmd->cmd_dmacount, hdr->context));
            DTRACE_PROBE3(softintr_cdb, uint8_t, pkt->pkt_cdbp[0],
                uint_t, acmd->cmd_cdblen, ulong_t,
                acmd->cmd_dmacount);

            if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
                struct scsi_inquiry *inq;

                if (acmd->cmd_dmacount != 0) {
                    bp_mapin(acmd->cmd_buf);
                    inq = (struct scsi_inquiry *)
                        acmd->cmd_buf->b_un.b_addr;

#ifdef PDSUPPORT
                    /* PD support enabled: show inquiry data as-is. */
                    if (hdr->cmd_status == MFI_STAT_OK) {
                        display_scsi_inquiry(
                            (caddr_t)inq);
                    }
#else
                    /* don't expose physical drives to OS */
                    if (acmd->islogical &&
                        (hdr->cmd_status == MFI_STAT_OK)) {
                        display_scsi_inquiry(
                            (caddr_t)inq);
                    } else if ((hdr->cmd_status ==
                        MFI_STAT_OK) && inq->inq_dtype ==
                        DTYPE_DIRECT) {

                        display_scsi_inquiry(
                            (caddr_t)inq);

                        /* for physical disk */
                        hdr->cmd_status =
                            MFI_STAT_DEVICE_NOT_FOUND;
                    }
#endif /* PDSUPPORT */
                }
            }

            DTRACE_PROBE2(softintr_done, uint8_t, hdr->cmd,
                uint8_t, hdr->cmd_status);

            /* Map the MFI completion status to SCSA pkt fields. */
            switch (hdr->cmd_status) {
            case MFI_STAT_OK:
                pkt->pkt_scbp[0] = STATUS_GOOD;
                break;
            case MFI_STAT_LD_CC_IN_PROGRESS:
            case MFI_STAT_LD_RECON_IN_PROGRESS:
                /* Background CC/rebuild does not fail the I/O. */
                pkt->pkt_scbp[0] = STATUS_GOOD;
                break;
            case MFI_STAT_LD_INIT_IN_PROGRESS:
                con_log(CL_ANN,
                    (CE_WARN, "Initialization in Progress"));
                pkt->pkt_reason = CMD_TRAN_ERR;

                break;
            case MFI_STAT_SCSI_DONE_WITH_ERROR:
                con_log(CL_ANN, (CE_CONT, "scsi_done error"));

                pkt->pkt_reason = CMD_CMPLT;
                ((struct scsi_status *)
                    pkt->pkt_scbp)->sts_chk = 1;

                if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
                    con_log(CL_ANN,
                        (CE_WARN, "TEST_UNIT_READY fail"));
                } else {
                    /*
                     * Build auto-request-sense data from the
                     * sense buffer the firmware filled in.
                     */
                    pkt->pkt_state |= STATE_ARQ_DONE;
                    arqstat = (void *)(pkt->pkt_scbp);
                    arqstat->sts_rqpkt_reason = CMD_CMPLT;
                    arqstat->sts_rqpkt_resid = 0;
                    arqstat->sts_rqpkt_state |=
                        STATE_GOT_BUS | STATE_GOT_TARGET
                        | STATE_SENT_CMD
                        | STATE_XFERRED_DATA;
                    *(uint8_t *)&arqstat->sts_rqpkt_status =
                        STATUS_GOOD;
                    ddi_rep_get8(
                        cmd->frame_dma_obj.acc_handle,
                        (uint8_t *)
                        &(arqstat->sts_sensedata),
                        cmd->sense,
                        sizeof (struct scsi_extended_sense),
                        DDI_DEV_AUTOINCR);
                }
                break;
            case MFI_STAT_LD_OFFLINE:
            case MFI_STAT_DEVICE_NOT_FOUND:
                con_log(CL_ANN, (CE_CONT,
                    "mrsas_softintr:device not found error"));
                pkt->pkt_reason = CMD_DEV_GONE;
                pkt->pkt_statistics = STAT_DISCON;
                break;
            case MFI_STAT_LD_LBA_OUT_OF_RANGE:
                /* Synthesize ILLEGAL REQUEST sense locally. */
                pkt->pkt_state |= STATE_ARQ_DONE;
                pkt->pkt_reason = CMD_CMPLT;
                ((struct scsi_status *)
                    pkt->pkt_scbp)->sts_chk = 1;

                arqstat = (void *)(pkt->pkt_scbp);
                arqstat->sts_rqpkt_reason = CMD_CMPLT;
                arqstat->sts_rqpkt_resid = 0;
                arqstat->sts_rqpkt_state |= STATE_GOT_BUS
                    | STATE_GOT_TARGET | STATE_SENT_CMD
                    | STATE_XFERRED_DATA;
                *(uint8_t *)&arqstat->sts_rqpkt_status =
                    STATUS_GOOD;

                arqstat->sts_sensedata.es_valid = 1;
                arqstat->sts_sensedata.es_key =
                    KEY_ILLEGAL_REQUEST;
                arqstat->sts_sensedata.es_class =
                    CLASS_EXTENDED_SENSE;

                /*
                 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
                 * ASC: 0x21h; ASCQ: 0x00h;
                 */
                arqstat->sts_sensedata.es_add_code = 0x21;
                arqstat->sts_sensedata.es_qual_code = 0x00;

                break;

            default:
                con_log(CL_ANN, (CE_CONT, "Unknown status!"));
                pkt->pkt_reason = CMD_TRAN_ERR;

                break;
            }

            atomic_add_16(&instance->fw_outstanding, (-1));

            (void) mrsas_common_check(instance, cmd);

            /* Demote to transport error if the data DMA handle is bad. */
            if (acmd->cmd_dmahandle) {
                if (mrsas_check_dma_handle(
                    acmd->cmd_dmahandle) != DDI_SUCCESS) {
                    ddi_fm_service_impact(instance->dip,
                        DDI_SERVICE_UNAFFECTED);
                    pkt->pkt_reason = CMD_TRAN_ERR;
                    pkt->pkt_statistics = 0;
                }
            }

            /* Return the MFI packet before invoking the callback. */
            mrsas_return_mfi_pkt(instance, cmd);

            /* Call the callback routine */
            if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
                pkt->pkt_comp) {
                (*pkt->pkt_comp)(pkt);
            }

            break;

        case MFI_CMD_OP_SMP:
        case MFI_CMD_OP_STP:
            complete_cmd_in_sync_mode(instance, cmd);
            break;

        case MFI_CMD_OP_DCMD:
            /* see if got an event notification */
            if (ddi_get32(cmd->frame_dma_obj.acc_handle,
                &cmd->frame->dcmd.opcode) ==
                MR_DCMD_CTRL_EVENT_WAIT) {
                if ((instance->aen_cmd == cmd) &&
                    (instance->aen_cmd->abort_aen)) {
                    con_log(CL_ANN, (CE_WARN,
                        "mrsas_softintr: "
                        "aborted_aen returned"));
                } else {
                    atomic_add_16(&instance->fw_outstanding,
                        (-1));
                    service_mfi_aen(instance, cmd);
                }
            } else {
                complete_cmd_in_sync_mode(instance, cmd);
            }

            break;

        case MFI_CMD_OP_ABORT:
            con_log(CL_ANN, (CE_NOTE, "MFI_CMD_OP_ABORT complete"));
            /*
             * MFI_CMD_OP_ABORT successfully completed
             * in the synchronous mode
             */
            complete_cmd_in_sync_mode(instance, cmd);
            break;

        default:
            /* Unrecognized opcode: report to FMA and fail the pkt. */
            mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
            ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);

            if (cmd->pkt != NULL) {
                pkt = cmd->pkt;
                if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
                    pkt->pkt_comp) {

                    con_log(CL_ANN1, (CE_CONT, "posting to "
                        "scsa cmd %p index %x pkt %p"
                        "time %llx, default ", (void *)cmd,
                        cmd->index, (void *)pkt,
                        gethrtime()));

                    (*pkt->pkt_comp)(pkt);

                }
            }
            con_log(CL_ANN, (CE_WARN, "Cmd type unknown !"));
            break;
        }
    }

    instance->softint_running = 0;

    return (DDI_INTR_CLAIMED);
}
4730 4760
/*
 * mrsas_alloc_dma_obj
 *
 * Allocate the memory and other resources for a DMA object: a DMA
 * handle, a kernel buffer of obj->size bytes with the requested access
 * endianness, and a bound DMA cookie in obj->dma_cookie[0].
 *
 * Returns the cookie count on success, or -1 on failure.
 */
int
mrsas_alloc_dma_obj(struct mrsas_instance *instance, dma_obj_t *obj,
    uchar_t endian_flags)
{
    int i;
    size_t alen = 0;
    uint_t cookie_cnt;
    struct ddi_device_acc_attr tmp_endian_attr;

    /* Clone the template access attributes with the caller's endianness. */
    tmp_endian_attr = endian_attr;
    tmp_endian_attr.devacc_attr_endian_flags = endian_flags;
    tmp_endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;

    i = ddi_dma_alloc_handle(instance->dip, &obj->dma_attr,
        DDI_DMA_SLEEP, NULL, &obj->dma_handle);
    if (i != DDI_SUCCESS) {

        switch (i) {
        case DDI_DMA_BADATTR :
            con_log(CL_ANN, (CE_WARN,
                "Failed ddi_dma_alloc_handle- Bad attribute"));
            break;
        case DDI_DMA_NORESOURCES :
            con_log(CL_ANN, (CE_WARN,
                "Failed ddi_dma_alloc_handle- No Resources"));
            break;
        default :
            con_log(CL_ANN, (CE_WARN,
                "Failed ddi_dma_alloc_handle: "
                "unknown status %d", i));
            break;
        }

        return (-1);
    }

    /* Also fail if less memory than requested was actually allocated. */
    if ((ddi_dma_mem_alloc(obj->dma_handle, obj->size, &tmp_endian_attr,
        DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP, NULL,
        &obj->buffer, &alen, &obj->acc_handle) != DDI_SUCCESS) ||
        alen < obj->size) {

        ddi_dma_free_handle(&obj->dma_handle);

        con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_mem_alloc"));

        return (-1);
    }

    if (ddi_dma_addr_bind_handle(obj->dma_handle, NULL, obj->buffer,
        obj->size, DDI_DMA_RDWR | DDI_DMA_STREAMING, DDI_DMA_SLEEP,
        NULL, &obj->dma_cookie[0], &cookie_cnt) != DDI_SUCCESS) {

        ddi_dma_mem_free(&obj->acc_handle);
        ddi_dma_free_handle(&obj->dma_handle);

        con_log(CL_ANN, (CE_WARN, "Failed : ddi_dma_addr_bind_handle"));

        return (-1);
    }

    /*
     * NOTE(review): the two FM-check failure paths below return -1
     * without unbinding or freeing the resources just acquired —
     * confirm callers clean these up, otherwise this leaks.
     */
    if (mrsas_check_dma_handle(obj->dma_handle) != DDI_SUCCESS) {
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
        return (-1);
    }

    if (mrsas_check_acc_handle(obj->acc_handle) != DDI_SUCCESS) {
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
        return (-1);
    }

    return (cookie_cnt);
}
4808 4838
/*
 * mrsas_free_dma_obj(struct mrsas_instance *, dma_obj_t)
 *
 * De-allocate the memory and other resources for a DMA object, which must
 * have been allocated by a previous call to mrsas_alloc_dma_obj().
 *
 * Returns DDI_SUCCESS (also for an already-empty object) or DDI_FAILURE
 * when an FMA handle check fails, in which case nothing is freed.
 */
int
mrsas_free_dma_obj(struct mrsas_instance *instance, dma_obj_t obj)
{

    /* Nothing to free if the object was never (fully) allocated. */
    if ((obj.dma_handle == NULL) || (obj.acc_handle == NULL)) {
        return (DDI_SUCCESS);
    }

    /*
     * NOTE: These check-handle functions fail if *_handle == NULL, but
     * this function succeeds because of the previous check.
     */
    if (mrsas_check_dma_handle(obj.dma_handle) != DDI_SUCCESS) {
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
        return (DDI_FAILURE);
    }

    if (mrsas_check_acc_handle(obj.acc_handle) != DDI_SUCCESS) {
        ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
        return (DDI_FAILURE);
    }

    (void) ddi_dma_unbind_handle(obj.dma_handle);
    ddi_dma_mem_free(&obj.acc_handle);
    ddi_dma_free_handle(&obj.dma_handle);
    /*
     * NOTE(review): obj is passed by value, so this assignment only
     * clears the local copy — the caller's handles are unchanged.
     */
    obj.acc_handle = NULL;
    return (DDI_SUCCESS);
}
4843 4873
/*
 * mrsas_dma_alloc(instance_t *, struct scsi_pkt *, struct buf *,
 * int, int (*)())
 *
 * Allocate dma resources for a new scsi command: a DMA handle bound to
 * the buf, the first window's cookies in acmd->cmd_dmacookies[], and the
 * window/cookie bookkeeping used later by mrsas_dma_move().  On success
 * CFLAG_DMAVALID is set and pkt->pkt_resid reflects any untransferable
 * remainder; on failure bioerror() is set and DDI_FAILURE returned.
 */
int
mrsas_dma_alloc(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp, int flags, int (*callback)())
{
    int dma_flags;
    int (*cb)(caddr_t);
    int i;

    ddi_dma_attr_t tmp_dma_attr = mrsas_generic_dma_attr;
    struct scsa_cmd *acmd = PKT2CMD(pkt);

    acmd->cmd_buf = bp;

    /* Direction of the transfer follows the buf's read/write flag. */
    if (bp->b_flags & B_READ) {
        acmd->cmd_flags &= ~CFLAG_DMASEND;
        dma_flags = DDI_DMA_READ;
    } else {
        acmd->cmd_flags |= CFLAG_DMASEND;
        dma_flags = DDI_DMA_WRITE;
    }

    if (flags & PKT_CONSISTENT) {
        acmd->cmd_flags |= CFLAG_CONSISTENT;
        dma_flags |= DDI_DMA_CONSISTENT;
    }

    if (flags & PKT_DMA_PARTIAL) {
        dma_flags |= DDI_DMA_PARTIAL;
    }

    dma_flags |= DDI_DMA_REDZONE;

    cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;

    tmp_dma_attr.dma_attr_sgllen = instance->max_num_sge;
    tmp_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
    if (instance->tbolt) {
        /* OCR-RESET FIX */
        tmp_dma_attr.dma_attr_count_max =
            (U64)mrsas_tbolt_max_cap_maxxfer;   /* limit to 256K */
        tmp_dma_attr.dma_attr_maxxfer =
            (U64)mrsas_tbolt_max_cap_maxxfer;   /* limit to 256K */
    }

    if ((i = ddi_dma_alloc_handle(instance->dip, &tmp_dma_attr,
        cb, 0, &acmd->cmd_dmahandle)) != DDI_SUCCESS) {
        switch (i) {
        case DDI_DMA_BADATTR:
            bioerror(bp, EFAULT);
            return (DDI_FAILURE);

        case DDI_DMA_NORESOURCES:
            bioerror(bp, 0);
            return (DDI_FAILURE);

        default:
            con_log(CL_ANN, (CE_PANIC, "ddi_dma_alloc_handle: "
                "impossible result (0x%x)", i));
            bioerror(bp, EFAULT);
            return (DDI_FAILURE);
        }
    }

    i = ddi_dma_buf_bind_handle(acmd->cmd_dmahandle, bp, dma_flags,
        cb, 0, &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies);

    switch (i) {
    case DDI_DMA_PARTIAL_MAP:
        /* Partial mapping: set up windowing, then fetch window 0. */
        if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
            con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
                "DDI_DMA_PARTIAL_MAP impossible"));
            goto no_dma_cookies;
        }

        if (ddi_dma_numwin(acmd->cmd_dmahandle, &acmd->cmd_nwin) ==
            DDI_FAILURE) {
            con_log(CL_ANN, (CE_PANIC, "ddi_dma_numwin failed"));
            goto no_dma_cookies;
        }

        if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
            &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
            &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
            DDI_FAILURE) {

            con_log(CL_ANN, (CE_PANIC, "ddi_dma_getwin failed"));
            goto no_dma_cookies;
        }

        goto get_dma_cookies;
    case DDI_DMA_MAPPED:
        /* Fully mapped in one window. */
        acmd->cmd_nwin = 1;
        acmd->cmd_dma_len = 0;
        acmd->cmd_dma_offset = 0;

get_dma_cookies:
        /*
         * Walk the cookie list (shared by both cases above), bounded
         * by the adapter's SGE limit, summing the mapped byte count.
         */
        i = 0;
        acmd->cmd_dmacount = 0;
        for (;;) {
            acmd->cmd_dmacount +=
                acmd->cmd_dmacookies[i++].dmac_size;

            if (i == instance->max_num_sge ||
                i == acmd->cmd_ncookies)
                break;

            ddi_dma_nextcookie(acmd->cmd_dmahandle,
                &acmd->cmd_dmacookies[i]);
        }

        acmd->cmd_cookie = i;
        acmd->cmd_cookiecnt = i;

        acmd->cmd_flags |= CFLAG_DMAVALID;

        if (bp->b_bcount >= acmd->cmd_dmacount) {
            pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
        } else {
            pkt->pkt_resid = 0;
        }

        return (DDI_SUCCESS);
    case DDI_DMA_NORESOURCES:
        bioerror(bp, 0);
        break;
    case DDI_DMA_NOMAPPING:
        bioerror(bp, EFAULT);
        break;
    case DDI_DMA_TOOBIG:
        bioerror(bp, EINVAL);
        break;
    case DDI_DMA_INUSE:
        con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle:"
            " DDI_DMA_INUSE impossible"));
        break;
    default:
        con_log(CL_ANN, (CE_PANIC, "ddi_dma_buf_bind_handle: "
            "impossible result (0x%x)", i));
        break;
    }

no_dma_cookies:
    /* Common failure exit: release the handle and invalidate the cmd. */
    ddi_dma_free_handle(&acmd->cmd_dmahandle);
    acmd->cmd_dmahandle = NULL;
    acmd->cmd_flags &= ~CFLAG_DMAVALID;
    return (DDI_FAILURE);
}
4997 5027
/*
 * mrsas_dma_move(struct mrsas_instance *, struct scsi_pkt *, struct buf *)
 *
 * move dma resources to next dma window
 *
 * Refills acmd->cmd_dmacookies[] with the next batch of cookies, either
 * from the remainder of the current DMA window or from the next window,
 * and updates pkt->pkt_resid.  Returns DDI_SUCCESS, or DDI_FAILURE when
 * the last window has been consumed or ddi_dma_getwin() fails.
 */
int
mrsas_dma_move(struct mrsas_instance *instance, struct scsi_pkt *pkt,
    struct buf *bp)
{
    int i = 0;

    struct scsa_cmd *acmd = PKT2CMD(pkt);

    /*
     * If there are no more cookies remaining in this window,
     * must move to the next window first.
     */
    if (acmd->cmd_cookie == acmd->cmd_ncookies) {
        /* Single fully-consumed window: nothing left to move. */
        if (acmd->cmd_curwin == acmd->cmd_nwin && acmd->cmd_nwin == 1) {
            return (DDI_SUCCESS);
        }

        /* at last window, cannot move */
        if (++acmd->cmd_curwin >= acmd->cmd_nwin) {
            return (DDI_FAILURE);
        }

        if (ddi_dma_getwin(acmd->cmd_dmahandle, acmd->cmd_curwin,
            &acmd->cmd_dma_offset, &acmd->cmd_dma_len,
            &acmd->cmd_dmacookies[0], &acmd->cmd_ncookies) ==
            DDI_FAILURE) {
            return (DDI_FAILURE);
        }

        acmd->cmd_cookie = 0;
    } else {
        /* still more cookies in this window - get the next one */
        ddi_dma_nextcookie(acmd->cmd_dmahandle,
            &acmd->cmd_dmacookies[0]);
    }

    /*
     * get remaining cookies in this window, up to our maximum.
     * Note cmd_dmacount is NOT reset here: it keeps accumulating
     * across windows so pkt_resid below reflects total progress.
     */
    for (;;) {
        acmd->cmd_dmacount += acmd->cmd_dmacookies[i++].dmac_size;
        acmd->cmd_cookie++;

        if (i == instance->max_num_sge ||
            acmd->cmd_cookie == acmd->cmd_ncookies) {
            break;
        }

        ddi_dma_nextcookie(acmd->cmd_dmahandle,
            &acmd->cmd_dmacookies[i]);
    }

    acmd->cmd_cookiecnt = i;

    if (bp->b_bcount >= acmd->cmd_dmacount) {
        pkt->pkt_resid = bp->b_bcount - acmd->cmd_dmacount;
    } else {
        pkt->pkt_resid = 0;
    }

    return (DDI_SUCCESS);
}
5064 5094
/*
 * build_cmd - translate a SCSA packet into an MFI command frame
 *
 * For READ/WRITE CDBs on a logical drive an MFI LD I/O frame is built;
 * every other CDB (and all physical-disk CDBs) becomes a pass-through
 * DCDB frame.  The scatter-gather list is filled from the packet's DMA
 * cookies and the frame count is computed.
 *
 * Returns the prepared mrsas_cmd, or NULL when either no MFI packet is
 * available (*cmd_done stays 0) or the request was completed inline —
 * MODE SENSE pages 0x3/0x4 — in which case *cmd_done is set to 1.
 */
static struct mrsas_cmd *
build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
    uint16_t flags = 0;
    uint32_t i;
    uint32_t context;
    uint32_t sge_bytes;
    uint32_t tmp_data_xfer_len;
    ddi_acc_handle_t acc_handle;
    struct mrsas_cmd *cmd;
    struct mrsas_sge64 *mfi_sgl;
    struct mrsas_sge_ieee *mfi_sgl_ieee;
    struct scsa_cmd *acmd = PKT2CMD(pkt);
    struct mrsas_pthru_frame *pthru;
    struct mrsas_io_frame *ldio;

    /* find out if this is logical or physical drive command.  */
    acmd->islogical = MRDRV_IS_LOGICAL(ap);
    acmd->device_id = MAP_DEVICE_ID(instance, ap);
    *cmd_done = 0;

    /* get the command packet */
    if (!(cmd = mrsas_get_mfi_pkt(instance))) {
        DTRACE_PROBE2(build_cmd_mfi_err, uint16_t,
            instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
        return (NULL);
    }

    acc_handle = cmd->frame_dma_obj.acc_handle;

    /* Clear the frame buffer and assign back the context id */
    (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
    ddi_put32(acc_handle, &cmd->frame->hdr.context, cmd->index);

    cmd->pkt = pkt;
    cmd->cmd = acmd;
    DTRACE_PROBE3(build_cmds, uint8_t, pkt->pkt_cdbp[0],
        ulong_t, acmd->cmd_dmacount, ulong_t, acmd->cmd_dma_len);

    /*
     * lets get the command directions.
     * NOTE(review): the second test uses `& ~CFLAG_DMASEND`, which is
     * true whenever ANY flag other than CFLAG_DMASEND is set; verify
     * `!(acmd->cmd_flags & CFLAG_DMASEND)` was not intended — as
     * written, MFI_FRAME_DIR_NONE is only reached when cmd_flags has
     * no bits set besides (possibly) none at all.
     */
    if (acmd->cmd_flags & CFLAG_DMASEND) {
        flags = MFI_FRAME_DIR_WRITE;

        if (acmd->cmd_flags & CFLAG_CONSISTENT) {
            (void) ddi_dma_sync(acmd->cmd_dmahandle,
                acmd->cmd_dma_offset, acmd->cmd_dma_len,
                DDI_DMA_SYNC_FORDEV);
        }
    } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
        flags = MFI_FRAME_DIR_READ;

        if (acmd->cmd_flags & CFLAG_CONSISTENT) {
            (void) ddi_dma_sync(acmd->cmd_dmahandle,
                acmd->cmd_dma_offset, acmd->cmd_dma_len,
                DDI_DMA_SYNC_FORCPU);
        }
    } else {
        flags = MFI_FRAME_DIR_NONE;
    }

    if (instance->flag_ieee) {
        flags |= MFI_FRAME_IEEE;
    }
    flags |= MFI_FRAME_SGL64;

    switch (pkt->pkt_cdbp[0]) {

    /*
     * case SCMD_SYNCHRONIZE_CACHE:
     *	flush_cache(instance);
     *	mrsas_return_mfi_pkt(instance, cmd);
     *	*cmd_done = 1;
     *
     *	return (NULL);
     */

    case SCMD_READ:
    case SCMD_WRITE:
    case SCMD_READ_G1:
    case SCMD_WRITE_G1:
    case SCMD_READ_G4:
    case SCMD_WRITE_G4:
    case SCMD_READ_G5:
    case SCMD_WRITE_G5:
        if (acmd->islogical) {
            ldio = (struct mrsas_io_frame *)cmd->frame;

            /*
             * prepare the Logical IO frame:
             * 2nd bit is zero for all read cmds
             */
            ddi_put8(acc_handle, &ldio->cmd,
                (pkt->pkt_cdbp[0] & 0x02) ? MFI_CMD_OP_LD_WRITE
                : MFI_CMD_OP_LD_READ);
            ddi_put8(acc_handle, &ldio->cmd_status, 0x0);
            ddi_put8(acc_handle, &ldio->scsi_status, 0x0);
            ddi_put8(acc_handle, &ldio->target_id, acmd->device_id);
            ddi_put16(acc_handle, &ldio->timeout, 0);
            ddi_put8(acc_handle, &ldio->reserved_0, 0);
            ddi_put16(acc_handle, &ldio->pad_0, 0);
            ddi_put16(acc_handle, &ldio->flags, flags);

            /* Initialize sense Information */
            bzero(cmd->sense, SENSE_LENGTH);
            ddi_put8(acc_handle, &ldio->sense_len, SENSE_LENGTH);
            ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_hi, 0);
            ddi_put32(acc_handle, &ldio->sense_buf_phys_addr_lo,
                cmd->sense_phys_addr);
            ddi_put32(acc_handle, &ldio->start_lba_hi, 0);
            ddi_put8(acc_handle, &ldio->access_byte,
                (acmd->cmd_cdblen != 6) ? pkt->pkt_cdbp[1] : 0);
            ddi_put8(acc_handle, &ldio->sge_count,
                acmd->cmd_cookiecnt);
            if (instance->flag_ieee) {
                mfi_sgl_ieee =
                    (struct mrsas_sge_ieee *)&ldio->sgl;
            } else {
                mfi_sgl = (struct mrsas_sge64 *)&ldio->sgl;
            }

            context = ddi_get32(acc_handle, &ldio->context);

            /* Decode LBA and block count per CDB group (big-endian). */
            if (acmd->cmd_cdblen == CDB_GROUP0) {
                /* 6-byte cdb */
                ddi_put32(acc_handle, &ldio->lba_count, (
                    (uint16_t)(pkt->pkt_cdbp[4])));

                ddi_put32(acc_handle, &ldio->start_lba_lo, (
                    ((uint32_t)(pkt->pkt_cdbp[3])) |
                    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
                    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
                    << 16)));
            } else if (acmd->cmd_cdblen == CDB_GROUP1) {
                /* 10-byte cdb */
                ddi_put32(acc_handle, &ldio->lba_count, (
                    ((uint16_t)(pkt->pkt_cdbp[8])) |
                    ((uint16_t)(pkt->pkt_cdbp[7]) << 8)));

                ddi_put32(acc_handle, &ldio->start_lba_lo, (
                    ((uint32_t)(pkt->pkt_cdbp[5])) |
                    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
                    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
                    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
            } else if (acmd->cmd_cdblen == CDB_GROUP5) {
                /* 12-byte cdb */
                ddi_put32(acc_handle, &ldio->lba_count, (
                    ((uint32_t)(pkt->pkt_cdbp[9])) |
                    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
                    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
                    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

                ddi_put32(acc_handle, &ldio->start_lba_lo, (
                    ((uint32_t)(pkt->pkt_cdbp[5])) |
                    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
                    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
                    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
            } else if (acmd->cmd_cdblen == CDB_GROUP4) {
                /* 16-byte cdb */
                ddi_put32(acc_handle, &ldio->lba_count, (
                    ((uint32_t)(pkt->pkt_cdbp[13])) |
                    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
                    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
                    ((uint32_t)(pkt->pkt_cdbp[10]) << 24)));

                ddi_put32(acc_handle, &ldio->start_lba_lo, (
                    ((uint32_t)(pkt->pkt_cdbp[9])) |
                    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
                    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
                    ((uint32_t)(pkt->pkt_cdbp[6]) << 24)));

                ddi_put32(acc_handle, &ldio->start_lba_hi, (
                    ((uint32_t)(pkt->pkt_cdbp[5])) |
                    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
                    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
                    ((uint32_t)(pkt->pkt_cdbp[2]) << 24)));
            }

            break;
        }
        /* fall through For all non-rd/wr and physical disk cmds */
    default:

        switch (pkt->pkt_cdbp[0]) {
        case SCMD_MODE_SENSE:
        case SCMD_MODE_SENSE_G1: {
            union scsi_cdb *cdbp;
            uint16_t page_code;

            /*
             * MODE SENSE pages 0x3 (format) and 0x4 (geometry)
             * are answered by the driver itself; the MFI packet
             * is returned and no frame is sent to the firmware.
             */
            cdbp = (void *)pkt->pkt_cdbp;
            page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
            switch (page_code) {
            case 0x3:
            case 0x4:
                (void) mrsas_mode_sense_build(pkt);
                mrsas_return_mfi_pkt(instance, cmd);
                *cmd_done = 1;
                return (NULL);
            }
            break;
        }
        default:
            break;
        }

        pthru = (struct mrsas_pthru_frame *)cmd->frame;

        /* prepare the DCDB frame */
        ddi_put8(acc_handle, &pthru->cmd, (acmd->islogical) ?
            MFI_CMD_OP_LD_SCSI : MFI_CMD_OP_PD_SCSI);
        ddi_put8(acc_handle, &pthru->cmd_status, 0x0);
        ddi_put8(acc_handle, &pthru->scsi_status, 0x0);
        ddi_put8(acc_handle, &pthru->target_id, acmd->device_id);
        ddi_put8(acc_handle, &pthru->lun, 0);
        ddi_put8(acc_handle, &pthru->cdb_len, acmd->cmd_cdblen);
        ddi_put16(acc_handle, &pthru->timeout, 0);
        ddi_put16(acc_handle, &pthru->flags, flags);
        /* Total transfer length is the sum of all DMA cookie sizes. */
        tmp_data_xfer_len = 0;
        for (i = 0; i < acmd->cmd_cookiecnt; i++) {
            tmp_data_xfer_len += acmd->cmd_dmacookies[i].dmac_size;
        }
        ddi_put32(acc_handle, &pthru->data_xfer_len,
            tmp_data_xfer_len);
        ddi_put8(acc_handle, &pthru->sge_count, acmd->cmd_cookiecnt);
        if (instance->flag_ieee) {
            mfi_sgl_ieee = (struct mrsas_sge_ieee *)&pthru->sgl;
        } else {
            mfi_sgl = (struct mrsas_sge64 *)&pthru->sgl;
        }

        bzero(cmd->sense, SENSE_LENGTH);
        ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
        ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
        ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo,
            cmd->sense_phys_addr);

        context = ddi_get32(acc_handle, &pthru->context);
        ddi_rep_put8(acc_handle, (uint8_t *)pkt->pkt_cdbp,
            (uint8_t *)pthru->cdb, acmd->cmd_cdblen, DDI_DEV_AUTOINCR);

        break;
    }
#ifdef lint
    context = context;
#endif
    /* prepare the scatter-gather list for the firmware */
    if (instance->flag_ieee) {
        for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl_ieee++) {
            ddi_put64(acc_handle, &mfi_sgl_ieee->phys_addr,
                acmd->cmd_dmacookies[i].dmac_laddress);
            ddi_put32(acc_handle, &mfi_sgl_ieee->length,
                acmd->cmd_dmacookies[i].dmac_size);
        }
        sge_bytes = sizeof (struct mrsas_sge_ieee)*acmd->cmd_cookiecnt;
    } else {
        for (i = 0; i < acmd->cmd_cookiecnt; i++, mfi_sgl++) {
            ddi_put64(acc_handle, &mfi_sgl->phys_addr,
                acmd->cmd_dmacookies[i].dmac_laddress);
            ddi_put32(acc_handle, &mfi_sgl->length,
                acmd->cmd_dmacookies[i].dmac_size);
        }
        sge_bytes = sizeof (struct mrsas_sge64)*acmd->cmd_cookiecnt;
    }

    /*
     * Frame count: one header frame plus however many MRMFI_FRAME_SIZE
     * frames the SGL spills into, capped at 8.
     */
    cmd->frame_count = (sge_bytes / MRMFI_FRAME_SIZE) +
        ((sge_bytes % MRMFI_FRAME_SIZE) ? 1 : 0) + 1;

    if (cmd->frame_count >= 8) {
        cmd->frame_count = 8;
    }

    return (cmd);
}
5341 5371
5342 5372 /*
5343 5373 * wait_for_outstanding - Wait for all outstanding cmds
5344 5374 * @instance: Adapter soft state
5345 5375 *
5346 5376 * This function waits for upto MRDRV_RESET_WAIT_TIME seconds for FW to
5347 5377 * complete all its outstanding commands. Returns error if one or more IOs
5348 5378 * are pending after this time period.
5349 5379 */
5350 5380 static int
5351 5381 wait_for_outstanding(struct mrsas_instance *instance)
5352 5382 {
5353 5383 int i;
5354 5384 uint32_t wait_time = 90;
5355 5385
5356 5386 for (i = 0; i < wait_time; i++) {
5357 5387 if (!instance->fw_outstanding) {
5358 5388 break;
5359 5389 }
5360 5390
5361 5391 drv_usecwait(MILLISEC); /* wait for 1000 usecs */;
5362 5392 }
5363 5393
5364 5394 if (instance->fw_outstanding) {
5365 5395 return (1);
5366 5396 }
5367 5397
5368 5398 return (0);
5369 5399 }
5370 5400
5371 5401 /*
5372 5402 * issue_mfi_pthru
5373 5403 */
5374 5404 static int
5375 5405 issue_mfi_pthru(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5376 5406 struct mrsas_cmd *cmd, int mode)
5377 5407 {
5378 5408 void *ubuf;
5379 5409 uint32_t kphys_addr = 0;
5380 5410 uint32_t xferlen = 0;
5381 5411 uint32_t new_xfer_length = 0;
5382 5412 uint_t model;
5383 5413 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5384 5414 dma_obj_t pthru_dma_obj;
5385 5415 struct mrsas_pthru_frame *kpthru;
5386 5416 struct mrsas_pthru_frame *pthru;
5387 5417 int i;
5388 5418 pthru = &cmd->frame->pthru;
5389 5419 kpthru = (struct mrsas_pthru_frame *)&ioctl->frame[0];
5390 5420
5391 5421 if (instance->adapterresetinprogress) {
5392 5422 con_log(CL_ANN1, (CE_WARN, "issue_mfi_pthru: Reset flag set, "
5393 5423 "returning mfi_pkt and setting TRAN_BUSY\n"));
5394 5424 return (DDI_FAILURE);
5395 5425 }
5396 5426 model = ddi_model_convert_from(mode & FMODELS);
5397 5427 if (model == DDI_MODEL_ILP32) {
5398 5428 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5399 5429
5400 5430 xferlen = kpthru->sgl.sge32[0].length;
5401 5431
5402 5432 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5403 5433 } else {
5404 5434 #ifdef _ILP32
5405 5435 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP32"));
5406 5436 xferlen = kpthru->sgl.sge32[0].length;
5407 5437 ubuf = (void *)(ulong_t)kpthru->sgl.sge32[0].phys_addr;
5408 5438 #else
5409 5439 con_log(CL_ANN1, (CE_CONT, "issue_mfi_pthru: DDI_MODEL_LP64"));
5410 5440 xferlen = kpthru->sgl.sge64[0].length;
5411 5441 ubuf = (void *)(ulong_t)kpthru->sgl.sge64[0].phys_addr;
5412 5442 #endif
5413 5443 }
5414 5444
5415 5445 if (xferlen) {
5416 5446 /* means IOCTL requires DMA */
5417 5447 /* allocate the data transfer buffer */
5418 5448 /* pthru_dma_obj.size = xferlen; */
5419 5449 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5420 5450 PAGESIZE);
5421 5451 pthru_dma_obj.size = new_xfer_length;
5422 5452 pthru_dma_obj.dma_attr = mrsas_generic_dma_attr;
5423 5453 pthru_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5424 5454 pthru_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5425 5455 pthru_dma_obj.dma_attr.dma_attr_sgllen = 1;
5426 5456 pthru_dma_obj.dma_attr.dma_attr_align = 1;
5427 5457
5428 5458 /* allocate kernel buffer for DMA */
5429 5459 if (mrsas_alloc_dma_obj(instance, &pthru_dma_obj,
5430 5460 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5431 5461 con_log(CL_ANN, (CE_WARN, "issue_mfi_pthru: "
5432 5462 "could not allocate data transfer buffer."));
5433 5463 return (DDI_FAILURE);
5434 5464 }
5435 5465 (void) memset(pthru_dma_obj.buffer, 0, xferlen);
5436 5466
5437 5467 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5438 5468 if (kpthru->flags & MFI_FRAME_DIR_WRITE) {
5439 5469 for (i = 0; i < xferlen; i++) {
5440 5470 if (ddi_copyin((uint8_t *)ubuf+i,
5441 5471 (uint8_t *)pthru_dma_obj.buffer+i,
5442 5472 1, mode)) {
5443 5473 con_log(CL_ANN, (CE_WARN,
5444 5474 "issue_mfi_pthru : "
5445 5475 "copy from user space failed"));
5446 5476 return (DDI_FAILURE);
5447 5477 }
5448 5478 }
5449 5479 }
5450 5480
5451 5481 kphys_addr = pthru_dma_obj.dma_cookie[0].dmac_address;
5452 5482 }
5453 5483
5454 5484 ddi_put8(acc_handle, &pthru->cmd, kpthru->cmd);
5455 5485 ddi_put8(acc_handle, &pthru->sense_len, SENSE_LENGTH);
5456 5486 ddi_put8(acc_handle, &pthru->cmd_status, 0);
5457 5487 ddi_put8(acc_handle, &pthru->scsi_status, 0);
5458 5488 ddi_put8(acc_handle, &pthru->target_id, kpthru->target_id);
5459 5489 ddi_put8(acc_handle, &pthru->lun, kpthru->lun);
5460 5490 ddi_put8(acc_handle, &pthru->cdb_len, kpthru->cdb_len);
5461 5491 ddi_put8(acc_handle, &pthru->sge_count, kpthru->sge_count);
5462 5492 ddi_put16(acc_handle, &pthru->timeout, kpthru->timeout);
5463 5493 ddi_put32(acc_handle, &pthru->data_xfer_len, kpthru->data_xfer_len);
5464 5494
5465 5495 ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_hi, 0);
5466 5496 pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr;
5467 5497 /* ddi_put32(acc_handle, &pthru->sense_buf_phys_addr_lo, 0); */
5468 5498
5469 5499 ddi_rep_put8(acc_handle, (uint8_t *)kpthru->cdb, (uint8_t *)pthru->cdb,
5470 5500 pthru->cdb_len, DDI_DEV_AUTOINCR);
5471 5501
5472 5502 ddi_put16(acc_handle, &pthru->flags, kpthru->flags & ~MFI_FRAME_SGL64);
5473 5503 ddi_put32(acc_handle, &pthru->sgl.sge32[0].length, xferlen);
5474 5504 ddi_put32(acc_handle, &pthru->sgl.sge32[0].phys_addr, kphys_addr);
5475 5505
5476 5506 cmd->sync_cmd = MRSAS_TRUE;
5477 5507 cmd->frame_count = 1;
5478 5508
5479 5509 if (instance->tbolt) {
5480 5510 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5481 5511 }
5482 5512
5483 5513 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5484 5514 con_log(CL_ANN, (CE_WARN,
5485 5515 "issue_mfi_pthru: fw_ioctl failed"));
5486 5516 } else {
5487 5517 if (xferlen && kpthru->flags & MFI_FRAME_DIR_READ) {
5488 5518 for (i = 0; i < xferlen; i++) {
5489 5519 if (ddi_copyout(
5490 5520 (uint8_t *)pthru_dma_obj.buffer+i,
5491 5521 (uint8_t *)ubuf+i, 1, mode)) {
5492 5522 con_log(CL_ANN, (CE_WARN,
5493 5523 "issue_mfi_pthru : "
5494 5524 "copy to user space failed"));
5495 5525 return (DDI_FAILURE);
5496 5526 }
5497 5527 }
5498 5528 }
5499 5529 }
5500 5530
5501 5531 kpthru->cmd_status = ddi_get8(acc_handle, &pthru->cmd_status);
5502 5532 kpthru->scsi_status = ddi_get8(acc_handle, &pthru->scsi_status);
5503 5533
5504 5534 con_log(CL_ANN, (CE_CONT, "issue_mfi_pthru: cmd_status %x, "
5505 5535 "scsi_status %x", kpthru->cmd_status, kpthru->scsi_status));
5506 5536 DTRACE_PROBE3(issue_pthru, uint8_t, kpthru->cmd, uint8_t,
5507 5537 kpthru->cmd_status, uint8_t, kpthru->scsi_status);
5508 5538
5509 5539 if (kpthru->sense_len) {
5510 5540 uint_t sense_len = SENSE_LENGTH;
5511 5541 void *sense_ubuf =
5512 5542 (void *)(ulong_t)kpthru->sense_buf_phys_addr_lo;
5513 5543 if (kpthru->sense_len <= SENSE_LENGTH) {
5514 5544 sense_len = kpthru->sense_len;
5515 5545 }
5516 5546
5517 5547 for (i = 0; i < sense_len; i++) {
5518 5548 if (ddi_copyout(
5519 5549 (uint8_t *)cmd->sense+i,
5520 5550 (uint8_t *)sense_ubuf+i, 1, mode)) {
5521 5551 con_log(CL_ANN, (CE_WARN,
5522 5552 "issue_mfi_pthru : "
5523 5553 "copy to user space failed"));
5524 5554 }
5525 5555 con_log(CL_DLEVEL1, (CE_WARN,
5526 5556 "Copying Sense info sense_buff[%d] = 0x%X",
5527 5557 i, *((uint8_t *)cmd->sense + i)));
5528 5558 }
5529 5559 }
5530 5560 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
5531 5561 DDI_DMA_SYNC_FORDEV);
5532 5562
5533 5563 if (xferlen) {
5534 5564 /* free kernel buffer */
5535 5565 if (mrsas_free_dma_obj(instance, pthru_dma_obj) != DDI_SUCCESS)
5536 5566 return (DDI_FAILURE);
5537 5567 }
5538 5568
5539 5569 return (DDI_SUCCESS);
5540 5570 }
5541 5571
5542 5572 /*
5543 5573 * issue_mfi_dcmd
5544 5574 */
5545 5575 static int
5546 5576 issue_mfi_dcmd(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5547 5577 struct mrsas_cmd *cmd, int mode)
5548 5578 {
5549 5579 void *ubuf;
5550 5580 uint32_t kphys_addr = 0;
5551 5581 uint32_t xferlen = 0;
5552 5582 uint32_t new_xfer_length = 0;
5553 5583 uint32_t model;
5554 5584 dma_obj_t dcmd_dma_obj;
5555 5585 struct mrsas_dcmd_frame *kdcmd;
5556 5586 struct mrsas_dcmd_frame *dcmd;
5557 5587 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5558 5588 int i;
5559 5589 dcmd = &cmd->frame->dcmd;
5560 5590 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
5561 5591
5562 5592 if (instance->adapterresetinprogress) {
5563 5593 con_log(CL_ANN1, (CE_NOTE, "Reset flag set, "
5564 5594 "returning mfi_pkt and setting TRAN_BUSY"));
5565 5595 return (DDI_FAILURE);
5566 5596 }
5567 5597 model = ddi_model_convert_from(mode & FMODELS);
5568 5598 if (model == DDI_MODEL_ILP32) {
5569 5599 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5570 5600
5571 5601 xferlen = kdcmd->sgl.sge32[0].length;
5572 5602
5573 5603 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5574 5604 } else {
5575 5605 #ifdef _ILP32
5576 5606 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_ILP32"));
5577 5607 xferlen = kdcmd->sgl.sge32[0].length;
5578 5608 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
5579 5609 #else
5580 5610 con_log(CL_ANN1, (CE_CONT, "issue_mfi_dcmd: DDI_MODEL_LP64"));
5581 5611 xferlen = kdcmd->sgl.sge64[0].length;
5582 5612 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
5583 5613 #endif
5584 5614 }
5585 5615 if (xferlen) {
5586 5616 /* means IOCTL requires DMA */
5587 5617 /* allocate the data transfer buffer */
5588 5618 /* dcmd_dma_obj.size = xferlen; */
5589 5619 MRSAS_GET_BOUNDARY_ALIGNED_LEN(xferlen, new_xfer_length,
5590 5620 PAGESIZE);
5591 5621 dcmd_dma_obj.size = new_xfer_length;
5592 5622 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
5593 5623 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5594 5624 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5595 5625 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
5596 5626 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
5597 5627
5598 5628 /* allocate kernel buffer for DMA */
5599 5629 if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
5600 5630 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5601 5631 con_log(CL_ANN,
5602 5632 (CE_WARN, "issue_mfi_dcmd: could not "
5603 5633 "allocate data transfer buffer."));
5604 5634 return (DDI_FAILURE);
5605 5635 }
5606 5636 (void) memset(dcmd_dma_obj.buffer, 0, xferlen);
5607 5637
5608 5638 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5609 5639 if (kdcmd->flags & MFI_FRAME_DIR_WRITE) {
5610 5640 for (i = 0; i < xferlen; i++) {
5611 5641 if (ddi_copyin((uint8_t *)ubuf + i,
5612 5642 (uint8_t *)dcmd_dma_obj.buffer + i,
5613 5643 1, mode)) {
5614 5644 con_log(CL_ANN, (CE_WARN,
5615 5645 "issue_mfi_dcmd : "
5616 5646 "copy from user space failed"));
5617 5647 return (DDI_FAILURE);
5618 5648 }
5619 5649 }
5620 5650 }
5621 5651
5622 5652 kphys_addr = dcmd_dma_obj.dma_cookie[0].dmac_address;
5623 5653 }
5624 5654
5625 5655 ddi_put8(acc_handle, &dcmd->cmd, kdcmd->cmd);
5626 5656 ddi_put8(acc_handle, &dcmd->cmd_status, 0);
5627 5657 ddi_put8(acc_handle, &dcmd->sge_count, kdcmd->sge_count);
5628 5658 ddi_put16(acc_handle, &dcmd->timeout, kdcmd->timeout);
5629 5659 ddi_put32(acc_handle, &dcmd->data_xfer_len, kdcmd->data_xfer_len);
5630 5660 ddi_put32(acc_handle, &dcmd->opcode, kdcmd->opcode);
5631 5661
5632 5662 ddi_rep_put8(acc_handle, (uint8_t *)kdcmd->mbox.b,
5633 5663 (uint8_t *)dcmd->mbox.b, DCMD_MBOX_SZ, DDI_DEV_AUTOINCR);
5634 5664
5635 5665 ddi_put16(acc_handle, &dcmd->flags, kdcmd->flags & ~MFI_FRAME_SGL64);
5636 5666 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].length, xferlen);
5637 5667 ddi_put32(acc_handle, &dcmd->sgl.sge32[0].phys_addr, kphys_addr);
5638 5668
5639 5669 cmd->sync_cmd = MRSAS_TRUE;
5640 5670 cmd->frame_count = 1;
5641 5671
5642 5672 if (instance->tbolt) {
5643 5673 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5644 5674 }
5645 5675
5646 5676 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5647 5677 con_log(CL_ANN, (CE_WARN, "issue_mfi_dcmd: fw_ioctl failed"));
5648 5678 } else {
5649 5679 if (xferlen && (kdcmd->flags & MFI_FRAME_DIR_READ)) {
5650 5680 for (i = 0; i < xferlen; i++) {
5651 5681 if (ddi_copyout(
5652 5682 (uint8_t *)dcmd_dma_obj.buffer + i,
5653 5683 (uint8_t *)ubuf + i,
5654 5684 1, mode)) {
5655 5685 con_log(CL_ANN, (CE_WARN,
5656 5686 "issue_mfi_dcmd : "
5657 5687 "copy to user space failed"));
5658 5688 return (DDI_FAILURE);
5659 5689 }
5660 5690 }
5661 5691 }
5662 5692 }
5663 5693
5664 5694 kdcmd->cmd_status = ddi_get8(acc_handle, &dcmd->cmd_status);
5665 5695 con_log(CL_ANN,
5666 5696 (CE_CONT, "issue_mfi_dcmd: cmd_status %x", kdcmd->cmd_status));
5667 5697 DTRACE_PROBE3(issue_dcmd, uint32_t, kdcmd->opcode, uint8_t,
5668 5698 kdcmd->cmd, uint8_t, kdcmd->cmd_status);
5669 5699
5670 5700 if (xferlen) {
5671 5701 /* free kernel buffer */
5672 5702 if (mrsas_free_dma_obj(instance, dcmd_dma_obj) != DDI_SUCCESS)
5673 5703 return (DDI_FAILURE);
5674 5704 }
5675 5705
5676 5706 return (DDI_SUCCESS);
5677 5707 }
5678 5708
5679 5709 /*
5680 5710 * issue_mfi_smp
5681 5711 */
5682 5712 static int
5683 5713 issue_mfi_smp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5684 5714 struct mrsas_cmd *cmd, int mode)
5685 5715 {
5686 5716 void *request_ubuf;
5687 5717 void *response_ubuf;
5688 5718 uint32_t request_xferlen = 0;
5689 5719 uint32_t response_xferlen = 0;
5690 5720 uint32_t new_xfer_length1 = 0;
5691 5721 uint32_t new_xfer_length2 = 0;
5692 5722 uint_t model;
5693 5723 dma_obj_t request_dma_obj;
5694 5724 dma_obj_t response_dma_obj;
5695 5725 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5696 5726 struct mrsas_smp_frame *ksmp;
5697 5727 struct mrsas_smp_frame *smp;
5698 5728 struct mrsas_sge32 *sge32;
5699 5729 #ifndef _ILP32
5700 5730 struct mrsas_sge64 *sge64;
5701 5731 #endif
5702 5732 int i;
5703 5733 uint64_t tmp_sas_addr;
5704 5734
5705 5735 smp = &cmd->frame->smp;
5706 5736 ksmp = (struct mrsas_smp_frame *)&ioctl->frame[0];
5707 5737
5708 5738 if (instance->adapterresetinprogress) {
5709 5739 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5710 5740 "returning mfi_pkt and setting TRAN_BUSY\n"));
5711 5741 return (DDI_FAILURE);
5712 5742 }
5713 5743 model = ddi_model_convert_from(mode & FMODELS);
5714 5744 if (model == DDI_MODEL_ILP32) {
5715 5745 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5716 5746
5717 5747 sge32 = &ksmp->sgl[0].sge32[0];
5718 5748 response_xferlen = sge32[0].length;
5719 5749 request_xferlen = sge32[1].length;
5720 5750 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5721 5751 "response_xferlen = %x, request_xferlen = %x",
5722 5752 response_xferlen, request_xferlen));
5723 5753
5724 5754 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5725 5755 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5726 5756 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5727 5757 "response_ubuf = %p, request_ubuf = %p",
5728 5758 response_ubuf, request_ubuf));
5729 5759 } else {
5730 5760 #ifdef _ILP32
5731 5761 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_ILP32"));
5732 5762
5733 5763 sge32 = &ksmp->sgl[0].sge32[0];
5734 5764 response_xferlen = sge32[0].length;
5735 5765 request_xferlen = sge32[1].length;
5736 5766 con_log(CL_ANN, (CE_CONT, "issue_mfi_smp: "
5737 5767 "response_xferlen = %x, request_xferlen = %x",
5738 5768 response_xferlen, request_xferlen));
5739 5769
5740 5770 response_ubuf = (void *)(ulong_t)sge32[0].phys_addr;
5741 5771 request_ubuf = (void *)(ulong_t)sge32[1].phys_addr;
5742 5772 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: "
5743 5773 "response_ubuf = %p, request_ubuf = %p",
5744 5774 response_ubuf, request_ubuf));
5745 5775 #else
5746 5776 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp: DDI_MODEL_LP64"));
5747 5777
5748 5778 sge64 = &ksmp->sgl[0].sge64[0];
5749 5779 response_xferlen = sge64[0].length;
5750 5780 request_xferlen = sge64[1].length;
5751 5781
5752 5782 response_ubuf = (void *)(ulong_t)sge64[0].phys_addr;
5753 5783 request_ubuf = (void *)(ulong_t)sge64[1].phys_addr;
5754 5784 #endif
5755 5785 }
5756 5786 if (request_xferlen) {
5757 5787 /* means IOCTL requires DMA */
5758 5788 /* allocate the data transfer buffer */
5759 5789 /* request_dma_obj.size = request_xferlen; */
5760 5790 MRSAS_GET_BOUNDARY_ALIGNED_LEN(request_xferlen,
5761 5791 new_xfer_length1, PAGESIZE);
5762 5792 request_dma_obj.size = new_xfer_length1;
5763 5793 request_dma_obj.dma_attr = mrsas_generic_dma_attr;
5764 5794 request_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5765 5795 request_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5766 5796 request_dma_obj.dma_attr.dma_attr_sgllen = 1;
5767 5797 request_dma_obj.dma_attr.dma_attr_align = 1;
5768 5798
5769 5799 /* allocate kernel buffer for DMA */
5770 5800 if (mrsas_alloc_dma_obj(instance, &request_dma_obj,
5771 5801 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5772 5802 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5773 5803 "could not allocate data transfer buffer."));
5774 5804 return (DDI_FAILURE);
5775 5805 }
5776 5806 (void) memset(request_dma_obj.buffer, 0, request_xferlen);
5777 5807
5778 5808 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5779 5809 for (i = 0; i < request_xferlen; i++) {
5780 5810 if (ddi_copyin((uint8_t *)request_ubuf + i,
5781 5811 (uint8_t *)request_dma_obj.buffer + i,
5782 5812 1, mode)) {
5783 5813 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5784 5814 "copy from user space failed"));
5785 5815 return (DDI_FAILURE);
5786 5816 }
5787 5817 }
5788 5818 }
5789 5819
5790 5820 if (response_xferlen) {
5791 5821 /* means IOCTL requires DMA */
5792 5822 /* allocate the data transfer buffer */
5793 5823 /* response_dma_obj.size = response_xferlen; */
5794 5824 MRSAS_GET_BOUNDARY_ALIGNED_LEN(response_xferlen,
5795 5825 new_xfer_length2, PAGESIZE);
5796 5826 response_dma_obj.size = new_xfer_length2;
5797 5827 response_dma_obj.dma_attr = mrsas_generic_dma_attr;
5798 5828 response_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
5799 5829 response_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
5800 5830 response_dma_obj.dma_attr.dma_attr_sgllen = 1;
5801 5831 response_dma_obj.dma_attr.dma_attr_align = 1;
5802 5832
5803 5833 /* allocate kernel buffer for DMA */
5804 5834 if (mrsas_alloc_dma_obj(instance, &response_dma_obj,
5805 5835 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
5806 5836 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5807 5837 "could not allocate data transfer buffer."));
5808 5838 return (DDI_FAILURE);
5809 5839 }
5810 5840 (void) memset(response_dma_obj.buffer, 0, response_xferlen);
5811 5841
5812 5842 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
5813 5843 for (i = 0; i < response_xferlen; i++) {
5814 5844 if (ddi_copyin((uint8_t *)response_ubuf + i,
5815 5845 (uint8_t *)response_dma_obj.buffer + i,
5816 5846 1, mode)) {
5817 5847 con_log(CL_ANN, (CE_WARN, "issue_mfi_smp: "
5818 5848 "copy from user space failed"));
5819 5849 return (DDI_FAILURE);
5820 5850 }
5821 5851 }
5822 5852 }
5823 5853
5824 5854 ddi_put8(acc_handle, &smp->cmd, ksmp->cmd);
5825 5855 ddi_put8(acc_handle, &smp->cmd_status, 0);
5826 5856 ddi_put8(acc_handle, &smp->connection_status, 0);
5827 5857 ddi_put8(acc_handle, &smp->sge_count, ksmp->sge_count);
5828 5858 /* smp->context = ksmp->context; */
5829 5859 ddi_put16(acc_handle, &smp->timeout, ksmp->timeout);
5830 5860 ddi_put32(acc_handle, &smp->data_xfer_len, ksmp->data_xfer_len);
5831 5861
5832 5862 bcopy((void *)&ksmp->sas_addr, (void *)&tmp_sas_addr,
5833 5863 sizeof (uint64_t));
5834 5864 ddi_put64(acc_handle, &smp->sas_addr, tmp_sas_addr);
5835 5865
5836 5866 ddi_put16(acc_handle, &smp->flags, ksmp->flags & ~MFI_FRAME_SGL64);
5837 5867
5838 5868 model = ddi_model_convert_from(mode & FMODELS);
5839 5869 if (model == DDI_MODEL_ILP32) {
5840 5870 con_log(CL_ANN1, (CE_CONT,
5841 5871 "issue_mfi_smp: DDI_MODEL_ILP32"));
5842 5872
5843 5873 sge32 = &smp->sgl[0].sge32[0];
5844 5874 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5845 5875 ddi_put32(acc_handle, &sge32[0].phys_addr,
5846 5876 response_dma_obj.dma_cookie[0].dmac_address);
5847 5877 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5848 5878 ddi_put32(acc_handle, &sge32[1].phys_addr,
5849 5879 request_dma_obj.dma_cookie[0].dmac_address);
5850 5880 } else {
5851 5881 #ifdef _ILP32
5852 5882 con_log(CL_ANN1, (CE_CONT,
5853 5883 "issue_mfi_smp: DDI_MODEL_ILP32"));
5854 5884 sge32 = &smp->sgl[0].sge32[0];
5855 5885 ddi_put32(acc_handle, &sge32[0].length, response_xferlen);
5856 5886 ddi_put32(acc_handle, &sge32[0].phys_addr,
5857 5887 response_dma_obj.dma_cookie[0].dmac_address);
5858 5888 ddi_put32(acc_handle, &sge32[1].length, request_xferlen);
5859 5889 ddi_put32(acc_handle, &sge32[1].phys_addr,
5860 5890 request_dma_obj.dma_cookie[0].dmac_address);
5861 5891 #else
5862 5892 con_log(CL_ANN1, (CE_CONT,
5863 5893 "issue_mfi_smp: DDI_MODEL_LP64"));
5864 5894 sge64 = &smp->sgl[0].sge64[0];
5865 5895 ddi_put32(acc_handle, &sge64[0].length, response_xferlen);
5866 5896 ddi_put64(acc_handle, &sge64[0].phys_addr,
5867 5897 response_dma_obj.dma_cookie[0].dmac_address);
5868 5898 ddi_put32(acc_handle, &sge64[1].length, request_xferlen);
5869 5899 ddi_put64(acc_handle, &sge64[1].phys_addr,
5870 5900 request_dma_obj.dma_cookie[0].dmac_address);
5871 5901 #endif
5872 5902 }
5873 5903 con_log(CL_ANN1, (CE_CONT, "issue_mfi_smp : "
5874 5904 "smp->response_xferlen = %d, smp->request_xferlen = %d "
5875 5905 "smp->data_xfer_len = %d", ddi_get32(acc_handle, &sge32[0].length),
5876 5906 ddi_get32(acc_handle, &sge32[1].length),
5877 5907 ddi_get32(acc_handle, &smp->data_xfer_len)));
5878 5908
5879 5909 cmd->sync_cmd = MRSAS_TRUE;
5880 5910 cmd->frame_count = 1;
5881 5911
5882 5912 if (instance->tbolt) {
5883 5913 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
5884 5914 }
5885 5915
5886 5916 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
5887 5917 con_log(CL_ANN, (CE_WARN,
5888 5918 "issue_mfi_smp: fw_ioctl failed"));
5889 5919 } else {
5890 5920 con_log(CL_ANN1, (CE_CONT,
5891 5921 "issue_mfi_smp: copy to user space"));
5892 5922
5893 5923 if (request_xferlen) {
5894 5924 for (i = 0; i < request_xferlen; i++) {
5895 5925 if (ddi_copyout(
5896 5926 (uint8_t *)request_dma_obj.buffer +
5897 5927 i, (uint8_t *)request_ubuf + i,
5898 5928 1, mode)) {
5899 5929 con_log(CL_ANN, (CE_WARN,
5900 5930 "issue_mfi_smp : copy to user space"
5901 5931 " failed"));
5902 5932 return (DDI_FAILURE);
5903 5933 }
5904 5934 }
5905 5935 }
5906 5936
5907 5937 if (response_xferlen) {
5908 5938 for (i = 0; i < response_xferlen; i++) {
5909 5939 if (ddi_copyout(
5910 5940 (uint8_t *)response_dma_obj.buffer
5911 5941 + i, (uint8_t *)response_ubuf
5912 5942 + i, 1, mode)) {
5913 5943 con_log(CL_ANN, (CE_WARN,
5914 5944 "issue_mfi_smp : copy to "
5915 5945 "user space failed"));
5916 5946 return (DDI_FAILURE);
5917 5947 }
5918 5948 }
5919 5949 }
5920 5950 }
5921 5951
5922 5952 ksmp->cmd_status = ddi_get8(acc_handle, &smp->cmd_status);
5923 5953 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_smp: smp->cmd_status = %d",
5924 5954 ksmp->cmd_status));
5925 5955 DTRACE_PROBE2(issue_smp, uint8_t, ksmp->cmd, uint8_t, ksmp->cmd_status);
5926 5956
5927 5957 if (request_xferlen) {
5928 5958 /* free kernel buffer */
5929 5959 if (mrsas_free_dma_obj(instance, request_dma_obj) !=
5930 5960 DDI_SUCCESS)
5931 5961 return (DDI_FAILURE);
5932 5962 }
5933 5963
5934 5964 if (response_xferlen) {
5935 5965 /* free kernel buffer */
5936 5966 if (mrsas_free_dma_obj(instance, response_dma_obj) !=
5937 5967 DDI_SUCCESS)
5938 5968 return (DDI_FAILURE);
5939 5969 }
5940 5970
5941 5971 return (DDI_SUCCESS);
5942 5972 }
5943 5973
5944 5974 /*
5945 5975 * issue_mfi_stp
5946 5976 */
5947 5977 static int
5948 5978 issue_mfi_stp(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
5949 5979 struct mrsas_cmd *cmd, int mode)
5950 5980 {
5951 5981 void *fis_ubuf;
5952 5982 void *data_ubuf;
5953 5983 uint32_t fis_xferlen = 0;
5954 5984 uint32_t new_xfer_length1 = 0;
5955 5985 uint32_t new_xfer_length2 = 0;
5956 5986 uint32_t data_xferlen = 0;
5957 5987 uint_t model;
5958 5988 dma_obj_t fis_dma_obj;
5959 5989 dma_obj_t data_dma_obj;
5960 5990 struct mrsas_stp_frame *kstp;
5961 5991 struct mrsas_stp_frame *stp;
5962 5992 ddi_acc_handle_t acc_handle = cmd->frame_dma_obj.acc_handle;
5963 5993 int i;
5964 5994
5965 5995 stp = &cmd->frame->stp;
5966 5996 kstp = (struct mrsas_stp_frame *)&ioctl->frame[0];
5967 5997
5968 5998 if (instance->adapterresetinprogress) {
5969 5999 con_log(CL_ANN1, (CE_WARN, "Reset flag set, "
5970 6000 "returning mfi_pkt and setting TRAN_BUSY\n"));
5971 6001 return (DDI_FAILURE);
5972 6002 }
5973 6003 model = ddi_model_convert_from(mode & FMODELS);
5974 6004 if (model == DDI_MODEL_ILP32) {
5975 6005 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5976 6006
5977 6007 fis_xferlen = kstp->sgl.sge32[0].length;
5978 6008 data_xferlen = kstp->sgl.sge32[1].length;
5979 6009
5980 6010 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5981 6011 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5982 6012 } else {
5983 6013 #ifdef _ILP32
5984 6014 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_ILP32"));
5985 6015
5986 6016 fis_xferlen = kstp->sgl.sge32[0].length;
5987 6017 data_xferlen = kstp->sgl.sge32[1].length;
5988 6018
5989 6019 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge32[0].phys_addr;
5990 6020 data_ubuf = (void *)(ulong_t)kstp->sgl.sge32[1].phys_addr;
5991 6021 #else
5992 6022 con_log(CL_ANN1, (CE_CONT, "issue_mfi_stp: DDI_MODEL_LP64"));
5993 6023
5994 6024 fis_xferlen = kstp->sgl.sge64[0].length;
5995 6025 data_xferlen = kstp->sgl.sge64[1].length;
5996 6026
5997 6027 fis_ubuf = (void *)(ulong_t)kstp->sgl.sge64[0].phys_addr;
5998 6028 data_ubuf = (void *)(ulong_t)kstp->sgl.sge64[1].phys_addr;
5999 6029 #endif
6000 6030 }
6001 6031
6002 6032
6003 6033 if (fis_xferlen) {
6004 6034 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: "
6005 6035 "fis_ubuf = %p fis_xferlen = %x", fis_ubuf, fis_xferlen));
6006 6036
6007 6037 /* means IOCTL requires DMA */
6008 6038 /* allocate the data transfer buffer */
6009 6039 /* fis_dma_obj.size = fis_xferlen; */
6010 6040 MRSAS_GET_BOUNDARY_ALIGNED_LEN(fis_xferlen,
6011 6041 new_xfer_length1, PAGESIZE);
6012 6042 fis_dma_obj.size = new_xfer_length1;
6013 6043 fis_dma_obj.dma_attr = mrsas_generic_dma_attr;
6014 6044 fis_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6015 6045 fis_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6016 6046 fis_dma_obj.dma_attr.dma_attr_sgllen = 1;
6017 6047 fis_dma_obj.dma_attr.dma_attr_align = 1;
6018 6048
6019 6049 /* allocate kernel buffer for DMA */
6020 6050 if (mrsas_alloc_dma_obj(instance, &fis_dma_obj,
6021 6051 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6022 6052 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp : "
6023 6053 "could not allocate data transfer buffer."));
6024 6054 return (DDI_FAILURE);
6025 6055 }
6026 6056 (void) memset(fis_dma_obj.buffer, 0, fis_xferlen);
6027 6057
6028 6058 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6029 6059 for (i = 0; i < fis_xferlen; i++) {
6030 6060 if (ddi_copyin((uint8_t *)fis_ubuf + i,
6031 6061 (uint8_t *)fis_dma_obj.buffer + i, 1, mode)) {
6032 6062 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6033 6063 "copy from user space failed"));
6034 6064 return (DDI_FAILURE);
6035 6065 }
6036 6066 }
6037 6067 }
6038 6068
6039 6069 if (data_xferlen) {
6040 6070 con_log(CL_ANN, (CE_CONT, "issue_mfi_stp: data_ubuf = %p "
6041 6071 "data_xferlen = %x", data_ubuf, data_xferlen));
6042 6072
6043 6073 /* means IOCTL requires DMA */
6044 6074 /* allocate the data transfer buffer */
6045 6075 /* data_dma_obj.size = data_xferlen; */
6046 6076 MRSAS_GET_BOUNDARY_ALIGNED_LEN(data_xferlen, new_xfer_length2,
6047 6077 PAGESIZE);
6048 6078 data_dma_obj.size = new_xfer_length2;
6049 6079 data_dma_obj.dma_attr = mrsas_generic_dma_attr;
6050 6080 data_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
6051 6081 data_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
6052 6082 data_dma_obj.dma_attr.dma_attr_sgllen = 1;
6053 6083 data_dma_obj.dma_attr.dma_attr_align = 1;
6054 6084
6055 6085 /* allocate kernel buffer for DMA */
6056 6086 if (mrsas_alloc_dma_obj(instance, &data_dma_obj,
6057 6087 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
6058 6088 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6059 6089 "could not allocate data transfer buffer."));
6060 6090 return (DDI_FAILURE);
6061 6091 }
6062 6092 (void) memset(data_dma_obj.buffer, 0, data_xferlen);
6063 6093
6064 6094 /* If IOCTL requires DMA WRITE, do ddi_copyin IOCTL data copy */
6065 6095 for (i = 0; i < data_xferlen; i++) {
6066 6096 if (ddi_copyin((uint8_t *)data_ubuf + i,
6067 6097 (uint8_t *)data_dma_obj.buffer + i, 1, mode)) {
6068 6098 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: "
6069 6099 "copy from user space failed"));
6070 6100 return (DDI_FAILURE);
6071 6101 }
6072 6102 }
6073 6103 }
6074 6104
6075 6105 ddi_put8(acc_handle, &stp->cmd, kstp->cmd);
6076 6106 ddi_put8(acc_handle, &stp->cmd_status, 0);
6077 6107 ddi_put8(acc_handle, &stp->connection_status, 0);
6078 6108 ddi_put8(acc_handle, &stp->target_id, kstp->target_id);
6079 6109 ddi_put8(acc_handle, &stp->sge_count, kstp->sge_count);
6080 6110
6081 6111 ddi_put16(acc_handle, &stp->timeout, kstp->timeout);
6082 6112 ddi_put32(acc_handle, &stp->data_xfer_len, kstp->data_xfer_len);
6083 6113
6084 6114 ddi_rep_put8(acc_handle, (uint8_t *)kstp->fis, (uint8_t *)stp->fis, 10,
6085 6115 DDI_DEV_AUTOINCR);
6086 6116
6087 6117 ddi_put16(acc_handle, &stp->flags, kstp->flags & ~MFI_FRAME_SGL64);
6088 6118 ddi_put32(acc_handle, &stp->stp_flags, kstp->stp_flags);
6089 6119 ddi_put32(acc_handle, &stp->sgl.sge32[0].length, fis_xferlen);
6090 6120 ddi_put32(acc_handle, &stp->sgl.sge32[0].phys_addr,
6091 6121 fis_dma_obj.dma_cookie[0].dmac_address);
6092 6122 ddi_put32(acc_handle, &stp->sgl.sge32[1].length, data_xferlen);
6093 6123 ddi_put32(acc_handle, &stp->sgl.sge32[1].phys_addr,
6094 6124 data_dma_obj.dma_cookie[0].dmac_address);
6095 6125
6096 6126 cmd->sync_cmd = MRSAS_TRUE;
6097 6127 cmd->frame_count = 1;
6098 6128
6099 6129 if (instance->tbolt) {
6100 6130 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
6101 6131 }
6102 6132
6103 6133 if (instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd)) {
6104 6134 con_log(CL_ANN, (CE_WARN, "issue_mfi_stp: fw_ioctl failed"));
6105 6135 } else {
6106 6136
6107 6137 if (fis_xferlen) {
6108 6138 for (i = 0; i < fis_xferlen; i++) {
6109 6139 if (ddi_copyout(
6110 6140 (uint8_t *)fis_dma_obj.buffer + i,
6111 6141 (uint8_t *)fis_ubuf + i, 1, mode)) {
6112 6142 con_log(CL_ANN, (CE_WARN,
6113 6143 "issue_mfi_stp : copy to "
6114 6144 "user space failed"));
6115 6145 return (DDI_FAILURE);
6116 6146 }
6117 6147 }
6118 6148 }
6119 6149 }
6120 6150 if (data_xferlen) {
6121 6151 for (i = 0; i < data_xferlen; i++) {
6122 6152 if (ddi_copyout(
6123 6153 (uint8_t *)data_dma_obj.buffer + i,
6124 6154 (uint8_t *)data_ubuf + i, 1, mode)) {
6125 6155 con_log(CL_ANN, (CE_WARN,
6126 6156 "issue_mfi_stp : copy to"
6127 6157 " user space failed"));
6128 6158 return (DDI_FAILURE);
6129 6159 }
6130 6160 }
6131 6161 }
6132 6162
6133 6163 kstp->cmd_status = ddi_get8(acc_handle, &stp->cmd_status);
6134 6164 con_log(CL_ANN1, (CE_NOTE, "issue_mfi_stp: stp->cmd_status = %d",
6135 6165 kstp->cmd_status));
6136 6166 DTRACE_PROBE2(issue_stp, uint8_t, kstp->cmd, uint8_t, kstp->cmd_status);
6137 6167
6138 6168 if (fis_xferlen) {
6139 6169 /* free kernel buffer */
6140 6170 if (mrsas_free_dma_obj(instance, fis_dma_obj) != DDI_SUCCESS)
6141 6171 return (DDI_FAILURE);
6142 6172 }
6143 6173
6144 6174 if (data_xferlen) {
6145 6175 /* free kernel buffer */
6146 6176 if (mrsas_free_dma_obj(instance, data_dma_obj) != DDI_SUCCESS)
6147 6177 return (DDI_FAILURE);
6148 6178 }
6149 6179
6150 6180 return (DDI_SUCCESS);
6151 6181 }
6152 6182
6153 6183 /*
6154 6184 * fill_up_drv_ver
6155 6185 */
6156 6186 void
6157 6187 fill_up_drv_ver(struct mrsas_drv_ver *dv)
6158 6188 {
6159 6189 (void) memset(dv, 0, sizeof (struct mrsas_drv_ver));
6160 6190
6161 6191 (void) memcpy(dv->signature, "$LSI LOGIC$", strlen("$LSI LOGIC$"));
6162 6192 (void) memcpy(dv->os_name, "Solaris", strlen("Solaris"));
6163 6193 (void) memcpy(dv->drv_name, "mr_sas", strlen("mr_sas"));
6164 6194 (void) memcpy(dv->drv_ver, MRSAS_VERSION, strlen(MRSAS_VERSION));
6165 6195 (void) memcpy(dv->drv_rel_date, MRSAS_RELDATE,
6166 6196 strlen(MRSAS_RELDATE));
6167 6197
6168 6198 }
6169 6199
6170 6200 /*
6171 6201 * handle_drv_ioctl
6172 6202 */
6173 6203 static int
6174 6204 handle_drv_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
6175 6205 int mode)
6176 6206 {
6177 6207 int i;
6178 6208 int rval = DDI_SUCCESS;
6179 6209 int *props = NULL;
6180 6210 void *ubuf;
6181 6211
6182 6212 uint8_t *pci_conf_buf;
6183 6213 uint32_t xferlen;
6184 6214 uint32_t num_props;
6185 6215 uint_t model;
6186 6216 struct mrsas_dcmd_frame *kdcmd;
6187 6217 struct mrsas_drv_ver dv;
6188 6218 struct mrsas_pci_information pi;
6189 6219
6190 6220 kdcmd = (struct mrsas_dcmd_frame *)&ioctl->frame[0];
6191 6221
6192 6222 model = ddi_model_convert_from(mode & FMODELS);
6193 6223 if (model == DDI_MODEL_ILP32) {
6194 6224 con_log(CL_ANN1, (CE_CONT,
6195 6225 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6196 6226
6197 6227 xferlen = kdcmd->sgl.sge32[0].length;
6198 6228
6199 6229 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6200 6230 } else {
6201 6231 #ifdef _ILP32
6202 6232 con_log(CL_ANN1, (CE_CONT,
6203 6233 "handle_drv_ioctl: DDI_MODEL_ILP32"));
6204 6234 xferlen = kdcmd->sgl.sge32[0].length;
6205 6235 ubuf = (void *)(ulong_t)kdcmd->sgl.sge32[0].phys_addr;
6206 6236 #else
6207 6237 con_log(CL_ANN1, (CE_CONT,
6208 6238 "handle_drv_ioctl: DDI_MODEL_LP64"));
6209 6239 xferlen = kdcmd->sgl.sge64[0].length;
6210 6240 ubuf = (void *)(ulong_t)kdcmd->sgl.sge64[0].phys_addr;
6211 6241 #endif
6212 6242 }
6213 6243 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6214 6244 "dataBuf=%p size=%d bytes", ubuf, xferlen));
6215 6245
6216 6246 switch (kdcmd->opcode) {
6217 6247 case MRSAS_DRIVER_IOCTL_DRIVER_VERSION:
6218 6248 con_log(CL_ANN1, (CE_CONT, "handle_drv_ioctl: "
6219 6249 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION"));
6220 6250
6221 6251 fill_up_drv_ver(&dv);
6222 6252
6223 6253 if (ddi_copyout(&dv, ubuf, xferlen, mode)) {
6224 6254 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6225 6255 "MRSAS_DRIVER_IOCTL_DRIVER_VERSION : "
6226 6256 "copy to user space failed"));
6227 6257 kdcmd->cmd_status = 1;
6228 6258 rval = 1;
6229 6259 } else {
6230 6260 kdcmd->cmd_status = 0;
6231 6261 }
6232 6262 break;
6233 6263 case MRSAS_DRIVER_IOCTL_PCI_INFORMATION:
6234 6264 con_log(CL_ANN1, (CE_NOTE, "handle_drv_ioctl: "
6235 6265 "MRSAS_DRIVER_IOCTL_PCI_INFORMAITON"));
6236 6266
6237 6267 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, instance->dip,
6238 6268 0, "reg", &props, &num_props)) {
6239 6269 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6240 6270 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6241 6271 "ddi_prop_look_int_array failed"));
6242 6272 rval = DDI_FAILURE;
6243 6273 } else {
6244 6274
6245 6275 pi.busNumber = (props[0] >> 16) & 0xFF;
6246 6276 pi.deviceNumber = (props[0] >> 11) & 0x1f;
6247 6277 pi.functionNumber = (props[0] >> 8) & 0x7;
6248 6278 ddi_prop_free((void *)props);
6249 6279 }
6250 6280
6251 6281 pci_conf_buf = (uint8_t *)&pi.pciHeaderInfo;
6252 6282
6253 6283 for (i = 0; i < (sizeof (struct mrsas_pci_information) -
6254 6284 offsetof(struct mrsas_pci_information, pciHeaderInfo));
6255 6285 i++) {
6256 6286 pci_conf_buf[i] =
6257 6287 pci_config_get8(instance->pci_handle, i);
6258 6288 }
6259 6289
6260 6290 if (ddi_copyout(&pi, ubuf, xferlen, mode)) {
6261 6291 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6262 6292 "MRSAS_DRIVER_IOCTL_PCI_INFORMATION : "
6263 6293 "copy to user space failed"));
6264 6294 kdcmd->cmd_status = 1;
6265 6295 rval = 1;
6266 6296 } else {
6267 6297 kdcmd->cmd_status = 0;
6268 6298 }
6269 6299 break;
6270 6300 default:
6271 6301 con_log(CL_ANN, (CE_WARN, "handle_drv_ioctl: "
6272 6302 "invalid driver specific IOCTL opcode = 0x%x",
6273 6303 kdcmd->opcode));
6274 6304 kdcmd->cmd_status = 1;
6275 6305 rval = DDI_FAILURE;
6276 6306 break;
6277 6307 }
6278 6308
6279 6309 return (rval);
6280 6310 }
6281 6311
6282 6312 /*
6283 6313 * handle_mfi_ioctl
6284 6314 */
/*
 * handle_mfi_ioctl
 *
 * Dispatch a pass-through MFI frame received from userland.  A driver
 * command packet is borrowed from the free pool (the RAID-message MFI pool
 * on Thunderbolt-class controllers), its frame is zeroed and re-tagged
 * with the packet's context index, and the opcode embedded in the user
 * frame selects the matching issue_mfi_* helper.  The packet is always
 * returned to its pool before exit.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
handle_mfi_ioctl(struct mrsas_instance *instance, struct mrsas_ioctl *ioctl,
    int mode)
{
	int rval = DDI_SUCCESS;

	struct mrsas_header *hdr;
	struct mrsas_cmd *cmd;

	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = mrsas_get_mfi_pkt(instance);
	}
	if (!cmd) {
		/* Pool exhausted: all frames are outstanding with the FW. */
		con_log(CL_ANN, (CE_WARN, "mr_sas: "
		    "failed to get a cmd packet"));
		DTRACE_PROBE2(mfi_ioctl_err, uint16_t,
		    instance->fw_outstanding, uint16_t, instance->max_fw_cmds);
		return (DDI_FAILURE);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	/* The user-supplied MFI frame starts at ioctl->frame[0]. */
	hdr = (struct mrsas_header *)&ioctl->frame[0];

	switch (ddi_get8(cmd->frame_dma_obj.acc_handle, &hdr->cmd)) {
	case MFI_CMD_OP_DCMD:
		rval = issue_mfi_dcmd(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_SMP:
		rval = issue_mfi_smp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_STP:
		rval = issue_mfi_stp(instance, ioctl, cmd, mode);
		break;
	case MFI_CMD_OP_LD_SCSI:
	case MFI_CMD_OP_PD_SCSI:
		/* LD and PD SCSI pass-through share the same path. */
		rval = issue_mfi_pthru(instance, ioctl, cmd, mode);
		break;
	default:
		con_log(CL_ANN, (CE_WARN, "handle_mfi_ioctl: "
		    "invalid mfi ioctl hdr->cmd = %d", hdr->cmd));
		rval = DDI_FAILURE;
		break;
	}

	/* Post-command handle check; any fault forces DDI_FAILURE. */
	if (mrsas_common_check(instance, cmd) != DDI_SUCCESS)
		rval = DDI_FAILURE;

	if (instance->tbolt) {
		return_raid_msg_mfi_pkt(instance, cmd);
	} else {
		mrsas_return_mfi_pkt(instance, cmd);
	}

	return (rval);
}
6346 6376
6347 6377 /*
6348 6378 * AEN
6349 6379 */
6350 6380 static int
6351 6381 handle_mfi_aen(struct mrsas_instance *instance, struct mrsas_aen *aen)
6352 6382 {
6353 6383 int rval = 0;
6354 6384
6355 6385 rval = register_mfi_aen(instance, instance->aen_seq_num,
6356 6386 aen->class_locale_word);
6357 6387
6358 6388 aen->cmd_status = (uint8_t)rval;
6359 6389
6360 6390 return (rval);
6361 6391 }
6362 6392
/*
 * register_mfi_aen
 *
 * Register with the firmware for asynchronous event notifications starting
 * at seq_num, for the given class/locale word.  If a registration is
 * already outstanding it is either reused (when it already covers the new
 * request) or aborted and re-issued with a merged class/locale.  The DCMD
 * is sent fire-and-forget via issue_cmd(); completion delivers events into
 * mfi_evt_detail_obj.
 *
 * Returns 0 on success, ENOMEM if no command packet is available, or the
 * error from abort_aen_cmd().
 */
static int
register_mfi_aen(struct mrsas_instance *instance, uint32_t seq_num,
    uint32_t class_locale_word)
{
	int ret_val;

	struct mrsas_cmd *cmd, *aen_cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new
	 * AEN request we currently have. If it is, then we don't have
	 * to do anything. In other words, whichever events the current
	 * AEN request is subscribing to, have already been subscribed
	 * to.
	 *
	 * If the old_cmd is _not_ inclusive, then we have to abort
	 * that command, form a class_locale that is superset of both
	 * old and current and re-issue to the FW
	 */

	curr_aen.word = LE_32(class_locale_word);
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	aen_cmd = instance->aen_cmd;
	if (aen_cmd) {
		/* Decode the class/locale the outstanding AEN subscribed. */
		prev_aen.word = ddi_get32(aen_cmd->frame_dma_obj.acc_handle,
		    &aen_cmd->frame->dcmd.mbox.w[1]);
		prev_aen.word = LE_32(prev_aen.word);
		prev_aen.members.locale = LE_16(prev_aen.members.locale);
		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included.
		 *
		 * Locale numbers don't have such hierarchy. They are bitmap
		 * values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */

			return (0);
		} else {
			/* Merge: union of locales, lowest (widest) class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			ret_val = abort_aen_cmd(instance, aen_cmd);

			if (ret_val) {
				con_log(CL_ANN, (CE_WARN, "register_mfi_aen: "
				    "failed to abort prevous AEN command"));

				return (ret_val);
			}
		}
	} else {
		curr_aen.word = LE_32(class_locale_word);
		curr_aen.members.locale = LE_16(curr_aen.members.locale);
	}

	if (instance->tbolt) {
		cmd = get_raid_msg_mfi_pkt(instance);
	} else {
		cmd = mrsas_get_mfi_pkt(instance);
	}

	if (!cmd) {
		DTRACE_PROBE2(mfi_aen_err, uint16_t, instance->fw_outstanding,
		    uint16_t, instance->max_fw_cmds);
		return (ENOMEM);
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;

	/* for(i = 0; i < DCMD_MBOX_SZ; i++) dcmd->mbox.b[i] = 0; */
	(void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);

	/* Event details from the FW land in this pre-allocated DMA buffer. */
	(void) memset(instance->mfi_evt_detail_obj.buffer, 0,
	    sizeof (struct mrsas_evt_detail));

	/* Prepare DCMD for aen registration */
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0x0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_evt_detail));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_CTRL_EVENT_WAIT);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], seq_num);
	/*
	 * NOTE(review): the locale/word swaps here mirror (reverse) the
	 * decode above, restoring wire order before writing mbox.w[1] —
	 * presumably intentional round-trip; confirm on big-endian.
	 */
	curr_aen.members.locale = LE_16(curr_aen.members.locale);
	curr_aen.word = LE_32(curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[1],
	    curr_aen.word);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    instance->mfi_evt_detail_obj.dma_cookie[0].dmac_address);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_evt_detail));

	instance->aen_seq_num = seq_num;


	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	instance->aen_cmd = cmd;

	cmd->frame_count = 1;

	/* Issue the aen registration frame */
	/* atomic_add_16 (&instance->fw_outstanding, 1); */
	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}
	instance->func_ptr->issue_cmd(cmd, instance);

	return (0);
}
6502 6532
6503 6533 void
6504 6534 display_scsi_inquiry(caddr_t scsi_inq)
6505 6535 {
6506 6536 #define MAX_SCSI_DEVICE_CODE 14
6507 6537 int i;
6508 6538 char inquiry_buf[256] = {0};
6509 6539 int len;
6510 6540 const char *const scsi_device_types[] = {
6511 6541 "Direct-Access ",
6512 6542 "Sequential-Access",
6513 6543 "Printer ",
6514 6544 "Processor ",
6515 6545 "WORM ",
6516 6546 "CD-ROM ",
6517 6547 "Scanner ",
6518 6548 "Optical Device ",
6519 6549 "Medium Changer ",
6520 6550 "Communications ",
6521 6551 "Unknown ",
6522 6552 "Unknown ",
6523 6553 "Unknown ",
6524 6554 "Enclosure ",
6525 6555 };
6526 6556
6527 6557 len = 0;
6528 6558
6529 6559 len += snprintf(inquiry_buf + len, 265 - len, " Vendor: ");
6530 6560 for (i = 8; i < 16; i++) {
6531 6561 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6532 6562 scsi_inq[i]);
6533 6563 }
6534 6564
6535 6565 len += snprintf(inquiry_buf + len, 265 - len, " Model: ");
6536 6566
6537 6567 for (i = 16; i < 32; i++) {
6538 6568 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6539 6569 scsi_inq[i]);
6540 6570 }
6541 6571
6542 6572 len += snprintf(inquiry_buf + len, 265 - len, " Rev: ");
6543 6573
6544 6574 for (i = 32; i < 36; i++) {
6545 6575 len += snprintf(inquiry_buf + len, 265 - len, "%c",
6546 6576 scsi_inq[i]);
6547 6577 }
6548 6578
6549 6579 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6550 6580
6551 6581
6552 6582 i = scsi_inq[0] & 0x1f;
6553 6583
6554 6584
6555 6585 len += snprintf(inquiry_buf + len, 265 - len, " Type: %s ",
6556 6586 i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] :
6557 6587 "Unknown ");
6558 6588
6559 6589
6560 6590 len += snprintf(inquiry_buf + len, 265 - len,
6561 6591 " ANSI SCSI revision: %02x", scsi_inq[2] & 0x07);
6562 6592
6563 6593 if ((scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1) {
6564 6594 len += snprintf(inquiry_buf + len, 265 - len, " CCS\n");
6565 6595 } else {
6566 6596 len += snprintf(inquiry_buf + len, 265 - len, "\n");
6567 6597 }
6568 6598
6569 6599 con_log(CL_DLEVEL2, (CE_CONT, inquiry_buf));
6570 6600 }
6571 6601
6572 6602 static void
6573 6603 io_timeout_checker(void *arg)
6574 6604 {
6575 6605 struct scsi_pkt *pkt;
6576 6606 struct mrsas_instance *instance = arg;
6577 6607 struct mrsas_cmd *cmd = NULL;
6578 6608 struct mrsas_header *hdr;
6579 6609 int time = 0;
6580 6610 int counter = 0;
6581 6611 struct mlist_head *pos, *next;
6582 6612 mlist_t process_list;
6583 6613
6584 6614 if (instance->adapterresetinprogress == 1) {
6585 6615 con_log(CL_ANN, (CE_NOTE, "io_timeout_checker:"
6586 6616 " reset in progress"));
6587 6617
6588 6618 instance->timeout_id = timeout(io_timeout_checker,
6589 6619 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6590 6620 return;
6591 6621 }
6592 6622
6593 6623 /* See if this check needs to be in the beginning or last in ISR */
6594 6624 if (mrsas_initiate_ocr_if_fw_is_faulty(instance) == 1) {
6595 6625 cmn_err(CE_WARN, "io_timeout_checker: "
6596 6626 "FW Fault, calling reset adapter");
6597 6627 cmn_err(CE_CONT, "io_timeout_checker: "
6598 6628 "fw_outstanding 0x%X max_fw_cmds 0x%X",
6599 6629 instance->fw_outstanding, instance->max_fw_cmds);
6600 6630 if (instance->adapterresetinprogress == 0) {
6601 6631 instance->adapterresetinprogress = 1;
6602 6632 if (instance->tbolt)
6603 6633 (void) mrsas_tbolt_reset_ppc(instance);
6604 6634 else
6605 6635 (void) mrsas_reset_ppc(instance);
6606 6636 instance->adapterresetinprogress = 0;
6607 6637 }
6608 6638 instance->timeout_id = timeout(io_timeout_checker,
6609 6639 (void *) instance, drv_usectohz(MRSAS_1_SECOND));
6610 6640 return;
6611 6641 }
6612 6642
6613 6643 INIT_LIST_HEAD(&process_list);
6614 6644
6615 6645 mutex_enter(&instance->cmd_pend_mtx);
6616 6646 mlist_for_each_safe(pos, next, &instance->cmd_pend_list) {
6617 6647 cmd = mlist_entry(pos, struct mrsas_cmd, list);
6618 6648
6619 6649 if (cmd == NULL) {
6620 6650 continue;
6621 6651 }
6622 6652
6623 6653 if (cmd->sync_cmd == MRSAS_TRUE) {
6624 6654 hdr = (struct mrsas_header *)&cmd->frame->hdr;
6625 6655 if (hdr == NULL) {
6626 6656 continue;
6627 6657 }
6628 6658 time = --cmd->drv_pkt_time;
6629 6659 } else {
6630 6660 pkt = cmd->pkt;
6631 6661 if (pkt == NULL) {
6632 6662 continue;
6633 6663 }
6634 6664 time = --cmd->drv_pkt_time;
6635 6665 }
6636 6666 if (time <= 0) {
6637 6667 cmn_err(CE_WARN, "%llx: "
6638 6668 "io_timeout_checker: TIMING OUT: pkt: %p, "
6639 6669 "cmd %p fw_outstanding 0x%X max_fw_cmds 0x%X\n",
6640 6670 gethrtime(), (void *)pkt, (void *)cmd,
6641 6671 instance->fw_outstanding, instance->max_fw_cmds);
6642 6672
6643 6673 counter++;
6644 6674 break;
6645 6675 }
6646 6676 }
6647 6677 mutex_exit(&instance->cmd_pend_mtx);
6648 6678
6649 6679 if (counter) {
6650 6680 if (instance->disable_online_ctrl_reset == 1) {
6651 6681 cmn_err(CE_WARN, "mr_sas %d: %s(): OCR is NOT "
6652 6682 "supported by Firmware, KILL adapter!!!",
6653 6683 instance->instance, __func__);
6654 6684
6655 6685 if (instance->tbolt)
6656 6686 mrsas_tbolt_kill_adapter(instance);
6657 6687 else
6658 6688 (void) mrsas_kill_adapter(instance);
6659 6689
6660 6690 return;
6661 6691 } else {
6662 6692 if (cmd->retry_count_for_ocr <= IO_RETRY_COUNT) {
6663 6693 if (instance->adapterresetinprogress == 0) {
6664 6694 if (instance->tbolt) {
6665 6695 (void) mrsas_tbolt_reset_ppc(
6666 6696 instance);
6667 6697 } else {
6668 6698 (void) mrsas_reset_ppc(
6669 6699 instance);
6670 6700 }
6671 6701 }
6672 6702 } else {
6673 6703 cmn_err(CE_WARN,
6674 6704 "io_timeout_checker: "
6675 6705 "cmd %p cmd->index %d "
6676 6706 "timed out even after 3 resets: "
6677 6707 "so KILL adapter", (void *)cmd, cmd->index);
6678 6708
6679 6709 mrsas_print_cmd_details(instance, cmd, 0xDD);
6680 6710
6681 6711 if (instance->tbolt)
6682 6712 mrsas_tbolt_kill_adapter(instance);
6683 6713 else
6684 6714 (void) mrsas_kill_adapter(instance);
6685 6715 return;
6686 6716 }
6687 6717 }
6688 6718 }
6689 6719 con_log(CL_ANN, (CE_NOTE, "mrsas: "
6690 6720 "schedule next timeout check: "
6691 6721 "do timeout \n"));
6692 6722 instance->timeout_id =
6693 6723 timeout(io_timeout_checker, (void *)instance,
6694 6724 drv_usectohz(MRSAS_1_SECOND));
6695 6725 }
6696 6726
6697 6727 static uint32_t
6698 6728 read_fw_status_reg_ppc(struct mrsas_instance *instance)
6699 6729 {
6700 6730 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
6701 6731 }
6702 6732
/*
 * issue_cmd_ppc
 *
 * Fire-and-forget issue of an MFI frame: bump fw_outstanding, track the
 * command on the pending list (unless a reset is in progress, in which
 * case only its timer is refreshed), then write the frame's physical
 * address and frame count to the inbound queue port under reg_write_mtx.
 */
static void
issue_cmd_ppc(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
{
	struct scsi_pkt *pkt;
	atomic_add_16(&instance->fw_outstanding, 1);

	pkt = cmd->pkt;
	if (pkt) {
		con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd:"
		    ": %p instance : %p pkt : %p pkt_time : %x\n",
		    gethrtime(), (void *)cmd, (void *)instance,
		    (void *)pkt, cmd->drv_pkt_time));
		if (instance->adapterresetinprogress) {
			/* Refresh timer only; reset path replays pending IO */
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
			con_log(CL_ANN1, (CE_NOTE, "Reset the scsi_pkt timer"));
		} else {
			/* Track for the io_timeout_checker watchdog. */
			push_pending_mfi_pkt(instance, cmd);
		}

	} else {
		con_log(CL_DLEVEL1, (CE_NOTE, "%llx : issue_cmd_ppc:"
		    "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
		    "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
	}

	mutex_enter(&instance->reg_write_mtx);
	/* Issue the command to the FW */
	/* Low bits encode (frame_count - 1) << 1 | 1 per MFI queue format. */
	WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
	mutex_exit(&instance->reg_write_mtx);

}
6736 6766
6737 6767 /*
6738 6768 * issue_cmd_in_sync_mode
6739 6769 */
/*
 * issue_cmd_in_sync_mode_ppc
 *
 * Issue an MFI frame and block until the interrupt path completes it
 * (cmd_status leaves ENODATA) or the wait loop gives up.  During an
 * adapter reset the frame is issued and the function returns immediately;
 * the reset code is responsible for completion.
 *
 * Returns DDI_SUCCESS on completion, DDI_FAILURE on timeout.
 */
static int
issue_cmd_in_sync_mode_ppc(struct mrsas_instance *instance,
    struct mrsas_cmd *cmd)
{
	int i;
	uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct mrsas_header *hdr = &cmd->frame->hdr;

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: called"));

	if (instance->adapterresetinprogress) {
		/* Give the command at least the debug timeout to complete. */
		cmd->drv_pkt_time = ddi_get16(
		    cmd->frame_dma_obj.acc_handle, &hdr->timeout);
		if (cmd->drv_pkt_time < debug_timeout_g)
			cmd->drv_pkt_time = (uint16_t)debug_timeout_g;

		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: "
		    "issue and return in reset case\n"));
		WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
		    (((cmd->frame_count - 1) << 1) | 1), instance);

		return (DDI_SUCCESS);
	} else {
		con_log(CL_ANN1, (CE_NOTE, "sync_mode_ppc: pushing the pkt\n"));
		push_pending_mfi_pkt(instance, cmd);
	}

	/* Sentinel: completion handler overwrites this with the FW status. */
	cmd->cmd_status = ENODATA;

	mutex_enter(&instance->reg_write_mtx);
	/* Issue the command to the FW */
	WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
	    (((cmd->frame_count - 1) << 1) | 1), instance);
	mutex_exit(&instance->reg_write_mtx);

	/*
	 * NOTE(review): this loop counts cv_wait() wakeups, not elapsed
	 * milliseconds — cv_wait has no timeout, so "msecs" iterations only
	 * bounds the number of signals/spurious wakeups, and the loop can
	 * block indefinitely if no completion ever signals int_cmd_cv.
	 * Verify whether cv_timedwait was intended.
	 */
	mutex_enter(&instance->int_cmd_mtx);
	for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
	}
	mutex_exit(&instance->int_cmd_mtx);

	con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_sync_mode_ppc: done"));

	if (i < (msecs -1)) {
		return (DDI_SUCCESS);
	} else {
		return (DDI_FAILURE);
	}
}
6789 6819
6790 6820 /*
6791 6821 * issue_cmd_in_poll_mode
6792 6822 */
6793 6823 static int
6794 6824 issue_cmd_in_poll_mode_ppc(struct mrsas_instance *instance,
6795 6825 struct mrsas_cmd *cmd)
6796 6826 {
6797 6827 int i;
6798 6828 uint16_t flags;
6799 6829 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
6800 6830 struct mrsas_header *frame_hdr;
6801 6831
6802 6832 con_log(CL_ANN1, (CE_NOTE, "issue_cmd_in_poll_mode_ppc: called"));
|
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
6803 6833
6804 6834 frame_hdr = (struct mrsas_header *)cmd->frame;
6805 6835 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
6806 6836 MFI_CMD_STATUS_POLL_MODE);
6807 6837 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
6808 6838 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
6809 6839
6810 6840 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
6811 6841
6812 6842 /* issue the frame using inbound queue port */
6813 - WR_IB_QPORT((cmd->frame_phys_addr) |
6843 + WR_IB_PICK_QPORT((cmd->frame_phys_addr) |
6814 6844 (((cmd->frame_count - 1) << 1) | 1), instance);
6815 6845
6816 6846 /* wait for cmd_status to change from 0xFF */
6817 6847 for (i = 0; i < msecs && (
6818 6848 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6819 6849 == MFI_CMD_STATUS_POLL_MODE); i++) {
6820 6850 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
6821 6851 }
6822 6852
6823 6853 if (ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
6824 6854 == MFI_CMD_STATUS_POLL_MODE) {
6825 6855 con_log(CL_ANN, (CE_NOTE, "issue_cmd_in_poll_mode: "
6826 6856 "cmd polling timed out"));
6827 6857 return (DDI_FAILURE);
6828 6858 }
6829 6859
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
6830 6860 return (DDI_SUCCESS);
6831 6861 }
6832 6862
/*
 * enable_intr_ppc
 *
 * Unmask controller interrupts.  Skinny (SAS2008) parts use a different
 * outbound mask encoding than 2108-class parts, which also need their
 * doorbell cleared first.  Ends with a dummy mask read to flush the PCI
 * posted write.
 */
static void
enable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: called"));

	if (instance->skinny) {
		/* For SKINNY, write ~0x1, from BSD's mfi driver. */
		WR_OB_INTR_MASK(0xfffffffe, instance);
	} else {
		/* WR_OB_DOORBELL_CLEAR(0xFFFFFFFF, instance); */
		WR_OB_DOORBELL_CLEAR(OB_DOORBELL_CLEAR_MASK, instance);

		/* WR_OB_INTR_MASK(~0x80000000, instance); */
		WR_OB_INTR_MASK(~(MFI_REPLY_2108_MESSAGE_INTR_MASK), instance);
	}

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);

	con_log(CL_ANN1, (CE_NOTE, "enable_intr_ppc: "
	    "outbound_intr_mask = 0x%x", mask));
}
6852 6887
/*
 * disable_intr_ppc
 *
 * Mask all controller interrupts by writing OB_INTR_MASK to the outbound
 * interrupt mask register, then perform a dummy read to flush the PCI
 * posted write.
 */
static void
disable_intr_ppc(struct mrsas_instance *instance)
{
	uint32_t mask;

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: called"));

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: before : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* For now, assume there are no extras needed for Skinny support. */

	WR_OB_INTR_MASK(OB_INTR_MASK, instance);

	con_log(CL_ANN1, (CE_NOTE, "disable_intr_ppc: after : "
	    "outbound_intr_mask = 0x%x", RD_OB_INTR_MASK(instance)));

	/* dummy read to force PCI flush */
	mask = RD_OB_INTR_MASK(instance);
#ifdef lint
	/* Silence "assigned but unused" from lint; mask exists for the read */
	mask = mask;
#endif
}
6875 6911
/*
 * intr_ack_ppc
 *
 * Interrupt-claim handler: read the outbound interrupt status register,
 * decide whether the interrupt belongs to this instance, and acknowledge
 * it.  Skinny parts ack by writing the status register back; 2108-class
 * parts write the doorbell-clear register.  A failed FMA access check on
 * the register map also unclaims the interrupt (and reports service lost).
 *
 * Returns DDI_INTR_CLAIMED or DDI_INTR_UNCLAIMED.
 */
static int
intr_ack_ppc(struct mrsas_instance *instance)
{
	uint32_t status;
	int ret = DDI_INTR_CLAIMED;

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: called"));

	/* check if it is our interrupt */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: status = 0x%x", status));

	/*
	 * NOTE: Some drivers call out SKINNY here, but the return is the same
	 * for SKINNY and 2108.
	 */
	if (!(status & MFI_REPLY_2108_MESSAGE_INTR)) {
		ret = DDI_INTR_UNCLAIMED;
	}

	if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
		ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
		ret = DDI_INTR_UNCLAIMED;
	}

	if (ret == DDI_INTR_UNCLAIMED) {
		return (ret);
	}

	/*
	 * Clear the interrupt by writing back the same value.
	 * Another case where SKINNY is slightly different.
	 */
	if (instance->skinny) {
		WR_OB_INTR_STATUS(status, instance);
	} else {
		WR_OB_DOORBELL_CLEAR(status, instance);
	}

	/* dummy READ */
	status = RD_OB_INTR_STATUS(instance);

	con_log(CL_ANN1, (CE_NOTE, "intr_ack_ppc: interrupt cleared"));

	return (ret);
}
6911 6959
6912 6960 /*
6913 6961 * Marks HBA as bad. This will be called either when an
6914 6962 * IO packet times out even after 3 FW resets
6915 6963 * or FW is found to be fault even after 3 continuous resets.
6916 6964 */
6917 6965
static int
mrsas_kill_adapter(struct mrsas_instance *instance)
{
	/*
	 * Permanently take the HBA offline: mark it dead, mask interrupts,
	 * tell the firmware to stop (MFI_STOP_ADP doorbell) and fail back
	 * all pending commands.  Returns DDI_FAILURE if already dead,
	 * DDI_SUCCESS otherwise.
	 *
	 * NOTE(review): deadadapter is tested before taking ocr_flags_mtx;
	 * two racing callers could both pass the check — presumably benign
	 * (kill is idempotent), but confirm.
	 */
	if (instance->deadadapter == 1)
		return (DDI_FAILURE);

	con_log(CL_ANN1, (CE_NOTE, "mrsas_kill_adapter: "
	    "Writing to doorbell with MFI_STOP_ADP "));
	mutex_enter(&instance->ocr_flags_mtx);
	instance->deadadapter = 1;
	mutex_exit(&instance->ocr_flags_mtx);
	instance->func_ptr->disable_intr(instance);
	WR_IB_DOORBELL(MFI_STOP_ADP, instance);
	/* Complete (fail) everything still queued so callers unblock. */
	(void) mrsas_complete_pending_cmds(instance);
	return (DDI_SUCCESS);
}
6934 6982
6935 6983
6936 6984 static int
6937 6985 mrsas_reset_ppc(struct mrsas_instance *instance)
6938 6986 {
6939 6987 uint32_t status;
6940 6988 uint32_t retry = 0;
6941 6989 uint32_t cur_abs_reg_val;
6942 6990 uint32_t fw_state;
6943 6991
6944 6992 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
6945 6993
6946 6994 if (instance->deadadapter == 1) {
6947 6995 cmn_err(CE_WARN, "mrsas_reset_ppc: "
6948 6996 "no more resets as HBA has been marked dead ");
6949 6997 return (DDI_FAILURE);
6950 6998 }
6951 6999 mutex_enter(&instance->ocr_flags_mtx);
6952 7000 instance->adapterresetinprogress = 1;
6953 7001 mutex_exit(&instance->ocr_flags_mtx);
6954 7002 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: adpterresetinprogress "
6955 7003 "flag set, time %llx", gethrtime()));
6956 7004
6957 7005 instance->func_ptr->disable_intr(instance);
6958 7006 retry_reset:
6959 7007 WR_IB_WRITE_SEQ(0, instance);
6960 7008 WR_IB_WRITE_SEQ(4, instance);
6961 7009 WR_IB_WRITE_SEQ(0xb, instance);
6962 7010 WR_IB_WRITE_SEQ(2, instance);
6963 7011 WR_IB_WRITE_SEQ(7, instance);
6964 7012 WR_IB_WRITE_SEQ(0xd, instance);
6965 7013 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: magic number written "
6966 7014 "to write sequence register\n"));
6967 7015 delay(100 * drv_usectohz(MILLISEC));
6968 7016 status = RD_OB_DRWE(instance);
6969 7017
6970 7018 while (!(status & DIAG_WRITE_ENABLE)) {
6971 7019 delay(100 * drv_usectohz(MILLISEC));
6972 7020 status = RD_OB_DRWE(instance);
6973 7021 if (retry++ == 100) {
6974 7022 cmn_err(CE_WARN, "mrsas_reset_ppc: DRWE bit "
6975 7023 "check retry count %d", retry);
6976 7024 return (DDI_FAILURE);
6977 7025 }
6978 7026 }
6979 7027 WR_IB_DRWE(status | DIAG_RESET_ADAPTER, instance);
6980 7028 delay(100 * drv_usectohz(MILLISEC));
6981 7029 status = RD_OB_DRWE(instance);
6982 7030 while (status & DIAG_RESET_ADAPTER) {
6983 7031 delay(100 * drv_usectohz(MILLISEC));
6984 7032 status = RD_OB_DRWE(instance);
6985 7033 if (retry++ == 100) {
6986 7034 cmn_err(CE_WARN, "mrsas_reset_ppc: "
6987 7035 "RESET FAILED. KILL adapter called.");
6988 7036
6989 7037 (void) mrsas_kill_adapter(instance);
6990 7038 return (DDI_FAILURE);
6991 7039 }
6992 7040 }
6993 7041 con_log(CL_ANN, (CE_NOTE, "mrsas_reset_ppc: Adapter reset complete"));
6994 7042 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
6995 7043 "Calling mfi_state_transition_to_ready"));
6996 7044
6997 7045 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
6998 7046 if (mfi_state_transition_to_ready(instance) ||
6999 7047 debug_fw_faults_after_ocr_g == 1) {
7000 7048 cur_abs_reg_val =
7001 7049 instance->func_ptr->read_fw_status_reg(instance);
7002 7050 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
7003 7051
7004 7052 #ifdef OCRDEBUG
7005 7053 con_log(CL_ANN1, (CE_NOTE,
7006 7054 "mrsas_reset_ppc :before fake: FW is not ready "
7007 7055 "FW state = 0x%x", fw_state));
7008 7056 if (debug_fw_faults_after_ocr_g == 1)
7009 7057 fw_state = MFI_STATE_FAULT;
7010 7058 #endif
7011 7059
7012 7060 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc : FW is not ready "
7013 7061 "FW state = 0x%x", fw_state));
7014 7062
7015 7063 if (fw_state == MFI_STATE_FAULT) {
7016 7064 /* increment the count */
7017 7065 instance->fw_fault_count_after_ocr++;
7018 7066 if (instance->fw_fault_count_after_ocr
7019 7067 < MAX_FW_RESET_COUNT) {
7020 7068 cmn_err(CE_WARN, "mrsas_reset_ppc: "
7021 7069 "FW is in fault after OCR count %d "
7022 7070 "Retry Reset",
7023 7071 instance->fw_fault_count_after_ocr);
7024 7072 goto retry_reset;
7025 7073
7026 7074 } else {
7027 7075 cmn_err(CE_WARN, "mrsas_reset_ppc: "
7028 7076 "Max Reset Count exceeded >%d"
7029 7077 "Mark HBA as bad, KILL adapter",
7030 7078 MAX_FW_RESET_COUNT);
7031 7079
7032 7080 (void) mrsas_kill_adapter(instance);
7033 7081 return (DDI_FAILURE);
7034 7082 }
7035 7083 }
7036 7084 }
7037 7085 /* reset the counter as FW is up after OCR */
7038 7086 instance->fw_fault_count_after_ocr = 0;
7039 7087
7040 7088
7041 7089 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
7042 7090 instance->producer, 0);
7043 7091
7044 7092 ddi_put32(instance->mfi_internal_dma_obj.acc_handle,
7045 7093 instance->consumer, 0);
7046 7094
7047 7095 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7048 7096 " after resetting produconsumer chck indexs:"
7049 7097 "producer %x consumer %x", *instance->producer,
7050 7098 *instance->consumer));
7051 7099
7052 7100 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7053 7101 "Calling mrsas_issue_init_mfi"));
7054 7102 (void) mrsas_issue_init_mfi(instance);
7055 7103 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7056 7104 "mrsas_issue_init_mfi Done"));
7057 7105
7058 7106 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7059 7107 "Calling mrsas_print_pending_cmd\n"));
7060 7108 (void) mrsas_print_pending_cmds(instance);
7061 7109 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7062 7110 "mrsas_print_pending_cmd done\n"));
7063 7111
7064 7112 instance->func_ptr->enable_intr(instance);
7065 7113 instance->fw_outstanding = 0;
7066 7114
7067 7115 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7068 7116 "Calling mrsas_issue_pending_cmds"));
7069 7117 (void) mrsas_issue_pending_cmds(instance);
7070 7118 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7071 7119 "issue_pending_cmds done.\n"));
7072 7120
7073 7121 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7074 7122 "Calling aen registration"));
7075 7123
7076 7124
7077 7125 instance->aen_cmd->retry_count_for_ocr = 0;
7078 7126 instance->aen_cmd->drv_pkt_time = 0;
7079 7127
7080 7128 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
7081 7129 con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));
7082 7130
7083 7131 mutex_enter(&instance->ocr_flags_mtx);
7084 7132 instance->adapterresetinprogress = 0;
7085 7133 mutex_exit(&instance->ocr_flags_mtx);
7086 7134 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc: "
7087 7135 "adpterresetinprogress flag unset"));
7088 7136
7089 7137 con_log(CL_ANN1, (CE_NOTE, "mrsas_reset_ppc done\n"));
7090 7138 return (DDI_SUCCESS);
7091 7139 }
7092 7140
7093 7141 /*
7094 7142 * FMA functions.
7095 7143 */
7096 7144 int
7097 7145 mrsas_common_check(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
7098 7146 {
7099 7147 int ret = DDI_SUCCESS;
7100 7148
7101 7149 if (cmd != NULL &&
7102 7150 mrsas_check_dma_handle(cmd->frame_dma_obj.dma_handle) !=
7103 7151 DDI_SUCCESS) {
7104 7152 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7105 7153 if (cmd->pkt != NULL) {
7106 7154 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7107 7155 cmd->pkt->pkt_statistics = 0;
7108 7156 }
7109 7157 ret = DDI_FAILURE;
7110 7158 }
7111 7159 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
7112 7160 != DDI_SUCCESS) {
7113 7161 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7114 7162 if (cmd != NULL && cmd->pkt != NULL) {
7115 7163 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7116 7164 cmd->pkt->pkt_statistics = 0;
7117 7165 }
7118 7166 ret = DDI_FAILURE;
7119 7167 }
7120 7168 if (mrsas_check_dma_handle(instance->mfi_evt_detail_obj.dma_handle) !=
7121 7169 DDI_SUCCESS) {
7122 7170 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7123 7171 if (cmd != NULL && cmd->pkt != NULL) {
7124 7172 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7125 7173 cmd->pkt->pkt_statistics = 0;
7126 7174 }
7127 7175 ret = DDI_FAILURE;
7128 7176 }
7129 7177 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
7130 7178 ddi_fm_service_impact(instance->dip, DDI_SERVICE_UNAFFECTED);
7131 7179
7132 7180 ddi_fm_acc_err_clear(instance->regmap_handle, DDI_FME_VER0);
7133 7181
7134 7182 if (cmd != NULL && cmd->pkt != NULL) {
7135 7183 cmd->pkt->pkt_reason = CMD_TRAN_ERR;
7136 7184 cmd->pkt->pkt_statistics = 0;
7137 7185 }
7138 7186 ret = DDI_FAILURE;
7139 7187 }
7140 7188
7141 7189 return (ret);
7142 7190 }
7143 7191
7144 7192 /*ARGSUSED*/
7145 7193 static int
7146 7194 mrsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
7147 7195 {
7148 7196 /*
7149 7197 * as the driver can always deal with an error in any dma or
7150 7198 * access handle, we can just return the fme_status value.
7151 7199 */
7152 7200 pci_ereport_post(dip, err, NULL);
7153 7201 return (err->fme_status);
7154 7202 }
7155 7203
7156 7204 static void
7157 7205 mrsas_fm_init(struct mrsas_instance *instance)
7158 7206 {
7159 7207 /* Need to change iblock to priority for new MSI intr */
7160 7208 ddi_iblock_cookie_t fm_ibc;
7161 7209
7162 7210 /* Only register with IO Fault Services if we have some capability */
7163 7211 if (instance->fm_capabilities) {
7164 7212 /* Adjust access and dma attributes for FMA */
7165 7213 endian_attr.devacc_attr_access = DDI_FLAGERR_ACC;
7166 7214 mrsas_generic_dma_attr.dma_attr_flags = DDI_DMA_FLAGERR;
7167 7215
7168 7216 /*
7169 7217 * Register capabilities with IO Fault Services.
7170 7218 * fm_capabilities will be updated to indicate
7171 7219 * capabilities actually supported (not requested.)
7172 7220 */
7173 7221
7174 7222 ddi_fm_init(instance->dip, &instance->fm_capabilities, &fm_ibc);
7175 7223
7176 7224 /*
7177 7225 * Initialize pci ereport capabilities if ereport
7178 7226 * capable (should always be.)
7179 7227 */
7180 7228
7181 7229 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7182 7230 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7183 7231 pci_ereport_setup(instance->dip);
7184 7232 }
7185 7233
7186 7234 /*
7187 7235 * Register error callback if error callback capable.
7188 7236 */
7189 7237 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7190 7238 ddi_fm_handler_register(instance->dip,
7191 7239 mrsas_fm_error_cb, (void*) instance);
7192 7240 }
7193 7241 } else {
7194 7242 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7195 7243 mrsas_generic_dma_attr.dma_attr_flags = 0;
7196 7244 }
7197 7245 }
7198 7246
7199 7247 static void
7200 7248 mrsas_fm_fini(struct mrsas_instance *instance)
7201 7249 {
7202 7250 /* Only unregister FMA capabilities if registered */
7203 7251 if (instance->fm_capabilities) {
7204 7252 /*
7205 7253 * Un-register error callback if error callback capable.
7206 7254 */
7207 7255 if (DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7208 7256 ddi_fm_handler_unregister(instance->dip);
7209 7257 }
7210 7258
7211 7259 /*
7212 7260 * Release any resources allocated by pci_ereport_setup()
7213 7261 */
7214 7262 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities) ||
7215 7263 DDI_FM_ERRCB_CAP(instance->fm_capabilities)) {
7216 7264 pci_ereport_teardown(instance->dip);
7217 7265 }
7218 7266
7219 7267 /* Unregister from IO Fault Services */
7220 7268 ddi_fm_fini(instance->dip);
7221 7269
7222 7270 /* Adjust access and dma attributes for FMA */
7223 7271 endian_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7224 7272 mrsas_generic_dma_attr.dma_attr_flags = 0;
7225 7273 }
7226 7274 }
7227 7275
7228 7276 int
7229 7277 mrsas_check_acc_handle(ddi_acc_handle_t handle)
7230 7278 {
7231 7279 ddi_fm_error_t de;
7232 7280
7233 7281 if (handle == NULL) {
7234 7282 return (DDI_FAILURE);
7235 7283 }
7236 7284
7237 7285 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7238 7286
7239 7287 return (de.fme_status);
7240 7288 }
7241 7289
7242 7290 int
7243 7291 mrsas_check_dma_handle(ddi_dma_handle_t handle)
7244 7292 {
7245 7293 ddi_fm_error_t de;
7246 7294
7247 7295 if (handle == NULL) {
7248 7296 return (DDI_FAILURE);
7249 7297 }
7250 7298
7251 7299 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7252 7300
7253 7301 return (de.fme_status);
7254 7302 }
7255 7303
7256 7304 void
7257 7305 mrsas_fm_ereport(struct mrsas_instance *instance, char *detail)
7258 7306 {
7259 7307 uint64_t ena;
7260 7308 char buf[FM_MAX_CLASS];
7261 7309
7262 7310 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7263 7311 ena = fm_ena_generate(0, FM_ENA_FMT1);
7264 7312 if (DDI_FM_EREPORT_CAP(instance->fm_capabilities)) {
7265 7313 ddi_fm_ereport_post(instance->dip, buf, ena, DDI_NOSLEEP,
7266 7314 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7267 7315 }
7268 7316 }
7269 7317
7270 7318 static int
7271 7319 mrsas_add_intrs(struct mrsas_instance *instance, int intr_type)
7272 7320 {
7273 7321
7274 7322 dev_info_t *dip = instance->dip;
7275 7323 int avail, actual, count;
7276 7324 int i, flag, ret;
7277 7325
7278 7326 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_type = %x",
7279 7327 intr_type));
7280 7328
7281 7329 /* Get number of interrupts */
7282 7330 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
7283 7331 if ((ret != DDI_SUCCESS) || (count == 0)) {
7284 7332 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_nintrs() failed:"
7285 7333 "ret %d count %d", ret, count));
7286 7334
7287 7335 return (DDI_FAILURE);
7288 7336 }
7289 7337
7290 7338 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: count = %d ", count));
7291 7339
7292 7340 /* Get number of available interrupts */
7293 7341 ret = ddi_intr_get_navail(dip, intr_type, &avail);
7294 7342 if ((ret != DDI_SUCCESS) || (avail == 0)) {
7295 7343 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_navail() failed:"
7296 7344 "ret %d avail %d", ret, avail));
7297 7345
7298 7346 return (DDI_FAILURE);
7299 7347 }
7300 7348 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: avail = %d ", avail));
7301 7349
7302 7350 /* Only one interrupt routine. So limit the count to 1 */
7303 7351 if (count > 1) {
7304 7352 count = 1;
7305 7353 }
7306 7354
7307 7355 /*
7308 7356 * Allocate an array of interrupt handlers. Currently we support
7309 7357 * only one interrupt. The framework can be extended later.
7310 7358 */
7311 7359 instance->intr_htable_size = count * sizeof (ddi_intr_handle_t);
7312 7360 instance->intr_htable = kmem_zalloc(instance->intr_htable_size,
7313 7361 KM_SLEEP);
7314 7362 ASSERT(instance->intr_htable);
7315 7363
7316 7364 flag = ((intr_type == DDI_INTR_TYPE_MSI) ||
7317 7365 (intr_type == DDI_INTR_TYPE_MSIX)) ?
7318 7366 DDI_INTR_ALLOC_STRICT : DDI_INTR_ALLOC_NORMAL;
7319 7367
7320 7368 /* Allocate interrupt */
7321 7369 ret = ddi_intr_alloc(dip, instance->intr_htable, intr_type, 0,
7322 7370 count, &actual, flag);
7323 7371
7324 7372 if ((ret != DDI_SUCCESS) || (actual == 0)) {
7325 7373 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7326 7374 "avail = %d", avail));
7327 7375 goto mrsas_free_htable;
7328 7376 }
7329 7377
7330 7378 if (actual < count) {
7331 7379 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7332 7380 "Requested = %d Received = %d", count, actual));
7333 7381 }
7334 7382 instance->intr_cnt = actual;
7335 7383
7336 7384 /*
7337 7385 * Get the priority of the interrupt allocated.
7338 7386 */
7339 7387 if ((ret = ddi_intr_get_pri(instance->intr_htable[0],
7340 7388 &instance->intr_pri)) != DDI_SUCCESS) {
7341 7389 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7342 7390 "get priority call failed"));
7343 7391 goto mrsas_free_handles;
7344 7392 }
7345 7393
7346 7394 /*
7347 7395 * Test for high level mutex. we don't support them.
7348 7396 */
7349 7397 if (instance->intr_pri >= ddi_intr_get_hilevel_pri()) {
7350 7398 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs: "
7351 7399 "High level interrupts not supported."));
7352 7400 goto mrsas_free_handles;
7353 7401 }
7354 7402
7355 7403 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_add_intrs: intr_pri = 0x%x ",
7356 7404 instance->intr_pri));
7357 7405
7358 7406 /* Call ddi_intr_add_handler() */
7359 7407 for (i = 0; i < actual; i++) {
7360 7408 ret = ddi_intr_add_handler(instance->intr_htable[i],
7361 7409 (ddi_intr_handler_t *)mrsas_isr, (caddr_t)instance,
7362 7410 (caddr_t)(uintptr_t)i);
7363 7411
7364 7412 if (ret != DDI_SUCCESS) {
7365 7413 con_log(CL_ANN, (CE_WARN, "mrsas_add_intrs:"
7366 7414 "failed %d", ret));
7367 7415 goto mrsas_free_handles;
7368 7416 }
7369 7417
7370 7418 }
7371 7419
7372 7420 con_log(CL_DLEVEL1, (CE_NOTE, " ddi_intr_add_handler done"));
7373 7421
7374 7422 if ((ret = ddi_intr_get_cap(instance->intr_htable[0],
7375 7423 &instance->intr_cap)) != DDI_SUCCESS) {
7376 7424 con_log(CL_ANN, (CE_WARN, "ddi_intr_get_cap() failed %d",
7377 7425 ret));
7378 7426 goto mrsas_free_handlers;
7379 7427 }
7380 7428
7381 7429 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7382 7430 con_log(CL_ANN, (CE_WARN, "Calling ddi_intr_block _enable"));
7383 7431
7384 7432 (void) ddi_intr_block_enable(instance->intr_htable,
7385 7433 instance->intr_cnt);
7386 7434 } else {
7387 7435 con_log(CL_ANN, (CE_NOTE, " calling ddi_intr_enable"));
7388 7436
7389 7437 for (i = 0; i < instance->intr_cnt; i++) {
7390 7438 (void) ddi_intr_enable(instance->intr_htable[i]);
7391 7439 con_log(CL_ANN, (CE_NOTE, "ddi intr enable returns "
7392 7440 "%d", i));
7393 7441 }
7394 7442 }
7395 7443
7396 7444 return (DDI_SUCCESS);
7397 7445
7398 7446 mrsas_free_handlers:
7399 7447 for (i = 0; i < actual; i++)
7400 7448 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7401 7449
7402 7450 mrsas_free_handles:
7403 7451 for (i = 0; i < actual; i++)
7404 7452 (void) ddi_intr_free(instance->intr_htable[i]);
7405 7453
7406 7454 mrsas_free_htable:
7407 7455 if (instance->intr_htable != NULL)
7408 7456 kmem_free(instance->intr_htable, instance->intr_htable_size);
7409 7457
7410 7458 instance->intr_htable = NULL;
7411 7459 instance->intr_htable_size = 0;
7412 7460
7413 7461 return (DDI_FAILURE);
7414 7462
7415 7463 }
7416 7464
7417 7465
7418 7466 static void
7419 7467 mrsas_rem_intrs(struct mrsas_instance *instance)
7420 7468 {
7421 7469 int i;
7422 7470
7423 7471 con_log(CL_ANN, (CE_NOTE, "mrsas_rem_intrs called"));
7424 7472
7425 7473 /* Disable all interrupts first */
7426 7474 if (instance->intr_cap & DDI_INTR_FLAG_BLOCK) {
7427 7475 (void) ddi_intr_block_disable(instance->intr_htable,
7428 7476 instance->intr_cnt);
7429 7477 } else {
7430 7478 for (i = 0; i < instance->intr_cnt; i++) {
7431 7479 (void) ddi_intr_disable(instance->intr_htable[i]);
7432 7480 }
7433 7481 }
7434 7482
7435 7483 /* Remove all the handlers */
7436 7484
7437 7485 for (i = 0; i < instance->intr_cnt; i++) {
7438 7486 (void) ddi_intr_remove_handler(instance->intr_htable[i]);
7439 7487 (void) ddi_intr_free(instance->intr_htable[i]);
7440 7488 }
7441 7489
7442 7490 if (instance->intr_htable != NULL)
7443 7491 kmem_free(instance->intr_htable, instance->intr_htable_size);
7444 7492
7445 7493 instance->intr_htable = NULL;
7446 7494 instance->intr_htable_size = 0;
7447 7495
7448 7496 }
7449 7497
7450 7498 static int
7451 7499 mrsas_tran_bus_config(dev_info_t *parent, uint_t flags,
7452 7500 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
7453 7501 {
7454 7502 struct mrsas_instance *instance;
7455 7503 int config;
7456 7504 int rval = NDI_SUCCESS;
7457 7505
7458 7506 char *ptr = NULL;
7459 7507 int tgt, lun;
7460 7508
7461 7509 con_log(CL_ANN1, (CE_NOTE, "Bus config called for op = %x", op));
7462 7510
7463 7511 if ((instance = ddi_get_soft_state(mrsas_state,
7464 7512 ddi_get_instance(parent))) == NULL) {
7465 7513 return (NDI_FAILURE);
7466 7514 }
7467 7515
7468 7516 /* Hold nexus during bus_config */
7469 7517 ndi_devi_enter(parent, &config);
7470 7518 switch (op) {
7471 7519 case BUS_CONFIG_ONE: {
7472 7520
7473 7521 /* parse wwid/target name out of name given */
7474 7522 if ((ptr = strchr((char *)arg, '@')) == NULL) {
7475 7523 rval = NDI_FAILURE;
7476 7524 break;
7477 7525 }
|
↓ open down ↓ |
564 lines elided |
↑ open up ↑ |
7478 7526 ptr++;
7479 7527
7480 7528 if (mrsas_parse_devname(arg, &tgt, &lun) != 0) {
7481 7529 rval = NDI_FAILURE;
7482 7530 break;
7483 7531 }
7484 7532
7485 7533 if (lun == 0) {
7486 7534 rval = mrsas_config_ld(instance, tgt, lun, childp);
7487 7535 #ifdef PDSUPPORT
7488 - } else if (instance->tbolt == 1 && lun != 0) {
7536 + } else if ((instance->tbolt || instance->skinny) && lun != 0) {
7489 7537 rval = mrsas_tbolt_config_pd(instance,
7490 7538 tgt, lun, childp);
7491 7539 #endif
7492 7540 } else {
7493 7541 rval = NDI_FAILURE;
7494 7542 }
7495 7543
7496 7544 break;
7497 7545 }
7498 7546 case BUS_CONFIG_DRIVER:
7499 7547 case BUS_CONFIG_ALL: {
7500 7548
7501 7549 rval = mrsas_config_all_devices(instance);
7502 7550
7503 7551 rval = NDI_SUCCESS;
7504 7552 break;
7505 7553 }
7506 7554 }
7507 7555
7508 7556 if (rval == NDI_SUCCESS) {
7509 7557 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7510 7558
7511 7559 }
7512 7560 ndi_devi_exit(parent, config);
7513 7561
7514 7562 con_log(CL_ANN1, (CE_NOTE, "mrsas_tran_bus_config: rval = %x",
7515 7563 rval));
7516 7564 return (rval);
7517 7565 }
7518 7566
7519 7567 static int
7520 7568 mrsas_config_all_devices(struct mrsas_instance *instance)
|
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
7521 7569 {
7522 7570 int rval, tgt;
7523 7571
7524 7572 for (tgt = 0; tgt < MRDRV_MAX_LD; tgt++) {
7525 7573 (void) mrsas_config_ld(instance, tgt, 0, NULL);
7526 7574
7527 7575 }
7528 7576
7529 7577 #ifdef PDSUPPORT
7530 7578 /* Config PD devices connected to the card */
7531 - if (instance->tbolt) {
7579 + if (instance->tbolt || instance->skinny) {
7532 7580 for (tgt = 0; tgt < instance->mr_tbolt_pd_max; tgt++) {
7533 7581 (void) mrsas_tbolt_config_pd(instance, tgt, 1, NULL);
7534 7582 }
7535 7583 }
7536 7584 #endif
7537 7585
7538 7586 rval = NDI_SUCCESS;
7539 7587 return (rval);
7540 7588 }
7541 7589
7542 7590 static int
7543 7591 mrsas_parse_devname(char *devnm, int *tgt, int *lun)
7544 7592 {
7545 7593 char devbuf[SCSI_MAXNAMELEN];
7546 7594 char *addr;
7547 7595 char *p, *tp, *lp;
7548 7596 long num;
7549 7597
7550 7598 /* Parse dev name and address */
7551 7599 (void) strcpy(devbuf, devnm);
7552 7600 addr = "";
7553 7601 for (p = devbuf; *p != '\0'; p++) {
7554 7602 if (*p == '@') {
7555 7603 addr = p + 1;
7556 7604 *p = '\0';
7557 7605 } else if (*p == ':') {
7558 7606 *p = '\0';
7559 7607 break;
7560 7608 }
7561 7609 }
7562 7610
7563 7611 /* Parse target and lun */
7564 7612 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7565 7613 if (*p == ',') {
7566 7614 lp = p + 1;
7567 7615 *p = '\0';
7568 7616 break;
7569 7617 }
7570 7618 }
7571 7619 if (tgt && tp) {
7572 7620 if (ddi_strtol(tp, NULL, 0x10, &num)) {
7573 7621 return (DDI_FAILURE); /* Can declare this as constant */
7574 7622 }
7575 7623 *tgt = (int)num;
7576 7624 }
7577 7625 if (lun && lp) {
7578 7626 if (ddi_strtol(lp, NULL, 0x10, &num)) {
7579 7627 return (DDI_FAILURE);
7580 7628 }
7581 7629 *lun = (int)num;
7582 7630 }
7583 7631 return (DDI_SUCCESS); /* Success case */
7584 7632 }
7585 7633
7586 7634 static int
7587 7635 mrsas_config_ld(struct mrsas_instance *instance, uint16_t tgt,
7588 7636 uint8_t lun, dev_info_t **ldip)
7589 7637 {
7590 7638 struct scsi_device *sd;
7591 7639 dev_info_t *child;
7592 7640 int rval;
7593 7641
7594 7642 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: t = %d l = %d",
7595 7643 tgt, lun));
7596 7644
7597 7645 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
7598 7646 if (ldip) {
7599 7647 *ldip = child;
7600 7648 }
7601 7649 if (instance->mr_ld_list[tgt].flag != MRDRV_TGT_VALID) {
7602 7650 rval = mrsas_service_evt(instance, tgt, 0,
7603 7651 MRSAS_EVT_UNCONFIG_TGT, NULL);
7604 7652 con_log(CL_ANN1, (CE_WARN,
7605 7653 "mr_sas: DELETING STALE ENTRY rval = %d "
7606 7654 "tgt id = %d ", rval, tgt));
7607 7655 return (NDI_FAILURE);
7608 7656 }
7609 7657 return (NDI_SUCCESS);
7610 7658 }
7611 7659
7612 7660 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
7613 7661 sd->sd_address.a_hba_tran = instance->tran;
7614 7662 sd->sd_address.a_target = (uint16_t)tgt;
7615 7663 sd->sd_address.a_lun = (uint8_t)lun;
7616 7664
7617 7665 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS)
7618 7666 rval = mrsas_config_scsi_device(instance, sd, ldip);
7619 7667 else
7620 7668 rval = NDI_FAILURE;
7621 7669
7622 7670 /* sd_unprobe is blank now. Free buffer manually */
7623 7671 if (sd->sd_inq) {
7624 7672 kmem_free(sd->sd_inq, SUN_INQSIZE);
7625 7673 sd->sd_inq = (struct scsi_inquiry *)NULL;
7626 7674 }
7627 7675
7628 7676 kmem_free(sd, sizeof (struct scsi_device));
7629 7677 con_log(CL_DLEVEL1, (CE_NOTE, "mrsas_config_ld: return rval = %d",
7630 7678 rval));
7631 7679 return (rval);
7632 7680 }
7633 7681
7634 7682 int
7635 7683 mrsas_config_scsi_device(struct mrsas_instance *instance,
7636 7684 struct scsi_device *sd, dev_info_t **dipp)
7637 7685 {
7638 7686 char *nodename = NULL;
7639 7687 char **compatible = NULL;
7640 7688 int ncompatible = 0;
7641 7689 char *childname;
7642 7690 dev_info_t *ldip = NULL;
7643 7691 int tgt = sd->sd_address.a_target;
7644 7692 int lun = sd->sd_address.a_lun;
7645 7693 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7646 7694 int rval;
7647 7695
7648 7696 con_log(CL_DLEVEL1, (CE_NOTE, "mr_sas: scsi_device t%dL%d", tgt, lun));
7649 7697 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7650 7698 NULL, &nodename, &compatible, &ncompatible);
7651 7699
7652 7700 if (nodename == NULL) {
7653 7701 con_log(CL_ANN1, (CE_WARN, "mr_sas: Found no compatible driver "
7654 7702 "for t%dL%d", tgt, lun));
7655 7703 rval = NDI_FAILURE;
7656 7704 goto finish;
7657 7705 }
7658 7706
7659 7707 childname = (dtype == DTYPE_DIRECT) ? "sd" : nodename;
7660 7708 con_log(CL_DLEVEL1, (CE_NOTE,
7661 7709 "mr_sas: Childname = %2s nodename = %s", childname, nodename));
7662 7710
7663 7711 /* Create a dev node */
7664 7712 rval = ndi_devi_alloc(instance->dip, childname, DEVI_SID_NODEID, &ldip);
7665 7713 con_log(CL_DLEVEL1, (CE_NOTE,
7666 7714 "mr_sas_config_scsi_device: ndi_devi_alloc rval = %x", rval));
7667 7715 if (rval == NDI_SUCCESS) {
7668 7716 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
7669 7717 DDI_PROP_SUCCESS) {
7670 7718 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7671 7719 "property for t%dl%d target", tgt, lun));
7672 7720 rval = NDI_FAILURE;
7673 7721 goto finish;
7674 7722 }
7675 7723 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
7676 7724 DDI_PROP_SUCCESS) {
7677 7725 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7678 7726 "property for t%dl%d lun", tgt, lun));
7679 7727 rval = NDI_FAILURE;
7680 7728 goto finish;
7681 7729 }
7682 7730
7683 7731 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7684 7732 "compatible", compatible, ncompatible) !=
7685 7733 DDI_PROP_SUCCESS) {
7686 7734 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to create "
7687 7735 "property for t%dl%d compatible", tgt, lun));
7688 7736 rval = NDI_FAILURE;
7689 7737 goto finish;
7690 7738 }
7691 7739
7692 7740 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7693 7741 if (rval != NDI_SUCCESS) {
7694 7742 con_log(CL_ANN1, (CE_WARN, "mr_sas: unable to online "
7695 7743 "t%dl%d", tgt, lun));
7696 7744 ndi_prop_remove_all(ldip);
7697 7745 (void) ndi_devi_free(ldip);
7698 7746 } else {
7699 7747 con_log(CL_ANN1, (CE_CONT, "mr_sas: online Done :"
7700 7748 "0 t%dl%d", tgt, lun));
7701 7749 }
7702 7750
7703 7751 }
7704 7752 finish:
7705 7753 if (dipp) {
7706 7754 *dipp = ldip;
7707 7755 }
7708 7756
7709 7757 con_log(CL_DLEVEL1, (CE_NOTE,
7710 7758 "mr_sas: config_scsi_device rval = %d t%dL%d",
7711 7759 rval, tgt, lun));
7712 7760 scsi_hba_nodename_compatible_free(nodename, compatible);
7713 7761 return (rval);
7714 7762 }
7715 7763
7716 7764 /*ARGSUSED*/
7717 7765 int
7718 7766 mrsas_service_evt(struct mrsas_instance *instance, int tgt, int lun, int event,
7719 7767 uint64_t wwn)
7720 7768 {
7721 7769 struct mrsas_eventinfo *mrevt = NULL;
7722 7770
7723 7771 con_log(CL_ANN1, (CE_NOTE,
7724 7772 "mrsas_service_evt called for t%dl%d event = %d",
7725 7773 tgt, lun, event));
7726 7774
7727 7775 if ((instance->taskq == NULL) || (mrevt =
7728 7776 kmem_zalloc(sizeof (struct mrsas_eventinfo), KM_NOSLEEP)) == NULL) {
7729 7777 return (ENOMEM);
7730 7778 }
7731 7779
7732 7780 mrevt->instance = instance;
7733 7781 mrevt->tgt = tgt;
7734 7782 mrevt->lun = lun;
7735 7783 mrevt->event = event;
7736 7784 mrevt->wwn = wwn;
7737 7785
7738 7786 if ((ddi_taskq_dispatch(instance->taskq,
7739 7787 (void (*)(void *))mrsas_issue_evt_taskq, mrevt, DDI_NOSLEEP)) !=
7740 7788 DDI_SUCCESS) {
7741 7789 con_log(CL_ANN1, (CE_NOTE,
7742 7790 "mr_sas: Event task failed for t%dl%d event = %d",
7743 7791 tgt, lun, event));
7744 7792 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7745 7793 return (DDI_FAILURE);
7746 7794 }
7747 7795 DTRACE_PROBE3(service_evt, int, tgt, int, lun, int, event);
7748 7796 return (DDI_SUCCESS);
7749 7797 }
7750 7798
7751 7799 static void
7752 7800 mrsas_issue_evt_taskq(struct mrsas_eventinfo *mrevt)
7753 7801 {
7754 7802 struct mrsas_instance *instance = mrevt->instance;
7755 7803 dev_info_t *dip, *pdip;
7756 7804 int circ1 = 0;
7757 7805 char *devname;
7758 7806
7759 7807 con_log(CL_ANN1, (CE_NOTE, "mrsas_issue_evt_taskq: called for"
7760 7808 " tgt %d lun %d event %d",
7761 7809 mrevt->tgt, mrevt->lun, mrevt->event));
7762 7810
7763 7811 if (mrevt->tgt < MRDRV_MAX_LD && mrevt->lun == 0) {
7764 7812 mutex_enter(&instance->config_dev_mtx);
7765 7813 dip = instance->mr_ld_list[mrevt->tgt].dip;
7766 7814 mutex_exit(&instance->config_dev_mtx);
7767 7815 #ifdef PDSUPPORT
7768 7816 } else {
7769 7817 mutex_enter(&instance->config_dev_mtx);
7770 7818 dip = instance->mr_tbolt_pd_list[mrevt->tgt].dip;
7771 7819 mutex_exit(&instance->config_dev_mtx);
7772 7820 #endif
7773 7821 }
7774 7822
|
↓ open down ↓ |
233 lines elided |
↑ open up ↑ |
7775 7823
7776 7824 ndi_devi_enter(instance->dip, &circ1);
7777 7825 switch (mrevt->event) {
7778 7826 case MRSAS_EVT_CONFIG_TGT:
7779 7827 if (dip == NULL) {
7780 7828
7781 7829 if (mrevt->lun == 0) {
7782 7830 (void) mrsas_config_ld(instance, mrevt->tgt,
7783 7831 0, NULL);
7784 7832 #ifdef PDSUPPORT
7785 - } else if (instance->tbolt) {
7833 + } else if (instance->tbolt || instance->skinny) {
7786 7834 (void) mrsas_tbolt_config_pd(instance,
7787 7835 mrevt->tgt,
7788 7836 1, NULL);
7789 7837 #endif
7790 7838 }
7791 7839 con_log(CL_ANN1, (CE_NOTE,
7792 7840 "mr_sas: EVT_CONFIG_TGT called:"
7793 7841 " for tgt %d lun %d event %d",
7794 7842 mrevt->tgt, mrevt->lun, mrevt->event));
7795 7843
7796 7844 } else {
7797 7845 con_log(CL_ANN1, (CE_NOTE,
7798 7846 "mr_sas: EVT_CONFIG_TGT dip != NULL:"
7799 7847 " for tgt %d lun %d event %d",
7800 7848 mrevt->tgt, mrevt->lun, mrevt->event));
7801 7849 }
7802 7850 break;
7803 7851 case MRSAS_EVT_UNCONFIG_TGT:
7804 7852 if (dip) {
7805 7853 if (i_ddi_devi_attached(dip)) {
7806 7854
7807 7855 pdip = ddi_get_parent(dip);
7808 7856
7809 7857 devname = kmem_zalloc(MAXNAMELEN + 1, KM_SLEEP);
7810 7858 (void) ddi_deviname(dip, devname);
7811 7859
7812 7860 (void) devfs_clean(pdip, devname + 1,
7813 7861 DV_CLEAN_FORCE);
7814 7862 kmem_free(devname, MAXNAMELEN + 1);
7815 7863 }
7816 7864 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7817 7865 con_log(CL_ANN1, (CE_NOTE,
7818 7866 "mr_sas: EVT_UNCONFIG_TGT called:"
7819 7867 " for tgt %d lun %d event %d",
7820 7868 mrevt->tgt, mrevt->lun, mrevt->event));
7821 7869 } else {
7822 7870 con_log(CL_ANN1, (CE_NOTE,
7823 7871 "mr_sas: EVT_UNCONFIG_TGT dip == NULL:"
7824 7872 " for tgt %d lun %d event %d",
7825 7873 mrevt->tgt, mrevt->lun, mrevt->event));
7826 7874 }
7827 7875 break;
7828 7876 }
7829 7877 kmem_free(mrevt, sizeof (struct mrsas_eventinfo));
7830 7878 ndi_devi_exit(instance->dip, circ1);
7831 7879 }
7832 7880
7833 7881
7834 7882 int
7835 7883 mrsas_mode_sense_build(struct scsi_pkt *pkt)
7836 7884 {
7837 7885 union scsi_cdb *cdbp;
7838 7886 uint16_t page_code;
7839 7887 struct scsa_cmd *acmd;
7840 7888 struct buf *bp;
7841 7889 struct mode_header *modehdrp;
7842 7890
7843 7891 cdbp = (void *)pkt->pkt_cdbp;
7844 7892 page_code = cdbp->cdb_un.sg.scsi[0];
7845 7893 acmd = PKT2CMD(pkt);
7846 7894 bp = acmd->cmd_buf;
7847 7895 if ((!bp) && bp->b_un.b_addr && bp->b_bcount && acmd->cmd_dmacount) {
7848 7896 con_log(CL_ANN1, (CE_WARN, "Failing MODESENSE Command"));
7849 7897 /* ADD pkt statistics as Command failed. */
7850 7898 return (NULL);
7851 7899 }
7852 7900
7853 7901 bp_mapin(bp);
7854 7902 bzero(bp->b_un.b_addr, bp->b_bcount);
7855 7903
7856 7904 switch (page_code) {
7857 7905 case 0x3: {
7858 7906 struct mode_format *page3p = NULL;
7859 7907 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7860 7908 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7861 7909
7862 7910 page3p = (void *)((caddr_t)modehdrp +
7863 7911 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7864 7912 page3p->mode_page.code = 0x3;
7865 7913 page3p->mode_page.length =
7866 7914 (uchar_t)(sizeof (struct mode_format));
7867 7915 page3p->data_bytes_sect = 512;
7868 7916 page3p->sect_track = 63;
7869 7917 break;
7870 7918 }
7871 7919 case 0x4: {
7872 7920 struct mode_geometry *page4p = NULL;
7873 7921 modehdrp = (struct mode_header *)(bp->b_un.b_addr);
7874 7922 modehdrp->bdesc_length = MODE_BLK_DESC_LENGTH;
7875 7923
7876 7924 page4p = (void *)((caddr_t)modehdrp +
7877 7925 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
7878 7926 page4p->mode_page.code = 0x4;
7879 7927 page4p->mode_page.length =
7880 7928 (uchar_t)(sizeof (struct mode_geometry));
7881 7929 page4p->heads = 255;
7882 7930 page4p->rpm = 10000;
7883 7931 break;
7884 7932 }
7885 7933 default:
7886 7934 break;
7887 7935 }
7888 7936 return (NULL);
7889 7937 }
|
↓ open down ↓ |
94 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX