3500 Support LSI SAS2008 (Falcon) Skinny FW for mr_sas(7D)
--- old/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
+++ new/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
1 1 /*
2 2 * mr_sas_tbolt.c: source for the mr_sas driver for new-generation
3 3 * controllers, i.e. Thunderbolt and Invader
4 4 *
5 5 * Solaris MegaRAID device driver for SAS2.0 controllers
6 6 * Copyright (c) 2008-2012, LSI Logic Corporation.
7 7 * All rights reserved.
8 8 *
9 9 * Version:
10 10 * Author:
11 11 * Swaminathan K S
12 12 * Arun Chandrashekhar
13 13 * Manju R
14 14 * Rasheed
15 15 * Shakeel Bukhari
16 16 */
17 17
18 +/*
19 + * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
20 + */
18 21
22 +
19 23 #include <sys/types.h>
20 24 #include <sys/file.h>
21 25 #include <sys/atomic.h>
22 26 #include <sys/scsi/scsi.h>
23 27 #include <sys/byteorder.h>
24 28 #include "ld_pd_map.h"
25 29 #include "mr_sas.h"
26 30 #include "fusion.h"
27 31
28 32 /*
29 33 * FMA header files
30 34 */
31 35 #include <sys/ddifm.h>
32 36 #include <sys/fm/protocol.h>
33 37 #include <sys/fm/util.h>
34 38 #include <sys/fm/io/ddi.h>
35 39
36 40
37 41 /* Pre-TB command size and TB command size. */
38 42 #define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
39 43 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
40 44 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
41 45 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
42 46 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
43 47 extern ddi_dma_attr_t mrsas_generic_dma_attr;
44 48 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
45 49 extern struct ddi_device_acc_attr endian_attr;
46 50 extern int debug_level_g;
47 51 extern unsigned int enable_fp;
48 52 volatile int dump_io_wait_time = 90;
49 -extern void
50 -io_timeout_checker(void *arg);
51 53 extern volatile int debug_timeout_g;
52 54 extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
53 55 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
54 56 extern void push_pending_mfi_pkt(struct mrsas_instance *,
55 57 struct mrsas_cmd *);
56 58 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
57 59 MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
58 60
59 61 /* Local static prototypes. */
60 62 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
61 63 struct scsi_address *, struct scsi_pkt *, uchar_t *);
62 64 static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
63 65 U64 start_blk, U32 num_blocks);
64 66 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
65 67 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
66 68 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
67 69 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
68 70 #ifdef PDSUPPORT
69 71 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
70 72 struct mrsas_tbolt_pd_info *, int);
71 73 #endif /* PDSUPPORT */
72 74
73 75 static int debug_tbolt_fw_faults_after_ocr_g = 0;
74 76
75 77 /*
76 78 * destroy_mfi_mpi_frame_pool
77 79 */
78 80 void
79 81 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
80 82 {
81 83 int i;
82 84
83 85 struct mrsas_cmd *cmd;
84 86
85 87 /* return all mfi frames to pool */
86 88 for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
87 89 cmd = instance->cmd_list[i];
88 90 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
89 91 (void) mrsas_free_dma_obj(instance,
90 92 cmd->frame_dma_obj);
91 93 }
92 94 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
93 95 }
94 96 }
95 97
96 98 /*
97 99 * destroy_mpi2_frame_pool
98 100 */
99 101 void
100 102 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
101 103 {
102 104
103 105 if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
104 106 (void) mrsas_free_dma_obj(instance,
105 107 instance->mpi2_frame_pool_dma_obj);
106 108 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
107 109 }
108 110 }
109 111
110 112
111 113 /*
112 114 * mrsas_tbolt_free_additional_dma_buffer
113 115 */
114 116 void
115 117 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
116 118 {
117 119 int i;
118 120
119 121 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
120 122 (void) mrsas_free_dma_obj(instance,
121 123 instance->mfi_internal_dma_obj);
122 124 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
123 125 }
124 126 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
125 127 (void) mrsas_free_dma_obj(instance,
126 128 instance->mfi_evt_detail_obj);
127 129 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
128 130 }
129 131
130 132 for (i = 0; i < 2; i++) {
131 133 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
132 134 (void) mrsas_free_dma_obj(instance,
133 135 instance->ld_map_obj[i]);
134 136 instance->ld_map_obj[i].status = DMA_OBJ_FREED;
135 137 }
136 138 }
137 139 }
138 140
139 141
140 142 /*
141 143 * free_req_rep_desc_pool
142 144 */
143 145 void
144 146 free_req_rep_desc_pool(struct mrsas_instance *instance)
145 147 {
146 148 if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
147 149 (void) mrsas_free_dma_obj(instance,
148 150 instance->request_desc_dma_obj);
149 151 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
150 152 }
151 153
152 154 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
153 155 (void) mrsas_free_dma_obj(instance,
154 156 instance->reply_desc_dma_obj);
155 157 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
156 158 }
157 159
158 160
159 161 }
160 162
161 163
162 164 /*
163 165 * ThunderBolt(TB) Request Message Frame Pool
164 166 */
165 167 int
166 168 create_mpi2_frame_pool(struct mrsas_instance *instance)
167 169 {
168 170 int i = 0;
169 171 uint16_t max_cmd;
170 172 uint32_t sgl_sz;
171 173 uint32_t raid_msg_size;
172 174 uint32_t total_size;
173 175 uint32_t offset;
174 176 uint32_t io_req_base_phys;
175 177 uint8_t *io_req_base;
176 178 struct mrsas_cmd *cmd;
177 179
178 180 max_cmd = instance->max_fw_cmds;
179 181
180 182 sgl_sz = 1024;
181 183 raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
182 184
183 185 /* Allocating additional 256 bytes to accommodate SMID 0. */
184 186 total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
185 187 (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
186 188
187 189 con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
188 190 "max_cmd %x", max_cmd));
189 191
190 192 con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
191 193 "request message frame pool size %x", total_size));
192 194
193 195 /*
194 196 * ThunderBolt(TB): we need to create a single chunk of DMA-able memory
195 197 * and then split it across up to 1024 commands. Each command should be
196 198 * able to contain a RAID MESSAGE FRAME which will embed an MFI_FRAME
197 199 * within it. For comparison, refer to the "alloc_req_rep_desc" function,
198 200 * where the request/reply descriptor queues are allocated.
199 201 */
200 202
201 203 instance->mpi2_frame_pool_dma_obj.size = total_size;
202 204 instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
203 205 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
204 206 0xFFFFFFFFU;
205 207 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
206 208 0xFFFFFFFFU;
207 209 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
208 210 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
209 211
210 212 if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
211 213 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
212 214 cmn_err(CE_WARN,
213 215 "mr_sas: could not alloc mpi2 frame pool");
214 216 return (DDI_FAILURE);
215 217 }
216 218
217 219 bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
218 220 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
219 221
220 222 instance->io_request_frames =
221 223 (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
222 224 instance->io_request_frames_phy =
223 225 (uint32_t)
224 226 instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
225 227
226 228 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
227 229 (void *)instance->io_request_frames));
228 230
229 231 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
230 232 instance->io_request_frames_phy));
231 233
232 234 io_req_base = (uint8_t *)instance->io_request_frames +
233 235 MRSAS_THUNDERBOLT_MSG_SIZE;
234 236 io_req_base_phys = instance->io_request_frames_phy +
235 237 MRSAS_THUNDERBOLT_MSG_SIZE;
236 238
237 239 con_log(CL_DLEVEL3, (CE_NOTE,
238 240 "io req_base_phys 0x%x", io_req_base_phys));
239 241
240 242 for (i = 0; i < max_cmd; i++) {
241 243 cmd = instance->cmd_list[i];
242 244
243 245 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
244 246
245 247 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
246 248 ((uint8_t *)io_req_base + offset);
247 249 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
248 250
249 251 cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
250 252 (max_cmd * raid_msg_size) + i * sgl_sz);
251 253
252 254 cmd->sgl_phys_addr = (io_req_base_phys +
253 255 (max_cmd * raid_msg_size) + i * sgl_sz);
254 256
255 257 cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
256 258 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
257 259 (i * SENSE_LENGTH));
258 260
259 261 cmd->sense_phys_addr1 = (io_req_base_phys +
260 262 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
261 263 (i * SENSE_LENGTH));
262 264
263 265
264 266 cmd->SMID = i + 1;
265 267
266 268 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
267 269 cmd->index, (void *)cmd->scsi_io_request));
268 270
269 271 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
270 272 cmd->index, cmd->scsi_io_request_phys_addr));
271 273
272 274 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
273 275 cmd->index, (void *)cmd->sense1));
274 276
275 277 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
276 278 cmd->index, cmd->sense_phys_addr1));
277 279
278 280 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers [%x]0x%p",
279 281 cmd->index, (void *)cmd->sgl));
280 282
281 283 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl bufffers phys [%x]0x%x",
282 284 cmd->index, cmd->sgl_phys_addr));
283 285 }
284 286
285 287 return (DDI_SUCCESS);
286 288
287 289 }
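The offset arithmetic above packs three per-command regions (message frame, SGL buffer, sense buffer) into one contiguous DMA allocation, with an extra leading message slot for SMID 0. Below is a minimal user-space sketch of the same layout math; the sizes are stand-ins, not the driver's real MRSAS_THUNDERBOLT_MSG_SIZE and SENSE_LENGTH values.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MSG_SIZE    256     /* stand-in for MRSAS_THUNDERBOLT_MSG_SIZE */
    #define SGL_SIZE    1024
    #define SENSE_SIZE  64      /* stand-in for SENSE_LENGTH */

    int
    main(void)
    {
            unsigned int max_cmd = 4;
            /* one extra MSG_SIZE slot up front accommodates SMID 0 */
            size_t total = MSG_SIZE +
                max_cmd * (MSG_SIZE + SGL_SIZE + SENSE_SIZE);
            uint8_t *pool = calloc(1, total);
            uint8_t *base;

            if (pool == NULL)
                    return (1);
            base = pool + MSG_SIZE;

            for (unsigned int i = 0; i < max_cmd; i++) {
                    ptrdiff_t msg = (base + i * MSG_SIZE) - pool;
                    ptrdiff_t sgl = (base + max_cmd * MSG_SIZE +
                        i * SGL_SIZE) - pool;
                    ptrdiff_t sense = (base + max_cmd * (MSG_SIZE + SGL_SIZE) +
                        i * SENSE_SIZE) - pool;
                    printf("cmd %u (SMID %u): msg %td sgl %td sense %td\n",
                        i, i + 1, msg, sgl, sense);
            }
            free(pool);
            return (0);
    }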
288 290
289 291
290 292 /*
291 293 * alloc_additional_dma_buffer for AEN
292 294 */
293 295 int
294 296 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
295 297 {
296 298 uint32_t internal_buf_size = PAGESIZE*2;
297 299 int i;
298 300
299 301 /* Initialize buffer status as free */
300 302 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
301 303 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
302 304 instance->ld_map_obj[0].status = DMA_OBJ_FREED;
303 305 instance->ld_map_obj[1].status = DMA_OBJ_FREED;
304 306
305 307
306 308 instance->mfi_internal_dma_obj.size = internal_buf_size;
307 309 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
308 310 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
309 311 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
310 312 0xFFFFFFFFU;
311 313 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
312 314
313 315 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
314 316 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
315 317 cmn_err(CE_WARN,
316 318 "mr_sas: could not alloc reply queue");
317 319 return (DDI_FAILURE);
318 320 }
319 321
320 322 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
321 323
322 324 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
323 325 instance->internal_buf =
324 326 (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
325 327 instance->internal_buf_dmac_add =
326 328 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
327 329 instance->internal_buf_size = internal_buf_size;
328 330
329 331 /* allocate evt_detail */
330 332 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
331 333 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
332 334 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
333 335 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
334 336 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
335 337 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
336 338
337 339 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
338 340 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
339 341 cmn_err(CE_WARN, "mrsas_tbolt_alloc_additional_dma_buffer: "
340 342 "could not allocate data transfer buffer.");
341 343 goto fail_tbolt_additional_buff;
342 344 }
343 345
344 346 bzero(instance->mfi_evt_detail_obj.buffer,
345 347 sizeof (struct mrsas_evt_detail));
346 348
347 349 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
348 350
349 351 instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
350 352 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
351 353
352 354 for (i = 0; i < 2; i++) {
353 355 /* allocate the data transfer buffer */
354 356 instance->ld_map_obj[i].size = instance->size_map_info;
355 357 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
356 358 instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
357 359 instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
358 360 0xFFFFFFFFU;
359 361 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
360 362 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
361 363
362 364 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
363 365 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
364 366 cmn_err(CE_WARN,
365 367 "could not allocate data transfer buffer.");
366 368 goto fail_tbolt_additional_buff;
367 369 }
368 370
369 371 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
370 372
371 373 bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
372 374
373 375 instance->ld_map[i] =
374 376 (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
375 377 instance->ld_map_phy[i] = (uint32_t)instance->
376 378 ld_map_obj[i].dma_cookie[0].dmac_address;
377 379
378 380 con_log(CL_DLEVEL3, (CE_NOTE,
379 381 "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
380 382
381 383 con_log(CL_DLEVEL3, (CE_NOTE,
382 384 "size_map_info 0x%x", instance->size_map_info));
383 385 }
384 386
385 387 return (DDI_SUCCESS);
386 388
387 389 fail_tbolt_additional_buff:
388 390 mrsas_tbolt_free_additional_dma_buffer(instance);
389 391
390 392 return (DDI_FAILURE);
391 393 }
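The size_map_info computation above follows the classic "struct with a one-element trailing array" sizing pattern: the declared MR_FW_RAID_MAP type carries one MR_LD_SPAN_MAP, so room for (MAX_LOGICAL_DRIVES - 1) more is added. A self-contained sketch with hypothetical stand-in types:

    #include <stdio.h>

    /* hypothetical stand-ins for MR_LD_SPAN_MAP and MR_FW_RAID_MAP */
    typedef struct {
            unsigned int span[8];
    } span_map_t;

    typedef struct {
            unsigned int hdr;
            span_map_t ldSpanMap[1];    /* really MAX_LOGICAL_DRIVES long */
    } fw_raid_map_t;

    #define MAX_LDS 64      /* stand-in for MAX_LOGICAL_DRIVES */

    int
    main(void)
    {
            /* the declared type already holds one entry; add the rest */
            size_t sz = sizeof (fw_raid_map_t) +
                (MAX_LDS - 1) * sizeof (span_map_t);
            printf("map buffer size: %zu bytes\n", sz);
            return (0);
    }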
392 394
393 395 MRSAS_REQUEST_DESCRIPTOR_UNION *
394 396 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
395 397 {
396 398 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
397 399
398 400 if (index > instance->max_fw_cmds) {
399 401 con_log(CL_ANN1, (CE_NOTE,
400 402 "Invalid SMID 0x%x request for descriptor", index));
401 403 con_log(CL_ANN1, (CE_NOTE,
402 404 "max_fw_cmds : 0x%x", instance->max_fw_cmds));
403 405 return (NULL);
404 406 }
405 407
406 408 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
407 409 ((char *)instance->request_message_pool +
408 410 (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
409 411
410 412 con_log(CL_ANN1, (CE_NOTE,
411 413 "request descriptor : 0x%08lx", (unsigned long)req_desc));
412 414
413 415 con_log(CL_ANN1, (CE_NOTE,
414 416 "request descriptor base phy : 0x%08lx",
415 417 (unsigned long)instance->request_message_pool_phy));
416 418
417 419 return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
418 420 }
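mr_sas_get_request_descriptor() is plain array indexing into the descriptor pool with a bounds check. A minimal sketch of the same pattern (the pool size and descriptor type here are hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    typedef union {
            uint64_t Words;
    } req_desc_t;

    static req_desc_t desc_pool[16];

    static req_desc_t *
    get_desc(uint16_t index, uint16_t max_cmds)
    {
            if (index > max_cmds)   /* mirrors the driver's check */
                    return (NULL);
            return (&desc_pool[index]);
    }

    int
    main(void)
    {
            printf("desc 3 lives at %p\n", (void *)get_desc(3, 15));
            return (0);
    }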
419 421
420 422
421 423 /*
422 424 * Allocate Request and Reply Queue Descriptors.
423 425 */
424 426 int
425 427 alloc_req_rep_desc(struct mrsas_instance *instance)
426 428 {
427 429 uint32_t request_q_sz, reply_q_sz;
428 430 int i, max_reply_q_sz;
429 431 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
430 432
431 433 /*
432 434 * ThunderBolt(TB): there is no longer a producer/consumer mechanism.
433 435 * Once we get an interrupt we are supposed to scan through the list of
434 436 * reply descriptors and process them accordingly. We need to allocate
435 437 * memory for up to 1024 reply descriptors.
436 438 */
437 439
438 440 /* Allocate Reply Descriptors */
439 441 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
440 442 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
441 443
442 444 /* reply queue size should be a multiple of 16 */
443 445 max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
444 446
445 447 reply_q_sz = 8 * max_reply_q_sz;
446 448
447 449
448 450 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
449 451 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
450 452
451 453 instance->reply_desc_dma_obj.size = reply_q_sz;
452 454 instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
453 455 instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
454 456 instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
455 457 instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
456 458 instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
457 459
458 460 if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
459 461 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
460 462 cmn_err(CE_WARN,
461 463 "mr_sas: could not alloc reply queue");
462 464 return (DDI_FAILURE);
463 465 }
464 466
465 467 bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
466 468 instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
467 469
468 470 /* virtual address of reply queue */
469 471 instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
470 472 instance->reply_desc_dma_obj.buffer);
471 473
472 474 instance->reply_q_depth = max_reply_q_sz;
473 475
474 476 con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
475 477 instance->reply_q_depth));
476 478
477 479 con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
478 480 (void *)instance->reply_frame_pool));
479 481
480 482 /* initialize reply descriptor Words to ~0 (unused marker) */
481 483 reply_desc = instance->reply_frame_pool;
482 484
483 485 for (i = 0; i < instance->reply_q_depth; i++) {
484 486 reply_desc->Words = (uint64_t)~0;
485 487 reply_desc++;
486 488 }
487 489
488 490
489 491 instance->reply_frame_pool_phy =
490 492 (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
491 493
492 494 con_log(CL_ANN1, (CE_NOTE,
493 495 "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
494 496
495 497
496 498 instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
497 499 reply_q_sz);
498 500
499 501 con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
500 502 instance->reply_pool_limit_phy));
501 503
502 504
503 505 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
504 506 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
505 507
506 508 /* Allocate Request Descriptors */
507 509 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
508 510 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
509 511
510 512 request_q_sz = 8 *
511 513 (instance->max_fw_cmds);
512 514
513 515 instance->request_desc_dma_obj.size = request_q_sz;
514 516 instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
515 517 instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
516 518 instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
517 519 0xFFFFFFFFU;
518 520 instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
519 521 instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
520 522
521 523 if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
522 524 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
523 525 cmn_err(CE_WARN,
524 526 "mr_sas: could not alloc request queue desc");
525 527 goto fail_undo_reply_queue;
526 528 }
527 529
528 530 bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
529 531 instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
530 532
531 533 /* virtual address of request queue desc */
532 534 instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
533 535 (instance->request_desc_dma_obj.buffer);
534 536
535 537 instance->request_message_pool_phy =
536 538 (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
537 539
538 540 return (DDI_SUCCESS);
539 541
540 542 fail_undo_reply_queue:
541 543 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
542 544 (void) mrsas_free_dma_obj(instance,
543 545 instance->reply_desc_dma_obj);
544 546 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
545 547 }
546 548
547 549 return (DDI_FAILURE);
548 550 }
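Two size decisions above are worth calling out: the reply queue depth is max_fw_cmds + 1 rounded up to a multiple of 16, and both queues use 8-byte descriptors. A small sketch of that arithmetic, using a hypothetical firmware command limit:

    #include <stdio.h>

    /* round n up to the next multiple of 16, as done for the reply queue */
    static unsigned int
    roundup16(unsigned int n)
    {
            return (((n + 15) / 16) * 16);
    }

    int
    main(void)
    {
            unsigned int max_fw_cmds = 1007;    /* hypothetical FW limit */
            unsigned int depth = roundup16(max_fw_cmds + 1);

            /* each descriptor (request or reply) is 8 bytes */
            printf("reply depth %u, reply_q_sz %u, request_q_sz %u\n",
                depth, 8 * depth, 8 * max_fw_cmds);
            return (0);
    }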
549 551
550 552 /*
551 553 * mrsas_alloc_cmd_pool_tbolt
552 554 *
553 555 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have a
554 556 * single routine.
555 557 */
556 558 int
557 559 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
558 560 {
559 561 int i;
560 562 int count;
561 563 uint32_t max_cmd;
562 564 uint32_t reserve_cmd;
563 565 size_t sz;
564 566
565 567 struct mrsas_cmd *cmd;
566 568
567 569 max_cmd = instance->max_fw_cmds;
568 570 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
569 571 "max_cmd %x", max_cmd));
570 572
571 573
572 574 sz = sizeof (struct mrsas_cmd *) * max_cmd;
573 575
574 576 /*
575 577 * instance->cmd_list is an array of struct mrsas_cmd pointers.
576 578 * Allocate the dynamic array first and then allocate individual
577 579 * commands.
578 580 */
579 581 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
580 582
581 583 /* create a frame pool and assign one frame to each cmd */
582 584 for (count = 0; count < max_cmd; count++) {
583 585 instance->cmd_list[count] =
584 586 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
585 587 }
586 588
587 589 /* add all the commands to command pool */
588 590
589 591 INIT_LIST_HEAD(&instance->cmd_pool_list);
590 592 INIT_LIST_HEAD(&instance->cmd_pend_list);
591 593 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
592 594
593 595 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
594 596
595 597 /* cmd index 0 reserved for IOC INIT */
596 598 for (i = 1; i < reserve_cmd; i++) {
597 599 cmd = instance->cmd_list[i];
598 600 cmd->index = i;
599 601 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
600 602 }
601 603
602 604
603 605 for (i = reserve_cmd; i < max_cmd; i++) {
604 606 cmd = instance->cmd_list[i];
605 607 cmd->index = i;
606 608 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
607 609 }
608 610
609 611 return (DDI_SUCCESS);
610 612
611 613 mrsas_undo_cmds:
612 614 if (count > 0) {
613 615 /* free each cmd */
614 616 for (i = 0; i < count; i++) {
615 617 if (instance->cmd_list[i] != NULL) {
616 618 kmem_free(instance->cmd_list[i],
617 619 sizeof (struct mrsas_cmd));
618 620 }
619 621 instance->cmd_list[i] = NULL;
620 622 }
621 623 }
622 624
623 625 mrsas_undo_cmd_list:
624 626 if (instance->cmd_list != NULL)
625 627 kmem_free(instance->cmd_list, sz);
626 628 instance->cmd_list = NULL;
627 629
628 630 return (DDI_FAILURE);
629 631 }
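The pool setup above partitions one command array three ways: index 0 is held back for IOC INIT, indices 1 through MRSAS_APP_RESERVED_CMDS - 1 feed the application pool, and the rest feed the general I/O pool. A trivial sketch of the partitioning (the reserved count is a stand-in):

    #include <stdio.h>

    #define APP_RESERVED    8   /* stand-in for MRSAS_APP_RESERVED_CMDS */

    int
    main(void)
    {
            unsigned int max_cmd = 16;

            printf("cmd  0 -> reserved for IOC INIT\n");
            for (unsigned int i = 1; i < APP_RESERVED; i++)
                    printf("cmd %2u -> app pool\n", i);
            for (unsigned int i = APP_RESERVED; i < max_cmd; i++)
                    printf("cmd %2u -> general pool\n", i);
            return (0);
    }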
630 632
631 633
632 634 /*
633 635 * free_space_for_mpi2
634 636 */
635 637 void
636 638 free_space_for_mpi2(struct mrsas_instance *instance)
637 639 {
638 640 /* already freed */
639 641 if (instance->cmd_list == NULL) {
640 642 return;
641 643 }
642 644
643 645 /* First free the additional DMA buffer */
644 646 mrsas_tbolt_free_additional_dma_buffer(instance);
645 647
646 648 /* Free the request/reply descriptor pool */
647 649 free_req_rep_desc_pool(instance);
648 650
649 651 /* Free the MPI message pool */
650 652 destroy_mpi2_frame_pool(instance);
651 653
652 654 /* Free the MFI frame pool */
653 655 destroy_mfi_frame_pool(instance);
654 656
655 657 /* Free all the commands in the cmd_list */
656 658 /* Free the cmd_list buffer itself */
657 659 mrsas_free_cmd_pool(instance);
658 660 }
659 661
660 662
661 663 /*
662 664 * ThunderBolt(TB) memory allocations for commands/messages/frames.
663 665 */
664 666 int
665 667 alloc_space_for_mpi2(struct mrsas_instance *instance)
666 668 {
667 669 /* Allocate command pool (memory for cmd_list & individual commands) */
668 670 if (mrsas_alloc_cmd_pool_tbolt(instance)) {
669 671 cmn_err(CE_WARN, "Error creating cmd pool");
670 672 return (DDI_FAILURE);
671 673 }
672 674
673 675 /* Initialize single reply size and Message size */
674 676 instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
675 677 instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
676 678
677 679 instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
678 680 (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
679 681 sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
680 682 instance->max_sge_in_chain = (MR_COMMAND_SIZE -
681 683 MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
682 684
683 685 /* Reduce SG count by 1 to take care of group cmds feature in FW */
684 686 instance->max_num_sge = (instance->max_sge_in_main_msg +
685 687 instance->max_sge_in_chain - 2);
686 688 instance->chain_offset_mpt_msg =
687 689 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
688 690 instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
689 691 sizeof (MPI2_SGE_IO_UNION)) / 16;
690 692 instance->reply_read_index = 0;
691 693
692 694
693 695 /* Allocate Request and Reply descriptors Array */
694 696 /* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
695 697 if (alloc_req_rep_desc(instance)) {
696 698 cmn_err(CE_WARN,
697 699 "Error, allocating memory for descripter-pool");
698 700 goto mpi2_undo_cmd_pool;
699 701 }
700 702 con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
701 703 instance->request_message_pool_phy));
702 704
703 705
704 706 /* Allocate MFI Frame pool - for MPI-MFI passthru commands */
705 707 if (create_mfi_frame_pool(instance)) {
706 708 cmn_err(CE_WARN,
707 709 "Error, allocating memory for MFI frame-pool");
708 710 goto mpi2_undo_descripter_pool;
709 711 }
710 712
711 713
712 714 /* Allocate MPI2 Message pool */
713 715 /*
714 716 * Make sure the buffer is aligned to 256 for the RAID message packet.
715 717 * Create an IO request pool and assign one frame to each cmd.
716 718 */
717 719
718 720 if (create_mpi2_frame_pool(instance)) {
719 721 cmn_err(CE_WARN,
720 722 "Error, allocating memory for MPI2 Message-pool");
721 723 goto mpi2_undo_mfi_frame_pool;
722 724 }
723 725
724 726 #ifdef DEBUG
725 727 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
726 728 instance->max_sge_in_main_msg));
727 729 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
728 730 instance->max_sge_in_chain));
729 731 con_log(CL_ANN1, (CE_CONT,
730 732 "[max_sge]0x%x", instance->max_num_sge));
731 733 con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
732 734 instance->chain_offset_mpt_msg));
733 735 con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
734 736 instance->chain_offset_io_req));
735 737 #endif
736 738
737 739
738 740 /* Allocate additional dma buffer */
739 741 if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
740 742 cmn_err(CE_WARN,
741 743 "Error, allocating tbolt additional DMA buffer");
742 744 goto mpi2_undo_message_pool;
743 745 }
744 746
745 747 return (DDI_SUCCESS);
746 748
747 749 mpi2_undo_message_pool:
748 750 destroy_mpi2_frame_pool(instance);
749 751
750 752 mpi2_undo_mfi_frame_pool:
751 753 destroy_mfi_frame_pool(instance);
752 754
753 755 mpi2_undo_descripter_pool:
754 756 free_req_rep_desc_pool(instance);
755 757
756 758 mpi2_undo_cmd_pool:
757 759 mrsas_free_cmd_pool(instance);
758 760
759 761 return (DDI_FAILURE);
760 762 }
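The SGE bookkeeping in alloc_space_for_mpi2() is all integer layout math: how many SGEs fit in the main frame after the fixed request header, how many fit in the per-command chain buffer, and chain offsets expressed in 16-byte units. A sketch with hypothetical structure sizes (the real ones come from the MPI2 headers); note the code subtracts 2 even though its comment mentions reducing by 1 for the FW group-commands feature, presumably because one slot is also consumed by the chain element:

    #include <stdio.h>

    #define MSG_SIZE    256         /* stand-in for MRSAS_THUNDERBOLT_MSG_SIZE */
    #define CMD_SIZE    (64 * 20)   /* MR_COMMAND_SIZE, 1280 bytes */
    #define IO_REQ_SIZE 192         /* hypothetical MPI2_RAID_SCSI_IO_REQUEST */
    #define SGE_SIZE    16          /* hypothetical MPI2_SGE_IO_UNION */

    int
    main(void)
    {
            unsigned int in_main = (MSG_SIZE - (IO_REQ_SIZE - SGE_SIZE)) /
                SGE_SIZE;
            unsigned int in_chain = (CMD_SIZE - MSG_SIZE) / SGE_SIZE;
            /* driver subtracts 2; see the group-cmds comment above */
            unsigned int max_sge = in_main + in_chain - 2;
            /* offsets are programmed in 16-byte units */
            unsigned int chain_off_io_req = (MSG_SIZE - SGE_SIZE) / 16;

            printf("main %u, chain %u, max_sge %u, chain_off %u\n",
                in_main, in_chain, max_sge, chain_off_io_req);
            return (0);
    }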
761 763
762 764
763 765 /*
764 766 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
765 767 */
766 768 int
767 769 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
768 770 {
769 771
770 772 /*
771 773 * Reduce the max supported cmds by 1, to ensure that the
772 774 * reply_q_sz (1 more than the max cmds the driver may send)
773 775 * does not exceed the max cmds that the FW can support.
774 776 */
775 777
776 778 if (instance->max_fw_cmds > 1008) {
777 779 instance->max_fw_cmds = 1008;
778 780 instance->max_fw_cmds = instance->max_fw_cmds-1;
779 781 }
780 782
781 783 con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
782 784 " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
783 785
784 786
785 787 /* create a pool of commands */
786 788 if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
787 789 cmn_err(CE_WARN,
788 790 " alloc_space_for_mpi2() failed.");
789 791
790 792 return (DDI_FAILURE);
791 793 }
792 794
793 795 /* Send ioc init message */
794 796 /* NOTE: the issue_init call does FMA checking already. */
795 797 if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
796 798 cmn_err(CE_WARN,
797 799 " mrsas_issue_init_mpi2() failed.");
798 800
799 801 goto fail_init_fusion;
800 802 }
801 803
802 804 instance->unroll.alloc_space_mpi2 = 1;
803 805
804 806 con_log(CL_ANN, (CE_NOTE,
805 807 "mrsas_init_adapter_tbolt: SUCCESSFUL"));
806 808
807 809 return (DDI_SUCCESS);
808 810
809 811 fail_init_fusion:
810 812 free_space_for_mpi2(instance);
811 813
812 814 return (DDI_FAILURE);
813 815 }
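The clamp at the top of mrsas_init_adapter_tbolt() caps max_fw_cmds at 1007 when the firmware reports more than 1008, so that the reply queue depth (one more than the driver's max, rounded up to 16) cannot exceed what the FW supports; as written, the decrement applies only inside the > 1008 branch. A one-screen restatement with a hypothetical firmware value:

    #include <stdio.h>

    int
    main(void)
    {
            unsigned int max_fw_cmds = 1024;    /* hypothetical FW value */

            if (max_fw_cmds > 1008) {
                    max_fw_cmds = 1008;
                    max_fw_cmds = max_fw_cmds - 1; /* leave a reply slot */
            }
            printf("max_fw_cmds %u\n", max_fw_cmds);
            return (0);
    }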
814 816
815 817
816 818
817 819 /*
818 820 * init_mpi2
819 821 */
820 822 int
821 823 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
822 824 {
823 825 dma_obj_t init2_dma_obj;
824 826 int ret_val = DDI_SUCCESS;
825 827
826 828 /* allocate DMA buffer for IOC INIT message */
827 829 init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
828 830 init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
829 831 init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
830 832 init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
831 833 init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
832 834 init2_dma_obj.dma_attr.dma_attr_align = 256;
833 835
834 836 if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
835 837 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
836 838 cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
837 839 "could not allocate data transfer buffer.");
838 840 return (DDI_FAILURE);
839 841 }
840 842 (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
841 843
842 844 con_log(CL_ANN1, (CE_NOTE,
843 845 "mrsas_issue_init_mpi2 _phys adr: %x",
844 846 init2_dma_obj.dma_cookie[0].dmac_address));
845 847
846 848
847 849 /* Initialize and send ioc init message */
848 850 ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
849 851 if (ret_val == DDI_FAILURE) {
850 852 con_log(CL_ANN1, (CE_WARN,
851 853 "mrsas_issue_init_mpi2: Failed"));
852 854 goto fail_init_mpi2;
853 855 }
854 856
855 857 /* free IOC init DMA buffer */
856 858 if (mrsas_free_dma_obj(instance, init2_dma_obj)
857 859 != DDI_SUCCESS) {
858 860 con_log(CL_ANN1, (CE_WARN,
859 861 "mrsas_issue_init_mpi2: Free Failed"));
860 862 return (DDI_FAILURE);
861 863 }
862 864
863 865 /* Get/Check and sync ld_map info */
864 866 instance->map_id = 0;
865 867 if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
866 868 (void) mrsas_tbolt_sync_map_info(instance);
867 869
868 870
869 871 /* No mrsas_cmd to send, so send NULL. */
870 872 if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
871 873 goto fail_init_mpi2;
872 874
873 875 con_log(CL_ANN, (CE_NOTE,
874 876 "mrsas_issue_init_mpi2: SUCCESSFUL"));
875 877
876 878 return (DDI_SUCCESS);
877 879
878 880 fail_init_mpi2:
879 881 (void) mrsas_free_dma_obj(instance, init2_dma_obj);
880 882
881 883 return (DDI_FAILURE);
882 884 }
883 885
884 886 static int
885 887 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
886 888 {
887 889 int numbytes;
888 890 uint16_t flags;
889 891 struct mrsas_init_frame2 *mfiFrameInit2;
890 892 struct mrsas_header *frame_hdr;
891 893 Mpi2IOCInitRequest_t *init;
892 894 struct mrsas_cmd *cmd = NULL;
893 895 struct mrsas_drv_ver drv_ver_info;
894 896 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
895 897
896 898 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
897 899
898 900
899 901 #ifdef DEBUG
900 902 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
901 903 (int)sizeof (*mfiFrameInit2)));
902 904 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
903 905 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
904 906 (int)sizeof (struct mrsas_init_frame2)));
905 907 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
906 908 (int)sizeof (Mpi2IOCInitRequest_t)));
907 909 #endif
908 910
909 911 init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
910 912 numbytes = sizeof (*init);
911 913 bzero(init, numbytes);
912 914
913 915 ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
914 916 MPI2_FUNCTION_IOC_INIT);
915 917
916 918 ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
917 919 MPI2_WHOINIT_HOST_DRIVER);
918 920
919 921 /* set MsgVersion and HeaderVersion host driver was built with */
920 922 ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
921 923 MPI2_VERSION);
922 924
923 925 ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
924 926 MPI2_HEADER_VERSION);
925 927
926 928 ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
927 929 instance->raid_io_msg_size / 4);
928 930
929 931 ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
930 932 0);
931 933
932 934 ddi_put16(mpi2_dma_obj->acc_handle,
933 935 &init->ReplyDescriptorPostQueueDepth,
934 936 instance->reply_q_depth);
935 937 /*
936 938 * These addresses are set using the DMA cookie addresses from when the
937 939 * memory was allocated. Sense buffer hi address should be 0.
938 940 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
939 941 */
940 942
941 943 ddi_put32(mpi2_dma_obj->acc_handle,
942 944 &init->SenseBufferAddressHigh, 0);
943 945
944 946 ddi_put64(mpi2_dma_obj->acc_handle,
945 947 (uint64_t *)&init->SystemRequestFrameBaseAddress,
946 948 instance->io_request_frames_phy);
947 949
948 950 ddi_put64(mpi2_dma_obj->acc_handle,
949 951 &init->ReplyDescriptorPostQueueAddress,
950 952 instance->reply_frame_pool_phy);
951 953
952 954 ddi_put64(mpi2_dma_obj->acc_handle,
953 955 &init->ReplyFreeQueueAddress, 0);
954 956
955 957 cmd = instance->cmd_list[0];
956 958 if (cmd == NULL) {
957 959 return (DDI_FAILURE);
958 960 }
959 961 cmd->retry_count_for_ocr = 0;
960 962 cmd->pkt = NULL;
961 963 cmd->drv_pkt_time = 0;
962 964
963 965 mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
964 966 con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
965 967
966 968 frame_hdr = &cmd->frame->hdr;
967 969
968 970 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
969 971 MFI_CMD_STATUS_POLL_MODE);
970 972
971 973 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
972 974
973 975 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
974 976
975 977 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
976 978
977 979 con_log(CL_ANN, (CE_CONT,
978 980 "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
979 981
980 982 /* Init the MFI Header */
981 983 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
982 984 &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
983 985
984 986 con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
985 987
986 988 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
987 989 &mfiFrameInit2->cmd_status,
988 990 MFI_STAT_INVALID_STATUS);
989 991
990 992 con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
991 993
992 994 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
993 995 &mfiFrameInit2->queue_info_new_phys_addr_lo,
994 996 mpi2_dma_obj->dma_cookie[0].dmac_address);
995 997
996 998 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
997 999 &mfiFrameInit2->data_xfer_len,
998 1000 sizeof (Mpi2IOCInitRequest_t));
999 1001
1000 1002 con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1001 1003 (int)init->ReplyDescriptorPostQueueAddress));
1002 1004
1003 1005 /* fill driver version information */
1004 1006 fill_up_drv_ver(&drv_ver_info);
1005 1007
1006 1008 /* allocate the driver version data transfer buffer */
1007 1009 instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1008 1010 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1009 1011 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1010 1012 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1011 1013 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1012 1014 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1013 1015
1014 1016 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1015 1017 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1016 1018 cmn_err(CE_WARN,
1017 1019 "fusion init: Could not allocate driver version buffer.");
1018 1020 return (DDI_FAILURE);
1019 1021 }
1020 1022 /* copy driver version to dma buffer */
1021 1023 bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1022 1024 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1023 1025 (uint8_t *)drv_ver_info.drv_ver,
1024 1026 (uint8_t *)instance->drv_ver_dma_obj.buffer,
1025 1027 sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1026 1028
1027 1029 /* send driver version physical address to firmware */
1028 1030 ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1029 1031 instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1030 1032
1031 1033 con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1032 1034 mfiFrameInit2->queue_info_new_phys_addr_lo,
1033 1035 (int)sizeof (Mpi2IOCInitRequest_t)));
1034 1036
1035 1037 con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1036 1038
1037 1039 con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1038 1040 cmd->scsi_io_request_phys_addr,
1039 1041 (int)sizeof (struct mrsas_init_frame2)));
1040 1042
1041 1043 /* disable interrupts before sending INIT2 frame */
1042 1044 instance->func_ptr->disable_intr(instance);
1043 1045
1044 1046 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1045 1047 instance->request_message_pool;
1046 1048 req_desc->Words = cmd->scsi_io_request_phys_addr;
1047 1049 req_desc->MFAIo.RequestFlags =
1048 1050 (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1049 1051
1050 1052 cmd->request_desc = req_desc;
1051 1053
1052 1054 /* issue the init frame */
1053 1055 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1054 1056
1055 1057 con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1056 1058 con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
1057 1059 frame_hdr->cmd_status));
1058 1060
1059 1061 if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1060 1062 &mfiFrameInit2->cmd_status) == 0) {
1061 1063 con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1062 1064 } else {
1063 1065 con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1064 1066 mrsas_dump_reply_desc(instance);
1065 1067 goto fail_ioc_init;
1066 1068 }
1067 1069
1068 1070 mrsas_dump_reply_desc(instance);
1069 1071
1070 1072 instance->unroll.verBuff = 1;
1071 1073
1072 1074 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1073 1075
1074 1076 return (DDI_SUCCESS);
1075 1077
1076 1078
1077 1079 fail_ioc_init:
1078 1080
1079 1081 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1080 1082
1081 1083 return (DDI_FAILURE);
1082 1084 }
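For the IOC INIT frame the request descriptor is built by hand: Words carries the MFI frame's physical address and RequestFlags marks it as an MFA request. A rough sketch of that packing, with hypothetical flag values standing in for the MPI2_REQ_DESCRIPT_* constants and a simplified descriptor layout:

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical stand-ins for the MPI2 request-descriptor constants */
    #define REQ_FLAGS_MFA           0x1
    #define REQ_FLAGS_TYPE_SHIFT    1

    typedef union {
            uint64_t Words;
            struct {
                    uint8_t RequestFlags;   /* low byte of the descriptor */
                    uint8_t pad[7];
            } MFAIo;
    } req_desc_t;

    int
    main(void)
    {
            req_desc_t d;
            uint64_t frame_pa = 0x12345600; /* hypothetical frame address */

            d.Words = frame_pa;
            d.MFAIo.RequestFlags = REQ_FLAGS_MFA << REQ_FLAGS_TYPE_SHIFT;
            printf("descriptor: 0x%016llx\n", (unsigned long long)d.Words);
            return (0);
    }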
1083 1085
1084 1086 int
1085 1087 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1086 1088 {
1087 1089 int i;
1088 1090 uint32_t wait_time = dump_io_wait_time;
1089 1091 for (i = 0; i < wait_time; i++) {
1090 1092 /*
1091 1093 * Check For Outstanding poll Commands
1092 1094 * except ldsync command and aen command
1093 1095 */
1094 1096 if (instance->fw_outstanding <= 2) {
1095 1097 break;
1096 1098 }
1097 1099 drv_usecwait(10*MILLISEC);
1098 1100 /* complete commands from reply queue */
1099 1101 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1100 1102 }
1101 1103 if (instance->fw_outstanding > 2) {
1102 1104 return (1);
1103 1105 }
1104 1106 return (0);
1105 1107 }
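wait_for_outstanding_poll_io() is a bounded drain loop: poll the reply queue, give the hardware time, and give up after dump_io_wait_time iterations, tolerating the two commands (LD sync and AEN) that stay outstanding by design. A user-space skeleton of the same shape, with hypothetical stand-ins for the driver state:

    #include <stdio.h>

    static int fw_outstanding = 10; /* hypothetical in-flight count */

    static void
    process_outstanding(void)
    {
            if (fw_outstanding > 0)
                    fw_outstanding--;   /* pretend one reply completed */
    }

    int
    main(void)
    {
            int wait_time = 90;     /* mirrors dump_io_wait_time */

            /* <= 2 allows for the always-pending ldsync and AEN commands */
            for (int i = 0; i < wait_time && fw_outstanding > 2; i++) {
                    /* the driver sleeps 10ms here via drv_usecwait() */
                    process_outstanding();
            }
            return (fw_outstanding > 2 ? 1 : 0);
    }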
1106 1108 /*
1107 1109 * scsi_pkt handling
1108 1110 *
1109 1111 * Visible to the external world via the transport structure.
1110 1112 */
1111 1113
1112 1114 int
1113 1115 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1114 1116 {
1115 1117 struct mrsas_instance *instance = ADDR2MR(ap);
1116 1118 struct scsa_cmd *acmd = PKT2CMD(pkt);
1117 1119 struct mrsas_cmd *cmd = NULL;
1118 1120 uchar_t cmd_done = 0;
1119 1121
1120 1122 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1121 1123 if (instance->deadadapter == 1) {
1122 1124 cmn_err(CE_WARN,
1123 1125 "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1124 1126 "for IO, as the HBA doesnt take any more IOs");
1125 1127 if (pkt) {
1126 1128 pkt->pkt_reason = CMD_DEV_GONE;
1127 1129 pkt->pkt_statistics = STAT_DISCON;
1128 1130 }
1129 1131 return (TRAN_FATAL_ERROR);
1130 1132 }
1131 1133 if (instance->adapterresetinprogress) {
1132 1134 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1133 1135 "returning mfi_pkt and setting TRAN_BUSY\n"));
1134 1136 return (TRAN_BUSY);
1135 1137 }
1136 1138 (void) mrsas_tbolt_prepare_pkt(acmd);
1137 1139
1138 1140 cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1139 1141
1140 1142 /*
1141 1143 * Check if the command was already completed by the mrsas_build_cmd()
1142 1144 * routine, in which case the busy flag is clear, scb is NULL, and the
1143 1145 * appropriate reason is provided in the pkt_reason field.
1144 1146 */
1145 1147 if (cmd_done) {
1146 1148 pkt->pkt_reason = CMD_CMPLT;
1147 1149 pkt->pkt_scbp[0] = STATUS_GOOD;
1148 1150 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1149 1151 | STATE_SENT_CMD;
1150 1152 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1151 1153 (*pkt->pkt_comp)(pkt);
1152 1154 }
1153 1155
1154 1156 return (TRAN_ACCEPT);
1155 1157 }
1156 1158
1157 1159 if (cmd == NULL) {
1158 1160 return (TRAN_BUSY);
1159 1161 }
1160 1162
1161 1163
1162 1164 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1163 1165 if (instance->fw_outstanding > instance->max_fw_cmds) {
1164 1166 cmn_err(CE_WARN,
1165 1167 "Command Queue Full... Returning BUSY");
1166 1168 return_raid_msg_pkt(instance, cmd);
1167 1169 return (TRAN_BUSY);
1168 1170 }
1169 1171
1170 1172 /* Synchronize the Cmd frame for the controller */
1171 1173 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1172 1174 DDI_DMA_SYNC_FORDEV);
1173 1175
1174 1176 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1175 1177 "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1176 1178 cmd->index, cmd->SMID));
1177 1179
1178 1180 instance->func_ptr->issue_cmd(cmd, instance);
1179 1181 } else {
1180 1182 instance->func_ptr->issue_cmd(cmd, instance);
1181 1183 (void) wait_for_outstanding_poll_io(instance);
1182 1184 (void) mrsas_common_check(instance, cmd);
1183 1185 }
1184 1186
1185 1187 return (TRAN_ACCEPT);
1186 1188 }
1187 1189
1188 1190 /*
1189 1191 * prepare the pkt:
1190 1192 * the pkt may have been resubmitted or just reused so
1191 1193 * initialize some fields and do some checks.
1192 1194 */
1193 1195 static int
1194 1196 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1195 1197 {
1196 1198 struct scsi_pkt *pkt = CMD2PKT(acmd);
1197 1199
1198 1200
1199 1201 /*
1200 1202 * Reinitialize some fields that need it; the packet may
1201 1203 * have been resubmitted
1202 1204 */
1203 1205 pkt->pkt_reason = CMD_CMPLT;
1204 1206 pkt->pkt_state = 0;
1205 1207 pkt->pkt_statistics = 0;
1206 1208 pkt->pkt_resid = 0;
1207 1209
1208 1210 /*
1209 1211 * zero status byte.
1210 1212 */
1211 1213 *(pkt->pkt_scbp) = 0;
1212 1214
1213 1215 return (0);
1214 1216 }
1215 1217
1216 1218
1217 1219 int
1218 1220 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1219 1221 struct scsa_cmd *acmd,
1220 1222 struct mrsas_cmd *cmd,
1221 1223 Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1222 1224 uint32_t *datalen)
1223 1225 {
1224 1226 uint32_t MaxSGEs;
1225 1227 int sg_to_process;
1226 1228 uint32_t i, j;
1227 1229 uint32_t numElements, endElement;
1228 1230 Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
1229 1231 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
1230 1232 ddi_acc_handle_t acc_handle =
1231 1233 instance->mpi2_frame_pool_dma_obj.acc_handle;
1232 1234
1233 1235 con_log(CL_ANN1, (CE_NOTE,
1234 1236 "chkpnt: Building Chained SGL :%d", __LINE__));
1235 1237
1236 1238 /* Calculate SGE size in number of 32-bit words */
1237 1239 /* Clear the datalen before updating it. */
1238 1240 *datalen = 0;
1239 1241
1240 1242 MaxSGEs = instance->max_sge_in_main_msg;
1241 1243
1242 1244 ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1243 1245 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1244 1246
1245 1247 /* set data transfer flag. */
1246 1248 if (acmd->cmd_flags & CFLAG_DMASEND) {
1247 1249 ddi_put32(acc_handle, &scsi_raid_io->Control,
1248 1250 MPI2_SCSIIO_CONTROL_WRITE);
1249 1251 } else {
1250 1252 ddi_put32(acc_handle, &scsi_raid_io->Control,
1251 1253 MPI2_SCSIIO_CONTROL_READ);
1252 1254 }
1253 1255
1254 1256
1255 1257 numElements = acmd->cmd_cookiecnt;
1256 1258
1257 1259 con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1258 1260
1259 1261 if (numElements > instance->max_num_sge) {
1260 1262 con_log(CL_ANN, (CE_NOTE,
1261 1263 "[Max SGE Count Exceeded]:%x", numElements));
1262 1264 return (numElements);
1263 1265 }
1264 1266
1265 1267 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1266 1268 (uint8_t)numElements);
1267 1269
1268 1270 /* set end element in main message frame */
1269 1271 endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1270 1272
1271 1273 /* prepare the scatter-gather list for the firmware */
1272 1274 scsi_raid_io_sgl_ieee =
1273 1275 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1274 1276
1275 1277 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1276 1278 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1277 1279 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1278 1280
1279 1281 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1280 1282 }
1281 1283
1282 1284 for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1283 1285 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1284 1286 acmd->cmd_dmacookies[i].dmac_laddress);
1285 1287
1286 1288 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1287 1289 acmd->cmd_dmacookies[i].dmac_size);
1288 1290
1289 1291 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1290 1292
1291 1293 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1292 1294 if (i == (numElements - 1)) {
1293 1295 ddi_put8(acc_handle,
1294 1296 &scsi_raid_io_sgl_ieee->Flags,
1295 1297 IEEE_SGE_FLAGS_END_OF_LIST);
1296 1298 }
1297 1299 }
1298 1300
1299 1301 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1300 1302
1301 1303 #ifdef DEBUG
1302 1304 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1303 1305 scsi_raid_io_sgl_ieee->Address));
1304 1306 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1305 1307 scsi_raid_io_sgl_ieee->Length));
1306 1308 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1307 1309 scsi_raid_io_sgl_ieee->Flags));
1308 1310 #endif
1309 1311
1310 1312 }
1311 1313
1312 1314 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1313 1315
1314 1316 /* check if chained SGL required */
1315 1317 if (i < numElements) {
1316 1318
1317 1319 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1318 1320
1319 1321 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1320 1322 uint16_t ioFlags =
1321 1323 ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1322 1324
1323 1325 if ((ioFlags &
1324 1326 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1325 1327 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1326 1328 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1327 1329 (U8)instance->chain_offset_io_req);
1328 1330 } else {
1329 1331 ddi_put8(acc_handle,
1330 1332 &scsi_raid_io->ChainOffset, 0);
1331 1333 }
1332 1334 } else {
1333 1335 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1334 1336 (U8)instance->chain_offset_io_req);
1335 1337 }
1336 1338
1337 1339 /* prepare physical chain element */
1338 1340 ieeeChainElement = scsi_raid_io_sgl_ieee;
1339 1341
1340 1342 ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1341 1343
1342 1344 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1343 1345 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1344 1346 IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1345 1347 } else {
1346 1348 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1347 1349 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1348 1350 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1349 1351 }
1350 1352
1351 1353 ddi_put32(acc_handle, &ieeeChainElement->Length,
1352 1354 (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1353 1355
1354 1356 ddi_put64(acc_handle, &ieeeChainElement->Address,
1355 1357 (U64)cmd->sgl_phys_addr);
1356 1358
1357 1359 sg_to_process = numElements - i;
1358 1360
1359 1361 con_log(CL_ANN1, (CE_NOTE,
1360 1362 "[Additional SGE Count]:%x", endElement));
1361 1363
1362 1364 /* point to the chained SGL buffer */
1363 1365 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1364 1366
1365 1367 /* build rest of the SGL in chained buffer */
1366 1368 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1367 1369 con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1368 1370
1369 1371 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1370 1372 acmd->cmd_dmacookies[i].dmac_laddress);
1371 1373
1372 1374 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1373 1375 acmd->cmd_dmacookies[i].dmac_size);
1374 1376
1375 1377 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1376 1378
1377 1379 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1378 1380 if (i == (numElements - 1)) {
1379 1381 ddi_put8(acc_handle,
1380 1382 &scsi_raid_io_sgl_ieee->Flags,
1381 1383 IEEE_SGE_FLAGS_END_OF_LIST);
1382 1384 }
1383 1385 }
1384 1386
1385 1387 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1386 1388
1387 1389 #ifdef DEBUG
1388 1390 con_log(CL_DLEVEL1, (CE_NOTE,
1389 1391 "[SGL Address]: %" PRIx64,
1390 1392 scsi_raid_io_sgl_ieee->Address));
1391 1393 con_log(CL_DLEVEL1, (CE_NOTE,
1392 1394 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1393 1395 con_log(CL_DLEVEL1, (CE_NOTE,
1394 1396 "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1395 1397 #endif
1396 1398
1397 1399 i++;
1398 1400 }
1399 1401 }
1400 1402
1401 1403 return (0);
1402 1404 } /* end of BuildScatterGather */
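The split between main-frame and chained SGEs comes down to one line: if the cookie count fits, every SGE lives in the main frame; otherwise the last main-frame slot is given to the chain element and the remainder spill into the per-command chain buffer. A sketch of that decision (the counts are hypothetical):

    #include <stdio.h>

    int
    main(void)
    {
            unsigned int max_in_main = 8;   /* hypothetical max_sge_in_main_msg */
            unsigned int n = 11;            /* DMA cookies in this transfer */

            /* last main slot becomes the chain element when we overflow */
            unsigned int in_main = (n <= max_in_main) ? n : (max_in_main - 1);
            unsigned int chained = n - in_main;

            printf("%u SGEs in the main frame, %u in the chain buffer\n",
                in_main, chained);
            return (0);
    }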
1403 1405
1404 1406
1405 1407 /*
1406 1408 * build_cmd
1407 1409 */
1408 1410 static struct mrsas_cmd *
1409 1411 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1410 1412 struct scsi_pkt *pkt, uchar_t *cmd_done)
1411 1413 {
1412 1414 uint8_t fp_possible = 0;
1413 1415 uint32_t index;
1414 1416 uint32_t lba_count = 0;
1415 1417 uint32_t start_lba_hi = 0;
1416 1418 uint32_t start_lba_lo = 0;
1417 1419 ddi_acc_handle_t acc_handle =
1418 1420 instance->mpi2_frame_pool_dma_obj.acc_handle;
1419 1421 struct mrsas_cmd *cmd = NULL;
1420 1422 struct scsa_cmd *acmd = PKT2CMD(pkt);
1421 1423 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
1422 1424 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1423 1425 uint32_t datalen;
1424 1426 struct IO_REQUEST_INFO io_info;
1425 1427 MR_FW_RAID_MAP_ALL *local_map_ptr;
1426 1428 uint16_t pd_cmd_cdblen;
1427 1429
1428 1430 con_log(CL_DLEVEL1, (CE_NOTE,
1429 1431 "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1430 1432
1431 1433 /* find out if this is a logical or a physical drive command. */
1432 1434 acmd->islogical = MRDRV_IS_LOGICAL(ap);
1433 1435 acmd->device_id = MAP_DEVICE_ID(instance, ap);
1434 1436
1435 1437 *cmd_done = 0;
1436 1438
1437 1439 /* get the command packet */
1438 1440 if (!(cmd = get_raid_msg_pkt(instance))) {
1439 1441 return (NULL);
1440 1442 }
1441 1443
1442 1444 index = cmd->index;
1443 1445 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
1444 1446 ReqDescUnion->Words = 0;
1445 1447 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1446 1448 ReqDescUnion->SCSIIO.RequestFlags =
1447 1449 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1448 1450 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1449 1451
1450 1452
1451 1453 cmd->request_desc = ReqDescUnion;
1452 1454 cmd->pkt = pkt;
1453 1455 cmd->cmd = acmd;
1454 1456
1455 1457 /* let's get the command direction */
1456 1458 if (acmd->cmd_flags & CFLAG_DMASEND) {
1457 1459 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1458 1460 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1459 1461 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1460 1462 DDI_DMA_SYNC_FORDEV);
1461 1463 }
1462 1464 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1463 1465 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1464 1466 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1465 1467 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1466 1468 DDI_DMA_SYNC_FORCPU);
1467 1469 }
1468 1470 } else {
1469 1471 con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1470 1472 }
1471 1473
1472 1474
1473 1475 /* get SCSI_IO raid message frame pointer */
1474 1476 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1475 1477
1476 1478 /* zero out SCSI_IO raid message frame */
1477 1479 bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1478 1480
1479 1481 /* Set the ldTargetId set by BuildRaidContext() */
1480 1482 ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1481 1483 acmd->device_id);
1482 1484
1483 1485 /* Copy CDB to scsi_io_request message frame */
1484 1486 ddi_rep_put8(acc_handle,
1485 1487 (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1486 1488 acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1487 1489
1488 1490 /*
1489 1491 * Just the CDB length, rest of the Flags are zero
1490 1492 * This will be modified later.
1491 1493 */
1492 1494 ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1493 1495
1494 1496 pd_cmd_cdblen = acmd->cmd_cdblen;
1495 1497
1496 1498 switch (pkt->pkt_cdbp[0]) {
1497 1499 case SCMD_READ:
1498 1500 case SCMD_WRITE:
1499 1501 case SCMD_READ_G1:
1500 1502 case SCMD_WRITE_G1:
1501 1503 case SCMD_READ_G4:
1502 1504 case SCMD_WRITE_G4:
1503 1505 case SCMD_READ_G5:
1504 1506 case SCMD_WRITE_G5:
1505 1507
1506 1508 if (acmd->islogical) {
1507 1509 /* Initialize sense Information */
1508 1510 if (cmd->sense1 == NULL) {
1509 1511 con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1510 1512 "Sense buffer ptr NULL "));
1511 1513 }
1512 1514 bzero(cmd->sense1, SENSE_LENGTH);
1513 1515 con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1514 1516 "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1515 1517
1516 1518 if (acmd->cmd_cdblen == CDB_GROUP0) {
1517 1519 /* 6-byte cdb */
1518 1520 lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1519 1521 start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1520 1522 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1521 1523 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1522 1524 << 16));
1523 1525 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
1524 1526 /* 10-byte cdb */
1525 1527 lba_count =
1526 1528 (((uint16_t)(pkt->pkt_cdbp[8])) |
1527 1529 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1528 1530
1529 1531 start_lba_lo =
1530 1532 (((uint32_t)(pkt->pkt_cdbp[5])) |
1531 1533 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1532 1534 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1533 1535 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1534 1536
1535 1537 } else if (acmd->cmd_cdblen == CDB_GROUP5) {
1536 1538 /* 12-byte cdb */
1537 1539 lba_count = (
1538 1540 ((uint32_t)(pkt->pkt_cdbp[9])) |
1539 1541 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1540 1542 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1541 1543 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1542 1544
1543 1545 start_lba_lo =
1544 1546 (((uint32_t)(pkt->pkt_cdbp[5])) |
1545 1547 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1546 1548 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1547 1549 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1548 1550
1549 1551 } else if (acmd->cmd_cdblen == CDB_GROUP4) {
1550 1552 /* 16-byte cdb */
1551 1553 lba_count = (
1552 1554 ((uint32_t)(pkt->pkt_cdbp[13])) |
1553 1555 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1554 1556 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1555 1557 ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1556 1558
1557 1559 start_lba_lo = (
1558 1560 ((uint32_t)(pkt->pkt_cdbp[9])) |
1559 1561 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1560 1562 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1561 1563 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1562 1564
1563 1565 start_lba_hi = (
1564 1566 ((uint32_t)(pkt->pkt_cdbp[5])) |
1565 1567 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1566 1568 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1567 1569 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1568 1570 }
1569 1571
1570 1572 if (instance->tbolt &&
1571 1573 ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1572 1574 cmn_err(CE_WARN, " IO SECTOR COUNT exceeds "
1573 1575 "controller limit 0x%x sectors",
1574 1576 lba_count);
1575 1577 }
1576 1578
1577 1579 bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1578 1580 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1579 1581 start_lba_lo;
1580 1582 io_info.numBlocks = lba_count;
1581 1583 io_info.ldTgtId = acmd->device_id;
1582 1584
1583 1585 if (acmd->cmd_flags & CFLAG_DMASEND)
1584 1586 io_info.isRead = 0;
1585 1587 else
1586 1588 io_info.isRead = 1;
1587 1589
1588 1590
1589 1591 /* Acquire SYNC MAP UPDATE lock */
1590 1592 mutex_enter(&instance->sync_map_mtx);
1591 1593
1592 1594 local_map_ptr =
1593 1595 instance->ld_map[(instance->map_id & 1)];
1594 1596
1595 1597 if ((MR_TargetIdToLdGet(
1596 1598 acmd->device_id, local_map_ptr) >=
1597 1599 MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1598 1600 cmn_err(CE_NOTE, "Fast Path NOT Possible, "
1599 1601 "targetId >= MAX_LOGICAL_DRIVES || "
1600 1602 "!instance->fast_path_io");
1601 1603 fp_possible = 0;
1602 1604 /* Set Regionlock flags to BYPASS */
1603 1605 /* io_request->RaidContext.regLockFlags = 0; */
1604 1606 ddi_put8(acc_handle,
1605 1607 &scsi_raid_io->RaidContext.regLockFlags, 0);
1606 1608 } else {
1607 1609 if (MR_BuildRaidContext(instance, &io_info,
1608 1610 &scsi_raid_io->RaidContext, local_map_ptr))
1609 1611 fp_possible = io_info.fpOkForIo;
1610 1612 }
1611 1613
1612 1614 if (!enable_fp)
1613 1615 fp_possible = 0;
1614 1616
1615 1617 con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
1616 1618 "instance->fast_path_io %d fp_possible %d",
1617 1619 enable_fp, instance->fast_path_io, fp_possible));
1618 1620
1619 1621 if (fp_possible) {
1620 1622
1621 1623 /* Check for DIF enabled LD */
1622 1624 if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1623 1625 /* Prepare 32 Byte CDB for DIF capable Disk */
1624 1626 mrsas_tbolt_prepare_cdb(instance,
1625 1627 scsi_raid_io->CDB.CDB32,
1626 1628 &io_info, scsi_raid_io, start_lba_lo);
1627 1629 } else {
1628 1630 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1629 1631 (uint8_t *)&pd_cmd_cdblen,
1630 1632 io_info.pdBlock, io_info.numBlocks);
1631 1633 ddi_put16(acc_handle,
1632 1634 &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1633 1635 }
1634 1636
1635 1637 ddi_put8(acc_handle, &scsi_raid_io->Function,
1636 1638 MPI2_FUNCTION_SCSI_IO_REQUEST);
1637 1639
1638 1640 ReqDescUnion->SCSIIO.RequestFlags =
1639 1641 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1640 1642 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1641 1643
1642 1644 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1643 1645 uint8_t regLockFlags = ddi_get8(acc_handle,
1644 1646 &scsi_raid_io->RaidContext.regLockFlags);
1645 1647 uint16_t IoFlags = ddi_get16(acc_handle,
1646 1648 &scsi_raid_io->IoFlags);
1647 1649
1648 1650 if (regLockFlags == REGION_TYPE_UNUSED)
1649 1651 ReqDescUnion->SCSIIO.RequestFlags =
1650 1652 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1651 1653 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1652 1654
1653 1655 IoFlags |=
1654 1656 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1655 1657 regLockFlags |=
1656 1658 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1657 1659 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1658 1660
1659 1661 ddi_put8(acc_handle,
1660 1662 &scsi_raid_io->ChainOffset, 0);
1661 1663 ddi_put8(acc_handle,
1662 1664 &scsi_raid_io->RaidContext.nsegType,
1663 1665 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1664 1666 MPI2_TYPE_CUDA));
1665 1667 ddi_put8(acc_handle,
1666 1668 &scsi_raid_io->RaidContext.regLockFlags,
1667 1669 regLockFlags);
1668 1670 ddi_put16(acc_handle,
1669 1671 &scsi_raid_io->IoFlags, IoFlags);
1670 1672 }
1671 1673
1672 1674 if ((instance->load_balance_info[
1673 1675 acmd->device_id].loadBalanceFlag) &&
1674 1676 (io_info.isRead)) {
1675 1677 io_info.devHandle =
1676 1678 get_updated_dev_handle(&instance->
1677 1679 load_balance_info[acmd->device_id],
1678 1680 &io_info);
1679 1681 cmd->load_balance_flag |=
1680 1682 MEGASAS_LOAD_BALANCE_FLAG;
1681 1683 } else {
1682 1684 cmd->load_balance_flag &=
1683 1685 ~MEGASAS_LOAD_BALANCE_FLAG;
1684 1686 }
1685 1687
1686 1688 ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1687 1689 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1688 1690 io_info.devHandle);
1689 1691
1690 1692 } else {
1691 1693 ddi_put8(acc_handle, &scsi_raid_io->Function,
1692 1694 MPI2_FUNCTION_LD_IO_REQUEST);
1693 1695
1694 1696 ddi_put16(acc_handle,
1695 1697 &scsi_raid_io->DevHandle, acmd->device_id);
1696 1698
1697 1699 ReqDescUnion->SCSIIO.RequestFlags =
1698 1700 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1699 1701 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1700 1702
1701 1703 ddi_put16(acc_handle,
1702 1704 &scsi_raid_io->RaidContext.timeoutValue,
1703 1705 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1704 1706
1705 1707 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1706 1708 uint8_t regLockFlags = ddi_get8(acc_handle,
1707 1709 &scsi_raid_io->RaidContext.regLockFlags);
1708 1710
1709 1711 if (regLockFlags == REGION_TYPE_UNUSED) {
1710 1712 ReqDescUnion->SCSIIO.RequestFlags =
1711 1713 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1712 1714 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1713 1715 }
1714 1716
1715 1717 regLockFlags |=
1716 1718 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1717 1719 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1718 1720
1719 1721 ddi_put8(acc_handle,
1720 1722 &scsi_raid_io->RaidContext.nsegType,
1721 1723 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1722 1724 MPI2_TYPE_CUDA));
1723 1725 ddi_put8(acc_handle,
1724 1726 &scsi_raid_io->RaidContext.regLockFlags,
1725 1727 regLockFlags);
1726 1728 }
1727 1729 } /* Not FP */
1728 1730
1729 1731 /* Release SYNC MAP UPDATE lock */
1730 1732 mutex_exit(&instance->sync_map_mtx);
1731 1733
1732 1734
1733 1735 /*
1734 1736 * Set sense buffer physical address/length in scsi_io_request.
1735 1737 */
1736 1738 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1737 1739 cmd->sense_phys_addr1);
1738 1740 ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength,
1739 1741 SENSE_LENGTH);
1740 1742
1741 1743 /* Construct SGL */
1742 1744 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1743 1745 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1744 1746
1745 1747 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1746 1748 scsi_raid_io, &datalen);
1747 1749
1748 1750 ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1749 1751
1750 1752 break;
1751 1753 #ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
1752 1754 } else {
1753 1755 break;
1754 1756 #endif
1755 1757 }
1756 1758 /* Fall through for all non-rd/wr cmds. */
1757 1759 default:
1758 1760 switch (pkt->pkt_cdbp[0]) {
1759 1761 case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1760 1762 return_raid_msg_pkt(instance, cmd);
1761 1763 *cmd_done = 1;
1762 1764 return (NULL);
1763 1765 }
1764 1766
1765 1767 case SCMD_MODE_SENSE:
1766 1768 case SCMD_MODE_SENSE_G1: {
1767 1769 union scsi_cdb *cdbp;
1768 1770 uint16_t page_code;
1769 1771
1770 1772 cdbp = (void *)pkt->pkt_cdbp;
1771 1773 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1772 1774 switch (page_code) {
1773 1775 case 0x3:
1774 1776 case 0x4:
1775 1777 (void) mrsas_mode_sense_build(pkt);
1776 1778 return_raid_msg_pkt(instance, cmd);
1777 1779 *cmd_done = 1;
1778 1780 return (NULL);
1779 1781 }
1780 1782 break;
1781 1783 }
1782 1784
1783 1785 default: {
1784 1786 /*
1785 1787 * Here we need to handle PASSTHRU for
1786 1788 * Logical Devices. Like Inquiry etc.
1787 1789 */
1788 1790
1789 1791 if (!(acmd->islogical)) {
1790 1792
1791 1793 /* Acquire SYNC MAP UPDATE lock */
1792 1794 mutex_enter(&instance->sync_map_mtx);
1793 1795
1794 1796 local_map_ptr =
1795 1797 instance->ld_map[(instance->map_id & 1)];
1796 1798
1797 1799 ddi_put8(acc_handle, &scsi_raid_io->Function,
1798 1800 MPI2_FUNCTION_SCSI_IO_REQUEST);
1799 1801
1800 1802 ReqDescUnion->SCSIIO.RequestFlags =
1801 1803 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1802 1804 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1803 1805
1804 1806 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1805 1807 local_map_ptr->raidMap.
1806 1808 devHndlInfo[acmd->device_id].curDevHdl);
1807 1809
1808 1810
1809 1811 /* Set regLockFlasgs to REGION_TYPE_BYPASS */
1810 1812 ddi_put8(acc_handle,
1811 1813 &scsi_raid_io->RaidContext.regLockFlags, 0);
1812 1814 ddi_put64(acc_handle,
1813 1815 &scsi_raid_io->RaidContext.regLockRowLBA,
1814 1816 0);
1815 1817 ddi_put32(acc_handle,
1816 1818 &scsi_raid_io->RaidContext.regLockLength,
1817 1819 0);
1818 1820 ddi_put8(acc_handle,
1819 1821 &scsi_raid_io->RaidContext.RAIDFlags,
1820 1822 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1821 1823 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1822 1824 ddi_put16(acc_handle,
1823 1825 &scsi_raid_io->RaidContext.timeoutValue,
1824 1826 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1825 1827 ddi_put16(acc_handle,
1826 1828 &scsi_raid_io->RaidContext.ldTargetId,
1827 1829 acmd->device_id);
1828 1830 ddi_put8(acc_handle,
1829 1831 &scsi_raid_io->LUN[1], acmd->lun);
1830 1832
1831 1833 /* Release SYNC MAP UPDATE lock */
1832 1834 mutex_exit(&instance->sync_map_mtx);
1833 1835
1834 1836 } else {
1835 1837 ddi_put8(acc_handle, &scsi_raid_io->Function,
1836 1838 MPI2_FUNCTION_LD_IO_REQUEST);
1837 1839 ddi_put8(acc_handle,
1838 1840 &scsi_raid_io->LUN[1], acmd->lun);
1839 1841 ddi_put16(acc_handle,
1840 1842 &scsi_raid_io->DevHandle, acmd->device_id);
1841 1843 ReqDescUnion->SCSIIO.RequestFlags =
1842 1844 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1843 1845 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1844 1846 }
1845 1847
1846 1848 /*
1847 1849 * Set sense buffer physical address/length in
1848 1850 * scsi_io_request.
1849 1851 */
1850 1852 ddi_put32(acc_handle,
1851 1853 &scsi_raid_io->SenseBufferLowAddress,
1852 1854 cmd->sense_phys_addr1);
1853 1855 ddi_put8(acc_handle,
1854 1856 &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1855 1857
1856 1858 /* Construct SGL */
1857 1859 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1858 1860 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1859 1861
1860 1862 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1861 1863 scsi_raid_io, &datalen);
1862 1864
1863 1865 ddi_put32(acc_handle,
1864 1866 &scsi_raid_io->DataLength, datalen);
1865 1867
1866 1868
1867 1869 con_log(CL_ANN, (CE_CONT,
1868 1870 "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1869 1871 pkt->pkt_cdbp[0], acmd->device_id));
1870 1872 con_log(CL_DLEVEL1, (CE_CONT,
1871 1873 "data length = %x\n",
1872 1874 scsi_raid_io->DataLength));
1873 1875 con_log(CL_DLEVEL1, (CE_CONT,
1874 1876 "cdb length = %x\n",
1875 1877 acmd->cmd_cdblen));
1876 1878 }
1877 1879 break;
1878 1880 }
1879 1881
1880 1882 }
1881 1883
1882 1884 return (cmd);
1883 1885 }
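
The READ/WRITE arm above hand-assembles the big-endian LBA and transfer
length separately for each CDB group, which is easy to get wrong by one
shift. A minimal user-space sketch of the 10-byte (CDB_GROUP1) decode,
with illustrative helper names not taken from the driver, that compiles
on its own and can be checked against known values:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode the 4-byte big-endian LBA at bytes 2..5 of a READ(10)/WRITE(10). */
    static uint32_t
    cdb10_start_lba(const uint8_t *cdb)
    {
            return (((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
                ((uint32_t)cdb[4] << 8) | (uint32_t)cdb[5]);
    }

    /* Decode the 2-byte big-endian transfer length at bytes 7..8. */
    static uint16_t
    cdb10_lba_count(const uint8_t *cdb)
    {
            return (((uint16_t)cdb[7] << 8) | (uint16_t)cdb[8]);
    }

    int
    main(void)
    {
            /* READ(10), 16 blocks starting at LBA 0x123456. */
            uint8_t cdb[10] = { 0x28, 0, 0x00, 0x12, 0x34, 0x56, 0, 0x00, 0x10, 0 };

            /* Expect lba=0x123456 count=16. */
            printf("lba=0x%x count=%u\n", cdb10_start_lba(cdb),
                cdb10_lba_count(cdb));
            return (0);
    }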
1884 1886
1885 -/*
1886 - * mrsas_tbolt_tran_init_pkt - allocate & initialize a scsi_pkt structure
1887 - * @ap:
1888 - * @pkt:
1889 - * @bp:
1890 - * @cmdlen:
1891 - * @statuslen:
1892 - * @tgtlen:
1893 - * @flags:
1894 - * @callback:
1895 - *
1896 - * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1897 - * structure and DMA resources for a target driver request. The
1898 - * tran_init_pkt() entry point is called when the target driver calls the
1899 - * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1900 - * is a request to perform one or more of three possible services:
1901 - * - allocation and initialization of a scsi_pkt structure
1902 - * - allocation of DMA resources for data transfer
1903 - * - reallocation of DMA resources for the next portion of the data transfer
1904 - */
1905 -struct scsi_pkt *
1906 -mrsas_tbolt_tran_init_pkt(struct scsi_address *ap,
1907 - register struct scsi_pkt *pkt,
1908 - struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1909 - int flags, int (*callback)(), caddr_t arg)
1910 -{
1911 - struct scsa_cmd *acmd;
1912 - struct mrsas_instance *instance;
1913 - struct scsi_pkt *new_pkt;
1914 -
1915 - instance = ADDR2MR(ap);
1916 -
1917 - /* step #1 : pkt allocation */
1918 - if (pkt == NULL) {
1919 - pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1920 - tgtlen, sizeof (struct scsa_cmd), callback, arg);
1921 - if (pkt == NULL) {
1922 - return (NULL);
1923 - }
1924 -
1925 - acmd = PKT2CMD(pkt);
1926 -
1927 - /*
1928 - * Initialize the new pkt - we redundantly initialize
1929 - * all the fields for illustrative purposes.
1930 - */
1931 - acmd->cmd_pkt = pkt;
1932 - acmd->cmd_flags = 0;
1933 - acmd->cmd_scblen = statuslen;
1934 - acmd->cmd_cdblen = cmdlen;
1935 - acmd->cmd_dmahandle = NULL;
1936 - acmd->cmd_ncookies = 0;
1937 - acmd->cmd_cookie = 0;
1938 - acmd->cmd_cookiecnt = 0;
1939 - acmd->cmd_nwin = 0;
1940 -
1941 - pkt->pkt_address = *ap;
1942 - pkt->pkt_comp = (void (*)())NULL;
1943 - pkt->pkt_flags = 0;
1944 - pkt->pkt_time = 0;
1945 - pkt->pkt_resid = 0;
1946 - pkt->pkt_state = 0;
1947 - pkt->pkt_statistics = 0;
1948 - pkt->pkt_reason = 0;
1949 - new_pkt = pkt;
1950 - } else {
1951 - acmd = PKT2CMD(pkt);
1952 - new_pkt = NULL;
1953 - }
1954 -
1955 - /* step #2 : dma allocation/move */
1956 - if (bp && bp->b_bcount != 0) {
1957 - if (acmd->cmd_dmahandle == NULL) {
1958 - if (mrsas_dma_alloc(instance, pkt, bp, flags,
1959 - callback) == DDI_FAILURE) {
1960 - if (new_pkt) {
1961 - scsi_hba_pkt_free(ap, new_pkt);
1962 - }
1963 - return ((struct scsi_pkt *)NULL);
1964 - }
1965 - } else {
1966 - if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1967 - return ((struct scsi_pkt *)NULL);
1968 - }
1969 - }
1970 - }
1971 - return (pkt);
1972 -}
1973 -
1974 -
1975 1887 uint32_t
1976 1888 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1977 1889 {
1978 1890 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1979 1891 }
1980 1892
1981 1893 void
1982 1894 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1983 1895 {
1984 1896 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1985 1897 atomic_add_16(&instance->fw_outstanding, 1);
1986 1898
1987 1899 struct scsi_pkt *pkt;
1988 1900
1989 1901 con_log(CL_ANN1,
1990 1902 (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1991 1903
1992 1904 con_log(CL_DLEVEL1, (CE_CONT,
1993 1905 " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1994 1906 con_log(CL_DLEVEL1, (CE_CONT,
1995 1907 " [req desc low part] %x \n",
1996 1908 (uint_t)(req_desc->Words & 0xffffffff)));
1997 1909 con_log(CL_DLEVEL1, (CE_CONT,
1998 1910 " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1999 1911 pkt = cmd->pkt;
2000 1912
2001 1913 if (pkt) {
2002 1914 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2003 1915 "ISSUED CMD TO FW : called : cmd:"
2004 1916 ": %p instance : %p pkt : %p pkt_time : %x\n",
2005 1917 gethrtime(), (void *)cmd, (void *)instance,
2006 1918 (void *)pkt, cmd->drv_pkt_time));
2007 1919 if (instance->adapterresetinprogress) {
2008 1920 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2009 1921 con_log(CL_ANN, (CE_NOTE,
2010 1922 "TBOLT Reset the scsi_pkt timer"));
2011 1923 } else {
2012 1924 push_pending_mfi_pkt(instance, cmd);
2013 1925 }
2014 1926
2015 1927 } else {
2016 1928 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2017 1929 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
2018 1930 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
2019 1931 }
2020 1932
2021 1933 /* Issue the command to the FW */
2022 1934 mutex_enter(&instance->reg_write_mtx);
2023 1935 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2024 1936 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2025 1937 mutex_exit(&instance->reg_write_mtx);
2026 1938 }
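
The request descriptor is one 64-bit value, but the inbound queue port is
a pair of 32-bit registers, so the low/high halves must be written as a
unit under reg_write_mtx; otherwise two CPUs could interleave their
halves. A minimal sketch of the pairing, assuming POSIX threads and plain
variables standing in for the QPORT registers:

    #include <stdint.h>
    #include <pthread.h>

    static pthread_mutex_t reg_write_mtx = PTHREAD_MUTEX_INITIALIZER;
    static volatile uint32_t ib_low_qport, ib_high_qport;

    /* Post one 64-bit request descriptor as a paired low/high write. */
    static void
    issue_request_descriptor(uint64_t words)
    {
            pthread_mutex_lock(&reg_write_mtx);
            ib_low_qport = (uint32_t)words;            /* low half first */
            ib_high_qport = (uint32_t)(words >> 32);   /* high half completes the post */
            pthread_mutex_unlock(&reg_write_mtx);
    }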
2027 1939
2028 1940 /*
2029 1941 * issue_cmd_in_sync_mode
2030 1942 */
2031 1943 int
2032 1944 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
2033 1945 struct mrsas_cmd *cmd)
2034 1946 {
2035 1947 int i;
2036 1948 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2037 1949 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2038 1950
2039 1951 struct mrsas_header *hdr;
2040 1952 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2041 1953
2042 1954 con_log(CL_ANN,
2043 1955 (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
2044 1956 cmd->SMID));
2045 1957
2046 1958
2047 1959 if (instance->adapterresetinprogress) {
2048 1960 cmd->drv_pkt_time = ddi_get16
2049 1961 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2050 1962 if (cmd->drv_pkt_time < debug_timeout_g)
2051 1963 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2052 1964 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
2053 1965 "RESET-IN-PROGRESS, issue cmd & return."));
2054 1966
2055 1967 mutex_enter(&instance->reg_write_mtx);
2056 1968 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2057 1969 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2058 1970 mutex_exit(&instance->reg_write_mtx);
2059 1971
2060 1972 return (DDI_SUCCESS);
2061 1973 } else {
2062 1974 con_log(CL_ANN1, (CE_NOTE,
2063 1975 "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
2064 1976 push_pending_mfi_pkt(instance, cmd);
2065 1977 }
2066 1978
2067 1979 con_log(CL_DLEVEL2, (CE_NOTE,
2068 1980 "HighQport offset :%p",
2069 1981 (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
2070 1982 con_log(CL_DLEVEL2, (CE_NOTE,
2071 1983 "LowQport offset :%p",
2072 1984 (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
2073 1985
2074 1986 cmd->sync_cmd = MRSAS_TRUE;
2075 1987 cmd->cmd_status = ENODATA;
2076 1988
2077 1989
2078 1990 mutex_enter(&instance->reg_write_mtx);
2079 1991 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2080 1992 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2081 1993 mutex_exit(&instance->reg_write_mtx);
2082 1994
2083 1995 con_log(CL_ANN1, (CE_NOTE,
2084 1996 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2085 1997 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2086 1998 (uint_t)(req_desc->Words & 0xffffffff)));
2087 1999
2088 2000 mutex_enter(&instance->int_cmd_mtx);
2089 2001 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2090 2002 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2091 2003 }
2092 2004 mutex_exit(&instance->int_cmd_mtx);
2093 2005
2094 2006
2095 2007 if (i < (msecs - 1)) {
2096 2008 return (DDI_SUCCESS);
2097 2009 } else {
2098 2010 return (DDI_FAILURE);
2099 2011 }
2100 2012 }
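
One caveat in the loop above: cv_wait() has no timeout, so the loop bounds
the number of wakeups rather than elapsed milliseconds. A wall-clock bound
would normally use a timed wait; a minimal user-space sketch of that
alternative, assuming POSIX threads (the intended pattern, not the
driver's code):

    #include <errno.h>
    #include <time.h>
    #include <pthread.h>

    static pthread_mutex_t int_cmd_mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t int_cmd_cv = PTHREAD_COND_INITIALIZER;
    static volatile int cmd_status = ENODATA;

    /* Wait for cmd_status to leave ENODATA, for at most timeout_ms. */
    static int
    wait_for_completion(unsigned timeout_ms)
    {
            struct timespec deadline;
            int rv = 0;

            clock_gettime(CLOCK_REALTIME, &deadline);
            deadline.tv_sec += timeout_ms / 1000;
            deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
            if (deadline.tv_nsec >= 1000000000L) {
                    deadline.tv_sec++;
                    deadline.tv_nsec -= 1000000000L;
            }

            pthread_mutex_lock(&int_cmd_mtx);
            while (cmd_status == ENODATA && rv != ETIMEDOUT)
                    rv = pthread_cond_timedwait(&int_cmd_cv, &int_cmd_mtx,
                        &deadline);
            pthread_mutex_unlock(&int_cmd_mtx);

            return (rv == ETIMEDOUT ? -1 : 0);
    }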
2101 2013
2102 2014 /*
2103 2015 * issue_cmd_in_poll_mode
2104 2016 */
2105 2017 int
2106 2018 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2107 2019 struct mrsas_cmd *cmd)
2108 2020 {
2109 2021 int i;
2110 2022 uint16_t flags;
2111 2023 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2112 2024 struct mrsas_header *frame_hdr;
2113 2025
2114 2026 con_log(CL_ANN,
2115 2027 (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2116 2028 cmd->SMID));
2117 2029
2118 2030 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2119 2031
2120 2032 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2121 2033 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2122 2034 MFI_CMD_STATUS_POLL_MODE);
2123 2035 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2124 2036 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2125 2037 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2126 2038
2127 2039 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2128 2040 (uint_t)(req_desc->Words & 0xffffffff)));
2129 2041 con_log(CL_ANN1, (CE_NOTE,
2130 2042 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2131 2043
2132 2044 /* issue the frame using inbound queue port */
2133 2045 mutex_enter(&instance->reg_write_mtx);
2134 2046 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2135 2047 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2136 2048 mutex_exit(&instance->reg_write_mtx);
2137 2049
2138 2050 for (i = 0; i < msecs && (
2139 2051 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2140 2052 == MFI_CMD_STATUS_POLL_MODE); i++) {
2141 2053 /* wait for cmd_status to change from 0xFF */
2142 2054 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2143 2055 }
2144 2056
2145 2057 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2146 2058 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2147 2059 con_log(CL_ANN1, (CE_NOTE,
2148 2060 " cmd failed %" PRIx64, (req_desc->Words)));
2149 2061 return (DDI_FAILURE);
2150 2062 }
2151 2063
2152 2064 return (DDI_SUCCESS);
2153 2065 }
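
Poll mode replaces the interrupt handshake entirely: the driver seeds
cmd_status with a sentinel (0xFF), tells the FW not to post a reply
descriptor, and spins until the FW overwrites the status byte in the
frame. The same shape in a user-space sketch, with usleep() standing in
for drv_usecwait():

    #include <stdint.h>
    #include <unistd.h>

    #define POLL_MODE_SENTINEL      0xFF

    static volatile uint8_t cmd_status = POLL_MODE_SENTINEL;

    /* Poll up to timeout_ms for the FW to overwrite the sentinel status. */
    static int
    poll_for_completion(unsigned timeout_ms)
    {
            unsigned i;

            for (i = 0; i < timeout_ms && cmd_status == POLL_MODE_SENTINEL; i++)
                    usleep(1000);   /* 1 ms per iteration */

            return (cmd_status == POLL_MODE_SENTINEL ? -1 : 0);
    }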
2154 2066
2155 2067 void
2156 2068 tbolt_enable_intr(struct mrsas_instance *instance)
2157 2069 {
2158 2070 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2159 2071 /* writel(~0, ®s->outbound_intr_status); */
2160 2072 /* readl(®s->outbound_intr_status); */
2161 2073
2162 2074 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2163 2075
2164 2076 /* dummy read to force PCI flush */
2165 2077 (void) RD_OB_INTR_MASK(instance);
2166 2078
2167 2079 }
2168 2080
2169 2081 void
2170 2082 tbolt_disable_intr(struct mrsas_instance *instance)
2171 2083 {
2172 2084 uint32_t mask = 0xFFFFFFFF;
2173 2085
2174 2086 WR_OB_INTR_MASK(mask, instance);
2175 2087
2176 2088 /* Dummy readl to force pci flush */
2177 2089
2178 2090 (void) RD_OB_INTR_MASK(instance);
2179 2091 }
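
Both routines finish with a throwaway read because PCI writes are posted;
the read forces the mask update to reach the device before the function
returns. The idiom in isolation, with a stand-in register pointer (the
driver itself goes through WR_OB_INTR_MASK/RD_OB_INTR_MASK and a DDI
access handle):

    #include <stdint.h>

    /* Write an interrupt-mask register, then flush the posted PCI write. */
    static void
    set_intr_mask(volatile uint32_t *reg, uint32_t mask)
    {
            *reg = mask;
            (void) *reg;    /* dummy read forces the write to the device */
    }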
2180 2092
2181 2093
2182 2094 int
2183 2095 tbolt_intr_ack(struct mrsas_instance *instance)
2184 2096 {
2185 2097 uint32_t status;
2186 2098
2187 2099 /* check if it is our interrupt */
2188 2100 status = RD_OB_INTR_STATUS(instance);
2189 2101 con_log(CL_ANN1, (CE_NOTE,
2190 2102 "chkpnt: Entered tbolt_intr_ack status = %d", status));
2191 2103
2192 2104 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2193 2105 return (DDI_INTR_UNCLAIMED);
2194 2106 }
2195 2107
2196 2108 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2197 2109 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2198 2110 return (DDI_INTR_UNCLAIMED);
2199 2111 }
2200 2112
2201 2113 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2202 2114 /* clear the interrupt by writing back the same value */
2203 2115 WR_OB_INTR_STATUS(status, instance);
2204 2116 /* dummy READ */
2205 2117 (void) RD_OB_INTR_STATUS(instance);
2206 2118 }
2207 2119 return (DDI_INTR_CLAIMED);
2208 2120 }
2209 2121
2210 2122 /*
2211 2123 * get_raid_msg_pkt : Get a command from the free pool
2212 2124 * After successful allocation, the caller of this routine
2213 2125 * must clear the frame buffer (memset to zero) before
2214 2126 * using the packet further.
2215 2127 *
2216 2128 * ***** Note *****
2217 2129 * After clearing the frame buffer the context id of the
2218 2130 * frame buffer SHOULD be restored back.
2219 2131 */
2220 2132
2221 2133 struct mrsas_cmd *
2222 2134 get_raid_msg_pkt(struct mrsas_instance *instance)
2223 2135 {
2224 2136 mlist_t *head = &instance->cmd_pool_list;
2225 2137 struct mrsas_cmd *cmd = NULL;
2226 2138
2227 2139 mutex_enter(&instance->cmd_pool_mtx);
2228 2140 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2229 2141
2230 2142
2231 2143 if (!mlist_empty(head)) {
2232 2144 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2233 2145 mlist_del_init(head->next);
2234 2146 }
2235 2147 if (cmd != NULL) {
2236 2148 cmd->pkt = NULL;
2237 2149 cmd->retry_count_for_ocr = 0;
2238 2150 cmd->drv_pkt_time = 0;
2239 2151 }
2240 2152 mutex_exit(&instance->cmd_pool_mtx);
2241 2153
2242 2154 if (cmd != NULL)
2243 2155 bzero(cmd->scsi_io_request,
2244 2156 sizeof (Mpi2RaidSCSIIORequest_t));
2245 2157 return (cmd);
2246 2158 }
2247 2159
2248 2160 struct mrsas_cmd *
2249 2161 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2250 2162 {
2251 2163 mlist_t *head = &instance->cmd_app_pool_list;
2252 2164 struct mrsas_cmd *cmd = NULL;
2253 2165
2254 2166 mutex_enter(&instance->cmd_app_pool_mtx);
2255 2167 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2256 2168
2257 2169 if (!mlist_empty(head)) {
2258 2170 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2259 2171 mlist_del_init(head->next);
2260 2172 }
2261 2173 if (cmd != NULL) {
2262 2174 cmd->retry_count_for_ocr = 0;
2263 2175 cmd->drv_pkt_time = 0;
2264 2176 cmd->pkt = NULL;
2265 2177 cmd->request_desc = NULL;
2266 2178
2267 2179 }
2268 2180
2269 2181 mutex_exit(&instance->cmd_app_pool_mtx);
2270 2182
2271 2183 if (cmd != NULL) {
2272 2184 bzero(cmd->scsi_io_request,
2273 2185 sizeof (Mpi2RaidSCSIIORequest_t));
2274 2186 }
2275 2187
2276 2188 return (cmd);
2277 2189 }
2278 2190
2279 2191 /*
2280 2192 * return_raid_msg_pkt : Return a cmd to free command pool
2281 2193 */
2282 2194 void
2283 2195 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2284 2196 {
2285 2197 mutex_enter(&instance->cmd_pool_mtx);
2286 2198 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2287 2199
2288 2200
2289 2201 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2290 2202
2291 2203 mutex_exit(&instance->cmd_pool_mtx);
2292 2204 }
2293 2205
2294 2206 void
2295 2207 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2296 2208 {
2297 2209 mutex_enter(&instance->cmd_app_pool_mtx);
2298 2210 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2299 2211
2300 2212 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2301 2213
2302 2214 mutex_exit(&instance->cmd_app_pool_mtx);
2303 2215 }
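
The four routines above are a straightforward locked free pool: get pops a
command off the list head, return pushes it back, and the caller owns the
command in between. The same pattern in a self-contained sketch, using a
singly linked list and hypothetical type names:

    #include <stddef.h>
    #include <pthread.h>

    struct cmd {
            struct cmd *next;
            /* ... per-command state ... */
    };

    static pthread_mutex_t pool_mtx = PTHREAD_MUTEX_INITIALIZER;
    static struct cmd *pool_head;

    /* Pop one command from the pool; NULL means the pool is exhausted. */
    static struct cmd *
    get_pkt(void)
    {
            struct cmd *c;

            pthread_mutex_lock(&pool_mtx);
            c = pool_head;
            if (c != NULL)
                    pool_head = c->next;
            pthread_mutex_unlock(&pool_mtx);
            return (c);
    }

    /* Push a command back onto the pool. */
    static void
    return_pkt(struct cmd *c)
    {
            pthread_mutex_lock(&pool_mtx);
            c->next = pool_head;
            pool_head = c;
            pthread_mutex_unlock(&pool_mtx);
    }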
2304 2216
2305 2217
2306 2218 void
2307 2219 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2308 2220 struct mrsas_cmd *cmd)
2309 2221 {
2310 2222 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2311 2223 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2312 2224 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2313 2225 uint32_t index;
2314 2226 ddi_acc_handle_t acc_handle =
2315 2227 instance->mpi2_frame_pool_dma_obj.acc_handle;
2316 2228
2317 2229 if (!instance->tbolt) {
2318 2230 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2319 2231 return;
2320 2232 }
2321 2233
2322 2234 index = cmd->index;
2323 2235
2324 2236 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2325 2237
2326 2238 if (!ReqDescUnion) {
2327 2239 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2328 2240 return;
2329 2241 }
2330 2242
2331 2243 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2332 2244
2333 2245 ReqDescUnion->Words = 0;
2334 2246
2335 2247 ReqDescUnion->SCSIIO.RequestFlags =
2336 2248 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2337 2249 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2338 2250
2339 2251 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2340 2252
2341 2253 cmd->request_desc = ReqDescUnion;
2342 2254
2343 2255 /* get raid message frame pointer */
2344 2256 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2345 2257
2346 2258 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2347 2259 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2348 2260 &scsi_raid_io->SGL.IeeeChain;
2349 2261 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2350 2262 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2351 2263 }
2352 2264
2353 2265 ddi_put8(acc_handle, &scsi_raid_io->Function,
2354 2266 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2355 2267
2356 2268 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2357 2269 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2358 2270
2359 2271 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2360 2272 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2361 2273
2362 2274 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2363 2275 cmd->sense_phys_addr1);
2364 2276
2365 2277
2366 2278 scsi_raid_io_sgl_ieee =
2367 2279 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2368 2280
2369 2281 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2370 2282 (U64)cmd->frame_phys_addr);
2371 2283
2372 2284 ddi_put8(acc_handle,
2373 2285 &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2374 2286 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2375 2287 /* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2376 2288 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2377 2289
2378 2290 con_log(CL_ANN1, (CE_NOTE,
2379 2291 "[MFI CMD PHY ADDRESS]:%" PRIx64,
2380 2292 scsi_raid_io_sgl_ieee->Address));
2381 2293 con_log(CL_ANN1, (CE_NOTE,
2382 2294 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2383 2295 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2384 2296 scsi_raid_io_sgl_ieee->Flags));
2385 2297 }
2386 2298
2387 2299
2388 2300 void
2389 2301 tbolt_complete_cmd(struct mrsas_instance *instance,
2390 2302 struct mrsas_cmd *cmd)
2391 2303 {
2392 2304 uint8_t status;
2393 2305 uint8_t extStatus;
2394 2306 uint8_t arm;
2395 2307 struct scsa_cmd *acmd;
2396 2308 struct scsi_pkt *pkt;
2397 2309 struct scsi_arq_status *arqstat;
2398 2310 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2399 2311 LD_LOAD_BALANCE_INFO *lbinfo;
2400 2312 ddi_acc_handle_t acc_handle =
2401 2313 instance->mpi2_frame_pool_dma_obj.acc_handle;
2402 2314
2403 2315 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2404 2316
2405 2317 status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2406 2318 extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2407 2319
2408 2320 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2409 2321 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2410 2322
2411 2323 if (status != MFI_STAT_OK) {
2412 2324 con_log(CL_ANN, (CE_WARN,
2413 2325 "IO Cmd Failed SMID %x", cmd->SMID));
2414 2326 } else {
2415 2327 con_log(CL_ANN, (CE_NOTE,
2416 2328 "IO Cmd Success SMID %x", cmd->SMID));
2417 2329 }
2418 2330
2419 2331 /* regular commands */
2420 2332
2421 2333 switch (ddi_get8(acc_handle, &scsi_raid_io->Function)) {
2422 2334
2423 2335 case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2424 2336 acmd = (struct scsa_cmd *)cmd->cmd;
2425 2337 lbinfo = &instance->load_balance_info[acmd->device_id];
2426 2338
2427 2339 if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2428 2340 arm = lbinfo->raid1DevHandle[0] ==
2429 2341 scsi_raid_io->DevHandle ? 0 : 1;
2430 2342
2431 2343 lbinfo->scsi_pending_cmds[arm]--;
2432 2344 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2433 2345 }
2434 2346 con_log(CL_DLEVEL3, (CE_NOTE,
2435 2347 "FastPath IO Completion Success "));
2436 2348 /* FALLTHRU */
2437 2349
2438 2350 case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
2439 2351 acmd = (struct scsa_cmd *)cmd->cmd;
2440 2352 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2441 2353
2442 2354 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2443 2355 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2444 2356 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2445 2357 acmd->cmd_dma_offset, acmd->cmd_dma_len,
2446 2358 DDI_DMA_SYNC_FORCPU);
2447 2359 }
2448 2360 }
2449 2361
2450 2362 pkt->pkt_reason = CMD_CMPLT;
2451 2363 pkt->pkt_statistics = 0;
2452 2364 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2453 2365 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2454 2366
2455 2367 con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2456 2368 "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2457 2369 ((acmd->islogical) ? "LD" : "PD"),
2458 2370 acmd->cmd_dmacount, cmd->SMID, status));
2459 2371
2460 2372 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2461 2373 struct scsi_inquiry *inq;
2462 2374
2463 2375 if (acmd->cmd_dmacount != 0) {
2464 2376 bp_mapin(acmd->cmd_buf);
2465 2377 inq = (struct scsi_inquiry *)
2466 2378 acmd->cmd_buf->b_un.b_addr;
2467 2379
2468 2380 /* don't expose physical drives to OS */
2469 2381 if (acmd->islogical &&
2470 2382 (status == MFI_STAT_OK)) {
2471 2383 display_scsi_inquiry((caddr_t)inq);
2472 2384 #ifdef PDSUPPORT
2473 2385 } else if ((status == MFI_STAT_OK) &&
2474 2386 inq->inq_dtype == DTYPE_DIRECT) {
2475 2387 display_scsi_inquiry((caddr_t)inq);
2476 2388 #endif
2477 2389 } else {
2478 2390 /* for physical disk */
2479 2391 status = MFI_STAT_DEVICE_NOT_FOUND;
2480 2392 }
2481 2393 }
2482 2394 }
2483 2395
2484 2396 switch (status) {
2485 2397 case MFI_STAT_OK:
2486 2398 pkt->pkt_scbp[0] = STATUS_GOOD;
2487 2399 break;
2488 2400 case MFI_STAT_LD_CC_IN_PROGRESS:
2489 2401 case MFI_STAT_LD_RECON_IN_PROGRESS:
2490 2402 pkt->pkt_scbp[0] = STATUS_GOOD;
2491 2403 break;
2492 2404 case MFI_STAT_LD_INIT_IN_PROGRESS:
2493 2405 pkt->pkt_reason = CMD_TRAN_ERR;
2494 2406 break;
2495 2407 case MFI_STAT_SCSI_IO_FAILED:
2496 2408 cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
2497 2409 pkt->pkt_reason = CMD_TRAN_ERR;
2498 2410 break;
2499 2411 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2500 2412 con_log(CL_ANN, (CE_WARN,
2501 2413 "tbolt_complete_cmd: scsi_done with error"));
2502 2414
2503 2415 pkt->pkt_reason = CMD_CMPLT;
2504 2416 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2505 2417
2506 2418 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2507 2419 con_log(CL_ANN,
2508 2420 (CE_WARN, "TEST_UNIT_READY fail"));
2509 2421 } else {
2510 2422 pkt->pkt_state |= STATE_ARQ_DONE;
2511 2423 arqstat = (void *)(pkt->pkt_scbp);
2512 2424 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2513 2425 arqstat->sts_rqpkt_resid = 0;
2514 2426 arqstat->sts_rqpkt_state |=
2515 2427 STATE_GOT_BUS | STATE_GOT_TARGET
2516 2428 | STATE_SENT_CMD
2517 2429 | STATE_XFERRED_DATA;
2518 2430 *(uint8_t *)&arqstat->sts_rqpkt_status =
2519 2431 STATUS_GOOD;
2520 2432 con_log(CL_ANN1,
2521 2433 (CE_NOTE, "Copying Sense data %x",
2522 2434 cmd->SMID));
2523 2435
2524 2436 ddi_rep_get8(acc_handle,
2525 2437 (uint8_t *)&(arqstat->sts_sensedata),
2526 2438 cmd->sense1,
2527 2439 sizeof (struct scsi_extended_sense),
2528 2440 DDI_DEV_AUTOINCR);
2529 2441
2530 2442 }
2531 2443 break;
2532 2444 case MFI_STAT_LD_OFFLINE:
2533 2445 cmn_err(CE_WARN,
2534 2446 "tbolt_complete_cmd: ld offline "
2535 2447 "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2536 2448 /* UNDO: */
2537 2449 ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2538 2450
2539 2451 ddi_get16(acc_handle,
2540 2452 &scsi_raid_io->RaidContext.ldTargetId),
2541 2453
2542 2454 ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2543 2455
2544 2456 pkt->pkt_reason = CMD_DEV_GONE;
2545 2457 pkt->pkt_statistics = STAT_DISCON;
2546 2458 break;
2547 2459 case MFI_STAT_DEVICE_NOT_FOUND:
2548 2460 con_log(CL_ANN, (CE_CONT,
2549 2461 "tbolt_complete_cmd: device not found error"));
2550 2462 pkt->pkt_reason = CMD_DEV_GONE;
2551 2463 pkt->pkt_statistics = STAT_DISCON;
2552 2464 break;
2553 2465
2554 2466 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2555 2467 pkt->pkt_state |= STATE_ARQ_DONE;
2556 2468 pkt->pkt_reason = CMD_CMPLT;
2557 2469 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2558 2470
2559 2471 arqstat = (void *)(pkt->pkt_scbp);
2560 2472 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2561 2473 arqstat->sts_rqpkt_resid = 0;
2562 2474 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2563 2475 | STATE_GOT_TARGET | STATE_SENT_CMD
2564 2476 | STATE_XFERRED_DATA;
2565 2477 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2566 2478
2567 2479 arqstat->sts_sensedata.es_valid = 1;
2568 2480 arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2569 2481 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2570 2482
2571 2483 /*
2572 2484 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2573 2485 * ASC: 0x21h; ASCQ: 0x00h;
2574 2486 */
2575 2487 arqstat->sts_sensedata.es_add_code = 0x21;
2576 2488 arqstat->sts_sensedata.es_qual_code = 0x00;
2577 2489 break;
2578 2490 case MFI_STAT_INVALID_CMD:
2579 2491 case MFI_STAT_INVALID_DCMD:
2580 2492 case MFI_STAT_INVALID_PARAMETER:
2581 2493 case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2582 2494 default:
2583 2495 cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!");
2584 2496 pkt->pkt_reason = CMD_TRAN_ERR;
2585 2497
2586 2498 break;
2587 2499 }
2588 2500
2589 2501 atomic_add_16(&instance->fw_outstanding, (-1));
2590 2502
2591 2503 (void) mrsas_common_check(instance, cmd);
2592 2504 if (acmd->cmd_dmahandle) {
2593 2505 if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2594 2506 DDI_SUCCESS) {
2595 2507 ddi_fm_service_impact(instance->dip,
2596 2508 DDI_SERVICE_UNAFFECTED);
2597 2509 pkt->pkt_reason = CMD_TRAN_ERR;
2598 2510 pkt->pkt_statistics = 0;
2599 2511 }
2600 2512 }
2601 2513
2602 2514 /* Call the callback routine */
2603 2515 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2604 2516 (*pkt->pkt_comp)(pkt);
2605 2517
2606 2518 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2607 2519
2608 2520 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2609 2521
2610 2522 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2611 2523
2612 2524 return_raid_msg_pkt(instance, cmd);
2613 2525 break;
2614 2526 }
2615 2527 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
2616 2528
2617 2529 if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2618 2530 cmd->frame->dcmd.mbox.b[1] == 1) {
2619 2531
2620 2532 mutex_enter(&instance->sync_map_mtx);
2621 2533
2622 2534 con_log(CL_ANN, (CE_NOTE,
2623 2535 "LDMAP sync command SMID RECEIVED 0x%X",
2624 2536 cmd->SMID));
2625 2537 if (cmd->frame->hdr.cmd_status != 0) {
2626 2538 cmn_err(CE_WARN,
2627 2539 "map sync failed, status = 0x%x.",
2628 2540 cmd->frame->hdr.cmd_status);
2629 2541 } else {
2630 2542 instance->map_id++;
2631 2543 cmn_err(CE_NOTE,
2632 2544 "map sync received, switched map_id to %"
2633 2545 PRIu64 " \n", instance->map_id);
2634 2546 }
2635 2547
2636 2548 if (MR_ValidateMapInfo(instance->ld_map[
2637 2549 (instance->map_id & 1)],
2638 2550 instance->load_balance_info)) {
2639 2551 instance->fast_path_io = 1;
2640 2552 } else {
2641 2553 instance->fast_path_io = 0;
2642 2554 }
2643 2555
2644 2556 con_log(CL_ANN, (CE_NOTE,
2645 2557 "instance->fast_path_io %d",
2646 2558 instance->fast_path_io));
2647 2559
2648 2560 instance->unroll.syncCmd = 0;
2649 2561
2650 2562 if (instance->map_update_cmd == cmd) {
2651 2563 return_raid_msg_pkt(instance, cmd);
2652 2564 atomic_add_16(&instance->fw_outstanding, (-1));
2653 2565 (void) mrsas_tbolt_sync_map_info(instance);
2654 2566 }
2655 2567
2656 2568 cmn_err(CE_NOTE, "LDMAP sync completed.");
2657 2569 mutex_exit(&instance->sync_map_mtx);
2658 2570 break;
2659 2571 }
2660 2572
2661 2573 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2662 2574 con_log(CL_ANN1, (CE_CONT,
2663 2575 "AEN command SMID RECEIVED 0x%X",
2664 2576 cmd->SMID));
2665 2577 if ((instance->aen_cmd == cmd) &&
2666 2578 (instance->aen_cmd->abort_aen)) {
2667 2579 con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2668 2580 "aborted_aen returned"));
2669 2581 } else {
2670 2582 atomic_add_16(&instance->fw_outstanding, (-1));
2671 2583 service_mfi_aen(instance, cmd);
2672 2584 }
2673 2585 }
2674 2586
2675 2587 if (cmd->sync_cmd == MRSAS_TRUE) {
2676 2588 con_log(CL_ANN1, (CE_CONT,
2677 2589 "Sync-mode Command Response SMID RECEIVED 0x%X",
2678 2590 cmd->SMID));
2679 2591
2680 2592 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2681 2593 } else {
2682 2594 con_log(CL_ANN, (CE_CONT,
2683 2595 "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2684 2596 cmd->SMID));
2685 2597 }
2686 2598 break;
2687 2599 default:
2688 2600 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2689 2601 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2690 2602
2691 2603 /* free message */
2692 2604 con_log(CL_ANN,
2693 2605 (CE_NOTE, "tbolt_complete_cmd: Unknown Type!"));
2694 2606 break;
2695 2607 }
2696 2608 }
2697 2609
2698 2610 uint_t
2699 2611 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2700 2612 {
2701 2613 uint8_t replyType;
2702 2614 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2703 2615 Mpi2ReplyDescriptorsUnion_t *desc;
2704 2616 uint16_t smid;
2705 2617 union desc_value d_val;
2706 2618 struct mrsas_cmd *cmd;
2707 2619
2708 2620 struct mrsas_header *hdr;
2709 2621 struct scsi_pkt *pkt;
2710 2622
2711 2623 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2712 2624 0, 0, DDI_DMA_SYNC_FORDEV);
2713 2625
2714 2626 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2715 2627 0, 0, DDI_DMA_SYNC_FORCPU);
2716 2628
2717 2629 desc = instance->reply_frame_pool;
2718 2630 desc += instance->reply_read_index;
2719 2631
2720 2632 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2721 2633 replyType = replyDesc->ReplyFlags &
2722 2634 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2723 2635
2724 2636 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2725 2637 return (DDI_INTR_UNCLAIMED);
2726 2638
2727 2639 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2728 2640 != DDI_SUCCESS) {
2729 2641 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2730 2642 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2731 2643 con_log(CL_ANN1,
2732 2644 (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2733 2645 "FMA check, returning DDI_INTR_UNCLAIMED"));
2734 2646 return (DDI_INTR_CLAIMED);
2735 2647 }
2736 2648
2737 2649 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
2738 2650 (void *)desc, desc->Words));
2739 2651
2740 2652 d_val.word = desc->Words;
2741 2653
2742 2654
2743 2655 /* Read Reply descriptor */
2744 2656 while ((d_val.u1.low != 0xffffffff) &&
2745 2657 (d_val.u1.high != 0xffffffff)) {
2746 2658
2747 2659 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2748 2660 0, 0, DDI_DMA_SYNC_FORCPU);
2749 2661
2750 2662 smid = replyDesc->SMID;
2751 2663
2752 2664 if (!smid || smid > instance->max_fw_cmds + 1) {
2753 2665 con_log(CL_ANN1, (CE_NOTE,
2754 2666 "Reply Desc at Break = %p Words = %" PRIx64,
2755 2667 (void *)desc, desc->Words));
2756 2668 break;
2757 2669 }
2758 2670
2759 2671 cmd = instance->cmd_list[smid - 1];
2760 2672 if (!cmd) {
2761 2673 con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2762 2674 "outstanding_cmd: Invalid command "
2763 2675 " or Poll commad Received in completion path"));
2764 2676 } else {
2765 2677 mutex_enter(&instance->cmd_pend_mtx);
2766 2678 if (cmd->sync_cmd == MRSAS_TRUE) {
2767 2679 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2768 2680 if (hdr) {
2769 2681 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2770 2682 "tbolt_process_outstanding_cmd:"
2771 2683 " mlist_del_init(&cmd->list)."));
2772 2684 mlist_del_init(&cmd->list);
2773 2685 }
2774 2686 } else {
2775 2687 pkt = cmd->pkt;
2776 2688 if (pkt) {
2777 2689 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2778 2690 "tbolt_process_outstanding_cmd:"
2779 2691 "mlist_del_init(&cmd->list)."));
2780 2692 mlist_del_init(&cmd->list);
2781 2693 }
2782 2694 }
2783 2695
2784 2696 mutex_exit(&instance->cmd_pend_mtx);
2785 2697
2786 2698 tbolt_complete_cmd(instance, cmd);
2787 2699 }
2788 2700 /* set it back to all 1s. */
2789 2701 desc->Words = -1LL;
2790 2702
2791 2703 instance->reply_read_index++;
2792 2704
2793 2705 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2794 2706 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2795 2707 instance->reply_read_index = 0;
2796 2708 }
2797 2709
2798 2710 /* Get the next reply descriptor */
2799 2711 if (!instance->reply_read_index)
2800 2712 desc = instance->reply_frame_pool;
2801 2713 else
2802 2714 desc++;
2803 2715
2804 2716 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2805 2717
2806 2718 d_val.word = desc->Words;
2807 2719
2808 2720 con_log(CL_ANN1, (CE_NOTE,
2809 2721 "Next Reply Desc = %p Words = %" PRIx64,
2810 2722 (void *)desc, desc->Words));
2811 2723
2812 2724 replyType = replyDesc->ReplyFlags &
2813 2725 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2814 2726
2815 2727 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2816 2728 break;
2817 2729
2818 2730 } /* End of while loop. */
2819 2731
2820 2732 /* update replyIndex to FW */
2821 2733 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2822 2734
2823 2735
2824 2736 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2825 2737 0, 0, DDI_DMA_SYNC_FORDEV);
2826 2738
2827 2739 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2828 2740 0, 0, DDI_DMA_SYNC_FORCPU);
2829 2741 return (DDI_INTR_CLAIMED);
2830 2742 }
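
The completion path above is a single-consumer ring walk: descriptors are
consumed until the all-ones sentinel, each slot is re-armed to ~0, and the
read index wraps at the queue depth before being written back to the FW.
A compilable sketch of just that loop, assuming a uint64_t ring where ~0
marks an unused slot and the SMID sits in bytes 2-3 of the descriptor (as
in the MPI2 reply descriptor layout):

    #include <stdint.h>
    #include <stdio.h>

    #define REPLY_Q_DEPTH   8
    #define UNUSED_SLOT     (~(uint64_t)0)

    static uint64_t reply_ring[REPLY_Q_DEPTH];
    static unsigned reply_read_index;

    /* Consume completed descriptors; return how many were processed. */
    static unsigned
    drain_reply_ring(void)
    {
            unsigned n = 0;

            while (reply_ring[reply_read_index] != UNUSED_SLOT) {
                    uint64_t words = reply_ring[reply_read_index];

                    printf("completed SMID %u\n",
                        (unsigned)((words >> 16) & 0xffff));
                    reply_ring[reply_read_index] = UNUSED_SLOT; /* re-arm slot */
                    if (++reply_read_index >= REPLY_Q_DEPTH)
                            reply_read_index = 0;   /* wrap around */
                    n++;
            }
            return (n);     /* caller now writes the index back to the FW */
    }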
2831 2743
2832 2744
2833 2745
2834 2746
2835 2747 /*
2836 2748 * tbolt_complete_cmd_in_sync_mode - Completes an internal command
2837 2749 * @instance: Adapter soft state
2838 2750 * @cmd: Command to be completed
2839 2751 *
2840 2752 * The issue_cmd_in_sync_mode() function waits for a command to complete
2841 2753 * after it issues a command. This function wakes up that waiting routine by
2842 2754 * calling cv_broadcast() on the condition variable.
2843 2755 */
2844 2756 void
2845 2757 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2846 2758 struct mrsas_cmd *cmd)
2847 2759 {
2848 2760
2849 2761 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2850 2762 &cmd->frame->io.cmd_status);
2851 2763
2852 2764 cmd->sync_cmd = MRSAS_FALSE;
2853 2765
2854 2766 mutex_enter(&instance->int_cmd_mtx);
2855 2767 if (cmd->cmd_status == ENODATA) {
2856 2768 cmd->cmd_status = 0;
2857 2769 }
2858 2770 cv_broadcast(&instance->int_cmd_cv);
2859 2771 mutex_exit(&instance->int_cmd_mtx);
2860 2772
2861 2773 }
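
This is the producer half of the timed wait sketched earlier: the status
is published under the same mutex before the broadcast, so a waiter can
never check the status and then sleep through the wakeup. Reusing that
sketch's declarations:

    /* Publish the command status and wake all synchronous waiters. */
    static void
    complete_sync_cmd(int status)
    {
            pthread_mutex_lock(&int_cmd_mtx);
            cmd_status = (status == ENODATA) ? 0 : status;
            pthread_cond_broadcast(&int_cmd_cv);
            pthread_mutex_unlock(&int_cmd_mtx);
    }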
2862 2774
2863 2775 /*
2864 2776 * mrsas_tbolt_get_ld_map_info - Returns ld_map structure
2865 2777 * instance: Adapter soft state
2866 2778 *
2867 2779 * Issues an internal command (DCMD) to get the FW's RAID map
2868 2780 * structure. This information is mainly used to decide whether
2869 2781 * fast-path I/O can be used for a given target.
2870 2782 */
2871 2783 int
2872 2784 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2873 2785 {
2874 2786 int ret = 0;
2875 2787 struct mrsas_cmd *cmd = NULL;
2876 2788 struct mrsas_dcmd_frame *dcmd;
2877 2789 MR_FW_RAID_MAP_ALL *ci;
2878 2790 uint32_t ci_h = 0;
2879 2791 U32 size_map_info;
2880 2792
2881 2793 cmd = get_raid_msg_pkt(instance);
2882 2794
2883 2795 if (cmd == NULL) {
2884 2796 cmn_err(CE_WARN,
2885 2797 "Failed to get a cmd from free-pool in get_ld_map_info()");
2886 2798 return (DDI_FAILURE);
2887 2799 }
2888 2800
2889 2801 dcmd = &cmd->frame->dcmd;
2890 2802
2891 2803 size_map_info = sizeof (MR_FW_RAID_MAP) +
2892 2804 (sizeof (MR_LD_SPAN_MAP) *
2893 2805 (MAX_LOGICAL_DRIVES - 1));
2894 2806
2895 2807 con_log(CL_ANN, (CE_NOTE,
2896 2808 "size_map_info : 0x%x", size_map_info));
2897 2809
2898 2810 ci = instance->ld_map[(instance->map_id & 1)];
2899 2811 ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2900 2812
2901 2813 if (!ci) {
2902 2814 cmn_err(CE_WARN, "Failed to alloc mem for ld_map_info");
2903 2815 return_raid_msg_pkt(instance, cmd);
2904 2816 return (-1);
2905 2817 }
2906 2818
2907 2819 bzero(ci, sizeof (*ci));
2908 2820 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2909 2821
2910 2822 dcmd->cmd = MFI_CMD_OP_DCMD;
2911 2823 dcmd->cmd_status = 0xFF;
2912 2824 dcmd->sge_count = 1;
2913 2825 dcmd->flags = MFI_FRAME_DIR_READ;
2914 2826 dcmd->timeout = 0;
2915 2827 dcmd->pad_0 = 0;
2916 2828 dcmd->data_xfer_len = size_map_info;
2917 2829 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2918 2830 dcmd->sgl.sge32[0].phys_addr = ci_h;
2919 2831 dcmd->sgl.sge32[0].length = size_map_info;
2920 2832
2921 2833
2922 2834 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2923 2835
2924 2836 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2925 2837 ret = 0;
2926 2838 con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2927 2839 } else {
2928 2840 cmn_err(CE_WARN, "Get LD Map Info failed");
2929 2841 ret = -1;
2930 2842 }
2931 2843
2932 2844 return_raid_msg_pkt(instance, cmd);
2933 2845
2934 2846 return (ret);
2935 2847 }
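
The size_map_info arithmetic is the classic struct-with-trailing-array
idiom: MR_FW_RAID_MAP already contains one MR_LD_SPAN_MAP, so only
MAX_LOGICAL_DRIVES - 1 extra entries are added. The same computation with
stand-in types, just to make the sizing visible:

    #include <stdio.h>

    #define MAX_LDS 64

    struct span_map { unsigned span[8]; };
    struct raid_map {
            unsigned total_size;
            unsigned ld_count;
            struct span_map ld_span_map[1]; /* first entry lives in the base */
    };

    int
    main(void)
    {
            size_t size_map_info = sizeof (struct raid_map) +
                sizeof (struct span_map) * (MAX_LDS - 1);

            printf("map buffer: %zu bytes\n", size_map_info);
            return (0);
    }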
2936 2848
2937 2849 void
2938 2850 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2939 2851 {
2940 2852 uint32_t i;
2941 2853 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2942 2854 union desc_value d_val;
2943 2855
2944 2856 reply_desc = instance->reply_frame_pool;
2945 2857
2946 2858 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2947 2859 d_val.word = reply_desc->Words;
2948 2860 con_log(CL_DLEVEL3, (CE_NOTE,
2949 2861 "i=%d, %x:%x",
2950 2862 i, d_val.u1.high, d_val.u1.low));
2951 2863 }
2952 2864 }
2953 2865
2954 2866 /*
2955 2867 * mrsas_tbolt_prepare_cdb - Build a 32-byte CDB for a DIF-capable device.
2956 2868 * @io_info: MegaRAID IO request packet pointer.
2957 2869 * @ref_tag: Reference tag for RD/WRPROTECT
2958 2870 *
2959 2871 * Prepare the fast-path CDB with protection information enabled.
2960 2872 */
2961 2873 void
2962 2874 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2963 2875 struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2964 2876 U32 ref_tag)
2965 2877 {
2966 2878 uint16_t EEDPFlags;
2967 2879 uint32_t Control;
2968 2880 ddi_acc_handle_t acc_handle =
2969 2881 instance->mpi2_frame_pool_dma_obj.acc_handle;
2970 2882
2971 2883 /* Prepare 32-byte CDB if DIF is supported on this device */
2972 2884 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2973 2885
2974 2886 bzero(cdb, 32);
2975 2887
2976 2888 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2977 2889
2978 2890
2979 2891 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2980 2892
2981 2893 if (io_info->isRead)
2982 2894 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2983 2895 else
2984 2896 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2985 2897
2986 2898 /* Matches the Linux driver, which sets MEGASAS_RD_WR_PROTECT_CHECK_ALL. */
2987 2899 cdb[10] = MRSAS_RD_WR_PROTECT;
2988 2900
2989 2901 /* LOGICAL BLOCK ADDRESS */
2990 2902 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2991 2903 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2992 2904 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2993 2905 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2994 2906 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2995 2907 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2996 2908 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2997 2909 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2998 2910
2999 2911 /* Logical block reference tag */
3000 2912 ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
3001 2913 BE_32(ref_tag));
3002 2914
3003 2915 ddi_put16(acc_handle,
3004 2916 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
3005 2917
3006 2918 ddi_put32(acc_handle, &scsi_io_request->DataLength,
3007 2919 ((io_info->numBlocks)*512));
3008 2920 /* Specify 32-byte cdb */
3009 2921 ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
3010 2922
3011 2923 /* Transfer length */
3012 2924 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
3013 2925 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
3014 2926 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
3015 2927 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
3016 2928
3017 2929 /* set SCSI IO EEDPFlags */
3018 2930 EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
3019 2931 Control = ddi_get32(acc_handle, &scsi_io_request->Control);
3020 2932
3021 2933 /* set SCSI IO EEDPFlags bits */
3022 2934 if (io_info->isRead) {
3023 2935 /*
3024 2936 * For READ commands, the EEDPFlags shall be set to specify to
3025 2937 * Increment the Primary Reference Tag, to Check the Reference
3026 2938 * Tag, and to Check and Remove the Protection Information
3027 2939 * fields.
3028 2940 */
3029 2941 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3030 2942 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
3031 2943 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
3032 2944 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
3033 2945 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
3034 2946 } else {
3035 2947 /*
3036 2948 * For WRITE commands, the EEDPFlags shall be set to specify to
3037 2949 * Increment the Primary Reference Tag, and to Insert
3038 2950 * Protection Information fields.
3039 2951 */
3040 2952 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3041 2953 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
3042 2954 }
3043 2955 Control |= (0x4 << 26);
3044 2956
3045 2957 ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
3046 2958 ddi_put32(acc_handle, &scsi_io_request->Control, Control);
3047 2959 ddi_put32(acc_handle,
3048 2960 &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
3049 2961 }
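
The eight assignments for the LBA above are a manual 64-bit big-endian
store into bytes 12..19 of the 32-byte CDB. A compact, compilable
equivalent (the helper name is illustrative, not from the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Store a 64-bit LBA big-endian into bytes 12..19 of a 32-byte CDB. */
    static void
    cdb32_set_lba(uint8_t *cdb, uint64_t lba)
    {
            int i;

            for (i = 0; i < 8; i++)
                    cdb[12 + i] = (uint8_t)(lba >> (56 - 8 * i));
    }

    int
    main(void)
    {
            uint8_t cdb[32] = { 0 };

            cdb32_set_lba(cdb, 0x0123456789abcdefULL);
            /* Expect 01 ... ef. */
            printf("%02x %02x\n", cdb[12], cdb[19]);
            return (0);
    }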
3050 2962
3051 2963
3052 2964 /*
3053 2965 * mrsas_tbolt_set_pd_lba - Sets PD LBA
3054 2966 * @cdb: CDB
3055 2967 * @cdb_len: cdb length
3056 2968 * @start_blk: Start block of IO
3057 2969 *
3058 2970 * Used to set the PD LBA in CDB for FP IOs
3059 2971 */
3060 2972 static void
3061 2973 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
3062 2974 U32 num_blocks)
3063 2975 {
3064 2976 U8 cdb_len = *cdb_len_ptr;
3065 2977 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
3066 2978
3067 2979 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
3068 2980 if (((cdb_len == 12) || (cdb_len == 16)) &&
3069 2981 (start_blk <= 0xffffffff)) {
3070 2982 if (cdb_len == 16) {
3071 2983 con_log(CL_ANN,
3072 2984 (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
3073 2985 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3074 2986 flagvals = cdb[1];
3075 2987 groupnum = cdb[14];
3076 2988 control = cdb[15];
3077 2989 } else {
3078 2990 con_log(CL_ANN,
3079 2991 (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
3080 2992 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3081 2993 flagvals = cdb[1];
3082 2994 groupnum = cdb[10];
3083 2995 control = cdb[11];
3084 2996 }
3085 2997
3086 2998 bzero(cdb, 16); /* sizeof (cdb) here is just pointer size */
3087 2999
3088 3000 cdb[0] = opcode;
3089 3001 cdb[1] = flagvals;
3090 3002 cdb[6] = groupnum;
3091 3003 cdb[9] = control;
3092 3004 /* Set transfer length */
3093 3005 cdb[8] = (U8)(num_blocks & 0xff);
3094 3006 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3095 3007 cdb_len = 10;
3096 3008 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3097 3009 /* Convert to 16 byte CDB for large LBA's */
3098 3010 con_log(CL_ANN,
3099 3011 (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3100 3012 switch (cdb_len) {
3101 3013 case 6:
3102 3014 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3103 3015 control = cdb[5];
3104 3016 break;
3105 3017 case 10:
3106 3018 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3107 3019 flagvals = cdb[1];
3108 3020 groupnum = cdb[6];
3109 3021 control = cdb[9];
3110 3022 break;
3111 3023 case 12:
3112 3024 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3113 3025 flagvals = cdb[1];
3114 3026 groupnum = cdb[10];
3115 3027 control = cdb[11];
3116 3028 break;
3117 3029 }
3118 3030
3119 3031 bzero(cdb, 16); /* sizeof (cdb) here is just pointer size */
3120 3032
3121 3033 cdb[0] = opcode;
3122 3034 cdb[1] = flagvals;
3123 3035 cdb[14] = groupnum;
3124 3036 cdb[15] = control;
3125 3037
3126 3038 /* Transfer length */
3127 3039 cdb[13] = (U8)(num_blocks & 0xff);
3128 3040 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3129 3041 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3130 3042 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3131 3043
3132 3044 /* Specify 16-byte cdb */
3133 3045 cdb_len = 16;
3134 3046 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3135 3047 /* convert to 10 byte CDB */
3136 3048 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3137 3049 control = cdb[5];
3138 3050
3139 3051 bzero(cdb, 16); /* sizeof (cdb) here is just pointer size */
3140 3052 cdb[0] = opcode;
3141 3053 cdb[9] = control;
3142 3054
3143 3055 /* Set transfer length */
3144 3056 cdb[8] = (U8)(num_blocks & 0xff);
3145 3057 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3146 3058
3147 3059 /* Specify 10-byte cdb */
3148 3060 cdb_len = 10;
3149 3061 }
3150 3062
3151 3063
3152 3064 /* Fall through to the normal case and just load the LBA here. */
3153 3065 switch (cdb_len) {
3154 3066 case 6:
3155 3067 {
3156 3068 U8 val = cdb[1] & 0xE0;
3157 3069 cdb[3] = (U8)(start_blk & 0xff);
3158 3070 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3159 3071 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3160 3072 break;
3161 3073 }
3162 3074 case 10:
3163 3075 cdb[5] = (U8)(start_blk & 0xff);
3164 3076 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3165 3077 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3166 3078 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3167 3079 break;
3168 3080 case 12:
3169 3081 cdb[5] = (U8)(start_blk & 0xff);
3170 3082 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3171 3083 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3172 3084 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3173 3085 break;
3174 3086
3175 3087 case 16:
3176 3088 cdb[9] = (U8)(start_blk & 0xff);
3177 3089 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3178 3090 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3179 3091 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3180 3092 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3181 3093 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3182 3094 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3183 3095 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3184 3096 break;
3185 3097 }
3186 3098
3187 3099 *cdb_len_ptr = cdb_len;
3188 3100 }
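
The 16-to-10 byte conversion above is only legal when the LBA fits in 32
bits and the transfer length in 16. A standalone sketch of that one path,
with the opcodes written out numerically and flags/group/control dropped
for brevity (the driver preserves them):

    #include <stdint.h>
    #include <string.h>

    #define SK_READ_10      0x28    /* stand-ins for READ_10/READ_16 */
    #define SK_READ_16      0x88

    /* Convert a READ(16) whose LBA fits in 32 bits into a READ(10). */
    static int
    read16_to_read10(uint8_t *cdb)
    {
            uint64_t lba = 0;
            uint32_t nblk = 0;
            int i;

            if (cdb[0] != SK_READ_16)
                    return (-1);
            for (i = 2; i <= 9; i++)        /* bytes 2..9: 64-bit LBA */
                    lba = (lba << 8) | cdb[i];
            for (i = 10; i <= 13; i++)      /* bytes 10..13: length */
                    nblk = (nblk << 8) | cdb[i];
            if (lba > 0xffffffffULL || nblk > 0xffff)
                    return (-1);            /* must stay 16-byte */

            memset(cdb, 0, 16);
            cdb[0] = SK_READ_10;
            cdb[2] = (uint8_t)(lba >> 24);
            cdb[3] = (uint8_t)(lba >> 16);
            cdb[4] = (uint8_t)(lba >> 8);
            cdb[5] = (uint8_t)lba;
            cdb[7] = (uint8_t)(nblk >> 8);
            cdb[8] = (uint8_t)nblk;
            return (10);                    /* new CDB length */
    }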
3189 3101
3190 3102
3191 3103 static int
3192 3104 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3193 3105 {
3194 3106 MR_FW_RAID_MAP_ALL *ld_map;
3195 3107
3196 3108 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3197 3109
3198 3110 ld_map = instance->ld_map[(instance->map_id & 1)];
3199 3111
3200 3112 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3201 3113 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3202 3114
3203 3115 if (MR_ValidateMapInfo(instance->ld_map[
3204 3116 (instance->map_id & 1)], instance->load_balance_info)) {
3205 3117 con_log(CL_ANN,
3206 3118 (CE_CONT, "MR_ValidateMapInfo success"));
3207 3119
3208 3120 instance->fast_path_io = 1;
3209 3121 con_log(CL_ANN,
3210 3122 (CE_NOTE, "instance->fast_path_io %d",
3211 3123 instance->fast_path_io));
3212 3124
3213 3125 return (DDI_SUCCESS);
3214 3126 }
3215 3127
3216 3128 }
3217 3129
3218 3130 instance->fast_path_io = 0;
3219 3131 cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
3220 3132 con_log(CL_ANN, (CE_NOTE,
3221 3133 "instance->fast_path_io %d", instance->fast_path_io));
3222 3134
3223 3135 return (DDI_FAILURE);
3224 3136 }
3225 3137
3226 3138 /*
3227 3139  * Mark the HBA as bad.  This is called either when an I/O packet
3228 3140  * times out even after 3 FW resets, or when the FW is found to be
3229 3141  * faulted even after 3 consecutive resets.
3230 3142 */
3231 3143
3232 3144 void
3233 3145 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3234 3146 {
3235 3147 cmn_err(CE_NOTE, "TBOLT Kill adapter called");
3236 3148
3237 3149 if (instance->deadadapter == 1)
3238 3150 return;
3239 3151
3240 3152 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3241 3153 "Writing to doorbell with MFI_STOP_ADP "));
3242 3154 mutex_enter(&instance->ocr_flags_mtx);
3243 3155 instance->deadadapter = 1;
3244 3156 mutex_exit(&instance->ocr_flags_mtx);
3245 3157 instance->func_ptr->disable_intr(instance);
3246 3158 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3247 3159 /* Flush */
3248 3160 (void) RD_RESERVED0_REGISTER(instance);
3249 3161
3250 3162 (void) mrsas_print_pending_cmds(instance);
3251 3163 (void) mrsas_complete_pending_cmds(instance);
3252 3164 }
3253 3165
3254 3166 void
3255 3167 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3256 3168 {
3257 3169 int i;
3258 3170 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3259 3171 instance->reply_read_index = 0;
3260 3172
3261 3173 /* Initialize every reply descriptor to all ones (empty). */
3262 3174 reply_desc = instance->reply_frame_pool;
3263 3175
3264 3176 for (i = 0; i < instance->reply_q_depth; i++) {
3265 3177 reply_desc->Words = (uint64_t)~0;
3266 3178 reply_desc++;
3267 3179 }
3268 3180 }
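
/*
 * A minimal sketch of how the all-ones value is consumed (assuming the
 * usual MPI2 idiom; the actual interrupt path lives elsewhere in this
 * driver).  A descriptor that still reads ~0 has not been posted by the
 * firmware yet:
 *
 *	reply_desc = instance->reply_frame_pool + instance->reply_read_index;
 *	if (reply_desc->Words == (uint64_t)~0)
 *		return;		(no new reply from firmware)
 */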
3269 3181
3270 3182 int
3271 3183 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3272 3184 {
3273 3185 uint32_t status = 0x00;
3274 3186 uint32_t retry = 0;
3275 3187 uint32_t cur_abs_reg_val;
3276 3188 uint32_t fw_state;
3277 3189 uint32_t abs_state;
3278 3190 uint32_t i;
3279 3191
3280 3192 con_log(CL_ANN, (CE_NOTE,
3281 3193 "mrsas_tbolt_reset_ppc entered"));
3282 3194
3283 3195 if (instance->deadadapter == 1) {
3284 3196 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3285 3197 "no more resets as HBA has been marked dead ");
3286 3198 return (DDI_FAILURE);
3287 3199 }
3288 3200
3289 3201 mutex_enter(&instance->ocr_flags_mtx);
3290 3202 instance->adapterresetinprogress = 1;
3291 3203 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3292 3204 "adapterresetinprogress flag set, time %llx", gethrtime()));
3293 3205 mutex_exit(&instance->ocr_flags_mtx);
3294 3206
3295 3207 instance->func_ptr->disable_intr(instance);
3296 3208
3297 3209 /* Delay to let in-flight ioctl and I/O commands complete. */
3298 3210 for (i = 0; i < 3000; i++) {
3299 3211 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3300 3212 }
3301 3213
3302 3214 instance->reply_read_index = 0;
3303 3215
3304 3216 retry_reset:
3305 3217 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3306 3218 ":Resetting TBOLT "));
3307 3219
3308 3220 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3309 3221 WR_TBOLT_IB_WRITE_SEQ(4, instance);
3310 3222 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3311 3223 WR_TBOLT_IB_WRITE_SEQ(2, instance);
3312 3224 WR_TBOLT_IB_WRITE_SEQ(7, instance);
3313 3225 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
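	/*
	 * The six key values above (0xF, 0x4, 0xB, 0x2, 0x7, 0xD) are the
	 * standard MPI2 write-sequence keys that unlock the host diagnostic
	 * register for the reset issued below.
	 */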
3314 3226 con_log(CL_ANN1, (CE_NOTE,
3315 3227 "mrsas_tbolt_reset_ppc: magic number written "
3316 3228 "to write sequence register"));
3317 3229 delay(100 * drv_usectohz(MILLISEC));
3318 3230 status = RD_TBOLT_HOST_DIAG(instance);
3319 3231 con_log(CL_ANN1, (CE_NOTE,
3320 3232 "mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
3321 3233 "to write sequence register"));
3322 3234
3323 3235 while (status & DIAG_TBOLT_RESET_ADAPTER) {
3324 3236 delay(100 * drv_usectohz(MILLISEC));
3325 3237 status = RD_TBOLT_HOST_DIAG(instance);
3326 3238 if (retry++ == 100) {
3327 3239 cmn_err(CE_WARN,
3328 3240 "mrsas_tbolt_reset_ppc:"
3329 3241 "resetadapter bit is set already "
3330 3242 "check retry count %d", retry);
3331 3243 return (DDI_FAILURE);
3332 3244 }
3333 3245 }
3334 3246
3335 3247 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3336 3248 delay(100 * drv_usectohz(MILLISEC));
3337 3249
3338 3250 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3339 3251 (uint8_t *)((uintptr_t)(instance)->regmap +
3340 3252 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3341 3253
3342 3254 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3343 3255 delay(100 * drv_usectohz(MILLISEC));
3344 3256 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3345 3257 (uint8_t *)((uintptr_t)(instance)->regmap +
3346 3258 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3347 3259 if (retry++ == 100) {
3348 3260 /* Don't call kill adapter here. */
3349 3261 /* The RESET ADAPTER bit is cleared by firmware. */
3350 3262 /* mrsas_tbolt_kill_adapter(instance); */
3351 3263 cmn_err(CE_WARN,
3352 3264 "mr_sas %d: %s(): RESET FAILED; return failure!!!",
3353 3265 instance->instance, __func__);
3354 3266 return (DDI_FAILURE);
3355 3267 }
3356 3268 }
3357 3269
3358 3270 con_log(CL_ANN,
3359 3271 (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3360 3272 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3361 3273 "Calling mfi_state_transition_to_ready"));
3362 3274
3363 3275 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3364 3276 retry = 0;
3365 3277 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3366 3278 delay(100 * drv_usectohz(MILLISEC));
3367 3279 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3368 3280 }
3369 3281 if (abs_state <= MFI_STATE_FW_INIT) {
3370 3282 cmn_err(CE_WARN,
3371 3283 "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
3372 3284 "state = 0x%x, RETRY RESET.", abs_state);
3373 3285 goto retry_reset;
3374 3286 }
3375 3287
3376 3288 /* Mark the HBA as bad if the FW is faulted after 3 consecutive resets. */
3377 3289 if (mfi_state_transition_to_ready(instance) ||
3378 3290 debug_tbolt_fw_faults_after_ocr_g == 1) {
3379 3291 cur_abs_reg_val =
3380 3292 instance->func_ptr->read_fw_status_reg(instance);
3381 3293 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3382 3294
3383 3295 con_log(CL_ANN1, (CE_NOTE,
3384 3296 "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
3385 3297 "FW state = 0x%x", fw_state));
3386 3298 if (debug_tbolt_fw_faults_after_ocr_g == 1)
3387 3299 fw_state = MFI_STATE_FAULT;
3388 3300
3389 3301 con_log(CL_ANN,
3390 3302 (CE_NOTE, "mrsas_tbolt_reset_ppc: FW is not ready, "
3391 3303 "FW state = 0x%x", fw_state));
3392 3304
3393 3305 if (fw_state == MFI_STATE_FAULT) {
3394 3306 /* increment the count */
3395 3307 instance->fw_fault_count_after_ocr++;
3396 3308 if (instance->fw_fault_count_after_ocr
3397 3309 < MAX_FW_RESET_COUNT) {
3398 3310 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3399 3311 "FW is in fault after OCR count %d "
3400 3312 "Retry Reset",
3401 3313 instance->fw_fault_count_after_ocr);
3402 3314 goto retry_reset;
3403 3315
3404 3316 } else {
3405 3317 cmn_err(CE_WARN, "mrsas %d: %s: "
3406 3318 "max reset count exceeded (>%d); "
3407 3319 "marking HBA as bad, killing adapter",
3408 3320 instance->instance, __func__,
3409 3321 MAX_FW_RESET_COUNT);
3410 3322
3411 3323 mrsas_tbolt_kill_adapter(instance);
3412 3324 return (DDI_FAILURE);
3413 3325 }
3414 3326 }
3415 3327 }
3416 3328
3417 3329 /* reset the counter as FW is up after OCR */
3418 3330 instance->fw_fault_count_after_ocr = 0;
3419 3331
3420 3332 mrsas_reset_reply_desc(instance);
3421 3333
3422 3334
3423 3335 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3424 3336 "Calling mrsas_issue_init_mpi2"));
3425 3337 abs_state = mrsas_issue_init_mpi2(instance);
3426 3338 if (abs_state == (uint32_t)DDI_FAILURE) {
3427 3339 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3428 3340 "INIT failed Retrying Reset");
3429 3341 goto retry_reset;
3430 3342 }
3431 3343 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3432 3344 "mrsas_issue_init_mpi2 Done"));
3433 3345
3434 3346 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3435 3347 "Calling mrsas_print_pending_cmd"));
3436 3348 (void) mrsas_print_pending_cmds(instance);
3437 3349 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3438 3350 "mrsas_print_pending_cmd done"));
3439 3351
3440 3352 instance->func_ptr->enable_intr(instance);
3441 3353 instance->fw_outstanding = 0;
3442 3354
3443 3355 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3444 3356 "Calling mrsas_issue_pending_cmds"));
3445 3357 (void) mrsas_issue_pending_cmds(instance);
3446 3358 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3447 3359 "issue_pending_cmds done."));
3448 3360
3449 3361 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3450 3362 "Calling aen registration"));
3451 3363
3452 3364 instance->aen_cmd->retry_count_for_ocr = 0;
3453 3365 instance->aen_cmd->drv_pkt_time = 0;
3454 3366
3455 3367 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3456 3368
3457 3369 con_log(CL_ANN1, (CE_NOTE, "Unsetting adapterresetinprogress flag."));
3458 3370 mutex_enter(&instance->ocr_flags_mtx);
3459 3371 instance->adapterresetinprogress = 0;
3460 3372 mutex_exit(&instance->ocr_flags_mtx);
3461 3373 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3462 3374 "adpterresetinprogress flag unset"));
3463 3375
3464 3376 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
3465 3377 return (DDI_SUCCESS);
3466 3378
3467 3379 }
3468 3380
3469 3381
3470 3382 /*
3471 3383  * mrsas_tbolt_sync_map_info - keep the FW's LD map in sync
3472 3384  * @instance: Adapter soft state
3473 3385  *
3474 3386  * Issues an internal command (DCMD) that hands the FW the list of LD
3475 3387  * target IDs and sequence numbers.  The FW holds the command (note the
3476 3388  * mbox "Pend" flag below) and completes it when the RAID map changes.
3477 3389 */
3478 3390
3479 3391 static int
3480 3392 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3481 3393 {
3482 3394 int ret = 0, i;
3483 3395 struct mrsas_cmd *cmd = NULL;
3484 3396 struct mrsas_dcmd_frame *dcmd;
3485 3397 uint32_t size_sync_info, num_lds;
3486 3398 LD_TARGET_SYNC *ci = NULL;
3487 3399 MR_FW_RAID_MAP_ALL *map;
3488 3400 MR_LD_RAID *raid;
3489 3401 LD_TARGET_SYNC *ld_sync;
3490 3402 uint32_t ci_h = 0;
3491 3403 uint32_t size_map_info;
3492 3404
3493 3405 cmd = get_raid_msg_pkt(instance);
3494 3406
3495 3407 if (cmd == NULL) {
3496 3408 cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
3497 3409 "mrsas_tbolt_sync_map_info(). ");
3498 3410 return (DDI_FAILURE);
3499 3411 }
3500 3412
3501 3413 /* Clear the frame buffer and assign back the context id */
3502 3414 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3503 3415 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3504 3416 cmd->index);
3505 3417 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3506 3418
3507 3419
3508 3420 map = instance->ld_map[instance->map_id & 1];
3509 3421
3510 3422 num_lds = map->raidMap.ldCount;
3511 3423
3512 3424 dcmd = &cmd->frame->dcmd;
3513 3425
3514 3426 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3515 3427
3516 3428 con_log(CL_ANN, (CE_NOTE, "size_sync_info = 0x%x; ld count = 0x%x",
3517 3429 size_sync_info, num_lds));
3518 3430
3519 3431 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3520 3432
3521 3433 bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3522 3434 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3523 3435
3524 3436 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3525 3437
3526 3438 ld_sync = (LD_TARGET_SYNC *)ci;
3527 3439
3528 3440 for (i = 0; i < num_lds; i++, ld_sync++) {
3529 3441 raid = MR_LdRaidGet(i, map);
3530 3442
3531 3443 con_log(CL_ANN1,
3532 3444 (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3533 3445 i, raid->seqNum, raid->flags.ldSyncRequired));
3534 3446
3535 3447 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3536 3448
3537 3449 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3538 3450 i, ld_sync->ldTargetId));
3539 3451
3540 3452 ld_sync->seqNum = raid->seqNum;
3541 3453 }
3542 3454
3543 3455
3544 3456 size_map_info = sizeof (MR_FW_RAID_MAP) +
3545 3457 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3546 3458
3547 3459 dcmd->cmd = MFI_CMD_OP_DCMD;
3548 3460 dcmd->cmd_status = 0xFF;
3549 3461 dcmd->sge_count = 1;
3550 3462 dcmd->flags = MFI_FRAME_DIR_WRITE;
3551 3463 dcmd->timeout = 0;
3552 3464 dcmd->pad_0 = 0;
3553 3465 dcmd->data_xfer_len = size_map_info;
3554 3466 ASSERT(num_lds <= 255);
3555 3467 dcmd->mbox.b[0] = (U8)num_lds;
3556 3468 dcmd->mbox.b[1] = 1; /* Pend */
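	/*
	 * Because of the pend flag, completion of this DCMD is the driver's
	 * cue that the RAID map changed and a fresh map must be read (the
	 * command is parked in map_update_cmd below).
	 */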
3557 3469 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3558 3470 dcmd->sgl.sge32[0].phys_addr = ci_h;
3559 3471 dcmd->sgl.sge32[0].length = size_map_info;
3560 3472
3561 3473
3562 3474 instance->map_update_cmd = cmd;
3563 3475 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3564 3476
3565 3477 instance->func_ptr->issue_cmd(cmd, instance);
3566 3478
3567 3479 instance->unroll.syncCmd = 1;
3568 3480 con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3569 3481
3570 3482 return (ret);
3571 3483 }
3572 3484
3573 3485 /*
3574 3486 * abort_syncmap_cmd
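 *
 * Presumably this is invoked before an adapter reset to cancel the pended
 * sync-map DCMD issued by mrsas_tbolt_sync_map_info(), so the firmware does
 * not later complete it into a stale command slot.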
3575 3487 */
3576 3488 int
3577 3489 abort_syncmap_cmd(struct mrsas_instance *instance,
3578 3490 struct mrsas_cmd *cmd_to_abort)
3579 3491 {
3580 3492 int ret = 0;
3581 3493
3582 3494 struct mrsas_cmd *cmd;
3583 3495 struct mrsas_abort_frame *abort_fr;
3584 3496
3585 3497 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3586 3498
3587 3499 cmd = get_raid_msg_mfi_pkt(instance);
3588 3500
3589 3501 if (!cmd) {
3590 3502 cmn_err(CE_WARN,
3591 3503 "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3592 3504 return (DDI_FAILURE);
3593 3505 }
3594 3506 /* Clear the frame buffer and assign back the context id */
3595 3507 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3596 3508 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3597 3509 cmd->index);
3598 3510
3599 3511 abort_fr = &cmd->frame->abort;
3600 3512
3601 3513 /* prepare and issue the abort frame */
3602 3514 ddi_put8(cmd->frame_dma_obj.acc_handle,
3603 3515 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3604 3516 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3605 3517 MFI_CMD_STATUS_SYNC_MODE);
3606 3518 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3607 3519 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3608 3520 cmd_to_abort->index);
3609 3521 ddi_put32(cmd->frame_dma_obj.acc_handle,
3610 3522 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3611 3523 ddi_put32(cmd->frame_dma_obj.acc_handle,
3612 3524 &abort_fr->abort_mfi_phys_addr_hi, 0);
3613 3525
3614 3526 cmd->frame_count = 1;
3615 3527
3616 3528 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3617 3529
3618 3530 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3619 3531 con_log(CL_ANN1, (CE_WARN,
3620 3532 "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3621 3533 ret = -1;
3622 3534 } else {
3623 3535 ret = 0;
3624 3536 }
3625 3537
3626 3538 return_raid_msg_mfi_pkt(instance, cmd);
3627 3539
3628 3540 atomic_add_16(&instance->fw_outstanding, (-1));
3629 3541
3630 3542 return (ret);
3631 3543 }
3632 3544
3633 3545
3634 3546 #ifdef PDSUPPORT
3547 +/*
3548 + * Even though these functions were originally intended for 2208 only, it
3549 + * turns out they're useful for "Skinny" support as well. In a perfect world,
3550 + * these two functions would be either in mr_sas.c, or in their own new source
3551 + * file. Since this driver needs some cleanup anyway, keep this portion in
3552 + * mind as well.
3553 + */
3554 +
3635 3555 int
3636 3556 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3637 3557 uint8_t lun, dev_info_t **ldip)
3638 3558 {
3639 3559 struct scsi_device *sd;
3640 3560 dev_info_t *child;
3641 3561 int rval, dtype;
3642 3562 struct mrsas_tbolt_pd_info *pds = NULL;
3643 3563
3644 3564 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3645 3565 tgt, lun));
3646 3566
3647 3567 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3648 3568 if (ldip) {
3649 3569 *ldip = child;
3650 3570 }
3651 3571 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3652 3572 rval = mrsas_service_evt(instance, tgt, 1,
3653 3573 MRSAS_EVT_UNCONFIG_TGT, NULL);
3654 3574 con_log(CL_ANN1, (CE_WARN,
3655 3575 "mr_sas:DELETING STALE ENTRY rval = %d "
3656 3576 "tgt id = %d", rval, tgt));
3657 3577 return (NDI_FAILURE);
3658 3578 }
3659 3579 return (NDI_SUCCESS);
3660 3580 }
3661 3581
3662 3582 pds = (struct mrsas_tbolt_pd_info *)
3663 3583 kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3664 3584 mrsas_tbolt_get_pd_info(instance, pds, tgt);
3665 3585 dtype = pds->scsiDevType;
3666 3586
3667 3587 /* Check for Disk */
3668 3588 if (dtype == DTYPE_DIRECT) {
3669 3589 if (LE_16(pds->fwState) != PD_SYSTEM) {
3671 3591 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3672 3592 return (NDI_FAILURE);
3673 3593 }
3674 3594 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3675 3595 sd->sd_address.a_hba_tran = instance->tran;
3676 3596 sd->sd_address.a_target = (uint16_t)tgt;
3677 3597 sd->sd_address.a_lun = (uint8_t)lun;
3678 3598
3679 3599 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3680 3600 rval = mrsas_config_scsi_device(instance, sd, ldip);
3681 3601 con_log(CL_DLEVEL1, (CE_NOTE,
3682 3602 "Phys. device found: tgt %d dtype %d: %s",
3683 3603 tgt, dtype, sd->sd_inq->inq_vid));
3684 3604 } else {
3685 3605 rval = NDI_FAILURE;
3686 3606 con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3687 3607 "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3688 3608 tgt, dtype, sd->sd_inq->inq_vid));
3689 3609 }
3690 3610
3691 3611 /* sd_unprobe is a no-op now; free the inquiry buffer manually. */
3692 3612 if (sd->sd_inq) {
3693 3613 kmem_free(sd->sd_inq, SUN_INQSIZE);
3694 3614 sd->sd_inq = (struct scsi_inquiry *)NULL;
3695 3615 }
3696 3616 kmem_free(sd, sizeof (struct scsi_device));
3697 - rval = NDI_SUCCESS;
3698 3617 } else {
3699 3618 con_log(CL_ANN1, (CE_NOTE,
3700 3619 "Device not supported: tgt %d lun %d dtype %d",
3701 3620 tgt, lun, dtype));
3702 3621 rval = NDI_FAILURE;
3703 3622 }
3704 3623
3705 3624 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3706 3625 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: return rval = %d",
3707 3626 rval));
3708 3627 return (rval);
3709 3628 }
3710 3629
3711 3630 static void
3712 3631 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3713 3632 struct mrsas_tbolt_pd_info *pds, int tgt)
3714 3633 {
3715 3634 struct mrsas_cmd *cmd;
3716 3635 struct mrsas_dcmd_frame *dcmd;
3717 3636 dma_obj_t dcmd_dma_obj;
3718 3637
3719 - cmd = get_raid_msg_pkt(instance);
3638 + ASSERT(instance->tbolt || instance->skinny);
3720 3639
3640 + if (instance->tbolt)
3641 + cmd = get_raid_msg_pkt(instance);
3642 + else
3643 + cmd = mrsas_get_mfi_pkt(instance);
3644 +
3721 3645 if (!cmd) {
3722 3646 con_log(CL_ANN1,
3723 3647 (CE_WARN, "Failed to get a cmd for get pd info"));
3724 3648 return;
3725 3649 }
3726 3650
3727 3651 /* Clear the frame buffer and assign back the context id */
3728 3652 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3729 3653 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3730 3654 cmd->index);
3731 3655
3732 3656
3733 3657 dcmd = &cmd->frame->dcmd;
3734 3658 dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3735 3659 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3736 3660 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3737 3661 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3738 3662 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3739 3663 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
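	/*
	 * sgllen is 1 so the allocation lands in a single DMA cookie; the
	 * DCMD below carries exactly one SGE, built from dma_cookie[0].
	 */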
3740 3664
3741 3665 (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3742 3666 DDI_STRUCTURE_LE_ACC);
3743 3667 bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3744 3668 bzero(dcmd->mbox.b, 12);
3745 3669 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3746 3670 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3747 3671 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3748 3672 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3749 3673 MFI_FRAME_DIR_READ);
3750 3674 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3751 3675 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3752 3676 sizeof (struct mrsas_tbolt_pd_info));
3753 3677 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3754 3678 MR_DCMD_PD_GET_INFO);
3755 3679 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3756 3680 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3757 3681 sizeof (struct mrsas_tbolt_pd_info));
3758 3682 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3759 3683 dcmd_dma_obj.dma_cookie[0].dmac_address);
3760 3684
3761 3685 cmd->sync_cmd = MRSAS_TRUE;
3762 3686 cmd->frame_count = 1;
3763 3687
3764 - if (instance->tbolt) {
3688 + if (instance->tbolt)
3765 3689 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3766 - }
3767 3690
3768 3691 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3769 3692
3770 3693 ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3771 3694 (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3772 3695 DDI_DEV_AUTOINCR);
3773 3696 (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3774 - return_raid_msg_pkt(instance, cmd);
3697 +
3698 + if (instance->tbolt)
3699 + return_raid_msg_pkt(instance, cmd);
3700 + else
3701 + mrsas_return_mfi_pkt(instance, cmd);
3775 3702 }
3776 3703 #endif