1 /*
 * mr_sas_tbolt.c: source for mr_sas driver for New Generation
 * controllers, i.e. Thunderbolt and Invader
4 *
5 * Solaris MegaRAID device driver for SAS2.0 controllers
6 * Copyright (c) 2008-2012, LSI Logic Corporation.
7 * All rights reserved.
8 *
9 * Version:
10 * Author:
11 * Swaminathan K S
12 * Arun Chandrashekhar
13 * Manju R
14 * Rasheed
15 * Shakeel Bukhari
16 */
17
18
19 #include <sys/types.h>
20 #include <sys/file.h>
21 #include <sys/atomic.h>
22 #include <sys/scsi/scsi.h>
23 #include <sys/byteorder.h>
24 #include "ld_pd_map.h"
25 #include "mr_sas.h"
26 #include "fusion.h"
27
28
/* Pre-TB command size and TB command size. */
#define	MR_COMMAND_SIZE	(64 * 20)	/* 1280 bytes */
31 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
32 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
33 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
    struct IO_REQUEST_INFO *in_info);
35 extern ddi_dma_attr_t mrsas_generic_dma_attr;
36 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
37 extern struct ddi_device_acc_attr endian_attr;
38 extern int debug_level_g;
39 extern unsigned int enable_fp;
40 volatile int dump_io_wait_time = 90;
extern void io_timeout_checker(void *arg);
extern int mfi_state_transition_to_ready(struct mrsas_instance *instance);
45 extern volatile int debug_timeout_g;
46 extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
47 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
48 extern void push_pending_mfi_pkt(struct mrsas_instance *,
49 struct mrsas_cmd *);
50 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
51 MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
52
53 static volatile int debug_tbolt_fw_faults_after_ocr_g = 0;
54
55 /*
56 * destroy_mfi_mpi_frame_pool
57 */
58 void
59 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
60 {
61 int i;
62
63 struct mrsas_cmd *cmd;
64
65 /* return all mfi frames to pool */
66 for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
67 cmd = instance->cmd_list[i];
68 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
69 (void) mrsas_free_dma_obj(instance,
70 cmd->frame_dma_obj);
71 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
72 }
73 }
74
75 /*
76 * destroy_mpi2_frame_pool
77 */
78 void
79 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
80 {
81
82 if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
83 (void) mrsas_free_dma_obj(instance,
84 instance->mpi2_frame_pool_dma_obj);
		instance->mpi2_frame_pool_dma_obj.status = DMA_OBJ_FREED;
86 }
87 }
88
89
90 /*
91 * mrsas_tbolt_free_additional_dma_buffer
92 */
93 void
94 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
95 {
	int i;

	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
98 (void) mrsas_free_dma_obj(instance,
99 instance->mfi_internal_dma_obj);
100 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
101 }
102 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
103 (void) mrsas_free_dma_obj(instance,
104 instance->mfi_evt_detail_obj);
105 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
106 }
107
108 for (i = 0; i < 2; i++) {
109 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
110 (void) mrsas_free_dma_obj(instance,
111 instance->ld_map_obj[i]);
112 instance->ld_map_obj[i].status = DMA_OBJ_FREED;
113 }
114 }
115 }
116
117
118 /*
119 * free_req_desc_pool
120 */
121 void
122 free_req_rep_desc_pool(struct mrsas_instance *instance)
123 {
124 if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
125 (void) mrsas_free_dma_obj(instance,
126 instance->request_desc_dma_obj);
127 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
128 }
129
130 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
131 (void) mrsas_free_dma_obj(instance,
132 instance->reply_desc_dma_obj);
133 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
134 }
135
136
137 }
138
139
140 /*
141 * ThunderBolt(TB) Request Message Frame Pool
142 */
143 int
144 create_mpi2_frame_pool(struct mrsas_instance *instance)
145 {
146 int i = 0;
147 int cookie_cnt;
148 uint16_t max_cmd;
149 uint32_t sgl_sz;
150 uint32_t raid_msg_size;
151 uint32_t total_size;
152 uint32_t offset;
153 uint32_t io_req_base_phys;
154 uint8_t *io_req_base;
155 struct mrsas_cmd *cmd;
156
157 max_cmd = instance->max_fw_cmds;
158
159 sgl_sz = 1024;
160 raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
161
	/* Allocate an additional 256 bytes to accommodate SMID 0. */
163 total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
164 (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
165
166 con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
167 "max_cmd %x ", max_cmd));
168
169 con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
170 "request message frame pool size %x", total_size));
171
172 /*
173 * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
174 * and then split the memory to 1024 commands. Each command should be
175 * able to contain a RAID MESSAGE FRAME which will embed a MFI_FRAME
176 * within it. Further refer the "alloc_req_rep_desc" function where
177 * we allocate request/reply descriptors queues for a clue.
178 */
179
180 instance->mpi2_frame_pool_dma_obj.size = total_size;
181 instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
182 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
183 0xFFFFFFFFU;
184 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
185 0xFFFFFFFFU;
186 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
187 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
188
189 if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
190 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
191 cmn_err(CE_WARN,
192 "mr_sas: could not alloc mpi2 frame pool");
193 return (DDI_FAILURE);
194 }
195
196 bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
197 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
198
199 instance->io_request_frames =
200 (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
201 instance->io_request_frames_phy =
202 (uint32_t)
203 instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
204
205 con_log(CL_DLEVEL3, (CE_NOTE,
206 "io_request_frames 0x%p",
207 instance->io_request_frames));
208
209 con_log(CL_DLEVEL3, (CE_NOTE,
210 "io_request_frames_phy 0x%x",
211 instance->io_request_frames_phy));
212
213 io_req_base = (uint8_t *)instance->io_request_frames +
214 MRSAS_THUNDERBOLT_MSG_SIZE;
215 io_req_base_phys = instance->io_request_frames_phy +
216 MRSAS_THUNDERBOLT_MSG_SIZE;
217
218 con_log(CL_DLEVEL3, (CE_NOTE,
219 "io req_base_phys 0x%x", io_req_base_phys));
220
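	/*
	 * Carve the pool up per command. Layout of the single DMA chunk
	 * (one leading pad frame, since SMID 0 is reserved):
	 *
	 *   [1 pad frame][max_cmd MPT frames][max_cmd 1K SGL chain buffers]
	 *   [max_cmd sense buffers of SENSE_LENGTH each]
	 */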
221 for (i = 0; i < max_cmd; i++) {
222 cmd = instance->cmd_list[i];
223
224 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
225
226 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
227 ((uint8_t *)io_req_base + offset);
228 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
229
230 cmd->sgl = (Mpi2SGEIOUnion_t *)
231 ((uint8_t *)io_req_base +
232 (max_cmd * raid_msg_size) + i * sgl_sz);
233
234 cmd->sgl_phys_addr =
235 (io_req_base_phys +
236 (max_cmd * raid_msg_size) + i * sgl_sz);
237
238 cmd->sense1 = (uint8_t *)
239 ((uint8_t *)io_req_base +
240 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
241 (i * SENSE_LENGTH));
242
243 cmd->sense_phys_addr1 =
244 (io_req_base_phys +
245 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
246 (i * SENSE_LENGTH));
247
248
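		/* SMID 0 is reserved for IOC INIT; hardware SMIDs start at 1 */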
		cmd->SMID = i + 1;
250
251 con_log(CL_DLEVEL3, (CE_NOTE,
252 "Frame Pool Addr [%x]0x%p",
253 cmd->index, cmd->scsi_io_request));
254
255 con_log(CL_DLEVEL3, (CE_NOTE,
256 "Frame Pool Phys Addr [%x]0x%x",
257 cmd->index, cmd->scsi_io_request_phys_addr));
258
259 con_log(CL_DLEVEL3, (CE_NOTE,
260 "Sense Addr [%x]0x%p",
261 cmd->index, cmd->sense1));
262
263 con_log(CL_DLEVEL3, (CE_NOTE,
264 "Sense Addr Phys [%x]0x%x",
265 cmd->index, cmd->sense_phys_addr1));
266
267
		con_log(CL_DLEVEL3, (CE_NOTE,
		    "Sgl buffers [%x]0x%p",
		    cmd->index, cmd->sgl));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "Sgl buffers phys [%x]0x%x",
		    cmd->index, cmd->sgl_phys_addr));
275 }
276
	return (DDI_SUCCESS);
}
280
281
282 /*
283 * alloc_additional_dma_buffer for AEN
284 */
285 int
286 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
287 {
	uint32_t internal_buf_size = PAGESIZE * 2;
289 int i;
290
291 /* Initialize buffer status as free */
292 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
293 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
294 instance->ld_map_obj[0].status = DMA_OBJ_FREED;
295 instance->ld_map_obj[1].status = DMA_OBJ_FREED;
296
297
298 instance->mfi_internal_dma_obj.size = internal_buf_size;
299 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
300 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
302 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
303
304 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
305 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
306 cmn_err(CE_WARN,
307 "mr_sas: could not alloc reply queue");
308 return (DDI_FAILURE);
309 }
310
311 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
312
313 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
314 instance->internal_buf = (caddr_t)(((unsigned long)
315 instance->mfi_internal_dma_obj.buffer));
316 instance->internal_buf_dmac_add =
317 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
318 instance->internal_buf_size = internal_buf_size;
319
320 /* allocate evt_detail */
321 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
322 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
323 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
324 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
325 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
326 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
327
328 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
329 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
330 cmn_err(CE_WARN,
331 "mrsas_tbolt_alloc_additional_dma_buffer: "
332 "could not allocate data transfer buffer.");
333 goto fail_tbolt_additional_buff;
334 }
335
336 bzero(instance->mfi_evt_detail_obj.buffer,
337 sizeof (struct mrsas_evt_detail));
338
339 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
340
341 instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
342 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
343
344 for (i = 0; i < 2; i++) {
345 /* allocate the data transfer buffer */
346 instance->ld_map_obj[i].size = instance->size_map_info;
347 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
		instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi =
		    0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
		    0xFFFFFFFFU;
350 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
351 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
352
353 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
354 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
355 cmn_err(CE_WARN,
356 "could not allocate data transfer buffer.");
357 goto fail_tbolt_additional_buff;
358 }
359
360 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
361
362 (void) memset(instance->ld_map_obj[i].buffer, 0,
363 instance->size_map_info);
364
		instance->ld_map[i] =
		    (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
366 instance->ld_map_phy[i] =
367 (uint32_t)instance->ld_map_obj[i].dma_cookie[0].dmac_address;
368
369 con_log(CL_DLEVEL3, (CE_NOTE,
370 "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
371
372 con_log(CL_DLEVEL3, (CE_NOTE,
373 "size_map_info 0x%x", instance->size_map_info));
374
375 }
376
377 return (DDI_SUCCESS);
378
379 fail_tbolt_additional_buff:
380 mrsas_tbolt_free_additional_dma_buffer(instance);
381
382 return (DDI_FAILURE);
383 }
384
385 MRSAS_REQUEST_DESCRIPTOR_UNION *
386 mr_sas_get_request_descriptor(struct mrsas_instance *instance,
387 uint16_t index, struct mrsas_cmd *cmd)
388 {
389 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
390
391 if (index > instance->max_fw_cmds) {
392 con_log(CL_ANN1, (CE_NOTE,
393 "Invalid SMID 0x%x request for descriptor", index));
394 con_log(CL_ANN1, (CE_NOTE,
395 "max_fw_cmds : 0x%x\n", instance->max_fw_cmds));
396 return (NULL);
397 }
398
399 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
400 ((char *)instance->request_message_pool +
401 (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
402
403 con_log(CL_ANN1, (CE_NOTE,
404 "request descriptor : 0x%08lx\n", (unsigned long)req_desc));
405
406 con_log(CL_ANN1, (CE_NOTE,
407 "request descriptor base phy : 0x%08lx\n",
408 (unsigned long)instance->request_message_pool_phy));
409
	return (req_desc);
411 }
412
413
414 /*
415 * Allocate Request and Reply Queue Descriptors.
416 */
417 int
418 alloc_req_rep_desc(struct mrsas_instance *instance)
419 {
420 uint32_t request_q_sz, reply_q_sz;
	int i, max_reply_q_sz;
	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
425
426 /*
427 * ThunderBolt(TB) There's no longer producer consumer mechanism.
428 * Once we have an interrupt we are supposed to scan through the list of
429 * reply descriptors and process them accordingly. We would be needing
430 * to allocate memory for 1024 reply descriptors
431 */
432
433 /* Allocate Reply Descriptors */
434 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
435 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
436
	/* reply queue size should be a multiple of 16 */
	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15) / 16) * 16;
439
	reply_q_sz = 8 * max_reply_q_sz;
445
446 instance->reply_desc_dma_obj.size = reply_q_sz;
447 instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
448 instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
449 instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
450 instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
451 instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
452
453 if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
454 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
455 cmn_err(CE_WARN,
456 "mr_sas: could not alloc reply queue");
457 return (DDI_FAILURE);
458 }
459
460 bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
461 instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
462
	/* virtual address of reply queue */
464 instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
465 instance->reply_desc_dma_obj.buffer);
466
467 instance->reply_q_depth = max_reply_q_sz;
468
469 con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
470 instance->reply_q_depth));
471
472 con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
473 instance->reply_frame_pool));
474
	/* initialize all reply descriptors to all ones (unused marker) */
476 reply_desc = instance->reply_frame_pool;
477
478 for (i = 0; i < instance->reply_q_depth; i++) {
479 reply_desc->Words = (uint64_t)~0;
480 reply_desc++;
481 }
482
483
484 instance->reply_frame_pool_phy =
485 (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
486
487 con_log(CL_ANN1, (CE_NOTE,
488 "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
489
490
491 instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
492 reply_q_sz);
493
494 con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
495 instance->reply_pool_limit_phy));
496
497
498 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
499 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
500
501 /* Allocate Request Descriptors */
502 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
503 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
504
	request_q_sz = 8 * (instance->max_fw_cmds);
507
508 instance->request_desc_dma_obj.size = request_q_sz;
509 instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
510 instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
512 instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
513 instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
514
515 if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
516 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
517 cmn_err(CE_WARN,
518 "mr_sas: could not alloc request queue desc");
519 goto fail_undo_reply_queue;
520 }
521
522 bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
523 instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
524
525 /* virtual address of request queue desc */
526 instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
527 (instance->request_desc_dma_obj.buffer);
528
529 instance->request_message_pool_phy =
530 (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
531
534 return (DDI_SUCCESS);
535
536 fail_undo_reply_queue:
537 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
538 (void) mrsas_free_dma_obj(instance,
539 instance->reply_desc_dma_obj);
540 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
541 }
542
543 return (DDI_FAILURE);
544 }
545
546 /*
547 * mrsas_alloc_cmd_pool_tbolt
 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have a
 * single routine.
549 */
550 int
551 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
552 {
553 int i;
554 int count;
555 uint32_t max_cmd;
556 uint32_t reserve_cmd;
557 size_t sz;
558
559 struct mrsas_cmd *cmd;
560
561 max_cmd = instance->max_fw_cmds;
562 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
563 "max_cmd %x", max_cmd));
564
565
566 sz = sizeof (struct mrsas_cmd *) * max_cmd;
567
568 /*
569 * instance->cmd_list is an array of struct mrsas_cmd pointers.
570 * Allocate the dynamic array first and then allocate individual
571 * commands.
572 */
573 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
574 if (instance->cmd_list == NULL) {
575 con_log(CL_NONE, (CE_WARN,
576 "Failed to allocate memory for cmd_list"));
577 return (DDI_FAILURE);
578 }
579
580 /* create a frame pool and assign one frame to each cmd */
	for (count = 0; count < max_cmd; count++) {
		instance->cmd_list[count] =
		    kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
584 if (instance->cmd_list[count] == NULL) {
585 con_log(CL_NONE, (CE_WARN,
586 "Failed to allocate memory for mrsas_cmd"));
587 goto mrsas_undo_cmds;
588 }
589 }
590
591 /* add all the commands to command pool */
592
593 INIT_LIST_HEAD(&instance->cmd_pool_list);
594 INIT_LIST_HEAD(&instance->cmd_pend_list);
595 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
596
597 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
598
	/* cmd index 0 is reserved for the IOC INIT frame */
	for (i = 1; i < reserve_cmd; i++) {
600 cmd = instance->cmd_list[i];
601 cmd->index = i;
602 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
603 }
604
605
606 for (i = reserve_cmd; i < max_cmd; i++) {
607 cmd = instance->cmd_list[i];
608 cmd->index = i;
609 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
610 }
611
612 return (DDI_SUCCESS);
613
614 mrsas_undo_cmds:
615 if (count > 0) {
616 /* free each cmd */
617 for (i = 0; i < count; i++) {
			if (instance->cmd_list[i] != NULL) {
				kmem_free(instance->cmd_list[i],
				    sizeof (struct mrsas_cmd));
			}
			instance->cmd_list[i] = NULL;
621 }
622 }
623
624 mrsas_undo_cmd_list:
	if (instance->cmd_list != NULL) {
		kmem_free(instance->cmd_list, sz);
	}
	instance->cmd_list = NULL;
628
629 return (DDI_FAILURE);
630 }
631
632
633 /*
634 * free_space_for_mpi2
635 */
636 void
637 free_space_for_mpi2(struct mrsas_instance *instance)
638 {
639 /* already freed */
640 if (instance->cmd_list == NULL) {
641 return;
642 }
643
644 /* First free the additional DMA buffer */
645 mrsas_tbolt_free_additional_dma_buffer(instance);
646
647 /* Free the request/reply descriptor pool */
648 free_req_rep_desc_pool(instance);
649
650 /* Free the MPI message pool */
651 destroy_mpi2_frame_pool(instance);
652
653 /* Free the MFI frame pool */
654 destroy_mfi_frame_pool(instance);
655
656 /* Free all the commands in the cmd_list */
657 /* Free the cmd_list buffer itself */
658 mrsas_free_cmd_pool(instance);
659 }
660
661
662 /*
663 * ThunderBolt(TB) memory allocations for commands/messages/frames.
664 */
665 int
666 alloc_space_for_mpi2(struct mrsas_instance *instance)
667 {
	/* Allocate command pool (memory for cmd_list & individual commands) */
669 if (mrsas_alloc_cmd_pool_tbolt(instance)) {
670 cmn_err(CE_WARN, "Error creating cmd pool");
671 return (DDI_FAILURE);
672 }
673
674 /* Initialize single reply size and Message size */
675 instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
676 instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
677
	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION))) / sizeof (MPI2_SGE_IO_UNION);
681 instance->max_sge_in_chain = (MR_COMMAND_SIZE -
682 MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
683
	/*
	 * Reduce SG count by 2: one SGE is consumed by the chain element
	 * and one is reserved for the group cmds feature in FW.
	 */
685 instance->max_num_sge = (instance->max_sge_in_main_msg +
686 instance->max_sge_in_chain - 2);
687 instance->chain_offset_mpt_msg =
688 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
689 instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
690 sizeof (MPI2_SGE_IO_UNION)) / 16;
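	/* NOTE: the chain offsets above are expressed in 16-byte units */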
691 instance->reply_read_index = 0;
692
693
694 /* Allocate Request and Reply descriptors Array */
695 /* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
696 if (alloc_req_rep_desc(instance)) {
		cmn_err(CE_WARN,
		    "Error, allocating memory for descriptor pool");
699 goto mpi2_undo_cmd_pool;
700 }
701 con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
702 instance->request_message_pool_phy));
703
704
705 /* Allocate MFI Frame pool - for MPI-MFI passthru commands */
706 if (create_mfi_frame_pool(instance)) {
707 cmn_err(CE_WARN,
708 "Error, allocating memory for MFI frame-pool");
		goto mpi2_undo_descriptor_pool;
710 }
711
712
713 /* Allocate MPI2 Message pool */
714 /*
715 * Make sure the buffer is alligned to 256 for raid message packet
716 * create a io request pool and assign one frame to each cmd
717 */
718
719 if (create_mpi2_frame_pool(instance)) {
720 cmn_err(CE_WARN,
721 "Error, allocating memory for MPI2 Message-pool");
722 goto mpi2_undo_mfi_frame_pool;
723 }
724
725 #ifdef DEBUG
726 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
727 instance->max_sge_in_main_msg));
728 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
729 instance->max_sge_in_chain));
730 con_log(CL_ANN1, (CE_CONT,
731 "[max_sge]0x%x", instance->max_num_sge));
732 con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
733 instance->chain_offset_mpt_msg));
734 con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
735 instance->chain_offset_io_req));
736 #endif
737
738
739 /* Allocate additional dma buffer */
740 if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
741 cmn_err(CE_WARN,
742 "Error, allocating tbolt additional DMA buffer");
743 goto mpi2_undo_message_pool;
744 }
745
746 return (DDI_SUCCESS);
747
748 mpi2_undo_message_pool:
749 destroy_mpi2_frame_pool(instance);
750
751 mpi2_undo_mfi_frame_pool:
752 destroy_mfi_frame_pool(instance);
753
mpi2_undo_descriptor_pool:
755 free_req_rep_desc_pool(instance);
756
757 mpi2_undo_cmd_pool:
758 mrsas_free_cmd_pool(instance);
759
760 return (DDI_FAILURE);
761 }
762
763
764 /*
765 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
766 */
767 int
mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
769 {
770
771 /*
772 * Reduce the max supported cmds by 1. This is to ensure that the
773 * reply_q_sz (1 more than the max cmd that driver may send)
774 * does not exceed max cmds that the FW can support
775 */
776
777 if (instance->max_fw_cmds > 1008) {
778 instance->max_fw_cmds = 1008;
		instance->max_fw_cmds = instance->max_fw_cmds - 1;
780 }
781
782 con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
783 " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
784
785
786 /* create a pool of commands */
	if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "alloc_space_for_mpi2() failed.");
790
791 return (DDI_FAILURE);
792 }
793
794 /* Send ioc init message */
	if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "mrsas_issue_init_mpi2() failed.");
798
799 goto fail_init_fusion;
800 }
801
802 instance->unroll.alloc_space_mpi2 = 1;
803
	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_init_adapter_tbolt: SUCCESSFUL\n"));
806
807 return (DDI_SUCCESS);
808
fail_init_fusion:
	free_space_for_mpi2(instance);
813
814 return (DDI_FAILURE);
815 }
816
817
818
819 /*
820 * init_mpi2
821 */
822 int
823 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
824 {
825 dma_obj_t init2_dma_obj;
826 int ret_val = DDI_SUCCESS;
827
828 /* allocate DMA buffer for IOC INIT message */
829 init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
830 init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
831 init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
832 init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
833 init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
834 init2_dma_obj.dma_attr.dma_attr_align = 256;
835
836 if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
837 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
838 cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
839 "could not allocate data transfer buffer.");
840 return (DDI_FAILURE);
841 }
842 (void) memset(init2_dma_obj.buffer, 2,
843 sizeof (Mpi2IOCInitRequest_t));
844
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_issue_init_mpi2: phys addr %x\n",
	    init2_dma_obj.dma_cookie[0].dmac_address));
848
849
850 /* Initialize and send ioc init message */
851 ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj,
852 init2_dma_obj.acc_handle);
853 if (ret_val == DDI_FAILURE) {
854 con_log(CL_ANN1, (CE_WARN,
855 "mrsas_issue_init_mpi2: Failed\n"));
856 goto fail_init_mpi2;
857 }
858
859 /* free IOC init DMA buffer */
	if (mrsas_free_dma_obj(instance, init2_dma_obj) != DDI_SUCCESS) {
862 con_log(CL_ANN1, (CE_WARN,
863 "mrsas_issue_init_mpi2: Free Failed\n"));
864 return (DDI_FAILURE);
865 }
866
867
	/* Get/Check and sync ld_map info */
	instance->map_id = 0;
	if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
		mrsas_tbolt_sync_map_info(instance);
872
	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_issue_init_mpi2: SUCCESSFUL\n"));
875
876 return (DDI_SUCCESS);
877
878 fail_init_mpi2:
	(void) mrsas_free_dma_obj(instance, init2_dma_obj);
880
881 return (DDI_FAILURE);
882 }
883
884 int
885 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj,
886 ddi_acc_handle_t accessp)
887 {
	int numbytes;
	int ret = DDI_SUCCESS;
	uint16_t flags;
895 struct mrsas_init_frame2 *mfiFrameInit2;
896 struct mrsas_header *frame_hdr;
897 Mpi2IOCInitRequest_t *init;
898 struct mrsas_cmd *cmd = NULL;
899 struct mrsas_drv_ver drv_ver_info;
900 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
901
902
903 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
904
905
906 #ifdef DEBUG
907 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
908 (int)sizeof (*mfiFrameInit2)));
909 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
910 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
911 (int)sizeof (struct mrsas_init_frame2)));
912 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
913 (int)sizeof (Mpi2IOCInitRequest_t)));
914 #endif
915
916 init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
917 numbytes = sizeof (*init);
918 bzero(init, numbytes);
919
920 ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
921 MPI2_FUNCTION_IOC_INIT);
922
923 ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
924 MPI2_WHOINIT_HOST_DRIVER);
925
926 /* set MsgVersion and HeaderVersion host driver was built with */
927 ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
928 MPI2_VERSION);
929
930 ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
931 MPI2_HEADER_VERSION);
932
933 ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
934 instance->raid_io_msg_size / 4);
935
936 ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
937 0);
938
939 ddi_put16(mpi2_dma_obj->acc_handle,
940 &init->ReplyDescriptorPostQueueDepth,
941 instance->reply_q_depth);
942 /*
943 * These addresses are set using the DMA cookie addresses from when the
944 * memory was allocated. Sense buffer hi address should be 0.
945 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
946 */
947
948 ddi_put32(mpi2_dma_obj->acc_handle,
949 &init->SenseBufferAddressHigh, 0);
950
951 ddi_put64(mpi2_dma_obj->acc_handle,
952 (uint64_t *)&init->SystemRequestFrameBaseAddress,
953 instance->io_request_frames_phy);
954
955 ddi_put64(mpi2_dma_obj->acc_handle,
956 &init->ReplyDescriptorPostQueueAddress,
957 instance->reply_frame_pool_phy);
958
959 ddi_put64(mpi2_dma_obj->acc_handle,
960 &init->ReplyFreeQueueAddress, 0);
961
962 cmd = instance->cmd_list[0];
963 if (cmd == NULL) {
964 return (DDI_FAILURE);
965 }
966 cmd->retry_count_for_ocr = 0;
967 cmd->pkt = NULL;
968 cmd->drv_pkt_time = 0;
969
970 mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
971 con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", mfiFrameInit2));
972
973 frame_hdr = &cmd->frame->hdr;
974
975 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
976 MFI_CMD_STATUS_POLL_MODE);
977
978 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
979
980 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
981
982 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
983
984 con_log(CL_ANN, (CE_CONT,
985 "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
986
	/* Init the MFI Header */
988 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
989 &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
990
991 con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
992
993 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
994 &mfiFrameInit2->cmd_status,
995 MFI_STAT_INVALID_STATUS);
996
997 con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
998
999 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1000 &mfiFrameInit2->queue_info_new_phys_addr_lo,
1001 mpi2_dma_obj->dma_cookie[0].dmac_address);
1002
1003 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1004 &mfiFrameInit2->data_xfer_len,
1005 sizeof (Mpi2IOCInitRequest_t));
1006
1007 con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1008 (int)init->ReplyDescriptorPostQueueAddress));
1009
	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1020
1021 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1022 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1023 cmn_err(CE_WARN,
1024 "fusion init: Could not allocate driver version buffer.");
1025 return (DDI_FAILURE);
1026 }
	/* copy driver version to dma buffer */
	(void) memset(instance->drv_ver_dma_obj.buffer, 0,
	    sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1033
	/* send driver version physical address to firmware */
	ddi_put64(cmd->frame_dma_obj.acc_handle,
	    &mfiFrameInit2->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1037
1038 con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1039 mfiFrameInit2->queue_info_new_phys_addr_lo,
1040 (int)sizeof (Mpi2IOCInitRequest_t)));
1041
1042 con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1043
1044 con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1045 cmd->scsi_io_request_phys_addr,
	    (int)sizeof (struct mrsas_init_frame2)));
1047
1048 /* disable interrupts before sending INIT2 frame */
1049 instance->func_ptr->disable_intr(instance);
1050
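	/*
	 * The IOC INIT frame is issued as an MFA pass-through: the request
	 * descriptor carries the frame's physical address, tagged with the
	 * MFA request-flags type instead of an SMID.
	 */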
1051 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1052 instance->request_message_pool;
1053 req_desc->Words = cmd->scsi_io_request_phys_addr;
1054 req_desc->MFAIo.RequestFlags =
1055 (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1056
1057 cmd->request_desc = req_desc;
1058
1059 /* issue the init frame */
1060 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1061
1062 con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1063 con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
1064 frame_hdr->cmd_status));
1065
1066 if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1067 &mfiFrameInit2->cmd_status) == 0) {
1068 con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1069 ret = DDI_SUCCESS;
1070 } else {
1071 con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1072 mrsas_dump_reply_desc(instance);
1073 goto fail_ioc_init;
1074 }
1075
1076 mrsas_dump_reply_desc(instance);
1077
1078 instance->unroll.verBuff = 1;
1079
	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_tbolt_ioc_init: SUCCESSFUL\n"));

	return (ret);
1085
1086
1087 fail_ioc_init:
1088
	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1090
1091 return (DDI_FAILURE);
1092 }
1093
int
wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1095 {
	int i;
	uint32_t wait_time = dump_io_wait_time;

	for (i = 0; i < wait_time; i++) {
1099 /*
1100 * Check For Outstanding poll Commands
1101 * except ldsync command and aen command
1102 */
1103 if (instance->fw_outstanding <= 2) {
1104 break;
1105 }
		drv_usecwait(10 * MILLISEC);	/* wait 10 milliseconds */
1107 /* complete commands from reply queue */
1108 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1109 }
1110 if (instance->fw_outstanding > 2) {
1111 return (1);
1112 }
1113 return (0);
1114 }
1115 /*
1116 * scsi_pkt handling
1117 *
1118 * Visible to the external world via the transport structure.
1119 */
1120
1121 int
1122 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1123 {
	struct mrsas_instance *instance = ADDR2MR(ap);
	struct scsa_cmd *acmd = PKT2CMD(pkt);
	struct mrsas_cmd *cmd = NULL;
	uchar_t cmd_done = 0;
1131
1132 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	if (instance->deadadapter == 1) {
		cmn_err(CE_WARN,
		    "mrsas_tbolt_tran_start: return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesn't take any more IOs");
1137 if (pkt) {
1138 pkt->pkt_reason = CMD_DEV_GONE;
1139 pkt->pkt_statistics = STAT_DISCON;
1140 }
1141 return (TRAN_FATAL_ERROR);
1142 }
1143 if (instance->adapterresetinprogress) {
1144 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1145 "returning mfi_pkt and setting TRAN_BUSY\n"));
1146 return (TRAN_BUSY);
1147 }
	(void) mrsas_tbolt_prepare_pkt(acmd);
1149
1150 cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1151
1152 /*
1153 * Check if the command is already completed by the mrsas_build_cmd()
1154 * routine. In which case the busy_flag would be clear and scb will be
1155 * NULL and appropriate reason provided in pkt_reason field
1156 */
1157 if (cmd_done) {
1158 pkt->pkt_reason = CMD_CMPLT;
1159 pkt->pkt_scbp[0] = STATUS_GOOD;
1160 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1161 | STATE_SENT_CMD;
1162 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1163 (*pkt->pkt_comp)(pkt);
1164 }
1165
1166 return (TRAN_ACCEPT);
1167 }
1168
1169 if (cmd == NULL) {
1170 return (TRAN_BUSY);
1171 }
1172
1173
1174 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1175 if (instance->fw_outstanding > instance->max_fw_cmds) {
			cmn_err(CE_WARN,
			    "Command Queue Full... Returning BUSY\n");
1178 return_raid_msg_pkt(instance, cmd);
1179 return (TRAN_BUSY);
1180 }
1181
1182 /* Synchronize the Cmd frame for the controller */
1183 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1184 DDI_DMA_SYNC_FORDEV);
1185
1186 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1187 "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0], cmd->index, cmd->SMID));
1188
1189 instance->func_ptr->issue_cmd(cmd, instance);
1190
		return (TRAN_ACCEPT);
	} else {
1194 instance->func_ptr->issue_cmd(cmd, instance);
1195 (void) wait_for_outstanding_poll_io(instance);
1196 return (TRAN_ACCEPT);
1197 }
1198 }
1199
1200 /*
1201 * prepare the pkt:
1202 * the pkt may have been resubmitted or just reused so
1203 * initialize some fields and do some checks.
1204 */
1205 int
1206 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1207 {
1208 struct scsi_pkt *pkt = CMD2PKT(acmd);
1209
1210
1211 /*
1212 * Reinitialize some fields that need it; the packet may
1213 * have been resubmitted
1214 */
1215 pkt->pkt_reason = CMD_CMPLT;
1216 pkt->pkt_state = 0;
1217 pkt->pkt_statistics = 0;
1218 pkt->pkt_resid = 0;
1219
1220 /*
1221 * zero status byte.
1222 */
1223 *(pkt->pkt_scbp) = 0;
1224
1225 return (0);
1226 }
1227
1228
1229 int
1230 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1231 struct scsa_cmd *acmd,
1232 struct mrsas_cmd *cmd,
1233 Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1234 uint32_t *datalen)
1235 {
1236 uint32_t MaxSGEs;
1237 int sg_to_process;
	uint32_t i, j;
1239 uint32_t numElements, endElement;
1240 Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
1241 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
1243
1244 con_log(CL_ANN1, (CE_NOTE,
1245 "chkpnt: Building Chained SGL :%d", __LINE__));
1246
	/* Clear the datalen before updating it. */
	*datalen = 0;
1252
1253 MaxSGEs = instance->max_sge_in_main_msg;
1254
1255 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1256 &scsi_raid_io->SGLFlags,
1257 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1258
	/* set data transfer flag. */
1260 if (acmd->cmd_flags & CFLAG_DMASEND) {
1261 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1262 &scsi_raid_io->Control,
1263 MPI2_SCSIIO_CONTROL_WRITE);
1264 } else {
1265 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1266 &scsi_raid_io->Control, MPI2_SCSIIO_CONTROL_READ);
1267 }
1268
1269
1270 numElements = acmd->cmd_cookiecnt;
1271
1272 con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1273
1274 if (numElements > instance->max_num_sge) {
1275 con_log(CL_ANN, (CE_NOTE,
1276 "[Max SGE Count Exceeded]:%x", numElements));
1277 return (numElements);
1278 }
1279
1280 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1281 &scsi_raid_io->RaidContext.numSGE, (uint8_t)numElements);
1282
1283 /* set end element in main message frame */
1284 endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
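	/* when chaining, the last main-frame slot is used for the chain SGE */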
1285
1286 /* prepare the scatter-gather list for the firmware */
1287 scsi_raid_io_sgl_ieee =
1288 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1289
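	/*
	 * Invader: pre-clear the Flags of the last SGE slot in the main
	 * frame; it is rewritten below if that slot is actually used.
	 */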
1290 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1291 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1292 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1293
1294 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1295 &sgl_ptr_end->Flags, 0);
1296 }
1297
1298 for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1299 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1300 &scsi_raid_io_sgl_ieee->Address,
1301 acmd->cmd_dmacookies[i].dmac_laddress);
1302
1303 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1304 &scsi_raid_io_sgl_ieee->Length,
1305 acmd->cmd_dmacookies[i].dmac_size);
1306
1307 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1308 &scsi_raid_io_sgl_ieee->Flags, 0);
1309
		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
			if (i == (numElements - 1)) {
				ddi_put8(instance->
				    mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io_sgl_ieee->Flags,
				    IEEE_SGE_FLAGS_END_OF_LIST);
			}
		}
1315
1316 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1317
1318 #ifdef DEBUG
1319 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1320 scsi_raid_io_sgl_ieee->Address));
1321 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1322 scsi_raid_io_sgl_ieee->Length));
1323 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1324 scsi_raid_io_sgl_ieee->Flags));
1325 #endif
1326
1327 }
1328
1329 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1330 &scsi_raid_io->ChainOffset, 0);
1331
1332 /* check if chained SGL required */
1333 if (i < numElements) {
1334
1335 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1336
		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
			uint16_t ioFlags = ddi_get16(
			    instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->IoFlags);

			if ((ioFlags &
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
				ddi_put8(instance->
				    mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->ChainOffset,
				    (U8)instance->chain_offset_io_req);
			} else {
				ddi_put8(instance->
				    mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->ChainOffset, 0);
			}
		} else {
			ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->ChainOffset,
			    (U8)instance->chain_offset_io_req);
		}
1352
1353 /* prepare physical chain element */
1354 ieeeChainElement = scsi_raid_io_sgl_ieee;
1355
1356 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1357 &ieeeChainElement->NextChainOffset, 0);
1358
		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
			ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &ieeeChainElement->Flags,
			    IEEE_SGE_FLAGS_CHAIN_ELEMENT);
		} else {
			ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &ieeeChainElement->Flags,
			    (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
		}
1367
1368 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1369 &ieeeChainElement->Length,
1370 (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1371
1372 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1373 &ieeeChainElement->Address,
1374 (U64)cmd->sgl_phys_addr);
1375
1376 sg_to_process = numElements - i;
1377
		con_log(CL_ANN1, (CE_NOTE,
		    "[Additional SGE Count]:%x", sg_to_process));
1380
1381 /* point to the chained SGL buffer */
1382 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1383
1384 /* build rest of the SGL in chained buffer */
1385 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1386 con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1387
1388 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1389 &scsi_raid_io_sgl_ieee->Address,
1390 acmd->cmd_dmacookies[i].dmac_laddress);
1391
1392 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1393 &scsi_raid_io_sgl_ieee->Length,
1394 acmd->cmd_dmacookies[i].dmac_size);
1395
1396 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1397 &scsi_raid_io_sgl_ieee->Flags, 0);
1398
			if (instance->device_id ==
			    PCI_DEVICE_ID_LSI_INVADER) {
				if (i == (numElements - 1)) {
					ddi_put8(instance->
					    mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io_sgl_ieee->Flags,
					    IEEE_SGE_FLAGS_END_OF_LIST);
				}
			}
1404
1405 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1406
#ifdef DEBUG
1408 con_log(CL_DLEVEL1, (CE_NOTE,
1409 "[SGL Address]: %" PRIx64,
1410 scsi_raid_io_sgl_ieee->Address));
1411 con_log(CL_DLEVEL1, (CE_NOTE,
1412 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1413 con_log(CL_DLEVEL1, (CE_NOTE,
1414 "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1415 #endif
1416
1417 i++;
1418 }
1419 }
1420
1421 return (0);
} /* end of BuildScatterGather */
1423
1424
1425 /*
1426 * build_cmd
1427 */
1428 struct mrsas_cmd *
1429 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1430 struct scsi_pkt *pkt, uchar_t *cmd_done)
1431 {
	uint8_t fp_possible = 0;
	uint32_t index;
	uint32_t context;
	uint32_t lba_count = 0;
	uint32_t start_lba_hi = 0;
	uint32_t start_lba_lo = 0;
	struct mrsas_cmd *cmd = NULL;
	struct scsa_cmd *acmd = PKT2CMD(pkt);
	MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
	Mpi2RaidSCSIIORequest_t *scsi_raid_io;
	uint32_t datalen;
	struct IO_REQUEST_INFO io_info;
	MR_FW_RAID_MAP_ALL *local_map_ptr;
	uint16_t pd_cmd_cdblen;
1454
1455 con_log(CL_DLEVEL1, (CE_NOTE,
1456 "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1457
1458 /* find out if this is logical or physical drive command. */
1459 acmd->islogical = MRDRV_IS_LOGICAL(ap);
1460 acmd->device_id = MAP_DEVICE_ID(instance, ap);
1461
1462 *cmd_done = 0;
1463
1464 /* get the command packet */
1465 if (!(cmd = get_raid_msg_pkt(instance))) {
1466 return (NULL);
1467 }
1468
1469 index = cmd->index;
1470 ReqDescUnion = mr_sas_get_request_descriptor(instance, index, cmd);
1471 ReqDescUnion->Words = 0;
1472 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1473 ReqDescUnion->SCSIIO.RequestFlags =
1474 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1475 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1476
1477
1478 cmd->request_desc = ReqDescUnion;
1479 cmd->pkt = pkt;
1480 cmd->cmd = acmd;
1481
1482 /* lets get the command directions */
1483 if (acmd->cmd_flags & CFLAG_DMASEND) {
1484 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1485 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1486 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1487 DDI_DMA_SYNC_FORDEV);
1488 }
1489 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1490 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1491 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1492 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1493 DDI_DMA_SYNC_FORCPU);
1494 }
1495 } else {
1496 con_log(CL_ANN, (CE_NOTE, "NO DMA\n"));
1497 }
1498
1499
1500 // get SCSI_IO raid message frame pointer
1501 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1502
1503 /* zero out SCSI_IO raid message frame */
	(void) memset(scsi_raid_io, 0, sizeof (Mpi2RaidSCSIIORequest_t));
1505
	/* Set the ldTargetId, which MR_BuildRaidContext() relies on */
1507 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1508 &scsi_raid_io->RaidContext.ldTargetId,
1509 acmd->device_id);
1510
1511 /* Copy CDB to scsi_io_request message frame */
1512 ddi_rep_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1513 (uint8_t *)pkt->pkt_cdbp,
1514 (uint8_t *)scsi_raid_io->CDB.CDB32,
1515 acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1516
1517 /*
1518 * Just the CDB length,rest of the Flags are zero
1519 * This will be modified later.
1520 */
1521 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1522 &scsi_raid_io->IoFlags,
1523 acmd->cmd_cdblen);
1524
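	/* remember the CDB length; the fast path may rebuild the CDB below */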
1525 pd_cmd_cdblen = acmd->cmd_cdblen;
1526
1527 switch (pkt->pkt_cdbp[0]) {
1528 case SCMD_READ:
1529 case SCMD_WRITE:
1530 case SCMD_READ_G1:
1531 case SCMD_WRITE_G1:
1532 case SCMD_READ_G4:
1533 case SCMD_WRITE_G4:
1534 case SCMD_READ_G5:
1535 case SCMD_WRITE_G5:
1536
1537 if (acmd->islogical) {
1538 /* Initialize sense Information */
1539 if (cmd->sense1 == NULL) {
1540 con_log(CL_ANN, (CE_NOTE,
1541 "tbolt_build_cmd: Sense buffer ptr NULL \n"));
1542 }
1543 bzero(cmd->sense1, SENSE_LENGTH);
1544 con_log(CL_DLEVEL2, (CE_NOTE,
1545 "tbolt_build_cmd CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1546
1547 if (acmd->cmd_cdblen == CDB_GROUP0) { /* 6-byte cdb */
1548 lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1549 start_lba_lo =
1550 ((uint32_t)(pkt->pkt_cdbp[3]) |
1551 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1552 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) << 16));
1553 } else if (acmd->cmd_cdblen == CDB_GROUP1) { /* 10-byte cdb */
1554 lba_count =
1555 (((uint16_t)(pkt->pkt_cdbp[8])) |
1556 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1557
1558 start_lba_lo =
1559 (((uint32_t)(pkt->pkt_cdbp[5])) |
1560 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1561 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1562 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1563
1564 } else if (acmd->cmd_cdblen == CDB_GROUP5) { /* 12-byte cdb */
1565 lba_count = (
1566 ((uint32_t)(pkt->pkt_cdbp[9])) |
1567 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1568 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1569 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1570
1571 start_lba_lo =
1572 (((uint32_t)(pkt->pkt_cdbp[5])) |
1573 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1574 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1575 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1576
1577 } else if (acmd->cmd_cdblen == CDB_GROUP4) { /* 16-byte cdb */
1578 lba_count = (
1579 ((uint32_t)(pkt->pkt_cdbp[13])) |
1580 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1581 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1582 ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1583
1584 start_lba_lo = (
1585 ((uint32_t)(pkt->pkt_cdbp[9])) |
1586 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1587 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1588 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1589
1590 start_lba_hi = (
1591 ((uint32_t)(pkt->pkt_cdbp[5])) |
1592 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1593 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1594 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1595 }
1596
			if (instance->tbolt &&
			    ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
				cmn_err(CE_WARN, "IO SECTOR COUNT exceeds "
				    "controller limit 0x%x sectors",
				    lba_count);
			}
1600
			(void) memset(&io_info, 0,
			    sizeof (struct IO_REQUEST_INFO));
			io_info.ldStartBlock =
			    ((uint64_t)start_lba_hi << 32) | start_lba_lo;
1603 io_info.numBlocks = lba_count;
1604 io_info.ldTgtId = acmd->device_id;
1605
1606 if (acmd->cmd_flags & CFLAG_DMASEND)
1607 io_info.isRead = 0;
1608 else
1609 io_info.isRead = 1;
1610
1611
			/* Acquire SYNC MAP UPDATE lock */
1613 mutex_enter(&instance->sync_map_mtx);
1614
1615 local_map_ptr = instance->ld_map[(instance->map_id & 1)];
1616
			if ((MR_TargetIdToLdGet(acmd->device_id,
			    local_map_ptr) >= MAX_LOGICAL_DRIVES) ||
			    !instance->fast_path_io) {
				cmn_err(CE_NOTE, "Fast Path NOT Possible, "
				    "targetId >= MAX_LOGICAL_DRIVES || "
				    "!instance->fast_path_io\n");
1620 fp_possible = 0;
				/*
				 * Set RegionLock flags to BYPASS:
				 * io_request->RaidContext.regLockFlags = 0;
				 */
				ddi_put8(instance->
				    mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.regLockFlags,
				    0);
1625 } else {
1626 if (MR_BuildRaidContext(instance, &io_info,
1627 &scsi_raid_io->RaidContext, local_map_ptr))
1628 fp_possible = io_info.fpOkForIo;
1629 }
1630
1631 if (!enable_fp) {
1632 fp_possible = 0;
1633 }
1634 con_log(CL_ANN1, (CE_NOTE,
1635 "enable_fp %d instance->fast_path_io %d fp_possible %d \n",
1636 enable_fp, instance->fast_path_io, fp_possible));
1637
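			/*
			 * Fast path: the I/O has already been translated via
			 * the FW RAID map, so it can be sent straight to the
			 * target physical device (devHandle), bypassing
			 * firmware I/O translation.
			 */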
			if (fp_possible) {
1640 /* Check for DIF enabled LD */
1641 if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1642 /* Prepare 32 Byte CDB for DIF capable Disk */
1643 mrsas_tbolt_prepare_cdb(instance,
1644 scsi_raid_io->CDB.CDB32,
1645 &io_info,
1646 scsi_raid_io,
1647 start_lba_lo);
1648 } else {
					mrsas_tbolt_set_pd_lba(
					    scsi_raid_io->CDB.CDB32,
					    (uint8_t *)&pd_cmd_cdblen,
					    io_info.pdBlock, io_info.numBlocks,
					    0);
					ddi_put16(instance->
					    mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->IoFlags,
					    pd_cmd_cdblen);
1654 }
1655
1656 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1657 &scsi_raid_io->Function,
1658 MPI2_FUNCTION_SCSI_IO_REQUEST);
1659
1660 ReqDescUnion->SCSIIO.RequestFlags =
1661 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1662 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1663
				if (instance->device_id ==
				    PCI_DEVICE_ID_LSI_INVADER) {
					uint8_t regLockFlags = ddi_get8(
					    instance->mpi2_frame_pool_dma_obj.
					    acc_handle, &scsi_raid_io->
					    RaidContext.regLockFlags);
					uint16_t IoFlags = ddi_get16(
					    instance->mpi2_frame_pool_dma_obj.
					    acc_handle,
					    &scsi_raid_io->IoFlags);

					if (regLockFlags ==
					    REGION_TYPE_UNUSED) {
						ReqDescUnion->SCSIIO.
						    RequestFlags =
						    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
						    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
					}

					IoFlags |=
					    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
					regLockFlags |=
					    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
					    MR_RL_FLAGS_SEQ_NUM_ENABLE);

					ddi_put8(instance->
					    mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->ChainOffset, 0);
					ddi_put8(instance->
					    mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->RaidContext.nsegType,
					    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
					    MPI2_TYPE_CUDA));
					ddi_put8(instance->
					    mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->RaidContext.
					    regLockFlags, regLockFlags);
					ddi_put16(instance->
					    mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->IoFlags, IoFlags);
				}
1686
				if ((instance->load_balance_info[
				    acmd->device_id].loadBalanceFlag) &&
				    (io_info.isRead)) {
					io_info.devHandle =
					    get_updated_dev_handle(
					    &instance->load_balance_info[
					    acmd->device_id], &io_info);
					cmd->load_balance_flag |=
					    MEGASAS_LOAD_BALANCE_FLAG;
				} else {
					cmd->load_balance_flag &=
					    ~MEGASAS_LOAD_BALANCE_FLAG;
				}
1692
1693 ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1694 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1695 &scsi_raid_io->DevHandle,
1696 io_info.devHandle);
1697
1698 } else {
1699 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1700 &scsi_raid_io->Function,
1701 MPI2_FUNCTION_LD_IO_REQUEST);
1702
1703 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1704 &scsi_raid_io->DevHandle, acmd->device_id);
1705
1706 ReqDescUnion->SCSIIO.RequestFlags =
1707 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1708 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1709
				ddi_put16(instance->
				    mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.timeoutValue,
				    local_map_ptr->raidMap.fpPdIoTimeoutSec);
1712
				if (instance->device_id ==
				    PCI_DEVICE_ID_LSI_INVADER) {
					uint8_t regLockFlags = ddi_get8(
					    instance->mpi2_frame_pool_dma_obj.
					    acc_handle, &scsi_raid_io->
					    RaidContext.regLockFlags);

					if (regLockFlags ==
					    REGION_TYPE_UNUSED) {
						ReqDescUnion->SCSIIO.
						    RequestFlags =
						    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
						    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
					}

					regLockFlags |=
					    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
					    MR_RL_FLAGS_SEQ_NUM_ENABLE);

					ddi_put8(instance->
					    mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->RaidContext.nsegType,
					    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
					    MPI2_TYPE_CUDA));
					ddi_put8(instance->
					    mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->RaidContext.
					    regLockFlags, regLockFlags);
				}
1728
1729 } /* Not FP */
1730
			/* Release SYNC MAP UPDATE lock */
1732 mutex_exit(&instance->sync_map_mtx);
1733
1734
			/*
			 * Set sense buffer physical address/length in
			 * scsi_io_request.
			 */
1736 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1737 &scsi_raid_io->SenseBufferLowAddress,
1738 cmd->sense_phys_addr1);
1739 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1740 &scsi_raid_io->SenseBufferLength,
1741 SENSE_LENGTH);
1742
			/* Construct SGL */
			ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->SGLOffset0,
			    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);

			(void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
			    scsi_raid_io, &datalen);
1750
1751 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1752 &scsi_raid_io->DataLength, datalen);
1753
1754 break;
1755
1756 }
1757 else {
1758 #ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
1759 break;
1760 #endif
1761 }
1762 /* fall through For all non-rd/wr cmds */
1763 default:
1764 switch (pkt->pkt_cdbp[0]) {
		case 0x35: {	/* SCMD_SYNCHRONIZE_CACHE */
1766 return_raid_msg_pkt(instance, cmd);
1767 *cmd_done = 1;
1768 return (NULL);
1769 }
1770
1771 case SCMD_MODE_SENSE:
1772 case SCMD_MODE_SENSE_G1: {
1773 union scsi_cdb *cdbp;
1774 uint16_t page_code;
1775
1776 cdbp = (void *)pkt->pkt_cdbp;
1777 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1778 switch (page_code) {
1779 case 0x3:
1780 case 0x4:
1781 (void) mrsas_mode_sense_build(pkt);
1782 return_raid_msg_pkt(instance, cmd);
1783 *cmd_done = 1;
1784 return (NULL);
1785 }
1786 break;
1787 }
1788
1789 default: {
			/*
			 * Here we need to handle PASSTHRU for
			 * logical devices, e.g. Inquiry.
			 */
1792
			if (!acmd->islogical) {
1794
				/* Acquire SYNC MAP UPDATE lock */
1796 mutex_enter(&instance->sync_map_mtx);
1797
1798 local_map_ptr = instance->ld_map[(instance->map_id & 1)];
1799
1800 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1801 &scsi_raid_io->Function, MPI2_FUNCTION_SCSI_IO_REQUEST);
1802
1803 ReqDescUnion->SCSIIO.RequestFlags =
1804 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1805 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1806
1807 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1808 &scsi_raid_io->DevHandle,
1809 local_map_ptr->raidMap.devHndlInfo[acmd->device_id].curDevHdl);
1810
1811
				/* Set regLockFlags to REGION_TYPE_BYPASS */
1813 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1814 &scsi_raid_io->RaidContext.regLockFlags, 0);
1815 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1816 &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1817 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1818 &scsi_raid_io->RaidContext.regLockLength, 0);
				ddi_put8(
				    instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.RAIDFlags,
				    MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
				    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
				ddi_put16(
				    instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.timeoutValue,
				    local_map_ptr->raidMap.fpPdIoTimeoutSec);
				ddi_put16(
				    instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.ldTargetId,
				    acmd->device_id);
1825 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1826 &scsi_raid_io->LUN[1], acmd->lun);
1827
1828 /* Release SYNC MAP UPDATE lock */
1829 mutex_exit(&instance->sync_map_mtx);
1830
1831 } else {
1832 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1833 &scsi_raid_io->Function, MPI2_FUNCTION_LD_IO_REQUEST);
1834 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1835 &scsi_raid_io->LUN[1], acmd->lun);
1836 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1837 &scsi_raid_io->DevHandle, acmd->device_id);
				ReqDescUnion->SCSIIO.RequestFlags =
				    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1840 }
1841
1842 /* Set sense buffer physical address/length in scsi_io_request.*/
1843 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1844 &scsi_raid_io->SenseBufferLowAddress,
1845 cmd->sense_phys_addr1);
1846 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1847 &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1848
1849 /* Construct SGL*/
1850 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1851 &scsi_raid_io->SGLOffset0,
1852 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1853
1854 mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1855 scsi_raid_io, &datalen);
1856
1857 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1858 &scsi_raid_io->DataLength, datalen);
1859
1860
1861 con_log(CL_ANN, (CE_CONT,
1862 "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1863 pkt->pkt_cdbp[0], acmd->device_id));
			con_log(CL_DLEVEL1, (CE_CONT,
			    "data length = %x\n", datalen));
1867 con_log(CL_DLEVEL1, (CE_CONT,
1868 "cdb length = %x\n",
1869 acmd->cmd_cdblen));
1870 }
1871 break;
1872 }
1873
1874 }
1875 #ifdef lint
1876 context = context;
1877 #endif
1878
1879 return (cmd);
1880 }
1881
1882 /*
 * mrsas_tbolt_tran_init_pkt - allocate & initialize a scsi_pkt structure
 * @ap:		target address
 * @pkt:	previously allocated scsi_pkt, or NULL to request a new one
 * @bp:		data buffer for the transfer, or NULL for non-data commands
 * @cmdlen:	required CDB length
 * @statuslen:	required SCSI status completion length
 * @tgtlen:	length of the target-driver private data area
 * @flags:	scsi_init_pkt() flags
 * @callback:	resource-allocation callback
1892 *
1893 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1894 * structure and DMA resources for a target driver request. The
1895 * tran_init_pkt() entry point is called when the target driver calls the
1896 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1897 * is a request to perform one or more of three possible services:
1898 * - allocation and initialization of a scsi_pkt structure
1899 * - allocation of DMA resources for data transfer
1900 * - reallocation of DMA resources for the next portion of the data transfer
1901 */
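/*
 * Illustrative only: a target driver typically reaches this entry point
 * through scsi_init_pkt(), e.g.
 *
 *	pkt = scsi_init_pkt(ap, NULL, bp, CDB_GROUP1,
 *	    sizeof (struct scsi_arq_status), 0, 0, callback, arg);
 *
 * With pkt == NULL both the packet and its DMA resources are set up in one
 * call; passing back the same pkt with a new buf window exercises only the
 * mrsas_dma_move() path below.
 */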
1902 struct scsi_pkt *
1903 mrsas_tbolt_tran_init_pkt(struct scsi_address *ap,
1904 register struct scsi_pkt *pkt,
1905 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1906 int flags, int (*callback)(), caddr_t arg)
1907 {
1908 struct scsa_cmd *acmd;
1909 struct mrsas_instance *instance;
1910 struct scsi_pkt *new_pkt;
1911
1912 instance = ADDR2MR(ap);
1913
1914 /* step #1 : pkt allocation */
1915 if (pkt == NULL) {
1916 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1917 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1918 if (pkt == NULL) {
1919 return (NULL);
1920 }
1921
1922 acmd = PKT2CMD(pkt);
1923
1924 /*
1925 * Initialize the new pkt - we redundantly initialize
1926 * all the fields for illustrative purposes.
1927 */
1928 acmd->cmd_pkt = pkt;
1929 acmd->cmd_flags = 0;
1930 acmd->cmd_scblen = statuslen;
1931 acmd->cmd_cdblen = cmdlen;
1932 acmd->cmd_dmahandle = NULL;
1933 acmd->cmd_ncookies = 0;
1934 acmd->cmd_cookie = 0;
1935 acmd->cmd_cookiecnt = 0;
1936 acmd->cmd_nwin = 0;
1937
1938 pkt->pkt_address = *ap;
1939 pkt->pkt_comp = (void (*)())NULL;
1940 pkt->pkt_flags = 0;
1941 pkt->pkt_time = 0;
1942 pkt->pkt_resid = 0;
1943 pkt->pkt_state = 0;
1944 pkt->pkt_statistics = 0;
1945 pkt->pkt_reason = 0;
1946 new_pkt = pkt;
1947 } else {
1948 acmd = PKT2CMD(pkt);
1949 new_pkt = NULL;
1950 }
1951
1952 /* step #2 : dma allocation/move */
1953 if (bp && bp->b_bcount != 0) {
1954 if (acmd->cmd_dmahandle == NULL) {
1955 if (mrsas_dma_alloc(instance, pkt, bp, flags,
1956 callback) == DDI_FAILURE) {
1957 if (new_pkt) {
1958 scsi_hba_pkt_free(ap, new_pkt);
1959 }
1960 return ((struct scsi_pkt *)NULL);
1961 }
1962 } else {
1963 if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1964 return ((struct scsi_pkt *)NULL);
1965 }
1966 }
1967 }
1968 return (pkt);
1969 }
1970
1971
1972 uint32_t
1973 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1974 {
1975 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1976 }
1977
1978 void
1979 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1980 {
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
	struct scsi_pkt *pkt;

	atomic_add_16(&instance->fw_outstanding, 1);
1985
	con_log(CL_ANN1, (CE_NOTE,
	    "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1987
1988 con_log(CL_DLEVEL1, (CE_CONT,
1989 " [req desc Words] %" PRIx64 " \n", req_desc->Words));
	con_log(CL_DLEVEL1, (CE_CONT,
	    " [req desc low part] %x \n",
	    (uint_t)(req_desc->Words & 0xffffffff)));
1993 con_log(CL_DLEVEL1, (CE_CONT,
1994 " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1995 pkt = cmd->pkt;
1996
1997 if (pkt) {
1998 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1999 "ISSUED CMD TO FW : called : cmd:"
2000 ": %p instance : %p pkt : %p pkt_time : %x\n",
2001 gethrtime(), (void *)cmd, (void *)instance,
2002 (void *)pkt, cmd->drv_pkt_time));
2003 if (instance->adapterresetinprogress) {
2004 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2005 con_log(CL_ANN, (CE_NOTE,
2006 "TBOLT Reset the scsi_pkt timer"));
2007 } else {
2008 push_pending_mfi_pkt(instance, cmd);
2009 }
2010
2011 } else {
2012 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2013 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
2014 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
2015 }
2016
2017 /* Issue the command to the FW */
2018 mutex_enter(&instance->reg_write_mtx);
2019 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2020 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2021 mutex_exit(&instance->reg_write_mtx);
2022 }
2023
2024 /*
2025 * issue_cmd_in_sync_mode
2026 */
2027 int
2028 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
2029 struct mrsas_cmd *cmd)
2030 {
2031 int i;
2032 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2033 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2034
2035 struct mrsas_header *hdr;
2036 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2037
	con_log(CL_ANN, (CE_NOTE,
	    "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X", cmd->SMID));
2039
2040
2041 if (instance->adapterresetinprogress) {
2042 cmd->drv_pkt_time = ddi_get16
2043 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2044 if (cmd->drv_pkt_time < debug_timeout_g)
2045 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2046 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
2047 "RESET-IN-PROGRESS, issue cmd & return.\n"));
2048
2049 mutex_enter(&instance->reg_write_mtx);
2050 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2051 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2052 mutex_exit(&instance->reg_write_mtx);
2053
2054 return (DDI_SUCCESS);
2055 } else {
2056 con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: pushing the pkt\n"));
2057 push_pending_mfi_pkt(instance, cmd);
2058 }
2059
2060 con_log(CL_DLEVEL2, (CE_NOTE,
2061 "HighQport offset :%p",
2062 (uint32_t *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
2063 con_log(CL_DLEVEL2, (CE_NOTE,
2064 "LowQport offset :%p",
2065 (uint32_t *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
2066
2067 cmd->sync_cmd = MRSAS_TRUE;
2068 cmd->cmd_status = ENODATA;
2069
2070
2071 mutex_enter(&instance->reg_write_mtx);
2072 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2073 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2074 mutex_exit(&instance->reg_write_mtx);
2075
2076 con_log(CL_ANN1, (CE_NOTE,
2077 " req desc high part %x \n", (uint_t)(req_desc->Words >> 32)));
2078 con_log(CL_ANN1, (CE_NOTE,
2079 " req desc low part %x \n", (uint_t)(req_desc->Words & 0xffffffff)));
2080
2081 mutex_enter(&instance->int_cmd_mtx);
2082 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2083 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2084 }
2085 mutex_exit(&instance->int_cmd_mtx);
2086
2087
	if (i < (msecs - 1)) {
2089 return (DDI_SUCCESS);
2090 } else {
2091 return (DDI_FAILURE);
2092 }
2093 }
2094
2095 /*
2096 * issue_cmd_in_poll_mode
2097 */
2098 int
2099 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2100 struct mrsas_cmd *cmd)
2101 {
2102 int i;
2103 uint16_t flags;
2104 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct mrsas_header *frame_hdr;
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;

	con_log(CL_ANN, (CE_NOTE,
	    "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X", cmd->SMID));

	frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2112 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2113 MFI_CMD_STATUS_POLL_MODE);
2114 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2115 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2116 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
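	/*
	 * Poll protocol: cmd_status in the MFI frame is pre-set to 0xFF
	 * (MFI_CMD_STATUS_POLL_MODE) above, and the
	 * MFI_FRAME_DONT_POST_IN_REPLY_QUEUE flag keeps the completion out
	 * of the reply ring; the loop below simply watches the frame until
	 * the firmware overwrites cmd_status.
	 */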
2117
2118 con_log(CL_ANN1, (CE_NOTE,
2119 " req desc low part %x \n", (uint_t)(req_desc->Words & 0xffffffff)));
2120 con_log(CL_ANN1, (CE_NOTE,
2121 " req desc high part %x \n", (uint_t)(req_desc->Words >> 32)));
2122
2123 /* issue the frame using inbound queue port */
2124 mutex_enter(&instance->reg_write_mtx);
2125 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2126 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2127 mutex_exit(&instance->reg_write_mtx);
2128
2129 for (i = 0; i < msecs && (
2130 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2131 == MFI_CMD_STATUS_POLL_MODE); i++) {
2132 /* wait for cmd_status to change from 0xFF */
2133 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2134 }
2135
2136 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2137 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2138 con_log(CL_ANN1, (CE_NOTE,
2139 " cmd failed %" PRIx64 " \n", (req_desc->Words)));
2140 return (DDI_FAILURE);
2141 }
2142
2143 return (DDI_SUCCESS);
2144 }
2145
2146 void
2147 tbolt_enable_intr(struct mrsas_instance *instance)
2148 {
2149 uint32_t mask;
2150
	/* TODO: For Thunderbolt/Invader also clear intr on enable */
	// writel(~0, &regs->outbound_intr_status);
	// readl(&regs->outbound_intr_status);
2154
2155 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2156
2157 /* dummy read to force PCI flush */
2158 mask = RD_OB_INTR_MASK(instance);
2159
2160 }
2161
2162 void
2163 tbolt_disable_intr(struct mrsas_instance *instance)
2164 {
2165 uint32_t mask = 0xFFFFFFFF;
2166 uint32_t status;
2167
2168
2169 WR_OB_INTR_MASK(mask, instance);
2170
2171 /* Dummy readl to force pci flush */
2172
2173 status = RD_OB_INTR_MASK(instance);
2174 }
2175
2176
2177 int
2178 tbolt_intr_ack(struct mrsas_instance *instance)
2179 {
2180 uint32_t status;
2181
2182 /* check if it is our interrupt */
2183 status = RD_OB_INTR_STATUS(instance);
2184 con_log(CL_ANN1, (CE_NOTE,
2185 "chkpnt: Entered tbolt_intr_ack status = %d \n", status));
2186
2187 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2188 return (DDI_INTR_UNCLAIMED);
2189 }
2190
2191 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2192 /* clear the interrupt by writing back the same value */
2193 WR_OB_INTR_STATUS(status, instance);
2194 /* dummy READ */
2195 RD_OB_INTR_STATUS(instance);
2196 }
2197 return (DDI_INTR_CLAIMED);
2198 }
2199
2200 /*
2201 * get_raid_msg_pkt : Get a command from the free pool
2202 * After successful allocation, the caller of this routine
2203 * must clear the frame buffer (memset to zero) before
2204 * using the packet further.
2205 *
2206 * ***** Note *****
2207 * After clearing the frame buffer the context id of the
2208 * frame buffer SHOULD be restored back.
2209 */
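/*
 * Typical caller pattern (see mrsas_tbolt_sync_map_info() below):
 *
 *	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
 *	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
 *	    cmd->index);
 */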
2210
2211 struct mrsas_cmd *
2212 get_raid_msg_pkt(struct mrsas_instance *instance)
2213 {
2214 mlist_t *head = &instance->cmd_pool_list;
2215 struct mrsas_cmd *cmd = NULL;
2216
2217 mutex_enter(&instance->cmd_pool_mtx);
2218 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2219
2220
2221 if (!mlist_empty(head)) {
2222 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2223 mlist_del_init(head->next);
2224 }
2225 if (cmd != NULL) {
2226 cmd->pkt = NULL;
2227 cmd->retry_count_for_ocr = 0;
2228 cmd->drv_pkt_time = 0;
2229 }
2230 mutex_exit(&instance->cmd_pool_mtx);
2231
2232 if (cmd != NULL)
2233 bzero(cmd->scsi_io_request,
2234 sizeof (Mpi2RaidSCSIIORequest_t));
2235 return (cmd);
2236 }
2237
2238 struct mrsas_cmd *
2239 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2240 {
2241 mlist_t *head = &instance->cmd_app_pool_list;
2242 struct mrsas_cmd *cmd = NULL;
2243
2244 mutex_enter(&instance->cmd_app_pool_mtx);
2245 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2246
2247 if (!mlist_empty(head)) {
2248 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2249 mlist_del_init(head->next);
2250 }
2251 if (cmd != NULL) {
2252 cmd->retry_count_for_ocr = 0;
2253 cmd->drv_pkt_time = 0;
2254 cmd->pkt = NULL;
2255 cmd->request_desc = NULL;
2256
2257 }
2258
2259 mutex_exit(&instance->cmd_app_pool_mtx);
2260
2261 if (cmd != NULL) {
2262 bzero(cmd->scsi_io_request,
2263 sizeof (Mpi2RaidSCSIIORequest_t));
2264 }
2265
2266 return (cmd);
2267 }
2268
2269 /*
2270 * return_raid_msg_pkt : Return a cmd to free command pool
2271 */
2272 void
2273 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2274 {
2275 mutex_enter(&instance->cmd_pool_mtx);
2276 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2277
2278
2279 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2280
2281 mutex_exit(&instance->cmd_pool_mtx);
2282 }
2283
2284 void
2285 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2286 {
2287 mutex_enter(&instance->cmd_app_pool_mtx);
2288 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2289
2290 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2291
2292 mutex_exit(&instance->cmd_app_pool_mtx);
2293 }
2294
2295
2296 void
2297 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2298 struct mrsas_cmd *cmd)
2299 {
2300 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2301 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2302 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2303 uint32_t index;
2304
2305 if (!instance->tbolt) {
2306 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled.\n"));
2307 return;
2308 }
2309
2310 index = cmd->index;
2311
2312 ReqDescUnion =
2313 mr_sas_get_request_descriptor(instance, index, cmd);
2314
2315 if (!ReqDescUnion) {
2316 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2317 return;
2318 }
2319
2320 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2321
2322 ReqDescUnion->Words = 0;
2323
2324 ReqDescUnion->SCSIIO.RequestFlags =
2325 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2326 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2327
2328 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2329
2330 cmd->request_desc = ReqDescUnion;
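	/*
	 * The 64-bit descriptor built here is what tbolt_issue_cmd() later
	 * splits into two 32-bit writes to the inbound queue ports; the
	 * SMID ties the eventual reply descriptor back to this command.
	 */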
2331
2332 // get raid message frame pointer
2333 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2334
	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
		Mpi25IeeeSgeChain64_t *sgl_ptr_end =
		    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
		sgl_ptr_end += instance->max_sge_in_main_msg - 1;
		ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &sgl_ptr_end->Flags, 0);
	}
2341
2342 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2343 &scsi_raid_io->Function,
2344 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
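	/*
	 * The legacy MFI frame itself is the payload of this MPI2
	 * pass-through: the single IEEE chain element set up below points
	 * the IOC at the frame's physical address.
	 */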
2345
2346 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2347 &scsi_raid_io->SGLOffset0,
2348 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2349
2350 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2351 &scsi_raid_io->ChainOffset,
2352 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2353
2354 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2355 &scsi_raid_io->SenseBufferLowAddress,
2356 cmd->sense_phys_addr1);
2357
2358
2359 scsi_raid_io_sgl_ieee =
2360 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2361
2362 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
2363 &scsi_raid_io_sgl_ieee->Address,
2364 (U64)cmd->frame_phys_addr);
2365
2366 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2367 &scsi_raid_io_sgl_ieee->Flags,
2368 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2369 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2370 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2371 &scsi_raid_io_sgl_ieee->Length, 1024); //MEGASAS_MAX_SZ_CHAIN_FRAME
2372
2373 con_log(CL_ANN1, (CE_NOTE,
2374 "[MFI CMD PHY ADDRESS]:%" PRIx64,
2375 scsi_raid_io_sgl_ieee->Address));
2376 con_log(CL_ANN1, (CE_NOTE,
2377 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2378 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2379 scsi_raid_io_sgl_ieee->Flags));
2380 }
2381
2382
2383 void
2384 tbolt_complete_cmd(struct mrsas_instance *instance,
2385 struct mrsas_cmd *cmd)
2386 {
2387 uint8_t status;
2388 uint8_t extStatus;
2389 uint8_t arm;
2390 struct scsa_cmd *acmd;
2391 struct scsi_pkt *pkt;
2392 struct scsi_arq_status *arqstat;
2393 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2394 LD_LOAD_BALANCE_INFO *lbinfo;
2395 int i;
2396
2397 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2398
2399 status = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2400 &scsi_raid_io->RaidContext.status);
2401 extStatus = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2402 &scsi_raid_io->RaidContext.extStatus);
2403
2404 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2405 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2406
2407 if (status != MFI_STAT_OK) {
2408 con_log(CL_ANN, (CE_WARN,
2409 "IO Cmd Failed SMID %x", cmd->SMID));
2410 } else {
2411 con_log(CL_ANN, (CE_NOTE,
2412 "IO Cmd Success SMID %x", cmd->SMID));
2413 }
2414
2415 /* regular commands */
2416
2417 switch (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2418 &scsi_raid_io->Function)) {
2419
	case MPI2_FUNCTION_SCSI_IO_REQUEST:	/* Fast Path IO */
2421 acmd = (struct scsa_cmd *)cmd->cmd;
2422 lbinfo = &instance->load_balance_info[acmd->device_id];
2423
		if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
			arm = (lbinfo->raid1DevHandle[0] ==
			    scsi_raid_io->DevHandle) ? 0 : 1;

			lbinfo->scsi_pending_cmds[arm]--;
			cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
		}
2430 con_log(CL_DLEVEL3, (CE_NOTE,
2431 "FastPath IO Completion Success "));
2432
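	/* FALLTHROUGH -- fast-path completion shares the LD IO path below */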
	case MPI2_FUNCTION_LD_IO_REQUEST: {	/* Regular Path IO */
2434 acmd = (struct scsa_cmd *)cmd->cmd;
2435 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2436
2437 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2438 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2439 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2440 acmd->cmd_dma_offset,
2441 acmd->cmd_dma_len,
2442 DDI_DMA_SYNC_FORCPU);
2443 }
2444 }
2445
2446 pkt->pkt_reason = CMD_CMPLT;
2447 pkt->pkt_statistics = 0;
2448 pkt->pkt_state = STATE_GOT_BUS
2449 | STATE_GOT_TARGET | STATE_SENT_CMD
2450 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2451
2452 con_log(CL_ANN, (CE_CONT,
2453 " CDB[0] = %x completed for %s: size %lx SMID %x cmd_status %x",
2454 pkt->pkt_cdbp[0],
2455 ((acmd->islogical) ? "LD" : "PD"),
2456 acmd->cmd_dmacount, cmd->SMID, status));
2457
2458 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2459 struct scsi_inquiry *inq;
2460
2461 if (acmd->cmd_dmacount != 0) {
2462 bp_mapin(acmd->cmd_buf);
2463 inq = (struct scsi_inquiry *)
2464 acmd->cmd_buf->b_un.b_addr;
2465
2466 /* don't expose physical drives to OS */
2467 if (acmd->islogical &&
2468 (status == MFI_STAT_OK)) {
2469 display_scsi_inquiry(
2470 (caddr_t)inq);
2471 }
2472 #ifdef PDSUPPORT
2473 else if ((status ==
2474 MFI_STAT_OK) && inq->inq_dtype ==
2475 DTYPE_DIRECT) {
2476
2477 display_scsi_inquiry(
2478 (caddr_t)inq);
2479 }
2480 #endif
2481 else {
2482 /* for physical disk */
2483 status =
2484 MFI_STAT_DEVICE_NOT_FOUND;
2485 }
2486 }
2487 }
2488
2489 switch (status) {
2490 case MFI_STAT_OK:
2491 pkt->pkt_scbp[0] = STATUS_GOOD;
2492 break;
2493 case MFI_STAT_LD_CC_IN_PROGRESS:
2494 case MFI_STAT_LD_RECON_IN_PROGRESS:
2495 pkt->pkt_scbp[0] = STATUS_GOOD;
2496 break;
2497 case MFI_STAT_LD_INIT_IN_PROGRESS:
2498 pkt->pkt_reason = CMD_TRAN_ERR;
2499 break;
2500 case MFI_STAT_SCSI_IO_FAILED:
2501 cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
2502 pkt->pkt_reason = CMD_TRAN_ERR;
2503 break;
2504 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2505 con_log(CL_ANN, (CE_WARN,
2506 "tbolt_complete_cmd: scsi_done with error"));
2507
2508 pkt->pkt_reason = CMD_CMPLT;
2509 ((struct scsi_status *)
2510 pkt->pkt_scbp)->sts_chk = 1;
2511
2512 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2513 con_log(CL_ANN, (CE_WARN, "TEST_UNIT_READY fail"));
2514 } else {
2515 pkt->pkt_state |= STATE_ARQ_DONE;
2516 arqstat = (void *)(pkt->pkt_scbp);
2517 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2518 arqstat->sts_rqpkt_resid = 0;
2519 arqstat->sts_rqpkt_state |=
2520 STATE_GOT_BUS | STATE_GOT_TARGET
2521 | STATE_SENT_CMD
2522 | STATE_XFERRED_DATA;
2523 *(uint8_t *)&arqstat->sts_rqpkt_status =
2524 STATUS_GOOD;
2525 con_log(CL_ANN1, (CE_NOTE,
2526 "Copying Sense data %x",
2527 cmd->SMID));
2528
2529 ddi_rep_get8(
2530 instance->
2531 mpi2_frame_pool_dma_obj.acc_handle,
2532 (uint8_t *)
2533 &(arqstat->sts_sensedata),
2534 cmd->sense1,
2535 sizeof (struct scsi_extended_sense),
2536 DDI_DEV_AUTOINCR);
2537
2538 }
2539 break;
			case MFI_STAT_LD_OFFLINE:
				cmn_err(CE_WARN,
				    "tbolt_complete_cmd: ld offline "
				    "CDB[0]=0x%x targetId=0x%x "
				    "devhandle=0x%x\n",
				    ddi_get8(instance->
				    mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->CDB.CDB32[0]),
				    ddi_get16(instance->
				    mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.ldTargetId),
				    ddi_get16(instance->
				    mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->DevHandle));
2547 pkt->pkt_reason = CMD_DEV_GONE;
2548 pkt->pkt_statistics = STAT_DISCON;
2549 break;
2550 case MFI_STAT_DEVICE_NOT_FOUND:
2551 con_log(CL_ANN, (CE_CONT,
2552 "tbolt_complete_cmd: device not found error"));
2553 pkt->pkt_reason = CMD_DEV_GONE;
2554 pkt->pkt_statistics = STAT_DISCON;
2555 break;
2556
2557 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2558 pkt->pkt_state |= STATE_ARQ_DONE;
2559 pkt->pkt_reason = CMD_CMPLT;
2560 ((struct scsi_status *)
2561 pkt->pkt_scbp)->sts_chk = 1;
2562
2563 arqstat = (void *)(pkt->pkt_scbp);
2564 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2565 arqstat->sts_rqpkt_resid = 0;
2566 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2567 | STATE_GOT_TARGET | STATE_SENT_CMD
2568 | STATE_XFERRED_DATA;
2569 *(uint8_t *)&arqstat->sts_rqpkt_status =
2570 STATUS_GOOD;
2571
2572 arqstat->sts_sensedata.es_valid = 1;
2573 arqstat->sts_sensedata.es_key =
2574 KEY_ILLEGAL_REQUEST;
2575 arqstat->sts_sensedata.es_class =
2576 CLASS_EXTENDED_SENSE;
2577
2578 /*
2579 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2580 * ASC: 0x21h; ASCQ: 0x00h;
2581 */
2582 arqstat->sts_sensedata.es_add_code = 0x21;
2583 arqstat->sts_sensedata.es_qual_code = 0x00;
2584 break;
			case MFI_STAT_INVALID_CMD:
			case MFI_STAT_INVALID_DCMD:
			case MFI_STAT_INVALID_PARAMETER:
			case MFI_STAT_INVALID_SEQUENCE_NUMBER:
			default:
				cmn_err(CE_WARN,
				    "tbolt_complete_cmd: Unhandled status "
				    "0x%x", status);
				pkt->pkt_reason = CMD_TRAN_ERR;
				break;
2594 }
2595
2596 atomic_add_16(&instance->fw_outstanding, (-1));
2597
2598 /* Call the callback routine */
2599 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
2600 pkt->pkt_comp) {
2601 (*pkt->pkt_comp)(pkt);
2602 }
2603
2604 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2605
2606 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2607 &scsi_raid_io->RaidContext.status, 0);
2608
2609 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2610 &scsi_raid_io->RaidContext.extStatus, 0);
2611
2612 return_raid_msg_pkt(instance, cmd);
2613 break;
2614 }
2615 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: // MFA command.
2616
		if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
		    cmd->frame->dcmd.mbox.b[1] == 1) {
2621
2622 mutex_enter(&instance->sync_map_mtx);
2623
2624 con_log(CL_ANN, (CE_NOTE,
2625 "LDMAP sync command SMID RECEIVED 0x%X",
2626 cmd->SMID));
			if (cmd->frame->hdr.cmd_status != 0) {
				cmn_err(CE_WARN,
				    "map sync failed, status = 0x%x.\n",
				    cmd->frame->hdr.cmd_status);
			} else {
				instance->map_id++;
				cmn_err(CE_NOTE,
				    "map sync received, switched map_id "
				    "to %" PRIu64 " \n", instance->map_id);
			}
2636
			if (MR_ValidateMapInfo(
			    instance->ld_map[instance->map_id & 1],
			    instance->load_balance_info))
				instance->fast_path_io = 1;
			else
				instance->fast_path_io = 0;

			con_log(CL_ANN, (CE_NOTE,
			    "instance->fast_path_io %d \n",
			    instance->fast_path_io));
2644
2645 instance->unroll.syncCmd = 0;
2646
			if (instance->map_update_cmd == cmd) {
2648 return_raid_msg_pkt(instance, cmd);
2649 atomic_add_16(&instance->fw_outstanding, (-1));
2650 mrsas_tbolt_sync_map_info(instance);
2651 }
2652
2653 cmn_err(CE_NOTE, "LDMAP sync completed.\n");
2654 mutex_exit(&instance->sync_map_mtx);
2655 break;
2656 }
2657
2658 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2659 con_log(CL_ANN1, (CE_CONT,
2660 "AEN command SMID RECEIVED 0x%X",
2661 cmd->SMID));
			if ((instance->aen_cmd == cmd) &&
			    (instance->aen_cmd->abort_aen)) {
				con_log(CL_ANN, (CE_WARN,
				    "mrsas_softintr: "
				    "aborted_aen returned"));
			} else {
				atomic_add_16(&instance->fw_outstanding,
				    (-1));
				service_mfi_aen(instance, cmd);
			}
2673 }
2674
		if (cmd->sync_cmd == MRSAS_TRUE) {
			con_log(CL_ANN1, (CE_CONT,
			    "Sync-mode Command Response SMID RECEIVED 0x%X",
			    cmd->SMID));

			tbolt_complete_cmd_in_sync_mode(instance, cmd);
		} else {
			con_log(CL_ANN, (CE_CONT,
			    "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
			    cmd->SMID));
		}
2688 break;
2689 default:
2690 /* free message */
		con_log(CL_ANN, (CE_NOTE,
		    "tbolt_complete_cmd: unknown function type"));
2692 break;
2693 }
2694 }
2695
2696 uint_t
2697 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2698 {
2699 uint8_t replyType;
2700 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2701 Mpi2ReplyDescriptorsUnion_t *desc;
2702 uint16_t smid;
2703 union desc_value d_val;
2704 struct mrsas_cmd *cmd;
2705 uint32_t i;
2706 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2707 uint8_t status;
2708
2709 struct mrsas_header *hdr;
2710 struct scsi_pkt *pkt;
2711
2712 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2713 0, 0, DDI_DMA_SYNC_FORDEV);
2714
2715 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2716 0, 0, DDI_DMA_SYNC_FORCPU);
2717
2718 desc = instance->reply_frame_pool;
2719 desc += instance->reply_read_index;
2720
2721 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2722 replyType = replyDesc->ReplyFlags &
2723 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2724
2725 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2726 return (DDI_INTR_UNCLAIMED);
2727
2728 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64 " \n",
2729 desc, desc->Words));
2730
2731 d_val.word = desc->Words;
2732
2733
2734 /* Read Reply descriptor */
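	/*
	 * Consume descriptors until the all-ones "unused" sentinel: each
	 * processed slot is recycled by writing ~0 back into it, the read
	 * index wraps at reply_q_depth, and the final index is published
	 * to the firmware through WR_MPI2_REPLY_POST_INDEX below.
	 */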
2735 while ((d_val.u1.low != 0xffffffff) &&
2736 (d_val.u1.high != 0xffffffff)) {
2737
2738 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2739 0, 0, DDI_DMA_SYNC_FORCPU);
2740
2741 smid = replyDesc->SMID;
2742
2743 if (!smid || smid > instance->max_fw_cmds + 1) {
2744 con_log(CL_ANN1, (CE_NOTE,
2745 "Reply Desc at Break = %p Words = %" PRIx64 " \n",
2746 desc, desc->Words));
2747 break;
2748 }
2749
2750 cmd = instance->cmd_list[smid - 1];
		if (!cmd) {
			con_log(CL_ANN1, (CE_NOTE,
			    "mr_sas_tbolt_process_outstanding_cmd: invalid "
			    "command or poll command received in "
			    "completion path\n"));
		} else {
2757 mutex_enter(&instance->cmd_pend_mtx);
2758 if (cmd->sync_cmd == MRSAS_TRUE) {
2759 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2760 if (hdr) {
2761 con_log(CL_ANN1, (CE_NOTE,
2762 "mr_sas_tbolt_process_outstanding_cmd:"
2763 " mlist_del_init(&cmd->list).\n"));
2764 mlist_del_init(&cmd->list);
2765 }
2766 } else {
2767 pkt = cmd->pkt;
2768 if (pkt) {
2769 con_log(CL_ANN1, (CE_NOTE,
2770 "mr_sas_tbolt_process_outstanding_cmd:"
2771 "mlist_del_init(&cmd->list).\n"));
2772 mlist_del_init(&cmd->list);
2773 }
2774 }
2775
2776 mutex_exit(&instance->cmd_pend_mtx);
2777
2778 tbolt_complete_cmd(instance, cmd);
2779 }
		// set it back to all 0xffffffff.
2781 desc->Words = (uint64_t)~0;
2782
2783 instance->reply_read_index++;
2784
2785 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2786 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2787 instance->reply_read_index = 0;
2788 }
2789
2790 /* Get the next reply descriptor */
2791 if (!instance->reply_read_index)
2792 desc = instance->reply_frame_pool;
2793 else
2794 desc++;
2795
2796 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2797
2798 d_val.word = desc->Words;
2799
2800 con_log(CL_ANN1, (CE_NOTE,
2801 "Next Reply Desc = %p Words = %" PRIx64 "\n",
2802 desc, desc->Words));
2803
2804 replyType = replyDesc->ReplyFlags &
2805 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2806
2807 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2808 break;
2809
2810 } /* End of while loop. */
2811
2812 /* update replyIndex to FW */
2813 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2814
2815
2816 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2817 0, 0, DDI_DMA_SYNC_FORDEV);
2818
2819 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2820 0, 0, DDI_DMA_SYNC_FORCPU);
2821 return (DDI_INTR_CLAIMED);
2822 }
2823
2824
2825
2826
2827 /*
2828 * complete_cmd_in_sync_mode - Completes an internal command
2829 * @instance: Adapter soft state
2830 * @cmd: Command to be completed
2831 *
 * The tbolt_issue_cmd_in_sync_mode() function waits for a command to
 * complete after it issues the command. This function wakes up that
 * waiting routine by broadcasting on the int_cmd_cv condition variable.
 */
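/*
 * Waiter/waker pairing, for reference (see tbolt_issue_cmd_in_sync_mode()):
 *
 *	mutex_enter(&instance->int_cmd_mtx);
 *	while (cmd->cmd_status == ENODATA)
 *		cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
 *	mutex_exit(&instance->int_cmd_mtx);
 *
 * (The actual waiter bounds the loop with a retry counter.)
 */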
2836 void
2837 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2838 struct mrsas_cmd *cmd)
2839 {
2840
2841 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2842 &cmd->frame->io.cmd_status);
2843
2844 cmd->sync_cmd = MRSAS_FALSE;
2845
2846 mutex_enter(&instance->int_cmd_mtx);
2847 if (cmd->cmd_status == ENODATA) {
2848 cmd->cmd_status = 0;
2849 }
2850 cv_broadcast(&instance->int_cmd_cv);
2851 mutex_exit(&instance->int_cmd_mtx);
2852
2853 }
2854
/*
 * mrsas_tbolt_get_ld_map_info - Fetches the FW's logical drive (RAID) map
 * instance:		Adapter soft state
 *
 * Issues an internal command (DCMD) to read the FW's RAID map into the
 * driver's ld_map buffer. The map is what fast-path IO and load balancing
 * decisions are validated against.
 */
2863 int
2864 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2865 {
2866 int ret = 0;
2867 struct mrsas_cmd *cmd = NULL;
2868 struct mrsas_dcmd_frame *dcmd;
2869 MR_FW_RAID_MAP_ALL *ci;
2870 uint32_t ci_h = 0;
2871 U32 size_map_info;
2872
2873 cmd = get_raid_msg_pkt(instance);
2874
2875 if (cmd == NULL) {
2876 cmn_err(CE_WARN,
2877 "Failed to get a cmd from free-pool in get_ld_map_info()");
2878 return (DDI_FAILURE);
2879 }
2880
2881 dcmd = &cmd->frame->dcmd;
2882
2883 size_map_info = sizeof (MR_FW_RAID_MAP) +
2884 (sizeof (MR_LD_SPAN_MAP) *
2885 (MAX_LOGICAL_DRIVES - 1));
2886
2887 con_log(CL_ANN, (CE_NOTE,
2888 "size_map_info : 0x%x", size_map_info));
2889
2890 ci = instance->ld_map[(instance->map_id & 1)];
2891 ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2892
	if (!ci) {
		cmn_err(CE_WARN,
		    "Failed to alloc mem for ld_map_info");
		return_raid_msg_pkt(instance, cmd);
		return (DDI_FAILURE);
	}
2899
2900 memset(ci, 0, sizeof (*ci));
2901 memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2902
2903 dcmd->cmd = MFI_CMD_OP_DCMD;
2904 dcmd->cmd_status = 0xFF;
2905 dcmd->sge_count = 1;
2906 dcmd->flags = MFI_FRAME_DIR_READ;
2907 dcmd->timeout = 0;
2908 dcmd->pad_0 = 0;
2909 dcmd->data_xfer_len = size_map_info;
2910 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2911 dcmd->sgl.sge32[0].phys_addr = ci_h;
2912 dcmd->sgl.sge32[0].length = size_map_info;
2913
2914
2915 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2916
2917 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2918 ret = 0;
2919 con_log(CL_ANN1, (CE_NOTE,
2920 "Get LD Map Info success\n"));
	} else {
		cmn_err(CE_WARN,
		    "Get LD Map Info failed\n");
		ret = DDI_FAILURE;
	}
2926
2927 return_raid_msg_pkt(instance, cmd);
2928
2929 return (ret);
2930 }
2931
2932 void
2933 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2934 {
2935 uint32_t i;
2936 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2937 union desc_value d_val;
2938
2939 reply_desc = instance->reply_frame_pool;
2940
2941 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2942 d_val.word = reply_desc->Words;
2943 con_log(CL_DLEVEL3, (CE_NOTE,
2944 "i=%d, %x:%x",
2945 i, d_val.u1.high, d_val.u1.low));
2946 }
2947 }
2948
/**
 * mrsas_tbolt_prepare_cdb - Build the 32-byte DIF CDB for fast path.
 * @instance:		Adapter soft state
 * @cdb:		CDB buffer inside the MPI2 SCSI IO request
 * @io_info:		MegaRAID IO request packet pointer
 * @scsi_io_request:	MPI2 SCSI IO request frame being built
 * @ref_tag:		Reference tag for RD/WRPROTECT
 *
 * Rewrites the CDB as a 32-byte variable-length READ(32)/WRITE(32) and
 * sets the EEDP (protection information) fields in the IO request.
 */
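/*
 * Variable-length CDB layout produced below (sketch, big-endian fields):
 * byte 0 = variable-length opcode, byte 7 = additional CDB length,
 * byte 9 = service action (READ32/WRITE32), byte 10 = protect bits,
 * bytes 12-19 = 64-bit LBA, bytes 28-31 = transfer length.
 */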
2956 void
mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
    struct IO_REQUEST_INFO *io_info,
    Mpi2RaidSCSIIORequest_t *scsi_io_request, U32 ref_tag)
2958 {
2959 uint16_t EEDPFlags;
2960 uint32_t Control;
2961 // Prepare 32-byte CDB if DIF is supported on this device
2962 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB\n"));
2963
2964 memset(cdb, 0, 32);
2965
2966 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2967
2968
2969 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2970
	if (io_info->isRead) {
		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
	} else {
		cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
	}
2977
	/* Matches MEGASAS_RD_WR_PROTECT_CHECK_ALL in the Linux driver */
	cdb[10] = MRSAS_RD_WR_PROTECT;
2979
2980 /* LOGICAL BLOCK ADDRESS */
2981 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2982 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2983 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2984 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2985 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2986 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2987 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2988 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2989
2990 /* Logical block reference tag */
2991 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2992 &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2993 BIG_ENDIAN(ref_tag));
2994
2995 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
2996 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask,
2997 0xffff);
2998
	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &scsi_io_request->DataLength,
	    ((io_info->numBlocks) * 512));
	ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &scsi_io_request->IoFlags, 32);	/* Specify 32-byte cdb */
3004
3005 /* Transfer length */
3006 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
3007 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
3008 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
3009 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
3010
3011 /* set SCSI IO EEDPFlags */
3012 EEDPFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3013 &scsi_io_request->EEDPFlags);
3014 Control = ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3015 &scsi_io_request->Control);
3016
3017 // set SCSI IO EEDPFlags bits
3018 if (io_info->isRead) {
3019 // For READ commands, the EEDPFlags shall be set to specify to
3020 // Increment the Primary Reference Tag, to Check the Reference
3021 // Tag, and to Check and Remove the Protection Information
3022 // fields.
3023 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3024 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
3025 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
3026 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
3027 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
	} else {
3030 // For WRITE commands, the EEDPFlags shall be set to specify to
3031 // Increment the Primary Reference Tag, and to Insert
3032 // Protection Information fields.
3033 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3034 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
3035 }
3036 Control |= (0x4 << 26);
3037
3038 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3039 &scsi_io_request->EEDPFlags, EEDPFlags);
3040 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3041 &scsi_io_request->Control, Control);
3042 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3043 &scsi_io_request->EEDPBlockSize,
3044 MRSAS_EEDPBLOCKSIZE);
3045 }
3046
3047
/*
 * mrsas_tbolt_set_pd_lba - Sets PD LBA
 * @cdb:		CDB
 * @cdb_len_ptr:	in/out CDB length
 * @start_blk:		Start block of IO
 * @num_blocks:		Number of blocks to transfer
 * @DifCapable:		Whether the device is DIF capable (unused here)
 *
 * Used to set the PD LBA in the CDB for FP IOs, converting the CDB size
 * when the LBA or the drive requires it.
 */
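/*
 * Example: a READ(10) addressed at LBA 0x1_0000_0000 does not fit in the
 * 10-byte CDB's 32-bit LBA field, so it is rebuilt below as a READ(16)
 * with the 64-bit LBA in bytes 2-9 and the transfer length in bytes 10-13.
 */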
3056 void
mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
    U32 num_blocks, U8 DifCapable)
3058 {
3059 U8 cdb_len = *cdb_len_ptr;
3060 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
3061
3062 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
3063 if (((cdb_len == 12) || (cdb_len == 16)) &&
3064 (start_blk <= 0xffffffff)) {
3065 if (cdb_len == 16) {
			con_log(CL_ANN, (CE_NOTE,
			    "Converting READ/WRITE(16) to READ/WRITE(10)\n"));
3067 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3068 flagvals = cdb[1];
3069 groupnum = cdb[14];
3070 control = cdb[15];
3071 } else {
			con_log(CL_ANN, (CE_NOTE,
			    "Converting READ/WRITE(12) to READ/WRITE(10)\n"));
3073 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3074 flagvals = cdb[1];
3075 groupnum = cdb[10];
3076 control = cdb[11];
3077 }
3078
		/*
		 * NB: cdb is a pointer parameter, so sizeof (cdb) would
		 * only clear pointer-size bytes; clear the full 16-byte
		 * CDB area this function can write.
		 */
		memset(cdb, 0, 16);
3080
3081 cdb[0] = opcode;
3082 cdb[1] = flagvals;
3083 cdb[6] = groupnum;
3084 cdb[9] = control;
3085 /* Set transfer length */
3086 cdb[8] = (U8)(num_blocks & 0xff);
3087 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3088 cdb_len = 10;
3089 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3090 /* Convert to 16 byte CDB for large LBA's */
		con_log(CL_ANN, (CE_NOTE,
		    "Converting 6/10/12 byte CDB to 16 byte CDB\n"));
3092 switch (cdb_len) {
3093 case 6:
3094 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3095 control = cdb[5];
3096 break;
3097 case 10:
3098 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3099 flagvals = cdb[1];
3100 groupnum = cdb[6];
3101 control = cdb[9];
3102 break;
3103 case 12:
3104 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3105 flagvals = cdb[1];
3106 groupnum = cdb[10];
3107 control = cdb[11];
3108 break;
3109 }
3110
		memset(cdb, 0, 16);	/* sizeof (cdb) would clear only a pointer */
3112
3113 cdb[0] = opcode;
3114 cdb[1] = flagvals;
3115 cdb[14] = groupnum;
3116 cdb[15] = control;
3117
3118 /* Transfer length */
3119 cdb[13] = (U8)(num_blocks & 0xff);
3120 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3121 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3122 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3123
3124 /* Specify 16-byte cdb */
3125 cdb_len = 16;
3126 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3127 /* convert to 10 byte CDB */
3128 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3129 control = cdb[5];
3130
		memset(cdb, 0, 16);	/* sizeof (cdb) would clear only a pointer */
3132 cdb[0] = opcode;
3133 cdb[9] = control;
3134
3135 /* Set transfer length */
3136 cdb[8] = (U8)(num_blocks & 0xff);
3137 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3138
3139 /* Specify 10-byte cdb */
3140 cdb_len = 10;
3141 }
3142
3143
	/* Normal case: load the (possibly converted) LBA into the CDB */
3145 switch (cdb_len) {
3146 case 6:
3147 {
3148 U8 val = cdb[1] & 0xE0;
3149 cdb[3] = (U8)(start_blk & 0xff);
3150 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3151 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3152 break;
3153 }
	case 10:
	case 12:
		/* The 10- and 12-byte layouts share LBA bytes 2-5 */
		cdb[5] = (U8)(start_blk & 0xff);
		cdb[4] = (U8)((start_blk >> 8) & 0xff);
		cdb[3] = (U8)((start_blk >> 16) & 0xff);
		cdb[2] = (U8)((start_blk >> 24) & 0xff);
		break;
3166
3167 case 16:
3168 cdb[9] = (U8)(start_blk & 0xff);
3169 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3170 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3171 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3172 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3173 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3174 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3175 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3176 break;
3177 }
3178
3179 *cdb_len_ptr = cdb_len;
3180 }
3181
3182
3183 U8
3184 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3185 {
3186 MR_FW_RAID_MAP_ALL *ld_map;
3187
3188 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3189
3190 ld_map = instance->ld_map[(instance->map_id & 1)];
3191
3192 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d\n",
3193 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3194
		if (MR_ValidateMapInfo(instance->ld_map[instance->map_id & 1],
		    instance->load_balance_info)) {
3196 con_log(CL_ANN, (CE_CONT,
3197 "MR_ValidateMapInfo success"));
3198
3199 instance->fast_path_io = 1;
3200 con_log(CL_ANN, (CE_NOTE,
3201 "instance->fast_path_io %d \n",instance->fast_path_io));
3202
3203 return (DDI_SUCCESS);
3204 }
3205
3206 }
3207
3208 instance->fast_path_io = 0;
3209 cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
3210 con_log(CL_ANN, (CE_NOTE,
3211 "instance->fast_path_io %d \n",instance->fast_path_io));
3212
3213
3214 return (DDI_FAILURE);
3215 }
/*
 * Marks the HBA as bad. This is called either when an IO packet times out
 * even after 3 FW resets, or when the FW is found to be faulted even after
 * 3 consecutive resets.
 */
3221
3222 int
3223 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3224 {
3225 cmn_err(CE_WARN, "TBOLT Kill adapter called\n");
3226
3227 if (instance->deadadapter == 1)
3228 return (DDI_FAILURE);
3229
3230 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3231 "Writing to doorbell with MFI_STOP_ADP "));
3232 mutex_enter(&instance->ocr_flags_mtx);
3233 instance->deadadapter = 1;
3234 mutex_exit(&instance->ocr_flags_mtx);
3235 instance->func_ptr->disable_intr(instance);
3236 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3237 /* Flush */
3238 RD_RESERVED0_REGISTER(instance);
3239
3240 (void) mrsas_print_pending_cmds(instance);
3241 mrsas_complete_pending_cmds(instance);
3242 return (DDI_SUCCESS);
3243 }
void
mrsas_reset_reply_desc(struct mrsas_instance *instance)
{
	int i;
	MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;

	instance->reply_read_index = 0;
3249
3250 /* initializing reply address to 0xFFFFFFFF */
3251 reply_desc = instance->reply_frame_pool;
3252
3253 for (i = 0; i < instance->reply_q_depth; i++) {
3254 reply_desc->Words = (uint64_t)~0;
3255 reply_desc++;
3256 }
3257 }
3258
3259 int
3260 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3261 {
	uint32_t status = 0x00;
3263 uint32_t retry = 0;
3264 uint32_t seq_num;
3265 uint32_t cur_abs_reg_val;
3266 uint32_t fw_state;
3267 union mrsas_evt_class_locale class_locale;
3268 uint32_t abs_state;
3269 uint32_t i;
3270
3271 con_log(CL_ANN, (CE_NOTE,
3272 "mrsas_tbolt_reset_ppc entered\n "));
3273
3274 if (instance->deadadapter == 1) {
3275 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3276 "no more resets as HBA has been marked dead ");
3277 return (DDI_FAILURE);
3278 }
3279
3280 mutex_enter(&instance->ocr_flags_mtx);
3281 instance->adapterresetinprogress = 1;
	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
	    "adapterresetinprogress flag set, time %llx", gethrtime()));
3284 mutex_exit(&instance->ocr_flags_mtx);
3285
3286 instance->func_ptr->disable_intr(instance);
3287
	/* Add a delay in order to complete the ioctl & IO cmds in-flight */
	for (i = 0; i < 3000; i++) {
		drv_usecwait(MILLISEC);	/* wait for 1000 usecs */
	}
3292
	instance->reply_read_index = 0;
3294
3295 retry_reset:
3296 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3297 ":Resetting TBOLT "));
3298
3299 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3300 WR_TBOLT_IB_WRITE_SEQ(4, instance);
3301 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3302 WR_TBOLT_IB_WRITE_SEQ(2, instance);
3303 WR_TBOLT_IB_WRITE_SEQ(7, instance);
3304 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc: diag-reset unlock key sequence "
	    "written to write sequence register\n"));
	delay(100 * drv_usectohz(MILLISEC));
	status = RD_TBOLT_HOST_DIAG(instance);
	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc: host diag register read back "
	    "successfully\n"));
3313
3314 while (status & DIAG_TBOLT_RESET_ADAPTER) {
3315 delay(100 * drv_usectohz(MILLISEC));
3316 status = RD_TBOLT_HOST_DIAG(instance);
3317 if (retry++ == 100) {
3318 cmn_err(CE_WARN,
3319 "mrsas_tbolt_reset_ppc:"
3320 "resetadapter bit is set already "
3321 "check retry count %d\n", retry);
3322 return (DDI_FAILURE);
3323 }
3324 }
3325
3326 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3327 delay(100 * drv_usectohz(MILLISEC));
3328
	ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
	    (uint8_t *)((uintptr_t)(instance)->regmap +
	    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3332
3333 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3334 delay(100 * drv_usectohz(MILLISEC));
		ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
		    (uint8_t *)((uintptr_t)(instance)->regmap +
		    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3338 if (retry++ == 100) {
			/* Don't call kill adapter here */
			/* The RESET ADAPTER bit is cleared by firmware */
			// mrsas_tbolt_kill_adapter(instance);
			cmn_err(CE_WARN,
			    "mr_sas %d: %s(): RESET FAILED; return failure!!!",
			    instance->instance, __func__);
3343 return (DDI_FAILURE);
3344 }
3345 }
3346
	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3348 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3349 "Calling mfi_state_transition_to_ready"));
3350
3351 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3352 retry = 0;
3353 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3354 delay(100 * drv_usectohz(MILLISEC));
3355 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3356 }
	if (abs_state <= MFI_STATE_FW_INIT) {
		cmn_err(CE_WARN,
		    "mrsas_tbolt_reset_ppc: firmware state < "
		    "MFI_STATE_FW_INIT, state = 0x%x, RETRY RESET.\n",
		    abs_state);
		goto retry_reset;
	}
3361 }
3362
3363 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
3364 if (mfi_state_transition_to_ready(instance) ||
3365 debug_tbolt_fw_faults_after_ocr_g == 1) {
3366 cur_abs_reg_val =
3367 instance->func_ptr->read_fw_status_reg(instance);
3368 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3369
		con_log(CL_ANN1, (CE_NOTE,
		    "mrsas_tbolt_reset_ppc: (before fault injection check) "
		    "FW is not ready, FW state = 0x%x", fw_state));
3373 if (debug_tbolt_fw_faults_after_ocr_g == 1)
3374 fw_state = MFI_STATE_FAULT;
3375
3376 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
3377 "FW state = 0x%x", fw_state));
3378
3379 if (fw_state == MFI_STATE_FAULT) {
3380 // increment the count
3381 instance->fw_fault_count_after_ocr++;
3382 if (instance->fw_fault_count_after_ocr
3383 < MAX_FW_RESET_COUNT) {
3384 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3385 "FW is in fault after OCR count %d "
3386 "Retry Reset",
3387 instance->fw_fault_count_after_ocr);
3388 goto retry_reset;
3389
3390 } else {
				cmn_err(CE_WARN, "mrsas %d: %s: "
				    "Max reset count exceeded (>%d); "
				    "mark HBA as bad, KILL adapter",
				    instance->instance, __func__,
				    MAX_FW_RESET_COUNT);
3395
3396 mrsas_tbolt_kill_adapter(instance);
3397 return (DDI_FAILURE);
3398 }
3399 }
3400 }
3401
3402 // reset the counter as FW is up after OCR
3403 instance->fw_fault_count_after_ocr = 0;
3404
3405 mrsas_reset_reply_desc(instance);
3406
3407
3408 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3409 "Calling mrsas_issue_init_mpi2"));
3410 abs_state = mrsas_issue_init_mpi2(instance);
	if (abs_state == DDI_FAILURE) {
3412 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3413 "INIT failed Retrying Reset");
3414 goto retry_reset;
3415 }
3416 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3417 "mrsas_issue_init_mpi2 Done"));
3418
3419 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3420 "Calling mrsas_print_pending_cmd\n"));
	(void) mrsas_print_pending_cmds(instance);
3422 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3423 "mrsas_print_pending_cmd done\n"));
3424
3425 instance->func_ptr->enable_intr(instance);
3426 instance->fw_outstanding = 0;
3427
3428 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3429 "Calling mrsas_issue_pending_cmds"));
	(void) mrsas_issue_pending_cmds(instance);
3431 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3432 "issue_pending_cmds done.\n"));
3433
3434 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3435 "Calling aen registration"));
3436
3437 instance->aen_cmd->retry_count_for_ocr = 0;
3438 instance->aen_cmd->drv_pkt_time = 0;
3439
3440 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3441
3442 con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));
3443 mutex_enter(&instance->ocr_flags_mtx);
3444 instance->adapterresetinprogress = 0;
3445 mutex_exit(&instance->ocr_flags_mtx);
3446 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3447 "adpterresetinprogress flag unset"));
3448
3449 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done\n"));
3450 return (DDI_SUCCESS);
3451
3452 }
3453
3454
/*
 * mrsas_tbolt_sync_map_info - Keeps the driver's RAID map in sync with FW
 * @instance: Adapter soft state
 *
 * Issues a pending (write-direction) MR_DCMD_LD_MAP_GET_INFO carrying the
 * per-LD target id and sequence number; the FW completes it only when the
 * RAID map changes, which lets the completion path pick up the new map.
 */
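/*
 * Flow sketch: when this pended DCMD completes, tbolt_complete_cmd()
 * increments map_id, re-validates the new map via MR_ValidateMapInfo()
 * to decide fast_path_io, and calls back here to re-arm the sync command.
 */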
3463
3464 int
3465 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3466 {
3467 int ret = 0, i;
3468 struct mrsas_cmd *cmd = NULL;
3469 struct mrsas_dcmd_frame *dcmd;
3470 uint32_t size_sync_info, num_lds;
3471 LD_TARGET_SYNC *ci = NULL;
3472 MR_FW_RAID_MAP_ALL *map;
3473 MR_LD_RAID *raid;
3474 LD_TARGET_SYNC *ld_sync;
3475 uint32_t ci_h = 0;
3476 uint32_t size_map_info;
3477
3478 cmd = get_raid_msg_pkt(instance);
3479
3480 if (cmd == NULL) {
		cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
		    "mrsas_tbolt_sync_map_info().");
3483 return (DDI_FAILURE);
3484 }
3485
3486 /* Clear the frame buffer and assign back the context id */
3487 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3488 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3489 cmd->index);
3490 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3491
3492
3493 map = instance->ld_map[instance->map_id & 1];
3494
3495 num_lds = map->raidMap.ldCount;
3496
3497 dcmd = &cmd->frame->dcmd;
3498
3499 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3500
3501 con_log(CL_ANN, (CE_NOTE,
3502 "size_sync_info =0x%x ; ld count = 0x%x \n ",
3503 size_sync_info, num_lds));
3504
3505 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3506
	(void) memset(ci, 0, sizeof (MR_FW_RAID_MAP_ALL));
3508 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3509
3510 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3511
3512 ld_sync = (LD_TARGET_SYNC *)ci;
3513
3514 for (i = 0; i < num_lds; i++, ld_sync++) {
3515 raid = MR_LdRaidGet(i, map);
3516
		con_log(CL_ANN1, (CE_NOTE,
		    "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x\n",
		    i, raid->seqNum, raid->flags.ldSyncRequired));
3519
3520 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3521
3522 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x \n",
3523 i, ld_sync->ldTargetId));
3524
3525 ld_sync->seqNum = raid->seqNum;
3526 }
3527
3528
	size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3531
3532 dcmd->cmd = MFI_CMD_OP_DCMD;
3533 dcmd->cmd_status = 0xFF;
3534 dcmd->sge_count = 1;
3535 dcmd->flags = MFI_FRAME_DIR_WRITE;
3536 dcmd->timeout = 0;
3537 dcmd->pad_0 = 0;
3538 dcmd->data_xfer_len = size_map_info;
3539 dcmd->mbox.b[0] = num_lds;
3540 dcmd->mbox.b[1] = 1; /* Pend */
3541 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3542 dcmd->sgl.sge32[0].phys_addr = ci_h;
3543 dcmd->sgl.sge32[0].length = size_map_info;
3544
3545
3546 instance->map_update_cmd = cmd;
3547 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3548
3549 instance->func_ptr->issue_cmd(cmd, instance);
3550
3551 instance->unroll.syncCmd = 1;
	con_log(CL_ANN1, (CE_NOTE,
	    "sync cmd issued. [SMID]:%x", cmd->SMID));
3553
3554 return (ret);
3555 }
3556
3557 /*
3558 * abort_syncmap_cmd
3559 */
3560 int
3561 abort_syncmap_cmd(struct mrsas_instance *instance,
3562 struct mrsas_cmd *cmd_to_abort)
3563 {
3564 int ret = 0;
3565
3566 struct mrsas_cmd *cmd;
3567 struct mrsas_abort_frame *abort_fr;
3568
3569 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3570
3571 cmd = get_raid_msg_mfi_pkt(instance);
3572
3573 if (!cmd) {
3574 cmn_err(CE_WARN,
3575 "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3576 return (DDI_FAILURE);
3577 }
3578 /* Clear the frame buffer and assign back the context id */
3579 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3580 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3581 cmd->index);
3582
3583 abort_fr = &cmd->frame->abort;
3584
3585 /* prepare and issue the abort frame */
3586 ddi_put8(cmd->frame_dma_obj.acc_handle,
3587 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3588 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3589 MFI_CMD_STATUS_SYNC_MODE);
3590 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3591 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3592 cmd_to_abort->index);
3593 ddi_put32(cmd->frame_dma_obj.acc_handle,
3594 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3595 ddi_put32(cmd->frame_dma_obj.acc_handle,
3596 &abort_fr->abort_mfi_phys_addr_hi, 0);
3597
3598 cmd->frame_count = 1;
3599
3600 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3601
3602 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3603 con_log(CL_ANN1, (CE_WARN,
3604 "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3605 ret = -1;
3606 } else {
3607 ret = 0;
3608 }
3609
3610 return_raid_msg_mfi_pkt(instance, cmd);
3611
3612 atomic_add_16(&instance->fw_outstanding, (-1));
3613
3614 return (ret);
3615 }

#ifdef PDSUPPORT
int
mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
    uint8_t lun, dev_info_t **ldip)
{
	struct scsi_device *sd;
	dev_info_t *child;
	int rval, dtype;
	struct mrsas_tbolt_pd_info *pds = NULL;

	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
	    tgt, lun));

	if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
		if (ldip) {
			*ldip = child;
		}
		if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
			rval = mrsas_service_evt(instance, tgt, 1,
			    MRSAS_EVT_UNCONFIG_TGT, NULL);
			con_log(CL_ANN1, (CE_WARN,
			    "mr_sas: DELETING STALE ENTRY rval = %d "
			    "tgt id = %d", rval, tgt));
			return (NDI_FAILURE);
		}
		return (NDI_SUCCESS);
	}

	pds = (struct mrsas_tbolt_pd_info *)
	    kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
	mrsas_tbolt_get_pd_info(instance, pds, tgt);
	dtype = pds->scsiDevType;

	/* Check for a disk */
	if (dtype == DTYPE_DIRECT) {
		if (LE_16(pds->fwState) != PD_SYSTEM) {
			/* Free pds before the early return, else it leaks */
			kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
			return (NDI_FAILURE);
		}
		sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
		sd->sd_address.a_hba_tran = instance->tran;
		sd->sd_address.a_target = (uint16_t)tgt;
		sd->sd_address.a_lun = (uint8_t)lun;

		if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
			rval = mrsas_config_scsi_device(instance, sd, ldip);
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "Phys. device found: tgt %d dtype %d: %s",
			    tgt, dtype, sd->sd_inq->inq_vid));
		} else {
			rval = NDI_FAILURE;
			/* Don't touch sd_inq here; probe failed */
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "Phys. device not found, scsi_hba_probe failed: "
			    "tgt %d dtype %d", tgt, dtype));
		}

		/* sd_unprobe is blank now. Free buffer manually */
		if (sd->sd_inq) {
			kmem_free(sd->sd_inq, SUN_INQSIZE);
			sd->sd_inq = (struct scsi_inquiry *)NULL;
		}
		kmem_free(sd, sizeof (struct scsi_device));
		/*
		 * Keep rval from the probe/config path; forcing it to
		 * NDI_SUCCESS here would mask a probe failure.
		 */
	} else {
		con_log(CL_ANN1, (CE_NOTE,
		    "Device not supported: tgt %d lun %d dtype %d",
		    tgt, lun, dtype));
		rval = NDI_FAILURE;
	}

	kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
	con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
	    rval));
	return (rval);
}
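
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a tran_bus_config-style path might walk the physical target range
 * and configure each disk through mrsas_tbolt_config_pd().  The loop
 * bound MRSAS_TBOLT_PD_TGT_MAX is assumed for illustration only.
 *
 *	for (tgt = 0; tgt < MRSAS_TBOLT_PD_TGT_MAX; tgt++) {
 *		dev_info_t *ldip = NULL;
 *
 *		if (mrsas_tbolt_config_pd(instance, tgt, 0, &ldip) ==
 *		    NDI_SUCCESS) {
 *			con_log(CL_ANN1, (CE_NOTE,
 *			    "PD %d configured", tgt));
 *		}
 *	}
 */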

static void
mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
    struct mrsas_tbolt_pd_info *pds, int tgt)
{
	struct mrsas_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	dma_obj_t dcmd_dma_obj;

	cmd = get_raid_msg_pkt(instance);

	if (!cmd) {
		con_log(CL_ANN1,
		    (CE_WARN, "Failed to get a cmd for get pd info"));
		return;
	}

	/* Clear the frame buffer and assign back the context id */
	(void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
	    cmd->index);

	dcmd = &cmd->frame->dcmd;
	dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
	dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
	dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
	dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
	dcmd_dma_obj.dma_attr.dma_attr_align = 1;

	/* Bail out if the DMA buffer for the PD info cannot be allocated */
	if (mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
	    DDI_STRUCTURE_LE_ACC) != 1) {
		con_log(CL_ANN1,
		    (CE_WARN, "mrsas_tbolt_get_pd_info: DMA alloc failed"));
		return_raid_msg_pkt(instance, cmd);
		return;
	}
	(void) memset(dcmd_dma_obj.buffer, 0,
	    sizeof (struct mrsas_tbolt_pd_info));
	(void) memset(dcmd->mbox.b, 0, 12);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
	ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
	    MFI_FRAME_DIR_READ);
	ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
	    sizeof (struct mrsas_tbolt_pd_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
	    MR_DCMD_PD_GET_INFO);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
	    sizeof (struct mrsas_tbolt_pd_info));
	ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
	    dcmd_dma_obj.dma_cookie[0].dmac_address);

	cmd->sync_cmd = MRSAS_TRUE;
	cmd->frame_count = 1;

	if (instance->tbolt) {
		mr_sas_tbolt_build_mfi_cmd(instance, cmd);
	}

	instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);

	/* Copy the PD info from the DMA buffer back to the caller */
	ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
	    (uint8_t *)dcmd_dma_obj.buffer,
	    sizeof (struct mrsas_tbolt_pd_info), DDI_DEV_AUTOINCR);
	(void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
	return_raid_msg_pkt(instance, cmd);
}
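
/*
 * A possible hardening (sketch only, not in the driver today): have
 * the helper return DDI_SUCCESS/DDI_FAILURE so callers such as
 * mrsas_tbolt_config_pd() can distinguish "no PD info available" from
 * a zero-filled buffer, since a zeroed scsiDevType equals DTYPE_DIRECT.
 * The wrapper name below is hypothetical.
 *
 *	static int
 *	mrsas_tbolt_get_pd_info_checked(struct mrsas_instance *instance,
 *	    struct mrsas_tbolt_pd_info *pds, int tgt)
 *	{
 *		struct mrsas_cmd *cmd = get_raid_msg_pkt(instance);
 *
 *		if (cmd == NULL)
 *			return (DDI_FAILURE);
 *		... build and issue the DCMD exactly as above ...
 *		return_raid_msg_pkt(instance, cmd);
 *		return (DDI_SUCCESS);
 *	}
 */
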
#endif