/*
 * mr_sas_tbolt.c: source for the mr_sas driver for the new generation
 * of controllers, i.e. Thunderbolt and Invader.
 *
 * Solaris MegaRAID device driver for SAS2.0 controllers
 * Copyright (c) 2008-2012, LSI Logic Corporation.
 * All rights reserved.
 *
 * Version:
 * Author:
 *	Swaminathan K S
 *	Arun Chandrashekhar
 *	Manju R
 *	Rasheed
 *	Shakeel Bukhari
 */


#include <stddef.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/atomic.h>
#include <sys/scsi/scsi.h>
#include <sys/byteorder.h>
#include "ld_pd_map.h"
#include "mr_sas.h"
#include "fusion.h"


/* Pre-TB command size and TB command size. */
#define	MR_COMMAND_SIZE	(64 * 20)	/* 1280 bytes */

MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo,
    struct IO_REQUEST_INFO *in_info);
extern ddi_dma_attr_t mrsas_generic_dma_attr;
extern uint32_t mrsas_tbolt_max_cap_maxxfer;
extern struct ddi_device_acc_attr endian_attr;
extern int debug_level_g;
extern unsigned int enable_fp;
volatile int dump_io_wait_time = 90;
extern void io_timeout_checker(void *arg);
extern int mfi_state_transition_to_ready(struct mrsas_instance *instance);
extern volatile int debug_timeout_g;
extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
extern void push_pending_mfi_pkt(struct mrsas_instance *,
    struct mrsas_cmd *);
extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
    MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);

static volatile int debug_tbolt_fw_faults_after_ocr_g = 0;

/*
 * destroy_mfi_mpi_frame_pool
 */
void
destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
{
	int	i;
	struct mrsas_cmd	*cmd;

	/* return all mfi frames to the pool */
	for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
		cmd = instance->cmd_list[i];
		if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
			(void) mrsas_free_dma_obj(instance,
			    cmd->frame_dma_obj);
		cmd->frame_dma_obj_status = DMA_OBJ_FREED;
	}
}

/*
 * destroy_mpi2_frame_pool
 */
void
destroy_mpi2_frame_pool(struct mrsas_instance *instance)
{
	if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mpi2_frame_pool_dma_obj);
		instance->mpi2_frame_pool_dma_obj.status = DMA_OBJ_FREED;
	}
}


/*
 * mrsas_tbolt_free_additional_dma_buffer
 */
void
mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
{
	int	i;

	if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_internal_dma_obj);
		instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	}
	if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->mfi_evt_detail_obj);
		instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	}

	for (i = 0; i < 2; i++) {
		if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
			(void) mrsas_free_dma_obj(instance,
			    instance->ld_map_obj[i]);
			instance->ld_map_obj[i].status = DMA_OBJ_FREED;
		}
	}
}

		instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
	}

	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->reply_desc_dma_obj);
		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
	}
}


/*
 * ThunderBolt(TB) Request Message Frame Pool
 */
int
create_mpi2_frame_pool(struct mrsas_instance *instance)
{
	int		i = 0;
	int		cookie_cnt;
	uint16_t	max_cmd;
	uint32_t	sgl_sz;
	uint32_t	raid_msg_size;
	uint32_t	total_size;
	uint32_t	offset;
	uint32_t	io_req_base_phys;
	uint8_t		*io_req_base;
	struct mrsas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;

	sgl_sz = 1024;
	raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;

	/* Allocate an additional 256 bytes to accommodate SMID 0. */
	total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
	    (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);

	con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
	    "max_cmd %x ", max_cmd));

	con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
	    "request message frame pool size %x", total_size));

	/*
	 * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
	 * and then split the memory into 1024 commands. Each command should
	 * be able to contain a RAID MESSAGE FRAME which will embed an
	 * MFI_FRAME within it. Further, refer to the "alloc_req_rep_desc"
	 * function, where we allocate the request/reply descriptor queues,
	 * for a clue.
	 */
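
	/*
	 * Resulting pool layout (a sketch; the offsets follow from the
	 * per-command assignments below, with N = max_cmd and 256 =
	 * MRSAS_THUNDERBOLT_MSG_SIZE):
	 *
	 *   +0                        256-byte pad reserved for SMID 0
	 *   +256                      N message frames, 256 bytes each
	 *   +256 + N*256              N SGL chain buffers, 1024 bytes each
	 *   +256 + N*256 + N*1024     N sense buffers, SENSE_LENGTH each
	 */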

	instance->mpi2_frame_pool_dma_obj.size = total_size;
	instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;

	if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc mpi2 frame pool");
		return (DDI_FAILURE);
	}

	bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
	instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;

	instance->io_request_frames =
	    (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
	instance->io_request_frames_phy =
	    (uint32_t)
	    instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_DLEVEL3, (CE_NOTE,
	    "io_request_frames 0x%x",
	    instance->io_request_frames));

	con_log(CL_DLEVEL3, (CE_NOTE,
	    "io_request_frames_phy 0x%x",
	    instance->io_request_frames_phy));

	io_req_base = (uint8_t *)instance->io_request_frames +
	    MRSAS_THUNDERBOLT_MSG_SIZE;
	io_req_base_phys = instance->io_request_frames_phy +
	    MRSAS_THUNDERBOLT_MSG_SIZE;

	con_log(CL_DLEVEL3, (CE_NOTE,
	    "io req_base_phys 0x%x", io_req_base_phys));

	for (i = 0; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];

		offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;

		cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
		    ((uint8_t *)io_req_base + offset);
		cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;

		cmd->sgl = (Mpi2SGEIOUnion_t *)
		    ((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		cmd->sgl_phys_addr =
		    (io_req_base_phys +
		    (max_cmd * raid_msg_size) + i * sgl_sz);

		cmd->sense1 = (uint8_t *)
		    ((uint8_t *)io_req_base +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

		cmd->sense_phys_addr1 =
		    (io_req_base_phys +
		    (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
		    (i * SENSE_LENGTH));

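		/*
		 * SMID 0 is reserved for the IOC INIT frame (hence the
		 * leading 256-byte pad allocated above), so host commands
		 * use 1-based SMIDs.
		 */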
		cmd->SMID = i + 1;

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "Frame Pool Addr [%x]0x%x",
		    cmd->index, cmd->scsi_io_request));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "Frame Pool Phys Addr [%x]0x%x",
		    cmd->index, cmd->scsi_io_request_phys_addr));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "Sense Addr [%x]0x%x",
		    cmd->index, cmd->sense1));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "Sense Addr Phys [%x]0x%x",
		    cmd->index, cmd->sense_phys_addr1));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "Sgl buffers [%x]0x%x",
		    cmd->index, cmd->sgl));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "Sgl buffers phys [%x]0x%x",
		    cmd->index, cmd->sgl_phys_addr));
	}

	return (DDI_SUCCESS);
}


/*
 * alloc_additional_dma_buffer for AEN
 */
int
mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
{
	uint32_t	internal_buf_size = PAGESIZE * 2;
	int		i;

	/* Initialize buffer status as free */
	instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
	instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
	instance->ld_map_obj[0].status = DMA_OBJ_FREED;
	instance->ld_map_obj[1].status = DMA_OBJ_FREED;

	instance->mfi_internal_dma_obj.size = internal_buf_size;
	instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc internal buffer");
		return (DDI_FAILURE);
	}

	bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);

	instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
	instance->internal_buf =
	    (caddr_t)instance->mfi_internal_dma_obj.buffer;
	instance->internal_buf_dmac_add =
	    instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
	instance->internal_buf_size = internal_buf_size;

	/* allocate evt_detail */
	instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
	instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
	instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;

	if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mrsas_tbolt_alloc_additional_dma_buffer: "
		    "could not allocate data transfer buffer.");
		goto fail_tbolt_additional_buff;
	}

	bzero(instance->mfi_evt_detail_obj.buffer,
	    sizeof (struct mrsas_evt_detail));

	instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;

	instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));

	for (i = 0; i < 2; i++) {
		/* allocate the data transfer buffer */
		instance->ld_map_obj[i].size = instance->size_map_info;
		instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
		instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi =
		    0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
		    0xFFFFFFFFU;
		instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
		instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;

		if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
		    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
			cmn_err(CE_WARN,
			    "could not allocate data transfer buffer.");
			goto fail_tbolt_additional_buff;
		}

		instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;

		(void) memset(instance->ld_map_obj[i].buffer, 0,
		    instance->size_map_info);

		instance->ld_map[i] =
		    (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
		instance->ld_map_phy[i] = (uint32_t)
		    instance->ld_map_obj[i].dma_cookie[0].dmac_address;

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));

		con_log(CL_DLEVEL3, (CE_NOTE,
		    "size_map_info 0x%x", instance->size_map_info));
	}

	return (DDI_SUCCESS);

fail_tbolt_additional_buff:
	mrsas_tbolt_free_additional_dma_buffer(instance);

	return (DDI_FAILURE);
}

MRSAS_REQUEST_DESCRIPTOR_UNION *
mr_sas_get_request_descriptor(struct mrsas_instance *instance,
    uint16_t index, struct mrsas_cmd *cmd)
{
	MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

	if (index > instance->max_fw_cmds) {
		con_log(CL_ANN1, (CE_NOTE,
		    "Invalid SMID 0x%x request for descriptor", index));
		con_log(CL_ANN1, (CE_NOTE,
		    "max_fw_cmds : 0x%x\n", instance->max_fw_cmds));
		return (NULL);
	}

	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    ((char *)instance->request_message_pool +
	    (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));

	con_log(CL_ANN1, (CE_NOTE,
	    "request descriptor : 0x%08lx\n", (unsigned long)req_desc));

	con_log(CL_ANN1, (CE_NOTE,
	    "request descriptor base phy : 0x%08lx\n",
	    (unsigned long)instance->request_message_pool_phy));

	return (req_desc);
}


/*
 * Allocate Request and Reply Queue Descriptors.
 */
int
alloc_req_rep_desc(struct mrsas_instance *instance)
{
	uint32_t	request_q_sz, reply_q_sz;
	int		i, max_request_q_sz, max_reply_q_sz;
	uint64_t	request_desc;
	MPI2_REPLY_DESCRIPTORS_UNION	*reply_desc;
	uint64_t	*reply_ptr;

	/*
	 * ThunderBolt(TB) There is no longer a producer/consumer mechanism.
	 * Once we have an interrupt, we are supposed to scan through the
	 * list of reply descriptors and process them accordingly. We need
	 * to allocate memory for 1024 reply descriptors.
	 */

	/* Allocate Reply Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
	    sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));

	/* reply queue size must be a multiple of 16 */
	max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15) / 16) * 16;

	reply_q_sz = 8 * max_reply_q_sz;
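
	/*
	 * Worked example (illustrative): with max_fw_cmds = 1007 (the value
	 * after the cap in mrsas_init_adapter_tbolt), the queue depth rounds
	 * up to ((1007 + 1 + 15) / 16) * 16 = 1008 entries; at 8 bytes per
	 * descriptor, reply_q_sz comes to 8064 bytes.
	 */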

	instance->reply_desc_dma_obj.size = reply_q_sz;
	instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc reply queue");
		return (DDI_FAILURE);
	}

	bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
	instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of reply queue */
	instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
	    instance->reply_desc_dma_obj.buffer);

	instance->reply_q_depth = max_reply_q_sz;

	con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
	    instance->reply_q_depth));

	con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%x",
	    instance->reply_frame_pool));

	/* initialize reply descriptors to 0xFFFFFFFF */
	reply_desc = instance->reply_frame_pool;

	for (i = 0; i < instance->reply_q_depth; i++) {
		reply_desc->Words = (uint64_t)~0;
		reply_desc++;
	}
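
	/*
	 * Presumably the all-ones pattern serves as an "unused" sentinel:
	 * a descriptor whose Words still reads as all 0xFF bytes has not
	 * been posted by firmware yet, so the interrupt handler can stop
	 * scanning when it reaches one. This is a reading of the loop
	 * above, not a statement from the MPI2 specification.
	 */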

	instance->reply_frame_pool_phy =
	    (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;

	con_log(CL_ANN1, (CE_NOTE,
	    "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));

	instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
	    reply_q_sz);

	con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
	    instance->reply_pool_limit_phy));

	/* Allocate Request Descriptors */
	con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
	    sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));

	request_q_sz = 8 * (instance->max_fw_cmds);

	instance->request_desc_dma_obj.size = request_q_sz;
	instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi =
	    0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
	    0xFFFFFFFFU;
	instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;

	if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "mr_sas: could not alloc request queue desc");
		goto fail_undo_reply_queue;
	}

	bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
	instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;

	/* virtual address of request queue desc */
	instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    (instance->request_desc_dma_obj.buffer);

	instance->request_message_pool_phy = (uint32_t)
	    instance->request_desc_dma_obj.dma_cookie[0].dmac_address;

	max_request_q_sz = instance->max_fw_cmds;

	return (DDI_SUCCESS);

fail_undo_reply_queue:
	if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
		(void) mrsas_free_dma_obj(instance,
		    instance->reply_desc_dma_obj);
		instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
	}

	return (DDI_FAILURE);
}

/*
 * mrsas_alloc_cmd_pool_tbolt
 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have
 * a single routine.
 */
int
mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
{
	int		i;
	int		count;
	uint32_t	max_cmd;
	uint32_t	reserve_cmd;
	size_t		sz;
	struct mrsas_cmd	*cmd;

	max_cmd = instance->max_fw_cmds;
	con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
	    "max_cmd %x", max_cmd));

	sz = sizeof (struct mrsas_cmd *) * max_cmd;

	/*
	 * instance->cmd_list is an array of struct mrsas_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
	if (instance->cmd_list == NULL) {
		con_log(CL_NONE, (CE_WARN,
		    "Failed to allocate memory for cmd_list"));
		return (DDI_FAILURE);
	}

	/* create a frame pool and assign one frame to each cmd */
	for (count = 0; count < max_cmd; count++) {
		instance->cmd_list[count] =
		    kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
		if (instance->cmd_list[count] == NULL) {
			con_log(CL_NONE, (CE_WARN,
			    "Failed to allocate memory for mrsas_cmd"));
			goto mrsas_undo_cmds;
		}
	}

	/* add all the commands to command pool */

	INIT_LIST_HEAD(&instance->cmd_pool_list);
	INIT_LIST_HEAD(&instance->cmd_pend_list);
	INIT_LIST_HEAD(&instance->cmd_app_pool_list);

	reserve_cmd = MRSAS_APP_RESERVED_CMDS;

	/* cmd index 0 is reserved for IOC INIT */
	for (i = 1; i < reserve_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;
		mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
	}

	for (i = reserve_cmd; i < max_cmd; i++) {
		cmd = instance->cmd_list[i];
		cmd->index = i;
		mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
	}

	return (DDI_SUCCESS);

mrsas_undo_cmds:
	if (count > 0) {
		/* free each cmd */
		for (i = 0; i < count; i++) {
			if (instance->cmd_list[i] != NULL)
				kmem_free(instance->cmd_list[i],
				    sizeof (struct mrsas_cmd));
			instance->cmd_list[i] = NULL;
		}
	}

mrsas_undo_cmd_list:
	if (instance->cmd_list != NULL)
		kmem_free(instance->cmd_list, sz);
	instance->cmd_list = NULL;

	return (DDI_FAILURE);
}


/*
 * free_space_for_mpi2
 */
void
free_space_for_mpi2(struct mrsas_instance *instance)
{
	/* already freed */
	if (instance->cmd_list == NULL) {
		return;
	}

	/* First free the additional DMA buffer */
	mrsas_tbolt_free_additional_dma_buffer(instance);

	/* Free the request/reply descriptor pool */
	free_req_rep_desc_pool(instance);

	/* Free the MPI message pool */
	destroy_mpi2_frame_pool(instance);

	/* Free the MFI frame pool */
	destroy_mfi_frame_pool(instance);

	/* Free all the commands in the cmd_list */
	/* Free the cmd_list buffer itself */
	mrsas_free_cmd_pool(instance);
}


/*
 * ThunderBolt(TB) memory allocations for commands/messages/frames.
 */
int
alloc_space_for_mpi2(struct mrsas_instance *instance)
{
	/* Allocate command pool (memory for cmd_list & individual commands) */
	if (mrsas_alloc_cmd_pool_tbolt(instance)) {
		cmn_err(CE_WARN, "Error creating cmd pool");
		return (DDI_FAILURE);
	}

	/* Initialize single reply size and Message size */
	instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
	instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;

	instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
	    (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
	    sizeof (MPI2_SGE_IO_UNION))) / sizeof (MPI2_SGE_IO_UNION);
	instance->max_sge_in_chain = (MR_COMMAND_SIZE -
	    MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);

	/* Reduce SG count to take care of group cmds feature in FW */
	instance->max_num_sge = (instance->max_sge_in_main_msg +
	    instance->max_sge_in_chain - 2);
	instance->chain_offset_mpt_msg =
	    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
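
	/*
	 * Rough arithmetic, assuming 16-byte SGEs (i.e. sizeof
	 * (MPI2_SGE_IO_UNION) == 16): max_sge_in_chain works out to
	 * (1280 - 256) / 16 = 64 chain SGEs per command, on top of
	 * whatever fits in the 256-byte main message frame.
	 */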

mpi2_undo_message_pool:
	destroy_mpi2_frame_pool(instance);

mpi2_undo_mfi_frame_pool:
	destroy_mfi_frame_pool(instance);

mpi2_undo_descripter_pool:
	free_req_rep_desc_pool(instance);

mpi2_undo_cmd_pool:
	mrsas_free_cmd_pool(instance);

	return (DDI_FAILURE);
}

/*
 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
 */
int
mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
{
	/*
	 * Reduce the max supported cmds by 1. This is to ensure that the
	 * reply_q_sz (1 more than the max cmd that driver may send)
	 * does not exceed max cmds that the FW can support
	 */
	if (instance->max_fw_cmds > 1008) {
		instance->max_fw_cmds = 1008;
		instance->max_fw_cmds = instance->max_fw_cmds - 1;
	}

	con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
	    "instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));

	/* create a pool of commands */
	if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "alloc_space_for_mpi2() failed.");

		return (DDI_FAILURE);
	}

	/* Send ioc init message */
	if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "mrsas_issue_init_mpi2() failed.");

		goto fail_init_fusion;
	}

	instance->unroll.alloc_space_mpi2 = 1;

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_init_adapter_tbolt: SUCCESSFUL\n"));

	return (DDI_SUCCESS);

fail_init_fusion:
fail_undo_alloc_mpi2:
	free_space_for_mpi2(instance);

	return (DDI_FAILURE);
}


/*
 * init_mpi2
 */
int
mrsas_issue_init_mpi2(struct mrsas_instance *instance)
{
	dma_obj_t	init2_dma_obj;
	int		ret_val = DDI_SUCCESS;

	/* allocate DMA buffer for IOC INIT message */
	init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
	init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
	init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
	init2_dma_obj.dma_attr.dma_attr_align = 256;

	if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN, "mrsas_issue_init_mpi2: "
		    "could not allocate data transfer buffer.");
		return (DDI_FAILURE);
	}
	(void) memset(init2_dma_obj.buffer, 2,
	    sizeof (Mpi2IOCInitRequest_t));

	con_log(CL_ANN1, (CE_NOTE,
	    "mrsas_issue_init_mpi2 _phys adr: %x\n",
	    init2_dma_obj.dma_cookie[0].dmac_address));

	/* Initialize and send ioc init message */
	ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj,
	    init2_dma_obj.acc_handle);
	if (ret_val == DDI_FAILURE) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mpi2: Failed\n"));
		goto fail_init_mpi2;
	}

	/* free IOC init DMA buffer */
	if (mrsas_free_dma_obj(instance, init2_dma_obj)
	    != DDI_SUCCESS) {
		con_log(CL_ANN1, (CE_WARN,
		    "mrsas_issue_init_mpi2: Free Failed\n"));
		return (DDI_FAILURE);
	}

	/* Get/Check and sync ld_map info */
	instance->map_id = 0;
	if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
		mrsas_tbolt_sync_map_info(instance);

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_issue_init_mpi2: SUCCESSFUL\n"));

	return (DDI_SUCCESS);

fail_init_mpi2:
	(void) mrsas_free_dma_obj(instance, init2_dma_obj);

	return (DDI_FAILURE);
}

int
mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj,
    ddi_acc_handle_t accessp)
{
	int		numbytes, i;
	int		ret = DDI_SUCCESS;
	uint16_t	flags;
	int		status;
	timespec_t	time;
	uint64_t	mSec;
	uint32_t	msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
	struct mrsas_init_frame2	*mfiFrameInit2;
	struct mrsas_header		*frame_hdr;
	Mpi2IOCInitRequest_t		*init;
	struct mrsas_cmd		*cmd = NULL;
	struct mrsas_drv_ver		drv_ver_info;
	MRSAS_REQUEST_DESCRIPTOR_UNION	*req_desc;

	con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));

#ifdef DEBUG
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    sizeof (*mfiFrameInit2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", sizeof (*init)));
	con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
	    sizeof (struct mrsas_init_frame2)));
	con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
	    sizeof (Mpi2IOCInitRequest_t)));
#endif

	init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
	numbytes = sizeof (*init);
	bzero(init, numbytes);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
	    MPI2_FUNCTION_IOC_INIT);

	ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
	    MPI2_WHOINIT_HOST_DRIVER);

	/* set MsgVersion and HeaderVersion host driver was built with */
	ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
	    MPI2_VERSION);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
	    MPI2_HEADER_VERSION);

	ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
	ddi_put64(mpi2_dma_obj->acc_handle,
	    (uint64_t *)&init->SystemRequestFrameBaseAddress,
	    instance->io_request_frames_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyDescriptorPostQueueAddress,
	    instance->reply_frame_pool_phy);

	ddi_put64(mpi2_dma_obj->acc_handle,
	    &init->ReplyFreeQueueAddress, 0);

	cmd = instance->cmd_list[0];
	if (cmd == NULL) {
		return (DDI_FAILURE);
	}
	cmd->retry_count_for_ocr = 0;
	cmd->pkt = NULL;
	cmd->drv_pkt_time = 0;

	mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
	con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%x", mfiFrameInit2));

	frame_hdr = &cmd->frame->hdr;

	ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
	    MFI_CMD_STATUS_POLL_MODE);

	flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);

	flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);

	con_log(CL_ANN, (CE_CONT,
	    "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));

	/* Initialize the MFI header */
	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);

	con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));

	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status,
	    MFI_STAT_INVALID_STATUS);

	con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));

	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->queue_info_new_phys_addr_lo,
	    mpi2_dma_obj->dma_cookie[0].dmac_address);

	ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->data_xfer_len,
	    sizeof (Mpi2IOCInitRequest_t));

	con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
	    init->ReplyDescriptorPostQueueAddress));

	/* fill driver version information */
	fill_up_drv_ver(&drv_ver_info);

	/* allocate the driver version data transfer buffer */
	instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
	instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
	instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;

	if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
	    (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
		cmn_err(CE_WARN,
		    "fusion init: Could not allocate driver version buffer.");
		return (DDI_FAILURE);
	}
	/* copy driver version to dma buffer */
	(void) memset(instance->drv_ver_dma_obj.buffer, 0,
	    sizeof (drv_ver_info.drv_ver));
	ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
	    (uint8_t *)drv_ver_info.drv_ver,
	    (uint8_t *)instance->drv_ver_dma_obj.buffer,
	    sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);

	/* send driver version physical address to firmware */
	ddi_put64(cmd->frame_dma_obj.acc_handle,
	    &mfiFrameInit2->driverversion,
	    instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);

	con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
	    mfiFrameInit2->queue_info_new_phys_addr_lo,
	    sizeof (Mpi2IOCInitRequest_t)));

	con_log(CL_ANN1, (CE_CONT, "[Length]%x",
	    mfiFrameInit2->data_xfer_len));

	con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
	    cmd->scsi_io_request_phys_addr,
	    sizeof (struct mrsas_init_frame2)));

	/* disable interrupts before sending INIT2 frame */
	instance->func_ptr->disable_intr(instance);

	req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
	    instance->request_message_pool;
	req_desc->Words = cmd->scsi_io_request_phys_addr;
	req_desc->MFAIo.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_MFA <<
	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
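
	/*
	 * For an MFA (MFI passthrough) descriptor, Words carries the
	 * physical address of the embedded MFI frame, with only the
	 * descriptor type bits overlaid in the low flags byte afterwards.
	 * This is a reading of the two assignments above, not a statement
	 * taken from the MPI specification.
	 */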

	cmd->request_desc = req_desc;

	/* issue the init frame */
	instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);

	con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
	con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
	    frame_hdr->cmd_status));

	if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &mfiFrameInit2->cmd_status) == 0) {
		con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
		ret = DDI_SUCCESS;
	} else {
		con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
		mrsas_dump_reply_desc(instance);
		goto fail_ioc_init;
	}

	mrsas_dump_reply_desc(instance);

	instance->unroll.verBuff = 1;

	con_log(CL_ANN, (CE_NOTE,
	    "mrsas_tbolt_ioc_init: SUCCESSFUL\n"));

	return (DDI_SUCCESS);

fail_ioc_init:
	(void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);

	return (DDI_FAILURE);
}

int
wait_for_outstanding_poll_io(struct mrsas_instance *instance)
{
	int		i;
	uint32_t	wait_time = dump_io_wait_time;

	for (i = 0; i < wait_time; i++) {
		/*
		 * Check for outstanding poll commands,
		 * except the ldsync command and the aen command.
		 */
		if (instance->fw_outstanding <= 2) {
			break;
		}
		drv_usecwait(10 * MILLISEC);
		/* complete commands from reply queue */
		(void) mr_sas_tbolt_process_outstanding_cmd(instance);
	}
	if (instance->fw_outstanding > 2) {
		return (1);
	}
	return (0);
}

/*
 * scsi_pkt handling
 *
 * Visible to the external world via the transport structure.
 */

int
mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	struct mrsas_instance	*instance = ADDR2MR(ap);
	struct scsa_cmd		*acmd = PKT2CMD(pkt);
	struct mrsas_cmd	*cmd = NULL;
	int		rval, i;
	uchar_t		cmd_done = 0;
	Mpi2RaidSCSIIORequest_t	*scsi_raid_io;
	uint32_t	msecs = 120 * MILLISEC;

	con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
	if (instance->deadadapter == 1) {
		cmn_err(CE_WARN,
		    "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
		    "for IO, as the HBA doesn't take any more IOs");
		if (pkt) {
			pkt->pkt_reason = CMD_DEV_GONE;
			pkt->pkt_statistics = STAT_DISCON;
		}
		return (TRAN_FATAL_ERROR);
	}
	if (instance->adapterresetinprogress) {
		con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
		    "returning mfi_pkt and setting TRAN_BUSY\n"));
		return (TRAN_BUSY);
	}
	rval = mrsas_tbolt_prepare_pkt(acmd);

	cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);

	/*
	 * Check if the command is already completed by the mrsas_build_cmd()
	 * routine. In which case the busy_flag would be clear and scb will
	 * be NULL and appropriate reason provided in pkt_reason field
	 */
	if (cmd_done) {
		pkt->pkt_reason = CMD_CMPLT;
		pkt->pkt_scbp[0] = STATUS_GOOD;
		pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
		    | STATE_SENT_CMD;
		if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
			(*pkt->pkt_comp)(pkt);
		}

		return (TRAN_ACCEPT);
	}

	if (cmd == NULL) {
		return (TRAN_BUSY);
	}

	if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
		if (instance->fw_outstanding > instance->max_fw_cmds) {
			cmn_err(CE_WARN,
			    "Command Queue Full... Returning BUSY\n");
			return_raid_msg_pkt(instance, cmd);
			return (TRAN_BUSY);
		}

		/* Synchronize the Cmd frame for the controller */
		(void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);

		con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
		    "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
		    cmd->index, cmd->SMID));

		instance->func_ptr->issue_cmd(cmd, instance);

		return (TRAN_ACCEPT);

	} else {
		instance->func_ptr->issue_cmd(cmd, instance);
		(void) wait_for_outstanding_poll_io(instance);
		return (TRAN_ACCEPT);
	}
}

/*
 * prepare the pkt:
 * the pkt may have been resubmitted or just reused so
 * initialize some fields and do some checks.
 */
int
mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
{
	struct scsi_pkt	*pkt = CMD2PKT(acmd);

	/*
	 * Reinitialize some fields that need it; the packet may
	 * have been resubmitted
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;
	pkt->pkt_resid = 0;

	/*
	 * zero status byte.
	 */
	*(pkt->pkt_scbp) = 0;

	return (0);
}


int
mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
    struct scsa_cmd *acmd,
    struct mrsas_cmd *cmd,
    Mpi2RaidSCSIIORequest_t *scsi_raid_io,
    uint32_t *datalen)
{
	uint32_t	MaxSGEs;
	int		sg_to_process;
	uint32_t	i, j, SGEdwords = 0;
	uint32_t	numElements, endElement;
	Mpi25IeeeSgeChain64_t	*ieeeChainElement = NULL;
	Mpi25IeeeSgeChain64_t	*scsi_raid_io_sgl_ieee = NULL;
	uint32_t	SGLFlags = 0;

	con_log(CL_ANN1, (CE_NOTE,
	    "chkpnt: Building Chained SGL :%d", __LINE__));

	/* Calculate SGE size in number of Words(32bit) */
	/* Clear the datalen before updating it. */
	*datalen = 0;

	SGEdwords = sizeof (Mpi25IeeeSgeChain64_t) / 4;

	MaxSGEs = instance->max_sge_in_main_msg;

	ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &scsi_raid_io->SGLFlags,
	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);

	/* set data transfer flag. */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &scsi_raid_io->Control,
		    MPI2_SCSIIO_CONTROL_WRITE);
	} else {
		ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &scsi_raid_io->Control, MPI2_SCSIIO_CONTROL_READ);
	}

	numElements = acmd->cmd_cookiecnt;

	con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));

	if (numElements > instance->max_num_sge) {
		con_log(CL_ANN, (CE_NOTE,
		    "[Max SGE Count Exceeded]:%x", numElements));
		return (numElements);
	}

	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &scsi_raid_io->RaidContext.numSGE, (uint8_t)numElements);

	/* set end element in main message frame */
	endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
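
	/*
	 * E.g. with MaxSGEs == 8 and 40 DMA cookies: the first 7 SGEs go
	 * in the main message frame, the 8th slot becomes the chain
	 * element, and the remaining 33 land in the per-command chain
	 * buffer (cmd->sgl). The figures are illustrative; MaxSGEs comes
	 * from max_sge_in_main_msg computed in alloc_space_for_mpi2().
	 */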

	/* prepare the scatter-gather list for the firmware */
	scsi_raid_io_sgl_ieee =
	    (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;

	if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
		Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
		sgl_ptr_end += instance->max_sge_in_main_msg - 1;

		ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &sgl_ptr_end->Flags, 0);
	}

	for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
		ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &scsi_raid_io_sgl_ieee->Address,
		    acmd->cmd_dmacookies[i].dmac_laddress);

		ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &scsi_raid_io_sgl_ieee->Length,
		    acmd->cmd_dmacookies[i].dmac_size);

		ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &scsi_raid_io_sgl_ieee->Flags, 0);

		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
			if (i == (numElements - 1))
				ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io_sgl_ieee->Flags,
				    IEEE_SGE_FLAGS_END_OF_LIST);
		}

		*datalen += acmd->cmd_dmacookies[i].dmac_size;

#ifdef DEBUG
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]:%llx",
		    scsi_raid_io_sgl_ieee->Address));
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
		    scsi_raid_io_sgl_ieee->Length));
		con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
		    scsi_raid_io_sgl_ieee->Flags));
#endif
	}

	ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &scsi_raid_io->ChainOffset, 0);

	/* check if chained SGL required */
	if (i < numElements) {

		con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));

		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
			uint16_t ioFlags =
			    ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->IoFlags);

			if ((ioFlags &
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
			    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
				ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->ChainOffset,
				    (U8)instance->chain_offset_io_req);
			else
				ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->ChainOffset, 0);
		} else {
			ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->ChainOffset,
			    (U8)instance->chain_offset_io_req);
		}

		/* prepare physical chain element */
		ieeeChainElement = scsi_raid_io_sgl_ieee;

		ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &ieeeChainElement->NextChainOffset, 0);

		if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER)
			ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &ieeeChainElement->Flags,
			    IEEE_SGE_FLAGS_CHAIN_ELEMENT);
		else
			ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &ieeeChainElement->Flags,
			    (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
			    MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));

		ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &ieeeChainElement->Length,
		    (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));

		ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
		    &ieeeChainElement->Address,
		    (U64)cmd->sgl_phys_addr);

		sg_to_process = numElements - i;

		con_log(CL_ANN1, (CE_NOTE,
		    "[Additional SGE Count]:%x", sg_to_process));

		/* point to the chained SGL buffer */
		scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;

		/* build rest of the SGL in chained buffer */
		for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
			con_log(CL_DLEVEL3, (CE_NOTE,
			    "[remaining SGL]:%x", i));

			ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io_sgl_ieee->Address,
			    acmd->cmd_dmacookies[i].dmac_laddress);

			ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io_sgl_ieee->Length,
			    acmd->cmd_dmacookies[i].dmac_size);

			ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io_sgl_ieee->Flags, 0);

			if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
				if (i == (numElements - 1))
					ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io_sgl_ieee->Flags,
					    IEEE_SGE_FLAGS_END_OF_LIST);
			}

			*datalen += acmd->cmd_dmacookies[i].dmac_size;

#ifdef DEBUG
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Address]:%llx",
			    scsi_raid_io_sgl_ieee->Address));
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
			con_log(CL_DLEVEL1, (CE_NOTE,
			    "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
#endif

			i++;
		}
	}

	return (0);
} /* end of BuildScatterGather */


/*
 * build_cmd
 */
struct mrsas_cmd *
mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
    struct scsi_pkt *pkt, uchar_t *cmd_done)
{
	uint8_t		fp_possible = 0;
	uint16_t	flags = 0;
	uint32_t	i, index;
	uint32_t	context;
	uint32_t	sge_bytes;
	uint8_t		ChainOffsetValue;
	uint32_t	SGLFlags;
	uint32_t	lba_count = 0;
	uint32_t	start_lba_hi = 0;
	uint32_t	start_lba_lo = 0;
	ddi_acc_handle_t	acc_handle;
	struct mrsas_cmd	*cmd = NULL;
	struct scsa_cmd		*acmd = PKT2CMD(pkt);
	MRSAS_REQUEST_DESCRIPTOR_UNION	*ReqDescUnion;
	Mpi2RaidSCSIIORequest_t		*scsi_raid_io;
	Mpi25IeeeSgeChain64_t		*scsi_raid_io_sgl_ieee;
	uint32_t	datalen;
	struct IO_REQUEST_INFO	io_info;
	MR_FW_RAID_MAP_ALL	*local_map_ptr;
	MR_LD_RAID	*raid;
	U32		ld;
	uint16_t	pd_cmd_cdblen;

	con_log(CL_DLEVEL1, (CE_NOTE,
	    "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));

	/* find out if this is logical or physical drive command. */
	acmd->islogical = MRDRV_IS_LOGICAL(ap);
	acmd->device_id = MAP_DEVICE_ID(instance, ap);

	*cmd_done = 0;

	/* get the command packet */
	if (!(cmd = get_raid_msg_pkt(instance))) {
		return (NULL);
	}

	index = cmd->index;
	ReqDescUnion = mr_sas_get_request_descriptor(instance, index, cmd);
	ReqDescUnion->Words = 0;
	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
	ReqDescUnion->SCSIIO.RequestFlags =
	    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
	    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	cmd->request_desc = ReqDescUnion;
	cmd->pkt = pkt;
	cmd->cmd = acmd;

	/* lets get the command directions */
	if (acmd->cmd_flags & CFLAG_DMASEND) {
		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORDEV);
		}
	} else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
		if (acmd->cmd_flags & CFLAG_CONSISTENT) {
			(void) ddi_dma_sync(acmd->cmd_dmahandle,
			    acmd->cmd_dma_offset, acmd->cmd_dma_len,
			    DDI_DMA_SYNC_FORCPU);
		}
	} else {
		con_log(CL_ANN, (CE_NOTE, "NO DMA\n"));
	}

	/* get SCSI_IO raid message frame pointer */
	scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;

	/* zero out SCSI_IO raid message frame */
	(void) memset(scsi_raid_io, 0, sizeof (Mpi2RaidSCSIIORequest_t));

	/* Set the ldTargetId in the RaidContext */
	ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &scsi_raid_io->RaidContext.ldTargetId,
	    acmd->device_id);

	/* Copy CDB to scsi_io_request message frame */
	ddi_rep_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    (uint8_t *)pkt->pkt_cdbp,
	    (uint8_t *)scsi_raid_io->CDB.CDB32,
	    acmd->cmd_cdblen, DDI_DEV_AUTOINCR);

	/*
	 * Just the CDB length, rest of the Flags are zero.
	 * This will be modified later.
	 */
	ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
	    &scsi_raid_io->IoFlags,
	    acmd->cmd_cdblen);

	pd_cmd_cdblen = acmd->cmd_cdblen;

	switch (pkt->pkt_cdbp[0]) {
	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:

		if (acmd->islogical) {
			/* Initialize sense Information */
			if (cmd->sense1 == NULL) {
				con_log(CL_ANN, (CE_NOTE,
				    "tbolt_build_cmd: Sense buffer "
				    "ptr NULL\n"));
			}
			bzero(cmd->sense1, SENSE_LENGTH);
			con_log(CL_DLEVEL2, (CE_NOTE,
			    "tbolt_build_cmd CDB[0] = %x\n",
			    pkt->pkt_cdbp[0]));

			if (acmd->cmd_cdblen == CDB_GROUP0) {
				/* 6-byte cdb */
				lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
				start_lba_lo =
				    ((uint32_t)(pkt->pkt_cdbp[3]) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
				    ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
				    << 16));
			} else if (acmd->cmd_cdblen == CDB_GROUP1) {
				/* 10-byte cdb */
				lba_count =
				    (((uint16_t)(pkt->pkt_cdbp[8])) |
				    ((uint16_t)(pkt->pkt_cdbp[7]) << 8));

				start_lba_lo =
				    (((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));

			} else if (acmd->cmd_cdblen == CDB_GROUP5) {
				/* 12-byte cdb */
				lba_count = (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));

				start_lba_lo =
				    (((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));

			} else if (acmd->cmd_cdblen == CDB_GROUP4) {
				/* 16-byte cdb */
				lba_count = (
				    ((uint32_t)(pkt->pkt_cdbp[13])) |
				    ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[10]) << 24));

				start_lba_lo = (
				    ((uint32_t)(pkt->pkt_cdbp[9])) |
				    ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[6]) << 24));

				start_lba_hi = (
				    ((uint32_t)(pkt->pkt_cdbp[5])) |
				    ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
				    ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
				    ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
			}
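
			/*
			 * E.g. a READ(10) CDB of 28 00 00 00 10 00 00 00
			 * 08 00 decodes (big-endian fields) to
			 * start_lba_lo = 0x1000 and lba_count = 8.
			 * Illustrative only.
			 */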

			if (instance->tbolt &&
			    ((lba_count * 512) >
			    mrsas_tbolt_max_cap_maxxfer)) {
				cmn_err(CE_WARN,
				    "IO SECTOR COUNT exceeds controller "
				    "limit 0x%x sectors", lba_count);
			}

			(void) memset(&io_info, 0,
			    sizeof (struct IO_REQUEST_INFO));
			io_info.ldStartBlock =
			    ((uint64_t)start_lba_hi << 32) | start_lba_lo;
			io_info.numBlocks = lba_count;
			io_info.ldTgtId = acmd->device_id;

			if (acmd->cmd_flags & CFLAG_DMASEND)
				io_info.isRead = 0;
			else
				io_info.isRead = 1;

			/* Acquire SYNC MAP UPDATE lock */
			mutex_enter(&instance->sync_map_mtx);

			local_map_ptr =
			    instance->ld_map[(instance->map_id & 1)];

			if ((MR_TargetIdToLdGet(acmd->device_id,
			    local_map_ptr) >= MAX_LOGICAL_DRIVES) ||
			    !instance->fast_path_io) {
				cmn_err(CE_NOTE,
				    "Fast Path NOT Possible, "
				    "targetId >= MAX_LOGICAL_DRIVES || "
				    "!instance->fast_path_io");
				fp_possible = 0;
				/* Set region lock flags to BYPASS */
				ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.regLockFlags,
				    0);
			} else {
				if (MR_BuildRaidContext(instance, &io_info,
				    &scsi_raid_io->RaidContext,
				    local_map_ptr))
					fp_possible = io_info.fpOkForIo;
			}

			if (!enable_fp) {
				fp_possible = 0;
			}
			con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
			    "instance->fast_path_io %d fp_possible %d\n",
			    enable_fp, instance->fast_path_io, fp_possible));

			if (fp_possible) {

				/* Check for DIF enabled LD */
				if (MR_CheckDIF(acmd->device_id,
				    local_map_ptr)) {
					/*
					 * Prepare 32 Byte CDB for DIF
					 * capable Disk.
					 */
					mrsas_tbolt_prepare_cdb(instance,
					    scsi_raid_io->CDB.CDB32,
					    &io_info,
					    scsi_raid_io,
					    start_lba_lo);
				} else {
					mrsas_tbolt_set_pd_lba(
					    scsi_raid_io->CDB.CDB32,
					    (uint8_t *)&pd_cmd_cdblen,
					    io_info.pdBlock,
					    io_info.numBlocks, 0);
					ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->IoFlags,
					    pd_cmd_cdblen);
				}

				ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->Function,
				    MPI2_FUNCTION_SCSI_IO_REQUEST);

				ReqDescUnion->SCSIIO.RequestFlags =
				    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

				if (instance->device_id ==
				    PCI_DEVICE_ID_LSI_INVADER) {
					uint8_t regLockFlags =
					    ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->RaidContext.regLockFlags);
					uint16_t IoFlags =
					    ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->IoFlags);

					if (regLockFlags ==
					    REGION_TYPE_UNUSED)
						ReqDescUnion->SCSIIO.RequestFlags =
						    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
						    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

					IoFlags |=
					    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
					regLockFlags |=
					    (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
					    MR_RL_FLAGS_SEQ_NUM_ENABLE);

					ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->ChainOffset, 0);
					ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->RaidContext.nsegType,
					    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
					    MPI2_TYPE_CUDA));
					ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->RaidContext.regLockFlags,
					    regLockFlags);
					ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->IoFlags, IoFlags);
				}

				if ((instance->load_balance_info[acmd->device_id].loadBalanceFlag) &&
				    (io_info.isRead)) {
					io_info.devHandle =
					    get_updated_dev_handle(
					    &instance->load_balance_info[acmd->device_id],
					    &io_info);
					cmd->load_balance_flag |=
					    MEGASAS_LOAD_BALANCE_FLAG;
				} else {
					cmd->load_balance_flag &=
					    ~MEGASAS_LOAD_BALANCE_FLAG;
				}

				ReqDescUnion->SCSIIO.DevHandle =
				    io_info.devHandle;
				ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->DevHandle,
				    io_info.devHandle);

			} else {
				ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->Function,
				    MPI2_FUNCTION_LD_IO_REQUEST);

				ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->DevHandle,
				    acmd->device_id);

				ReqDescUnion->SCSIIO.RequestFlags =
				    (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

				ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.timeoutValue,
				    local_map_ptr->raidMap.fpPdIoTimeoutSec);

				if (instance->device_id ==
				    PCI_DEVICE_ID_LSI_INVADER) {
					uint8_t regLockFlags =
					    ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->RaidContext.regLockFlags);

					if (regLockFlags ==
					    REGION_TYPE_UNUSED)
						ReqDescUnion->SCSIIO.RequestFlags =
						    (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
						    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

					regLockFlags |=
					    (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
					    MR_RL_FLAGS_SEQ_NUM_ENABLE);

					ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->RaidContext.nsegType,
					    ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
					    MPI2_TYPE_CUDA));
					ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
					    &scsi_raid_io->RaidContext.regLockFlags,
					    regLockFlags);
				}

			} /* Not FP */

			/* Release SYNC MAP UPDATE lock */
			mutex_exit(&instance->sync_map_mtx);

			/*
			 * Set sense buffer physical address/length in
			 * scsi_io_request.
			 */
			ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->SenseBufferLowAddress,
			    cmd->sense_phys_addr1);
			ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->SenseBufferLength,
			    SENSE_LENGTH);

			/* Construct SGL */
			ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->SGLOffset0,
			    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);

			(void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
			    scsi_raid_io, &datalen);

			ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->DataLength, datalen);

			break;

		}
		else {
#ifndef PDSUPPORT	/* if PDSUPPORT, skip break and fall through */
			break;
#endif
		}
		/* FALLTHROUGH: for all non-rd/wr cmds */
	default:
		switch (pkt->pkt_cdbp[0]) {
		case 0x35: {	/* SCMD_SYNCHRONIZE_CACHE */
			return_raid_msg_pkt(instance, cmd);
			*cmd_done = 1;
			return (NULL);
		}

		case SCMD_MODE_SENSE:
		case SCMD_MODE_SENSE_G1: {
			union scsi_cdb	*cdbp;
			uint16_t	page_code;

			cdbp = (void *)pkt->pkt_cdbp;
			page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
			switch (page_code) {
			case 0x3:
			case 0x4:
				(void) mrsas_mode_sense_build(pkt);
				return_raid_msg_pkt(instance, cmd);
				*cmd_done = 1;
				return (NULL);
			}
			break;
		}

		default: {
			/*
			 * Here we need to handle PASSTHRU for
			 * Logical Devices. Like Inquiry etc.
			 */
			if (!(acmd->islogical)) {

				/* Acquire SYNC MAP UPDATE lock */
				mutex_enter(&instance->sync_map_mtx);

				local_map_ptr =
				    instance->ld_map[(instance->map_id & 1)];

				ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->Function,
				    MPI2_FUNCTION_SCSI_IO_REQUEST);

				ReqDescUnion->SCSIIO.RequestFlags =
				    (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

				ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->DevHandle,
				    local_map_ptr->raidMap.devHndlInfo[acmd->device_id].curDevHdl);

				/* Set regLockFlags to REGION_TYPE_BYPASS */
				ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.regLockFlags,
				    0);
				ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.regLockRowLBA,
				    0);
				ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.regLockLength,
				    0);
				ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.RAIDFlags,
				    MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
				    MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
				ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.timeoutValue,
				    local_map_ptr->raidMap.fpPdIoTimeoutSec);
				ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->RaidContext.ldTargetId,
				    acmd->device_id);
				ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->LUN[1], acmd->lun);

				/* Release SYNC MAP UPDATE lock */
				mutex_exit(&instance->sync_map_mtx);

			} else {
				ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->Function,
				    MPI2_FUNCTION_LD_IO_REQUEST);
				ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->LUN[1], acmd->lun);
				ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
				    &scsi_raid_io->DevHandle,
				    acmd->device_id);
				ReqDescUnion->SCSIIO.RequestFlags =
				    (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
				    MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
			}

			/*
			 * Set sense buffer physical address/length in
			 * scsi_io_request.
			 */
			ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->SenseBufferLowAddress,
			    cmd->sense_phys_addr1);
			ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);

			/* Construct SGL */
			ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->SGLOffset0,
			    offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);

			(void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
			    scsi_raid_io, &datalen);

			ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
			    &scsi_raid_io->DataLength, datalen);

			con_log(CL_ANN, (CE_CONT,
			    "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
			    pkt->pkt_cdbp[0], acmd->device_id));
			con_log(CL_DLEVEL1, (CE_CONT,
			    "data length = %x\n",
			    scsi_raid_io->DataLength));
			con_log(CL_DLEVEL1, (CE_CONT,
			    "cdb length = %x\n",
			    acmd->cmd_cdblen));
		}
		break;
		}
	}

#ifdef lint
	context = context;
#endif

	return (cmd);
}

1882 /*
1883 * mrsas_tbolt_tran_init_pkt - allocate & initialize a scsi_pkt structure
1884 * @ap:
1885 * @pkt:
1886 * @bp:
1887 * @cmdlen:
1888 * @statuslen:
1889 * @tgtlen:
1890 * @flags:
1891 * @callback:
1892 *
1893 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1894 * structure and DMA resources for a target driver request. The
1895 * tran_init_pkt() entry point is called when the target driver calls the
1896 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1897 * is a request to perform one or more of three possible services:
1966 }
1967 }
1968 return (pkt);
1969 }
1970
1971
1972 uint32_t
1973 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1974 {
1975 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1976 }
1977
1978 void
1979 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1980 {
1981 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1982 struct scsi_pkt *pkt;
1983
1984 atomic_add_16(&instance->fw_outstanding, 1);
1985
1986 con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1987
1988 con_log(CL_DLEVEL1, (CE_CONT,
1989 " [req desc Words] %llx \n", req_desc->Words));
1990 con_log(CL_DLEVEL1, (CE_CONT,
1991 " [req desc low part] %x \n", (uint32_t)req_desc->Words));
1992 con_log(CL_DLEVEL1, (CE_CONT,
1993 " [req desc high part] %x \n", (uint32_t)(req_desc->Words >> 32)));
1994 pkt = cmd->pkt;
1995
1996 if (pkt) {
1997 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1998 "ISSUED CMD TO FW : called : cmd:"
1999 ": %p instance : %p pkt : %p pkt_time : %x\n",
2000 gethrtime(), (void *)cmd, (void *)instance,
2001 (void *)pkt, cmd->drv_pkt_time));
2002 if (instance->adapterresetinprogress) {
2003 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2004 con_log(CL_ANN, (CE_NOTE,
2005 "TBOLT Reset the scsi_pkt timer"));
2006 } else {
2007 push_pending_mfi_pkt(instance, cmd);
2008 }
2009
2010 } else {
2011 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2012 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
2013 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
2017 mutex_enter(&instance->reg_write_mtx);
2018 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2019 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2020 mutex_exit(&instance->reg_write_mtx);
2021 }
2022
2023 /*
2024 * issue_cmd_in_sync_mode
2025 */
2026 int
2027 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
2028 struct mrsas_cmd *cmd)
2029 {
2030 int i;
2031 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2032 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2033
2034 struct mrsas_header *hdr;
2035 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2036
2037 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X", cmd->SMID));
2038
2039
2040 if (instance->adapterresetinprogress) {
2041 cmd->drv_pkt_time = ddi_get16
2042 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2043 if (cmd->drv_pkt_time < debug_timeout_g)
2044 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2045 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
2046 "RESET-IN-PROGRESS, issue cmd & return.\n"));
2047
2048 mutex_enter(&instance->reg_write_mtx);
2049 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2050 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2051 mutex_exit(&instance->reg_write_mtx);
2052
2053 return (DDI_SUCCESS);
2054 } else {
2055 con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: pushing the pkt\n"));
2056 push_pending_mfi_pkt(instance, cmd);
2057 }
2058
2059 con_log(CL_DLEVEL2, (CE_NOTE,
2060 "HighQport offset :%p",
2061 (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
2062 con_log(CL_DLEVEL2, (CE_NOTE,
2063 "LowQport offset :%p",
2064 (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
2065
2066 cmd->sync_cmd = MRSAS_TRUE;
2067 cmd->cmd_status = ENODATA;
2068
2069
2070 mutex_enter(&instance->reg_write_mtx);
2071 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2072 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2073 mutex_exit(&instance->reg_write_mtx);
2074
2075 con_log(CL_ANN1, (CE_NOTE,
2076 " req desc high part %x \n", (uint32_t)(req_desc->Words >> 32)));
2077 con_log(CL_ANN1, (CE_NOTE,
2078 " req desc low part %x \n", (uint32_t)req_desc->Words));
2079
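/*
 * Wait for the interrupt path to flip cmd_status. Note that the loop
 * bounds the number of cv_wait() wakeups, not elapsed milliseconds.
 */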
2080 mutex_enter(&instance->int_cmd_mtx);
2081 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2082 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2083 }
2084 mutex_exit(&instance->int_cmd_mtx);
2085
2086
2087 if (i < (msecs - 1)) {
2088 return (DDI_SUCCESS);
2089 } else {
2090 return (DDI_FAILURE);
2091 }
2092 }
2093
2094 /*
2095 * issue_cmd_in_poll_mode
2096 */
2097 int
2098 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2099 struct mrsas_cmd *cmd)
2100 {
2101 int i;
2102 uint16_t flags;
2103 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2104 struct mrsas_header *frame_hdr;
2105 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2106
2107 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X", cmd->SMID));
2108
2109
2110 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2111 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2112 MFI_CMD_STATUS_POLL_MODE);
2113 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2114 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2115 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2116
2117 con_log(CL_ANN1, (CE_NOTE,
2118 " req desc low part %x \n", (uint32_t)req_desc->Words));
2119 con_log(CL_ANN1, (CE_NOTE,
2120 " req desc high part %x \n", (uint32_t)(req_desc->Words >> 32)));
2121
2122 /* issue the frame using inbound queue port */
2123 mutex_enter(&instance->reg_write_mtx);
2124 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2125 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2126 mutex_exit(&instance->reg_write_mtx);
2127
2128 for (i = 0; i < msecs && (
2129 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2130 == MFI_CMD_STATUS_POLL_MODE); i++) {
2131 /* wait for cmd_status to change from 0xFF */
2132 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2133 }
2134
2135 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2136 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2137 con_log(CL_ANN1, (CE_NOTE,
2138 " cmd failed %x \n", (uint32_t)req_desc->Words));
2139 return (DDI_FAILURE);
2140 }
2141
2142 return (DDI_SUCCESS);
2143 }
2144
2145 void
2146 tbolt_enable_intr(struct mrsas_instance *instance)
2147 {
2148 uint32_t mask;
2149
2150 /* TODO: For Thunderbolt/Invader also clear the outbound intr
2151 * status on enable (write ~0, then read it back to flush), as
2152 * the Linux driver does. */
2153
2154 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2155
2156 /* dummy read to force PCI flush */
2157 mask = RD_OB_INTR_MASK(instance);
2158
2159 }
2160
2161 void
2162 tbolt_disable_intr(struct mrsas_instance *instance)
2163 {
2164 uint32_t mask = 0xFFFFFFFF;
2165 uint32_t status;
2166
2167
2168 WR_OB_INTR_MASK(mask, instance);
2169
2170 /* Dummy readl to force pci flush */
2171
2172 status = RD_OB_INTR_MASK(instance);
2173 }
2174
2175
2176 int
2177 tbolt_intr_ack(struct mrsas_instance *instance)
2178 {
2179 uint32_t status;
2180
2181 /* check if it is our interrupt */
2182 status = RD_OB_INTR_STATUS(instance);
2183 con_log(CL_ANN1, (CE_NOTE,
2184 "chkpnt: Entered tbolt_intr_ack status = %d \n", status));
2185
2186 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2187 return (DDI_INTR_UNCLAIMED);
2188 }
2189
2190 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2191 /* clear the interrupt by writing back the same value */
2192 WR_OB_INTR_STATUS(status, instance);
2193 /* dummy READ */
2194 RD_OB_INTR_STATUS(instance);
2195 }
2196 return (DDI_INTR_CLAIMED);
2197 }
2198
2199 /*
2200 * get_raid_msg_pkt : Get a command from the free pool
2201 * After successful allocation, the caller of this routine
2202 * must clear the frame buffer (memset to zero) before
2203 * using the packet further.
2204 *
2205 * ***** Note *****
2206 * After clearing the frame buffer the context id of the
2207 * frame buffer SHOULD be restored back.
2208 */
2209
2210 struct mrsas_cmd *
2211 get_raid_msg_pkt(struct mrsas_instance *instance)
2212 {
2213 mlist_t *head = &instance->cmd_pool_list;
2214 struct mrsas_cmd *cmd = NULL;
2283 void
2284 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2285 {
2286 mutex_enter(&instance->cmd_app_pool_mtx);
2287 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2288
2289 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2290
2291 mutex_exit(&instance->cmd_app_pool_mtx);
2292 }
2293
2294
2295 void
2296 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2297 struct mrsas_cmd *cmd)
2298 {
2299 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2300 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2301 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2302 uint32_t index;
2303
2304 if (!instance->tbolt) {
2305 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled.\n"));
2306 return;
2307 }
2308
2309 index = cmd->index;
2310
2311 ReqDescUnion =
2312 mr_sas_get_request_descriptor(instance, index, cmd);
2313
2314 if (!ReqDescUnion) {
2315 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2316 return;
2317 }
2318
2319 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2320
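/*
 * MFI pass-through: the MPI2 request below gets the PASSTHRU function
 * code and a single IEEE chain SGE pointing at the legacy MFI frame's
 * physical address, so pre-fusion MFI commands ride the TB queue.
 */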
2321 ReqDescUnion->Words = 0;
2322
2323 ReqDescUnion->SCSIIO.RequestFlags =
2324 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2325 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2326
2327 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2328
2329 cmd->request_desc = ReqDescUnion;
2330
2331 /* Get the RAID message frame pointer. */
2332 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2333
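/*
 * On Invader, the Flags of the last IEEE SGE in the main frame are
 * cleared up front; presumably the FW checks it as an end-of-list
 * marker before the chain element is built (assumption).
 */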
2334 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2335 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2336 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2337 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2338 &sgl_ptr_end->Flags, 0);
2339 }
2340
2341 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2342 &scsi_raid_io->Function,
2343 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2344
2345 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2346 &scsi_raid_io->SGLOffset0,
2347 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2348
2349 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2350 &scsi_raid_io->ChainOffset,
2351 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2352
2353 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2354 &scsi_raid_io->SenseBufferLowAddress,
2355 cmd->sense_phys_addr1);
2356
2357
2358 scsi_raid_io_sgl_ieee =
2359 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2360
2361 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
2362 &scsi_raid_io_sgl_ieee->Address,
2363 (U64)cmd->frame_phys_addr);
2364
2365 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2366 &scsi_raid_io_sgl_ieee->Flags,
2367 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2368 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2369 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2370 &scsi_raid_io_sgl_ieee->Length, 1024); /* MEGASAS_MAX_SZ_CHAIN_FRAME */
2371
2372 con_log(CL_ANN1, (CE_NOTE,
2373 "[MFI CMD PHY ADDRESS]:%llx",
2374 scsi_raid_io_sgl_ieee->Address));
2375 con_log(CL_ANN1, (CE_NOTE,
2376 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2377 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2378 scsi_raid_io_sgl_ieee->Flags));
2379 }
2380
2381
2382 void
2383 tbolt_complete_cmd(struct mrsas_instance *instance,
2384 struct mrsas_cmd *cmd)
2385 {
2386 uint8_t status;
2387 uint8_t extStatus;
2388 uint8_t arm;
2389 struct scsa_cmd *acmd;
2390 struct scsi_pkt *pkt;
2391 struct scsi_arq_status *arqstat;
2392 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2393 LD_LOAD_BALANCE_INFO *lbinfo;
2394 int i;
2395
2396 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2397
2398 status = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2399 &scsi_raid_io->RaidContext.status);
2400 extStatus = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2401 &scsi_raid_io->RaidContext.extStatus);
2402
2403 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2404 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2405
2406 if (status != MFI_STAT_OK) {
2407 con_log(CL_ANN, (CE_WARN,
2408 "IO Cmd Failed SMID %x", cmd->SMID));
2409 } else {
2410 con_log(CL_ANN, (CE_NOTE,
2411 "IO Cmd Success SMID %x", cmd->SMID));
2412 }
2413
2414 /* regular commands */
2415
2416 switch (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2417 &scsi_raid_io->Function)) {
2418
2419 case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2420 acmd = (struct scsa_cmd *)cmd->cmd;
2421 lbinfo = &instance->load_balance_info[acmd->device_id];
2422
2423 if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2424 arm = lbinfo->raid1DevHandle[0] == scsi_raid_io->DevHandle ? 0 : 1;
2425
2426 lbinfo->scsi_pending_cmds[arm]--;
2427 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2428 }
2429 con_log(CL_DLEVEL3, (CE_NOTE,
2430 "FastPath IO Completion Success "));
2431
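/* FALLTHRU: fast-path completions share the LD IO completion below. */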
2432 case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
2433 acmd = (struct scsa_cmd *)cmd->cmd;
2434 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2435
2436 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2437 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2438 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2439 acmd->cmd_dma_offset,
2440 acmd->cmd_dma_len,
2441 DDI_DMA_SYNC_FORCPU);
2442 }
2443 }
2444
2445 pkt->pkt_reason = CMD_CMPLT;
2446 pkt->pkt_statistics = 0;
2447 pkt->pkt_state = STATE_GOT_BUS
2448 | STATE_GOT_TARGET | STATE_SENT_CMD
2449 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2450
2451 con_log(CL_ANN, (CE_CONT,
2452 " CDB[0] = %x completed for %s: size %lx SMID %x cmd_status %x",
2453 pkt->pkt_cdbp[0],
2454 ((acmd->islogical) ? "LD" : "PD"),
2455 acmd->cmd_dmacount, cmd->SMID, status));
2456
2457 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2458 struct scsi_inquiry *inq;
2459
2460 if (acmd->cmd_dmacount != 0) {
2461 bp_mapin(acmd->cmd_buf);
2462 inq = (struct scsi_inquiry *)
2463 acmd->cmd_buf->b_un.b_addr;
2464
2465 /* don't expose physical drives to OS */
2466 if (acmd->islogical &&
2467 (status == MFI_STAT_OK)) {
2468 display_scsi_inquiry(
2469 (caddr_t)inq);
2470 }
2471 #ifdef PDSUPPORT
2472 else if ((status ==
2473 MFI_STAT_OK) && inq->inq_dtype ==
2474 DTYPE_DIRECT) {
2475
2476 display_scsi_inquiry(
2477 (caddr_t)inq);
2478 }
2479 #endif
2480 else {
2481 /* for physical disk */
2482 status =
2483 MFI_STAT_DEVICE_NOT_FOUND;
2484 }
2485 }
2486 }
2487
2488 switch (status) {
2489 case MFI_STAT_OK:
2490 pkt->pkt_scbp[0] = STATUS_GOOD;
2491 break;
2492 case MFI_STAT_LD_CC_IN_PROGRESS:
2493 case MFI_STAT_LD_RECON_IN_PROGRESS:
2494 pkt->pkt_scbp[0] = STATUS_GOOD;
2495 break;
2496 case MFI_STAT_LD_INIT_IN_PROGRESS:
2497 pkt->pkt_reason = CMD_TRAN_ERR;
2498 break;
2499 case MFI_STAT_SCSI_IO_FAILED:
2500 cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
2501 pkt->pkt_reason = CMD_TRAN_ERR;
2502 break;
2503 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2504 con_log(CL_ANN, (CE_WARN,
2505 "tbolt_complete_cmd: scsi_done with error"));
2506
2507 pkt->pkt_reason = CMD_CMPLT;
2508 ((struct scsi_status *)
2509 pkt->pkt_scbp)->sts_chk = 1;
2510
2511 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2512 con_log(CL_ANN, (CE_WARN, "TEST_UNIT_READY fail"));
2513 } else {
2514 pkt->pkt_state |= STATE_ARQ_DONE;
2515 arqstat = (void *)(pkt->pkt_scbp);
2516 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2517 arqstat->sts_rqpkt_resid = 0;
2518 arqstat->sts_rqpkt_state |=
2519 STATE_GOT_BUS | STATE_GOT_TARGET
2520 | STATE_SENT_CMD
2521 | STATE_XFERRED_DATA;
2522 *(uint8_t *)&arqstat->sts_rqpkt_status =
2523 STATUS_GOOD;
2524 con_log(CL_ANN1, (CE_NOTE,
2525 "Copying Sense data %x",
2526 cmd->SMID));
2527
2528 ddi_rep_get8(
2529 instance->
2530 mpi2_frame_pool_dma_obj.acc_handle,
2531 (uint8_t *)
2532 &(arqstat->sts_sensedata),
2533 cmd->sense1,
2534 sizeof (struct scsi_extended_sense),
2535 DDI_DEV_AUTOINCR);
2536
2537 }
2538 break;
2539 case MFI_STAT_LD_OFFLINE:
2540 cmn_err(CE_WARN,
2541 "tbolt_complete_cmd: ld offline "
2542 "CDB[0]=0x%x targetId=0x%x devhandle=0x%x\n", //UNDO:
2543 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2544 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->RaidContext.ldTargetId),
2545 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->DevHandle));
2546 pkt->pkt_reason = CMD_DEV_GONE;
2547 pkt->pkt_statistics = STAT_DISCON;
2548 break;
2549 case MFI_STAT_DEVICE_NOT_FOUND:
2550 con_log(CL_ANN, (CE_CONT,
2551 "tbolt_complete_cmd: device not found error"));
2552 pkt->pkt_reason = CMD_DEV_GONE;
2553 pkt->pkt_statistics = STAT_DISCON;
2554 break;
2555
2556 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2557 pkt->pkt_state |= STATE_ARQ_DONE;
2558 pkt->pkt_reason = CMD_CMPLT;
2559 ((struct scsi_status *)
2560 pkt->pkt_scbp)->sts_chk = 1;
2561
2562 arqstat = (void *)(pkt->pkt_scbp);
2563 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2564 arqstat->sts_rqpkt_resid = 0;
2565 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2566 | STATE_GOT_TARGET | STATE_SENT_CMD
2567 | STATE_XFERRED_DATA;
2568 *(uint8_t *)&arqstat->sts_rqpkt_status =
2569 STATUS_GOOD;
2570
2571 arqstat->sts_sensedata.es_valid = 1;
2572 arqstat->sts_sensedata.es_key =
2573 KEY_ILLEGAL_REQUEST;
2574 arqstat->sts_sensedata.es_class =
2575 CLASS_EXTENDED_SENSE;
2576
2577 /*
2578 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2579 * ASC: 0x21h; ASCQ: 0x00h;
2580 */
2581 arqstat->sts_sensedata.es_add_code = 0x21;
2582 arqstat->sts_sensedata.es_qual_code = 0x00;
2583 break;
2584 case MFI_STAT_INVALID_CMD:
2585 case MFI_STAT_INVALID_DCMD:
2586 case MFI_STAT_INVALID_PARAMETER:
2587 case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2588 default:
2589 cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!");
2590 pkt->pkt_reason = CMD_TRAN_ERR;
2591
2592 break;
2593 }
2594
2595 atomic_add_16(&instance->fw_outstanding, (-1));
2596
2597 /* Call the callback routine */
2598 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
2599 pkt->pkt_comp) {
2600 (*pkt->pkt_comp)(pkt);
2601 }
2602
2603 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2604
2605 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2606 &scsi_raid_io->RaidContext.status, 0);
2607
2608 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2609 &scsi_raid_io->RaidContext.extStatus, 0);
2610
2611 return_raid_msg_pkt(instance, cmd);
2612 break;
2613 }
2614 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
2615
2616 if (cmd->frame->dcmd.opcode
2617 == MR_DCMD_LD_MAP_GET_INFO &&
2618 cmd->frame->dcmd.mbox.b[1]
2619 == 1) {
2620
2621 mutex_enter(&instance->sync_map_mtx);
2622
2623 con_log(CL_ANN, (CE_NOTE,
2624 "LDMAP sync command SMID RECEIVED 0x%X",
2625 cmd->SMID));
2626 if (cmd->frame->hdr.cmd_status != 0) {
2627 cmn_err(CE_WARN,
2628 "map sync failed, status = 0x%x.\n", cmd->frame->hdr.cmd_status);
2629 } else {
2631 instance->map_id++;
2632 cmn_err(CE_NOTE,
2633 "map sync received, switched map_id to %ld \n", instance->map_id);
2634 }
2635
2636 if (MR_ValidateMapInfo(instance->ld_map[(instance->map_id & 1)], instance->load_balance_info))
2637 instance->fast_path_io = 1;
2638 else
2639 instance->fast_path_io = 0;
2640
2641 con_log(CL_ANN, (CE_NOTE,
2642 "instance->fast_path_io %d \n", instance->fast_path_io));
2643
2644 instance->unroll.syncCmd = 0;
2645
2646 if (instance->map_update_cmd == cmd) {
2647 return_raid_msg_pkt(instance, cmd);
2648 atomic_add_16(&instance->fw_outstanding, (-1));
2649 mrsas_tbolt_sync_map_info(instance);
2650 }
2651
2652 cmn_err(CE_NOTE, "LDMAP sync completed.\n");
2653 mutex_exit(&instance->sync_map_mtx);
2654 break;
2655 }
2656
2657 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2658 con_log(CL_ANN1, (CE_CONT,
2659 "AEN command SMID RECEIVED 0x%X",
2660 cmd->SMID));
2661 if ((instance->aen_cmd == cmd) &&
2662 (instance->aen_cmd->abort_aen)) {
2663 con_log(CL_ANN, (CE_WARN,
2664 "mrsas_softintr: "
2665 "aborted_aen returned"));
2666 } else {
2669 atomic_add_16(&instance->fw_outstanding, (-1));
2670 service_mfi_aen(instance, cmd);
2671 }
2672 }
2673
2674 if (cmd->sync_cmd == MRSAS_TRUE) {
2675 con_log(CL_ANN1, (CE_CONT,
2676 "Sync-mode Command Response SMID RECEIVED 0x%X",
2677 cmd->SMID));
2678
2679 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2680 } else {
2683 con_log(CL_ANN, (CE_CONT,
2684 "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2685 cmd->SMID));
2686 }
2687 break;
2688 default:
2689 /* free message */
2690 con_log(CL_ANN, (CE_NOTE, "tbolt_complete_cmd: Unknown Function type!"));
2691 break;
2692 }
2693 }
2694
2695 uint_t
2696 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2697 {
2698 uint8_t replyType;
2699 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2700 Mpi2ReplyDescriptorsUnion_t *desc;
2701 uint16_t smid;
2702 union desc_value d_val;
2703 struct mrsas_cmd *cmd;
2704 uint32_t i;
2705 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2706 uint8_t status;
2707
2708 struct mrsas_header *hdr;
2709 struct scsi_pkt *pkt;
2710
2711 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2712 0, 0, DDI_DMA_SYNC_FORDEV);
2713
2714 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2715 0, 0, DDI_DMA_SYNC_FORCPU);
2716
2717 desc = instance->reply_frame_pool;
2718 desc += instance->reply_read_index;
2719
2720 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2721 replyType = replyDesc->ReplyFlags &
2722 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2723
2724 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2725 return (DDI_INTR_UNCLAIMED);
2726
2727 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %llx \n",
2728 (void *)desc, desc->Words));
2729
2730 d_val.word = desc->Words;
2731
2732
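/*
 * Reply-ring convention: the driver pre-fills every descriptor with
 * all-ones (~0), so a descriptor whose two 32-bit halves are still
 * 0xffffffff has not been written by the FW yet. Consumed descriptors
 * are reset to ~0 below and the read index is written back to the FW
 * after the loop.
 */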
2733 /* Read Reply descriptor */
2734 while ((d_val.u1.low != 0xffffffff) &&
2735 (d_val.u1.high != 0xffffffff)) {
2736
2737 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2738 0, 0, DDI_DMA_SYNC_FORCPU);
2739
2740 smid = replyDesc->SMID;
2741
2742 if (!smid || smid > instance->max_fw_cmds + 1) {
2743 con_log(CL_ANN1, (CE_NOTE,
2744 "Reply Desc at Break = %p Words = %llx \n",
2745 (void *)desc, desc->Words));
2746 break;
2747 }
2748
2749 cmd = instance->cmd_list[smid - 1];
2750 if (!cmd) {
2751 con_log(CL_ANN1, (CE_NOTE,
2752 "mr_sas_tbolt_process_outstanding_cmd: Invalid command "
2753 " or Poll command received in completion path\n"));
2754 } else {
2756 mutex_enter(&instance->cmd_pend_mtx);
2757 if (cmd->sync_cmd == MRSAS_TRUE) {
2758 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2759 if (hdr) {
2760 con_log(CL_ANN1, (CE_NOTE,
2761 "mr_sas_tbolt_process_outstanding_cmd:"
2762 " mlist_del_init(&cmd->list).\n"));
2763 mlist_del_init(&cmd->list);
2764 }
2765 } else {
2766 pkt = cmd->pkt;
2767 if (pkt) {
2768 con_log(CL_ANN1, (CE_NOTE,
2769 "mr_sas_tbolt_process_outstanding_cmd:"
2770 "mlist_del_init(&cmd->list).\n"));
2771 mlist_del_init(&cmd->list);
2772 }
2773 }
2774
2775 mutex_exit(&instance->cmd_pend_mtx);
2776
2777 tbolt_complete_cmd(instance, cmd);
2778 }
2779 /* Set it back to all-ones (0xffffffff in each half). */
2780 desc->Words = (uint64_t)~0;
2781
2782 instance->reply_read_index++;
2783
2784 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2785 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2786 instance->reply_read_index = 0;
2787 }
2788
2789 /* Get the next reply descriptor */
2790 if (!instance->reply_read_index)
2791 desc = instance->reply_frame_pool;
2792 else
2793 desc++;
2794
2795 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2796
2797 d_val.word = desc->Words;
2798
2799 con_log(CL_ANN1, (CE_NOTE,
2800 "Next Reply Desc = %p Words = %llx\n",
2801 (void *)desc, desc->Words));
2802
2803 replyType = replyDesc->ReplyFlags &
2804 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2805
2806 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2807 break;
2808
2809 } /* End of while loop. */
2810
2811 /* update replyIndex to FW */
2812 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2813
2814
2815 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2816 0, 0, DDI_DMA_SYNC_FORDEV);
2817
2818 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2819 0, 0, DDI_DMA_SYNC_FORCPU);
2820 return (DDI_INTR_CLAIMED);
2821 }
2873
2874 if (cmd == NULL) {
2875 cmn_err(CE_WARN,
2876 "Failed to get a cmd from free-pool in get_ld_map_info()");
2877 return (DDI_FAILURE);
2878 }
2879
2880 dcmd = &cmd->frame->dcmd;
2881
2882 size_map_info = sizeof (MR_FW_RAID_MAP) +
2883 (sizeof (MR_LD_SPAN_MAP) *
2884 (MAX_LOGICAL_DRIVES - 1));
2885
2886 con_log(CL_ANN, (CE_NOTE,
2887 "size_map_info : 0x%x", size_map_info));
2888
2889 ci = instance->ld_map[(instance->map_id & 1)];
2890 ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2891
2892 if (!ci) {
2893 cmn_err(CE_WARN,
2894 "Failed to alloc mem for ld_map_info");
2895 return_raid_msg_pkt(instance, cmd);
2896 return (-1);
2897 }
2898
2899 memset(ci, 0, sizeof (*ci));
2900 memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2901
2902 dcmd->cmd = MFI_CMD_OP_DCMD;
2903 dcmd->cmd_status = 0xFF;
2904 dcmd->sge_count = 1;
2905 dcmd->flags = MFI_FRAME_DIR_READ;
2906 dcmd->timeout = 0;
2907 dcmd->pad_0 = 0;
2908 dcmd->data_xfer_len = size_map_info;
2909 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2910 dcmd->sgl.sge32[0].phys_addr = ci_h;
2911 dcmd->sgl.sge32[0].length = size_map_info;
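/*
 * MR_DCMD_LD_MAP_GET_INFO with a zeroed mbox is a plain read: the FW
 * DMA-writes the current RAID map into the pre-allocated ld_map
 * buffer addressed by the single SGE above.
 */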
2912
2913
2914 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2915
2916 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2917 ret = 0;
2918 con_log(CL_ANN1, (CE_NOTE,
2919 "Get LD Map Info success\n"));
2920 } else {
2921 cmn_err(CE_WARN,
2922 "Get LD Map Info failed\n");
2923 ret = -1;
2924 }
2925
2926 return_raid_msg_pkt(instance, cmd);
2927
2928 return (ret);
2929 }
2930
2931 void
2932 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2933 {
2934 uint32_t i;
2935 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2936 union desc_value d_val;
2937
2938 reply_desc = instance->reply_frame_pool;
2939
2940 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2941 d_val.word = reply_desc->Words;
2942 con_log(CL_DLEVEL3, (CE_NOTE,
2943 "i=%d, %x:%x",
2944 i, d_val.u1.high, d_val.u1.low));
2945 }
2946 }
2947
2948 /*
2949 * mrsas_tbolt_prepare_cdb - Build a 32-byte variable-length CDB
2950 * @io_info: MegaRAID IO request packet pointer.
2951 * @ref_tag: Reference tag for RD/WRPROTECT
2952 *
2953 * Prepares the DIF-protected CDB and the EEDP fields of the SCSI IO
2954 * request for a fast-path command.
2955 */
2955 void
2956 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[], struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request, U32 ref_tag)
2957 {
2958 uint16_t EEDPFlags;
2959 uint32_t Control;
2960 /* Prepare a 32-byte CDB if DIF is supported on this device. */
2961 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB\n"));
2962
2963 memset(cdb, 0, 32);
2964
2965 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2966
2967
2968 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2969
2970 if (io_info->isRead) {
2971 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2972 } else {
2974 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2975 }
2976
2977 cdb[10] = MRSAS_RD_WR_PROTECT; /* MEGASAS_RD_WR_PROTECT_CHECK_ALL in the Linux driver */
2978
2979 /* LOGICAL BLOCK ADDRESS */
2980 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2981 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2982 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2983 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2984 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2985 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2986 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2987 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2988
2989 /* Logical block reference tag */
2990 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2991 &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2992 BIG_ENDIAN(ref_tag));
2993
2994 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
2995 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask,
2996 0xffff);
2997
2998 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2999 &scsi_io_request->DataLength,
3000 ((io_info->numBlocks) * 512));
3001 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3002 &scsi_io_request->IoFlags, 32); /* Specify 32-byte cdb */
3003
3004 /* Transfer length */
3005 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
3006 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
3007 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
3008 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
3009
3010 /* set SCSI IO EEDPFlags */
3011 EEDPFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3012 &scsi_io_request->EEDPFlags);
3013 Control = ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3014 &scsi_io_request->Control);
3015
3016 /* Set SCSI IO EEDPFlags bits. */
3017 if (io_info->isRead) {
3018 /*
3019 * For READ commands: increment the Primary Reference Tag,
3020 * check the Reference Tag, and check and remove the
3021 * Protection Information fields.
3022 */
3023 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3024 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
3025 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
3026 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
3027 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
3028 } else {
3029 /*
3030 * For WRITE commands: increment the Primary Reference Tag
3031 * and insert the Protection Information fields.
3032 */
3033 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
3034 }
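/*
 * The (0x4 << 26) OR-ed into Control below is carried over from the
 * equivalent EEDP setup in the Linux megaraid_sas driver; the exact
 * meaning of those protection-control bits is an assumption here.
 */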
3035 Control |= (0x4 << 26);
3036
3037 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3038 &scsi_io_request->EEDPFlags, EEDPFlags);
3039 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3040 &scsi_io_request->Control, Control);
3041 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3042 &scsi_io_request->EEDPBlockSize,
3043 MRSAS_EEDPBLOCKSIZE);
3044 }
3045
3046
3047 /*
3048 * mrsas_tbolt_set_pd_lba - Sets PD LBA
3049 * @cdb: CDB
3050 * @cdb_len: cdb length
3051 * @start_blk: Start block of IO
3052 *
3053 * Used to set the PD LBA in CDB for FP IOs
3054 */
3055 void
3056 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk, U32 num_blocks, U8 DifCapable)
3057 {
3058 U8 cdb_len = *cdb_len_ptr;
3059 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
3060
3061 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
3062 if (((cdb_len == 12) || (cdb_len == 16)) &&
3063 (start_blk <= 0xffffffff)) {
3064 if (cdb_len == 16) {
3065 con_log(CL_ANN, (CE_NOTE, "Converting READ/WRITE(16) to a 10-byte CDB\n"));
3066 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3067 flagvals = cdb[1];
3068 groupnum = cdb[14];
3069 control = cdb[15];
3070 } else {
3071 con_log(CL_ANN, (CE_NOTE, "Converting READ/WRITE(12) to a 10-byte CDB\n"));
3072 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3073 flagvals = cdb[1];
3074 groupnum = cdb[10];
3075 control = cdb[11];
3076 }
3077
3078 memset(cdb, 0, 32); /* not sizeof (cdb): that is pointer-sized here; the CDB buffer is 32 bytes */
3079
3080 cdb[0] = opcode;
3081 cdb[1] = flagvals;
3082 cdb[6] = groupnum;
3083 cdb[9] = control;
3084 /* Set transfer length */
3085 cdb[8] = (U8)(num_blocks & 0xff);
3086 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3087 cdb_len = 10;
3088 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3089 /* Convert to 16 byte CDB for large LBA's */
3090 con_log(CL_ANN, (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB\n"));
3091 switch (cdb_len) {
3092 case 6:
3093 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3094 control = cdb[5];
3095 break;
3096 case 10:
3097 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3098 flagvals = cdb[1];
3099 groupnum = cdb[6];
3100 control = cdb[9];
3101 break;
3102 case 12:
3103 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3104 flagvals = cdb[1];
3105 groupnum = cdb[10];
3106 control = cdb[11];
3107 break;
3108 }
3109
3110 memset(cdb, 0, 32); /* not sizeof (cdb): that is pointer-sized here; the CDB buffer is 32 bytes */
3111
3112 cdb[0] = opcode;
3113 cdb[1] = flagvals;
3114 cdb[14] = groupnum;
3115 cdb[15] = control;
3116
3117 /* Transfer length */
3118 cdb[13] = (U8)(num_blocks & 0xff);
3119 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3120 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3121 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3122
3123 /* Specify 16-byte cdb */
3124 cdb_len = 16;
3125 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3126 /* convert to 10 byte CDB */
3127 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3128 control = cdb[5];
3129
3130 memset(cdb, 0, 32); /* not sizeof (cdb): that is pointer-sized here; the CDB buffer is 32 bytes */
3131 cdb[0] = opcode;
3132 cdb[9] = control;
3133
3134 /* Set transfer length */
3135 cdb[8] = (U8)(num_blocks & 0xff);
3136 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3137
3138 /* Specify 10-byte cdb */
3139 cdb_len = 10;
3140 }
3141
3142
3143 /* Normal case (fall through): just load the LBA here. */
3144 switch (cdb_len) {
3145 case 6:
3146 {
3147 U8 val = cdb[1] & 0xE0;
3148 cdb[3] = (U8)(start_blk & 0xff);
3149 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3150 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3162 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3163 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3164 break;
3165
3166 case 16:
3167 cdb[9] = (U8)(start_blk & 0xff);
3168 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3169 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3170 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3171 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3172 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3173 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3174 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3175 break;
3176 }
3177
3178 *cdb_len_ptr = cdb_len;
3179 }
3180
3181
3182 int
3183 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3184 {
3185 MR_FW_RAID_MAP_ALL *ld_map;
3186
3187 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3188
3189 ld_map = instance->ld_map[(instance->map_id & 1)];
3190
3191 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d\n",
3192 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3193
3194 if (MR_ValidateMapInfo(instance->ld_map[(instance->map_id & 1)], instance->load_balance_info)) {
3195 con_log(CL_ANN, (CE_CONT,
3196 "MR_ValidateMapInfo success"));
3197
3198 instance->fast_path_io = 1;
3199 con_log(CL_ANN, (CE_NOTE,
3200 "instance->fast_path_io %d \n", instance->fast_path_io));
3201
3202 return (DDI_SUCCESS);
3203 }
3204
3205 }
3206
3207 instance->fast_path_io = 0;
3208 cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
3209 con_log(CL_ANN, (CE_NOTE,
3210 "instance->fast_path_io %d \n", instance->fast_path_io));
3211
3212
3213 return (DDI_FAILURE);
3214 }
3215 /*
3216 * Marks HBA as bad. This will be called either when an
3217 * IO packet times out even after 3 FW resets
3218 * or the FW is found to be faulty even after 3 consecutive resets.
3219 */
3220
3221 int
3222 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3223 {
3224 cmn_err(CE_WARN, "TBOLT Kill adapter called\n");
3225
3226 if (instance->deadadapter == 1)
3227 return (DDI_FAILURE);
3228
3229 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3230 "Writing to doorbell with MFI_STOP_ADP "));
3231 mutex_enter(&instance->ocr_flags_mtx);
3232 instance->deadadapter = 1;
3233 mutex_exit(&instance->ocr_flags_mtx);
3234 instance->func_ptr->disable_intr(instance);
3235 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3236 /* Flush */
3237 RD_RESERVED0_REGISTER(instance);
3238
3239 (void) mrsas_print_pending_cmds(instance);
3240 (void) mrsas_complete_pending_cmds(instance);
3241 return (DDI_SUCCESS);
3242 }
3243 void mrsas_reset_reply_desc(struct mrsas_instance *instance)
3244 {
3245 int i;
3246 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3247 instance->reply_read_index = 0;
3248
3249 /* initializing reply address to 0xFFFFFFFF */
3250 reply_desc = instance->reply_frame_pool;
3251
3252 for (i = 0; i < instance->reply_q_depth; i++) {
3253 reply_desc->Words = (uint64_t)~0;
3254 reply_desc++;
3255 }
3256 }
3257
3258 int
3259 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3260 {
3261 uint32_t status = 0x00;
3262 uint32_t retry = 0;
3263 uint32_t seq_num;
3264 uint32_t cur_abs_reg_val;
3265 uint32_t fw_state;
3266 union mrsas_evt_class_locale class_locale;
3267 uint32_t abs_state;
3268 uint32_t i;
3269
3270 con_log(CL_ANN, (CE_NOTE,
3271 "mrsas_tbolt_reset_ppc entered\n "));
3272
3273 if (instance->deadadapter == 1) {
3274 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3275 "no more resets as HBA has been marked dead ");
3276 return (DDI_FAILURE);
3277 }
3278
3279 mutex_enter(&instance->ocr_flags_mtx);
3280 instance->adapterresetinprogress = 1;
3281 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3282 "adapterresetinprogress flag set, time %llx", gethrtime()));
3283 mutex_exit(&instance->ocr_flags_mtx);
3284
3285 instance->func_ptr->disable_intr(instance);
3286
3287 /* Add delay in order to complete the ioctl & IO cmds in-flight. */
3288 for (i = 0; i < 3000; i++) {
3289 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3290 }
3291
3292 instance->reply_read_index = 0;
3293
3294 retry_reset:
3295 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3296 ":Resetting TBOLT "));
3297
3298 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3299 WR_TBOLT_IB_WRITE_SEQ(4, instance);
3300 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3301 WR_TBOLT_IB_WRITE_SEQ(2, instance);
3302 WR_TBOLT_IB_WRITE_SEQ(7, instance);
3303 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
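/*
 * This is the standard MPI2 diagnostic-unlock key sequence
 * (0xF, 0x4, 0xB, 0x2, 0x7, 0xD); the host diag register accepts the
 * reset-adapter bit only after it has been written.
 */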
3304 con_log(CL_ANN1, (CE_NOTE,
3305 "mrsas_tbolt_reset_ppc: magic number written "
3306 "to write sequence register\n"));
3307 delay(100 * drv_usectohz(MILLISEC));
3308 status = RD_TBOLT_HOST_DIAG(instance);
3309 con_log(CL_ANN1, (CE_NOTE,
3310 "mrsas_tbolt_reset_ppc: host diag register "
3311 "read succeeded\n"));
3312
3313 while (status & DIAG_TBOLT_RESET_ADAPTER) {
3314 delay(100 * drv_usectohz(MILLISEC));
3315 status = RD_TBOLT_HOST_DIAG(instance);
3316 if (retry++ == 100) {
3317 cmn_err(CE_WARN,
3318 "mrsas_tbolt_reset_ppc: "
3319 "reset adapter bit is still set; "
3320 "retry count %d exceeded\n", retry);
3321 return (DDI_FAILURE);
3322 }
3323 }
3324
3325 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3326 delay(100 * drv_usectohz(MILLISEC));
3327
3328 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3329 (uint8_t *)((uintptr_t)(instance)->regmap +
3330 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3331
3332 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3333 delay(100 * drv_usectohz(MILLISEC));
3334 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3335 (uint8_t *)((uintptr_t)(instance)->regmap +
3336 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3337 if (retry++ == 100) {
3338 /* Don't call kill adapter here. */
3339 /* The RESET ADAPTER bit is cleared by firmware. */
3340 /* mrsas_tbolt_kill_adapter(instance); */
3341 cmn_err(CE_WARN, "mr_sas %d: %s(): RESET FAILED; return failure!!!", instance->instance, __func__);
3342 return (DDI_FAILURE);
3343 }
3344 }
3345
3346 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3347 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3348 "Calling mfi_state_transition_to_ready"));
3349
3350 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3351 retry = 0;
3352 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3353 delay(100 * drv_usectohz(MILLISEC));
3354 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3355 }
3356 if (abs_state <= MFI_STATE_FW_INIT) {
3357 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
3358 "state = 0x%x, RETRY RESET.\n", abs_state);
3359 goto retry_reset;
3360 }
3361
3362 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
3363 if (mfi_state_transition_to_ready(instance) ||
3364 debug_tbolt_fw_faults_after_ocr_g == 1) {
3365 cur_abs_reg_val =
3366 instance->func_ptr->read_fw_status_reg(instance);
3367 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3368
3369 con_log(CL_ANN1, (CE_NOTE,
3370 "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
3371 "FW state = 0x%x", fw_state));
3372 if (debug_tbolt_fw_faults_after_ocr_g == 1)
3373 fw_state = MFI_STATE_FAULT;
3374
3375 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
3376 "FW state = 0x%x", fw_state));
3377
3378 if (fw_state == MFI_STATE_FAULT) {
3379 /* Increment the fault count. */
3380 instance->fw_fault_count_after_ocr++;
3381 if (instance->fw_fault_count_after_ocr
3382 < MAX_FW_RESET_COUNT) {
3383 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3384 "FW is in fault after OCR count %d "
3385 "Retry Reset",
3386 instance->fw_fault_count_after_ocr);
3387 goto retry_reset;
3388
3389 } else {
3390 cmn_err(CE_WARN, "mrsas %d: %s:"
3391 "Max Reset Count exceeded >%d"
3392 "Mark HBA as bad, KILL adapter",
3393 instance->instance, __func__, MAX_FW_RESET_COUNT);
3394
3395 mrsas_tbolt_kill_adapter(instance);
3396 return (DDI_FAILURE);
3397 }
3398 }
3399 }
3400
3401 /* Reset the counter as FW is up after OCR. */
3402 instance->fw_fault_count_after_ocr = 0;
3403
3404 mrsas_reset_reply_desc(instance);
3405
3406
3407 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3408 "Calling mrsas_issue_init_mpi2"));
3409 abs_state = mrsas_issue_init_mpi2(instance);
3410 if (abs_state == DDI_FAILURE) {
3411 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3412 "INIT failed Retrying Reset");
3413 goto retry_reset;
3414 }
3415 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3416 "mrsas_issue_init_mpi2 Done"));
3417
3418 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3419 "Calling mrsas_print_pending_cmd\n"));
3420 (void) mrsas_print_pending_cmds(instance);
3421 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3422 "mrsas_print_pending_cmd done\n"));
3423
3424 instance->func_ptr->enable_intr(instance);
3425 instance->fw_outstanding = 0;
3426
3427 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3428 "Calling mrsas_issue_pending_cmds"));
3429 mrsas_issue_pending_cmds(instance);
3430 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3431 "issue_pending_cmds done.\n"));
3432
3433 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3434 "Calling aen registration"));
3435
3436 instance->aen_cmd->retry_count_for_ocr = 0;
3437 instance->aen_cmd->drv_pkt_time = 0;
3438
3439 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3440
3441 con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));
3442 mutex_enter(&instance->ocr_flags_mtx);
3443 instance->adapterresetinprogress = 0;
3444 mutex_exit(&instance->ocr_flags_mtx);
3445 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3446 "adpterresetinprogress flag unset"));
3447
3448 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done\n"));
3449 return (DDI_SUCCESS);
3450
3451 }
3452
3453
3454 /*
3455 * mrsas_tbolt_sync_map_info - Keep the FW's LD map in sync
3456 * @instance: Adapter soft state
3457 *
3458 * Issues an internal pended command (DCMD) that the FW completes
3459 * whenever the LD map changes; the returned map is mainly used to
3460 * find out the system PDs/LDs currently supported by the FW.
3461 */
3462
3463 int
3464 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3465 {
3466 int ret = 0, i;
3467 struct mrsas_cmd *cmd = NULL;
3468 struct mrsas_dcmd_frame *dcmd;
3469 uint32_t size_sync_info, num_lds;
3470 LD_TARGET_SYNC *ci = NULL;
3471 MR_FW_RAID_MAP_ALL *map;
3472 MR_LD_RAID *raid;
3473 LD_TARGET_SYNC *ld_sync;
3474 uint32_t ci_h = 0;
3475 uint32_t size_map_info;
3476
3477 cmd = get_raid_msg_pkt(instance);
3478
3479 if (cmd == NULL) {
3480 cmn_err(CE_WARN,
3481 "Failed to get a cmd from free-pool in mrsas_tbolt_sync_map_info(). ");
3482 return (DDI_FAILURE);
3483 }
3484
3485 /* Clear the frame buffer and assign back the context id */
3486 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3487 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3488 cmd->index);
3489 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3490
3491
3492 map = instance->ld_map[instance->map_id & 1];
3493
3494 num_lds = map->raidMap.ldCount;
3495
3496 dcmd = &cmd->frame->dcmd;
3497
3498 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3499
3500 con_log(CL_ANN, (CE_NOTE,
3501 "size_sync_info =0x%x ; ld count = 0x%x \n ",
3502 size_sync_info, num_lds));
3503
3504 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3505
3506 memset(ci, 0, sizeof (MR_FW_RAID_MAP_ALL));
3507 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3508
3509 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3510
3511 ld_sync = (LD_TARGET_SYNC *)ci;
3512
3513 for (i = 0; i < num_lds; i++, ld_sync++) {
3514 raid = MR_LdRaidGet(i, map);
3515
3516 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x\n",
3517 i, raid->seqNum, raid->flags.ldSyncRequired));
3518
3519 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3520
3521 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x \n",
3522 i, ld_sync->ldTargetId));
3523
3524 ld_sync->seqNum = raid->seqNum;
3525 }
3526
3527
3528 size_map_info = sizeof (MR_FW_RAID_MAP) +
3529 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3530
3531 dcmd->cmd = MFI_CMD_OP_DCMD;
3532 dcmd->cmd_status = 0xFF;
3533 dcmd->sge_count = 1;
3534 dcmd->flags = MFI_FRAME_DIR_WRITE;
3535 dcmd->timeout = 0;
3536 dcmd->pad_0 = 0;
3537 dcmd->data_xfer_len = size_map_info;
3538 dcmd->mbox.b[0] = num_lds;
3539 dcmd->mbox.b[1] = 1; /* Pend */
3540 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3541 dcmd->sgl.sge32[0].phys_addr = ci_h;
3542 dcmd->sgl.sge32[0].length = size_map_info;
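/*
 * mbox.b[1] == 1 pends this DCMD inside the FW: it completes only
 * when the LD map changes, and tbolt_complete_cmd() then re-issues
 * it, so one map-sync command is always outstanding.
 */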
3543
3544
3545 instance->map_update_cmd = cmd;
3546 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3547
3548 instance->func_ptr->issue_cmd(cmd, instance);
3549
3550 instance->unroll.syncCmd = 1;
3551 con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3552
3553 return (ret);
3554 }
3555
3556 /*
3557 * abort_syncmap_cmd
3558 */
3559 int
3560 abort_syncmap_cmd(struct mrsas_instance *instance,
3561 struct mrsas_cmd *cmd_to_abort)
3562 {
3563 int ret = 0;
3564
3565 struct mrsas_cmd *cmd;
3566 struct mrsas_abort_frame *abort_fr;
3567
3568 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3569
3570 cmd = get_raid_msg_mfi_pkt(instance);
3571
3572 if (!cmd) {
3573 cmn_err(CE_WARN,
3574 "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3575 return (DDI_FAILURE);
3576 }
3577 /* Clear the frame buffer and assign back the context id */
3578 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3579 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3580 cmd->index);
3581
3582 abort_fr = &cmd->frame->abort;
3583
3584 /* prepare and issue the abort frame */
3585 ddi_put8(cmd->frame_dma_obj.acc_handle,
3586 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3587 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3588 MFI_CMD_STATUS_SYNC_MODE);
3589 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3590 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3591 cmd_to_abort->index);
3592 ddi_put32(cmd->frame_dma_obj.acc_handle,
3593 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3594 ddi_put32(cmd->frame_dma_obj.acc_handle,
3595 &abort_fr->abort_mfi_phys_addr_hi, 0);
3596
3597 cmd->frame_count = 1;
3598
3606 ret = 0;
3607 }
3608
3609 return_raid_msg_mfi_pkt(instance, cmd);
3610
3611 atomic_add_16(&instance->fw_outstanding, (-1));
3612
3613 return (ret);
3614 }
3615
3616
3617 #ifdef PDSUPPORT
3618 int
3619 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3620 uint8_t lun, dev_info_t **ldip)
3621 {
3622 struct scsi_device *sd;
3623 dev_info_t *child;
3624 int rval, dtype;
3625 struct mrsas_tbolt_pd_info *pds = NULL;
3626 uint64_t *wwn;
3627
3628
3629 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3630 tgt, lun));
3631
3632 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3633 if (ldip) {
3634 *ldip = child;
3635 }
3636 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3637 rval = mrsas_service_evt(instance, tgt, 1,
3638 MRSAS_EVT_UNCONFIG_TGT, NULL);
3639 con_log(CL_ANN1, (CE_WARN,
3640 "mr_sas:DELETING STALE ENTRY rval = %d "
3641 "tgt id = %d ", rval, tgt));
3642 return (NDI_FAILURE);
3643 }
3644 return (NDI_SUCCESS);
3645 }
3646
3647 pds = (struct mrsas_tbolt_pd_info *)
3648 kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3649 mrsas_tbolt_get_pd_info(instance, pds, tgt);
3650 dtype = pds->scsiDevType;
3651
3652 /* Check for Disk*/
3653 if (dtype == DTYPE_DIRECT) {
3654 if (LE_16(pds->fwState) != PD_SYSTEM) {
3655 /* Not a system PD; nothing to configure. */
3656 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3657 return (NDI_FAILURE);
3658 }
3658 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3659 sd->sd_address.a_hba_tran = instance->tran;
3660 sd->sd_address.a_target = (uint16_t)tgt;
3661 sd->sd_address.a_lun = (uint8_t)lun;
3662
3663 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3664 rval = mrsas_config_scsi_device(instance, sd, ldip);
3665 con_log(CL_DLEVEL1, (CE_NOTE,
3666 "Phys. device found: tgt %d dtype %d: %s",
3667 tgt, dtype, sd->sd_inq->inq_vid));
3668 } else {
3669 rval = NDI_FAILURE;
3670 con_log(CL_DLEVEL1, (CE_NOTE,
3671 "Phys. device Not found scsi_hba_probe Failed: tgt %d dtype %d: %s",
3672 tgt, dtype, sd->sd_inq->inq_vid));
3673 }
3674
3675 /* sd_unprobe is blank now. Free buffer manually */
3676 if (sd->sd_inq) {
3677 kmem_free(sd->sd_inq, SUN_INQSIZE);
3678 sd->sd_inq = (struct scsi_inquiry *)NULL;
3679 }
3680 kmem_free(sd, sizeof (struct scsi_device));
3682 } else {
3683 con_log(CL_ANN1, (CE_NOTE,
3684 "Device not supported: tgt %d lun %d dtype %d",
3685 tgt, lun, dtype));
3686 rval = NDI_FAILURE;
3687 }
3688
3689 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3690 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
3691 rval));
3692 return (rval);
3693 }
3694 static void
3695 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance, struct mrsas_tbolt_pd_info *pds,
3696 int tgt)
3697 {
3698 struct mrsas_cmd *cmd;
3699 struct mrsas_dcmd_frame *dcmd;
3700 dma_obj_t dcmd_dma_obj;
3701
3702 cmd = get_raid_msg_pkt(instance);
3703
3704 if (!cmd) {
3705 con_log(CL_ANN1, (CE_WARN, "Failed to get a cmd for get pd info"));
3706 return;
3707 }
3708
3709 /* Clear the frame buffer and assign back the context id */
3710 memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3711 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3712 cmd->index);
3713
3714
3715 dcmd = &cmd->frame->dcmd;
3716 dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3717 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3718 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3719 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3720 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3721 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3722
3723 (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3724 DDI_STRUCTURE_LE_ACC);
3725 (void) memset(dcmd_dma_obj.buffer, 0, sizeof (struct mrsas_tbolt_pd_info));
3726 (void) memset(dcmd->mbox.b, 0, 12);
3727 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3728 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3729 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3730 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, MFI_FRAME_DIR_READ);
3731 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3732 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3733 sizeof (struct mrsas_tbolt_pd_info));
3734 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3735 MR_DCMD_PD_GET_INFO);
3736 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3737 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3738 sizeof (struct mrsas_tbolt_pd_info));
3739 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3740 dcmd_dma_obj.dma_cookie[0].dmac_address);
3741
3742 cmd->sync_cmd = MRSAS_TRUE;
3743 cmd->frame_count = 1;
3744
3745 if (instance->tbolt) {
3746 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3747 }
3748
3749 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3750
/*
 * NOTE: the tail of mrsas_tbolt_get_pd_info() was truncated in this
 * listing. The close-out below is a sketch of the usual pattern in
 * this file (copy the DCMD result back to the caller's buffer, free
 * the DMA object, return the command); the exact original statements
 * are an assumption.
 */
ddi_rep_get8(dcmd_dma_obj.acc_handle, (uint8_t *)pds,
(uint8_t *)dcmd_dma_obj.buffer,
sizeof (struct mrsas_tbolt_pd_info), DDI_DEV_AUTOINCR);
(void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
return_raid_msg_pkt(instance, cmd);
}
#endif /* PDSUPPORT */
|
1 /*
2 * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
3 * i.e. Thunderbolt and Invader
4 *
5 * Solaris MegaRAID device driver for SAS2.0 controllers
6 * Copyright (c) 2008-2012, LSI Logic Corporation.
7 * All rights reserved.
8 *
9 * Version:
10 * Author:
11 * Swaminathan K S
12 * Arun Chandrashekhar
13 * Manju R
14 * Rasheed
15 * Shakeel Bukhari
16 */
17
18
19 #include <sys/types.h>
20 #include <sys/file.h>
21 #include <sys/atomic.h>
22 #include <sys/scsi/scsi.h>
23 #include <sys/byteorder.h>
24 #include "ld_pd_map.h"
25 #include "mr_sas.h"
26 #include "fusion.h"
27
28 /*
29 * FMA header files
30 */
31 #include <sys/ddifm.h>
32 #include <sys/fm/protocol.h>
33 #include <sys/fm/util.h>
34 #include <sys/fm/io/ddi.h>
35
36
37 /* Pre-TB command size and TB command size. */
38 #define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
39 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
40 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
41 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
42 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
43 extern ddi_dma_attr_t mrsas_generic_dma_attr;
44 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
45 extern struct ddi_device_acc_attr endian_attr;
46 extern int debug_level_g;
47 extern unsigned int enable_fp;
48 volatile int dump_io_wait_time = 90;
49 extern void
50 io_timeout_checker(void *arg);
51 extern volatile int debug_timeout_g;
52 extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
53 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
54 extern void push_pending_mfi_pkt(struct mrsas_instance *,
55 struct mrsas_cmd *);
56 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
57 MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
58
59 /* Local static prototypes. */
60 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
61 struct scsi_address *, struct scsi_pkt *, uchar_t *);
62 static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
63 U64 start_blk, U32 num_blocks);
64 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
65 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
66 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
67 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
68 #ifdef PDSUPPORT
69 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
70 struct mrsas_tbolt_pd_info *, int);
71 #endif /* PDSUPPORT */
72
73 static int debug_tbolt_fw_faults_after_ocr_g = 0;
74
75 /*
76 * destroy_mfi_mpi_frame_pool
77 */
78 void
79 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
80 {
81 int i;
82
83 struct mrsas_cmd *cmd;
84
85 /* return all mfi frames to pool */
86 for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
87 cmd = instance->cmd_list[i];
88 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
89 (void) mrsas_free_dma_obj(instance,
90 cmd->frame_dma_obj);
91 }
92 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
93 }
94 }
95
96 /*
97 * destroy_mpi2_frame_pool
98 */
99 void
100 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
101 {
102
103 if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
104 (void) mrsas_free_dma_obj(instance,
105 instance->mpi2_frame_pool_dma_obj);
106 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
107 }
108 }
109
110
111 /*
112 * mrsas_tbolt_free_additional_dma_buffer
113 */
114 void
115 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
116 {
117 int i;
118
119 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
120 (void) mrsas_free_dma_obj(instance,
121 instance->mfi_internal_dma_obj);
122 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
123 }
124 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
125 (void) mrsas_free_dma_obj(instance,
126 instance->mfi_evt_detail_obj);
127 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
128 }
129
130 for (i = 0; i < 2; i++) {
131 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
132 (void) mrsas_free_dma_obj(instance,
133 instance->ld_map_obj[i]);
134 instance->ld_map_obj[i].status = DMA_OBJ_FREED;
135 }
136 }
137 }
138
149 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
150 }
151
152 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
153 (void) mrsas_free_dma_obj(instance,
154 instance->reply_desc_dma_obj);
155 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
156 }
157
158
159 }
160
161
162 /*
163 * ThunderBolt(TB) Request Message Frame Pool
164 */
165 int
166 create_mpi2_frame_pool(struct mrsas_instance *instance)
167 {
168 int i = 0;
169 uint16_t max_cmd;
170 uint32_t sgl_sz;
171 uint32_t raid_msg_size;
172 uint32_t total_size;
173 uint32_t offset;
174 uint32_t io_req_base_phys;
175 uint8_t *io_req_base;
176 struct mrsas_cmd *cmd;
177
178 max_cmd = instance->max_fw_cmds;
179
180 sgl_sz = 1024;
181 raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
182
183 /* Allocate an additional 256 bytes (one message frame) to accommodate SMID 0. */
184 total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
185 (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
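/*
 * Pool layout sketch (illustrative; assumes MRSAS_THUNDERBOLT_MSG_SIZE
 * is 256 bytes):
 *
 *   [1 pad frame, SMID 0][max_cmd msg frames][max_cmd SGL chains][max_cmd sense]
 *
 * so a command's pieces can be located as, e.g.:
 *   frame = io_req_base + i * raid_msg_size;
 *   sgl   = io_req_base + max_cmd * raid_msg_size + i * sgl_sz;
 */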
186
187 con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
188 "max_cmd %x", max_cmd));
189
190 con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
191 "request message frame pool size %x", total_size));
192
193 /*
194  * ThunderBolt(TB): create a single chunk of DMA-able memory and split
195  * it among the commands. Each command must be able to hold a RAID
196  * MESSAGE FRAME, which in turn embeds an MFI_FRAME within it. See
197  * alloc_req_rep_desc(), where the request/reply descriptor queues are
198  * allocated, for the corresponding descriptor setup.
199  */
200
201 instance->mpi2_frame_pool_dma_obj.size = total_size;
202 instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
203 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
204 0xFFFFFFFFU;
205 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
206 0xFFFFFFFFU;
207 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
208 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
209
210 if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
211 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
212 cmn_err(CE_WARN,
213 "mr_sas: could not alloc mpi2 frame pool");
214 return (DDI_FAILURE);
215 }
216
217 bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
218 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
219
220 instance->io_request_frames =
221 (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
222 instance->io_request_frames_phy =
223 (uint32_t)
224 instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
225
226 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
227 (void *)instance->io_request_frames));
228
229 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
230 instance->io_request_frames_phy));
231
232 io_req_base = (uint8_t *)instance->io_request_frames +
233 MRSAS_THUNDERBOLT_MSG_SIZE;
234 io_req_base_phys = instance->io_request_frames_phy +
235 MRSAS_THUNDERBOLT_MSG_SIZE;
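/*
 * io_req_base deliberately skips the first message frame: SMID 0 is
 * reserved (IOC INIT uses cmd_list[0] directly), so command i, which
 * carries SMID i + 1, lives at io_req_base + i * raid_msg_size.
 */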
236
237 con_log(CL_DLEVEL3, (CE_NOTE,
238 "io req_base_phys 0x%x", io_req_base_phys));
239
240 for (i = 0; i < max_cmd; i++) {
241 cmd = instance->cmd_list[i];
242
243 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
244
245 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
246 ((uint8_t *)io_req_base + offset);
247 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
248
249 cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
250 (max_cmd * raid_msg_size) + i * sgl_sz);
251
252 cmd->sgl_phys_addr = (io_req_base_phys +
253 (max_cmd * raid_msg_size) + i * sgl_sz);
254
255 cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
256 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
257 (i * SENSE_LENGTH));
258
259 cmd->sense_phys_addr1 = (io_req_base_phys +
260 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
261 (i * SENSE_LENGTH));
262
263
264 cmd->SMID = i + 1;
265
266 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
267 cmd->index, (void *)cmd->scsi_io_request));
268
269 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
270 cmd->index, cmd->scsi_io_request_phys_addr));
271
272 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
273 cmd->index, (void *)cmd->sense1));
274
275 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
276 cmd->index, cmd->sense_phys_addr1));
277
278 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers [%x]0x%p",
279 cmd->index, (void *)cmd->sgl));
280 
281 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers phys [%x]0x%x",
282 cmd->index, cmd->sgl_phys_addr));
283 }
284
285 return (DDI_SUCCESS);
286
287 }
288
289
290 /*
291 * alloc_additional_dma_buffer for AEN
292 */
293 int
294 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
295 {
296 uint32_t internal_buf_size = PAGESIZE*2;
297 int i;
298
299 /* Initialize buffer status as free */
300 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
301 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
302 instance->ld_map_obj[0].status = DMA_OBJ_FREED;
303 instance->ld_map_obj[1].status = DMA_OBJ_FREED;
304
305
306 instance->mfi_internal_dma_obj.size = internal_buf_size;
307 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
308 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
309 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
310 0xFFFFFFFFU;
311 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
312
313 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
314 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
315 cmn_err(CE_WARN,
316 "mr_sas: could not alloc reply queue");
317 return (DDI_FAILURE);
318 }
319
320 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
321
322 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
323 instance->internal_buf =
324 (caddr_t)instance->mfi_internal_dma_obj.buffer;
325 instance->internal_buf_dmac_add =
326 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
327 instance->internal_buf_size = internal_buf_size;
328
329 /* allocate evt_detail */
330 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
331 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
332 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
333 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
334 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
335 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
336
337 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
338 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
339 cmn_err(CE_WARN, "mrsas_tbolt_alloc_additional_dma_buffer: "
340 "could not allocate data transfer buffer.");
341 goto fail_tbolt_additional_buff;
342 }
343
344 bzero(instance->mfi_evt_detail_obj.buffer,
345 sizeof (struct mrsas_evt_detail));
346
347 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
348
349 instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
350 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
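/*
 * MR_FW_RAID_MAP already embeds one MR_LD_SPAN_MAP, hence only
 * (MAX_LOGICAL_DRIVES - 1) additional span maps are added here.
 */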
351
352 for (i = 0; i < 2; i++) {
353 /* allocate the data transfer buffer */
354 instance->ld_map_obj[i].size = instance->size_map_info;
355 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
356 instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
357 instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
358 0xFFFFFFFFU;
359 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
360 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
361
362 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
363 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
364 cmn_err(CE_WARN,
365 "could not allocate data transfer buffer.");
366 goto fail_tbolt_additional_buff;
367 }
368
369 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
370
371 bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
372
373 instance->ld_map[i] =
374 (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
375 instance->ld_map_phy[i] = (uint32_t)instance->
376 ld_map_obj[i].dma_cookie[0].dmac_address;
377
378 con_log(CL_DLEVEL3, (CE_NOTE,
379 "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
380
381 con_log(CL_DLEVEL3, (CE_NOTE,
382 "size_map_info 0x%x", instance->size_map_info));
383 }
384
385 return (DDI_SUCCESS);
386
387 fail_tbolt_additional_buff:
388 mrsas_tbolt_free_additional_dma_buffer(instance);
389
390 return (DDI_FAILURE);
391 }
392
393 MRSAS_REQUEST_DESCRIPTOR_UNION *
394 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
395 {
396 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
397
398 if (index >= instance->max_fw_cmds) {
399 con_log(CL_ANN1, (CE_NOTE,
400 "Invalid SMID 0x%x request for descriptor", index));
401 con_log(CL_ANN1, (CE_NOTE,
402 "max_fw_cmds : 0x%x", instance->max_fw_cmds));
403 return (NULL);
404 }
405
406 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
407 ((char *)instance->request_message_pool +
408 (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
409
410 con_log(CL_ANN1, (CE_NOTE,
411 "request descriptor : 0x%08lx", (unsigned long)req_desc));
412
413 con_log(CL_ANN1, (CE_NOTE,
414 "request descriptor base phy : 0x%08lx",
415 (unsigned long)instance->request_message_pool_phy));
416
417 return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
418 }
419
420
421 /*
422 * Allocate Request and Reply Queue Descriptors.
423 */
424 int
425 alloc_req_rep_desc(struct mrsas_instance *instance)
426 {
427 uint32_t request_q_sz, reply_q_sz;
428 int i, max_reply_q_sz;
429 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
430
431 /*
432  * ThunderBolt(TB) no longer uses a producer/consumer mechanism. On an
433  * interrupt we scan the list of reply descriptors and process each in
434  * turn, so memory is allocated here for max_fw_cmds + 1 reply
435  * descriptors, rounded up to a multiple of 16.
436  */
437
438 /* Allocate Reply Descriptors */
439 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
440 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
441
442 /* reply queue size should be multiple of 16 */
443 max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
444
445 reply_q_sz = 8 * max_reply_q_sz;
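/*
 * Worked example (assuming the 1008-command cap applied in
 * mrsas_init_adapter_tbolt(), i.e. max_fw_cmds = 1007):
 *   max_reply_q_sz = ((1007 + 1 + 15) / 16) * 16 = 1008 entries
 *   reply_q_sz     = 8 * 1008 = 8064 bytes (one 8-byte descriptor each)
 */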
446
451 instance->reply_desc_dma_obj.size = reply_q_sz;
452 instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
453 instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
454 instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
455 instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
456 instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
457
458 if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
459 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
460 cmn_err(CE_WARN,
461 "mr_sas: could not alloc reply queue");
462 return (DDI_FAILURE);
463 }
464
465 bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
466 instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
467
468 /* virtual address of reply queue */
469 instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
470 instance->reply_desc_dma_obj.buffer);
471
472 instance->reply_q_depth = max_reply_q_sz;
473
474 con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
475 instance->reply_q_depth));
476
477 con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
478 (void *)instance->reply_frame_pool));
479
480 /* initializing reply address to 0xFFFFFFFF */
481 reply_desc = instance->reply_frame_pool;
482
483 for (i = 0; i < instance->reply_q_depth; i++) {
484 reply_desc->Words = (uint64_t)~0;
485 reply_desc++;
486 }
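/*
 * An all-ones pattern can never be a valid completion, so the reply
 * processing path may treat 0xFFFFFFFFFFFFFFFF as an empty slot.
 */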
487
488
489 instance->reply_frame_pool_phy =
490 (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
491
492 con_log(CL_ANN1, (CE_NOTE,
493 "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
494
495
496 instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
497 reply_q_sz);
498
499 con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
500 instance->reply_pool_limit_phy));
501
502
506 /* Allocate Request Descriptors */
507 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
508 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
509
510 request_q_sz = 8 * (instance->max_fw_cmds);
512
513 instance->request_desc_dma_obj.size = request_q_sz;
514 instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
515 instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
516 instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
517 0xFFFFFFFFU;
518 instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
519 instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
520
521 if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
522 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
523 cmn_err(CE_WARN,
524 "mr_sas: could not alloc request queue desc");
525 goto fail_undo_reply_queue;
526 }
527
528 bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
529 instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
530
531 /* virtual address of request queue desc */
532 instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
533 (instance->request_desc_dma_obj.buffer);
534
535 instance->request_message_pool_phy =
536 (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
537
538 return (DDI_SUCCESS);
539
540 fail_undo_reply_queue:
541 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
542 (void) mrsas_free_dma_obj(instance,
543 instance->reply_desc_dma_obj);
544 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
545 }
546
547 return (DDI_FAILURE);
548 }
549
550 /*
551 * mrsas_alloc_cmd_pool_tbolt
552 *
553 * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
554 * routine
555 */
556 int
557 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
558 {
559 int i;
560 int count;
561 uint32_t max_cmd;
562 uint32_t reserve_cmd;
563 size_t sz;
564
565 struct mrsas_cmd *cmd;
566
567 max_cmd = instance->max_fw_cmds;
568 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
569 "max_cmd %x", max_cmd));
570
571
572 sz = sizeof (struct mrsas_cmd *) * max_cmd;
573
574 /*
575 * instance->cmd_list is an array of struct mrsas_cmd pointers.
576 * Allocate the dynamic array first and then allocate individual
577 * commands.
578 */
579 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
580
581 /* allocate the individual command structures */
582 for (count = 0; count < max_cmd; count++) {
583 instance->cmd_list[count] =
584 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
585 }
586
587 /* add all the commands to command pool */
588
589 INIT_LIST_HEAD(&instance->cmd_pool_list);
590 INIT_LIST_HEAD(&instance->cmd_pend_list);
591 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
592
593 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
594
595 /* cmd index 0 reserved for IOC INIT */
596 for (i = 1; i < reserve_cmd; i++) {
597 cmd = instance->cmd_list[i];
598 cmd->index = i;
599 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
600 }
601
602
603 for (i = reserve_cmd; i < max_cmd; i++) {
604 cmd = instance->cmd_list[i];
605 cmd->index = i;
606 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
607 }
608
609 return (DDI_SUCCESS);
610
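/*
 * Unwind path. With the KM_SLEEP allocations above it is not reached
 * as written; it appears intended for a KM_NOSLEEP variant of this
 * routine.
 */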
611 mrsas_undo_cmds:
612 if (count > 0) {
613 /* free each cmd */
614 for (i = 0; i < count; i++) {
615 if (instance->cmd_list[i] != NULL) {
616 kmem_free(instance->cmd_list[i],
617 sizeof (struct mrsas_cmd));
618 }
619 instance->cmd_list[i] = NULL;
620 }
621 }
622
623 mrsas_undo_cmd_list:
624 if (instance->cmd_list != NULL)
625 kmem_free(instance->cmd_list, sz);
626 instance->cmd_list = NULL;
627
628 return (DDI_FAILURE);
629 }
630
631
632 /*
633 * free_space_for_mpi2
634 */
635 void
636 free_space_for_mpi2(struct mrsas_instance *instance)
637 {
638 /* already freed */
639 if (instance->cmd_list == NULL) {
640 return;
641 }
642
643 /* First free the additional DMA buffer */
644 mrsas_tbolt_free_additional_dma_buffer(instance);
645
646 /* Free the request/reply descriptor pool */
647 free_req_rep_desc_pool(instance);
648
649 /* Free the MPI message pool */
650 destroy_mpi2_frame_pool(instance);
651
652 /* Free the MFI frame pool */
653 destroy_mfi_frame_pool(instance);
654
655 /* Free all the commands in the cmd_list */
656 /* Free the cmd_list buffer itself */
657 mrsas_free_cmd_pool(instance);
658 }
659
660
661 /*
662 * ThunderBolt(TB) memory allocations for commands/messages/frames.
663 */
664 int
665 alloc_space_for_mpi2(struct mrsas_instance *instance)
666 {
667 /* Allocate command pool (memory for cmd_list & individual commands) */
668 if (mrsas_alloc_cmd_pool_tbolt(instance)) {
669 cmn_err(CE_WARN, "Error creating cmd pool");
670 return (DDI_FAILURE);
671 }
672
673 /* Initialize single reply size and Message size */
674 instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
675 instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
676
677 instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
678 (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
679 sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
680 instance->max_sge_in_chain = (MR_COMMAND_SIZE -
681 MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
682
683 /* Reduce SG count by 1 to take care of group cmds feature in FW */
684 instance->max_num_sge = (instance->max_sge_in_main_msg +
685 instance->max_sge_in_chain - 2);
686 instance->chain_offset_mpt_msg =
687 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
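/*
 * Worked example (assuming MRSAS_THUNDERBOLT_MSG_SIZE = 256,
 * MR_COMMAND_SIZE = 1280 and 16-byte MPI2_SGE_IO_UNIONs):
 *   max_sge_in_chain = (1280 - 256) / 16 = 64
 * and chain_offset_mpt_msg is expressed in 16-byte units, as MPI2
 * chain offsets require.
 */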
747 mpi2_undo_message_pool:
748 destroy_mpi2_frame_pool(instance);
749
750 mpi2_undo_mfi_frame_pool:
751 destroy_mfi_frame_pool(instance);
752
753 mpi2_undo_descripter_pool:
754 free_req_rep_desc_pool(instance);
755
756 mpi2_undo_cmd_pool:
757 mrsas_free_cmd_pool(instance);
758
759 return (DDI_FAILURE);
760 }
761
762
763 /*
764 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
765 */
766 int
767 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
768 {
769
770 /*
771  * If the FW supports more than 1008 commands, cap at 1008 and then
772  * reduce by 1, so that reply_q_sz (one more than the max commands
773  * the driver may send) does not exceed what the FW can support.
774  */
775
776 if (instance->max_fw_cmds > 1008) {
777 instance->max_fw_cmds = 1008;
778 instance->max_fw_cmds = instance->max_fw_cmds - 1;
779 }
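/*
 * With this cap max_fw_cmds becomes 1007, so the reply queue depth
 * computed in alloc_req_rep_desc() (1007 + 1, rounded up to a multiple
 * of 16) comes out to exactly 1008.
 */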
780
781 con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
782 " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
783
784
785 /* create a pool of commands */
786 if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
787 cmn_err(CE_WARN,
788 " alloc_space_for_mpi2() failed.");
789
790 return (DDI_FAILURE);
791 }
792
793 /* Send ioc init message */
794 /* NOTE: the issue_init call does FMA checking already. */
795 if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
796 cmn_err(CE_WARN,
797 " mrsas_issue_init_mpi2() failed.");
798
799 goto fail_init_fusion;
800 }
801
802 instance->unroll.alloc_space_mpi2 = 1;
803
804 con_log(CL_ANN, (CE_NOTE,
805 "mrsas_init_adapter_tbolt: SUCCESSFUL"));
806
807 return (DDI_SUCCESS);
808
809 fail_init_fusion:
810 free_space_for_mpi2(instance);
811
812 return (DDI_FAILURE);
813 }
814
815
816
817 /*
818 * init_mpi2
819 */
820 int
821 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
822 {
823 dma_obj_t init2_dma_obj;
824 int ret_val = DDI_SUCCESS;
825
826 /* allocate DMA buffer for IOC INIT message */
827 init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
828 init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
829 init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
830 init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
831 init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
832 init2_dma_obj.dma_attr.dma_attr_align = 256;
833
834 if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
835 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
836 cmn_err(CE_WARN, "mrsas_issue_init_mpi2: "
837 "could not allocate data transfer buffer.");
838 return (DDI_FAILURE);
839 }
840 (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
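/*
 * The 0x02 fill is only a scratch pattern; mrsas_tbolt_ioc_init()
 * bzero()s this buffer before populating the IOC INIT request.
 */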
841
842 con_log(CL_ANN1, (CE_NOTE,
843 "mrsas_issue_init_mpi2 _phys adr: %x",
844 init2_dma_obj.dma_cookie[0].dmac_address));
845
846
847 /* Initialize and send ioc init message */
848 ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
849 if (ret_val == DDI_FAILURE) {
850 con_log(CL_ANN1, (CE_WARN,
851 "mrsas_issue_init_mpi2: Failed"));
852 goto fail_init_mpi2;
853 }
854
855 /* free IOC init DMA buffer */
856 if (mrsas_free_dma_obj(instance, init2_dma_obj)
857 != DDI_SUCCESS) {
858 con_log(CL_ANN1, (CE_WARN,
859 "mrsas_issue_init_mpi2: Free Failed"));
860 return (DDI_FAILURE);
861 }
862
863 /* Get/Check and sync ld_map info */
864 instance->map_id = 0;
865 if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
866 (void) mrsas_tbolt_sync_map_info(instance);
867
868
869 /* No mrsas_cmd to send, so send NULL. */
870 if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
871 goto fail_init_mpi2;
872
873 con_log(CL_ANN, (CE_NOTE,
874 "mrsas_issue_init_mpi2: SUCCESSFUL"));
875
876 return (DDI_SUCCESS);
877
878 fail_init_mpi2:
879 (void) mrsas_free_dma_obj(instance, init2_dma_obj);
880
881 return (DDI_FAILURE);
882 }
883
884 static int
885 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
886 {
887 int numbytes;
888 uint16_t flags;
889 struct mrsas_init_frame2 *mfiFrameInit2;
890 struct mrsas_header *frame_hdr;
891 Mpi2IOCInitRequest_t *init;
892 struct mrsas_cmd *cmd = NULL;
893 struct mrsas_drv_ver drv_ver_info;
894 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
895
896 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
897
898
899 #ifdef DEBUG
900 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
901 (int)sizeof (*mfiFrameInit2)));
902 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
907 #endif
908
909 init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
910 numbytes = sizeof (*init);
911 bzero(init, numbytes);
912
913 ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
914 MPI2_FUNCTION_IOC_INIT);
915
916 ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
917 MPI2_WHOINIT_HOST_DRIVER);
918
919 /* set MsgVersion and HeaderVersion host driver was built with */
920 ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
921 MPI2_VERSION);
922
923 ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
924 MPI2_HEADER_VERSION);
925
926 ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
927 instance->raid_io_msg_size / 4);
944 ddi_put64(mpi2_dma_obj->acc_handle,
945 (uint64_t *)&init->SystemRequestFrameBaseAddress,
946 instance->io_request_frames_phy);
947
948 ddi_put64(mpi2_dma_obj->acc_handle,
949 &init->ReplyDescriptorPostQueueAddress,
950 instance->reply_frame_pool_phy);
951
952 ddi_put64(mpi2_dma_obj->acc_handle,
953 &init->ReplyFreeQueueAddress, 0);
954
955 cmd = instance->cmd_list[0];
956 if (cmd == NULL) {
957 return (DDI_FAILURE);
958 }
959 cmd->retry_count_for_ocr = 0;
960 cmd->pkt = NULL;
961 cmd->drv_pkt_time = 0;
962
963 mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
964 con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
965
966 frame_hdr = &cmd->frame->hdr;
967
968 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
969 MFI_CMD_STATUS_POLL_MODE);
970
971 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
972
973 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
974
975 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
976
977 con_log(CL_ANN, (CE_CONT,
978 "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
979
980 /* Init the MFI Header */
981 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
982 &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
983
984 con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
985
986 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
987 &mfiFrameInit2->cmd_status,
988 MFI_STAT_INVALID_STATUS);
989
990 con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
991
992 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
993 &mfiFrameInit2->queue_info_new_phys_addr_lo,
994 mpi2_dma_obj->dma_cookie[0].dmac_address);
995
996 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
997 &mfiFrameInit2->data_xfer_len,
998 sizeof (Mpi2IOCInitRequest_t));
999
1000 con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1001 (int)init->ReplyDescriptorPostQueueAddress));
1002
1003 /* fill driver version information */
1004 fill_up_drv_ver(&drv_ver_info);
1005
1006 /* allocate the driver version data transfer buffer */
1007 instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1008 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1009 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1010 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1011 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1012 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1013
1014 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1015 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1016 cmn_err(CE_WARN,
1017 "fusion init: Could not allocate driver version buffer.");
1018 return (DDI_FAILURE);
1019 }
1020 /* copy driver version to dma buffer */
1021 bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1022 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1023 (uint8_t *)drv_ver_info.drv_ver,
1024 (uint8_t *)instance->drv_ver_dma_obj.buffer,
1025 sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1026
1027 /* send driver version physical address to firmware */
1028 ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1029 instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1030
1031 con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1032 mfiFrameInit2->queue_info_new_phys_addr_lo,
1033 (int)sizeof (Mpi2IOCInitRequest_t)));
1034
1035 con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1036
1037 con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1038 cmd->scsi_io_request_phys_addr,
1039 (int)sizeof (struct mrsas_init_frame2)));
1040
1041 /* disable interrupts before sending INIT2 frame */
1042 instance->func_ptr->disable_intr(instance);
1043
1044 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1045 instance->request_message_pool;
1046 req_desc->Words = cmd->scsi_io_request_phys_addr;
1047 req_desc->MFAIo.RequestFlags =
1048 (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
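/*
 * For an MFA (MFI passthrough) descriptor, Words carries the physical
 * address of the MFI frame itself instead of an SMID-based value.
 */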
1049
1050 cmd->request_desc = req_desc;
1051
1052 /* issue the init frame */
1053 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1054
1055 con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1056 con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
1057 frame_hdr->cmd_status));
1058
1059 if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1060 &mfiFrameInit2->cmd_status) == 0) {
1061 con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1062 } else {
1063 con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1064 mrsas_dump_reply_desc(instance);
1065 goto fail_ioc_init;
1066 }
1067
1068 mrsas_dump_reply_desc(instance);
1069
1070 instance->unroll.verBuff = 1;
1071
1072 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1073
1074 return (DDI_SUCCESS);
1075
1076
1077 fail_ioc_init:
1078
1079 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1080
1081 return (DDI_FAILURE);
1082 }
1083
1084 int
1085 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1086 {
1087 int i;
1088 uint32_t wait_time = dump_io_wait_time;
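/*
 * Each pass of the loop below sleeps 10 ms (drv_usecwait() takes
 * microseconds; MILLISEC is 1000), so wait_time = 90 bounds the poll
 * at roughly 0.9 s plus reply-processing time.
 */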
1089 for (i = 0; i < wait_time; i++) {
1090 /*
1091 * Check For Outstanding poll Commands
1092 * except ldsync command and aen command
1093 */
1094 if (instance->fw_outstanding <= 2) {
1095 break;
1096 }
1097 drv_usecwait(10*MILLISEC);
1098 /* complete commands from reply queue */
1099 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1100 }
1101 if (instance->fw_outstanding > 2) {
1102 return (1);
1103 }
1104 return (0);
1105 }
1106 /*
1107 * scsi_pkt handling
1108 *
1109 * Visible to the external world via the transport structure.
1110 */
1111
1112 int
1113 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1114 {
1115 struct mrsas_instance *instance = ADDR2MR(ap);
1116 struct scsa_cmd *acmd = PKT2CMD(pkt);
1117 struct mrsas_cmd *cmd = NULL;
1118 uchar_t cmd_done = 0;
1119
1120 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1121 if (instance->deadadapter == 1) {
1122 cmn_err(CE_WARN,
1123 "mrsas_tran_start: TBOLT returning TRAN_FATAL_ERROR "
1124 "for IO, as the HBA doesn't take any more IOs");
1125 if (pkt) {
1126 pkt->pkt_reason = CMD_DEV_GONE;
1127 pkt->pkt_statistics = STAT_DISCON;
1128 }
1129 return (TRAN_FATAL_ERROR);
1130 }
1131 if (instance->adapterresetinprogress) {
1132 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1133 "returning mfi_pkt and setting TRAN_BUSY\n"));
1134 return (TRAN_BUSY);
1135 }
1136 (void) mrsas_tbolt_prepare_pkt(acmd);
1137
1138 cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1139
1140 /*
1141  * Check if the command was already completed inside
1142  * mrsas_tbolt_build_cmd(); in that case cmd_done is set, no command
1143  * is returned, and the appropriate reason is placed in pkt_reason.
1144  */
1145 if (cmd_done) {
1146 pkt->pkt_reason = CMD_CMPLT;
1147 pkt->pkt_scbp[0] = STATUS_GOOD;
1148 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1149 | STATE_SENT_CMD;
1150 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1151 (*pkt->pkt_comp)(pkt);
1152 }
1153
1154 return (TRAN_ACCEPT);
1155 }
1156
1157 if (cmd == NULL) {
1158 return (TRAN_BUSY);
1159 }
1160
1161
1162 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1163 if (instance->fw_outstanding > instance->max_fw_cmds) {
1164 cmn_err(CE_WARN,
1165 "Command Queue Full... Returning BUSY");
1166 return_raid_msg_pkt(instance, cmd);
1167 return (TRAN_BUSY);
1168 }
1169
1170 /* Synchronize the Cmd frame for the controller */
1171 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1172 DDI_DMA_SYNC_FORDEV);
1173
1174 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1175 "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1176 cmd->index, cmd->SMID));
1177
1178 instance->func_ptr->issue_cmd(cmd, instance);
1179 } else {
1180 instance->func_ptr->issue_cmd(cmd, instance);
1181 (void) wait_for_outstanding_poll_io(instance);
1182 (void) mrsas_common_check(instance, cmd);
1183 }
1184
1185 return (TRAN_ACCEPT);
1186 }
1187
1188 /*
1189 * prepare the pkt:
1190 * the pkt may have been resubmitted or just reused so
1191 * initialize some fields and do some checks.
1192 */
1193 static int
1194 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1195 {
1196 struct scsi_pkt *pkt = CMD2PKT(acmd);
1197
1198
1199 /*
1200 * Reinitialize some fields that need it; the packet may
1201 * have been resubmitted
1202 */
1203 pkt->pkt_reason = CMD_CMPLT;
1204 pkt->pkt_state = 0;
1205 pkt->pkt_statistics = 0;
1206 pkt->pkt_resid = 0;
1207
1208 /*
1209 * zero status byte.
1210 */
1211 *(pkt->pkt_scbp) = 0;
1212
1213 return (0);
1214 }
1215
1216
1217 int
1218 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1219 struct scsa_cmd *acmd,
1220 struct mrsas_cmd *cmd,
1221 Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1222 uint32_t *datalen)
1223 {
1224 uint32_t MaxSGEs;
1225 int sg_to_process;
1226 uint32_t i, j;
1227 uint32_t numElements, endElement;
1228 Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
1229 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
1230 ddi_acc_handle_t acc_handle =
1231 instance->mpi2_frame_pool_dma_obj.acc_handle;
1232
1233 con_log(CL_ANN1, (CE_NOTE,
1234 "chkpnt: Building Chained SGL :%d", __LINE__));
1235
1236 /* Calculate SGE size in number of 32-bit words. */
1237 /* Clear the datalen before updating it. */
1238 *datalen = 0;
1239
1240 MaxSGEs = instance->max_sge_in_main_msg;
1241
1242 ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1243 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1244
1245 /* set data transfer flag. */
1246 if (acmd->cmd_flags & CFLAG_DMASEND) {
1247 ddi_put32(acc_handle, &scsi_raid_io->Control,
1248 MPI2_SCSIIO_CONTROL_WRITE);
1249 } else {
1250 ddi_put32(acc_handle, &scsi_raid_io->Control,
1251 MPI2_SCSIIO_CONTROL_READ);
1252 }
1253
1254
1255 numElements = acmd->cmd_cookiecnt;
1256
1257 con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1258
1259 if (numElements > instance->max_num_sge) {
1260 con_log(CL_ANN, (CE_NOTE,
1261 "[Max SGE Count Exceeded]:%x", numElements));
1262 return (numElements);
1263 }
1264
1265 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1266 (uint8_t)numElements);
1267
1268 /* set end element in main message frame */
1269 endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
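/*
 * If every cookie fits in the main frame no chain is used; otherwise
 * the main frame takes MaxSGEs - 1 data SGEs and its last slot becomes
 * a chain element pointing at the per-command buffer cmd->sgl.
 */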
1270
1271 /* prepare the scatter-gather list for the firmware */
1272 scsi_raid_io_sgl_ieee =
1273 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1274
1275 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1276 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1277 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1278
1279 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1280 }
1281
1282 for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1283 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1284 acmd->cmd_dmacookies[i].dmac_laddress);
1285
1286 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1287 acmd->cmd_dmacookies[i].dmac_size);
1288
1289 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1290
1291 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1292 if (i == (numElements - 1)) {
1293 ddi_put8(acc_handle,
1294 &scsi_raid_io_sgl_ieee->Flags,
1295 IEEE_SGE_FLAGS_END_OF_LIST);
1296 }
1297 }
1298
1299 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1300
1301 #ifdef DEBUG
1302 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1303 scsi_raid_io_sgl_ieee->Address));
1304 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1305 scsi_raid_io_sgl_ieee->Length));
1306 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1307 scsi_raid_io_sgl_ieee->Flags));
1308 #endif
1309
1310 }
1311
1312 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1313
1314 /* check if chained SGL required */
1315 if (i < numElements) {
1316
1317 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1318
1319 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1320 uint16_t ioFlags =
1321 ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1322
1323 if ((ioFlags &
1324 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1325 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1326 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1327 (U8)instance->chain_offset_io_req);
1328 } else {
1329 ddi_put8(acc_handle,
1330 &scsi_raid_io->ChainOffset, 0);
1331 }
1332 } else {
1333 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1334 (U8)instance->chain_offset_io_req);
1335 }
1336
1337 /* prepare physical chain element */
1338 ieeeChainElement = scsi_raid_io_sgl_ieee;
1339
1340 ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1341
1342 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1343 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1344 IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1345 } else {
1346 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1347 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1348 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1349 }
1350
1351 ddi_put32(acc_handle, &ieeeChainElement->Length,
1352 (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1353
1354 ddi_put64(acc_handle, &ieeeChainElement->Address,
1355 (U64)cmd->sgl_phys_addr);
1356
1357 sg_to_process = numElements - i;
1358
1359 con_log(CL_ANN1, (CE_NOTE,
1360 "[Additional SGE Count]:%x", sg_to_process));
1361
1362 /* point to the chained SGL buffer */
1363 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1364
1365 /* build rest of the SGL in chained buffer */
1366 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1367 con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1368
1369 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1370 acmd->cmd_dmacookies[i].dmac_laddress);
1371
1372 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1373 acmd->cmd_dmacookies[i].dmac_size);
1374
1375 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1376
1377 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1378 if (i == (numElements - 1)) {
1379 ddi_put8(acc_handle,
1380 &scsi_raid_io_sgl_ieee->Flags,
1381 IEEE_SGE_FLAGS_END_OF_LIST);
1382 }
1383 }
1384
1385 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1386
1387 #ifdef DEBUG
1388 con_log(CL_DLEVEL1, (CE_NOTE,
1389 "[SGL Address]: %" PRIx64,
1390 scsi_raid_io_sgl_ieee->Address));
1391 con_log(CL_DLEVEL1, (CE_NOTE,
1392 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1393 con_log(CL_DLEVEL1, (CE_NOTE,
1394 "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1395 #endif
1396
1397 i++;
1398 }
1399 }
1400
1401 return (0);
1402 } /* end of BuildScatterGather */
1403
1404
1405 /*
1406 * build_cmd
1407 */
1408 static struct mrsas_cmd *
1409 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1410 struct scsi_pkt *pkt, uchar_t *cmd_done)
1411 {
1412 uint8_t fp_possible = 0;
1413 uint32_t index;
1414 uint32_t lba_count = 0;
1415 uint32_t start_lba_hi = 0;
1416 uint32_t start_lba_lo = 0;
1417 ddi_acc_handle_t acc_handle =
1418 instance->mpi2_frame_pool_dma_obj.acc_handle;
1419 struct mrsas_cmd *cmd = NULL;
1420 struct scsa_cmd *acmd = PKT2CMD(pkt);
1421 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
1422 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1423 uint32_t datalen;
1424 struct IO_REQUEST_INFO io_info;
1425 MR_FW_RAID_MAP_ALL *local_map_ptr;
1426 uint16_t pd_cmd_cdblen;
1427
1428 con_log(CL_DLEVEL1, (CE_NOTE,
1429 "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1430
1431 /* find out if this is logical or physical drive command. */
1432 acmd->islogical = MRDRV_IS_LOGICAL(ap);
1433 acmd->device_id = MAP_DEVICE_ID(instance, ap);
1434
1435 *cmd_done = 0;
1436
1437 /* get the command packet */
1438 if (!(cmd = get_raid_msg_pkt(instance))) {
1439 return (NULL);
1440 }
1441
1442 index = cmd->index;
1443 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
1444 ReqDescUnion->Words = 0;
1445 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1446 ReqDescUnion->SCSIIO.RequestFlags =
1447 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1448 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1449
1450
1451 cmd->request_desc = ReqDescUnion;
1452 cmd->pkt = pkt;
1453 cmd->cmd = acmd;
1454
1455 /* lets get the command directions */
1456 if (acmd->cmd_flags & CFLAG_DMASEND) {
1457 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1458 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1459 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1460 DDI_DMA_SYNC_FORDEV);
1461 }
1462 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1463 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1464 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1465 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1466 DDI_DMA_SYNC_FORCPU);
1467 }
1468 } else {
1469 con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1470 }
1471
1472
1473 /* get SCSI_IO raid message frame pointer */
1474 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1475
1476 /* zero out SCSI_IO raid message frame */
1477 bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1478
1479 /* Set the ldTargetId set by BuildRaidContext() */
1480 ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1481 acmd->device_id);
1482
1483 /* Copy CDB to scsi_io_request message frame */
1484 ddi_rep_put8(acc_handle,
1485 (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1486 acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1487
1488 /*
1489 * Just the CDB length, rest of the Flags are zero
1490 * This will be modified later.
1491 */
1492 ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1493
1494 pd_cmd_cdblen = acmd->cmd_cdblen;
1495
1496 switch (pkt->pkt_cdbp[0]) {
1497 case SCMD_READ:
1498 case SCMD_WRITE:
1499 case SCMD_READ_G1:
1500 case SCMD_WRITE_G1:
1501 case SCMD_READ_G4:
1502 case SCMD_WRITE_G4:
1503 case SCMD_READ_G5:
1504 case SCMD_WRITE_G5:
1505
1506 if (acmd->islogical) {
1507 /* Initialize sense Information */
1508 if (cmd->sense1 == NULL)
1509 con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1510 "Sense buffer ptr NULL "));
1511 else
1512 bzero(cmd->sense1, SENSE_LENGTH);
1513 con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1514 "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1515
1516 if (acmd->cmd_cdblen == CDB_GROUP0) {
1517 /* 6-byte cdb */
1518 lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1519 start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1520 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1521 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1522 << 16));
1523 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
1524 /* 10-byte cdb */
1525 lba_count =
1526 (((uint16_t)(pkt->pkt_cdbp[8])) |
1527 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1528
1529 start_lba_lo =
1530 (((uint32_t)(pkt->pkt_cdbp[5])) |
1531 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1532 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1533 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1534
1535 } else if (acmd->cmd_cdblen == CDB_GROUP5) {
1536 /* 12-byte cdb */
1537 lba_count = (
1538 ((uint32_t)(pkt->pkt_cdbp[9])) |
1539 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1540 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1541 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1542
1543 start_lba_lo =
1544 (((uint32_t)(pkt->pkt_cdbp[5])) |
1545 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1546 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1547 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1548
1549 } else if (acmd->cmd_cdblen == CDB_GROUP4) {
1550 /* 16-byte cdb */
1551 lba_count = (
1552 ((uint32_t)(pkt->pkt_cdbp[13])) |
1553 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1554 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1555 ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1556
1557 start_lba_lo = (
1558 ((uint32_t)(pkt->pkt_cdbp[9])) |
1559 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1560 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1561 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1562
1563 start_lba_hi = (
1564 ((uint32_t)(pkt->pkt_cdbp[5])) |
1565 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1566 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1567 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1568 }
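/*
 * SCSI CDB fields are big-endian, hence the explicit byte shifts
 * above. Note: a 6-byte CDB transfer length of 0 conventionally
 * means 256 blocks; that case is not special-cased here.
 */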
1569
1570 if (instance->tbolt &&
1571 ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1572 cmn_err(CE_WARN, " IO SECTOR COUNT exceeds "
1573 "controller limit 0x%x sectors",
1574 lba_count);
1575 }
1576
1577 bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1578 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1579 start_lba_lo;
1580 io_info.numBlocks = lba_count;
1581 io_info.ldTgtId = acmd->device_id;
1582
1583 if (acmd->cmd_flags & CFLAG_DMASEND)
1584 io_info.isRead = 0;
1585 else
1586 io_info.isRead = 1;
1587
1588
1589 /* Acquire SYNC MAP UPDATE lock */
1590 mutex_enter(&instance->sync_map_mtx);
1591
1592 local_map_ptr =
1593 instance->ld_map[(instance->map_id & 1)];
1594
1595 if ((MR_TargetIdToLdGet(
1596 acmd->device_id, local_map_ptr) >=
1597 MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1598 cmn_err(CE_NOTE, "Fast Path NOT Possible, "
1599 "targetId >= MAX_LOGICAL_DRIVES || "
1600 "!instance->fast_path_io");
1601 fp_possible = 0;
1602 /* Set Regionlock flags to BYPASS */
1603 /* io_request->RaidContext.regLockFlags = 0; */
1604 ddi_put8(acc_handle,
1605 &scsi_raid_io->RaidContext.regLockFlags, 0);
1606 } else {
1607 if (MR_BuildRaidContext(instance, &io_info,
1608 &scsi_raid_io->RaidContext, local_map_ptr))
1609 fp_possible = io_info.fpOkForIo;
1610 }
1611
1612 if (!enable_fp)
1613 fp_possible = 0;
1614
1615 con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
1616 "instance->fast_path_io %d fp_possible %d",
1617 enable_fp, instance->fast_path_io, fp_possible));
1618
1619 if (fp_possible) {
1620
1621 /* Check for DIF enabled LD */
1622 if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1623 /* Prepare 32 Byte CDB for DIF capable Disk */
1624 mrsas_tbolt_prepare_cdb(instance,
1625 scsi_raid_io->CDB.CDB32,
1626 &io_info, scsi_raid_io, start_lba_lo);
1627 } else {
1628 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1629 (uint8_t *)&pd_cmd_cdblen,
1630 io_info.pdBlock, io_info.numBlocks);
1631 ddi_put16(acc_handle,
1632 &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1633 }
1634
1635 ddi_put8(acc_handle, &scsi_raid_io->Function,
1636 MPI2_FUNCTION_SCSI_IO_REQUEST);
1637
1638 ReqDescUnion->SCSIIO.RequestFlags =
1639 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1640 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1641
1642 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1643 uint8_t regLockFlags = ddi_get8(acc_handle,
1644 &scsi_raid_io->RaidContext.regLockFlags);
1645 uint16_t IoFlags = ddi_get16(acc_handle,
1646 &scsi_raid_io->IoFlags);
1647
1648 if (regLockFlags == REGION_TYPE_UNUSED)
1649 ReqDescUnion->SCSIIO.RequestFlags =
1650 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1651 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1652
1653 IoFlags |=
1654 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1655 regLockFlags |=
1656 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1657 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1658
1659 ddi_put8(acc_handle,
1660 &scsi_raid_io->ChainOffset, 0);
1661 ddi_put8(acc_handle,
1662 &scsi_raid_io->RaidContext.nsegType,
1663 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1664 MPI2_TYPE_CUDA));
1665 ddi_put8(acc_handle,
1666 &scsi_raid_io->RaidContext.regLockFlags,
1667 regLockFlags);
1668 ddi_put16(acc_handle,
1669 &scsi_raid_io->IoFlags, IoFlags);
1670 }
1671
1672 if ((instance->load_balance_info[
1673 acmd->device_id].loadBalanceFlag) &&
1674 (io_info.isRead)) {
1675 io_info.devHandle =
1676 get_updated_dev_handle(&instance->
1677 load_balance_info[acmd->device_id],
1678 &io_info);
1679 cmd->load_balance_flag |=
1680 MEGASAS_LOAD_BALANCE_FLAG;
1681 } else {
1682 cmd->load_balance_flag &=
1683 ~MEGASAS_LOAD_BALANCE_FLAG;
1684 }
1685
1686 ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1687 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1688 io_info.devHandle);
1689
1690 } else {
1691 ddi_put8(acc_handle, &scsi_raid_io->Function,
1692 MPI2_FUNCTION_LD_IO_REQUEST);
1693
1694 ddi_put16(acc_handle,
1695 &scsi_raid_io->DevHandle, acmd->device_id);
1696
1697 ReqDescUnion->SCSIIO.RequestFlags =
1698 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1699 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1700
1701 ddi_put16(acc_handle,
1702 &scsi_raid_io->RaidContext.timeoutValue,
1703 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1704
1705 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1706 uint8_t regLockFlags = ddi_get8(acc_handle,
1707 &scsi_raid_io->RaidContext.regLockFlags);
1708
1709 if (regLockFlags == REGION_TYPE_UNUSED) {
1710 ReqDescUnion->SCSIIO.RequestFlags =
1711 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1712 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1713 }
1714
1715 regLockFlags |=
1716 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1717 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1718
1719 ddi_put8(acc_handle,
1720 &scsi_raid_io->RaidContext.nsegType,
1721 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1722 MPI2_TYPE_CUDA));
1723 ddi_put8(acc_handle,
1724 &scsi_raid_io->RaidContext.regLockFlags,
1725 regLockFlags);
1726 }
1727 } /* Not FP */
1728
1729 /* Release SYNC MAP UPDATE lock */
1730 mutex_exit(&instance->sync_map_mtx);
1731
1732
1733 /*
1734 * Set sense buffer physical address/length in scsi_io_request.
1735 */
1736 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1737 cmd->sense_phys_addr1);
1738 ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength,
1739 SENSE_LENGTH);
1740
1741 /* Construct SGL */
1742 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1743 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1744
1745 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1746 scsi_raid_io, &datalen);
1747
1748 ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1749
1750 break;
1751 #ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
1752 } else {
1753 break;
1754 #endif
1755 }
1756 /* fall through For all non-rd/wr cmds */
1757 default:
1758 switch (pkt->pkt_cdbp[0]) {
1759 case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1760 return_raid_msg_pkt(instance, cmd);
1761 *cmd_done = 1;
1762 return (NULL);
1763 }
1764
1765 case SCMD_MODE_SENSE:
1766 case SCMD_MODE_SENSE_G1: {
1767 union scsi_cdb *cdbp;
1768 uint16_t page_code;
1769
1770 cdbp = (void *)pkt->pkt_cdbp;
1771 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1772 switch (page_code) {
1773 case 0x3:
1774 case 0x4:
1775 (void) mrsas_mode_sense_build(pkt);
1776 return_raid_msg_pkt(instance, cmd);
1777 *cmd_done = 1;
1778 return (NULL);
1779 }
1780 break;
1781 }
1782
1783 default: {
1784 /*
1785 * Here we need to handle PASSTHRU for
1786 * Logical Devices. Like Inquiry etc.
1787 */
1788
1789 if (!(acmd->islogical)) {
1790
1791 /* Acquire SYNC MAP UPDATE lock */
1792 mutex_enter(&instance->sync_map_mtx);
1793
1794 local_map_ptr =
1795 instance->ld_map[(instance->map_id & 1)];
1796
1797 ddi_put8(acc_handle, &scsi_raid_io->Function,
1798 MPI2_FUNCTION_SCSI_IO_REQUEST);
1799
1800 ReqDescUnion->SCSIIO.RequestFlags =
1801 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1802 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1803
1804 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1805 local_map_ptr->raidMap.
1806 devHndlInfo[acmd->device_id].curDevHdl);
1807
1808
1809 /* Set regLockFlags to REGION_TYPE_BYPASS */
1810 ddi_put8(acc_handle,
1811 &scsi_raid_io->RaidContext.regLockFlags, 0);
1812 ddi_put64(acc_handle,
1813 &scsi_raid_io->RaidContext.regLockRowLBA,
1814 0);
1815 ddi_put32(acc_handle,
1816 &scsi_raid_io->RaidContext.regLockLength,
1817 0);
1818 ddi_put8(acc_handle,
1819 &scsi_raid_io->RaidContext.RAIDFlags,
1820 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1821 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1822 ddi_put16(acc_handle,
1823 &scsi_raid_io->RaidContext.timeoutValue,
1824 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1825 ddi_put16(acc_handle,
1826 &scsi_raid_io->RaidContext.ldTargetId,
1827 acmd->device_id);
1828 ddi_put8(acc_handle,
1829 &scsi_raid_io->LUN[1], acmd->lun);
1830
1831 /* Release SYNC MAP UPDATE lock */
1832 mutex_exit(&instance->sync_map_mtx);
1833
1834 } else {
1835 ddi_put8(acc_handle, &scsi_raid_io->Function,
1836 MPI2_FUNCTION_LD_IO_REQUEST);
1837 ddi_put8(acc_handle,
1838 &scsi_raid_io->LUN[1], acmd->lun);
1839 ddi_put16(acc_handle,
1840 &scsi_raid_io->DevHandle, acmd->device_id);
1841 ReqDescUnion->SCSIIO.RequestFlags =
1842 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1843 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1844 }
1845
1846 /*
1847 * Set sense buffer physical address/length in
1848 * scsi_io_request.
1849 */
1850 ddi_put32(acc_handle,
1851 &scsi_raid_io->SenseBufferLowAddress,
1852 cmd->sense_phys_addr1);
1853 ddi_put8(acc_handle,
1854 &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1855
1856 /* Construct SGL */
1857 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1858 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1859
1860 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1861 scsi_raid_io, &datalen);
1862
1863 ddi_put32(acc_handle,
1864 &scsi_raid_io->DataLength, datalen);
1865
1866
1867 con_log(CL_ANN, (CE_CONT,
1868 "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1869 pkt->pkt_cdbp[0], acmd->device_id));
1870 con_log(CL_DLEVEL1, (CE_CONT,
1871 "data length = %x\n",
1872 scsi_raid_io->DataLength));
1873 con_log(CL_DLEVEL1, (CE_CONT,
1874 "cdb length = %x\n",
1875 acmd->cmd_cdblen));
1876 }
1877 break;
1878 }
1879
1880 }
1881
1882 return (cmd);
1883 }
1884
1885 /*
1886 * mrsas_tbolt_tran_init_pkt - allocate & initialize a scsi_pkt structure
1887 * @ap:
1888 * @pkt:
1889 * @bp:
1890 * @cmdlen:
1891 * @statuslen:
1892 * @tgtlen:
1893 * @flags:
1894 * @callback:
1895 *
1896 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1897 * structure and DMA resources for a target driver request. The
1898 * tran_init_pkt() entry point is called when the target driver calls the
1899 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1900  * is a request to perform one or more of three possible services:
1901  *
1902  *  - allocation and initialization of a scsi_pkt structure
1903  *  - allocation of DMA resources for data transfer
1904  *  - reallocation of DMA resources for the next portion of a transfer
1905  */
1969 }
1970 }
1971 return (pkt);
1972 }
1973
1974
1975 uint32_t
1976 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1977 {
1978 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1979 }
1980
1981 void
1982 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1983 {
1984 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1985 struct scsi_pkt *pkt;
1986 
1987 atomic_add_16(&instance->fw_outstanding, 1);
1988
1989 con_log(CL_ANN1,
1990 (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1991
1992 con_log(CL_DLEVEL1, (CE_CONT,
1993 " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1994 con_log(CL_DLEVEL1, (CE_CONT,
1995 " [req desc low part] %x \n",
1996 (uint_t)(req_desc->Words & 0xffffffff)));
1997 con_log(CL_DLEVEL1, (CE_CONT,
1998 " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1999 pkt = cmd->pkt;
2000
2001 if (pkt) {
2002 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2003 "ISSUED CMD TO FW : called : cmd:"
2004 ": %p instance : %p pkt : %p pkt_time : %x\n",
2005 gethrtime(), (void *)cmd, (void *)instance,
2006 (void *)pkt, cmd->drv_pkt_time));
2007 if (instance->adapterresetinprogress) {
2008 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2009 con_log(CL_ANN, (CE_NOTE,
2010 "TBOLT Reset the scsi_pkt timer"));
2011 } else {
2012 push_pending_mfi_pkt(instance, cmd);
2013 }
2014
2015 } else {
2016 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2017 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
2018 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
2019 }
2020 
2021 /* Issue the request descriptor to the firmware. */
2022 mutex_enter(&instance->reg_write_mtx);
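/*
 * The 64-bit request descriptor reaches the HBA as two 32-bit
 * queue-port writes; reg_write_mtx keeps the low/high halves of
 * concurrent submissions from interleaving.
 */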
2023 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2024 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2025 mutex_exit(&instance->reg_write_mtx);
2026 }
2027
2028 /*
2029 * issue_cmd_in_sync_mode
2030 */
2031 int
2032 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
2033 struct mrsas_cmd *cmd)
2034 {
2035 int i;
2036 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2037 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2038
2039 struct mrsas_header *hdr;
2040 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2041
2042 con_log(CL_ANN,
2043 (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
2044 cmd->SMID));
2045
2046
2047 if (instance->adapterresetinprogress) {
2048 cmd->drv_pkt_time = ddi_get16(
2049 cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2050 if (cmd->drv_pkt_time < debug_timeout_g)
2051 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2052 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
2053 "RESET-IN-PROGRESS, issue cmd & return."));
2054
2055 mutex_enter(&instance->reg_write_mtx);
2056 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2057 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2058 mutex_exit(&instance->reg_write_mtx);
2059
2060 return (DDI_SUCCESS);
2061 } else {
2062 con_log(CL_ANN1, (CE_NOTE,
2063 "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
2064 push_pending_mfi_pkt(instance, cmd);
2065 }
2066
2067 con_log(CL_DLEVEL2, (CE_NOTE,
2068 "HighQport offset :%p",
2069 (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
2070 con_log(CL_DLEVEL2, (CE_NOTE,
2071 "LowQport offset :%p",
2072 (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
2073
2074 cmd->sync_cmd = MRSAS_TRUE;
2075 cmd->cmd_status = ENODATA;
2076
2077
2078 mutex_enter(&instance->reg_write_mtx);
2079 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2080 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2081 mutex_exit(&instance->reg_write_mtx);
2082
2083 con_log(CL_ANN1, (CE_NOTE,
2084 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2085 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2086 (uint_t)(req_desc->Words & 0xffffffff)));
2087
2088 mutex_enter(&instance->int_cmd_mtx);
2089 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2090 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2091 }
2092 mutex_exit(&instance->int_cmd_mtx);
2093
2094
2095 if (i < (msecs - 1)) {
2096 return (DDI_SUCCESS);
2097 } else {
2098 return (DDI_FAILURE);
2099 }
2100 }
2101
2102 /*
2103 * issue_cmd_in_poll_mode
2104 */
2105 int
2106 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2107 struct mrsas_cmd *cmd)
2108 {
2109 int i;
2110 uint16_t flags;
2111 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2112 struct mrsas_header *frame_hdr;
2113
2114 con_log(CL_ANN,
2115 (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2116 cmd->SMID));
2117
2118 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2119
2120 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2121 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2122 MFI_CMD_STATUS_POLL_MODE);
2123 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2124 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2125 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2126
2127 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2128 (uint_t)(req_desc->Words & 0xffffffff)));
2129 con_log(CL_ANN1, (CE_NOTE,
2130 " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2131
2132 /* issue the frame using inbound queue port */
2133 mutex_enter(&instance->reg_write_mtx);
2134 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2135 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2136 mutex_exit(&instance->reg_write_mtx);
2137
2138 for (i = 0; i < msecs && (
2139 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2140 == MFI_CMD_STATUS_POLL_MODE); i++) {
2141 /* wait for cmd_status to change from 0xFF */
2142 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2143 }
2144
2145 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2146 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2147 con_log(CL_ANN1, (CE_NOTE,
2148 " cmd failed %" PRIx64, (req_desc->Words)));
2149 return (DDI_FAILURE);
2150 }
2151
2152 return (DDI_SUCCESS);
2153 }
2154
2155 void
2156 tbolt_enable_intr(struct mrsas_instance *instance)
2157 {
2158 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2159 /* writel(~0, &regs->outbound_intr_status); */
2160 /* readl(&regs->outbound_intr_status); */
2161
2162 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2163
2164 /* dummy read to force PCI flush */
2165 (void) RD_OB_INTR_MASK(instance);
2166
2167 }
2168
2169 void
2170 tbolt_disable_intr(struct mrsas_instance *instance)
2171 {
2172 uint32_t mask = 0xFFFFFFFF;
2173
2174 WR_OB_INTR_MASK(mask, instance);
2175
2176 /* dummy readl to force PCI flush */
2177
2178 (void) RD_OB_INTR_MASK(instance);
2179 }
2180
2181
2182 int
2183 tbolt_intr_ack(struct mrsas_instance *instance)
2184 {
2185 uint32_t status;
2186
2187 /* check if it is our interrupt */
2188 status = RD_OB_INTR_STATUS(instance);
2189 con_log(CL_ANN1, (CE_NOTE,
2190 "chkpnt: Entered tbolt_intr_ack status = %d", status));
2191
2192 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2193 return (DDI_INTR_UNCLAIMED);
2194 }
2195
2196 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2197 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2198 return (DDI_INTR_UNCLAIMED);
2199 }
2200
2201 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2202 /* clear the interrupt by writing back the same value */
2203 WR_OB_INTR_STATUS(status, instance);
2204 /* dummy READ */
2205 (void) RD_OB_INTR_STATUS(instance);
2206 }
2207 return (DDI_INTR_CLAIMED);
2208 }
2209
2210 /*
2211 * get_raid_msg_pkt : Get a command from the free pool
2212 * After successful allocation, the caller of this routine
2213 * must clear the frame buffer (memset to zero) before
2214 * using the packet further.
2215 *
2216 * ***** Note *****
2217 * After clearing the frame buffer the context id of the
2218 * frame buffer SHOULD be restored back.
2219 */
2220
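/*
 * A minimal caller sketch of the clear-then-restore rule above
 * (mirroring what mrsas_tbolt_sync_map_info() does):
 *
 *	cmd = get_raid_msg_pkt(instance);
 *	bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
 *	ddi_put32(cmd->frame_dma_obj.acc_handle,
 *	    &cmd->frame->hdr.context, cmd->index);
 */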
2221 struct mrsas_cmd *
2222 get_raid_msg_pkt(struct mrsas_instance *instance)
2223 {
2224 mlist_t *head = &instance->cmd_pool_list;
2225 struct mrsas_cmd *cmd = NULL;
2294 void
2295 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2296 {
2297 mutex_enter(&instance->cmd_app_pool_mtx);
2298 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2299
2300 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2301
2302 mutex_exit(&instance->cmd_app_pool_mtx);
2303 }
2304
2305
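/*
 * mr_sas_tbolt_build_mfi_cmd - Wrap a legacy MFI frame in an MPI2
 * pass-through request so it can be posted through the Thunderbolt
 * inbound queue port.  The MFI frame itself is referenced by a single
 * IEEE chain SGE pointing at its physical address.
 */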
2306 void
2307 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2308 struct mrsas_cmd *cmd)
2309 {
2310 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2311 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2312 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2313 uint32_t index;
2314 ddi_acc_handle_t acc_handle =
2315 instance->mpi2_frame_pool_dma_obj.acc_handle;
2316
2317 if (!instance->tbolt) {
2318 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2319 return;
2320 }
2321
2322 index = cmd->index;
2323
2324 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2325
2326 if (!ReqDescUnion) {
2327 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2328 return;
2329 }
2330
2331 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2332
2333 ReqDescUnion->Words = 0;
2334
2335 ReqDescUnion->SCSIIO.RequestFlags =
2336 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2337 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2338
2339 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2340
2341 cmd->request_desc = ReqDescUnion;
2342
2343 /* get raid message frame pointer */
2344 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2345
2346 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2347 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2348 &scsi_raid_io->SGL.IeeeChain;
2349 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2350 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2351 }
2352
2353 ddi_put8(acc_handle, &scsi_raid_io->Function,
2354 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2355
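/*
 * Per MPI2 conventions, SGLOffset0 below is expressed in 32-bit words
 * and ChainOffset in 16-byte units, hence the divisions by 4 and 16.
 */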
2356 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2357 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2358
2359 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2360 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2361
2362 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2363 cmd->sense_phys_addr1);
2364
2365
2366 scsi_raid_io_sgl_ieee =
2367 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2368
2369 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2370 (U64)cmd->frame_phys_addr);
2371
2372 ddi_put8(acc_handle,
2373 &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2374 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2375 /* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2376 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2377
2378 con_log(CL_ANN1, (CE_NOTE,
2379 "[MFI CMD PHY ADDRESS]:%" PRIx64,
2380 scsi_raid_io_sgl_ieee->Address));
2381 con_log(CL_ANN1, (CE_NOTE,
2382 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2383 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2384 scsi_raid_io_sgl_ieee->Flags));
2385 }
2386
2387
2388 void
2389 tbolt_complete_cmd(struct mrsas_instance *instance,
2390 struct mrsas_cmd *cmd)
2391 {
2392 uint8_t status;
2393 uint8_t extStatus;
2394 uint8_t arm;
2395 struct scsa_cmd *acmd;
2396 struct scsi_pkt *pkt;
2397 struct scsi_arq_status *arqstat;
2398 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2399 LD_LOAD_BALANCE_INFO *lbinfo;
2400 ddi_acc_handle_t acc_handle =
2401 instance->mpi2_frame_pool_dma_obj.acc_handle;
2402
2403 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2404
2405 status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2406 extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2407
2408 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2409 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2410
2411 if (status != MFI_STAT_OK) {
2412 con_log(CL_ANN, (CE_WARN,
2413 "IO Cmd Failed SMID %x", cmd->SMID));
2414 } else {
2415 con_log(CL_ANN, (CE_NOTE,
2416 "IO Cmd Success SMID %x", cmd->SMID));
2417 }
2418
2419 /* regular commands */
2420
2421 switch (ddi_get8(acc_handle, &scsi_raid_io->Function)) {
2422
2423 case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2424 acmd = (struct scsa_cmd *)cmd->cmd;
2425 lbinfo = &instance->load_balance_info[acmd->device_id];
2426
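/*
 * A fast-path read may have been steered to either RAID-1 arm by the
 * load balancer; drop the pending count on the arm this command
 * actually used.
 */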
2427 if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2428 arm = lbinfo->raid1DevHandle[0] ==
2429 scsi_raid_io->DevHandle ? 0 : 1;
2430
2431 lbinfo->scsi_pending_cmds[arm]--;
2432 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2433 }
2434 con_log(CL_DLEVEL3, (CE_NOTE,
2435 "FastPath IO Completion Success "));
2436 /* FALLTHRU */
2437
2438 case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
2439 acmd = (struct scsa_cmd *)cmd->cmd;
2440 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2441
2442 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2443 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2444 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2445 acmd->cmd_dma_offset, acmd->cmd_dma_len,
2446 DDI_DMA_SYNC_FORCPU);
2447 }
2448 }
2449
2450 pkt->pkt_reason = CMD_CMPLT;
2451 pkt->pkt_statistics = 0;
2452 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2453 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2454
2455 con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2456 "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2457 ((acmd->islogical) ? "LD" : "PD"),
2458 acmd->cmd_dmacount, cmd->SMID, status));
2459
2460 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2461 struct scsi_inquiry *inq;
2462
2463 if (acmd->cmd_dmacount != 0) {
2464 bp_mapin(acmd->cmd_buf);
2465 inq = (struct scsi_inquiry *)
2466 acmd->cmd_buf->b_un.b_addr;
2467
2468 /* don't expose physical drives to OS */
2469 if (acmd->islogical &&
2470 (status == MFI_STAT_OK)) {
2471 display_scsi_inquiry((caddr_t)inq);
2472 #ifdef PDSUPPORT
2473 } else if ((status == MFI_STAT_OK) &&
2474 inq->inq_dtype == DTYPE_DIRECT) {
2475 display_scsi_inquiry((caddr_t)inq);
2476 #endif
2477 } else {
2478 /* for physical disk */
2479 status = MFI_STAT_DEVICE_NOT_FOUND;
2480 }
2481 }
2482 }
2483
2484 switch (status) {
2485 case MFI_STAT_OK:
2486 pkt->pkt_scbp[0] = STATUS_GOOD;
2487 break;
2488 case MFI_STAT_LD_CC_IN_PROGRESS:
2489 case MFI_STAT_LD_RECON_IN_PROGRESS:
2490 pkt->pkt_scbp[0] = STATUS_GOOD;
2491 break;
2492 case MFI_STAT_LD_INIT_IN_PROGRESS:
2493 pkt->pkt_reason = CMD_TRAN_ERR;
2494 break;
2495 case MFI_STAT_SCSI_IO_FAILED:
2496 cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
2497 pkt->pkt_reason = CMD_TRAN_ERR;
2498 break;
2499 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2500 con_log(CL_ANN, (CE_WARN,
2501 "tbolt_complete_cmd: scsi_done with error"));
2502
2503 pkt->pkt_reason = CMD_CMPLT;
2504 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2505
2506 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2507 con_log(CL_ANN,
2508 (CE_WARN, "TEST_UNIT_READY fail"));
2509 } else {
2510 pkt->pkt_state |= STATE_ARQ_DONE;
2511 arqstat = (void *)(pkt->pkt_scbp);
2512 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2513 arqstat->sts_rqpkt_resid = 0;
2514 arqstat->sts_rqpkt_state |=
2515 STATE_GOT_BUS | STATE_GOT_TARGET
2516 | STATE_SENT_CMD
2517 | STATE_XFERRED_DATA;
2518 *(uint8_t *)&arqstat->sts_rqpkt_status =
2519 STATUS_GOOD;
2520 con_log(CL_ANN1,
2521 (CE_NOTE, "Copying Sense data %x",
2522 cmd->SMID));
2523
2524 ddi_rep_get8(acc_handle,
2525 (uint8_t *)&(arqstat->sts_sensedata),
2526 cmd->sense1,
2527 sizeof (struct scsi_extended_sense),
2528 DDI_DEV_AUTOINCR);
2529
2530 }
2531 break;
2532 case MFI_STAT_LD_OFFLINE:
2533 cmn_err(CE_WARN,
2534 "tbolt_complete_cmd: ld offline "
2535 "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2536 /* UNDO: */
2537 ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2538
2539 ddi_get16(acc_handle,
2540 &scsi_raid_io->RaidContext.ldTargetId),
2541
2542 ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2543
2544 pkt->pkt_reason = CMD_DEV_GONE;
2545 pkt->pkt_statistics = STAT_DISCON;
2546 break;
2547 case MFI_STAT_DEVICE_NOT_FOUND:
2548 con_log(CL_ANN, (CE_CONT,
2549 "tbolt_complete_cmd: device not found error"));
2550 pkt->pkt_reason = CMD_DEV_GONE;
2551 pkt->pkt_statistics = STAT_DISCON;
2552 break;
2553
2554 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2555 pkt->pkt_state |= STATE_ARQ_DONE;
2556 pkt->pkt_reason = CMD_CMPLT;
2557 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2558
2559 arqstat = (void *)(pkt->pkt_scbp);
2560 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2561 arqstat->sts_rqpkt_resid = 0;
2562 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2563 | STATE_GOT_TARGET | STATE_SENT_CMD
2564 | STATE_XFERRED_DATA;
2565 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2566
2567 arqstat->sts_sensedata.es_valid = 1;
2568 arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2569 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2570
2571 /*
2572 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2573 * ASC: 0x21h; ASCQ: 0x00h;
2574 */
2575 arqstat->sts_sensedata.es_add_code = 0x21;
2576 arqstat->sts_sensedata.es_qual_code = 0x00;
2577 break;
2578 case MFI_STAT_INVALID_CMD:
2579 case MFI_STAT_INVALID_DCMD:
2580 case MFI_STAT_INVALID_PARAMETER:
2581 case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2582 default:
2583 cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!");
2584 pkt->pkt_reason = CMD_TRAN_ERR;
2585
2586 break;
2587 }
2588
2589 atomic_add_16(&instance->fw_outstanding, (-1));
2590
2591 (void) mrsas_common_check(instance, cmd);
2592 if (acmd->cmd_dmahandle) {
2593 if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2594 DDI_SUCCESS) {
2595 ddi_fm_service_impact(instance->dip,
2596 DDI_SERVICE_UNAFFECTED);
2597 pkt->pkt_reason = CMD_TRAN_ERR;
2598 pkt->pkt_statistics = 0;
2599 }
2600 }
2601
2602 /* Call the callback routine */
2603 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2604 (*pkt->pkt_comp)(pkt);
2605
2606 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2607
2608 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2609
2610 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2611
2612 return_raid_msg_pkt(instance, cmd);
2613 break;
2614 }
2615 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
2616
2617 if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2618 cmd->frame->dcmd.mbox.b[1] == 1) {
2619
2620 mutex_enter(&instance->sync_map_mtx);
2621
2622 con_log(CL_ANN, (CE_NOTE,
2623 "LDMAP sync command SMID RECEIVED 0x%X",
2624 cmd->SMID));
2625 if (cmd->frame->hdr.cmd_status != 0) {
2626 cmn_err(CE_WARN,
2627 "map sync failed, status = 0x%x.",
2628 cmd->frame->hdr.cmd_status);
2629 } else {
2630 instance->map_id++;
2631 cmn_err(CE_NOTE,
2632 "map sync received, switched map_id to %"
2633 PRIu64 " \n", instance->map_id);
2634 }
2635
2636 if (MR_ValidateMapInfo(instance->ld_map[
2637 (instance->map_id & 1)],
2638 instance->load_balance_info)) {
2639 instance->fast_path_io = 1;
2640 } else {
2641 instance->fast_path_io = 0;
2642 }
2643
2644 con_log(CL_ANN, (CE_NOTE,
2645 "instance->fast_path_io %d",
2646 instance->fast_path_io));
2647
2648 instance->unroll.syncCmd = 0;
2649
2650 if (instance->map_update_cmd == cmd) {
2651 return_raid_msg_pkt(instance, cmd);
2652 atomic_add_16(&instance->fw_outstanding, (-1));
2653 (void) mrsas_tbolt_sync_map_info(instance);
2654 }
2655
2656 cmn_err(CE_NOTE, "LDMAP sync completed.");
2657 mutex_exit(&instance->sync_map_mtx);
2658 break;
2659 }
2660
2661 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2662 con_log(CL_ANN1, (CE_CONT,
2663 "AEN command SMID RECEIVED 0x%X",
2664 cmd->SMID));
2665 if ((instance->aen_cmd == cmd) &&
2666 (instance->aen_cmd->abort_aen)) {
2667 con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2668 "aborted_aen returned"));
2669 } else {
2670 atomic_add_16(&instance->fw_outstanding, (-1));
2671 service_mfi_aen(instance, cmd);
2672 }
2673 }
2674
2675 if (cmd->sync_cmd == MRSAS_TRUE) {
2676 con_log(CL_ANN1, (CE_CONT,
2677 "Sync-mode Command Response SMID RECEIVED 0x%X",
2678 cmd->SMID));
2679
2680 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2681 } else {
2682 con_log(CL_ANN, (CE_CONT,
2683 "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2684 cmd->SMID));
2685 }
2686 break;
2687 default:
2688 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2689 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2690
2691 /* free message */
2692 con_log(CL_ANN,
2693 (CE_NOTE, "tbolt_complete_cmd: unknown function type"));
2694 break;
2695 }
2696 }
2697
2698 uint_t
2699 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2700 {
2701 uint8_t replyType;
2702 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2703 Mpi2ReplyDescriptorsUnion_t *desc;
2704 uint16_t smid;
2705 union desc_value d_val;
2706 struct mrsas_cmd *cmd;
2707
2708 struct mrsas_header *hdr;
2709 struct scsi_pkt *pkt;
2710
2711 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2712 0, 0, DDI_DMA_SYNC_FORDEV);
2713
2714 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2715 0, 0, DDI_DMA_SYNC_FORCPU);
2716
2717 desc = instance->reply_frame_pool;
2718 desc += instance->reply_read_index;
2719
2720 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2721 replyType = replyDesc->ReplyFlags &
2722 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2723
2724 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2725 return (DDI_INTR_UNCLAIMED);
2726
2727 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2728 != DDI_SUCCESS) {
2729 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2730 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2731 con_log(CL_ANN1,
2732 (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2733 "FMA check, returning DDI_INTR_UNCLAIMED"));
2734 return (DDI_INTR_CLAIMED);
2735 }
2736
2737 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
2738 (void *)desc, desc->Words));
2739
2740 d_val.word = desc->Words;
2741
2742
2743 /* Read Reply descriptor */
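/*
 * Consumed slots are reset to all 1s further down, so a descriptor
 * whose low and high words are both 0xffffffff marks the end of the
 * new completions in the ring.
 */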
2744 while ((d_val.u1.low != 0xffffffff) &&
2745 (d_val.u1.high != 0xffffffff)) {
2746
2747 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2748 0, 0, DDI_DMA_SYNC_FORCPU);
2749
2750 smid = replyDesc->SMID;
2751
2752 if (!smid || smid > instance->max_fw_cmds + 1) {
2753 con_log(CL_ANN1, (CE_NOTE,
2754 "Reply Desc at Break = %p Words = %" PRIx64,
2755 (void *)desc, desc->Words));
2756 break;
2757 }
2758
2759 cmd = instance->cmd_list[smid - 1];
2760 if (!cmd) {
2761 con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2762 "outstanding_cmd: Invalid command "
2763 " or Poll commad Received in completion path"));
2764 } else {
2765 mutex_enter(&instance->cmd_pend_mtx);
2766 if (cmd->sync_cmd == MRSAS_TRUE) {
2767 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2768 if (hdr) {
2769 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2770 "tbolt_process_outstanding_cmd:"
2771 " mlist_del_init(&cmd->list)."));
2772 mlist_del_init(&cmd->list);
2773 }
2774 } else {
2775 pkt = cmd->pkt;
2776 if (pkt) {
2777 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2778 "tbolt_process_outstanding_cmd:"
2779 "mlist_del_init(&cmd->list)."));
2780 mlist_del_init(&cmd->list);
2781 }
2782 }
2783
2784 mutex_exit(&instance->cmd_pend_mtx);
2785
2786 tbolt_complete_cmd(instance, cmd);
2787 }
2788 /* set it back to all 1s. */
2789 desc->Words = -1LL;
2790
2791 instance->reply_read_index++;
2792
2793 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2794 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2795 instance->reply_read_index = 0;
2796 }
2797
2798 /* Get the next reply descriptor */
2799 if (!instance->reply_read_index)
2800 desc = instance->reply_frame_pool;
2801 else
2802 desc++;
2803
2804 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2805
2806 d_val.word = desc->Words;
2807
2808 con_log(CL_ANN1, (CE_NOTE,
2809 "Next Reply Desc = %p Words = %" PRIx64,
2810 (void *)desc, desc->Words));
2811
2812 replyType = replyDesc->ReplyFlags &
2813 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2814
2815 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2816 break;
2817
2818 } /* End of while loop. */
2819
2820 /* update replyIndex to FW */
2821 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2822
2823
2824 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2825 0, 0, DDI_DMA_SYNC_FORDEV);
2826
2827 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2828 0, 0, DDI_DMA_SYNC_FORCPU);
2829 return (DDI_INTR_CLAIMED);
2830 }
2882
2883 if (cmd == NULL) {
2884 cmn_err(CE_WARN,
2885 "Failed to get a cmd from free-pool in get_ld_map_info()");
2886 return (DDI_FAILURE);
2887 }
2888
2889 dcmd = &cmd->frame->dcmd;
2890
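/*
 * MR_FW_RAID_MAP already embeds one MR_LD_SPAN_MAP, so only
 * MAX_LOGICAL_DRIVES - 1 additional entries are accounted for here.
 */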
2891 size_map_info = sizeof (MR_FW_RAID_MAP) +
2892 (sizeof (MR_LD_SPAN_MAP) *
2893 (MAX_LOGICAL_DRIVES - 1));
2894
2895 con_log(CL_ANN, (CE_NOTE,
2896 "size_map_info : 0x%x", size_map_info));
2897
2898 ci = instance->ld_map[(instance->map_id & 1)];
2899 ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2900
2901 if (!ci) {
2902 cmn_err(CE_WARN, "Failed to alloc mem for ld_map_info");
2903 return_raid_msg_pkt(instance, cmd);
2904 return (-1);
2905 }
2906
2907 bzero(ci, sizeof (*ci));
2908 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2909
2910 dcmd->cmd = MFI_CMD_OP_DCMD;
2911 dcmd->cmd_status = 0xFF;
2912 dcmd->sge_count = 1;
2913 dcmd->flags = MFI_FRAME_DIR_READ;
2914 dcmd->timeout = 0;
2915 dcmd->pad_0 = 0;
2916 dcmd->data_xfer_len = size_map_info;
2917 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2918 dcmd->sgl.sge32[0].phys_addr = ci_h;
2919 dcmd->sgl.sge32[0].length = size_map_info;
2920
2921
2922 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2923
2924 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2925 ret = 0;
2926 con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2927 } else {
2928 cmn_err(CE_WARN, "Get LD Map Info failed");
2929 ret = -1;
2930 }
2931
2932 return_raid_msg_pkt(instance, cmd);
2933
2934 return (ret);
2935 }
2936
2937 void
2938 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2939 {
2940 uint32_t i;
2941 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2942 union desc_value d_val;
2943
2944 reply_desc = instance->reply_frame_pool;
2945
2946 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2947 d_val.word = reply_desc->Words;
2948 con_log(CL_DLEVEL3, (CE_NOTE,
2949 "i=%d, %x:%x",
2950 i, d_val.u1.high, d_val.u1.low));
2951 }
2952 }
2953
2954 /*
2955 * mrsas_tbolt_prepare_cdb - Build a 32-byte CDB for fast-path DIF IO.
2956 * @io_info: MegaRAID IO request packet pointer.
2957 * @ref_tag: Reference tag for RD/WRPROTECT
2958 *
2959 * Builds a variable-length READ(32)/WRITE(32) CDB and sets the EEDP fields.
2960 */
2961 void
2962 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2963 struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2964 U32 ref_tag)
2965 {
2966 uint16_t EEDPFlags;
2967 uint32_t Control;
2968 ddi_acc_handle_t acc_handle =
2969 instance->mpi2_frame_pool_dma_obj.acc_handle;
2970
2971 /* Prepare 32-byte CDB if DIF is supported on this device */
2972 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2973
2974 bzero(cdb, 32);
2975
2976 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2977
2978
2979 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2980
2981 if (io_info->isRead)
2982 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2983 else
2984 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2985
2986 /* Verified against the Linux driver, which sets MEGASAS_RD_WR_PROTECT_CHECK_ALL */
2987 cdb[10] = MRSAS_RD_WR_PROTECT;
2988
2989 /* LOGICAL BLOCK ADDRESS */
2990 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2991 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2992 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2993 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2994 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2995 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2996 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2997 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2998
2999 /* Logical block reference tag */
3000 ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
3001 BE_32(ref_tag));
3002
3003 ddi_put16(acc_handle,
3004 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
3005
3006 ddi_put32(acc_handle, &scsi_io_request->DataLength,
3007 ((io_info->numBlocks)*512));
3008 /* Specify 32-byte cdb */
3009 ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
3010
3011 /* Transfer length */
3012 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
3013 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
3014 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
3015 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
3016
3017 /* set SCSI IO EEDPFlags */
3018 EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
3019 Control = ddi_get32(acc_handle, &scsi_io_request->Control);
3020
3021 /* set SCSI IO EEDPFlags bits */
3022 if (io_info->isRead) {
3023 /*
3024 * For READ commands, the EEDPFlags shall be set to specify to
3025 * Increment the Primary Reference Tag, to Check the Reference
3026 * Tag, and to Check and Remove the Protection Information
3027 * fields.
3028 */
3029 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3030 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
3031 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
3032 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
3033 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
3034 } else {
3035 /*
3036 * For WRITE commands, the EEDPFlags shall be set to specify to
3037 * Increment the Primary Reference Tag, and to Insert
3038 * Protection Information fields.
3039 */
3040 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3041 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
3042 }
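	/* Value carried over verbatim from the LSI reference driver. */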
3043 Control |= (0x4 << 26);
3044
3045 ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
3046 ddi_put32(acc_handle, &scsi_io_request->Control, Control);
3047 ddi_put32(acc_handle,
3048 &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
3049 }
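/*
 * Byte-packing sketch for the CDB built above, assuming a hypothetical
 * pdBlock of 0x12345678 and numBlocks of 8:
 *
 *	cdb[12..19] = 00 00 00 00 12 34 56 78	(big-endian LBA)
 *	cdb[28..31] = 00 00 00 08		(transfer length)
 */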
3050
3051
3052 /*
3053 * mrsas_tbolt_set_pd_lba - Sets PD LBA
3054 * @cdb: CDB
3055 * @cdb_len: cdb length
3056 * @start_blk: Start block of IO
3057 *
3058 * Used to set the PD LBA in CDB for FP IOs
3059 */
3060 static void
3061 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
3062 U32 num_blocks)
3063 {
3064 U8 cdb_len = *cdb_len_ptr;
3065 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
3066
3067 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
3068 if (((cdb_len == 12) || (cdb_len == 16)) &&
3069 (start_blk <= 0xffffffff)) {
3070 if (cdb_len == 16) {
3071 con_log(CL_ANN,
3072 (CE_NOTE, "Converting READ/WRITE(16) to 10-byte CDB"));
3073 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3074 flagvals = cdb[1];
3075 groupnum = cdb[14];
3076 control = cdb[15];
3077 } else {
3078 con_log(CL_ANN,
3079 (CE_NOTE, "Converting READ/WRITE(12) to 10-byte CDB"));
3080 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3081 flagvals = cdb[1];
3082 groupnum = cdb[10];
3083 control = cdb[11];
3084 }
3085
3086 bzero(cdb, 16); /* cdb decays to a pointer here; sizeof (cdb) would be wrong */
3087
3088 cdb[0] = opcode;
3089 cdb[1] = flagvals;
3090 cdb[6] = groupnum;
3091 cdb[9] = control;
3092 /* Set transfer length */
3093 cdb[8] = (U8)(num_blocks & 0xff);
3094 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3095 cdb_len = 10;
3096 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3097 /* Convert to 16 byte CDB for large LBA's */
3098 con_log(CL_ANN,
3099 (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3100 switch (cdb_len) {
3101 case 6:
3102 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3103 control = cdb[5];
3104 break;
3105 case 10:
3106 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3107 flagvals = cdb[1];
3108 groupnum = cdb[6];
3109 control = cdb[9];
3110 break;
3111 case 12:
3112 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3113 flagvals = cdb[1];
3114 groupnum = cdb[10];
3115 control = cdb[11];
3116 break;
3117 }
3118
3119 bzero(cdb, 16); /* see note above: cdb is a pointer parameter */
3120
3121 cdb[0] = opcode;
3122 cdb[1] = flagvals;
3123 cdb[14] = groupnum;
3124 cdb[15] = control;
3125
3126 /* Transfer length */
3127 cdb[13] = (U8)(num_blocks & 0xff);
3128 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3129 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3130 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3131
3132 /* Specify 16-byte cdb */
3133 cdb_len = 16;
3134 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3135 /* convert to 10 byte CDB */
3136 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3137 control = cdb[5];
3138
3139 bzero(cdb, 16);
3140 cdb[0] = opcode;
3141 cdb[9] = control;
3142
3143 /* Set transfer length */
3144 cdb[8] = (U8)(num_blocks & 0xff);
3145 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3146
3147 /* Specify 10-byte cdb */
3148 cdb_len = 10;
3149 }
3150
3151
3152 /* Normal case: load the LBA into the (possibly converted) CDB */
3153 switch (cdb_len) {
3154 case 6:
3155 {
3156 U8 val = cdb[1] & 0xE0;
3157 cdb[3] = (U8)(start_blk & 0xff);
3158 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3159 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3160 break;
3161 }
3162 case 10:
3163 cdb[5] = (U8)(start_blk & 0xff);
3164 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3165 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3166 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3167 break;
3168 case 12:
3169 cdb[5] = (U8)(start_blk & 0xff);
3170 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3171 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3172 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3173 break;
3174
3175 case 16:
3176 cdb[9] = (U8)(start_blk & 0xff);
3177 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3178 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3179 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3180 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3181 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3182 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3183 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3184 break;
3185 }
3186
3187 *cdb_len_ptr = cdb_len;
3188 }
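/*
 * Worked example of the conversion above: a READ(6) at start_blk
 * 0x200000 (> 0x1fffff) for 16 (0x10) blocks is rewritten as a
 * READ(10) with cdb[2..5] = 00 20 00 00 and cdb[7..8] = 00 10.
 */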
3189
3190
3191 static int
3192 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3193 {
3194 MR_FW_RAID_MAP_ALL *ld_map;
3195
3196 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3197
3198 ld_map = instance->ld_map[(instance->map_id & 1)];
3199
3200 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3201 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3202
3203 if (MR_ValidateMapInfo(instance->ld_map[
3204 (instance->map_id & 1)], instance->load_balance_info)) {
3205 con_log(CL_ANN,
3206 (CE_CONT, "MR_ValidateMapInfo success"));
3207
3208 instance->fast_path_io = 1;
3209 con_log(CL_ANN,
3210 (CE_NOTE, "instance->fast_path_io %d",
3211 instance->fast_path_io));
3212
3213 return (DDI_SUCCESS);
3214 }
3215
3216 }
3217
3218 instance->fast_path_io = 0;
3219 cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
3220 con_log(CL_ANN, (CE_NOTE,
3221 "instance->fast_path_io %d", instance->fast_path_io));
3222
3223 return (DDI_FAILURE);
3224 }
3225
3226 /*
3227 * Marks the HBA as bad.  Called either when an IO packet times out
3228 * even after 3 FW resets, or when the FW is found faulted even
3229 * after 3 consecutive resets.
3230 */
3231
3232 void
3233 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3234 {
3235 cmn_err(CE_NOTE, "TBOLT Kill adapter called");
3236
3237 if (instance->deadadapter == 1)
3238 return;
3239
3240 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3241 "Writing to doorbell with MFI_STOP_ADP "));
3242 mutex_enter(&instance->ocr_flags_mtx);
3243 instance->deadadapter = 1;
3244 mutex_exit(&instance->ocr_flags_mtx);
3245 instance->func_ptr->disable_intr(instance);
3246 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3247 /* Flush */
3248 (void) RD_RESERVED0_REGISTER(instance);
3249
3250 (void) mrsas_print_pending_cmds(instance);
3251 (void) mrsas_complete_pending_cmds(instance);
3252 }
3253
3254 void
3255 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3256 {
3257 int i;
3258 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3259 instance->reply_read_index = 0;
3260
3261 /* initializing reply address to 0xFFFFFFFF */
3262 reply_desc = instance->reply_frame_pool;
3263
3264 for (i = 0; i < instance->reply_q_depth; i++) {
3265 reply_desc->Words = (uint64_t)~0;
3266 reply_desc++;
3267 }
3268 }
3269
3270 int
3271 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3272 {
3273 uint32_t status = 0x00;
3274 uint32_t retry = 0;
3275 uint32_t cur_abs_reg_val;
3276 uint32_t fw_state;
3277 uint32_t abs_state;
3278 uint32_t i;
3279
3280 con_log(CL_ANN, (CE_NOTE,
3281 "mrsas_tbolt_reset_ppc entered"));
3282
3283 if (instance->deadadapter == 1) {
3284 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3285 "no more resets as HBA has been marked dead ");
3286 return (DDI_FAILURE);
3287 }
3288
3289 mutex_enter(&instance->ocr_flags_mtx);
3290 instance->adapterresetinprogress = 1;
3291 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3292 "adapterresetinprogress flag set, time %llx", gethrtime()));
3293 mutex_exit(&instance->ocr_flags_mtx);
3294
3295 instance->func_ptr->disable_intr(instance);
3296
3297 /* Add delay in order to complete the ioctl & IO cmds in-flight */
3298 for (i = 0; i < 3000; i++) {
3299 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3300 }
3301
3302 instance->reply_read_index = 0;
3303
3304 retry_reset:
3305 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3306 ":Resetting TBOLT "));
3307
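	/*
	 * Unlock the host diagnostic register by writing the MPI2 key
	 * sequence to the inbound write-sequence register.
	 */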
3308 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3309 WR_TBOLT_IB_WRITE_SEQ(4, instance);
3310 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3311 WR_TBOLT_IB_WRITE_SEQ(2, instance);
3312 WR_TBOLT_IB_WRITE_SEQ(7, instance);
3313 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3314 con_log(CL_ANN1, (CE_NOTE,
3315 "mrsas_tbolt_reset_ppc: magic number written "
3316 "to write sequence register"));
3317 delay(100 * drv_usectohz(MILLISEC));
3318 status = RD_TBOLT_HOST_DIAG(instance);
3319 con_log(CL_ANN1, (CE_NOTE,
3320 "mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
3321 "to write sequence register"));
3322
3323 while (status & DIAG_TBOLT_RESET_ADAPTER) {
3324 delay(100 * drv_usectohz(MILLISEC));
3325 status = RD_TBOLT_HOST_DIAG(instance);
3326 if (retry++ == 100) {
3327 cmn_err(CE_WARN,
3328 "mrsas_tbolt_reset_ppc:"
3329 "resetadapter bit is set already "
3330 "check retry count %d", retry);
3331 return (DDI_FAILURE);
3332 }
3333 }
3334
3335 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3336 delay(100 * drv_usectohz(MILLISEC));
3337
3338 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3339 (uint8_t *)((uintptr_t)(instance)->regmap +
3340 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3341
3342 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3343 delay(100 * drv_usectohz(MILLISEC));
3344 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3345 (uint8_t *)((uintptr_t)(instance)->regmap +
3346 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3347 if (retry++ == 100) {
3348 /* Don't call kill adapter here */
3349 /* RESET BIT ADAPTER is cleared by firmware */
3350 /* mrsas_tbolt_kill_adapter(instance); */
3351 cmn_err(CE_WARN,
3352 "mr_sas %d: %s(): RESET FAILED; return failure!!!",
3353 instance->instance, __func__);
3354 return (DDI_FAILURE);
3355 }
3356 }
3357
3358 con_log(CL_ANN,
3359 (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3360 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3361 "Calling mfi_state_transition_to_ready"));
3362
3363 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3364 retry = 0;
3365 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3366 delay(100 * drv_usectohz(MILLISEC));
3367 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3368 }
3369 if (abs_state <= MFI_STATE_FW_INIT) {
3370 cmn_err(CE_WARN,
3371 "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
3372 "state = 0x%x, RETRY RESET.", abs_state);
3373 goto retry_reset;
3374 }
3375
3376 /* Mark HBA as bad if FW is faulted after 3 consecutive resets */
3377 if (mfi_state_transition_to_ready(instance) ||
3378 debug_tbolt_fw_faults_after_ocr_g == 1) {
3379 cur_abs_reg_val =
3380 instance->func_ptr->read_fw_status_reg(instance);
3381 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3382
3383 con_log(CL_ANN1, (CE_NOTE,
3384 "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
3385 "FW state = 0x%x", fw_state));
3386 if (debug_tbolt_fw_faults_after_ocr_g == 1)
3387 fw_state = MFI_STATE_FAULT;
3388
3389 con_log(CL_ANN,
3390 (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
3391 "FW state = 0x%x", fw_state));
3392
3393 if (fw_state == MFI_STATE_FAULT) {
3394 /* increment the count */
3395 instance->fw_fault_count_after_ocr++;
3396 if (instance->fw_fault_count_after_ocr
3397 < MAX_FW_RESET_COUNT) {
3398 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3399 "FW is in fault after OCR count %d "
3400 "Retry Reset",
3401 instance->fw_fault_count_after_ocr);
3402 goto retry_reset;
3403
3404 } else {
3405 cmn_err(CE_WARN, "mrsas %d: %s: "
3406 "max reset count exceeded (>%d); "
3407 "marking HBA as bad, KILL adapter",
3408 instance->instance, __func__,
3409 MAX_FW_RESET_COUNT);
3410
3411 mrsas_tbolt_kill_adapter(instance);
3412 return (DDI_FAILURE);
3413 }
3414 }
3415 }
3416
3417 /* reset the counter as FW is up after OCR */
3418 instance->fw_fault_count_after_ocr = 0;
3419
3420 mrsas_reset_reply_desc(instance);
3421
3422
3423 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3424 "Calling mrsas_issue_init_mpi2"));
3425 abs_state = mrsas_issue_init_mpi2(instance);
3426 if (abs_state == (uint32_t)DDI_FAILURE) {
3427 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3428 "INIT failed Retrying Reset");
3429 goto retry_reset;
3430 }
3431 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3432 "mrsas_issue_init_mpi2 Done"));
3433
3434 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3435 "Calling mrsas_print_pending_cmd"));
3436 (void) mrsas_print_pending_cmds(instance);
3437 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3438 "mrsas_print_pending_cmd done"));
3439
3440 instance->func_ptr->enable_intr(instance);
3441 instance->fw_outstanding = 0;
3442
3443 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3444 "Calling mrsas_issue_pending_cmds"));
3445 (void) mrsas_issue_pending_cmds(instance);
3446 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3447 "issue_pending_cmds done."));
3448
3449 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3450 "Calling aen registration"));
3451
3452 instance->aen_cmd->retry_count_for_ocr = 0;
3453 instance->aen_cmd->drv_pkt_time = 0;
3454
3455 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3456
3457 con_log(CL_ANN1, (CE_NOTE, "Unsetting adapterresetinprogress flag."));
3458 mutex_enter(&instance->ocr_flags_mtx);
3459 instance->adapterresetinprogress = 0;
3460 mutex_exit(&instance->ocr_flags_mtx);
3461 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3462 "adpterresetinprogress flag unset"));
3463
3464 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
3465 return (DDI_SUCCESS);
3466
3467 }
3468
3469
3470 /*
3471 * mrsas_tbolt_sync_map_info - Push LD sync info to the FW
3472 * @instance: Adapter soft state
3473 *
3474 * Issues an internal DCMD (MR_DCMD_LD_MAP_GET_INFO with the "pend"
3475 * mbox flag) that hands the FW each LD's target id and sequence
3476 * number and arms it to notify the driver when the RAID map changes.
3477 */
3478
3479 static int
3480 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3481 {
3482 int ret = 0, i;
3483 struct mrsas_cmd *cmd = NULL;
3484 struct mrsas_dcmd_frame *dcmd;
3485 uint32_t size_sync_info, num_lds;
3486 LD_TARGET_SYNC *ci = NULL;
3487 MR_FW_RAID_MAP_ALL *map;
3488 MR_LD_RAID *raid;
3489 LD_TARGET_SYNC *ld_sync;
3490 uint32_t ci_h = 0;
3491 uint32_t size_map_info;
3492
3493 cmd = get_raid_msg_pkt(instance);
3494
3495 if (cmd == NULL) {
3496 cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
3497 "mrsas_tbolt_sync_map_info(). ");
3498 return (DDI_FAILURE);
3499 }
3500
3501 /* Clear the frame buffer and assign back the context id */
3502 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3503 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3504 cmd->index);
3505 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3506
3507
3508 map = instance->ld_map[instance->map_id & 1];
3509
3510 num_lds = map->raidMap.ldCount;
3511
3512 dcmd = &cmd->frame->dcmd;
3513
3514 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3515
3516 con_log(CL_ANN, (CE_NOTE, "size_sync_info =0x%x ; ld count = 0x%x",
3517 size_sync_info, num_lds));
3518
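	/*
	 * Reuse the inactive half of the double-buffered RAID map as
	 * the DMA payload for the sync command.
	 */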
3519 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3520
3521 bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3522 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3523
3524 bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3525
3526 ld_sync = (LD_TARGET_SYNC *)ci;
3527
3528 for (i = 0; i < num_lds; i++, ld_sync++) {
3529 raid = MR_LdRaidGet(i, map);
3530
3531 con_log(CL_ANN1,
3532 (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3533 i, raid->seqNum, raid->flags.ldSyncRequired));
3534
3535 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3536
3537 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3538 i, ld_sync->ldTargetId));
3539
3540 ld_sync->seqNum = raid->seqNum;
3541 }
3542
3543
3544 size_map_info = sizeof (MR_FW_RAID_MAP) +
3545 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3546
3547 dcmd->cmd = MFI_CMD_OP_DCMD;
3548 dcmd->cmd_status = 0xFF;
3549 dcmd->sge_count = 1;
3550 dcmd->flags = MFI_FRAME_DIR_WRITE;
3551 dcmd->timeout = 0;
3552 dcmd->pad_0 = 0;
3553 dcmd->data_xfer_len = size_map_info;
3554 ASSERT(num_lds <= 255);
3555 dcmd->mbox.b[0] = (U8)num_lds;
3556 dcmd->mbox.b[1] = 1; /* Pend */
3557 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3558 dcmd->sgl.sge32[0].phys_addr = ci_h;
3559 dcmd->sgl.sge32[0].length = size_map_info;
3560
3561
3562 instance->map_update_cmd = cmd;
3563 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3564
3565 instance->func_ptr->issue_cmd(cmd, instance);
3566
3567 instance->unroll.syncCmd = 1;
3568 con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3569
3570 return (ret);
3571 }
3572
3573 /*
3574 * abort_syncmap_cmd
3575 */
3576 int
3577 abort_syncmap_cmd(struct mrsas_instance *instance,
3578 struct mrsas_cmd *cmd_to_abort)
3579 {
3580 int ret = 0;
3581
3582 struct mrsas_cmd *cmd;
3583 struct mrsas_abort_frame *abort_fr;
3584
3585 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3586
3587 cmd = get_raid_msg_mfi_pkt(instance);
3588
3589 if (!cmd) {
3590 cmn_err(CE_WARN,
3591 "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3592 return (DDI_FAILURE);
3593 }
3594 /* Clear the frame buffer and assign back the context id */
3595 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3596 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3597 cmd->index);
3598
3599 abort_fr = &cmd->frame->abort;
3600
3601 /* prepare and issue the abort frame */
3602 ddi_put8(cmd->frame_dma_obj.acc_handle,
3603 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3604 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3605 MFI_CMD_STATUS_SYNC_MODE);
3606 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3607 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3608 cmd_to_abort->index);
3609 ddi_put32(cmd->frame_dma_obj.acc_handle,
3610 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3611 ddi_put32(cmd->frame_dma_obj.acc_handle,
3612 &abort_fr->abort_mfi_phys_addr_hi, 0);
3613
3614 cmd->frame_count = 1;
3615
3616 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3617
3618 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3619 con_log(CL_ANN1, (CE_WARN,
3620 "abort_syncmap_cmd: issue_cmd_in_poll_mode failed"));
3621 ret = -1;
3622 } else {
3623 ret = 0;
3624 }
3625
3626 return_raid_msg_mfi_pkt(instance, cmd);
3627
3628 atomic_add_16(&instance->fw_outstanding, (-1));
3629
3630 return (ret);
3631 }
3632
3633
3634 #ifdef PDSUPPORT
3635 int
3636 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3637 uint8_t lun, dev_info_t **ldip)
3638 {
3639 struct scsi_device *sd;
3640 dev_info_t *child;
3641 int rval, dtype;
3642 struct mrsas_tbolt_pd_info *pds = NULL;
3643
3644 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3645 tgt, lun));
3646
3647 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3648 if (ldip) {
3649 *ldip = child;
3650 }
3651 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3652 rval = mrsas_service_evt(instance, tgt, 1,
3653 MRSAS_EVT_UNCONFIG_TGT, NULL);
3654 con_log(CL_ANN1, (CE_WARN,
3655 "mr_sas:DELETING STALE ENTRY rval = %d "
3656 "tgt id = %d", rval, tgt));
3657 return (NDI_FAILURE);
3658 }
3659 return (NDI_SUCCESS);
3660 }
3661
3662 pds = (struct mrsas_tbolt_pd_info *)
3663 kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3664 mrsas_tbolt_get_pd_info(instance, pds, tgt);
3665 dtype = pds->scsiDevType;
3666
3667 /* Check for disk */
3668 if (dtype == DTYPE_DIRECT) {
3669 /* Only PDs in the SYSTEM state are exposed */
3670 if (LE_16(pds->fwState) != PD_SYSTEM) {
3671 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3672 return (NDI_FAILURE);
3673 }
3674 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3675 sd->sd_address.a_hba_tran = instance->tran;
3676 sd->sd_address.a_target = (uint16_t)tgt;
3677 sd->sd_address.a_lun = (uint8_t)lun;
3678
3679 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3680 rval = mrsas_config_scsi_device(instance, sd, ldip);
3681 con_log(CL_DLEVEL1, (CE_NOTE,
3682 "Phys. device found: tgt %d dtype %d: %s",
3683 tgt, dtype, sd->sd_inq->inq_vid));
3684 } else {
3685 rval = NDI_FAILURE;
3686 con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3687 "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3688 tgt, dtype, sd->sd_inq->inq_vid));
3689 }
3690
3691 /* sd_unprobe is blank now. Free buffer manually */
3692 if (sd->sd_inq) {
3693 kmem_free(sd->sd_inq, SUN_INQSIZE);
3694 sd->sd_inq = (struct scsi_inquiry *)NULL;
3695 }
3696 kmem_free(sd, sizeof (struct scsi_device));
3697 /* rval reflects the probe/config outcome above */
3698 } else {
3699 con_log(CL_ANN1, (CE_NOTE,
3700 "Device not supported: tgt %d lun %d dtype %d",
3701 tgt, lun, dtype));
3702 rval = NDI_FAILURE;
3703 }
3704
3705 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3706 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
3707 rval));
3708 return (rval);
3709 }
3710
3711 static void
3712 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3713 struct mrsas_tbolt_pd_info *pds, int tgt)
3714 {
3715 struct mrsas_cmd *cmd;
3716 struct mrsas_dcmd_frame *dcmd;
3717 dma_obj_t dcmd_dma_obj;
3718
3719 cmd = get_raid_msg_pkt(instance);
3720
3721 if (!cmd) {
3722 con_log(CL_ANN1,
3723 (CE_WARN, "Failed to get a cmd for get pd info"));
3724 return;
3725 }
3726
3727 /* Clear the frame buffer and assign back the context id */
3728 bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3729 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3730 cmd->index);
3731
3732
3733 dcmd = &cmd->frame->dcmd;
3734 dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3735 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3736 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3737 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3738 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3739 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3740
3741 (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3742 DDI_STRUCTURE_LE_ACC);
3743 bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3744 bzero(dcmd->mbox.b, 12);
3745 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3746 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3747 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3748 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3749 MFI_FRAME_DIR_READ);
3750 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3751 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3752 sizeof (struct mrsas_tbolt_pd_info));
3753 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3754 MR_DCMD_PD_GET_INFO);
3755 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3756 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3757 sizeof (struct mrsas_tbolt_pd_info));
3758 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3759 dcmd_dma_obj.dma_cookie[0].dmac_address);
3760
3761 cmd->sync_cmd = MRSAS_TRUE;
3762 cmd->frame_count = 1;
3763
3764 if (instance->tbolt) {
3765 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3766 }
3767
3768 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3769
|