Code review comments
--- old/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
+++ new/usr/src/uts/common/io/mr_sas/mr_sas_tbolt.c
1 1 /*
2 2 * mr_sas_tbolt.c: source for mr_sas driver for New Generation
3 3 * controllers, i.e. Thunderbolt and Invader
4 4 *
5 5 * Solaris MegaRAID device driver for SAS2.0 controllers
6 6 * Copyright (c) 2008-2012, LSI Logic Corporation.
7 7 * All rights reserved.
8 8 *
9 9 * Version:
10 10 * Author:
11 11 * Swaminathan K S
12 12 * Arun Chandrashekhar
13 13 * Manju R
14 14 * Rasheed
15 15 * Shakeel Bukhari
16 16 */
17 17
18 18
19 19 #include <sys/types.h>
20 20 #include <sys/file.h>
21 21 #include <sys/atomic.h>
22 22 #include <sys/scsi/scsi.h>
23 23 #include <sys/byteorder.h>
24 24 #include "ld_pd_map.h"
25 25 #include "mr_sas.h"
26 26 #include "fusion.h"
27 27
28 28 /*
29 29 * FMA header files
30 30 */
31 31 #include <sys/ddifm.h>
32 32 #include <sys/fm/protocol.h>
33 33 #include <sys/fm/util.h>
34 34 #include <sys/fm/io/ddi.h>
35 35
36 36
37 37 /* Pre-TB command size and TB command size. */
38 38 #define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
39 39 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
40 40 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
41 41 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
42 42 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
43 43 extern ddi_dma_attr_t mrsas_generic_dma_attr;
44 44 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
45 45 extern struct ddi_device_acc_attr endian_attr;
46 46 extern int debug_level_g;
47 47 extern unsigned int enable_fp;
48 48 volatile int dump_io_wait_time = 90;
49 49 extern void
50 50 io_timeout_checker(void *arg);
51 51 extern volatile int debug_timeout_g;
52 52 extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
53 53 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
54 54 extern void push_pending_mfi_pkt(struct mrsas_instance *,
55 55 struct mrsas_cmd *);
56 56 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
57 57 MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
58 58
59 59 /* Local static prototypes. */
60 60 static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
61 61 struct scsi_address *, struct scsi_pkt *, uchar_t *);
62 62 static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
63 63 U64 start_blk, U32 num_blocks);
64 64 static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
65 65 static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
66 66 static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
67 67 static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
68 68 #ifdef PDSUPPORT
69 69 static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
70 70 struct mrsas_tbolt_pd_info *, int);
71 71 #endif /* PDSUPPORT */
72 72
73 73 static int debug_tbolt_fw_faults_after_ocr_g = 0;
74 74
75 75 /*
76 76 * destroy_mfi_mpi_frame_pool
77 77 */
78 78 void
79 79 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
80 80 {
81 81 int i;
82 82
83 83 struct mrsas_cmd *cmd;
84 84
85 85 /* return all mfi frames to pool */
86 86 for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
87 87 cmd = instance->cmd_list[i];
88 88 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
89 89 (void) mrsas_free_dma_obj(instance,
90 90 cmd->frame_dma_obj);
91 91 }
92 92 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
93 93 }
94 94 }
95 95
96 96 /*
97 97 * destroy_mpi2_frame_pool
98 98 */
99 99 void
100 100 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
101 101 {
102 102
103 103 if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
104 104 (void) mrsas_free_dma_obj(instance,
105 105 instance->mpi2_frame_pool_dma_obj);
106 106 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
107 107 }
108 108 }
109 109
110 110
111 111 /*
112 112 * mrsas_tbolt_free_additional_dma_buffer
113 113 */
114 114 void
115 115 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
116 116 {
117 117 int i;
118 118
119 119 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
120 120 (void) mrsas_free_dma_obj(instance,
121 121 instance->mfi_internal_dma_obj);
122 122 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
123 123 }
124 124 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
125 125 (void) mrsas_free_dma_obj(instance,
126 126 instance->mfi_evt_detail_obj);
127 127 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
128 128 }
129 129
130 130 for (i = 0; i < 2; i++) {
131 131 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
132 132 (void) mrsas_free_dma_obj(instance,
133 133 instance->ld_map_obj[i]);
134 134 instance->ld_map_obj[i].status = DMA_OBJ_FREED;
135 135 }
136 136 }
137 137 }
138 138
139 139
140 140 /*
141 141 * free_req_rep_desc_pool
142 142 */
143 143 void
144 144 free_req_rep_desc_pool(struct mrsas_instance *instance)
145 145 {
146 146 if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
147 147 (void) mrsas_free_dma_obj(instance,
148 148 instance->request_desc_dma_obj);
149 149 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
150 150 }
151 151
152 152 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
153 153 (void) mrsas_free_dma_obj(instance,
154 154 instance->reply_desc_dma_obj);
155 155 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
156 156 }
157 157
158 158
159 159 }
160 160
161 161
162 162 /*
163 163 * ThunderBolt(TB) Request Message Frame Pool
164 164 */
165 165 int
166 166 create_mpi2_frame_pool(struct mrsas_instance *instance)
167 167 {
168 168 int i = 0;
169 169 uint16_t max_cmd;
170 170 uint32_t sgl_sz;
171 171 uint32_t raid_msg_size;
172 172 uint32_t total_size;
173 173 uint32_t offset;
174 174 uint32_t io_req_base_phys;
175 175 uint8_t *io_req_base;
176 176 struct mrsas_cmd *cmd;
177 177
178 178 max_cmd = instance->max_fw_cmds;
179 179
180 180 sgl_sz = 1024;
181 181 raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
182 182
183 183 /* Allocating additional 256 bytes to accommodate SMID 0. */
184 184 total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
185 185 (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
186 186
187 187 con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
188 188 "max_cmd %x ", max_cmd));
189 189
190 190 con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
191 191 "request message frame pool size %x", total_size));
192 192
193 193 /*
194 194 * ThunderBolt(TB): we need to create a single chunk of DMA-able memory
195 195 * and then split it among up to 1024 commands. Each command should be
196 196 * able to contain a RAID MESSAGE FRAME which will embed an MFI_FRAME
197 197 * within it. For reference, see the "alloc_req_rep_desc" function,
198 198 * where we allocate the request/reply descriptor queues.
199 199 */
200 200
201 201 instance->mpi2_frame_pool_dma_obj.size = total_size;
202 202 instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
203 203 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
204 204 0xFFFFFFFFU;
205 205 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
206 206 0xFFFFFFFFU;
207 207 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
208 208 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
209 209
210 210 if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
211 211 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
212 212 cmn_err(CE_WARN,
213 213 "mr_sas: could not alloc mpi2 frame pool");
214 214 return (DDI_FAILURE);
215 215 }
216 216
217 217 bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
218 218 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
219 219
220 220 instance->io_request_frames =
221 221 (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
222 222 instance->io_request_frames_phy =
223 223 (uint32_t)
224 224 instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
225 225
226 226 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
227 227 (void *)instance->io_request_frames));
228 228
229 229 con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
230 230 instance->io_request_frames_phy));
231 231
232 232 io_req_base = (uint8_t *)instance->io_request_frames +
233 233 MRSAS_THUNDERBOLT_MSG_SIZE;
234 234 io_req_base_phys = instance->io_request_frames_phy +
235 235 MRSAS_THUNDERBOLT_MSG_SIZE;
236 236
237 237 con_log(CL_DLEVEL3, (CE_NOTE,
238 238 "io req_base_phys 0x%x", io_req_base_phys));
239 239
240 240 for (i = 0; i < max_cmd; i++) {
241 241 cmd = instance->cmd_list[i];
242 242
243 243 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
244 244
245 245 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
246 246 ((uint8_t *)io_req_base + offset);
247 247 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
248 248
249 249 cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
250 250 (max_cmd * raid_msg_size) + i * sgl_sz);
251 251
252 252 cmd->sgl_phys_addr = (io_req_base_phys +
253 253 (max_cmd * raid_msg_size) + i * sgl_sz);
254 254
255 255 cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
256 256 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
257 257 (i * SENSE_LENGTH));
258 258
259 259 cmd->sense_phys_addr1 = (io_req_base_phys +
260 260 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
261 261 (i * SENSE_LENGTH));
262 262
263 263
264 264 cmd->SMID = i + 1;
265 265
266 266 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
267 267 cmd->index, (void *)cmd->scsi_io_request));
268 268
269 269 con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
270 270 cmd->index, cmd->scsi_io_request_phys_addr));
271 271
272 272 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
273 273 cmd->index, (void *)cmd->sense1));
274 274
275 275 con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
276 276 cmd->index, cmd->sense_phys_addr1));
277 277
278 278 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers [%x]0x%p",
279 279 cmd->index, (void *)cmd->sgl));
280 280
281 281 con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers phys [%x]0x%x",
282 282 cmd->index, cmd->sgl_phys_addr));
283 283 }
284 284
285 285 return (DDI_SUCCESS);
286 286
287 287 }
288 288
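A note on the carve-up above: the pool is one DMA chunk laid out as [one
skipped frame for SMID 0][max_cmd message frames][max_cmd chain SGL
buffers][max_cmd sense buffers]. Below is a minimal standalone sketch of
the per-command offset arithmetic. The sizes are assumptions: a 256-byte
message frame (implied by the "additional 256 bytes" comment), the
1024-byte sgl_sz from this function, and a hypothetical 64-byte sense
length.

	/* Illustrative only; mirrors the loop in create_mpi2_frame_pool(). */
	#include <stdio.h>

	#define	MSG_SZ		256	/* assumed MRSAS_THUNDERBOLT_MSG_SIZE */
	#define	SGL_SZ		1024	/* per-command chain SGL buffer */
	#define	SENSE_SZ	64	/* assumed SENSE_LENGTH */

	int
	main(void)
	{
		unsigned int max_cmd = 1007;	/* example value */
		unsigned int i = 5;		/* example index; SMID = i + 1 */

		/* Offsets from the start of the pool; frame 0 is SMID 0. */
		unsigned int frame = MSG_SZ + i * MSG_SZ;
		unsigned int sgl = MSG_SZ + max_cmd * MSG_SZ + i * SGL_SZ;
		unsigned int sense = MSG_SZ + max_cmd * MSG_SZ +
		    max_cmd * SGL_SZ + i * SENSE_SZ;

		(void) printf("frame %u sgl %u sense %u\n", frame, sgl, sense);
		return (0);
	}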
289 289
290 290 /*
291 291 * alloc_additional_dma_buffer for AEN
292 292 */
293 293 int
294 294 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
295 295 {
296 296 uint32_t internal_buf_size = PAGESIZE*2;
297 297 int i;
298 298
299 299 /* Initialize buffer status as free */
300 300 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
301 301 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
302 302 instance->ld_map_obj[0].status = DMA_OBJ_FREED;
303 303 instance->ld_map_obj[1].status = DMA_OBJ_FREED;
304 304
305 305
306 306 instance->mfi_internal_dma_obj.size = internal_buf_size;
307 307 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
308 308 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
309 309 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
310 310 0xFFFFFFFFU;
311 311 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
312 312
313 313 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
314 314 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
315 315 cmn_err(CE_WARN,
316 316 "mr_sas: could not alloc reply queue");
317 317 return (DDI_FAILURE);
318 318 }
319 319
320 320 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
321 321
322 322 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
323 323 instance->internal_buf =
324 324 (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
325 325 instance->internal_buf_dmac_add =
326 326 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
327 327 instance->internal_buf_size = internal_buf_size;
328 328
329 329 /* allocate evt_detail */
330 330 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
331 331 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
332 332 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
333 333 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
334 334 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
335 335 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
336 336
337 337 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
338 338 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
339 339 cmn_err(CE_WARN, "mrsas_tbolt_alloc_additional_dma_buffer: "
340 340 "could not allocate data transfer buffer.");
341 341 goto fail_tbolt_additional_buff;
342 342 }
343 343
344 344 bzero(instance->mfi_evt_detail_obj.buffer,
345 345 sizeof (struct mrsas_evt_detail));
346 346
347 347 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
348 348
349 349 instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
350 350 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
351 351
352 352 for (i = 0; i < 2; i++) {
353 353 /* allocate the data transfer buffer */
354 354 instance->ld_map_obj[i].size = instance->size_map_info;
355 355 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
356 356 instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
357 357 instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
358 358 0xFFFFFFFFU;
359 359 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
360 360 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
361 361
362 362 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
363 363 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
364 364 cmn_err(CE_WARN,
365 365 "could not allocate data transfer buffer.");
366 366 goto fail_tbolt_additional_buff;
367 367 }
368 368
369 369 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
370 370
371 371 (void) memset(instance->ld_map_obj[i].buffer, 0,
372 372 instance->size_map_info);
373 373
374 374 instance->ld_map[i] =
375 375 (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
376 376 instance->ld_map_phy[i] = (uint32_t)instance->
377 377 ld_map_obj[i].dma_cookie[0].dmac_address;
378 378
379 379 con_log(CL_DLEVEL3, (CE_NOTE,
380 380 "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
381 381
382 382 con_log(CL_DLEVEL3, (CE_NOTE,
383 383 "size_map_info 0x%x", instance->size_map_info));
384 384 }
385 385
386 386 return (DDI_SUCCESS);
387 387
388 388 fail_tbolt_additional_buff:
389 389 mrsas_tbolt_free_additional_dma_buffer(instance);
390 390
391 391 return (DDI_FAILURE);
392 392 }
393 393
394 394 MRSAS_REQUEST_DESCRIPTOR_UNION *
395 395 mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
396 396 {
397 397 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
398 398
399 399 if (index >= instance->max_fw_cmds) {
400 400 con_log(CL_ANN1, (CE_NOTE,
401 401 "Invalid SMID 0x%x request for descriptor", index));
402 402 con_log(CL_ANN1, (CE_NOTE,
403 403 "max_fw_cmds : 0x%x\n", instance->max_fw_cmds));
404 404 return (NULL);
405 405 }
406 406
407 407 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
408 408 ((char *)instance->request_message_pool +
409 409 (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
410 410
411 411 con_log(CL_ANN1, (CE_NOTE,
412 412 "request descriptor : 0x%08lx\n", (unsigned long)req_desc));
413 413
414 414 con_log(CL_ANN1, (CE_NOTE,
415 415 "request descriptor base phy : 0x%08lx\n",
416 416 (unsigned long)instance->request_message_pool_phy));
417 417
418 418 return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
419 419 }
420 420
421 421
422 422 /*
423 423 * Allocate Request and Reply Queue Descriptors.
424 424 */
425 425 int
426 426 alloc_req_rep_desc(struct mrsas_instance *instance)
427 427 {
428 428 uint32_t request_q_sz, reply_q_sz;
429 429 int i, max_reply_q_sz;
430 430 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
431 431
432 432 /*
433 433 * ThunderBolt(TB): there is no longer a producer-consumer mechanism.
434 434 * Once we get an interrupt, we are supposed to scan through the list
435 435 * of reply descriptors and process them accordingly. We need to
436 436 * allocate memory for up to 1024 reply descriptors.
437 437 */
438 438
439 439 /* Allocate Reply Descriptors */
440 440 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
441 441 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
442 442
443 443 /* reply queue size should be multiple of 16 */
444 444 max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
445 445
446 446 reply_q_sz = 8 * max_reply_q_sz;
447 447
448 448
449 449 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
450 450 (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
451 451
452 452 instance->reply_desc_dma_obj.size = reply_q_sz;
453 453 instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
454 454 instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
455 455 instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
456 456 instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
457 457 instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
458 458
459 459 if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
460 460 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
461 461 cmn_err(CE_WARN,
462 462 "mr_sas: could not alloc reply queue");
463 463 return (DDI_FAILURE);
464 464 }
465 465
466 466 bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
467 467 instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
468 468
469 469 /* virtual address of reply queue */
470 470 instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
471 471 instance->reply_desc_dma_obj.buffer);
472 472
473 473 instance->reply_q_depth = max_reply_q_sz;
474 474
475 475 con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
476 476 instance->reply_q_depth));
477 477
478 478 con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
479 479 (void *)instance->reply_frame_pool));
480 480
481 481 /* initializing reply address to 0xFFFFFFFF */
482 482 reply_desc = instance->reply_frame_pool;
483 483
484 484 for (i = 0; i < instance->reply_q_depth; i++) {
485 485 reply_desc->Words = (uint64_t)~0;
486 486 reply_desc++;
487 487 }
488 488
489 489
490 490 instance->reply_frame_pool_phy =
491 491 (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
492 492
493 493 con_log(CL_ANN1, (CE_NOTE,
494 494 "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
495 495
496 496
497 497 instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
498 498 reply_q_sz);
499 499
500 500 con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
501 501 instance->reply_pool_limit_phy));
502 502
503 503
504 504 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
505 505 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
506 506
507 507 /* Allocate Request Descriptors */
508 508 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
509 509 (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
510 510
511 511 request_q_sz = 8 *
512 512 (instance->max_fw_cmds);
513 513
514 514 instance->request_desc_dma_obj.size = request_q_sz;
515 515 instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
516 516 instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
517 517 instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
518 518 0xFFFFFFFFU;
519 519 instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
520 520 instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
521 521
522 522 if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
523 523 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
524 524 cmn_err(CE_WARN,
525 525 "mr_sas: could not alloc request queue desc");
526 526 goto fail_undo_reply_queue;
527 527 }
528 528
529 529 bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
530 530 instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
531 531
532 532 /* virtual address of request queue desc */
533 533 instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
534 534 (instance->request_desc_dma_obj.buffer);
535 535
536 536 instance->request_message_pool_phy =
537 537 (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
538 538
539 539 return (DDI_SUCCESS);
540 540
541 541 fail_undo_reply_queue:
542 542 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
543 543 (void) mrsas_free_dma_obj(instance,
544 544 instance->reply_desc_dma_obj);
545 545 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
546 546 }
547 547
548 548 return (DDI_FAILURE);
549 549 }
550 550
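As a quick check of the sizing above (a sketch; the 8-byte descriptor
size comes straight from the 8 * depth multiplier used for reply_q_sz,
and 1007 is the example post-cap value from mrsas_init_adapter_tbolt()
below):

	#include <stdio.h>

	int
	main(void)
	{
		unsigned int max_fw_cmds = 1007;	/* example value */
		/* one extra slot, rounded up to a multiple of 16 */
		unsigned int depth = ((max_fw_cmds + 1 + 15) / 16) * 16;
		unsigned int reply_q_sz = 8 * depth;	/* 8-byte descriptors */

		(void) printf("depth %u, %u bytes\n", depth, reply_q_sz);
		/* prints: depth 1008, 8064 bytes */
		return (0);
	}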
551 551 /*
552 552 * mrsas_alloc_cmd_pool_tbolt
553 553 *
554 - * TODO: merge tbolt-specific codee into mrsas_alloc_cmd_pool() to have single
554 + * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
555 555 * routine
556 556 */
557 557 int
558 558 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
559 559 {
560 560 int i;
561 561 int count;
562 562 uint32_t max_cmd;
563 563 uint32_t reserve_cmd;
564 564 size_t sz;
565 565
566 566 struct mrsas_cmd *cmd;
567 567
568 568 max_cmd = instance->max_fw_cmds;
569 569 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
570 570 "max_cmd %x", max_cmd));
571 571
572 572
573 573 sz = sizeof (struct mrsas_cmd *) * max_cmd;
574 574
575 575 /*
576 576 * instance->cmd_list is an array of struct mrsas_cmd pointers.
577 577 * Allocate the dynamic array first and then allocate individual
578 578 * commands.
579 579 */
580 580 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
581 581 if (instance->cmd_list == NULL) {
582 582 con_log(CL_NONE, (CE_WARN,
583 583 "Failed to allocate memory for cmd_list"));
584 584 return (DDI_FAILURE);
585 585 }
586 586
587 587 /* create a frame pool and assign one frame to each cmd */
588 588 for (count = 0; count < max_cmd; count++) {
589 589 instance->cmd_list[count] =
590 590 kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
591 591 if (instance->cmd_list[count] == NULL) {
592 592 con_log(CL_NONE, (CE_WARN,
593 593 "Failed to allocate memory for mrsas_cmd"));
594 594 goto mrsas_undo_cmds;
595 595 }
596 596 }
597 597
598 598 /* add all the commands to command pool */
599 599
600 600 INIT_LIST_HEAD(&instance->cmd_pool_list);
601 601 INIT_LIST_HEAD(&instance->cmd_pend_list);
602 602 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
603 603
604 604 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
605 605
606 606 /* cmd index 0 reserved for IOC INIT */
607 607 for (i = 1; i < reserve_cmd; i++) {
608 608 cmd = instance->cmd_list[i];
609 609 cmd->index = i;
610 610 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
611 611 }
612 612
613 613
614 614 for (i = reserve_cmd; i < max_cmd; i++) {
615 615 cmd = instance->cmd_list[i];
616 616 cmd->index = i;
617 617 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
618 618 }
619 619
620 620 return (DDI_SUCCESS);
621 621
622 622 mrsas_undo_cmds:
623 623 if (count > 0) {
624 624 /* free each cmd */
625 625 for (i = 0; i < count; i++) {
626 626 if (instance->cmd_list[i] != NULL) {
627 627 kmem_free(instance->cmd_list[i],
628 628 sizeof (struct mrsas_cmd));
629 629 }
630 630 instance->cmd_list[i] = NULL;
631 631 }
632 632 }
633 633
634 634 mrsas_undo_cmd_list:
635 635 if (instance->cmd_list != NULL)
636 636 kmem_free(instance->cmd_list, sz);
637 637 instance->cmd_list = NULL;
638 638
639 639 return (DDI_FAILURE);
640 640 }
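The resulting partition of cmd_list (illustrative; N stands for
MRSAS_APP_RESERVED_CMDS, whose value is not shown in this hunk):

	/*
	 * index 0                 reserved for IOC INIT, never pooled
	 * indices 1 .. N-1        -> cmd_app_pool_list (application cmds)
	 * indices N .. max_cmd-1  -> cmd_pool_list (regular I/O)
	 */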
641 641
642 642
643 643 /*
644 644 * free_space_for_mpi2
645 645 */
646 646 void
647 647 free_space_for_mpi2(struct mrsas_instance *instance)
648 648 {
649 649 /* already freed */
650 650 if (instance->cmd_list == NULL) {
651 651 return;
652 652 }
653 653
654 654 /* First free the additional DMA buffer */
655 655 mrsas_tbolt_free_additional_dma_buffer(instance);
656 656
657 657 /* Free the request/reply descriptor pool */
658 658 free_req_rep_desc_pool(instance);
659 659
660 660 /* Free the MPI message pool */
661 661 destroy_mpi2_frame_pool(instance);
662 662
663 663 /* Free the MFI frame pool */
664 664 destroy_mfi_frame_pool(instance);
665 665
666 666 /* Free all the commands in the cmd_list */
667 667 /* Free the cmd_list buffer itself */
668 668 mrsas_free_cmd_pool(instance);
669 669 }
670 670
671 671
672 672 /*
673 673 * ThunderBolt(TB) memory allocations for commands/messages/frames.
674 674 */
675 675 int
676 676 alloc_space_for_mpi2(struct mrsas_instance *instance)
677 677 {
678 678 /* Allocate command pool (memory for cmd_list & individual commands) */
679 679 if (mrsas_alloc_cmd_pool_tbolt(instance)) {
680 680 cmn_err(CE_WARN, "Error creating cmd pool");
681 681 return (DDI_FAILURE);
682 682 }
683 683
684 684 /* Initialize single reply size and Message size */
685 685 instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
686 686 instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
687 687
688 688 instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
689 689 (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
690 690 sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
691 691 instance->max_sge_in_chain = (MR_COMMAND_SIZE -
692 692 MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
693 693
694 694 /* Reduce SG count by 1 to take care of group cmds feature in FW */
695 695 instance->max_num_sge = (instance->max_sge_in_main_msg +
696 696 instance->max_sge_in_chain - 2);
697 697 instance->chain_offset_mpt_msg =
698 698 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
699 699 instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
700 700 sizeof (MPI2_SGE_IO_UNION)) / 16;
701 701 instance->reply_read_index = 0;
702 702
703 703
704 704 /* Allocate Request and Reply descriptors Array */
705 705 /* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
706 706 if (alloc_req_rep_desc(instance)) {
707 707 cmn_err(CE_WARN,
708 708 "Error, allocating memory for descriptor pool");
709 709 goto mpi2_undo_cmd_pool;
710 710 }
711 711 con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
712 712 instance->request_message_pool_phy));
713 713
714 714
715 715 /* Allocate MFI Frame pool - for MPI-MFI passthru commands */
716 716 if (create_mfi_frame_pool(instance)) {
717 717 cmn_err(CE_WARN,
718 718 "Error, allocating memory for MFI frame-pool");
719 719 goto mpi2_undo_descripter_pool;
720 720 }
721 721
722 722
723 723 /* Allocate MPI2 Message pool */
724 724 /*
725 725 * Make sure the buffer is aligned to 256 for the raid message packet;
726 726 * create an io request pool and assign one frame to each cmd.
727 727 */
728 728
729 729 if (create_mpi2_frame_pool(instance)) {
730 730 cmn_err(CE_WARN,
731 731 "Error, allocating memory for MPI2 Message-pool");
732 732 goto mpi2_undo_mfi_frame_pool;
733 733 }
734 734
735 735 #ifdef DEBUG
736 736 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
737 737 instance->max_sge_in_main_msg));
738 738 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
739 739 instance->max_sge_in_chain));
740 740 con_log(CL_ANN1, (CE_CONT,
741 741 "[max_sge]0x%x", instance->max_num_sge));
742 742 con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
743 743 instance->chain_offset_mpt_msg));
744 744 con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
745 745 instance->chain_offset_io_req));
746 746 #endif
747 747
748 748
749 749 /* Allocate additional dma buffer */
750 750 if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
751 751 cmn_err(CE_WARN,
752 752 "Error, allocating tbolt additional DMA buffer");
753 753 goto mpi2_undo_message_pool;
754 754 }
755 755
756 756 return (DDI_SUCCESS);
757 757
758 758 mpi2_undo_message_pool:
759 759 destroy_mpi2_frame_pool(instance);
760 760
761 761 mpi2_undo_mfi_frame_pool:
762 762 destroy_mfi_frame_pool(instance);
763 763
764 764 mpi2_undo_descripter_pool:
765 765 free_req_rep_desc_pool(instance);
766 766
767 767 mpi2_undo_cmd_pool:
768 768 mrsas_free_cmd_pool(instance);
769 769
770 770 return (DDI_FAILURE);
771 771 }
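A sanity check on the chain budget computed above (a sketch under two
assumptions: the Thunderbolt message frame is 256 bytes, as implied by
the "additional 256 bytes" comment in create_mpi2_frame_pool(), and an
MPI2_SGE_IO_UNION is 16 bytes):

	#include <assert.h>

	#define	MR_COMMAND_SIZE	(64 * 20)	/* 1280, from this file */
	#define	TBOLT_MSG_SIZE	256		/* assumption */
	#define	SGE_UNION_SIZE	16		/* assumption */

	int
	main(void)
	{
		int max_sge_in_chain =
		    (MR_COMMAND_SIZE - TBOLT_MSG_SIZE) / SGE_UNION_SIZE;

		/* 64 entries * 16 bytes == the 1024-byte per-cmd sgl buffer */
		assert(max_sge_in_chain * SGE_UNION_SIZE == 1024);
		return (0);
	}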
772 772
773 773
774 774 /*
775 775 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
776 776 */
777 777 int
778 778 mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
779 779 {
780 780
781 781 /*
782 782 * Reduce the max supported cmds by 1. This is to ensure that the
783 783 * reply_q_sz (1 more than the max cmds that the driver may send)
784 784 * does not exceed the max cmds that the FW can support.
785 785 */
786 786
787 787 if (instance->max_fw_cmds > 1008) {
788 788 instance->max_fw_cmds = 1008;
789 789 instance->max_fw_cmds = instance->max_fw_cmds-1;
790 790 }
791 791
792 792 con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
793 793 " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
794 794
795 795
796 796 /* create a pool of commands */
797 797 if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
798 798 cmn_err(CE_WARN,
799 799 " alloc_space_for_mpi2() failed.");
800 800
801 801 return (DDI_FAILURE);
802 802 }
803 803
804 804 /* Send ioc init message */
805 805 /* NOTE: the issue_init call does FMA checking already. */
806 806 if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
807 807 cmn_err(CE_WARN,
808 808 " mrsas_issue_init_mpi2() failed.");
809 809
810 810 goto fail_init_fusion;
811 811 }
812 812
813 813 instance->unroll.alloc_space_mpi2 = 1;
814 814
815 815 con_log(CL_ANN, (CE_NOTE,
816 816 "mrsas_init_adapter_tbolt: SUCCESSFUL\n"));
817 817
818 818 return (DDI_SUCCESS);
819 819
820 820 fail_init_fusion:
821 821 free_space_for_mpi2(instance);
822 822
823 823 return (DDI_FAILURE);
824 824 }
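To see why the extra decrement matters, run the reply-queue rounding from
alloc_req_rep_desc() by hand: with max_fw_cmds = 1008, the depth becomes
((1008 + 1 + 15) / 16) * 16 = 1024 reply descriptors, 16 more than the
1008 commands the firmware is taken to support; with max_fw_cmds = 1007
the depth lands exactly on 1008.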
825 825
826 826
827 827
828 828 /*
829 829 * mrsas_issue_init_mpi2
830 830 */
831 831 int
832 832 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
833 833 {
834 834 dma_obj_t init2_dma_obj;
835 835 int ret_val = DDI_SUCCESS;
836 836
837 837 /* allocate DMA buffer for IOC INIT message */
838 838 init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
839 839 init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
840 840 init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
841 841 init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
842 842 init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
843 843 init2_dma_obj.dma_attr.dma_attr_align = 256;
844 844
845 845 if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
846 846 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
847 847 cmn_err(CE_WARN, "mrsas_issue_init_mpi2: "
848 848 "could not allocate data transfer buffer.");
849 849 return (DDI_FAILURE);
850 850 }
851 851 (void) memset(init2_dma_obj.buffer, 2,
852 852 sizeof (Mpi2IOCInitRequest_t));
853 853
854 854 con_log(CL_ANN1, (CE_NOTE,
855 855 "mrsas_issue_init_mpi2 _phys adr: %x \n",
856 856 init2_dma_obj.dma_cookie[0].dmac_address));
857 857
858 858
859 859 /* Initialize and send ioc init message */
860 860 ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
861 861 if (ret_val == DDI_FAILURE) {
862 862 con_log(CL_ANN1, (CE_WARN,
863 863 "mrsas_issue_init_mpi2: Failed\n"));
864 864 goto fail_init_mpi2;
865 865 }
866 866
867 867 /* free IOC init DMA buffer */
868 868 if (mrsas_free_dma_obj(instance, init2_dma_obj)
869 869 != DDI_SUCCESS) {
870 870 con_log(CL_ANN1, (CE_WARN,
871 871 "mrsas_issue_init_mpi2: Free Failed\n"));
872 872 return (DDI_FAILURE);
873 873 }
874 874
875 875 /* Get/Check and sync ld_map info */
876 876 instance->map_id = 0;
877 877 if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
878 878 (void) mrsas_tbolt_sync_map_info(instance);
879 879
880 880
881 881 /* No mrsas_cmd to send, so send NULL. */
882 882 if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
883 883 goto fail_init_mpi2;
884 884
885 885 con_log(CL_ANN, (CE_NOTE,
886 886 "mrsas_issue_init_mpi2: SUCCESSFUL\n"));
887 887
888 888 return (DDI_SUCCESS);
889 889
890 890 fail_init_mpi2:
891 891 (void) mrsas_free_dma_obj(instance, init2_dma_obj);
892 892
893 893 return (DDI_FAILURE);
894 894 }
895 895
896 896 static int
897 897 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
898 898 {
899 899 int numbytes;
900 900 uint16_t flags;
901 901 struct mrsas_init_frame2 *mfiFrameInit2;
902 902 struct mrsas_header *frame_hdr;
903 903 Mpi2IOCInitRequest_t *init;
904 904 struct mrsas_cmd *cmd = NULL;
905 905 struct mrsas_drv_ver drv_ver_info;
906 906 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
907 907
908 908 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
909 909
910 910
911 911 #ifdef DEBUG
912 912 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
913 913 (int)sizeof (*mfiFrameInit2)));
914 914 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
915 915 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
916 916 (int)sizeof (struct mrsas_init_frame2)));
917 917 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
918 918 (int)sizeof (Mpi2IOCInitRequest_t)));
919 919 #endif
920 920
921 921 init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
922 922 numbytes = sizeof (*init);
923 923 bzero(init, numbytes);
924 924
925 925 ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
926 926 MPI2_FUNCTION_IOC_INIT);
927 927
928 928 ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
929 929 MPI2_WHOINIT_HOST_DRIVER);
930 930
931 931 /* set MsgVersion and HeaderVersion host driver was built with */
932 932 ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
933 933 MPI2_VERSION);
934 934
935 935 ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
936 936 MPI2_HEADER_VERSION);
937 937
938 938 ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
939 939 instance->raid_io_msg_size / 4);
940 940
941 941 ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
942 942 0);
943 943
944 944 ddi_put16(mpi2_dma_obj->acc_handle,
945 945 &init->ReplyDescriptorPostQueueDepth,
946 946 instance->reply_q_depth);
947 947 /*
948 948 * These addresses are set using the DMA cookie addresses from when the
949 949 * memory was allocated. Sense buffer hi address should be 0.
950 950 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
951 951 */
952 952
953 953 ddi_put32(mpi2_dma_obj->acc_handle,
954 954 &init->SenseBufferAddressHigh, 0);
955 955
956 956 ddi_put64(mpi2_dma_obj->acc_handle,
957 957 (uint64_t *)&init->SystemRequestFrameBaseAddress,
958 958 instance->io_request_frames_phy);
959 959
960 960 ddi_put64(mpi2_dma_obj->acc_handle,
961 961 &init->ReplyDescriptorPostQueueAddress,
962 962 instance->reply_frame_pool_phy);
963 963
964 964 ddi_put64(mpi2_dma_obj->acc_handle,
965 965 &init->ReplyFreeQueueAddress, 0);
966 966
967 967 cmd = instance->cmd_list[0];
968 968 if (cmd == NULL) {
969 969 return (DDI_FAILURE);
970 970 }
971 971 cmd->retry_count_for_ocr = 0;
972 972 cmd->pkt = NULL;
973 973 cmd->drv_pkt_time = 0;
974 974
975 975 mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
976 976 con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
977 977
978 978 frame_hdr = &cmd->frame->hdr;
979 979
980 980 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
981 981 MFI_CMD_STATUS_POLL_MODE);
982 982
983 983 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
984 984
985 985 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
986 986
987 987 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
988 988
989 989 con_log(CL_ANN, (CE_CONT,
990 990 "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
991 991
992 992 /* Init the MFI Header */
993 993 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
994 994 &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
995 995
996 996 con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
997 997
998 998 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
999 999 &mfiFrameInit2->cmd_status,
1000 1000 MFI_STAT_INVALID_STATUS);
1001 1001
1002 1002 con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
1003 1003
1004 1004 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1005 1005 &mfiFrameInit2->queue_info_new_phys_addr_lo,
1006 1006 mpi2_dma_obj->dma_cookie[0].dmac_address);
1007 1007
1008 1008 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1009 1009 &mfiFrameInit2->data_xfer_len,
1010 1010 sizeof (Mpi2IOCInitRequest_t));
1011 1011
1012 1012 con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1013 1013 (int)init->ReplyDescriptorPostQueueAddress));
1014 1014
1015 1015 /* fill driver version information */
1016 1016 fill_up_drv_ver(&drv_ver_info);
1017 1017
1018 1018 /* allocate the driver version data transfer buffer */
1019 1019 instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1020 1020 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1021 1021 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1022 1022 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1023 1023 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1024 1024 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1025 1025
1026 1026 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1027 1027 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1028 1028 cmn_err(CE_WARN,
1029 1029 "fusion init: Could not allocate driver version buffer.");
1030 1030 return (DDI_FAILURE);
1031 1031 }
1032 1032 /* copy driver version to dma buffer */
1033 1033 (void) memset(instance->drv_ver_dma_obj.buffer, 0,
1034 1034 sizeof (drv_ver_info.drv_ver));
1035 1035 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1036 1036 (uint8_t *)drv_ver_info.drv_ver,
1037 1037 (uint8_t *)instance->drv_ver_dma_obj.buffer,
1038 1038 sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1039 1039
1040 1040 /* send driver version physical address to firmware */
1041 1041 ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1042 1042 instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1043 1043
1044 1044 con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1045 1045 mfiFrameInit2->queue_info_new_phys_addr_lo,
1046 1046 (int)sizeof (Mpi2IOCInitRequest_t)));
1047 1047
1048 1048 con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1049 1049
1050 1050 con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1051 1051 cmd->scsi_io_request_phys_addr,
1052 1052 (int)sizeof (struct mrsas_init_frame2)));
1053 1053
1054 1054 /* disable interrupts before sending INIT2 frame */
1055 1055 instance->func_ptr->disable_intr(instance);
1056 1056
1057 1057 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1058 1058 instance->request_message_pool;
1059 1059 req_desc->Words = cmd->scsi_io_request_phys_addr;
1060 1060 req_desc->MFAIo.RequestFlags =
1061 1061 (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1062 1062
1063 1063 cmd->request_desc = req_desc;
1064 1064
1065 1065 /* issue the init frame */
1066 1066 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1067 1067
1068 1068 con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1069 1069 con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
1070 1070 frame_hdr->cmd_status));
1071 1071
1072 1072 if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1073 1073 &mfiFrameInit2->cmd_status) == 0) {
1074 1074 con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1075 1075 } else {
1076 1076 con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1077 1077 mrsas_dump_reply_desc(instance);
1078 1078 goto fail_ioc_init;
1079 1079 }
1080 1080
1081 1081 mrsas_dump_reply_desc(instance);
1082 1082
1083 1083 instance->unroll.verBuff = 1;
1084 1084
1085 1085 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL\n"));
1086 1086
1087 1087 return (DDI_SUCCESS);
1088 1088
1089 1089
1090 1090 fail_ioc_init:
1091 1091
1092 1092 (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1093 1093
1094 1094 return (DDI_FAILURE);
1095 1095 }
1096 1096
1097 1097 int
1098 1098 wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1099 1099 {
1100 1100 int i;
1101 1101 uint32_t wait_time = dump_io_wait_time;
1102 1102 for (i = 0; i < wait_time; i++) {
1103 1103 /*
1104 1104 * Check for outstanding polled commands,
1105 1105 * except the ldsync and aen commands.
1106 1106 */
1107 1107 if (instance->fw_outstanding <= 2) {
1108 1108 break;
1109 1109 }
1110 1110 drv_usecwait(10*MILLISEC);
1111 1111 /* complete commands from reply queue */
1112 1112 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1113 1113 }
1114 1114 if (instance->fw_outstanding > 2) {
1115 1115 return (1);
1116 1116 }
1117 1117 return (0);
1118 1118 }
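For scale: drv_usecwait(10 * MILLISEC) spins for 10 ms per pass (MILLISEC
is 1,000 microseconds), so the default dump_io_wait_time of 90 gives
roughly 0.9 s of polling, plus whatever time each
mr_sas_tbolt_process_outstanding_cmd() call adds.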
1119 1119 /*
1120 1120 * scsi_pkt handling
1121 1121 *
1122 1122 * Visible to the external world via the transport structure.
1123 1123 */
1124 1124
1125 1125 int
1126 1126 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1127 1127 {
1128 1128 struct mrsas_instance *instance = ADDR2MR(ap);
1129 1129 struct scsa_cmd *acmd = PKT2CMD(pkt);
1130 1130 struct mrsas_cmd *cmd = NULL;
1131 1131 uchar_t cmd_done = 0;
1132 1132
1133 1133 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1134 1134 if (instance->deadadapter == 1) {
1135 1135 cmn_err(CE_WARN,
1136 1136 "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1137 1137 "for IO, as the HBA doesn't take any more IOs");
1138 1138 if (pkt) {
1139 1139 pkt->pkt_reason = CMD_DEV_GONE;
1140 1140 pkt->pkt_statistics = STAT_DISCON;
1141 1141 }
1142 1142 return (TRAN_FATAL_ERROR);
1143 1143 }
1144 1144 if (instance->adapterresetinprogress) {
1145 1145 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1146 1146 "returning mfi_pkt and setting TRAN_BUSY\n"));
1147 1147 return (TRAN_BUSY);
1148 1148 }
1149 1149 (void) mrsas_tbolt_prepare_pkt(acmd);
1150 1150
1151 1151 cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1152 1152
1153 1153 /*
1154 1154 * Check if the command was already completed by the mrsas_build_cmd()
1155 1155 * routine, in which case the busy flag will be clear, scb will be
1156 1156 * NULL, and an appropriate reason is provided in the pkt_reason field.
1157 1157 */
1158 1158 if (cmd_done) {
1159 1159 pkt->pkt_reason = CMD_CMPLT;
1160 1160 pkt->pkt_scbp[0] = STATUS_GOOD;
1161 1161 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1162 1162 | STATE_SENT_CMD;
1163 1163 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1164 1164 (*pkt->pkt_comp)(pkt);
1165 1165 }
1166 1166
1167 1167 return (TRAN_ACCEPT);
1168 1168 }
1169 1169
1170 1170 if (cmd == NULL) {
1171 1171 return (TRAN_BUSY);
1172 1172 }
1173 1173
1174 1174
1175 1175 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1176 1176 if (instance->fw_outstanding > instance->max_fw_cmds) {
1177 1177 cmn_err(CE_WARN,
1178 1178 "Command Queue Full... Returning BUSY \n");
1179 1179 return_raid_msg_pkt(instance, cmd);
1180 1180 return (TRAN_BUSY);
1181 1181 }
1182 1182
1183 1183 /* Synchronize the Cmd frame for the controller */
1184 1184 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1185 1185 DDI_DMA_SYNC_FORDEV);
1186 1186
1187 1187 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1188 1188 "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1189 1189 cmd->index, cmd->SMID));
1190 1190
1191 1191 instance->func_ptr->issue_cmd(cmd, instance);
1192 1192 } else {
1193 1193 instance->func_ptr->issue_cmd(cmd, instance);
1194 1194 (void) wait_for_outstanding_poll_io(instance);
1195 1195 (void) mrsas_common_check(instance, cmd);
1196 1196 }
1197 1197
1198 1198 return (TRAN_ACCEPT);
1199 1199 }
1200 1200
1201 1201 /*
1202 1202 * prepare the pkt:
1203 1203 * the pkt may have been resubmitted or just reused so
1204 1204 * initialize some fields and do some checks.
1205 1205 */
1206 1206 static int
1207 1207 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1208 1208 {
1209 1209 struct scsi_pkt *pkt = CMD2PKT(acmd);
1210 1210
1211 1211
1212 1212 /*
1213 1213 * Reinitialize some fields that need it; the packet may
1214 1214 * have been resubmitted
1215 1215 */
1216 1216 pkt->pkt_reason = CMD_CMPLT;
1217 1217 pkt->pkt_state = 0;
1218 1218 pkt->pkt_statistics = 0;
1219 1219 pkt->pkt_resid = 0;
1220 1220
1221 1221 /*
1222 1222 * zero status byte.
1223 1223 */
1224 1224 *(pkt->pkt_scbp) = 0;
1225 1225
1226 1226 return (0);
1227 1227 }
1228 1228
1229 1229
1230 1230 int
1231 1231 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1232 1232 struct scsa_cmd *acmd,
1233 1233 struct mrsas_cmd *cmd,
1234 1234 Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1235 1235 uint32_t *datalen)
1236 1236 {
1237 1237 uint32_t MaxSGEs;
1238 1238 int sg_to_process;
1239 1239 uint32_t i, j;
1240 1240 uint32_t numElements, endElement;
1241 1241 Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
1242 1242 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
1243 1243 ddi_acc_handle_t acc_handle =
1244 1244 instance->mpi2_frame_pool_dma_obj.acc_handle;
1245 1245
1246 1246 con_log(CL_ANN1, (CE_NOTE,
1247 1247 "chkpnt: Building Chained SGL :%d", __LINE__));
1248 1248
1249 1249 /* Calculate SGE size in number of Words (32-bit). */
1250 1250 /* Clear the datalen before updating it. */
1251 1251 *datalen = 0;
1252 1252
1253 1253 MaxSGEs = instance->max_sge_in_main_msg;
1254 1254
1255 1255 ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1256 1256 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1257 1257
1258 1258 /* set data transfer flag. */
1259 1259 if (acmd->cmd_flags & CFLAG_DMASEND) {
1260 1260 ddi_put32(acc_handle, &scsi_raid_io->Control,
1261 1261 MPI2_SCSIIO_CONTROL_WRITE);
1262 1262 } else {
1263 1263 ddi_put32(acc_handle, &scsi_raid_io->Control,
1264 1264 MPI2_SCSIIO_CONTROL_READ);
1265 1265 }
1266 1266
1267 1267
1268 1268 numElements = acmd->cmd_cookiecnt;
1269 1269
1270 1270 con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1271 1271
1272 1272 if (numElements > instance->max_num_sge) {
1273 1273 con_log(CL_ANN, (CE_NOTE,
1274 1274 "[Max SGE Count Exceeded]:%x", numElements));
1275 1275 return (numElements);
1276 1276 }
1277 1277
1278 1278 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1279 1279 (uint8_t)numElements);
1280 1280
1281 1281 /* set end element in main message frame */
1282 1282 endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1283 1283
1284 1284 /* prepare the scatter-gather list for the firmware */
1285 1285 scsi_raid_io_sgl_ieee =
1286 1286 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1287 1287
1288 1288 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1289 1289 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1290 1290 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1291 1291
1292 1292 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1293 1293 }
1294 1294
1295 1295 for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1296 1296 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1297 1297 acmd->cmd_dmacookies[i].dmac_laddress);
1298 1298
1299 1299 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1300 1300 acmd->cmd_dmacookies[i].dmac_size);
1301 1301
1302 1302 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1303 1303
1304 1304 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1305 1305 if (i == (numElements - 1)) {
1306 1306 ddi_put8(acc_handle,
1307 1307 &scsi_raid_io_sgl_ieee->Flags,
1308 1308 IEEE_SGE_FLAGS_END_OF_LIST);
1309 1309 }
1310 1310 }
1311 1311
1312 1312 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1313 1313
1314 1314 #ifdef DEBUG
1315 1315 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1316 1316 scsi_raid_io_sgl_ieee->Address));
1317 1317 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1318 1318 scsi_raid_io_sgl_ieee->Length));
1319 1319 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1320 1320 scsi_raid_io_sgl_ieee->Flags));
1321 1321 #endif
1322 1322
1323 1323 }
1324 1324
1325 1325 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1326 1326
1327 1327 /* check if chained SGL required */
1328 1328 if (i < numElements) {
1329 1329
1330 1330 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1331 1331
1332 1332 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1333 1333 uint16_t ioFlags =
1334 1334 ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1335 1335
1336 1336 if ((ioFlags &
1337 1337 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1338 1338 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1339 1339 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1340 1340 (U8)instance->chain_offset_io_req);
1341 1341 } else {
1342 1342 ddi_put8(acc_handle,
1343 1343 &scsi_raid_io->ChainOffset, 0);
1344 1344 }
1345 1345 } else {
1346 1346 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1347 1347 (U8)instance->chain_offset_io_req);
1348 1348 }
1349 1349
1350 1350 /* prepare physical chain element */
1351 1351 ieeeChainElement = scsi_raid_io_sgl_ieee;
1352 1352
1353 1353 ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1354 1354
1355 1355 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1356 1356 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1357 1357 IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1358 1358 } else {
1359 1359 ddi_put8(acc_handle, &ieeeChainElement->Flags,
1360 1360 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1361 1361 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1362 1362 }
1363 1363
1364 1364 ddi_put32(acc_handle, &ieeeChainElement->Length,
1365 1365 (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1366 1366
1367 1367 ddi_put64(acc_handle, &ieeeChainElement->Address,
1368 1368 (U64)cmd->sgl_phys_addr);
1369 1369
1370 1370 sg_to_process = numElements - i;
1371 1371
1372 1372 con_log(CL_ANN1, (CE_NOTE,
1373 1373 "[Additional SGE Count]:%x", sg_to_process));
1374 1374
1375 1375 /* point to the chained SGL buffer */
1376 1376 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1377 1377
1378 1378 /* build rest of the SGL in chained buffer */
1379 1379 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1380 1380 con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1381 1381
1382 1382 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1383 1383 acmd->cmd_dmacookies[i].dmac_laddress);
1384 1384
1385 1385 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1386 1386 acmd->cmd_dmacookies[i].dmac_size);
1387 1387
1388 1388 ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1389 1389
1390 1390 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1391 1391 if (i == (numElements - 1)) {
1392 1392 ddi_put8(acc_handle,
1393 1393 &scsi_raid_io_sgl_ieee->Flags,
1394 1394 IEEE_SGE_FLAGS_END_OF_LIST);
1395 1395 }
1396 1396 }
1397 1397
1398 1398 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1399 1399
1400 1400 #ifdef DEBUG
1401 1401 con_log(CL_DLEVEL1, (CE_NOTE,
1402 1402 "[SGL Address]: %" PRIx64,
1403 1403 scsi_raid_io_sgl_ieee->Address));
1404 1404 con_log(CL_DLEVEL1, (CE_NOTE,
1405 1405 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1406 1406 con_log(CL_DLEVEL1, (CE_NOTE,
1407 1407 "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1408 1408 #endif
1409 1409
1410 1410 i++;
1411 1411 }
1412 1412 }
1413 1413
1414 1414 return (0);
1415 1415 } /* end of BuildScatterGather */
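A compact picture of what the routine above emits when the cookie count
exceeds the main-frame budget (a sketch; n is numElements and i is the
first cookie index that did not fit inline, i.e. endElement):

	/*
	 * main frame SGL:  SGE[cookie 0] .. SGE[cookie i-1], then one
	 *                  chain element with
	 *                    Address = cmd->sgl_phys_addr
	 *                    Length  = sizeof (MPI2_SGE_IO_UNION) * (n - i)
	 * chain buffer:    SGE[cookie i] .. SGE[cookie n-1]
	 *                  (the last entry gets END_OF_LIST on Invader)
	 */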
1416 1416
1417 1417
1418 1418 /*
1419 1419 * build_cmd
1420 1420 */
1421 1421 static struct mrsas_cmd *
1422 1422 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1423 1423 struct scsi_pkt *pkt, uchar_t *cmd_done)
1424 1424 {
1425 1425 uint8_t fp_possible = 0;
1426 1426 uint32_t index;
1427 1427 uint32_t lba_count = 0;
1428 1428 uint32_t start_lba_hi = 0;
1429 1429 uint32_t start_lba_lo = 0;
1430 1430 ddi_acc_handle_t acc_handle =
1431 1431 instance->mpi2_frame_pool_dma_obj.acc_handle;
1432 1432 struct mrsas_cmd *cmd = NULL;
1433 1433 struct scsa_cmd *acmd = PKT2CMD(pkt);
1434 1434 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
1435 1435 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1436 1436 uint32_t datalen;
1437 1437 struct IO_REQUEST_INFO io_info;
1438 1438 MR_FW_RAID_MAP_ALL *local_map_ptr;
1439 1439 uint16_t pd_cmd_cdblen;
1440 1440
1441 1441 con_log(CL_DLEVEL1, (CE_NOTE,
1442 1442 "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1443 1443
1444 1444 /* find out if this is a logical or a physical drive command. */
1445 1445 acmd->islogical = MRDRV_IS_LOGICAL(ap);
1446 1446 acmd->device_id = MAP_DEVICE_ID(instance, ap);
1447 1447
1448 1448 *cmd_done = 0;
1449 1449
1450 1450 /* get the command packet */
1451 1451 if (!(cmd = get_raid_msg_pkt(instance))) {
1452 1452 return (NULL);
1453 1453 }
1454 1454
1455 1455 index = cmd->index;
1456 1456 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
1457 1457 ReqDescUnion->Words = 0;
1458 1458 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1459 1459 ReqDescUnion->SCSIIO.RequestFlags =
1460 1460 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1461 1461 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1462 1462
1463 1463
1464 1464 cmd->request_desc = ReqDescUnion;
1465 1465 cmd->pkt = pkt;
1466 1466 cmd->cmd = acmd;
1467 1467
1468 1468 /* let's get the command direction */
1469 1469 if (acmd->cmd_flags & CFLAG_DMASEND) {
1470 1470 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1471 1471 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1472 1472 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1473 1473 DDI_DMA_SYNC_FORDEV);
1474 1474 }
1475 1475 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1476 1476 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1477 1477 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1478 1478 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1479 1479 DDI_DMA_SYNC_FORCPU);
1480 1480 }
1481 1481 } else {
1482 1482 con_log(CL_ANN, (CE_NOTE, "NO DMA\n"));
1483 1483 }
1484 1484
1485 1485
1486 1486 /* get SCSI_IO raid message frame pointer */
1487 1487 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1488 1488
1489 1489 /* zero out SCSI_IO raid message frame */
1490 1490 (void) memset(scsi_raid_io, 0, sizeof (Mpi2RaidSCSIIORequest_t));
1491 1491
1492 1492 /* Set the ldTargetId set by BuildRaidContext() */
1493 1493 ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1494 1494 acmd->device_id);
1495 1495
1496 1496 /* Copy CDB to scsi_io_request message frame */
1497 1497 ddi_rep_put8(acc_handle,
1498 1498 (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1499 1499 acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1500 1500
1501 1501 /*
1502 1502 * Just the CDB length, rest of the Flags are zero
1503 1503 * This will be modified later.
1504 1504 */
1505 1505 ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1506 1506
1507 1507 pd_cmd_cdblen = acmd->cmd_cdblen;
1508 1508
1509 1509 switch (pkt->pkt_cdbp[0]) {
1510 1510 case SCMD_READ:
1511 1511 case SCMD_WRITE:
1512 1512 case SCMD_READ_G1:
1513 1513 case SCMD_WRITE_G1:
1514 1514 case SCMD_READ_G4:
1515 1515 case SCMD_WRITE_G4:
1516 1516 case SCMD_READ_G5:
1517 1517 case SCMD_WRITE_G5:
1518 1518
1519 1519 if (acmd->islogical) {
1520 1520 /* Initialize sense Information */
1521 1521 if (cmd->sense1 == NULL) {
1522 1522 con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1523 1523 "Sense buffer ptr NULL \n"));
1524 1524 }
1525 1525 bzero(cmd->sense1, SENSE_LENGTH);
1526 1526 con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1527 1527 "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1528 1528
1529 1529 if (acmd->cmd_cdblen == CDB_GROUP0) {
1530 1530 /* 6-byte cdb */
1531 1531 lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1532 1532 start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1533 1533 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1534 1534 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1535 1535 << 16));
1536 1536 } else if (acmd->cmd_cdblen == CDB_GROUP1) {
1537 1537 /* 10-byte cdb */
1538 1538 lba_count =
1539 1539 (((uint16_t)(pkt->pkt_cdbp[8])) |
1540 1540 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1541 1541
1542 1542 start_lba_lo =
1543 1543 (((uint32_t)(pkt->pkt_cdbp[5])) |
1544 1544 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1545 1545 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1546 1546 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1547 1547
1548 1548 } else if (acmd->cmd_cdblen == CDB_GROUP5) {
1549 1549 /* 12-byte cdb */
1550 1550 lba_count = (
1551 1551 ((uint32_t)(pkt->pkt_cdbp[9])) |
1552 1552 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1553 1553 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1554 1554 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1555 1555
1556 1556 start_lba_lo =
1557 1557 (((uint32_t)(pkt->pkt_cdbp[5])) |
1558 1558 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1559 1559 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1560 1560 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1561 1561
1562 1562 } else if (acmd->cmd_cdblen == CDB_GROUP4) {
1563 1563 /* 16-byte cdb */
1564 1564 lba_count = (
1565 1565 ((uint32_t)(pkt->pkt_cdbp[13])) |
1566 1566 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1567 1567 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1568 1568 ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1569 1569
1570 1570 start_lba_lo = (
1571 1571 ((uint32_t)(pkt->pkt_cdbp[9])) |
1572 1572 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1573 1573 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1574 1574 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1575 1575
1576 1576 start_lba_hi = (
1577 1577 ((uint32_t)(pkt->pkt_cdbp[5])) |
1578 1578 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1579 1579 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1580 1580 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1581 1581 }
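			/*
			 * Worked example (hypothetical packet, not from the
			 * source): a READ(10) CDB of
			 *   28 00 00 12 34 56 00 00 08 00
			 * decodes as start_lba_lo = 0x00123456 (bytes 2..5,
			 * big-endian) and lba_count = 8 (bytes 7..8);
			 * start_lba_hi stays 0, since only 16-byte CDBs
			 * carry LBA bits above 32.
			 */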
1582 1582
1583 1583 if (instance->tbolt &&
1584 1584 ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1585 1585 cmn_err(CE_WARN, " IO SECTOR COUNT exceeds "
1586 1586 "controller limit 0x%x sectors\n",
1587 1587 lba_count);
1588 1588 }
1589 1589
1590 1590 (void) memset(&io_info, 0,
1591 1591 sizeof (struct IO_REQUEST_INFO));
1592 1592 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1593 1593 start_lba_lo;
1594 1594 io_info.numBlocks = lba_count;
1595 1595 io_info.ldTgtId = acmd->device_id;
1596 1596
1597 1597 if (acmd->cmd_flags & CFLAG_DMASEND)
1598 1598 io_info.isRead = 0;
1599 1599 else
1600 1600 io_info.isRead = 1;
1601 1601
1602 1602
1603 1603 /* Acquire SYNC MAP UPDATE lock */
1604 1604 mutex_enter(&instance->sync_map_mtx);
1605 1605
1606 1606 local_map_ptr =
1607 1607 instance->ld_map[(instance->map_id & 1)];
1608 1608
1609 1609 if ((MR_TargetIdToLdGet(
1610 1610 acmd->device_id, local_map_ptr) >=
1611 1611 MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1612 1612 cmn_err(CE_NOTE, "Fast Path NOT Possible, "
1613 1613 "targetId >= MAX_LOGICAL_DRIVES || "
1614 1614 "!instance->fast_path_io\n");
1615 1615 fp_possible = 0;
1616 1616 /* Set Regionlock flags to BYPASS */
1617 1617 /* io_request->RaidContext.regLockFlags = 0; */
1618 1618 ddi_put8(acc_handle,
1619 1619 &scsi_raid_io->RaidContext.regLockFlags, 0);
1620 1620 } else {
1621 1621 if (MR_BuildRaidContext(instance, &io_info,
1622 1622 &scsi_raid_io->RaidContext, local_map_ptr))
1623 1623 fp_possible = io_info.fpOkForIo;
1624 1624 }
1625 1625
1626 1626 if (!enable_fp)
1627 1627 fp_possible = 0;
1628 1628
1629 1629 con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
1630 1630 "instance->fast_path_io %d fp_possible %d \n",
1631 1631 enable_fp, instance->fast_path_io, fp_possible));
1632 1632
1633 1633 if (fp_possible) {
1634 1634
1635 1635 /* Check for DIF enabled LD */
1636 1636 if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1637 1637 /* Prepare 32 Byte CDB for DIF capable Disk */
1638 1638 mrsas_tbolt_prepare_cdb(instance,
1639 1639 scsi_raid_io->CDB.CDB32,
1640 1640 &io_info, scsi_raid_io, start_lba_lo);
1641 1641 } else {
1642 1642 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1643 1643 (uint8_t *)&pd_cmd_cdblen,
1644 1644 io_info.pdBlock, io_info.numBlocks);
1645 1645 ddi_put16(acc_handle,
1646 1646 &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1647 1647 }
1648 1648
1649 1649 ddi_put8(acc_handle, &scsi_raid_io->Function,
1650 1650 MPI2_FUNCTION_SCSI_IO_REQUEST);
1651 1651
1652 1652 ReqDescUnion->SCSIIO.RequestFlags =
1653 1653 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1654 1654 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1655 1655
1656 1656 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1657 1657 uint8_t regLockFlags = ddi_get8(acc_handle,
1658 1658 &scsi_raid_io->RaidContext.regLockFlags);
1659 1659 uint16_t IoFlags = ddi_get16(acc_handle,
1660 1660 &scsi_raid_io->IoFlags);
1661 1661
1662 1662 if (regLockFlags == REGION_TYPE_UNUSED)
1663 1663 ReqDescUnion->SCSIIO.RequestFlags =
1664 1664 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1665 1665 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1666 1666
1667 1667 IoFlags |=
1668 1668 MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1669 1669 regLockFlags |=
1670 1670 (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1671 1671 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1672 1672
1673 1673 ddi_put8(acc_handle,
1674 1674 &scsi_raid_io->ChainOffset, 0);
1675 1675 ddi_put8(acc_handle,
1676 1676 &scsi_raid_io->RaidContext.nsegType,
1677 1677 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1678 1678 MPI2_TYPE_CUDA));
1679 1679 ddi_put8(acc_handle,
1680 1680 &scsi_raid_io->RaidContext.regLockFlags,
1681 1681 regLockFlags);
1682 1682 ddi_put16(acc_handle,
1683 1683 &scsi_raid_io->IoFlags, IoFlags);
1684 1684 }
1685 1685
1686 1686 if ((instance->load_balance_info[
1687 1687 acmd->device_id].loadBalanceFlag) &&
1688 1688 (io_info.isRead)) {
1689 1689 io_info.devHandle =
1690 1690 get_updated_dev_handle(&instance->
1691 1691 load_balance_info[acmd->device_id],
1692 1692 &io_info);
1693 1693 cmd->load_balance_flag |=
1694 1694 MEGASAS_LOAD_BALANCE_FLAG;
1695 1695 } else {
1696 1696 cmd->load_balance_flag &=
1697 1697 ~MEGASAS_LOAD_BALANCE_FLAG;
1698 1698 }
1699 1699
1700 1700 ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1701 1701 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1702 1702 io_info.devHandle);
1703 1703
1704 1704 } else {
1705 1705 ddi_put8(acc_handle, &scsi_raid_io->Function,
1706 1706 MPI2_FUNCTION_LD_IO_REQUEST);
1707 1707
1708 1708 ddi_put16(acc_handle,
1709 1709 &scsi_raid_io->DevHandle, acmd->device_id);
1710 1710
1711 1711 ReqDescUnion->SCSIIO.RequestFlags =
1712 1712 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1713 1713 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1714 1714
1715 1715 ddi_put16(acc_handle,
1716 1716 &scsi_raid_io->RaidContext.timeoutValue,
1717 1717 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1718 1718
1719 1719 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1720 1720 uint8_t regLockFlags = ddi_get8(acc_handle,
1721 1721 &scsi_raid_io->RaidContext.regLockFlags);
1722 1722
1723 1723 if (regLockFlags == REGION_TYPE_UNUSED) {
1724 1724 ReqDescUnion->SCSIIO.RequestFlags =
1725 1725 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1726 1726 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1727 1727 }
1728 1728
1729 1729 regLockFlags |=
1730 1730 (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1731 1731 MR_RL_FLAGS_SEQ_NUM_ENABLE);
1732 1732
1733 1733 ddi_put8(acc_handle,
1734 1734 &scsi_raid_io->RaidContext.nsegType,
1735 1735 ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1736 1736 MPI2_TYPE_CUDA));
1737 1737 ddi_put8(acc_handle,
1738 1738 &scsi_raid_io->RaidContext.regLockFlags,
1739 1739 regLockFlags);
1740 1740 }
1741 1741 } /* Not FP */
1742 1742
1743 1743 /* Release SYNC MAP UPDATE lock */
1744 1744 mutex_exit(&instance->sync_map_mtx);
1745 1745
1746 1746
1747 1747 /*
1748 1748 * Set sense buffer physical address/length in scsi_io_request.
1749 1749 */
1750 1750 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1751 1751 cmd->sense_phys_addr1);
1752 1752 ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength,
1753 1753 SENSE_LENGTH);
1754 1754
1755 1755 /* Construct SGL */
1756 1756 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1757 1757 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1758 1758
1759 1759 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1760 1760 scsi_raid_io, &datalen);
1761 1761
1762 1762 ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1763 1763
1764 1764 break;
1765 1765 #ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
1766 1766 } else {
1767 1767 break;
1768 1768 #endif
1769 1769 }
1770 1770 	/* Fall through for all non-rd/wr cmds. */
1771 1771 default:
1772 1772 switch (pkt->pkt_cdbp[0]) {
1773 1773 case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1774 1774 return_raid_msg_pkt(instance, cmd);
1775 1775 *cmd_done = 1;
1776 1776 return (NULL);
1777 1777 }
1778 1778
1779 1779 case SCMD_MODE_SENSE:
1780 1780 case SCMD_MODE_SENSE_G1: {
1781 1781 union scsi_cdb *cdbp;
1782 1782 uint16_t page_code;
1783 1783
1784 1784 cdbp = (void *)pkt->pkt_cdbp;
1785 1785 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1786 1786 switch (page_code) {
1787 1787 case 0x3:
1788 1788 case 0x4:
1789 1789 (void) mrsas_mode_sense_build(pkt);
1790 1790 return_raid_msg_pkt(instance, cmd);
1791 1791 *cmd_done = 1;
1792 1792 return (NULL);
1793 1793 }
1794 1794 break;
1795 1795 }
1796 1796
1797 1797 default: {
1798 1798 /*
1799 1799 * Here we need to handle PASSTHRU for
1800 1800 * Logical Devices. Like Inquiry etc.
1801 1801 */
1802 1802
1803 1803 if (!(acmd->islogical)) {
1804 1804
1805 1805 /* Acquire SYNC MAP UPDATE lock */
1806 1806 mutex_enter(&instance->sync_map_mtx);
1807 1807
1808 1808 local_map_ptr =
1809 1809 instance->ld_map[(instance->map_id & 1)];
1810 1810
1811 1811 ddi_put8(acc_handle, &scsi_raid_io->Function,
1812 1812 MPI2_FUNCTION_SCSI_IO_REQUEST);
1813 1813
1814 1814 ReqDescUnion->SCSIIO.RequestFlags =
1815 1815 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1816 1816 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1817 1817
1818 1818 ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1819 1819 local_map_ptr->raidMap.
1820 1820 devHndlInfo[acmd->device_id].curDevHdl);
1821 1821
1822 1822
1823 1823 			/* Set regLockFlags to REGION_TYPE_BYPASS */
1824 1824 ddi_put8(acc_handle,
1825 1825 &scsi_raid_io->RaidContext.regLockFlags, 0);
1826 1826 ddi_put64(acc_handle,
1827 1827 &scsi_raid_io->RaidContext.regLockRowLBA,
1828 1828 0);
1829 1829 ddi_put32(acc_handle,
1830 1830 &scsi_raid_io->RaidContext.regLockLength,
1831 1831 0);
1832 1832 ddi_put8(acc_handle,
1833 1833 &scsi_raid_io->RaidContext.RAIDFlags,
1834 1834 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1835 1835 MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1836 1836 ddi_put16(acc_handle,
1837 1837 &scsi_raid_io->RaidContext.timeoutValue,
1838 1838 local_map_ptr->raidMap.fpPdIoTimeoutSec);
1839 1839 ddi_put16(acc_handle,
1840 1840 &scsi_raid_io->RaidContext.ldTargetId,
1841 1841 acmd->device_id);
1842 1842 ddi_put8(acc_handle,
1843 1843 &scsi_raid_io->LUN[1], acmd->lun);
1844 1844
1845 1845 /* Release SYNC MAP UPDATE lock */
1846 1846 mutex_exit(&instance->sync_map_mtx);
1847 1847
1848 1848 } else {
1849 1849 ddi_put8(acc_handle, &scsi_raid_io->Function,
1850 1850 MPI2_FUNCTION_LD_IO_REQUEST);
1851 1851 ddi_put8(acc_handle,
1852 1852 &scsi_raid_io->LUN[1], acmd->lun);
1853 1853 ddi_put16(acc_handle,
1854 1854 &scsi_raid_io->DevHandle, acmd->device_id);
1855 1855 ReqDescUnion->SCSIIO.RequestFlags =
1856 1856 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1857 1857 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1858 1858 }
1859 1859
1860 1860 /*
1861 1861 * Set sense buffer physical address/length in
1862 1862 * scsi_io_request.
1863 1863 */
1864 1864 ddi_put32(acc_handle,
1865 1865 &scsi_raid_io->SenseBufferLowAddress,
1866 1866 cmd->sense_phys_addr1);
1867 1867 ddi_put8(acc_handle,
1868 1868 &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1869 1869
1870 1870 /* Construct SGL */
1871 1871 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1872 1872 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1873 1873
1874 1874 (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1875 1875 scsi_raid_io, &datalen);
1876 1876
1877 1877 ddi_put32(acc_handle,
1878 1878 &scsi_raid_io->DataLength, datalen);
1879 1879
1880 1880
1881 1881 con_log(CL_ANN, (CE_CONT,
1882 1882 "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1883 1883 pkt->pkt_cdbp[0], acmd->device_id));
1884 1884 con_log(CL_DLEVEL1, (CE_CONT,
1885 1885 "data length = %x\n",
1886 1886 scsi_raid_io->DataLength));
1887 1887 con_log(CL_DLEVEL1, (CE_CONT,
1888 1888 "cdb length = %x\n",
1889 1889 acmd->cmd_cdblen));
1890 1890 }
1891 1891 break;
1892 1892 }
1893 1893
1894 1894 }
1895 1895
1896 1896 return (cmd);
1897 1897 }
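
The switch above pulls the start LBA and block count out of the CDB byte by byte. As a sanity check, here is a small stand-alone user-space sketch of the 10-byte case; the 6/12/16-byte variants differ only in field widths and offsets. decode_rw10 and its test values are illustrative, not driver code.

    #include <stdint.h>
    #include <stdio.h>

    /* Decode start LBA and transfer length from a 10-byte READ/WRITE CDB. */
    static void
    decode_rw10(const uint8_t cdb[10], uint32_t *start_lba, uint16_t *nblks)
    {
            /* Bytes 2..5 hold the LBA, big-endian. */
            *start_lba = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
                ((uint32_t)cdb[4] << 8) | (uint32_t)cdb[5];
            /* Bytes 7..8 hold the transfer length, big-endian. */
            *nblks = (uint16_t)(((uint16_t)cdb[7] << 8) | cdb[8]);
    }

    int
    main(void)
    {
            /* READ(10) of 8 blocks starting at LBA 0x12345678. */
            uint8_t cdb[10] = { 0x28, 0, 0x12, 0x34, 0x56, 0x78, 0, 0, 8, 0 };
            uint32_t lba;
            uint16_t n;

            decode_rw10(cdb, &lba, &n);
            printf("lba=0x%x nblks=%u\n", (unsigned)lba, (unsigned)n);
            return (0);
    }
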
1898 1898
1899 1899 /*
1900 1900 * mrsas_tbolt_tran_init_pkt - allocate & initialize a scsi_pkt structure
1901 1901 * @ap:
1902 1902 * @pkt:
1903 1903 * @bp:
1904 1904 * @cmdlen:
1905 1905 * @statuslen:
1906 1906 * @tgtlen:
1907 1907 * @flags:
1908 1908 * @callback:
1909 1909 *
1910 1910 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1911 1911 * structure and DMA resources for a target driver request. The
1912 1912 * tran_init_pkt() entry point is called when the target driver calls the
1913 1913 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1914 1914 * is a request to perform one or more of three possible services:
1915 1915 * - allocation and initialization of a scsi_pkt structure
1916 1916 * - allocation of DMA resources for data transfer
1917 1917 * - reallocation of DMA resources for the next portion of the data transfer
1918 1918 */
1919 1919 struct scsi_pkt *
1920 1920 mrsas_tbolt_tran_init_pkt(struct scsi_address *ap,
1921 1921 register struct scsi_pkt *pkt,
1922 1922 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1923 1923 int flags, int (*callback)(), caddr_t arg)
1924 1924 {
1925 1925 struct scsa_cmd *acmd;
1926 1926 struct mrsas_instance *instance;
1927 1927 struct scsi_pkt *new_pkt;
1928 1928
1929 1929 instance = ADDR2MR(ap);
1930 1930
1931 1931 /* step #1 : pkt allocation */
1932 1932 if (pkt == NULL) {
1933 1933 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1934 1934 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1935 1935 if (pkt == NULL) {
1936 1936 return (NULL);
1937 1937 }
1938 1938
1939 1939 acmd = PKT2CMD(pkt);
1940 1940
1941 1941 /*
1942 1942 * Initialize the new pkt - we redundantly initialize
1943 1943 * all the fields for illustrative purposes.
1944 1944 */
1945 1945 acmd->cmd_pkt = pkt;
1946 1946 acmd->cmd_flags = 0;
1947 1947 acmd->cmd_scblen = statuslen;
1948 1948 acmd->cmd_cdblen = cmdlen;
1949 1949 acmd->cmd_dmahandle = NULL;
1950 1950 acmd->cmd_ncookies = 0;
1951 1951 acmd->cmd_cookie = 0;
1952 1952 acmd->cmd_cookiecnt = 0;
1953 1953 acmd->cmd_nwin = 0;
1954 1954
1955 1955 pkt->pkt_address = *ap;
1956 1956 pkt->pkt_comp = (void (*)())NULL;
1957 1957 pkt->pkt_flags = 0;
1958 1958 pkt->pkt_time = 0;
1959 1959 pkt->pkt_resid = 0;
1960 1960 pkt->pkt_state = 0;
1961 1961 pkt->pkt_statistics = 0;
1962 1962 pkt->pkt_reason = 0;
1963 1963 new_pkt = pkt;
1964 1964 } else {
1965 1965 acmd = PKT2CMD(pkt);
1966 1966 new_pkt = NULL;
1967 1967 }
1968 1968
1969 1969 /* step #2 : dma allocation/move */
1970 1970 if (bp && bp->b_bcount != 0) {
1971 1971 if (acmd->cmd_dmahandle == NULL) {
1972 1972 if (mrsas_dma_alloc(instance, pkt, bp, flags,
1973 1973 callback) == DDI_FAILURE) {
1974 1974 if (new_pkt) {
1975 1975 scsi_hba_pkt_free(ap, new_pkt);
1976 1976 }
1977 1977 return ((struct scsi_pkt *)NULL);
1978 1978 }
1979 1979 } else {
1980 1980 if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1981 1981 return ((struct scsi_pkt *)NULL);
1982 1982 }
1983 1983 }
1984 1984 }
1985 1985 return (pkt);
1986 1986 }
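
tran_init_pkt() multiplexes up to three services on two inputs (pkt and bp). A minimal user-space model of that dispatch, with placeholder types; pkt_t, buf_t and init_pkt_services are invented for illustration and are not SCSA names.

    #include <stddef.h>
    #include <stdio.h>

    /* Placeholder types; not the real SCSA scsi_pkt/buf structures. */
    typedef struct { int have_dma; } pkt_t;
    typedef struct { size_t bcount; } buf_t;

    /*
     * Which services does one scsi_init_pkt()-style call perform?
     * bit 0 = allocate packet, bit 1 = allocate DMA, bit 2 = move DMA window.
     */
    static int
    init_pkt_services(const pkt_t *pkt, const buf_t *bp)
    {
            int svc = 0;

            if (pkt == NULL)
                    svc |= 1;               /* step #1: pkt allocation */
            if (bp != NULL && bp->bcount != 0) {
                    if (pkt == NULL || !pkt->have_dma)
                            svc |= 2;       /* step #2: dma allocation */
                    else
                            svc |= 4;       /* step #2: dma move */
            }
            return (svc);
    }

    int
    main(void)
    {
            buf_t bp = { 4096 };

            /* A fresh request with data: allocate the pkt and its DMA. */
            printf("services = %d\n", init_pkt_services(NULL, &bp));
            return (0);
    }
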
1987 1987
1988 1988
1989 1989 uint32_t
1990 1990 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1991 1991 {
1992 1992 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1993 1993 }
1994 1994
1995 1995 void
1996 1996 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1997 1997 {
1998 1998 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1999 1999 atomic_add_16(&instance->fw_outstanding, 1);
2000 2000
2001 2001 struct scsi_pkt *pkt;
2002 2002
2003 2003 con_log(CL_ANN1,
2004 2004 (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
2005 2005
2006 2006 con_log(CL_DLEVEL1, (CE_CONT,
2007 2007 " [req desc Words] %" PRIx64 " \n", req_desc->Words));
2008 2008 con_log(CL_DLEVEL1, (CE_CONT,
2009 2009 " [req desc low part] %x \n",
2010 2010 	    (uint_t)(req_desc->Words & 0xffffffff)));
2011 2011 con_log(CL_DLEVEL1, (CE_CONT,
2012 2012 " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
2013 2013 pkt = cmd->pkt;
2014 2014
2015 2015 if (pkt) {
2016 2016 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2017 2017 "ISSUED CMD TO FW : called : cmd:"
2018 2018 ": %p instance : %p pkt : %p pkt_time : %x\n",
2019 2019 gethrtime(), (void *)cmd, (void *)instance,
2020 2020 (void *)pkt, cmd->drv_pkt_time));
2021 2021 if (instance->adapterresetinprogress) {
2022 2022 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2023 2023 con_log(CL_ANN, (CE_NOTE,
2024 2024 "TBOLT Reset the scsi_pkt timer"));
2025 2025 } else {
2026 2026 push_pending_mfi_pkt(instance, cmd);
2027 2027 }
2028 2028
2029 2029 } else {
2030 2030 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2031 2031 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
2032 2032 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
2033 2033 }
2034 2034
2035 2035 /* Issue the command to the FW */
2036 2036 mutex_enter(&instance->reg_write_mtx);
2037 2037 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2038 2038 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2039 2039 mutex_exit(&instance->reg_write_mtx);
2040 2040 }
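
Worth noting for review: the 64-bit request descriptor is handed to the hardware as two 32-bit writes, low half first, and reg_write_mtx keeps the pair atomic with respect to other CPUs. A user-space sketch of the split; write32() stands in for the WR_IB_LOW_QPORT/WR_IB_HIGH_QPORT register macros.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for a 32-bit MMIO register write. */
    static void
    write32(const char *port, uint32_t v)
    {
            printf("%s <- 0x%08x\n", port, v);
    }

    /* Hand a 64-bit request descriptor to the HBA as two 32-bit writes. */
    static void
    issue_descriptor(uint64_t words)
    {
            /*
             * Low half first, then high half; the driver holds a mutex
             * around this pair so writes from two CPUs cannot interleave.
             */
            write32("IB_LOW_QPORT", (uint32_t)words);
            write32("IB_HIGH_QPORT", (uint32_t)(words >> 32));
    }

    int
    main(void)
    {
            issue_descriptor(0x1122334455667788ULL);    /* arbitrary value */
            return (0);
    }
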
2041 2041
2042 2042 /*
2043 2043 * issue_cmd_in_sync_mode
2044 2044 */
2045 2045 int
2046 2046 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
2047 2047 struct mrsas_cmd *cmd)
2048 2048 {
2049 2049 int i;
2050 2050 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2051 2051 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2052 2052
2053 2053 struct mrsas_header *hdr;
2054 2054 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2055 2055
2056 2056 con_log(CL_ANN,
2057 2057 (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
2058 2058 cmd->SMID));
2059 2059
2060 2060
2061 2061 if (instance->adapterresetinprogress) {
2062 2062 cmd->drv_pkt_time = ddi_get16
2063 2063 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2064 2064 if (cmd->drv_pkt_time < debug_timeout_g)
2065 2065 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2066 2066 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
2067 2067 "RESET-IN-PROGRESS, issue cmd & return.\n"));
2068 2068
2069 2069 mutex_enter(&instance->reg_write_mtx);
2070 2070 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2071 2071 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2072 2072 mutex_exit(&instance->reg_write_mtx);
2073 2073
2074 2074 return (DDI_SUCCESS);
2075 2075 } else {
2076 2076 con_log(CL_ANN1, (CE_NOTE,
2077 2077 "tbolt_issue_cmd_in_sync_mode: pushing the pkt\n"));
2078 2078 push_pending_mfi_pkt(instance, cmd);
2079 2079 }
2080 2080
2081 2081 con_log(CL_DLEVEL2, (CE_NOTE,
2082 2082 "HighQport offset :%p",
2083 2083 (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
2084 2084 con_log(CL_DLEVEL2, (CE_NOTE,
2085 2085 "LowQport offset :%p",
2086 2086 (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
2087 2087
2088 2088 cmd->sync_cmd = MRSAS_TRUE;
2089 2089 cmd->cmd_status = ENODATA;
2090 2090
2091 2091
2092 2092 mutex_enter(&instance->reg_write_mtx);
2093 2093 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2094 2094 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2095 2095 mutex_exit(&instance->reg_write_mtx);
2096 2096
2097 2097 con_log(CL_ANN1, (CE_NOTE,
2098 2098 " req desc high part %x \n", (uint_t)(req_desc->Words >> 32)));
2099 2099 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x \n",
2100 2100 (uint_t)(req_desc->Words & 0xffffffff)));
2101 2101
2102 2102 mutex_enter(&instance->int_cmd_mtx);
2103 2103 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2104 2104 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2105 2105 }
2106 2106 mutex_exit(&instance->int_cmd_mtx);
2107 2107
2108 2108
2109 2109 	if (i < (msecs - 1)) {
2110 2110 return (DDI_SUCCESS);
2111 2111 } else {
2112 2112 return (DDI_FAILURE);
2113 2113 }
2114 2114 }
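
One caveat in the wait loop above: i counts cv_wait() wakeups, not elapsed milliseconds, so the msecs bound is only a rough cap. The underlying pattern is a plain condition-variable handshake on cmd_status. A runnable user-space model with POSIX threads follows; the names are illustrative, not driver code.

    #include <pthread.h>
    #include <errno.h>
    #include <stdio.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static int cmd_status = ENODATA;    /* sentinel: not yet completed */

    /* Completion side: record status and wake the waiter. */
    static void *
    completer(void *arg)
    {
            (void) arg;
            pthread_mutex_lock(&mtx);
            cmd_status = 0;             /* command finished OK */
            pthread_cond_broadcast(&cv);
            pthread_mutex_unlock(&mtx);
            return (NULL);
    }

    int
    main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, completer, NULL);

            /* Issue side: block until the status leaves its sentinel. */
            pthread_mutex_lock(&mtx);
            while (cmd_status == ENODATA)
                    pthread_cond_wait(&cv, &mtx);
            pthread_mutex_unlock(&mtx);

            pthread_join(t, NULL);
            printf("status=%d\n", cmd_status);
            return (0);
    }
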
2115 2115
2116 2116 /*
2117 2117 * issue_cmd_in_poll_mode
2118 2118 */
2119 2119 int
2120 2120 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2121 2121 struct mrsas_cmd *cmd)
2122 2122 {
2123 2123 int i;
2124 2124 uint16_t flags;
2125 2125 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2126 2126 struct mrsas_header *frame_hdr;
2127 2127
2128 2128 con_log(CL_ANN,
2129 2129 (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2130 2130 cmd->SMID));
2131 2131
2132 2132 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2133 2133
2134 2134 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2135 2135 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2136 2136 MFI_CMD_STATUS_POLL_MODE);
2137 2137 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2138 2138 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2139 2139 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2140 2140
2141 2141 con_log(CL_ANN1, (CE_NOTE, " req desc low part %x \n",
2142 2142 (uint_t)(req_desc->Words & 0xffffffff)));
2143 2143 con_log(CL_ANN1, (CE_NOTE,
2144 2144 " req desc high part %x \n", (uint_t)(req_desc->Words >> 32)));
2145 2145
2146 2146 /* issue the frame using inbound queue port */
2147 2147 mutex_enter(&instance->reg_write_mtx);
2148 2148 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2149 2149 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2150 2150 mutex_exit(&instance->reg_write_mtx);
2151 2151
2152 2152 for (i = 0; i < msecs && (
2153 2153 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2154 2154 == MFI_CMD_STATUS_POLL_MODE); i++) {
2155 2155 /* wait for cmd_status to change from 0xFF */
2156 2156 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2157 2157 }
2158 2158
2159 2159 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2160 2160 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2161 2161 con_log(CL_ANN1, (CE_NOTE,
2162 2162 " cmd failed %" PRIx64 " \n", (req_desc->Words)));
2163 2163 return (DDI_FAILURE);
2164 2164 }
2165 2165
2166 2166 return (DDI_SUCCESS);
2167 2167 }
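
Poll mode relies on the firmware overwriting a sentinel status byte (0xFF) in host memory; the driver spins in 1-ms steps until the byte changes or the time budget runs out. A stand-alone model of that loop; poll_for_completion is an invented name.

    #include <stdint.h>
    #include <unistd.h>
    #include <stdio.h>

    #define STATUS_POLL_MODE 0xFF   /* sentinel written before issuing */

    static volatile uint8_t cmd_status = STATUS_POLL_MODE;

    /* Poll until the sentinel is overwritten, up to max_ms milliseconds. */
    static int
    poll_for_completion(unsigned max_ms)
    {
            unsigned i;

            for (i = 0; i < max_ms && cmd_status == STATUS_POLL_MODE; i++)
                    usleep(1000);           /* 1 ms per iteration */
            return (cmd_status == STATUS_POLL_MODE ? -1 : 0);
    }

    int
    main(void)
    {
            cmd_status = 0;         /* pretend FW completed instantly */
            printf("poll result: %d\n", poll_for_completion(10));
            return (0);
    }
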
2168 2168
2169 2169 void
2170 2170 tbolt_enable_intr(struct mrsas_instance *instance)
2171 2171 {
2172 2172 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2173 2173 	/* writel(~0, &regs->outbound_intr_status); */
2174 2174 	/* readl(&regs->outbound_intr_status); */
2175 2175
2176 2176 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2177 2177
2178 2178 /* dummy read to force PCI flush */
2179 2179 (void) RD_OB_INTR_MASK(instance);
2180 2180
2181 2181 }
2182 2182
2183 2183 void
2184 2184 tbolt_disable_intr(struct mrsas_instance *instance)
2185 2185 {
2186 2186 uint32_t mask = 0xFFFFFFFF;
2187 2187
2188 2188 WR_OB_INTR_MASK(mask, instance);
2189 2189
2190 2190 /* Dummy readl to force pci flush */
2191 2191
2192 2192 (void) RD_OB_INTR_MASK(instance);
2193 2193 }
2194 2194
2195 2195
2196 2196 int
2197 2197 tbolt_intr_ack(struct mrsas_instance *instance)
2198 2198 {
2199 2199 uint32_t status;
2200 2200
2201 2201 /* check if it is our interrupt */
2202 2202 status = RD_OB_INTR_STATUS(instance);
2203 2203 con_log(CL_ANN1, (CE_NOTE,
2204 2204 "chkpnt: Entered tbolt_intr_ack status = %d \n", status));
2205 2205
2206 2206 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2207 2207 return (DDI_INTR_UNCLAIMED);
2208 2208 }
2209 2209
2210 2210 if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2211 2211 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2212 2212 return (DDI_INTR_UNCLAIMED);
2213 2213 }
2214 2214
2215 2215 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2216 2216 /* clear the interrupt by writing back the same value */
2217 2217 WR_OB_INTR_STATUS(status, instance);
2218 2218 /* dummy READ */
2219 2219 (void) RD_OB_INTR_STATUS(instance);
2220 2220 }
2221 2221 return (DDI_INTR_CLAIMED);
2222 2222 }
2223 2223
2224 2224 /*
2225 2225 * get_raid_msg_pkt : Get a command from the free pool
2226 2226 * After successful allocation, the caller of this routine
2227 2227 * must clear the frame buffer (memset to zero) before
2228 2228 * using the packet further.
2229 2229 *
2230 2230 * ***** Note *****
2231 2231 * After clearing the frame buffer the context id of the
2232 2232 * frame buffer SHOULD be restored back.
2233 2233 */
2234 2234
2235 2235 struct mrsas_cmd *
2236 2236 get_raid_msg_pkt(struct mrsas_instance *instance)
2237 2237 {
2238 2238 mlist_t *head = &instance->cmd_pool_list;
2239 2239 struct mrsas_cmd *cmd = NULL;
2240 2240
2241 2241 mutex_enter(&instance->cmd_pool_mtx);
2242 2242 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2243 2243
2244 2244
2245 2245 if (!mlist_empty(head)) {
2246 2246 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2247 2247 mlist_del_init(head->next);
2248 2248 }
2249 2249 if (cmd != NULL) {
2250 2250 cmd->pkt = NULL;
2251 2251 cmd->retry_count_for_ocr = 0;
2252 2252 cmd->drv_pkt_time = 0;
2253 2253 }
2254 2254 mutex_exit(&instance->cmd_pool_mtx);
2255 2255
2256 2256 if (cmd != NULL)
2257 2257 bzero(cmd->scsi_io_request,
2258 2258 sizeof (Mpi2RaidSCSIIORequest_t));
2259 2259 return (cmd);
2260 2260 }
2261 2261
2262 2262 struct mrsas_cmd *
2263 2263 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2264 2264 {
2265 2265 mlist_t *head = &instance->cmd_app_pool_list;
2266 2266 struct mrsas_cmd *cmd = NULL;
2267 2267
2268 2268 mutex_enter(&instance->cmd_app_pool_mtx);
2269 2269 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2270 2270
2271 2271 if (!mlist_empty(head)) {
2272 2272 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2273 2273 mlist_del_init(head->next);
2274 2274 }
2275 2275 if (cmd != NULL) {
2276 2276 cmd->retry_count_for_ocr = 0;
2277 2277 cmd->drv_pkt_time = 0;
2278 2278 cmd->pkt = NULL;
2279 2279 cmd->request_desc = NULL;
2280 2280
2281 2281 }
2282 2282
2283 2283 mutex_exit(&instance->cmd_app_pool_mtx);
2284 2284
2285 2285 if (cmd != NULL) {
2286 2286 bzero(cmd->scsi_io_request,
2287 2287 sizeof (Mpi2RaidSCSIIORequest_t));
2288 2288 }
2289 2289
2290 2290 return (cmd);
2291 2291 }
2292 2292
2293 2293 /*
2294 2294 * return_raid_msg_pkt : Return a cmd to free command pool
2295 2295 */
2296 2296 void
2297 2297 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2298 2298 {
2299 2299 mutex_enter(&instance->cmd_pool_mtx);
2300 2300 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2301 2301
2302 2302
2303 2303 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2304 2304
2305 2305 mutex_exit(&instance->cmd_pool_mtx);
2306 2306 }
2307 2307
2308 2308 void
2309 2309 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2310 2310 {
2311 2311 mutex_enter(&instance->cmd_app_pool_mtx);
2312 2312 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2313 2313
2314 2314 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2315 2315
2316 2316 mutex_exit(&instance->cmd_app_pool_mtx);
2317 2317 }
2318 2318
2319 2319
2320 2320 void
2321 2321 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2322 2322 struct mrsas_cmd *cmd)
2323 2323 {
2324 2324 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2325 2325 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2326 2326 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2327 2327 uint32_t index;
2328 2328 ddi_acc_handle_t acc_handle =
2329 2329 instance->mpi2_frame_pool_dma_obj.acc_handle;
2330 2330
2331 2331 if (!instance->tbolt) {
2332 2332 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled.\n"));
2333 2333 return;
2334 2334 }
2335 2335
2336 2336 index = cmd->index;
2337 2337
2338 2338 ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2339 2339
2340 2340 if (!ReqDescUnion) {
2341 2341 con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2342 2342 return;
2343 2343 }
2344 2344
2345 2345 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2346 2346
2347 2347 ReqDescUnion->Words = 0;
2348 2348
2349 2349 ReqDescUnion->SCSIIO.RequestFlags =
2350 2350 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2351 2351 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2352 2352
2353 2353 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2354 2354
2355 2355 cmd->request_desc = ReqDescUnion;
2356 2356
2357 2357 /* get raid message frame pointer */
2358 2358 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2359 2359
2360 2360 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2361 2361 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2362 2362 &scsi_raid_io->SGL.IeeeChain;
2363 2363 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2364 2364 ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2365 2365 }
2366 2366
2367 2367 ddi_put8(acc_handle, &scsi_raid_io->Function,
2368 2368 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2369 2369
2370 2370 ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2371 2371 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2372 2372
2373 2373 ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2374 2374 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2375 2375
2376 2376 ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2377 2377 cmd->sense_phys_addr1);
2378 2378
2379 2379
2380 2380 scsi_raid_io_sgl_ieee =
2381 2381 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2382 2382
2383 2383 ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2384 2384 (U64)cmd->frame_phys_addr);
2385 2385
2386 2386 ddi_put8(acc_handle,
2387 2387 &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2388 2388 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2389 2389 	/* LSI hardcoded 1024 here instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2390 2390 ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2391 2391
2392 2392 con_log(CL_ANN1, (CE_NOTE,
2393 2393 "[MFI CMD PHY ADDRESS]:%" PRIx64,
2394 2394 scsi_raid_io_sgl_ieee->Address));
2395 2395 con_log(CL_ANN1, (CE_NOTE,
2396 2396 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2397 2397 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2398 2398 scsi_raid_io_sgl_ieee->Flags));
2399 2399 }
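
The SGLOffset0 and ChainOffset stores above encode the same byte offset in different units: MPI2 counts SGLOffset0 in 4-byte words and chain offsets in 16-byte chunks, hence the /4 and /16. A tiny demonstration of that unit conversion with a toy struct; toy_request is not the real MPI2 layout.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-in for the MPI2 request frame; field sizes illustrative. */
    struct toy_request {
            uint8_t  header[128];   /* everything before the SGL */
            uint64_t sgl[8];        /* scatter-gather list */
    };

    int
    main(void)
    {
            size_t off = offsetof(struct toy_request, sgl);

            /* SGL offsets count 4-byte words; chain offsets, 16-byte chunks. */
            printf("SGL at byte %zu = word %zu = chunk %zu\n",
                off, off / 4, off / 16);
            return (0);
    }
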
2400 2400
2401 2401
2402 2402 void
2403 2403 tbolt_complete_cmd(struct mrsas_instance *instance,
2404 2404 struct mrsas_cmd *cmd)
2405 2405 {
2406 2406 uint8_t status;
2407 2407 uint8_t extStatus;
2408 2408 uint8_t arm;
2409 2409 struct scsa_cmd *acmd;
2410 2410 struct scsi_pkt *pkt;
2411 2411 struct scsi_arq_status *arqstat;
2412 2412 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2413 2413 LD_LOAD_BALANCE_INFO *lbinfo;
2414 2414 ddi_acc_handle_t acc_handle =
2415 2415 instance->mpi2_frame_pool_dma_obj.acc_handle;
2416 2416
2417 2417 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2418 2418
2419 2419 status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2420 2420 extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2421 2421
2422 2422 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2423 2423 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2424 2424
2425 2425 if (status != MFI_STAT_OK) {
2426 2426 con_log(CL_ANN, (CE_WARN,
2427 2427 "IO Cmd Failed SMID %x", cmd->SMID));
2428 2428 } else {
2429 2429 con_log(CL_ANN, (CE_NOTE,
2430 2430 "IO Cmd Success SMID %x", cmd->SMID));
2431 2431 }
2432 2432
2433 2433 /* regular commands */
2434 2434
2435 2435 switch (ddi_get8(acc_handle, &scsi_raid_io->Function)) {
2436 2436
2437 2437 case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2438 2438 acmd = (struct scsa_cmd *)cmd->cmd;
2439 2439 lbinfo = &instance->load_balance_info[acmd->device_id];
2440 2440
2441 2441 if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2442 2442 arm = lbinfo->raid1DevHandle[0] ==
2443 2443 scsi_raid_io->DevHandle ? 0 : 1;
2444 2444
2445 2445 lbinfo->scsi_pending_cmds[arm]--;
2446 2446 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2447 2447 }
2448 2448 con_log(CL_DLEVEL3, (CE_NOTE,
2449 2449 "FastPath IO Completion Success "));
2450 2450 /* FALLTHRU */
2451 2451
2452 2452 case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
2453 2453 acmd = (struct scsa_cmd *)cmd->cmd;
2454 2454 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2455 2455
2456 2456 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2457 2457 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2458 2458 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2459 2459 acmd->cmd_dma_offset, acmd->cmd_dma_len,
2460 2460 DDI_DMA_SYNC_FORCPU);
2461 2461 }
2462 2462 }
2463 2463
2464 2464 pkt->pkt_reason = CMD_CMPLT;
2465 2465 pkt->pkt_statistics = 0;
2466 2466 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2467 2467 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2468 2468
2469 2469 con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2470 2470 "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2471 2471 ((acmd->islogical) ? "LD" : "PD"),
2472 2472 acmd->cmd_dmacount, cmd->SMID, status));
2473 2473
2474 2474 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2475 2475 struct scsi_inquiry *inq;
2476 2476
2477 2477 if (acmd->cmd_dmacount != 0) {
2478 2478 bp_mapin(acmd->cmd_buf);
2479 2479 inq = (struct scsi_inquiry *)
2480 2480 acmd->cmd_buf->b_un.b_addr;
2481 2481
2482 2482 /* don't expose physical drives to OS */
2483 2483 if (acmd->islogical &&
2484 2484 (status == MFI_STAT_OK)) {
2485 2485 display_scsi_inquiry((caddr_t)inq);
2486 2486 #ifdef PDSUPPORT
2487 2487 } else if ((status == MFI_STAT_OK) &&
2488 2488 inq->inq_dtype == DTYPE_DIRECT) {
2489 2489 display_scsi_inquiry((caddr_t)inq);
2490 2490 #endif
2491 2491 } else {
2492 2492 /* for physical disk */
2493 2493 status = MFI_STAT_DEVICE_NOT_FOUND;
2494 2494 }
2495 2495 }
2496 2496 }
2497 2497
2498 2498 switch (status) {
2499 2499 case MFI_STAT_OK:
2500 2500 pkt->pkt_scbp[0] = STATUS_GOOD;
2501 2501 break;
2502 2502 case MFI_STAT_LD_CC_IN_PROGRESS:
2503 2503 case MFI_STAT_LD_RECON_IN_PROGRESS:
2504 2504 pkt->pkt_scbp[0] = STATUS_GOOD;
2505 2505 break;
2506 2506 case MFI_STAT_LD_INIT_IN_PROGRESS:
2507 2507 pkt->pkt_reason = CMD_TRAN_ERR;
2508 2508 break;
2509 2509 case MFI_STAT_SCSI_IO_FAILED:
2510 2510 cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
2511 2511 pkt->pkt_reason = CMD_TRAN_ERR;
2512 2512 break;
2513 2513 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2514 2514 con_log(CL_ANN, (CE_WARN,
2515 2515 "tbolt_complete_cmd: scsi_done with error"));
2516 2516
2517 2517 pkt->pkt_reason = CMD_CMPLT;
2518 2518 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2519 2519
2520 2520 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2521 2521 con_log(CL_ANN,
2522 2522 (CE_WARN, "TEST_UNIT_READY fail"));
2523 2523 } else {
2524 2524 pkt->pkt_state |= STATE_ARQ_DONE;
2525 2525 arqstat = (void *)(pkt->pkt_scbp);
2526 2526 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2527 2527 arqstat->sts_rqpkt_resid = 0;
2528 2528 arqstat->sts_rqpkt_state |=
2529 2529 STATE_GOT_BUS | STATE_GOT_TARGET
2530 2530 | STATE_SENT_CMD
2531 2531 | STATE_XFERRED_DATA;
2532 2532 *(uint8_t *)&arqstat->sts_rqpkt_status =
2533 2533 STATUS_GOOD;
2534 2534 con_log(CL_ANN1,
2535 2535 (CE_NOTE, "Copying Sense data %x",
2536 2536 cmd->SMID));
2537 2537
2538 2538 ddi_rep_get8(acc_handle,
2539 2539 (uint8_t *)&(arqstat->sts_sensedata),
2540 2540 cmd->sense1,
2541 2541 sizeof (struct scsi_extended_sense),
2542 2542 DDI_DEV_AUTOINCR);
2543 2543
2544 2544 }
2545 2545 break;
2546 2546 case MFI_STAT_LD_OFFLINE:
2547 2547 cmn_err(CE_WARN,
2548 2548 "tbolt_complete_cmd: ld offline "
2549 2549 "CDB[0]=0x%x targetId=0x%x devhandle=0x%x\n",
2550 2550 /* UNDO: */
2551 2551 ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2552 2552
2553 2553 ddi_get16(acc_handle,
2554 2554 &scsi_raid_io->RaidContext.ldTargetId),
2555 2555
2556 2556 ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2557 2557
2558 2558 pkt->pkt_reason = CMD_DEV_GONE;
2559 2559 pkt->pkt_statistics = STAT_DISCON;
2560 2560 break;
2561 2561 case MFI_STAT_DEVICE_NOT_FOUND:
2562 2562 con_log(CL_ANN, (CE_CONT,
2563 2563 "tbolt_complete_cmd: device not found error"));
2564 2564 pkt->pkt_reason = CMD_DEV_GONE;
2565 2565 pkt->pkt_statistics = STAT_DISCON;
2566 2566 break;
2567 2567
2568 2568 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2569 2569 pkt->pkt_state |= STATE_ARQ_DONE;
2570 2570 pkt->pkt_reason = CMD_CMPLT;
2571 2571 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2572 2572
2573 2573 arqstat = (void *)(pkt->pkt_scbp);
2574 2574 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2575 2575 arqstat->sts_rqpkt_resid = 0;
2576 2576 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2577 2577 | STATE_GOT_TARGET | STATE_SENT_CMD
2578 2578 | STATE_XFERRED_DATA;
2579 2579 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2580 2580
2581 2581 arqstat->sts_sensedata.es_valid = 1;
2582 2582 arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2583 2583 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2584 2584
2585 2585 /*
2586 2586 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2587 2587 * ASC: 0x21h; ASCQ: 0x00h;
2588 2588 */
2589 2589 arqstat->sts_sensedata.es_add_code = 0x21;
2590 2590 arqstat->sts_sensedata.es_qual_code = 0x00;
2591 2591 break;
2592 2592 case MFI_STAT_INVALID_CMD:
2593 2593 case MFI_STAT_INVALID_DCMD:
2594 2594 case MFI_STAT_INVALID_PARAMETER:
2595 2595 case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2596 2596 default:
2597 2597 cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!");
2598 2598 pkt->pkt_reason = CMD_TRAN_ERR;
2599 2599
2600 2600 break;
2601 2601 }
2602 2602
2603 2603 atomic_add_16(&instance->fw_outstanding, (-1));
2604 2604
2605 2605 (void) mrsas_common_check(instance, cmd);
2606 2606 if (acmd->cmd_dmahandle) {
2607 2607 if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2608 2608 DDI_SUCCESS) {
2609 2609 ddi_fm_service_impact(instance->dip,
2610 2610 DDI_SERVICE_UNAFFECTED);
2611 2611 pkt->pkt_reason = CMD_TRAN_ERR;
2612 2612 pkt->pkt_statistics = 0;
2613 2613 }
2614 2614 }
2615 2615
2616 2616 /* Call the callback routine */
2617 2617 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2618 2618 (*pkt->pkt_comp)(pkt);
2619 2619
2620 2620 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2621 2621
2622 2622 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2623 2623
2624 2624 ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2625 2625
2626 2626 return_raid_msg_pkt(instance, cmd);
2627 2627 break;
2628 2628 }
2629 2629 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
2630 2630
2631 2631 if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2632 2632 cmd->frame->dcmd.mbox.b[1] == 1) {
2633 2633
2634 2634 mutex_enter(&instance->sync_map_mtx);
2635 2635
2636 2636 con_log(CL_ANN, (CE_NOTE,
2637 2637 "LDMAP sync command SMID RECEIVED 0x%X",
2638 2638 cmd->SMID));
2639 2639 if (cmd->frame->hdr.cmd_status != 0) {
2640 2640 cmn_err(CE_WARN,
2641 2641 "map sync failed, status = 0x%x.\n",
2642 2642 cmd->frame->hdr.cmd_status);
2643 2643 } else {
2644 2644 instance->map_id++;
2645 2645 cmn_err(CE_NOTE,
2646 2646 "map sync received, switched map_id to %"
2647 2647 PRIu64 " \n", instance->map_id);
2648 2648 }
2649 2649
2650 2650 if (MR_ValidateMapInfo(instance->ld_map[
2651 2651 (instance->map_id & 1)],
2652 2652 instance->load_balance_info)) {
2653 2653 instance->fast_path_io = 1;
2654 2654 } else {
2655 2655 instance->fast_path_io = 0;
2656 2656 }
2657 2657
2658 2658 con_log(CL_ANN, (CE_NOTE,
2659 2659 "instance->fast_path_io %d \n",
2660 2660 instance->fast_path_io));
2661 2661
2662 2662 instance->unroll.syncCmd = 0;
2663 2663
2664 2664 if (instance->map_update_cmd == cmd) {
2665 2665 return_raid_msg_pkt(instance, cmd);
2666 2666 atomic_add_16(&instance->fw_outstanding, (-1));
2667 2667 (void) mrsas_tbolt_sync_map_info(instance);
2668 2668 }
2669 2669
2670 2670 cmn_err(CE_NOTE, "LDMAP sync completed.\n");
2671 2671 mutex_exit(&instance->sync_map_mtx);
2672 2672 break;
2673 2673 }
2674 2674
2675 2675 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2676 2676 con_log(CL_ANN1, (CE_CONT,
2677 2677 "AEN command SMID RECEIVED 0x%X",
2678 2678 cmd->SMID));
2679 2679 if ((instance->aen_cmd == cmd) &&
2680 2680 (instance->aen_cmd->abort_aen)) {
2681 2681 con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2682 2682 "aborted_aen returned"));
2683 2683 } else {
2684 2684 atomic_add_16(&instance->fw_outstanding, (-1));
2685 2685 service_mfi_aen(instance, cmd);
2686 2686 }
2687 2687 }
2688 2688
2689 2689 if (cmd->sync_cmd == MRSAS_TRUE) {
2690 2690 con_log(CL_ANN1, (CE_CONT,
2691 2691 "Sync-mode Command Response SMID RECEIVED 0x%X",
2692 2692 cmd->SMID));
2693 2693
2694 2694 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2695 2695 } else {
2696 2696 con_log(CL_ANN, (CE_CONT,
2697 2697 "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2698 2698 cmd->SMID));
2699 2699 }
2700 2700 break;
2701 2701 default:
2702 2702 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2703 2703 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2704 2704
2705 2705 /* free message */
2706 2706 con_log(CL_ANN,
2707 2707 (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
2708 2708 break;
2709 2709 }
2710 2710 }
2711 2711
2712 2712 uint_t
2713 2713 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2714 2714 {
2715 2715 uint8_t replyType;
2716 2716 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2717 2717 Mpi2ReplyDescriptorsUnion_t *desc;
2718 2718 uint16_t smid;
2719 2719 union desc_value d_val;
2720 2720 struct mrsas_cmd *cmd;
2721 2721
2722 2722 struct mrsas_header *hdr;
2723 2723 struct scsi_pkt *pkt;
2724 2724
2725 2725 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2726 2726 0, 0, DDI_DMA_SYNC_FORDEV);
2727 2727
2728 2728 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2729 2729 0, 0, DDI_DMA_SYNC_FORCPU);
2730 2730
2731 2731 desc = instance->reply_frame_pool;
2732 2732 desc += instance->reply_read_index;
2733 2733
2734 2734 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2735 2735 replyType = replyDesc->ReplyFlags &
2736 2736 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2737 2737
2738 2738 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2739 2739 return (DDI_INTR_UNCLAIMED);
2740 2740
2741 2741 if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2742 2742 != DDI_SUCCESS) {
2743 2743 mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2744 2744 ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2745 2745 con_log(CL_ANN1,
2746 2746 (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2747 2747 	    "FMA check failed, returning DDI_INTR_CLAIMED"));
2748 2748 return (DDI_INTR_CLAIMED);
2749 2749 }
2750 2750
2751 2751 con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64 " \n",
2752 2752 (void *)desc, desc->Words));
2753 2753
2754 2754 d_val.word = desc->Words;
2755 2755
2756 2756
2757 2757 /* Read Reply descriptor */
2758 2758 while ((d_val.u1.low != 0xffffffff) &&
2759 2759 (d_val.u1.high != 0xffffffff)) {
2760 2760
2761 2761 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2762 2762 0, 0, DDI_DMA_SYNC_FORCPU);
2763 2763
2764 2764 smid = replyDesc->SMID;
2765 2765
2766 2766 if (!smid || smid > instance->max_fw_cmds + 1) {
2767 2767 con_log(CL_ANN1, (CE_NOTE,
2768 2768 "Reply Desc at Break = %p Words = %" PRIx64 " \n",
2769 2769 (void *)desc, desc->Words));
2770 2770 break;
2771 2771 }
2772 2772
2773 2773 cmd = instance->cmd_list[smid - 1];
2774 2774 if (!cmd) {
2775 2775 con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2776 2776 "outstanding_cmd: Invalid command "
2777 2777 			    " or Poll command received in completion path\n"));
2778 2778 } else {
2779 2779 mutex_enter(&instance->cmd_pend_mtx);
2780 2780 if (cmd->sync_cmd == MRSAS_TRUE) {
2781 2781 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2782 2782 if (hdr) {
2783 2783 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2784 2784 "tbolt_process_outstanding_cmd:"
2785 2785 " mlist_del_init(&cmd->list).\n"));
2786 2786 mlist_del_init(&cmd->list);
2787 2787 }
2788 2788 } else {
2789 2789 pkt = cmd->pkt;
2790 2790 if (pkt) {
2791 2791 con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2792 2792 "tbolt_process_outstanding_cmd:"
2793 2793 "mlist_del_init(&cmd->list).\n"));
2794 2794 mlist_del_init(&cmd->list);
2795 2795 }
2796 2796 }
2797 2797
2798 2798 mutex_exit(&instance->cmd_pend_mtx);
2799 2799
2800 2800 tbolt_complete_cmd(instance, cmd);
2801 2801 }
2802 - /* set it back to all 0xfffffffff. */
2803 - desc->Words = (uint64_t)~0;
2802 + /* set it back to all 1s. */
2803 + desc->Words = -1LL;
2804 2804
2805 2805 instance->reply_read_index++;
2806 2806
2807 2807 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2808 2808 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2809 2809 instance->reply_read_index = 0;
2810 2810 }
2811 2811
2812 2812 /* Get the next reply descriptor */
2813 2813 if (!instance->reply_read_index)
2814 2814 desc = instance->reply_frame_pool;
2815 2815 else
2816 2816 desc++;
2817 2817
2818 2818 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2819 2819
2820 2820 d_val.word = desc->Words;
2821 2821
2822 2822 con_log(CL_ANN1, (CE_NOTE,
2823 2823 "Next Reply Desc = %p Words = %" PRIx64 "\n",
2824 2824 (void *)desc, desc->Words));
2825 2825
2826 2826 replyType = replyDesc->ReplyFlags &
2827 2827 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2828 2828
2829 2829 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2830 2830 break;
2831 2831
2832 2832 } /* End of while loop. */
2833 2833
2834 2834 /* update replyIndex to FW */
2835 2835 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2836 2836
2837 2837
2838 2838 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2839 2839 0, 0, DDI_DMA_SYNC_FORDEV);
2840 2840
2841 2841 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2842 2842 0, 0, DDI_DMA_SYNC_FORCPU);
2843 2843 return (DDI_INTR_CLAIMED);
2844 2844 }
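
The loop above consumes the reply ring until it hits a descriptor whose 64 bits are all 1s, returning each slot to that unused value and wrapping read_index at the queue depth. A self-contained model of that ring discipline; sizes and names here are illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define RING_DEPTH 8
    #define UNUSED_DESC UINT64_MAX          /* all 1s marks an empty slot */

    static uint64_t ring[RING_DEPTH];
    static unsigned read_index;

    /* Consume completed descriptors until an unused slot is reached. */
    static unsigned
    drain_ring(void)
    {
            unsigned handled = 0;

            while (ring[read_index] != UNUSED_DESC) {
                    /* ... complete the command for this descriptor ... */
                    ring[read_index] = UNUSED_DESC; /* hand the slot back */
                    if (++read_index >= RING_DEPTH) /* wrap around */
                            read_index = 0;
                    handled++;
            }
            return (handled);
    }

    int
    main(void)
    {
            unsigned i;

            for (i = 0; i < RING_DEPTH; i++)
                    ring[i] = UNUSED_DESC;
            ring[0] = 0x1234;               /* one fake completion */
            printf("handled %u\n", drain_ring());
            return (0);
    }
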
2845 2845
2846 2846
2847 2847
2848 2848
2849 2849 /*
2850 2850  * tbolt_complete_cmd_in_sync_mode - Completes an internal command
2851 2851 * @instance: Adapter soft state
2852 2852 * @cmd: Command to be completed
2853 2853 *
2854 2854  * The tbolt_issue_cmd_in_sync_mode() function waits for a command to
2855 2855  * complete after issuing it. This function wakes up that waiting
2856 2856  * routine by calling cv_broadcast() on the condition variable.
2857 2857 */
2858 2858 void
2859 2859 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2860 2860 struct mrsas_cmd *cmd)
2861 2861 {
2862 2862
2863 2863 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2864 2864 &cmd->frame->io.cmd_status);
2865 2865
2866 2866 cmd->sync_cmd = MRSAS_FALSE;
2867 2867
2868 2868 mutex_enter(&instance->int_cmd_mtx);
2869 2869 if (cmd->cmd_status == ENODATA) {
2870 2870 cmd->cmd_status = 0;
2871 2871 }
2872 2872 cv_broadcast(&instance->int_cmd_cv);
2873 2873 mutex_exit(&instance->int_cmd_mtx);
2874 2874
2875 2875 }
2876 2876
2877 2877 /*
2878 2878 * mrsas_tbolt_get_ld_map_info - Returns ld_map structure
2879 2879 * instance: Adapter soft state
2880 2880 *
2881 2881  * Issues an internal command (DCMD) to fetch the FW's logical drive
2882 2882  * (RAID) map. The map is validated afterwards to decide whether
2883 2883  * fast-path I/O is possible.
2884 2884 */
2885 2885 int
2886 2886 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2887 2887 {
2888 2888 int ret = 0;
2889 2889 struct mrsas_cmd *cmd = NULL;
2890 2890 struct mrsas_dcmd_frame *dcmd;
2891 2891 MR_FW_RAID_MAP_ALL *ci;
2892 2892 uint32_t ci_h = 0;
2893 2893 U32 size_map_info;
2894 2894
2895 2895 cmd = get_raid_msg_pkt(instance);
2896 2896
2897 2897 if (cmd == NULL) {
2898 2898 cmn_err(CE_WARN,
2899 2899 "Failed to get a cmd from free-pool in get_ld_map_info()");
2900 2900 return (DDI_FAILURE);
2901 2901 }
2902 2902
2903 2903 dcmd = &cmd->frame->dcmd;
2904 2904
2905 2905 size_map_info = sizeof (MR_FW_RAID_MAP) +
2906 2906 (sizeof (MR_LD_SPAN_MAP) *
2907 2907 (MAX_LOGICAL_DRIVES - 1));
2908 2908
2909 2909 con_log(CL_ANN, (CE_NOTE,
2910 2910 "size_map_info : 0x%x", size_map_info));
2911 2911
2912 2912 ci = instance->ld_map[(instance->map_id & 1)];
2913 2913 ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2914 2914
2915 2915 if (!ci) {
2916 2916 cmn_err(CE_WARN, "Failed to alloc mem for ld_map_info");
2917 2917 return_raid_msg_pkt(instance, cmd);
2918 2918 return (-1);
2919 2919 }
2920 2920
2921 2921 (void) memset(ci, 0, sizeof (*ci));
2922 2922 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2923 2923
2924 2924 dcmd->cmd = MFI_CMD_OP_DCMD;
2925 2925 dcmd->cmd_status = 0xFF;
2926 2926 dcmd->sge_count = 1;
2927 2927 dcmd->flags = MFI_FRAME_DIR_READ;
2928 2928 dcmd->timeout = 0;
2929 2929 dcmd->pad_0 = 0;
2930 2930 dcmd->data_xfer_len = size_map_info;
2931 2931 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2932 2932 dcmd->sgl.sge32[0].phys_addr = ci_h;
2933 2933 dcmd->sgl.sge32[0].length = size_map_info;
2934 2934
2935 2935
2936 2936 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2937 2937
2938 2938 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2939 2939 ret = 0;
2940 2940 con_log(CL_ANN1, (CE_NOTE,
2941 2941 "Get LD Map Info success\n"));
2942 2942 } else {
2943 2943 cmn_err(CE_WARN,
2944 2944 "Get LD Map Info failed\n");
2945 2945 ret = -1;
2946 2946 }
2947 2947
2948 2948 return_raid_msg_pkt(instance, cmd);
2949 2949
2950 2950 return (ret);
2951 2951 }
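
The size_map_info arithmetic is the usual trailing-array idiom: MR_FW_RAID_MAP already contains one MR_LD_SPAN_MAP, so MAX_LOGICAL_DRIVES - 1 more are appended after it. A minimal sketch of the same sizing with toy types; span_map_t and raid_map_t are stand-ins, not the firmware structures.

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-ins: the real map struct ends in a 1-element array. */
    typedef struct { char data[64]; } span_map_t;
    typedef struct {
            unsigned total_size;
            unsigned ld_count;
            span_map_t span[1];     /* first of ld_count entries */
    } raid_map_t;

    int
    main(void)
    {
            unsigned max_lds = 64;
            /* One span_map is already inside raid_map_t; add max_lds - 1. */
            size_t sz = sizeof (raid_map_t) +
                sizeof (span_map_t) * (max_lds - 1);
            raid_map_t *map = calloc(1, sz);

            if (map == NULL)
                    return (1);
            printf("map buffer: %zu bytes\n", sz);
            free(map);
            return (0);
    }
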
2952 2952
2953 2953 void
2954 2954 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2955 2955 {
2956 2956 uint32_t i;
2957 2957 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2958 2958 union desc_value d_val;
2959 2959
2960 2960 reply_desc = instance->reply_frame_pool;
2961 2961
2962 2962 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2963 2963 d_val.word = reply_desc->Words;
2964 2964 con_log(CL_DLEVEL3, (CE_NOTE,
2965 2965 "i=%d, %x:%x",
2966 2966 i, d_val.u1.high, d_val.u1.low));
2967 2967 }
2968 2968 }
2969 2969
2970 2970 /*
2971 2971  * mrsas_tbolt_prepare_cdb - Build a 32-byte CDB for fast-path DIF I/O.
2972 2972  * @io_info: MegaRAID IO request packet pointer.
2973 2973  * @ref_tag: Reference tag for RD/WRPROTECT
2974 2974  *
2975 2975  * Prepares the variable-length CDB used for fast-path DIF I/O.
2976 2976 */
2977 2977 void
2978 2978 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2979 2979 struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2980 2980 U32 ref_tag)
2981 2981 {
2982 2982 uint16_t EEDPFlags;
2983 2983 uint32_t Control;
2984 2984 ddi_acc_handle_t acc_handle =
2985 2985 instance->mpi2_frame_pool_dma_obj.acc_handle;
2986 2986
2987 2987 /* Prepare 32-byte CDB if DIF is supported on this device */
2988 2988 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB\n"));
2989 2989
2990 2990 (void) memset(cdb, 0, 32);
2991 2991
2992 2992 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2993 2993
2994 2994
2995 2995 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2996 2996
2997 2997 if (io_info->isRead)
2998 2998 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2999 2999 else
3000 3000 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
3001 3001
3002 3002 	/* As in the Linux driver, which sets MEGASAS_RD_WR_PROTECT_CHECK_ALL. */
3003 3003 cdb[10] = MRSAS_RD_WR_PROTECT;
3004 3004
3005 3005 /* LOGICAL BLOCK ADDRESS */
3006 3006 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
3007 3007 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
3008 3008 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
3009 3009 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
3010 3010 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
3011 3011 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
3012 3012 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
3013 3013 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
3014 3014
3015 3015 /* Logical block reference tag */
3016 3016 ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
3017 3017 BE_32(ref_tag));
3018 3018
3019 3019 ddi_put16(acc_handle,
3020 3020 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
3021 3021
3022 3022 ddi_put32(acc_handle, &scsi_io_request->DataLength,
3023 3023 ((io_info->numBlocks)*512));
3024 3024 /* Specify 32-byte cdb */
3025 3025 ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
3026 3026
3027 3027 /* Transfer length */
3028 3028 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
3029 3029 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
3030 3030 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
3031 3031 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
3032 3032
3033 3033 /* set SCSI IO EEDPFlags */
3034 3034 EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
3035 3035 Control = ddi_get32(acc_handle, &scsi_io_request->Control);
3036 3036
3037 3037 /* set SCSI IO EEDPFlags bits */
3038 3038 if (io_info->isRead) {
3039 3039 /*
3040 3040 * For READ commands, the EEDPFlags shall be set to specify to
3041 3041 * Increment the Primary Reference Tag, to Check the Reference
3042 3042 * Tag, and to Check and Remove the Protection Information
3043 3043 * fields.
3044 3044 */
3045 3045 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3046 3046 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
3047 3047 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
3048 3048 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
3049 3049 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
3050 3050 } else {
3051 3051 /*
3052 3052 * For WRITE commands, the EEDPFlags shall be set to specify to
3053 3053 * Increment the Primary Reference Tag, and to Insert
3054 3054 * Protection Information fields.
3055 3055 */
3056 3056 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3057 3057 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
3058 3058 }
3059 3059 Control |= (0x4 << 26);
3060 3060
3061 3061 ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
3062 3062 ddi_put32(acc_handle, &scsi_io_request->Control, Control);
3063 3063 ddi_put32(acc_handle,
3064 3064 &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
3065 3065 }
3066 3066
3067 3067
3068 3068 /*
3069 3069 * mrsas_tbolt_set_pd_lba - Sets PD LBA
3070 3070 * @cdb: CDB
3071 3071 * @cdb_len: cdb length
3072 3072 * @start_blk: Start block of IO
3073 3073 *
3074 3074 * Used to set the PD LBA in CDB for FP IOs
3075 3075 */
3076 3076 static void
3077 3077 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
3078 3078 U32 num_blocks)
3079 3079 {
3080 3080 U8 cdb_len = *cdb_len_ptr;
3081 3081 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
3082 3082
3083 3083 	/* Some drives don't support 16/12-byte CDBs; convert to 10. */
3084 3084 if (((cdb_len == 12) || (cdb_len == 16)) &&
3085 3085 (start_blk <= 0xffffffff)) {
3086 3086 if (cdb_len == 16) {
3087 3087 con_log(CL_ANN,
3088 3088 			    (CE_NOTE, "Converting READ/WRITE(16) to READ/WRITE(10)\n"));
3089 3089 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3090 3090 flagvals = cdb[1];
3091 3091 groupnum = cdb[14];
3092 3092 control = cdb[15];
3093 3093 } else {
3094 3094 con_log(CL_ANN,
3095 3095 			    (CE_NOTE, "Converting READ/WRITE(12) to READ/WRITE(10)\n"));
3096 3096 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3097 3097 flagvals = cdb[1];
3098 3098 groupnum = cdb[10];
3099 3099 control = cdb[11];
3100 3100 }
3101 3101
3102 3102 		(void) memset(cdb, 0, 16);	/* sizeof (cdb) is pointer size */
3103 3103
3104 3104 cdb[0] = opcode;
3105 3105 cdb[1] = flagvals;
3106 3106 cdb[6] = groupnum;
3107 3107 cdb[9] = control;
3108 3108 /* Set transfer length */
3109 3109 cdb[8] = (U8)(num_blocks & 0xff);
3110 3110 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3111 3111 cdb_len = 10;
3112 3112 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3113 3113 /* Convert to 16 byte CDB for large LBA's */
3114 3114 con_log(CL_ANN,
3115 3115 (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB\n"));
3116 3116 switch (cdb_len) {
3117 3117 case 6:
3118 3118 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3119 3119 control = cdb[5];
3120 3120 break;
3121 3121 case 10:
3122 3122 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3123 3123 flagvals = cdb[1];
3124 3124 groupnum = cdb[6];
3125 3125 control = cdb[9];
3126 3126 break;
3127 3127 case 12:
3128 3128 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3129 3129 flagvals = cdb[1];
3130 3130 groupnum = cdb[10];
3131 3131 control = cdb[11];
3132 3132 break;
3133 3133 }
3134 3134
3135 3135 		(void) memset(cdb, 0, 16);	/* sizeof (cdb) is pointer size */
3136 3136
3137 3137 cdb[0] = opcode;
3138 3138 cdb[1] = flagvals;
3139 3139 cdb[14] = groupnum;
3140 3140 cdb[15] = control;
3141 3141
3142 3142 /* Transfer length */
3143 3143 cdb[13] = (U8)(num_blocks & 0xff);
3144 3144 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3145 3145 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3146 3146 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3147 3147
3148 3148 /* Specify 16-byte cdb */
3149 3149 cdb_len = 16;
3150 3150 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3151 3151 /* convert to 10 byte CDB */
3152 3152 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3153 3153 control = cdb[5];
3154 3154
3155 3155 		(void) memset(cdb, 0, 16);	/* sizeof (cdb) is pointer size */
3156 3156 cdb[0] = opcode;
3157 3157 cdb[9] = control;
3158 3158
3159 3159 /* Set transfer length */
3160 3160 cdb[8] = (U8)(num_blocks & 0xff);
3161 3161 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3162 3162
3163 3163 /* Specify 10-byte cdb */
3164 3164 cdb_len = 10;
3165 3165 }
3166 3166
3167 3167
3168 3168 	/* Normal case falls through to here: just load the LBA. */
3169 3169 switch (cdb_len) {
3170 3170 case 6:
3171 3171 {
3172 3172 U8 val = cdb[1] & 0xE0;
3173 3173 cdb[3] = (U8)(start_blk & 0xff);
3174 3174 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3175 3175 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3176 3176 break;
3177 3177 }
3178 3178 case 10:
3179 3179 cdb[5] = (U8)(start_blk & 0xff);
3180 3180 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3181 3181 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3182 3182 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3183 3183 break;
3184 3184 case 12:
3185 3185 cdb[5] = (U8)(start_blk & 0xff);
3186 3186 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3187 3187 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3188 3188 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3189 3189 break;
3190 3190
3191 3191 case 16:
3192 3192 cdb[9] = (U8)(start_blk & 0xff);
3193 3193 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3194 3194 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3195 3195 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3196 3196 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3197 3197 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3198 3198 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3199 3199 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3200 3200 break;
3201 3201 }
3202 3202
3203 3203 *cdb_len_ptr = cdb_len;
3204 3204 }
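
All of the CDB forms above store the LBA big-endian, most significant byte first; the 16-byte case uses bytes 2 through 9. A round-trip check of that packing; pack_lba16/unpack_lba16 are illustrative helpers, not driver functions.

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 64-bit LBA big-endian into bytes 2..9 of a 16-byte CDB. */
    static void
    pack_lba16(uint8_t cdb[16], uint64_t lba)
    {
            int i;

            for (i = 0; i < 8; i++)
                    cdb[2 + i] = (uint8_t)(lba >> (56 - 8 * i));
    }

    /* Recover the LBA from the same bytes. */
    static uint64_t
    unpack_lba16(const uint8_t cdb[16])
    {
            uint64_t lba = 0;
            int i;

            for (i = 0; i < 8; i++)
                    lba = (lba << 8) | cdb[2 + i];
            return (lba);
    }

    int
    main(void)
    {
            uint8_t cdb[16] = { 0 };
            uint64_t lba = 0x123456789abcdef0ULL;

            pack_lba16(cdb, lba);
            printf("round trip %s\n",
                unpack_lba16(cdb) == lba ? "ok" : "FAILED");
            return (0);
    }
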
3205 3205
3206 3206
3207 3207 static int
3208 3208 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3209 3209 {
3210 3210 MR_FW_RAID_MAP_ALL *ld_map;
3211 3211
3212 3212 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3213 3213
3214 3214 ld_map = instance->ld_map[(instance->map_id & 1)];
3215 3215
3216 3216 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d\n",
3217 3217 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3218 3218
3219 3219 if (MR_ValidateMapInfo(instance->ld_map[
3220 3220 (instance->map_id & 1)], instance->load_balance_info)) {
3221 3221 con_log(CL_ANN,
3222 3222 (CE_CONT, "MR_ValidateMapInfo success"));
3223 3223
3224 3224 instance->fast_path_io = 1;
3225 3225 con_log(CL_ANN,
3226 3226 (CE_NOTE, "instance->fast_path_io %d \n",
3227 3227 instance->fast_path_io));
3228 3228
3229 3229 return (DDI_SUCCESS);
3230 3230 }
3231 3231
3232 3232 }
3233 3233
3234 3234 instance->fast_path_io = 0;
3235 3235 cmn_err(CE_WARN, "mrsas_tbolt_check_map_info: LD map get/validate failed");
3236 3236 con_log(CL_ANN, (CE_NOTE,
3237 3237 "instance->fast_path_io %d \n", instance->fast_path_io));
3238 3238
3239 3239 return (DDI_FAILURE);
3240 3240 }
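/*
 * Reviewer note: the driver double-buffers the FW RAID map and
 * ping-pongs on the low bit of map_id, so a fresh map can be fetched
 * while the previous copy is still in use. The indexing pattern:
 */
#if 0	/* sketch only */
MR_FW_RAID_MAP_ALL *cur, *prev;

cur = instance->ld_map[instance->map_id & 1];		/* active map */
prev = instance->ld_map[(instance->map_id - 1) & 1];	/* spare buffer */
#endif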
3241 3241
3242 3242 /*
3243 3243  * Marks the HBA as bad. Called either when an I/O packet
3244 3244  * times out even after 3 FW resets, or when the FW is found
3245 3245  * faulted even after 3 consecutive resets.
3246 3246  */
3247 3247
3248 3248 void
3249 3249 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3250 3250 {
3251 3251 cmn_err(CE_WARN, "TBOLT Kill adapter called\n");
3252 3252
3253 3253 if (instance->deadadapter == 1)
3254 3254 return;
3255 3255
3256 3256 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3257 3257 "Writing to doorbell with MFI_STOP_ADP "));
3258 3258 mutex_enter(&instance->ocr_flags_mtx);
3259 3259 instance->deadadapter = 1;
3260 3260 mutex_exit(&instance->ocr_flags_mtx);
3261 3261 instance->func_ptr->disable_intr(instance);
3262 3262 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3263 3263 /* Flush */
3264 3264 (void) RD_RESERVED0_REGISTER(instance);
3265 3265
3266 3266 (void) mrsas_print_pending_cmds(instance);
3267 3267 (void) mrsas_complete_pending_cmds(instance);
3268 3268 }
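/*
 * Reviewer note: the discarded read-back after WR_RESERVED0_REGISTER
 * is the standard flush for posted PCI writes - a read is non-posted,
 * so it cannot complete until the preceding write has reached the
 * device. Generic shape of the idiom (handle/reg_addr hypothetical):
 */
#if 0	/* sketch only */
ddi_put32(handle, reg_addr, MFI_STOP_ADP);	/* posted write */
(void) ddi_get32(handle, reg_addr);		/* read forces it out */
#endif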
3269 3269
3270 3270 void
3271 3271 mrsas_reset_reply_desc(struct mrsas_instance *instance)
3272 3272 {
3273 3273 int i;
3274 3274 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3275 3275 instance->reply_read_index = 0;
3276 3276
3277 3277 /* Initialize every reply descriptor to the unused sentinel ~0. */
3278 3278 reply_desc = instance->reply_frame_pool;
3279 3279
3280 3280 for (i = 0; i < instance->reply_q_depth; i++) {
3281 3281 reply_desc->Words = (uint64_t)~0;
3282 3282 reply_desc++;
3283 3283 }
3284 3284 }
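/*
 * Reviewer note: ~0 is the "empty" sentinel for reply descriptors, so
 * the interrupt path can spot new completions without a produced-count
 * register: any descriptor whose Words differ from ~0 was written by
 * FW. Sketch of the consumer side (process_reply is hypothetical):
 */
#if 0	/* sketch only */
MPI2_REPLY_DESCRIPTORS_UNION *desc =
    &instance->reply_frame_pool[instance->reply_read_index];

if (desc->Words != (uint64_t)~0) {
	process_reply(desc);		/* handle the completion */
	desc->Words = (uint64_t)~0;	/* mark the slot empty again */
}
#endif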
3285 3285
3286 3286 int
3287 3287 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3288 3288 {
3289 3289 uint32_t status = 0x00;
3290 3290 uint32_t retry = 0;
3291 3291 uint32_t cur_abs_reg_val;
3292 3292 uint32_t fw_state;
3293 3293 uint32_t abs_state;
3294 3294 uint32_t i;
3295 3295
3296 3296 con_log(CL_ANN, (CE_NOTE,
3297 3297 "mrsas_tbolt_reset_ppc entered\n"));
3298 3298
3299 3299 if (instance->deadadapter == 1) {
3300 3300 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3301 3301 "no more resets as HBA has been marked dead ");
3302 3302 return (DDI_FAILURE);
3303 3303 }
3304 3304
3305 3305 mutex_enter(&instance->ocr_flags_mtx);
3306 3306 instance->adapterresetinprogress = 1;
3307 3307 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3308 3308 "adapterresetinprogress flag set, time %llx", gethrtime()));
3309 3309 mutex_exit(&instance->ocr_flags_mtx);
3310 3310
3311 3311 instance->func_ptr->disable_intr(instance);
3312 3312
3313 3313 /* Delay to let in-flight ioctl and I/O commands complete. */
3314 3314 for (i = 0; i < 3000; i++) {
3315 3315 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3316 3316 }
3317 3317
3318 3318 instance->reply_read_index = 0;
3319 3319
3320 3320 retry_reset:
3321 3321 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3322 3322 "Resetting TBOLT"));
3323 3323
3324 3324 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3325 3325 WR_TBOLT_IB_WRITE_SEQ(4, instance);
3326 3326 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3327 3327 WR_TBOLT_IB_WRITE_SEQ(2, instance);
3328 3328 WR_TBOLT_IB_WRITE_SEQ(7, instance);
3329 3329 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3330 3330 con_log(CL_ANN1, (CE_NOTE,
3331 3331 "mrsas_tbolt_reset_ppc: magic number written "
3332 3332 "to write sequence register\n"));
3333 3333 delay(100 * drv_usectohz(MILLISEC));
3334 3334 status = RD_TBOLT_HOST_DIAG(instance);
3335 3335 con_log(CL_ANN1, (CE_NOTE,
3336 3336 "mrsas_tbolt_reset_ppc: host diag register read OK "
3337 3337 "after the unlock sequence\n"));
3338 3338
3339 3339 while (status & DIAG_TBOLT_RESET_ADAPTER) {
3340 3340 delay(100 * drv_usectohz(MILLISEC));
3341 3341 status = RD_TBOLT_HOST_DIAG(instance);
3342 3342 if (retry++ == 100) {
3343 3343 cmn_err(CE_WARN,
3344 3344 "mrsas_tbolt_reset_ppc: reset adapter bit "
3345 3345 "already set and not clearing; "
3346 3346 "retry count %d\n", retry);
3347 3347 return (DDI_FAILURE);
3348 3348 }
3349 3349 }
3350 3350
3351 3351 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3352 3352 delay(100 * drv_usectohz(MILLISEC));
3353 3353
3354 3354 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3355 3355 (uint8_t *)((uintptr_t)(instance)->regmap +
3356 3356 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3357 3357
3358 3358 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3359 3359 delay(100 * drv_usectohz(MILLISEC));
3360 3360 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3361 3361 (uint8_t *)((uintptr_t)(instance)->regmap +
3362 3362 RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3363 3363 if (retry++ == 100) {
3364 3364 /* Don't call kill adapter here; the */
3365 3365 /* reset adapter bit is cleared by firmware. */
3366 3366 /* mrsas_tbolt_kill_adapter(instance); */
3367 3367 cmn_err(CE_WARN,
3368 3368 "mr_sas %d: %s(): RESET FAILED; return failure!!!",
3369 3369 instance->instance, __func__);
3370 3370 return (DDI_FAILURE);
3371 3371 }
3372 3372 }
3373 3373
3374 3374 con_log(CL_ANN,
3375 3375 (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3376 3376 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3377 3377 "Calling mfi_state_transition_to_ready"));
3378 3378
3379 3379 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3380 3380 retry = 0;
3381 3381 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3382 3382 delay(100 * drv_usectohz(MILLISEC));
3383 3383 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3384 3384 }
3385 3385 if (abs_state <= MFI_STATE_FW_INIT) {
3386 3386 cmn_err(CE_WARN,
3387 3387 "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT, "
3388 3388 "state = 0x%x; RETRY RESET.\n", abs_state);
3389 3389 goto retry_reset;
3390 3390 }
3391 3391
3392 3392 /* Mark the HBA bad if the FW is faulted after 3 consecutive resets. */
3393 3393 if (mfi_state_transition_to_ready(instance) ||
3394 3394 debug_tbolt_fw_faults_after_ocr_g == 1) {
3395 3395 cur_abs_reg_val =
3396 3396 instance->func_ptr->read_fw_status_reg(instance);
3397 3397 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3398 3398
3399 3399 con_log(CL_ANN1, (CE_NOTE,
3400 3400 "mrsas_tbolt_reset_ppc: before fault injection: FW is not ready, "
3401 3401 "FW state = 0x%x", fw_state));
3402 3402 if (debug_tbolt_fw_faults_after_ocr_g == 1)
3403 3403 fw_state = MFI_STATE_FAULT;
3404 3404
3405 3405 con_log(CL_ANN,
3406 3406 (CE_NOTE, "mrsas_tbolt_reset_ppc: FW is not ready, "
3407 3407 "FW state = 0x%x", fw_state));
3408 3408
3409 3409 if (fw_state == MFI_STATE_FAULT) {
3410 3410 /* increment the count */
3411 3411 instance->fw_fault_count_after_ocr++;
3412 3412 if (instance->fw_fault_count_after_ocr
3413 3413 < MAX_FW_RESET_COUNT) {
3414 3414 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3415 3415 "FW is in fault after OCR, count %d; "
3416 3416 "retrying reset",
3417 3417 instance->fw_fault_count_after_ocr);
3418 3418 goto retry_reset;
3419 3419
3420 3420 } else {
3421 3421 cmn_err(CE_WARN, "mrsas %d: %s: "
3422 3422 "max reset count exceeded (>%d); "
3423 3423 "marking HBA bad, killing adapter",
3424 3424 instance->instance, __func__,
3425 3425 MAX_FW_RESET_COUNT);
3426 3426
3427 3427 mrsas_tbolt_kill_adapter(instance);
3428 3428 return (DDI_FAILURE);
3429 3429 }
3430 3430 }
3431 3431 }
3432 3432
3433 3433 /* reset the counter as FW is up after OCR */
3434 3434 instance->fw_fault_count_after_ocr = 0;
3435 3435
3436 3436 mrsas_reset_reply_desc(instance);
3437 3437
3438 3438
3439 3439 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3440 3440 "Calling mrsas_issue_init_mpi2"));
3441 3441 abs_state = mrsas_issue_init_mpi2(instance);
3442 3442 if (abs_state == (uint32_t)DDI_FAILURE) {
3443 3443 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3444 3444 "INIT failed; retrying reset");
3445 3445 goto retry_reset;
3446 3446 }
3447 3447 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3448 3448 "mrsas_issue_init_mpi2 Done"));
3449 3449
3450 3450 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3451 3451 "Calling mrsas_print_pending_cmds\n"));
3452 3452 (void) mrsas_print_pending_cmds(instance);
3453 3453 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3454 3454 "mrsas_print_pending_cmds done\n"));
3455 3455
3456 3456 instance->func_ptr->enable_intr(instance);
3457 3457 instance->fw_outstanding = 0;
3458 3458
3459 3459 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3460 3460 "Calling mrsas_issue_pending_cmds"));
3461 3461 (void) mrsas_issue_pending_cmds(instance);
3462 3462 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3463 3463 "issue_pending_cmds done.\n"));
3464 3464
3465 3465 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3466 3466 "Calling aen registration"));
3467 3467
3468 3468 instance->aen_cmd->retry_count_for_ocr = 0;
3469 3469 instance->aen_cmd->drv_pkt_time = 0;
3470 3470
3471 3471 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3472 3472
3473 3473 con_log(CL_ANN1, (CE_NOTE, "Unsetting adapterresetinprogress flag.\n"));
3474 3474 mutex_enter(&instance->ocr_flags_mtx);
3475 3475 instance->adapterresetinprogress = 0;
3476 3476 mutex_exit(&instance->ocr_flags_mtx);
3477 3477 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3478 3478 "adapterresetinprogress flag unset"));
3479 3479
3480 3480 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done\n"));
3481 3481 return (DDI_SUCCESS);
3482 3482
3483 3483 }
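/*
 * Reviewer note: the 0xF/4/B/2/7/D writes after retry_reset are the
 * MPT2-style key sequence that unlocks the host diagnostic register;
 * DIAG_TBOLT_RESET_ADAPTER only sticks once the full sequence has been
 * written. A table-driven equivalent of the unlock:
 */
#if 0	/* sketch only */
static const uint8_t wr_seq_key[] = { 0xF, 0x4, 0xB, 0x2, 0x7, 0xD };
uint_t i;

for (i = 0; i < sizeof (wr_seq_key); i++)
	WR_TBOLT_IB_WRITE_SEQ(wr_seq_key[i], instance);
#endif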
3484 3484
3485 3485
3486 3486 /*
3487 3487  * mrsas_tbolt_sync_map_info - Sends the driver's LD target map to FW
3488 3488  * @instance: Adapter soft state
3489 3489  *
3490 3490  * Issues an internal pended command (DCMD) carrying the LD target ids
3491 3491  * and sequence numbers. FW holds the command and completes it when
3492 3492  * the RAID map changes, signalling the driver to re-fetch the map.
3493 3493  */
3494 3494
3495 3495 static int
3496 3496 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3497 3497 {
3498 3498 int ret = 0, i;
3499 3499 struct mrsas_cmd *cmd = NULL;
3500 3500 struct mrsas_dcmd_frame *dcmd;
3501 3501 uint32_t size_sync_info, num_lds;
3502 3502 LD_TARGET_SYNC *ci = NULL;
3503 3503 MR_FW_RAID_MAP_ALL *map;
3504 3504 MR_LD_RAID *raid;
3505 3505 LD_TARGET_SYNC *ld_sync;
3506 3506 uint32_t ci_h = 0;
3507 3507 uint32_t size_map_info;
3508 3508
3509 3509 cmd = get_raid_msg_pkt(instance);
3510 3510
3511 3511 if (cmd == NULL) {
3512 3512 cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
3513 3513 "mrsas_tbolt_sync_map_info(). ");
3514 3514 return (DDI_FAILURE);
3515 3515 }
3516 3516
3517 3517 /* Clear the frame buffer and assign back the context id */
3518 3518 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3519 3519 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3520 3520 cmd->index);
3521 3521 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3522 3522
3523 3523
3524 3524 map = instance->ld_map[instance->map_id & 1];
3525 3525
3526 3526 num_lds = map->raidMap.ldCount;
3527 3527
3528 3528 dcmd = &cmd->frame->dcmd;
3529 3529
3530 3530 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3531 3531
3532 3532 con_log(CL_ANN, (CE_NOTE, "size_sync_info = 0x%x, ld count = 0x%x\n",
3533 3533 size_sync_info, num_lds));
3534 3534
3535 3535 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3536 3536
3537 3537 (void) memset(ci, 0, sizeof (MR_FW_RAID_MAP_ALL));
3538 3538 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3539 3539
3540 3540 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3541 3541
3542 3542 ld_sync = (LD_TARGET_SYNC *)ci;
3543 3543
3544 3544 for (i = 0; i < num_lds; i++, ld_sync++) {
3545 3545 raid = MR_LdRaidGet(i, map);
3546 3546
3547 3547 con_log(CL_ANN1,
3548 3548 (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x\n",
3549 3549 i, raid->seqNum, raid->flags.ldSyncRequired));
3550 3550
3551 3551 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3552 3552
3553 3553 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x \n",
3554 3554 i, ld_sync->ldTargetId));
3555 3555
3556 3556 ld_sync->seqNum = raid->seqNum;
3557 3557 }
3558 3558
3559 3559
3560 3560 size_map_info = sizeof (MR_FW_RAID_MAP) +
3561 3561 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3562 3562
3563 3563 dcmd->cmd = MFI_CMD_OP_DCMD;
3564 3564 dcmd->cmd_status = 0xFF;
3565 3565 dcmd->sge_count = 1;
3566 3566 dcmd->flags = MFI_FRAME_DIR_WRITE;
3567 3567 dcmd->timeout = 0;
3568 3568 dcmd->pad_0 = 0;
3569 3569 dcmd->data_xfer_len = size_map_info;
3570 3570 ASSERT(num_lds <= 255);
3571 3571 dcmd->mbox.b[0] = (U8)num_lds;
3572 3572 dcmd->mbox.b[1] = 1; /* Pend */
3573 3573 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3574 3574 dcmd->sgl.sge32[0].phys_addr = ci_h;
3575 3575 dcmd->sgl.sge32[0].length = size_map_info;
3576 3576
3577 3577
3578 3578 instance->map_update_cmd = cmd;
3579 3579 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3580 3580
3581 3581 instance->func_ptr->issue_cmd(cmd, instance);
3582 3582
3583 3583 instance->unroll.syncCmd = 1;
3584 3584 con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3585 3585
3586 3586 return (ret);
3587 3587 }
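/*
 * Reviewer note: mbox.b[1] = 1 ("Pend") makes this a long-lived DCMD.
 * FW does not complete it immediately; it holds the command and fires
 * the completion when the RAID map changes, which is the driver's cue
 * to re-read and re-validate the map. (Behavior inferred from the
 * matching Linux megaraid_sas sync-map logic.)
 */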
3588 3588
3589 3589 /*
3590 3590 * abort_syncmap_cmd
3591 3591 */
3592 3592 int
3593 3593 abort_syncmap_cmd(struct mrsas_instance *instance,
3594 3594 struct mrsas_cmd *cmd_to_abort)
3595 3595 {
3596 3596 int ret = 0;
3597 3597
3598 3598 struct mrsas_cmd *cmd;
3599 3599 struct mrsas_abort_frame *abort_fr;
3600 3600
3601 3601 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3602 3602
3603 3603 cmd = get_raid_msg_mfi_pkt(instance);
3604 3604
3605 3605 if (!cmd) {
3606 3606 cmn_err(CE_WARN,
3607 3607 "Failed to get a cmd from free-pool in abort_syncmap_cmd().");
3608 3608 return (DDI_FAILURE);
3609 3609 }
3610 3610 /* Clear the frame buffer and assign back the context id */
3611 3611 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3612 3612 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3613 3613 cmd->index);
3614 3614
3615 3615 abort_fr = &cmd->frame->abort;
3616 3616
3617 3617 /* prepare and issue the abort frame */
3618 3618 ddi_put8(cmd->frame_dma_obj.acc_handle,
3619 3619 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3620 3620 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3621 3621 MFI_CMD_STATUS_SYNC_MODE);
3622 3622 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3623 3623 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3624 3624 cmd_to_abort->index);
3625 3625 ddi_put32(cmd->frame_dma_obj.acc_handle,
3626 3626 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3627 3627 ddi_put32(cmd->frame_dma_obj.acc_handle,
3628 3628 &abort_fr->abort_mfi_phys_addr_hi, 0);
3629 3629
3630 3630 cmd->frame_count = 1;
3631 3631
3632 3632 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3633 3633
3634 3634 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3635 3635 con_log(CL_ANN1, (CE_WARN,
3636 3636 "abort_syncmap_cmd: issue_cmd_in_poll_mode failed"));
3637 3637 ret = -1;
3638 3638 } else {
3639 3639 ret = 0;
3640 3640 }
3641 3641
3642 3642 return_raid_msg_mfi_pkt(instance, cmd);
3643 3643
3644 3644 atomic_add_16(&instance->fw_outstanding, (-1));
3645 3645
3646 3646 return (ret);
3647 3647 }
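/*
 * Reviewer note: an MFI abort frame identifies its target twice - by
 * context (the victim command's index) and by the physical address of
 * the victim's MFI frame - and FW matches on both. The pairing, with
 * "victim" standing for the command being aborted:
 */
#if 0	/* sketch only */
abort_fr->abort_context = victim->index;
abort_fr->abort_mfi_phys_addr_lo = victim->frame_phys_addr;
abort_fr->abort_mfi_phys_addr_hi = 0;	/* frame pool is 32-bit DMA here */
#endif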
3648 3648
3649 3649
3650 3650 #ifdef PDSUPPORT
3651 3651 int
3652 3652 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3653 3653 uint8_t lun, dev_info_t **ldip)
3654 3654 {
3655 3655 struct scsi_device *sd;
3656 3656 dev_info_t *child;
3657 3657 int rval, dtype;
3658 3658 struct mrsas_tbolt_pd_info *pds = NULL;
3659 3659
3660 3660 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3661 3661 tgt, lun));
3662 3662
3663 3663 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3664 3664 if (ldip) {
3665 3665 *ldip = child;
3666 3666 }
3667 3667 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3668 3668 rval = mrsas_service_evt(instance, tgt, 1,
3669 3669 MRSAS_EVT_UNCONFIG_TGT, NULL);
3670 3670 con_log(CL_ANN1, (CE_WARN,
3671 3671 "mr_sas:DELETING STALE ENTRY rval = %d "
3672 3672 "tgt id = %d ", rval, tgt));
3673 3673 return (NDI_FAILURE);
3674 3674 }
3675 3675 return (NDI_SUCCESS);
3676 3676 }
3677 3677
3678 3678 pds = (struct mrsas_tbolt_pd_info *)
3679 3679 kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3680 3680 mrsas_tbolt_get_pd_info(instance, pds, tgt);
3681 3681 dtype = pds->scsiDevType;
3682 3682
3683 3683 /* Check for Disk */
3684 3684 if (dtype == DTYPE_DIRECT) {
3685 3685 /* Expose only drives that the FW marks SYSTEM (JBOD). */
3686 3686 if (LE_16(pds->fwState) != PD_SYSTEM) {
3687 3687 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3688 3688 return (NDI_FAILURE);
3689 3689 }
3690 3690 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3691 3691 sd->sd_address.a_hba_tran = instance->tran;
3692 3692 sd->sd_address.a_target = (uint16_t)tgt;
3693 3693 sd->sd_address.a_lun = (uint8_t)lun;
3694 3694
3695 3695 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3696 3696 rval = mrsas_config_scsi_device(instance, sd, ldip);
3697 3697 con_log(CL_DLEVEL1, (CE_NOTE,
3698 3698 "Phys. device found: tgt %d dtype %d: %s",
3699 3699 tgt, dtype, sd->sd_inq->inq_vid));
3700 3700 } else {
3701 3701 rval = NDI_FAILURE;
3702 3702 con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3703 3703 "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3704 3704 tgt, dtype, sd->sd_inq->inq_vid));
3705 3705 }
3706 3706
3707 3707 /* The unprobe hook is a no-op; free the inquiry buffer manually. */
3708 3708 if (sd->sd_inq) {
3709 3709 kmem_free(sd->sd_inq, SUN_INQSIZE);
3710 3710 sd->sd_inq = (struct scsi_inquiry *)NULL;
3711 3711 }
3712 3712 kmem_free(sd, sizeof (struct scsi_device));
3713 3713 /* rval from the probe/config path above is returned as-is. */
3714 3714 } else {
3715 3715 con_log(CL_ANN1, (CE_NOTE,
3716 3716 "Device not supported: tgt %d lun %d dtype %d",
3717 3717 tgt, lun, dtype));
3718 3718 rval = NDI_FAILURE;
3719 3719 }
3720 3720
3721 3721 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3722 3722 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: return rval = %d",
3723 3723 rval));
3724 3724 return (rval);
3725 3725 }
3726 3726
3727 3727 static void
3728 3728 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3729 3729 struct mrsas_tbolt_pd_info *pds, int tgt)
3730 3730 {
3731 3731 struct mrsas_cmd *cmd;
3732 3732 struct mrsas_dcmd_frame *dcmd;
3733 3733 dma_obj_t dcmd_dma_obj;
3734 3734
3735 3735 cmd = get_raid_msg_pkt(instance);
3736 3736
3737 3737 if (!cmd) {
3738 3738 con_log(CL_ANN1,
3739 3739 (CE_WARN, "Failed to get a cmd for get pd info"));
3740 3740 return;
3741 3741 }
3742 3742
3743 3743 /* Clear the frame buffer and assign back the context id */
3744 3744 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3745 3745 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3746 3746 cmd->index);
3747 3747
3748 3748
3749 3749 dcmd = &cmd->frame->dcmd;
3750 3750 dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3751 3751 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3752 3752 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3753 3753 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3754 3754 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3755 3755 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3756 3756
3757 3757 (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3758 3758 DDI_STRUCTURE_LE_ACC);
3759 3759 (void) memset(dcmd_dma_obj.buffer, 0,
3760 3760 sizeof (struct mrsas_tbolt_pd_info));
3761 3761 (void) memset(dcmd->mbox.b, 0, 12);
3762 3762 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3763 3763 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3764 3764 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3765 3765 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3766 3766 MFI_FRAME_DIR_READ);
3767 3767 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3768 3768 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3769 3769 sizeof (struct mrsas_tbolt_pd_info));
3770 3770 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3771 3771 MR_DCMD_PD_GET_INFO);
3772 3772 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3773 3773 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3774 3774 sizeof (struct mrsas_tbolt_pd_info));
3775 3775 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3776 3776 dcmd_dma_obj.dma_cookie[0].dmac_address);
3777 3777
3778 3778 cmd->sync_cmd = MRSAS_TRUE;
3779 3779 cmd->frame_count = 1;
3780 3780
3781 3781 if (instance->tbolt) {
3782 3782 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3783 3783 }
3784 3784
3785 3785 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3786 3786
3787 3787 ddi_rep_get8(dcmd_dma_obj.acc_handle, (uint8_t *)pds,
3788 3788 (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3789 3789 DDI_DEV_AUTOINCR);
3790 3790 (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3791 3791 return_raid_msg_pkt(instance, cmd);
3792 3792 }
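/*
 * Reviewer note: the DCMD data path above follows the usual
 * alloc -> issue -> copy-out -> free round-trip; ddi_rep_get8() is
 * what honors the access handle's attributes when draining the DMA
 * buffer. Condensed shape of the round-trip (dma_attr setup elided;
 * dst/len stand for the caller's buffer and size):
 */
#if 0	/* sketch only */
dma_obj_t obj;

obj.size = len;
(void) mrsas_alloc_dma_obj(instance, &obj, DDI_STRUCTURE_LE_ACC);
/* ... FW DMA-writes the payload into obj.buffer ... */
ddi_rep_get8(obj.acc_handle, (uint8_t *)dst, (uint8_t *)obj.buffer,
    len, DDI_DEV_AUTOINCR);
(void) mrsas_free_dma_obj(instance, obj);
#endif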
3793 3793 #endif
[980 lines elided]