--- old/./mr_sas_tbolt.c
+++ new/./mr_sas_tbolt.c
1 1 /*
2 2 * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
3 3 * i.e. Thunderbolt and Invader
4 4 *
5 5 * Solaris MegaRAID device driver for SAS2.0 controllers
6 6 * Copyright (c) 2008-2012, LSI Logic Corporation.
7 7 * All rights reserved.
8 8 *
9 9 * Version:
10 10 * Author:
11 11 * Swaminathan K S
12 12 * Arun Chandrashekhar
13 13 * Manju R
14 14 * Rasheed
15 15 * Shakeel Bukhari
16 16 */
17 17
18 18
19 -#include <stddef.h>
20 19 #include <sys/types.h>
21 20 #include <sys/file.h>
22 21 #include <sys/atomic.h>
23 22 #include <sys/scsi/scsi.h>
24 23 #include <sys/byteorder.h>
25 24 #include "ld_pd_map.h"
26 25 #include "mr_sas.h"
27 26 #include "fusion.h"
28 27
29 28
30 29 // Pre-TB command size and TB command size.
31 30 #define MR_COMMAND_SIZE (64*20) // 1280 bytes
32 31 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
33 32 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
34 33 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
35 34 U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *in_info);
36 35 extern ddi_dma_attr_t mrsas_generic_dma_attr;
37 36 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
38 37 extern struct ddi_device_acc_attr endian_attr;
39 38 extern int debug_level_g;
40 39 extern unsigned int enable_fp;
41 40 volatile int dump_io_wait_time = 90;
42 41 extern void
43 42 io_timeout_checker(void *arg);
44 43 extern int
45 44 mfi_state_transition_to_ready(struct mrsas_instance *instance);
46 45 extern volatile int debug_timeout_g;
47 46 extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
48 47 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
49 48 extern void push_pending_mfi_pkt(struct mrsas_instance *,
50 49 struct mrsas_cmd *);
51 50 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
52 51 MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
53 52
54 53 static volatile int debug_tbolt_fw_faults_after_ocr_g = 0;
55 54
56 55 /*
57 56 * destroy_mfi_mpi_frame_pool
58 57 */
59 58 void
60 59 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
61 60 {
62 61 int i;
63 62
64 63 struct mrsas_cmd *cmd;
65 64
66 65 /* return all mfi frames to pool */
67 66 for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
68 67 cmd = instance->cmd_list[i];
69 68 if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
70 69 (void) mrsas_free_dma_obj(instance,
71 70 cmd->frame_dma_obj);
72 71 cmd->frame_dma_obj_status = DMA_OBJ_FREED;
73 72 }
74 73 }
75 74
76 75 /*
77 76 * destroy_mpi2_frame_pool
78 77 */
79 78 void
80 79 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
81 80 {
82 81
83 82 if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
84 83 (void) mrsas_free_dma_obj(instance,
85 84 instance->mpi2_frame_pool_dma_obj);
86 85 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
87 86 }
88 87 }
89 88
90 89
91 90 /*
92 91 * mrsas_tbolt_free_additional_dma_buffer
93 92 */
94 93 void
95 94 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
96 95 {
97 96 int i;
98 97 if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
99 98 (void) mrsas_free_dma_obj(instance,
100 99 instance->mfi_internal_dma_obj);
101 100 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
102 101 }
103 102 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
104 103 (void) mrsas_free_dma_obj(instance,
105 104 instance->mfi_evt_detail_obj);
106 105 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
107 106 }
108 107
109 108 for (i = 0; i < 2; i++) {
110 109 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
111 110 (void) mrsas_free_dma_obj(instance,
112 111 instance->ld_map_obj[i]);
113 112 instance->ld_map_obj[i].status = DMA_OBJ_FREED;
114 113 }
115 114 }
116 115 }
117 116
118 117
119 118 /*
120 119 * free_req_desc_pool
121 120 */
122 121 void
123 122 free_req_rep_desc_pool(struct mrsas_instance *instance)
124 123 {
125 124 if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
126 125 (void) mrsas_free_dma_obj(instance,
127 126 instance->request_desc_dma_obj);
128 127 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
129 128 }
130 129
131 130 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
132 131 (void) mrsas_free_dma_obj(instance,
133 132 instance->reply_desc_dma_obj);
134 133 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
135 134 }
136 135
137 136
138 137 }
139 138
140 139
141 140 /*
142 141 * ThunderBolt(TB) Request Message Frame Pool
143 142 */
144 143 int
145 144 create_mpi2_frame_pool(struct mrsas_instance *instance)
146 145 {
147 146 int i = 0;
148 147 int cookie_cnt;
149 148 uint16_t max_cmd;
150 149 uint32_t sgl_sz;
151 150 uint32_t raid_msg_size;
152 151 uint32_t total_size;
153 152 uint32_t offset;
154 153 uint32_t io_req_base_phys;
155 154 uint8_t *io_req_base;
156 155 struct mrsas_cmd *cmd;
157 156
158 157 max_cmd = instance->max_fw_cmds;
159 158
160 159 sgl_sz = 1024;
161 160 raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
162 161
 163  162  	// Allocating an additional 256 bytes to accommodate SMID 0.
164 163 total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
165 164 (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
166 165
167 166 con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
168 167 "max_cmd %x ", max_cmd));
169 168
170 169 con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
171 170 "request message frame pool size %x", total_size));
172 171
173 172 /*
 174  173  	 * ThunderBolt(TB): we need to create a single chunk of DMA-able memory
 175  174  	 * and then split that memory into 1024 commands. Each command should be
 176  175  	 * able to contain a RAID MESSAGE FRAME which will embed an MFI_FRAME
 177  176  	 * within it. For reference, see the "alloc_req_rep_desc" function,
 178  177  	 * where the request/reply descriptor queues are allocated.
179 178 */
180 179
181 180 instance->mpi2_frame_pool_dma_obj.size = total_size;
182 181 instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
183 182 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
184 183 0xFFFFFFFFU;
185 184 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
186 185 0xFFFFFFFFU;
187 186 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
188 187 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
189 188
190 189 if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
191 190 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
192 191 cmn_err(CE_WARN,
193 192 "mr_sas: could not alloc mpi2 frame pool");
194 193 return (DDI_FAILURE);
195 194 }
196 195
197 196 bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
198 197 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
199 198
200 199 instance->io_request_frames =
201 200 (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
202 201 instance->io_request_frames_phy =
203 202 (uint32_t)
204 203 instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
205 204
206 205 con_log(CL_DLEVEL3, (CE_NOTE,
207 - "io_request_frames 0x%x",
206 + "io_request_frames 0x%p",
208 207 instance->io_request_frames));
209 208
210 209 con_log(CL_DLEVEL3, (CE_NOTE,
211 210 "io_request_frames_phy 0x%x",
212 211 instance->io_request_frames_phy));
213 212
214 213 io_req_base = (uint8_t *)instance->io_request_frames +
215 214 MRSAS_THUNDERBOLT_MSG_SIZE;
216 215 io_req_base_phys = instance->io_request_frames_phy +
217 216 MRSAS_THUNDERBOLT_MSG_SIZE;
218 217
219 218 con_log(CL_DLEVEL3, (CE_NOTE,
220 219 "io req_base_phys 0x%x", io_req_base_phys));
221 220
222 221 for (i = 0; i < max_cmd; i++) {
223 222 cmd = instance->cmd_list[i];
224 223
225 224 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
226 225
227 226 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
228 227 ((uint8_t *)io_req_base + offset);
229 228 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
230 229
231 230 cmd->sgl = (Mpi2SGEIOUnion_t *)
232 231 ((uint8_t *)io_req_base +
233 232 (max_cmd * raid_msg_size) + i * sgl_sz);
234 233
235 234 cmd->sgl_phys_addr =
236 235 (io_req_base_phys +
237 236 (max_cmd * raid_msg_size) + i * sgl_sz);
238 237
239 238 cmd->sense1 = (uint8_t *)
240 239 ((uint8_t *)io_req_base +
241 240 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
242 241 (i * SENSE_LENGTH));
243 242
244 243 cmd->sense_phys_addr1 =
245 244 (io_req_base_phys +
246 245 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
247 246 (i * SENSE_LENGTH));
248 247
249 248
250 249 cmd->SMID = i+1;
251 250
252 251 con_log(CL_DLEVEL3, (CE_NOTE,
253 - "Frame Pool Addr [%x]0x%x",
252 + "Frame Pool Addr [%x]0x%p",
254 253 cmd->index, cmd->scsi_io_request));
255 254
256 255 con_log(CL_DLEVEL3, (CE_NOTE,
257 256 "Frame Pool Phys Addr [%x]0x%x",
258 257 cmd->index, cmd->scsi_io_request_phys_addr));
259 258
260 259 con_log(CL_DLEVEL3, (CE_NOTE,
261 - "Sense Addr [%x]0x%x",
260 + "Sense Addr [%x]0x%p",
262 261 cmd->index, cmd->sense1));
263 262
264 263 con_log(CL_DLEVEL3, (CE_NOTE,
265 264 "Sense Addr Phys [%x]0x%x",
266 265 cmd->index, cmd->sense_phys_addr1));
267 266
268 267
269 268 con_log(CL_DLEVEL3, (CE_NOTE,
270 - "Sgl bufffers [%x]0x%x",
269 + "Sgl bufffers [%x]0x%p",
271 270 cmd->index, cmd->sgl));
272 271
273 272 con_log(CL_DLEVEL3, (CE_NOTE,
274 273 "Sgl bufffers phys [%x]0x%x",
275 274 cmd->index, cmd->sgl_phys_addr));
276 275 }
277 276
278 277 return (DDI_SUCCESS);
279 278
280 279 }
281 280
282 281
283 282 /*
284 283 * alloc_additional_dma_buffer for AEN
285 284 */
286 285 int
287 286 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
288 287 {
289 288 uint32_t internal_buf_size = PAGESIZE*2;
290 289 int i;
291 290
292 291 /* Initialize buffer status as free */
293 292 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
294 293 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
295 294 instance->ld_map_obj[0].status = DMA_OBJ_FREED;
296 295 instance->ld_map_obj[1].status = DMA_OBJ_FREED;
297 296
298 297
299 298 instance->mfi_internal_dma_obj.size = internal_buf_size;
300 299 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
301 300 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
302 301 instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
303 302 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
304 303
305 304 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
306 305 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
307 306 cmn_err(CE_WARN,
308 307 "mr_sas: could not alloc reply queue");
309 308 return (DDI_FAILURE);
310 309 }
311 310
312 311 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
313 312
314 313 instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
315 314 instance->internal_buf = (caddr_t)(((unsigned long)
316 315 instance->mfi_internal_dma_obj.buffer));
317 316 instance->internal_buf_dmac_add =
318 317 instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
319 318 instance->internal_buf_size = internal_buf_size;
320 319
321 320 /* allocate evt_detail */
322 321 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
323 322 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
324 323 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
325 324 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
326 325 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
327 326 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
328 327
329 328 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
330 329 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
331 330 cmn_err(CE_WARN,
332 331 "mrsas_tbolt_alloc_additional_dma_buffer: "
333 332 "could not allocate data transfer buffer.");
334 333 goto fail_tbolt_additional_buff;
335 334 }
336 335
337 336 bzero(instance->mfi_evt_detail_obj.buffer,
338 337 sizeof (struct mrsas_evt_detail));
339 338
340 339 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
341 340
342 341 instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
343 342 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
344 343
345 344 for (i = 0; i < 2; i++) {
346 345 /* allocate the data transfer buffer */
347 346 instance->ld_map_obj[i].size = instance->size_map_info;
348 347 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
349 348 instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
350 349 instance->ld_map_obj[i].dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
351 350 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
352 351 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
353 352
354 353 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
355 354 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
356 355 cmn_err(CE_WARN,
357 356 "could not allocate data transfer buffer.");
358 357 goto fail_tbolt_additional_buff;
359 358 }
360 359
361 360 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
362 361
363 362 (void) memset(instance->ld_map_obj[i].buffer, 0,
364 363 instance->size_map_info);
365 364
366 365 instance->ld_map[i] = (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
367 366 instance->ld_map_phy[i] =
368 367 (uint32_t)instance->ld_map_obj[i].dma_cookie[0].dmac_address;
369 368
370 369 con_log(CL_DLEVEL3, (CE_NOTE,
371 370 "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
372 371
373 372 con_log(CL_DLEVEL3, (CE_NOTE,
374 373 "size_map_info 0x%x", instance->size_map_info));
375 374
376 375 }
377 376
378 377 return (DDI_SUCCESS);
379 378
380 379 fail_tbolt_additional_buff:
381 380 mrsas_tbolt_free_additional_dma_buffer(instance);
382 381
383 382 return (DDI_FAILURE);
384 383 }
385 384
386 385 MRSAS_REQUEST_DESCRIPTOR_UNION *
387 386 mr_sas_get_request_descriptor(struct mrsas_instance *instance,
388 387 uint16_t index, struct mrsas_cmd *cmd)
389 388 {
390 389 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
391 390
392 391 if (index > instance->max_fw_cmds) {
393 392 con_log(CL_ANN1, (CE_NOTE,
394 393 "Invalid SMID 0x%x request for descriptor", index));
395 394 con_log(CL_ANN1, (CE_NOTE,
396 395 "max_fw_cmds : 0x%x\n", instance->max_fw_cmds));
397 396 return (NULL);
398 397 }
399 398
400 399 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
401 400 ((char *)instance->request_message_pool +
402 401 (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
403 402
404 403 con_log(CL_ANN1, (CE_NOTE,
405 404 "request descriptor : 0x%08lx\n", (unsigned long)req_desc));
406 405
407 406 con_log(CL_ANN1, (CE_NOTE,
408 407 "request descriptor base phy : 0x%08lx\n",
409 408 (unsigned long)instance->request_message_pool_phy));
410 409
411 410 return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
412 411 }
413 412
414 413
415 414 /*
416 415 * Allocate Request and Reply Queue Descriptors.
417 416 */
418 417 int
419 418 alloc_req_rep_desc(struct mrsas_instance *instance)
420 419 {
421 420 uint32_t request_q_sz, reply_q_sz;
422 421 int i, max_request_q_sz, max_reply_q_sz;
423 422 uint64_t request_desc;
424 423 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
425 424 uint64_t *reply_ptr;
426 425
427 426 /*
 428  427  	 * ThunderBolt(TB): there is no longer a producer-consumer mechanism.
 429  428  	 * Once we get an interrupt we are supposed to scan through the list of
 430  429  	 * reply descriptors and process them accordingly. We therefore need
 431  430  	 * to allocate memory for 1024 reply descriptors.
432 431 */
433 432
434 433 /* Allocate Reply Descriptors */
435 434 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
436 - sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
435 + (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
437 436
438 437 // reply queue size should be multiple of 16
439 438 max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
440 439
441 440 reply_q_sz = 8 * max_reply_q_sz;
442 441
443 442
444 443 con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
445 - sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
444 + (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
446 445
447 446 instance->reply_desc_dma_obj.size = reply_q_sz;
448 447 instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
449 448 instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
450 449 instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
451 450 instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
452 451 instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
453 452
454 453 if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
455 454 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
456 455 cmn_err(CE_WARN,
457 456 "mr_sas: could not alloc reply queue");
458 457 return (DDI_FAILURE);
459 458 }
460 459
461 460 bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
462 461 instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
463 462
464 463 // virtual address of reply queue
465 464 instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
466 465 instance->reply_desc_dma_obj.buffer);
467 466
468 467 instance->reply_q_depth = max_reply_q_sz;
469 468
470 469 con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
471 470 instance->reply_q_depth));
472 471
473 - con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%x",
472 + con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
474 473 instance->reply_frame_pool));
475 474
476 475 /* initializing reply address to 0xFFFFFFFF */
477 476 reply_desc = instance->reply_frame_pool;
478 477
479 478 for (i = 0; i < instance->reply_q_depth; i++) {
480 479 reply_desc->Words = (uint64_t)~0;
481 480 reply_desc++;
482 481 }
483 482
484 483
485 484 instance->reply_frame_pool_phy =
486 485 (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
487 486
488 487 con_log(CL_ANN1, (CE_NOTE,
489 488 "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
490 489
491 490
492 491 instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
493 492 reply_q_sz);
494 493
495 494 con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
496 495 instance->reply_pool_limit_phy));
497 496
498 497
499 498 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
500 - sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
499 + (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
501 500
502 501 /* Allocate Request Descriptors */
503 502 con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
504 - sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
503 + (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
505 504
506 505 request_q_sz = 8 *
507 506 (instance->max_fw_cmds);
508 507
509 508 instance->request_desc_dma_obj.size = request_q_sz;
510 509 instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
511 510 instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
512 511 instance->request_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
513 512 instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
514 513 instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
515 514
516 515 if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
517 516 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
518 517 cmn_err(CE_WARN,
519 518 "mr_sas: could not alloc request queue desc");
520 519 goto fail_undo_reply_queue;
521 520 }
522 521
523 522 bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
524 523 instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
525 524
526 525 /* virtual address of request queue desc */
527 526 instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
528 527 (instance->request_desc_dma_obj.buffer);
529 528
530 529 instance->request_message_pool_phy =
531 530 (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
532 531
533 532 max_request_q_sz = instance->max_fw_cmds;
534 533
535 534 return (DDI_SUCCESS);
536 535
537 536 fail_undo_reply_queue:
538 537 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
539 538 (void) mrsas_free_dma_obj(instance,
540 539 instance->reply_desc_dma_obj);
541 540 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
542 541 }
543 542
544 543 return (DDI_FAILURE);
545 544 }
546 545
547 546 /*
548 547 * mrsas_alloc_cmd_pool_tbolt
549 548 * TODO: merge tbolt-specific codee into mrsas_alloc_cmd_pool() to have single routine
550 549 */
551 550 int
552 551 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
553 552 {
554 553 int i;
555 554 int count;
556 555 uint32_t max_cmd;
557 556 uint32_t reserve_cmd;
558 557 size_t sz;
559 558
560 559 struct mrsas_cmd *cmd;
561 560
562 561 max_cmd = instance->max_fw_cmds;
563 562 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
564 563 "max_cmd %x", max_cmd));
565 564
566 565
567 566 sz = sizeof (struct mrsas_cmd *) * max_cmd;
568 567
569 568 /*
570 569 * instance->cmd_list is an array of struct mrsas_cmd pointers.
571 570 * Allocate the dynamic array first and then allocate individual
572 571 * commands.
573 572 */
574 573 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
575 574 if (instance->cmd_list == NULL) {
576 575 con_log(CL_NONE, (CE_WARN,
577 576 "Failed to allocate memory for cmd_list"));
578 577 return (DDI_FAILURE);
579 578 }
580 579
581 580 /* create a frame pool and assign one frame to each cmd */
582 581 for (count = 0; count < max_cmd; count++) {
583 582 instance->cmd_list[count] = kmem_zalloc(sizeof (struct mrsas_cmd),
584 583 KM_SLEEP);
585 584 if (instance->cmd_list[count] == NULL) {
586 585 con_log(CL_NONE, (CE_WARN,
587 586 "Failed to allocate memory for mrsas_cmd"));
588 587 goto mrsas_undo_cmds;
589 588 }
590 589 }
591 590
592 591 /* add all the commands to command pool */
593 592
594 593 INIT_LIST_HEAD(&instance->cmd_pool_list);
595 594 INIT_LIST_HEAD(&instance->cmd_pend_list);
596 595 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
597 596
598 597 reserve_cmd = MRSAS_APP_RESERVED_CMDS;
599 598
600 599 for (i = 1; i < reserve_cmd; i++) { //cmd index 0 reservered for IOC INIT
601 600 cmd = instance->cmd_list[i];
602 601 cmd->index = i;
603 602 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
604 603 }
605 604
606 605
607 606 for (i = reserve_cmd; i < max_cmd; i++) {
608 607 cmd = instance->cmd_list[i];
609 608 cmd->index = i;
610 609 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
611 610 }
612 611
613 612 return (DDI_SUCCESS);
614 613
615 614 mrsas_undo_cmds:
616 615 if (count > 0) {
617 616 /* free each cmd */
618 617 for (i = 0; i < count; i++) {
619 618 if (instance->cmd_list[i] != NULL)
620 619 kmem_free(instance->cmd_list[i],sizeof (struct mrsas_cmd));
621 620 instance->cmd_list[i] = NULL;
622 621 }
623 622 }
624 623
625 624 mrsas_undo_cmd_list:
626 625 if (instance->cmd_list != NULL)
627 626 kmem_free(instance->cmd_list,sz);
628 627 instance->cmd_list = NULL;
629 628
630 629 return (DDI_FAILURE);
631 630 }
632 631
633 632
634 633 /*
635 634 * free_space_for_mpi2
636 635 */
637 636 void
638 637 free_space_for_mpi2(struct mrsas_instance *instance)
639 638 {
640 639 /* already freed */
641 640 if (instance->cmd_list == NULL) {
642 641 return;
643 642 }
644 643
645 644 /* First free the additional DMA buffer */
646 645 mrsas_tbolt_free_additional_dma_buffer(instance);
647 646
648 647 /* Free the request/reply descriptor pool */
649 648 free_req_rep_desc_pool(instance);
650 649
651 650 /* Free the MPI message pool */
652 651 destroy_mpi2_frame_pool(instance);
653 652
654 653 /* Free the MFI frame pool */
655 654 destroy_mfi_frame_pool(instance);
656 655
657 656 /* Free all the commands in the cmd_list */
658 657 /* Free the cmd_list buffer itself */
659 658 mrsas_free_cmd_pool(instance);
660 659 }
661 660
662 661
663 662 /*
664 663 * ThunderBolt(TB) memory allocations for commands/messages/frames.
665 664 */
666 665 int
667 666 alloc_space_for_mpi2(struct mrsas_instance *instance)
668 667 {
669 668 /* Allocate command pool ( memory for cmd_list & individual commands )*/
670 669 if (mrsas_alloc_cmd_pool_tbolt(instance)) {
671 670 cmn_err(CE_WARN, "Error creating cmd pool");
672 671 return (DDI_FAILURE);
673 672 }
674 673
675 674 /* Initialize single reply size and Message size */
676 675 instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
677 676 instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
678 677
679 678 instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
680 679 (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
681 680 sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
682 681 instance->max_sge_in_chain = (MR_COMMAND_SIZE -
683 682 MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
684 683
685 684 /* Reduce SG count by 1 to take care of group cmds feature in FW */
686 685 instance->max_num_sge = (instance->max_sge_in_main_msg +
687 686 instance->max_sge_in_chain - 2);
688 687 instance->chain_offset_mpt_msg =
689 688 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
690 689 instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
691 690 sizeof (MPI2_SGE_IO_UNION)) / 16;
692 691 instance->reply_read_index = 0;
693 692
694 693
695 694 /* Allocate Request and Reply descriptors Array */
696 695 /* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
697 696 if (alloc_req_rep_desc(instance)) {
698 697 cmn_err(CE_WARN,
 699  698  	    "Error, allocating memory for descriptor-pool");
700 699 goto mpi2_undo_cmd_pool;
701 700 }
702 701 con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
703 702 instance->request_message_pool_phy));
704 703
705 704
706 705 /* Allocate MFI Frame pool - for MPI-MFI passthru commands */
707 706 if (create_mfi_frame_pool(instance)) {
708 707 cmn_err(CE_WARN,
709 708 "Error, allocating memory for MFI frame-pool");
710 709 goto mpi2_undo_descripter_pool;
711 710 }
712 711
713 712
714 713 /* Allocate MPI2 Message pool */
715 714 /*
 716  715  	 * Make sure the buffer is aligned to 256 for the raid message packet.
 717  716  	 * Create an io request pool and assign one frame to each cmd.
718 717 */
719 718
720 719 if (create_mpi2_frame_pool(instance)) {
721 720 cmn_err(CE_WARN,
722 721 "Error, allocating memory for MPI2 Message-pool");
723 722 goto mpi2_undo_mfi_frame_pool;
724 723 }
725 724
726 725 #ifdef DEBUG
727 726 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
728 727 instance->max_sge_in_main_msg));
729 728 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
730 729 instance->max_sge_in_chain));
731 730 con_log(CL_ANN1, (CE_CONT,
732 731 "[max_sge]0x%x", instance->max_num_sge));
733 732 con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
734 733 instance->chain_offset_mpt_msg));
735 734 con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
736 735 instance->chain_offset_io_req));
737 736 #endif
738 737
739 738
740 739 /* Allocate additional dma buffer */
741 740 if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
742 741 cmn_err(CE_WARN,
743 742 "Error, allocating tbolt additional DMA buffer");
744 743 goto mpi2_undo_message_pool;
745 744 }
746 745
747 746 return (DDI_SUCCESS);
748 747
749 748 mpi2_undo_message_pool:
750 749 destroy_mpi2_frame_pool(instance);
751 750
752 751 mpi2_undo_mfi_frame_pool:
753 752 destroy_mfi_frame_pool(instance);
754 753
755 754 mpi2_undo_descripter_pool:
756 755 free_req_rep_desc_pool(instance);
757 756
758 757 mpi2_undo_cmd_pool:
759 758 mrsas_free_cmd_pool(instance);
760 759
761 760 return (DDI_FAILURE);
762 761 }
763 762
764 763
765 764 /*
766 765 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
767 766 */
768 767 int
769 768 mrsas_init_adapter_tbolt (struct mrsas_instance *instance)
770 769 {
771 770
772 771 /*
773 772 * Reduce the max supported cmds by 1. This is to ensure that the
774 773 * reply_q_sz (1 more than the max cmd that driver may send)
775 774 * does not exceed max cmds that the FW can support
776 775 */
777 776
778 777 if (instance->max_fw_cmds > 1008) {
779 778 instance->max_fw_cmds = 1008;
780 779 instance->max_fw_cmds = instance->max_fw_cmds-1;
781 780 }
782 781
783 782 con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
784 783 " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
785 784
786 785
787 786 /* create a pool of commands */
788 787 if ( alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
789 788 cmn_err(CE_WARN,
790 789 " alloc_space_for_mpi2() failed.");
791 790
792 791 return (DDI_FAILURE);
793 792 }
794 793
795 794 /* Send ioc init message */
796 795 if ( mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
797 796 cmn_err(CE_WARN,
798 797 " mrsas_issue_init_mpi2() failed.");
799 798
800 799 goto fail_init_fusion;
801 800 }
802 801
803 802 instance->unroll.alloc_space_mpi2 = 1;
804 803
805 804 con_log(CL_ANN, (CE_NOTE,
 806  805  	    "mrsas_init_adapter_tbolt: SUCCESSFUL\n"));
807 806
808 807 return (DDI_SUCCESS);
809 808
810 809 fail_init_fusion:
811 810
812 811 fail_undo_alloc_mpi2:
813 812 free_space_for_mpi2(instance);
814 813
815 814 return (DDI_FAILURE);
816 815 }
817 816
818 817
819 818
820 819 /*
821 820 * init_mpi2
822 821 */
823 822 int
824 823 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
825 824 {
826 825 dma_obj_t init2_dma_obj;
827 826 int ret_val = DDI_SUCCESS;
828 827
829 828 /* allocate DMA buffer for IOC INIT message */
830 829 init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
831 830 init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
832 831 init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
833 832 init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
834 833 init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
835 834 init2_dma_obj.dma_attr.dma_attr_align = 256;
836 835
837 836 if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
838 837 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
839 838 cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
840 839 "could not allocate data transfer buffer.");
841 840 return (DDI_FAILURE);
842 841 }
843 842 (void) memset(init2_dma_obj.buffer, 2,
844 843 sizeof (Mpi2IOCInitRequest_t));
845 844
846 845 con_log(CL_ANN1, (CE_NOTE,
847 846 "mrsas_issue_init_mpi2 _phys adr: %x \n",
848 847 init2_dma_obj.dma_cookie[0].dmac_address));
849 848
850 849
851 850 /* Initialize and send ioc init message */
852 851 ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj,
853 852 init2_dma_obj.acc_handle);
854 853 if (ret_val == DDI_FAILURE) {
855 854 con_log(CL_ANN1, (CE_WARN,
856 855 "mrsas_issue_init_mpi2: Failed\n"));
857 856 goto fail_init_mpi2;
858 857 }
859 858
860 859 /* free IOC init DMA buffer */
861 860 if (mrsas_free_dma_obj(instance, init2_dma_obj)
862 861 != DDI_SUCCESS) {
863 862 con_log(CL_ANN1, (CE_WARN,
864 863 "mrsas_issue_init_mpi2: Free Failed\n"));
865 864 return (DDI_FAILURE);
866 865 }
867 866
868 867
869 868 /* Get/Check and sync ld_map info */
870 869 instance->map_id = 0;
871 870 if( mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS )
872 871 mrsas_tbolt_sync_map_info(instance);
873 872
874 873 con_log(CL_ANN, (CE_NOTE,
 875  874  	    "mrsas_issue_init_mpi2: SUCCESSFUL\n"));
876 875
877 876 return (DDI_SUCCESS);
878 877
879 878 fail_init_mpi2:
880 879 mrsas_free_dma_obj(instance, init2_dma_obj);
881 880
882 881 return (DDI_FAILURE);
883 882 }
884 883
885 884 int
886 885 mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj,
887 886 ddi_acc_handle_t accessp)
888 887 {
889 888 int numbytes, i;
890 889 int ret = DDI_SUCCESS;
891 890 uint16_t flags;
892 891 int status;
893 892 timespec_t time;
894 893 uint64_t mSec;
895 894 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
896 895 struct mrsas_init_frame2 *mfiFrameInit2;
897 896 struct mrsas_header *frame_hdr;
898 897 Mpi2IOCInitRequest_t *init;
899 898 struct mrsas_cmd *cmd = NULL;
900 899 struct mrsas_drv_ver drv_ver_info;
901 900 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
902 901
903 902
904 903 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
905 904
906 905
907 906 #ifdef DEBUG
908 907 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
909 - sizeof (*mfiFrameInit2)));
910 - con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", sizeof (*init)));
908 + (int)sizeof (*mfiFrameInit2)));
909 + con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
911 910 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
912 - sizeof (struct mrsas_init_frame2)));
911 + (int)sizeof (struct mrsas_init_frame2)));
913 912 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
914 - sizeof (Mpi2IOCInitRequest_t)));
913 + (int)sizeof (Mpi2IOCInitRequest_t)));
915 914 #endif
916 915
917 916 init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
918 917 numbytes = sizeof (*init);
919 918 bzero(init, numbytes);
920 919
921 920 ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
922 921 MPI2_FUNCTION_IOC_INIT);
923 922
924 923 ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
925 924 MPI2_WHOINIT_HOST_DRIVER);
926 925
927 926 /* set MsgVersion and HeaderVersion host driver was built with */
928 927 ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
929 928 MPI2_VERSION);
930 929
931 930 ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
932 931 MPI2_HEADER_VERSION);
933 932
934 933 ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
935 934 instance->raid_io_msg_size / 4);
936 935
937 936 ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
938 937 0);
939 938
940 939 ddi_put16(mpi2_dma_obj->acc_handle,
941 940 &init->ReplyDescriptorPostQueueDepth,
942 941 instance->reply_q_depth);
943 942 /*
944 943 * These addresses are set using the DMA cookie addresses from when the
945 944 * memory was allocated. Sense buffer hi address should be 0.
946 945 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
947 946 */
948 947
949 948 ddi_put32(mpi2_dma_obj->acc_handle,
950 949 &init->SenseBufferAddressHigh, 0);
951 950
952 951 ddi_put64(mpi2_dma_obj->acc_handle,
953 952 (uint64_t *)&init->SystemRequestFrameBaseAddress,
954 953 instance->io_request_frames_phy);
955 954
956 955 ddi_put64(mpi2_dma_obj->acc_handle,
957 956 &init->ReplyDescriptorPostQueueAddress,
958 957 instance->reply_frame_pool_phy);
959 958
960 959 ddi_put64(mpi2_dma_obj->acc_handle,
961 960 &init->ReplyFreeQueueAddress, 0);
962 961
963 962 cmd = instance->cmd_list[0];
964 963 if (cmd == NULL) {
965 964 return (DDI_FAILURE);
966 965 }
967 966 cmd->retry_count_for_ocr = 0;
968 967 cmd->pkt = NULL;
969 968 cmd->drv_pkt_time = 0;
970 969
971 970 mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
972 - con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%x", mfiFrameInit2));
971 + con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", mfiFrameInit2));
973 972
974 973 frame_hdr = &cmd->frame->hdr;
975 974
976 975 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
977 976 MFI_CMD_STATUS_POLL_MODE);
978 977
979 978 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
980 979
981 980 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
982 981
983 982 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
984 983
985 984 con_log(CL_ANN, (CE_CONT,
986 985 "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
987 986
988 987 // Init the MFI Header
989 988 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
990 989 &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
991 990
992 991 con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
993 992
994 993 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
995 994 &mfiFrameInit2->cmd_status,
996 995 MFI_STAT_INVALID_STATUS);
997 996
998 997 con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
999 998
1000 999 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1001 1000 &mfiFrameInit2->queue_info_new_phys_addr_lo,
1002 1001 mpi2_dma_obj->dma_cookie[0].dmac_address);
1003 1002
1004 1003 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1005 1004 &mfiFrameInit2->data_xfer_len,
1006 1005 sizeof (Mpi2IOCInitRequest_t));
1007 1006
1008 1007 con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1009 - init->ReplyDescriptorPostQueueAddress));
1008 + (int)init->ReplyDescriptorPostQueueAddress));
1010 1009
1011 1010 /* fill driver version information*/
1012 1011 fill_up_drv_ver(&drv_ver_info);
1013 1012
1014 1013 /* allocate the driver version data transfer buffer */
1015 1014 instance->drv_ver_dma_obj.size = sizeof(drv_ver_info.drv_ver);
1016 1015 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1017 1016 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1018 1017 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1019 1018 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1020 1019 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1021 1020
1022 1021 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1023 1022 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1024 1023 cmn_err(CE_WARN,
1025 1024 "fusion init: Could not allocate driver version buffer.");
1026 1025 return (DDI_FAILURE);
1027 1026 }
1028 1027 /* copy driver version to dma buffer*/
1029 1028 (void) memset(instance->drv_ver_dma_obj.buffer, 0,sizeof(drv_ver_info.drv_ver));
1030 1029 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1031 1030 (uint8_t *)drv_ver_info.drv_ver,
1032 1031 (uint8_t *)instance->drv_ver_dma_obj.buffer,
1033 1032 sizeof(drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1034 1033
1035 1034 /*send driver version physical address to firmware*/
1036 1035 ddi_put64(cmd->frame_dma_obj.acc_handle,
1037 1036 &mfiFrameInit2->driverversion, instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1038 1037
1039 1038 con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1040 1039 mfiFrameInit2->queue_info_new_phys_addr_lo,
1041 - sizeof (Mpi2IOCInitRequest_t)));
1040 + (int)sizeof (Mpi2IOCInitRequest_t)));
1042 1041
1043 1042 con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1044 1043
1045 1044 con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1046 - cmd->scsi_io_request_phys_addr, sizeof (struct mrsas_init_frame2)));
1045 + cmd->scsi_io_request_phys_addr,
1046 + (int) sizeof (struct mrsas_init_frame2)));
1047 1047
1048 1048 /* disable interrupts before sending INIT2 frame */
1049 1049 instance->func_ptr->disable_intr(instance);
1050 1050
1051 1051 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1052 1052 instance->request_message_pool;
1053 1053 req_desc->Words = cmd->scsi_io_request_phys_addr;
1054 1054 req_desc->MFAIo.RequestFlags =
1055 1055 (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1056 1056
1057 1057 cmd->request_desc = req_desc;
1058 1058
1059 1059 /* issue the init frame */
1060 1060 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1061 1061
1062 1062 con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1063 1063 con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
1064 1064 frame_hdr->cmd_status));
1065 1065
1066 1066 if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1067 1067 &mfiFrameInit2->cmd_status) == 0) {
1068 1068 con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1069 1069 ret = DDI_SUCCESS;
1070 1070 } else {
1071 1071 con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1072 1072 mrsas_dump_reply_desc(instance);
1073 1073 goto fail_ioc_init;
1074 1074 }
1075 1075
1076 1076 mrsas_dump_reply_desc(instance);
1077 1077
1078 1078 instance->unroll.verBuff = 1;
1079 1079
1080 1080 con_log(CL_ANN, (CE_NOTE,
1081 1081  	    "mrsas_tbolt_ioc_init: SUCCESSFUL\n"));
1082 1082
1083 1083
1084 1084 return (DDI_SUCCESS);
1085 1085
1086 1086
1087 1087 fail_ioc_init:
1088 1088
1089 1089 mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1090 1090
1091 1091 return (DDI_FAILURE);
1092 1092 }
1093 1093
1094 1094 int wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1095 1095 {
1096 1096 int i;
1097 1097 uint32_t wait_time = dump_io_wait_time;
1098 1098 for (i = 0; i < wait_time; i++) {
1099 1099 /*
1100 1100 * Check For Outstanding poll Commands
1101 1101 * except ldsync command and aen command
1102 1102 */
1103 1103 if (instance->fw_outstanding <= 2) {
1104 1104 break;
1105 1105 }
1106 1106 drv_usecwait(10*MILLISEC);
1107 1107 /* complete commands from reply queue */
1108 1108 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1109 1109 }
1110 1110 if (instance->fw_outstanding > 2) {
1111 1111 return (1);
1112 1112 }
1113 1113 return (0);
1114 1114 }
1115 1115 /*
1116 1116 * scsi_pkt handling
1117 1117 *
1118 1118 * Visible to the external world via the transport structure.
1119 1119 */
1120 1120
1121 1121 int
1122 1122 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1123 1123 {
1124 1124 struct mrsas_instance *instance = ADDR2MR(ap);
1125 1125 struct scsa_cmd *acmd = PKT2CMD(pkt);
1126 1126 struct mrsas_cmd *cmd = NULL;
1127 1127 int rval, i;
1128 1128 uchar_t cmd_done = 0;
1129 1129 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1130 1130 uint32_t msecs = 120 * MILLISEC;
1131 1131
1132 1132 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1133 1133 if (instance->deadadapter == 1) {
1134 1134 cmn_err(CE_WARN,
1135 1135 "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1136 1136  	    "for IO, as the HBA doesn't take any more IOs");
1137 1137 if (pkt) {
1138 1138 pkt->pkt_reason = CMD_DEV_GONE;
1139 1139 pkt->pkt_statistics = STAT_DISCON;
1140 1140 }
1141 1141 return (TRAN_FATAL_ERROR);
1142 1142 }
1143 1143 if (instance->adapterresetinprogress) {
1144 1144 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1145 1145 "returning mfi_pkt and setting TRAN_BUSY\n"));
1146 1146 return (TRAN_BUSY);
1147 1147 }
1148 1148 rval = mrsas_tbolt_prepare_pkt(acmd);
1149 1149
1150 1150 cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1151 1151
1152 1152 /*
1153 1153  	 * Check if the command has already been completed by the mrsas_build_cmd()
1154 1154  	 * routine, in which case the busy_flag would be clear, scb will be
1155 1155  	 * NULL, and an appropriate reason is provided in the pkt_reason field
1156 1156 */
1157 1157 if (cmd_done) {
1158 1158 pkt->pkt_reason = CMD_CMPLT;
1159 1159 pkt->pkt_scbp[0] = STATUS_GOOD;
1160 1160 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1161 1161 | STATE_SENT_CMD;
1162 1162 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1163 1163 (*pkt->pkt_comp)(pkt);
1164 1164 }
1165 1165
1166 1166 return (TRAN_ACCEPT);
1167 1167 }
1168 1168
1169 1169 if (cmd == NULL) {
1170 1170 return (TRAN_BUSY);
1171 1171 }
1172 1172
1173 1173
1174 1174 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1175 1175 if (instance->fw_outstanding > instance->max_fw_cmds) {
1176 1176 cmn_err(CE_WARN,
1177 1177 "Command Queue Full... Returning BUSY \n");
1178 1178 return_raid_msg_pkt(instance, cmd);
1179 1179 return (TRAN_BUSY);
1180 1180 }
1181 1181
1182 1182 /* Synchronize the Cmd frame for the controller */
1183 1183 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1184 1184 DDI_DMA_SYNC_FORDEV);
1185 1185
1186 1186 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1187 - "cmd->index:0x%x SMID %0x%x\n", pkt->pkt_cdbp[0], cmd->index, cmd->SMID));
1187 + "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0], cmd->index, cmd->SMID));
1188 1188
1189 1189 instance->func_ptr->issue_cmd(cmd, instance);
1190 1190
1191 1191 return (TRAN_ACCEPT);
1192 1192
1193 1193 } else {
1194 1194 instance->func_ptr->issue_cmd(cmd, instance);
1195 1195 (void) wait_for_outstanding_poll_io(instance);
1196 1196 return (TRAN_ACCEPT);
1197 1197 }
1198 1198 }
1199 1199
1200 1200 /*
1201 1201 * prepare the pkt:
1202 1202 * the pkt may have been resubmitted or just reused so
1203 1203 * initialize some fields and do some checks.
1204 1204 */
1205 1205 int
1206 1206 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1207 1207 {
1208 1208 struct scsi_pkt *pkt = CMD2PKT(acmd);
1209 1209
1210 1210
1211 1211 /*
1212 1212 * Reinitialize some fields that need it; the packet may
1213 1213 * have been resubmitted
1214 1214 */
1215 1215 pkt->pkt_reason = CMD_CMPLT;
1216 1216 pkt->pkt_state = 0;
1217 1217 pkt->pkt_statistics = 0;
1218 1218 pkt->pkt_resid = 0;
1219 1219
1220 1220 /*
1221 1221 * zero status byte.
1222 1222 */
1223 1223 *(pkt->pkt_scbp) = 0;
1224 1224
1225 1225 return (0);
1226 1226 }
1227 1227
1228 1228
1229 1229 int
1230 1230 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1231 1231 struct scsa_cmd *acmd,
1232 1232 struct mrsas_cmd *cmd,
1233 1233 Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1234 1234 uint32_t *datalen)
1235 1235 {
1236 1236 uint32_t MaxSGEs;
1237 1237 int sg_to_process;
1238 1238 uint32_t i, j, SGEdwords = 0;
1239 1239 uint32_t numElements, endElement;
1240 1240 Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
1241 1241 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
1242 1242 uint32_t SGLFlags = 0;
1243 1243
1244 1244 con_log(CL_ANN1, (CE_NOTE,
1245 1245 "chkpnt: Building Chained SGL :%d", __LINE__));
1246 1246
1247 1247  	/* Calculate SGE size in number of Words (32bit) */
1248 1248 /* Clear the datalen before updating it. */
1249 1249 *datalen = 0;
1250 1250
1251 1251 SGEdwords = sizeof (Mpi25IeeeSgeChain64_t) / 4;
1252 1252
1253 1253 MaxSGEs = instance->max_sge_in_main_msg;
1254 1254
1255 1255 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1256 1256 &scsi_raid_io->SGLFlags,
1257 1257 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1258 1258
1259 1259 // set data transfer flag.
1260 1260 if (acmd->cmd_flags & CFLAG_DMASEND) {
1261 1261 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1262 1262 &scsi_raid_io->Control,
1263 1263 MPI2_SCSIIO_CONTROL_WRITE);
1264 1264 } else {
1265 1265 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1266 1266 &scsi_raid_io->Control, MPI2_SCSIIO_CONTROL_READ);
1267 1267 }
1268 1268
1269 1269
1270 1270 numElements = acmd->cmd_cookiecnt;
1271 1271
1272 1272 con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1273 1273
1274 1274 if (numElements > instance->max_num_sge) {
1275 1275 con_log(CL_ANN, (CE_NOTE,
1276 1276 "[Max SGE Count Exceeded]:%x", numElements));
1277 1277 return (numElements);
1278 1278 }
1279 1279
1280 1280 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1281 1281 &scsi_raid_io->RaidContext.numSGE, (uint8_t)numElements);
1282 1282
1283 1283 /* set end element in main message frame */
1284 1284 endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1285 1285
1286 1286 /* prepare the scatter-gather list for the firmware */
1287 1287 scsi_raid_io_sgl_ieee =
1288 1288 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1289 1289
1290 1290 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1291 1291 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1292 1292 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1293 1293
1294 1294 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1295 1295 &sgl_ptr_end->Flags, 0);
1296 1296 }
1297 1297
1298 1298 for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1299 1299 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1300 1300 &scsi_raid_io_sgl_ieee->Address,
1301 1301 acmd->cmd_dmacookies[i].dmac_laddress);
1302 1302
1303 1303 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1304 1304 &scsi_raid_io_sgl_ieee->Length,
1305 1305 acmd->cmd_dmacookies[i].dmac_size);
1306 1306
1307 1307 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1308 1308 &scsi_raid_io_sgl_ieee->Flags, 0);
1309 1309
1310 1310 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1311 1311 if (i == (numElements - 1))
1312 1312 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1313 1313 &scsi_raid_io_sgl_ieee->Flags, IEEE_SGE_FLAGS_END_OF_LIST);
1314 1314 }
1315 1315
1316 1316 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1317 1317
1318 1318 #ifdef DEBUG
1319 - con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]:%llx",
1319 + con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1320 1320 scsi_raid_io_sgl_ieee->Address));
1321 1321 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1322 1322 scsi_raid_io_sgl_ieee->Length));
1323 1323 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1324 1324 scsi_raid_io_sgl_ieee->Flags));
1325 1325 #endif
1326 1326
1327 1327 }
1328 1328
1329 1329 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1330 1330 &scsi_raid_io->ChainOffset, 0);
1331 1331
1332 1332 /* check if chained SGL required */
1333 1333 if (i < numElements) {
1334 1334
1335 1335 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1336 1336
1337 1337 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1338 1338 uint16_t ioFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1339 1339 &scsi_raid_io->IoFlags);
1340 1340
1341 1341 if ((ioFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1342 1342 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1343 1343 &scsi_raid_io->ChainOffset, (U8)instance->chain_offset_io_req);
1344 1344 else
1345 1345 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1346 1346 &scsi_raid_io->ChainOffset, 0);
1347 1347 }
1348 1348 else {
1349 1349 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1350 1350 &scsi_raid_io->ChainOffset, (U8)instance->chain_offset_io_req);
1351 1351 }
1352 1352
1353 1353 /* prepare physical chain element */
1354 1354 ieeeChainElement = scsi_raid_io_sgl_ieee;
1355 1355
1356 1356 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1357 1357 &ieeeChainElement->NextChainOffset, 0);
1358 1358
1359 1359 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER)
1360 1360 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1361 1361 &ieeeChainElement->Flags, IEEE_SGE_FLAGS_CHAIN_ELEMENT );
1362 1362 else
1363 1363 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1364 1364 &ieeeChainElement->Flags,
1365 1365 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1366 1366 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1367 1367
1368 1368 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1369 1369 &ieeeChainElement->Length,
1370 1370 (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1371 1371
1372 1372 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1373 1373 &ieeeChainElement->Address,
1374 1374 (U64)cmd->sgl_phys_addr);
1375 1375
1376 1376 sg_to_process = numElements - i;
1377 1377
1378 1378 con_log(CL_ANN1, (CE_NOTE,
1379 1379 "[Additional SGE Count]:%x", endElement));
1380 1380
1381 1381 /* point to the chained SGL buffer */
1382 1382 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1383 1383
1384 1384 /* build rest of the SGL in chained buffer */
1385 1385 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1386 1386 con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1387 1387
1388 1388 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1389 1389 &scsi_raid_io_sgl_ieee->Address,
1390 1390 acmd->cmd_dmacookies[i].dmac_laddress);
1391 1391
1392 1392 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1393 1393 &scsi_raid_io_sgl_ieee->Length,
1394 1394 acmd->cmd_dmacookies[i].dmac_size);
1395 1395
1396 1396 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1397 1397 &scsi_raid_io_sgl_ieee->Flags, 0);
1398 1398
1399 1399 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1400 1400 if (i == (numElements - 1))
1401 1401 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1402 1402 &scsi_raid_io_sgl_ieee->Flags, IEEE_SGE_FLAGS_END_OF_LIST);
1403 1403 }
1404 1404
1405 1405 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1406 1406
1407 1407 #if DEBUG
1408 1408 con_log(CL_DLEVEL1, (CE_NOTE,
1409 - "[SGL Address]:%llx",
1409 + "[SGL Address]: %" PRIx64,
1410 1410 scsi_raid_io_sgl_ieee->Address));
1411 1411 con_log(CL_DLEVEL1, (CE_NOTE,
1412 1412 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1413 1413 con_log(CL_DLEVEL1, (CE_NOTE,
1414 1414 "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1415 1415 #endif
1416 1416
1417 1417 i++;
1418 1418 }
1419 1419 }
1420 1420
1421 1421 return (0);
1422 1422 } /*end of BuildScatterGather */
1423 1423
1424 1424
1425 1425 /*
1426 1426 * build_cmd
1427 1427 */
1428 1428 struct mrsas_cmd *
1429 1429 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1430 1430 struct scsi_pkt *pkt, uchar_t *cmd_done)
1431 1431 {
1432 1432 uint8_t fp_possible = 0;
1433 1433 uint16_t flags = 0;
1434 1434 uint32_t i, index;
1435 1435 uint32_t context;
1436 1436 uint32_t sge_bytes;
1437 1437 uint8_t ChainOffsetValue;
1438 1438 uint32_t SGLFlags;
1439 1439 uint32_t lba_count=0;
1440 1440 uint32_t start_lba_hi=0;
1441 1441 uint32_t start_lba_lo=0;
1442 1442 ddi_acc_handle_t acc_handle;
1443 1443 struct mrsas_cmd *cmd = NULL;
1444 1444 struct scsa_cmd *acmd = PKT2CMD(pkt);
1445 1445 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
1446 1446 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1447 1447 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
1448 1448 uint32_t datalen;
1449 1449 struct IO_REQUEST_INFO io_info;
1450 1450 MR_FW_RAID_MAP_ALL *local_map_ptr;
1451 1451 MR_LD_RAID *raid;
1452 1452 U32 ld;
1453 1453 uint16_t pd_cmd_cdblen;
1454 1454
1455 1455 con_log(CL_DLEVEL1, (CE_NOTE,
1456 1456 "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1457 1457
1458 1458 /* find out if this is logical or physical drive command. */
1459 1459 acmd->islogical = MRDRV_IS_LOGICAL(ap);
1460 1460 acmd->device_id = MAP_DEVICE_ID(instance, ap);
1461 1461
1462 1462 *cmd_done = 0;
1463 1463
1464 1464 /* get the command packet */
1465 1465 if (!(cmd = get_raid_msg_pkt(instance))) {
1466 1466 return (NULL);
1467 1467 }
1468 1468
1469 1469 index = cmd->index;
1470 1470 ReqDescUnion = mr_sas_get_request_descriptor(instance, index, cmd);
1471 1471 ReqDescUnion->Words = 0;
1472 1472 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1473 1473 ReqDescUnion->SCSIIO.RequestFlags =
1474 1474 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1475 1475 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1476 1476
1477 1477
1478 1478 cmd->request_desc = ReqDescUnion;
1479 1479 cmd->pkt = pkt;
1480 1480 cmd->cmd = acmd;
1481 1481
1482 1482 /* lets get the command directions */
1483 1483 if (acmd->cmd_flags & CFLAG_DMASEND) {
1484 1484 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1485 1485 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1486 1486 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1487 1487 DDI_DMA_SYNC_FORDEV);
1488 1488 }
1489 1489 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1490 1490 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1491 1491 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1492 1492 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1493 1493 DDI_DMA_SYNC_FORCPU);
1494 1494 }
1495 1495 } else {
1496 1496 con_log(CL_ANN, (CE_NOTE, "NO DMA\n"));
1497 1497 }
1498 1498
1499 1499
1500 1500 // get SCSI_IO raid message frame pointer
1501 1501 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1502 1502
1503 1503 /* zero out SCSI_IO raid message frame */
1504 1504 memset(scsi_raid_io, 0, sizeof(Mpi2RaidSCSIIORequest_t));
1505 1505
1506 1506 /*Set the ldTargetId set by BuildRaidContext() */
1507 1507 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1508 1508 &scsi_raid_io->RaidContext.ldTargetId,
1509 1509 acmd->device_id);
1510 1510
1511 1511 /* Copy CDB to scsi_io_request message frame */
1512 1512 ddi_rep_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1513 1513 (uint8_t *)pkt->pkt_cdbp,
1514 1514 (uint8_t *)scsi_raid_io->CDB.CDB32,
1515 1515 acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1516 1516
1517 1517 /*
1518 1518  	 * Just the CDB length; the rest of the flags are zero.
1519 1519 * This will be modified later.
1520 1520 */
1521 1521 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1522 1522 &scsi_raid_io->IoFlags,
1523 1523 acmd->cmd_cdblen);
1524 1524
1525 1525 pd_cmd_cdblen = acmd->cmd_cdblen;
1526 1526
1527 1527 switch (pkt->pkt_cdbp[0]) {
1528 1528 case SCMD_READ:
1529 1529 case SCMD_WRITE:
1530 1530 case SCMD_READ_G1:
1531 1531 case SCMD_WRITE_G1:
1532 1532 case SCMD_READ_G4:
1533 1533 case SCMD_WRITE_G4:
1534 1534 case SCMD_READ_G5:
1535 1535 case SCMD_WRITE_G5:
1536 1536
1537 1537 if (acmd->islogical) {
1538 1538 /* Initialize sense Information */
1539 1539 if (cmd->sense1 == NULL) {
1540 1540 con_log(CL_ANN, (CE_NOTE,
1541 1541 "tbolt_build_cmd: Sense buffer ptr NULL \n"));
1542 1542 }
1543 1543 bzero(cmd->sense1, SENSE_LENGTH);
1544 1544 con_log(CL_DLEVEL2, (CE_NOTE,
1545 1545 "tbolt_build_cmd CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1546 1546
1547 1547 if (acmd->cmd_cdblen == CDB_GROUP0) { /* 6-byte cdb */
1548 1548 lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1549 1549 start_lba_lo =
1550 1550 ((uint32_t)(pkt->pkt_cdbp[3]) |
1551 1551 ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1552 1552 ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) << 16));
1553 1553 } else if (acmd->cmd_cdblen == CDB_GROUP1) { /* 10-byte cdb */
1554 1554 lba_count =
1555 1555 (((uint16_t)(pkt->pkt_cdbp[8])) |
1556 1556 ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1557 1557
1558 1558 start_lba_lo =
1559 1559 (((uint32_t)(pkt->pkt_cdbp[5])) |
1560 1560 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1561 1561 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1562 1562 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1563 1563
1564 1564 } else if (acmd->cmd_cdblen == CDB_GROUP5) { /* 12-byte cdb */
1565 1565 lba_count = (
1566 1566 ((uint32_t)(pkt->pkt_cdbp[9])) |
1567 1567 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1568 1568 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1569 1569 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1570 1570
1571 1571 start_lba_lo =
1572 1572 (((uint32_t)(pkt->pkt_cdbp[5])) |
1573 1573 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1574 1574 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1575 1575 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1576 1576
1577 1577 } else if (acmd->cmd_cdblen == CDB_GROUP4) { /* 16-byte cdb */
1578 1578 lba_count = (
1579 1579 ((uint32_t)(pkt->pkt_cdbp[13])) |
1580 1580 ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1581 1581 ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1582 1582 ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1583 1583
1584 1584 start_lba_lo = (
1585 1585 ((uint32_t)(pkt->pkt_cdbp[9])) |
1586 1586 ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1587 1587 ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1588 1588 ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1589 1589
1590 1590 start_lba_hi = (
1591 1591 ((uint32_t)(pkt->pkt_cdbp[5])) |
1592 1592 ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1593 1593 ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1594 1594 ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1595 1595 }
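	/*
	 * Worked example of the decode above: a 10-byte READ with CDB
	 * 28 00 00 00 12 34 00 00 08 00 yields start_lba_lo = 0x00001234
	 * (bytes 2-5, big-endian) and lba_count = 0x0008 (bytes 7-8),
	 * i.e. eight blocks starting at LBA 0x1234; start_lba_hi is only
	 * populated for 16-byte CDBs.
	 */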
1596 1596
1597 1597 if (instance->tbolt &&
1598 1598 ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer) )
1599 1599 	cmn_err(CE_WARN, "I/O sector count 0x%x exceeds the controller transfer limit\n", lba_count);
1600 1600
1601 1601 memset(&io_info, 0, sizeof (struct IO_REQUEST_INFO));
1602 1602 io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
1603 1603 io_info.numBlocks = lba_count;
1604 1604 io_info.ldTgtId = acmd->device_id;
1605 1605
1606 1606 if (acmd->cmd_flags & CFLAG_DMASEND)
1607 1607 io_info.isRead = 0;
1608 1608 else
1609 1609 io_info.isRead = 1;
1610 1610
1611 1611
1612 1612 	/* Acquire SYNC MAP UPDATE lock */
1613 1613 mutex_enter(&instance->sync_map_mtx);
1614 1614
1615 1615 local_map_ptr = instance->ld_map[(instance->map_id & 1)];
1616 1616
1617 1617 if ( (MR_TargetIdToLdGet(acmd->device_id, local_map_ptr) >= MAX_LOGICAL_DRIVES) || !instance->fast_path_io ){
1618 1618 cmn_err(CE_NOTE,
1619 1619 "Fast Path NOT Possible, targetId >= MAX_LOGICAL_DRIVES || !instance->fast_path_io\n");
1620 1620 fp_possible = 0;
1621 1621 /* Set Regionlock flags to BYPASS
1622 1622 io_request->RaidContext.regLockFlags = 0; */
1623 1623 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1624 1624 &scsi_raid_io->RaidContext.regLockFlags, 0);
1625 1625 } else {
1626 1626 if (MR_BuildRaidContext(instance, &io_info,
1627 1627 &scsi_raid_io->RaidContext, local_map_ptr))
1628 1628 fp_possible = io_info.fpOkForIo;
1629 1629 }
1630 1630
1631 1631 if (!enable_fp) {
1632 1632 fp_possible = 0;
1633 1633 }
1634 1634 con_log(CL_ANN1, (CE_NOTE,
1635 1635 "enable_fp %d instance->fast_path_io %d fp_possible %d \n",
1636 1636 enable_fp, instance->fast_path_io, fp_possible));
1637 1637
1638 1638 if (fp_possible) {
1639 1639
1640 1640 /* Check for DIF enabled LD */
1641 1641 if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1642 1642 /* Prepare 32 Byte CDB for DIF capable Disk */
1643 1643 mrsas_tbolt_prepare_cdb(instance,
1644 1644 scsi_raid_io->CDB.CDB32,
1645 1645 &io_info,
1646 1646 scsi_raid_io,
1647 1647 start_lba_lo);
1648 1648 } else {
1649 1649 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1650 1650 (uint8_t *)&pd_cmd_cdblen, io_info.pdBlock, io_info.numBlocks, 0);
1651 1651 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1652 1652 &scsi_raid_io->IoFlags,
1653 1653 pd_cmd_cdblen);
1654 1654 }
1655 1655
1656 1656 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1657 1657 &scsi_raid_io->Function,
1658 1658 MPI2_FUNCTION_SCSI_IO_REQUEST);
1659 1659
1660 1660 ReqDescUnion->SCSIIO.RequestFlags =
1661 1661 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1662 1662 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1663 1663
1664 1664 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1665 1665 uint8_t regLockFlags = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1666 1666 &scsi_raid_io->RaidContext.regLockFlags);
1667 1667 uint16_t IoFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1668 1668 &scsi_raid_io->IoFlags);
1669 1669
1670 1670 if (regLockFlags == REGION_TYPE_UNUSED)
1671 1671 ReqDescUnion->SCSIIO.RequestFlags =
1672 1672 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1673 1673
1674 1674 IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1675 1675 regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE);
1676 1676
1677 1677 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1678 1678 &scsi_raid_io->ChainOffset, 0);
1679 1679 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1680 1680 &scsi_raid_io->RaidContext.nsegType, ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | MPI2_TYPE_CUDA));
1681 1681 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1682 1682 &scsi_raid_io->RaidContext.regLockFlags, regLockFlags);
1683 1683 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1684 1684 &scsi_raid_io->IoFlags, IoFlags);
1685 1685 }
1686 1686
1687 1687 if ((instance->load_balance_info[acmd->device_id].loadBalanceFlag) && (io_info.isRead)) {
1688 1688 io_info.devHandle = get_updated_dev_handle(&instance->load_balance_info[acmd->device_id], &io_info);
1689 1689 cmd->load_balance_flag |= MEGASAS_LOAD_BALANCE_FLAG;
1690 1690 } else
1691 1691 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
1692 1692
1693 1693 ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1694 1694 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1695 1695 &scsi_raid_io->DevHandle,
1696 1696 io_info.devHandle);
1697 1697
1698 1698 } else {
1699 1699 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1700 1700 &scsi_raid_io->Function,
1701 1701 MPI2_FUNCTION_LD_IO_REQUEST);
1702 1702
1703 1703 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1704 1704 &scsi_raid_io->DevHandle, acmd->device_id);
1705 1705
1706 1706 ReqDescUnion->SCSIIO.RequestFlags =
1707 1707 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1708 1708 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1709 1709
1710 1710 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1711 1711 &scsi_raid_io->RaidContext.timeoutValue, local_map_ptr->raidMap.fpPdIoTimeoutSec);
1712 1712
1713 1713 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1714 1714 uint8_t regLockFlags = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1715 1715 &scsi_raid_io->RaidContext.regLockFlags);
1716 1716
1717 1717 if (regLockFlags == REGION_TYPE_UNUSED)
1718 1718 ReqDescUnion->SCSIIO.RequestFlags =
1719 1719 (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1720 1720
1721 1721 regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE);
1722 1722
1723 1723 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1724 1724 &scsi_raid_io->RaidContext.nsegType, ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | MPI2_TYPE_CUDA));
1725 1725 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1726 1726 &scsi_raid_io->RaidContext.regLockFlags, regLockFlags);
1727 1727 }
1728 1728
1729 1729 } /* Not FP */
1730 1730
1731 1731 /*Release SYNC MAP UPDATE lock */
1732 1732 mutex_exit(&instance->sync_map_mtx);
1733 1733
1734 1734
1735 1735 /* Set sense buffer physical address/length in scsi_io_request.*/
1736 1736 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1737 1737 &scsi_raid_io->SenseBufferLowAddress,
1738 1738 cmd->sense_phys_addr1);
1739 1739 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1740 1740 &scsi_raid_io->SenseBufferLength,
1741 1741 SENSE_LENGTH);
1742 1742
1743 1743 /* Construct SGL*/
1744 1744 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1745 1745 &scsi_raid_io->SGLOffset0,
1746 1746 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1747 1747
1748 1748 mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1749 1749 scsi_raid_io, &datalen);
1750 1750
1751 1751 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1752 1752 &scsi_raid_io->DataLength, datalen);
1753 1753
1754 1754 break;
1755 1755
1756 1756 }
1757 1757 else {
1758 1758 #ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
1759 1759 break;
1760 1760 #endif
1761 1761 }
1762 1762 	/* fall through for all non-rd/wr cmds */
1763 1763 default:
1764 1764 switch (pkt->pkt_cdbp[0]) {
1765 1765 case 0x35: { // SCMD_SYNCHRONIZE_CACHE
1766 1766 return_raid_msg_pkt(instance, cmd);
1767 1767 *cmd_done = 1;
1768 1768 return (NULL);
1769 1769 }
1770 1770
1771 1771 case SCMD_MODE_SENSE:
1772 1772 case SCMD_MODE_SENSE_G1: {
1773 1773 union scsi_cdb *cdbp;
1774 1774 uint16_t page_code;
1775 1775
1776 1776 cdbp = (void *)pkt->pkt_cdbp;
1777 1777 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1778 1778 switch (page_code) {
1779 1779 case 0x3:
1780 1780 case 0x4:
1781 1781 (void) mrsas_mode_sense_build(pkt);
1782 1782 return_raid_msg_pkt(instance, cmd);
1783 1783 *cmd_done = 1;
1784 1784 return (NULL);
1785 1785 }
1786 1786 break;
1787 1787 }
1788 1788
1789 1789 default: {
1790 1790 	/* Handle pass-through commands (e.g. INQUIRY) here,
1791 1791 	   for both physical and logical devices. */
1792 1792
1793 1793 if(!(acmd->islogical)) {
1794 1794
1795 1795 	/* Acquire SYNC MAP UPDATE lock */
1796 1796 mutex_enter(&instance->sync_map_mtx);
1797 1797
1798 1798 local_map_ptr = instance->ld_map[(instance->map_id & 1)];
1799 1799
1800 1800 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1801 1801 &scsi_raid_io->Function, MPI2_FUNCTION_SCSI_IO_REQUEST);
1802 1802
1803 1803 ReqDescUnion->SCSIIO.RequestFlags =
1804 1804 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1805 1805 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1806 1806
1807 1807 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1808 1808 &scsi_raid_io->DevHandle,
1809 1809 local_map_ptr->raidMap.devHndlInfo[acmd->device_id].curDevHdl);
1810 1810
1811 1811
1812 1812 	/* Set regLockFlags to REGION_TYPE_BYPASS */
1813 1813 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1814 1814 &scsi_raid_io->RaidContext.regLockFlags, 0);
1815 1815 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1816 1816 &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1817 1817 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1818 1818 &scsi_raid_io->RaidContext.regLockLength, 0);
1819 1819 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->RaidContext.RAIDFlags,
1820 1820 MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1821 1821 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1822 1822 &scsi_raid_io->RaidContext.timeoutValue, local_map_ptr->raidMap.fpPdIoTimeoutSec);
1823 1823 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1824 1824 &scsi_raid_io->RaidContext.ldTargetId, acmd->device_id);
1825 1825 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1826 1826 &scsi_raid_io->LUN[1], acmd->lun);
1827 1827
1828 1828 /* Release SYNC MAP UPDATE lock */
1829 1829 mutex_exit(&instance->sync_map_mtx);
1830 1830
1831 1831 } else {
1832 1832 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1833 1833 &scsi_raid_io->Function, MPI2_FUNCTION_LD_IO_REQUEST);
1834 1834 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1835 1835 &scsi_raid_io->LUN[1], acmd->lun);
1836 1836 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1837 1837 &scsi_raid_io->DevHandle, acmd->device_id);
1838 1838 ReqDescUnion->SCSIIO.RequestFlags =
1839 1839 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1840 1840 }
1841 1841
1842 1842 /* Set sense buffer physical address/length in scsi_io_request.*/
1843 1843 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1844 1844 &scsi_raid_io->SenseBufferLowAddress,
1845 1845 cmd->sense_phys_addr1);
1846 1846 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1847 1847 &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1848 1848
1849 1849 /* Construct SGL*/
1850 1850 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1851 1851 &scsi_raid_io->SGLOffset0,
1852 1852 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1853 1853
1854 1854 mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1855 1855 scsi_raid_io, &datalen);
1856 1856
1857 1857 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1858 1858 &scsi_raid_io->DataLength, datalen);
1859 1859
1860 1860
1861 1861 con_log(CL_ANN, (CE_CONT,
1862 1862 "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1863 1863 pkt->pkt_cdbp[0], acmd->device_id));
1864 1864 con_log(CL_DLEVEL1, (CE_CONT,
1865 1865 "data length = %x\n",
1866 1866 scsi_raid_io->DataLength));
1867 1867 con_log(CL_DLEVEL1, (CE_CONT,
1868 1868 "cdb length = %x\n",
1869 1869 acmd->cmd_cdblen));
1870 1870 }
1871 1871 break;
1872 1872 }
1873 1873
1874 1874 }
1875 1875 #ifdef lint
1876 1876 context = context;
1877 1877 #endif
1878 1878
1879 1879 return (cmd);
1880 1880 }
1881 1881
1882 1882 /*
1883 1883 * mrsas_tbolt_tran_init_pkt - allocate & initialize a scsi_pkt structure
1884 1884 * @ap:
1885 1885 * @pkt:
1886 1886 * @bp:
1887 1887 * @cmdlen:
1888 1888 * @statuslen:
1889 1889 * @tgtlen:
1890 1890 * @flags:
1891 1891 * @callback:
1892 1892 *
1893 1893 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1894 1894 * structure and DMA resources for a target driver request. The
1895 1895 * tran_init_pkt() entry point is called when the target driver calls the
1896 1896 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1897 1897 * is a request to perform one or more of three possible services:
1898 1898 * - allocation and initialization of a scsi_pkt structure
1899 1899 * - allocation of DMA resources for data transfer
1900 1900 * - reallocation of DMA resources for the next portion of the data transfer
1901 1901 */
1902 1902 struct scsi_pkt *
1903 1903 mrsas_tbolt_tran_init_pkt(struct scsi_address *ap,
1904 1904 register struct scsi_pkt *pkt,
1905 1905 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1906 1906 int flags, int (*callback)(), caddr_t arg)
1907 1907 {
1908 1908 struct scsa_cmd *acmd;
1909 1909 struct mrsas_instance *instance;
1910 1910 struct scsi_pkt *new_pkt;
1911 1911
1912 1912 instance = ADDR2MR(ap);
1913 1913
1914 1914 /* step #1 : pkt allocation */
1915 1915 if (pkt == NULL) {
1916 1916 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1917 1917 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1918 1918 if (pkt == NULL) {
1919 1919 return (NULL);
1920 1920 }
1921 1921
1922 1922 acmd = PKT2CMD(pkt);
1923 1923
1924 1924 /*
1925 1925 * Initialize the new pkt - we redundantly initialize
1926 1926 * all the fields for illustrative purposes.
1927 1927 */
1928 1928 acmd->cmd_pkt = pkt;
1929 1929 acmd->cmd_flags = 0;
1930 1930 acmd->cmd_scblen = statuslen;
1931 1931 acmd->cmd_cdblen = cmdlen;
1932 1932 acmd->cmd_dmahandle = NULL;
1933 1933 acmd->cmd_ncookies = 0;
1934 1934 acmd->cmd_cookie = 0;
1935 1935 acmd->cmd_cookiecnt = 0;
1936 1936 acmd->cmd_nwin = 0;
1937 1937
1938 1938 pkt->pkt_address = *ap;
1939 1939 pkt->pkt_comp = (void (*)())NULL;
1940 1940 pkt->pkt_flags = 0;
1941 1941 pkt->pkt_time = 0;
1942 1942 pkt->pkt_resid = 0;
1943 1943 pkt->pkt_state = 0;
1944 1944 pkt->pkt_statistics = 0;
1945 1945 pkt->pkt_reason = 0;
1946 1946 new_pkt = pkt;
1947 1947 } else {
1948 1948 acmd = PKT2CMD(pkt);
1949 1949 new_pkt = NULL;
1950 1950 }
1951 1951
1952 1952 /* step #2 : dma allocation/move */
1953 1953 if (bp && bp->b_bcount != 0) {
1954 1954 if (acmd->cmd_dmahandle == NULL) {
1955 1955 if (mrsas_dma_alloc(instance, pkt, bp, flags,
1956 1956 callback) == DDI_FAILURE) {
1957 1957 if (new_pkt) {
1958 1958 scsi_hba_pkt_free(ap, new_pkt);
1959 1959 }
1960 1960 return ((struct scsi_pkt *)NULL);
1961 1961 }
1962 1962 } else {
1963 1963 if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1964 1964 return ((struct scsi_pkt *)NULL);
1965 1965 }
1966 1966 }
1967 1967 }
1968 1968 return (pkt);
1969 1969 }
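/*
 * For reference, a target driver reaches the tran_init_pkt entry point
 * above through scsi_init_pkt(9F).  The sketch below is guarded out and
 * purely illustrative (the xx_start/xx_done names, timeout and CDB
 * values are not part of this driver):
 */
#if 0
static void xx_done(struct scsi_pkt *);

static int
xx_start(struct scsi_device *sd, struct buf *bp)
{
	struct scsi_pkt *pkt;

	/* allocates the pkt, scsa_cmd and DMA resources via tran_init_pkt */
	pkt = scsi_init_pkt(&sd->sd_address, NULL, bp, CDB_GROUP1,
	    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC, NULL);
	if (pkt == NULL)
		return (ENOMEM);

	/* build a READ(10) for the transfer described by bp */
	(void) scsi_setup_cdb((union scsi_cdb *)pkt->pkt_cdbp, SCMD_READ_G1,
	    0, bp->b_bcount >> DEV_BSHIFT, 0);
	pkt->pkt_time = 60;
	pkt->pkt_comp = xx_done;

	/* hand the packet to the HBA's tran_start entry point */
	return (scsi_transport(pkt) == TRAN_ACCEPT ? 0 : EIO);
}
#endif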
1970 1970
1971 1971
1972 1972 uint32_t
1973 1973 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1974 1974 {
1975 1975 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1976 1976 }
1977 1977
1978 1978 void
1979 1979 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1980 1980 {
1981 1981 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1982 1982 atomic_add_16(&instance->fw_outstanding, 1);
1983 1983
1984 1984 struct scsi_pkt *pkt;
1985 1985
1986 1986 con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1987 1987
1988 1988 con_log(CL_DLEVEL1, (CE_CONT,
1989 - " [req desc Words] %llx \n", req_desc->Words));
1989 + " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1990 1990 con_log(CL_DLEVEL1, (CE_CONT,
1991 - " [req desc low part] %x \n", req_desc->Words));
1991 + " [req desc low part] %x \n",
1992 + 	    (uint_t)(req_desc->Words & 0xffffffff)));
1992 1993 con_log(CL_DLEVEL1, (CE_CONT,
1993 - " [req desc high part] %x \n", (req_desc->Words >> 32)));
1994 + " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1994 1995 pkt = cmd->pkt;
1995 1996
1996 1997 if (pkt) {
1997 1998 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1998 1999 "ISSUED CMD TO FW : called : cmd:"
1999 2000 ": %p instance : %p pkt : %p pkt_time : %x\n",
2000 2001 gethrtime(), (void *)cmd, (void *)instance,
2001 2002 (void *)pkt, cmd->drv_pkt_time));
2002 2003 if (instance->adapterresetinprogress) {
2003 2004 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2004 2005 con_log(CL_ANN, (CE_NOTE,
2005 2006 "TBOLT Reset the scsi_pkt timer"));
2006 2007 } else {
2007 2008 push_pending_mfi_pkt(instance, cmd);
2008 2009 }
2009 2010
2010 2011 } else {
2011 2012 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2012 2013 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
2013 2014 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
2014 2015 }
2015 2016
2016 2017 /* Issue the command to the FW */
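	/*
	 * The 64-bit request descriptor is posted as two 32-bit MMIO
	 * writes to the inbound queue ports; reg_write_mtx keeps the
	 * low/high halves of concurrent submissions from interleaving.
	 */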
2017 2018 mutex_enter(&instance->reg_write_mtx);
2018 2019 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2019 2020 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2020 2021 mutex_exit(&instance->reg_write_mtx);
2021 2022 }
2022 2023
2023 2024 /*
2024 2025 * issue_cmd_in_sync_mode
2025 2026 */
2026 2027 int
2027 2028 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
2028 2029 struct mrsas_cmd *cmd)
2029 2030 {
2030 2031 int i;
2031 2032 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2032 2033 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2033 2034
2034 2035 struct mrsas_header *hdr;
2035 2036 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2036 2037
2037 2038 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X", cmd->SMID));
2038 2039
2039 2040
2040 2041 if (instance->adapterresetinprogress) {
2041 2042 cmd->drv_pkt_time = ddi_get16
2042 2043 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2043 2044 if (cmd->drv_pkt_time < debug_timeout_g)
2044 2045 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2045 2046 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
2046 2047 "RESET-IN-PROGRESS, issue cmd & return.\n"));
2047 2048
2048 2049 mutex_enter(&instance->reg_write_mtx);
2049 2050 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2050 2051 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2051 2052 mutex_exit(&instance->reg_write_mtx);
2052 2053
2053 2054 return (DDI_SUCCESS);
2054 2055 } else {
2055 2056 con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: pushing the pkt\n"));
2056 2057 push_pending_mfi_pkt(instance, cmd);
2057 2058 }
2058 2059
2059 2060 con_log(CL_DLEVEL2, (CE_NOTE,
2060 - "HighQport offset :%lx",
2061 + "HighQport offset :%p",
2061 2062 (uint32_t *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
2062 2063 con_log(CL_DLEVEL2, (CE_NOTE,
2063 - "LowQport offset :%lx",
2064 + "LowQport offset :%p",
2064 2065 (uint32_t *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
2065 2066
2066 2067 cmd->sync_cmd = MRSAS_TRUE;
2067 2068 cmd->cmd_status = ENODATA;
2068 2069
2069 2070
2070 2071 mutex_enter(&instance->reg_write_mtx);
2071 2072 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2072 2073 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2073 2074 mutex_exit(&instance->reg_write_mtx);
2074 2075
2075 2076 con_log(CL_ANN1, (CE_NOTE,
2076 - " req desc high part %x \n", (req_desc->Words >> 32)));
2077 + " req desc high part %x \n", (uint_t)(req_desc->Words >> 32)));
2077 2078 con_log(CL_ANN1, (CE_NOTE,
2078 - " req desc low part %x \n", req_desc->Words));
2079 + " req desc low part %x \n", (uint_t)(req_desc->Words & 0xffffffff)));
2079 2080
2080 2081 mutex_enter(&instance->int_cmd_mtx);
2081 2082 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2082 2083 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2083 2084 }
2084 2085 mutex_exit(&instance->int_cmd_mtx);
2085 2086
2086 2087
2087 2088 if (i < (msecs -1)) {
2088 2089 return (DDI_SUCCESS);
2089 2090 } else {
2090 2091 return (DDI_FAILURE);
2091 2092 }
2092 2093 }
2093 2094
2094 2095 /*
2095 2096 * issue_cmd_in_poll_mode
2096 2097 */
2097 2098 int
2098 2099 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2099 2100 struct mrsas_cmd *cmd)
2100 2101 {
2101 2102 int i;
2102 2103 uint16_t flags;
2103 2104 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2104 2105 struct mrsas_header *frame_hdr;
2105 2106
2106 2107 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X", cmd->SMID));
2107 2108
2108 2109 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2109 2110
2110 2111 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2111 2112 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2112 2113 MFI_CMD_STATUS_POLL_MODE);
2113 2114 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2114 2115 flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2115 2116 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2116 2117
2117 2118 con_log(CL_ANN1, (CE_NOTE,
2118 - " req desc low part %x \n", req_desc->Words));
2119 + " req desc low part %x \n", (uint_t)(req_desc->Words & 0xffffffff)));
2119 2120 con_log(CL_ANN1, (CE_NOTE,
2120 - " req desc high part %x \n", (req_desc->Words >> 32)));
2121 + " req desc high part %x \n", (uint_t)(req_desc->Words >> 32)));
2121 2122
2122 2123 /* issue the frame using inbound queue port */
2123 2124 mutex_enter(&instance->reg_write_mtx);
2124 2125 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2125 2126 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2126 2127 mutex_exit(&instance->reg_write_mtx);
2127 2128
2128 2129 for (i = 0; i < msecs && (
2129 2130 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2130 2131 == MFI_CMD_STATUS_POLL_MODE); i++) {
2131 2132 /* wait for cmd_status to change from 0xFF */
2132 2133 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2133 2134 }
2134 2135
2135 2136 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2136 2137 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2137 2138 con_log(CL_ANN1, (CE_NOTE,
2138 - " cmd failed %x \n", (req_desc->Words)));
2139 + " cmd failed %" PRIx64 " \n", (req_desc->Words)));
2139 2140 return (DDI_FAILURE);
2140 2141 }
2141 2142
2142 2143 return (DDI_SUCCESS);
2143 2144 }
2144 2145
2145 2146 void
2146 2147 tbolt_enable_intr(struct mrsas_instance *instance)
2147 2148 {
2148 2149 uint32_t mask;
2149 2150
2150 2151 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2151 2152 //writel(~0, ®s->outbound_intr_status);
2152 2153 //readl(®s->outbound_intr_status);
2153 2154
2154 2155 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2155 2156
2156 2157 /* dummy read to force PCI flush */
2157 2158 mask = RD_OB_INTR_MASK(instance);
2158 2159
2159 2160 }
2160 2161
2161 2162 void
2162 2163 tbolt_disable_intr(struct mrsas_instance *instance)
2163 2164 {
2164 2165 uint32_t mask = 0xFFFFFFFF;
2165 2166 uint32_t status;
2166 2167
2167 2168
2168 2169 WR_OB_INTR_MASK(mask, instance);
2169 2170
2170 2171 /* Dummy readl to force pci flush */
2171 2172
2172 2173 status = RD_OB_INTR_MASK(instance);
2173 2174 }
2174 2175
2175 2176
2176 2177 int
2177 2178 tbolt_intr_ack(struct mrsas_instance *instance)
2178 2179 {
2179 2180 uint32_t status;
2180 2181
2181 2182 /* check if it is our interrupt */
2182 2183 status = RD_OB_INTR_STATUS(instance);
2183 2184 con_log(CL_ANN1, (CE_NOTE,
2184 2185 "chkpnt: Entered tbolt_intr_ack status = %d \n", status));
2185 2186
2186 2187 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2187 2188 return (DDI_INTR_UNCLAIMED);
2188 2189 }
2189 2190
2190 2191 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2191 2192 /* clear the interrupt by writing back the same value */
2192 2193 WR_OB_INTR_STATUS(status, instance);
2193 2194 /* dummy READ */
2194 2195 RD_OB_INTR_STATUS(instance);
2195 2196 }
2196 2197 return (DDI_INTR_CLAIMED);
2197 2198 }
2198 2199
2199 2200 /*
2200 2201 * get_raid_msg_pkt : Get a command from the free pool
2201 2202 * After successful allocation, the caller of this routine
2202 2203 * must clear the frame buffer (memset to zero) before
2203 2204 * using the packet further.
2204 2205 *
2205 2206 * ***** Note *****
2206 2207 * After clearing the frame buffer the context id of the
2207 2208 * frame buffer SHOULD be restored back.
2208 2209 */
2209 2210
2210 2211 struct mrsas_cmd *
2211 2212 get_raid_msg_pkt(struct mrsas_instance *instance)
2212 2213 {
2213 2214 mlist_t *head = &instance->cmd_pool_list;
2214 2215 struct mrsas_cmd *cmd = NULL;
2215 2216
2216 2217 mutex_enter(&instance->cmd_pool_mtx);
2217 2218 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2218 2219
2219 2220
2220 2221 if (!mlist_empty(head)) {
2221 2222 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2222 2223 mlist_del_init(head->next);
2223 2224 }
2224 2225 if (cmd != NULL) {
2225 2226 cmd->pkt = NULL;
2226 2227 cmd->retry_count_for_ocr = 0;
2227 2228 cmd->drv_pkt_time = 0;
2228 2229 }
2229 2230 mutex_exit(&instance->cmd_pool_mtx);
2230 2231
2231 2232 if (cmd != NULL)
2232 2233 bzero(cmd->scsi_io_request,
2233 2234 sizeof (Mpi2RaidSCSIIORequest_t));
2234 2235 return (cmd);
2235 2236 }
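/*
 * Condensed caller pattern for the pool routines above (compare
 * mrsas_tbolt_build_cmd): the returned packet's MPI2 frame has just
 * been zeroed, so the caller re-establishes the command's identity
 * before issuing it, e.g.:
 *
 *	cmd = get_raid_msg_pkt(instance);
 *	ReqDescUnion = mr_sas_get_request_descriptor(instance,
 *	    cmd->index, cmd);
 *	ReqDescUnion->Words = 0;
 *	ReqDescUnion->SCSIIO.SMID = cmd->SMID;
 *	cmd->request_desc = ReqDescUnion;
 *	(fill cmd->scsi_io_request, then tbolt_issue_cmd(cmd, instance);
 *	 tbolt_complete_cmd() later returns the packet to the pool)
 */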
2236 2237
2237 2238 struct mrsas_cmd *
2238 2239 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2239 2240 {
2240 2241 mlist_t *head = &instance->cmd_app_pool_list;
2241 2242 struct mrsas_cmd *cmd = NULL;
2242 2243
2243 2244 mutex_enter(&instance->cmd_app_pool_mtx);
2244 2245 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2245 2246
2246 2247 if (!mlist_empty(head)) {
2247 2248 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2248 2249 mlist_del_init(head->next);
2249 2250 }
2250 2251 if (cmd != NULL) {
2251 2252 cmd->retry_count_for_ocr = 0;
2252 2253 cmd->drv_pkt_time = 0;
2253 2254 cmd->pkt = NULL;
2254 2255 cmd->request_desc = NULL;
2255 2256
2256 2257 }
2257 2258
2258 2259 mutex_exit(&instance->cmd_app_pool_mtx);
2259 2260
2260 2261 if (cmd != NULL) {
2261 2262 bzero(cmd->scsi_io_request,
2262 2263 sizeof (Mpi2RaidSCSIIORequest_t));
2263 2264 }
2264 2265
2265 2266 return (cmd);
2266 2267 }
2267 2268
2268 2269 /*
2269 2270 * return_raid_msg_pkt : Return a cmd to free command pool
2270 2271 */
2271 2272 void
2272 2273 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2273 2274 {
2274 2275 mutex_enter(&instance->cmd_pool_mtx);
2275 2276 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2276 2277
2277 2278
2278 2279 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2279 2280
2280 2281 mutex_exit(&instance->cmd_pool_mtx);
2281 2282 }
2282 2283
2283 2284 void
2284 2285 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2285 2286 {
2286 2287 mutex_enter(&instance->cmd_app_pool_mtx);
2287 2288 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2288 2289
2289 2290 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2290 2291
2291 2292 mutex_exit(&instance->cmd_app_pool_mtx);
2292 2293 }
2293 2294
2294 2295
2295 2296 void
2296 2297 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2297 2298 struct mrsas_cmd *cmd)
2298 2299 {
2299 2300 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2300 2301 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2301 2302 MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2302 2303 uint32_t index;
2303 2304
2304 2305 if (!instance->tbolt) {
2305 2306 con_log(CL_ANN, (CE_NOTE, "Not MFA enabled.\n"));
2306 2307 return;
2307 2308 }
2308 2309
2309 2310 index = cmd->index;
2310 2311
2311 2312 ReqDescUnion =
2312 2313 mr_sas_get_request_descriptor(instance, index, cmd);
2313 2314
2314 2315 if (!ReqDescUnion) {
2315 - con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]%x"));
2316 + con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2316 2317 return;
2317 2318 }
2318 2319
2319 2320 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2320 2321
2321 2322 ReqDescUnion->Words = 0;
2322 2323
2323 2324 ReqDescUnion->SCSIIO.RequestFlags =
2324 2325 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2325 2326 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2326 2327
2327 2328 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2328 2329
2329 2330 cmd->request_desc = ReqDescUnion;
2330 2331
2331 2332 // get raid message frame pointer
2332 2333 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2333 2334
2334 2335 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2335 2336 Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2336 2337 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2337 2338 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2338 2339 &sgl_ptr_end->Flags, 0);
2339 2340 }
2340 2341
2341 2342 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2342 2343 &scsi_raid_io->Function,
2343 2344 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2344 2345
2345 2346 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2346 2347 &scsi_raid_io->SGLOffset0,
2347 2348 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2348 2349
2349 2350 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2350 2351 &scsi_raid_io->ChainOffset,
2351 2352 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2352 2353
2353 2354 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2354 2355 &scsi_raid_io->SenseBufferLowAddress,
2355 2356 cmd->sense_phys_addr1);
2356 2357
2357 2358
2358 2359 scsi_raid_io_sgl_ieee =
2359 2360 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2360 2361
2361 2362 ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
2362 2363 &scsi_raid_io_sgl_ieee->Address,
|
2363 2364 (U64)cmd->frame_phys_addr);
2364 2365
2365 2366 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2366 2367 &scsi_raid_io_sgl_ieee->Flags,
2367 2368 (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2368 2369 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2369 2370 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2370 2371 &scsi_raid_io_sgl_ieee->Length, 1024); //MEGASAS_MAX_SZ_CHAIN_FRAME
2371 2372
2372 2373 con_log(CL_ANN1, (CE_NOTE,
2373 - "[MFI CMD PHY ADDRESS]:%x",
2374 + "[MFI CMD PHY ADDRESS]:%" PRIx64,
2374 2375 scsi_raid_io_sgl_ieee->Address));
2375 2376 con_log(CL_ANN1, (CE_NOTE,
2376 2377 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2377 2378 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2378 2379 scsi_raid_io_sgl_ieee->Flags));
2379 2380 }
2380 2381
2381 2382
2382 2383 void
2383 2384 tbolt_complete_cmd(struct mrsas_instance *instance,
2384 2385 struct mrsas_cmd *cmd)
2385 2386 {
2386 2387 uint8_t status;
2387 2388 uint8_t extStatus;
2388 2389 uint8_t arm;
2389 2390 struct scsa_cmd *acmd;
2390 2391 struct scsi_pkt *pkt;
2391 2392 struct scsi_arq_status *arqstat;
2392 2393 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2393 2394 LD_LOAD_BALANCE_INFO *lbinfo;
2394 2395 int i;
2395 2396
2396 2397 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2397 2398
2398 2399 status = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2399 2400 &scsi_raid_io->RaidContext.status);
2400 2401 extStatus = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2401 2402 &scsi_raid_io->RaidContext.extStatus);
2402 2403
2403 2404 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2404 2405 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2405 2406
2406 2407 if (status != MFI_STAT_OK) {
2407 2408 con_log(CL_ANN, (CE_WARN,
2408 2409 "IO Cmd Failed SMID %x", cmd->SMID));
2409 2410 } else {
2410 2411 con_log(CL_ANN, (CE_NOTE,
2411 2412 "IO Cmd Success SMID %x", cmd->SMID));
2412 2413 }
2413 2414
2414 2415 /* regular commands */
2415 2416
2416 2417 switch (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2417 2418 &scsi_raid_io->Function)) {
2418 2419
2419 2420 case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2420 2421 acmd = (struct scsa_cmd *)cmd->cmd;
2421 2422 lbinfo = &instance->load_balance_info[acmd->device_id];
2422 2423
2423 2424 if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2424 2425 arm = lbinfo->raid1DevHandle[0] == scsi_raid_io->DevHandle ? 0 : 1;
2425 2426
2426 2427 lbinfo->scsi_pending_cmds[arm]--;
2427 2428 cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2428 2429 }
2429 2430 con_log(CL_DLEVEL3, (CE_NOTE,
2430 2431 "FastPath IO Completion Success "));
2431 2432
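	/*
	 * FALLTHROUGH: fast-path completions share the regular LD I/O
	 * completion handling below.
	 */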
2432 2433 case MPI2_FUNCTION_LD_IO_REQUEST : {// Regular Path IO.
2433 2434 acmd = (struct scsa_cmd *)cmd->cmd;
2434 2435 pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2435 2436
2436 2437 if (acmd->cmd_flags & CFLAG_DMAVALID) {
2437 2438 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2438 2439 (void) ddi_dma_sync(acmd->cmd_dmahandle,
2439 2440 acmd->cmd_dma_offset,
2440 2441 acmd->cmd_dma_len,
2441 2442 DDI_DMA_SYNC_FORCPU);
2442 2443 }
2443 2444 }
2444 2445
2445 2446 pkt->pkt_reason = CMD_CMPLT;
2446 2447 pkt->pkt_statistics = 0;
2447 2448 pkt->pkt_state = STATE_GOT_BUS
2448 2449 | STATE_GOT_TARGET | STATE_SENT_CMD
2449 2450 | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2450 2451
2451 2452 con_log(CL_ANN, (CE_CONT,
2452 2453 " CDB[0] = %x completed for %s: size %lx SMID %x cmd_status %x",
2453 2454 pkt->pkt_cdbp[0],
2454 2455 ((acmd->islogical) ? "LD" : "PD"),
2455 2456 acmd->cmd_dmacount, cmd->SMID, status));
2456 2457
2457 2458 if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2458 2459 struct scsi_inquiry *inq;
2459 2460
2460 2461 if (acmd->cmd_dmacount != 0) {
2461 2462 bp_mapin(acmd->cmd_buf);
2462 2463 inq = (struct scsi_inquiry *)
2463 2464 acmd->cmd_buf->b_un.b_addr;
2464 2465
2465 2466 /* don't expose physical drives to OS */
2466 2467 if (acmd->islogical &&
2467 2468 (status == MFI_STAT_OK)) {
2468 2469 display_scsi_inquiry(
2469 2470 (caddr_t)inq);
2470 2471 }
2471 2472 #ifdef PDSUPPORT
2472 2473 else if ((status ==
2473 2474 MFI_STAT_OK) && inq->inq_dtype ==
2474 2475 DTYPE_DIRECT) {
2475 2476
2476 2477 display_scsi_inquiry(
2477 2478 (caddr_t)inq);
2478 2479 }
2479 2480 #endif
2480 2481 else {
2481 2482 /* for physical disk */
2482 2483 status =
2483 2484 MFI_STAT_DEVICE_NOT_FOUND;
2484 2485 }
2485 2486 }
2486 2487 }
2487 2488
2488 2489 switch (status) {
2489 2490 case MFI_STAT_OK:
2490 2491 pkt->pkt_scbp[0] = STATUS_GOOD;
2491 2492 break;
2492 2493 case MFI_STAT_LD_CC_IN_PROGRESS:
2493 2494 case MFI_STAT_LD_RECON_IN_PROGRESS:
2494 2495 pkt->pkt_scbp[0] = STATUS_GOOD;
2495 2496 break;
2496 2497 case MFI_STAT_LD_INIT_IN_PROGRESS:
2497 2498 pkt->pkt_reason = CMD_TRAN_ERR;
2498 2499 break;
2499 2500 case MFI_STAT_SCSI_IO_FAILED:
2500 2501 cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
2501 2502 pkt->pkt_reason = CMD_TRAN_ERR;
2502 2503 break;
2503 2504 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2504 2505 con_log(CL_ANN, (CE_WARN,
2505 2506 "tbolt_complete_cmd: scsi_done with error"));
2506 2507
2507 2508 pkt->pkt_reason = CMD_CMPLT;
2508 2509 ((struct scsi_status *)
2509 2510 pkt->pkt_scbp)->sts_chk = 1;
2510 2511
2511 2512 if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2512 2513 con_log(CL_ANN, (CE_WARN, "TEST_UNIT_READY fail"));
2513 2514 } else {
2514 2515 pkt->pkt_state |= STATE_ARQ_DONE;
2515 2516 arqstat = (void *)(pkt->pkt_scbp);
2516 2517 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2517 2518 arqstat->sts_rqpkt_resid = 0;
2518 2519 arqstat->sts_rqpkt_state |=
2519 2520 STATE_GOT_BUS | STATE_GOT_TARGET
2520 2521 | STATE_SENT_CMD
2521 2522 | STATE_XFERRED_DATA;
2522 2523 *(uint8_t *)&arqstat->sts_rqpkt_status =
2523 2524 STATUS_GOOD;
2524 2525 con_log(CL_ANN1, (CE_NOTE,
2525 2526 "Copying Sense data %x",
2526 2527 cmd->SMID));
2527 2528
2528 2529 ddi_rep_get8(
2529 2530 instance->
2530 2531 mpi2_frame_pool_dma_obj.acc_handle,
2531 2532 (uint8_t *)
2532 2533 &(arqstat->sts_sensedata),
2533 2534 cmd->sense1,
2534 2535 sizeof (struct scsi_extended_sense),
2535 2536 DDI_DEV_AUTOINCR);
2536 2537
2537 2538 }
2538 2539 break;
2539 2540 case MFI_STAT_LD_OFFLINE:
2540 2541 cmn_err(CE_WARN,
2541 2542 "tbolt_complete_cmd: ld offline "
2542 2543 "CDB[0]=0x%x targetId=0x%x devhandle=0x%x\n", //UNDO:
2543 2544 ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2544 2545 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->RaidContext.ldTargetId),
2545 2546 ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->DevHandle) );
2546 2547 pkt->pkt_reason = CMD_DEV_GONE;
2547 2548 pkt->pkt_statistics = STAT_DISCON;
2548 2549 break;
2549 2550 case MFI_STAT_DEVICE_NOT_FOUND:
2550 2551 con_log(CL_ANN, (CE_CONT,
2551 2552 "tbolt_complete_cmd: device not found error"));
2552 2553 pkt->pkt_reason = CMD_DEV_GONE;
2553 2554 pkt->pkt_statistics = STAT_DISCON;
2554 2555 break;
2555 2556
2556 2557 case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2557 2558 pkt->pkt_state |= STATE_ARQ_DONE;
2558 2559 pkt->pkt_reason = CMD_CMPLT;
2559 2560 ((struct scsi_status *)
2560 2561 pkt->pkt_scbp)->sts_chk = 1;
2561 2562
2562 2563 arqstat = (void *)(pkt->pkt_scbp);
2563 2564 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2564 2565 arqstat->sts_rqpkt_resid = 0;
2565 2566 arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2566 2567 | STATE_GOT_TARGET | STATE_SENT_CMD
2567 2568 | STATE_XFERRED_DATA;
2568 2569 *(uint8_t *)&arqstat->sts_rqpkt_status =
2569 2570 STATUS_GOOD;
2570 2571
2571 2572 arqstat->sts_sensedata.es_valid = 1;
2572 2573 arqstat->sts_sensedata.es_key =
2573 2574 KEY_ILLEGAL_REQUEST;
2574 2575 arqstat->sts_sensedata.es_class =
2575 2576 CLASS_EXTENDED_SENSE;
2576 2577
2577 2578 /*
2578 2579 * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2579 2580 * ASC: 0x21h; ASCQ: 0x00h;
2580 2581 */
2581 2582 arqstat->sts_sensedata.es_add_code = 0x21;
2582 2583 arqstat->sts_sensedata.es_qual_code = 0x00;
2583 2584 break;
2584 2585 case MFI_STAT_INVALID_CMD:
2585 2586 case MFI_STAT_INVALID_DCMD:
2586 2587 case MFI_STAT_INVALID_PARAMETER:
2587 2588 case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2588 2589 default:
2589 2590 cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!");
2590 2591 pkt->pkt_reason = CMD_TRAN_ERR;
2591 2592
2592 2593 break;
2593 2594 }
2594 2595
2595 2596 atomic_add_16(&instance->fw_outstanding, (-1));
2596 2597
2597 2598 /* Call the callback routine */
2598 2599 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
2599 2600 pkt->pkt_comp) {
2600 2601 (*pkt->pkt_comp)(pkt);
2601 2602 }
2602 2603
2603 2604 con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2604 2605
2605 2606 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2606 2607 &scsi_raid_io->RaidContext.status, 0);
2607 2608
2608 2609 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2609 2610 &scsi_raid_io->RaidContext.extStatus, 0);
2610 2611
2611 2612 return_raid_msg_pkt(instance, cmd);
2612 2613 break;
2613 2614 }
2614 2615 case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: // MFA command.
2615 2616
2616 2617 if (cmd->frame->dcmd.opcode
2617 2618 == MR_DCMD_LD_MAP_GET_INFO &&
2618 2619 cmd->frame->dcmd.mbox.b[1]
2619 2620 == 1) {
2620 2621
2621 2622 mutex_enter(&instance->sync_map_mtx);
2622 2623
2623 2624 con_log(CL_ANN, (CE_NOTE,
2624 2625 "LDMAP sync command SMID RECEIVED 0x%X",
2625 2626 cmd->SMID));
2626 2627 if (cmd->frame->hdr.cmd_status != 0) {
2627 2628 cmn_err(CE_WARN,
2628 2629 "map sync failed, status = 0x%x.\n",cmd->frame->hdr.cmd_status);
2629 2630 }
2630 2631 else {
2631 2632 instance->map_id++;
2632 2633 cmn_err(CE_NOTE,
2633 - "map sync received, switched map_id to %ld \n",instance->map_id);
2634 + "map sync received, switched map_id to %" PRIu64 " \n",instance->map_id);
2634 2635 }
2635 2636
2636 2637 if (MR_ValidateMapInfo(instance->ld_map[(instance->map_id & 1)], instance->load_balance_info))
2637 2638 instance->fast_path_io = 1;
2638 2639 else
2639 2640 instance->fast_path_io = 0;
2640 2641
2641 2642 con_log(CL_ANN, (CE_NOTE,
2642 2643 "instance->fast_path_io %d \n",instance->fast_path_io));
2643 2644
2644 2645 instance->unroll.syncCmd = 0;
2645 2646
2646 2647 if(instance->map_update_cmd == cmd) {
2647 2648 return_raid_msg_pkt(instance, cmd);
2648 2649 atomic_add_16(&instance->fw_outstanding, (-1));
2649 2650 mrsas_tbolt_sync_map_info(instance);
2650 2651 }
2651 2652
2652 2653 cmn_err(CE_NOTE, "LDMAP sync completed.\n");
2653 2654 mutex_exit(&instance->sync_map_mtx);
2654 2655 break;
2655 2656 }
2656 2657
2657 2658 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2658 2659 con_log(CL_ANN1, (CE_CONT,
2659 2660 "AEN command SMID RECEIVED 0x%X",
2660 2661 cmd->SMID));
2661 2662 if ((instance->aen_cmd == cmd) &&
2662 2663 (instance->aen_cmd->abort_aen)) {
2663 2664 con_log(CL_ANN, (CE_WARN,
2664 2665 "mrsas_softintr: "
2665 2666 "aborted_aen returned"));
2666 2667 }
2667 2668 else
2668 2669 {
2669 2670 atomic_add_16(&instance->fw_outstanding, (-1));
2670 2671 service_mfi_aen(instance, cmd);
2671 2672 }
2672 2673 }
2673 2674
2674 2675 if (cmd->sync_cmd == MRSAS_TRUE ) {
2675 2676 con_log(CL_ANN1, (CE_CONT,
2676 2677 "Sync-mode Command Response SMID RECEIVED 0x%X",
2677 2678 cmd->SMID));
2678 2679
2679 2680 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2680 2681 }
2681 2682 else
2682 2683 {
2683 2684 con_log(CL_ANN, (CE_CONT,
2684 2685 "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2685 2686 cmd->SMID));
2686 2687 }
2687 2688 break;
2688 2689 default:
2689 2690 /* free message */
2690 2691 con_log(CL_ANN, (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
2691 2692 break;
2692 2693 }
2693 2694 }
2694 2695
2695 2696 uint_t
2696 2697 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2697 2698 {
2698 2699 uint8_t replyType;
2699 2700 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2700 2701 Mpi2ReplyDescriptorsUnion_t *desc;
2701 2702 uint16_t smid;
2702 2703 union desc_value d_val;
2703 2704 struct mrsas_cmd *cmd;
2704 2705 uint32_t i;
2705 2706 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2706 2707 uint8_t status;
2707 2708
2708 2709 struct mrsas_header *hdr;
2709 2710 struct scsi_pkt *pkt;
2710 2711
2711 2712 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2712 2713 0, 0, DDI_DMA_SYNC_FORDEV);
2713 2714
2714 2715 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2715 2716 0, 0, DDI_DMA_SYNC_FORCPU);
2716 2717
2717 2718 desc = instance->reply_frame_pool;
2718 2719 desc += instance->reply_read_index;
2719 2720
2720 2721 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2721 2722 replyType = replyDesc->ReplyFlags &
2722 2723 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2723 2724
2724 2725 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2725 2726 return (DDI_INTR_UNCLAIMED);
2726 2727
2727 - con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %llx Words = %llx \n",
2728 + con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64 " \n",
2728 2729 desc, desc->Words));
2729 2730
2730 2731 d_val.word = desc->Words;
2731 2732
2732 2733
2733 2734 /* Read Reply descriptor */
2734 2735 while ((d_val.u1.low != 0xffffffff) &&
2735 2736 (d_val.u1.high != 0xffffffff)) {
2736 2737
2737 2738 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2738 2739 0, 0, DDI_DMA_SYNC_FORCPU);
2739 2740
2740 2741 smid = replyDesc->SMID;
2741 2742
2742 2743 if (!smid || smid > instance->max_fw_cmds + 1) {
2743 2744 con_log(CL_ANN1, (CE_NOTE,
2744 - "Reply Desc at Break = %llx Words = %llx \n",
2745 + "Reply Desc at Break = %p Words = %" PRIx64 " \n",
2745 2746 desc, desc->Words));
2746 2747 break;
2747 2748 }
2748 2749
2749 2750 cmd = instance->cmd_list[smid - 1];
2750 2751 if(!cmd ) {
2751 2752 con_log(CL_ANN1, (CE_NOTE,
2752 2753 "mr_sas_tbolt_process_outstanding_cmd: Invalid command "
2753 2754 	    " or poll command received in completion path\n"));
2754 2755 }
2755 2756 else {
2756 2757 mutex_enter(&instance->cmd_pend_mtx);
2757 2758 if (cmd->sync_cmd == MRSAS_TRUE) {
2758 2759 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2759 2760 if (hdr) {
2760 2761 con_log(CL_ANN1, (CE_NOTE,
2761 2762 "mr_sas_tbolt_process_outstanding_cmd:"
2762 2763 " mlist_del_init(&cmd->list).\n"));
2763 2764 mlist_del_init(&cmd->list);
2764 2765 }
2765 2766 } else {
2766 2767 pkt = cmd->pkt;
2767 2768 if (pkt) {
2768 2769 con_log(CL_ANN1, (CE_NOTE,
2769 2770 "mr_sas_tbolt_process_outstanding_cmd:"
2770 2771 "mlist_del_init(&cmd->list).\n"));
2771 2772 mlist_del_init(&cmd->list);
2772 2773 }
2773 2774 }
2774 2775
2775 2776 mutex_exit(&instance->cmd_pend_mtx);
2776 2777
2777 2778 tbolt_complete_cmd(instance, cmd);
2778 2779 }
2779 2780 // set it back to all 0xfffffffff.
2780 2781 desc->Words = (uint64_t)~0;
2781 2782
2782 2783 instance->reply_read_index++;
2783 2784
2784 2785 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2785 2786 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2786 2787 instance->reply_read_index = 0;
2787 2788 }
2788 2789
2789 2790 /* Get the next reply descriptor */
2790 2791 if (!instance->reply_read_index)
2791 2792 desc = instance->reply_frame_pool;
2792 2793 else
2793 2794 desc++;
2794 2795
2795 2796 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2796 2797
2797 2798 d_val.word = desc->Words;
2798 2799
2799 2800 con_log(CL_ANN1, (CE_NOTE,
2800 - "Next Reply Desc = %llx Words = %llx\n",
2801 + "Next Reply Desc = %p Words = %" PRIx64 "\n",
2801 2802 desc, desc->Words));
2802 2803
2803 2804 replyType = replyDesc->ReplyFlags &
2804 2805 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2805 2806
2806 2807 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2807 2808 break;
2808 2809
2809 2810 } /* End of while loop. */
2810 2811
2811 2812 /* update replyIndex to FW */
2812 2813 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2813 2814
2814 2815
2815 2816 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2816 2817 0, 0, DDI_DMA_SYNC_FORDEV);
2817 2818
2818 2819 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2819 2820 0, 0, DDI_DMA_SYNC_FORCPU);
2820 2821 return (DDI_INTR_CLAIMED);
2821 2822 }
2822 2823
2823 2824
2824 2825
2825 2826
2826 2827 /*
2827 2828 * complete_cmd_in_sync_mode - Completes an internal command
2828 2829 * @instance: Adapter soft state
2829 2830 * @cmd: Command to be completed
2830 2831 *
2831 2832  * The tbolt_issue_cmd_in_sync_mode() path waits on int_cmd_cv for the
2832 2833  * command it issued to complete.  This function wakes that waiter by
2833 2834  * recording the completion status and calling cv_broadcast().
2834 2835 */
2835 2836 void
2836 2837 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2837 2838 struct mrsas_cmd *cmd)
2838 2839 {
2839 2840
2840 2841 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2841 2842 &cmd->frame->io.cmd_status);
2842 2843
2843 2844 cmd->sync_cmd = MRSAS_FALSE;
2844 2845
2845 2846 mutex_enter(&instance->int_cmd_mtx);
2846 2847 if (cmd->cmd_status == ENODATA) {
2847 2848 cmd->cmd_status = 0;
2848 2849 }
2849 2850 cv_broadcast(&instance->int_cmd_cv);
2850 2851 mutex_exit(&instance->int_cmd_mtx);
2851 2852
2852 2853 }
2853 2854
2854 2855 /*
2855 2856 * mrsas_tbolt_get_ld_map_info - Returns ld_map structure
2856 2857 * instance: Adapter soft state
2857 2858 *
2858 2859  * Issues an internal command (DCMD MR_DCMD_LD_MAP_GET_INFO) to fetch
2859 2860  * the firmware's LD/RAID map (MR_FW_RAID_MAP_ALL).  The map is later
2860 2861  * validated and used to decide whether fast-path I/O is possible.
2861 2862 */
2862 2863 int
2863 2864 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2864 2865 {
2865 2866 int ret = 0;
2866 2867 struct mrsas_cmd *cmd = NULL;
2867 2868 struct mrsas_dcmd_frame *dcmd;
2868 2869 MR_FW_RAID_MAP_ALL *ci;
2869 2870 uint32_t ci_h = 0;
2870 2871 U32 size_map_info;
2871 2872
2872 2873 cmd = get_raid_msg_pkt(instance);
2873 2874
2874 2875 if (cmd == NULL) {
2875 2876 cmn_err(CE_WARN,
2876 2877 "Failed to get a cmd from free-pool in get_ld_map_info()");
2877 2878 return (DDI_FAILURE);
2878 2879 }
2879 2880
2880 2881 dcmd = &cmd->frame->dcmd;
2881 2882
2882 2883 size_map_info = sizeof (MR_FW_RAID_MAP) +
2883 2884 (sizeof (MR_LD_SPAN_MAP) *
2884 2885 (MAX_LOGICAL_DRIVES - 1));
2885 2886
2886 2887 con_log(CL_ANN, (CE_NOTE,
2887 2888 "size_map_info : 0x%x", size_map_info));
2888 2889
2889 2890 ci = instance->ld_map[(instance->map_id & 1)];
2890 2891 ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2891 2892
2892 2893 if (!ci) {
2893 2894 cmn_err(CE_WARN,
2894 2895 "Failed to alloc mem for ld_map_info");
2895 2896 return_raid_msg_pkt(instance, cmd);
2896 2897 return (-1);
2897 2898 }
2898 2899
2899 2900 memset(ci, 0, sizeof (*ci));
2900 2901 memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2901 2902
2902 2903 dcmd->cmd = MFI_CMD_OP_DCMD;
2903 2904 dcmd->cmd_status = 0xFF;
2904 2905 dcmd->sge_count = 1;
2905 2906 dcmd->flags = MFI_FRAME_DIR_READ;
2906 2907 dcmd->timeout = 0;
2907 2908 dcmd->pad_0 = 0;
2908 2909 dcmd->data_xfer_len = size_map_info;
2909 2910 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2910 2911 dcmd->sgl.sge32[0].phys_addr = ci_h;
2911 2912 dcmd->sgl.sge32[0].length = size_map_info;
2912 2913
2913 2914
2914 2915 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2915 2916
2916 2917 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2917 2918 ret = 0;
2918 2919 con_log(CL_ANN1, (CE_NOTE,
2919 2920 "Get LD Map Info success\n"));
2920 2921 } else {
2921 2922 cmn_err(CE_WARN,
2922 2923 "Get LD Map Info failed\n");
2923 2924 ret = -1;
2924 2925 }
2925 2926
2926 2927 return_raid_msg_pkt(instance, cmd);
2927 2928
2928 2929 return (ret);
2929 2930 }
2930 2931
2931 2932 void
2932 2933 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2933 2934 {
2934 2935 uint32_t i;
2935 2936 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2936 2937 union desc_value d_val;
2937 2938
2938 2939 reply_desc = instance->reply_frame_pool;
2939 2940
2940 2941 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2941 2942 d_val.word = reply_desc->Words;
2942 2943 con_log(CL_DLEVEL3, (CE_NOTE,
2943 2944 "i=%d, %x:%x",
2944 2945 i, d_val.u1.high, d_val.u1.low));
2945 2946 }
2946 2947 }
2947 2948
2948 2949 /**
2949 2950  * mrsas_tbolt_prepare_cdb - Build the 32-byte CDB for a fast-path DIF I/O.
2950 2951 * @io_info: MegaRAID IO request packet pointer.
2951 2952 * @ref_tag: Reference tag for RD/WRPROTECT
2952 2953 *
2953 2954  * Prepares the variable-length READ(32)/WRITE(32) CDB and the EEDP fields.
2954 2955 */
2955 2956 void
2956 2957 mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],struct IO_REQUEST_INFO *io_info,Mpi2RaidSCSIIORequest_t *scsi_io_request, U32 ref_tag)
2957 2958 {
2958 2959 uint16_t EEDPFlags;
2959 2960 uint32_t Control;
2960 2961 // Prepare 32-byte CDB if DIF is supported on this device
2961 2962 con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB\n"));
2962 2963
2963 2964 memset(cdb, 0, 32);
2964 2965
2965 2966 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2966 2967
2967 2968
2968 2969 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2969 2970
2970 2971 if (io_info->isRead) {
2971 2972 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2972 2973 }
2973 2974 else {
2974 2975 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2975 2976 }
2976 2977
2977 2978 	cdb[10] = MRSAS_RD_WR_PROTECT; // verify against the Linux driver, which sets MEGASAS_RD_WR_PROTECT_CHECK_ALL
2978 2979
2979 2980 /* LOGICAL BLOCK ADDRESS */
2980 2981 cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2981 2982 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2982 2983 cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2983 2984 cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2984 2985 cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2985 2986 cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2986 2987 cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2987 2988 cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2988 2989
2989 2990 /* Logical block reference tag */
2990 2991 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2991 2992 &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2992 2993 BIG_ENDIAN(ref_tag));
2993 2994
2994 2995 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
2995 2996 &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask,
2996 2997 0xffff);
2997 2998
2998 2999 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2999 3000 &scsi_io_request->DataLength,
3000 3001 ((io_info->numBlocks)*512));
3001 3002 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3002 3003 &scsi_io_request->IoFlags,32); /* Specify 32-byte cdb */
3003 3004
3004 3005 /* Transfer length */
3005 3006 cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
3006 3007 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
3007 3008 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
3008 3009 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
3009 3010
3010 3011 /* set SCSI IO EEDPFlags */
3011 3012 EEDPFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3012 3013 &scsi_io_request->EEDPFlags);
3013 3014 Control = ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3014 3015 &scsi_io_request->Control);
3015 3016
3016 3017 // set SCSI IO EEDPFlags bits
3017 3018 if (io_info->isRead) {
3018 3019 // For READ commands, the EEDPFlags shall be set to specify to
3019 3020 // Increment the Primary Reference Tag, to Check the Reference
3020 3021 // Tag, and to Check and Remove the Protection Information
3021 3022 // fields.
3022 3023 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3023 3024 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
3024 3025 MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
3025 3026 MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
3026 3027 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
3027 3028 }
3028 3029 else {
3029 3030 // For WRITE commands, the EEDPFlags shall be set to specify to
3030 3031 // Increment the Primary Reference Tag, and to Insert
3031 3032 // Protection Information fields.
3032 3033 EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3033 3034 MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
3034 3035 }
3035 3036 Control |= (0x4 << 26);
3036 3037
3037 3038 ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3038 3039 &scsi_io_request->EEDPFlags, EEDPFlags);
3039 3040 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3040 3041 &scsi_io_request->Control, Control);
3041 3042 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3042 3043 &scsi_io_request->EEDPBlockSize,
3043 3044 MRSAS_EEDPBLOCKSIZE);
3044 3045 }
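/*
 * Example of the layout produced above for a DIF read of 8 blocks at
 * pdBlock 0x12345678: CDB bytes 12-19 hold the LBA big-endian
 * (00 00 00 00 12 34 56 78), bytes 28-31 hold the transfer length
 * (00 00 00 08), and the reference tag / application-tag mask are
 * filled in through the EEDP32 overlay of the same 32-byte CDB.
 */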
3045 3046
3046 3047
3047 3048 /*
3048 3049 * mrsas_tbolt_set_pd_lba - Sets PD LBA
3049 3050 * @cdb: CDB
3050 3051 * @cdb_len: cdb length
3051 3052 * @start_blk: Start block of IO
3052 3053 *
3053 3054 * Used to set the PD LBA in CDB for FP IOs
3054 3055 */
3055 3056 void
3056 3057 mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk, U32 num_blocks, U8 DifCapable)
3057 3058 {
3058 3059 U8 cdb_len = *cdb_len_ptr;
3059 3060 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
3060 3061
3061 3062 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
3062 3063 if (((cdb_len == 12) || (cdb_len == 16)) &&
3063 3064 (start_blk <= 0xffffffff)) {
3064 3065 if (cdb_len == 16) {
3065 3066 con_log(CL_ANN, (CE_NOTE, "Converting READ/WRITE(16) to READ10\n"));
3066 3067 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3067 3068 flagvals = cdb[1];
3068 3069 groupnum = cdb[14];
3069 3070 control = cdb[15];
3070 3071 } else {
3071 3072 con_log(CL_ANN, (CE_NOTE, "Converting READ/WRITE(12) to READ10\n"));
3072 3073 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3073 3074 flagvals = cdb[1];
3074 3075 groupnum = cdb[10];
3075 3076 control = cdb[11];
3076 3077 }
3077 3078
3078 3079 memset(cdb, 0, sizeof(cdb));
3079 3080
3080 3081 cdb[0] = opcode;
3081 3082 cdb[1] = flagvals;
3082 3083 cdb[6] = groupnum;
3083 3084 cdb[9] = control;
3084 3085 /* Set transfer length */
3085 3086 cdb[8] = (U8)(num_blocks & 0xff);
3086 3087 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3087 3088 cdb_len = 10;
3088 3089 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3089 3090 		/* Convert to 16 byte CDB for large LBAs */
3090 3091 		con_log(CL_ANN, (CE_NOTE, "Converting 6/10/12-byte CDB to 16-byte CDB\n"));
3091 3092 switch (cdb_len) {
3092 3093 case 6:
3093 3094 opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3094 3095 control = cdb[5];
3095 3096 break;
3096 3097 case 10:
3097 3098 opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3098 3099 flagvals = cdb[1];
3099 3100 groupnum = cdb[6];
3100 3101 control = cdb[9];
3101 3102 break;
3102 3103 case 12:
3103 3104 opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3104 3105 flagvals = cdb[1];
3105 3106 groupnum = cdb[10];
3106 3107 control = cdb[11];
3107 3108 break;
3108 3109 }
3109 3110
3110 3111 		memset(cdb, 0, 16);	/* sizeof (cdb) is only a pointer's size here */
3111 3112
3112 3113 cdb[0] = opcode;
3113 3114 cdb[1] = flagvals;
3114 3115 cdb[14] = groupnum;
3115 3116 cdb[15] = control;
3116 3117
3117 3118 /* Transfer length */
3118 3119 cdb[13] = (U8)(num_blocks & 0xff);
3119 3120 cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3120 3121 cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3121 3122 cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3122 3123
3123 3124 /* Specify 16-byte cdb */
3124 3125 cdb_len = 16;
3125 3126 } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3126 3127 /* convert to 10 byte CDB */
3127 3128 opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3128 3129 control = cdb[5];
3129 3130
3130 3131 		memset(cdb, 0, 16);	/* sizeof (cdb) is only a pointer's size here */
3131 3132 cdb[0] = opcode;
3132 3133 cdb[9] = control;
3133 3134
3134 3135 /* Set transfer length */
3135 3136 cdb[8] = (U8)(num_blocks & 0xff);
3136 3137 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3137 3138
3138 3139 /* Specify 10-byte cdb */
3139 3140 cdb_len = 10;
3140 3141 }
3141 3142
3142 3143
3143 3144 	/* All cases fall through here; load the LBA for the final CDB length */
3144 3145 switch (cdb_len) {
3145 3146 case 6:
3146 3147 {
3147 3148 U8 val = cdb[1] & 0xE0;
3148 3149 cdb[3] = (U8)(start_blk & 0xff);
3149 3150 cdb[2] = (U8)((start_blk >> 8) & 0xff);
3150 3151 cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3151 3152 break;
3152 3153 }
3153 3154 case 10:
3154 3155 cdb[5] = (U8)(start_blk & 0xff);
3155 3156 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3156 3157 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3157 3158 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3158 3159 break;
3159 3160 case 12:
3160 3161 cdb[5] = (U8)(start_blk & 0xff);
3161 3162 cdb[4] = (U8)((start_blk >> 8) & 0xff);
3162 3163 cdb[3] = (U8)((start_blk >> 16) & 0xff);
3163 3164 cdb[2] = (U8)((start_blk >> 24) & 0xff);
3164 3165 break;
3165 3166
3166 3167 case 16:
3167 3168 cdb[9] = (U8)(start_blk & 0xff);
3168 3169 cdb[8] = (U8)((start_blk >> 8) & 0xff);
3169 3170 cdb[7] = (U8)((start_blk >> 16) & 0xff);
3170 3171 cdb[6] = (U8)((start_blk >> 24) & 0xff);
3171 3172 cdb[5] = (U8)((start_blk >> 32) & 0xff);
3172 3173 cdb[4] = (U8)((start_blk >> 40) & 0xff);
3173 3174 cdb[3] = (U8)((start_blk >> 48) & 0xff);
3174 3175 cdb[2] = (U8)((start_blk >> 56) & 0xff);
3175 3176 break;
3176 3177 }
3177 3178
3178 3179 *cdb_len_ptr = cdb_len;
3179 3180 }
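The switch above just serializes the LBA and transfer length big-endian into the rewritten CDB. For reference only (this is a user-space sketch, not driver code; the names pack_read16/READ_16_OP are illustrative), the 16-byte case looks like:

#include <stdio.h>
#include <stdint.h>

#define	READ_16_OP	0x88	/* SCSI READ(16) opcode */

/* Pack a 64-bit LBA (bytes 2..9) and a 32-bit block count (bytes 10..13) */
/* into a 16-byte CDB, big-endian, mirroring the 16-byte case above. */
static void
pack_read16(uint8_t cdb[16], uint64_t lba, uint32_t nblocks)
{
	int i;

	for (i = 0; i < 16; i++)
		cdb[i] = 0;
	cdb[0] = READ_16_OP;
	for (i = 0; i < 8; i++)
		cdb[2 + i] = (uint8_t)(lba >> (56 - 8 * i));
	for (i = 0; i < 4; i++)
		cdb[10 + i] = (uint8_t)(nblocks >> (24 - 8 * i));
}

int
main(void)
{
	uint8_t cdb[16];
	int i;

	pack_read16(cdb, 0x123456789AULL, 128);
	for (i = 0; i < 16; i++)
		printf("%02x ", cdb[i]);
	printf("\n");
	return (0);
}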
3180 3181
3181 3182
3182 3183 U8
3183 3184 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3184 3185 {
3185 3186 MR_FW_RAID_MAP_ALL *ld_map;
3186 3187
3187 3188 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3188 3189
3189 3190 ld_map = instance->ld_map[(instance->map_id & 1)];
3190 3191
3191 3192 con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d\n",
3192 3193 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3193 3194
3194 3195 if (MR_ValidateMapInfo(instance->ld_map[(instance->map_id & 1)], instance->load_balance_info)) {
3195 3196 con_log(CL_ANN, (CE_CONT,
3196 3197 "MR_ValidateMapInfo success"));
3197 3198
3198 3199 instance->fast_path_io = 1;
3199 3200 con_log(CL_ANN, (CE_NOTE,
3200 3201 			    "instance->fast_path_io %d\n", instance->fast_path_io));
3201 3202
3202 3203 return (DDI_SUCCESS);
3203 3204 }
3204 3205
3205 3206 }
3206 3207
3207 3208 instance->fast_path_io = 0;
3208 3209 	cmn_err(CE_WARN, "mrsas_tbolt_check_map_info: LD map get/validate failed");
3209 3210 con_log(CL_ANN, (CE_NOTE,
3210 3211 	    "instance->fast_path_io %d\n", instance->fast_path_io));
3211 3212
3212 3213
3213 3214 return (DDI_FAILURE);
3214 3215 }
3215 3216 /*
3216 3217  * Marks the HBA as bad. Called either when an IO packet times out
3217 3218  * even after 3 FW resets, or when the FW is found to be faulted
3218 3219  * even after 3 consecutive resets.
3219 3220 */
3220 3221
3221 3222 int
3222 3223 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3223 3224 {
3224 3225 cmn_err(CE_WARN, "TBOLT Kill adapter called\n");
3225 3226
3226 3227 if (instance->deadadapter == 1)
3227 3228 return (DDI_FAILURE);
3228 3229
3229 3230 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3230 3231 "Writing to doorbell with MFI_STOP_ADP "));
3231 3232 mutex_enter(&instance->ocr_flags_mtx);
3232 3233 instance->deadadapter = 1;
3233 3234 mutex_exit(&instance->ocr_flags_mtx);
3234 3235 instance->func_ptr->disable_intr(instance);
3235 3236 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3236 3237 /* Flush */
3237 3238 RD_RESERVED0_REGISTER(instance);
3238 3239
3239 3240 (void) mrsas_print_pending_cmds(instance);
3240 3241 mrsas_complete_pending_cmds(instance);
3241 3242 return (DDI_SUCCESS);
3242 3243 }
3243 3244 void mrsas_reset_reply_desc(struct mrsas_instance *instance)
3244 3245 {
3245 3246 int i;
3246 3247 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3247 3248 	instance->reply_read_index = 0;
3248 3249
3249 3250 /* initializing reply address to 0xFFFFFFFF */
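	/*
	 * An all-ones descriptor is what the completion path treats as
	 * "unused", so re-arming the queue after a reset amounts to
	 * writing ~0 into every entry.
	 */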
3250 3251 reply_desc = instance->reply_frame_pool;
3251 3252
3252 3253 for (i = 0; i < instance->reply_q_depth; i++) {
3253 3254 reply_desc->Words = (uint64_t)~0;
3254 3255 reply_desc++;
3255 3256 }
3256 3257 }
3257 3258
3258 3259 int
3259 3260 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3260 3261 {
3261 3262 	uint32_t status = 0x00;
3262 3263 uint32_t retry = 0;
3263 3264 uint32_t seq_num;
3264 3265 uint32_t cur_abs_reg_val;
3265 3266 uint32_t fw_state;
3266 3267 union mrsas_evt_class_locale class_locale;
3267 3268 uint32_t abs_state;
3268 3269 uint32_t i;
3269 3270
3270 3271 con_log(CL_ANN, (CE_NOTE,
3271 3272 	    "mrsas_tbolt_reset_ppc entered\n"));
3272 3273
3273 3274 if (instance->deadadapter == 1) {
3274 3275 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3275 3276 "no more resets as HBA has been marked dead ");
3276 3277 return (DDI_FAILURE);
3277 3278 }
3278 3279
3279 3280 mutex_enter(&instance->ocr_flags_mtx);
3280 3281 instance->adapterresetinprogress = 1;
3281 3282 	con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3282 3283 	    "adapterresetinprogress flag set, time %llx", gethrtime()));
3283 3284 mutex_exit(&instance->ocr_flags_mtx);
3284 3285
3285 3286 instance->func_ptr->disable_intr(instance);
3286 3287
3287 3288 	/* Add a delay in order to let in-flight ioctl and IO cmds complete */
3288 3289 	for (i = 0; i < 3000; i++) {
3289 3290 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3290 3291 }
3291 3292
3292 3293 	instance->reply_read_index = 0;
3293 3294
3294 3295 retry_reset:
3295 3296 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3296 3297 	    "Resetting TBOLT"));
3297 3298
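	/*
	 * Unlock the host diagnostic register by writing the MPI2
	 * diag-reset key sequence (0xF, 0x4, 0xB, 0x2, 0x7, 0xD) to the
	 * write sequence register.
	 */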
3298 3299 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3299 3300 WR_TBOLT_IB_WRITE_SEQ(4, instance);
3300 3301 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3301 3302 WR_TBOLT_IB_WRITE_SEQ(2, instance);
3302 3303 WR_TBOLT_IB_WRITE_SEQ(7, instance);
3303 3304 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3304 3305 con_log(CL_ANN1, (CE_NOTE,
3305 3306 "mrsas_tbolt_reset_ppc: magic number written "
3306 3307 "to write sequence register\n"));
3307 3308 delay(100 * drv_usectohz(MILLISEC));
3308 3309 status = RD_TBOLT_HOST_DIAG(instance);
3309 3310 con_log(CL_ANN1, (CE_NOTE,
3310 3311 	    "mrsas_tbolt_reset_ppc: host diagnostic register "
3311 3312 	    "read successfully\n"));
3312 3313
3313 3314 while (status & DIAG_TBOLT_RESET_ADAPTER) {
3314 3315 delay(100 * drv_usectohz(MILLISEC));
3315 3316 status = RD_TBOLT_HOST_DIAG(instance);
3316 3317 if (retry++ == 100) {
3317 3318 cmn_err(CE_WARN,
3318 3319 			    "mrsas_tbolt_reset_ppc: "
3319 3320 			    "reset adapter bit still set, "
3320 3321 			    "giving up after %d retries\n", retry);
3321 3322 return (DDI_FAILURE);
3322 3323 }
3323 3324 }
3324 3325
3325 3326 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3326 3327 delay(100 * drv_usectohz(MILLISEC));
3327 3328
3328 3329 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3329 3330 (uint8_t *)((uintptr_t)(instance)->regmap +
3330 3331 	    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3331 3332
3332 3333 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3333 3334 delay(100 * drv_usectohz(MILLISEC));
3334 3335 ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3335 3336 (uint8_t *)((uintptr_t)(instance)->regmap +
3336 3337 		    RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3337 3338 if (retry++ == 100) {
3338 3339 			/* Don't call kill adapter here; the */
3339 3340 			/* RESET ADAPTER bit is cleared by firmware. */
3340 3341 			/* mrsas_tbolt_kill_adapter(instance); */
3341 3342 cmn_err(CE_WARN, "mr_sas %d: %s(): RESET FAILED; return failure!!!", instance->instance, __func__);
3342 3343 return (DDI_FAILURE);
3343 3344 }
3344 3345 }
3345 3346
3346 3347 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3347 3348 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3348 3349 "Calling mfi_state_transition_to_ready"));
3349 3350
3350 3351 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3351 3352 retry = 0;
3352 3353 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3353 3354 delay(100 * drv_usectohz(MILLISEC));
3354 3355 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3355 3356 }
3356 3357 if (abs_state <= MFI_STATE_FW_INIT) {
3357 3358 		cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT, "
3358 3359 		    "state = 0x%x, RETRY RESET.\n", abs_state);
3359 3360 goto retry_reset;
3360 3361 }
3361 3362
3362 3363 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
3363 3364 if (mfi_state_transition_to_ready(instance) ||
3364 3365 debug_tbolt_fw_faults_after_ocr_g == 1) {
3365 3366 cur_abs_reg_val =
3366 3367 instance->func_ptr->read_fw_status_reg(instance);
3367 3368 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3368 3369
3369 3370 con_log(CL_ANN1, (CE_NOTE,
3370 3371 		    "mrsas_tbolt_reset_ppc: (before debug fault injection) "
3371 3372 		    "FW is not ready, FW state = 0x%x", fw_state));
3372 3373 if (debug_tbolt_fw_faults_after_ocr_g == 1)
3373 3374 fw_state = MFI_STATE_FAULT;
3374 3375
3375 3376 		con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: FW is not ready "
3376 3377 "FW state = 0x%x", fw_state));
3377 3378
3378 3379 if (fw_state == MFI_STATE_FAULT) {
3379 3380 			/* increment the count */
3380 3381 instance->fw_fault_count_after_ocr++;
3381 3382 if (instance->fw_fault_count_after_ocr
3382 3383 < MAX_FW_RESET_COUNT) {
3383 3384 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3384 3385 "FW is in fault after OCR count %d "
3385 3386 "Retry Reset",
3386 3387 instance->fw_fault_count_after_ocr);
3387 3388 goto retry_reset;
3388 3389
3389 3390 } else {
3390 3391 				cmn_err(CE_WARN, "mrsas %d: %s: "
3391 3392 				    "Max Reset Count exceeded (>%d), "
3392 3393 				    "marking HBA bad and killing adapter",
3393 3394 instance->instance, __func__, MAX_FW_RESET_COUNT);
3394 3395
3395 3396 mrsas_tbolt_kill_adapter(instance);
3396 3397 return (DDI_FAILURE);
3397 3398 }
3398 3399 }
3399 3400 }
3400 3401
3401 3402 	/* reset the counter as FW is up after OCR */
3402 3403 instance->fw_fault_count_after_ocr = 0;
3403 3404
3404 3405 mrsas_reset_reply_desc(instance);
3405 3406
3406 3407
3407 3408 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3408 3409 "Calling mrsas_issue_init_mpi2"));
3409 3410 abs_state = mrsas_issue_init_mpi2(instance);
3410 3411 	if (abs_state == DDI_FAILURE) {
3411 3412 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3412 3413 "INIT failed Retrying Reset");
3413 3414 goto retry_reset;
3414 3415 }
3415 3416 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3416 3417 "mrsas_issue_init_mpi2 Done"));
3417 3418
3418 3419 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3419 3420 	    "Calling mrsas_print_pending_cmds\n"));
3420 3421 	(void) mrsas_print_pending_cmds(instance);
3421 3422 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3422 3423 	    "mrsas_print_pending_cmds done\n"));
3423 3424
3424 3425 instance->func_ptr->enable_intr(instance);
3425 3426 instance->fw_outstanding = 0;
3426 3427
3427 3428 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3428 3429 "Calling mrsas_issue_pending_cmds"));
3429 3430 mrsas_issue_pending_cmds(instance);
3430 3431 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3431 3432 "issue_pending_cmds done.\n"));
3432 3433
3433 3434 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3434 3435 "Calling aen registration"));
3435 3436
3436 3437 instance->aen_cmd->retry_count_for_ocr = 0;
3437 3438 instance->aen_cmd->drv_pkt_time = 0;
3438 3439
3439 3440 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3440 3441
3441 3442 	con_log(CL_ANN1, (CE_NOTE, "Unsetting adapterresetinprogress flag.\n"));
3442 3443 mutex_enter(&instance->ocr_flags_mtx);
3443 3444 instance->adapterresetinprogress = 0;
3444 3445 mutex_exit(&instance->ocr_flags_mtx);
3445 3446 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3446 3447 	    "adapterresetinprogress flag unset"));
3447 3448
3448 3449 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done\n"));
3449 3450 return (DDI_SUCCESS);
3450 3451
3451 3452 }
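The reset path above repeats one bounded-polling idiom several times: re-read a register every 100 ms and give up after a retry cap. A minimal sketch of that idiom, reusing the driver's own RD_TBOLT_HOST_DIAG/delay/drv_usectohz helpers (the helper name below is illustrative and is not part of the driver):

/*
 * Illustrative only: poll the host diagnostic register until 'mask'
 * clears, sleeping 100 ms between reads and giving up after 'limit'
 * attempts.
 */
static int
tbolt_poll_diag_clear(struct mrsas_instance *instance, uint32_t mask,
    uint32_t limit)
{
	uint32_t retry;
	uint32_t status = RD_TBOLT_HOST_DIAG(instance);

	for (retry = 0; (status & mask) != 0; retry++) {
		if (retry == limit)
			return (DDI_FAILURE);
		delay(100 * drv_usectohz(MILLISEC));
		status = RD_TBOLT_HOST_DIAG(instance);
	}
	return (DDI_SUCCESS);
}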
3452 3453
3453 3454
3454 3455 /*
3455 3456  * mrsas_tbolt_sync_map_info - Sync the driver's LD map info with FW
3456 3457  * @instance: Adapter soft state
3457 3458  *
3458 3459  * Issues an internal DCMD (MR_DCMD_LD_MAP_GET_INFO with the pend flag
3459 3460  * set) that passes the per-LD target IDs and sequence numbers to FW,
3460 3461  * so FW can notify the driver whenever the RAID map changes.
3461 3462 */
3462 3463
3463 3464 int
3464 3465 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3465 3466 {
3466 3467 int ret = 0, i;
3467 3468 struct mrsas_cmd *cmd = NULL;
3468 3469 struct mrsas_dcmd_frame *dcmd;
3469 3470 uint32_t size_sync_info, num_lds;
3470 3471 LD_TARGET_SYNC *ci = NULL;
3471 3472 MR_FW_RAID_MAP_ALL *map;
3472 3473 MR_LD_RAID *raid;
3473 3474 LD_TARGET_SYNC *ld_sync;
3474 3475 uint32_t ci_h = 0;
3475 3476 uint32_t size_map_info;
3476 3477
3477 3478 cmd = get_raid_msg_pkt(instance);
3478 3479
3479 3480 if (cmd == NULL) {
3480 3481 cmn_err(CE_WARN,
3481 3482 "Failed to get a cmd from free-pool in mrsas_tbolt_sync_map_info(). ");
3482 3483 return (DDI_FAILURE);
3483 3484 }
3484 3485
3485 3486 /* Clear the frame buffer and assign back the context id */
3486 3487 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3487 3488 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3488 3489 cmd->index);
3489 3490 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3490 3491
3491 3492
3492 3493 map = instance->ld_map[instance->map_id & 1];
3493 3494
3494 3495 num_lds = map->raidMap.ldCount;
3495 3496
3496 3497 dcmd = &cmd->frame->dcmd;
3497 3498
3498 3499 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3499 3500
3500 3501 con_log(CL_ANN, (CE_NOTE,
3501 3502 "size_sync_info =0x%x ; ld count = 0x%x \n ",
3502 3503 size_sync_info, num_lds));
3503 3504
3504 3505 ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3505 3506
3506 3507 	memset(ci, 0, sizeof (MR_FW_RAID_MAP_ALL));
3507 3508 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3508 3509
3509 3510 (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3510 3511
3511 3512 ld_sync = (LD_TARGET_SYNC *)ci;
3512 3513
3513 3514 for (i = 0; i < num_lds; i++, ld_sync++) {
3514 3515 raid = MR_LdRaidGet(i, map);
3515 3516
3516 3517 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x\n",
3517 3518 i, raid->seqNum, raid->flags.ldSyncRequired));
3518 3519
3519 3520 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3520 3521
3521 3522 con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x \n",
3522 3523 i, ld_sync->ldTargetId));
3523 3524
3524 3525 ld_sync->seqNum = raid->seqNum;
3525 3526 }
3526 3527
3527 3528
3528 3529 	size_map_info = sizeof (MR_FW_RAID_MAP) +
3529 3530 	    (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3530 3531
3531 3532 dcmd->cmd = MFI_CMD_OP_DCMD;
3532 3533 dcmd->cmd_status = 0xFF;
3533 3534 dcmd->sge_count = 1;
3534 3535 dcmd->flags = MFI_FRAME_DIR_WRITE;
3535 3536 dcmd->timeout = 0;
3536 3537 dcmd->pad_0 = 0;
3537 3538 dcmd->data_xfer_len = size_map_info;
3538 3539 dcmd->mbox.b[0] = num_lds;
3539 3540 dcmd->mbox.b[1] = 1; /* Pend */
3540 3541 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3541 3542 dcmd->sgl.sge32[0].phys_addr = ci_h;
3542 3543 dcmd->sgl.sge32[0].length = size_map_info;
3543 3544
3544 3545
3545 3546 instance->map_update_cmd = cmd;
3546 3547 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3547 3548
3548 3549 instance->func_ptr->issue_cmd(cmd, instance);
3549 3550
3550 3551 instance->unroll.syncCmd = 1;
3551 3552 	con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3552 3553
3553 3554 return (ret);
3554 3555 }
3555 3556
3556 3557 /*
3557 3558 * abort_syncmap_cmd
3558 3559 */
3559 3560 int
3560 3561 abort_syncmap_cmd(struct mrsas_instance *instance,
3561 3562 struct mrsas_cmd *cmd_to_abort)
3562 3563 {
3563 3564 int ret = 0;
3564 3565
3565 3566 struct mrsas_cmd *cmd;
3566 3567 struct mrsas_abort_frame *abort_fr;
3567 3568
3568 3569 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3569 3570
3570 3571 cmd = get_raid_msg_mfi_pkt(instance);
3571 3572
3572 3573 if (!cmd) {
3573 3574 cmn_err(CE_WARN,
3574 3575 "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3575 3576 return (DDI_FAILURE);
3576 3577 }
3577 3578 /* Clear the frame buffer and assign back the context id */
3578 3579 (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3579 3580 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3580 3581 cmd->index);
3581 3582
3582 3583 abort_fr = &cmd->frame->abort;
3583 3584
3584 3585 /* prepare and issue the abort frame */
3585 3586 ddi_put8(cmd->frame_dma_obj.acc_handle,
3586 3587 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3587 3588 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3588 3589 MFI_CMD_STATUS_SYNC_MODE);
3589 3590 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3590 3591 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3591 3592 cmd_to_abort->index);
3592 3593 ddi_put32(cmd->frame_dma_obj.acc_handle,
3593 3594 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3594 3595 ddi_put32(cmd->frame_dma_obj.acc_handle,
3595 3596 &abort_fr->abort_mfi_phys_addr_hi, 0);
3596 3597
3597 3598 cmd->frame_count = 1;
3598 3599
3599 3600 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3600 3601
3601 3602 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3602 3603 con_log(CL_ANN1, (CE_WARN,
3603 3604 "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3604 3605 ret = -1;
3605 3606 } else {
3606 3607 ret = 0;
3607 3608 }
3608 3609
3609 3610 return_raid_msg_mfi_pkt(instance, cmd);
3610 3611
3611 3612 atomic_add_16(&instance->fw_outstanding, (-1));
3612 3613
3613 3614 return (ret);
3614 3615 }
3615 3616
3616 3617
3617 3618 #ifdef PDSUPPORT
3618 3619 int
3619 3620 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3620 3621 uint8_t lun, dev_info_t **ldip)
3621 3622 {
3622 3623 struct scsi_device *sd;
3623 3624 dev_info_t *child;
3624 3625 int rval, dtype;
3625 3626 struct mrsas_tbolt_pd_info *pds = NULL;
3626 3627 uint64_t *wwn;
3627 3628
3628 3629
3629 3630 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3630 3631 tgt, lun));
3631 3632
3632 3633 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3633 3634 if (ldip) {
3634 3635 *ldip = child;
3635 3636 }
3636 3637 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3637 3638 rval = mrsas_service_evt(instance, tgt, 1,
3638 3639 MRSAS_EVT_UNCONFIG_TGT, NULL);
3639 3640 con_log(CL_ANN1, (CE_WARN,
3640 3641 "mr_sas:DELETING STALE ENTRY rval = %d "
3641 3642 "tgt id = %d ", rval, tgt));
3642 3643 return (NDI_FAILURE);
3643 3644 }
3644 3645 return (NDI_SUCCESS);
3645 3646 }
3646 3647
3647 3648 pds = (struct mrsas_tbolt_pd_info *)
3648 3649 kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3649 3650 mrsas_tbolt_get_pd_info(instance, pds, tgt);
3650 3651 dtype = pds->scsiDevType;
3651 3652
3652 3653 	/* Check for disk */
3653 3654 	if (dtype == DTYPE_DIRECT) {
3654 3655 		if (LE_16(pds->fwState) != PD_SYSTEM) {
3655 3656 			kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3656 3657 			return (NDI_FAILURE);
3657 3658 		}
3658 3659 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3659 3660 sd->sd_address.a_hba_tran = instance->tran;
3660 3661 sd->sd_address.a_target = (uint16_t)tgt;
3661 3662 sd->sd_address.a_lun = (uint8_t)lun;
3662 3663
3663 3664 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3664 3665 rval = mrsas_config_scsi_device(instance, sd, ldip);
3665 3666 con_log(CL_DLEVEL1, (CE_NOTE,
3666 3667 "Phys. device found: tgt %d dtype %d: %s",
3667 3668 tgt, dtype, sd->sd_inq->inq_vid));
3668 3669 } else {
3669 3670 rval = NDI_FAILURE;
3670 3671 con_log(CL_DLEVEL1, (CE_NOTE,
3671 3672 			    "Phys. device not found (scsi_hba_probe failed): tgt %d dtype %d: %s",
3672 3673 tgt, dtype, sd->sd_inq->inq_vid));
3673 3674 }
3674 3675
3675 3676 /* sd_unprobe is blank now. Free buffer manually */
3676 3677 if (sd->sd_inq) {
3677 3678 kmem_free(sd->sd_inq, SUN_INQSIZE);
3678 3679 sd->sd_inq = (struct scsi_inquiry *)NULL;
3679 3680 }
3680 3681 kmem_free(sd, sizeof (struct scsi_device));
3681 3682 rval = NDI_SUCCESS;
3682 3683 } else {
3683 3684 con_log(CL_ANN1, (CE_NOTE,
3684 3685 "Device not supported: tgt %d lun %d dtype %d",
3685 3686 tgt, lun, dtype));
3686 3687 rval = NDI_FAILURE;
3687 3688 }
3688 3689
3689 3690 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3690 3691 	con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: return rval = %d",
3691 3692 rval));
3692 3693 return (rval);
3693 3694 }
3694 3695 static void
3695 3696 mrsas_tbolt_get_pd_info(struct mrsas_instance *instance, struct mrsas_tbolt_pd_info *pds,
3696 3697 int tgt)
3697 3698 {
3698 3699 struct mrsas_cmd *cmd;
3699 3700 struct mrsas_dcmd_frame *dcmd;
3700 3701 dma_obj_t dcmd_dma_obj;
3701 3702
3702 3703 cmd = get_raid_msg_pkt(instance);
3703 3704
3704 3705 if (!cmd) {
3705 3706 con_log(CL_ANN1, (CE_WARN, "Failed to get a cmd for get pd info"));
3706 3707 return;
3707 3708 }
3708 3709
3709 3710 /* Clear the frame buffer and assign back the context id */
3710 3711 memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3711 3712 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3712 3713 cmd->index);
3713 3714
3714 3715
3715 3716 dcmd = &cmd->frame->dcmd;
3716 3717 dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3717 3718 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3718 3719 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3719 3720 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3720 3721 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3721 3722 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3722 3723
3723 3724 (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3724 3725 DDI_STRUCTURE_LE_ACC);
3725 3726 (void) memset(dcmd_dma_obj.buffer, 0, sizeof (struct mrsas_tbolt_pd_info));
3726 3727 (void) memset(dcmd->mbox.b, 0, 12);
3727 3728 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3728 3729 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3729 3730 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3730 3731 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, MFI_FRAME_DIR_READ);
3731 3732 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3732 3733 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3733 3734 sizeof (struct mrsas_tbolt_pd_info));
3734 3735 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3735 3736 MR_DCMD_PD_GET_INFO);
3736 3737 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3737 3738 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3738 3739 sizeof (struct mrsas_tbolt_pd_info));
3739 3740 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3740 3741 dcmd_dma_obj.dma_cookie[0].dmac_address);
3741 3742
3742 3743 cmd->sync_cmd = MRSAS_TRUE;
3743 3744 cmd->frame_count = 1;
3744 3745
3745 3746 if (instance->tbolt) {
3746 3747 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3747 3748 }
3748 3749
3749 3750 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3750 3751
3751 3752 ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3752 3753 (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3753 3754 DDI_DEV_AUTOINCR);
3754 3755 (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3755 3756 return_raid_msg_pkt(instance, cmd);
3756 3757 }
3757 3758 #endif
|
↓ open down ↓ |
947 lines elided |
↑ open up ↑ |