--- old/./mr_sas_tbolt.c
+++ new/./mr_sas_tbolt.c
1 1 /*
2 2 * mr_sas_tbolt.c: source for mr_sas driver for New Generation.
3 3 * i.e. Thunderbolt and Invader
4 4 *
5 5 * Solaris MegaRAID device driver for SAS2.0 controllers
6 6 * Copyright (c) 2008-2012, LSI Logic Corporation.
7 7 * All rights reserved.
8 8 *
9 9 * Version:
10 10 * Author:
11 11 * Swaminathan K S
12 12 * Arun Chandrashekhar
13 13 * Manju R
14 14 * Rasheed
15 15 * Shakeel Bukhari
16 16 */
17 17
18 18
19 -#include <stddef.h>
20 19 #include <sys/types.h>
21 20 #include <sys/file.h>
22 21 #include <sys/atomic.h>
23 22 #include <sys/scsi/scsi.h>
24 23 #include <sys/byteorder.h>
25 24 #include "ld_pd_map.h"
26 25 #include "mr_sas.h"
27 26 #include "fusion.h"
28 27
28 +/*
29 + * FMA header files
30 + */
31 +#include <sys/ddifm.h>
32 +#include <sys/fm/protocol.h>
33 +#include <sys/fm/util.h>
34 +#include <sys/fm/io/ddi.h>
29 35
30 -// Pre-TB command size and TB command size.
31 -#define MR_COMMAND_SIZE (64*20) // 1280 bytes
36 +
37 +/* Pre-TB command size and TB command size. */
38 +#define MR_COMMAND_SIZE (64*20) /* 1280 bytes */
32 39 MR_LD_RAID *MR_LdRaidGet(U32 ld, MR_FW_RAID_MAP_ALL *map);
33 40 U16 MR_TargetIdToLdGet(U32 ldTgtId, MR_FW_RAID_MAP_ALL *map);
34 41 U16 MR_GetLDTgtId(U32 ld, MR_FW_RAID_MAP_ALL *map);
35 -U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO lbInfo, struct IO_REQUEST_INFO *in_info);
42 +U16 get_updated_dev_handle(PLD_LOAD_BALANCE_INFO, struct IO_REQUEST_INFO *);
36 43 extern ddi_dma_attr_t mrsas_generic_dma_attr;
37 44 extern uint32_t mrsas_tbolt_max_cap_maxxfer;
38 45 extern struct ddi_device_acc_attr endian_attr;
39 -extern int debug_level_g;
46 +extern int debug_level_g;
40 47 extern unsigned int enable_fp;
41 48 volatile int dump_io_wait_time = 90;
42 49 extern void
43 50 io_timeout_checker(void *arg);
44 -extern int
45 -mfi_state_transition_to_ready(struct mrsas_instance *instance);
46 51 extern volatile int debug_timeout_g;
47 -extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
52 +extern int mrsas_issue_pending_cmds(struct mrsas_instance *);
48 53 extern int mrsas_complete_pending_cmds(struct mrsas_instance *instance);
49 54 extern void push_pending_mfi_pkt(struct mrsas_instance *,
50 55 struct mrsas_cmd *);
51 56 extern U8 MR_BuildRaidContext(struct mrsas_instance *, struct IO_REQUEST_INFO *,
52 57 MPI2_SCSI_IO_VENDOR_UNIQUE *, MR_FW_RAID_MAP_ALL *);
53 58
54 -static volatile int debug_tbolt_fw_faults_after_ocr_g = 0;
59 +/* Local static prototypes. */
60 +static struct mrsas_cmd *mrsas_tbolt_build_cmd(struct mrsas_instance *,
61 + struct scsi_address *, struct scsi_pkt *, uchar_t *);
62 +static void mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr,
63 + U64 start_blk, U32 num_blocks);
64 +static int mrsas_tbolt_check_map_info(struct mrsas_instance *);
65 +static int mrsas_tbolt_sync_map_info(struct mrsas_instance *);
66 +static int mrsas_tbolt_prepare_pkt(struct scsa_cmd *);
67 +static int mrsas_tbolt_ioc_init(struct mrsas_instance *, dma_obj_t *);
68 +#ifdef PDSUPPORT
69 +static void mrsas_tbolt_get_pd_info(struct mrsas_instance *,
70 + struct mrsas_tbolt_pd_info *, int);
71 +#endif /* PDSUPPORT */
55 72
73 +static int debug_tbolt_fw_faults_after_ocr_g = 0;
74 +
56 75 /*
57 76 * destroy_mfi_mpi_frame_pool
58 77 */
59 78 void
60 79 destroy_mfi_mpi_frame_pool(struct mrsas_instance *instance)
61 80 {
62 81 int i;
63 82
64 83 struct mrsas_cmd *cmd;
65 84
66 85 /* return all mfi frames to pool */
67 86 for (i = 0; i < MRSAS_APP_RESERVED_CMDS; i++) {
68 87 cmd = instance->cmd_list[i];
69 - if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED)
88 + if (cmd->frame_dma_obj_status == DMA_OBJ_ALLOCATED) {
70 89 (void) mrsas_free_dma_obj(instance,
71 90 cmd->frame_dma_obj);
72 - cmd->frame_dma_obj_status = DMA_OBJ_FREED;
91 + }
92 + cmd->frame_dma_obj_status = DMA_OBJ_FREED;
73 93 }
74 94 }
75 95
76 96 /*
77 97 * destroy_mpi2_frame_pool
78 98 */
79 99 void
80 100 destroy_mpi2_frame_pool(struct mrsas_instance *instance)
81 101 {
82 102
83 103 if (instance->mpi2_frame_pool_dma_obj.status == DMA_OBJ_ALLOCATED) {
84 104 (void) mrsas_free_dma_obj(instance,
85 105 instance->mpi2_frame_pool_dma_obj);
86 106 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_FREED;
87 107 }
88 108 }
89 109
90 110
91 111 /*
92 112 * mrsas_tbolt_free_additional_dma_buffer
93 113 */
94 114 void
95 115 mrsas_tbolt_free_additional_dma_buffer(struct mrsas_instance *instance)
96 116 {
97 117 int i;
98 - if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
99 - (void) mrsas_free_dma_obj(instance,
100 - instance->mfi_internal_dma_obj);
101 - instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
102 - }
118 +
119 + if (instance->mfi_internal_dma_obj.status == DMA_OBJ_ALLOCATED) {
120 + (void) mrsas_free_dma_obj(instance,
121 + instance->mfi_internal_dma_obj);
122 + instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
123 + }
103 124 if (instance->mfi_evt_detail_obj.status == DMA_OBJ_ALLOCATED) {
104 125 (void) mrsas_free_dma_obj(instance,
105 126 instance->mfi_evt_detail_obj);
106 127 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
107 128 }
108 129
109 130 for (i = 0; i < 2; i++) {
110 131 if (instance->ld_map_obj[i].status == DMA_OBJ_ALLOCATED) {
111 132 (void) mrsas_free_dma_obj(instance,
112 - instance->ld_map_obj[i]);
133 + instance->ld_map_obj[i]);
113 134 instance->ld_map_obj[i].status = DMA_OBJ_FREED;
114 135 }
115 136 }
116 137 }
117 138
118 139
119 140 /*
120 141 * free_req_desc_pool
121 142 */
122 143 void
123 144 free_req_rep_desc_pool(struct mrsas_instance *instance)
124 145 {
125 146 if (instance->request_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
126 147 (void) mrsas_free_dma_obj(instance,
127 148 instance->request_desc_dma_obj);
128 149 instance->request_desc_dma_obj.status = DMA_OBJ_FREED;
129 150 }
130 151
131 152 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
132 153 (void) mrsas_free_dma_obj(instance,
133 154 instance->reply_desc_dma_obj);
134 155 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
135 156 }
136 157
137 158
138 159 }
139 160
140 161
141 162 /*
142 163 * ThunderBolt(TB) Request Message Frame Pool
143 164 */
144 165 int
145 166 create_mpi2_frame_pool(struct mrsas_instance *instance)
146 167 {
147 168 int i = 0;
148 - int cookie_cnt;
149 169 uint16_t max_cmd;
150 170 uint32_t sgl_sz;
151 171 uint32_t raid_msg_size;
152 172 uint32_t total_size;
153 - uint32_t offset;
154 - uint32_t io_req_base_phys;
155 - uint8_t *io_req_base;
173 + uint32_t offset;
174 + uint32_t io_req_base_phys;
175 + uint8_t *io_req_base;
156 176 struct mrsas_cmd *cmd;
157 177
158 178 max_cmd = instance->max_fw_cmds;
159 179
160 180 sgl_sz = 1024;
161 181 raid_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
162 182
163 - // Allocating additional 256 bytes to accomodate SMID 0.
183 +	/* Allocating additional 256 bytes to accommodate SMID 0. */
164 184 total_size = MRSAS_THUNDERBOLT_MSG_SIZE + (max_cmd * raid_msg_size) +
165 185 (max_cmd * sgl_sz) + (max_cmd * SENSE_LENGTH);
166 186
167 187 con_log(CL_ANN1, (CE_NOTE, "create_mpi2_frame_pool: "
168 - "max_cmd %x ", max_cmd));
188 + "max_cmd %x", max_cmd));
169 189
170 190 con_log(CL_DLEVEL3, (CE_NOTE, "create_mpi2_frame_pool: "
171 191 "request message frame pool size %x", total_size));
172 192
173 193 /*
174 194 * ThunderBolt(TB) We need to create a single chunk of DMA'ble memory
175 195 * and then split the memory to 1024 commands. Each command should be
176 196 * able to contain a RAID MESSAGE FRAME which will embed a MFI_FRAME
177 197 * within it. Further refer the "alloc_req_rep_desc" function where
178 198 * we allocate request/reply descriptors queues for a clue.
179 199 */
180 200
181 201 instance->mpi2_frame_pool_dma_obj.size = total_size;
182 202 instance->mpi2_frame_pool_dma_obj.dma_attr = mrsas_generic_dma_attr;
183 203 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_addr_hi =
184 204 0xFFFFFFFFU;
185 205 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_count_max =
186 206 0xFFFFFFFFU;
187 207 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_sgllen = 1;
188 208 instance->mpi2_frame_pool_dma_obj.dma_attr.dma_attr_align = 256;
189 209
190 210 if (mrsas_alloc_dma_obj(instance, &instance->mpi2_frame_pool_dma_obj,
191 211 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
192 212 cmn_err(CE_WARN,
193 213 "mr_sas: could not alloc mpi2 frame pool");
194 214 return (DDI_FAILURE);
195 215 }
196 216
197 217 bzero(instance->mpi2_frame_pool_dma_obj.buffer, total_size);
198 218 instance->mpi2_frame_pool_dma_obj.status |= DMA_OBJ_ALLOCATED;
199 219
200 220 instance->io_request_frames =
201 221 (uint8_t *)instance->mpi2_frame_pool_dma_obj.buffer;
202 222 instance->io_request_frames_phy =
203 223 (uint32_t)
204 224 instance->mpi2_frame_pool_dma_obj.dma_cookie[0].dmac_address;
205 225
206 - con_log(CL_DLEVEL3, (CE_NOTE,
207 - "io_request_frames 0x%x",
208 - instance->io_request_frames));
226 + con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames 0x%p",
227 + (void *)instance->io_request_frames));
209 228
210 - con_log(CL_DLEVEL3, (CE_NOTE,
211 - "io_request_frames_phy 0x%x",
229 + con_log(CL_DLEVEL3, (CE_NOTE, "io_request_frames_phy 0x%x",
212 230 instance->io_request_frames_phy));
213 231
214 232 io_req_base = (uint8_t *)instance->io_request_frames +
215 233 MRSAS_THUNDERBOLT_MSG_SIZE;
216 234 io_req_base_phys = instance->io_request_frames_phy +
217 235 MRSAS_THUNDERBOLT_MSG_SIZE;
218 236
219 237 con_log(CL_DLEVEL3, (CE_NOTE,
220 238 "io req_base_phys 0x%x", io_req_base_phys));
221 239
222 240 for (i = 0; i < max_cmd; i++) {
223 241 cmd = instance->cmd_list[i];
224 242
225 243 offset = i * MRSAS_THUNDERBOLT_MSG_SIZE;
226 244
227 245 cmd->scsi_io_request = (Mpi2RaidSCSIIORequest_t *)
228 246 ((uint8_t *)io_req_base + offset);
229 247 cmd->scsi_io_request_phys_addr = io_req_base_phys + offset;
230 248
231 - cmd->sgl = (Mpi2SGEIOUnion_t *)
232 - ((uint8_t *)io_req_base +
233 - (max_cmd * raid_msg_size) + i * sgl_sz);
249 + cmd->sgl = (Mpi2SGEIOUnion_t *)((uint8_t *)io_req_base +
250 + (max_cmd * raid_msg_size) + i * sgl_sz);
234 251
235 - cmd->sgl_phys_addr =
236 - (io_req_base_phys +
252 + cmd->sgl_phys_addr = (io_req_base_phys +
237 253 (max_cmd * raid_msg_size) + i * sgl_sz);
238 254
239 - cmd->sense1 = (uint8_t *)
240 - ((uint8_t *)io_req_base +
255 + cmd->sense1 = (uint8_t *)((uint8_t *)io_req_base +
241 256 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
242 257 (i * SENSE_LENGTH));
243 258
244 - cmd->sense_phys_addr1 =
245 - (io_req_base_phys +
259 + cmd->sense_phys_addr1 = (io_req_base_phys +
246 260 (max_cmd * raid_msg_size) + (max_cmd * sgl_sz) +
247 261 (i * SENSE_LENGTH));
248 262
249 263
250 - cmd->SMID = i+1;
264 + cmd->SMID = i + 1;
251 265
252 - con_log(CL_DLEVEL3, (CE_NOTE,
253 - "Frame Pool Addr [%x]0x%x",
254 - cmd->index, cmd->scsi_io_request));
266 + con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Addr [%x]0x%p",
267 + cmd->index, (void *)cmd->scsi_io_request));
255 268
256 - con_log(CL_DLEVEL3, (CE_NOTE,
257 - "Frame Pool Phys Addr [%x]0x%x",
269 + con_log(CL_DLEVEL3, (CE_NOTE, "Frame Pool Phys Addr [%x]0x%x",
258 270 cmd->index, cmd->scsi_io_request_phys_addr));
259 271
260 - con_log(CL_DLEVEL3, (CE_NOTE,
261 - "Sense Addr [%x]0x%x",
262 - cmd->index, cmd->sense1));
272 + con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr [%x]0x%p",
273 + cmd->index, (void *)cmd->sense1));
263 274
264 - con_log(CL_DLEVEL3, (CE_NOTE,
265 - "Sense Addr Phys [%x]0x%x",
275 + con_log(CL_DLEVEL3, (CE_NOTE, "Sense Addr Phys [%x]0x%x",
266 276 cmd->index, cmd->sense_phys_addr1));
267 277
278 +		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers [%x]0x%p",
279 + cmd->index, (void *)cmd->sgl));
268 280
269 - con_log(CL_DLEVEL3, (CE_NOTE,
270 - "Sgl bufffers [%x]0x%x",
271 - cmd->index, cmd->sgl));
272 -
273 - con_log(CL_DLEVEL3, (CE_NOTE,
274 - "Sgl bufffers phys [%x]0x%x",
281 +		con_log(CL_DLEVEL3, (CE_NOTE, "Sgl buffers phys [%x]0x%x",
275 282 cmd->index, cmd->sgl_phys_addr));
276 283 }
277 284
278 285 return (DDI_SUCCESS);
279 286
280 287 }
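
The carve-out done in the loop above is easier to see in isolation. Below is a
minimal user-space model of the same offset arithmetic; the sizes are
illustrative stand-ins for MRSAS_THUNDERBOLT_MSG_SIZE, sgl_sz and
SENSE_LENGTH, and the extra leading frame models the SMID 0 reservation noted
above.

#include <stdio.h>
#include <stdint.h>

#define	MSG_SZ		256	/* stand-in for MRSAS_THUNDERBOLT_MSG_SIZE */
#define	SGL_SZ		1024	/* per-command SGL chunk, as in the loop above */
#define	SENSE_SZ	64	/* stand-in for SENSE_LENGTH */

int
main(void)
{
	uint32_t max_cmd = 4;	/* tiny pool for illustration */
	uint32_t i;

	/* One extra message frame up front accommodates SMID 0. */
	uint32_t total = MSG_SZ + max_cmd * MSG_SZ +
	    max_cmd * SGL_SZ + max_cmd * SENSE_SZ;
	uint32_t io_req_base = MSG_SZ;	/* offset just past the SMID 0 frame */

	printf("pool size %u bytes\n", total);
	for (i = 0; i < max_cmd; i++) {
		uint32_t msg = io_req_base + i * MSG_SZ;
		uint32_t sgl = io_req_base + max_cmd * MSG_SZ + i * SGL_SZ;
		uint32_t sense = io_req_base + max_cmd * MSG_SZ +
		    max_cmd * SGL_SZ + i * SENSE_SZ;

		printf("cmd %u (SMID %u): msg@%u sgl@%u sense@%u\n",
		    i, i + 1, msg, sgl, sense);
	}
	return (0);
}
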
281 288
282 289
283 290 /*
284 291 * alloc_additional_dma_buffer for AEN
285 292 */
286 293 int
287 294 mrsas_tbolt_alloc_additional_dma_buffer(struct mrsas_instance *instance)
288 295 {
289 - uint32_t internal_buf_size = PAGESIZE*2;
296 + uint32_t internal_buf_size = PAGESIZE*2;
290 297 int i;
291 298
292 299 /* Initialize buffer status as free */
293 300 instance->mfi_internal_dma_obj.status = DMA_OBJ_FREED;
294 301 instance->mfi_evt_detail_obj.status = DMA_OBJ_FREED;
295 302 instance->ld_map_obj[0].status = DMA_OBJ_FREED;
296 303 instance->ld_map_obj[1].status = DMA_OBJ_FREED;
297 304
298 305
299 306 instance->mfi_internal_dma_obj.size = internal_buf_size;
300 307 instance->mfi_internal_dma_obj.dma_attr = mrsas_generic_dma_attr;
301 308 instance->mfi_internal_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
302 - instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
309 + instance->mfi_internal_dma_obj.dma_attr.dma_attr_count_max =
310 + 0xFFFFFFFFU;
303 311 instance->mfi_internal_dma_obj.dma_attr.dma_attr_sgllen = 1;
304 312
305 313 if (mrsas_alloc_dma_obj(instance, &instance->mfi_internal_dma_obj,
306 - (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
314 + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
307 315 cmn_err(CE_WARN,
308 - "mr_sas: could not alloc reply queue");
316 + "mr_sas: could not alloc reply queue");
309 317 return (DDI_FAILURE);
310 318 }
311 319
312 320 bzero(instance->mfi_internal_dma_obj.buffer, internal_buf_size);
313 321
314 - instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
315 - instance->internal_buf = (caddr_t)(((unsigned long)
316 - instance->mfi_internal_dma_obj.buffer));
322 + instance->mfi_internal_dma_obj.status |= DMA_OBJ_ALLOCATED;
323 + instance->internal_buf =
324 + (caddr_t)(((unsigned long)instance->mfi_internal_dma_obj.buffer));
317 325 instance->internal_buf_dmac_add =
318 - instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
326 + instance->mfi_internal_dma_obj.dma_cookie[0].dmac_address;
319 327 instance->internal_buf_size = internal_buf_size;
320 -
328 +
321 329 /* allocate evt_detail */
322 330 instance->mfi_evt_detail_obj.size = sizeof (struct mrsas_evt_detail);
323 331 instance->mfi_evt_detail_obj.dma_attr = mrsas_generic_dma_attr;
324 332 instance->mfi_evt_detail_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
325 333 instance->mfi_evt_detail_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
326 334 instance->mfi_evt_detail_obj.dma_attr.dma_attr_sgllen = 1;
327 335 instance->mfi_evt_detail_obj.dma_attr.dma_attr_align = 8;
328 336
329 337 if (mrsas_alloc_dma_obj(instance, &instance->mfi_evt_detail_obj,
330 338 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
331 - cmn_err(CE_WARN,
332 - "mrsas_tbolt_alloc_additional_dma_buffer: "
339 + cmn_err(CE_WARN, "mrsas_tbolt_alloc_additional_dma_buffer: "
333 340 "could not allocate data transfer buffer.");
334 341 goto fail_tbolt_additional_buff;
335 342 }
336 343
337 344 bzero(instance->mfi_evt_detail_obj.buffer,
338 345 sizeof (struct mrsas_evt_detail));
339 346
340 347 instance->mfi_evt_detail_obj.status |= DMA_OBJ_ALLOCATED;
341 348
342 349 instance->size_map_info = sizeof (MR_FW_RAID_MAP) +
343 350 (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
344 351
345 352 for (i = 0; i < 2; i++) {
346 353 /* allocate the data transfer buffer */
347 354 instance->ld_map_obj[i].size = instance->size_map_info;
348 355 instance->ld_map_obj[i].dma_attr = mrsas_generic_dma_attr;
349 356 instance->ld_map_obj[i].dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
350 - instance->ld_map_obj[i].dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
357 + instance->ld_map_obj[i].dma_attr.dma_attr_count_max =
358 + 0xFFFFFFFFU;
351 359 instance->ld_map_obj[i].dma_attr.dma_attr_sgllen = 1;
352 360 instance->ld_map_obj[i].dma_attr.dma_attr_align = 1;
353 361
354 362 if (mrsas_alloc_dma_obj(instance, &instance->ld_map_obj[i],
355 - (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
363 + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
356 364 cmn_err(CE_WARN,
357 - "could not allocate data transfer buffer.");
365 + "could not allocate data transfer buffer.");
358 366 goto fail_tbolt_additional_buff;
359 367 }
360 368
361 369 instance->ld_map_obj[i].status |= DMA_OBJ_ALLOCATED;
362 370
363 - (void) memset(instance->ld_map_obj[i].buffer, 0,
364 - instance->size_map_info);
371 + bzero(instance->ld_map_obj[i].buffer, instance->size_map_info);
365 372
366 - instance->ld_map[i] = (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
367 - instance->ld_map_phy[i] =
368 - (uint32_t)instance->ld_map_obj[i].dma_cookie[0].dmac_address;
373 + instance->ld_map[i] =
374 + (MR_FW_RAID_MAP_ALL *)instance->ld_map_obj[i].buffer;
375 + instance->ld_map_phy[i] = (uint32_t)instance->
376 + ld_map_obj[i].dma_cookie[0].dmac_address;
369 377
370 378 con_log(CL_DLEVEL3, (CE_NOTE,
371 - "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
379 + "ld_map Addr Phys 0x%x", instance->ld_map_phy[i]));
372 380
373 381 con_log(CL_DLEVEL3, (CE_NOTE,
374 - "size_map_info 0x%x", instance->size_map_info));
375 -
382 + "size_map_info 0x%x", instance->size_map_info));
376 383 }
377 384
378 385 return (DDI_SUCCESS);
379 386
380 387 fail_tbolt_additional_buff:
381 388 mrsas_tbolt_free_additional_dma_buffer(instance);
382 389
383 390 return (DDI_FAILURE);
384 391 }
385 392
386 393 MRSAS_REQUEST_DESCRIPTOR_UNION *
387 -mr_sas_get_request_descriptor(struct mrsas_instance *instance,
388 - uint16_t index, struct mrsas_cmd *cmd)
394 +mr_sas_get_request_descriptor(struct mrsas_instance *instance, uint16_t index)
389 395 {
390 396 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
391 397
392 398 if (index > instance->max_fw_cmds) {
393 399 con_log(CL_ANN1, (CE_NOTE,
394 400 "Invalid SMID 0x%x request for descriptor", index));
395 401 con_log(CL_ANN1, (CE_NOTE,
396 - "max_fw_cmds : 0x%x\n", instance->max_fw_cmds));
402 + "max_fw_cmds : 0x%x", instance->max_fw_cmds));
397 403 return (NULL);
398 404 }
399 405
400 406 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
401 407 ((char *)instance->request_message_pool +
402 408 (sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION) * index));
403 409
404 410 con_log(CL_ANN1, (CE_NOTE,
405 - "request descriptor : 0x%08lx\n", (unsigned long)req_desc));
411 + "request descriptor : 0x%08lx", (unsigned long)req_desc));
406 412
407 413 con_log(CL_ANN1, (CE_NOTE,
408 - "request descriptor base phy : 0x%08lx\n",
414 + "request descriptor base phy : 0x%08lx",
409 415 (unsigned long)instance->request_message_pool_phy));
410 416
411 417 return ((MRSAS_REQUEST_DESCRIPTOR_UNION *)req_desc);
412 418 }
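
The descriptor slot returned here is later filled in and posted to the
controller. For the MFI pass-through (MFA) case used by the IOC INIT path
further down, a rough user-space model of the encoding follows; the flag
values and the union layout are illustrative stand-ins for the MPI2 header
definitions. The point to notice, visible in mrsas_tbolt_ioc_init() below, is
that the 64-bit descriptor carries the frame's physical address (256-byte
aligned, so the low byte is free) together with the MFA request-type flags.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins; the real values come from the MPI2 headers. */
#define	REQ_FLAGS_MFA		0x1
#define	REQ_FLAGS_TYPE_SHIFT	1

/* Simplified stand-in for MRSAS_REQUEST_DESCRIPTOR_UNION. */
typedef union {
	struct {
		uint8_t RequestFlags;
		uint8_t reserved[7];
	} MFAIo;
	uint64_t Words;
} req_desc_t;

int
main(void)
{
	req_desc_t d;

	/*
	 * Mirrors the IOC INIT path below: the frame's physical address
	 * fills the descriptor, then the request-type flags are folded
	 * into the low byte (free because the frame is 256-byte aligned).
	 */
	d.Words = 0x12345600ULL;	/* pretend frame physical address */
	d.MFAIo.RequestFlags = (REQ_FLAGS_MFA << REQ_FLAGS_TYPE_SHIFT);

	printf("descriptor words: 0x%llx\n", (unsigned long long)d.Words);
	return (0);
}
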
413 419
414 420
415 421 /*
416 422 * Allocate Request and Reply Queue Descriptors.
417 423 */
418 424 int
419 425 alloc_req_rep_desc(struct mrsas_instance *instance)
420 426 {
421 427 uint32_t request_q_sz, reply_q_sz;
422 - int i, max_request_q_sz, max_reply_q_sz;
423 - uint64_t request_desc;
428 + int i, max_reply_q_sz;
424 429 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
425 - uint64_t *reply_ptr;
426 430
427 431 /*
428 432 * ThunderBolt(TB) There's no longer producer consumer mechanism.
429 433 * Once we have an interrupt we are supposed to scan through the list of
430 434 * reply descriptors and process them accordingly. We would be needing
431 435 * to allocate memory for 1024 reply descriptors
432 436 */
433 437
434 438 /* Allocate Reply Descriptors */
435 - con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
436 - sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
439 + con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
440 + (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
437 441
438 - // reply queue size should be multiple of 16
442 + /* reply queue size should be multiple of 16 */
439 443 max_reply_q_sz = ((instance->max_fw_cmds + 1 + 15)/16)*16;
440 444
441 445 reply_q_sz = 8 * max_reply_q_sz;
442 446
443 447
444 - con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x\n",
445 - sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
448 + con_log(CL_ANN1, (CE_NOTE, " reply q desc len = %x",
449 + (uint_t)sizeof (MPI2_REPLY_DESCRIPTORS_UNION)));
446 450
447 451 instance->reply_desc_dma_obj.size = reply_q_sz;
448 452 instance->reply_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
449 453 instance->reply_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
450 454 instance->reply_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
451 455 instance->reply_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
452 456 instance->reply_desc_dma_obj.dma_attr.dma_attr_align = 16;
453 457
454 458 if (mrsas_alloc_dma_obj(instance, &instance->reply_desc_dma_obj,
455 - (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
459 + (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
456 460 cmn_err(CE_WARN,
457 461 "mr_sas: could not alloc reply queue");
458 462 return (DDI_FAILURE);
459 463 }
460 464
461 465 bzero(instance->reply_desc_dma_obj.buffer, reply_q_sz);
462 466 instance->reply_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
463 467
464 - // virtual address of reply queue
468 + /* virtual address of reply queue */
465 469 instance->reply_frame_pool = (MPI2_REPLY_DESCRIPTORS_UNION *)(
466 470 instance->reply_desc_dma_obj.buffer);
467 471
468 472 instance->reply_q_depth = max_reply_q_sz;
469 473
470 474 con_log(CL_ANN1, (CE_NOTE, "[reply queue depth]0x%x",
471 475 instance->reply_q_depth));
472 476
473 - con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%x",
474 - instance->reply_frame_pool));
477 + con_log(CL_ANN1, (CE_NOTE, "[reply queue virt addr]0x%p",
478 + (void *)instance->reply_frame_pool));
475 479
476 480 /* initializing reply address to 0xFFFFFFFF */
477 481 reply_desc = instance->reply_frame_pool;
478 482
479 483 for (i = 0; i < instance->reply_q_depth; i++) {
480 484 reply_desc->Words = (uint64_t)~0;
481 485 reply_desc++;
482 486 }
483 487
484 488
485 489 instance->reply_frame_pool_phy =
486 490 (uint32_t)instance->reply_desc_dma_obj.dma_cookie[0].dmac_address;
487 491
488 492 con_log(CL_ANN1, (CE_NOTE,
489 493 "[reply queue phys addr]0x%x", instance->reply_frame_pool_phy));
490 494
491 495
492 496 instance->reply_pool_limit_phy = (instance->reply_frame_pool_phy +
493 497 reply_q_sz);
494 498
495 499 con_log(CL_ANN1, (CE_NOTE, "[reply pool limit phys addr]0x%x",
496 500 instance->reply_pool_limit_phy));
497 501
498 502
499 - con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
500 - sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
503 + con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
504 + (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
501 505
502 506 /* Allocate Request Descriptors */
503 - con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x\n",
504 - sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
507 + con_log(CL_ANN1, (CE_NOTE, " request q desc len = %x",
508 + (int)sizeof (MRSAS_REQUEST_DESCRIPTOR_UNION)));
505 509
506 510 request_q_sz = 8 *
507 511 (instance->max_fw_cmds);
508 512
509 513 instance->request_desc_dma_obj.size = request_q_sz;
510 514 instance->request_desc_dma_obj.dma_attr = mrsas_generic_dma_attr;
511 515 instance->request_desc_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
512 - instance->request_desc_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
516 + instance->request_desc_dma_obj.dma_attr.dma_attr_count_max =
517 + 0xFFFFFFFFU;
513 518 instance->request_desc_dma_obj.dma_attr.dma_attr_sgllen = 1;
514 519 instance->request_desc_dma_obj.dma_attr.dma_attr_align = 16;
515 520
516 521 if (mrsas_alloc_dma_obj(instance, &instance->request_desc_dma_obj,
517 522 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
518 523 cmn_err(CE_WARN,
519 524 "mr_sas: could not alloc request queue desc");
520 525 goto fail_undo_reply_queue;
521 526 }
522 527
523 528 bzero(instance->request_desc_dma_obj.buffer, request_q_sz);
524 529 instance->request_desc_dma_obj.status |= DMA_OBJ_ALLOCATED;
525 530
526 531 /* virtual address of request queue desc */
527 532 instance->request_message_pool = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
528 533 (instance->request_desc_dma_obj.buffer);
529 534
530 535 instance->request_message_pool_phy =
531 536 (uint32_t)instance->request_desc_dma_obj.dma_cookie[0].dmac_address;
532 537
533 - max_request_q_sz = instance->max_fw_cmds;
534 -
535 538 return (DDI_SUCCESS);
536 539
537 540 fail_undo_reply_queue:
538 541 if (instance->reply_desc_dma_obj.status == DMA_OBJ_ALLOCATED) {
539 542 (void) mrsas_free_dma_obj(instance,
540 543 instance->reply_desc_dma_obj);
541 544 instance->reply_desc_dma_obj.status = DMA_OBJ_FREED;
542 545 }
543 546
544 547 return (DDI_FAILURE);
545 548 }
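
The block comment at the top of alloc_req_rep_desc() describes the reply-queue
protocol, but the scan itself lives in the completion path elsewhere in the
driver. A minimal user-space model follows, assuming only the conventions
visible here: descriptors pre-filled with all ones as the "unused" sentinel,
and a depth rounded up to a multiple of 16.

#include <stdio.h>
#include <stdint.h>

#define	UNUSED_DESC	(~(uint64_t)0)	/* matches the 0xFF... fill above */

/*
 * With no producer/consumer registers, the driver walks the ring from
 * its last read position, consuming entries until it hits one that
 * still holds the "unused" pattern, re-arming each slot as it goes.
 */
static void
drain_reply_queue(uint64_t *ring, uint32_t depth, uint32_t *read_idx)
{
	while (ring[*read_idx] != UNUSED_DESC) {
		printf("reply 0x%llx at slot %u\n",
		    (unsigned long long)ring[*read_idx], *read_idx);
		ring[*read_idx] = UNUSED_DESC;	/* re-arm the slot */
		*read_idx = (*read_idx + 1) % depth;
	}
}

int
main(void)
{
	/* Depth rounded up to a multiple of 16, as computed above. */
	uint32_t depth = ((5 + 1 + 15) / 16) * 16;	/* 5 cmds -> 16 slots */
	uint64_t ring[16];
	uint32_t read_idx = 0, i;

	for (i = 0; i < depth; i++)
		ring[i] = UNUSED_DESC;

	ring[0] = 0x1234;	/* pretend firmware posted two replies */
	ring[1] = 0x5678;
	drain_reply_queue(ring, depth, &read_idx);
	printf("reply_read_index is now %u\n", read_idx);
	return (0);
}
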
546 549
547 550 /*
548 551 * mrsas_alloc_cmd_pool_tbolt
549 - * TODO: merge tbolt-specific codee into mrsas_alloc_cmd_pool() to have single routine
552 + *
553 + * TODO: merge tbolt-specific code into mrsas_alloc_cmd_pool() to have single
554 + * routine
550 555 */
551 556 int
552 557 mrsas_alloc_cmd_pool_tbolt(struct mrsas_instance *instance)
553 558 {
554 559 int i;
555 560 int count;
556 561 uint32_t max_cmd;
557 562 uint32_t reserve_cmd;
558 563 size_t sz;
559 564
560 565 struct mrsas_cmd *cmd;
561 566
562 567 max_cmd = instance->max_fw_cmds;
563 568 con_log(CL_ANN1, (CE_NOTE, "mrsas_alloc_cmd_pool: "
564 569 "max_cmd %x", max_cmd));
565 570
566 571
567 572 sz = sizeof (struct mrsas_cmd *) * max_cmd;
568 573
569 574 /*
570 575 * instance->cmd_list is an array of struct mrsas_cmd pointers.
571 576 * Allocate the dynamic array first and then allocate individual
572 577 * commands.
573 578 */
574 579 instance->cmd_list = kmem_zalloc(sz, KM_SLEEP);
575 - if (instance->cmd_list == NULL) {
576 - con_log(CL_NONE, (CE_WARN,
577 - "Failed to allocate memory for cmd_list"));
578 - return (DDI_FAILURE);
579 - }
580 580
581 581 /* create a frame pool and assign one frame to each cmd */
582 582 for (count = 0; count < max_cmd; count++) {
583 - instance->cmd_list[count] = kmem_zalloc(sizeof (struct mrsas_cmd),
584 - KM_SLEEP);
585 - if (instance->cmd_list[count] == NULL) {
586 - con_log(CL_NONE, (CE_WARN,
587 - "Failed to allocate memory for mrsas_cmd"));
588 - goto mrsas_undo_cmds;
589 - }
583 + instance->cmd_list[count] =
584 + kmem_zalloc(sizeof (struct mrsas_cmd), KM_SLEEP);
590 585 }
591 586
592 587 /* add all the commands to command pool */
593 588
594 589 INIT_LIST_HEAD(&instance->cmd_pool_list);
595 590 INIT_LIST_HEAD(&instance->cmd_pend_list);
596 591 INIT_LIST_HEAD(&instance->cmd_app_pool_list);
597 592
598 - reserve_cmd = MRSAS_APP_RESERVED_CMDS;
593 + reserve_cmd = MRSAS_APP_RESERVED_CMDS;
599 594
600 - for (i = 1; i < reserve_cmd; i++) { //cmd index 0 reservered for IOC INIT
595 +	/* cmd index 0 reserved for IOC INIT */
596 + for (i = 1; i < reserve_cmd; i++) {
601 597 cmd = instance->cmd_list[i];
602 598 cmd->index = i;
603 599 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
604 600 }
605 -
606 601
602 +
607 603 for (i = reserve_cmd; i < max_cmd; i++) {
608 604 cmd = instance->cmd_list[i];
609 605 cmd->index = i;
610 606 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
611 607 }
612 608
613 609 return (DDI_SUCCESS);
614 610
615 611 mrsas_undo_cmds:
616 612 if (count > 0) {
617 613 /* free each cmd */
618 614 for (i = 0; i < count; i++) {
619 - if (instance->cmd_list[i] != NULL)
620 - kmem_free(instance->cmd_list[i],sizeof (struct mrsas_cmd));
615 + if (instance->cmd_list[i] != NULL) {
616 + kmem_free(instance->cmd_list[i],
617 + sizeof (struct mrsas_cmd));
618 + }
621 619 instance->cmd_list[i] = NULL;
622 620 }
623 621 }
624 622
625 623 mrsas_undo_cmd_list:
626 624 if (instance->cmd_list != NULL)
627 - kmem_free(instance->cmd_list,sz);
625 + kmem_free(instance->cmd_list, sz);
628 626 instance->cmd_list = NULL;
629 627
630 628 return (DDI_FAILURE);
631 629 }
632 630
633 631
634 632 /*
635 633 * free_space_for_mpi2
636 634 */
637 635 void
638 636 free_space_for_mpi2(struct mrsas_instance *instance)
639 637 {
640 638 /* already freed */
641 639 if (instance->cmd_list == NULL) {
642 640 return;
643 641 }
644 642
645 643 /* First free the additional DMA buffer */
646 644 mrsas_tbolt_free_additional_dma_buffer(instance);
647 645
648 646 /* Free the request/reply descriptor pool */
649 647 free_req_rep_desc_pool(instance);
650 648
651 649 /* Free the MPI message pool */
652 650 destroy_mpi2_frame_pool(instance);
653 651
654 652 /* Free the MFI frame pool */
655 653 destroy_mfi_frame_pool(instance);
656 654
657 655 /* Free all the commands in the cmd_list */
658 656 /* Free the cmd_list buffer itself */
659 657 mrsas_free_cmd_pool(instance);
660 658 }
661 659
662 660
663 661 /*
664 662 * ThunderBolt(TB) memory allocations for commands/messages/frames.
665 663 */
666 664 int
667 665 alloc_space_for_mpi2(struct mrsas_instance *instance)
668 666 {
669 - /* Allocate command pool ( memory for cmd_list & individual commands )*/
667 + /* Allocate command pool (memory for cmd_list & individual commands) */
670 668 if (mrsas_alloc_cmd_pool_tbolt(instance)) {
671 669 cmn_err(CE_WARN, "Error creating cmd pool");
672 - return (DDI_FAILURE);
670 + return (DDI_FAILURE);
673 671 }
674 672
675 673 /* Initialize single reply size and Message size */
676 674 instance->reply_size = MRSAS_THUNDERBOLT_REPLY_SIZE;
677 675 instance->raid_io_msg_size = MRSAS_THUNDERBOLT_MSG_SIZE;
678 676
679 677 instance->max_sge_in_main_msg = (MRSAS_THUNDERBOLT_MSG_SIZE -
680 678 (sizeof (MPI2_RAID_SCSI_IO_REQUEST) -
681 679 sizeof (MPI2_SGE_IO_UNION)))/ sizeof (MPI2_SGE_IO_UNION);
682 680 instance->max_sge_in_chain = (MR_COMMAND_SIZE -
683 681 MRSAS_THUNDERBOLT_MSG_SIZE) / sizeof (MPI2_SGE_IO_UNION);
684 682
685 683 /* Reduce SG count by 1 to take care of group cmds feature in FW */
686 684 instance->max_num_sge = (instance->max_sge_in_main_msg +
687 685 instance->max_sge_in_chain - 2);
688 686 instance->chain_offset_mpt_msg =
689 687 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16;
690 688 instance->chain_offset_io_req = (MRSAS_THUNDERBOLT_MSG_SIZE -
691 689 sizeof (MPI2_SGE_IO_UNION)) / 16;
692 690 instance->reply_read_index = 0;
693 691
694 692
695 693 /* Allocate Request and Reply descriptors Array */
696 694 /* Make sure the buffer is aligned to 8 for req/rep descriptor Pool */
697 695 if (alloc_req_rep_desc(instance)) {
698 696 cmn_err(CE_WARN,
699 - "Error, allocating memory for descripter-pool");
697 +		    "Error, allocating memory for descriptor-pool");
700 698 goto mpi2_undo_cmd_pool;
701 699 }
702 700 con_log(CL_ANN1, (CE_NOTE, "[request message pool phys addr]0x%x",
703 701 instance->request_message_pool_phy));
704 702
705 703
706 704 /* Allocate MFI Frame pool - for MPI-MFI passthru commands */
707 705 if (create_mfi_frame_pool(instance)) {
708 706 cmn_err(CE_WARN,
709 - "Error, allocating memory for MFI frame-pool");
707 + "Error, allocating memory for MFI frame-pool");
710 708 goto mpi2_undo_descripter_pool;
711 709 }
712 710
713 711
714 712 /* Allocate MPI2 Message pool */
715 713 /*
716 714 * Make sure the buffer is aligned to 256 for raid message packet
717 715 * create an io request pool and assign one frame to each cmd
718 716 */
719 717
720 718 if (create_mpi2_frame_pool(instance)) {
721 719 cmn_err(CE_WARN,
722 - "Error, allocating memory for MPI2 Message-pool");
720 + "Error, allocating memory for MPI2 Message-pool");
723 721 goto mpi2_undo_mfi_frame_pool;
724 722 }
725 723
726 724 #ifdef DEBUG
727 725 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_main_msg]0x%x",
728 726 instance->max_sge_in_main_msg));
729 727 con_log(CL_ANN1, (CE_CONT, "[max_sge_in_chain]0x%x",
730 728 instance->max_sge_in_chain));
731 729 con_log(CL_ANN1, (CE_CONT,
732 730 "[max_sge]0x%x", instance->max_num_sge));
733 731 con_log(CL_ANN1, (CE_CONT, "[chain_offset_mpt_msg]0x%x",
734 732 instance->chain_offset_mpt_msg));
735 733 con_log(CL_ANN1, (CE_CONT, "[chain_offset_io_req]0x%x",
736 734 instance->chain_offset_io_req));
737 735 #endif
738 736
739 737
740 738 /* Allocate additional dma buffer */
741 739 if (mrsas_tbolt_alloc_additional_dma_buffer(instance)) {
742 740 cmn_err(CE_WARN,
743 - "Error, allocating tbolt additional DMA buffer");
741 + "Error, allocating tbolt additional DMA buffer");
744 742 goto mpi2_undo_message_pool;
745 743 }
746 744
747 745 return (DDI_SUCCESS);
748 746
749 747 mpi2_undo_message_pool:
750 748 destroy_mpi2_frame_pool(instance);
751 749
752 750 mpi2_undo_mfi_frame_pool:
753 751 destroy_mfi_frame_pool(instance);
754 752
755 753 mpi2_undo_descripter_pool:
756 754 free_req_rep_desc_pool(instance);
757 755
758 756 mpi2_undo_cmd_pool:
759 757 mrsas_free_cmd_pool(instance);
760 758
761 759 return (DDI_FAILURE);
762 760 }
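
The SGE bookkeeping in alloc_space_for_mpi2() is plain arithmetic over
structure sizes. A worked model with stand-in sizes follows; only
MR_COMMAND_SIZE (64*20 bytes) is fixed in this file, and the sizeof values
are assumptions standing in for the MPI2 header definitions.

#include <stdio.h>

int
main(void)
{
	int msg_size = 256;		/* MRSAS_THUNDERBOLT_MSG_SIZE, assumed */
	int cmd_size = 64 * 20;		/* MR_COMMAND_SIZE from this file */
	int sge_size = 16;		/* sizeof (MPI2_SGE_IO_UNION), assumed */
	int req_size = 192 + sge_size;	/* sizeof (MPI2_RAID_SCSI_IO_REQUEST), assumed */

	/* SGEs that fit in the main message after the fixed header. */
	int in_main = (msg_size - (req_size - sge_size)) / sge_size;

	/* SGEs that fit in the chain area behind the message frame. */
	int in_chain = (cmd_size - msg_size) / sge_size;

	/*
	 * Minus 2, matching the driver's computation above: the chain
	 * element itself plus the firmware group-commands reservation.
	 */
	int max_sge = in_main + in_chain - 2;

	printf("main %d + chain %d -> max usable SGEs %d\n",
	    in_main, in_chain, max_sge);
	return (0);
}
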
763 761
764 762
765 763 /*
766 764 * mrsas_init_adapter_tbolt - Initialize fusion interface adapter.
767 765 */
768 766 int
769 -mrsas_init_adapter_tbolt (struct mrsas_instance *instance)
767 +mrsas_init_adapter_tbolt(struct mrsas_instance *instance)
770 768 {
771 769
772 770 /*
773 771 * Reduce the max supported cmds by 1. This is to ensure that the
774 772 * reply_q_sz (1 more than the max cmd that driver may send)
775 773 * does not exceed max cmds that the FW can support
776 774 */
777 775
778 776 if (instance->max_fw_cmds > 1008) {
779 777 instance->max_fw_cmds = 1008;
780 778 instance->max_fw_cmds = instance->max_fw_cmds-1;
781 779 }
782 780
783 781 con_log(CL_ANN, (CE_NOTE, "mrsas_init_adapter_tbolt: "
784 - " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
782 + " instance->max_fw_cmds 0x%X.", instance->max_fw_cmds));
785 783
786 784
787 785 /* create a pool of commands */
788 - if ( alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
786 + if (alloc_space_for_mpi2(instance) != DDI_SUCCESS) {
789 787 cmn_err(CE_WARN,
790 788 " alloc_space_for_mpi2() failed.");
791 789
792 790 return (DDI_FAILURE);
793 791 }
794 792
795 793 /* Send ioc init message */
796 - if ( mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
794 + /* NOTE: the issue_init call does FMA checking already. */
795 + if (mrsas_issue_init_mpi2(instance) != DDI_SUCCESS) {
797 796 cmn_err(CE_WARN,
798 797 " mrsas_issue_init_mpi2() failed.");
799 798
800 799 goto fail_init_fusion;
801 800 }
802 801
803 802 instance->unroll.alloc_space_mpi2 = 1;
804 803
805 804 con_log(CL_ANN, (CE_NOTE,
806 - "mrsas_init_adapter_tbolt: SUCCESSFULL\n"));
805 + "mrsas_init_adapter_tbolt: SUCCESSFUL"));
807 806
808 807 return (DDI_SUCCESS);
809 808
810 809 fail_init_fusion:
811 -
812 -fail_undo_alloc_mpi2:
813 810 free_space_for_mpi2(instance);
814 811
815 812 return (DDI_FAILURE);
816 813 }
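
The clamp at the top of this function is easy to misread: the count is capped
at 1008 and then one more command is given back, so that the reply queue,
which is one entry deeper than the command count, still fits the firmware
limit. A tiny model with a hypothetical firmware-reported value:

#include <stdio.h>

int
main(void)
{
	int fw_reported = 1024;		/* hypothetical value from firmware */
	int max_fw_cmds = fw_reported;

	/* Mirrors mrsas_init_adapter_tbolt(): cap, then drop one. */
	if (max_fw_cmds > 1008)
		max_fw_cmds = 1008 - 1;

	/* reply_q_sz is one deeper than the command count. */
	printf("max_fw_cmds %d, reply queue depth %d\n",
	    max_fw_cmds, max_fw_cmds + 1);
	return (0);
}
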
817 814
818 815
819 816
820 817 /*
821 818 * init_mpi2
822 819 */
823 820 int
824 821 mrsas_issue_init_mpi2(struct mrsas_instance *instance)
825 822 {
826 823 dma_obj_t init2_dma_obj;
827 824 int ret_val = DDI_SUCCESS;
828 825
829 826 /* allocate DMA buffer for IOC INIT message */
830 827 init2_dma_obj.size = sizeof (Mpi2IOCInitRequest_t);
831 828 init2_dma_obj.dma_attr = mrsas_generic_dma_attr;
832 829 init2_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
833 830 init2_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
834 831 init2_dma_obj.dma_attr.dma_attr_sgllen = 1;
835 832 init2_dma_obj.dma_attr.dma_attr_align = 256;
836 833
837 834 if (mrsas_alloc_dma_obj(instance, &init2_dma_obj,
838 835 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
839 836 cmn_err(CE_WARN, "mr_sas_issue_init_mpi2 "
840 837 "could not allocate data transfer buffer.");
841 838 return (DDI_FAILURE);
842 839 }
843 - (void) memset(init2_dma_obj.buffer, 2,
844 - sizeof (Mpi2IOCInitRequest_t));
840 + (void) memset(init2_dma_obj.buffer, 2, sizeof (Mpi2IOCInitRequest_t));
845 841
846 842 con_log(CL_ANN1, (CE_NOTE,
847 - "mrsas_issue_init_mpi2 _phys adr: %x \n",
843 + "mrsas_issue_init_mpi2 _phys adr: %x",
848 844 init2_dma_obj.dma_cookie[0].dmac_address));
849 845
850 846
851 847 /* Initialize and send ioc init message */
852 - ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj,
853 - init2_dma_obj.acc_handle);
848 + ret_val = mrsas_tbolt_ioc_init(instance, &init2_dma_obj);
854 849 if (ret_val == DDI_FAILURE) {
855 850 con_log(CL_ANN1, (CE_WARN,
856 - "mrsas_issue_init_mpi2: Failed\n"));
851 + "mrsas_issue_init_mpi2: Failed"));
857 852 goto fail_init_mpi2;
858 853 }
859 854
860 855 /* free IOC init DMA buffer */
861 856 if (mrsas_free_dma_obj(instance, init2_dma_obj)
862 857 != DDI_SUCCESS) {
863 858 con_log(CL_ANN1, (CE_WARN,
864 - "mrsas_issue_init_mpi2: Free Failed\n"));
859 + "mrsas_issue_init_mpi2: Free Failed"));
865 860 return (DDI_FAILURE);
866 861 }
867 862
868 -
869 863 /* Get/Check and sync ld_map info */
870 864 instance->map_id = 0;
871 - if( mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS )
872 - mrsas_tbolt_sync_map_info(instance);
873 -
865 + if (mrsas_tbolt_check_map_info(instance) == DDI_SUCCESS)
866 + (void) mrsas_tbolt_sync_map_info(instance);
867 +
868 +
869 + /* No mrsas_cmd to send, so send NULL. */
870 + if (mrsas_common_check(instance, NULL) != DDI_SUCCESS)
871 + goto fail_init_mpi2;
872 +
874 873 con_log(CL_ANN, (CE_NOTE,
875 - "mrsas_issue_init_mpi2: SUCCESSFULL\n"));
874 + "mrsas_issue_init_mpi2: SUCCESSFUL"));
876 875
877 876 return (DDI_SUCCESS);
878 877
879 878 fail_init_mpi2:
880 - mrsas_free_dma_obj(instance, init2_dma_obj);
879 + (void) mrsas_free_dma_obj(instance, init2_dma_obj);
881 880
882 881 return (DDI_FAILURE);
883 882 }
884 883
885 -int
886 -mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj,
887 - ddi_acc_handle_t accessp)
884 +static int
885 +mrsas_tbolt_ioc_init(struct mrsas_instance *instance, dma_obj_t *mpi2_dma_obj)
888 886 {
889 - int numbytes, i;
890 - int ret = DDI_SUCCESS;
887 + int numbytes;
891 888 uint16_t flags;
892 - int status;
893 - timespec_t time;
894 - uint64_t mSec;
895 - uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
896 889 struct mrsas_init_frame2 *mfiFrameInit2;
897 890 struct mrsas_header *frame_hdr;
898 891 Mpi2IOCInitRequest_t *init;
899 892 struct mrsas_cmd *cmd = NULL;
900 893 struct mrsas_drv_ver drv_ver_info;
901 894 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
902 895
903 -
904 896 con_log(CL_ANN, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
905 897
906 898
907 899 #ifdef DEBUG
908 900 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
909 - sizeof (*mfiFrameInit2)));
910 - con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", sizeof (*init)));
901 + (int)sizeof (*mfiFrameInit2)));
902 + con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n", (int)sizeof (*init)));
911 903 con_log(CL_ANN1, (CE_CONT, " mfiFrameInit2 len = %x\n",
912 - sizeof (struct mrsas_init_frame2)));
904 + (int)sizeof (struct mrsas_init_frame2)));
913 905 con_log(CL_ANN1, (CE_CONT, " MPI len = %x\n",
914 - sizeof (Mpi2IOCInitRequest_t)));
906 + (int)sizeof (Mpi2IOCInitRequest_t)));
915 907 #endif
916 908
917 909 init = (Mpi2IOCInitRequest_t *)mpi2_dma_obj->buffer;
918 910 numbytes = sizeof (*init);
919 911 bzero(init, numbytes);
920 912
921 913 ddi_put8(mpi2_dma_obj->acc_handle, &init->Function,
922 914 MPI2_FUNCTION_IOC_INIT);
923 915
924 916 ddi_put8(mpi2_dma_obj->acc_handle, &init->WhoInit,
925 917 MPI2_WHOINIT_HOST_DRIVER);
926 918
927 919 /* set MsgVersion and HeaderVersion host driver was built with */
928 920 ddi_put16(mpi2_dma_obj->acc_handle, &init->MsgVersion,
929 921 MPI2_VERSION);
930 922
931 923 ddi_put16(mpi2_dma_obj->acc_handle, &init->HeaderVersion,
932 924 MPI2_HEADER_VERSION);
933 925
934 926 ddi_put16(mpi2_dma_obj->acc_handle, &init->SystemRequestFrameSize,
935 927 instance->raid_io_msg_size / 4);
936 928
937 929 ddi_put16(mpi2_dma_obj->acc_handle, &init->ReplyFreeQueueDepth,
938 930 0);
939 931
940 932 ddi_put16(mpi2_dma_obj->acc_handle,
941 933 &init->ReplyDescriptorPostQueueDepth,
942 934 instance->reply_q_depth);
943 935 /*
944 936 * These addresses are set using the DMA cookie addresses from when the
945 937 * memory was allocated. Sense buffer hi address should be 0.
946 938 * ddi_put32(accessp, &init->SenseBufferAddressHigh, 0);
947 939 */
948 940
949 941 ddi_put32(mpi2_dma_obj->acc_handle,
950 942 &init->SenseBufferAddressHigh, 0);
951 943
952 944 ddi_put64(mpi2_dma_obj->acc_handle,
953 945 (uint64_t *)&init->SystemRequestFrameBaseAddress,
954 946 instance->io_request_frames_phy);
955 947
956 948 ddi_put64(mpi2_dma_obj->acc_handle,
957 949 &init->ReplyDescriptorPostQueueAddress,
958 950 instance->reply_frame_pool_phy);
959 951
960 952 ddi_put64(mpi2_dma_obj->acc_handle,
961 953 &init->ReplyFreeQueueAddress, 0);
962 954
963 955 cmd = instance->cmd_list[0];
964 956 if (cmd == NULL) {
965 957 return (DDI_FAILURE);
966 958 }
967 959 cmd->retry_count_for_ocr = 0;
968 960 cmd->pkt = NULL;
969 961 cmd->drv_pkt_time = 0;
970 962
971 963 mfiFrameInit2 = (struct mrsas_init_frame2 *)cmd->scsi_io_request;
972 - con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%x", mfiFrameInit2));
964 + con_log(CL_ANN1, (CE_CONT, "[mfi vaddr]%p", (void *)mfiFrameInit2));
973 965
974 966 frame_hdr = &cmd->frame->hdr;
975 967
976 968 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
977 969 MFI_CMD_STATUS_POLL_MODE);
978 970
979 971 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
980 972
981 - flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
973 + flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
982 974
983 975 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
984 976
985 977 con_log(CL_ANN, (CE_CONT,
986 978 "mrsas_tbolt_ioc_init: SMID:%x\n", cmd->SMID));
987 979
988 - // Init the MFI Header
980 + /* Init the MFI Header */
989 981 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
990 982 &mfiFrameInit2->cmd, MFI_CMD_OP_INIT);
991 983
992 984 con_log(CL_ANN1, (CE_CONT, "[CMD]%x", mfiFrameInit2->cmd));
993 985
994 986 ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
995 987 &mfiFrameInit2->cmd_status,
996 988 MFI_STAT_INVALID_STATUS);
997 989
998 990 con_log(CL_ANN1, (CE_CONT, "[Status]%x", mfiFrameInit2->cmd_status));
999 991
1000 992 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1001 993 &mfiFrameInit2->queue_info_new_phys_addr_lo,
1002 994 mpi2_dma_obj->dma_cookie[0].dmac_address);
1003 995
1004 996 ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1005 997 &mfiFrameInit2->data_xfer_len,
1006 998 sizeof (Mpi2IOCInitRequest_t));
1007 999
1008 1000 con_log(CL_ANN1, (CE_CONT, "[reply q desc addr]%x",
1009 - init->ReplyDescriptorPostQueueAddress));
1001 + (int)init->ReplyDescriptorPostQueueAddress));
1010 1002
1011 - /* fill driver version information*/
1003 + /* fill driver version information */
1012 1004 fill_up_drv_ver(&drv_ver_info);
1013 -
1005 +
1014 1006 /* allocate the driver version data transfer buffer */
1015 - instance->drv_ver_dma_obj.size = sizeof(drv_ver_info.drv_ver);
1007 + instance->drv_ver_dma_obj.size = sizeof (drv_ver_info.drv_ver);
1016 1008 instance->drv_ver_dma_obj.dma_attr = mrsas_generic_dma_attr;
1017 1009 instance->drv_ver_dma_obj.dma_attr.dma_attr_addr_hi = 0xFFFFFFFFU;
1018 1010 instance->drv_ver_dma_obj.dma_attr.dma_attr_count_max = 0xFFFFFFFFU;
1019 1011 instance->drv_ver_dma_obj.dma_attr.dma_attr_sgllen = 1;
1020 1012 instance->drv_ver_dma_obj.dma_attr.dma_attr_align = 1;
1021 1013
1022 1014 if (mrsas_alloc_dma_obj(instance, &instance->drv_ver_dma_obj,
1023 1015 (uchar_t)DDI_STRUCTURE_LE_ACC) != 1) {
1024 1016 cmn_err(CE_WARN,
1025 1017 "fusion init: Could not allocate driver version buffer.");
1026 1018 return (DDI_FAILURE);
1027 1019 }
1028 - /* copy driver version to dma buffer*/
1029 - (void) memset(instance->drv_ver_dma_obj.buffer, 0,sizeof(drv_ver_info.drv_ver));
1020 + /* copy driver version to dma buffer */
1021 + bzero(instance->drv_ver_dma_obj.buffer, sizeof (drv_ver_info.drv_ver));
1030 1022 ddi_rep_put8(cmd->frame_dma_obj.acc_handle,
1031 - (uint8_t *)drv_ver_info.drv_ver,
1032 - (uint8_t *)instance->drv_ver_dma_obj.buffer,
1033 - sizeof(drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1023 + (uint8_t *)drv_ver_info.drv_ver,
1024 + (uint8_t *)instance->drv_ver_dma_obj.buffer,
1025 + sizeof (drv_ver_info.drv_ver), DDI_DEV_AUTOINCR);
1034 1026
1035 - /*send driver version physical address to firmware*/
1036 - ddi_put64(cmd->frame_dma_obj.acc_handle,
1037 - &mfiFrameInit2->driverversion, instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1027 + /* send driver version physical address to firmware */
1028 + ddi_put64(cmd->frame_dma_obj.acc_handle, &mfiFrameInit2->driverversion,
1029 + instance->drv_ver_dma_obj.dma_cookie[0].dmac_address);
1038 1030
1039 1031 con_log(CL_ANN1, (CE_CONT, "[MPIINIT2 frame Phys addr ]0x%x len = %x",
1040 1032 mfiFrameInit2->queue_info_new_phys_addr_lo,
1041 - sizeof (Mpi2IOCInitRequest_t)));
1033 + (int)sizeof (Mpi2IOCInitRequest_t)));
1042 1034
1043 1035 con_log(CL_ANN1, (CE_CONT, "[Length]%x", mfiFrameInit2->data_xfer_len));
1044 1036
1045 1037 con_log(CL_ANN1, (CE_CONT, "[MFI frame Phys Address]%x len = %x",
1046 - cmd->scsi_io_request_phys_addr, sizeof (struct mrsas_init_frame2)));
1038 + cmd->scsi_io_request_phys_addr,
1039 + (int)sizeof (struct mrsas_init_frame2)));
1047 1040
1048 1041 /* disable interrupts before sending INIT2 frame */
1049 1042 instance->func_ptr->disable_intr(instance);
1050 1043
1051 1044 req_desc = (MRSAS_REQUEST_DESCRIPTOR_UNION *)
1052 1045 instance->request_message_pool;
1053 1046 req_desc->Words = cmd->scsi_io_request_phys_addr;
1054 1047 req_desc->MFAIo.RequestFlags =
1055 1048 (MPI2_REQ_DESCRIPT_FLAGS_MFA << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1056 1049
1057 1050 cmd->request_desc = req_desc;
1058 1051
1059 1052 /* issue the init frame */
1060 1053 instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd);
1061 1054
1062 1055 con_log(CL_ANN1, (CE_CONT, "[cmd = %d] ", frame_hdr->cmd));
1063 1056 con_log(CL_ANN1, (CE_CONT, "[cmd Status= %x] ",
1064 1057 frame_hdr->cmd_status));
1065 1058
1066 1059 if (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1067 1060 &mfiFrameInit2->cmd_status) == 0) {
1068 1061 con_log(CL_ANN, (CE_NOTE, "INIT2 Success"));
1069 - ret = DDI_SUCCESS;
1070 1062 } else {
1071 1063 con_log(CL_ANN, (CE_WARN, "INIT2 Fail"));
1072 1064 mrsas_dump_reply_desc(instance);
1073 1065 goto fail_ioc_init;
1074 1066 }
1075 1067
1076 1068 mrsas_dump_reply_desc(instance);
1077 1069
1078 1070 instance->unroll.verBuff = 1;
1079 1071
1080 - con_log(CL_ANN, (CE_NOTE,
1081 - "mrsas_tbolt_ioc_init: SUCCESSFULL\n"));
1072 + con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_ioc_init: SUCCESSFUL"));
1082 1073
1083 -
1084 1074 return (DDI_SUCCESS);
1085 1075
1086 1076
1087 1077 fail_ioc_init:
1088 1078
1089 - mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1079 + (void) mrsas_free_dma_obj(instance, instance->drv_ver_dma_obj);
1090 1080
1091 1081 return (DDI_FAILURE);
1092 1082 }
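
issue_cmd_in_poll_mode() itself is outside this hunk. As a rough model of the
polling convention set up above, where cmd_status is seeded with
MFI_STAT_INVALID_STATUS and firmware overwrites the byte on completion (the
0xFF sentinel and the tick loop are assumptions; the real routine waits with
drv_usecwait() under a timeout):

#include <stdio.h>
#include <stdint.h>

#define	MFI_STAT_INVALID_STATUS	0xFF	/* assumed sentinel value */
#define	MFI_STAT_OK		0x00

/*
 * Spin until firmware replaces the seeded sentinel in the frame's
 * status byte, or give up after a bounded number of ticks.
 */
static int
poll_for_completion(volatile uint8_t *cmd_status, int max_ticks)
{
	int tick;

	for (tick = 0; tick < max_ticks; tick++) {
		if (*cmd_status != MFI_STAT_INVALID_STATUS)
			return (*cmd_status == MFI_STAT_OK ? 0 : -1);
		/* the driver would drv_usecwait() here */
	}
	return (-1);	/* timed out */
}

int
main(void)
{
	uint8_t status = MFI_STAT_INVALID_STATUS;

	status = MFI_STAT_OK;	/* pretend firmware completed the frame */
	printf("poll result: %d\n", poll_for_completion(&status, 1000));
	return (0);
}
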
1093 1083
1094 -int wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1084 +int
1085 +wait_for_outstanding_poll_io(struct mrsas_instance *instance)
1095 1086 {
1096 1087 int i;
1097 1088 uint32_t wait_time = dump_io_wait_time;
1098 1089 for (i = 0; i < wait_time; i++) {
1099 1090 /*
1100 1091 * Check For Outstanding poll Commands
1101 1092 * except ldsync command and aen command
1102 1093 */
1103 1094 if (instance->fw_outstanding <= 2) {
1104 1095 break;
1105 1096 }
1106 1097 drv_usecwait(10*MILLISEC);
1107 1098 /* complete commands from reply queue */
1108 1099 (void) mr_sas_tbolt_process_outstanding_cmd(instance);
1109 1100 }
1110 1101 if (instance->fw_outstanding > 2) {
1111 1102 return (1);
1112 1103 }
1113 1104 return (0);
1114 1105 }
1115 1106 /*
1116 1107 * scsi_pkt handling
1117 1108 *
1118 1109 * Visible to the external world via the transport structure.
1119 1110 */
1120 1111
1121 1112 int
1122 1113 mrsas_tbolt_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1123 1114 {
1124 1115 struct mrsas_instance *instance = ADDR2MR(ap);
1125 1116 struct scsa_cmd *acmd = PKT2CMD(pkt);
1126 1117 struct mrsas_cmd *cmd = NULL;
1127 - int rval, i;
1128 - uchar_t cmd_done = 0;
1129 - Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1130 - uint32_t msecs = 120 * MILLISEC;
1118 + uchar_t cmd_done = 0;
1131 1119
1132 1120 con_log(CL_DLEVEL1, (CE_NOTE, "chkpnt:%s:%d", __func__, __LINE__));
1133 1121 if (instance->deadadapter == 1) {
1134 1122 cmn_err(CE_WARN,
1135 1123 "mrsas_tran_start:TBOLT return TRAN_FATAL_ERROR "
1136 1124 		    "for IO, as the HBA doesn't take any more IOs");
1137 1125 if (pkt) {
1138 1126 pkt->pkt_reason = CMD_DEV_GONE;
1139 1127 pkt->pkt_statistics = STAT_DISCON;
1140 1128 }
1141 1129 return (TRAN_FATAL_ERROR);
1142 1130 }
1143 1131 if (instance->adapterresetinprogress) {
1144 1132 con_log(CL_ANN, (CE_NOTE, "Reset flag set, "
1145 1133 "returning mfi_pkt and setting TRAN_BUSY\n"));
1146 1134 return (TRAN_BUSY);
1147 1135 }
1148 - rval = mrsas_tbolt_prepare_pkt(acmd);
1136 + (void) mrsas_tbolt_prepare_pkt(acmd);
1149 1137
1150 1138 cmd = mrsas_tbolt_build_cmd(instance, ap, pkt, &cmd_done);
1151 1139
1152 1140 /*
1153 1141 * Check if the command is already completed by the mrsas_build_cmd()
1154 1142 * routine. In which case the busy_flag would be clear and scb will be
1155 1143 * NULL and appropriate reason provided in pkt_reason field
1156 1144 */
1157 1145 if (cmd_done) {
1158 1146 pkt->pkt_reason = CMD_CMPLT;
1159 1147 pkt->pkt_scbp[0] = STATUS_GOOD;
1160 1148 pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET
1161 1149 | STATE_SENT_CMD;
1162 1150 if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp) {
1163 1151 (*pkt->pkt_comp)(pkt);
1164 1152 }
1165 1153
1166 1154 return (TRAN_ACCEPT);
1167 1155 }
1168 1156
1169 1157 if (cmd == NULL) {
1170 1158 return (TRAN_BUSY);
1171 1159 }
1172 1160
1173 1161
1174 1162 if ((pkt->pkt_flags & FLAG_NOINTR) == 0) {
1175 1163 if (instance->fw_outstanding > instance->max_fw_cmds) {
1176 1164 cmn_err(CE_WARN,
1177 - "Command Queue Full... Returning BUSY \n");
1165 + "Command Queue Full... Returning BUSY");
1178 1166 return_raid_msg_pkt(instance, cmd);
1179 1167 return (TRAN_BUSY);
1180 1168 }
1181 1169
1182 1170 /* Synchronize the Cmd frame for the controller */
1183 1171 (void) ddi_dma_sync(cmd->frame_dma_obj.dma_handle, 0, 0,
1184 1172 DDI_DMA_SYNC_FORDEV);
1185 1173
1186 1174 con_log(CL_ANN, (CE_CONT, "tbolt_issue_cmd: SCSI CDB[0]=0x%x "
1187 - "cmd->index:0x%x SMID %0x%x\n", pkt->pkt_cdbp[0], cmd->index, cmd->SMID));
1175 + "cmd->index:0x%x SMID 0x%x\n", pkt->pkt_cdbp[0],
1176 + cmd->index, cmd->SMID));
1188 1177
1189 1178 instance->func_ptr->issue_cmd(cmd, instance);
1190 -
1191 - return (TRAN_ACCEPT);
1192 -
1193 1179 } else {
1194 - instance->func_ptr->issue_cmd(cmd, instance);
1180 + instance->func_ptr->issue_cmd(cmd, instance);
1195 1181 (void) wait_for_outstanding_poll_io(instance);
1196 - return (TRAN_ACCEPT);
1182 + (void) mrsas_common_check(instance, cmd);
1197 1183 }
1184 +
1185 + return (TRAN_ACCEPT);
1198 1186 }
1199 1187
1200 1188 /*
1201 1189 * prepare the pkt:
1202 1190 * the pkt may have been resubmitted or just reused so
1203 1191 * initialize some fields and do some checks.
1204 1192 */
1205 -int
1193 +static int
1206 1194 mrsas_tbolt_prepare_pkt(struct scsa_cmd *acmd)
1207 1195 {
1208 1196 struct scsi_pkt *pkt = CMD2PKT(acmd);
1209 1197
1210 1198
1211 1199 /*
1212 1200 * Reinitialize some fields that need it; the packet may
1213 1201 * have been resubmitted
1214 1202 */
1215 1203 pkt->pkt_reason = CMD_CMPLT;
1216 1204 pkt->pkt_state = 0;
1217 1205 pkt->pkt_statistics = 0;
1218 1206 pkt->pkt_resid = 0;
1219 1207
1220 1208 /*
1221 1209 * zero status byte.
1222 1210 */
1223 1211 *(pkt->pkt_scbp) = 0;
1224 1212
1225 1213 return (0);
1226 1214 }
1227 1215
1228 1216
1229 1217 int
1230 1218 mr_sas_tbolt_build_sgl(struct mrsas_instance *instance,
1231 1219 struct scsa_cmd *acmd,
1232 1220 struct mrsas_cmd *cmd,
1233 1221 Mpi2RaidSCSIIORequest_t *scsi_raid_io,
1234 1222 uint32_t *datalen)
1235 1223 {
1236 1224 uint32_t MaxSGEs;
1237 1225 int sg_to_process;
1238 - uint32_t i, j, SGEdwords = 0;
1226 + uint32_t i, j;
1239 1227 uint32_t numElements, endElement;
1240 1228 Mpi25IeeeSgeChain64_t *ieeeChainElement = NULL;
1241 1229 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee = NULL;
1242 - uint32_t SGLFlags = 0;
1230 + ddi_acc_handle_t acc_handle =
1231 + instance->mpi2_frame_pool_dma_obj.acc_handle;
1243 1232
1244 1233 con_log(CL_ANN1, (CE_NOTE,
1245 1234 "chkpnt: Building Chained SGL :%d", __LINE__));
1246 1235
1247 1236 	/* Calculate SGE size in number of Words(32bit) */
1248 1237 /* Clear the datalen before updating it. */
1249 1238 *datalen = 0;
1250 1239
1251 - SGEdwords = sizeof (Mpi25IeeeSgeChain64_t) / 4;
1252 -
1253 1240 MaxSGEs = instance->max_sge_in_main_msg;
1254 1241
1255 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1256 - &scsi_raid_io->SGLFlags,
1242 + ddi_put16(acc_handle, &scsi_raid_io->SGLFlags,
1257 1243 MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
1258 1244
1259 - // set data transfer flag.
1245 + /* set data transfer flag. */
1260 1246 if (acmd->cmd_flags & CFLAG_DMASEND) {
1261 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1262 - &scsi_raid_io->Control,
1247 + ddi_put32(acc_handle, &scsi_raid_io->Control,
1263 1248 MPI2_SCSIIO_CONTROL_WRITE);
1264 1249 } else {
1265 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1266 - &scsi_raid_io->Control, MPI2_SCSIIO_CONTROL_READ);
1250 + ddi_put32(acc_handle, &scsi_raid_io->Control,
1251 + MPI2_SCSIIO_CONTROL_READ);
1267 1252 }
1268 1253
1269 -
1254 +
1270 1255 numElements = acmd->cmd_cookiecnt;
1271 1256
1272 1257 con_log(CL_DLEVEL1, (CE_NOTE, "[SGE Count]:%x", numElements));
1273 1258
1274 1259 if (numElements > instance->max_num_sge) {
1275 1260 con_log(CL_ANN, (CE_NOTE,
1276 1261 "[Max SGE Count Exceeded]:%x", numElements));
1277 - return (numElements);
1262 + return (numElements);
1278 1263 }
1279 1264
1280 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1281 - &scsi_raid_io->RaidContext.numSGE, (uint8_t)numElements);
1265 + ddi_put8(acc_handle, &scsi_raid_io->RaidContext.numSGE,
1266 + (uint8_t)numElements);
1282 1267
1283 1268 /* set end element in main message frame */
1284 1269 endElement = (numElements <= MaxSGEs) ? numElements : (MaxSGEs - 1);
1285 1270
1286 1271 /* prepare the scatter-gather list for the firmware */
1287 1272 scsi_raid_io_sgl_ieee =
1288 1273 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
1289 1274
1290 1275 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1291 1276 Mpi25IeeeSgeChain64_t *sgl_ptr_end = scsi_raid_io_sgl_ieee;
1292 1277 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
1293 -
1294 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1295 - &sgl_ptr_end->Flags, 0);
1278 +
1279 + ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
1296 1280 }
1297 1281
1298 1282 for (i = 0; i < endElement; i++, scsi_raid_io_sgl_ieee++) {
1299 - ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1300 - &scsi_raid_io_sgl_ieee->Address,
1283 + ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1301 1284 acmd->cmd_dmacookies[i].dmac_laddress);
1302 1285
1303 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1304 - &scsi_raid_io_sgl_ieee->Length,
1286 + ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1305 1287 acmd->cmd_dmacookies[i].dmac_size);
1306 1288
1307 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1308 - &scsi_raid_io_sgl_ieee->Flags, 0);
1309 -
1289 + ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1290 +
1310 1291 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1311 - if (i == (numElements - 1))
1312 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1313 - &scsi_raid_io_sgl_ieee->Flags, IEEE_SGE_FLAGS_END_OF_LIST);
1292 + if (i == (numElements - 1)) {
1293 + ddi_put8(acc_handle,
1294 + &scsi_raid_io_sgl_ieee->Flags,
1295 + IEEE_SGE_FLAGS_END_OF_LIST);
1296 + }
1314 1297 }
1315 1298
1316 1299 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1317 1300
1318 1301 #ifdef DEBUG
1319 - con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]:%llx",
1302 + con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Address]: %" PRIx64,
1320 1303 scsi_raid_io_sgl_ieee->Address));
1321 1304 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Length]:%x",
1322 1305 scsi_raid_io_sgl_ieee->Length));
1323 1306 con_log(CL_DLEVEL1, (CE_NOTE, "[SGL Flags]:%x",
1324 1307 scsi_raid_io_sgl_ieee->Flags));
1325 1308 #endif
1326 1309
1327 1310 }
1328 1311
1329 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1330 - &scsi_raid_io->ChainOffset, 0);
1312 + ddi_put8(acc_handle, &scsi_raid_io->ChainOffset, 0);
1331 1313
1332 1314 /* check if chained SGL required */
1333 1315 if (i < numElements) {
1334 1316
1335 1317 con_log(CL_ANN1, (CE_NOTE, "[Chain Element index]:%x", i));
1336 -
1318 +
1337 1319 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1338 - uint16_t ioFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1339 - &scsi_raid_io->IoFlags);
1320 + uint16_t ioFlags =
1321 + ddi_get16(acc_handle, &scsi_raid_io->IoFlags);
1340 1322
1341 - if ((ioFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
1342 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1343 - &scsi_raid_io->ChainOffset, (U8)instance->chain_offset_io_req);
1344 - else
1345 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1346 - &scsi_raid_io->ChainOffset, 0);
1323 + if ((ioFlags &
1324 + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
1325 + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
1326 + ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1327 + (U8)instance->chain_offset_io_req);
1328 + } else {
1329 + ddi_put8(acc_handle,
1330 + &scsi_raid_io->ChainOffset, 0);
1331 + }
1332 + } else {
1333 + ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
1334 + (U8)instance->chain_offset_io_req);
1347 1335 }
1348 - else {
1349 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1350 - &scsi_raid_io->ChainOffset, (U8)instance->chain_offset_io_req);
1351 - }
1352 1336
1353 1337 /* prepare physical chain element */
1354 - ieeeChainElement = scsi_raid_io_sgl_ieee;
1338 + ieeeChainElement = scsi_raid_io_sgl_ieee;
1355 1339
1356 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1357 - &ieeeChainElement->NextChainOffset, 0);
1340 + ddi_put8(acc_handle, &ieeeChainElement->NextChainOffset, 0);
1358 1341
1359 - if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER)
1360 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1361 - &ieeeChainElement->Flags, IEEE_SGE_FLAGS_CHAIN_ELEMENT );
1362 - else
1363 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1364 - &ieeeChainElement->Flags,
1365 - (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1366 - MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1342 + if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1343 + ddi_put8(acc_handle, &ieeeChainElement->Flags,
1344 + IEEE_SGE_FLAGS_CHAIN_ELEMENT);
1345 + } else {
1346 + ddi_put8(acc_handle, &ieeeChainElement->Flags,
1347 + (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
1348 + MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
1349 + }
1367 1350
1368 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1369 - &ieeeChainElement->Length,
1351 + ddi_put32(acc_handle, &ieeeChainElement->Length,
1370 1352 (sizeof (MPI2_SGE_IO_UNION) * (numElements - i)));
1371 1353
1372 - ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1373 - &ieeeChainElement->Address,
1354 + ddi_put64(acc_handle, &ieeeChainElement->Address,
1374 1355 (U64)cmd->sgl_phys_addr);
1375 1356
1376 1357 sg_to_process = numElements - i;
1377 1358
1378 1359 con_log(CL_ANN1, (CE_NOTE,
1379 1360 "[Additional SGE Count]:%x", endElement));
1380 1361
1381 1362 /* point to the chained SGL buffer */
1382 1363 scsi_raid_io_sgl_ieee = (Mpi25IeeeSgeChain64_t *)cmd->sgl;
1383 1364
1384 1365 /* build rest of the SGL in chained buffer */
1385 1366 for (j = 0; j < sg_to_process; j++, scsi_raid_io_sgl_ieee++) {
1386 1367 con_log(CL_DLEVEL3, (CE_NOTE, "[remaining SGL]:%x", i));
1387 1368
1388 - ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1389 - &scsi_raid_io_sgl_ieee->Address,
1369 + ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
1390 1370 acmd->cmd_dmacookies[i].dmac_laddress);
1391 1371
1392 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1393 - &scsi_raid_io_sgl_ieee->Length,
1372 + ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length,
1394 1373 acmd->cmd_dmacookies[i].dmac_size);
1395 1374
1396 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1397 - &scsi_raid_io_sgl_ieee->Flags, 0);
1375 + ddi_put8(acc_handle, &scsi_raid_io_sgl_ieee->Flags, 0);
1398 1376
1399 1377 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1400 - if (i == (numElements - 1))
1401 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1402 - &scsi_raid_io_sgl_ieee->Flags, IEEE_SGE_FLAGS_END_OF_LIST);
1378 + if (i == (numElements - 1)) {
1379 + ddi_put8(acc_handle,
1380 + &scsi_raid_io_sgl_ieee->Flags,
1381 + IEEE_SGE_FLAGS_END_OF_LIST);
1382 + }
1403 1383 }
1404 1384
1405 1385 *datalen += acmd->cmd_dmacookies[i].dmac_size;
1406 1386
1407 1387 #if DEBUG
1408 1388 con_log(CL_DLEVEL1, (CE_NOTE,
1409 - "[SGL Address]:%llx",
1389 + "[SGL Address]: %" PRIx64,
1410 1390 scsi_raid_io_sgl_ieee->Address));
1411 1391 con_log(CL_DLEVEL1, (CE_NOTE,
1412 1392 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
1413 1393 con_log(CL_DLEVEL1, (CE_NOTE,
1414 1394 "[SGL Flags]:%x", scsi_raid_io_sgl_ieee->Flags));
1415 1395 #endif
1416 1396
1417 1397 i++;
1418 1398 }
1419 1399 }
1420 1400
1421 1401 return (0);
1422 1402 } /*end of BuildScatterGather */
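/*
 * Editor's sketch (standalone, not driver code): the chain bookkeeping of
 * mr_sas_tbolt_build_sgl in miniature.  The struct below is a simplified
 * stand-in for Mpi25IeeeSgeChain64_t / MPI2_SGE_IO_UNION, and the frame
 * capacity is an assumed value; only the arithmetic is meant to match the
 * code above -- the main frame holds up to max_sge_in_main_msg entries (one
 * slot is reserved for the chain element when spilling), and the chain
 * element's Length covers every SGE moved to the chain buffer.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct {		/* simplified IEEE SGE / chain element */
	uint64_t Address;
	uint32_t Length;
	uint8_t  Flags;
} sge64_t;

int
main(void)
{
	unsigned max_sge_in_main_msg = 8;	/* assumed frame capacity */
	unsigned numElements = 20;		/* DMA cookies for this I/O */

	/* endElement computation, as in the driver code above */
	unsigned endElement = (numElements <= max_sge_in_main_msg) ?
	    numElements : (max_sge_in_main_msg - 1);
	unsigned chained = numElements - endElement;

	/* chain Length covers all remaining SGEs, as the ddi_put32 above */
	uint32_t chain_len = (uint32_t)(sizeof (sge64_t) * chained);

	printf("main frame: %u SGEs, chained: %u SGEs, chain Length: %u\n",
	    endElement, chained, (unsigned)chain_len);
	return (0);
}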
1423 1403
1424 1404
1425 1405 /*
1426 1406 * build_cmd
1427 1407 */
1428 -struct mrsas_cmd *
1408 +static struct mrsas_cmd *
1429 1409 mrsas_tbolt_build_cmd(struct mrsas_instance *instance, struct scsi_address *ap,
1430 1410 struct scsi_pkt *pkt, uchar_t *cmd_done)
1431 1411 {
1432 1412 uint8_t fp_possible = 0;
1433 - uint16_t flags = 0;
1434 - uint32_t i, index;
1435 - uint32_t context;
1436 - uint32_t sge_bytes;
1437 - uint8_t ChainOffsetValue;
1438 - uint32_t SGLFlags;
1439 - uint32_t lba_count=0;
1440 - uint32_t start_lba_hi=0;
1441 - uint32_t start_lba_lo=0;
1442 - ddi_acc_handle_t acc_handle;
1413 + uint32_t index;
1414 + uint32_t lba_count = 0;
1415 + uint32_t start_lba_hi = 0;
1416 + uint32_t start_lba_lo = 0;
1417 + ddi_acc_handle_t acc_handle =
1418 + instance->mpi2_frame_pool_dma_obj.acc_handle;
1443 1419 struct mrsas_cmd *cmd = NULL;
1444 1420 struct scsa_cmd *acmd = PKT2CMD(pkt);
1445 - MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
1446 - Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1447 - Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
1421 + MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
1422 + Mpi2RaidSCSIIORequest_t *scsi_raid_io;
1448 1423 uint32_t datalen;
1449 1424 struct IO_REQUEST_INFO io_info;
1450 1425 MR_FW_RAID_MAP_ALL *local_map_ptr;
1451 - MR_LD_RAID *raid;
1452 - U32 ld;
1453 1426 uint16_t pd_cmd_cdblen;
1454 1427
1455 1428 con_log(CL_DLEVEL1, (CE_NOTE,
1456 1429 "chkpnt: Entered mrsas_tbolt_build_cmd:%d", __LINE__));
1457 1430
1458 1431 /* find out if this is logical or physical drive command. */
1459 1432 acmd->islogical = MRDRV_IS_LOGICAL(ap);
1460 1433 acmd->device_id = MAP_DEVICE_ID(instance, ap);
1461 1434
1462 1435 *cmd_done = 0;
1463 1436
1464 1437 /* get the command packet */
1465 1438 if (!(cmd = get_raid_msg_pkt(instance))) {
1466 1439 return (NULL);
1467 1440 }
1468 1441
1469 1442 index = cmd->index;
1470 - ReqDescUnion = mr_sas_get_request_descriptor(instance, index, cmd);
1443 + ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
1471 1444 ReqDescUnion->Words = 0;
1472 1445 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
1473 1446 ReqDescUnion->SCSIIO.RequestFlags =
1474 1447 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1475 1448 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1476 1449
1477 1450
1478 1451 cmd->request_desc = ReqDescUnion;
1479 1452 cmd->pkt = pkt;
1480 1453 cmd->cmd = acmd;
1481 1454
1482 1455 /* lets get the command directions */
1483 1456 if (acmd->cmd_flags & CFLAG_DMASEND) {
1484 1457 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1485 1458 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1486 1459 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1487 1460 DDI_DMA_SYNC_FORDEV);
1488 1461 }
1489 1462 } else if (acmd->cmd_flags & ~CFLAG_DMASEND) {
1490 1463 if (acmd->cmd_flags & CFLAG_CONSISTENT) {
1491 1464 (void) ddi_dma_sync(acmd->cmd_dmahandle,
1492 1465 acmd->cmd_dma_offset, acmd->cmd_dma_len,
1493 1466 DDI_DMA_SYNC_FORCPU);
1494 1467 }
1495 1468 } else {
1496 - con_log(CL_ANN, (CE_NOTE, "NO DMA\n"));
1469 + con_log(CL_ANN, (CE_NOTE, "NO DMA"));
1497 1470 }
1498 1471
1499 1472
1500 - // get SCSI_IO raid message frame pointer
1473 + /* get SCSI_IO raid message frame pointer */
1501 1474 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
1502 1475
1503 1476 /* zero out SCSI_IO raid message frame */
1504 - memset(scsi_raid_io, 0, sizeof(Mpi2RaidSCSIIORequest_t));
1477 + bzero(scsi_raid_io, sizeof (Mpi2RaidSCSIIORequest_t));
1505 1478
1506 - /*Set the ldTargetId set by BuildRaidContext() */
1507 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1508 - &scsi_raid_io->RaidContext.ldTargetId,
1509 - acmd->device_id);
1479 + /* Set the ldTargetId set by BuildRaidContext() */
1480 + ddi_put16(acc_handle, &scsi_raid_io->RaidContext.ldTargetId,
1481 + acmd->device_id);
1510 1482
1511 1483 /* Copy CDB to scsi_io_request message frame */
1512 - ddi_rep_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1513 - (uint8_t *)pkt->pkt_cdbp,
1514 - (uint8_t *)scsi_raid_io->CDB.CDB32,
1515 - acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1484 + ddi_rep_put8(acc_handle,
1485 + (uint8_t *)pkt->pkt_cdbp, (uint8_t *)scsi_raid_io->CDB.CDB32,
1486 + acmd->cmd_cdblen, DDI_DEV_AUTOINCR);
1516 1487
1517 - /*
1518 - * Just the CDB length,rest of the Flags are zero
1519 - * This will be modified later.
1520 - */
1521 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1522 - &scsi_raid_io->IoFlags,
1523 - acmd->cmd_cdblen);
1488 + /*
1489 +	 * Just the CDB length; the rest of the flags are zero.
1490 +	 * This will be modified later.
1491 + */
1492 + ddi_put16(acc_handle, &scsi_raid_io->IoFlags, acmd->cmd_cdblen);
1524 1493
1525 1494 pd_cmd_cdblen = acmd->cmd_cdblen;
1526 1495
1527 1496 switch (pkt->pkt_cdbp[0]) {
1528 1497 case SCMD_READ:
1529 1498 case SCMD_WRITE:
1530 1499 case SCMD_READ_G1:
1531 1500 case SCMD_WRITE_G1:
1532 1501 case SCMD_READ_G4:
1533 1502 case SCMD_WRITE_G4:
1534 1503 case SCMD_READ_G5:
1535 1504 case SCMD_WRITE_G5:
1536 1505
1537 - if (acmd->islogical) {
1538 - /* Initialize sense Information */
1539 - if (cmd->sense1 == NULL) {
1540 - con_log(CL_ANN, (CE_NOTE,
1541 - "tbolt_build_cmd: Sense buffer ptr NULL \n"));
1542 - }
1543 - bzero(cmd->sense1, SENSE_LENGTH);
1544 - con_log(CL_DLEVEL2, (CE_NOTE,
1545 - "tbolt_build_cmd CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1506 + if (acmd->islogical) {
1507 + /* Initialize sense Information */
1508 + if (cmd->sense1 == NULL) {
1509 + con_log(CL_ANN, (CE_NOTE, "tbolt_build_cmd: "
1510 + "Sense buffer ptr NULL "));
1511 + }
1512 + bzero(cmd->sense1, SENSE_LENGTH);
1513 + con_log(CL_DLEVEL2, (CE_NOTE, "tbolt_build_cmd "
1514 + "CDB[0] = %x\n", pkt->pkt_cdbp[0]));
1546 1515
1547 - if (acmd->cmd_cdblen == CDB_GROUP0) { /* 6-byte cdb */
1548 - lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1549 - start_lba_lo =
1550 - ((uint32_t)(pkt->pkt_cdbp[3]) |
1551 - ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1552 - ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F) << 16));
1553 - } else if (acmd->cmd_cdblen == CDB_GROUP1) { /* 10-byte cdb */
1554 - lba_count =
1555 - (((uint16_t)(pkt->pkt_cdbp[8])) |
1556 - ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1516 + if (acmd->cmd_cdblen == CDB_GROUP0) {
1517 + /* 6-byte cdb */
1518 + lba_count = (uint16_t)(pkt->pkt_cdbp[4]);
1519 + start_lba_lo = ((uint32_t)(pkt->pkt_cdbp[3]) |
1520 + ((uint32_t)(pkt->pkt_cdbp[2]) << 8) |
1521 + ((uint32_t)((pkt->pkt_cdbp[1]) & 0x1F)
1522 + << 16));
1523 + } else if (acmd->cmd_cdblen == CDB_GROUP1) {
1524 + /* 10-byte cdb */
1525 + lba_count =
1526 + (((uint16_t)(pkt->pkt_cdbp[8])) |
1527 + ((uint16_t)(pkt->pkt_cdbp[7]) << 8));
1557 1528
1558 - start_lba_lo =
1559 - (((uint32_t)(pkt->pkt_cdbp[5])) |
1560 - ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1561 - ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1562 - ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1529 + start_lba_lo =
1530 + (((uint32_t)(pkt->pkt_cdbp[5])) |
1531 + ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1532 + ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1533 + ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1563 1534
1564 - } else if (acmd->cmd_cdblen == CDB_GROUP5) { /* 12-byte cdb */
1565 - lba_count = (
1566 - ((uint32_t)(pkt->pkt_cdbp[9])) |
1567 - ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1568 - ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1569 - ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1535 + } else if (acmd->cmd_cdblen == CDB_GROUP5) {
1536 + /* 12-byte cdb */
1537 + lba_count = (
1538 + ((uint32_t)(pkt->pkt_cdbp[9])) |
1539 + ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1540 + ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1541 + ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1570 1542
1571 - start_lba_lo =
1572 - (((uint32_t)(pkt->pkt_cdbp[5])) |
1573 - ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1574 - ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1575 - ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1543 + start_lba_lo =
1544 + (((uint32_t)(pkt->pkt_cdbp[5])) |
1545 + ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1546 + ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1547 + ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1576 1548
1577 - } else if (acmd->cmd_cdblen == CDB_GROUP4) { /* 16-byte cdb */
1578 - lba_count = (
1579 - ((uint32_t)(pkt->pkt_cdbp[13])) |
1580 - ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1581 - ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1582 - ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1549 + } else if (acmd->cmd_cdblen == CDB_GROUP4) {
1550 + /* 16-byte cdb */
1551 + lba_count = (
1552 + ((uint32_t)(pkt->pkt_cdbp[13])) |
1553 + ((uint32_t)(pkt->pkt_cdbp[12]) << 8) |
1554 + ((uint32_t)(pkt->pkt_cdbp[11]) << 16) |
1555 + ((uint32_t)(pkt->pkt_cdbp[10]) << 24));
1583 1556
1584 - start_lba_lo = (
1585 - ((uint32_t)(pkt->pkt_cdbp[9])) |
1586 - ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1587 - ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1588 - ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1557 + start_lba_lo = (
1558 + ((uint32_t)(pkt->pkt_cdbp[9])) |
1559 + ((uint32_t)(pkt->pkt_cdbp[8]) << 8) |
1560 + ((uint32_t)(pkt->pkt_cdbp[7]) << 16) |
1561 + ((uint32_t)(pkt->pkt_cdbp[6]) << 24));
1589 1562
1590 - start_lba_hi = (
1591 - ((uint32_t)(pkt->pkt_cdbp[5])) |
1592 - ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1593 - ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1594 - ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1595 - }
1563 + start_lba_hi = (
1564 + ((uint32_t)(pkt->pkt_cdbp[5])) |
1565 + ((uint32_t)(pkt->pkt_cdbp[4]) << 8) |
1566 + ((uint32_t)(pkt->pkt_cdbp[3]) << 16) |
1567 + ((uint32_t)(pkt->pkt_cdbp[2]) << 24));
1568 + }
1596 1569
1597 - if (instance->tbolt &&
1598 - ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer) )
1599 - cmn_err(CE_WARN," IO SECTOR COUNT exceeds controller limit 0x%x sectors\n", lba_count);
1570 + if (instance->tbolt &&
1571 + ((lba_count * 512) > mrsas_tbolt_max_cap_maxxfer)) {
1572 + cmn_err(CE_WARN, " IO SECTOR COUNT exceeds "
1573 + "controller limit 0x%x sectors",
1574 + lba_count);
1575 + }
1600 1576
1601 - memset(&io_info, 0, sizeof (struct IO_REQUEST_INFO));
1602 - io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) | start_lba_lo;
1603 - io_info.numBlocks = lba_count;
1604 - io_info.ldTgtId = acmd->device_id;
1577 + bzero(&io_info, sizeof (struct IO_REQUEST_INFO));
1578 + io_info.ldStartBlock = ((uint64_t)start_lba_hi << 32) |
1579 + start_lba_lo;
1580 + io_info.numBlocks = lba_count;
1581 + io_info.ldTgtId = acmd->device_id;
1605 1582
1606 - if (acmd->cmd_flags & CFLAG_DMASEND)
1607 - io_info.isRead = 0;
1608 - else
1609 - io_info.isRead = 1;
1583 + if (acmd->cmd_flags & CFLAG_DMASEND)
1584 + io_info.isRead = 0;
1585 + else
1586 + io_info.isRead = 1;
1610 1587
1611 -
1612 - /*Aquire SYNC MAP UPDATE lock */
1613 - mutex_enter(&instance->sync_map_mtx);
1614 1588
1615 - local_map_ptr = instance->ld_map[(instance->map_id & 1)];
1589 + /* Acquire SYNC MAP UPDATE lock */
1590 + mutex_enter(&instance->sync_map_mtx);
1616 1591
1617 - if ( (MR_TargetIdToLdGet(acmd->device_id, local_map_ptr) >= MAX_LOGICAL_DRIVES) || !instance->fast_path_io ){
1618 - cmn_err(CE_NOTE,
1619 - "Fast Path NOT Possible, targetId >= MAX_LOGICAL_DRIVES || !instance->fast_path_io\n");
1620 - fp_possible = 0;
1621 - /* Set Regionlock flags to BYPASS
1622 - io_request->RaidContext.regLockFlags = 0; */
1623 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1592 + local_map_ptr =
1593 + instance->ld_map[(instance->map_id & 1)];
1594 +
1595 + if ((MR_TargetIdToLdGet(
1596 + acmd->device_id, local_map_ptr) >=
1597 + MAX_LOGICAL_DRIVES) || !instance->fast_path_io) {
1598 + cmn_err(CE_NOTE, "Fast Path NOT Possible, "
1599 + "targetId >= MAX_LOGICAL_DRIVES || "
1600 + "!instance->fast_path_io");
1601 + fp_possible = 0;
1602 + /* Set Regionlock flags to BYPASS */
1603 + /* io_request->RaidContext.regLockFlags = 0; */
1604 + ddi_put8(acc_handle,
1624 1605 &scsi_raid_io->RaidContext.regLockFlags, 0);
1625 - } else {
1626 - if (MR_BuildRaidContext(instance, &io_info,
1627 - &scsi_raid_io->RaidContext, local_map_ptr))
1628 - fp_possible = io_info.fpOkForIo;
1629 - }
1606 + } else {
1607 + if (MR_BuildRaidContext(instance, &io_info,
1608 + &scsi_raid_io->RaidContext, local_map_ptr))
1609 + fp_possible = io_info.fpOkForIo;
1610 + }
1630 1611
1631 - if (!enable_fp) {
1632 - fp_possible = 0;
1633 - }
1634 - con_log(CL_ANN1, (CE_NOTE,
1635 - "enable_fp %d instance->fast_path_io %d fp_possible %d \n",
1636 - enable_fp, instance->fast_path_io, fp_possible));
1612 + if (!enable_fp)
1613 + fp_possible = 0;
1637 1614
1615 + con_log(CL_ANN1, (CE_NOTE, "enable_fp %d "
1616 + "instance->fast_path_io %d fp_possible %d",
1617 + enable_fp, instance->fast_path_io, fp_possible));
1618 +
1638 1619 if (fp_possible) {
1639 1620
1640 - /* Check for DIF enabled LD */
1621 + /* Check for DIF enabled LD */
1641 1622 if (MR_CheckDIF(acmd->device_id, local_map_ptr)) {
1642 1623 /* Prepare 32 Byte CDB for DIF capable Disk */
1643 1624 mrsas_tbolt_prepare_cdb(instance,
1644 - scsi_raid_io->CDB.CDB32,
1645 - &io_info,
1646 - scsi_raid_io,
1647 - start_lba_lo);
1625 + scsi_raid_io->CDB.CDB32,
1626 + &io_info, scsi_raid_io, start_lba_lo);
1648 1627 } else {
1649 1628 mrsas_tbolt_set_pd_lba(scsi_raid_io->CDB.CDB32,
1650 - (uint8_t *)&pd_cmd_cdblen, io_info.pdBlock, io_info.numBlocks, 0);
1651 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1652 - &scsi_raid_io->IoFlags,
1653 - pd_cmd_cdblen);
1629 + (uint8_t *)&pd_cmd_cdblen,
1630 + io_info.pdBlock, io_info.numBlocks);
1631 + ddi_put16(acc_handle,
1632 + &scsi_raid_io->IoFlags, pd_cmd_cdblen);
1654 1633 }
1655 1634
1656 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1657 - &scsi_raid_io->Function,
1635 + ddi_put8(acc_handle, &scsi_raid_io->Function,
1658 1636 MPI2_FUNCTION_SCSI_IO_REQUEST);
1659 1637
1660 1638 ReqDescUnion->SCSIIO.RequestFlags =
1661 1639 (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1662 1640 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1663 1641
1664 1642 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1665 - uint8_t regLockFlags = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1666 - &scsi_raid_io->RaidContext.regLockFlags);
1667 - uint16_t IoFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1668 - &scsi_raid_io->IoFlags);
1643 + uint8_t regLockFlags = ddi_get8(acc_handle,
1644 + &scsi_raid_io->RaidContext.regLockFlags);
1645 + uint16_t IoFlags = ddi_get16(acc_handle,
1646 + &scsi_raid_io->IoFlags);
1669 1647
1670 1648 if (regLockFlags == REGION_TYPE_UNUSED)
1671 - ReqDescUnion->SCSIIO.RequestFlags =
1672 - (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1649 + ReqDescUnion->SCSIIO.RequestFlags =
1650 + (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1651 + MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1673 1652
1674 - IoFlags |= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1675 - regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | MR_RL_FLAGS_SEQ_NUM_ENABLE);
1653 + IoFlags |=
1654 + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
1655 + regLockFlags |=
1656 + (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
1657 + MR_RL_FLAGS_SEQ_NUM_ENABLE);
1676 1658
1677 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1678 - &scsi_raid_io->ChainOffset, 0);
1679 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1680 - &scsi_raid_io->RaidContext.nsegType, ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | MPI2_TYPE_CUDA));
1681 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1682 - &scsi_raid_io->RaidContext.regLockFlags, regLockFlags);
1683 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1684 - &scsi_raid_io->IoFlags, IoFlags);
1659 + ddi_put8(acc_handle,
1660 + &scsi_raid_io->ChainOffset, 0);
1661 + ddi_put8(acc_handle,
1662 + &scsi_raid_io->RaidContext.nsegType,
1663 + ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1664 + MPI2_TYPE_CUDA));
1665 + ddi_put8(acc_handle,
1666 + &scsi_raid_io->RaidContext.regLockFlags,
1667 + regLockFlags);
1668 + ddi_put16(acc_handle,
1669 + &scsi_raid_io->IoFlags, IoFlags);
1685 1670 }
1686 1671
1687 - if ((instance->load_balance_info[acmd->device_id].loadBalanceFlag) && (io_info.isRead)) {
1688 - io_info.devHandle = get_updated_dev_handle(&instance->load_balance_info[acmd->device_id], &io_info);
1689 - cmd->load_balance_flag |= MEGASAS_LOAD_BALANCE_FLAG;
1690 - } else
1691 - cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
1672 + if ((instance->load_balance_info[
1673 + acmd->device_id].loadBalanceFlag) &&
1674 + (io_info.isRead)) {
1675 + io_info.devHandle =
1676 + get_updated_dev_handle(&instance->
1677 + load_balance_info[acmd->device_id],
1678 + &io_info);
1679 + cmd->load_balance_flag |=
1680 + MEGASAS_LOAD_BALANCE_FLAG;
1681 + } else {
1682 + cmd->load_balance_flag &=
1683 + ~MEGASAS_LOAD_BALANCE_FLAG;
1684 + }
1692 1685
1693 - ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1694 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1695 - &scsi_raid_io->DevHandle,
1686 + ReqDescUnion->SCSIIO.DevHandle = io_info.devHandle;
1687 + ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1696 1688 io_info.devHandle);
1697 1689
1698 1690 } else {
1699 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1700 - &scsi_raid_io->Function,
1691 + ddi_put8(acc_handle, &scsi_raid_io->Function,
1701 1692 MPI2_FUNCTION_LD_IO_REQUEST);
1702 1693
1703 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1694 + ddi_put16(acc_handle,
1704 1695 &scsi_raid_io->DevHandle, acmd->device_id);
1705 1696
1706 1697 ReqDescUnion->SCSIIO.RequestFlags =
1707 1698 (MPI2_REQ_DESCRIPT_FLAGS_LD_IO <<
1708 1699 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1709 1700
1710 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1711 - &scsi_raid_io->RaidContext.timeoutValue, local_map_ptr->raidMap.fpPdIoTimeoutSec);
1701 + ddi_put16(acc_handle,
1702 + &scsi_raid_io->RaidContext.timeoutValue,
1703 + local_map_ptr->raidMap.fpPdIoTimeoutSec);
1712 1704
1713 1705 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
1714 - uint8_t regLockFlags = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1715 - &scsi_raid_io->RaidContext.regLockFlags);
1706 + uint8_t regLockFlags = ddi_get8(acc_handle,
1707 + &scsi_raid_io->RaidContext.regLockFlags);
1716 1708
1717 - if (regLockFlags == REGION_TYPE_UNUSED)
1718 - ReqDescUnion->SCSIIO.RequestFlags =
1719 - (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1709 + if (regLockFlags == REGION_TYPE_UNUSED) {
1710 + ReqDescUnion->SCSIIO.RequestFlags =
1711 + (MPI2_REQ_DESCRIPT_FLAGS_NO_LOCK <<
1712 + MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1713 + }
1720 1714
1721 - regLockFlags |= (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | MR_RL_FLAGS_SEQ_NUM_ENABLE);
1715 + regLockFlags |=
1716 + (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
1717 + MR_RL_FLAGS_SEQ_NUM_ENABLE);
1722 1718
1723 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1724 - &scsi_raid_io->RaidContext.nsegType, ((0x01 << MPI2_NSEG_FLAGS_SHIFT) | MPI2_TYPE_CUDA));
1725 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1726 - &scsi_raid_io->RaidContext.regLockFlags, regLockFlags);
1719 + ddi_put8(acc_handle,
1720 + &scsi_raid_io->RaidContext.nsegType,
1721 + ((0x01 << MPI2_NSEG_FLAGS_SHIFT) |
1722 + MPI2_TYPE_CUDA));
1723 + ddi_put8(acc_handle,
1724 + &scsi_raid_io->RaidContext.regLockFlags,
1725 + regLockFlags);
1727 1726 }
1728 -
1729 1727 } /* Not FP */
1730 1728
1731 - /*Release SYNC MAP UPDATE lock */
1729 + /* Release SYNC MAP UPDATE lock */
1732 1730 mutex_exit(&instance->sync_map_mtx);
1733 1731
1734 -
1735 - /* Set sense buffer physical address/length in scsi_io_request.*/
1736 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1737 - &scsi_raid_io->SenseBufferLowAddress,
1732 +
1733 + /*
1734 + * Set sense buffer physical address/length in scsi_io_request.
1735 + */
1736 + ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
1738 1737 cmd->sense_phys_addr1);
1739 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1740 - &scsi_raid_io->SenseBufferLength,
1741 - SENSE_LENGTH);
1742 -
1743 - /* Construct SGL*/
1744 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1745 - &scsi_raid_io->SGLOffset0,
1738 + ddi_put8(acc_handle, &scsi_raid_io->SenseBufferLength,
1739 + SENSE_LENGTH);
1740 +
1741 + /* Construct SGL */
1742 + ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1746 1743 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1747 1744
1748 - mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1745 + (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1749 1746 scsi_raid_io, &datalen);
1750 1747
1751 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1752 - &scsi_raid_io->DataLength, datalen);
1748 + ddi_put32(acc_handle, &scsi_raid_io->DataLength, datalen);
1753 1749
1754 1750 break;
1755 -
1756 - }
1757 - else {
1758 1751 #ifndef PDSUPPORT /* if PDSUPPORT, skip break and fall through */
1752 + } else {
1759 1753 break;
1760 1754 #endif
1761 1755 }
1762 1756 /* fall through For all non-rd/wr cmds */
1763 1757 default:
1764 1758 switch (pkt->pkt_cdbp[0]) {
1765 - case 0x35: { // SCMD_SYNCHRONIZE_CACHE
1759 + case 0x35: { /* SCMD_SYNCHRONIZE_CACHE */
1766 1760 return_raid_msg_pkt(instance, cmd);
1767 1761 *cmd_done = 1;
1768 1762 return (NULL);
1769 1763 }
1770 1764
1771 1765 case SCMD_MODE_SENSE:
1772 1766 case SCMD_MODE_SENSE_G1: {
1773 1767 union scsi_cdb *cdbp;
1774 1768 uint16_t page_code;
1775 1769
1776 1770 cdbp = (void *)pkt->pkt_cdbp;
1777 1771 page_code = (uint16_t)cdbp->cdb_un.sg.scsi[0];
1778 1772 switch (page_code) {
1779 1773 case 0x3:
1780 1774 case 0x4:
1781 1775 (void) mrsas_mode_sense_build(pkt);
1782 1776 return_raid_msg_pkt(instance, cmd);
1783 1777 *cmd_done = 1;
1784 1778 return (NULL);
1785 1779 }
1786 1780 break;
1787 1781 }
1788 1782
1789 1783 default: {
1790 - /* Here we need to handle PASSTHRU for
1791 - Logical Devices. Like Inquiry etc.*/
1784 + /*
1785 +			 * Here we need to handle PASSTHRU commands
1786 +			 * for logical devices, e.g. INQUIRY.
1787 + */
1792 1788
1793 - if(!(acmd->islogical)) {
1789 + if (!(acmd->islogical)) {
1794 1790
1795 - /* Aquire SYNC MAP UPDATE lock */
1791 + /* Acquire SYNC MAP UPDATE lock */
1796 1792 mutex_enter(&instance->sync_map_mtx);
1797 1793
1798 - local_map_ptr = instance->ld_map[(instance->map_id & 1)];
1799 -
1800 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1801 - &scsi_raid_io->Function, MPI2_FUNCTION_SCSI_IO_REQUEST);
1802 -
1794 + local_map_ptr =
1795 + instance->ld_map[(instance->map_id & 1)];
1796 +
1797 + ddi_put8(acc_handle, &scsi_raid_io->Function,
1798 + MPI2_FUNCTION_SCSI_IO_REQUEST);
1799 +
1803 1800 ReqDescUnion->SCSIIO.RequestFlags =
1804 - (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1805 - MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1801 + (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
1802 + MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1806 1803
1807 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1808 - &scsi_raid_io->DevHandle,
1809 - local_map_ptr->raidMap.devHndlInfo[acmd->device_id].curDevHdl);
1810 -
1804 + ddi_put16(acc_handle, &scsi_raid_io->DevHandle,
1805 + local_map_ptr->raidMap.
1806 + devHndlInfo[acmd->device_id].curDevHdl);
1811 1807
1812 - /*Set regLockFlasgs to REGION_TYPE_BYPASS */
1813 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1814 - &scsi_raid_io->RaidContext.regLockFlags, 0);
1815 - ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
1816 - &scsi_raid_io->RaidContext.regLockRowLBA, 0);
1817 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1818 - &scsi_raid_io->RaidContext.regLockLength, 0);
1819 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->RaidContext.RAIDFlags,
1820 - MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1821 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1822 - &scsi_raid_io->RaidContext.timeoutValue, local_map_ptr->raidMap.fpPdIoTimeoutSec);
1823 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1824 - &scsi_raid_io->RaidContext.ldTargetId, acmd->device_id);
1825 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1826 - &scsi_raid_io->LUN[1], acmd->lun);
1827 1808
1809 +				/* Set regLockFlags to REGION_TYPE_BYPASS */
1810 + ddi_put8(acc_handle,
1811 + &scsi_raid_io->RaidContext.regLockFlags, 0);
1812 + ddi_put64(acc_handle,
1813 + &scsi_raid_io->RaidContext.regLockRowLBA,
1814 + 0);
1815 + ddi_put32(acc_handle,
1816 + &scsi_raid_io->RaidContext.regLockLength,
1817 + 0);
1818 + ddi_put8(acc_handle,
1819 + &scsi_raid_io->RaidContext.RAIDFlags,
1820 + MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
1821 + MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
1822 + ddi_put16(acc_handle,
1823 + &scsi_raid_io->RaidContext.timeoutValue,
1824 + local_map_ptr->raidMap.fpPdIoTimeoutSec);
1825 + ddi_put16(acc_handle,
1826 + &scsi_raid_io->RaidContext.ldTargetId,
1827 + acmd->device_id);
1828 + ddi_put8(acc_handle,
1829 + &scsi_raid_io->LUN[1], acmd->lun);
1830 +
1828 1831 /* Release SYNC MAP UPDATE lock */
1829 1832 mutex_exit(&instance->sync_map_mtx);
1830 -
1833 +
1831 1834 } else {
1832 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1833 - &scsi_raid_io->Function, MPI2_FUNCTION_LD_IO_REQUEST);
1834 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1835 - &scsi_raid_io->LUN[1], acmd->lun);
1836 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
1837 - &scsi_raid_io->DevHandle, acmd->device_id);
1838 - ReqDescUnion->SCSIIO.RequestFlags =
1839 - (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1835 + ddi_put8(acc_handle, &scsi_raid_io->Function,
1836 + MPI2_FUNCTION_LD_IO_REQUEST);
1837 + ddi_put8(acc_handle,
1838 + &scsi_raid_io->LUN[1], acmd->lun);
1839 + ddi_put16(acc_handle,
1840 + &scsi_raid_io->DevHandle, acmd->device_id);
1841 + ReqDescUnion->SCSIIO.RequestFlags =
1842 + (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
1843 + MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1840 1844 }
1841 1845
1842 - /* Set sense buffer physical address/length in scsi_io_request.*/
1843 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1846 + /*
1847 + * Set sense buffer physical address/length in
1848 + * scsi_io_request.
1849 + */
1850 + ddi_put32(acc_handle,
1844 1851 &scsi_raid_io->SenseBufferLowAddress,
1845 1852 cmd->sense_phys_addr1);
1846 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1853 + ddi_put8(acc_handle,
1847 1854 &scsi_raid_io->SenseBufferLength, SENSE_LENGTH);
1848 1855
1849 - /* Construct SGL*/
1850 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
1851 - &scsi_raid_io->SGLOffset0,
1856 + /* Construct SGL */
1857 + ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
1852 1858 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
1853 1859
1854 - mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1860 + (void) mr_sas_tbolt_build_sgl(instance, acmd, cmd,
1855 1861 scsi_raid_io, &datalen);
1856 1862
1857 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
1863 + ddi_put32(acc_handle,
1858 1864 &scsi_raid_io->DataLength, datalen);
1859 1865
1860 1866
1861 1867 con_log(CL_ANN, (CE_CONT,
1862 1868 "tbolt_build_cmd CDB[0] =%x, TargetID =%x\n",
1863 1869 pkt->pkt_cdbp[0], acmd->device_id));
1864 1870 con_log(CL_DLEVEL1, (CE_CONT,
1865 1871 "data length = %x\n",
1866 1872 scsi_raid_io->DataLength));
1867 1873 con_log(CL_DLEVEL1, (CE_CONT,
1868 1874 "cdb length = %x\n",
1869 1875 acmd->cmd_cdblen));
1870 - }
1876 + }
1871 1877 break;
1872 1878 }
1873 1879
1874 1880 }
1875 -#ifdef lint
1876 - context = context;
1877 -#endif
1878 1881
1879 1882 return (cmd);
1880 1883 }
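/*
 * Editor's sketch (standalone): the READ(10)/WRITE(10) decode from the
 * CDB_GROUP1 branch of mrsas_tbolt_build_cmd above.  Per SBC, CDB bytes 2-5
 * carry the big-endian LBA and bytes 7-8 the transfer length.  The CDB below
 * is a hypothetical READ(10) for LBA 0x12345678, 256 blocks.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint8_t cdb[10] = {
		0x28, 0x00, 0x12, 0x34, 0x56, 0x78, 0x00, 0x01, 0x00, 0x00
	};

	uint32_t start_lba_lo =
	    (((uint32_t)(cdb[5])) |
	    ((uint32_t)(cdb[4]) << 8) |
	    ((uint32_t)(cdb[3]) << 16) |
	    ((uint32_t)(cdb[2]) << 24));
	uint16_t lba_count =
	    (((uint16_t)(cdb[8])) | ((uint16_t)(cdb[7]) << 8));

	/* prints: LBA 0x12345678, 256 blocks */
	printf("LBA 0x%x, %u blocks\n", start_lba_lo, (unsigned)lba_count);
	return (0);
}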
1881 1884
1882 1885 /*
1883 1886 * mrsas_tbolt_tran_init_pkt - allocate & initialize a scsi_pkt structure
1884 1887 * @ap:
1885 1888 * @pkt:
1886 1889 * @bp:
1887 1890 * @cmdlen:
1888 1891 * @statuslen:
1889 1892 * @tgtlen:
1890 1893 * @flags:
1891 1894 * @callback:
1892 1895 *
1893 1896 * The tran_init_pkt() entry point allocates and initializes a scsi_pkt
1894 1897 * structure and DMA resources for a target driver request. The
1895 1898 * tran_init_pkt() entry point is called when the target driver calls the
1896 1899 * SCSA function scsi_init_pkt(). Each call of the tran_init_pkt() entry point
1897 1900 * is a request to perform one or more of three possible services:
1898 1901 * - allocation and initialization of a scsi_pkt structure
1899 1902 * - allocation of DMA resources for data transfer
1900 1903 * - reallocation of DMA resources for the next portion of the data transfer
1901 1904 */
1902 1905 struct scsi_pkt *
1903 1906 mrsas_tbolt_tran_init_pkt(struct scsi_address *ap,
1904 1907 register struct scsi_pkt *pkt,
1905 1908 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
1906 1909 int flags, int (*callback)(), caddr_t arg)
1907 1910 {
1908 1911 struct scsa_cmd *acmd;
1909 1912 struct mrsas_instance *instance;
1910 1913 struct scsi_pkt *new_pkt;
1911 1914
1912 1915 instance = ADDR2MR(ap);
1913 1916
1914 1917 /* step #1 : pkt allocation */
1915 1918 if (pkt == NULL) {
1916 1919 pkt = scsi_hba_pkt_alloc(instance->dip, ap, cmdlen, statuslen,
1917 1920 tgtlen, sizeof (struct scsa_cmd), callback, arg);
1918 1921 if (pkt == NULL) {
1919 1922 return (NULL);
1920 1923 }
1921 1924
1922 1925 acmd = PKT2CMD(pkt);
1923 1926
1924 1927 /*
1925 1928 * Initialize the new pkt - we redundantly initialize
1926 1929 * all the fields for illustrative purposes.
1927 1930 */
1928 1931 acmd->cmd_pkt = pkt;
1929 1932 acmd->cmd_flags = 0;
1930 1933 acmd->cmd_scblen = statuslen;
1931 1934 acmd->cmd_cdblen = cmdlen;
1932 1935 acmd->cmd_dmahandle = NULL;
1933 1936 acmd->cmd_ncookies = 0;
1934 1937 acmd->cmd_cookie = 0;
1935 1938 acmd->cmd_cookiecnt = 0;
1936 1939 acmd->cmd_nwin = 0;
1937 1940
1938 1941 pkt->pkt_address = *ap;
1939 1942 pkt->pkt_comp = (void (*)())NULL;
1940 1943 pkt->pkt_flags = 0;
1941 1944 pkt->pkt_time = 0;
1942 1945 pkt->pkt_resid = 0;
1943 1946 pkt->pkt_state = 0;
1944 1947 pkt->pkt_statistics = 0;
1945 1948 pkt->pkt_reason = 0;
1946 1949 new_pkt = pkt;
1947 1950 } else {
1948 1951 acmd = PKT2CMD(pkt);
1949 1952 new_pkt = NULL;
1950 1953 }
1951 1954
1952 1955 /* step #2 : dma allocation/move */
1953 1956 if (bp && bp->b_bcount != 0) {
1954 1957 if (acmd->cmd_dmahandle == NULL) {
1955 1958 if (mrsas_dma_alloc(instance, pkt, bp, flags,
1956 1959 callback) == DDI_FAILURE) {
1957 1960 if (new_pkt) {
1958 1961 scsi_hba_pkt_free(ap, new_pkt);
1959 1962 }
1960 1963 return ((struct scsi_pkt *)NULL);
1961 1964 }
1962 1965 } else {
1963 1966 if (mrsas_dma_move(instance, pkt, bp) == DDI_FAILURE) {
1964 1967 return ((struct scsi_pkt *)NULL);
1965 1968 }
1966 1969 }
1967 1970 }
1968 1971 return (pkt);
1969 1972 }
1970 1973
1971 1974
1972 1975 uint32_t
1973 1976 tbolt_read_fw_status_reg(struct mrsas_instance *instance)
1974 1977 {
1975 1978 return ((uint32_t)RD_OB_SCRATCH_PAD_0(instance));
1976 1979 }
1977 1980
1978 1981 void
1979 1982 tbolt_issue_cmd(struct mrsas_cmd *cmd, struct mrsas_instance *instance)
1980 1983 {
1981 1984 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
1982 1985 atomic_add_16(&instance->fw_outstanding, 1);
1983 1986
1984 1987 struct scsi_pkt *pkt;
1985 1988
1986 - con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1989 + con_log(CL_ANN1,
1990 + (CE_NOTE, "tbolt_issue_cmd: cmd->[SMID]=0x%X", cmd->SMID));
1987 1991
1988 1992 con_log(CL_DLEVEL1, (CE_CONT,
1989 - " [req desc Words] %llx \n", req_desc->Words));
1993 + " [req desc Words] %" PRIx64 " \n", req_desc->Words));
1990 1994 con_log(CL_DLEVEL1, (CE_CONT,
1991 - " [req desc low part] %x \n", req_desc->Words));
1995 + " [req desc low part] %x \n",
1996 +	    (uint_t)(req_desc->Words & 0xffffffff)));
1992 1997 con_log(CL_DLEVEL1, (CE_CONT,
1993 - " [req desc high part] %x \n", (req_desc->Words >> 32)));
1998 + " [req desc high part] %x \n", (uint_t)(req_desc->Words >> 32)));
1994 1999 pkt = cmd->pkt;
1995 -
2000 +
1996 2001 if (pkt) {
1997 2002 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
1998 2003 "ISSUED CMD TO FW : called : cmd:"
1999 2004 ": %p instance : %p pkt : %p pkt_time : %x\n",
2000 2005 gethrtime(), (void *)cmd, (void *)instance,
2001 2006 (void *)pkt, cmd->drv_pkt_time));
2002 2007 if (instance->adapterresetinprogress) {
2003 2008 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2004 2009 con_log(CL_ANN, (CE_NOTE,
2005 2010 "TBOLT Reset the scsi_pkt timer"));
2006 2011 } else {
2007 2012 push_pending_mfi_pkt(instance, cmd);
2008 2013 }
2009 2014
2010 2015 } else {
2011 2016 con_log(CL_ANN1, (CE_CONT, "%llx :TBOLT issue_cmd_ppc:"
2012 2017 "ISSUED CMD TO FW : called : cmd : %p, instance: %p"
2013 2018 "(NO PKT)\n", gethrtime(), (void *)cmd, (void *)instance));
2014 2019 }
2015 2020
2016 2021 /* Issue the command to the FW */
2017 2022 mutex_enter(&instance->reg_write_mtx);
2018 2023 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2019 2024 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2020 2025 mutex_exit(&instance->reg_write_mtx);
2021 2026 }
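/*
 * Editor's sketch (standalone): the low/high split performed by the two
 * queue-port writes above.  The request descriptor is one 64-bit value that
 * the hardware receives as two 32-bit register writes, low word first; the
 * descriptor value here is made up.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main(void)
{
	uint64_t Words = 0x0000000800010006ULL;	/* hypothetical descriptor */
	uint32_t low  = (uint32_t)(Words & 0xffffffff);
	uint32_t high = (uint32_t)(Words >> 32);

	printf("WR_IB_LOW_QPORT  <- %08" PRIx32 "\n", low);
	printf("WR_IB_HIGH_QPORT <- %08" PRIx32 "\n", high);

	/* the two halves round-trip to the original descriptor */
	printf("ok=%d\n", (((uint64_t)high << 32) | low) == Words);
	return (0);
}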
2022 2027
2023 2028 /*
2024 2029 * issue_cmd_in_sync_mode
2025 2030 */
2026 2031 int
2027 2032 tbolt_issue_cmd_in_sync_mode(struct mrsas_instance *instance,
2028 2033 struct mrsas_cmd *cmd)
2029 2034 {
2030 2035 int i;
2031 2036 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2032 2037 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2033 2038
2034 2039 struct mrsas_header *hdr;
2035 2040 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2036 2041
2037 - con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X", cmd->SMID));
2042 + con_log(CL_ANN,
2043 + (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: cmd->[SMID]=0x%X",
2044 + cmd->SMID));
2038 2045
2039 2046
2040 2047 if (instance->adapterresetinprogress) {
2041 2048 cmd->drv_pkt_time = ddi_get16
2042 2049 (cmd->frame_dma_obj.acc_handle, &hdr->timeout);
2043 2050 if (cmd->drv_pkt_time < debug_timeout_g)
2044 2051 cmd->drv_pkt_time = (uint16_t)debug_timeout_g;
2045 2052 con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode:"
2046 - "RESET-IN-PROGRESS, issue cmd & return.\n"));
2053 + "RESET-IN-PROGRESS, issue cmd & return."));
2047 2054
2048 2055 mutex_enter(&instance->reg_write_mtx);
2049 2056 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2050 2057 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2051 2058 mutex_exit(&instance->reg_write_mtx);
2052 2059
2053 2060 return (DDI_SUCCESS);
2054 2061 } else {
2055 - con_log(CL_ANN1, (CE_NOTE, "tbolt_issue_cmd_in_sync_mode: pushing the pkt\n"));
2062 + con_log(CL_ANN1, (CE_NOTE,
2063 + "tbolt_issue_cmd_in_sync_mode: pushing the pkt"));
2056 2064 push_pending_mfi_pkt(instance, cmd);
2057 2065 }
2058 2066
2059 2067 con_log(CL_DLEVEL2, (CE_NOTE,
2060 - "HighQport offset :%lx",
2061 - (uint32_t *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
2068 + "HighQport offset :%p",
2069 + (void *)((uintptr_t)(instance)->regmap + IB_HIGH_QPORT)));
2062 2070 con_log(CL_DLEVEL2, (CE_NOTE,
2063 - "LowQport offset :%lx",
2064 - (uint32_t *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
2071 + "LowQport offset :%p",
2072 + (void *)((uintptr_t)(instance)->regmap + IB_LOW_QPORT)));
2065 2073
2066 2074 cmd->sync_cmd = MRSAS_TRUE;
2067 2075 cmd->cmd_status = ENODATA;
2068 2076
2069 2077
2070 2078 mutex_enter(&instance->reg_write_mtx);
2071 2079 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2072 2080 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2073 2081 mutex_exit(&instance->reg_write_mtx);
2074 2082
2075 2083 con_log(CL_ANN1, (CE_NOTE,
2076 - " req desc high part %x \n", (req_desc->Words >> 32)));
2077 - con_log(CL_ANN1, (CE_NOTE,
2078 - " req desc low part %x \n", req_desc->Words));
2084 + " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2085 + con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2086 + (uint_t)(req_desc->Words & 0xffffffff)));
2079 2087
2080 2088 mutex_enter(&instance->int_cmd_mtx);
2081 2089 for (i = 0; i < msecs && (cmd->cmd_status == ENODATA); i++) {
2082 2090 cv_wait(&instance->int_cmd_cv, &instance->int_cmd_mtx);
2083 2091 }
2084 2092 mutex_exit(&instance->int_cmd_mtx);
2085 2093
2086 2094
2087 2095 if (i < (msecs -1)) {
2088 2096 return (DDI_SUCCESS);
2089 2097 } else {
2090 2098 return (DDI_FAILURE);
2091 2099 }
2092 2100 }
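/*
 * Editor's note: the wait loop above counts cv_wait() wakeups, not elapsed
 * milliseconds, so the effective timeout depends on how often the completion
 * path signals.  A deadline-oriented sketch using cv_reltimedwait(9F) is
 * shown below; it assumes the illumos cv_reltimedwait interface and the
 * instance fields used above, and each spurious wakeup restarts the window,
 * so it is an illustration rather than a drop-in replacement.
 */
static int
xx_wait_for_completion(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
{
	clock_t delta = drv_usectohz(MFI_POLL_TIMEOUT_SECS * MICROSEC);

	mutex_enter(&instance->int_cmd_mtx);
	while (cmd->cmd_status == ENODATA) {
		if (cv_reltimedwait(&instance->int_cmd_cv,
		    &instance->int_cmd_mtx, delta, TR_CLOCK_TICK) == -1) {
			/* deadline expired with no completion */
			mutex_exit(&instance->int_cmd_mtx);
			return (DDI_FAILURE);
		}
	}
	mutex_exit(&instance->int_cmd_mtx);
	return (DDI_SUCCESS);
}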
2093 2101
2094 2102 /*
2095 2103 * issue_cmd_in_poll_mode
2096 2104 */
2097 2105 int
2098 2106 tbolt_issue_cmd_in_poll_mode(struct mrsas_instance *instance,
2099 2107 struct mrsas_cmd *cmd)
2100 2108 {
2101 2109 int i;
2102 2110 uint16_t flags;
2103 2111 uint32_t msecs = MFI_POLL_TIMEOUT_SECS * MILLISEC;
2104 2112 struct mrsas_header *frame_hdr;
2105 2113
2106 - con_log(CL_ANN, (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X", cmd->SMID));
2107 -
2114 + con_log(CL_ANN,
2115 + (CE_NOTE, "tbolt_issue_cmd_in_poll_mode: cmd->[SMID]=0x%X",
2116 + cmd->SMID));
2117 +
2108 2118 MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc = cmd->request_desc;
2109 2119
2110 2120 frame_hdr = (struct mrsas_header *)&cmd->frame->hdr;
2111 2121 ddi_put8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status,
2112 2122 MFI_CMD_STATUS_POLL_MODE);
2113 2123 flags = ddi_get16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags);
2114 - flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2124 + flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
2115 2125 ddi_put16(cmd->frame_dma_obj.acc_handle, &frame_hdr->flags, flags);
2116 2126
2127 + con_log(CL_ANN1, (CE_NOTE, " req desc low part %x",
2128 + (uint_t)(req_desc->Words & 0xffffffff)));
2117 2129 con_log(CL_ANN1, (CE_NOTE,
2118 - " req desc low part %x \n", req_desc->Words));
2119 - con_log(CL_ANN1, (CE_NOTE,
2120 - " req desc high part %x \n", (req_desc->Words >> 32)));
2130 + " req desc high part %x", (uint_t)(req_desc->Words >> 32)));
2121 2131
2122 2132 /* issue the frame using inbound queue port */
2123 2133 mutex_enter(&instance->reg_write_mtx);
2124 2134 WR_IB_LOW_QPORT((uint32_t)(req_desc->Words), instance);
2125 2135 WR_IB_HIGH_QPORT((uint32_t)(req_desc->Words >> 32), instance);
2126 2136 mutex_exit(&instance->reg_write_mtx);
2127 2137
2128 2138 for (i = 0; i < msecs && (
2129 2139 ddi_get8(cmd->frame_dma_obj.acc_handle, &frame_hdr->cmd_status)
2130 2140 == MFI_CMD_STATUS_POLL_MODE); i++) {
2131 2141 /* wait for cmd_status to change from 0xFF */
2132 2142 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
2133 2143 }
2134 2144
2135 2145 if (ddi_get8(cmd->frame_dma_obj.acc_handle,
2136 2146 &frame_hdr->cmd_status) == MFI_CMD_STATUS_POLL_MODE) {
2137 2147 con_log(CL_ANN1, (CE_NOTE,
2138 - " cmd failed %x \n", (req_desc->Words)));
2148 + " cmd failed %" PRIx64, (req_desc->Words)));
2139 2149 return (DDI_FAILURE);
2140 2150 }
2141 2151
2142 2152 return (DDI_SUCCESS);
2143 2153 }
2144 2154
2145 2155 void
2146 2156 tbolt_enable_intr(struct mrsas_instance *instance)
2147 2157 {
2148 - uint32_t mask;
2149 -
2150 2158 /* TODO: For Thunderbolt/Invader also clear intr on enable */
2151 - //writel(~0, ®s->outbound_intr_status);
2152 - //readl(®s->outbound_intr_status);
2159 + /* writel(~0, ®s->outbound_intr_status); */
2160 + /* readl(®s->outbound_intr_status); */
2153 2161
2154 2162 WR_OB_INTR_MASK(~(MFI_FUSION_ENABLE_INTERRUPT_MASK), instance);
2155 2163
2156 2164 /* dummy read to force PCI flush */
2157 - mask = RD_OB_INTR_MASK(instance);
2165 + (void) RD_OB_INTR_MASK(instance);
2158 2166
2159 2167 }
2160 2168
2161 2169 void
2162 2170 tbolt_disable_intr(struct mrsas_instance *instance)
2163 2171 {
2164 2172 uint32_t mask = 0xFFFFFFFF;
2165 - uint32_t status;
2166 2173
2167 -
2168 2174 WR_OB_INTR_MASK(mask, instance);
2169 2175
2170 2176 /* Dummy readl to force pci flush */
2171 2177
2172 - status = RD_OB_INTR_MASK(instance);
2178 + (void) RD_OB_INTR_MASK(instance);
2173 2179 }
2174 2180
2175 2181
2176 2182 int
2177 2183 tbolt_intr_ack(struct mrsas_instance *instance)
2178 2184 {
2179 2185 uint32_t status;
2180 2186
2181 2187 /* check if it is our interrupt */
2182 2188 status = RD_OB_INTR_STATUS(instance);
2183 2189 con_log(CL_ANN1, (CE_NOTE,
2184 - "chkpnt: Entered tbolt_intr_ack status = %d \n", status));
2190 + "chkpnt: Entered tbolt_intr_ack status = %d", status));
2185 2191
2186 2192 if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2187 2193 return (DDI_INTR_UNCLAIMED);
2188 2194 }
2189 2195
2196 + if (mrsas_check_acc_handle(instance->regmap_handle) != DDI_SUCCESS) {
2197 + ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2198 + return (DDI_INTR_UNCLAIMED);
2199 + }
2200 +
2190 2201 if ((status & 1) || (status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) {
2191 2202 /* clear the interrupt by writing back the same value */
2192 2203 WR_OB_INTR_STATUS(status, instance);
2193 2204 /* dummy READ */
2194 - RD_OB_INTR_STATUS(instance);
2195 - }
2205 + (void) RD_OB_INTR_STATUS(instance);
2206 + }
2196 2207 return (DDI_INTR_CLAIMED);
2197 2208 }
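/*
 * Editor's sketch (standalone): the claim/clear protocol of tbolt_intr_ack
 * above, simulated in user space.  The status register is read, tested
 * against the enable mask, cleared by writing the same value back
 * (write-one-to-clear semantics), and then read once more to flush the
 * posted write.  The mask and register values are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define	XX_ENABLE_INTR_MASK	0x00000009u	/* hypothetical */

static uint32_t xx_ob_intr_status = 0x00000009;	/* pretend register */

static uint32_t xx_rd_status(void) { return (xx_ob_intr_status); }
static void xx_wr_status(uint32_t v) { xx_ob_intr_status &= ~v; } /* W1C */

int
main(void)
{
	uint32_t status = xx_rd_status();

	if (!(status & XX_ENABLE_INTR_MASK)) {
		printf("UNCLAIMED\n");	/* not our interrupt */
		return (1);
	}

	xx_wr_status(status);		/* clear by writing back the value */
	(void) xx_rd_status();		/* dummy read forces the flush */
	printf("CLAIMED, status now %x\n", (unsigned)xx_ob_intr_status);
	return (0);
}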
2198 2209
2199 2210 /*
2200 2211 * get_raid_msg_pkt : Get a command from the free pool
2201 2212 * After successful allocation, the caller of this routine
2202 2213 * must clear the frame buffer (memset to zero) before
2203 2214 * using the packet further.
2204 2215 *
2205 2216 * ***** Note *****
2206 2217 * After clearing the frame buffer the context id of the
2207 2218 * frame buffer SHOULD be restored back.
2208 2219 */
2209 2220
2210 2221 struct mrsas_cmd *
2211 2222 get_raid_msg_pkt(struct mrsas_instance *instance)
2212 2223 {
2213 2224 mlist_t *head = &instance->cmd_pool_list;
2214 2225 struct mrsas_cmd *cmd = NULL;
2215 2226
2216 2227 mutex_enter(&instance->cmd_pool_mtx);
2217 2228 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2218 2229
2219 2230
2220 2231 if (!mlist_empty(head)) {
2221 2232 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2222 2233 mlist_del_init(head->next);
2223 2234 }
2224 2235 if (cmd != NULL) {
2225 2236 cmd->pkt = NULL;
2226 2237 cmd->retry_count_for_ocr = 0;
2227 2238 cmd->drv_pkt_time = 0;
2228 2239 }
2229 2240 mutex_exit(&instance->cmd_pool_mtx);
2230 2241
2231 2242 if (cmd != NULL)
2232 2243 bzero(cmd->scsi_io_request,
2233 2244 sizeof (Mpi2RaidSCSIIORequest_t));
2234 2245 return (cmd);
2235 2246 }
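/*
 * Editor's sketch (standalone): the save/restore discipline the note above
 * demands of callers that clear a reused frame.  The frame layout and field
 * names are hypothetical stand-ins; only the pattern matters -- capture the
 * context id, clear the buffer, write the context id back.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

typedef struct {
	uint32_t context;	/* identifies the frame; must survive reuse */
	uint8_t  payload[60];
} xx_frame_t;

int
main(void)
{
	xx_frame_t frame = { .context = 42, .payload = { 1, 2, 3 } };

	uint32_t ctx = frame.context;		/* save before clearing */
	memset(&frame, 0, sizeof (frame));	/* clear for reuse */
	frame.context = ctx;			/* restore, per the note */

	printf("context=%u\n", (unsigned)frame.context);
	return (0);
}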
2236 2247
2237 2248 struct mrsas_cmd *
2238 2249 get_raid_msg_mfi_pkt(struct mrsas_instance *instance)
2239 2250 {
2240 2251 mlist_t *head = &instance->cmd_app_pool_list;
2241 2252 struct mrsas_cmd *cmd = NULL;
2242 2253
2243 2254 mutex_enter(&instance->cmd_app_pool_mtx);
2244 2255 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2245 2256
2246 2257 if (!mlist_empty(head)) {
2247 2258 cmd = mlist_entry(head->next, struct mrsas_cmd, list);
2248 2259 mlist_del_init(head->next);
2249 2260 }
2250 2261 if (cmd != NULL) {
2251 2262 cmd->retry_count_for_ocr = 0;
2252 2263 cmd->drv_pkt_time = 0;
2253 2264 cmd->pkt = NULL;
2254 2265 cmd->request_desc = NULL;
2255 2266
2256 2267 }
2257 2268
2258 2269 mutex_exit(&instance->cmd_app_pool_mtx);
2259 2270
2260 2271 if (cmd != NULL) {
2261 2272 bzero(cmd->scsi_io_request,
2262 2273 sizeof (Mpi2RaidSCSIIORequest_t));
2263 2274 }
2264 2275
2265 2276 return (cmd);
2266 2277 }
2267 2278
2268 2279 /*
2269 2280 * return_raid_msg_pkt : Return a cmd to free command pool
2270 2281 */
2271 2282 void
2272 2283 return_raid_msg_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2273 2284 {
2274 2285 mutex_enter(&instance->cmd_pool_mtx);
2275 2286 ASSERT(mutex_owned(&instance->cmd_pool_mtx));
2276 2287
2277 2288
2278 2289 mlist_add_tail(&cmd->list, &instance->cmd_pool_list);
2279 2290
2280 2291 mutex_exit(&instance->cmd_pool_mtx);
2281 2292 }
2282 2293
2283 2294 void
2284 2295 return_raid_msg_mfi_pkt(struct mrsas_instance *instance, struct mrsas_cmd *cmd)
2285 2296 {
2286 2297 mutex_enter(&instance->cmd_app_pool_mtx);
2287 2298 ASSERT(mutex_owned(&instance->cmd_app_pool_mtx));
2288 2299
2289 2300 mlist_add_tail(&cmd->list, &instance->cmd_app_pool_list);
2290 2301
2291 2302 mutex_exit(&instance->cmd_app_pool_mtx);
2292 2303 }
2293 2304
2294 2305
2295 2306 void
2296 2307 mr_sas_tbolt_build_mfi_cmd(struct mrsas_instance *instance,
2297 2308 struct mrsas_cmd *cmd)
2298 2309 {
2299 - Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2310 + Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2300 2311 Mpi25IeeeSgeChain64_t *scsi_raid_io_sgl_ieee;
2301 - MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2312 + MRSAS_REQUEST_DESCRIPTOR_UNION *ReqDescUnion;
2302 2313 uint32_t index;
2314 + ddi_acc_handle_t acc_handle =
2315 + instance->mpi2_frame_pool_dma_obj.acc_handle;
2303 2316
2304 2317 if (!instance->tbolt) {
2305 - con_log(CL_ANN, (CE_NOTE, "Not MFA enabled.\n"));
2318 + con_log(CL_ANN, (CE_NOTE, "Not MFA enabled."));
2306 2319 return;
2307 2320 }
2308 2321
2309 2322 index = cmd->index;
2310 2323
2311 - ReqDescUnion =
2312 - mr_sas_get_request_descriptor(instance, index, cmd);
2324 + ReqDescUnion = mr_sas_get_request_descriptor(instance, index);
2313 2325
2314 2326 if (!ReqDescUnion) {
2315 - con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]%x"));
2327 + con_log(CL_ANN1, (CE_NOTE, "[NULL REQDESC]"));
2316 2328 return;
2317 2329 }
2318 2330
2319 2331 con_log(CL_ANN1, (CE_NOTE, "[SMID]%x", cmd->SMID));
2320 2332
2321 2333 ReqDescUnion->Words = 0;
2322 2334
2323 2335 ReqDescUnion->SCSIIO.RequestFlags =
2324 2336 (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2325 2337 MPI2_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2326 2338
2327 2339 ReqDescUnion->SCSIIO.SMID = cmd->SMID;
2328 2340
2329 2341 cmd->request_desc = ReqDescUnion;
2330 2342
2331 - // get raid message frame pointer
2343 + /* get raid message frame pointer */
2332 2344 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2333 2345
2334 2346 if (instance->device_id == PCI_DEVICE_ID_LSI_INVADER) {
2335 - Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2347 + Mpi25IeeeSgeChain64_t *sgl_ptr_end = (Mpi25IeeeSgeChain64_t *)
2348 + &scsi_raid_io->SGL.IeeeChain;
2336 2349 sgl_ptr_end += instance->max_sge_in_main_msg - 1;
2337 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2338 - &sgl_ptr_end->Flags, 0);
2350 + ddi_put8(acc_handle, &sgl_ptr_end->Flags, 0);
2339 2351 }
2340 2352
2341 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2342 - &scsi_raid_io->Function,
2353 + ddi_put8(acc_handle, &scsi_raid_io->Function,
2343 2354 MPI2_FUNCTION_PASSTHRU_IO_REQUEST);
2344 2355
2345 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2346 - &scsi_raid_io->SGLOffset0,
2356 + ddi_put8(acc_handle, &scsi_raid_io->SGLOffset0,
2347 2357 offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4);
2348 2358
2349 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2350 - &scsi_raid_io->ChainOffset,
2359 + ddi_put8(acc_handle, &scsi_raid_io->ChainOffset,
2351 2360 (U8)offsetof(MPI2_RAID_SCSI_IO_REQUEST, SGL) / 16);
2352 2361
2353 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2354 - &scsi_raid_io->SenseBufferLowAddress,
2362 + ddi_put32(acc_handle, &scsi_raid_io->SenseBufferLowAddress,
2355 2363 cmd->sense_phys_addr1);
2356 2364
2357 2365
2358 2366 scsi_raid_io_sgl_ieee =
2359 2367 (Mpi25IeeeSgeChain64_t *)&scsi_raid_io->SGL.IeeeChain;
2360 2368
2361 - ddi_put64(instance->mpi2_frame_pool_dma_obj.acc_handle,
2362 - &scsi_raid_io_sgl_ieee->Address,
2369 + ddi_put64(acc_handle, &scsi_raid_io_sgl_ieee->Address,
2363 2370 (U64)cmd->frame_phys_addr);
2364 2371
2365 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2366 - &scsi_raid_io_sgl_ieee->Flags,
2367 - (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2368 - MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2369 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2370 - &scsi_raid_io_sgl_ieee->Length, 1024); //MEGASAS_MAX_SZ_CHAIN_FRAME
2372 + ddi_put8(acc_handle,
2373 + &scsi_raid_io_sgl_ieee->Flags, (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2374 + MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR));
2375 + /* LSI put hardcoded 1024 instead of MEGASAS_MAX_SZ_CHAIN_FRAME. */
2376 + ddi_put32(acc_handle, &scsi_raid_io_sgl_ieee->Length, 1024);
2371 2377
2372 2378 con_log(CL_ANN1, (CE_NOTE,
2373 - "[MFI CMD PHY ADDRESS]:%x",
2379 + "[MFI CMD PHY ADDRESS]:%" PRIx64,
2374 2380 scsi_raid_io_sgl_ieee->Address));
2375 2381 con_log(CL_ANN1, (CE_NOTE,
2376 2382 "[SGL Length]:%x", scsi_raid_io_sgl_ieee->Length));
2377 2383 con_log(CL_ANN1, (CE_NOTE, "[SGL Flags]:%x",
2378 2384 scsi_raid_io_sgl_ieee->Flags));
2379 2385 }
2380 2386
2381 2387
2382 2388 void
2383 2389 tbolt_complete_cmd(struct mrsas_instance *instance,
2384 2390 struct mrsas_cmd *cmd)
2385 2391 {
2386 2392 uint8_t status;
2387 2393 uint8_t extStatus;
2388 2394 uint8_t arm;
2389 2395 struct scsa_cmd *acmd;
2390 2396 struct scsi_pkt *pkt;
2391 2397 struct scsi_arq_status *arqstat;
2392 2398 Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2393 - LD_LOAD_BALANCE_INFO *lbinfo;
2394 - int i;
2399 + LD_LOAD_BALANCE_INFO *lbinfo;
2400 + ddi_acc_handle_t acc_handle =
2401 + instance->mpi2_frame_pool_dma_obj.acc_handle;
2395 2402
2396 2403 scsi_raid_io = (Mpi2RaidSCSIIORequest_t *)cmd->scsi_io_request;
2397 2404
2398 - status = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2399 - &scsi_raid_io->RaidContext.status);
2400 - extStatus = ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2401 - &scsi_raid_io->RaidContext.extStatus);
2405 + status = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.status);
2406 + extStatus = ddi_get8(acc_handle, &scsi_raid_io->RaidContext.extStatus);
2402 2407
2403 2408 con_log(CL_DLEVEL3, (CE_NOTE, "status %x", status));
2404 2409 con_log(CL_DLEVEL3, (CE_NOTE, "extStatus %x", extStatus));
2405 2410
2406 2411 if (status != MFI_STAT_OK) {
2407 2412 con_log(CL_ANN, (CE_WARN,
2408 2413 "IO Cmd Failed SMID %x", cmd->SMID));
2409 2414 } else {
2410 2415 con_log(CL_ANN, (CE_NOTE,
2411 2416 "IO Cmd Success SMID %x", cmd->SMID));
2412 2417 }
2413 2418
2414 2419 /* regular commands */
2415 2420
2416 - switch (ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2417 - &scsi_raid_io->Function)) {
2421 + switch (ddi_get8(acc_handle, &scsi_raid_io->Function)) {
2418 2422
2419 - case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2420 - acmd = (struct scsa_cmd *)cmd->cmd;
2421 - lbinfo = &instance->load_balance_info[acmd->device_id];
2423 + case MPI2_FUNCTION_SCSI_IO_REQUEST : /* Fast Path IO. */
2424 + acmd = (struct scsa_cmd *)cmd->cmd;
2425 + lbinfo = &instance->load_balance_info[acmd->device_id];
2422 2426
2423 - if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2424 - arm = lbinfo->raid1DevHandle[0] == scsi_raid_io->DevHandle ? 0 : 1;
2427 + if (cmd->load_balance_flag & MEGASAS_LOAD_BALANCE_FLAG) {
2428 + arm = lbinfo->raid1DevHandle[0] ==
2429 + scsi_raid_io->DevHandle ? 0 : 1;
2425 2430
2426 - lbinfo->scsi_pending_cmds[arm]--;
2427 - cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2428 - }
2429 - con_log(CL_DLEVEL3, (CE_NOTE,
2430 - "FastPath IO Completion Success "));
2431 + lbinfo->scsi_pending_cmds[arm]--;
2432 + cmd->load_balance_flag &= ~MEGASAS_LOAD_BALANCE_FLAG;
2433 + }
2434 + con_log(CL_DLEVEL3, (CE_NOTE,
2435 + "FastPath IO Completion Success "));
2436 + /* FALLTHRU */
2431 2437
2432 - case MPI2_FUNCTION_LD_IO_REQUEST : {// Regular Path IO.
2433 - acmd = (struct scsa_cmd *)cmd->cmd;
2434 - pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2438 + case MPI2_FUNCTION_LD_IO_REQUEST : { /* Regular Path IO. */
2439 + acmd = (struct scsa_cmd *)cmd->cmd;
2440 + pkt = (struct scsi_pkt *)CMD2PKT(acmd);
2435 2441
2436 - if (acmd->cmd_flags & CFLAG_DMAVALID) {
2437 - if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2438 - (void) ddi_dma_sync(acmd->cmd_dmahandle,
2439 - acmd->cmd_dma_offset,
2440 - acmd->cmd_dma_len,
2441 - DDI_DMA_SYNC_FORCPU);
2442 - }
2442 + if (acmd->cmd_flags & CFLAG_DMAVALID) {
2443 + if (acmd->cmd_flags & CFLAG_CONSISTENT) {
2444 + (void) ddi_dma_sync(acmd->cmd_dmahandle,
2445 + acmd->cmd_dma_offset, acmd->cmd_dma_len,
2446 + DDI_DMA_SYNC_FORCPU);
2443 2447 }
2448 + }
2444 2449
2445 - pkt->pkt_reason = CMD_CMPLT;
2446 - pkt->pkt_statistics = 0;
2447 - pkt->pkt_state = STATE_GOT_BUS
2448 - | STATE_GOT_TARGET | STATE_SENT_CMD
2449 - | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2450 + pkt->pkt_reason = CMD_CMPLT;
2451 + pkt->pkt_statistics = 0;
2452 + pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2453 + STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS;
2450 2454
2451 - con_log(CL_ANN, (CE_CONT,
2452 - " CDB[0] = %x completed for %s: size %lx SMID %x cmd_status %x",
2453 - pkt->pkt_cdbp[0],
2454 - ((acmd->islogical) ? "LD" : "PD"),
2455 - acmd->cmd_dmacount, cmd->SMID, status));
2455 + con_log(CL_ANN, (CE_CONT, " CDB[0] = %x completed for %s: "
2456 + "size %lx SMID %x cmd_status %x", pkt->pkt_cdbp[0],
2457 + ((acmd->islogical) ? "LD" : "PD"),
2458 + acmd->cmd_dmacount, cmd->SMID, status));
2456 2459
2457 - if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2458 - struct scsi_inquiry *inq;
2460 + if (pkt->pkt_cdbp[0] == SCMD_INQUIRY) {
2461 + struct scsi_inquiry *inq;
2459 2462
2460 - if (acmd->cmd_dmacount != 0) {
2461 - bp_mapin(acmd->cmd_buf);
2462 - inq = (struct scsi_inquiry *)
2463 - acmd->cmd_buf->b_un.b_addr;
2463 + if (acmd->cmd_dmacount != 0) {
2464 + bp_mapin(acmd->cmd_buf);
2465 + inq = (struct scsi_inquiry *)
2466 + acmd->cmd_buf->b_un.b_addr;
2464 2467
2465 - /* don't expose physical drives to OS */
2466 - if (acmd->islogical &&
2467 - (status == MFI_STAT_OK)) {
2468 - display_scsi_inquiry(
2469 - (caddr_t)inq);
2470 - }
2468 + /* don't expose physical drives to OS */
2469 + if (acmd->islogical &&
2470 + (status == MFI_STAT_OK)) {
2471 + display_scsi_inquiry((caddr_t)inq);
2471 2472 #ifdef PDSUPPORT
2472 - else if ((status ==
2473 - MFI_STAT_OK) && inq->inq_dtype ==
2474 - DTYPE_DIRECT) {
2475 -
2476 - display_scsi_inquiry(
2477 - (caddr_t)inq);
2478 - }
2473 + } else if ((status == MFI_STAT_OK) &&
2474 + inq->inq_dtype == DTYPE_DIRECT) {
2475 + display_scsi_inquiry((caddr_t)inq);
2479 2476 #endif
2480 - else {
2481 - /* for physical disk */
2482 - status =
2483 - MFI_STAT_DEVICE_NOT_FOUND;
2484 - }
2477 + } else {
2478 + /* for physical disk */
2479 + status = MFI_STAT_DEVICE_NOT_FOUND;
2485 2480 }
2486 2481 }
2482 + }
2487 2483
2488 - switch (status) {
2489 - case MFI_STAT_OK:
2490 - pkt->pkt_scbp[0] = STATUS_GOOD;
2491 - break;
2492 - case MFI_STAT_LD_CC_IN_PROGRESS:
2493 - case MFI_STAT_LD_RECON_IN_PROGRESS:
2494 - pkt->pkt_scbp[0] = STATUS_GOOD;
2495 - break;
2496 - case MFI_STAT_LD_INIT_IN_PROGRESS:
2497 - pkt->pkt_reason = CMD_TRAN_ERR;
2498 - break;
2499 - case MFI_STAT_SCSI_IO_FAILED:
2500 - cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
2501 - pkt->pkt_reason = CMD_TRAN_ERR;
2502 - break;
2503 - case MFI_STAT_SCSI_DONE_WITH_ERROR:
2504 - con_log(CL_ANN, (CE_WARN,
2505 - "tbolt_complete_cmd: scsi_done with error"));
2484 + switch (status) {
2485 + case MFI_STAT_OK:
2486 + pkt->pkt_scbp[0] = STATUS_GOOD;
2487 + break;
2488 + case MFI_STAT_LD_CC_IN_PROGRESS:
2489 + case MFI_STAT_LD_RECON_IN_PROGRESS:
2490 + pkt->pkt_scbp[0] = STATUS_GOOD;
2491 + break;
2492 + case MFI_STAT_LD_INIT_IN_PROGRESS:
2493 + pkt->pkt_reason = CMD_TRAN_ERR;
2494 + break;
2495 + case MFI_STAT_SCSI_IO_FAILED:
2496 + cmn_err(CE_WARN, "tbolt_complete_cmd: scsi_io failed");
2497 + pkt->pkt_reason = CMD_TRAN_ERR;
2498 + break;
2499 + case MFI_STAT_SCSI_DONE_WITH_ERROR:
2500 + con_log(CL_ANN, (CE_WARN,
2501 + "tbolt_complete_cmd: scsi_done with error"));
2506 2502
2507 - pkt->pkt_reason = CMD_CMPLT;
2508 - ((struct scsi_status *)
2509 - pkt->pkt_scbp)->sts_chk = 1;
2503 + pkt->pkt_reason = CMD_CMPLT;
2504 + ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2510 2505
2511 - if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2512 - con_log(CL_ANN, (CE_WARN, "TEST_UNIT_READY fail"));
2513 - } else {
2514 - pkt->pkt_state |= STATE_ARQ_DONE;
2515 - arqstat = (void *)(pkt->pkt_scbp);
2516 - arqstat->sts_rqpkt_reason = CMD_CMPLT;
2517 - arqstat->sts_rqpkt_resid = 0;
2518 - arqstat->sts_rqpkt_state |=
2519 - STATE_GOT_BUS | STATE_GOT_TARGET
2520 - | STATE_SENT_CMD
2521 - | STATE_XFERRED_DATA;
2522 - *(uint8_t *)&arqstat->sts_rqpkt_status =
2523 - STATUS_GOOD;
2524 - con_log(CL_ANN1, (CE_NOTE,
2525 - "Copying Sense data %x",
2526 - cmd->SMID));
2527 -
2528 - ddi_rep_get8(
2529 - instance->
2530 - mpi2_frame_pool_dma_obj.acc_handle,
2531 - (uint8_t *)
2532 - &(arqstat->sts_sensedata),
2533 - cmd->sense1,
2534 - sizeof (struct scsi_extended_sense),
2535 - DDI_DEV_AUTOINCR);
2536 -
2537 - }
2538 - break;
2539 - case MFI_STAT_LD_OFFLINE:
2540 - cmn_err(CE_WARN,
2541 - "tbolt_complete_cmd: ld offline "
2542 - "CDB[0]=0x%x targetId=0x%x devhandle=0x%x\n", //UNDO:
2543 - ddi_get8(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2544 - ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->RaidContext.ldTargetId),
2545 - ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle, &scsi_raid_io->DevHandle) );
2546 - pkt->pkt_reason = CMD_DEV_GONE;
2547 - pkt->pkt_statistics = STAT_DISCON;
2548 - break;
2549 - case MFI_STAT_DEVICE_NOT_FOUND:
2550 - con_log(CL_ANN, (CE_CONT,
2551 - "tbolt_complete_cmd: device not found error"));
2552 - pkt->pkt_reason = CMD_DEV_GONE;
2553 - pkt->pkt_statistics = STAT_DISCON;
2554 - break;
2555 -
2556 - case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2506 + if (pkt->pkt_cdbp[0] == SCMD_TEST_UNIT_READY) {
2507 + con_log(CL_ANN,
2508 + (CE_WARN, "TEST_UNIT_READY fail"));
2509 + } else {
2557 2510 pkt->pkt_state |= STATE_ARQ_DONE;
2558 - pkt->pkt_reason = CMD_CMPLT;
2559 - ((struct scsi_status *)
2560 - pkt->pkt_scbp)->sts_chk = 1;
2561 -
2562 2511 arqstat = (void *)(pkt->pkt_scbp);
2563 2512 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2564 2513 arqstat->sts_rqpkt_resid = 0;
2565 - arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2566 - | STATE_GOT_TARGET | STATE_SENT_CMD
2514 + arqstat->sts_rqpkt_state |=
2515 + STATE_GOT_BUS | STATE_GOT_TARGET
2516 + | STATE_SENT_CMD
2567 2517 | STATE_XFERRED_DATA;
2568 2518 *(uint8_t *)&arqstat->sts_rqpkt_status =
2569 2519 STATUS_GOOD;
2520 + con_log(CL_ANN1,
2521 + (CE_NOTE, "Copying Sense data %x",
2522 + cmd->SMID));
2570 2523
2571 - arqstat->sts_sensedata.es_valid = 1;
2572 - arqstat->sts_sensedata.es_key =
2573 - KEY_ILLEGAL_REQUEST;
2574 - arqstat->sts_sensedata.es_class =
2575 - CLASS_EXTENDED_SENSE;
2524 + ddi_rep_get8(acc_handle,
2525 + (uint8_t *)&(arqstat->sts_sensedata),
2526 + cmd->sense1,
2527 + sizeof (struct scsi_extended_sense),
2528 + DDI_DEV_AUTOINCR);
2576 2529
2577 - /*
2578 - * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2579 - * ASC: 0x21h; ASCQ: 0x00h;
2580 - */
2581 - arqstat->sts_sensedata.es_add_code = 0x21;
2582 - arqstat->sts_sensedata.es_qual_code = 0x00;
2583 - break;
2584 - case MFI_STAT_INVALID_CMD:
2585 - case MFI_STAT_INVALID_DCMD:
2586 - case MFI_STAT_INVALID_PARAMETER:
2587 - case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2588 - default:
2589 - cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!");
2590 - pkt->pkt_reason = CMD_TRAN_ERR;
2591 -
2592 - break;
2593 2530 }
2531 + break;
2532 + case MFI_STAT_LD_OFFLINE:
2533 + cmn_err(CE_WARN,
2534 + "tbolt_complete_cmd: ld offline "
2535 + "CDB[0]=0x%x targetId=0x%x devhandle=0x%x",
2536 + /* UNDO: */
2537 + ddi_get8(acc_handle, &scsi_raid_io->CDB.CDB32[0]),
2594 2538
2595 - atomic_add_16(&instance->fw_outstanding, (-1));
2539 + ddi_get16(acc_handle,
2540 + &scsi_raid_io->RaidContext.ldTargetId),
2596 2541
2597 - /* Call the callback routine */
2598 - if (((pkt->pkt_flags & FLAG_NOINTR) == 0) &&
2599 - pkt->pkt_comp) {
2600 - (*pkt->pkt_comp)(pkt);
2601 - }
2542 + ddi_get16(acc_handle, &scsi_raid_io->DevHandle));
2602 2543
2603 - con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2544 + pkt->pkt_reason = CMD_DEV_GONE;
2545 + pkt->pkt_statistics = STAT_DISCON;
2546 + break;
2547 + case MFI_STAT_DEVICE_NOT_FOUND:
2548 + con_log(CL_ANN, (CE_CONT,
2549 + "tbolt_complete_cmd: device not found error"));
2550 + pkt->pkt_reason = CMD_DEV_GONE;
2551 + pkt->pkt_statistics = STAT_DISCON;
2552 + break;
2604 2553
2605 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2606 - &scsi_raid_io->RaidContext.status, 0);
2554 + case MFI_STAT_LD_LBA_OUT_OF_RANGE:
2555 + pkt->pkt_state |= STATE_ARQ_DONE;
2556 + pkt->pkt_reason = CMD_CMPLT;
2557 + ((struct scsi_status *)pkt->pkt_scbp)->sts_chk = 1;
2607 2558
2608 - ddi_put8(instance->mpi2_frame_pool_dma_obj.acc_handle,
2609 - &scsi_raid_io->RaidContext.extStatus, 0);
2559 + arqstat = (void *)(pkt->pkt_scbp);
2560 + arqstat->sts_rqpkt_reason = CMD_CMPLT;
2561 + arqstat->sts_rqpkt_resid = 0;
2562 + arqstat->sts_rqpkt_state |= STATE_GOT_BUS
2563 + | STATE_GOT_TARGET | STATE_SENT_CMD
2564 + | STATE_XFERRED_DATA;
2565 + *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
2610 2566
2611 - return_raid_msg_pkt(instance, cmd);
2567 + arqstat->sts_sensedata.es_valid = 1;
2568 + arqstat->sts_sensedata.es_key = KEY_ILLEGAL_REQUEST;
2569 + arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2570 +
2571 + /*
2572 + * LOGICAL BLOCK ADDRESS OUT OF RANGE:
2573 + * ASC: 0x21h; ASCQ: 0x00h;
2574 + */
2575 + arqstat->sts_sensedata.es_add_code = 0x21;
2576 + arqstat->sts_sensedata.es_qual_code = 0x00;
2612 2577 break;
2578 + case MFI_STAT_INVALID_CMD:
2579 + case MFI_STAT_INVALID_DCMD:
2580 + case MFI_STAT_INVALID_PARAMETER:
2581 + case MFI_STAT_INVALID_SEQUENCE_NUMBER:
2582 + default:
2583 + cmn_err(CE_WARN, "tbolt_complete_cmd: Unknown status!");
2584 + pkt->pkt_reason = CMD_TRAN_ERR;
2585 +
2586 + break;
2613 2587 }
2614 - case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: // MFA command.
2615 2588
2616 - if (cmd->frame->dcmd.opcode
2617 - == MR_DCMD_LD_MAP_GET_INFO &&
2618 - cmd->frame->dcmd.mbox.b[1]
2619 - == 1) {
2620 -
2589 + atomic_add_16(&instance->fw_outstanding, (-1));
2590 +
2591 + (void) mrsas_common_check(instance, cmd);
2592 + if (acmd->cmd_dmahandle) {
2593 + if (mrsas_check_dma_handle(acmd->cmd_dmahandle) !=
2594 + DDI_SUCCESS) {
2595 + ddi_fm_service_impact(instance->dip,
2596 + DDI_SERVICE_UNAFFECTED);
2597 + pkt->pkt_reason = CMD_TRAN_ERR;
2598 + pkt->pkt_statistics = 0;
2599 + }
2600 + }
2601 +
2602 + /* Call the callback routine */
2603 + if (((pkt->pkt_flags & FLAG_NOINTR) == 0) && pkt->pkt_comp)
2604 + (*pkt->pkt_comp)(pkt);
2605 +
2606 + con_log(CL_ANN1, (CE_NOTE, "Free smid %x", cmd->SMID));
2607 +
2608 + ddi_put8(acc_handle, &scsi_raid_io->RaidContext.status, 0);
2609 +
2610 + ddi_put8(acc_handle, &scsi_raid_io->RaidContext.extStatus, 0);
2611 +
2612 + return_raid_msg_pkt(instance, cmd);
2613 + break;
2614 + }
2615 + case MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFA command. */
2616 +
2617 + if (cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO &&
2618 + cmd->frame->dcmd.mbox.b[1] == 1) {
2619 +
2621 2620 mutex_enter(&instance->sync_map_mtx);
2622 2621
2623 2622 con_log(CL_ANN, (CE_NOTE,
2624 - "LDMAP sync command SMID RECEIVED 0x%X",
2625 - cmd->SMID));
2623 + "LDMAP sync command SMID RECEIVED 0x%X",
2624 + cmd->SMID));
2626 2625 if (cmd->frame->hdr.cmd_status != 0) {
2627 2626 cmn_err(CE_WARN,
2628 - "map sync failed, status = 0x%x.\n",cmd->frame->hdr.cmd_status);
2629 - }
2630 - else {
2627 + "map sync failed, status = 0x%x.",
2628 + cmd->frame->hdr.cmd_status);
2629 + } else {
2631 2630 instance->map_id++;
2632 2631 cmn_err(CE_NOTE,
2633 - "map sync received, switched map_id to %ld \n",instance->map_id);
2632 + "map sync received, switched map_id to %"
2633 +				    PRIu64, instance->map_id);
2634 2634 }
2635 2635
2636 - if (MR_ValidateMapInfo(instance->ld_map[(instance->map_id & 1)], instance->load_balance_info))
2637 - instance->fast_path_io = 1;
2638 - else
2639 - instance->fast_path_io = 0;
2640 -
2636 + if (MR_ValidateMapInfo(instance->ld_map[
2637 + (instance->map_id & 1)],
2638 + instance->load_balance_info)) {
2639 + instance->fast_path_io = 1;
2640 + } else {
2641 + instance->fast_path_io = 0;
2642 + }
2643 +
2641 2644 con_log(CL_ANN, (CE_NOTE,
2642 - "instance->fast_path_io %d \n",instance->fast_path_io));
2645 + "instance->fast_path_io %d",
2646 + instance->fast_path_io));
2643 2647
2644 2648 instance->unroll.syncCmd = 0;
2645 2649
2646 - if(instance->map_update_cmd == cmd) {
2650 + if (instance->map_update_cmd == cmd) {
2647 2651 return_raid_msg_pkt(instance, cmd);
2648 2652 atomic_add_16(&instance->fw_outstanding, (-1));
2649 - mrsas_tbolt_sync_map_info(instance);
2653 + (void) mrsas_tbolt_sync_map_info(instance);
2650 2654 }
2651 -
2652 - cmn_err(CE_NOTE, "LDMAP sync completed.\n");
2655 +
2656 + cmn_err(CE_NOTE, "LDMAP sync completed.");
2653 2657 mutex_exit(&instance->sync_map_mtx);
2654 2658 break;
2655 2659 }
2656 2660
2657 2661 if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) {
2658 2662 con_log(CL_ANN1, (CE_CONT,
2659 - "AEN command SMID RECEIVED 0x%X",
2660 - cmd->SMID));
2661 - if ((instance->aen_cmd == cmd) &&
2662 - (instance->aen_cmd->abort_aen)) {
2663 - con_log(CL_ANN, (CE_WARN,
2664 - "mrsas_softintr: "
2665 - "aborted_aen returned"));
2666 - }
2667 - else
2668 - {
2663 + "AEN command SMID RECEIVED 0x%X",
2664 + cmd->SMID));
2665 + if ((instance->aen_cmd == cmd) &&
2666 + (instance->aen_cmd->abort_aen)) {
2667 + con_log(CL_ANN, (CE_WARN, "mrsas_softintr: "
2668 + "aborted_aen returned"));
2669 + } else {
2669 2670 atomic_add_16(&instance->fw_outstanding, (-1));
2670 - service_mfi_aen(instance, cmd);
2671 + service_mfi_aen(instance, cmd);
2671 2672 }
2672 2673 }
2673 2674
2674 - if (cmd->sync_cmd == MRSAS_TRUE ) {
2675 + if (cmd->sync_cmd == MRSAS_TRUE) {
2675 2676 con_log(CL_ANN1, (CE_CONT,
2676 - "Sync-mode Command Response SMID RECEIVED 0x%X",
2677 - cmd->SMID));
2677 + "Sync-mode Command Response SMID RECEIVED 0x%X",
2678 + cmd->SMID));
2678 2679
2679 2680 tbolt_complete_cmd_in_sync_mode(instance, cmd);
2680 - }
2681 - else
2682 - {
2681 + } else {
2683 2682 con_log(CL_ANN, (CE_CONT,
2684 - "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2685 - cmd->SMID));
2683 + "tbolt_complete_cmd: Wrong SMID RECEIVED 0x%X",
2684 + cmd->SMID));
2686 2685 }
2687 2686 break;
2688 2687 default:
2688 + mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2689 + ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2690 +
2689 2691 /* free message */
2690 - con_log(CL_ANN, (CE_NOTE, "tbolt_complete_cmd: Unknown Type!!!!!!!!"));
2692 + con_log(CL_ANN,
2693 +		    (CE_NOTE, "tbolt_complete_cmd: Unknown Type!"));
2691 2694 break;
2692 2695 }
2693 2696 }
2694 2697
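Note on the MFI_STAT_LD_LBA_OUT_OF_RANGE arm above: it hand-builds the
auto-request-sense payload (ILLEGAL REQUEST, ASC 0x21/ASCQ 0x00). As a hedged
illustration only, the same payload can be assembled stand-alone assuming the
SPC fixed-format sense layout; the struct and names below are illustrative and
are not the driver's scsi_extended_sense:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct fixed_sense {		/* SPC fixed format, bytes 0-13 */
		uint8_t	response_code;	/* 0x70: current error, fixed */
		uint8_t	obsolete;
		uint8_t	sense_key;	/* low nibble holds the key */
		uint8_t	info[4];
		uint8_t	add_len;
		uint8_t	cmd_info[4];
		uint8_t	asc;		/* additional sense code */
		uint8_t	ascq;		/* ASC qualifier */
	};

	int
	main(void)
	{
		struct fixed_sense s;

		memset(&s, 0, sizeof (s));
		s.response_code = 0x70;	/* current error, fixed format */
		s.sense_key = 0x05;	/* ILLEGAL REQUEST */
		s.asc = 0x21;		/* LBA OUT OF RANGE */
		s.ascq = 0x00;
		(void) printf("key=%x asc=%02x ascq=%02x\n",
		    s.sense_key, s.asc, s.ascq);
		return (0);
	}
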
2695 2698 uint_t
2696 2699 mr_sas_tbolt_process_outstanding_cmd(struct mrsas_instance *instance)
2697 2700 {
2698 2701 uint8_t replyType;
2699 2702 Mpi2SCSIIOSuccessReplyDescriptor_t *replyDesc;
2700 - Mpi2ReplyDescriptorsUnion_t *desc;
2703 + Mpi2ReplyDescriptorsUnion_t *desc;
2701 2704 uint16_t smid;
2702 - union desc_value d_val;
2705 + union desc_value d_val;
2703 2706 struct mrsas_cmd *cmd;
2704 - uint32_t i;
2705 - Mpi2RaidSCSIIORequest_t *scsi_raid_io;
2706 - uint8_t status;
2707 2707
2708 2708 struct mrsas_header *hdr;
2709 2709 struct scsi_pkt *pkt;
2710 2710
2711 2711 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2712 2712 0, 0, DDI_DMA_SYNC_FORDEV);
2713 2713
2714 2714 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2715 2715 0, 0, DDI_DMA_SYNC_FORCPU);
2716 2716
2717 2717 desc = instance->reply_frame_pool;
2718 2718 desc += instance->reply_read_index;
2719 2719
2720 2720 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2721 2721 replyType = replyDesc->ReplyFlags &
2722 2722 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2723 2723
2724 2724 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2725 2725 return (DDI_INTR_UNCLAIMED);
2726 2726
2727 - con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %llx Words = %llx \n",
2728 - desc, desc->Words));
2727 + if (mrsas_check_dma_handle(instance->mfi_internal_dma_obj.dma_handle)
2728 + != DDI_SUCCESS) {
2729 + mrsas_fm_ereport(instance, DDI_FM_DEVICE_NO_RESPONSE);
2730 + ddi_fm_service_impact(instance->dip, DDI_SERVICE_LOST);
2731 + con_log(CL_ANN1,
2732 + (CE_WARN, "mr_sas_tbolt_process_outstanding_cmd(): "
2733 +		    "FMA check failed, returning DDI_INTR_CLAIMED"));
2734 + return (DDI_INTR_CLAIMED);
2735 + }
2729 2736
2737 + con_log(CL_ANN1, (CE_NOTE, "Reply Desc = %p Words = %" PRIx64,
2738 + (void *)desc, desc->Words));
2739 +
2730 2740 d_val.word = desc->Words;
2731 -
2732 2741
2742 +
2733 2743 /* Read Reply descriptor */
2734 2744 while ((d_val.u1.low != 0xffffffff) &&
2735 2745 (d_val.u1.high != 0xffffffff)) {
2736 2746
2737 2747 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2738 - 0, 0, DDI_DMA_SYNC_FORCPU);
2748 + 0, 0, DDI_DMA_SYNC_FORCPU);
2739 2749
2740 2750 smid = replyDesc->SMID;
2741 2751
2742 2752 if (!smid || smid > instance->max_fw_cmds + 1) {
2743 2753 con_log(CL_ANN1, (CE_NOTE,
2744 - "Reply Desc at Break = %llx Words = %llx \n",
2745 - desc, desc->Words));
2754 + "Reply Desc at Break = %p Words = %" PRIx64,
2755 + (void *)desc, desc->Words));
2746 2756 break;
2747 2757 }
2748 2758
2749 2759 cmd = instance->cmd_list[smid - 1];
2750 - if(!cmd ) {
2751 - con_log(CL_ANN1, (CE_NOTE,
2752 - "mr_sas_tbolt_process_outstanding_cmd: Invalid command "
2753 - " or Poll commad Received in completion path\n"));
2754 - }
2755 - else {
2760 + if (!cmd) {
2761 + con_log(CL_ANN1, (CE_NOTE, "mr_sas_tbolt_process_"
2762 + "outstanding_cmd: Invalid command "
2763 +			    "or poll command received in completion path"));
2764 + } else {
2756 2765 mutex_enter(&instance->cmd_pend_mtx);
2757 2766 if (cmd->sync_cmd == MRSAS_TRUE) {
2758 2767 hdr = (struct mrsas_header *)&cmd->frame->hdr;
2759 2768 if (hdr) {
2760 - con_log(CL_ANN1, (CE_NOTE,
2761 - "mr_sas_tbolt_process_outstanding_cmd:"
2762 - " mlist_del_init(&cmd->list).\n"));
2769 + con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2770 + "tbolt_process_outstanding_cmd:"
2771 + " mlist_del_init(&cmd->list)."));
2763 2772 mlist_del_init(&cmd->list);
2764 2773 }
2765 - } else {
2766 - pkt = cmd->pkt;
2767 - if (pkt) {
2768 - con_log(CL_ANN1, (CE_NOTE,
2769 - "mr_sas_tbolt_process_outstanding_cmd:"
2770 - "mlist_del_init(&cmd->list).\n"));
2774 + } else {
2775 + pkt = cmd->pkt;
2776 + if (pkt) {
2777 + con_log(CL_ANN1, (CE_NOTE, "mr_sas_"
2778 + "tbolt_process_outstanding_cmd:"
2779 + "mlist_del_init(&cmd->list)."));
2771 2780 mlist_del_init(&cmd->list);
2772 2781 }
2773 2782 }
2774 2783
2775 2784 mutex_exit(&instance->cmd_pend_mtx);
2776 -
2785 +
2777 2786 tbolt_complete_cmd(instance, cmd);
2778 - }
2779 - // set it back to all 0xfffffffff.
2780 - desc->Words = (uint64_t)~0;
2787 + }
2788 + /* set it back to all 1s. */
2789 + desc->Words = -1LL;
2781 2790
2782 2791 instance->reply_read_index++;
2783 2792
2784 2793 if (instance->reply_read_index >= (instance->reply_q_depth)) {
2785 2794 con_log(CL_ANN1, (CE_NOTE, "wrap around"));
2786 2795 instance->reply_read_index = 0;
2787 2796 }
2788 2797
2789 2798 /* Get the next reply descriptor */
2790 2799 if (!instance->reply_read_index)
2791 2800 desc = instance->reply_frame_pool;
2792 2801 else
2793 2802 desc++;
2794 2803
2795 2804 replyDesc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
2796 2805
2797 2806 d_val.word = desc->Words;
2798 2807
2799 2808 con_log(CL_ANN1, (CE_NOTE,
2800 - "Next Reply Desc = %llx Words = %llx\n",
2801 - desc, desc->Words));
2809 + "Next Reply Desc = %p Words = %" PRIx64,
2810 + (void *)desc, desc->Words));
2802 2811
2803 2812 replyType = replyDesc->ReplyFlags &
2804 2813 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2805 2814
2806 2815 if (replyType == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2807 2816 break;
2808 2817
2809 2818 } /* End of while loop. */
2810 2819
2811 2820 /* update replyIndex to FW */
2812 2821 WR_MPI2_REPLY_POST_INDEX(instance->reply_read_index, instance);
2813 2822
2814 2823
2815 2824 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2816 2825 0, 0, DDI_DMA_SYNC_FORDEV);
2817 2826
2818 2827 (void) ddi_dma_sync(instance->reply_desc_dma_obj.dma_handle,
2819 2828 0, 0, DDI_DMA_SYNC_FORCPU);
2820 2829 return (DDI_INTR_CLAIMED);
2821 2830 }
2822 2831
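The loop above follows the usual MPI2 reply-ring discipline: a slot is unused
while it holds the all-ones sentinel; each posted descriptor is completed, the
sentinel is restored, the index wraps at the queue depth, and the read index
is finally published to the firmware. A minimal user-space sketch of that
discipline (names are illustrative only, not the driver's):

	#include <stdint.h>
	#include <stdio.h>

	#define	QUEUE_DEPTH	8
	#define	UNUSED_DESC	((uint64_t)~0ULL)	/* all-ones sentinel */

	static uint64_t ring[QUEUE_DEPTH];
	static unsigned read_index;

	static unsigned
	consume_ring(void)
	{
		unsigned handled = 0;

		while (ring[read_index] != UNUSED_DESC) {
			(void) printf("complete 0x%llx at index %u\n",
			    (unsigned long long)ring[read_index], read_index);
			ring[read_index] = UNUSED_DESC;	/* free the slot */
			if (++read_index >= QUEUE_DEPTH)
				read_index = 0;		/* wrap around */
			handled++;
		}
		/* A driver would now post read_index to the reply register. */
		return (handled);
	}

	int
	main(void)
	{
		for (int i = 0; i < QUEUE_DEPTH; i++)
			ring[i] = UNUSED_DESC;
		ring[0] = 0x1001;	/* pretend FW posted two replies */
		ring[1] = 0x1002;
		(void) printf("handled %u replies\n", consume_ring());
		return (0);
	}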
2823 2832
2824 2833
2825 2834
2826 2835 /*
2827 2836  * tbolt_complete_cmd_in_sync_mode -	Completes an internal command
2828 2837 * @instance: Adapter soft state
2829 2838 * @cmd: Command to be completed
2830 2839 *
2831 2840 * The issue_cmd_in_sync_mode() function waits for a command to complete
2832 2841 * after it issues a command. This function wakes up that waiting routine by
2833 2842  * calling cv_broadcast() on the internal command condition variable.
2834 2843 */
2835 2844 void
2836 2845 tbolt_complete_cmd_in_sync_mode(struct mrsas_instance *instance,
2837 2846 struct mrsas_cmd *cmd)
2838 2847 {
2839 2848
2840 2849 cmd->cmd_status = ddi_get8(cmd->frame_dma_obj.acc_handle,
2841 2850 &cmd->frame->io.cmd_status);
2842 2851
2843 2852 cmd->sync_cmd = MRSAS_FALSE;
2844 2853
33 lines elided
2845 2854 mutex_enter(&instance->int_cmd_mtx);
2846 2855 if (cmd->cmd_status == ENODATA) {
2847 2856 cmd->cmd_status = 0;
2848 2857 }
2849 2858 cv_broadcast(&instance->int_cmd_cv);
2850 2859 mutex_exit(&instance->int_cmd_mtx);
2851 2860
2852 2861 }
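
The waiter/waker pairing used by tbolt_complete_cmd_in_sync_mode() above is
mutex_enter()/cv_wait()/cv_broadcast() on int_cmd_mtx/int_cmd_cv. A small
POSIX-threads sketch of the same protocol, hedged: the names and the ENODATA
"no status posted yet" convention merely mirror the driver's, and nothing
below is driver code:

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t int_cmd_mtx = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t int_cmd_cv = PTHREAD_COND_INITIALIZER;
	static int cmd_status = ENODATA;	/* "not complete yet" */

	/* Completion side: record status, then wake every waiter. */
	static void
	complete_cmd(int status)
	{
		(void) pthread_mutex_lock(&int_cmd_mtx);
		cmd_status = status;
		(void) pthread_cond_broadcast(&int_cmd_cv);
		(void) pthread_mutex_unlock(&int_cmd_mtx);
	}

	/* Issuing side: sleep until the completion path posts a status. */
	static void *
	wait_for_cmd(void *arg)
	{
		(void) arg;
		(void) pthread_mutex_lock(&int_cmd_mtx);
		while (cmd_status == ENODATA)
			(void) pthread_cond_wait(&int_cmd_cv, &int_cmd_mtx);
		(void) printf("command completed, status %d\n", cmd_status);
		(void) pthread_mutex_unlock(&int_cmd_mtx);
		return (NULL);
	}

	int
	main(void)
	{
		pthread_t waiter;

		(void) pthread_create(&waiter, NULL, wait_for_cmd, NULL);
		complete_cmd(0);	/* pretend the interrupt path fired */
		(void) pthread_join(waiter, NULL);
		return (0);
	}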
2853 2862
2854 2863 /*
2855 - * mrsas_tbolt_get_ld_map_info - Returns ld_map structure
2864 + * mrsas_tbolt_get_ld_map_info - Returns ld_map structure
2856 2865 * instance: Adapter soft state
2857 2866 *
2858 2867  * Issues an internal command (DCMD) to get the FW's RAID (LD) map
2859 2868  * structure. This information is mainly used to validate the map and
2860 2869  * decide whether fast-path I/O can be enabled.
2861 2870 */
2862 2871 int
2863 2872 mrsas_tbolt_get_ld_map_info(struct mrsas_instance *instance)
2864 2873 {
2865 2874 int ret = 0;
2866 2875 struct mrsas_cmd *cmd = NULL;
2867 2876 struct mrsas_dcmd_frame *dcmd;
2868 2877 MR_FW_RAID_MAP_ALL *ci;
2869 2878 uint32_t ci_h = 0;
2870 2879 U32 size_map_info;
2871 2880
2872 2881 cmd = get_raid_msg_pkt(instance);
2873 2882
2874 2883 if (cmd == NULL) {
2875 2884 cmn_err(CE_WARN,
2876 2885 "Failed to get a cmd from free-pool in get_ld_map_info()");
2877 2886 return (DDI_FAILURE);
2878 2887 }
2879 2888
2880 2889 dcmd = &cmd->frame->dcmd;
2881 2890
2882 2891 size_map_info = sizeof (MR_FW_RAID_MAP) +
17 lines elided
2883 2892 (sizeof (MR_LD_SPAN_MAP) *
2884 2893 (MAX_LOGICAL_DRIVES - 1));
2885 2894
2886 2895 con_log(CL_ANN, (CE_NOTE,
2887 2896 "size_map_info : 0x%x", size_map_info));
2888 2897
2889 2898 ci = instance->ld_map[(instance->map_id & 1)];
2890 2899 ci_h = instance->ld_map_phy[(instance->map_id & 1)];
2891 2900
2892 2901 if (!ci) {
2893 - cmn_err(CE_WARN,
2894 - "Failed to alloc mem for ld_map_info");
2902 + cmn_err(CE_WARN, "Failed to alloc mem for ld_map_info");
2895 2903 return_raid_msg_pkt(instance, cmd);
2896 2904 return (-1);
2897 2905 }
2898 2906
2899 - memset(ci, 0, sizeof (*ci));
2900 - memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
2907 + bzero(ci, sizeof (*ci));
2908 + bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
2901 2909
2902 2910 dcmd->cmd = MFI_CMD_OP_DCMD;
2903 2911 dcmd->cmd_status = 0xFF;
2904 2912 dcmd->sge_count = 1;
2905 2913 dcmd->flags = MFI_FRAME_DIR_READ;
2906 2914 dcmd->timeout = 0;
2907 2915 dcmd->pad_0 = 0;
2908 2916 dcmd->data_xfer_len = size_map_info;
2909 2917 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
2910 2918 dcmd->sgl.sge32[0].phys_addr = ci_h;
2911 2919 dcmd->sgl.sge32[0].length = size_map_info;
2912 2920
2913 2921
2914 2922 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
2915 2923
2916 2924 if (!instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
2917 2925 ret = 0;
2918 - con_log(CL_ANN1, (CE_NOTE,
2919 - "Get LD Map Info success\n"));
2926 + con_log(CL_ANN1, (CE_NOTE, "Get LD Map Info success"));
2920 2927 } else {
2921 - cmn_err(CE_WARN,
2922 - "Get LD Map Info failed\n");
2928 + cmn_err(CE_WARN, "Get LD Map Info failed");
2923 2929 ret = -1;
2924 2930 }
2925 2931
2926 2932 return_raid_msg_pkt(instance, cmd);
2927 2933
2928 2934 return (ret);
2929 2935 }
2930 2936
2931 2937 void
2932 2938 mrsas_dump_reply_desc(struct mrsas_instance *instance)
2933 2939 {
2934 2940 uint32_t i;
2935 2941 MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
2936 2942 union desc_value d_val;
2937 2943
5 lines elided
2938 2944 reply_desc = instance->reply_frame_pool;
2939 2945
2940 2946 for (i = 0; i < instance->reply_q_depth; i++, reply_desc++) {
2941 2947 d_val.word = reply_desc->Words;
2942 2948 con_log(CL_DLEVEL3, (CE_NOTE,
2943 2949 "i=%d, %x:%x",
2944 2950 i, d_val.u1.high, d_val.u1.low));
2945 2951 }
2946 2952 }
2947 2953
2948 -/**
2954 +/*
2949 2955  * mrsas_tbolt_prepare_cdb - Prepare a DIF-capable CDB for fast path.
2950 2956 * @io_info: MegaRAID IO request packet pointer.
2951 2957 * @ref_tag: Reference tag for RD/WRPROTECT
2952 2958 *
2953 2959 * Create the command for fast path.
2954 2960 */
2955 2961 void
2956 -mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],struct IO_REQUEST_INFO *io_info,Mpi2RaidSCSIIORequest_t *scsi_io_request, U32 ref_tag)
2962 +mrsas_tbolt_prepare_cdb(struct mrsas_instance *instance, U8 cdb[],
2963 + struct IO_REQUEST_INFO *io_info, Mpi2RaidSCSIIORequest_t *scsi_io_request,
2964 + U32 ref_tag)
2957 2965 {
2958 2966 uint16_t EEDPFlags;
2959 2967 uint32_t Control;
2960 - // Prepare 32-byte CDB if DIF is supported on this device
2961 - con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB\n"));
2968 + ddi_acc_handle_t acc_handle =
2969 + instance->mpi2_frame_pool_dma_obj.acc_handle;
2962 2970
2963 - memset(cdb, 0, 32);
2964 -
2971 + /* Prepare 32-byte CDB if DIF is supported on this device */
2972 + con_log(CL_ANN, (CE_NOTE, "Prepare DIF CDB"));
2973 +
2974 + bzero(cdb, 32);
2975 +
2965 2976 cdb[0] = MRSAS_SCSI_VARIABLE_LENGTH_CMD;
2966 2977
2967 -
2978 +
2968 2979 cdb[7] = MRSAS_SCSI_ADDL_CDB_LEN;
2969 2980
2970 - if (io_info->isRead) {
2981 + if (io_info->isRead)
2971 2982 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_READ32;
2972 - }
2973 - else {
2983 + else
2974 2984 cdb[9] = MRSAS_SCSI_SERVICE_ACTION_WRITE32;
2975 - }
2976 2985
2977 - cdb[10] = MRSAS_RD_WR_PROTECT; // Verify with in linux driver, set to MEGASAS_RD_WR_PROTECT_CHECK_ALL
2986 +	/* Verify against the Linux driver: MEGASAS_RD_WR_PROTECT_CHECK_ALL */
2987 + cdb[10] = MRSAS_RD_WR_PROTECT;
2978 2988
2979 2989 /* LOGICAL BLOCK ADDRESS */
2980 - cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2990 + cdb[12] = (U8)(((io_info->pdBlock) >> 56) & 0xff);
2981 2991 cdb[13] = (U8)(((io_info->pdBlock) >> 48) & 0xff);
2982 - cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2983 - cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2984 - cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2985 - cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2986 - cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2987 - cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2992 + cdb[14] = (U8)(((io_info->pdBlock) >> 40) & 0xff);
2993 + cdb[15] = (U8)(((io_info->pdBlock) >> 32) & 0xff);
2994 + cdb[16] = (U8)(((io_info->pdBlock) >> 24) & 0xff);
2995 + cdb[17] = (U8)(((io_info->pdBlock) >> 16) & 0xff);
2996 + cdb[18] = (U8)(((io_info->pdBlock) >> 8) & 0xff);
2997 + cdb[19] = (U8)((io_info->pdBlock) & 0xff);
2988 2998
2989 2999 /* Logical block reference tag */
2990 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2991 - &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
2992 - BIG_ENDIAN(ref_tag));
2993 -
2994 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
2995 - &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask,
2996 - 0xffff);
3000 + ddi_put32(acc_handle, &scsi_io_request->CDB.EEDP32.PrimaryReferenceTag,
3001 + BE_32(ref_tag));
2997 3002
2998 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
2999 - &scsi_io_request->DataLength,
3003 + ddi_put16(acc_handle,
3004 + &scsi_io_request->CDB.EEDP32.PrimaryApplicationTagMask, 0xffff);
3005 +
3006 + ddi_put32(acc_handle, &scsi_io_request->DataLength,
3000 3007 ((io_info->numBlocks)*512));
3001 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3002 - &scsi_io_request->IoFlags,32); /* Specify 32-byte cdb */
3008 + /* Specify 32-byte cdb */
3009 + ddi_put16(acc_handle, &scsi_io_request->IoFlags, 32);
3003 3010
3004 3011 /* Transfer length */
3005 - cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
3012 + cdb[28] = (U8)(((io_info->numBlocks) >> 24) & 0xff);
3006 3013 cdb[29] = (U8)(((io_info->numBlocks) >> 16) & 0xff);
3007 3014 cdb[30] = (U8)(((io_info->numBlocks) >> 8) & 0xff);
3008 3015 cdb[31] = (U8)((io_info->numBlocks) & 0xff);
3009 3016
3010 3017 /* set SCSI IO EEDPFlags */
3011 - EEDPFlags = ddi_get16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3012 - &scsi_io_request->EEDPFlags);
3013 - Control = ddi_get32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3014 - &scsi_io_request->Control);
3018 + EEDPFlags = ddi_get16(acc_handle, &scsi_io_request->EEDPFlags);
3019 + Control = ddi_get32(acc_handle, &scsi_io_request->Control);
3015 3020
3016 - // set SCSI IO EEDPFlags bits
3021 + /* set SCSI IO EEDPFlags bits */
3017 3022 if (io_info->isRead) {
3018 - // For READ commands, the EEDPFlags shall be set to specify to
3019 - // Increment the Primary Reference Tag, to Check the Reference
3020 - // Tag, and to Check and Remove the Protection Information
3021 - // fields.
3022 - EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3023 - MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
3024 - MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
3025 - MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
3026 - MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
3023 + /*
3024 + * For READ commands, the EEDPFlags shall be set to specify to
3025 + * Increment the Primary Reference Tag, to Check the Reference
3026 + * Tag, and to Check and Remove the Protection Information
3027 + * fields.
3028 + */
3029 + EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3030 + MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
3031 + MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
3032 + MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
3033 + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
3034 + } else {
3035 + /*
3036 + * For WRITE commands, the EEDPFlags shall be set to specify to
3037 + * Increment the Primary Reference Tag, and to Insert
3038 + * Protection Information fields.
3039 + */
3040 + EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3041 + MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
3027 3042 }
3028 - else {
3029 - // For WRITE commands, the EEDPFlags shall be set to specify to
3030 - // Increment the Primary Reference Tag, and to Insert
3031 - // Protection Information fields.
3032 - EEDPFlags = MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
3033 - MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
3034 - }
3035 3043 Control |= (0x4 << 26);
3036 3044
3037 - ddi_put16(instance->mpi2_frame_pool_dma_obj.acc_handle,
3038 - &scsi_io_request->EEDPFlags, EEDPFlags);
3039 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3040 - &scsi_io_request->Control, Control);
3041 - ddi_put32(instance->mpi2_frame_pool_dma_obj.acc_handle,
3042 - &scsi_io_request->EEDPBlockSize,
3043 - MRSAS_EEDPBLOCKSIZE);
3045 + ddi_put16(acc_handle, &scsi_io_request->EEDPFlags, EEDPFlags);
3046 + ddi_put32(acc_handle, &scsi_io_request->Control, Control);
3047 + ddi_put32(acc_handle,
3048 + &scsi_io_request->EEDPBlockSize, MRSAS_EEDPBLOCKSIZE);
3044 3049 }
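
The 32-byte variable-length CDB built above can be checked in isolation: the
64-bit LBA lands big-endian in bytes 12-19 and the 32-bit transfer length in
bytes 28-31. A stand-alone sketch, assuming the standard SBC values (opcode
0x7F, additional CDB length 0x18, READ(32)/WRITE(32) service actions
0x09/0x0B); all constants and names here are illustrative, not the driver's:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static void
	pack_cdb32(uint8_t cdb[32], uint64_t lba, uint32_t nblocks, int is_read)
	{
		memset(cdb, 0, 32);
		cdb[0] = 0x7F;			/* VARIABLE LENGTH CDB */
		cdb[7] = 0x18;			/* additional CDB length (24) */
		cdb[9] = is_read ? 0x09 : 0x0B;	/* READ(32)/WRITE(32) action */
		for (int i = 0; i < 8; i++)	/* LBA, MSB first, bytes 12-19 */
			cdb[12 + i] = (uint8_t)(lba >> (56 - 8 * i));
		for (int i = 0; i < 4; i++)	/* length, MSB first, 28-31 */
			cdb[28 + i] = (uint8_t)(nblocks >> (24 - 8 * i));
	}

	int
	main(void)
	{
		uint8_t cdb[32];

		pack_cdb32(cdb, 0x123456789AULL, 256, 1);
		for (int i = 0; i < 32; i++)
			(void) printf("%02x%s", cdb[i],
			    (i % 8 == 7) ? "\n" : " ");
		return (0);
	}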
3045 3050
3046 3051
3047 3052 /*
3048 3053 * mrsas_tbolt_set_pd_lba - Sets PD LBA
3049 3054 * @cdb: CDB
3050 3055 * @cdb_len: cdb length
3051 3056 * @start_blk: Start block of IO
3052 3057 *
3053 3058 * Used to set the PD LBA in CDB for FP IOs
3054 3059 */
3055 -void
3056 -mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk, U32 num_blocks, U8 DifCapable)
3060 +static void
3061 +mrsas_tbolt_set_pd_lba(U8 cdb[], uint8_t *cdb_len_ptr, U64 start_blk,
3062 + U32 num_blocks)
3057 3063 {
3058 3064 U8 cdb_len = *cdb_len_ptr;
3059 3065 U8 flagvals = 0, opcode = 0, groupnum = 0, control = 0;
3060 3066
3061 3067 /* Some drives don't support 16/12 byte CDB's, convert to 10 */
3062 - if (((cdb_len == 12) || (cdb_len == 16)) &&
3063 - (start_blk <= 0xffffffff)) {
3068 + if (((cdb_len == 12) || (cdb_len == 16)) &&
3069 + (start_blk <= 0xffffffff)) {
3064 3070 if (cdb_len == 16) {
3065 - con_log(CL_ANN, (CE_NOTE, "Converting READ/WRITE(16) to READ10\n"));
3071 + con_log(CL_ANN,
3072 + (CE_NOTE, "Converting READ/WRITE(16) to READ10"));
3066 3073 opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
3067 3074 flagvals = cdb[1];
3068 3075 groupnum = cdb[14];
3069 3076 control = cdb[15];
3070 3077 } else {
3071 - con_log(CL_ANN, (CE_NOTE, "Converting READ/WRITE(12) to READ10\n"));
3078 + con_log(CL_ANN,
3079 + (CE_NOTE, "Converting READ/WRITE(12) to READ10"));
3072 3080 opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
3073 3081 flagvals = cdb[1];
3074 3082 groupnum = cdb[10];
3075 3083 control = cdb[11];
3076 3084 }
3077 3085
3078 - memset(cdb, 0, sizeof(cdb));
3086 + bzero(cdb, sizeof (cdb));
3079 3087
3080 3088 cdb[0] = opcode;
3081 3089 cdb[1] = flagvals;
3082 3090 cdb[6] = groupnum;
3083 3091 cdb[9] = control;
3084 3092 /* Set transfer length */
3085 3093 cdb[8] = (U8)(num_blocks & 0xff);
3086 3094 cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3087 3095 cdb_len = 10;
3088 3096 } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
3089 - /* Convert to 16 byte CDB for large LBA's */
3090 - con_log(CL_ANN, (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB\n"));
3091 - switch (cdb_len) {
3092 - case 6:
3093 - opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3094 - control = cdb[5];
3095 - break;
3096 - case 10:
3097 - opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3098 - flagvals = cdb[1];
3099 - groupnum = cdb[6];
3100 - control = cdb[9];
3101 - break;
3102 - case 12:
3103 - opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3104 - flagvals = cdb[1];
3105 - groupnum = cdb[10];
3106 - control = cdb[11];
3107 - break;
3108 - }
3097 + /* Convert to 16 byte CDB for large LBA's */
3098 + con_log(CL_ANN,
3099 + (CE_NOTE, "Converting 6/10/12 CDB to 16 byte CDB"));
3100 + switch (cdb_len) {
3101 + case 6:
3102 + opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
3103 + control = cdb[5];
3104 + break;
3105 + case 10:
3106 + opcode = cdb[0] == READ_10 ? READ_16 : WRITE_16;
3107 + flagvals = cdb[1];
3108 + groupnum = cdb[6];
3109 + control = cdb[9];
3110 + break;
3111 + case 12:
3112 + opcode = cdb[0] == READ_12 ? READ_16 : WRITE_16;
3113 + flagvals = cdb[1];
3114 + groupnum = cdb[10];
3115 + control = cdb[11];
3116 + break;
3117 + }
3109 3118
3110 - memset(cdb, 0, sizeof(cdb));
3119 + bzero(cdb, sizeof (cdb));
3111 3120
3112 - cdb[0] = opcode;
3113 - cdb[1] = flagvals;
3114 - cdb[14] = groupnum;
3115 - cdb[15] = control;
3121 + cdb[0] = opcode;
3122 + cdb[1] = flagvals;
3123 + cdb[14] = groupnum;
3124 + cdb[15] = control;
3116 3125
3117 - /* Transfer length */
3118 - cdb[13] = (U8)(num_blocks & 0xff);
3119 - cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3120 - cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3121 - cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3126 + /* Transfer length */
3127 + cdb[13] = (U8)(num_blocks & 0xff);
3128 + cdb[12] = (U8)((num_blocks >> 8) & 0xff);
3129 + cdb[11] = (U8)((num_blocks >> 16) & 0xff);
3130 + cdb[10] = (U8)((num_blocks >> 24) & 0xff);
3122 3131
3123 - /* Specify 16-byte cdb */
3124 - cdb_len = 16;
3125 - } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3126 - /* convert to 10 byte CDB */
3127 - opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3128 - control = cdb[5];
3129 -
3130 - memset(cdb, 0, sizeof(cdb));
3131 - cdb[0] = opcode;
3132 - cdb[9] = control;
3132 + /* Specify 16-byte cdb */
3133 + cdb_len = 16;
3134 + } else if ((cdb_len == 6) && (start_blk > 0x1fffff)) {
3135 + /* convert to 10 byte CDB */
3136 + opcode = cdb[0] == READ_6 ? READ_10 : WRITE_10;
3137 + control = cdb[5];
3133 3138
3134 - /* Set transfer length */
3135 - cdb[8] = (U8)(num_blocks & 0xff);
3136 - cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3139 + bzero(cdb, sizeof (cdb));
3140 + cdb[0] = opcode;
3141 + cdb[9] = control;
3137 3142
3138 - /* Specify 10-byte cdb */
3139 - cdb_len = 10;
3143 + /* Set transfer length */
3144 + cdb[8] = (U8)(num_blocks & 0xff);
3145 + cdb[7] = (U8)((num_blocks >> 8) & 0xff);
3146 +
3147 + /* Specify 10-byte cdb */
3148 + cdb_len = 10;
3140 3149 }
3141 3150
3142 3151
3143 3152 /* Fall through Normal case, just load LBA here */
3144 3153 switch (cdb_len) {
3145 - case 6:
3146 - {
3147 - U8 val = cdb[1] & 0xE0;
3148 - cdb[3] = (U8)(start_blk & 0xff);
3149 - cdb[2] = (U8)((start_blk >> 8) & 0xff);
3150 - cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3151 - break;
3152 - }
3153 - case 10:
3154 - cdb[5] = (U8)(start_blk & 0xff);
3155 - cdb[4] = (U8)((start_blk >> 8) & 0xff);
3156 - cdb[3] = (U8)((start_blk >> 16) & 0xff);
3157 - cdb[2] = (U8)((start_blk >> 24) & 0xff);
3158 - break;
3159 - case 12:
3160 - cdb[5] = (U8)(start_blk & 0xff);
3161 - cdb[4] = (U8)((start_blk >> 8) & 0xff);
3162 - cdb[3] = (U8)((start_blk >> 16) & 0xff);
3163 - cdb[2] = (U8)((start_blk >> 24) & 0xff);
3164 - break;
3154 + case 6:
3155 + {
3156 + U8 val = cdb[1] & 0xE0;
3157 + cdb[3] = (U8)(start_blk & 0xff);
3158 + cdb[2] = (U8)((start_blk >> 8) & 0xff);
3159 + cdb[1] = val | ((U8)(start_blk >> 16) & 0x1f);
3160 + break;
3161 + }
3162 + case 10:
3163 + cdb[5] = (U8)(start_blk & 0xff);
3164 + cdb[4] = (U8)((start_blk >> 8) & 0xff);
3165 + cdb[3] = (U8)((start_blk >> 16) & 0xff);
3166 + cdb[2] = (U8)((start_blk >> 24) & 0xff);
3167 + break;
3168 + case 12:
3169 + cdb[5] = (U8)(start_blk & 0xff);
3170 + cdb[4] = (U8)((start_blk >> 8) & 0xff);
3171 + cdb[3] = (U8)((start_blk >> 16) & 0xff);
3172 + cdb[2] = (U8)((start_blk >> 24) & 0xff);
3173 + break;
3165 3174
3166 - case 16:
3167 - cdb[9] = (U8)(start_blk & 0xff);
3168 - cdb[8] = (U8)((start_blk >> 8) & 0xff);
3169 - cdb[7] = (U8)((start_blk >> 16) & 0xff);
3170 - cdb[6] = (U8)((start_blk >> 24) & 0xff);
3171 - cdb[5] = (U8)((start_blk >> 32) & 0xff);
3172 - cdb[4] = (U8)((start_blk >> 40) & 0xff);
3173 - cdb[3] = (U8)((start_blk >> 48) & 0xff);
3174 - cdb[2] = (U8)((start_blk >> 56) & 0xff);
3175 - break;
3175 + case 16:
3176 + cdb[9] = (U8)(start_blk & 0xff);
3177 + cdb[8] = (U8)((start_blk >> 8) & 0xff);
3178 + cdb[7] = (U8)((start_blk >> 16) & 0xff);
3179 + cdb[6] = (U8)((start_blk >> 24) & 0xff);
3180 + cdb[5] = (U8)((start_blk >> 32) & 0xff);
3181 + cdb[4] = (U8)((start_blk >> 40) & 0xff);
3182 + cdb[3] = (U8)((start_blk >> 48) & 0xff);
3183 + cdb[2] = (U8)((start_blk >> 56) & 0xff);
3184 + break;
3176 3185 }
3177 3186
3178 3187 *cdb_len_ptr = cdb_len;
3179 3188 }
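
The conversion rules above reduce to picking the smallest CDB that can address
a given start block: 12/16-byte CDBs drop to 10 when the LBA fits in 32 bits,
anything shorter than 16 bytes is promoted to 16 for LBAs beyond 32 bits, and
a 6-byte CDB is promoted to 10 once the LBA exceeds its 21-bit field. A small
stand-alone helper mirroring those conditions (a sketch, not the driver's
routine):

	#include <stdint.h>
	#include <stdio.h>

	static int
	pick_cdb_len(int cur_len, uint64_t start_blk)
	{
		if ((cur_len == 12 || cur_len == 16) &&
		    start_blk <= 0xFFFFFFFFULL)
			return (10);	/* some drives lack 12/16-byte CDBs */
		if (cur_len < 16 && start_blk > 0xFFFFFFFFULL)
			return (16);	/* >32-bit LBAs need READ/WRITE(16) */
		if (cur_len == 6 && start_blk > 0x1FFFFF)
			return (10);	/* 6-byte CDBs carry a 21-bit LBA */
		return (cur_len);
	}

	int
	main(void)
	{
		(void) printf("%d\n", pick_cdb_len(16, 0x1000));	/* 10 */
		(void) printf("%d\n", pick_cdb_len(10, 0x100000000ULL)); /* 16 */
		(void) printf("%d\n", pick_cdb_len(6, 0x200000));	/* 10 */
		return (0);
	}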
3180 3189
3181 3190
3182 -U8
3191 +static int
3183 3192 mrsas_tbolt_check_map_info(struct mrsas_instance *instance)
3184 3193 {
3185 3194 MR_FW_RAID_MAP_ALL *ld_map;
3186 3195
3187 3196 if (!mrsas_tbolt_get_ld_map_info(instance)) {
3188 3197
3189 3198 ld_map = instance->ld_map[(instance->map_id & 1)];
3190 3199
3191 - con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d\n",
3200 + con_log(CL_ANN1, (CE_NOTE, "ldCount=%d, map size=%d",
3192 3201 ld_map->raidMap.ldCount, ld_map->raidMap.totalSize));
3193 3202
3194 - if (MR_ValidateMapInfo(instance->ld_map[(instance->map_id & 1)], instance->load_balance_info)) {
3195 - con_log(CL_ANN, (CE_CONT,
3196 - "MR_ValidateMapInfo success"));
3203 + if (MR_ValidateMapInfo(instance->ld_map[
3204 + (instance->map_id & 1)], instance->load_balance_info)) {
3205 + con_log(CL_ANN,
3206 + (CE_CONT, "MR_ValidateMapInfo success"));
3197 3207
3198 - instance->fast_path_io = 1;
3199 - con_log(CL_ANN, (CE_NOTE,
3200 - "instance->fast_path_io %d \n",instance->fast_path_io));
3208 + instance->fast_path_io = 1;
3209 + con_log(CL_ANN,
3210 + (CE_NOTE, "instance->fast_path_io %d",
3211 + instance->fast_path_io));
3201 3212
3202 3213 return (DDI_SUCCESS);
3203 3214 }
3204 3215
3205 3216 }
3206 3217
3207 - instance->fast_path_io = 0;
3218 + instance->fast_path_io = 0;
3208 3219 cmn_err(CE_WARN, "MR_ValidateMapInfo failed");
3209 3220 con_log(CL_ANN, (CE_NOTE,
3210 - "instance->fast_path_io %d \n",instance->fast_path_io));
3221 + "instance->fast_path_io %d", instance->fast_path_io));
3211 3222
3212 -
3213 3223 return (DDI_FAILURE);
3214 3224 }
3225 +
3215 3226 /*
3216 3227 * Marks HBA as bad. This will be called either when an
3217 3228 * IO packet times out even after 3 FW resets
3218 3229  * or FW is found to be faulted even after 3 continuous resets.
3219 3230 */
3220 3231
3221 -int
3232 +void
3222 3233 mrsas_tbolt_kill_adapter(struct mrsas_instance *instance)
3223 3234 {
3224 - cmn_err(CE_WARN, "TBOLT Kill adapter called\n");
3235 + cmn_err(CE_NOTE, "TBOLT Kill adapter called");
3225 3236
3226 3237 if (instance->deadadapter == 1)
3227 - return (DDI_FAILURE);
3238 + return;
3228 3239
3229 3240 con_log(CL_ANN1, (CE_NOTE, "tbolt_kill_adapter: "
3230 - "Writing to doorbell with MFI_STOP_ADP "));
3241 + "Writing to doorbell with MFI_STOP_ADP "));
3231 3242 mutex_enter(&instance->ocr_flags_mtx);
3232 3243 instance->deadadapter = 1;
3233 3244 mutex_exit(&instance->ocr_flags_mtx);
3234 3245 instance->func_ptr->disable_intr(instance);
3235 3246 WR_RESERVED0_REGISTER(MFI_STOP_ADP, instance);
3236 3247 /* Flush */
3237 - RD_RESERVED0_REGISTER(instance);
3248 + (void) RD_RESERVED0_REGISTER(instance);
3238 3249
3239 3250 (void) mrsas_print_pending_cmds(instance);
3240 - mrsas_complete_pending_cmds(instance);
3241 - return (DDI_SUCCESS);
3251 + (void) mrsas_complete_pending_cmds(instance);
3242 3252 }
3243 -void mrsas_reset_reply_desc(struct mrsas_instance *instance)
3253 +
3254 +void
3255 +mrsas_reset_reply_desc(struct mrsas_instance *instance)
3244 3256 {
3245 3257 int i;
3246 - MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3247 - instance->reply_read_index= 0;
3248 -
3258 + MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
3259 + instance->reply_read_index = 0;
3260 +
3249 3261 /* initializing reply address to 0xFFFFFFFF */
3250 3262 reply_desc = instance->reply_frame_pool;
3251 3263
3252 3264 for (i = 0; i < instance->reply_q_depth; i++) {
3253 3265 reply_desc->Words = (uint64_t)~0;
3254 3266 reply_desc++;
3255 3267 }
3256 3268 }
3257 3269
3258 3270 int
3259 3271 mrsas_tbolt_reset_ppc(struct mrsas_instance *instance)
3260 3272 {
3261 - uint32_t status=0x00;
3273 + uint32_t status = 0x00;
3262 3274 uint32_t retry = 0;
3263 - uint32_t seq_num;
3264 3275 uint32_t cur_abs_reg_val;
3265 3276 uint32_t fw_state;
3266 - union mrsas_evt_class_locale class_locale;
3267 3277 uint32_t abs_state;
3268 3278 uint32_t i;
3269 3279
3270 3280 con_log(CL_ANN, (CE_NOTE,
3271 - "mrsas_tbolt_reset_ppc entered\n "));
3281 + "mrsas_tbolt_reset_ppc entered"));
3272 3282
3273 3283 if (instance->deadadapter == 1) {
3274 3284 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3275 3285 "no more resets as HBA has been marked dead ");
3276 3286 return (DDI_FAILURE);
3277 3287 }
3278 3288
3279 3289 mutex_enter(&instance->ocr_flags_mtx);
3280 3290 instance->adapterresetinprogress = 1;
3281 3291 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc:"
3282 3292 "adpterresetinprogress flag set, time %llx", gethrtime()));
3283 3293 mutex_exit(&instance->ocr_flags_mtx);
3284 3294
3285 3295 instance->func_ptr->disable_intr(instance);
3286 3296
3287 - /*Add delay inorder to complete the ioctl & io cmds in-flight */
3288 - for (i = 0; i<3000; i++) {
3297 +	/* Add delay in order to complete the ioctl & io cmds in-flight */
3298 + for (i = 0; i < 3000; i++) {
3289 3299 drv_usecwait(MILLISEC); /* wait for 1000 usecs */
3290 3300 }
3291 3301
3292 - instance->reply_read_index= 0;
3302 + instance->reply_read_index = 0;
3293 3303
3294 3304 retry_reset:
3295 3305 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3296 3306 ":Resetting TBOLT "));
3297 3307
3298 3308 WR_TBOLT_IB_WRITE_SEQ(0xF, instance);
3299 3309 WR_TBOLT_IB_WRITE_SEQ(4, instance);
3300 3310 WR_TBOLT_IB_WRITE_SEQ(0xb, instance);
3301 3311 WR_TBOLT_IB_WRITE_SEQ(2, instance);
3302 3312 WR_TBOLT_IB_WRITE_SEQ(7, instance);
3303 3313 WR_TBOLT_IB_WRITE_SEQ(0xd, instance);
3304 3314 con_log(CL_ANN1, (CE_NOTE,
3305 3315 "mrsas_tbolt_reset_ppc: magic number written "
3306 - "to write sequence register\n"));
3316 + "to write sequence register"));
3307 3317 delay(100 * drv_usectohz(MILLISEC));
3308 3318 status = RD_TBOLT_HOST_DIAG(instance);
3309 3319 con_log(CL_ANN1, (CE_NOTE,
3310 3320 "mrsas_tbolt_reset_ppc: READ HOSTDIAG SUCCESS "
3311 - "to write sequence register\n"));
3312 -
3321 + "to write sequence register"));
3322 +
3313 3323 while (status & DIAG_TBOLT_RESET_ADAPTER) {
3314 3324 delay(100 * drv_usectohz(MILLISEC));
3315 3325 status = RD_TBOLT_HOST_DIAG(instance);
3316 3326 if (retry++ == 100) {
3317 3327 cmn_err(CE_WARN,
3318 3328 "mrsas_tbolt_reset_ppc:"
3319 3329 "resetadapter bit is set already "
3320 - "check retry count %d\n", retry);
3330 + "check retry count %d", retry);
3321 3331 return (DDI_FAILURE);
3322 3332 }
3323 3333 }
3324 3334
3325 3335 WR_TBOLT_HOST_DIAG(status | DIAG_TBOLT_RESET_ADAPTER, instance);
3326 3336 delay(100 * drv_usectohz(MILLISEC));
3327 3337
3328 - ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3329 - (uint8_t *)((uintptr_t)(instance)->regmap +
3330 - RESET_TBOLT_STATUS_OFF),4,DDI_DEV_AUTOINCR);
3331 -
3338 + ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3339 + (uint8_t *)((uintptr_t)(instance)->regmap +
3340 + RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3341 +
3332 3342 while ((status & DIAG_TBOLT_RESET_ADAPTER)) {
3333 3343 delay(100 * drv_usectohz(MILLISEC));
3334 - ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3335 - (uint8_t *)((uintptr_t)(instance)->regmap +
3336 - RESET_TBOLT_STATUS_OFF),4,DDI_DEV_AUTOINCR);
3344 + ddi_rep_get8((instance)->regmap_handle, (uint8_t *)&status,
3345 + (uint8_t *)((uintptr_t)(instance)->regmap +
3346 + RESET_TBOLT_STATUS_OFF), 4, DDI_DEV_AUTOINCR);
3337 3347 if (retry++ == 100) {
3338 3348 /* Dont call kill adapter here */
3339 - /* RESET BIT ADAPTER is cleared by firmare */
3340 - //mrsas_tbolt_kill_adapter(instance);
3341 - cmn_err(CE_WARN, "mr_sas %d: %s(): RESET FAILED; return failure!!!", instance->instance, __func__);
3349 +			/* RESET BIT ADAPTER is cleared by firmware */
3350 + /* mrsas_tbolt_kill_adapter(instance); */
3351 + cmn_err(CE_WARN,
3352 + "mr_sas %d: %s(): RESET FAILED; return failure!!!",
3353 + instance->instance, __func__);
3342 3354 return (DDI_FAILURE);
3343 3355 }
3344 3356 }
3345 -
3346 - con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3357 +
3358 + con_log(CL_ANN,
3359 + (CE_NOTE, "mrsas_tbolt_reset_ppc: Adapter reset complete"));
3347 3360 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3348 3361 "Calling mfi_state_transition_to_ready"));
3349 -
3362 +
3350 3363 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3351 3364 retry = 0;
3352 3365 while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
3353 3366 delay(100 * drv_usectohz(MILLISEC));
3354 3367 abs_state = instance->func_ptr->read_fw_status_reg(instance);
3355 3368 }
3356 3369 if (abs_state <= MFI_STATE_FW_INIT) {
3357 - cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT"
3358 - "state = 0x%x, RETRY RESET.\n", abs_state);
3370 + cmn_err(CE_WARN,
3371 +		    "mrsas_tbolt_reset_ppc: firmware state < MFI_STATE_FW_INIT "
3372 + "state = 0x%x, RETRY RESET.", abs_state);
3359 3373 goto retry_reset;
3360 3374 }
3361 3375
3362 3376 /* Mark HBA as bad, if FW is fault after 3 continuous resets */
3363 3377 if (mfi_state_transition_to_ready(instance) ||
3364 3378 debug_tbolt_fw_faults_after_ocr_g == 1) {
3365 3379 cur_abs_reg_val =
3366 3380 instance->func_ptr->read_fw_status_reg(instance);
3367 3381 fw_state = cur_abs_reg_val & MFI_STATE_MASK;
3368 3382
3369 3383 con_log(CL_ANN1, (CE_NOTE,
3370 3384 "mrsas_tbolt_reset_ppc :before fake: FW is not ready "
3371 3385 "FW state = 0x%x", fw_state));
3372 3386 if (debug_tbolt_fw_faults_after_ocr_g == 1)
3373 3387 fw_state = MFI_STATE_FAULT;
3374 3388
3375 - con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
3389 + con_log(CL_ANN,
3390 + (CE_NOTE, "mrsas_tbolt_reset_ppc : FW is not ready "
3376 3391 "FW state = 0x%x", fw_state));
3377 3392
3378 3393 if (fw_state == MFI_STATE_FAULT) {
3379 - // increment the count
3394 + /* increment the count */
3380 3395 instance->fw_fault_count_after_ocr++;
3381 3396 if (instance->fw_fault_count_after_ocr
3382 3397 < MAX_FW_RESET_COUNT) {
3383 3398 cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3384 3399 "FW is in fault after OCR count %d "
3385 - "Retry Reset",
3400 + "Retry Reset",
3386 3401 instance->fw_fault_count_after_ocr);
3387 3402 goto retry_reset;
3388 3403
3389 3404 } else {
3390 3405 cmn_err(CE_WARN, "mrsas %d: %s:"
3391 - "Max Reset Count exceeded >%d"
3406 +				    "Max Reset Count exceeded >%d "
3392 3407 "Mark HBA as bad, KILL adapter",
3393 - instance->instance, __func__, MAX_FW_RESET_COUNT);
3408 + instance->instance, __func__,
3409 + MAX_FW_RESET_COUNT);
3394 3410
3395 3411 mrsas_tbolt_kill_adapter(instance);
3396 3412 return (DDI_FAILURE);
3397 3413 }
3398 3414 }
3399 3415 }
3400 -
3401 - // reset the counter as FW is up after OCR
3416 +
3417 + /* reset the counter as FW is up after OCR */
3402 3418 instance->fw_fault_count_after_ocr = 0;
3403 -
3419 +
3404 3420 mrsas_reset_reply_desc(instance);
3405 3421
3406 -
3422 +
3407 3423 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3408 3424 "Calling mrsas_issue_init_mpi2"));
3409 3425 abs_state = mrsas_issue_init_mpi2(instance);
3410 - if(abs_state == DDI_FAILURE) {
3411 - cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3412 - "INIT failed Retrying Reset");
3413 - goto retry_reset;
3426 + if (abs_state == (uint32_t)DDI_FAILURE) {
3427 + cmn_err(CE_WARN, "mrsas_tbolt_reset_ppc: "
3428 + "INIT failed Retrying Reset");
3429 + goto retry_reset;
3414 3430 }
3415 3431 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3416 3432 "mrsas_issue_init_mpi2 Done"));
3417 3433
3418 3434 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3419 - "Calling mrsas_print_pending_cmd\n"));
3420 - mrsas_print_pending_cmds(instance);
3435 + "Calling mrsas_print_pending_cmd"));
3436 + (void) mrsas_print_pending_cmds(instance);
3421 3437 con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3422 - "mrsas_print_pending_cmd done\n"));
3438 + "mrsas_print_pending_cmd done"));
3423 3439
3424 3440 instance->func_ptr->enable_intr(instance);
3425 3441 instance->fw_outstanding = 0;
3426 3442
3427 3443 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3428 3444 "Calling mrsas_issue_pending_cmds"));
3429 - mrsas_issue_pending_cmds(instance);
3445 + (void) mrsas_issue_pending_cmds(instance);
3430 3446 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3431 - "issue_pending_cmds done.\n"));
3447 + "issue_pending_cmds done."));
3432 3448
3433 3449 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3434 3450 "Calling aen registration"));
3435 3451
3436 3452 instance->aen_cmd->retry_count_for_ocr = 0;
3437 3453 instance->aen_cmd->drv_pkt_time = 0;
3438 3454
3439 3455 instance->func_ptr->issue_cmd(instance->aen_cmd, instance);
3440 3456
3441 - con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag.\n"));
3457 + con_log(CL_ANN1, (CE_NOTE, "Unsetting adpresetinprogress flag."));
3442 3458 mutex_enter(&instance->ocr_flags_mtx);
3443 3459 instance->adapterresetinprogress = 0;
3444 3460 mutex_exit(&instance->ocr_flags_mtx);
3445 3461 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_reset_ppc: "
3446 3462 "adpterresetinprogress flag unset"));
3447 3463
3448 - con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done\n"));
3464 + con_log(CL_ANN, (CE_NOTE, "mrsas_tbolt_reset_ppc done"));
3449 3465 return (DDI_SUCCESS);
3450 3466
3451 3467 }
3452 3468
3453 3469
3454 3470 /*
3455 3471  * mrsas_tbolt_sync_map_info - Keeps the driver's LD map in sync with FW
3456 3472  * @instance: Adapter soft state
3457 3473  *
3458 3474  * Issues a pended internal command (DCMD) carrying the driver's view of
3459 3475  * the LD/target map. The FW completes it whenever its RAID map changes,
3460 3476  * letting the driver fetch and validate the new map.
3461 3477 */
3462 3478
3463 -int
3479 +static int
3464 3480 mrsas_tbolt_sync_map_info(struct mrsas_instance *instance)
3465 3481 {
3466 - int ret = 0, i;
3482 + int ret = 0, i;
3467 3483 struct mrsas_cmd *cmd = NULL;
3468 3484 struct mrsas_dcmd_frame *dcmd;
3469 3485 uint32_t size_sync_info, num_lds;
3470 3486 LD_TARGET_SYNC *ci = NULL;
3471 3487 MR_FW_RAID_MAP_ALL *map;
3472 3488 MR_LD_RAID *raid;
3473 3489 LD_TARGET_SYNC *ld_sync;
3474 3490 uint32_t ci_h = 0;
3475 3491 uint32_t size_map_info;
3476 3492
3477 3493 cmd = get_raid_msg_pkt(instance);
3478 -
3494 +
3479 3495 if (cmd == NULL) {
3480 - cmn_err(CE_WARN,
3481 - "Failed to get a cmd from free-pool in mrsas_tbolt_sync_map_info(). ");
3496 + cmn_err(CE_WARN, "Failed to get a cmd from free-pool in "
3497 +		    "mrsas_tbolt_sync_map_info().");
3482 3498 return (DDI_FAILURE);
3483 3499 }
3484 -
3500 +
3485 3501 /* Clear the frame buffer and assign back the context id */
3486 - (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3502 + bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3487 3503 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3488 - cmd->index);
3504 + cmd->index);
3489 3505 bzero(cmd->scsi_io_request, sizeof (Mpi2RaidSCSIIORequest_t));
3490 3506
3491 -
3507 +
3492 3508 map = instance->ld_map[instance->map_id & 1];
3493 3509
3494 3510 num_lds = map->raidMap.ldCount;
3495 3511
3496 3512 dcmd = &cmd->frame->dcmd;
3497 3513
3498 3514 size_sync_info = sizeof (LD_TARGET_SYNC) * num_lds;
3499 3515
3500 - con_log(CL_ANN, (CE_NOTE,
3501 - "size_sync_info =0x%x ; ld count = 0x%x \n ",
3502 - size_sync_info, num_lds));
3516 +	con_log(CL_ANN, (CE_NOTE, "size_sync_info = 0x%x; ld count = 0x%x",
3517 + size_sync_info, num_lds));
3503 3518
3504 - ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3519 + ci = (LD_TARGET_SYNC *)instance->ld_map[(instance->map_id - 1) & 1];
3505 3520
3506 - memset(ci, 0, sizeof(MR_FW_RAID_MAP_ALL));
3521 + bzero(ci, sizeof (MR_FW_RAID_MAP_ALL));
3507 3522 ci_h = instance->ld_map_phy[(instance->map_id - 1) & 1];
3508 3523
3509 - (void) memset(dcmd->mbox.b, 0, DCMD_MBOX_SZ);
3524 + bzero(dcmd->mbox.b, DCMD_MBOX_SZ);
3510 3525
3511 3526 ld_sync = (LD_TARGET_SYNC *)ci;
3512 3527
3513 3528 for (i = 0; i < num_lds; i++, ld_sync++) {
3514 3529 raid = MR_LdRaidGet(i, map);
3515 3530
3516 - con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x\n",
3531 + con_log(CL_ANN1,
3532 + (CE_NOTE, "i : 0x%x, Seq Num : 0x%x, Sync Reqd : 0x%x",
3517 3533 i, raid->seqNum, raid->flags.ldSyncRequired));
3518 3534
3519 3535 ld_sync->ldTargetId = MR_GetLDTgtId(i, map);
3520 3536
3521 - con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x \n",
3537 + con_log(CL_ANN1, (CE_NOTE, "i : 0x%x, tgt : 0x%x",
3522 3538 i, ld_sync->ldTargetId));
3523 3539
3524 3540 ld_sync->seqNum = raid->seqNum;
3525 3541 }
3526 3542
3527 3543
3528 - size_map_info = sizeof(MR_FW_RAID_MAP) +
3529 - (sizeof(MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3544 + size_map_info = sizeof (MR_FW_RAID_MAP) +
3545 + (sizeof (MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1));
3530 3546
3531 3547 dcmd->cmd = MFI_CMD_OP_DCMD;
3532 3548 dcmd->cmd_status = 0xFF;
3533 3549 dcmd->sge_count = 1;
3534 3550 dcmd->flags = MFI_FRAME_DIR_WRITE;
3535 3551 dcmd->timeout = 0;
3536 3552 dcmd->pad_0 = 0;
3537 3553 dcmd->data_xfer_len = size_map_info;
3538 - dcmd->mbox.b[0] = num_lds;
3554 + ASSERT(num_lds <= 255);
3555 + dcmd->mbox.b[0] = (U8)num_lds;
3539 3556 dcmd->mbox.b[1] = 1; /* Pend */
3540 3557 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
3541 3558 dcmd->sgl.sge32[0].phys_addr = ci_h;
3542 3559 dcmd->sgl.sge32[0].length = size_map_info;
3543 3560
3544 3561
3545 3562 instance->map_update_cmd = cmd;
3546 3563 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3547 3564
3548 3565 instance->func_ptr->issue_cmd(cmd, instance);
3549 -
3566 +
3550 3567 instance->unroll.syncCmd = 1;
3551 - con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x",cmd->SMID));
3568 + con_log(CL_ANN1, (CE_NOTE, "sync cmd issued. [SMID]:%x", cmd->SMID));
3552 3569
3553 3570 return (ret);
3554 3571 }
3555 3572
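Throughout the map-sync path the driver double-buffers the RAID map: the low
bit of the monotonically increasing map_id selects the current ld_map[]
buffer, and once a completion bumps map_id the previous buffer,
(map_id - 1) & 1, becomes the build area for the next sync. A tiny
illustrative sketch of the parity trick (not driver code):

	#include <stdio.h>

	static const char *ld_map[2] = { "buffer A", "buffer B" };
	static unsigned long map_id;	/* monotonically increasing */

	int
	main(void)
	{
		for (int event = 0; event < 3; event++) {
			map_id++;	/* completion path: new map arrived */
			(void) printf("map_id %lu: current=%s, "
			    "next sync builds in %s\n", map_id,
			    ld_map[map_id & 1], ld_map[(map_id - 1) & 1]);
		}
		return (0);
	}
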
3556 3573 /*
3557 3574 * abort_syncmap_cmd
3558 3575 */
3559 3576 int
3560 3577 abort_syncmap_cmd(struct mrsas_instance *instance,
3561 3578 struct mrsas_cmd *cmd_to_abort)
3562 3579 {
3563 3580 int ret = 0;
3564 3581
3565 3582 struct mrsas_cmd *cmd;
3566 3583 struct mrsas_abort_frame *abort_fr;
3567 3584
6 lines elided
3568 3585 con_log(CL_ANN1, (CE_NOTE, "chkpnt: abort_ldsync:%d", __LINE__));
3569 3586
3570 3587 cmd = get_raid_msg_mfi_pkt(instance);
3571 3588
3572 3589 if (!cmd) {
3573 3590 cmn_err(CE_WARN,
3574 3591 "Failed to get a cmd from free-pool abort_syncmap_cmd().");
3575 3592 return (DDI_FAILURE);
3576 3593 }
3577 3594 /* Clear the frame buffer and assign back the context id */
3578 - (void) memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3595 + bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3579 3596 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3580 3597 cmd->index);
3581 3598
3582 3599 abort_fr = &cmd->frame->abort;
3583 3600
3584 3601 /* prepare and issue the abort frame */
3585 3602 ddi_put8(cmd->frame_dma_obj.acc_handle,
3586 3603 &abort_fr->cmd, MFI_CMD_OP_ABORT);
3587 3604 ddi_put8(cmd->frame_dma_obj.acc_handle, &abort_fr->cmd_status,
3588 3605 MFI_CMD_STATUS_SYNC_MODE);
3589 3606 ddi_put16(cmd->frame_dma_obj.acc_handle, &abort_fr->flags, 0);
3590 3607 ddi_put32(cmd->frame_dma_obj.acc_handle, &abort_fr->abort_context,
3591 3608 cmd_to_abort->index);
3592 3609 ddi_put32(cmd->frame_dma_obj.acc_handle,
3593 3610 &abort_fr->abort_mfi_phys_addr_lo, cmd_to_abort->frame_phys_addr);
3594 3611 ddi_put32(cmd->frame_dma_obj.acc_handle,
3595 3612 &abort_fr->abort_mfi_phys_addr_hi, 0);
3596 3613
3597 3614 cmd->frame_count = 1;
3598 3615
3599 3616 mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3600 3617
3601 3618 if (instance->func_ptr->issue_cmd_in_poll_mode(instance, cmd)) {
3602 3619 con_log(CL_ANN1, (CE_WARN,
3603 3620 "abort_ldsync_cmd: issue_cmd_in_poll_mode failed"));
3604 3621 ret = -1;
3605 3622 } else {
3606 3623 ret = 0;
3607 3624 }
3608 3625
3609 3626 return_raid_msg_mfi_pkt(instance, cmd);
3610 3627
3611 3628 atomic_add_16(&instance->fw_outstanding, (-1));
3612 3629
3613 3630 return (ret);
3614 3631 }
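
abort_syncmap_cmd() pairs with the map_update_cmd pointer stashed when the sync DCMD was issued: the abort frame carries the original command's context id and frame physical address so firmware can match the request. A hedged sketch of a caller, as a reset path might use it; the helper name and the defensive NULL handling are this sketch's own, not code from the file:

	/*
	 * Illustrative only: cancel an in-flight LD-map sync command.
	 * Assumes the mrsas_instance/mrsas_cmd fields used in this file.
	 */
	static void
	cancel_outstanding_ldsync(struct mrsas_instance *instance)
	{
		struct mrsas_cmd *sync_cmd = instance->map_update_cmd;

		if (sync_cmd == NULL)
			return;		/* no sync command in flight */

		if (abort_syncmap_cmd(instance, sync_cmd) != 0) {
			cmn_err(CE_WARN, "mr_sas: ldsync abort failed");
		}
		instance->map_update_cmd = NULL;
	}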
3615 3632
[ 27 lines elided ]
3616 3633
3617 3634 #ifdef PDSUPPORT
3618 3635 int
3619 3636 mrsas_tbolt_config_pd(struct mrsas_instance *instance, uint16_t tgt,
3620 3637 uint8_t lun, dev_info_t **ldip)
3621 3638 {
3622 3639 struct scsi_device *sd;
3623 3640 dev_info_t *child;
3624 3641 int rval, dtype;
3625 3642 struct mrsas_tbolt_pd_info *pds = NULL;
3626 - uint64_t *wwn;
3627 3643
3628 -
3629 3644 con_log(CL_ANN1, (CE_NOTE, "mrsas_tbolt_config_pd: t = %d l = %d",
3630 3645 tgt, lun));
3631 3646
3632 3647 if ((child = mrsas_find_child(instance, tgt, lun)) != NULL) {
3633 3648 if (ldip) {
3634 3649 *ldip = child;
3635 3650 }
3636 3651 if (instance->mr_tbolt_pd_list[tgt].flag != MRDRV_TGT_VALID) {
3637 3652 rval = mrsas_service_evt(instance, tgt, 1,
3638 - MRSAS_EVT_UNCONFIG_TGT, NULL);
3653 + MRSAS_EVT_UNCONFIG_TGT, NULL);
3639 3654 con_log(CL_ANN1, (CE_WARN,
3640 - "mr_sas:DELETING STALE ENTRY rval = %d "
3641 - "tgt id = %d ", rval, tgt));
3655 + "mr_sas:DELETING STALE ENTRY rval = %d "
3656 + "tgt id = %d", rval, tgt));
3642 3657 return (NDI_FAILURE);
3643 3658 }
3644 3659 return (NDI_SUCCESS);
3645 3660 }
3646 3661
3647 3662 pds = (struct mrsas_tbolt_pd_info *)
3648 3663 kmem_zalloc(sizeof (struct mrsas_tbolt_pd_info), KM_SLEEP);
3649 3664 mrsas_tbolt_get_pd_info(instance, pds, tgt);
3650 3665 dtype = pds->scsiDevType;
3651 3666
3652 - /* Check for Disk*/
3667 + /* Check for Disk */
3653 3668 if ((dtype == DTYPE_DIRECT)) {
3654 3669 if ((dtype == DTYPE_DIRECT) &&
3655 3670 (LE_16(pds->fwState) != PD_SYSTEM)) {
3671 + kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3656 3672 return (NDI_FAILURE);
3657 3673 }
3658 3674 sd = kmem_zalloc(sizeof (struct scsi_device), KM_SLEEP);
3659 3675 sd->sd_address.a_hba_tran = instance->tran;
3660 3676 sd->sd_address.a_target = (uint16_t)tgt;
3661 3677 sd->sd_address.a_lun = (uint8_t)lun;
3662 3678
3663 3679 if (scsi_hba_probe(sd, NULL) == SCSIPROBE_EXISTS) {
3664 3680 rval = mrsas_config_scsi_device(instance, sd, ldip);
3665 3681 con_log(CL_DLEVEL1, (CE_NOTE,
3666 3682 "Phys. device found: tgt %d dtype %d: %s",
3667 3683 tgt, dtype, sd->sd_inq->inq_vid));
3668 3684 } else {
3669 3685 rval = NDI_FAILURE;
3670 - con_log(CL_DLEVEL1, (CE_NOTE,
3671 - "Phys. device Not found scsi_hba_probe Failed: tgt %d dtype %d: %s",
3686 + con_log(CL_DLEVEL1, (CE_NOTE, "Phys. device Not found "
3687 + "scsi_hba_probe Failed: tgt %d dtype %d: %s",
3672 3688 tgt, dtype, sd->sd_inq->inq_vid));
3673 3689 }
3674 3690
3675 3691 /* sd_unprobe is blank now. Free buffer manually */
3676 3692 if (sd->sd_inq) {
3677 3693 kmem_free(sd->sd_inq, SUN_INQSIZE);
3678 3694 sd->sd_inq = (struct scsi_inquiry *)NULL;
3679 3695 }
3680 3696 kmem_free(sd, sizeof (struct scsi_device));
3681 3697 rval = NDI_SUCCESS;
3682 3698 } else {
3683 3699 con_log(CL_ANN1, (CE_NOTE,
[ 2 lines elided ]
3684 3700 "Device not supported: tgt %d lun %d dtype %d",
3685 3701 tgt, lun, dtype));
3686 3702 rval = NDI_FAILURE;
3687 3703 }
3688 3704
3689 3705 kmem_free(pds, sizeof (struct mrsas_tbolt_pd_info));
3690 3706 con_log(CL_ANN1, (CE_NOTE, "mrsas_config_pd: return rval = %d",
3691 3707 rval));
3692 3708 return (rval);
3693 3709 }
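
For context, the tgt/lun pair handed to mrsas_tbolt_config_pd() normally comes from a child's unit address during bus config. A hypothetical caller is sketched below; the "target,lun" hex address format and the helper name are assumptions for illustration, not taken from this driver:

	/*
	 * Hypothetical: parse a "t,l" unit address (hex) and configure
	 * the physical disk. ddi_strtol(9F) returns 0 on success.
	 */
	static int
	config_pd_by_addr(struct mrsas_instance *instance, char *addr,
	    dev_info_t **childp)
	{
		long tgt, lun;
		char *p;

		if (ddi_strtol(addr, &p, 16, &tgt) != 0 || *p != ',')
			return (NDI_FAILURE);
		if (ddi_strtol(p + 1, NULL, 16, &lun) != 0)
			return (NDI_FAILURE);

		return (mrsas_tbolt_config_pd(instance,
		    (uint16_t)tgt, (uint8_t)lun, childp));
	}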
3710 +
3694 3711 static void
3695 -mrsas_tbolt_get_pd_info(struct mrsas_instance *instance, struct mrsas_tbolt_pd_info *pds,
3696 - int tgt)
3712 +mrsas_tbolt_get_pd_info(struct mrsas_instance *instance,
3713 + struct mrsas_tbolt_pd_info *pds, int tgt)
3697 3714 {
3698 3715 struct mrsas_cmd *cmd;
3699 3716 struct mrsas_dcmd_frame *dcmd;
3700 3717 dma_obj_t dcmd_dma_obj;
3701 3718
3702 3719 cmd = get_raid_msg_pkt(instance);
3703 3720
3704 3721 if (!cmd) {
3705 - con_log(CL_ANN1, (CE_WARN, "Failed to get a cmd for get pd info"));
3706 - return;
3707 - }
3722 + con_log(CL_ANN1,
3723 + (CE_WARN, "Failed to get a cmd for get pd info"));
3724 + return;
3725 + }
3708 3726
3709 - /* Clear the frame buffer and assign back the context id */
3710 - memset((char *)&cmd->frame[0], 0, sizeof (union mrsas_frame));
3727 + /* Clear the frame buffer and assign back the context id */
3728 + bzero((char *)&cmd->frame[0], sizeof (union mrsas_frame));
3711 3729 ddi_put32(cmd->frame_dma_obj.acc_handle, &cmd->frame->hdr.context,
3712 3730 cmd->index);
3713 3731
3714 3732
3715 3733 dcmd = &cmd->frame->dcmd;
3716 3734 dcmd_dma_obj.size = sizeof (struct mrsas_tbolt_pd_info);
3717 3735 dcmd_dma_obj.dma_attr = mrsas_generic_dma_attr;
3718 3736 dcmd_dma_obj.dma_attr.dma_attr_addr_hi = 0xffffffff;
3719 3737 dcmd_dma_obj.dma_attr.dma_attr_count_max = 0xffffffff;
3720 3738 dcmd_dma_obj.dma_attr.dma_attr_sgllen = 1;
3721 3739 dcmd_dma_obj.dma_attr.dma_attr_align = 1;
3722 3740
3723 3741 (void) mrsas_alloc_dma_obj(instance, &dcmd_dma_obj,
3724 3742 DDI_STRUCTURE_LE_ACC);
3725 - (void) memset(dcmd_dma_obj.buffer, 0, sizeof (struct mrsas_tbolt_pd_info));
3726 - (void) memset(dcmd->mbox.b, 0, 12);
3743 + bzero(dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info));
3744 + bzero(dcmd->mbox.b, 12);
3727 3745 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd, MFI_CMD_OP_DCMD);
3728 3746 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->cmd_status, 0);
3729 3747 ddi_put8(cmd->frame_dma_obj.acc_handle, &dcmd->sge_count, 1);
3730 - ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags, MFI_FRAME_DIR_READ);
3748 + ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->flags,
3749 + MFI_FRAME_DIR_READ);
3731 3750 ddi_put16(cmd->frame_dma_obj.acc_handle, &dcmd->timeout, 0);
3732 3751 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->data_xfer_len,
3733 3752 sizeof (struct mrsas_tbolt_pd_info));
3734 3753 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->opcode,
3735 3754 MR_DCMD_PD_GET_INFO);
3736 3755 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->mbox.w[0], tgt);
3737 3756 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].length,
3738 3757 sizeof (struct mrsas_tbolt_pd_info));
3739 3758 ddi_put32(cmd->frame_dma_obj.acc_handle, &dcmd->sgl.sge32[0].phys_addr,
3740 3759 dcmd_dma_obj.dma_cookie[0].dmac_address);
3741 3760
3742 3761 cmd->sync_cmd = MRSAS_TRUE;
3743 3762 cmd->frame_count = 1;
3744 3763
3745 3764 if (instance->tbolt) {
3746 - mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3747 - }
3765 + mr_sas_tbolt_build_mfi_cmd(instance, cmd);
3766 + }
3748 3767
3749 3768 instance->func_ptr->issue_cmd_in_sync_mode(instance, cmd);
3750 -
3769 +
3751 3770 ddi_rep_get8(cmd->frame_dma_obj.acc_handle, (uint8_t *)pds,
3752 3771 (uint8_t *)dcmd_dma_obj.buffer, sizeof (struct mrsas_tbolt_pd_info),
3753 3772 DDI_DEV_AUTOINCR);
3754 3773 (void) mrsas_free_dma_obj(instance, dcmd_dma_obj);
3755 3774 return_raid_msg_pkt(instance, cmd);
3756 3775 }
3757 3776 #endif
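
One detail worth noting in the read-back above: ddi_rep_get8() copies the reply byte-for-byte, so multi-byte fields stay in the firmware's little-endian layout and callers must convert them, which is why mrsas_tbolt_config_pd() tests LE_16(pds->fwState). A small sketch of that decode step; the helper is illustrative, not a function this file defines:

	/*
	 * Illustrative decode of the raw PD-info reply filled in by
	 * mrsas_tbolt_get_pd_info(). scsiDevType is one byte and needs
	 * no conversion; fwState is 16 bits, little-endian in the reply.
	 */
	static boolean_t
	pd_is_system_disk(struct mrsas_tbolt_pd_info *pds)
	{
		if (pds->scsiDevType != DTYPE_DIRECT)
			return (B_FALSE);

		return (LE_16(pds->fwState) == PD_SYSTEM ? B_TRUE : B_FALSE);
	}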