1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <emlxs.h>
28
29 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
30 EMLXS_MSG_DEF(EMLXS_MBOX_C);
31
32
33 emlxs_table_t emlxs_mb_status_table[] = {
34 {MBX_SUCCESS, "SUCCESS"},
35 {MBX_FAILURE, "FAILURE"},
36 {MBXERR_NUM_IOCBS, "NUM_IOCBS"},
37 {MBXERR_IOCBS_EXCEEDED, "IOCBS_EXCEEDED"},
38 {MBXERR_BAD_RING_NUMBER, "BAD_RING_NUMBER"},
39 {MBXERR_MASK_ENTRIES_RANGE, "MASK_ENTRIES_RANGE"},
40 {MBXERR_MASKS_EXCEEDED, "MASKS_EXCEEDED"},
41 {MBXERR_BAD_PROFILE, "BAD_PROFILE"},
42 {MBXERR_BAD_DEF_CLASS, "BAD_DEF_CLASS"},
43 {MBXERR_BAD_MAX_RESPONDER, "BAD_MAX_RESPONDER"},
44 {MBXERR_BAD_MAX_ORIGINATOR, "BAD_MAX_ORIGINATOR"},
45 {MBXERR_RPI_REGISTERED, "RPI_REGISTERED"},
46 {MBXERR_RPI_FULL, "RPI_FULL"},
47 {MBXERR_NO_RESOURCES, "NO_RESOURCES"},
48 {MBXERR_BAD_RCV_LENGTH, "BAD_RCV_LENGTH"},
49 {MBXERR_DMA_ERROR, "DMA_ERROR"},
50 {MBXERR_NOT_SUPPORTED, "NOT_SUPPORTED"},
51 {MBXERR_UNSUPPORTED_FEATURE, "UNSUPPORTED_FEATURE"},
52 {MBXERR_UNKNOWN_COMMAND, "UNKNOWN_COMMAND"},
53 {MBXERR_BAD_IP_BIT, "BAD_IP_BIT"},
54 {MBXERR_BAD_PCB_ALIGN, "BAD_PCB_ALIGN"},
55 {MBXERR_BAD_HBQ_ID, "BAD_HBQ_ID"},
56 {MBXERR_BAD_HBQ_STATE, "BAD_HBQ_STATE"},
57 {MBXERR_BAD_HBQ_MASK_NUM, "BAD_HBQ_MASK_NUM"},
58 {MBXERR_BAD_HBQ_MASK_SUBSET, "BAD_HBQ_MASK_SUBSET"},
59 {MBXERR_HBQ_CREATE_FAIL, "HBQ_CREATE_FAIL"},
60 {MBXERR_HBQ_EXISTING, "HBQ_EXISTING"},
61 {MBXERR_HBQ_RSPRING_FULL, "HBQ_RSPRING_FULL"},
62 {MBXERR_HBQ_DUP_MASK, "HBQ_DUP_MASK"},
63 {MBXERR_HBQ_INVAL_GET_PTR, "HBQ_INVAL_GET_PTR"},
64 {MBXERR_BAD_HBQ_SIZE, "BAD_HBQ_SIZE"},
65 {MBXERR_BAD_HBQ_ORDER, "BAD_HBQ_ORDER"},
66 {MBXERR_INVALID_ID, "INVALID_ID"},
67 {MBXERR_INVALID_VFI, "INVALID_VFI"},
68 {MBXERR_FLASH_WRITE_FAILED, "FLASH_WRITE_FAILED"},
69 {MBXERR_INVALID_LINKSPEED, "INVALID_LINKSPEED"},
70 {MBXERR_BAD_REDIRECT, "BAD_REDIRECT"},
71 {MBXERR_RING_ALREADY_CONFIG, "RING_ALREADY_CONFIG"},
72 {MBXERR_RING_INACTIVE, "RING_INACTIVE"},
73 {MBXERR_RPI_INACTIVE, "RPI_INACTIVE"},
74 {MBXERR_NO_ACTIVE_XRI, "NO_ACTIVE_XRI"},
75 {MBXERR_XRI_NOT_ACTIVE, "XRI_NOT_ACTIVE"},
76 {MBXERR_RPI_INUSE, "RPI_INUSE"},
77 {MBXERR_NO_LINK_ATTENTION, "NO_LINK_ATTENTION"},
78 {MBXERR_INVALID_SLI_MODE, "INVALID_SLI_MODE"},
79 {MBXERR_INVALID_HOST_PTR, "INVALID_HOST_PTR"},
80 {MBXERR_CANT_CFG_SLI_MODE, "CANT_CFG_SLI_MODE"},
81 {MBXERR_BAD_OVERLAY, "BAD_OVERLAY"},
82 {MBXERR_INVALID_FEAT_REQ, "INVALID_FEAT_REQ"},
83 {MBXERR_CONFIG_CANT_COMPLETE, "CONFIG_CANT_COMPLETE"},
84 {MBXERR_DID_ALREADY_REGISTERED, "DID_ALREADY_REGISTERED"},
85 {MBXERR_DID_INCONSISTENT, "DID_INCONSISTENT"},
86 {MBXERR_VPI_TOO_LARGE, "VPI_TOO_LARGE"},
87 {MBXERR_STILL_ASSOCIATED, "STILL_ASSOCIATED"},
88 {MBXERR_INVALID_VF_STATE, "INVALID_VF_STATE"},
89 {MBXERR_VFI_ALREADY_REGISTERED, "VFI_ALREADY_REGISTERED"},
90 {MBXERR_VFI_TOO_LARGE, "VFI_TOO_LARGE"},
91 {MBXERR_LOAD_FW_FAILED, "LOAD_FW_FAILED"},
92 {MBXERR_FIND_FW_FAILED, "FIND_FW_FAILED"},
93 };
94
95 emlxs_table_t emlxs_mb_cmd_table[] = {
96 {MBX_SHUTDOWN, "SHUTDOWN"},
97 {MBX_LOAD_SM, "LOAD_SM"},
98 {MBX_READ_NV, "READ_NV"},
99 {MBX_WRITE_NV, "WRITE_NV"},
100 {MBX_RUN_BIU_DIAG, "RUN_BIU_DIAG"},
101 {MBX_INIT_LINK, "INIT_LINK"},
102 {MBX_DOWN_LINK, "DOWN_LINK"},
103 {MBX_CONFIG_LINK, "CONFIG_LINK"},
104 {MBX_PART_SLIM, "PART_SLIM"},
105 {MBX_CONFIG_RING, "CONFIG_RING"},
106 {MBX_RESET_RING, "RESET_RING"},
107 {MBX_READ_CONFIG, "READ_CONFIG"},
108 {MBX_READ_RCONFIG, "READ_RCONFIG"},
109 {MBX_READ_SPARM, "READ_SPARM"},
110 {MBX_READ_STATUS, "READ_STATUS"},
111 {MBX_READ_RPI, "READ_RPI"},
112 {MBX_READ_XRI, "READ_XRI"},
113 {MBX_READ_REV, "READ_REV"},
114 {MBX_READ_LNK_STAT, "READ_LNK_STAT"},
115 {MBX_REG_LOGIN, "REG_LOGIN"},
116 {MBX_UNREG_LOGIN, "UNREG_RPI"},
117 {MBX_READ_LA, "READ_LA"},
118 {MBX_CLEAR_LA, "CLEAR_LA"},
119 {MBX_DUMP_MEMORY, "DUMP_MEMORY"},
120 {MBX_DUMP_CONTEXT, "DUMP_CONTEXT"},
121 {MBX_RUN_DIAGS, "RUN_DIAGS"},
122 {MBX_RESTART, "RESTART"},
123 {MBX_UPDATE_CFG, "UPDATE_CFG"},
124 {MBX_DOWN_LOAD, "DOWN_LOAD"},
125 {MBX_DEL_LD_ENTRY, "DEL_LD_ENTRY"},
126 {MBX_RUN_PROGRAM, "RUN_PROGRAM"},
127 {MBX_SET_MASK, "SET_MASK"},
128 {MBX_SET_VARIABLE, "SET_VARIABLE"},
129 {MBX_UNREG_D_ID, "UNREG_D_ID"},
130 {MBX_KILL_BOARD, "KILL_BOARD"},
131 {MBX_CONFIG_FARP, "CONFIG_FARP"},
132 {MBX_LOAD_AREA, "LOAD_AREA"},
133 {MBX_RUN_BIU_DIAG64, "RUN_BIU_DIAG64"},
134 {MBX_CONFIG_PORT, "CONFIG_PORT"},
135 {MBX_READ_SPARM64, "READ_SPARM64"},
136 {MBX_READ_RPI64, "READ_RPI64"},
137 {MBX_CONFIG_MSI, "CONFIG_MSI"},
138 {MBX_CONFIG_MSIX, "CONFIG_MSIX"},
139 {MBX_REG_LOGIN64, "REG_RPI"},
140 {MBX_READ_LA64, "READ_LA64"},
141 {MBX_FLASH_WR_ULA, "FLASH_WR_ULA"},
142 {MBX_SET_DEBUG, "SET_DEBUG"},
143 {MBX_GET_DEBUG, "GET_DEBUG"},
144 {MBX_LOAD_EXP_ROM, "LOAD_EXP_ROM"},
145 {MBX_BEACON, "BEACON"},
146 {MBX_CONFIG_HBQ, "CONFIG_HBQ"}, /* SLI3 */
147 {MBX_REG_VPI, "REG_VPI"}, /* NPIV */
148 {MBX_UNREG_VPI, "UNREG_VPI"}, /* NPIV */
149 {MBX_ASYNC_EVENT, "ASYNC_EVENT"},
150 {MBX_HEARTBEAT, "HEARTBEAT"},
151 {MBX_READ_EVENT_LOG_STATUS, "READ_EVENT_LOG_STATUS"},
152 {MBX_READ_EVENT_LOG, "READ_EVENT_LOG"},
153 {MBX_WRITE_EVENT_LOG, "WRITE_EVENT_LOG"},
154 {MBX_NV_LOG, "NV_LOG"},
155 {MBX_PORT_CAPABILITIES, "PORT_CAPABILITIES"},
156 {MBX_IOV_CONTROL, "IOV_CONTROL"},
157 {MBX_IOV_MBX, "IOV_MBX"},
158 {MBX_SLI_CONFIG, "SLI_CONFIG"},
159 {MBX_REQUEST_FEATURES, "REQUEST_FEATURES"},
160 {MBX_RESUME_RPI, "RESUME_RPI"},
161 {MBX_REG_VFI, "REG_VFI"},
162 {MBX_REG_FCFI, "REG_FCFI"},
163 {MBX_UNREG_VFI, "UNREG_VFI"},
164 {MBX_UNREG_FCFI, "UNREG_FCFI"},
165 {MBX_INIT_VFI, "INIT_VFI"},
166 {MBX_INIT_VPI, "INIT_VPI"},
167 {MBX_WRITE_VPARMS, "WRITE_VPARMS"},
168 {MBX_ACCESS_VDATA, "ACCESS_VDATA"}
169 }; /* emlxs_mb_cmd_table */
170
171
172 emlxs_table_t emlxs_request_feature_table[] = {
173 {SLI4_FEATURE_INHIBIT_AUTO_ABTS, "IAA "}, /* Bit 0 */
174 {SLI4_FEATURE_NPIV, "NPIV "}, /* Bit 1 */
175 {SLI4_FEATURE_DIF, "DIF "}, /* Bit 2 */
176 {SLI4_FEATURE_VIRTUAL_FABRICS, "VF "}, /* Bit 3 */
177 {SLI4_FEATURE_FCP_INITIATOR, "FCPI "}, /* Bit 4 */
178 {SLI4_FEATURE_FCP_TARGET, "FCPT "}, /* Bit 5 */
179 {SLI4_FEATURE_FCP_COMBO, "FCPC "}, /* Bit 6 */
180 {SLI4_FEATURE_RSVD1, "RSVD1 "}, /* Bit 7 */
181 {SLI4_FEATURE_RQD, "RQD "}, /* Bit 8 */
182 {SLI4_FEATURE_INHIBIT_AUTO_ABTS_R, "IAAR "}, /* Bit 9 */
183 {SLI4_FEATURE_HIGH_LOGIN_MODE, "HLM "}, /* Bit 10 */
184 {SLI4_FEATURE_PERF_HINT, "PERFH "} /* Bit 11 */
185 }; /* emlxs_request_feature_table */
186
187
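/*
 * emlxs_mb_xlate_status	Translate a mailbox status code to a string
 */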
188 extern char *
189 emlxs_mb_xlate_status(uint32_t status)
190 {
191 static char buffer[32];
192 uint32_t i;
193 uint32_t count;
194
195 count = sizeof (emlxs_mb_status_table) / sizeof (emlxs_table_t);
196 for (i = 0; i < count; i++) {
197 if (status == emlxs_mb_status_table[i].code) {
198 return (emlxs_mb_status_table[i].string);
199 }
200 }
201
202 (void) snprintf(buffer, sizeof (buffer), "status=%x", status);
203 return (buffer);
204
205 } /* emlxs_mb_xlate_status() */
206
207
208 /* SLI4 */
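/*
 * emlxs_mb_resetport	Build an SLI_CONFIG mailbox with an embedded
 *			COMMON_OPCODE_RESET request to reset the port
 */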
209 /*ARGSUSED*/
210 extern void
211 emlxs_mb_resetport(emlxs_hba_t *hba, MAILBOXQ *mbq)
212 {
213 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
214
215 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
216 mbq->nonembed = NULL;
217 mbq->mbox_cmpl = NULL; /* no cmpl needed */
218 mbq->port = (void *)&PPORT;
219
220 /*
221 * Signifies an embedded command
222 */
223 mb4->un.varSLIConfig.be.embedded = 1;
224
225 mb4->mbxCommand = MBX_SLI_CONFIG;
226 mb4->mbxOwner = OWN_HOST;
227 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
228 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
229 IOCTL_SUBSYSTEM_COMMON;
230 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_RESET;
231 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
232 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0;
233
234 return;
235
236 } /* emlxs_mb_resetport() */
237
238
239 /* SLI4 */
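/*
 * emlxs_mb_request_features	Build a REQUEST_FEATURES mailbox for the
 *				given feature mask
 */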
240 /*ARGSUSED*/
241 extern void
242 emlxs_mb_request_features(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t mask)
243 {
244 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
245
246 hba->flag &= ~FC_NPIV_ENABLED;
247 hba->sli.sli4.flag &= ~(EMLXS_SLI4_PHON | EMLXS_SLI4_PHWQ);
248
249 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
250 mbq->nonembed = NULL;
251 mbq->mbox_cmpl = NULL; /* no cmpl needed */
252 mbq->port = (void *)&PPORT;
253
254 mb4->mbxCommand = MBX_REQUEST_FEATURES;
255 mb4->mbxOwner = OWN_HOST;
256
257 mb4->un.varReqFeatures.featuresRequested = mask;
258 return;
259
260 } /* emlxs_mb_request_features() */
261
262
263 /* SLI4 */
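/*
 * emlxs_mb_noop	Build an SLI_CONFIG mailbox with an embedded
 *			COMMON_OPCODE_NOP request
 */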
264 /*ARGSUSED*/
265 extern void
266 emlxs_mb_noop(emlxs_hba_t *hba, MAILBOXQ *mbq)
267 {
268 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
269 IOCTL_COMMON_NOP *nop;
270
271 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
272 mbq->nonembed = NULL;
273 mbq->mbox_cmpl = NULL; /* no cmpl needed */
274 mbq->port = (void *)&PPORT;
275
276 /*
277 * Signifies an embedded command
278 */
279 mb4->un.varSLIConfig.be.embedded = 1;
280
281 mb4->mbxCommand = MBX_SLI_CONFIG;
282 mb4->mbxOwner = OWN_HOST;
283 mb4->un.varSLIConfig.be.payload_length = sizeof (IOCTL_COMMON_NOP) +
284 IOCTL_HEADER_SZ;
285 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
286 IOCTL_SUBSYSTEM_COMMON;
287 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_NOP;
288 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
289 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
290 sizeof (IOCTL_COMMON_NOP);
291 nop = (IOCTL_COMMON_NOP *)&mb4->un.varSLIConfig.payload;
292 nop->params.request.context = -1;
293
294 return;
295
296 } /* emlxs_mb_noop() */
297
298
299 /* SLI4 */
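/*
 * emlxs_mbext_noop	Build a non-embedded COMMON_OPCODE_NOP request;
 *			returns 1 if the external payload buffer cannot
 *			be allocated, 0 otherwise
 */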
300 /*ARGSUSED*/
301 extern int
302 emlxs_mbext_noop(emlxs_hba_t *hba, MAILBOXQ *mbq)
303 {
304 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
305 IOCTL_COMMON_NOP *nop;
306 MATCHMAP *mp;
307 mbox_req_hdr_t *hdr_req;
308
309 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
310
311 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
312 return (1);
313 }
314 /*
315 * Save address for completion
316 * Signifies a non-embedded command
317 */
318 mb4->un.varSLIConfig.be.embedded = 0;
319 mbq->nonembed = (void *)mp;
320 mbq->mbox_cmpl = NULL; /* no cmpl needed */
321 mbq->port = (void *)&PPORT;
322
323 mb4->mbxCommand = MBX_SLI_CONFIG;
324 mb4->mbxOwner = OWN_HOST;
325
326 hdr_req = (mbox_req_hdr_t *)mp->virt;
327 hdr_req->subsystem = IOCTL_SUBSYSTEM_COMMON;
328 hdr_req->opcode = COMMON_OPCODE_NOP;
329 hdr_req->timeout = 0;
330 hdr_req->req_length = sizeof (IOCTL_COMMON_NOP);
331 nop = (IOCTL_COMMON_NOP *)(hdr_req + 1);
332 nop->params.request.context = -1;
333
334 return (0);
335
336 } /* emlxs_mbext_noop() */
337
338
339 /* SLI4 */
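/*
 * emlxs_mb_eq_create	Build a COMMON_OPCODE_EQ_CREATE request for
 *			event queue 'num'
 */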
340 /*ARGSUSED*/
341 extern void
342 emlxs_mb_eq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
343 {
344 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
345 IOCTL_COMMON_EQ_CREATE *qp;
346 uint64_t addr;
347
348 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
349 mbq->nonembed = NULL;
350 mbq->mbox_cmpl = NULL; /* no cmpl needed */
351 mbq->port = (void *)&PPORT;
352
353 /*
354 * Signifies an embedded command
355 */
356 mb4->un.varSLIConfig.be.embedded = 1;
357
358 mb4->mbxCommand = MBX_SLI_CONFIG;
359 mb4->mbxOwner = OWN_HOST;
360 mb4->un.varSLIConfig.be.payload_length =
361 sizeof (IOCTL_COMMON_EQ_CREATE) + IOCTL_HEADER_SZ;
362 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
363 IOCTL_SUBSYSTEM_COMMON;
364 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_EQ_CREATE;
365 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
366 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
367 sizeof (IOCTL_COMMON_EQ_CREATE);
368 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
369
370 qp = (IOCTL_COMMON_EQ_CREATE *)&mb4->un.varSLIConfig.payload;
371
372 /* 1024 * 4 bytes = 4K */
373 qp->params.request.EQContext.Count = EQ_ELEMENT_COUNT_1024;
374 qp->params.request.EQContext.Valid = 1;
375 qp->params.request.EQContext.DelayMult = EQ_DELAY_MULT;
376
377 addr = hba->sli.sli4.eq[num].addr.phys;
378 qp->params.request.NumPages = 1;
379 qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
380 qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
381
382 return;
383
384 } /* emlxs_mb_eq_create() */
385
386
387 /* SLI4 */
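/*
 * emlxs_mb_cq_create	Build a COMMON_OPCODE_CQ_CREATE request for
 *			completion queue 'num' (V0 or V2 format,
 *			depending on the reported CQV)
 */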
388 /*ARGSUSED*/
389 extern void
390 emlxs_mb_cq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
391 {
392 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
393 IOCTL_COMMON_CQ_CREATE *qp;
394 IOCTL_COMMON_CQ_CREATE_V2 *qp2;
395 uint64_t addr;
396 uint32_t i;
397
398 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
399 mbq->nonembed = NULL;
400 mbq->mbox_cmpl = NULL; /* no cmpl needed */
401 mbq->port = (void *)&PPORT;
402
403 /*
404 * Signifies an embedded command
405 */
406 mb4->un.varSLIConfig.be.embedded = 1;
407
408 mb4->mbxCommand = MBX_SLI_CONFIG;
409 mb4->mbxOwner = OWN_HOST;
410
411 switch (hba->sli.sli4.param.CQV) {
412 case 0:
413 mb4->un.varSLIConfig.be.payload_length =
414 sizeof (IOCTL_COMMON_CQ_CREATE) + IOCTL_HEADER_SZ;
415 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
416 IOCTL_SUBSYSTEM_COMMON;
417 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
418 COMMON_OPCODE_CQ_CREATE;
419 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
420 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
421 sizeof (IOCTL_COMMON_CQ_CREATE);
422 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
423
424 qp = (IOCTL_COMMON_CQ_CREATE *)
425 &mb4->un.varSLIConfig.payload;
426
427 /* 256 * 16 bytes = 4K */
428 qp->params.request.CQContext.Count = CQ_ELEMENT_COUNT_256;
429 qp->params.request.CQContext.EQId =
430 (uint8_t)hba->sli.sli4.cq[num].eqid;
431 qp->params.request.CQContext.Valid = 1;
432 qp->params.request.CQContext.Eventable = 1;
433 qp->params.request.CQContext.NoDelay = 0;
434 qp->params.request.CQContext.CoalesceWM = 0;
435
436 addr = hba->sli.sli4.cq[num].addr.phys;
437 qp->params.request.NumPages = 1;
438 qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
439 qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
440
441 break;
442
443 case 2:
444 default:
445 mb4->un.varSLIConfig.be.payload_length =
446 sizeof (IOCTL_COMMON_CQ_CREATE_V2) + IOCTL_HEADER_SZ;
447 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
448 IOCTL_SUBSYSTEM_COMMON;
449 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
450 COMMON_OPCODE_CQ_CREATE;
451 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
452 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
453 sizeof (IOCTL_COMMON_CQ_CREATE_V2);
454 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 2;
455
456 qp2 = (IOCTL_COMMON_CQ_CREATE_V2 *)
457 &mb4->un.varSLIConfig.payload;
458
459 qp2->params.request.CQContext.CqeCnt = CQ_ELEMENT_COUNT_1024;
460 qp2->params.request.CQContext.CqeSize = CQE_SIZE_16_BYTES;
461 qp2->params.request.CQContext.EQId = hba->sli.sli4.cq[num].eqid;
462 qp2->params.request.CQContext.Valid = 1;
463 qp2->params.request.CQContext.AutoValid = 0;
464 qp2->params.request.CQContext.Eventable = 1;
465 qp2->params.request.CQContext.NoDelay = 0;
466 qp2->params.request.CQContext.Count1 = 0;
467 qp2->params.request.CQContext.CoalesceWM = 0;
468
469 addr = hba->sli.sli4.cq[num].addr.phys;
470 qp2->params.request.PageSize = CQ_PAGE_SIZE_4K;
471 qp2->params.request.NumPages = EMLXS_NUM_CQ_PAGES_V2;
472
473 for (i = 0; i < EMLXS_NUM_CQ_PAGES_V2; i++) {
474 qp2->params.request.Pages[i].addrLow = PADDR_LO(addr);
475 qp2->params.request.Pages[i].addrHigh = PADDR_HI(addr);
476 addr += 4096;
477 }
478
479 break;
480 }
481 return;
482
483 } /* emlxs_mb_cq_create() */
484
485
486 /* SLI4 */
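/*
 * emlxs_mb_get_port_name	Build a COMMON_OPCODE_GET_PORT_NAME request
 *				(V0 for BE chips, V1 otherwise)
 */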
487 /*ARGSUSED*/
488 extern void
489 emlxs_mb_get_port_name(emlxs_hba_t *hba, MAILBOXQ *mbq)
490 {
491 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
492
493 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
494 mbq->nonembed = NULL;
495 mbq->mbox_cmpl = NULL; /* no cmpl needed */
496 mbq->port = (void *)&PPORT;
497
498 mb4->un.varSLIConfig.be.embedded = 1;
499 mb4->mbxCommand = MBX_SLI_CONFIG;
500 mb4->mbxOwner = OWN_HOST;
501
502 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
503 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
504 IOCTL_SUBSYSTEM_COMMON;
505 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
506 COMMON_OPCODE_GET_PORT_NAME;
507 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
508 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0;
509
510 if (hba->model_info.chip & EMLXS_BE_CHIPS) {
511 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; /* V0 */
512 } else {
513 IOCTL_COMMON_GET_PORT_NAME_V1 *pn;
514
515 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1; /* V1 */
516
517 pn = (IOCTL_COMMON_GET_PORT_NAME_V1 *)
518 &mb4->un.varSLIConfig.payload;
519 pn->params.request.pt = PORT_TYPE_FC;
520 }
521
522 return;
523
524 } /* emlxs_mb_get_port_name() */
525
526
527 /* SLI4 */
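/*
 * emlxs_mb_get_sli4_params	Build a COMMON_OPCODE_GET_SLI4_PARAMS request
 */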
528 /*ARGSUSED*/
529 extern void
530 emlxs_mb_get_sli4_params(emlxs_hba_t *hba, MAILBOXQ *mbq)
531 {
532 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
533
534 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
535 mbq->nonembed = NULL;
536 mbq->mbox_cmpl = NULL; /* no cmpl needed */
537 mbq->port = (void *)&PPORT;
538
539 mb4->un.varSLIConfig.be.embedded = 1;
540 mb4->mbxCommand = MBX_SLI_CONFIG;
541 mb4->mbxOwner = OWN_HOST;
542
543 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
544 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
545 IOCTL_SUBSYSTEM_COMMON;
546 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
547 COMMON_OPCODE_GET_SLI4_PARAMS;
548 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
549 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length = 0;
550 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0; /* V0 */
551
552 return;
553
554 } /* emlxs_mb_get_sli4_params() */
555
556
557 /* SLI4 */
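/*
 * emlxs_mb_get_extents_info	Build a COMMON_OPCODE_GET_EXTENTS_INFO
 *				request for the given resource type
 */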
558 /*ARGSUSED*/
559 extern void
560 emlxs_mb_get_extents_info(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type)
561 {
562 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
563 IOCTL_COMMON_EXTENTS *ep;
564
565 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
566 mbq->nonembed = NULL;
567 mbq->mbox_cmpl = NULL; /* no cmpl needed */
568 mbq->port = (void *)&PPORT;
569
570 mb4->un.varSLIConfig.be.embedded = 1;
571 mb4->mbxCommand = MBX_SLI_CONFIG;
572 mb4->mbxOwner = OWN_HOST;
573
574 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
575 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem =
576 IOCTL_SUBSYSTEM_COMMON;
577 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode =
578 COMMON_OPCODE_GET_EXTENTS_INFO;
579 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0;
580 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length =
581 sizeof (IOCTL_COMMON_EXTENTS);
582 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0;
583 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0;
584 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0;
585
586 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */
587
588 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
589 ep->params.request.RscType = type;
590
591 return;
592
593 } /* emlxs_mb_get_extents_info() */
594
595
596 /* SLI4 */
597 /*ARGSUSED*/
598 extern void
599 emlxs_mb_get_extents(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type)
600 {
601 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
602 IOCTL_COMMON_EXTENTS *ep;
603
604 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
605 mbq->nonembed = NULL;
606 mbq->mbox_cmpl = NULL; /* no cmpl needed */
607 mbq->port = (void *)&PPORT;
608
609 mb4->un.varSLIConfig.be.embedded = 1;
610 mb4->mbxCommand = MBX_SLI_CONFIG;
611 mb4->mbxOwner = OWN_HOST;
612
613 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
614 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem =
615 IOCTL_SUBSYSTEM_COMMON;
616 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode =
617 COMMON_OPCODE_GET_EXTENTS;
618 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0;
619 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length =
620 sizeof (IOCTL_COMMON_EXTENTS);
621 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0;
622 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0;
623 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0;
624
625 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */
626
627 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
628 ep->params.request.RscType = type;
629
630 return;
631
632 } /* emlxs_mb_get_extents() */
633
634
635 /* SLI4 */
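/*
 * emlxs_mb_alloc_extents	Build a COMMON_OPCODE_ALLOC_EXTENTS request;
 *				the requested count is capped at MAX_EXTENTS
 */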
636 /*ARGSUSED*/
637 extern void
638 emlxs_mb_alloc_extents(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type,
639 uint16_t count)
640 {
641 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
642 IOCTL_COMMON_EXTENTS *ep;
643
644 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
645 mbq->nonembed = NULL;
646 mbq->mbox_cmpl = NULL; /* no cmpl needed */
647 mbq->port = (void *)&PPORT;
648
649 mb4->un.varSLIConfig.be.embedded = 1;
650 mb4->mbxCommand = MBX_SLI_CONFIG;
651 mb4->mbxOwner = OWN_HOST;
652
653 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
654 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem =
655 IOCTL_SUBSYSTEM_COMMON;
656 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode =
657 COMMON_OPCODE_ALLOC_EXTENTS;
658 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0;
659 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length =
660 sizeof (IOCTL_COMMON_EXTENTS);
661 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0;
662 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0;
663 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0;
664
665 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */
666
667 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
668 ep->params.request.RscType = type;
669
670 count = min(count, MAX_EXTENTS);
671 ep->params.request.RscCnt = count;
672
673 return;
674
675 } /* emlxs_mb_alloc_extents() */
676
677
678 /* SLI4 */
679 /*ARGSUSED*/
680 extern void
681 emlxs_mb_dealloc_extents(emlxs_hba_t *hba, MAILBOXQ *mbq, uint16_t type)
682 {
683 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
684 IOCTL_COMMON_EXTENTS *ep;
685
686 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
687 mbq->nonembed = NULL;
688 mbq->mbox_cmpl = NULL; /* no cmpl needed */
689 mbq->port = (void *)&PPORT;
690
691 mb4->un.varSLIConfig.be.embedded = 1;
692 mb4->mbxCommand = MBX_SLI_CONFIG;
693 mb4->mbxOwner = OWN_HOST;
694
695 mb4->un.varSLIConfig.be.payload_length = IOCTL_HEADER_SZ;
696 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.subsystem =
697 IOCTL_SUBSYSTEM_COMMON;
698 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.opcode =
699 COMMON_OPCODE_DEALLOC_EXTENTS;
700 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.timeout = 0;
701 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.req_length =
702 sizeof (IOCTL_COMMON_EXTENTS);
703 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vf_number = 0;
704 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.vh_number = 0;
705 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.pf_number = 0;
706
707 mb4->un.varSLIConfig.be.un_hdr.hdr_req2.version = 0; /* V0 */
708
709 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
710 ep->params.request.RscType = type;
711
712 return;
713
714 } /* emlxs_mb_dealloc_extents() */
715
716
717 /* SLI4 */
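/*
 * emlxs_mb_wq_create	Build an FCOE_OPCODE_WQ_CREATE request for
 *			work queue 'num' (V0 or V1 format, depending
 *			on the reported WQV)
 */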
718 /*ARGSUSED*/
719 extern void
720 emlxs_mb_wq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
721 {
722 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
723 IOCTL_FCOE_WQ_CREATE *qp;
724 IOCTL_FCOE_WQ_CREATE_V1 *qp1;
725 uint64_t addr;
726 int i;
727
728 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
729 mbq->nonembed = NULL;
730 mbq->mbox_cmpl = NULL; /* no cmpl needed */
731 mbq->port = (void *)&PPORT;
732
733 /*
734 * Signifies an embedded command
735 */
736 mb4->un.varSLIConfig.be.embedded = 1;
737
738 mb4->mbxCommand = MBX_SLI_CONFIG;
739 mb4->mbxOwner = OWN_HOST;
740
741 switch (hba->sli.sli4.param.WQV) {
742 case 0:
743 mb4->un.varSLIConfig.be.payload_length =
744 sizeof (IOCTL_FCOE_WQ_CREATE) + IOCTL_HEADER_SZ;
745 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
746 IOCTL_SUBSYSTEM_FCOE;
747 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
748 FCOE_OPCODE_WQ_CREATE;
749 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
750 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
751 sizeof (IOCTL_FCOE_WQ_CREATE);
752 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
753
754 addr = hba->sli.sli4.wq[num].addr.phys;
755 qp = (IOCTL_FCOE_WQ_CREATE *)&mb4->un.varSLIConfig.payload;
756
757 qp->params.request.CQId = hba->sli.sli4.wq[num].cqid;
758
759 qp->params.request.NumPages = EMLXS_NUM_WQ_PAGES;
760 for (i = 0; i < EMLXS_NUM_WQ_PAGES; i++) {
761 qp->params.request.Pages[i].addrLow = PADDR_LO(addr);
762 qp->params.request.Pages[i].addrHigh = PADDR_HI(addr);
763 addr += 4096;
764 }
765
766 break;
767
768 case 1:
769 default:
770 mb4->un.varSLIConfig.be.payload_length =
771 sizeof (IOCTL_FCOE_WQ_CREATE_V1) + IOCTL_HEADER_SZ;
772 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
773 IOCTL_SUBSYSTEM_FCOE;
774 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
775 FCOE_OPCODE_WQ_CREATE;
776 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
777 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
778 sizeof (IOCTL_FCOE_WQ_CREATE_V1);
779 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1;
780
781 addr = hba->sli.sli4.wq[num].addr.phys;
782 qp1 = (IOCTL_FCOE_WQ_CREATE_V1 *)&mb4->un.varSLIConfig.payload;
783
784 qp1->params.request.CQId = hba->sli.sli4.wq[num].cqid;
785 qp1->params.request.NumPages = EMLXS_NUM_WQ_PAGES;
786
787 qp1->params.request.WqeCnt = WQ_DEPTH;
788 qp1->params.request.WqeSize = WQE_SIZE_64_BYTES;
789 qp1->params.request.PageSize = WQ_PAGE_SIZE_4K;
790
791 for (i = 0; i < EMLXS_NUM_WQ_PAGES; i++) {
792 qp1->params.request.Pages[i].addrLow = PADDR_LO(addr);
793 qp1->params.request.Pages[i].addrHigh = PADDR_HI(addr);
794 addr += 4096;
795 }
796
797 break;
798 }
799
800 return;
801
802 } /* emlxs_mb_wq_create() */
803
804
805 /* SLI4 */
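/*
 * emlxs_mb_rq_create	Build an FCOE_OPCODE_RQ_CREATE request for
 *			receive queue 'num' (V0 or V1 format, depending
 *			on the reported RQV)
 */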
806 /*ARGSUSED*/
807 extern void
808 emlxs_mb_rq_create(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t num)
809 {
810 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
811 IOCTL_FCOE_RQ_CREATE *qp;
812 IOCTL_FCOE_RQ_CREATE_V1 *qp1;
813 uint64_t addr;
814
815 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
816 mbq->nonembed = NULL;
817 mbq->mbox_cmpl = NULL; /* no cmpl needed */
818 mbq->port = (void *)&PPORT;
819
820 /*
821 * Signifies an embedded command
822 */
823 mb4->un.varSLIConfig.be.embedded = 1;
824
825 mb4->mbxCommand = MBX_SLI_CONFIG;
826 mb4->mbxOwner = OWN_HOST;
827
828 switch (hba->sli.sli4.param.RQV) {
829 case 0:
830 mb4->un.varSLIConfig.be.payload_length =
831 sizeof (IOCTL_FCOE_RQ_CREATE) + IOCTL_HEADER_SZ;
832 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
833 IOCTL_SUBSYSTEM_FCOE;
834 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
835 FCOE_OPCODE_RQ_CREATE;
836 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
837 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
838 sizeof (IOCTL_FCOE_RQ_CREATE);
839 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
840
841 addr = hba->sli.sli4.rq[num].addr.phys;
842
843 qp = (IOCTL_FCOE_RQ_CREATE *)&mb4->un.varSLIConfig.payload;
844
845 qp->params.request.RQContext.RqeCnt = RQ_DEPTH_EXPONENT;
846 qp->params.request.RQContext.BufferSize = RQB_DATA_SIZE;
847 qp->params.request.RQContext.CQId =
848 hba->sli.sli4.rq[num].cqid;
849
850 qp->params.request.NumPages = 1;
851 qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
852 qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
853
854 break;
855
856 case 1:
857 default:
858 mb4->un.varSLIConfig.be.payload_length =
859 sizeof (IOCTL_FCOE_RQ_CREATE_V1) + IOCTL_HEADER_SZ;
860 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
861 IOCTL_SUBSYSTEM_FCOE;
862 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
863 FCOE_OPCODE_RQ_CREATE;
864 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
865 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
866 sizeof (IOCTL_FCOE_RQ_CREATE_V1);
867 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1;
868
869 addr = hba->sli.sli4.rq[num].addr.phys;
870
871 qp1 = (IOCTL_FCOE_RQ_CREATE_V1 *)&mb4->un.varSLIConfig.payload;
872
873 qp1->params.request.RQContext.RqeCnt = RQ_DEPTH;
874 qp1->params.request.RQContext.RqeSize = RQE_SIZE_8_BYTES;
875 qp1->params.request.RQContext.PageSize = RQ_PAGE_SIZE_4K;
876
877 qp1->params.request.RQContext.BufferSize = RQB_DATA_SIZE;
878 qp1->params.request.RQContext.CQId =
879 hba->sli.sli4.rq[num].cqid;
880
881 qp1->params.request.NumPages = 1;
882 qp1->params.request.Pages[0].addrLow = PADDR_LO(addr);
883 qp1->params.request.Pages[0].addrHigh = PADDR_HI(addr);
884
885 break;
886 }
887
888 return;
889
890 } /* emlxs_mb_rq_create() */
891
892
893 /* SLI4 */
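/*
 * emlxs_mb_mq_create	Build a COMMON_OPCODE_MQ_CREATE request for the
 *			mailbox queue
 */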
894 /*ARGSUSED*/
895 extern void
896 emlxs_mb_mq_create(emlxs_hba_t *hba, MAILBOXQ *mbq)
897 {
898 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
899 IOCTL_COMMON_MQ_CREATE *qp;
900 uint64_t addr;
901
902 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
903 mbq->nonembed = NULL;
904 mbq->mbox_cmpl = NULL; /* no cmpl needed */
905 mbq->port = (void *)&PPORT;
906
907 /*
908 * Signifies an embedded command
909 */
910 mb4->un.varSLIConfig.be.embedded = 1;
911
912 mb4->mbxCommand = MBX_SLI_CONFIG;
913 mb4->mbxOwner = OWN_HOST;
914 mb4->un.varSLIConfig.be.payload_length =
915 sizeof (IOCTL_COMMON_MQ_CREATE) + IOCTL_HEADER_SZ;
916 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
917 IOCTL_SUBSYSTEM_COMMON;
918 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode = COMMON_OPCODE_MQ_CREATE;
919 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
920 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
921 sizeof (IOCTL_COMMON_MQ_CREATE);
922
923 addr = hba->sli.sli4.mq.addr.phys;
924 qp = (IOCTL_COMMON_MQ_CREATE *)&mb4->un.varSLIConfig.payload;
925
926 qp->params.request.MQContext.Size = MQ_ELEMENT_COUNT_16;
927 qp->params.request.MQContext.Valid = 1;
928 qp->params.request.MQContext.CQId = hba->sli.sli4.mq.cqid;
929
930 qp->params.request.NumPages = 1;
931 qp->params.request.Pages[0].addrLow = PADDR_LO(addr);
932 qp->params.request.Pages[0].addrHigh = PADDR_HI(addr);
933
934 return;
935
936 } /* emlxs_mb_mq_create() */
937
938
939 /* SLI4 */
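/*
 * emlxs_mb_mq_create_ext	Build a COMMON_OPCODE_MQ_CREATE_EXT request
 *				with the async event bitmap (V0 or V1
 *				format, depending on the reported MQV)
 */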
940 /*ARGSUSED*/
941 extern void
942 emlxs_mb_mq_create_ext(emlxs_hba_t *hba, MAILBOXQ *mbq)
943 {
944 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
945 IOCTL_COMMON_MQ_CREATE_EXT *qp;
946 IOCTL_COMMON_MQ_CREATE_EXT_V1 *qp1;
947 uint64_t addr;
948
949 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
950 mbq->nonembed = NULL;
951 mbq->mbox_cmpl = NULL; /* no cmpl needed */
952 mbq->port = (void *)&PPORT;
953
954 /*
955 * Signifies an embedded command
956 */
957 mb4->un.varSLIConfig.be.embedded = 1;
958
959 mb4->mbxCommand = MBX_SLI_CONFIG;
960 mb4->mbxOwner = OWN_HOST;
961
962 switch (hba->sli.sli4.param.MQV) {
963 case 0:
964 mb4->un.varSLIConfig.be.payload_length =
965 sizeof (IOCTL_COMMON_MQ_CREATE_EXT) + IOCTL_HEADER_SZ;
966 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
967 IOCTL_SUBSYSTEM_COMMON;
968 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
969 COMMON_OPCODE_MQ_CREATE_EXT;
970 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
971 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
972 sizeof (IOCTL_COMMON_MQ_CREATE_EXT);
973 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 0;
974
975 addr = hba->sli.sli4.mq.addr.phys;
976 qp = (IOCTL_COMMON_MQ_CREATE_EXT *)
977 &mb4->un.varSLIConfig.payload;
978
979 qp->params.request.num_pages = 1;
980 qp->params.request.async_event_bitmap =
981 ASYNC_LINK_EVENT | ASYNC_FCF_EVENT | ASYNC_GROUP5_EVENT;
982 qp->params.request.context.Size = MQ_ELEMENT_COUNT_16;
983 qp->params.request.context.Valid = 1;
984 qp->params.request.context.CQId = hba->sli.sli4.mq.cqid;
985
986 qp->params.request.pages[0].addrLow = PADDR_LO(addr);
987 qp->params.request.pages[0].addrHigh = PADDR_HI(addr);
988
989 break;
990
991 case 1:
992 default:
993 mb4->un.varSLIConfig.be.payload_length =
994 sizeof (IOCTL_COMMON_MQ_CREATE) + IOCTL_HEADER_SZ;
995 mb4->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
996 IOCTL_SUBSYSTEM_COMMON;
997 mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
998 COMMON_OPCODE_MQ_CREATE_EXT;
999 mb4->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
1000 mb4->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
1001 sizeof (IOCTL_COMMON_MQ_CREATE_EXT_V1);
1002 mb4->un.varSLIConfig.be.un_hdr.hdr_req.version = 1;
1003
1004 addr = hba->sli.sli4.mq.addr.phys;
1005 qp1 = (IOCTL_COMMON_MQ_CREATE_EXT_V1 *)
1006 &mb4->un.varSLIConfig.payload;
1007
1008 qp1->params.request.num_pages = 1;
1009 qp1->params.request.async_event_bitmap =
1010 ASYNC_LINK_EVENT | ASYNC_FCF_EVENT | ASYNC_GROUP5_EVENT |
1011 ASYNC_FC_EVENT | ASYNC_PORT_EVENT;
1012 qp1->params.request.context.Size = MQ_ELEMENT_COUNT_16;
1013 qp1->params.request.context.Valid = 1;
1014 qp1->params.request.CQId = hba->sli.sli4.mq.cqid;
1015
1016 qp1->params.request.pages[0].addrLow = PADDR_LO(addr);
1017 qp1->params.request.pages[0].addrHigh = PADDR_HI(addr);
1018
1019 break;
1020 }
1021
1022 return;
1023
1024 } /* emlxs_mb_mq_create_ext() */
1025
1026
1027 /*ARGSUSED*/
1028 extern void
1029 emlxs_mb_async_event(emlxs_hba_t *hba, MAILBOXQ *mbq)
1030 {
1031 MAILBOX *mb = (MAILBOX *)mbq;
1032
1033 bzero((void *) mb, MAILBOX_CMD_BSIZE);
1034
1035 mb->mbxCommand = MBX_ASYNC_EVENT;
1036 mb->mbxOwner = OWN_HOST;
1037 mb->un.varWords[0] = hba->channel_els;
1038 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1039 mbq->port = (void *)&PPORT;
1040
1041 return;
1042
1043 } /* emlxs_mb_async_event() */
1044
1045
1046 /*ARGSUSED*/
1047 extern void
1048 emlxs_mb_heartbeat(emlxs_hba_t *hba, MAILBOXQ *mbq)
1049 {
1050 MAILBOX *mb = (MAILBOX *)mbq;
1051
1052 bzero((void *) mb, MAILBOX_CMD_BSIZE);
1053
1054 mb->mbxCommand = MBX_HEARTBEAT;
1055 mb->mbxOwner = OWN_HOST;
1056 mbq->mbox_cmpl = NULL; /* no cmpl needed for hbeat */
1057 mbq->port = (void *)&PPORT;
1058
1059 return;
1060
1061 } /* emlxs_mb_heartbeat() */
1062
1063
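/*
 * emlxs_mb_gpio_write	Build a LOWLEVEL_OPCODE_GPIO_RDWR request to
 *			write 'val' to GPIO pin 'pin'
 */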
1064 /*ARGSUSED*/
1065 extern void
1066 emlxs_mb_gpio_write(emlxs_hba_t *hba, MAILBOXQ *mbq, uint8_t pin, uint8_t val)
1067 {
1068 emlxs_port_t *port = &PPORT;
1069 MAILBOX4 *mb4;
1070 be_req_hdr_t *be_req;
1071 mbox_req_hdr_t *hdr_req;
1072 IOCTL_LOWLEVEL_GPIO_RDWR *gpio;
1073
1074 bzero((void *) mbq, sizeof (MAILBOXQ));
1075
1076 mbq->port = port;
1077
1078 mb4 = (MAILBOX4 *)mbq->mbox;
1079 mb4->mbxCommand = MBX_SLI_CONFIG;
1080 mb4->mbxOwner = OWN_HOST;
1081
1082 be_req = (be_req_hdr_t *)&mb4->un.varSLIConfig.be;
1083 be_req->embedded = 1;
1084 be_req->payload_length = sizeof (mbox_req_hdr_t) +
1085 sizeof (IOCTL_LOWLEVEL_GPIO_RDWR);
1086
1087 hdr_req = &be_req->un_hdr.hdr_req;
1088 hdr_req->subsystem = IOCTL_SUBSYSTEM_LOWLEVEL;
1089 hdr_req->opcode = LOWLEVEL_OPCODE_GPIO_RDWR;
1090 hdr_req->timeout = 0;
1091 hdr_req->req_length = sizeof (IOCTL_LOWLEVEL_GPIO_RDWR);
1092
1093 gpio = (IOCTL_LOWLEVEL_GPIO_RDWR *)&mb4->un.varSLIConfig.payload;
1094 gpio->params.request.GpioAction = LOWLEVEL_GPIO_ACT_WRITE;
1095 gpio->params.request.LogicalPin = pin;
1096 gpio->params.request.PinValue = val;
1097 } /* emlxs_mb_gpio_write */
1098
1099 #ifdef MSI_SUPPORT
1100
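/*
 * emlxs_mb_config_msi	Build a CONFIG_MSI mailbox mapping the host
 *			attention conditions in intr_map to MSI
 *			message numbers
 */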
1101 /*ARGSUSED*/
1102 extern void
1103 emlxs_mb_config_msi(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t *intr_map,
1104 uint32_t intr_count)
1105 {
1106 MAILBOX *mb = (MAILBOX *)mbq;
1107 uint16_t i;
1108 uint32_t mask;
1109
1110 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1111
1112 mb->mbxCommand = MBX_CONFIG_MSI;
1113
1114 /* Set the default message id to zero */
1115 mb->un.varCfgMSI.defaultPresent = 1;
1116 mb->un.varCfgMSI.defaultMessageNumber = 0;
1117
1118 for (i = 1; i < intr_count; i++) {
1119 mask = intr_map[i];
1120
1121 mb->un.varCfgMSI.attConditions |= mask;
1122
1123 #ifdef EMLXS_BIG_ENDIAN
1124 if (mask & HA_R0ATT) {
1125 mb->un.varCfgMSI.messageNumberByHA[3] = i;
1126 }
1127 if (mask & HA_R1ATT) {
1128 mb->un.varCfgMSI.messageNumberByHA[7] = i;
1129 }
1130 if (mask & HA_R2ATT) {
1131 mb->un.varCfgMSI.messageNumberByHA[11] = i;
1132 }
1133 if (mask & HA_R3ATT) {
1134 mb->un.varCfgMSI.messageNumberByHA[15] = i;
1135 }
1136 if (mask & HA_LATT) {
1137 mb->un.varCfgMSI.messageNumberByHA[29] = i;
1138 }
1139 if (mask & HA_MBATT) {
1140 mb->un.varCfgMSI.messageNumberByHA[30] = i;
1141 }
1142 if (mask & HA_ERATT) {
1143 mb->un.varCfgMSI.messageNumberByHA[31] = i;
1144 }
1145 #endif /* EMLXS_BIG_ENDIAN */
1146
1147 #ifdef EMLXS_LITTLE_ENDIAN
1148 /* Accounts for half word swap of LE architecture */
1149 if (mask & HA_R0ATT) {
1150 mb->un.varCfgMSI.messageNumberByHA[2] = i;
1151 }
1152 if (mask & HA_R1ATT) {
1153 mb->un.varCfgMSI.messageNumberByHA[6] = i;
1154 }
1155 if (mask & HA_R2ATT) {
1156 mb->un.varCfgMSI.messageNumberByHA[10] = i;
1157 }
1158 if (mask & HA_R3ATT) {
1159 mb->un.varCfgMSI.messageNumberByHA[14] = i;
1160 }
1161 if (mask & HA_LATT) {
1162 mb->un.varCfgMSI.messageNumberByHA[28] = i;
1163 }
1164 if (mask & HA_MBATT) {
1165 mb->un.varCfgMSI.messageNumberByHA[31] = i;
1166 }
1167 if (mask & HA_ERATT) {
1168 mb->un.varCfgMSI.messageNumberByHA[30] = i;
1169 }
1170 #endif /* EMLXS_LITTLE_ENDIAN */
1171 }
1172
1173 mb->mbxOwner = OWN_HOST;
1174 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1175 mbq->port = (void *)&PPORT;
1176
1177 return;
1178
1179 } /* emlxs_mb_config_msi() */
1180
1181
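/*
 * emlxs_mb_config_msix	Build a CONFIG_MSIX mailbox mapping the host
 *			attention conditions in intr_map to MSI-X
 *			message numbers
 */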
1182 /*ARGSUSED*/
1183 extern void
1184 emlxs_mb_config_msix(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t *intr_map,
1185 uint32_t intr_count)
1186 {
1187 MAILBOX *mb = (MAILBOX *)mbq;
1188 uint8_t i;
1189 uint32_t mask;
1190
1191 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1192
1193 mb->mbxCommand = MBX_CONFIG_MSIX;
1194
1195 /* Set the default message id to zero */
1196 mb->un.varCfgMSIX.defaultPresent = 1;
1197 mb->un.varCfgMSIX.defaultMessageNumber = 0;
1198
1199 for (i = 1; i < intr_count; i++) {
1200 mask = intr_map[i];
1201
1202 mb->un.varCfgMSIX.attConditions1 |= mask;
1203
1204 #ifdef EMLXS_BIG_ENDIAN
1205 if (mask & HA_R0ATT) {
1206 mb->un.varCfgMSIX.messageNumberByHA[3] = i;
1207 }
1208 if (mask & HA_R1ATT) {
1209 mb->un.varCfgMSIX.messageNumberByHA[7] = i;
1210 }
1211 if (mask & HA_R2ATT) {
1212 mb->un.varCfgMSIX.messageNumberByHA[11] = i;
1213 }
1214 if (mask & HA_R3ATT) {
1215 mb->un.varCfgMSIX.messageNumberByHA[15] = i;
1216 }
1217 if (mask & HA_LATT) {
1218 mb->un.varCfgMSIX.messageNumberByHA[29] = i;
1219 }
1220 if (mask & HA_MBATT) {
1221 mb->un.varCfgMSIX.messageNumberByHA[30] = i;
1222 }
1223 if (mask & HA_ERATT) {
1224 mb->un.varCfgMSIX.messageNumberByHA[31] = i;
1225 }
1226 #endif /* EMLXS_BIG_ENDIAN */
1227
1228 #ifdef EMLXS_LITTLE_ENDIAN
1229 /* Accounts for word swap of LE architecture */
1230 if (mask & HA_R0ATT) {
1231 mb->un.varCfgMSIX.messageNumberByHA[0] = i;
1232 }
1233 if (mask & HA_R1ATT) {
1234 mb->un.varCfgMSIX.messageNumberByHA[4] = i;
1235 }
1236 if (mask & HA_R2ATT) {
1237 mb->un.varCfgMSIX.messageNumberByHA[8] = i;
1238 }
1239 if (mask & HA_R3ATT) {
1240 mb->un.varCfgMSIX.messageNumberByHA[12] = i;
1241 }
1242 if (mask & HA_LATT) {
1243 mb->un.varCfgMSIX.messageNumberByHA[30] = i;
1244 }
1245 if (mask & HA_MBATT) {
1246 mb->un.varCfgMSIX.messageNumberByHA[29] = i;
1247 }
1248 if (mask & HA_ERATT) {
1249 mb->un.varCfgMSIX.messageNumberByHA[28] = i;
1250 }
1251 #endif /* EMLXS_LITTLE_ENDIAN */
1252 }
1253
1254 mb->mbxOwner = OWN_HOST;
1255 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1256 mbq->port = (void *)&PPORT;
1257
1258 return;
1259
1260 } /* emlxs_mb_config_msix() */
1261
1262
1263 #endif /* MSI_SUPPORT */
1264
1265
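/*
 * emlxs_mb_reset_ring	Issue a RESET_RING mailbox command for 'ringno'
 */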
1266 /*ARGSUSED*/
1267 extern void
1268 emlxs_mb_reset_ring(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t ringno)
1269 {
1270 MAILBOX *mb = (MAILBOX *)mbq;
1271
1272 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1273
1274 mb->mbxCommand = MBX_RESET_RING;
1275 mb->un.varRstRing.ring_no = ringno;
1276 mb->mbxOwner = OWN_HOST;
1277 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1278 mbq->port = (void *)&PPORT;
1279
1280 return;
1281
1282 } /* emlxs_mb_reset_ring() */
1283
1284
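/*
 * emlxs_mb_dump_vpd	Issue a DUMP_MEMORY mailbox command for the
 *			VPD region (SLI3 and SLI4 variants)
 */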
1285 /*ARGSUSED*/
1286 extern void
1287 emlxs_mb_dump_vpd(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset)
1288 {
1289
1290 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1291 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
1292
1293 /* Clear the local dump_region */
1294 bzero(hba->sli.sli4.dump_region.virt,
1295 hba->sli.sli4.dump_region.size);
1296
1297 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
1298
1299 mb4->mbxCommand = MBX_DUMP_MEMORY;
1300 mb4->un.varDmp4.type = DMP_NV_PARAMS;
1301 mb4->un.varDmp4.entry_index = offset;
1302 mb4->un.varDmp4.region_id = DMP_VPD_REGION;
1303
1304 mb4->un.varDmp4.available_cnt = hba->sli.sli4.dump_region.size;
1305 mb4->un.varDmp4.addrHigh =
1306 PADDR_HI(hba->sli.sli4.dump_region.phys);
1307 mb4->un.varDmp4.addrLow =
1308 PADDR_LO(hba->sli.sli4.dump_region.phys);
1309 mb4->un.varDmp4.rsp_cnt = 0;
1310
1311 mb4->mbxOwner = OWN_HOST;
1312
1313 } else {
1314 MAILBOX *mb = (MAILBOX *)mbq;
1315
1316 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1317
1318 mb->mbxCommand = MBX_DUMP_MEMORY;
1319 mb->un.varDmp.cv = 1;
1320 mb->un.varDmp.type = DMP_NV_PARAMS;
1321 mb->un.varDmp.entry_index = offset;
1322 mb->un.varDmp.region_id = DMP_VPD_REGION;
1323
1324 /* limited by mailbox size */
1325 mb->un.varDmp.word_cnt = DMP_VPD_DUMP_WCOUNT;
1326
1327 mb->un.varDmp.co = 0;
1328 mb->un.varDmp.resp_offset = 0;
1329 mb->mbxOwner = OWN_HOST;
1330 }
1331
1332 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1333 mbq->port = (void *)&PPORT;
1334
1335 } /* emlxs_mb_dump_vpd() */
1336
1337
1338 /* SLI4 */
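/*
 * emlxs_mb_dump_fcoe	Issue a DUMP_MEMORY mailbox command for the
 *			FCoE parameters region (SLI4 only)
 */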
1339 /*ARGSUSED*/
1340 extern void
1341 emlxs_mb_dump_fcoe(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset)
1342 {
1343 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
1344
1345 if (hba->sli_mode < EMLXS_HBA_SLI4_MODE) {
1346 return;
1347 }
1348
1349 /* Clear the local dump_region */
1350 bzero(hba->sli.sli4.dump_region.virt,
1351 hba->sli.sli4.dump_region.size);
1352
1353 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
1354
1355 mb4->mbxCommand = MBX_DUMP_MEMORY;
1356 mb4->un.varDmp4.type = DMP_NV_PARAMS;
1357 mb4->un.varDmp4.entry_index = offset;
1358 mb4->un.varDmp4.region_id = DMP_FCOE_REGION;
1359
1360 mb4->un.varDmp4.available_cnt = hba->sli.sli4.dump_region.size;
1361 mb4->un.varDmp4.addrHigh =
1362 PADDR_HI(hba->sli.sli4.dump_region.phys);
1363 mb4->un.varDmp4.addrLow =
1364 PADDR_LO(hba->sli.sli4.dump_region.phys);
1365 mb4->un.varDmp4.rsp_cnt = 0;
1366
1367 mb4->mbxOwner = OWN_HOST;
1368
1369 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1370 mbq->port = (void *)&PPORT;
1371
1372 } /* emlxs_mb_dump_fcoe() */
1373
1374
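/*
 * emlxs_mb_dump	Issue a DUMP_MEMORY mailbox command for 'words'
 *			words starting at 'offset'
 */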
1375 /*ARGSUSED*/
1376 extern void
1377 emlxs_mb_dump(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t offset, uint32_t words)
1378 {
1379
1380 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1381 MAILBOX4 *mb4 = (MAILBOX4 *)mbq;
1382
1383 /* Clear the local dump_region */
1384 bzero(hba->sli.sli4.dump_region.virt,
1385 hba->sli.sli4.dump_region.size);
1386
1387 bzero((void *) mb4, MAILBOX_CMD_SLI4_BSIZE);
1388
1389 mb4->mbxCommand = MBX_DUMP_MEMORY;
1390 mb4->un.varDmp4.type = DMP_MEM_REG;
1391 mb4->un.varDmp4.entry_index = offset;
1392 mb4->un.varDmp4.region_id = 0;
1393
1394 mb4->un.varDmp4.available_cnt = min((words*4),
1395 hba->sli.sli4.dump_region.size);
1396 mb4->un.varDmp4.addrHigh =
1397 PADDR_HI(hba->sli.sli4.dump_region.phys);
1398 mb4->un.varDmp4.addrLow =
1399 PADDR_LO(hba->sli.sli4.dump_region.phys);
1400 mb4->un.varDmp4.rsp_cnt = 0;
1401
1402 mb4->mbxOwner = OWN_HOST;
1403
1404 } else {
1405
1406 MAILBOX *mb = (MAILBOX *)mbq;
1407
1408 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1409
1410 mb->mbxCommand = MBX_DUMP_MEMORY;
1411 mb->un.varDmp.type = DMP_MEM_REG;
1412 mb->un.varDmp.word_cnt = words;
1413 mb->un.varDmp.base_adr = offset;
1414
1415 mb->un.varDmp.co = 0;
1416 mb->un.varDmp.resp_offset = 0;
1417 mb->mbxOwner = OWN_HOST;
1418 }
1419
1420 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1421 mbq->port = (void *)&PPORT;
1422
1423 return;
1424
1425 } /* emlxs_mb_dump() */
1426
1427
1428 /*
1429 * emlxs_mb_read_nv Issue a READ NVPARAM mailbox command
1430 */
1431 /*ARGSUSED*/
1432 extern void
1433 emlxs_mb_read_nv(emlxs_hba_t *hba, MAILBOXQ *mbq)
1434 {
1435 MAILBOX *mb = (MAILBOX *)mbq;
1436
1437 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1438
1439 mb->mbxCommand = MBX_READ_NV;
1440 mb->mbxOwner = OWN_HOST;
1441 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1442 mbq->port = (void *)&PPORT;
1443
1444 } /* emlxs_mb_read_nv() */
1445
1446
1447 /*
1448 * emlxs_mb_read_rev Issue a READ REV mailbox command
1449 */
1450 /*ARGSUSED*/
1451 extern void
1452 emlxs_mb_read_rev(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t v3)
1453 {
1454 MAILBOX *mb = (MAILBOX *)mbq;
1455
1456 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1457 bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
1458 mbq->nonembed = NULL;
1459 } else {
1460 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1461
1462 mb->un.varRdRev.cv = 1;
1463
1464 if (v3) {
1465 mb->un.varRdRev.cv3 = 1;
1466 }
1467 }
1468
1469 mb->mbxCommand = MBX_READ_REV;
1470 mb->mbxOwner = OWN_HOST;
1471 mbq->mbox_cmpl = NULL;
1472 mbq->port = (void *)&PPORT;
1473
1474 } /* emlxs_mb_read_rev() */
1475
1476
1477 /*
1478 * emlxs_mb_run_biu_diag Issue a RUN_BIU_DIAG mailbox command
1479 */
1480 /*ARGSUSED*/
1481 extern uint32_t
1482 emlxs_mb_run_biu_diag(emlxs_hba_t *hba, MAILBOXQ *mbq, uint64_t out,
1483 uint64_t in)
1484 {
1485 MAILBOX *mb = (MAILBOX *)mbq;
1486
1487 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1488
1489 mb->mbxCommand = MBX_RUN_BIU_DIAG64;
1490 mb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize = MEM_ELSBUF_SIZE;
1491 mb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh = PADDR_HI(out);
1492 mb->un.varBIUdiag.un.s2.xmit_bde64.addrLow = PADDR_LO(out);
1493 mb->un.varBIUdiag.un.s2.rcv_bde64.tus.f.bdeSize = MEM_ELSBUF_SIZE;
1494 mb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh = PADDR_HI(in);
1495 mb->un.varBIUdiag.un.s2.rcv_bde64.addrLow = PADDR_LO(in);
1496 mb->mbxOwner = OWN_HOST;
1497 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1498 mbq->port = (void *)&PPORT;
1499
1500 return (0);
1501 } /* emlxs_mb_run_biu_diag() */
1502
1503
1504 /* This should only be called with active MBX_NOWAIT mailboxes */
1505 void
1506 emlxs_mb_retry(emlxs_hba_t *hba, MAILBOXQ *mbq)
1507 {
1508 MAILBOX *mb;
1509 MAILBOX *mbox;
1510 int rc;
1511
1512 mbox = (MAILBOX *)emlxs_mem_get(hba, MEM_MBOX);
1513 if (!mbox) {
1514 return;
1515 }
1516 mb = (MAILBOX *)mbq;
1517 bcopy((uint8_t *)mb, (uint8_t *)mbox, MAILBOX_CMD_BSIZE);
1518 mbox->mbxOwner = OWN_HOST;
1519 mbox->mbxStatus = 0;
1520
1521 mutex_enter(&EMLXS_PORT_LOCK);
1522
1523 HBASTATS.MboxCompleted++;
1524
1525 if (mb->mbxStatus != 0) {
1526 HBASTATS.MboxError++;
1527 } else {
1528 HBASTATS.MboxGood++;
1529 }
1530
1531 hba->mbox_mbq = NULL;
1532 hba->mbox_queue_flag = 0;
1533
1534 mutex_exit(&EMLXS_PORT_LOCK);
1535
1536 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
1537 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
1538 emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
1539 }
1540 return;
1541
1542 } /* emlxs_mb_retry() */
1543
1544
1545 /* SLI3 */
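/*
 * emlxs_read_la_mbcmpl	Completion handler for READ_LA64; processes the
 *			link attention and declares the link up or down
 */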
1546 static uint32_t
1547 emlxs_read_la_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
1548 {
1549 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
1550 MAILBOX *mb;
1551 MAILBOXQ *mbox;
1552 MATCHMAP *mp;
1553 READ_LA_VAR la;
1554 int i;
1555 uint32_t control;
1556
1557 mb = (MAILBOX *)mbq;
1558 if (mb->mbxStatus) {
1559 if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
1560 control = mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize;
1561 if (control == 0) {
1562 (void) emlxs_mb_read_la(hba, mbq);
1563 }
1564 emlxs_mb_retry(hba, mbq);
1565 return (1);
1566 }
1567 /* Enable Link Attention interrupts */
1568 mutex_enter(&EMLXS_PORT_LOCK);
1569
1570 if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1571 hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1572 WRITE_CSR_REG(hba, FC_HC_REG(hba),
1573 hba->sli.sli3.hc_copy);
1574 #ifdef FMA_SUPPORT
1575 /* Access handle validation */
1576 EMLXS_CHK_ACC_HANDLE(hba,
1577 hba->sli.sli3.csr_acc_handle);
1578 #endif /* FMA_SUPPORT */
1579 }
1580
1581 mutex_exit(&EMLXS_PORT_LOCK);
1582 return (0);
1583 }
1584 bcopy((void *)&mb->un.varReadLA, (void *)&la, sizeof (READ_LA_VAR));
1585
1586 mp = (MATCHMAP *)mbq->bp;
1587 if (mp) {
1588 bcopy((caddr_t)mp->virt, (caddr_t)port->alpa_map, 128);
1589 } else {
1590 bzero((caddr_t)port->alpa_map, 128);
1591 }
1592
1593 if (la.attType == AT_LINK_UP) {
1594 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_linkup_atten_msg,
1595 "tag=%d -> %d ALPA=%x",
1596 (uint32_t)hba->link_event_tag,
1597 (uint32_t)la.eventTag,
1598 (uint32_t)la.granted_AL_PA);
1599 } else {
1600 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_linkdown_atten_msg,
1601 "tag=%d -> %d ALPA=%x",
1602 (uint32_t)hba->link_event_tag,
1603 (uint32_t)la.eventTag,
1604 (uint32_t)la.granted_AL_PA);
1605 }
1606
1607 if (la.pb) {
1608 hba->flag |= FC_BYPASSED_MODE;
1609 } else {
1610 hba->flag &= ~FC_BYPASSED_MODE;
1611 }
1612
1613 if (hba->link_event_tag == la.eventTag) {
1614 HBASTATS.LinkMultiEvent++;
1615 } else if (hba->link_event_tag + 1 < la.eventTag) {
1616 HBASTATS.LinkMultiEvent++;
1617
1618 /* Make sure link is declared down */
1619 emlxs_linkdown(hba);
1620 }
1621
1622 hba->link_event_tag = la.eventTag;
1623 port->lip_type = 0;
1624
1625 /* If link not already up then declare it up now */
1626 if ((la.attType == AT_LINK_UP) && (hba->state < FC_LINK_UP)) {
1627
1628 #ifdef MENLO_SUPPORT
1629 if ((hba->model_info.device_id == PCI_DEVICE_ID_HORNET) &&
1630 (hba->flag & (FC_ILB_MODE | FC_ELB_MODE))) {
1631 la.topology = TOPOLOGY_LOOP;
1632 la.granted_AL_PA = 0;
1633 port->alpa_map[0] = 1;
1634 port->alpa_map[1] = 0;
1635 la.lipType = LT_PORT_INIT;
1636 }
1637 #endif /* MENLO_SUPPORT */
1638 /* Save the linkspeed */
1639 hba->linkspeed = la.UlnkSpeed;
1640
1641 /* Check for old model adapters that only */
1642 /* supported 1Gb */
1643 if ((hba->linkspeed == 0) &&
1644 (hba->model_info.chip & EMLXS_DRAGONFLY_CHIP)) {
1645 hba->linkspeed = LA_1GHZ_LINK;
1646 }
1647
1648 if ((hba->topology = la.topology) == TOPOLOGY_LOOP) {
1649 port->granted_alpa = la.granted_AL_PA;
1650 port->did = port->granted_alpa;
1651 port->lip_type = la.lipType;
1652 if (hba->flag & FC_SLIM2_MODE) {
1653 i = la.un.lilpBde64.tus.f.bdeSize;
1654 } else {
1655 i = la.un.lilpBde.bdeSize;
1656 }
1657
1658 if (i == 0) {
1659 port->alpa_map[0] = 0;
1660 } else {
1661 uint8_t *alpa_map;
1662 uint32_t j;
1663
1664 /* Check number of devices in map */
1665 if (port->alpa_map[0] > 127) {
1666 port->alpa_map[0] = 127;
1667 }
1668
1669 alpa_map = (uint8_t *)port->alpa_map;
1670
1671 EMLXS_MSGF(EMLXS_CONTEXT,
1672 &emlxs_link_atten_msg,
1673 "alpa_map: %d device(s): "
1674 "%02x %02x %02x %02x %02x %02x "
1675 "%02x", alpa_map[0], alpa_map[1],
1676 alpa_map[2], alpa_map[3],
1677 alpa_map[4], alpa_map[5],
1678 alpa_map[6], alpa_map[7]);
1679
1680 for (j = 8; j <= alpa_map[0]; j += 8) {
1681 EMLXS_MSGF(EMLXS_CONTEXT,
1682 &emlxs_link_atten_msg,
1683 "alpa_map: "
1684 "%02x %02x %02x %02x %02x "
1685 "%02x %02x %02x",
1686 alpa_map[j],
1687 alpa_map[j + 1],
1688 alpa_map[j + 2],
1689 alpa_map[j + 3],
1690 alpa_map[j + 4],
1691 alpa_map[j + 5],
1692 alpa_map[j + 6],
1693 alpa_map[j + 7]);
1694 }
1695 }
1696 }
1697 #ifdef MENLO_SUPPORT
1698 /* Check if Menlo maintenance mode is enabled */
1699 if (hba->model_info.device_id ==
1700 PCI_DEVICE_ID_HORNET) {
1701 if (la.mm == 1) {
1702 EMLXS_MSGF(EMLXS_CONTEXT,
1703 &emlxs_link_atten_msg,
1704 "Maintenance Mode enabled.");
1705
1706 mutex_enter(&EMLXS_PORT_LOCK);
1707 hba->flag |= FC_MENLO_MODE;
1708 mutex_exit(&EMLXS_PORT_LOCK);
1709
1710 mutex_enter(&EMLXS_LINKUP_LOCK);
1711 cv_broadcast(&EMLXS_LINKUP_CV);
1712 mutex_exit(&EMLXS_LINKUP_LOCK);
1713 } else {
1714 EMLXS_MSGF(EMLXS_CONTEXT,
1715 &emlxs_link_atten_msg,
1716 "Maintenance Mode disabled.");
1717 }
1718
1719 /* Check FCoE attention bit */
1720 if (la.fa == 1) {
1721 emlxs_thread_spawn(hba,
1722 emlxs_fcoe_attention_thread,
1723 0, 0);
1724 }
1725 }
1726 #endif /* MENLO_SUPPORT */
1727
1728 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1729 MEM_MBOX))) {
1730 /* This should turn on DELAYED ABTS for */
1731 /* ELS timeouts */
1732 emlxs_mb_set_var(hba, mbox, 0x00052198, 0x1);
1733
1734 emlxs_mb_put(hba, mbox);
1735 }
1736
1737 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1738 MEM_MBOX))) {
1739 /* Read the adapter's service */
1740 /* parameters */
1741 if (emlxs_mb_read_sparam(hba, mbox) == 0) {
1742 emlxs_mb_put(hba, mbox);
1743 } else {
1744 emlxs_mem_put(hba, MEM_MBOX,
1745 (void *)mbox);
1746 }
1747 }
1748
1749 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1750 MEM_MBOX))) {
1751 emlxs_mb_config_link(hba, mbox);
1752
1753 emlxs_mb_put(hba, mbox);
1754 }
1755
1756 /* Declare the linkup here */
1757 emlxs_linkup(hba);
1758 }
1759
1760 /* If link not already down then declare it down now */
1761 else if (la.attType == AT_LINK_DOWN) {
1762 /* Make sure link is declared down */
1763 emlxs_linkdown(hba);
1764 }
1765
1766 /* Enable Link attention interrupt */
1767 mutex_enter(&EMLXS_PORT_LOCK);
1768
1769 if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1770 hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1771 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
1772 #ifdef FMA_SUPPORT
1773 /* Access handle validation */
1774 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
1775 #endif /* FMA_SUPPORT */
1776 }
1777
1778 mutex_exit(&EMLXS_PORT_LOCK);
1779
1780 return (0);
1781
1782 } /* emlxs_read_la_mbcmpl() */
1783
1784
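/*
 * emlxs_mb_read_la	Issue a READ_LA64 mailbox command; returns 1 if
 *			the ALPA map buffer cannot be allocated
 */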
1785 extern uint32_t
1786 emlxs_mb_read_la(emlxs_hba_t *hba, MAILBOXQ *mbq)
1787 {
1788 MAILBOX *mb = (MAILBOX *)mbq;
1789 MATCHMAP *mp;
1790
1791 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1792
1793 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
1794 mb->mbxCommand = MBX_READ_LA64;
1795
1796 return (1);
1797 }
1798
1799 mb->mbxCommand = MBX_READ_LA64;
1800 mb->un.varReadLA.un.lilpBde64.tus.f.bdeSize = 128;
1801 mb->un.varReadLA.un.lilpBde64.addrHigh = PADDR_HI(mp->phys);
1802 mb->un.varReadLA.un.lilpBde64.addrLow = PADDR_LO(mp->phys);
1803 mb->mbxOwner = OWN_HOST;
1804 mbq->mbox_cmpl = emlxs_read_la_mbcmpl;
1805 mbq->port = (void *)&PPORT;
1806
1807 /*
1808 * save address for completion
1809 */
1810 mbq->bp = (void *)mp;
1811
1812 return (0);
1813
1814 } /* emlxs_mb_read_la() */
1815
1816
1817 /* SLI3 */
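/*
 * emlxs_clear_la_mbcmpl	Completion handler for CLEAR_LA; re-enables
 *				link attention interrupts and moves the
 *				adapter to the ready state
 */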
1818 static uint32_t
1819 emlxs_clear_la_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
1820 {
1821 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
1822 MAILBOX *mb;
1823 MAILBOXQ *mbox;
1824 emlxs_port_t *vport;
1825 uint32_t la_enable;
1826 int i, rc;
1827
1828 mb = (MAILBOX *)mbq;
1829 if (mb->mbxStatus) {
1830 la_enable = 1;
1831
1832 if (mb->mbxStatus == 0x1601) {
1833 /* Get a buffer which will be used for */
1834 /* mailbox commands */
1835 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
1836 MEM_MBOX))) {
1837 /* Get link attention message */
1838 if (emlxs_mb_read_la(hba, mbox) == 0) {
1839 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
1840 (MAILBOX *)mbox, MBX_NOWAIT, 0);
1841 if ((rc != MBX_BUSY) &&
1842 (rc != MBX_SUCCESS)) {
1843 emlxs_mem_put(hba,
1844 MEM_MBOX, (void *)mbox);
1845 }
1846 la_enable = 0;
1847 } else {
1848 emlxs_mem_put(hba, MEM_MBOX,
1849 (void *)mbox);
1850 }
1851 }
1852 }
1853
1854 mutex_enter(&EMLXS_PORT_LOCK);
1855 if (la_enable) {
1856 if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1857 /* Enable Link Attention interrupts */
1858 hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1859 WRITE_CSR_REG(hba, FC_HC_REG(hba),
1860 hba->sli.sli3.hc_copy);
1861 #ifdef FMA_SUPPORT
1862 /* Access handle validation */
1863 EMLXS_CHK_ACC_HANDLE(hba,
1864 hba->sli.sli3.csr_acc_handle);
1865 #endif /* FMA_SUPPORT */
1866 }
1867 } else {
1868 if (hba->sli.sli3.hc_copy & HC_LAINT_ENA) {
1869 /* Disable Link Attention interrupts */
1870 hba->sli.sli3.hc_copy &= ~HC_LAINT_ENA;
1871 WRITE_CSR_REG(hba, FC_HC_REG(hba),
1872 hba->sli.sli3.hc_copy);
1873 #ifdef FMA_SUPPORT
1874 /* Access handle validation */
1875 EMLXS_CHK_ACC_HANDLE(hba,
1876 hba->sli.sli3.csr_acc_handle);
1877 #endif /* FMA_SUPPORT */
1878 }
1879 }
1880 mutex_exit(&EMLXS_PORT_LOCK);
1881
1882 return (0);
1883 }
1884 /* Enable Link Attention interrupts */
1885 mutex_enter(&EMLXS_PORT_LOCK);
1886
1887 if (!(hba->sli.sli3.hc_copy & HC_LAINT_ENA)) {
1888 hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
1889 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
1890 #ifdef FMA_SUPPORT
1891 /* Access handle validation */
1892 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
1893 #endif /* FMA_SUPPORT */
1894 }
1895
1896 if (hba->state >= FC_LINK_UP) {
1897 EMLXS_STATE_CHANGE_LOCKED(hba, FC_READY);
1898 }
1899
1900 mutex_exit(&EMLXS_PORT_LOCK);
1901
1902 /* Adapter is now ready for FCP traffic */
1903 if (hba->state == FC_READY) {
1904
1905 /* Register vpi's for all ports that have did's */
1906 for (i = 0; i < MAX_VPORTS; i++) {
1907 vport = &VPORT(i);
1908
1909 if (!(vport->flag & EMLXS_PORT_BOUND) ||
1910 !(vport->did)) {
1911 continue;
1912 }
1913
1914 (void) emlxs_mb_reg_vpi(vport, NULL);
1915 }
1916
1917 /* Attempt to send any pending IO */
1918 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[hba->channel_fcp], 0);
1919 }
1920 return (0);
1921
1922 } /* emlxs_clear_la_mbcmpl() */
1923
1924
1925 /* SLI3 */
1926 extern void
1927 emlxs_mb_clear_la(emlxs_hba_t *hba, MAILBOXQ *mbq)
1928 {
1929 MAILBOX *mb = (MAILBOX *)mbq;
1930
1931 #ifdef FC_RPI_CHECK
1932 emlxs_rpi_check(hba);
1933 #endif /* FC_RPI_CHECK */
1934
1935 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1936
1937 mb->un.varClearLA.eventTag = hba->link_event_tag;
1938 mb->mbxCommand = MBX_CLEAR_LA;
1939 mb->mbxOwner = OWN_HOST;
1940 mbq->mbox_cmpl = emlxs_clear_la_mbcmpl;
1941 mbq->port = (void *)&PPORT;
1942
1943 return;
1944
1945 } /* emlxs_mb_clear_la() */
1946
1947
1948 /*
1949 * emlxs_mb_read_status Issue a READ STATUS mailbox command
1950 */
1951 /*ARGSUSED*/
1952 extern void
1953 emlxs_mb_read_status(emlxs_hba_t *hba, MAILBOXQ *mbq)
1954 {
1955 MAILBOX *mb = (MAILBOX *)mbq;
1956
1957 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1958
1959 mb->mbxCommand = MBX_READ_STATUS;
1960 mb->mbxOwner = OWN_HOST;
1961 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1962 mbq->port = (void *)&PPORT;
1963
1964 } /* emlxs_mb_read_status() */
1965
1966
1967 /*
1968 * emlxs_mb_read_lnk_stat Issue a LINK STATUS mailbox command
1969 */
1970 /*ARGSUSED*/
1971 extern void
1972 emlxs_mb_read_lnk_stat(emlxs_hba_t *hba, MAILBOXQ *mbq)
1973 {
1974 MAILBOX *mb = (MAILBOX *)mbq;
1975
1976 bzero((void *)mb, MAILBOX_CMD_BSIZE);
1977
1978 mb->mbxCommand = MBX_READ_LNK_STAT;
1979 mb->mbxOwner = OWN_HOST;
1980 mbq->mbox_cmpl = NULL; /* no cmpl needed */
1981 mbq->port = (void *)&PPORT;
1982
1983 } /* emlxs_mb_read_lnk_stat() */
1984
1985
1986
1987
1988
1989
1990 /*
1991 * emlxs_mb_config_ring Issue a CONFIG RING mailbox command
1992 */
1993 extern void
1994 emlxs_mb_config_ring(emlxs_hba_t *hba, int32_t ring, MAILBOXQ *mbq)
1995 {
1996 MAILBOX *mb = (MAILBOX *)mbq;
1997 int32_t i;
1998 int32_t j;
1999
2000 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2001
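/* Compute the starting offset of this ring's entries */
/* in the ring mask arrays */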
2002 j = 0;
2003 for (i = 0; i < ring; i++) {
2004 j += hba->sli.sli3.ring_masks[i];
2005 }
2006
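/* Copy this ring's R_CTL/TYPE match values and masks; */
/* the mailbox holds at most 6 mask entries in total */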
2007 for (i = 0; i < hba->sli.sli3.ring_masks[ring]; i++) {
2008 if ((j + i) >= 6) {
2009 break;
2010 }
2011
2012 mb->un.varCfgRing.rrRegs[i].rval =
2013 hba->sli.sli3.ring_rval[j + i];
2014 mb->un.varCfgRing.rrRegs[i].rmask =
2015 hba->sli.sli3.ring_rmask[j + i];
2016 mb->un.varCfgRing.rrRegs[i].tval =
2017 hba->sli.sli3.ring_tval[j + i];
2018 mb->un.varCfgRing.rrRegs[i].tmask =
2019 hba->sli.sli3.ring_tmask[j + i];
2020 }
2021
2022 mb->un.varCfgRing.ring = ring;
2023 mb->un.varCfgRing.profile = 0;
2024 mb->un.varCfgRing.maxOrigXchg = 0;
2025 mb->un.varCfgRing.maxRespXchg = 0;
2026 mb->un.varCfgRing.recvNotify = 1;
2027 mb->un.varCfgRing.numMask = hba->sli.sli3.ring_masks[ring];
2028 mb->mbxCommand = MBX_CONFIG_RING;
2029 mb->mbxOwner = OWN_HOST;
2030 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2031 mbq->port = (void *)&PPORT;
2032
2033 return;
2034
2035 } /* emlxs_mb_config_ring() */
2036
2037
2038 /*
2039 * emlxs_mb_config_link Issue a CONFIG LINK mailbox command
2040 */
2041 extern void
2042 emlxs_mb_config_link(emlxs_hba_t *hba, MAILBOXQ *mbq)
2043 {
2044 MAILBOX *mb = (MAILBOX *)mbq;
2045 emlxs_port_t *port = &PPORT;
2046 emlxs_config_t *cfg = &CFG;
2047
2048 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2049
2050 /*
2051 * NEW_FEATURE SLI-2, Coalescing Response Feature.
2052 */
2053 if (cfg[CFG_CR_DELAY].current) {
2054 mb->un.varCfgLnk.cr = 1;
2055 mb->un.varCfgLnk.ci = 1;
2056 mb->un.varCfgLnk.cr_delay = cfg[CFG_CR_DELAY].current;
2057 mb->un.varCfgLnk.cr_count = cfg[CFG_CR_COUNT].current;
2058 }
2059
2060 if (cfg[CFG_ACK0].current) {
2061 mb->un.varCfgLnk.ack0_enable = 1;
2062 }
2063
2064 mb->un.varCfgLnk.myId = port->did;
2065 mb->un.varCfgLnk.edtov = hba->fc_edtov;
2066 mb->un.varCfgLnk.arbtov = hba->fc_arbtov;
2067 mb->un.varCfgLnk.ratov = hba->fc_ratov;
2068 mb->un.varCfgLnk.rttov = hba->fc_rttov;
2069 mb->un.varCfgLnk.altov = hba->fc_altov;
2070 mb->un.varCfgLnk.crtov = hba->fc_crtov;
2071 mb->un.varCfgLnk.citov = hba->fc_citov;
2072 mb->mbxCommand = MBX_CONFIG_LINK;
2073 mb->mbxOwner = OWN_HOST;
2074 mbq->mbox_cmpl = NULL;
2075 mbq->port = (void *)port;
2076
2077 return;
2078
2079 } /* emlxs_mb_config_link() */
2080
2081
2082 static uint32_t
2083 emlxs_init_link_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2084 {
2085 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
2086 emlxs_config_t *cfg = &CFG;
2087 MAILBOX *mb;
2088
2089 mb = (MAILBOX *)mbq;
2090 if (mb->mbxStatus) {
2091 if ((hba->flag & FC_SLIM2_MODE) &&
2092 (hba->mbox_queue_flag == MBX_NOWAIT)) {
2093 /* Retry only MBX_NOWAIT requests */
2094
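/* If a fixed link speed was configured and the command */
/* failed, clear the speed request and retry with */
/* auto-negotiation */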
2095 if ((cfg[CFG_LINK_SPEED].current > 0) &&
2096 ((mb->mbxStatus == 0x0011) ||
2097 (mb->mbxStatus == 0x0500))) {
2098
2099 EMLXS_MSGF(EMLXS_CONTEXT,
2100 &emlxs_mbox_event_msg,
2101 "Retrying. %s: status=%x. Auto-speed set.",
2102 emlxs_mb_cmd_xlate(mb->mbxCommand),
2103 (uint32_t)mb->mbxStatus);
2104
2105 mb->un.varInitLnk.link_flags &=
2106 ~FLAGS_LINK_SPEED;
2107 mb->un.varInitLnk.link_speed = 0;
2108
2109 emlxs_mb_retry(hba, mbq);
2110 return (1);
2111 }
2112 }
2113 }
2114 return (0);
2115
2116 } /* emlxs_init_link_mbcmpl() */
2117
2118
2119 /*
2120 * emlxs_mb_init_link Issue an INIT LINK mailbox command
2121 */
2122 extern void
2123 emlxs_mb_init_link(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t topology,
2124 uint32_t linkspeed)
2125 {
2126 MAILBOX *mb = (MAILBOX *)mbq;
2127 emlxs_vpd_t *vpd = &VPD;
2128 emlxs_config_t *cfg = &CFG;
2129
2130 if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
2131 (SLI4_FCOE_MODE)) {
2132 bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
2133 mbq->nonembed = NULL;
2134 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2135 mbq->port = (void *)&PPORT;
2136
2137 mb->mbxCommand = (volatile uint8_t) MBX_INIT_LINK;
2138 mb->mbxOwner = OWN_HOST;
2139 return;
2140 }
2141
2142 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2143
2144 switch (topology) {
2145 case FLAGS_LOCAL_LB:
2146 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
2147 mb->un.varInitLnk.link_flags |= FLAGS_LOCAL_LB;
2148 break;
2149 case FLAGS_TOPOLOGY_MODE_LOOP_PT:
2150 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
2151 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
2152 break;
2153 case FLAGS_TOPOLOGY_MODE_PT_PT:
2154 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
2155 break;
2156 case FLAGS_TOPOLOGY_MODE_LOOP:
2157 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
2158 break;
2159 case FLAGS_TOPOLOGY_MODE_PT_LOOP:
2160 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
2161 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
2162 break;
2163 }
2164
2165 if (cfg[CFG_LILP_ENABLE].current == 0) {
2166 /* Disable LIRP/LILP support */
2167 mb->un.varInitLnk.link_flags |= FLAGS_LIRP_LILP;
2168 }
2169
2170 /*
2171 * Setting up the link speed
2172 */
2173 switch (linkspeed) {
2174 case 0:
2175 break;
2176
2177 case 1:
2178 if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
2179 linkspeed = 0;
2180 }
2181 break;
2182
2183 case 2:
2184 if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
2185 linkspeed = 0;
2186 }
2187 break;
2188
2189 case 4:
2190 if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
2191 linkspeed = 0;
2192 }
2193 break;
2194
2195 case 8:
2196 if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
2197 linkspeed = 0;
2198 }
2199 break;
2200
2201 case 10:
2202 if (!(vpd->link_speed & LMT_10GB_CAPABLE)) {
2203 linkspeed = 0;
2204 }
2205 break;
2206
2207 case 16:
2208 if (!(vpd->link_speed & LMT_16GB_CAPABLE)) {
2209 linkspeed = 0;
2210 }
2211 break;
2212
2213 default:
2214 linkspeed = 0;
2215 break;
2216
2217 }
2218
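/* Request a fixed link speed only when one was validated */
/* above and the adapter feature level supports it */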
2219 if ((linkspeed > 0) && (vpd->feaLevelHigh >= 0x02)) {
2220 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
2221 mb->un.varInitLnk.link_speed = linkspeed;
2222 }
2223
2224 mb->un.varInitLnk.link_flags |= FLAGS_PREABORT_RETURN;
2225
2226 mb->un.varInitLnk.fabric_AL_PA =
2227 (uint8_t)cfg[CFG_ASSIGN_ALPA].current;
2228 mb->mbxCommand = (volatile uint8_t) MBX_INIT_LINK;
2229 mb->mbxOwner = OWN_HOST;
2230 mbq->mbox_cmpl = emlxs_init_link_mbcmpl;
2231 mbq->port = (void *)&PPORT;
2232
2233
2234 return;
2235
2236 } /* emlxs_mb_init_link() */
2237
2238
2239 /*
2240 * emlxs_mb_down_link Issue a DOWN LINK mailbox command
2241 */
2242 /*ARGSUSED*/
2243 extern void
2244 emlxs_mb_down_link(emlxs_hba_t *hba, MAILBOXQ *mbq)
2245 {
2246 MAILBOX *mb = (MAILBOX *)mbq;
2247
2248 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2249
2250 mb->mbxCommand = MBX_DOWN_LINK;
2251 mb->mbxOwner = OWN_HOST;
2252 mbq->mbox_cmpl = NULL;
2253 mbq->port = (void *)&PPORT;
2254
2255 return;
2256
2257 } /* emlxs_mb_down_link() */
2258
2259
2260 static uint32_t
2261 emlxs_read_sparam_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2262 {
2263 emlxs_port_t *port = &PPORT;
2264 MAILBOX *mb;
2265 MATCHMAP *mp;
2266 emlxs_port_t *vport;
2267 int32_t i;
2268 uint32_t control;
2269 uint8_t null_wwn[8];
2270
2271 mb = (MAILBOX *)mbq;
2272 if (mb->mbxStatus) {
2273 if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
2274 control = mb->un.varRdSparm.un.sp64.tus.f.bdeSize;
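/* A zero BDE size means no receive buffer was attached; */
/* rebuild the command (allocating a new buffer) before */
/* retrying */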
2275 if (control == 0) {
2276 (void) emlxs_mb_read_sparam(hba, mbq);
2277 }
2278 emlxs_mb_retry(hba, mbq);
2279 return (1);
2280 }
2281 return (0);
2282 }
2283 mp = (MATCHMAP *)mbq->bp;
2284 if (!mp) {
2285 return (0);
2286 }
2287
2288 bcopy((caddr_t)mp->virt, (caddr_t)&hba->sparam, sizeof (SERV_PARM));
2289
2290 /* Initialize the node name and port name only once */
2291 bzero(null_wwn, 8);
2292 if ((bcmp((caddr_t)&hba->wwnn, (caddr_t)null_wwn, 8) == 0) &&
2293 (bcmp((caddr_t)&hba->wwpn, (caddr_t)null_wwn, 8) == 0)) {
2294 bcopy((caddr_t)&hba->sparam.nodeName,
2295 (caddr_t)&hba->wwnn, sizeof (NAME_TYPE));
2296
2297 bcopy((caddr_t)&hba->sparam.portName,
2298 (caddr_t)&hba->wwpn, sizeof (NAME_TYPE));
2299 } else {
2300 bcopy((caddr_t)&hba->wwnn,
2301 (caddr_t)&hba->sparam.nodeName, sizeof (NAME_TYPE));
2302
2303 bcopy((caddr_t)&hba->wwpn,
2304 (caddr_t)&hba->sparam.portName, sizeof (NAME_TYPE));
2305 }
2306
2307 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2308 "SPARAM: EDTOV hba=%x mbox_csp=%x BBC=%x",
2309 hba->fc_edtov, hba->sparam.cmn.e_d_tov,
2310 hba->sparam.cmn.bbCreditlsb);
2311
2312 /* Initialize the physical port */
2313 bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
2314 sizeof (SERV_PARM));
2315 bcopy((caddr_t)&hba->wwpn, (caddr_t)&port->wwpn,
2316 sizeof (NAME_TYPE));
2317 bcopy((caddr_t)&hba->wwnn, (caddr_t)&port->wwnn,
2318 sizeof (NAME_TYPE));
2319
2320 /* Initialize the virtual ports */
2321 for (i = 1; i < MAX_VPORTS; i++) {
2322 vport = &VPORT(i);
2323 if (! (vport->flag & EMLXS_PORT_BOUND)) {
2324 continue;
2325 }
2326
2327 bcopy((caddr_t)&hba->sparam,
2328 (caddr_t)&vport->sparam,
2329 sizeof (SERV_PARM));
2330
2331 bcopy((caddr_t)&vport->wwnn,
2332 (caddr_t)&vport->sparam.nodeName,
2333 sizeof (NAME_TYPE));
2334
2335 bcopy((caddr_t)&vport->wwpn,
2336 (caddr_t)&vport->sparam.portName,
2337 sizeof (NAME_TYPE));
2338 }
2339
2340 return (0);
2341
2342 } /* emlxs_read_sparam_mbcmpl() */
2343
2344
2345 /*
2346 * emlxs_mb_read_sparam Issue a READ SPARAM mailbox command
2347 */
2348 extern uint32_t
2349 emlxs_mb_read_sparam(emlxs_hba_t *hba, MAILBOXQ *mbq)
2350 {
2351 MAILBOX *mb = (MAILBOX *)mbq;
2352 MATCHMAP *mp;
2353
2354 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2355
2356 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
2357 mb->mbxCommand = MBX_READ_SPARM64;
2358
2359 return (1);
2360 }
2361
2362 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM);
2363 mb->un.varRdSparm.un.sp64.addrHigh = PADDR_HI(mp->phys);
2364 mb->un.varRdSparm.un.sp64.addrLow = PADDR_LO(mp->phys);
2365 mb->mbxCommand = MBX_READ_SPARM64;
2366 mb->mbxOwner = OWN_HOST;
2367 mbq->mbox_cmpl = emlxs_read_sparam_mbcmpl;
2368 mbq->port = (void *)&PPORT;
2369
2370 /*
2371 * save address for completion
2372 */
2373 mbq->bp = (void *)mp;
2374
2375 return (0);
2376
2377 } /* emlxs_mb_read_sparam() */
2378
2379
2380 /*
2381 * emlxs_mb_read_rpi Issue a READ RPI mailbox command
2382 */
2383 /*ARGSUSED*/
2384 extern uint32_t
2385 emlxs_mb_read_rpi(emlxs_hba_t *hba, uint32_t rpi, MAILBOXQ *mbq,
2386 uint32_t flag)
2387 {
2388 MAILBOX *mb = (MAILBOX *)mbq;
2389
2390 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2391
2392 /*
2393 * Set flag to issue action on cmpl
2394 */
2395 mb->un.varWords[30] = flag;
2396 mb->un.varRdRPI.reqRpi = (volatile uint16_t) rpi;
2397 mb->mbxCommand = MBX_READ_RPI64;
2398 mb->mbxOwner = OWN_HOST;
2399 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2400 mbq->port = (void *)&PPORT;
2401
2402 return (0);
2403 } /* emlxs_mb_read_rpi() */
2404
2405
2406 /*
2407 * emlxs_mb_read_xri Issue a READ XRI mailbox command
2408 */
2409 /*ARGSUSED*/
2410 extern uint32_t
2411 emlxs_mb_read_xri(emlxs_hba_t *hba, uint32_t xri, MAILBOXQ *mbq,
2412 uint32_t flag)
2413 {
2414 MAILBOX *mb = (MAILBOX *)mbq;
2415
2416 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2417
2418 /*
2419 * Set flag to issue action on cmpl
2420 */
2421 mb->un.varWords[30] = flag;
2422 mb->un.varRdXRI.reqXri = (volatile uint16_t)xri;
2423 mb->mbxCommand = MBX_READ_XRI;
2424 mb->mbxOwner = OWN_HOST;
2425 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2426 mbq->port = (void *)&PPORT;
2427
2428 return (0);
2429 } /* emlxs_mb_read_xri() */
2430
2431
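/*
* emlxs_mb_check_sparm Validate a login's service parameters
*
* Returns 0 if the parameters are acceptable (F_Ports are always
* accepted), 1 if the port name is null, or 2 if the node name is null.
* A zero class 2/3 receive data size is replaced with the CSP value.
*/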
2432 /*ARGSUSED*/
2433 extern int32_t
2434 emlxs_mb_check_sparm(emlxs_hba_t *hba, SERV_PARM *nsp)
2435 {
2436 uint32_t nsp_value;
2437 uint32_t *iptr;
2438
2439 if (nsp->cmn.fPort) {
2440 return (0);
2441 }
2442
2443 /* Validate the service parameters */
2444 iptr = (uint32_t *)&nsp->portName;
2445 if (iptr[0] == 0 && iptr[1] == 0) {
2446 return (1);
2447 }
2448
2449 iptr = (uint32_t *)&nsp->nodeName;
2450 if (iptr[0] == 0 && iptr[1] == 0) {
2451 return (2);
2452 }
2453
2454 if (nsp->cls2.classValid) {
2455 nsp_value =
2456 ((nsp->cls2.rcvDataSizeMsb & 0x0f) << 8) |
2457 nsp->cls2.rcvDataSizeLsb;
2458
2459 /* If the receive data length is zero then set it to */
2460 /* the CSP value */
2461 if (!nsp_value) {
2462 nsp->cls2.rcvDataSizeMsb = nsp->cmn.bbRcvSizeMsb;
2463 nsp->cls2.rcvDataSizeLsb = nsp->cmn.bbRcvSizeLsb;
2464 return (0);
2465 }
2466 }
2467
2468 if (nsp->cls3.classValid) {
2469 nsp_value =
2470 ((nsp->cls3.rcvDataSizeMsb & 0x0f) << 8) |
2471 nsp->cls3.rcvDataSizeLsb;
2472
2473 /* If the receive data length is zero then set it to */
2474 /* the CSP value */
2475 if (!nsp_value) {
2476 nsp->cls3.rcvDataSizeMsb = nsp->cmn.bbRcvSizeMsb;
2477 nsp->cls3.rcvDataSizeLsb = nsp->cmn.bbRcvSizeLsb;
2478 return (0);
2479 }
2480 }
2481
2482 return (0);
2483
2484 } /* emlxs_mb_check_sparm() */
2485
2486
2487
2488
2489 /*
2490 * emlxs_mb_set_var Issue a special debug mbox command to write slim
2491 */
2492 /*ARGSUSED*/
2493 extern void
2494 emlxs_mb_set_var(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t addr,
2495 uint32_t value)
2496 {
2497 MAILBOX *mb = (MAILBOX *)mbq;
2498
2499 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2500
2501 /* addr = 0x090597 is AUTO ABTS disable for ELS commands */
2502 /* addr = 0x052198 is DELAYED ABTS enable for ELS commands */
2503 /* addr = 0x100506 is for setting PCI MAX READ value */
2504
2505 /*
2506 * Always turn on DELAYED ABTS for ELS timeouts
2507 */
2508 if ((addr == 0x052198) && (value == 0)) {
2509 value = 1;
2510 }
2511
2512 mb->un.varWords[0] = addr;
2513 mb->un.varWords[1] = value;
2514 mb->mbxCommand = MBX_SET_VARIABLE;
2515 mb->mbxOwner = OWN_HOST;
2516 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2517 mbq->port = (void *)&PPORT;
2518
2519 } /* emlxs_mb_set_var() */
2520
2521
2522 /*
2523 * Disable Traffic Cop
2524 */
2525 /*ARGSUSED*/
2526 extern void
2527 emlxs_disable_tc(emlxs_hba_t *hba, MAILBOXQ *mbq)
2528 {
2529 MAILBOX *mb = (MAILBOX *)mbq;
2530
2531 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2532
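/* Issued via MBX_SET_VARIABLE, the same mechanism used by */
/* emlxs_mb_set_var() above */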
2533 mb->un.varWords[0] = 0x50797;
2534 mb->un.varWords[1] = 0;
2535 mb->un.varWords[2] = 0xfffffffe;
2536 mb->mbxCommand = MBX_SET_VARIABLE;
2537 mb->mbxOwner = OWN_HOST;
2538 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2539 mbq->port = (void *)&PPORT;
2540
2541 } /* emlxs_disable_tc() */
2542
2543
2544 extern void
2545 emlxs_mb_config_hbq(emlxs_hba_t *hba, MAILBOXQ *mbq, int hbq_id)
2546 {
2547 HBQ_INIT_t *hbq;
2548 MAILBOX *mb = (MAILBOX *)mbq;
2549 int i;
2550
2551 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2552
2553 hbq = &hba->sli.sli3.hbq_table[hbq_id];
2554
2555 mb->un.varCfgHbq.hbqId = hbq_id;
2556 mb->un.varCfgHbq.numEntries = hbq->HBQ_numEntries;
2557 mb->un.varCfgHbq.recvNotify = hbq->HBQ_recvNotify;
2558 mb->un.varCfgHbq.numMask = hbq->HBQ_num_mask;
2559 mb->un.varCfgHbq.profile = hbq->HBQ_profile;
2560 mb->un.varCfgHbq.ringMask = hbq->HBQ_ringMask;
2561 mb->un.varCfgHbq.headerLen = hbq->HBQ_headerLen;
2562 mb->un.varCfgHbq.logEntry = hbq->HBQ_logEntry;
2563 mb->un.varCfgHbq.hbqaddrLow = PADDR_LO(hbq->HBQ_host_buf.phys);
2564 mb->un.varCfgHbq.hbqaddrHigh = PADDR_HI(hbq->HBQ_host_buf.phys);
2565 mb->mbxCommand = MBX_CONFIG_HBQ;
2566 mb->mbxOwner = OWN_HOST;
2567 mbq->mbox_cmpl = NULL;
2568 mbq->port = (void *)&PPORT;
2569
2570 /* Copy profile info for profiles 2, 3 and 5; for other profiles this area is reserved */
2571 if ((hbq->HBQ_profile == 2) || (hbq->HBQ_profile == 3) ||
2572 (hbq->HBQ_profile == 5)) {
2573 bcopy(&hbq->profiles.allprofiles,
2574 (void *)&mb->un.varCfgHbq.profiles.allprofiles,
2575 sizeof (hbq->profiles));
2576 }
2577
2578 /* Return if no rctl / type masks for this HBQ */
2579 if (!hbq->HBQ_num_mask) {
2580 return;
2581 }
2582
2583 /* Otherwise we setup specific rctl / type masks for this HBQ */
2584 for (i = 0; i < hbq->HBQ_num_mask; i++) {
2585 mb->un.varCfgHbq.hbqMasks[i].tmatch =
2586 hbq->HBQ_Masks[i].tmatch;
2587 mb->un.varCfgHbq.hbqMasks[i].tmask = hbq->HBQ_Masks[i].tmask;
2588 mb->un.varCfgHbq.hbqMasks[i].rctlmatch =
2589 hbq->HBQ_Masks[i].rctlmatch;
2590 mb->un.varCfgHbq.hbqMasks[i].rctlmask =
2591 hbq->HBQ_Masks[i].rctlmask;
2592 }
2593
2594 return;
2595
2596 } /* emlxs_mb_config_hbq() */
2597
2598
2599 /* SLI3 */
2600 static uint32_t
2601 emlxs_reg_vpi_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2602 {
2603 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
2604 MAILBOX *mb;
2605
2606 mb = (MAILBOX *)mbq;
2607
2608 mutex_enter(&EMLXS_PORT_LOCK);
2609
2610 if (mb->mbxStatus != MBX_SUCCESS) {
2611 port->flag &= ~EMLXS_PORT_REG_VPI;
2612 mutex_exit(&EMLXS_PORT_LOCK);
2613
2614 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2615 "cmpl_reg_vpi:%d failed. status=%x",
2616 port->vpi, mb->mbxStatus);
2617 return (0);
2618 }
2619
2620 port->flag |= EMLXS_PORT_REG_VPI_CMPL;
2621
2622 mutex_exit(&EMLXS_PORT_LOCK);
2623
2624 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2625 "cmpl_reg_vpi:%d ",
2626 port->vpi);
2627
2628 return (0);
2629
2630 } /* emlxs_reg_vpi_mbcmpl */
2631
2632
2633 /* SLI3 */
2634 extern uint32_t
2635 emlxs_mb_reg_vpi(emlxs_port_t *port, emlxs_buf_t *sbp)
2636 {
2637 emlxs_hba_t *hba = HBA;
2638 MAILBOXQ *mbq;
2639 MAILBOX *mb;
2640 int rval;
2641
2642 if (hba->sli_mode > EMLXS_HBA_SLI3_MODE) {
2643 return (1);
2644 }
2645
2646 if (!(hba->flag & FC_NPIV_ENABLED)) {
2647 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2648 "reg_vpi:%d failed. NPIV disabled.",
2649 port->vpi);
2650 return (1);
2651 }
2652
2653 if (port->flag & EMLXS_PORT_REG_VPI) {
2654 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2655 "reg_vpi:%d failed. Already registered.",
2656 port->vpi);
2657 return (0);
2658 }
2659
2660 mutex_enter(&EMLXS_PORT_LOCK);
2661
2662 /* Can't reg vpi until ClearLA is sent */
2663 if (hba->state != FC_READY) {
2664 mutex_exit(&EMLXS_PORT_LOCK);
2665
2666 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2667 "reg_vpi:%d failed. HBA state not READY",
2668 port->vpi);
2669 return (1);
2670 }
2671
2672 /* Must have port id */
2673 if (!port->did) {
2674 mutex_exit(&EMLXS_PORT_LOCK);
2675
2676 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2677 "reg_vpi:%d failed. Port did=0",
2678 port->vpi);
2679 return (1);
2680 }
2681
2682 if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
2683 mutex_exit(&EMLXS_PORT_LOCK);
2684
2685 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2686 "reg_vpi:%d failed. Unable to allocate mbox.",
2687 port->vpi);
2688 return (1);
2689 }
2690
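/* Mark the registration as in progress before releasing the */
/* port lock */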
2691 port->flag |= EMLXS_PORT_REG_VPI;
2692
2693 mutex_exit(&EMLXS_PORT_LOCK);
2694
2695 mb = (MAILBOX *)mbq->mbox;
2696 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2697
2698 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2699 "reg_vpi:%d", port->vpi);
2700
2701 mb->un.varRegVpi.vpi = port->vpi;
2702 mb->un.varRegVpi.sid = port->did;
2703 mb->mbxCommand = MBX_REG_VPI;
2704 mb->mbxOwner = OWN_HOST;
2705
2706 mbq->sbp = (void *)sbp;
2707 mbq->mbox_cmpl = emlxs_reg_vpi_mbcmpl;
2708 mbq->context = NULL;
2709 mbq->port = (void *)port;
2710
2711 rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
2712 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
2713 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2714 "reg_vpi:%d failed. Unable to send request.",
2715 port->vpi);
2716
2717 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
2718 return (1);
2719 }
2720
2721 return (0);
2722
2723 } /* emlxs_mb_reg_vpi() */
2724
2725
2726 /* SLI3 */
2727 static uint32_t
2728 emlxs_unreg_vpi_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
2729 {
2730 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
2731 MAILBOX *mb;
2732
2733 mb = (MAILBOX *)mbq->mbox;
2734
2735 if (mb->mbxStatus != MBX_SUCCESS) {
2736 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2737 "unreg_vpi_mbcmpl:%d failed. status=%x",
2738 port->vpi, mb->mbxStatus);
2739 return (0);
2740 }
2741
2742 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2743 "unreg_vpi_mbcmpl:%d", port->vpi);
2744
2745 mutex_enter(&EMLXS_PORT_LOCK);
2746 port->flag &= ~EMLXS_PORT_REG_VPI_CMPL;
2747 mutex_exit(&EMLXS_PORT_LOCK);
2748
2749 return (0);
2750
2751 } /* emlxs_unreg_vpi_mbcmpl() */
2752
2753
2754 /* SLI3 */
2755 extern uint32_t
2756 emlxs_mb_unreg_vpi(emlxs_port_t *port)
2757 {
2758 emlxs_hba_t *hba = HBA;
2759 MAILBOXQ *mbq;
2760 MAILBOX *mb;
2761 int rval;
2762
2763 if (hba->sli_mode > EMLXS_HBA_SLI3_MODE) {
2764 return (1);
2765 }
2766
2767 mutex_enter(&EMLXS_PORT_LOCK);
2768
2769 if (!(port->flag & EMLXS_PORT_REG_VPI) ||
2770 !(port->flag & EMLXS_PORT_REG_VPI_CMPL)) {
2771
2772 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2773 "unreg_vpi:%d failed. Not registered. flag=%x",
2774 port->vpi, port->flag);
2775
2776 mutex_exit(&EMLXS_PORT_LOCK);
2777 return (0);
2778 }
2779
2780 if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
2781 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2782 "unreg_vpi:%d failed. Unable to allocate mbox.",
2783 port->vpi);
2784
2785 mutex_exit(&EMLXS_PORT_LOCK);
2786 return (1);
2787 }
2788
2789 port->flag &= ~EMLXS_PORT_REG_VPI;
2790
2791 mutex_exit(&EMLXS_PORT_LOCK);
2792
2793 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2794 "unreg_vpi:%d", port->vpi);
2795
2796 mb = (MAILBOX *)mbq->mbox;
2797 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2798 mb->un.varUnregVpi.vpi = port->vpi;
2799 mb->mbxCommand = MBX_UNREG_VPI;
2800 mb->mbxOwner = OWN_HOST;
2801
2802 mbq->mbox_cmpl = emlxs_unreg_vpi_mbcmpl;
2803 mbq->context = NULL;
2804 mbq->port = (void *)port;
2805
2806 rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
2807 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
2808 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2809 "unreg_vpi:%d failed. Unable to send request.",
2810 port->vpi);
2811
2812 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
2813 return (1);
2814 }
2815
2816 return (0);
2817
2818 } /* emlxs_mb_unreg_vpi() */
2819
2820
2821 /*
2822 * emlxs_mb_config_farp Issue a CONFIG FARP mailbox command
2823 */
2824 extern void
2825 emlxs_mb_config_farp(emlxs_hba_t *hba, MAILBOXQ *mbq)
2826 {
2827 MAILBOX *mb = (MAILBOX *)mbq;
2828
2829 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2830
2831 bcopy((uint8_t *)&hba->wwpn,
2832 (uint8_t *)&mb->un.varCfgFarp.portname, sizeof (NAME_TYPE));
2833
2834 bcopy((uint8_t *)&hba->wwpn,
2835 (uint8_t *)&mb->un.varCfgFarp.nodename, sizeof (NAME_TYPE));
2836
2837 mb->un.varCfgFarp.filterEnable = 1;
2838 mb->un.varCfgFarp.portName = 1;
2839 mb->un.varCfgFarp.nodeName = 1;
2840 mb->mbxCommand = MBX_CONFIG_FARP;
2841 mb->mbxOwner = OWN_HOST;
2842 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2843 mbq->port = (void *)&PPORT;
2844
2845 } /* emlxs_mb_config_farp() */
2846
2847
2848 /*
2849 * emlxs_mb_read_config Issue a READ CONFIG mailbox command
2850 */
2851 /*ARGSUSED*/
2852 extern void
2853 emlxs_mb_read_config(emlxs_hba_t *hba, MAILBOXQ *mbq)
2854 {
2855 MAILBOX *mb = (MAILBOX *)mbq;
2856
2857 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2858 bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
2859 mbq->nonembed = NULL;
2860 } else {
2861 bzero((void *)mb, MAILBOX_CMD_BSIZE);
2862 }
2863
2864 mb->mbxCommand = MBX_READ_CONFIG;
2865 mb->mbxOwner = OWN_HOST;
2866 mbq->mbox_cmpl = NULL; /* no cmpl needed */
2867 mbq->port = (void *)&PPORT;
2868
2869 } /* emlxs_mb_read_config() */
2870
2871
2872 /*
2873 * NAME: emlxs_mb_put
2874 *
2875 * FUNCTION: put mailbox cmd onto the mailbox queue.
2876 *
2877 * EXECUTION ENVIRONMENT: process and interrupt level.
2878 *
2879 * NOTES:
2880 *
2881 * CALLED FROM: EMLXS_SLI_ISSUE_MBOX_CMD
2882 *
2883 * INPUT: hba - pointer to the device info area
2884 * mbq - pointer to mailbox queue entry of mailbox cmd
2885 *
2886 * RETURNS: none; the command is always queued
2887 */
2888 extern void
2889 emlxs_mb_put(emlxs_hba_t *hba, MAILBOXQ *mbq)
2890 {
2891
2892 mutex_enter(&EMLXS_MBOX_LOCK);
2893
2894 if (hba->mbox_queue.q_first) {
2895
2896 /*
2897 * queue command to end of list
2898 */
2899 ((MAILBOXQ *)hba->mbox_queue.q_last)->next = mbq;
2900 hba->mbox_queue.q_last = (uint8_t *)mbq;
2901 hba->mbox_queue.q_cnt++;
2902 } else {
2903
2904 /*
2905 * add command to empty list
2906 */
2907 hba->mbox_queue.q_first = (uint8_t *)mbq;
2908 hba->mbox_queue.q_last = (uint8_t *)mbq;
2909 hba->mbox_queue.q_cnt = 1;
2910 }
2911
2912 mbq->next = NULL;
2913
2914 mutex_exit(&EMLXS_MBOX_LOCK);
2915 } /* emlxs_mb_put() */
2916
2917
2918 /*
2919 * NAME: emlxs_mb_get
2920 *
2921 * FUNCTION: get a mailbox command from mailbox command queue
2922 *
2923 * EXECUTION ENVIRONMENT: interrupt level.
2924 *
2925 * NOTES:
2926 *
2927 * CALLED FROM: emlxs_handle_mb_event
2928 *
2929 * INPUT: hba - pointer to the device info area
2930 *
2931 * RETURNS: NULL - queue empty; mbq pointer - the next queued mailbox command
2932 */
2933 extern MAILBOXQ *
2934 emlxs_mb_get(emlxs_hba_t *hba)
2935 {
2936 MAILBOXQ *p_first = NULL;
2937
2938 mutex_enter(&EMLXS_MBOX_LOCK);
2939
2940 if (hba->mbox_queue.q_first) {
2941 p_first = (MAILBOXQ *)hba->mbox_queue.q_first;
2942 hba->mbox_queue.q_first = (uint8_t *)p_first->next;
2943
2944 if (hba->mbox_queue.q_first == NULL) {
2945 hba->mbox_queue.q_last = NULL;
2946 hba->mbox_queue.q_cnt = 0;
2947 } else {
2948 hba->mbox_queue.q_cnt--;
2949 }
2950
2951 p_first->next = NULL;
2952 }
2953
2954 mutex_exit(&EMLXS_MBOX_LOCK);
2955
2956 return (p_first);
2957
2958 } /* emlxs_mb_get() */
2959
2960
2961 /* EMLXS_PORT_LOCK must be held when calling this */
2962 void
2963 emlxs_mb_init(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t flag, uint32_t tmo)
2964 {
2965 MATCHMAP *mp;
2966
2967 HBASTATS.MboxIssued++;
2968 hba->mbox_queue_flag = flag;
2969
2970 /* Set the Mailbox timer */
2971 if (hba->timer_tics) {
2972 hba->mbox_timer = hba->timer_tics + tmo;
2973 } else {
2974 hba->mbox_timer = DRV_TIME + tmo;
2975 }
2976
2977 /* Initialize mailbox */
2978 mbq->flag &= MBQ_INIT_MASK;
2979 mbq->next = 0;
2980
2981 mutex_enter(&EMLXS_MBOX_LOCK);
2982 hba->mbox_mbq = (void *)mbq;
2983 mutex_exit(&EMLXS_MBOX_LOCK);
2984
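/* Sync any non-embedded payload and receive buffers */
/* for the device before the command is issued */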
2985 if (mbq->nonembed) {
2986 mp = (MATCHMAP *) mbq->nonembed;
2987 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2988 DDI_DMA_SYNC_FORDEV);
2989 }
2990
2991 if (mbq->bp) {
2992 mp = (MATCHMAP *) mbq->bp;
2993 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
2994 DDI_DMA_SYNC_FORDEV);
2995 }
2996 return;
2997
2998 } /* emlxs_mb_init() */
2999
3000
3001 extern void
3002 emlxs_mb_fini(emlxs_hba_t *hba, MAILBOX *mb, uint32_t mbxStatus)
3003 {
3004 emlxs_port_t *port = &PPORT;
3005 MATCHMAP *mbox_nonembed;
3006 MATCHMAP *mbox_bp;
3007 emlxs_buf_t *mbox_sbp;
3008 fc_unsol_buf_t *mbox_ubp;
3009 IOCBQ *mbox_iocbq;
3010 MAILBOXQ *mbox_mbq;
3011 MAILBOX *mbox;
3012 uint32_t mbox_queue_flag;
3013
3014 mutex_enter(&EMLXS_PORT_LOCK);
3015
3016 if (hba->mbox_queue_flag) {
3017 HBASTATS.MboxCompleted++;
3018
3019 if (mbxStatus != MBX_SUCCESS) {
3020 HBASTATS.MboxError++;
3021 } else {
3022 HBASTATS.MboxGood++;
3023 }
3024 }
3025
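/* Snapshot the active mailbox state and clear it under the */
/* mailbox lock; deferred completions are processed after the */
/* locks are dropped */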
3026 mutex_enter(&EMLXS_MBOX_LOCK);
3027 mbox_queue_flag = hba->mbox_queue_flag;
3028 mbox_mbq = (MAILBOXQ *)hba->mbox_mbq;
3029
3030 if (mbox_mbq) {
3031 mbox_nonembed = (MATCHMAP *)mbox_mbq->nonembed;
3032 mbox_bp = (MATCHMAP *)mbox_mbq->bp;
3033 mbox_sbp = (emlxs_buf_t *)mbox_mbq->sbp;
3034 mbox_ubp = (fc_unsol_buf_t *)mbox_mbq->ubp;
3035 mbox_iocbq = (IOCBQ *)mbox_mbq->iocbq;
3036 } else {
3037 mbox_nonembed = NULL;
3038 mbox_bp = NULL;
3039 mbox_sbp = NULL;
3040 mbox_ubp = NULL;
3041 mbox_iocbq = NULL;
3042 }
3043
3044 hba->mbox_mbq = NULL;
3045 hba->mbox_queue_flag = 0;
3046 hba->mbox_timer = 0;
3047 mutex_exit(&EMLXS_MBOX_LOCK);
3048
3049 mutex_exit(&EMLXS_PORT_LOCK);
3050
3051 #ifdef SFCT_SUPPORT
3052 if (mb && mbox_sbp && mbox_sbp->fct_cmd) {
3053 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_detail_msg,
3054 "FCT mailbox: %s: status=%x",
3055 emlxs_mb_cmd_xlate(mb->mbxCommand),
3056 mb->mbxStatus);
3057 }
3058 #endif /* SFCT_SUPPORT */
3059
3060 if (mbox_queue_flag == MBX_NOWAIT) {
3061 /* Check for deferred MBUF cleanup */
3062 if (mbox_bp) {
3063 emlxs_mem_put(hba, MEM_BUF, (void *)mbox_bp);
3064 }
3065 if (mbox_nonembed) {
3066 emlxs_mem_put(hba, MEM_BUF,
3067 (void *)mbox_nonembed);
3068 }
3069 if (mbox_mbq) {
3070 emlxs_mem_put(hba, MEM_MBOX,
3071 (void *)mbox_mbq);
3072 }
3073 } else { /* MBX_WAIT */
3074 if (mbox_mbq) {
3075 if (mb) {
3076 /* Copy the local mailbox provided back into */
3077 /* the original mailbox */
3078 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3079 bcopy((uint32_t *)mb,
3080 (uint32_t *)mbox_mbq,
3081 MAILBOX_CMD_SLI4_BSIZE);
3082 } else {
3083 bcopy((uint32_t *)mb,
3084 (uint32_t *)mbox_mbq,
3085 MAILBOX_CMD_BSIZE);
3086 }
3087 }
3088
3089 mbox = (MAILBOX *)mbox_mbq;
3090 mbox->mbxStatus = (uint16_t)mbxStatus;
3091
3092 /* Mark mailbox complete */
3093 mbox_mbq->flag |= MBQ_COMPLETED;
3094 }
3095
3096 /* Wake up the sleeping thread */
3097 if (mbox_queue_flag == MBX_SLEEP) {
3098 mutex_enter(&EMLXS_MBOX_LOCK);
3099 cv_broadcast(&EMLXS_MBOX_CV);
3100 mutex_exit(&EMLXS_MBOX_LOCK);
3101 }
3102 }
3103
3104 emlxs_mb_deferred_cmpl(port, mbxStatus, mbox_sbp, mbox_ubp, mbox_iocbq);
3105
3106 return;
3107
3108 } /* emlxs_mb_fini() */
3109
3110
3111 extern void
3112 emlxs_mb_deferred_cmpl(emlxs_port_t *port, uint32_t mbxStatus, emlxs_buf_t *sbp,
3113 fc_unsol_buf_t *ubp, IOCBQ *iocbq)
3114 {
3115 emlxs_hba_t *hba = HBA;
3116 emlxs_ub_priv_t *ub_priv;
3117
3118 #ifdef SFCT_SUPPORT
3119 if (sbp && sbp->fct_cmd && (sbp->fct_state == EMLXS_FCT_REG_PENDING)) {
3120 mutex_enter(&EMLXS_PKT_LOCK);
3121 sbp->fct_flags |= EMLXS_FCT_REGISTERED;
3122 cv_broadcast(&EMLXS_PKT_CV);
3123 mutex_exit(&EMLXS_PKT_LOCK);
3124
3125 sbp = NULL;
3126 }
3127 #endif /* SFCT_SUPPORT */
3128
3129 /* Check for deferred pkt completion */
3130 if (sbp) {
3131 if (mbxStatus != MBX_SUCCESS) {
3132 /* Set error status */
3133 sbp->pkt_flags &= ~PACKET_STATE_VALID;
3134 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
3135 IOERR_NO_RESOURCES, 1);
3136 }
3137
3138 emlxs_pkt_complete(sbp, -1, 0, 1);
3139 }
3140
3141 /* Check for deferred ub completion */
3142 if (ubp) {
3143 ub_priv = ubp->ub_fca_private;
3144
3145 if (mbxStatus == MBX_SUCCESS) {
3146 emlxs_ub_callback(ub_priv->port, ubp);
3147 } else {
3148 (void) emlxs_fca_ub_release(ub_priv->port, 1,
3149 &ubp->ub_token);
3150 }
3151 }
3152
3153 /* Special handling for restricted login */
3154 if (iocbq == (IOCBQ *)1) {
3155 iocbq = NULL;
3156 }
3157
3158 /* Check for deferred iocb tx */
3159 if (iocbq) {
3160 /* Check for driver special codes */
3161 /* These indicate the mailbox is being flushed */
3162 if (mbxStatus >= MBX_DRIVER_RESERVED) {
3163 /* Set the error status and return it */
3164 iocbq->iocb.ULPSTATUS = IOSTAT_LOCAL_REJECT;
3165 iocbq->iocb.un.grsp.perr.statLocalError =
3166 IOERR_ABORT_REQUESTED;
3167
3168 emlxs_proc_channel_event(hba, iocbq->channel,
3169 iocbq);
3170 } else {
3171 EMLXS_SLI_ISSUE_IOCB_CMD(hba, iocbq->channel,
3172 iocbq);
3173 }
3174 }
3175
3176 return;
3177
3178 } /* emlxs_mb_deferred_cmpl() */
3179
3180
3181 extern void
3182 emlxs_mb_flush(emlxs_hba_t *hba)
3183 {
3184 MAILBOXQ *mbq;
3185 uint32_t mbxStatus;
3186
3187 mbxStatus = (hba->flag & FC_HARDWARE_ERROR) ?
3188 MBX_HARDWARE_ERROR : MBX_NOT_FINISHED;
3189
3190 /* Flush out the active mbox command */
3191 emlxs_mb_fini(hba, NULL, mbxStatus);
3192
3193 /* Flush out the queued mbox commands */
3194 while ((mbq = (MAILBOXQ *)emlxs_mb_get(hba)) != NULL) {
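/* Make each queued mailbox the active one so emlxs_mb_fini() */
/* performs its normal cleanup */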
3195 mutex_enter(&EMLXS_MBOX_LOCK);
3196 hba->mbox_queue_flag = MBX_NOWAIT;
3197 hba->mbox_mbq = (void *)mbq;
3198 mutex_exit(&EMLXS_MBOX_LOCK);
3199
3200 emlxs_mb_fini(hba, NULL, mbxStatus);
3201 }
3202
3203 return;
3204
3205 } /* emlxs_mb_flush() */
3206
3207
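/*
* emlxs_mb_cmd_xlate Translate a mailbox command code to a name
*/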
3208 extern char *
3209 emlxs_mb_cmd_xlate(uint8_t cmd)
3210 {
3211 static char buffer[32];
3212 uint32_t i;
3213 uint32_t count;
3214
3215 count = sizeof (emlxs_mb_cmd_table) / sizeof (emlxs_table_t);
3216 for (i = 0; i < count; i++) {
3217 if (cmd == emlxs_mb_cmd_table[i].code) {
3218 return (emlxs_mb_cmd_table[i].string);
3219 }
3220 }
3221
3222 (void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", cmd);
3223 return (buffer);
3224
3225 } /* emlxs_mb_cmd_xlate() */
3226
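/*
* emlxs_request_feature_xlate Build a string naming the requested features
*/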
3227 extern char *
3228 emlxs_request_feature_xlate(uint32_t mask)
3229 {
3230 static char buffer[64];
3231 uint32_t i;
3232
3233 bzero((char *)&buffer[0], 64);
3234 for (i = 0; i < 12; i++) {
3235 if (mask & (1<<i)) {
3236 (void) strlcat(buffer,
3237 emlxs_request_feature_table[i].string,
3238 sizeof (buffer));
3239 }
3240 }
3241 return (buffer);
3242 } /* emlxs_request_feature_xlate() */