/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at
 * http://www.opensource.org/licenses/cddl1.txt.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004-2012 Emulex. All rights reserved.
 * Use is subject to license terms.
 */

#include <emlxs.h>

/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_SLI3_C);

static void emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
static void emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
static void emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
	uint32_t ha_copy);
#ifdef SFCT_SUPPORT
static uint32_t emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
#endif /* SFCT_SUPPORT */

static uint32_t emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);

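/*
 * Non-zero causes emlxs_sli3_online() to disable the adapter's
 * firmware "traffic cop" feature via emlxs_disable_tc().
 */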
static uint32_t emlxs_disable_traffic_cop = 1;

static int emlxs_sli3_map_hdw(emlxs_hba_t *hba);

static void emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);

static int32_t emlxs_sli3_online(emlxs_hba_t *hba);

static void emlxs_sli3_offline(emlxs_hba_t *hba,
	uint32_t reset_requested);

static uint32_t emlxs_sli3_hba_reset(emlxs_hba_t *hba,
	uint32_t restart, uint32_t skip_post,
	uint32_t quiesce);

static void emlxs_sli3_hba_kill(emlxs_hba_t *hba);
static void emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
static uint32_t emlxs_sli3_hba_init(emlxs_hba_t *hba);

static uint32_t emlxs_sli2_bde_setup(emlxs_port_t *port,
	emlxs_buf_t *sbp);
static uint32_t emlxs_sli3_bde_setup(emlxs_port_t *port,
	emlxs_buf_t *sbp);
static uint32_t emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
	emlxs_buf_t *sbp);
static uint32_t emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
	emlxs_buf_t *sbp);


static void emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
	CHANNEL *rp, IOCBQ *iocb_cmd);


static uint32_t emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
	MAILBOXQ *mbq, int32_t flg,
	uint32_t tmo);


#ifdef SFCT_SUPPORT
static uint32_t emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
	emlxs_buf_t *cmd_sbp, int channel);

#endif /* SFCT_SUPPORT */

static uint32_t emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
	emlxs_buf_t *sbp, int ring);

static uint32_t emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
	emlxs_buf_t *sbp);

static uint32_t emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
	emlxs_buf_t *sbp);


static uint32_t emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
	emlxs_buf_t *sbp);


static void emlxs_sli3_poll_intr(emlxs_hba_t *hba);

static int32_t emlxs_sli3_intx_intr(char *arg);
#ifdef MSI_SUPPORT
static uint32_t emlxs_sli3_msi_intr(char *arg1, char *arg2);
#endif /* MSI_SUPPORT */

static void emlxs_sli3_enable_intr(emlxs_hba_t *hba);

static void emlxs_sli3_disable_intr(emlxs_hba_t *hba,
	uint32_t att);


static void emlxs_handle_ff_error(emlxs_hba_t *hba);

static uint32_t emlxs_handle_mb_event(emlxs_hba_t *hba);

static void emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);

static uint32_t emlxs_mb_config_port(emlxs_hba_t *hba,
	MAILBOXQ *mbq, uint32_t sli_mode,
	uint32_t hbainit);
static void emlxs_enable_latt(emlxs_hba_t *hba);

static uint32_t emlxs_check_attention(emlxs_hba_t *hba);

static uint32_t emlxs_get_attention(emlxs_hba_t *hba,
	int32_t msgid);
static void emlxs_proc_attention(emlxs_hba_t *hba,
	uint32_t ha_copy);
/* static int emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
/*	CHANNEL *cp, IOCBQ *iocbq); */
/* static void emlxs_update_HBQ_index(emlxs_hba_t *hba, */
/*	uint32_t hbq_id); */
/* static void emlxs_hbq_free_all(emlxs_hba_t *hba, */
/*	uint32_t hbq_id); */
static uint32_t emlxs_hbq_setup(emlxs_hba_t *hba,
	uint32_t hbq_id);
static void emlxs_sli3_timer(emlxs_hba_t *hba);

static void emlxs_sli3_poll_erratt(emlxs_hba_t *hba);

static uint32_t emlxs_sli3_reg_did(emlxs_port_t *port,
	uint32_t did, SERV_PARM *param,
	emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
	IOCBQ *iocbq);

static uint32_t emlxs_sli3_unreg_node(emlxs_port_t *port,
	NODELIST *node, emlxs_buf_t *sbp,
	fc_unsol_buf_t *ubp, IOCBQ *iocbq);


/* Define SLI3 API functions */
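/*
 * Entries must remain in emlxs_sli_api_t member order; entry points
 * not compiled into this build are set to NULL.
 */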
emlxs_sli_api_t emlxs_sli3_api = {
	emlxs_sli3_map_hdw,
	emlxs_sli3_unmap_hdw,
	emlxs_sli3_online,
	emlxs_sli3_offline,
	emlxs_sli3_hba_reset,
	emlxs_sli3_hba_kill,
	emlxs_sli3_issue_iocb_cmd,
	emlxs_sli3_issue_mbox_cmd,
#ifdef SFCT_SUPPORT
	emlxs_sli3_prep_fct_iocb,
#else
	NULL,
#endif /* SFCT_SUPPORT */
	emlxs_sli3_prep_fcp_iocb,
	emlxs_sli3_prep_ip_iocb,
	emlxs_sli3_prep_els_iocb,
	emlxs_sli3_prep_ct_iocb,
	emlxs_sli3_poll_intr,
	emlxs_sli3_intx_intr,
#ifdef MSI_SUPPORT
	emlxs_sli3_msi_intr,
#else
	NULL,
#endif /* MSI_SUPPORT */
	emlxs_sli3_disable_intr,
	emlxs_sli3_timer,
	emlxs_sli3_poll_erratt,
	emlxs_sli3_reg_did,
	emlxs_sli3_unreg_node
};


/*
 * emlxs_sli3_online()
 *
 * This routine will start initialization of the SLI2/3 HBA.
 */
static int32_t
emlxs_sli3_online(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	emlxs_vpd_t *vpd;
	MAILBOX *mb = NULL;
	MAILBOXQ *mbq = NULL;
	RING *rp;
	CHANNEL *cp;
	MATCHMAP *mp = NULL;
	MATCHMAP *mp1 = NULL;
	uint8_t *inptr;
	uint8_t *outptr;
	uint32_t status;
	uint16_t i;
	uint32_t j;
	uint32_t read_rev_reset;
	uint32_t key = 0;
	uint32_t fw_check;
	uint32_t kern_update = 0;
	uint32_t rval = 0;
	uint32_t offset;
	uint8_t vpd_data[DMP_VPD_SIZE];
	uint32_t MaxRbusSize;
	uint32_t MaxIbusSize;
	uint32_t sli_mode;
	uint32_t sli_mode_mask;

	cfg = &CFG;
	vpd = &VPD;
	MaxRbusSize = 0;
	MaxIbusSize = 0;
	read_rev_reset = 0;
	hba->chan_count = MAX_RINGS;

	if (hba->bus_type == SBUS_FC) {
		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
	}

	/*
	 * Set the fw_check flag. This is a bit mask: 0x1 enables the
	 * firmware check on Oracle-branded adapters, 0x2 enables it on
	 * any adapter with a known firmware image, and 0x4 also allows
	 * the adapter's kernel firmware to be updated.
	 */
	fw_check = cfg[CFG_FW_CHECK].current;

	if ((fw_check & 0x04) ||
	    (hba->fw_flag & FW_UPDATE_KERNEL)) {
		kern_update = 1;
	}

	hba->mbox_queue_flag = 0;
	hba->sli.sli3.hc_copy = 0;
	hba->fc_edtov = FF_DEF_EDTOV;
	hba->fc_ratov = FF_DEF_RATOV;
	hba->fc_altov = FF_DEF_ALTOV;
	hba->fc_arbtov = FF_DEF_ARBTOV;

	/*
	 * Get a buffer which will be used repeatedly for mailbox commands
	 */
	mbq = (MAILBOXQ *)kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);

	mb = (MAILBOX *)mbq;

	/* Initialize sli mode based on configuration parameter */
	switch (cfg[CFG_SLI_MODE].current) {
	case 2:	/* SLI2 mode */
		sli_mode = EMLXS_HBA_SLI2_MODE;
		sli_mode_mask = EMLXS_SLI2_MASK;
		break;

	case 3:	/* SLI3 mode */
		sli_mode = EMLXS_HBA_SLI3_MODE;
		sli_mode_mask = EMLXS_SLI3_MASK;
		break;

	case 0:	/* Best available */
	case 1:	/* Best available */
	default:
		if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
			sli_mode = EMLXS_HBA_SLI3_MODE;
			sli_mode_mask = EMLXS_SLI3_MASK;
		} else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
			sli_mode = EMLXS_HBA_SLI2_MODE;
			sli_mode_mask = EMLXS_SLI2_MASK;
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "No SLI mode available.");
			rval = EIO;
			goto failed;
		}
		break;
	}
	/* SBUS adapters are only available in SLI2 mode */
	if (hba->bus_type == SBUS_FC) {
		sli_mode = EMLXS_HBA_SLI2_MODE;
		sli_mode_mask = EMLXS_SLI2_MASK;
	}

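	/*
	 * Re-entry point: the adapter is re-initialized from here after
	 * READ_REV retries, firmware downloads, SLI mode fallback and
	 * MSI reconfiguration.
	 */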
reset:
	/* Reset & Initialize the adapter */
	if (emlxs_sli3_hba_init(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to init hba.");

		rval = EIO;
		goto failed;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
	    != DDI_FM_OK) ||
	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
	    != DDI_FM_OK) ||
	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
	    != DDI_FM_OK)) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);

		rval = EIO;
		goto failed;
	}
#endif /* FMA_SUPPORT */

	/* Check for PEGASUS (this is a special case) */
	/* We need to check for a dual-channel adapter */
	if (hba->model_info.device_id == PCI_DEVICE_ID_PEGASUS) {
		/* Try to determine if this is a DC adapter */
		if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
			if (MaxRbusSize == REDUCED_SRAM_CFG) {
				/* LP9802DC */
				for (i = 1; i < emlxs_pci_model_count; i++) {
					if (emlxs_pci_model[i].id == LP9802DC) {
						bcopy(&emlxs_pci_model[i],
						    &hba->model_info,
						    sizeof (emlxs_model_t));
						break;
					}
				}
			} else if (hba->model_info.id != LP9802) {
				/* LP9802 */
				for (i = 1; i < emlxs_pci_model_count; i++) {
					if (emlxs_pci_model[i].id == LP9802) {
						bcopy(&emlxs_pci_model[i],
						    &hba->model_info,
						    sizeof (emlxs_model_t));
						break;
					}
				}
			}
		}
	}

	/*
	 * Setup and issue mailbox READ REV command
	 */
	vpd->opFwRev = 0;
	vpd->postKernRev = 0;
	vpd->sli1FwRev = 0;
	vpd->sli2FwRev = 0;
	vpd->sli3FwRev = 0;
	vpd->sli4FwRev = 0;

	vpd->postKernName[0] = 0;
	vpd->opFwName[0] = 0;
	vpd->sli1FwName[0] = 0;
	vpd->sli2FwName[0] = 0;
	vpd->sli3FwName[0] = 0;
	vpd->sli4FwName[0] = 0;

	vpd->opFwLabel[0] = 0;
	vpd->sli1FwLabel[0] = 0;
	vpd->sli2FwLabel[0] = 0;
	vpd->sli3FwLabel[0] = 0;
	vpd->sli4FwLabel[0] = 0;

	/* Sanity check */
	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Adapter / SLI mode mismatch mask:x%x",
		    hba->model_info.sli_mask);

		rval = EIO;
		goto failed;
	}

	EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
	emlxs_mb_read_rev(hba, mbq, 0);
	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to read rev. Mailbox cmd=%x status=%x",
		    mb->mbxCommand, mb->mbxStatus);

		rval = EIO;
		goto failed;
	}

	if (mb->un.varRdRev.rr == 0) {
		/* Old firmware */
		if (read_rev_reset == 0) {
			read_rev_reset = 1;

			goto reset;
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
			    "Outdated firmware detected.");
		}

		vpd->rBit = 0;
	} else {
		if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
			if (read_rev_reset == 0) {
				read_rev_reset = 1;

				goto reset;
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
				    "Non-operational firmware detected. "
				    "type=%x",
				    mb->un.varRdRev.un.b.ProgType);
			}
		}

		vpd->rBit = 1;
		vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
		bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
		    16);
		vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
		bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
		    16);

		/*
		 * Let's try to read the SLI3 version.
		 * Setup and issue mailbox READ REV (v3) command.
		 */
		EMLXS_STATE_CHANGE(hba, FC_INIT_REV);

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_read_rev(hba, mbq, 1);

		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to read rev (v3). Mailbox cmd=%x status=%x",
			    mb->mbxCommand, mb->mbxStatus);

			rval = EIO;
			goto failed;
		}

		if (mb->un.varRdRev.rf3) {
			/*
			 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
			 * Not needed
			 */
			vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
			bcopy((char *)mb->un.varRdRev.sliFwName2,
			    vpd->sli3FwLabel, 16);
		}
	}

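	/*
	 * If the requested SLI mode has no matching firmware revision,
	 * fall back to whichever SLI2/SLI3 firmware the adapter reports.
	 */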
	if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
		if (vpd->sli2FwRev) {
			sli_mode = EMLXS_HBA_SLI2_MODE;
			sli_mode_mask = EMLXS_SLI2_MASK;
		} else {
			sli_mode = 0;
			sli_mode_mask = 0;
		}
	}

	else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
		if (vpd->sli3FwRev) {
			sli_mode = EMLXS_HBA_SLI3_MODE;
			sli_mode_mask = EMLXS_SLI3_MASK;
		} else {
			sli_mode = 0;
			sli_mode_mask = 0;
		}
	}

	if (!(hba->model_info.sli_mask & sli_mode_mask)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Firmware not available. sli-mode=%d",
		    cfg[CFG_SLI_MODE].current);

		rval = EIO;
		goto failed;
	}

	/* Save information as VPD data */
	vpd->postKernRev = mb->un.varRdRev.postKernRev;
	vpd->opFwRev = mb->un.varRdRev.opFwRev;
	bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
	vpd->biuRev = mb->un.varRdRev.biuRev;
	vpd->smRev = mb->un.varRdRev.smRev;
	vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
	vpd->endecRev = mb->un.varRdRev.endecRev;
	vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
	vpd->fcphLow = mb->un.varRdRev.fcphLow;
	vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;

	/* Decode FW names */
	emlxs_decode_version(vpd->postKernRev, vpd->postKernName,
	    sizeof (vpd->postKernName));
	emlxs_decode_version(vpd->opFwRev, vpd->opFwName,
	    sizeof (vpd->opFwName));
	emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName,
	    sizeof (vpd->sli1FwName));
	emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName,
	    sizeof (vpd->sli2FwName));
	emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName,
	    sizeof (vpd->sli3FwName));
	emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName,
	    sizeof (vpd->sli4FwName));

	/* Decode FW labels */
	emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1,
	    sizeof (vpd->opFwLabel));
	emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1,
	    sizeof (vpd->sli1FwLabel));
	emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1,
	    sizeof (vpd->sli2FwLabel));
	emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1,
	    sizeof (vpd->sli3FwLabel));
	emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1,
	    sizeof (vpd->sli4FwLabel));

	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	key = emlxs_get_key(hba, mbq);

	/* Get adapter VPD information */
	offset = 0;
	bzero(vpd_data, sizeof (vpd_data));
	vpd->port_index = (uint32_t)-1;

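	/*
	 * The VPD is read with repeated DUMP mailbox commands; each reply
	 * returns up to DMP_VPD_DUMP_WCOUNT words, which are byte-swapped
	 * into vpd_data until the data is exhausted.
	 */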
	while (offset < DMP_VPD_SIZE) {
		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_dump_vpd(hba, mbq, offset);
		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			/*
			 * Let it go through even if it failed.
			 * Not all adapters have VPD info, so this command
			 * can fail here. This is not a problem.
			 */

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "No VPD found. offset=%x status=%x", offset,
			    mb->mbxStatus);
			break;
		} else {
			if (mb->un.varDmp.ra == 1) {
				uint32_t *lp1, *lp2;
				uint32_t bsize;
				uint32_t wsize;

				/*
				 * mb->un.varDmp.word_cnt is actually the
				 * byte count for the dump reply
				 */
				bsize = mb->un.varDmp.word_cnt;

				/* Stop if no data was received */
				if (bsize == 0) {
					break;
				}

				/* Check limit on byte size */
				bsize = (bsize >
				    (sizeof (vpd_data) - offset)) ?
				    (sizeof (vpd_data) - offset) : bsize;

				/*
				 * Convert size from bytes to words with
				 * a minimum of 1 word
				 */
				wsize = (bsize > 4) ? (bsize >> 2) : 1;

				/*
				 * Transfer data into vpd_data buffer one
				 * word at a time
				 */
				lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
				lp2 = (uint32_t *)&vpd_data[offset];

				for (i = 0; i < wsize; i++) {
					status = *lp1++;
					*lp2++ = BE_SWAP32(status);
				}

				/* Increment total byte count saved */
				offset += (wsize << 2);

				/*
				 * Stop if less than a full transfer was
				 * received
				 */
				if (wsize < DMP_VPD_DUMP_WCOUNT) {
					break;
				}

			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_debug_msg,
				    "No VPD acknowledgment. offset=%x",
				    offset);
				break;
			}
		}

	}

	if (vpd_data[0]) {
		(void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);

		/*
		 * If there is a VPD part number, and it does not
		 * match the current default HBA model info,
		 * replace the default data with an entry that
		 * does match.
		 *
		 * After emlxs_parse_vpd, model holds the VPD value
		 * for V2 and part_num holds the value for PN. These
		 * two values are NOT necessarily the same.
		 */

		rval = 0;
		if ((vpd->model[0] != 0) &&
		    (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {

			/* First scan for a V2 match */

			for (i = 1; i < emlxs_pci_model_count; i++) {
				if (strcmp(&vpd->model[0],
				    emlxs_pci_model[i].model) == 0) {
					bcopy(&emlxs_pci_model[i],
					    &hba->model_info,
					    sizeof (emlxs_model_t));
					rval = 1;
					break;
				}
			}
		}

		if (!rval && (vpd->part_num[0] != 0) &&
		    (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {

			/* Next scan for a PN match */

			for (i = 1; i < emlxs_pci_model_count; i++) {
				if (strcmp(&vpd->part_num[0],
				    emlxs_pci_model[i].model) == 0) {
					bcopy(&emlxs_pci_model[i],
					    &hba->model_info,
					    sizeof (emlxs_model_t));
					break;
				}
			}
		}

		/*
		 * Now let's update hba->model_info with the real
		 * VPD data, if any.
		 */

		/*
		 * Replace the default model description with vpd data
		 */
		if (vpd->model_desc[0] != 0) {
			(void) strncpy(hba->model_info.model_desc,
			    vpd->model_desc,
			    (sizeof (hba->model_info.model_desc)-1));
		}

		/* Replace the default model with vpd data */
		if (vpd->model[0] != 0) {
			(void) strncpy(hba->model_info.model, vpd->model,
			    (sizeof (hba->model_info.model)-1));
		}

		/* Replace the default program types with vpd data */
		if (vpd->prog_types[0] != 0) {
			emlxs_parse_prog_types(hba, vpd->prog_types);
		}
	}

	/*
	 * Since the adapter model may have changed with the vpd data,
	 * let's double-check that the adapter is still supported.
	 */
	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unsupported adapter found. "
		    "Id:%d Device id:0x%x SSDID:0x%x Model:%s",
		    hba->model_info.id, hba->model_info.device_id,
		    hba->model_info.ssdid, hba->model_info.model);

		rval = EIO;
		goto failed;
	}

	/* Read the adapter's wakeup parms */
	(void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
	emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
	    vpd->boot_version, sizeof (vpd->boot_version));

	/* Get fcode version property */
	emlxs_get_fcode_version(hba);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
	    vpd->opFwRev, vpd->sli1FwRev);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
	    vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);

	/*
	 * If firmware checking is enabled and the adapter model indicates
	 * a firmware image, then perform a firmware version check
	 */
	hba->fw_flag = 0;
	hba->fw_timer = 0;

	if (((fw_check & 0x1) &&
	    (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
	    hba->model_info.fwid) || ((fw_check & 0x2) &&
	    hba->model_info.fwid)) {
		emlxs_firmware_t *fw;

		/* Find the firmware image indicated by the adapter model */
		fw = NULL;
		for (i = 0; i < emlxs_fw_count; i++) {
			if (emlxs_fw_table[i].id == hba->model_info.fwid) {
				fw = &emlxs_fw_table[i];
				break;
			}
		}

		/*
		 * If the image was found, then verify the adapter's
		 * current firmware versions against it
		 */
		if (fw) {
			if (!kern_update &&
			    ((fw->kern && (vpd->postKernRev != fw->kern)) ||
			    (fw->stub && (vpd->opFwRev != fw->stub)))) {

				hba->fw_flag |= FW_UPDATE_NEEDED;

			} else if ((fw->kern && (vpd->postKernRev !=
			    fw->kern)) ||
			    (fw->stub && (vpd->opFwRev != fw->stub)) ||
			    (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) ||
			    (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) ||
			    (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
			    (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
				    "Firmware update needed. "
				    "Updating. id=%d fw=%d",
				    hba->model_info.id, hba->model_info.fwid);

#ifdef MODFW_SUPPORT
				/*
				 * Load the firmware image now.
				 * If MODFW_SUPPORT is not defined, the
				 * firmware image will already be defined
				 * in the emlxs_fw_table.
				 */
				emlxs_fw_load(hba, fw);
#endif /* MODFW_SUPPORT */

				if (fw->image && fw->size) {
					uint32_t rc;

					rc = emlxs_fw_download(hba,
					    (char *)fw->image, fw->size, 0);
					if ((rc != FC_SUCCESS) &&
					    (rc != EMLXS_REBOOT_REQUIRED)) {
						EMLXS_MSGF(EMLXS_CONTEXT,
						    &emlxs_init_msg,
						    "Firmware update failed.");
						hba->fw_flag |=
						    FW_UPDATE_NEEDED;
					}
#ifdef MODFW_SUPPORT
					/*
					 * Unload the firmware image from
					 * kernel memory
					 */
					emlxs_fw_unload(hba, fw);
#endif /* MODFW_SUPPORT */

					fw_check = 0;

					goto reset;
				}

				hba->fw_flag |= FW_UPDATE_NEEDED;

				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
				    "Firmware image unavailable.");
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
				    "Firmware update not needed.");
			}
		} else {
			/* This should not happen */

			/*
			 * This means either the adapter database is not
			 * correct or a firmware image is missing from the
			 * compile
			 */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
			    "Firmware image unavailable. id=%d fw=%d",
			    hba->model_info.id, hba->model_info.fwid);
		}
	}

	/*
	 * Add our interrupt routine to the kernel's interrupt chain and
	 * enable it. If MSI is enabled, this will cause Solaris to
	 * program the MSI address and data registers in PCI config space.
	 */
	if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to add interrupt(s).");

		rval = EIO;
		goto failed;
	}

	EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);

	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	(void) emlxs_mb_config_port(hba, mbq, sli_mode, key);
	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to configure port. "
		    "Mailbox cmd=%x status=%x slimode=%d key=%x",
		    mb->mbxCommand, mb->mbxStatus, sli_mode, key);

		for (sli_mode--; sli_mode > 0; sli_mode--) {
			/* Check if sli_mode is supported by this adapter */
			if (hba->model_info.sli_mask &
			    EMLXS_SLI_MASK(sli_mode)) {
				sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
				break;
			}
		}

		if (sli_mode) {
			fw_check = 0;

			goto reset;
		}

		hba->flag &= ~FC_SLIM2_MODE;

		rval = EIO;
		goto failed;
	}

	/* Check if SLI3 mode was achieved */
	if (mb->un.varCfgPort.rMA &&
	    (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {

		if (mb->un.varCfgPort.vpi_max > 1) {
			hba->flag |= FC_NPIV_ENABLED;

			if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
				hba->vpi_max =
				    min(mb->un.varCfgPort.vpi_max,
				    MAX_VPORTS - 1);
			} else {
				hba->vpi_max =
				    min(mb->un.varCfgPort.vpi_max,
				    MAX_VPORTS_LIMITED - 1);
			}
		}

#if (EMLXS_MODREV >= EMLXS_MODREV5)
		hba->fca_tran->fca_num_npivports =
		    (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
#endif /* >= EMLXS_MODREV5 */

		if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
			hba->flag |= FC_HBQ_ENABLED;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "SLI2 mode: flag=%x", hba->flag);
		sli_mode = EMLXS_HBA_SLI2_MODE;
		sli_mode_mask = EMLXS_SLI2_MASK;
		hba->sli_mode = sli_mode;
#if (EMLXS_MODREV >= EMLXS_MODREV5)
		hba->fca_tran->fca_num_npivports = 0;
#endif /* >= EMLXS_MODREV5 */

	}

	/* Get and save the current firmware version (based on sli_mode) */
	emlxs_decode_firmware_rev(hba, vpd);

	emlxs_pcix_mxr_update(hba, 0);

	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	emlxs_mb_read_config(hba, mbq);
	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to read configuration. Mailbox cmd=%x status=%x",
		    mb->mbxCommand, mb->mbxStatus);

		rval = EIO;
		goto failed;
	}

	/* Save the link speed capabilities */
	vpd->link_speed = (uint16_t)mb->un.varRdConfig.lmt;
	emlxs_process_link_speed(hba);

	/* Set the max node count */
	if (cfg[CFG_NUM_NODES].current > 0) {
		hba->max_nodes =
		    min(cfg[CFG_NUM_NODES].current,
		    mb->un.varRdConfig.max_rpi);
	} else {
		hba->max_nodes = mb->un.varRdConfig.max_rpi;
	}

	/* Set the io throttle */
	hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;

	/* Set max_iotag */
	if (cfg[CFG_NUM_IOTAGS].current) {
		hba->max_iotag = (uint16_t)cfg[CFG_NUM_IOTAGS].current;
	} else {
		hba->max_iotag = mb->un.varRdConfig.max_xri;
	}

	/* Set out-of-range iotag base */
	hba->fc_oor_iotag = hba->max_iotag;

	/*
	 * Allocate some memory for buffers
	 */
	if (emlxs_mem_alloc_buffer(hba) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate memory buffers.");

		EMLXS_STATE_CHANGE(hba, FC_ERROR);
		return (ENOMEM);
	}

	/*
	 * Setup and issue mailbox RUN BIU DIAG command.
	 * Setup the test buffers first.
	 */
	if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) ||
	    ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate diag buffers.");

		rval = ENOMEM;
		goto failed;
	}

	bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
	    MEM_ELSBUF_SIZE);
	EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
	    DDI_DMA_SYNC_FORDEV);

	bzero(mp1->virt, MEM_ELSBUF_SIZE);
	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
	    DDI_DMA_SYNC_FORDEV);

	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	(void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys);

	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to run BIU diag. Mailbox cmd=%x status=%x",
		    mb->mbxCommand, mb->mbxStatus);

		rval = EIO;
		goto failed;
	}

	EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
	    DDI_DMA_SYNC_FORKERNEL);

#ifdef FMA_SUPPORT
	if (mp->dma_handle) {
		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli3_online: hdl=%p",
			    mp->dma_handle);
			rval = EIO;
			goto failed;
		}
	}

	if (mp1->dma_handle) {
		if (emlxs_fm_check_dma_handle(hba, mp1->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli3_online: hdl=%p",
			    mp1->dma_handle);
			rval = EIO;
			goto failed;
		}
	}
#endif /* FMA_SUPPORT */

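	/* Verify that the adapter echoed the diag pattern back into mp1 */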
	outptr = mp->virt;
	inptr = mp1->virt;

	for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
		if (*outptr++ != *inptr++) {
			outptr--;
			inptr--;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "BIU diagnostic failed. "
			    "offset %x value %x should be %x.",
			    i, (uint32_t)*inptr, (uint32_t)*outptr);

			rval = EIO;
			goto failed;
		}
	}

	/* Free the buffers since we were polling */
	emlxs_mem_put(hba, MEM_BUF, (void *)mp);
	mp = NULL;
	emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
	mp1 = NULL;

	hba->channel_fcp = FC_FCP_RING;
	hba->channel_els = FC_ELS_RING;
	hba->channel_ip = FC_IP_RING;
	hba->channel_ct = FC_CT_RING;
	hba->sli.sli3.ring_count = MAX_RINGS;

	hba->channel_tx_count = 0;
	hba->io_count = 0;
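	/* iotag 0 is reserved, so local iotag assignment starts at 1 */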
	hba->fc_iotag = 1;

	for (i = 0; i < hba->chan_count; i++) {
		cp = &hba->chan[i];

		/* 1 to 1 mapping between ring and channel */
		cp->iopath = (void *)&hba->sli.sli3.ring[i];

		cp->hba = hba;
		cp->channelno = i;
	}

	/*
	 * Setup and issue mailbox CONFIGURE RING command
	 */
	for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) {
		/*
		 * Initialize cmd/rsp ring pointers
		 */
		rp = &hba->sli.sli3.ring[i];

		/* 1 to 1 mapping between ring and channel */
		rp->channelp = &hba->chan[i];

		rp->hba = hba;
		rp->ringno = (uint8_t)i;

		rp->fc_cmdidx = 0;
		rp->fc_rspidx = 0;
		EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING);

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_config_ring(hba, i, mbq);
		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to configure ring. "
			    "Mailbox cmd=%x status=%x",
			    mb->mbxCommand, mb->mbxStatus);

			rval = EIO;
			goto failed;
		}
	}

	/*
	 * Setup link timers
	 */
	EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);

	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	emlxs_mb_config_link(hba, mbq);
	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to configure link. Mailbox cmd=%x status=%x",
		    mb->mbxCommand, mb->mbxStatus);

		rval = EIO;
		goto failed;
	}

#ifdef MAX_RRDY_SUPPORT
	/* Set MAX_RRDY if one is provided */
	if (cfg[CFG_MAX_RRDY].current) {

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412,
		    cfg[CFG_MAX_RRDY].current);

		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "MAX_RRDY: Unable to set. status=%x "
			    "value=%d",
			    mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
		}
	}
#endif /* MAX_RRDY_SUPPORT */

	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	/*
	 * We need to get the login parameters for NID
	 */
	(void) emlxs_mb_read_sparam(hba, mbq);
	mp = (MATCHMAP *)mbq->bp;
	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to read parameters. Mailbox cmd=%x status=%x",
		    mb->mbxCommand, mb->mbxStatus);

		rval = EIO;
		goto failed;
	}

	/* Free the buffer since we were polling */
	emlxs_mem_put(hba, MEM_BUF, (void *)mp);
	mp = NULL;

	/* If there is no serial number in the VPD data, use the WWPN */
	if (vpd->serial_num[0] == 0) {
		outptr = (uint8_t *)&hba->wwpn.IEEE[0];
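		/*
		 * Expand each IEEE address byte into two hex digits,
		 * high nibble first, yielding a 12-character serial number.
		 */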
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9) {
				vpd->serial_num[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				vpd->serial_num[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
			}

			i++;
			j = (status & 0xf);
			if (j <= 9) {
				vpd->serial_num[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				vpd->serial_num[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j - 10));
			}
		}

		/*
		 * Set the port number and port index to zero.
		 * The WWNs are unique to each port and therefore port_num
		 * must equal zero. This affects the hba_fru_details
		 * structure in fca_bind_port().
		 */
		vpd->port_num[0] = 0;
		vpd->port_index = 0;
	}

	/*
	 * Make a first attempt to set a port index.
	 * Check if this is a multifunction adapter.
	 */
	if ((vpd->port_index == (uint32_t)-1) &&
	    (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
		char *buffer;
		int32_t i;

		/*
		 * The port address looks like this:
		 * 1	- for port index 0
		 * 1,1	- for port index 1
		 * 1,2	- for port index 2
		 */
		buffer = ddi_get_name_addr(hba->dip);

		if (buffer) {
			vpd->port_index = 0;

			/* Reverse scan for a comma */
			for (i = strlen(buffer) - 1; i > 0; i--) {
				if (buffer[i] == ',') {
					/* Comma found - set the index now */
					vpd->port_index =
					    emlxs_strtol(&buffer[i + 1], 10);
					break;
				}
			}
		}
	}

	/* Make a final attempt to set a port index */
	if (vpd->port_index == (uint32_t)-1) {
		dev_info_t *p_dip;
		dev_info_t *c_dip;

		p_dip = ddi_get_parent(hba->dip);
		c_dip = ddi_get_child(p_dip);

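		/*
		 * Fall back to this device's position among its parent's
		 * children in the device tree.
		 */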
		vpd->port_index = 0;
		while (c_dip && (hba->dip != c_dip)) {
			c_dip = ddi_get_next_sibling(c_dip);
			vpd->port_index++;
		}
	}

	if (vpd->port_num[0] == 0) {
		if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
			(void) snprintf(vpd->port_num,
			    (sizeof (vpd->port_num)-1),
			    "%d", vpd->port_index);
		}
	}

	if (vpd->id[0] == 0) {
		(void) strncpy(vpd->id, hba->model_info.model_desc,
		    (sizeof (vpd->id)-1));
	}

	if (vpd->manufacturer[0] == 0) {
		(void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
		    (sizeof (vpd->manufacturer)-1));
	}

	if (vpd->part_num[0] == 0) {
		(void) strncpy(vpd->part_num, hba->model_info.model,
		    (sizeof (vpd->part_num)-1));
	}

	if (vpd->model_desc[0] == 0) {
		(void) strncpy(vpd->model_desc, hba->model_info.model_desc,
		    (sizeof (vpd->model_desc)-1));
	}

	if (vpd->model[0] == 0) {
		(void) strncpy(vpd->model, hba->model_info.model,
		    (sizeof (vpd->model)-1));
	}

	if (vpd->prog_types[0] == 0) {
		emlxs_build_prog_types(hba, vpd);
	}

	/* Create the symbolic names */
	(void) snprintf(hba->snn, (sizeof (hba->snn)-1),
	    "%s %s FV%s DV%s %s",
	    hba->model_info.manufacturer, hba->model_info.model,
	    hba->vpd.fw_version, emlxs_version,
	    (char *)utsname.nodename);

	(void) snprintf(hba->spn, (sizeof (hba->spn)-1),
	    "%s PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
	    hba->model_info.manufacturer,
	    hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
	    hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
	    hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);

	if (cfg[CFG_NETWORK_ON].current) {
		if ((hba->sparam.portName.nameType != NAME_IEEE) ||
		    (hba->sparam.portName.IEEEextMsn != 0) ||
		    (hba->sparam.portName.IEEEextLsb != 0)) {

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
			    "WWPN doesn't conform to IP profile: "
			    "nameType=%x. Disabling networking.",
			    hba->sparam.portName.nameType);

			cfg[CFG_NETWORK_ON].current = 0;
		}
	}

	if (cfg[CFG_NETWORK_ON].current) {
		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		/* Issue CONFIG FARP */
		emlxs_mb_config_farp(hba, mbq);
		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			/*
			 * Let it go through even if it failed.
			 */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
			    "Unable to configure FARP. "
			    "Mailbox cmd=%x status=%x",
			    mb->mbxCommand, mb->mbxStatus);
		}
	}
#ifdef MSI_SUPPORT
	/* Configure the MSI map if required */
	if (hba->intr_count > 1) {

		if (hba->intr_type == DDI_INTR_TYPE_MSIX) {
			/* always start from 0 */
			hba->last_msiid = 0;
		}

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count);

		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
		    MBX_SUCCESS) {
			goto msi_configured;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Unable to config MSIX. Mailbox cmd=0x%x status=0x%x",
		    mb->mbxCommand, mb->mbxStatus);

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count);

		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
		    MBX_SUCCESS) {
			goto msi_configured;
		}


		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Unable to config MSI. Mailbox cmd=0x%x status=0x%x",
		    mb->mbxCommand, mb->mbxStatus);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Attempting single interrupt mode...");

		/* First clean up the old interrupts */
		(void) emlxs_msi_remove(hba);
		(void) emlxs_msi_uninit(hba);

		status = emlxs_msi_init(hba, 1);

		if (status != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to initialize interrupt. status=%d",
			    status);

			rval = EIO;
			goto failed;
		}

		/*
		 * Reset the adapter - The adapter needs to be reset because
		 * the bus cannot handle the MSI change without handshaking
		 * with the adapter again
		 */

		(void) emlxs_mem_free_buffer(hba);
		fw_check = 0;
		goto reset;
	}

msi_configured:


	if ((hba->intr_count >= 1) &&
	    (hba->sli_mode == EMLXS_HBA_SLI3_MODE)) {
		/*
		 * Map each MSI vector id to a channel; wrap around
		 * when there are more vectors than channels.
		 */
		for (i = 0; i < hba->intr_count; i++) {
			hba->msi2chan[i] = i;
			if (i >= hba->chan_count)
				hba->msi2chan[i] = (i - hba->chan_count);
		}
	}
#endif /* MSI_SUPPORT */

	/*
	 * We always disable the firmware traffic cop feature
	 */
	if (emlxs_disable_traffic_cop) {
		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_disable_tc(hba, mbq);
		if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to disable traffic cop. "
			    "Mailbox cmd=%x status=%x",
			    mb->mbxCommand, mb->mbxStatus);

			rval = EIO;
			goto failed;
		}
	}


	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	/* Register for async events */
	emlxs_mb_async_event(hba, mbq);
	if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Async events disabled. Mailbox status=%x",
		    mb->mbxStatus);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Async events enabled.");
		hba->flag |= FC_ASYNC_EVENTS;
	}

	EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);

	emlxs_sli3_enable_intr(hba);

	if (hba->flag & FC_HBQ_ENABLED) {
		if (port->flag & EMLXS_TGT_ENABLED) {
			if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg,
				    "Unable to setup FCT HBQ.");

				rval = ENOMEM;

#ifdef SFCT_SUPPORT
				/*
				 * Check if we can fall back to just
				 * initiator mode
				 */
				if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
				    (port->flag & EMLXS_INI_ENABLED) &&
				    (cfg[CFG_DTM_ENABLE].current == 1) &&
				    (cfg[CFG_TARGET_MODE].current == 0)) {

					cfg[CFG_DTM_ENABLE].current = 0;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_init_failed_msg,
					    "Disabling dynamic target mode. "
					    "Enabling initiator mode only.");

					/*
					 * This will trigger the driver to
					 * reattach
					 */
					rval = EAGAIN;
				}
#endif /* SFCT_SUPPORT */
				goto failed;
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
		}

		if (cfg[CFG_NETWORK_ON].current) {
			if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg,
				    "Unable to setup IP HBQ.");

				rval = ENOMEM;
				goto failed;
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "IP Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
		}

		if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to setup ELS HBQ.");
			rval = ENOMEM;
			goto failed;
		}
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);

		if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to setup CT HBQ.");

			rval = ENOMEM;
			goto failed;
		}
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "CT Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
	} else {
		if (port->flag & EMLXS_TGT_ENABLED) {
			/* Post the FCT unsol buffers */
			rp = &hba->sli.sli3.ring[FC_FCT_RING];
			for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
				(void) emlxs_post_buffer(hba, rp, 2);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
		}

		if (cfg[CFG_NETWORK_ON].current) {
			/* Post the IP unsol buffers */
			rp = &hba->sli.sli3.ring[FC_IP_RING];
			for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
				(void) emlxs_post_buffer(hba, rp, 2);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "IP Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
		}

		/* Post the ELS unsol buffers */
		rp = &hba->sli.sli3.ring[FC_ELS_RING];
		for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
			(void) emlxs_post_buffer(hba, rp, 2);
		}
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);


		/* Post the CT unsol buffers */
		rp = &hba->sli.sli3.ring[FC_CT_RING];
		for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
			(void) emlxs_post_buffer(hba, rp, 2);
		}
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "CT Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
	}

	(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));

	/* Check persist-linkdown */
	if (cfg[CFG_PERSIST_LINKDOWN].current) {
		EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
		return (0);
	}

#ifdef SFCT_SUPPORT
	if ((port->mode == MODE_TARGET) &&
	    !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
		emlxs_enable_latt(hba);
		return (0);
	}
#endif /* SFCT_SUPPORT */

	/*
	 * Setup and issue mailbox INITIALIZE LINK command.
	 * At this point, the interrupt will be generated by the HW.
	 */
	mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX);
	if (mbq == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate mailbox buffer.");

		rval = EIO;
		goto failed;
	}
	mb = (MAILBOX *)mbq;

	emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current,
	    cfg[CFG_LINK_SPEED].current);

	rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
	if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to initialize link. "
		    "Mailbox cmd=%x status=%x",
		    mb->mbxCommand, mb->mbxStatus);

		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		mbq = NULL;
		rval = EIO;
		goto failed;
	}

	/*
	 * Enable link attention interrupt
	 */
	emlxs_enable_latt(hba);

	/* Wait for the link to come up */
	i = cfg[CFG_LINKUP_DELAY].current;
	while (i && (hba->state < FC_LINK_UP)) {
		/* Check for hardware error */
		if (hba->state == FC_ERROR) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_init_failed_msg,
			    "Adapter error.");

			mbq = NULL;
			rval = EIO;
			goto failed;
		}

		BUSYWAIT_MS(1000);
		i--;
	}

	/*
	 * The Leadville driver will now handle the FLOGI at the driver level
	 */

	return (0);

failed:

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	if (hba->intr_flags & EMLXS_MSI_ADDED) {
		(void) EMLXS_INTR_REMOVE(hba);
	}

	if (mp) {
		emlxs_mem_put(hba, MEM_BUF, (void *)mp);
		mp = NULL;
	}

	if (mp1) {
		emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
		mp1 = NULL;
	}

	(void) emlxs_mem_free_buffer(hba);

	if (mbq) {
		(void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
		mbq = NULL;
		mb = NULL;
	}

	if (rval == 0) {
		rval = EIO;
	}

	return (rval);

} /* emlxs_sli3_online() */


/*ARGSUSED*/
static void
emlxs_sli3_offline(emlxs_hba_t *hba, uint32_t reset_requested)
{
	/* Reverse emlxs_sli3_online */

	/* Kill the adapter */
	emlxs_sli3_hba_kill(hba);

	/* Free driver shared memory */
	(void) emlxs_mem_free_buffer(hba);

} /* emlxs_sli3_offline() */


static int
emlxs_sli3_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	dev_info_t *dip;
	ddi_device_acc_attr_t dev_attr;
	int status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	if (hba->bus_type == SBUS_FC) {

		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup SLIM failed. "
				    "status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip,
			    SBUS_DFLY_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup DFLY CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
			    (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup Fcode Flash "
				    "failed. status=%x", status);
				goto failed;
			}
		}
		if (hba->sli.sli3.sbus_core_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
			    &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CORE "
				    "failed. status=%x", status);
				goto failed;
			}
		}

		if (hba->sli.sli3.sbus_csr_handle == 0) {
			status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(SBUS) ddi_regs_map_setup TITAN CSR "
				    "failed. status=%x", status);
				goto failed;
			}
		}
	} else {	/* ****** PCI ****** */

		if (hba->sli.sli3.slim_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
			    (caddr_t *)&hba->sli.sli3.slim_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup SLIM failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli3.slim_addr, &dev_attr,
				    &hba->sli.sli3.slim_acc_handle);
				goto failed;
			}
		}

		/*
		 * Map in the control registers, using the memory-mapped
		 * version of the registers rather than the I/O
		 * space-mapped registers.
		 */
		if (hba->sli.sli3.csr_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
			    (caddr_t *)&hba->sli.sli3.csr_addr,
			    0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup CSR failed. status=%x",
				    status);
				goto failed;
			}
		}
	}

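	/*
	 * Allocate the host-memory SLIM2 region; in SLI2/3 mode the
	 * mailbox area and IOCB rings are kept in this host memory
	 * rather than in the adapter's on-board SLIM.
	 */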
	if (hba->sli.sli3.slim2.virt == 0) {
		MBUF_INFO *buf_info;
		MBUF_INFO bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = SLI_SLIM2_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli3.slim2.virt = buf_info->virt;
		hba->sli.sli3.slim2.phys = buf_info->phys;
		hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
		hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
		hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
		bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
	}

	/* offset from beginning of register space */
	hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HA_REG_OFFSET));
	hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * CA_REG_OFFSET));
	hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HS_REG_OFFSET));
	hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * HC_REG_OFFSET));
	hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
	    (sizeof (uint32_t) * BC_REG_OFFSET));

	if (hba->bus_type == SBUS_FC) {
		/* offset from beginning of register space */
		/* for TITAN registers */
		hba->sli.sli3.shc_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
		hba->sli.sli3.shs_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
		hba->sli.sli3.shu_reg_addr =
		    (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
		    (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
	}
	hba->chan_count = MAX_RINGS;

	return (0);

failed:

	emlxs_sli3_unmap_hdw(hba);
	return (ENOMEM);

} /* emlxs_sli3_map_hdw() */


static void
emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
{
	MBUF_INFO bufinfo;
	MBUF_INFO *buf_info = &bufinfo;

	if (hba->sli.sli3.csr_acc_handle) {
		ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
		hba->sli.sli3.csr_acc_handle = 0;
	}

	if (hba->sli.sli3.slim_acc_handle) {
		ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
		hba->sli.sli3.slim_acc_handle = 0;
	}

	if (hba->sli.sli3.sbus_flash_acc_handle) {
		ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
		hba->sli.sli3.sbus_flash_acc_handle = 0;
	}

	if (hba->sli.sli3.sbus_core_acc_handle) {
		ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
		hba->sli.sli3.sbus_core_acc_handle = 0;
	}

	if (hba->sli.sli3.sbus_csr_handle) {
		ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
		hba->sli.sli3.sbus_csr_handle = 0;
	}

	if (hba->sli.sli3.slim2.virt) {
		bzero(buf_info, sizeof (MBUF_INFO));

		if (hba->sli.sli3.slim2.phys) {
			buf_info->phys = hba->sli.sli3.slim2.phys;
			buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
			buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
			buf_info->flags = FC_MBUF_DMA;
		}

		buf_info->virt = hba->sli.sli3.slim2.virt;
		buf_info->size = hba->sli.sli3.slim2.size;
		emlxs_mem_free(hba, buf_info);

		hba->sli.sli3.slim2.virt = NULL;
	}


	return;

} /* emlxs_sli3_unmap_hdw() */


static uint32_t
emlxs_sli3_hba_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	emlxs_config_t *cfg;
	uint16_t i;
	VPIobj_t *vpip;

	cfg = &CFG;
	i = 0;

	/* Restart the adapter */
	if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
		return (1);
	}

	hba->channel_fcp = FC_FCP_RING;
	hba->channel_els = FC_ELS_RING;
	hba->channel_ip = FC_IP_RING;
	hba->channel_ct = FC_CT_RING;
	hba->chan_count = MAX_RINGS;
	hba->sli.sli3.ring_count = MAX_RINGS;

	/*
	 * WARNING: a maximum of 6 ring masks is allowed across all
	 * rings; `i' counts the masks defined so far.
	 */
	/* RING 0 - FCP */
	if (port->flag & EMLXS_TGT_ENABLED) {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
		hba->sli.sli3.ring_rmask[i] = 0;
		hba->sli.sli3.ring_tval[i] = FC_TYPE_SCSI_FCP;
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
	hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;

	/* RING 1 - IP */
	if (cfg[CFG_NETWORK_ON].current) {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
		hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
		hba->sli.sli3.ring_rmask[i] = 0xFF;
		hba->sli.sli3.ring_tval[i] = FC_TYPE_IS8802_SNAP; /* LLC/SNAP */
		hba->sli.sli3.ring_tmask[i++] = 0xFF;
	} else {
		hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
	}

	hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
	hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;

	/* RING 2 - ELS */
	hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_ELS_REQ;	/* ELS request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_TYPE_EXTENDED_LS;	/* ELS */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
	hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;

	/* RING 3 - CT */
	hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
	hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL;	/* CT request/rsp */
	hba->sli.sli3.ring_rmask[i] = 0xFE;
	hba->sli.sli3.ring_tval[i] = FC_TYPE_FC_SERVICES;	/* CT */
	hba->sli.sli3.ring_tmask[i++] = 0xFF;

	hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
	hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;

	if (i > 6) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "hba_init: Too many ring masks defined. cnt=%d", i);
		return (1);
	}

	/* Initialize all the port objects */
	hba->vpi_max = 0;
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->hba = hba;
		vport->vpi = i;

		vpip = &vport->VPIobj;
		vpip->index = i;
		vpip->VPI = i;
		vpip->port = vport;
		vpip->state = VPI_STATE_OFFLINE;
		vport->vpip = vpip;
	}

	/*
	 * Initialize the max_node count to a default value if needed.
	 * This determines how many node objects we preallocate in the
	 * pool. The actual max_nodes will be set later based on
	 * adapter info.
	 */
	if (hba->max_nodes == 0) {
		if (cfg[CFG_NUM_NODES].current > 0) {
			hba->max_nodes = cfg[CFG_NUM_NODES].current;
		} else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
			hba->max_nodes = 4096;
		} else {
			hba->max_nodes = 512;
		}
	}

	return (0);

} /* emlxs_sli3_hba_init() */

2024 /*
2025  * quiesce = 0: the call did not originate from the quiesce routine.
2026  * quiesce = 1: the call originated from the quiesce routine.
2027  */
2028 static uint32_t
2029 emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
2030 uint32_t quiesce)
2031 {
2032 emlxs_port_t *port = &PPORT;
2033 MAILBOX *swpmb;
2034 MAILBOX *mb;
2035 uint32_t word0;
2036 uint16_t cfg_value;
2037 uint32_t status = 0;
2038 uint32_t status1;
2039 uint32_t status2;
2040 uint32_t i;
2041 uint32_t ready;
2042 emlxs_port_t *vport;
2043 RING *rp;
2044 emlxs_config_t *cfg = &CFG;
2045
2046 if (!cfg[CFG_RESET_ENABLE].current) {
2047 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2048 "Adapter reset disabled.");
2049 EMLXS_STATE_CHANGE(hba, FC_ERROR);
2050
2051 return (1);
2052 }
2053
2054 /* Kill the adapter first */
2055 if (quiesce == 0) {
2056 emlxs_sli3_hba_kill(hba);
2057 } else {
2058 emlxs_sli3_hba_kill4quiesce(hba);
2059 }
2060
2061 if (restart) {
2062 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2063 "Restarting.");
2064 EMLXS_STATE_CHANGE(hba, FC_INIT_START);
2065
2066 ready = (HS_FFRDY | HS_MBRDY);
2067 } else {
2068 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2069 "Resetting.");
2070 EMLXS_STATE_CHANGE(hba, FC_WARM_START);
2071
2072 ready = HS_MBRDY;
2073 }
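	/*
	 * Note: HS_MBRDY indicates the mailbox interface is ready, while
	 * HS_FFRDY indicates the firmware itself is ready.  A restart
	 * must wait for both; a plain reset (warm start) only waits for
	 * the mailbox to come ready.
	 */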
2074
2075 hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);
2076
2077 mb = FC_SLIM1_MAILBOX(hba);
2078 swpmb = (MAILBOX *)&word0;
2079
2080 reset:
2081
2082 i = 0;
2083
2084 /* Save reset time */
2085 HBASTATS.ResetTime = hba->timer_tics;
2086
2087 if (restart) {
2088 /* First put restart command in mailbox */
2089 word0 = 0;
2090 swpmb->mbxCommand = MBX_RESTART;
2091 swpmb->mbxHc = 1;
2092 WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), word0);
2093
2094 /* Only skip post after emlxs_sli3_online is completed */
2095 if (skip_post) {
2096 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
2097 1);
2098 } else {
2099 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
2100 0);
2101 }
2102
2103 }
2104
2105 /*
2106 * Turn off SERR, PERR in PCI cmd register
2107 */
2108 cfg_value = ddi_get16(hba->pci_acc_handle,
2109 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));
2110
2111 ddi_put16(hba->pci_acc_handle,
2112 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
2113 (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));
2114
2115 hba->sli.sli3.hc_copy = HC_INITFF;
2116 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
2117
2118 /* Wait 1 msec before restoring PCI config */
2119 BUSYWAIT_MS(1);
2120
2121 /* Restore PCI cmd register */
2122 ddi_put16(hba->pci_acc_handle,
2123 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
2124 (uint16_t)cfg_value);
2125
2126 /* Wait 3 seconds before checking */
2127 BUSYWAIT_MS(3000);
2128 i += 3;
2129
2130 /* Wait for reset completion */
2131 while (i < 30) {
2132 /* Check status register to see what current state is */
2133 status = READ_CSR_REG(hba, FC_HS_REG(hba));
2134
2135 /* Check to see if any errors occurred during init */
2136 if (status & HS_FFERM) {
2137 status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2138 hba->sli.sli3.slim_addr + 0xa8));
2139 status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2140 hba->sli.sli3.slim_addr + 0xac));
2141
2142 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2143 "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
2144 status, status1, status2);
2145
2146 EMLXS_STATE_CHANGE(hba, FC_ERROR);
2147 return (1);
2148 }
2149
2150 if ((status & ready) == ready) {
2151 /* Reset Done !! */
2152 goto done;
2153 }
2154
2155 /*
2156 		 * Check every 1 second for 15 seconds, then reset the board
2157 		 * again (w/post), then check every 1 second for 15 more seconds.
2158 */
2159 BUSYWAIT_MS(1000);
2160 i++;
2161
2162 /* Reset again (w/post) at 15 seconds */
2163 if (i == 15) {
2164 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2165 "Reset failed. Retrying...");
2166
2167 goto reset;
2168 }
2169 }
2170
2171 #ifdef FMA_SUPPORT
2172 reset_fail:
2173 #endif /* FMA_SUPPORT */
2174
2175 /* Timeout occurred */
2176 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2177 "Timeout: status=0x%x", status);
2178 EMLXS_STATE_CHANGE(hba, FC_ERROR);
2179
2180 /* Log a dump event */
2181 emlxs_log_dump_event(port, NULL, 0);
2182
2183 return (1);
2184
2185 done:
2186
2187 /* Initialize hc_copy */
2188 hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));
2189
2190 #ifdef FMA_SUPPORT
2191 /* Access handle validation */
2192 if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2193 != DDI_FM_OK) ||
2194 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
2195 != DDI_FM_OK) ||
2196 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
2197 != DDI_FM_OK)) {
2198 EMLXS_MSGF(EMLXS_CONTEXT,
2199 &emlxs_invalid_access_handle_msg, NULL);
2200 goto reset_fail;
2201 }
2202 #endif /* FMA_SUPPORT */
2203
2204 /* Reset the hba structure */
2205 hba->flag &= FC_RESET_MASK;
2206 hba->channel_tx_count = 0;
2207 hba->io_count = 0;
2208 hba->iodone_count = 0;
2209 hba->topology = 0;
2210 hba->linkspeed = 0;
2211 hba->heartbeat_active = 0;
2212 hba->discovery_timer = 0;
2213 hba->linkup_timer = 0;
2214 hba->loopback_tics = 0;
2215
2216 /* Reset the ring objects */
2217 for (i = 0; i < MAX_RINGS; i++) {
2218 rp = &hba->sli.sli3.ring[i];
2219 rp->fc_mpon = 0;
2220 rp->fc_mpoff = 0;
2221 }
2222
2223 /* Reset the port objects */
2224 for (i = 0; i < MAX_VPORTS; i++) {
2225 vport = &VPORT(i);
2226
2227 vport->flag &= EMLXS_PORT_RESET_MASK;
2228 vport->did = 0;
2229 vport->prev_did = 0;
2230 vport->lip_type = 0;
2231 bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
2232 bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));
2233
2234 bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
2235 vport->node_base.nlp_Rpi = 0;
2236 vport->node_base.nlp_DID = 0xffffff;
2237 vport->node_base.nlp_list_next = NULL;
2238 vport->node_base.nlp_list_prev = NULL;
2239 vport->node_base.nlp_active = 1;
2240 vport->node_count = 0;
2241
2242 if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
2243 vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
2244 }
2245 }
2246
2247 return (0);
2248
2249 } /* emlxs_sli3_hba_reset */
2250
2251
2252 #define BPL_CMD 0
2253 #define BPL_RESP 1
2254 #define BPL_DATA 2
2255
2256 static ULP_BDE64 *
2257 emlxs_pkt_to_bpl(fc_packet_t *pkt, ULP_BDE64 *bpl, uint32_t bpl_type)
2258 {
2259 ddi_dma_cookie_t *cp;
2260 uint_t i;
2261 int32_t size;
2262 uint_t cookie_cnt;
2263 uint8_t bdeFlags;
2264
2265 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2266 switch (bpl_type) {
2267 case BPL_CMD:
2268 cp = pkt->pkt_cmd_cookie;
2269 cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2270 size = (int32_t)pkt->pkt_cmdlen;
2271 bdeFlags = 0;
2272 break;
2273
2274 case BPL_RESP:
2275 cp = pkt->pkt_resp_cookie;
2276 cookie_cnt = pkt->pkt_resp_cookie_cnt;
2277 size = (int32_t)pkt->pkt_rsplen;
2278 bdeFlags = BUFF_USE_RCV;
2279 break;
2280
2281
2282 case BPL_DATA:
2283 cp = pkt->pkt_data_cookie;
2284 cookie_cnt = pkt->pkt_data_cookie_cnt;
2285 size = (int32_t)pkt->pkt_datalen;
2286 bdeFlags = (pkt->pkt_tran_type == FC_PKT_FCP_READ) ?
2287 BUFF_USE_RCV : 0;
2288 break;
2289
2290 default:
2291 return (NULL);
2292 }
2293
2294 #else
2295 switch (bpl_type) {
2296 case BPL_CMD:
2297 cp = &pkt->pkt_cmd_cookie;
2298 cookie_cnt = 1;
2299 size = (int32_t)pkt->pkt_cmdlen;
2300 bdeFlags = 0;
2301 break;
2302
2303 case BPL_RESP:
2304 cp = &pkt->pkt_resp_cookie;
2305 cookie_cnt = 1;
2306 size = (int32_t)pkt->pkt_rsplen;
2307 bdeFlags = BUFF_USE_RCV;
2308 break;
2309
2310
2311 case BPL_DATA:
2312 cp = &pkt->pkt_data_cookie;
2313 cookie_cnt = 1;
2314 size = (int32_t)pkt->pkt_datalen;
2315 bdeFlags = (pkt->pkt_tran_type == FC_PKT_FCP_READ) ?
2316 BUFF_USE_RCV : 0;
2317 break;
2318
2319 default:
2320 return (NULL);
2321 }
2322 #endif /* >= EMLXS_MODREV3 */
2323
2324 for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2325 bpl->addrHigh =
2326 BE_SWAP32(PADDR_HI(cp->dmac_laddress));
2327 bpl->addrLow =
2328 BE_SWAP32(PADDR_LO(cp->dmac_laddress));
2329 bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
2330 bpl->tus.f.bdeFlags = bdeFlags;
2331 bpl->tus.w = BE_SWAP32(bpl->tus.w);
2332
2333 bpl++;
2334 size -= cp->dmac_size;
2335 }
2336
2337 return (bpl);
2338
2339 } /* emlxs_pkt_to_bpl */
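
/*
 * Note: emlxs_pkt_to_bpl() above appends one 64-bit buffer descriptor
 * per DMA cookie and returns the next free BPL slot (or NULL for a bad
 * bpl_type), so callers can chain payload sections back to back.
 * A minimal sketch of the FCP usage in emlxs_sli2_bde_setup():
 *
 *	bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);	(FCP_CMND)
 *	bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);	(FCP_RSP)
 *	bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_DATA);	(data S/G list)
 */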
2340
2341
2342 static uint32_t
2343 emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2344 {
2345 emlxs_hba_t *hba = HBA;
2346 fc_packet_t *pkt;
2347 MATCHMAP *bmp;
2348 ULP_BDE64 *bpl;
2349 uint64_t bp;
2350 IOCB *iocb;
2351 IOCBQ *iocbq;
2352 CHANNEL *cp;
2353 uint32_t data_cookie_cnt;
2354 uint32_t channelno;
2355
2356 cp = sbp->channel;
2357 	iocb = (IOCB *)&sbp->iocbq;
2358 pkt = PRIV2PKT(sbp);
2359
2360 if (hba->sli.sli3.bpl_table) {
2361 bmp = hba->sli.sli3.bpl_table[sbp->iotag];
2362 } else {
2363 bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL);
2364 }
2365
2366 if (!bmp) {
2367 return (1);
2368 }
2369
2370 sbp->bmp = bmp;
2371 bpl = (ULP_BDE64 *)bmp->virt;
2372 bp = bmp->phys;
2373
2374 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2375 data_cookie_cnt = pkt->pkt_data_cookie_cnt;
2376 #else
2377 data_cookie_cnt = 1;
2378 #endif /* >= EMLXS_MODREV3 */
2379
2380 iocbq = &sbp->iocbq;
2381
2382 	channelno = (iocbq->flag & IOCB_FCP_CMD) ? FC_FCP_RING : cp->channelno;
2383 switch (channelno) {
2384 case FC_FCP_RING:
2385
2386 /* CMD payload */
2387 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
2388 		if (!bpl) {
2389 return (1);
2390 }
2391
2392 /* Check if response & data payloads are needed */
2393 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2394 break;
2395 }
2396
2397 /* RSP payload */
2398 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
2399 		if (!bpl) {
2400 return (1);
2401 }
2402
2403 /* Check if data payload is needed */
2404 if ((pkt->pkt_datalen == 0) ||
2405 (data_cookie_cnt == 0)) {
2406 break;
2407 }
2408
2409 /* DATA payload */
2410 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_DATA);
2411 		if (!bpl) {
2412 return (1);
2413 }
2414 break;
2415
2416 case FC_IP_RING:
2417
2418 /* CMD payload */
2419 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
2420 		if (!bpl) {
2421 return (1);
2422 }
2423 break;
2424
2425 case FC_ELS_RING:
2426
2427 /* CMD payload */
2428 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
2429 		if (!bpl) {
2430 return (1);
2431 }
2432
2433 /* Check if response payload is needed */
2434 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2435 break;
2436 }
2437
2438 /* RSP payload */
2439 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
2440 		if (!bpl) {
2441 return (1);
2442 }
2443 break;
2444
2445 case FC_CT_RING:
2446
2447 /* CMD payload */
2448 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
2449 		if (!bpl) {
2450 return (1);
2451 }
2452
2453 /* Check if response payload is needed */
2454 if ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) &&
2455 (pkt->pkt_cmd_fhdr.type != EMLXS_MENLO_TYPE)) {
2456 break;
2457 }
2458
2459 /* RSP payload */
2460 bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
2461 		if (!bpl) {
2462 return (1);
2463 }
2464 break;
2465
2466 }
2467
2468 iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
2469 iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
2470 iocb->un.genreq64.bdl.addrLow = PADDR_LO(bp);
2471 iocb->un.genreq64.bdl.bdeSize =
2472 (uint32_t)(((uintptr_t)bpl - (uintptr_t)bmp->virt) & 0xFFFFFFFF);
2473 iocb->ULPBDECOUNT = 1;
2474 iocb->ULPLE = 1;
2475
2476 return (0);
2477
2478 } /* emlxs_sli2_bde_setup */
2479
2480
2481 static uint32_t
2482 emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2483 {
2484 ddi_dma_cookie_t *cp_cmd;
2485 ddi_dma_cookie_t *cp_resp;
2486 ddi_dma_cookie_t *cp_data;
2487 fc_packet_t *pkt;
2488 ULP_BDE64 *bde;
2489 int data_cookie_cnt;
2490 uint32_t i;
2491 uint32_t channelno;
2492 IOCB *iocb;
2493 IOCBQ *iocbq;
2494 CHANNEL *cp;
2495
2496 pkt = PRIV2PKT(sbp);
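	/*
	 * Note: a SLI-3 IOCB can only embed a limited number of BDEs
	 * directly (SLI3_MAX_BDE total, with single-cookie command and
	 * response payloads).  Larger requests fall back to the SLI-2
	 * style external BPL built by emlxs_sli2_bde_setup().
	 */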
2497 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2498 if ((pkt->pkt_cmd_cookie_cnt > 1) ||
2499 (pkt->pkt_resp_cookie_cnt > 1) ||
2500 ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
2501 pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
2502 i = emlxs_sli2_bde_setup(port, sbp);
2503 return (i);
2504 }
2505
2506 cp_cmd = pkt->pkt_cmd_cookie;
2507 cp_resp = pkt->pkt_resp_cookie;
2508 cp_data = pkt->pkt_data_cookie;
2509 data_cookie_cnt = pkt->pkt_data_cookie_cnt;
2510 #else
2511 cp_cmd = &pkt->pkt_cmd_cookie;
2512 cp_resp = &pkt->pkt_resp_cookie;
2513 cp_data = &pkt->pkt_data_cookie;
2514 data_cookie_cnt = 1;
2515 #endif /* >= EMLXS_MODREV3 */
2516
2517 cp = sbp->channel;
2518 iocbq = &sbp->iocbq;
2519 iocb = (IOCB *)iocbq;
2520 iocb->unsli3.ext_iocb.ebde_count = 0;
2521
2522 	channelno = (iocbq->flag & IOCB_FCP_CMD) ? FC_FCP_RING : cp->channelno;
2523 switch (channelno) {
2524 case FC_FCP_RING:
2525 /* CMD payload */
2526 iocb->un.fcpi64.bdl.addrHigh =
2527 PADDR_HI(cp_cmd->dmac_laddress);
2528 iocb->un.fcpi64.bdl.addrLow =
2529 PADDR_LO(cp_cmd->dmac_laddress);
2530 iocb->un.fcpi64.bdl.bdeSize = pkt->pkt_cmdlen;
2531 iocb->un.fcpi64.bdl.bdeFlags = 0;
2532
2533 /* Check if a response & data payload are needed */
2534 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2535 break;
2536 }
2537
2538 /* RSP payload */
2539 iocb->unsli3.ext_iocb.ebde1.addrHigh =
2540 PADDR_HI(cp_resp->dmac_laddress);
2541 iocb->unsli3.ext_iocb.ebde1.addrLow =
2542 PADDR_LO(cp_resp->dmac_laddress);
2543 iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
2544 iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
2545 iocb->unsli3.ext_iocb.ebde_count = 1;
2546
2547 /* Check if a data payload is needed */
2548 if ((pkt->pkt_datalen == 0) ||
2549 (data_cookie_cnt == 0)) {
2550 break;
2551 }
2552
2553 /* DATA payload */
2554 bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde2;
2555 for (i = 0; i < data_cookie_cnt; i++) {
2556 bde->addrHigh = PADDR_HI(cp_data->dmac_laddress);
2557 bde->addrLow = PADDR_LO(cp_data->dmac_laddress);
2558 bde->tus.f.bdeSize = cp_data->dmac_size;
2559 bde->tus.f.bdeFlags = 0;
2560 cp_data++;
2561 bde++;
2562 }
2563 iocb->unsli3.ext_iocb.ebde_count += data_cookie_cnt;
2564
2565 break;
2566
2567 case FC_IP_RING:
2568 /* CMD payload */
2569 iocb->un.xseq64.bdl.addrHigh =
2570 PADDR_HI(cp_cmd->dmac_laddress);
2571 iocb->un.xseq64.bdl.addrLow =
2572 PADDR_LO(cp_cmd->dmac_laddress);
2573 iocb->un.xseq64.bdl.bdeSize = pkt->pkt_cmdlen;
2574 iocb->un.xseq64.bdl.bdeFlags = 0;
2575
2576 break;
2577
2578 case FC_ELS_RING:
2579
2580 /* CMD payload */
2581 iocb->un.elsreq64.bdl.addrHigh =
2582 PADDR_HI(cp_cmd->dmac_laddress);
2583 iocb->un.elsreq64.bdl.addrLow =
2584 PADDR_LO(cp_cmd->dmac_laddress);
2585 iocb->un.elsreq64.bdl.bdeSize = pkt->pkt_cmdlen;
2586 iocb->un.elsreq64.bdl.bdeFlags = 0;
2587
2588 /* Check if a response payload is needed */
2589 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2590 break;
2591 }
2592
2593 /* RSP payload */
2594 iocb->unsli3.ext_iocb.ebde1.addrHigh =
2595 PADDR_HI(cp_resp->dmac_laddress);
2596 iocb->unsli3.ext_iocb.ebde1.addrLow =
2597 PADDR_LO(cp_resp->dmac_laddress);
2598 iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
2599 iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = BUFF_USE_RCV;
2600 iocb->unsli3.ext_iocb.ebde_count = 1;
2601 break;
2602
2603 case FC_CT_RING:
2604
2605 /* CMD payload */
2606 iocb->un.genreq64.bdl.addrHigh =
2607 PADDR_HI(cp_cmd->dmac_laddress);
2608 iocb->un.genreq64.bdl.addrLow =
2609 PADDR_LO(cp_cmd->dmac_laddress);
2610 iocb->un.genreq64.bdl.bdeSize = pkt->pkt_cmdlen;
2611 iocb->un.genreq64.bdl.bdeFlags = 0;
2612
2613 /* Check if a response payload is needed */
2614 if ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) &&
2615 (pkt->pkt_cmd_fhdr.type != EMLXS_MENLO_TYPE)) {
2616 break;
2617 }
2618
2619 /* RSP payload */
2620 iocb->unsli3.ext_iocb.ebde1.addrHigh =
2621 PADDR_HI(cp_resp->dmac_laddress);
2622 iocb->unsli3.ext_iocb.ebde1.addrLow =
2623 PADDR_LO(cp_resp->dmac_laddress);
2624 iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
2625 iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = BUFF_USE_RCV;
2626 iocb->unsli3.ext_iocb.ebde_count = 1;
2627 break;
2628 }
2629
2630 iocb->ULPBDECOUNT = 0;
2631 iocb->ULPLE = 0;
2632
2633 return (0);
2634
2635 } /* emlxs_sli3_bde_setup */
2636
2637
2638 /* Only used for FCP Data xfers */
2639 #ifdef SFCT_SUPPORT
2640 /*ARGSUSED*/
2641 static uint32_t
2642 emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2643 {
2644 emlxs_hba_t *hba = HBA;
2645 scsi_task_t *fct_task;
2646 MATCHMAP *bmp;
2647 ULP_BDE64 *bpl;
2648 uint64_t bp;
2649 uint8_t bdeFlags;
2650 IOCB *iocb;
2651 uint32_t size;
2652 MATCHMAP *mp;
2653
2654 iocb = (IOCB *)&sbp->iocbq.iocb;
2655 sbp->bmp = NULL;
2656
2657 if (!sbp->fct_buf) {
2658 iocb->un.fcpt64.bdl.addrHigh = 0;
2659 iocb->un.fcpt64.bdl.addrLow = 0;
2660 iocb->un.fcpt64.bdl.bdeSize = 0;
2661 iocb->un.fcpt64.bdl.bdeFlags = 0;
2662 iocb->un.fcpt64.fcpt_Offset = 0;
2663 iocb->un.fcpt64.fcpt_Length = 0;
2664 iocb->ULPBDECOUNT = 0;
2665 iocb->ULPLE = 1;
2666 return (0);
2667 }
2668
2669 if (hba->sli.sli3.bpl_table) {
2670 bmp = hba->sli.sli3.bpl_table[sbp->iotag];
2671 } else {
2672 bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL);
2673 }
2674
2675 if (!bmp) {
2676 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
2677 		    "fct_sli2_bde_setup: Unable to allocate BPL buffer. iotag=%d",
2678 sbp->iotag);
2679
2680 iocb->un.fcpt64.bdl.addrHigh = 0;
2681 iocb->un.fcpt64.bdl.addrLow = 0;
2682 iocb->un.fcpt64.bdl.bdeSize = 0;
2683 iocb->un.fcpt64.bdl.bdeFlags = 0;
2684 iocb->un.fcpt64.fcpt_Offset = 0;
2685 iocb->un.fcpt64.fcpt_Length = 0;
2686 iocb->ULPBDECOUNT = 0;
2687 iocb->ULPLE = 1;
2688 return (1);
2689 }
2690
2691 bpl = (ULP_BDE64 *)bmp->virt;
2692 bp = bmp->phys;
2693
2694 fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;
2695
2696 size = sbp->fct_buf->db_data_size;
2697 mp = (MATCHMAP *)sbp->fct_buf->db_port_private;
2698
2699 bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
2700
2701 /* Init the buffer list */
2702 bpl->addrHigh = BE_SWAP32(PADDR_HI(mp->phys));
2703 bpl->addrLow = BE_SWAP32(PADDR_LO(mp->phys));
2704 bpl->tus.f.bdeSize = size;
2705 bpl->tus.f.bdeFlags = bdeFlags;
2706 bpl->tus.w = BE_SWAP32(bpl->tus.w);
2707
2708 /* Init the IOCB */
2709 iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
2710 iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
2711 iocb->un.fcpt64.bdl.bdeSize = sizeof (ULP_BDE64);
2712 iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;
2713
2714 iocb->un.fcpt64.fcpt_Length =
2715 (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
2716 iocb->un.fcpt64.fcpt_Offset = 0;
2717
2718 iocb->ULPBDECOUNT = 1;
2719 iocb->ULPLE = 1;
2720 sbp->bmp = bmp;
2721
2722 return (0);
2723
2724 } /* emlxs_sli2_fct_bde_setup */
2725 #endif /* SFCT_SUPPORT */
2726
2727
2728 #ifdef SFCT_SUPPORT
2729 /*ARGSUSED*/
2730 static uint32_t
2731 emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2732 {
2733 scsi_task_t *fct_task;
2734 IOCB *iocb;
2735 MATCHMAP *mp;
2736 uint32_t bdeFlags;
2737 uint32_t size;
2738
2739 iocb = (IOCB *)&sbp->iocbq;
2740
2741 if (!sbp->fct_buf) {
2742 iocb->un.fcpt64.bdl.addrHigh = 0;
2743 iocb->un.fcpt64.bdl.addrLow = 0;
2744 iocb->un.fcpt64.bdl.bdeSize = 0;
2745 iocb->un.fcpt64.bdl.bdeFlags = 0;
2746 iocb->un.fcpt64.fcpt_Offset = 0;
2747 iocb->un.fcpt64.fcpt_Length = 0;
2748 iocb->ULPBDECOUNT = 0;
2749 iocb->ULPLE = 0;
2750 iocb->unsli3.ext_iocb.ebde_count = 0;
2751 return (0);
2752 }
2753
2754 fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;
2755
2756 size = sbp->fct_buf->db_data_size;
2757 mp = (MATCHMAP *)sbp->fct_buf->db_port_private;
2758
2759 bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;
2760
2761 /* Init first BDE */
2762 iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(mp->phys);
2763 iocb->un.fcpt64.bdl.addrLow = PADDR_LO(mp->phys);
2764 iocb->un.fcpt64.bdl.bdeSize = size;
2765 iocb->un.fcpt64.bdl.bdeFlags = bdeFlags;
2766
2767 iocb->unsli3.ext_iocb.ebde_count = 0;
2768 iocb->un.fcpt64.fcpt_Length =
2769 (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
2770 iocb->un.fcpt64.fcpt_Offset = 0;
2771
2772 iocb->ULPBDECOUNT = 0;
2773 iocb->ULPLE = 0;
2774
2775 return (0);
2776
2777 } /* emlxs_sli3_fct_bde_setup */
2778 #endif /* SFCT_SUPPORT */
2779
2780
2781 static void
2782 emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
2783 {
2784 #ifdef FMA_SUPPORT
2785 emlxs_port_t *port = &PPORT;
2786 #endif /* FMA_SUPPORT */
2787 PGP *pgp;
2788 emlxs_buf_t *sbp;
2789 SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
2790 RING *rp;
2791 uint32_t nextIdx;
2792 uint32_t status;
2793 void *ioa2;
2794 off_t offset;
2795 uint32_t count = 0;
2796 uint32_t flag;
2797 uint32_t channelno;
2798 int32_t throttle;
2799 #ifdef NODE_THROTTLE_SUPPORT
2800 int32_t node_throttle;
2801 NODELIST *marked_node = NULL;
2802 #endif /* NODE_THROTTLE_SUPPORT */
2803
2804 channelno = cp->channelno;
2805 rp = (RING *)cp->iopath;
2806
2807 throttle = 0;
2808
2809 	/* Check for an FCP command while the adapter is not yet ready */
2810 	/* (an FCP_CMD may be issued on any ring) */
2811 if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
2812 if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
2813 (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
2814 emlxs_tx_put(iocbq, 1);
2815 return;
2816 }
2817 }
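	/*
	 * Note: FCP commands are normally deferred to the tx queue until
	 * the adapter reaches FC_READY; the IOCB_SPECIAL exception above
	 * appears to let non-initiator (target mode) ports issue their
	 * setup iocbs earlier.
	 */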
2818
2819 /* Attempt to acquire CMD_RING lock */
2820 if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
2821 /* Queue it for later */
2822 if (iocbq) {
2823 if ((hba->io_count -
2824 hba->channel_tx_count) > 10) {
2825 emlxs_tx_put(iocbq, 1);
2826 return;
2827 } else {
2828
2829 /*
2830 * EMLXS_MSGF(EMLXS_CONTEXT,
2831 * &emlxs_ring_watchdog_msg,
2832 * "%s host=%d port=%d cnt=%d,%d RACE
2833 * CONDITION3 DETECTED.",
2834 * emlxs_ring_xlate(channelno),
2835 * rp->fc_cmdidx, rp->fc_port_cmdidx,
2836 * hba->channel_tx_count,
2837 * hba->io_count);
2838 */
2839 mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
2840 }
2841 } else {
2842 return;
2843 }
2844 }
2845 /* CMD_RING_LOCK acquired */
2846
2847 	/* The throttle check only applies to non-special iocbs */
2848 if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
2849 /* Check if HBA is full */
2850 throttle = hba->io_throttle - hba->io_active;
2851 if (throttle <= 0) {
2852 /* Hitting adapter throttle limit */
2853 /* Queue it for later */
2854 if (iocbq) {
2855 emlxs_tx_put(iocbq, 1);
2856 }
2857
2858 goto busy;
2859 }
2860 }
2861
2862 /* Read adapter's get index */
2863 pgp = (PGP *)
2864 &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
2865 offset =
2866 (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
2867 (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
2868 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
2869 DDI_DMA_SYNC_FORKERNEL);
2870 rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
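	/*
	 * Note: the PGP (port get/put) block lives in host memory inside
	 * SLIM2.  The adapter DMAs its get index there as it consumes
	 * command entries, so the index must be synced FORKERNEL before
	 * each read.
	 */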
2871
2872 /* Calculate the next put index */
2873 nextIdx =
2874 (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;
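	/*
	 * Note: the ring is considered full when advancing the put index
	 * would make it equal the adapter's get index; one slot is always
	 * left unused so that a full ring can be distinguished from an
	 * empty one.
	 */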
2875
2876 /* Check if ring is full */
2877 if (nextIdx == rp->fc_port_cmdidx) {
2878 /* Try one more time */
2879 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
2880 DDI_DMA_SYNC_FORKERNEL);
2881 rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
2882
2883 if (nextIdx == rp->fc_port_cmdidx) {
2884 /* Queue it for later */
2885 if (iocbq) {
2886 emlxs_tx_put(iocbq, 1);
2887 }
2888
2889 goto busy;
2890 }
2891 }
2892
2893 /*
2894 * We have a command ring slot available
2895 * Make sure we have an iocb to send
2896 */
2897 if (iocbq) {
2898 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2899
2900 		/* Check if the channel already has iocbs waiting */
2901 if (cp->nodeq.q_first != NULL) {
2902 /* Put the current iocbq on the tx queue */
2903 emlxs_tx_put(iocbq, 0);
2904
2905 /*
2906 * Attempt to replace it with the next iocbq
2907 * in the tx queue
2908 */
2909 iocbq = emlxs_tx_get(cp, 0);
2910 }
2911
2912 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2913 } else {
2914 /* Try to get the next iocb on the tx queue */
2915 iocbq = emlxs_tx_get(cp, 1);
2916 }
2917
2918 sendit:
2919 count = 0;
2920
2921 /* Process each iocbq */
2922 while (iocbq) {
2923 sbp = iocbq->sbp;
2924
2925 #ifdef NODE_THROTTLE_SUPPORT
2926 if (sbp && sbp->node && sbp->node->io_throttle) {
2927 node_throttle = sbp->node->io_throttle -
2928 sbp->node->io_active;
2929 if (node_throttle <= 0) {
2930 /* Node is busy */
2931 /* Queue this iocb and get next iocb from */
2932 /* channel */
2933
2934 if (!marked_node) {
2935 marked_node = sbp->node;
2936 }
2937
2938 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2939 emlxs_tx_put(iocbq, 0);
2940
2941 if (cp->nodeq.q_first == marked_node) {
2942 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2943 goto busy;
2944 }
2945
2946 iocbq = emlxs_tx_get(cp, 0);
2947 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2948 continue;
2949 }
2950 }
2951 		marked_node = NULL;
2952 #endif /* NODE_THROTTLE_SUPPORT */
2953
2954 if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
2955 /*
2956 * Update adapter if needed, since we are about to
2957 * delay here
2958 */
2959 if (count) {
2960 count = 0;
2961
2962 /* Update the adapter's cmd put index */
2963 if (hba->bus_type == SBUS_FC) {
2964 slim2p->mbx.us.s2.host[channelno].
2965 cmdPutInx =
2966 BE_SWAP32(rp->fc_cmdidx);
2967
2968 /* DMA sync the index for the adapter */
2969 offset = (off_t)
2970 ((uint64_t)
2971 ((unsigned long)&(slim2p->mbx.us.
2972 s2.host[channelno].cmdPutInx)) -
2973 (uint64_t)((unsigned long)slim2p));
2974 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
2975 dma_handle, offset, 4,
2976 DDI_DMA_SYNC_FORDEV);
2977 } else {
2978 ioa2 = (void *)
2979 ((char *)hba->sli.sli3.slim_addr +
2980 hba->sli.sli3.hgp_ring_offset +
2981 ((channelno * 2) *
2982 sizeof (uint32_t)));
2983 WRITE_SLIM_ADDR(hba,
2984 (volatile uint32_t *)ioa2,
2985 rp->fc_cmdidx);
2986 }
2987
2988 status = (CA_R0ATT << (channelno * 4));
2989 WRITE_CSR_REG(hba, FC_CA_REG(hba),
2990 (volatile uint32_t)status);
2991
2992 }
2993 /* Perform delay */
2994 if ((channelno == FC_ELS_RING) &&
2995 !(iocbq->flag & IOCB_FCP_CMD)) {
2996 drv_usecwait(100000);
2997 } else {
2998 drv_usecwait(20000);
2999 }
3000 }
3001
3002 /*
3003 * At this point, we have a command ring slot available
3004 * and an iocb to send
3005 */
3006 flag = iocbq->flag;
3007
3008 /* Send the iocb */
3009 emlxs_sli3_issue_iocb(hba, rp, iocbq);
3010 /*
3011 * After this, the sbp / iocb should not be
3012 * accessed in the xmit path.
3013 */
3014
3015 count++;
3016 if (iocbq && (!(flag & IOCB_SPECIAL))) {
3017 /* Check if HBA is full */
3018 throttle = hba->io_throttle - hba->io_active;
3019 if (throttle <= 0) {
3020 goto busy;
3021 }
3022 }
3023
3024 /* Calculate the next put index */
3025 nextIdx =
3026 (rp->fc_cmdidx + 1 >=
3027 rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;
3028
3029 /* Check if ring is full */
3030 if (nextIdx == rp->fc_port_cmdidx) {
3031 /* Try one more time */
3032 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3033 offset, 4, DDI_DMA_SYNC_FORKERNEL);
3034 rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
3035
3036 if (nextIdx == rp->fc_port_cmdidx) {
3037 goto busy;
3038 }
3039 }
3040
3041 /* Get the next iocb from the tx queue if there is one */
3042 iocbq = emlxs_tx_get(cp, 1);
3043 }
3044
3045 if (count) {
3046 /* Update the adapter's cmd put index */
3047 if (hba->bus_type == SBUS_FC) {
3048 slim2p->mbx.us.s2.host[channelno].
3049 cmdPutInx = BE_SWAP32(rp->fc_cmdidx);
3050
3051 /* DMA sync the index for the adapter */
3052 offset = (off_t)
3053 ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
3054 host[channelno].cmdPutInx)) -
3055 (uint64_t)((unsigned long)slim2p));
3056 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3057 offset, 4, DDI_DMA_SYNC_FORDEV);
3058 } else {
3059 ioa2 =
3060 (void *)((char *)hba->sli.sli3.slim_addr +
3061 hba->sli.sli3.hgp_ring_offset +
3062 ((channelno * 2) * sizeof (uint32_t)));
3063 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
3064 rp->fc_cmdidx);
3065 }
3066
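	/*
	 * Note: each ring owns a 4-bit field in the Chip Attention
	 * register, so shifting CA_R0ATT by (channelno * 4) rings the
	 * command doorbell for this specific ring.
	 */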
3067 status = (CA_R0ATT << (channelno * 4));
3068 WRITE_CSR_REG(hba, FC_CA_REG(hba),
3069 (volatile uint32_t)status);
3070
3071 /* Check tx queue one more time before releasing */
3072 if ((iocbq = emlxs_tx_get(cp, 1))) {
3073 /*
3074 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
3075 * "%s host=%d port=%d RACE CONDITION1
3076 * DETECTED.", emlxs_ring_xlate(channelno),
3077 * rp->fc_cmdidx, rp->fc_port_cmdidx);
3078 */
3079 goto sendit;
3080 }
3081 }
3082
3083 #ifdef FMA_SUPPORT
3084 /* Access handle validation */
3085 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
3086 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
3087 #endif /* FMA_SUPPORT */
3088
3089 mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
3090
3091 return;
3092
3093 busy:
3094
3095 /*
3096 	 * Set R0CE_REQ in the Chip Attention register for this ring.
3097 	 * The chip will tell us when a command entry is freed.
3098 */
3099 if (count) {
3100 /* Update the adapter's cmd put index */
3101 if (hba->bus_type == SBUS_FC) {
3102 slim2p->mbx.us.s2.host[channelno].cmdPutInx =
3103 BE_SWAP32(rp->fc_cmdidx);
3104
3105 /* DMA sync the index for the adapter */
3106 offset = (off_t)
3107 ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
3108 host[channelno].cmdPutInx)) -
3109 (uint64_t)((unsigned long)slim2p));
3110 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3111 offset, 4, DDI_DMA_SYNC_FORDEV);
3112 } else {
3113 ioa2 =
3114 (void *)((char *)hba->sli.sli3.slim_addr +
3115 hba->sli.sli3.hgp_ring_offset +
3116 ((channelno * 2) * sizeof (uint32_t)));
3117 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
3118 rp->fc_cmdidx);
3119 }
3120 }
3121
3122 status = ((CA_R0ATT | CA_R0CE_REQ) << (channelno * 4));
3123 WRITE_CSR_REG(hba, FC_CA_REG(hba), (volatile uint32_t)status);
3124
3125 if (throttle <= 0) {
3126 HBASTATS.IocbThrottled++;
3127 } else {
3128 HBASTATS.IocbRingFull[channelno]++;
3129 }
3130
3131 #ifdef FMA_SUPPORT
3132 /* Access handle validation */
3133 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
3134 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
3135 #endif /* FMA_SUPPORT */
3136
3137 mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
3138
3139 return;
3140
3141 } /* emlxs_sli3_issue_iocb_cmd() */
3142
3143
3144 /* MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR */
3145 /* MBX_WAIT - returns MBX_TIMEOUT or mailbox_status */
3146 /* MBX_SLEEP - returns MBX_TIMEOUT or mailbox_status */
3147 /* MBX_POLL - returns MBX_TIMEOUT or mailbox_status */
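
/*
 * A minimal calling sketch (emlxs_mb_xxx() stands in for any mailbox
 * setup helper; the helper name here is illustrative only):
 *
 *	MAILBOXQ *mbq;
 *
 *	mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX);
 *	emlxs_mb_xxx(hba, mbq);		(build the command)
 *	rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0);
 *	if (rval != MBX_SUCCESS) {
 *		(handle MBX_TIMEOUT or the mailbox status)
 *	}
 */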
3148
3149 static uint32_t
3150 emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3151 uint32_t tmo)
3152 {
3153 emlxs_port_t *port;
3154 SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
3155 MAILBOX *mbox;
3156 MAILBOX *mb;
3157 volatile uint32_t word0;
3158 volatile uint32_t ldata;
3159 off_t offset;
3160 MATCHMAP *mbox_bp;
3161 uint32_t tmo_local;
3162 MAILBOX *swpmb;
3163
3164 if (!mbq->port) {
3165 mbq->port = &PPORT;
3166 }
3167
3168 port = (emlxs_port_t *)mbq->port;
3169
3170 mb = (MAILBOX *)mbq;
3171 swpmb = (MAILBOX *)&word0;
3172
3173 mb->mbxStatus = MBX_SUCCESS;
3174
3175 /* Check for minimum timeouts */
3176 switch (mb->mbxCommand) {
3177 /* Mailbox commands that erase/write flash */
3178 case MBX_DOWN_LOAD:
3179 case MBX_UPDATE_CFG:
3180 case MBX_LOAD_AREA:
3181 case MBX_LOAD_EXP_ROM:
3182 case MBX_WRITE_NV:
3183 case MBX_FLASH_WR_ULA:
3184 case MBX_DEL_LD_ENTRY:
3185 case MBX_LOAD_SM:
3186 if (tmo < 300) {
3187 tmo = 300;
3188 }
3189 break;
3190
3191 default:
3192 if (tmo < 30) {
3193 tmo = 30;
3194 }
3195 break;
3196 }
3197
3198 /* Convert tmo seconds to 10 millisecond tics */
3199 tmo_local = tmo * 100;
3200
3201 /* Adjust wait flag */
3202 if (flag != MBX_NOWAIT) {
3203 /* If interrupt is enabled, use sleep, otherwise poll */
3204 if (hba->sli.sli3.hc_copy & HC_MBINT_ENA) {
3205 flag = MBX_SLEEP;
3206 } else {
3207 flag = MBX_POLL;
3208 }
3209 }
3210
3211 mutex_enter(&EMLXS_PORT_LOCK);
3212
3213 /* Check for hardware error */
3214 if (hba->flag & FC_HARDWARE_ERROR) {
3215 mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ?
3216 MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;
3217
3218 mutex_exit(&EMLXS_PORT_LOCK);
3219
3220 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3221 "Hardware error reported. %s failed. status=%x mb=%p",
3222 emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);
3223
3224 return (MBX_HARDWARE_ERROR);
3225 }
3226
3227 if (hba->mbox_queue_flag) {
3228 /* If we are not polling, then queue it for later */
3229 if (flag == MBX_NOWAIT) {
3230 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3231 "Busy. %s: mb=%p NoWait.",
3232 emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3233
3234 emlxs_mb_put(hba, mbq);
3235
3236 HBASTATS.MboxBusy++;
3237
3238 mutex_exit(&EMLXS_PORT_LOCK);
3239
3240 return (MBX_BUSY);
3241 }
3242
3243 while (hba->mbox_queue_flag) {
3244 mutex_exit(&EMLXS_PORT_LOCK);
3245
3246 if (tmo_local-- == 0) {
3247 EMLXS_MSGF(EMLXS_CONTEXT,
3248 &emlxs_mbox_event_msg,
3249 "Timeout. %s: mb=%p tmo=%d Waiting.",
3250 emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3251 tmo);
3252
3253 			/* A non-lethal mailbox timeout status; */
3254 			/* it does not indicate a hardware error */
3255 mb->mbxStatus = MBX_TIMEOUT;
3256 return (MBX_TIMEOUT);
3257 }
3258
3259 BUSYWAIT_MS(10);
3260 mutex_enter(&EMLXS_PORT_LOCK);
3261
3262 /* Check for hardware error */
3263 if (hba->flag & FC_HARDWARE_ERROR) {
3264 mb->mbxStatus =
3265 (hba->flag & FC_OVERTEMP_EVENT) ?
3266 MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;
3267
3268 mutex_exit(&EMLXS_PORT_LOCK);
3269
3270 EMLXS_MSGF(EMLXS_CONTEXT,
3271 &emlxs_mbox_detail_msg,
3272 "Hardware error reported. %s failed. "
3273 "status=%x mb=%p",
3274 emlxs_mb_cmd_xlate(mb->mbxCommand),
3275 mb->mbxStatus, mb);
3276
3277 return (MBX_HARDWARE_ERROR);
3278 }
3279 }
3280 }
3281
3282 /* Initialize mailbox area */
3283 emlxs_mb_init(hba, mbq, flag, tmo);
3284
3285 switch (flag) {
3286 case MBX_NOWAIT:
3287
3288 if (mb->mbxCommand != MBX_HEARTBEAT) {
3289 if (mb->mbxCommand != MBX_DOWN_LOAD &&
3290 mb->mbxCommand != MBX_DUMP_MEMORY) {
3291 EMLXS_MSGF(EMLXS_CONTEXT,
3292 &emlxs_mbox_detail_msg,
3293 "Sending. %s: mb=%p NoWait.",
3294 emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3295 }
3296 }
3297
3298 break;
3299
3300 case MBX_SLEEP:
3301 if (mb->mbxCommand != MBX_DOWN_LOAD &&
3302 mb->mbxCommand != MBX_DUMP_MEMORY) {
3303 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3304 "Sending. %s: mb=%p Sleep.",
3305 emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3306 }
3307
3308 break;
3309
3310 case MBX_POLL:
3311 if (mb->mbxCommand != MBX_DOWN_LOAD &&
3312 mb->mbxCommand != MBX_DUMP_MEMORY) {
3313 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3314 "Sending. %s: mb=%p Polled.",
3315 emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3316 }
3317 break;
3318 }
3319
3320 mb->mbxOwner = OWN_CHIP;
3321
3322 /* Clear the attention bit */
3323 WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3324
3325 if (hba->flag & FC_SLIM2_MODE) {
3326 /* First copy command data */
3327 mbox = FC_SLIM2_MAILBOX(hba);
3328 offset =
3329 (off_t)((uint64_t)((unsigned long)mbox)
3330 - (uint64_t)((unsigned long)slim2p));
3331
3332 #ifdef MBOX_EXT_SUPPORT
3333 if (mbq->extbuf) {
3334 uint32_t *mbox_ext =
3335 (uint32_t *)((uint8_t *)mbox +
3336 MBOX_EXTENSION_OFFSET);
3337 off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;
3338
3339 BE_SWAP32_BCOPY((uint8_t *)mbq->extbuf,
3340 (uint8_t *)mbox_ext, mbq->extsize);
3341
3342 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3343 offset_ext, mbq->extsize,
3344 DDI_DMA_SYNC_FORDEV);
3345 }
3346 #endif /* MBOX_EXT_SUPPORT */
3347
3348 BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3349 MAILBOX_CMD_BSIZE);
3350
3351 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3352 MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3353 } else { /* SLIM 1 */
3354
3355 mbox = FC_SLIM1_MAILBOX(hba);
3356
3357 #ifdef MBOX_EXT_SUPPORT
3358 if (mbq->extbuf) {
3359 uint32_t *mbox_ext =
3360 (uint32_t *)((uint8_t *)mbox +
3361 MBOX_EXTENSION_OFFSET);
3362 WRITE_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
3363 mbox_ext, (mbq->extsize / 4));
3364 }
3365 #endif /* MBOX_EXT_SUPPORT */
3366
3367 /* First copy command data */
3368 WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3369 (MAILBOX_CMD_WSIZE - 1));
3370
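		/*
		 * Word 0 carries mbxCommand and the mbxOwner bit.  The
		 * adapter polls on ownership, so word 0 is written last
		 * to ensure the adapter never sees a partially written
		 * mailbox command.
		 */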
3371 /* copy over last word, with mbxOwner set */
3372 ldata = *((volatile uint32_t *)mb);
3373 WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3374 }
3375
3376 /* Interrupt board to do it right away */
3377 WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);
3378
3379 mutex_exit(&EMLXS_PORT_LOCK);
3380
3381 #ifdef FMA_SUPPORT
3382 /* Access handle validation */
3383 if ((emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
3384 != DDI_FM_OK) ||
3385 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
3386 != DDI_FM_OK)) {
3387 EMLXS_MSGF(EMLXS_CONTEXT,
3388 &emlxs_invalid_access_handle_msg, NULL);
3389 return (MBX_HARDWARE_ERROR);
3390 }
3391 #endif /* FMA_SUPPORT */
3392
3393 switch (flag) {
3394 case MBX_NOWAIT:
3395 return (MBX_SUCCESS);
3396
3397 case MBX_SLEEP:
3398
3399 /* Wait for completion */
3400 /* The driver clock is timing the mailbox. */
3401 /* emlxs_mb_fini() will be called externally. */
3402
3403 mutex_enter(&EMLXS_MBOX_LOCK);
3404 while (!(mbq->flag & MBQ_COMPLETED)) {
3405 cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
3406 }
3407 mutex_exit(&EMLXS_MBOX_LOCK);
3408
3409 if (mb->mbxStatus == MBX_TIMEOUT) {
3410 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3411 "Timeout. %s: mb=%p tmo=%d. Sleep.",
3412 emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3413 } else {
3414 if (mb->mbxCommand != MBX_DOWN_LOAD &&
3415 mb->mbxCommand != MBX_DUMP_MEMORY) {
3416 EMLXS_MSGF(EMLXS_CONTEXT,
3417 &emlxs_mbox_detail_msg,
3418 "Completed. %s: mb=%p status=%x Sleep.",
3419 emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3420 mb->mbxStatus);
3421 }
3422 }
3423
3424 break;
3425
3426 case MBX_POLL:
3427
3428 /* Convert tmo seconds to 500 usec tics */
3429 tmo_local = tmo * 2000;
3430
3431 /* Get first word of mailbox */
3432 if (hba->flag & FC_SLIM2_MODE) {
3433 mbox = FC_SLIM2_MAILBOX(hba);
3434 offset = (off_t)((uint64_t)((unsigned long)mbox) -
3435 (uint64_t)((unsigned long)slim2p));
3436
3437 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3438 offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3439 word0 = *((volatile uint32_t *)mbox);
3440 word0 = BE_SWAP32(word0);
3441 } else {
3442 mbox = FC_SLIM1_MAILBOX(hba);
3443 word0 =
3444 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
3445 }
3446
3447 /* Wait for command to complete */
3448 while ((swpmb->mbxOwner == OWN_CHIP) &&
3449 !(mbq->flag & MBQ_COMPLETED)) {
3450 if (!hba->timer_id && (tmo_local-- == 0)) {
3451 				/* Self-timed: no driver timer is active */
3452 EMLXS_MSGF(EMLXS_CONTEXT,
3453 &emlxs_mbox_timeout_msg,
3454 "%s: mb=%p tmo=%d Polled.",
3455 emlxs_mb_cmd_xlate(mb->mbxCommand),
3456 mb, tmo);
3457
3458 hba->flag |= FC_MBOX_TIMEOUT;
3459 EMLXS_STATE_CHANGE(hba, FC_ERROR);
3460 emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3461
3462 break;
3463 }
3464
3465 BUSYWAIT_US(500);
3466
3467 /* Get first word of mailbox */
3468 if (hba->flag & FC_SLIM2_MODE) {
3469 EMLXS_MPDATA_SYNC(
3470 hba->sli.sli3.slim2.dma_handle, offset,
3471 sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3472 word0 = *((volatile uint32_t *)mbox);
3473 word0 = BE_SWAP32(word0);
3474 } else {
3475 word0 =
3476 READ_SLIM_ADDR(hba,
3477 ((volatile uint32_t *)mbox));
3478 }
3479
3480 } /* while */
3481
3482 if (mb->mbxStatus == MBX_TIMEOUT) {
3483 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3484 "Timeout. %s: mb=%p tmo=%d. Polled.",
3485 emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3486
3487 break;
3488 }
3489
3490 /* Check for config port command */
3491 if ((swpmb->mbxCommand == MBX_CONFIG_PORT) &&
3492 (swpmb->mbxStatus == MBX_SUCCESS)) {
3493 /* Setup host mbox for cmpl */
3494 mbox = FC_SLIM2_MAILBOX(hba);
3495 offset = (off_t)((uint64_t)((unsigned long)mbox)
3496 - (uint64_t)((unsigned long)slim2p));
3497
3498 hba->flag |= FC_SLIM2_MODE;
3499 }
3500
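		/*
		 * Note: once CONFIG_PORT completes successfully, the
		 * mailbox moves from on-board SLIM to host memory
		 * (SLIM2), so the completion data below is read back
		 * through the DMA handle rather than the SLIM window.
		 */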
3501 /* copy results back to user */
3502 if (hba->flag & FC_SLIM2_MODE) {
3503 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3504 offset, MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
3505
3506 BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
3507 MAILBOX_CMD_BSIZE);
3508 } else {
3509 READ_SLIM_COPY(hba, (uint32_t *)mb,
3510 (uint32_t *)mbox, MAILBOX_CMD_WSIZE);
3511 }
3512
3513 #ifdef MBOX_EXT_SUPPORT
3514 if (mbq->extbuf) {
3515 uint32_t *mbox_ext =
3516 (uint32_t *)((uint8_t *)mbox +
3517 MBOX_EXTENSION_OFFSET);
3518 off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;
3519
3520 if (hba->flag & FC_SLIM2_MODE) {
3521 EMLXS_MPDATA_SYNC(
3522 hba->sli.sli3.slim2.dma_handle, offset_ext,
3523 mbq->extsize, DDI_DMA_SYNC_FORKERNEL);
3524
3525 BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
3526 (uint8_t *)mbq->extbuf, mbq->extsize);
3527 } else {
3528 READ_SLIM_COPY(hba,
3529 (uint32_t *)mbq->extbuf, mbox_ext,
3530 (mbq->extsize / 4));
3531 }
3532 }
3533 #endif /* MBOX_EXT_SUPPORT */
3534
3535 /* Sync the memory buffer */
3536 if (mbq->bp) {
3537 mbox_bp = (MATCHMAP *)mbq->bp;
3538 EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0,
3539 mbox_bp->size, DDI_DMA_SYNC_FORKERNEL);
3540 }
3541
3542 if (mb->mbxCommand != MBX_DOWN_LOAD &&
3543 mb->mbxCommand != MBX_DUMP_MEMORY) {
3544 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3545 "Completed. %s: mb=%p status=%x Polled.",
3546 emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3547 mb->mbxStatus);
3548 }
3549
3550 /* Process the result */
3551 if (!(mbq->flag & MBQ_PASSTHRU)) {
3552 if (mbq->mbox_cmpl) {
3553 (void) (mbq->mbox_cmpl)(hba, mbq);
3554 }
3555 }
3556
3557 /* Clear the attention bit */
3558 WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3559
3560 /* Clean up the mailbox area */
3561 emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3562
3563 break;
3564
3565 } /* switch (flag) */
3566
3567 return (mb->mbxStatus);
3568
3569 } /* emlxs_sli3_issue_mbox_cmd() */
3570
3571
3572 #ifdef SFCT_SUPPORT
3573 /*ARGSUSED*/
3574 static uint32_t
3575 emlxs_sli3_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp,
3576 int channel)
3577 {
3578 emlxs_hba_t *hba = HBA;
3579 emlxs_config_t *cfg = &CFG;
3580 fct_cmd_t *fct_cmd;
3581 stmf_data_buf_t *dbuf;
3582 scsi_task_t *fct_task;
3583 fc_packet_t *pkt;
3584 uint32_t did;
3585 IOCBQ *iocbq;
3586 IOCB *iocb;
3587 uint32_t timeout;
3588 uint32_t iotag;
3589 emlxs_node_t *ndlp;
3590 CHANNEL *cp;
3591 ddi_dma_cookie_t *cp_cmd;
3592
3593 pkt = PRIV2PKT(cmd_sbp);
3594
3595 cp = (CHANNEL *)cmd_sbp->channel;
3596
3597 iocbq = &cmd_sbp->iocbq;
3598 iocb = &iocbq->iocb;
3599
3600
3601 /* Get the iotag by registering the packet */
3602 iotag = emlxs_register_pkt(cp, cmd_sbp);
3603
3604 if (!iotag) {
3605 /* No more command slots available, retry later */
3606 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3607 "Adapter Busy. Unable to allocate iotag. did=0x%x",
3608 cmd_sbp->did);
3609
3610 return (IOERR_NO_RESOURCES);
3611 }
3612
3613
3614 /* Point of no return */
3615
3616 if (iocb->ULPCOMMAND == CMD_ABORT_XRI_CX) {
3617
3618 ndlp = cmd_sbp->node;
3619 cp->ulpSendCmd++;
3620
3621 		/* Initialize iocbq */
3622 iocbq->port = (void *)port;
3623 iocbq->node = (void *)ndlp;
3624 iocbq->channel = (void *)cp;
3625
3626 /*
3627 * Don't give the abort priority, we want the IOCB
3628 * we are aborting to be processed first.
3629 */
3630 iocbq->flag |= IOCB_SPECIAL;
3631
3632 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
3633 iocb->ULPIOTAG = (uint16_t)iotag;
3634 iocb->ULPLE = 1;
3635 iocb->ULPCLASS = cmd_sbp->class;
3636 iocb->ULPOWNER = OWN_CHIP;
3637
3638 if (hba->state >= FC_LINK_UP) {
3639 /* Create the abort IOCB */
3640 iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
3641 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
3642
3643 } else {
3644 /* Create the close IOCB */
3645 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
3646
3647 }
3648
3649 iocb->ULPRSVDBYTE =
3650 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3651 /* Set the pkt timer */
3652 cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
3653 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
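		/*
		 * Note: ULPRSVDBYTE holds an 8-bit firmware timeout, so
		 * timeouts above 255 seconds are passed to the firmware
		 * as zero (presumably no firmware timeout) and left
		 * entirely to the driver timer; otherwise the driver
		 * timer adds a 10-tick grace period behind the firmware
		 * timeout.
		 */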
3654
3655 return (IOERR_SUCCESS);
3656
3657 } else if (iocb->ULPCOMMAND == CMD_FCP_TRSP64_CX) {
3658
3659 ndlp = cmd_sbp->node;
3660 cp->ulpSendCmd++;
3661
3662 		/* Initialize iocbq */
3663 iocbq->port = (void *)port;
3664 iocbq->node = (void *)ndlp;
3665 iocbq->channel = (void *)cp;
3666
3667 #if (EMLXS_MODREV >= EMLXS_MODREV3)
3668 cp_cmd = pkt->pkt_cmd_cookie;
3669 #else
3670 cp_cmd = &pkt->pkt_cmd_cookie;
3671 #endif /* >= EMLXS_MODREV3 */
3672
3673 iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
3674 iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress);
3675 iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
3676 iocb->un.fcpt64.bdl.bdeFlags = 0;
3677
3678 if (hba->sli_mode < 3) {
3679 iocb->ULPBDECOUNT = 1;
3680 iocb->ULPLE = 1;
3681 } else { /* SLI3 */
3682
3683 iocb->ULPBDECOUNT = 0;
3684 iocb->ULPLE = 0;
3685 iocb->unsli3.ext_iocb.ebde_count = 0;
3686 }
3687
3688 		/* Initialize iocb */
3689 iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
3690 iocb->ULPIOTAG = (uint16_t)iotag;
3691 iocb->ULPRSVDBYTE =
3692 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3693 iocb->ULPOWNER = OWN_CHIP;
3694 iocb->ULPCLASS = cmd_sbp->class;
3695 iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
3696
3697 /* Set the pkt timer */
3698 cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
3699 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
3700
3701 if (pkt->pkt_cmdlen) {
3702 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
3703 DDI_DMA_SYNC_FORDEV);
3704 }
3705
3706 return (IOERR_SUCCESS);
3707 }
3708
3709 dbuf = cmd_sbp->fct_buf;
3710 fct_cmd = cmd_sbp->fct_cmd;
3711 fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
3712 ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
3713 did = fct_cmd->cmd_rportid;
3714
3715 iocbq->channel = (void *)cmd_sbp->channel;
3716
3717 if (emlxs_fct_bde_setup(port, cmd_sbp)) {
3718 /* Unregister the packet */
3719 (void) emlxs_unregister_pkt(cmd_sbp->channel, iotag, 0);
3720
3721 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3722 "Adapter Busy. Unable to setup buffer list. did=%x", did);
3723
3724 return (IOERR_INTERNAL_ERROR);
3725 }
3726
3727 if (cfg[CFG_TIMEOUT_ENABLE].current) {
3728 timeout =
3729 ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
3730 } else {
3731 		timeout = 0x80000000;	/* effectively disables the timeout */
3732 }
3733
3734 cmd_sbp->ticks =
3735 hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);
3736
3737 	/* Initialize iocbq */
3738 iocbq->port = (void *)port;
3739 iocbq->node = (void *)ndlp;
3740
3741 	/* Initialize iocb */
3742 iocb->ULPCONTEXT = (uint16_t)fct_cmd->cmd_rxid;
3743 iocb->ULPIOTAG = (uint16_t)iotag;
3744 iocb->ULPRSVDBYTE = ((timeout > 0xff) ? 0 : timeout);
3745 iocb->ULPOWNER = OWN_CHIP;
3746 iocb->ULPCLASS = cmd_sbp->class;
3747
3748 iocb->ULPPU = 1; /* Wd4 is relative offset */
3749 iocb->un.fcpt64.fcpt_Offset = dbuf->db_relative_offset;
3750
3751 if (fct_task->task_flags & TF_WRITE_DATA) {
3752 iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
3753 } else { /* TF_READ_DATA */
3754
3755 iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;
3756
3757 if ((hba->sli_mode == EMLXS_HBA_SLI3_MODE) &&
3758 (dbuf->db_data_size >=
3759 fct_task->task_expected_xfer_length)) {
3760 			/* Enable the auto-response (AP) feature */
3761 			iocb->ULPCT = 0x1;
3762 }
3763 }
3764
3765 return (IOERR_SUCCESS);
3766
3767 } /* emlxs_sli3_prep_fct_iocb() */
3768 #endif /* SFCT_SUPPORT */
3769
3770 /* ARGSUSED */
3771 static uint32_t
3772 emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
3773 {
3774 emlxs_hba_t *hba = HBA;
3775 fc_packet_t *pkt;
3776 CHANNEL *cp;
3777 IOCBQ *iocbq;
3778 IOCB *iocb;
3779 NODELIST *ndlp;
3780 uint16_t iotag;
3781 uint32_t did;
3782
3783 pkt = PRIV2PKT(sbp);
3784 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3785 cp = &hba->chan[FC_FCP_RING];
3786
3787 iocbq = &sbp->iocbq;
3788 iocb = &iocbq->iocb;
3789
3790 /* Find target node object */
3791 ndlp = (NODELIST *)iocbq->node;
3792
3793 /* Get the iotag by registering the packet */
3794 iotag = emlxs_register_pkt(cp, sbp);
3795
3796 if (!iotag) {
3797 /*
3798 * No more command slots available, retry later
3799 */
3800 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3801 "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3802
3803 return (FC_TRAN_BUSY);
3804 }
3805
3806 	/* Initialize iocbq */
3807 iocbq->port = (void *) port;
3808 iocbq->channel = (void *) cp;
3809
3810 /* Indicate this is a FCP cmd */
3811 iocbq->flag |= IOCB_FCP_CMD;
3812
3813 if (emlxs_bde_setup(port, sbp)) {
3814 /* Unregister the packet */
3815 (void) emlxs_unregister_pkt(cp, iotag, 0);
3816
3817 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3818 "Adapter Busy. Unable to setup buffer list. did=%x", did);
3819
3820 return (FC_TRAN_BUSY);
3821 }
3822 /* Point of no return */
3823
3824 	/* Initialize iocb */
3825 iocb->ULPCONTEXT = ndlp->nlp_Rpi;
3826 iocb->ULPIOTAG = iotag;
3827 iocb->ULPRSVDBYTE =
3828 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3829 iocb->ULPOWNER = OWN_CHIP;
3830
3831 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3832 case FC_TRAN_CLASS1:
3833 iocb->ULPCLASS = CLASS1;
3834 break;
3835 case FC_TRAN_CLASS2:
3836 iocb->ULPCLASS = CLASS2;
3837 /* iocb->ULPCLASS = CLASS3; */
3838 break;
3839 case FC_TRAN_CLASS3:
3840 default:
3841 iocb->ULPCLASS = CLASS3;
3842 break;
3843 }
3844
3845 	/* If the device is an FCP-2 device, set the bit that tells */
3846 	/* the adapter to run the FC-TAPE recovery protocol. */
3847 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
3848 iocb->ULPFCP2RCVY = 1;
3849 }
3850
3851 if (pkt->pkt_datalen == 0) {
3852 iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
3853 } else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
3854 iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
3855 iocb->ULPPU = PARM_XFER_CHECK;
3856 iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
3857 } else {
3858 iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
3859 }
3860
3861 return (FC_SUCCESS);
3862
3863 } /* emlxs_sli3_prep_fcp_iocb() */
3864
3865
3866 static uint32_t
3867 emlxs_sli3_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3868 {
3869 emlxs_hba_t *hba = HBA;
3870 fc_packet_t *pkt;
3871 IOCBQ *iocbq;
3872 IOCB *iocb;
3873 CHANNEL *cp;
3874 NODELIST *ndlp;
3875 uint16_t iotag;
3876 uint32_t did;
3877
3878 pkt = PRIV2PKT(sbp);
3879 cp = &hba->chan[FC_IP_RING];
3880 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3881
3882 iocbq = &sbp->iocbq;
3883 iocb = &iocbq->iocb;
3884 ndlp = (NODELIST *)iocbq->node;
3885
3886 /* Get the iotag by registering the packet */
3887 iotag = emlxs_register_pkt(cp, sbp);
3888
3889 if (!iotag) {
3890 /*
3891 * No more command slots available, retry later
3892 */
3893 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3894 "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3895
3896 return (FC_TRAN_BUSY);
3897 }
3898
3899 	/* Initialize iocbq */
3900 iocbq->port = (void *) port;
3901 iocbq->channel = (void *) cp;
3902
3903 if (emlxs_bde_setup(port, sbp)) {
3904 /* Unregister the packet */
3905 (void) emlxs_unregister_pkt(cp, iotag, 0);
3906
3907 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3908 "Adapter Busy. Unable to setup buffer list. did=%x", did);
3909
3910 return (FC_TRAN_BUSY);
3911 }
3912 /* Point of no return */
3913
3914 	/* Initialize iocb */
3915 iocb->un.xseq64.w5.hcsw.Fctl = 0;
3916
3917 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
3918 iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
3919 }
3920 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3921 iocb->un.xseq64.w5.hcsw.Fctl |= SI;
3922 }
3923
3924 /* network headers */
3925 iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
3926 iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
3927 iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
3928
3929 iocb->ULPIOTAG = iotag;
3930 iocb->ULPRSVDBYTE =
3931 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3932 iocb->ULPOWNER = OWN_CHIP;
3933
3934 if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
3935 HBASTATS.IpBcastIssued++;
3936
3937 iocb->ULPCOMMAND = CMD_XMIT_BCAST64_CN;
3938 iocb->ULPCONTEXT = 0;
3939
3940 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
3941 if (hba->topology != TOPOLOGY_LOOP) {
3942 iocb->ULPCT = 0x1;
3943 }
3944 iocb->ULPCONTEXT = port->vpi;
3945 }
3946 } else {
3947 HBASTATS.IpSeqIssued++;
3948
3949 iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3950 iocb->ULPCONTEXT = ndlp->nlp_Xri;
3951 }
3952
3953 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3954 case FC_TRAN_CLASS1:
3955 iocb->ULPCLASS = CLASS1;
3956 break;
3957 case FC_TRAN_CLASS2:
3958 iocb->ULPCLASS = CLASS2;
3959 break;
3960 case FC_TRAN_CLASS3:
3961 default:
3962 iocb->ULPCLASS = CLASS3;
3963 break;
3964 }
3965
3966 return (FC_SUCCESS);
3967
3968 } /* emlxs_sli3_prep_ip_iocb() */
3969
3970
3971 static uint32_t
3972 emlxs_sli3_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3973 {
3974 emlxs_hba_t *hba = HBA;
3975 fc_packet_t *pkt;
3976 IOCBQ *iocbq;
3977 IOCB *iocb;
3978 CHANNEL *cp;
3979 uint16_t iotag;
3980 uint32_t did;
3981 uint32_t cmd;
3982
3983 pkt = PRIV2PKT(sbp);
3984 cp = &hba->chan[FC_ELS_RING];
3985 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3986
3987 iocbq = &sbp->iocbq;
3988 iocb = &iocbq->iocb;
3989
3990
3991 /* Get the iotag by registering the packet */
3992 iotag = emlxs_register_pkt(cp, sbp);
3993
3994 if (!iotag) {
3995 /*
3996 * No more command slots available, retry later
3997 */
3998 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3999 "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
4000
4001 return (FC_TRAN_BUSY);
4002 }
4003 	/* Initialize iocbq */
4004 iocbq->port = (void *) port;
4005 iocbq->channel = (void *) cp;
4006
4007 if (emlxs_bde_setup(port, sbp)) {
4008 /* Unregister the packet */
4009 (void) emlxs_unregister_pkt(cp, iotag, 0);
4010
4011 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4012 "Adapter Busy. Unable to setup buffer list. did=%x", did);
4013
4014 return (FC_TRAN_BUSY);
4015 }
4016 /* Point of no return */
4017
4018 	/* Initialize iocb */
4019 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
4020 /* ELS Response */
4021 iocb->ULPCONTEXT = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
4022 iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
4023 } else {
4024 /* ELS Request */
4025 iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
4026 iocb->ULPCONTEXT =
4027 (did == BCAST_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
4028 iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
4029
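		/*
		 * Note: on SLI-3 non-loop (fabric) topologies, ULPCT
		 * appears to select VPI handling for the request: 0x2
		 * for fabric logins (FLOGI/FDISC) and 0x1 for all other
		 * ELS commands, with ULPCONTEXT carrying the issuing
		 * port's VPI.
		 */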
4030 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
4031 if (hba->topology != TOPOLOGY_LOOP) {
4032 cmd = *((uint32_t *)pkt->pkt_cmd);
4033 cmd &= ELS_CMD_MASK;
4034
4035 if ((cmd == ELS_CMD_FLOGI) ||
4036 (cmd == ELS_CMD_FDISC)) {
4037 iocb->ULPCT = 0x2;
4038 } else {
4039 iocb->ULPCT = 0x1;
4040 }
4041 }
4042 iocb->ULPCONTEXT = port->vpi;
4043 }
4044 }
4045 iocb->ULPIOTAG = iotag;
4046 iocb->ULPRSVDBYTE =
4047 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4048 iocb->ULPOWNER = OWN_CHIP;
4049
4050 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4051 case FC_TRAN_CLASS1:
4052 iocb->ULPCLASS = CLASS1;
4053 break;
4054 case FC_TRAN_CLASS2:
4055 iocb->ULPCLASS = CLASS2;
4056 break;
4057 case FC_TRAN_CLASS3:
4058 default:
4059 iocb->ULPCLASS = CLASS3;
4060 break;
4061 }
4062 sbp->class = iocb->ULPCLASS;
4063
4064 return (FC_SUCCESS);
4065
4066 } /* emlxs_sli3_prep_els_iocb() */
4067
4068
4069 static uint32_t
4070 emlxs_sli3_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
4071 {
4072 emlxs_hba_t *hba = HBA;
4073 fc_packet_t *pkt;
4074 IOCBQ *iocbq;
4075 IOCB *iocb;
4076 CHANNEL *cp;
4077 NODELIST *ndlp;
4078 uint16_t iotag;
4079 uint32_t did;
4080
4081 pkt = PRIV2PKT(sbp);
4082 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
4083 cp = &hba->chan[FC_CT_RING];
4084
4085 iocbq = &sbp->iocbq;
4086 iocb = &iocbq->iocb;
4087 ndlp = (NODELIST *)iocbq->node;
4088
4089 /* Get the iotag by registering the packet */
4090 iotag = emlxs_register_pkt(cp, sbp);
4091
4092 if (!iotag) {
4093 /*
4094 * No more command slots available, retry later
4095 */
4096 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4097 "Adapter Busy. Unable to allocate iotag. did=0x%x", did);
4098
4099 return (FC_TRAN_BUSY);
4100 }
4101
4102 if (emlxs_bde_setup(port, sbp)) {
4103 /* Unregister the packet */
4104 (void) emlxs_unregister_pkt(cp, iotag, 0);
4105
4106 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4107 "Adapter Busy. Unable to setup buffer list. did=%x", did);
4108
4109 return (FC_TRAN_BUSY);
4110 }
4111
4112 /* Point of no return */
4113
4114 	/* Initialize iocbq */
4115 iocbq->port = (void *) port;
4116 iocbq->channel = (void *) cp;
4117
4118 /* Fill in rest of iocb */
4119 iocb->un.genreq64.w5.hcsw.Fctl = LA;
4120
4121 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
4122 iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
4123 }
4124 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
4125 iocb->un.genreq64.w5.hcsw.Fctl |= SI;
4126 }
4127
4128 	/* Initialize iocb */
4129 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
4130 /* CT Response */
4131 iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
4132 iocb->un.genreq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
4133 iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
4134 } else {
4135 /* CT Request */
4136 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
4137 iocb->un.genreq64.w5.hcsw.Dfctl = 0;
4138 iocb->ULPCONTEXT = ndlp->nlp_Rpi;
4139 }
4140
4141 iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
4142 iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
4143
4144 iocb->ULPIOTAG = iotag;
4145 iocb->ULPRSVDBYTE =
4146 ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4147 iocb->ULPOWNER = OWN_CHIP;
4148
4149 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4150 case FC_TRAN_CLASS1:
4151 iocb->ULPCLASS = CLASS1;
4152 break;
4153 case FC_TRAN_CLASS2:
4154 iocb->ULPCLASS = CLASS2;
4155 break;
4156 case FC_TRAN_CLASS3:
4157 default:
4158 iocb->ULPCLASS = CLASS3;
4159 break;
4160 }
4161
4162 return (FC_SUCCESS);
4163
4164 } /* emlxs_sli3_prep_ct_iocb() */
4165
4166
4167 #ifdef SFCT_SUPPORT
4168 static uint32_t
4169 emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
4170 {
4171 emlxs_hba_t *hba = HBA;
4172 uint32_t rval;
4173
4174 if (sbp->fct_buf->db_sglist_length != 1) {
4175 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
4176 "fct_bde_setup: Only 1 sglist entry supported: %d",
4177 sbp->fct_buf->db_sglist_length);
4178 return (1);
4179 }
4180
4181 if (hba->sli_mode < EMLXS_HBA_SLI3_MODE) {
4182 rval = emlxs_sli2_fct_bde_setup(port, sbp);
4183 } else {
4184 rval = emlxs_sli3_fct_bde_setup(port, sbp);
4185 }
4186
4187 return (rval);
4188
4189 } /* emlxs_fct_bde_setup() */
4190 #endif /* SFCT_SUPPORT */
4191
4192
4193 static uint32_t
4194 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
4195 {
4196 uint32_t rval;
4197 emlxs_hba_t *hba = HBA;
4198
4199 if (hba->sli_mode < EMLXS_HBA_SLI3_MODE) {
4200 rval = emlxs_sli2_bde_setup(port, sbp);
4201 } else {
4202 rval = emlxs_sli3_bde_setup(port, sbp);
4203 }
4204
4205 return (rval);
4206
4207 } /* emlxs_bde_setup() */
4208
4209
4210 static void
4211 emlxs_sli3_poll_intr(emlxs_hba_t *hba)
4212 {
4213 uint32_t ha_copy;
4214
4215 /* Check attention bits once and process if required */
4216
4217 ha_copy = emlxs_check_attention(hba);
4218
4219 if (ha_copy == 0) {
4220 return;
4221 }
4222
4223 mutex_enter(&EMLXS_PORT_LOCK);
4224 ha_copy = emlxs_get_attention(hba, -1);
4225 mutex_exit(&EMLXS_PORT_LOCK);
4226
4227 emlxs_proc_attention(hba, ha_copy);
4228
4229 return;
4230
4231 } /* emlxs_sli3_poll_intr() */
4232
4233
4234 #ifdef MSI_SUPPORT
4235 static uint32_t
4236 emlxs_sli3_msi_intr(char *arg1, char *arg2)
4237 {
4238 emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
4239 #ifdef FMA_SUPPORT
4240 emlxs_port_t *port = &PPORT;
4241 #endif /* FMA_SUPPORT */
4242 uint16_t msgid;
4243 uint32_t hc_copy;
4244 uint32_t ha_copy;
4245 uint32_t restore = 0;
4246
4247 /*
4248 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
4249 * "sli3_msi_intr: arg1=%p arg2=%p", arg1, arg2);
4250 */
4251
4252 /* Check for legacy interrupt handling */
4253 if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
4254 mutex_enter(&EMLXS_PORT_LOCK);
4255
4256 if (hba->flag & FC_OFFLINE_MODE) {
4257 mutex_exit(&EMLXS_PORT_LOCK);
4258
4259 if (hba->bus_type == SBUS_FC) {
4260 return (DDI_INTR_CLAIMED);
4261 } else {
4262 return (DDI_INTR_UNCLAIMED);
4263 }
4264 }
4265
4266 /* Get host attention bits */
4267 ha_copy = emlxs_get_attention(hba, -1);
4268
4269 if (ha_copy == 0) {
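			/*
			 * Claim the first interrupt that shows no
			 * attention bits, but report repeated empty
			 * interrupts as unclaimed so the OS can
			 * detect a stuck interrupt line.
			 */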
4270 if (hba->intr_unclaimed) {
4271 mutex_exit(&EMLXS_PORT_LOCK);
4272 return (DDI_INTR_UNCLAIMED);
4273 }
4274
4275 hba->intr_unclaimed = 1;
4276 } else {
4277 hba->intr_unclaimed = 0;
4278 }
4279
4280 mutex_exit(&EMLXS_PORT_LOCK);
4281
4282 /* Process the interrupt */
4283 emlxs_proc_attention(hba, ha_copy);
4284
4285 return (DDI_INTR_CLAIMED);
4286 }
4287
4288 /* DDI_INTR_TYPE_MSI */
4289 /* DDI_INTR_TYPE_MSIX */
4290
4291 /* Get MSI message id */
4292 msgid = (uint16_t)((unsigned long)arg2);
4293
4294 /* Validate the message id */
4295 if (msgid >= hba->intr_count) {
4296 msgid = 0;
4297 }
4298
4299 mutex_enter(&EMLXS_INTR_LOCK(msgid));
4300
4301 mutex_enter(&EMLXS_PORT_LOCK);
4302
4303 /* Check if adapter is offline */
4304 if (hba->flag & FC_OFFLINE_MODE) {
4305 mutex_exit(&EMLXS_PORT_LOCK);
4306 mutex_exit(&EMLXS_INTR_LOCK(msgid));
4307
4308 /* Always claim an MSI interrupt */
4309 return (DDI_INTR_CLAIMED);
4310 }
4311
4312 /* Disable interrupts associated with this msgid */
4313 if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
4314 hc_copy = hba->sli.sli3.hc_copy & ~hba->intr_mask;
4315 WRITE_CSR_REG(hba, FC_HC_REG(hba), hc_copy);
4316 restore = 1;
4317 }
4318
4319 /* Get host attention bits */
4320 ha_copy = emlxs_get_attention(hba, msgid);
4321
4322 mutex_exit(&EMLXS_PORT_LOCK);
4323
4324 /* Process the interrupt */
4325 emlxs_proc_attention(hba, ha_copy);
4326
4327 /* Restore interrupts */
4328 if (restore) {
4329 mutex_enter(&EMLXS_PORT_LOCK);
4330 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
4331 #ifdef FMA_SUPPORT
4332 /* Access handle validation */
4333 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4334 #endif /* FMA_SUPPORT */
4335 mutex_exit(&EMLXS_PORT_LOCK);
4336 }
4337
4338 mutex_exit(&EMLXS_INTR_LOCK(msgid));
4339
4340 return (DDI_INTR_CLAIMED);
4341
4342 } /* emlxs_sli3_msi_intr() */
4343 #endif /* MSI_SUPPORT */
4344
4345
4346 static int
4347 emlxs_sli3_intx_intr(char *arg)
4348 {
4349 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4350 uint32_t ha_copy = 0;
4351
4352 mutex_enter(&EMLXS_PORT_LOCK);
4353
4354 if (hba->flag & FC_OFFLINE_MODE) {
4355 mutex_exit(&EMLXS_PORT_LOCK);
4356
4357 if (hba->bus_type == SBUS_FC) {
4358 return (DDI_INTR_CLAIMED);
4359 } else {
4360 return (DDI_INTR_UNCLAIMED);
4361 }
4362 }
4363
4364 /* Get host attention bits */
4365 ha_copy = emlxs_get_attention(hba, -1);
4366
4367 if (ha_copy == 0) {
4368 if (hba->intr_unclaimed) {
4369 mutex_exit(&EMLXS_PORT_LOCK);
4370 return (DDI_INTR_UNCLAIMED);
4371 }
4372
4373 hba->intr_unclaimed = 1;
4374 } else {
4375 hba->intr_unclaimed = 0;
4376 }
4377
4378 mutex_exit(&EMLXS_PORT_LOCK);
4379
4380 /* Process the interrupt */
4381 emlxs_proc_attention(hba, ha_copy);
4382
4383 return (DDI_INTR_CLAIMED);
4384
4385 } /* emlxs_sli3_intx_intr() */
4386
4387
/* EMLXS_PORT_LOCK must be held when calling this routine */
4389 static uint32_t
4390 emlxs_get_attention(emlxs_hba_t *hba, int32_t msgid)
4391 {
4392 #ifdef FMA_SUPPORT
4393 emlxs_port_t *port = &PPORT;
4394 #endif /* FMA_SUPPORT */
4395 uint32_t ha_copy = 0;
4396 uint32_t ha_copy2;
4397 uint32_t mask = hba->sli.sli3.hc_copy;
4398
4399 #ifdef MSI_SUPPORT
4400
4401 read_ha_register:
4402
4403 /* Check for default MSI interrupt */
4404 if (msgid == 0) {
4405 /* Read host attention register to determine interrupt source */
4406 ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
4407
4408 /* Filter out MSI non-default attention bits */
4409 ha_copy2 &= ~(hba->intr_cond);
4410 }
4411
4412 /* Check for polled or fixed type interrupt */
4413 else if (msgid == -1) {
4414 /* Read host attention register to determine interrupt source */
4415 ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
4416 }
4417
4418 /* Otherwise, assume a mapped MSI interrupt */
4419 else {
4420 /* Convert MSI msgid to mapped attention bits */
4421 ha_copy2 = hba->intr_map[msgid];
4422 }
4423
4424 #else /* !MSI_SUPPORT */
4425
4426 /* Read host attention register to determine interrupt source */
4427 ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
4428
4429 #endif /* MSI_SUPPORT */
4430
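	/*
	 * Mask off any attention bits whose corresponding enable bit
	 * is not set in the cached host control (HC) register copy,
	 * so only enabled interrupt sources are reported and cleared.
	 */
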
4431 /* Check if Hardware error interrupt is enabled */
4432 if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
4433 ha_copy2 &= ~HA_ERATT;
4434 }
4435
4436 /* Check if link interrupt is enabled */
4437 if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
4438 ha_copy2 &= ~HA_LATT;
4439 }
4440
4441 /* Check if Mailbox interrupt is enabled */
4442 if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
4443 ha_copy2 &= ~HA_MBATT;
4444 }
4445
4446 /* Check if ring0 interrupt is enabled */
4447 if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
4448 ha_copy2 &= ~HA_R0ATT;
4449 }
4450
4451 /* Check if ring1 interrupt is enabled */
4452 if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
4453 ha_copy2 &= ~HA_R1ATT;
4454 }
4455
4456 /* Check if ring2 interrupt is enabled */
4457 if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
4458 ha_copy2 &= ~HA_R2ATT;
4459 }
4460
4461 /* Check if ring3 interrupt is enabled */
4462 if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
4463 ha_copy2 &= ~HA_R3ATT;
4464 }
4465
4466 /* Accumulate attention bits */
4467 ha_copy |= ha_copy2;
4468
4469 /* Clear attentions except for error, link, and autoclear(MSIX) */
4470 ha_copy2 &= ~(HA_ERATT | HA_LATT); /* | hba->intr_autoClear */
4471
4472 if (ha_copy2) {
4473 WRITE_CSR_REG(hba, FC_HA_REG(hba), ha_copy2);
4474 }
4475
4476 #ifdef FMA_SUPPORT
4477 /* Access handle validation */
4478 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4479 #endif /* FMA_SUPPORT */
4480
4481 return (ha_copy);
4482
4483 } /* emlxs_get_attention() */
4484
4485
4486 static void
4487 emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
4488 {
4489 #ifdef FMA_SUPPORT
4490 emlxs_port_t *port = &PPORT;
4491 #endif /* FMA_SUPPORT */
4492
4493 /* ha_copy should be pre-filtered */
4494
4495 /*
4496 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4497 * "proc_attention: ha_copy=%x", ha_copy);
4498 */
4499
4500 if (hba->state < FC_WARM_START) {
4501 return;
4502 }
4503
4504 if (!ha_copy) {
4505 return;
4506 }
4507
4508 if (hba->bus_type == SBUS_FC) {
4509 (void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
4510 }
4511
4512 /* Adapter error */
4513 if (ha_copy & HA_ERATT) {
4514 HBASTATS.IntrEvent[6]++;
4515 emlxs_handle_ff_error(hba);
4516 return;
4517 }
4518
4519 /* Mailbox interrupt */
4520 if (ha_copy & HA_MBATT) {
4521 HBASTATS.IntrEvent[5]++;
4522 (void) emlxs_handle_mb_event(hba);
4523 }
4524
4525 /* Link Attention interrupt */
4526 if (ha_copy & HA_LATT) {
4527 HBASTATS.IntrEvent[4]++;
4528 emlxs_sli3_handle_link_event(hba);
4529 }
4530
4531 /* event on ring 0 - FCP Ring */
4532 if (ha_copy & HA_R0ATT) {
4533 HBASTATS.IntrEvent[0]++;
4534 emlxs_sli3_handle_ring_event(hba, 0, ha_copy);
4535 }
4536
4537 /* event on ring 1 - IP Ring */
4538 if (ha_copy & HA_R1ATT) {
4539 HBASTATS.IntrEvent[1]++;
4540 emlxs_sli3_handle_ring_event(hba, 1, ha_copy);
4541 }
4542
4543 /* event on ring 2 - ELS Ring */
4544 if (ha_copy & HA_R2ATT) {
4545 HBASTATS.IntrEvent[2]++;
4546 emlxs_sli3_handle_ring_event(hba, 2, ha_copy);
4547 }
4548
4549 /* event on ring 3 - CT Ring */
4550 if (ha_copy & HA_R3ATT) {
4551 HBASTATS.IntrEvent[3]++;
4552 emlxs_sli3_handle_ring_event(hba, 3, ha_copy);
4553 }
4554
4555 if (hba->bus_type == SBUS_FC) {
4556 WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), SBUS_STAT_IP);
4557 }
4558
4559 /* Set heartbeat flag to show activity */
4560 hba->heartbeat_flag = 1;
4561
4562 #ifdef FMA_SUPPORT
4563 if (hba->bus_type == SBUS_FC) {
4564 /* Access handle validation */
4565 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
4566 }
4567 #endif /* FMA_SUPPORT */
4568
4569 return;
4570
4571 } /* emlxs_proc_attention() */
4572
4573
4574 /*
4575 * emlxs_handle_ff_error()
4576 *
4577 * Description: Processes a FireFly error
4578 * Runs at Interrupt level
4579 */
4580 static void
4581 emlxs_handle_ff_error(emlxs_hba_t *hba)
4582 {
4583 emlxs_port_t *port = &PPORT;
4584 uint32_t status;
4585 uint32_t status1;
4586 uint32_t status2;
4587 int i = 0;
4588
4589 /* do what needs to be done, get error from STATUS REGISTER */
4590 status = READ_CSR_REG(hba, FC_HS_REG(hba));
4591
4592 /* Clear Chip error bit */
4593 WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_ERATT);
4594
4595 /* If HS_FFER1 is set, then wait until the HS_FFER1 bit clears */
4596 if (status & HS_FFER1) {
4597
4598 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4599 "HS_FFER1 received");
4600 EMLXS_STATE_CHANGE(hba, FC_ERROR);
4601 (void) emlxs_offline(hba, 1);
4602 while ((status & HS_FFER1) && (i < 300)) {
4603 status =
4604 READ_CSR_REG(hba, FC_HS_REG(hba));
4605 BUSYWAIT_MS(1000);
4606 i++;
4607 }
4608 }
4609
4610 if (i == 300) {
4611 /* 5 minutes is up, shutdown HBA */
4612 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4613 "HS_FFER1 clear timeout");
4614
4615 EMLXS_STATE_CHANGE(hba, FC_ERROR);
4616 emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
4617
4618 goto done;
4619 }
4620
4621 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4622 "HS_FFER1 cleared");
4623
4624 if (status & HS_OVERTEMP) {
4625 status1 =
4626 READ_SLIM_ADDR(hba,
4627 ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xb0));
4628
4629 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4630 "Maximum adapter temperature exceeded (%d °C).", status1);
4631
4632 hba->temperature = status1;
4633 hba->flag |= FC_OVERTEMP_EVENT;
4634
4635 EMLXS_STATE_CHANGE(hba, FC_ERROR);
4636 emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4637 NULL, NULL);
4638
4639 } else {
4640 status1 =
4641 READ_SLIM_ADDR(hba,
4642 ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xa8));
4643 status2 =
4644 READ_SLIM_ADDR(hba,
4645 ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xac));
4646
4647 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4648 "Host Error Attention: "
4649 "status=0x%x status1=0x%x status2=0x%x",
4650 status, status1, status2);
4651
4652 EMLXS_STATE_CHANGE(hba, FC_ERROR);
4653
4654 if (status & HS_FFER6) {
4655 emlxs_thread_spawn(hba, emlxs_restart_thread,
4656 NULL, NULL);
4657 } else {
4658 emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4659 NULL, NULL);
4660 }
4661 }
4662
4663 done:
4664 #ifdef FMA_SUPPORT
4665 /* Access handle validation */
4666 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
4667 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4668 #endif /* FMA_SUPPORT */
4669
4670 return;
4671
4672 } /* emlxs_handle_ff_error() */
4673
4674
4675 /*
4676 * emlxs_sli3_handle_link_event()
4677 *
4678 * Description: Process a Link Attention.
4679 */
4680 static void
4681 emlxs_sli3_handle_link_event(emlxs_hba_t *hba)
4682 {
4683 emlxs_port_t *port = &PPORT;
4684 MAILBOXQ *mbq;
4685 int rc;
4686
4687 HBASTATS.LinkEvent++;
4688
4689 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg, "event=%x",
4690 HBASTATS.LinkEvent);
4691
4692 /* Make sure link is declared down */
4693 emlxs_linkdown(hba);
4694
4695 /* Get a buffer which will be used for mailbox commands */
4696 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
4697 /* Get link attention message */
4698 if (emlxs_mb_read_la(hba, mbq) == 0) {
4699 rc = emlxs_sli3_issue_mbox_cmd(hba, mbq,
4700 MBX_NOWAIT, 0);
4701 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4702 emlxs_mem_put(hba, MEM_MBOX,
4703 (void *)mbq);
4704 }
4705
4706 mutex_enter(&EMLXS_PORT_LOCK);
4707
4708 /*
4709 * Clear Link Attention in HA REG
4710 */
4711 WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_LATT);
4712
4713 #ifdef FMA_SUPPORT
4714 /* Access handle validation */
4715 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4716 #endif /* FMA_SUPPORT */
4717
4718 mutex_exit(&EMLXS_PORT_LOCK);
4719 } else {
4720 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4721 }
4722 }
4723
4724 } /* emlxs_sli3_handle_link_event() */
4725
4726
4727 /*
4728 * emlxs_sli3_handle_ring_event()
4729 *
4730 * Description: Process a Ring Attention.
4731 */
4732 static void
4733 emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
4734 uint32_t ha_copy)
4735 {
4736 emlxs_port_t *port = &PPORT;
4737 SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
4738 CHANNEL *cp;
4739 RING *rp;
4740 IOCB *entry;
4741 IOCBQ *iocbq;
4742 IOCBQ local_iocbq;
4743 PGP *pgp;
4744 uint32_t count;
4745 volatile uint32_t chipatt;
4746 void *ioa2;
4747 uint32_t reg;
4748 uint32_t channel_no;
4749 off_t offset;
4750 IOCBQ *rsp_head = NULL;
4751 IOCBQ *rsp_tail = NULL;
4752 emlxs_buf_t *sbp = NULL;
4753
4754 count = 0;
4755 rp = &hba->sli.sli3.ring[ring_no];
4756 cp = rp->channelp;
4757 channel_no = cp->channelno;
4758
4759 /*
4760 * Isolate this ring's host attention bits
4761 * This makes all ring attention bits equal
4762 * to Ring0 attention bits
4763 */
4764 reg = (ha_copy >> (ring_no * 4)) & 0x0f;
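	/*
	 * Example: each ring owns a 4-bit nibble of the HA register,
	 * so for ring 2 this shifts bits 8-11 down to bits 0-3 where
	 * they match the Ring0 definitions (HA_R0ATT, HA_R0CE_RSP,
	 * HA_R0RE_REQ).
	 */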
4765
4766 /*
4767 * Gather iocb entries off response ring.
4768 * Ensure entry is owned by the host.
4769 */
4770 pgp = (PGP *)&slim2p->mbx.us.s2.port[ring_no];
4771 offset =
4772 (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx)) -
4773 (uint64_t)((unsigned long)slim2p));
4774 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
4775 DDI_DMA_SYNC_FORKERNEL);
4776 rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);
4777
4778 /* While ring is not empty */
4779 while (rp->fc_rspidx != rp->fc_port_rspidx) {
4780 HBASTATS.IocbReceived[channel_no]++;
4781
4782 /* Get the next response ring iocb */
4783 entry =
4784 (IOCB *)(((char *)rp->fc_rspringaddr +
4785 (rp->fc_rspidx * hba->sli.sli3.iocb_rsp_size)));
4786
4787 /* DMA sync the response ring iocb for the adapter */
4788 offset = (off_t)((uint64_t)((unsigned long)entry)
4789 - (uint64_t)((unsigned long)slim2p));
4790 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
4791 hba->sli.sli3.iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);
4792
4793 count++;
4794
4795 /* Copy word6 and word7 to local iocb for now */
4796 iocbq = &local_iocbq;
4797
4798 BE_SWAP32_BCOPY((uint8_t *)entry + (sizeof (uint32_t) * 6),
4799 (uint8_t *)iocbq + (sizeof (uint32_t) * 6),
4800 (sizeof (uint32_t) * 2));
4801
		/* When LE is not set, the entire command has not been received */
4803 if (!iocbq->iocb.ULPLE) {
4804 /* This should never happen */
4805 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
4806 "ulpLE is not set. "
4807 "ring=%d iotag=%d cmd=%x status=%x",
4808 channel_no, iocbq->iocb.ULPIOTAG,
4809 iocbq->iocb.ULPCOMMAND, iocbq->iocb.ULPSTATUS);
4810
4811 goto next;
4812 }
4813
4814 sbp = NULL;
4815 switch (iocbq->iocb.ULPCOMMAND) {
4816 #ifdef SFCT_SUPPORT
4817 case CMD_CLOSE_XRI_CX:
4818 case CMD_CLOSE_XRI_CN:
4819 case CMD_ABORT_XRI_CX:
4820 if (port->mode == MODE_TARGET) {
4821 sbp = emlxs_unregister_pkt(cp,
4822 iocbq->iocb.ULPIOTAG, 0);
4823 }
4824 break;
4825 #endif /* SFCT_SUPPORT */
4826
4827 /* Ring 0 registered commands */
4828 case CMD_FCP_ICMND_CR:
4829 case CMD_FCP_ICMND_CX:
4830 case CMD_FCP_IREAD_CR:
4831 case CMD_FCP_IREAD_CX:
4832 case CMD_FCP_IWRITE_CR:
4833 case CMD_FCP_IWRITE_CX:
4834 case CMD_FCP_ICMND64_CR:
4835 case CMD_FCP_ICMND64_CX:
4836 case CMD_FCP_IREAD64_CR:
4837 case CMD_FCP_IREAD64_CX:
4838 case CMD_FCP_IWRITE64_CR:
4839 case CMD_FCP_IWRITE64_CX:
4840 #ifdef SFCT_SUPPORT
4841 case CMD_FCP_TSEND_CX:
4842 case CMD_FCP_TSEND64_CX:
4843 case CMD_FCP_TRECEIVE_CX:
4844 case CMD_FCP_TRECEIVE64_CX:
4845 case CMD_FCP_TRSP_CX:
4846 case CMD_FCP_TRSP64_CX:
4847 #endif /* SFCT_SUPPORT */
4848
4849 /* Ring 1 registered commands */
4850 case CMD_XMIT_BCAST_CN:
4851 case CMD_XMIT_BCAST_CX:
4852 case CMD_XMIT_SEQUENCE_CX:
4853 case CMD_XMIT_SEQUENCE_CR:
4854 case CMD_XMIT_BCAST64_CN:
4855 case CMD_XMIT_BCAST64_CX:
4856 case CMD_XMIT_SEQUENCE64_CX:
4857 case CMD_XMIT_SEQUENCE64_CR:
4858 case CMD_CREATE_XRI_CR:
4859 case CMD_CREATE_XRI_CX:
4860
4861 /* Ring 2 registered commands */
4862 case CMD_ELS_REQUEST_CR:
4863 case CMD_ELS_REQUEST_CX:
4864 case CMD_XMIT_ELS_RSP_CX:
4865 case CMD_ELS_REQUEST64_CR:
4866 case CMD_ELS_REQUEST64_CX:
4867 case CMD_XMIT_ELS_RSP64_CX:
4868
4869 /* Ring 3 registered commands */
4870 case CMD_GEN_REQUEST64_CR:
4871 case CMD_GEN_REQUEST64_CX:
4872
4873 sbp =
4874 emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
4875 break;
4876 }
4877
4878 /* If packet is stale, then drop it. */
4879 if (sbp == STALE_PACKET) {
4880 cp->hbaCmplCmd_sbp++;
4881 /* Copy entry to the local iocbq */
4882 BE_SWAP32_BCOPY((uint8_t *)entry,
4883 (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4884
4885 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
4886 "channelno=%d iocb=%p cmd=%x status=%x "
4887 "error=%x iotag=%d context=%x info=%x",
4888 channel_no, iocbq, (uint8_t)iocbq->iocb.ULPCOMMAND,
4889 iocbq->iocb.ULPSTATUS,
4890 (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
4891 (uint16_t)iocbq->iocb.ULPIOTAG,
4892 (uint16_t)iocbq->iocb.ULPCONTEXT,
4893 (uint8_t)iocbq->iocb.ULPRSVDBYTE);
4894
4895 goto next;
4896 }
4897
4898 /*
4899 * If a packet was found, then queue the packet's
4900 * iocb for deferred processing
4901 */
4902 else if (sbp) {
4903 #ifdef SFCT_SUPPORT
4904 fct_cmd_t *fct_cmd;
4905 emlxs_buf_t *cmd_sbp;
4906
4907 fct_cmd = sbp->fct_cmd;
4908 if (fct_cmd) {
4909 cmd_sbp =
4910 (emlxs_buf_t *)fct_cmd->cmd_fca_private;
4911 mutex_enter(&cmd_sbp->fct_mtx);
4912 EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp,
4913 EMLXS_FCT_IOCB_COMPLETE);
4914 mutex_exit(&cmd_sbp->fct_mtx);
4915 }
4916 #endif /* SFCT_SUPPORT */
4917 cp->hbaCmplCmd_sbp++;
4918 atomic_dec_32(&hba->io_active);
4919 #ifdef NODE_THROTTLE_SUPPORT
4920 if (sbp->node) {
4921 atomic_dec_32(&sbp->node->io_active);
4922 }
4923 #endif /* NODE_THROTTLE_SUPPORT */
4924
4925 /* Copy entry to sbp's iocbq */
4926 iocbq = &sbp->iocbq;
4927 BE_SWAP32_BCOPY((uint8_t *)entry,
4928 (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4929
4930 iocbq->next = NULL;
4931
4932 /*
4933 * If this is NOT a polled command completion
4934 * or a driver allocated pkt, then defer pkt
4935 * completion.
4936 */
4937 if (!(sbp->pkt_flags &
4938 (PACKET_POLLED | PACKET_ALLOCATED))) {
4939 /* Add the IOCB to the local list */
4940 if (!rsp_head) {
4941 rsp_head = iocbq;
4942 } else {
4943 rsp_tail->next = iocbq;
4944 }
4945
4946 rsp_tail = iocbq;
4947
4948 goto next;
4949 }
4950 } else {
4951 cp->hbaCmplCmd++;
4952 /* Copy entry to the local iocbq */
4953 BE_SWAP32_BCOPY((uint8_t *)entry,
4954 (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4955
4956 iocbq->next = NULL;
4957 iocbq->bp = NULL;
4958 iocbq->port = &PPORT;
4959 iocbq->channel = cp;
4960 iocbq->node = NULL;
4961 iocbq->sbp = NULL;
4962 iocbq->flag = 0;
4963 }
4964
4965 /* process the channel event now */
4966 emlxs_proc_channel_event(hba, cp, iocbq);
4967
4968 next:
4969 /* Increment the driver's local response get index */
4970 if (++rp->fc_rspidx >= rp->fc_numRiocb) {
4971 rp->fc_rspidx = 0;
4972 }
4973
	} /* while (rp->fc_rspidx != rp->fc_port_rspidx) */
4975
4976 if (rsp_head) {
4977 mutex_enter(&cp->rsp_lock);
4978 if (cp->rsp_head == NULL) {
4979 cp->rsp_head = rsp_head;
4980 cp->rsp_tail = rsp_tail;
4981 } else {
4982 cp->rsp_tail->next = rsp_head;
4983 cp->rsp_tail = rsp_tail;
4984 }
4985 mutex_exit(&cp->rsp_lock);
4986
4987 emlxs_thread_trigger2(&cp->intr_thread, emlxs_proc_channel, cp);
4988 }
4989
4990 /* Check if at least one response entry was processed */
4991 if (count) {
4992 /* Update response get index for the adapter */
4993 if (hba->bus_type == SBUS_FC) {
4994 slim2p->mbx.us.s2.host[channel_no].rspGetInx
4995 = BE_SWAP32(rp->fc_rspidx);
4996
4997 /* DMA sync the index for the adapter */
4998 offset = (off_t)
4999 ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
5000 host[channel_no].rspGetInx))
5001 - (uint64_t)((unsigned long)slim2p));
5002 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5003 offset, 4, DDI_DMA_SYNC_FORDEV);
5004 } else {
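			/*
			 * The host group pointer area holds a
			 * (cmdPutInx, rspGetInx) word pair per ring;
			 * ((channel_no * 2) + 1) selects the odd
			 * word, this ring's response get index.
			 */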
5005 ioa2 =
5006 (void *)((char *)hba->sli.sli3.slim_addr +
5007 hba->sli.sli3.hgp_ring_offset + (((channel_no * 2) +
5008 1) * sizeof (uint32_t)));
5009 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
5010 rp->fc_rspidx);
5011 #ifdef FMA_SUPPORT
5012 /* Access handle validation */
5013 EMLXS_CHK_ACC_HANDLE(hba,
5014 hba->sli.sli3.slim_acc_handle);
5015 #endif /* FMA_SUPPORT */
5016 }
5017
5018 if (reg & HA_R0RE_REQ) {
5019 /* HBASTATS.chipRingFree++; */
5020
5021 mutex_enter(&EMLXS_PORT_LOCK);
5022
5023 /* Tell the adapter we serviced the ring */
5024 chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
5025 (channel_no * 4));
5026 WRITE_CSR_REG(hba, FC_CA_REG(hba), chipatt);
5027
5028 #ifdef FMA_SUPPORT
5029 /* Access handle validation */
5030 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
5031 #endif /* FMA_SUPPORT */
5032
5033 mutex_exit(&EMLXS_PORT_LOCK);
5034 }
5035 }
5036
5037 if ((reg & HA_R0CE_RSP) || hba->channel_tx_count) {
5038 /* HBASTATS.hostRingFree++; */
5039
5040 /* Cmd ring may be available. Try sending more iocbs */
5041 emlxs_sli3_issue_iocb_cmd(hba, cp, 0);
5042 }
5043
5044 /* HBASTATS.ringEvent++; */
5045
5046 return;
5047
5048 } /* emlxs_sli3_handle_ring_event() */
5049
5050
5051 extern int
5052 emlxs_handle_rcv_seq(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
5053 {
5054 emlxs_port_t *port = &PPORT;
5055 IOCB *iocb;
5056 RING *rp;
5057 MATCHMAP *mp = NULL;
5058 uint64_t bdeAddr;
5059 uint32_t vpi = 0;
5060 uint32_t channelno;
5061 uint32_t size = 0;
5062 uint32_t *RcvError;
5063 uint32_t *RcvDropped;
5064 uint32_t *UbPosted;
5065 emlxs_msg_t *dropped_msg;
5066 char error_str[64];
5067 uint32_t buf_type;
5068 uint32_t *word;
5069
5070 channelno = cp->channelno;
5071 rp = &hba->sli.sli3.ring[channelno];
5072
5073 iocb = &iocbq->iocb;
5074 word = (uint32_t *)iocb;
5075
5076 switch (channelno) {
5077 #ifdef SFCT_SUPPORT
5078 case FC_FCT_RING:
5079 HBASTATS.FctRingEvent++;
5080 RcvError = &HBASTATS.FctRingError;
5081 RcvDropped = &HBASTATS.FctRingDropped;
5082 UbPosted = &HBASTATS.FctUbPosted;
5083 dropped_msg = &emlxs_fct_detail_msg;
5084 buf_type = MEM_FCTBUF;
5085 break;
5086 #endif /* SFCT_SUPPORT */
5087
5088 case FC_IP_RING:
5089 HBASTATS.IpRcvEvent++;
5090 RcvError = &HBASTATS.IpDropped;
5091 RcvDropped = &HBASTATS.IpDropped;
5092 UbPosted = &HBASTATS.IpUbPosted;
5093 dropped_msg = &emlxs_unsol_ip_dropped_msg;
5094 buf_type = MEM_IPBUF;
5095 break;
5096
5097 case FC_ELS_RING:
5098 HBASTATS.ElsRcvEvent++;
5099 RcvError = &HBASTATS.ElsRcvError;
5100 RcvDropped = &HBASTATS.ElsRcvDropped;
5101 UbPosted = &HBASTATS.ElsUbPosted;
5102 dropped_msg = &emlxs_unsol_els_dropped_msg;
5103 buf_type = MEM_ELSBUF;
5104 break;
5105
5106 case FC_CT_RING:
5107 HBASTATS.CtRcvEvent++;
5108 RcvError = &HBASTATS.CtRcvError;
5109 RcvDropped = &HBASTATS.CtRcvDropped;
5110 UbPosted = &HBASTATS.CtUbPosted;
5111 dropped_msg = &emlxs_unsol_ct_dropped_msg;
5112 buf_type = MEM_CTBUF;
5113 break;
5114
5115 default:
5116 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
5117 "channel=%d cmd=%x %s %x %x %x %x",
5118 channelno, iocb->ULPCOMMAND,
5119 emlxs_state_xlate(iocb->ULPSTATUS), word[4], word[5],
5120 word[6], word[7]);
5121 return (1);
5122 }
5123
5124 if (iocb->ULPSTATUS) {
5125 if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
5126 (iocb->un.grsp.perr.statLocalError ==
5127 IOERR_RCV_BUFFER_TIMEOUT)) {
5128 (void) strlcpy(error_str, "Out of posted buffers:",
5129 sizeof (error_str));
5130 iocb->ULPBDECOUNT = 0;
5131 } else if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
5132 (iocb->un.grsp.perr.statLocalError ==
5133 IOERR_RCV_BUFFER_WAITING)) {
5134 (void) strlcpy(error_str, "Buffer waiting:",
5135 sizeof (error_str));
5136 iocb->ULPBDECOUNT = 0;
5137 goto done;
5138 } else if (iocb->ULPSTATUS == IOSTAT_NEED_BUFF_ENTRY) {
5139 (void) strlcpy(error_str, "Need Buffer Entry:",
5140 sizeof (error_str));
5141 iocb->ULPBDECOUNT = 0;
5142 goto done;
5143 } else {
5144 (void) strlcpy(error_str, "General error:",
5145 sizeof (error_str));
5146 }
5147
5148 goto failed;
5149 }
5150
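	/*
	 * Locate the receive buffer. With HBQs enabled, the IOCB
	 * carries an HBQ id and entry tag that index directly into
	 * the driver's posted buffer table; otherwise the buffer's
	 * physical BDE address must be mapped back to its MATCHMAP.
	 */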
5151 if (hba->flag & FC_HBQ_ENABLED) {
5152 HBQ_INIT_t *hbq;
5153 HBQE_t *hbqE;
5154 uint32_t hbqe_tag;
5155 uint32_t hbq_id;
5156
5157 (*UbPosted)--;
5158
5159 hbqE = (HBQE_t *)iocb;
5160 hbq_id = hbqE->unt.ext.HBQ_tag;
5161 hbqe_tag = hbqE->unt.ext.HBQE_tag;
5162
5163 hbq = &hba->sli.sli3.hbq_table[hbq_id];
5164
5165 if (hbqe_tag >= hbq->HBQ_numEntries) {
5166 (void) snprintf(error_str, sizeof (error_str),
5167 "Invalid HBQE iotag=%d:", hbqe_tag);
5168 goto dropped;
5169 }
5170
5171 mp = hba->sli.sli3.hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];
5172
5173 size = iocb->unsli3.ext_rcv.seq_len;
5174 } else {
5175 bdeAddr =
5176 PADDR(iocb->un.cont64[0].addrHigh,
5177 iocb->un.cont64[0].addrLow);
5178
5179 /* Check for invalid buffer */
5180 if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
5181 (void) strlcpy(error_str, "Invalid buffer:",
5182 sizeof (error_str));
5183 goto dropped;
5184 }
5185
5186 mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);
5187
5188 size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
5189 }
5190
5191 if (!mp) {
5192 (void) strlcpy(error_str, "Buffer not mapped:",
5193 sizeof (error_str));
5194 goto dropped;
5195 }
5196
5197 #ifdef FMA_SUPPORT
5198 if (mp->dma_handle) {
5199 if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
5200 != DDI_FM_OK) {
5201 EMLXS_MSGF(EMLXS_CONTEXT,
5202 &emlxs_invalid_dma_handle_msg,
5203 "handle_rcv_seq: hdl=%p",
5204 mp->dma_handle);
5205 goto dropped;
5206 }
5207 }
5208 #endif /* FMA_SUPPORT */
5209
5210 if (!size) {
5211 (void) strlcpy(error_str, "Buffer empty:", sizeof (error_str));
5212 goto dropped;
5213 }
5214
	/* Skip the VPI check on the IP ring so broadcast packets are not dropped */
5216 if (channelno != FC_IP_RING) {
5217 /* Get virtual port */
5218 if (hba->flag & FC_NPIV_ENABLED) {
5219 vpi = iocb->unsli3.ext_rcv.vpi;
5220 if (vpi >= hba->vpi_max) {
5221 (void) snprintf(error_str, sizeof (error_str),
5222 "Invalid VPI=%d:", vpi);
5223 goto dropped;
5224 }
5225
5226 port = &VPORT(vpi);
5227 }
5228 }
5229
5230 /* Process request */
5231 switch (channelno) {
5232 case FC_FCT_RING:
5233 if (port->mode == MODE_INITIATOR) {
5234 (void) strlcpy(error_str, "Target mode disabled:",
5235 sizeof (error_str));
5236 goto dropped;
5237 #ifdef SFCT_SUPPORT
5238 } else if (port->mode == MODE_TARGET) {
5239 (void) emlxs_fct_handle_unsol_req(port, cp, iocbq, mp,
5240 size);
5241 #endif /* SFCT_SUPPORT */
5242 } else {
5243 (void) snprintf(error_str, sizeof (error_str),
5244 "Invalid mode=%x:", port->mode);
5245 goto dropped;
5246 }
5247 break;
5248
5249 case FC_IP_RING:
5250 if (port->mode == MODE_INITIATOR) {
5251 (void) emlxs_ip_handle_unsol_req(port, cp, iocbq,
5252 mp, size);
5253 #ifdef SFCT_SUPPORT
5254 } else if (port->mode == MODE_TARGET) {
5255 (void) strlcpy(error_str, "Initiator mode disabled:",
5256 sizeof (error_str));
5257 goto dropped;
5258 #endif /* SFCT_SUPPORT */
5259 } else {
5260 (void) snprintf(error_str, sizeof (error_str),
5261 "Invalid mode=%x:", port->mode);
5262 goto dropped;
5263 }
5264 break;
5265
5266 case FC_ELS_RING:
5267 if (port->mode == MODE_INITIATOR) {
5268 (void) emlxs_els_handle_unsol_req(port, cp, iocbq, mp,
5269 size);
5270 #ifdef SFCT_SUPPORT
5271 } else if (port->mode == MODE_TARGET) {
5272 (void) emlxs_fct_handle_unsol_els(port, cp, iocbq, mp,
5273 size);
5274 #endif /* SFCT_SUPPORT */
5275 } else {
5276 (void) snprintf(error_str, sizeof (error_str),
5277 "Invalid mode=%x:", port->mode);
5278 goto dropped;
5279 }
5280 break;
5281
5282 case FC_CT_RING:
5283 (void) emlxs_ct_handle_unsol_req(port, cp, iocbq, mp, size);
5284 break;
5285 }
5286
5287 goto done;
5288
5289 dropped:
5290 (*RcvDropped)++;
5291
5292 EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5293 "%s: cmd=%x %s %x %x %x %x",
5294 error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5295 word[4], word[5], word[6], word[7]);
5296
5297 if (channelno == FC_FCT_RING) {
5298 uint32_t sid;
5299
5300 if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
5301 emlxs_node_t *ndlp;
5302 ndlp = emlxs_node_find_rpi(port, iocb->ULPIOTAG);
5303 if (! ndlp) {
5304 goto done;
5305 }
5306 sid = ndlp->nlp_DID;
5307 } else {
5308 sid = iocb->un.ulpWord[4] & 0xFFFFFF;
5309 }
5310
5311 emlxs_send_logo(port, sid);
5312 }
5313
5314 goto done;
5315
5316 failed:
5317 (*RcvError)++;
5318
5319 EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
5320 "%s: cmd=%x %s %x %x %x %x hba:%x %x",
5321 error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
5322 word[4], word[5], word[6], word[7], hba->state, hba->flag);
5323
5324 done:
5325
5326 if (hba->flag & FC_HBQ_ENABLED) {
5327 if (iocb->ULPBDECOUNT) {
5328 HBQE_t *hbqE;
5329 uint32_t hbq_id;
5330
5331 hbqE = (HBQE_t *)iocb;
5332 hbq_id = hbqE->unt.ext.HBQ_tag;
5333
5334 emlxs_update_HBQ_index(hba, hbq_id);
5335 }
5336 } else {
5337 if (mp) {
5338 emlxs_mem_put(hba, buf_type, (void *)mp);
5339 }
5340
5341 if (iocb->ULPBDECOUNT) {
5342 (void) emlxs_post_buffer(hba, rp, 1);
5343 }
5344 }
5345
5346 return (0);
5347
5348 } /* emlxs_handle_rcv_seq() */
5349
5350
5351 /* EMLXS_CMD_RING_LOCK must be held when calling this function */
5352 static void
5353 emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
5354 {
5355 emlxs_port_t *port;
5356 IOCB *icmd;
5357 IOCB *iocb;
5358 emlxs_buf_t *sbp;
5359 off_t offset;
5360 uint32_t ringno;
5361
5362 ringno = rp->ringno;
5363 sbp = iocbq->sbp;
5364 icmd = &iocbq->iocb;
5365 port = iocbq->port;
5366
5367 HBASTATS.IocbIssued[ringno]++;
5368
5369 /* Check for ULP pkt request */
5370 if (sbp) {
5371 mutex_enter(&sbp->mtx);
5372
5373 if (sbp->node == NULL) {
5374 /* Set node to base node by default */
5375 iocbq->node = (void *)&port->node_base;
5376 sbp->node = (void *)&port->node_base;
5377 }
5378
5379 sbp->pkt_flags |= PACKET_IN_CHIPQ;
5380 mutex_exit(&sbp->mtx);
5381
5382 atomic_inc_32(&hba->io_active);
5383 #ifdef NODE_THROTTLE_SUPPORT
5384 if (sbp->node) {
5385 atomic_inc_32(&sbp->node->io_active);
5386 }
5387 #endif /* NODE_THROTTLE_SUPPORT */
5388
5389 #ifdef SFCT_SUPPORT
5390 #ifdef FCT_IO_TRACE
5391 if (sbp->fct_cmd) {
5392 emlxs_fct_io_trace(port, sbp->fct_cmd,
5393 EMLXS_FCT_IOCB_ISSUED);
5394 emlxs_fct_io_trace(port, sbp->fct_cmd,
5395 icmd->ULPCOMMAND);
5396 }
5397 #endif /* FCT_IO_TRACE */
5398 #endif /* SFCT_SUPPORT */
5399
5400 rp->channelp->hbaSendCmd_sbp++;
5401 iocbq->channel = rp->channelp;
5402 } else {
5403 rp->channelp->hbaSendCmd++;
5404 }
5405
5406 /* get the next available command ring iocb */
5407 iocb =
5408 (IOCB *)(((char *)rp->fc_cmdringaddr +
5409 (rp->fc_cmdidx * hba->sli.sli3.iocb_cmd_size)));
5410
5411 /* Copy the local iocb to the command ring iocb */
5412 BE_SWAP32_BCOPY((uint8_t *)icmd, (uint8_t *)iocb,
5413 hba->sli.sli3.iocb_cmd_size);
5414
5415 /* DMA sync the command ring iocb for the adapter */
5416 offset = (off_t)((uint64_t)((unsigned long)iocb)
5417 - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5418 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5419 hba->sli.sli3.iocb_cmd_size, DDI_DMA_SYNC_FORDEV);
5420
5421 /*
5422 * After this, the sbp / iocb should not be
5423 * accessed in the xmit path.
5424 */
5425
5426 /* Free the local iocb if there is no sbp tracking it */
5427 if (!sbp) {
5428 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
5429 }
5430
5431 /* update local ring index to next available ring index */
5432 rp->fc_cmdidx =
5433 (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;
5434
5435
5436 return;
5437
5438 } /* emlxs_sli3_issue_iocb() */
5439
5440
5441 static void
5442 emlxs_sli3_hba_kill(emlxs_hba_t *hba)
5443 {
5444 emlxs_port_t *port = &PPORT;
5445 MAILBOX *swpmb;
5446 MAILBOX *mb2;
5447 MAILBOX *mb1;
5448 uint32_t word0;
5449 uint32_t j;
5450 uint32_t interlock_failed;
5451 uint32_t ha_copy;
5452 uint32_t value;
5453 off_t offset;
5454 uint32_t size;
5455
5456 /* Perform adapter interlock to kill adapter */
5457 interlock_failed = 0;
5458
5459 mutex_enter(&EMLXS_PORT_LOCK);
5460 if (hba->flag & FC_INTERLOCKED) {
5461 EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
5462
5463 mutex_exit(&EMLXS_PORT_LOCK);
5464
5465 return;
5466 }
5467
5468 j = 0;
5469 while (j++ < 10000) {
5470 if (hba->mbox_queue_flag == 0) {
5471 break;
5472 }
5473
5474 mutex_exit(&EMLXS_PORT_LOCK);
5475 BUSYWAIT_US(100);
5476 mutex_enter(&EMLXS_PORT_LOCK);
5477 }
5478
5479 if (hba->mbox_queue_flag != 0) {
5480 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5481 "Interlock failed. Mailbox busy.");
5482 mutex_exit(&EMLXS_PORT_LOCK);
5483 return;
5484 }
5485
5486 hba->flag |= FC_INTERLOCKED;
5487 hba->mbox_queue_flag = 1;
5488
5489 /* Disable all host interrupts */
5490 hba->sli.sli3.hc_copy = 0;
5491 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
5492 WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);
5493
5494 mb2 = FC_SLIM2_MAILBOX(hba);
5495 mb1 = FC_SLIM1_MAILBOX(hba);
5496 swpmb = (MAILBOX *)&word0;
5497
5498 if (!(hba->flag & FC_SLIM2_MODE)) {
5499 goto mode_B;
5500 }
5501
5502 mode_A:
5503
5504 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5505 "Attempting SLIM2 Interlock...");
5506
5507 interlock_A:
5508
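	/*
	 * Kill-board handshake: write the sentinel 0x55555555 into
	 * mailbox word 1 and a KILL_BOARD command into word 0. The
	 * firmware acknowledges acceptance by rewriting the sentinel
	 * to 0xAAAAAAAA, then clears mbxOwner once the kill is done.
	 */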
5509 value = 0x55555555;
5510 word0 = 0;
5511 swpmb->mbxCommand = MBX_KILL_BOARD;
5512 swpmb->mbxOwner = OWN_CHIP;
5513
5514 /* Write value to SLIM */
5515 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
5516 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);
5517
5518 /* Send Kill board request */
5519 mb2->un.varWords[0] = value;
5520 mb2->mbxCommand = MBX_KILL_BOARD;
5521 mb2->mbxOwner = OWN_CHIP;
5522
5523 /* Sync the memory */
5524 offset = (off_t)((uint64_t)((unsigned long)mb2)
5525 - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5526 size = (sizeof (uint32_t) * 2);
5527
5528 BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);
5529
5530 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
5531 DDI_DMA_SYNC_FORDEV);
5532
5533 /* interrupt board to do it right away */
5534 WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);
5535
	/* First wait for command acceptance */
5537 j = 0;
5538 while (j++ < 1000) {
5539 value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));
5540
5541 if (value == 0xAAAAAAAA) {
5542 break;
5543 }
5544
5545 BUSYWAIT_US(50);
5546 }
5547
5548 if (value == 0xAAAAAAAA) {
5549 /* Now wait for mailbox ownership to clear */
5550 while (j++ < 10000) {
5551 word0 =
5552 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
5553
5554 if (swpmb->mbxOwner == 0) {
5555 break;
5556 }
5557
5558 BUSYWAIT_US(50);
5559 }
5560
5561 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5562 "Interlock succeeded.");
5563
5564 goto done;
5565 }
5566
5567 /* Interlock failed !!! */
5568 interlock_failed = 1;
5569
5570 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Interlock failed.");
5571
5572 mode_B:
5573
5574 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5575 "Attempting SLIM1 Interlock...");
5576
5577 interlock_B:
5578
5579 value = 0x55555555;
5580 word0 = 0;
5581 swpmb->mbxCommand = MBX_KILL_BOARD;
5582 swpmb->mbxOwner = OWN_CHIP;
5583
5584 /* Write KILL BOARD to mailbox */
5585 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
5586 WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb1), word0);
5587
5588 /* interrupt board to do it right away */
5589 WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);
5590
	/* First wait for command acceptance */
5592 j = 0;
5593 while (j++ < 1000) {
5594 value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));
5595
5596 if (value == 0xAAAAAAAA) {
5597 break;
5598 }
5599
5600 BUSYWAIT_US(50);
5601 }
5602
5603 if (value == 0xAAAAAAAA) {
5604 /* Now wait for mailbox ownership to clear */
5605 while (j++ < 10000) {
5606 word0 =
5607 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
5608
5609 if (swpmb->mbxOwner == 0) {
5610 break;
5611 }
5612
5613 BUSYWAIT_US(50);
5614 }
5615
5616 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5617 "Interlock succeeded.");
5618
5619 goto done;
5620 }
5621
5622 /* Interlock failed !!! */
5623
5624 /* If this is the first time then try again */
5625 if (interlock_failed == 0) {
5626 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5627 "Interlock failed. Retrying...");
5628
5629 /* Try again */
5630 interlock_failed = 1;
5631 goto interlock_B;
5632 }
5633
5634 /*
5635 * Now check for error attention to indicate the board has
	 * been killed
5637 */
5638 j = 0;
5639 while (j++ < 10000) {
5640 ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
5641
5642 if (ha_copy & HA_ERATT) {
5643 break;
5644 }
5645
5646 BUSYWAIT_US(50);
5647 }
5648
5649 if (ha_copy & HA_ERATT) {
5650 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5651 "Interlock failed. Board killed.");
5652 } else {
5653 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
5654 "Interlock failed. Board not killed.");
5655 }
5656
5657 done:
5658
5659 hba->mbox_queue_flag = 0;
5660
5661 EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
5662
5663 #ifdef FMA_SUPPORT
5664 /* Access handle validation */
5665 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
5666 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
5667 #endif /* FMA_SUPPORT */
5668
5669 mutex_exit(&EMLXS_PORT_LOCK);
5670
5671 return;
5672
5673 } /* emlxs_sli3_hba_kill() */
5674
5675
5676 static void
5677 emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba)
5678 {
5679 emlxs_port_t *port = &PPORT;
5680 MAILBOX *swpmb;
5681 MAILBOX *mb2;
5682 MAILBOX *mb1;
5683 uint32_t word0;
5684 off_t offset;
5685 uint32_t j;
5686 uint32_t value;
5687 uint32_t size;
5688
5689 /* Disable all host interrupts */
5690 hba->sli.sli3.hc_copy = 0;
5691 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
5692 WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);
5693
5694 mb2 = FC_SLIM2_MAILBOX(hba);
5695 mb1 = FC_SLIM1_MAILBOX(hba);
5696 swpmb = (MAILBOX *)&word0;
5697
5698 value = 0x55555555;
5699 word0 = 0;
5700 swpmb->mbxCommand = MBX_KILL_BOARD;
5701 swpmb->mbxOwner = OWN_CHIP;
5702
5703 /* Write value to SLIM */
5704 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
5705 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);
5706
5707 /* Send Kill board request */
5708 mb2->un.varWords[0] = value;
5709 mb2->mbxCommand = MBX_KILL_BOARD;
5710 mb2->mbxOwner = OWN_CHIP;
5711
5712 /* Sync the memory */
5713 offset = (off_t)((uint64_t)((unsigned long)mb2)
5714 - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5715 size = (sizeof (uint32_t) * 2);
5716
5717 BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);
5718
5719 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
5720 DDI_DMA_SYNC_FORDEV);
5721
5722 /* interrupt board to do it right away */
5723 WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);
5724
	/* First wait for command acceptance */
5726 j = 0;
5727 while (j++ < 1000) {
5728 value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));
5729
5730 if (value == 0xAAAAAAAA) {
5731 break;
5732 }
5733 BUSYWAIT_US(50);
5734 }
5735 if (value == 0xAAAAAAAA) {
5736 /* Now wait for mailbox ownership to clear */
5737 while (j++ < 10000) {
5738 word0 =
5739 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
5740 if (swpmb->mbxOwner == 0) {
5741 break;
5742 }
5743 BUSYWAIT_US(50);
5744 }
5745 goto done;
5746 }
5747
5748 done:
5749 EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
5750
5751 #ifdef FMA_SUPPORT
5752 /* Access handle validation */
5753 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
5754 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
5755 #endif /* FMA_SUPPORT */
5756 return;
5757
} /* emlxs_sli3_hba_kill4quiesce() */


5763 /*
5764 * emlxs_handle_mb_event
5765 *
5766 * Description: Process a Mailbox Attention.
5767 * Called from host_interrupt to process MBATT
5768 *
5769 * Returns:
5770 *
5771 */
5772 static uint32_t
5773 emlxs_handle_mb_event(emlxs_hba_t *hba)
5774 {
5775 emlxs_port_t *port = &PPORT;
5776 MAILBOX *mb;
5777 MAILBOX *swpmb;
5778 MAILBOX *mbox;
5779 MAILBOXQ *mbq = NULL;
5780 volatile uint32_t word0;
5781 MATCHMAP *mbox_bp;
5782 off_t offset;
5783 uint32_t i;
5784 int rc;
5785
5786 swpmb = (MAILBOX *)&word0;
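	/* swpmb overlays the MAILBOX bit fields (mbxCommand, mbxOwner, */
	/* mbxStatus) on the single mailbox word read below. */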
5787
5788 mutex_enter(&EMLXS_PORT_LOCK);
5789 switch (hba->mbox_queue_flag) {
5790 case 0:
5791 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5792 "No mailbox active.");
5793
5794 mutex_exit(&EMLXS_PORT_LOCK);
5795 return (0);
5796
5797 case MBX_POLL:
5798
		/* Mark the mailbox complete; this should wake any polling */
		/* threads. This can happen if interrupts are enabled while */
		/* a polled mailbox command is outstanding. If we don't set */
		/* MBQ_COMPLETED here, the polling thread may wait until a */
		/* timeout error occurs. */
5804
5805 mutex_enter(&EMLXS_MBOX_LOCK);
5806 mbq = (MAILBOXQ *)hba->mbox_mbq;
5807 if (mbq) {
5808 port = (emlxs_port_t *)mbq->port;
5809 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5810 "Mailbox event. Completing Polled command.");
5811 mbq->flag |= MBQ_COMPLETED;
5812 }
5813 mutex_exit(&EMLXS_MBOX_LOCK);
5814
5815 mutex_exit(&EMLXS_PORT_LOCK);
5816 return (0);
5817
5818 case MBX_SLEEP:
5819 case MBX_NOWAIT:
5820 /* Check mbox_timer, it acts as a service flag too */
5821 /* The first to service the mbox queue will clear the timer */
5822 if (hba->mbox_timer) {
5823 hba->mbox_timer = 0;
5824
5825 mutex_enter(&EMLXS_MBOX_LOCK);
5826 mbq = (MAILBOXQ *)hba->mbox_mbq;
5827 mutex_exit(&EMLXS_MBOX_LOCK);
5828 }
5829
5830 if (!mbq) {
5831 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5832 "Mailbox event. No service required.");
5833 mutex_exit(&EMLXS_PORT_LOCK);
5834 return (0);
5835 }
5836
5837 mb = (MAILBOX *)mbq;
5838 mutex_exit(&EMLXS_PORT_LOCK);
5839 break;
5840
5841 default:
5842 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
5843 "Invalid Mailbox flag (%x).");
5844
5845 mutex_exit(&EMLXS_PORT_LOCK);
5846 return (0);
5847 }
5848
5849 /* Set port context */
5850 port = (emlxs_port_t *)mbq->port;
5851
5852 /* Get first word of mailbox */
5853 if (hba->flag & FC_SLIM2_MODE) {
5854 mbox = FC_SLIM2_MAILBOX(hba);
5855 offset = (off_t)((uint64_t)((unsigned long)mbox)
5856 - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5857
5858 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5859 sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5860 word0 = *((volatile uint32_t *)mbox);
5861 word0 = BE_SWAP32(word0);
5862 } else {
5863 mbox = FC_SLIM1_MAILBOX(hba);
5864 word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5865 }
5866
5867 i = 0;
5868 while (swpmb->mbxOwner == OWN_CHIP) {
5869 if (i++ > 10000) {
5870 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5871 "OWN_CHIP: %s: status=%x",
5872 emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5873 swpmb->mbxStatus);
5874
5875 return (1);
5876 }
5877
5878 /* Get first word of mailbox */
5879 if (hba->flag & FC_SLIM2_MODE) {
5880 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5881 offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5882 word0 = *((volatile uint32_t *)mbox);
5883 word0 = BE_SWAP32(word0);
5884 } else {
5885 word0 =
5886 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5887 }
5888 }
5889
5890 /* Now that we are the owner, DMA Sync entire mailbox if needed */
5891 if (hba->flag & FC_SLIM2_MODE) {
5892 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5893 MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
5894
5895 BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
5896 MAILBOX_CMD_BSIZE);
5897 } else {
5898 READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox,
5899 MAILBOX_CMD_WSIZE);
5900 }
5901
5902 #ifdef MBOX_EXT_SUPPORT
5903 if (mbq->extbuf) {
5904 uint32_t *mbox_ext =
5905 (uint32_t *)((uint8_t *)mbox + MBOX_EXTENSION_OFFSET);
5906 off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;
5907
5908 if (hba->flag & FC_SLIM2_MODE) {
5909 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5910 offset_ext, mbq->extsize,
5911 DDI_DMA_SYNC_FORKERNEL);
5912 BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
5913 (uint8_t *)mbq->extbuf, mbq->extsize);
5914 } else {
5915 READ_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
5916 mbox_ext, (mbq->extsize / 4));
5917 }
5918 }
5919 #endif /* MBOX_EXT_SUPPORT */
5920
5921 #ifdef FMA_SUPPORT
5922 if (!(hba->flag & FC_SLIM2_MODE)) {
5923 /* Access handle validation */
5924 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
5925 }
5926 #endif /* FMA_SUPPORT */
5927
5928 /* Now sync the memory buffer if one was used */
5929 if (mbq->bp) {
5930 mbox_bp = (MATCHMAP *)mbq->bp;
5931 EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
5932 DDI_DMA_SYNC_FORKERNEL);
5933 }
5934
5935 /* Mailbox has been completely received at this point */
5936
5937 if (mb->mbxCommand == MBX_HEARTBEAT) {
5938 hba->heartbeat_active = 0;
5939 goto done;
5940 }
5941
5942 if (hba->mbox_queue_flag == MBX_SLEEP) {
5943 if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5944 swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5945 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5946 "Received. %s: status=%x Sleep.",
5947 emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5948 swpmb->mbxStatus);
5949 }
5950 } else {
5951 if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5952 swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5953 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5954 "Completed. %s: status=%x",
5955 emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5956 swpmb->mbxStatus);
5957 }
5958 }
5959
5960 /* Filter out passthru mailbox */
5961 if (mbq->flag & MBQ_PASSTHRU) {
5962 goto done;
5963 }
5964
5965 if (mb->mbxStatus) {
5966 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5967 "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5968 (uint32_t)mb->mbxStatus);
5969 }
5970
5971 if (mbq->mbox_cmpl) {
5972 rc = (mbq->mbox_cmpl)(hba, mbq);
5973 /* If mbox was retried, return immediately */
5974 if (rc) {
5975 return (0);
5976 }
5977 }
5978
5979 done:
5980
5981 /* Clean up the mailbox area */
5982 emlxs_mb_fini(hba, mb, mb->mbxStatus);
5983
5984 mbq = (MAILBOXQ *)emlxs_mb_get(hba);
5985 if (mbq) {
5986 /* Attempt to send pending mailboxes */
5987 rc = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
5988 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
5989 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
5990 }
5991 }
5992 return (0);
5993
5994 } /* emlxs_handle_mb_event() */
5995
5996
5997 static void
5998 emlxs_sli3_timer(emlxs_hba_t *hba)
5999 {
6000 /* Perform SLI3 level timer checks */
6001
6002 emlxs_sli3_timer_check_mbox(hba);
6003
6004 } /* emlxs_sli3_timer() */
6005
6006
6007 static void
6008 emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba)
6009 {
6010 emlxs_port_t *port = &PPORT;
6011 emlxs_config_t *cfg = &CFG;
6012 MAILBOX *mb = NULL;
6013 uint32_t word0;
6014 uint32_t offset;
6015 uint32_t ha_copy = 0;
6016
6017 if (!cfg[CFG_TIMEOUT_ENABLE].current) {
6018 return;
6019 }
6020
6021 mutex_enter(&EMLXS_PORT_LOCK);
6022
6023 /* Return if timer hasn't expired */
6024 if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
6025 mutex_exit(&EMLXS_PORT_LOCK);
6026 return;
6027 }
6028
	/* Mailbox timed out; first check for error attention */
6030 ha_copy = emlxs_check_attention(hba);
6031
6032 if (ha_copy & HA_ERATT) {
6033 hba->mbox_timer = 0;
6034 mutex_exit(&EMLXS_PORT_LOCK);
6035 emlxs_handle_ff_error(hba);
6036 return;
6037 }
6038
6039 if (hba->mbox_queue_flag) {
6040 /* Get first word of mailbox */
6041 if (hba->flag & FC_SLIM2_MODE) {
6042 mb = FC_SLIM2_MAILBOX(hba);
6043 offset =
6044 (off_t)((uint64_t)((unsigned long)mb) - (uint64_t)
6045 ((unsigned long)hba->sli.sli3.slim2.virt));
6046
6047 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
6048 offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
6049 word0 = *((volatile uint32_t *)mb);
6050 word0 = BE_SWAP32(word0);
6051 } else {
6052 mb = FC_SLIM1_MAILBOX(hba);
6053 word0 =
6054 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
6055 #ifdef FMA_SUPPORT
6056 /* Access handle validation */
6057 EMLXS_CHK_ACC_HANDLE(hba,
6058 hba->sli.sli3.slim_acc_handle);
6059 #endif /* FMA_SUPPORT */
6060 }
6061
6062 mb = (MAILBOX *)&word0;
6063
6064 /* Check if mailbox has actually completed */
6065 if (mb->mbxOwner == OWN_HOST) {
6066 /* Read host attention register to determine */
6067 /* interrupt source */
6068 uint32_t ha_copy = emlxs_check_attention(hba);
6069
6070 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
6071 "Mailbox attention missed: %s. Forcing event. "
6072 "hc=%x ha=%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
6073 hba->sli.sli3.hc_copy, ha_copy);
6074
6075 mutex_exit(&EMLXS_PORT_LOCK);
6076
6077 (void) emlxs_handle_mb_event(hba);
6078
6079 return;
6080 }
6081
6082 /* The first to service the mbox queue will clear the timer */
6083 /* We will service the mailbox here */
6084 hba->mbox_timer = 0;
6085
6086 mutex_enter(&EMLXS_MBOX_LOCK);
6087 mb = (MAILBOX *)hba->mbox_mbq;
6088 mutex_exit(&EMLXS_MBOX_LOCK);
6089 }
6090
6091 if (mb) {
6092 switch (hba->mbox_queue_flag) {
6093 case MBX_NOWAIT:
6094 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6095 "%s: Nowait.",
6096 emlxs_mb_cmd_xlate(mb->mbxCommand));
6097 break;
6098
6099 case MBX_SLEEP:
6100 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6101 "%s: mb=%p Sleep.",
6102 emlxs_mb_cmd_xlate(mb->mbxCommand),
6103 mb);
6104 break;
6105
6106 case MBX_POLL:
6107 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6108 "%s: mb=%p Polled.",
6109 emlxs_mb_cmd_xlate(mb->mbxCommand),
6110 mb);
6111 break;
6112
6113 default:
6114 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6115 "%s: mb=%p (%d).",
6116 emlxs_mb_cmd_xlate(mb->mbxCommand),
6117 mb, hba->mbox_queue_flag);
6118 break;
6119 }
6120 } else {
6121 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
6122 }
6123
6124 hba->flag |= FC_MBOX_TIMEOUT;
6125 EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);
6126
6127 mutex_exit(&EMLXS_PORT_LOCK);
6128
6129 /* Perform mailbox cleanup */
6130 /* This will wake any sleeping or polling threads */
6131 emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
6132
6133 /* Trigger adapter shutdown */
6134 emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
6135
6136 return;
6137
6138 } /* emlxs_sli3_timer_check_mbox() */
6139
6140
6141 /*
6142 * emlxs_mb_config_port Issue a CONFIG_PORT mailbox command
6143 */
6144 static uint32_t
6145 emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode,
6146 uint32_t hbainit)
6147 {
6148 MAILBOX *mb = (MAILBOX *)mbq;
6149 emlxs_vpd_t *vpd = &VPD;
6150 emlxs_port_t *port = &PPORT;
6151 emlxs_config_t *cfg;
6152 RING *rp;
6153 uint64_t pcb;
6154 uint64_t mbx;
6155 uint64_t hgp;
6156 uint64_t pgp;
6157 uint64_t rgp;
6158 MAILBOX *mbox;
6159 SLIM2 *slim;
6160 SLI2_RDSC *rdsc;
6161 uint64_t offset;
6162 uint32_t Laddr;
6163 uint32_t i;
6164
6165 cfg = &CFG;
6166 bzero((void *)mb, MAILBOX_CMD_BSIZE);
6167 mbox = NULL;
6168 slim = NULL;
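	/*
	 * mbox and slim remain NULL; they are used below only for
	 * offsetof-style address arithmetic against the physical
	 * base of the SLIM2 area.
	 */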
6169
6170 mb->mbxCommand = MBX_CONFIG_PORT;
6171 mb->mbxOwner = OWN_HOST;
6172 mbq->mbox_cmpl = NULL;
6173
6174 mb->un.varCfgPort.pcbLen = sizeof (PCB);
6175 mb->un.varCfgPort.hbainit[0] = hbainit;
6176
6177 pcb = hba->sli.sli3.slim2.phys +
6178 (uint64_t)((unsigned long)&(slim->pcb));
6179 mb->un.varCfgPort.pcbLow = PADDR_LO(pcb);
6180 mb->un.varCfgPort.pcbHigh = PADDR_HI(pcb);
6181
6182 /* Set Host pointers in SLIM flag */
6183 mb->un.varCfgPort.hps = 1;
6184
	/* Initialize hba structure for the requested SLI mode. */
	/* If CONFIG_PORT succeeds, these values are updated afterward. */
6187 hba->sli_mode = sli_mode;
6188 hba->vpi_max = 0;
6189 hba->flag &= ~FC_NPIV_ENABLED;
6190
6191 if (sli_mode == EMLXS_HBA_SLI3_MODE) {
6192 mb->un.varCfgPort.sli_mode = EMLXS_HBA_SLI3_MODE;
6193 mb->un.varCfgPort.cerbm = 1;
6194 mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;
6195
6196 if (cfg[CFG_NPIV_ENABLE].current) {
6197 if (vpd->feaLevelHigh >= 0x09) {
6198 if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
6199 mb->un.varCfgPort.vpi_max =
6200 MAX_VPORTS - 1;
6201 } else {
6202 mb->un.varCfgPort.vpi_max =
6203 MAX_VPORTS_LIMITED - 1;
6204 }
6205
6206 mb->un.varCfgPort.cmv = 1;
6207 } else {
6208 EMLXS_MSGF(EMLXS_CONTEXT,
6209 &emlxs_init_debug_msg,
6210 "CFGPORT: Firmware does not support NPIV. "
6211 "level=%d", vpd->feaLevelHigh);
6212 }
6213
6214 }
6215 }
6216
6217 /*
6218 * Now setup pcb
6219 */
6220 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
6221 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
6222 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.maxRing =
6223 (hba->sli.sli3.ring_count - 1);
6224 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mailBoxSize =
6225 sizeof (MAILBOX) + MBOX_EXTENSION_SIZE;
6226
6227 mbx = hba->sli.sli3.slim2.phys +
6228 (uint64_t)((unsigned long)&(slim->mbx));
6229 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrHigh = PADDR_HI(mbx);
6230 ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrLow = PADDR_LO(mbx);
6231
6232
6233 /*
6234 * Set up HGP - Port Memory
6235 *
6236 * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
6237 * RR0Get 0xc4 0x84
6238 * CR1Put 0xc8 0x88
6239 * RR1Get 0xcc 0x8c
6240 * CR2Put 0xd0 0x90
6241 * RR2Get 0xd4 0x94
6242 * CR3Put 0xd8 0x98
6243 * RR3Get 0xdc 0x9c
6244 *
6245 * Reserved 0xa0-0xbf
6246 *
6247 * If HBQs configured:
6248 * HBQ 0 Put ptr 0xc0
6249 * HBQ 1 Put ptr 0xc4
6250 * HBQ 2 Put ptr 0xc8
6251 * ...
6252 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
6253 */
6254
6255 if (sli_mode >= EMLXS_HBA_SLI3_MODE) {
6256 /* ERBM is enabled */
6257 hba->sli.sli3.hgp_ring_offset = 0x80;
6258 hba->sli.sli3.hgp_hbq_offset = 0xC0;
6259
6260 hba->sli.sli3.iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
6261 hba->sli.sli3.iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
6262
6263 } else { /* SLI2 */
6264 /* ERBM is disabled */
6265 hba->sli.sli3.hgp_ring_offset = 0xC0;
6266 hba->sli.sli3.hgp_hbq_offset = 0;
6267
6268 hba->sli.sli3.iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
6269 hba->sli.sli3.iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
6270 }
6271
6272 /* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
    if (hba->bus_type == SBUS_FC) {
        hgp = hba->sli.sli3.slim2.phys +
            (uint64_t)((unsigned long)&(mbox->us.s2.host));
        ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
            PADDR_HI(hgp);
        ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
            PADDR_LO(hgp);
    } else {
        ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
            (uint32_t)ddi_get32(hba->pci_acc_handle,
            (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));

        Laddr =
            ddi_get32(hba->pci_acc_handle,
            (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
        Laddr &= ~0x4;
        ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
            (uint32_t)(Laddr + hba->sli.sli3.hgp_ring_offset);

#ifdef FMA_SUPPORT
        /* Access handle validation */
        EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif /* FMA_SUPPORT */

    }

    pgp = hba->sli.sli3.slim2.phys +
        (uint64_t)((unsigned long)&(mbox->us.s2.port));
    ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrHigh =
        PADDR_HI(pgp);
    ((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrLow =
        PADDR_LO(pgp);

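    /*
     * Initialize the four ring descriptors in the PCB. The command
     * and response IOCB areas for each ring are carved back-to-back
     * out of the IOCBs[] region of SLIM2; 'offset' tracks the next
     * free byte in that region.
     */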
    offset = 0;
    for (i = 0; i < 4; i++) {
        rp = &hba->sli.sli3.ring[i];
        rdsc = &((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.rdsc[i];

        /* Setup command ring */
        rgp = hba->sli.sli3.slim2.phys +
            (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
        rdsc->cmdAddrHigh = PADDR_HI(rgp);
        rdsc->cmdAddrLow = PADDR_LO(rgp);
        rdsc->cmdEntries = rp->fc_numCiocb;

        rp->fc_cmdringaddr =
            (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
        offset += rdsc->cmdEntries * hba->sli.sli3.iocb_cmd_size;

        /* Setup response ring */
        rgp = hba->sli.sli3.slim2.phys +
            (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
        rdsc->rspAddrHigh = PADDR_HI(rgp);
        rdsc->rspAddrLow = PADDR_LO(rgp);
        rdsc->rspEntries = rp->fc_numRiocb;

        rp->fc_rspringaddr =
            (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
        offset += rdsc->rspEntries * hba->sli.sli3.iocb_rsp_size;
    }

    BE_SWAP32_BCOPY((uint8_t *)
        (&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
        (uint8_t *)(&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
        sizeof (PCB));

    offset = ((uint64_t)((unsigned long)
        &(((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb)) -
        (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
    EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, (off_t)offset,
        sizeof (PCB), DDI_DMA_SYNC_FORDEV);

    return (0);

} /* emlxs_mb_config_port() */


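/*
 * emlxs_hbq_setup
 *
 * Allocates and initializes one Host Buffer Queue (HBQ): posts a
 * receive buffer for each HBQ entry, issues a CONFIG_HBQ mailbox
 * command to bind the HBQ to its ring, and writes the initial put
 * index to SLIM. Returns 0 on success, 1 on failure.
 */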
static uint32_t
emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
{
    emlxs_port_t *port = &PPORT;
    HBQ_INIT_t *hbq;
    MATCHMAP *mp;
    HBQE_t *hbqE;
    MAILBOX *mb;
    MAILBOXQ *mbq;
    void *ioa2;
    uint32_t j;
    uint32_t count;
    uint32_t size;
    uint32_t ringno;
    uint32_t seg;

    switch (hbq_id) {
    case EMLXS_ELS_HBQ_ID:
        count = MEM_ELSBUF_COUNT;
        size = MEM_ELSBUF_SIZE;
        ringno = FC_ELS_RING;
        seg = MEM_ELSBUF;
        HBASTATS.ElsUbPosted = count;
        break;

    case EMLXS_IP_HBQ_ID:
        count = MEM_IPBUF_COUNT;
        size = MEM_IPBUF_SIZE;
        ringno = FC_IP_RING;
        seg = MEM_IPBUF;
        HBASTATS.IpUbPosted = count;
        break;

    case EMLXS_CT_HBQ_ID:
        count = MEM_CTBUF_COUNT;
        size = MEM_CTBUF_SIZE;
        ringno = FC_CT_RING;
        seg = MEM_CTBUF;
        HBASTATS.CtUbPosted = count;
        break;

#ifdef SFCT_SUPPORT
    case EMLXS_FCT_HBQ_ID:
        count = MEM_FCTBUF_COUNT;
        size = MEM_FCTBUF_SIZE;
        ringno = FC_FCT_RING;
        seg = MEM_FCTBUF;
        HBASTATS.FctUbPosted = count;
        break;
#endif /* SFCT_SUPPORT */

    default:
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
            "hbq_setup: Invalid HBQ id. (%x)", hbq_id);
        return (1);
    }

    /* Configure HBQ */
    hbq = &hba->sli.sli3.hbq_table[hbq_id];
    hbq->HBQ_numEntries = count;

    /* Get a mailbox buffer to set up the CONFIG_HBQ mailbox command */
    if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == 0) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
            "hbq_setup: Unable to get mailbox.");
        return (1);
    }
    mb = (MAILBOX *)mbq;

    /* Allocate the HBQ host buffer and initialize the HBQEs */
    if (emlxs_hbq_alloc(hba, hbq_id)) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
            "hbq_setup: Unable to allocate HBQ.");
        emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
        return (1);
    }

    hbq->HBQ_recvNotify = 1;
    hbq->HBQ_num_mask = 0;              /* Bind to ring */
    hbq->HBQ_profile = 0;               /* Selection profile */
                                        /* 0=all, 7=logentry */
    hbq->HBQ_ringMask = 1 << ringno;    /* Binds HBQ to a ring: */
                                        /* Ring0=b0001, Ring1=b0010, */
                                        /* Ring2=b0100 */
    hbq->HBQ_headerLen = 0;             /* 0 if not profile 4 or 5 */
    hbq->HBQ_logEntry = 0;              /* Set to 1 if this HBQ will */
                                        /* be used for LogEntry bufs */
    hbq->HBQ_id = hbq_id;
    hbq->HBQ_PutIdx_next = 0;
    hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
    hbq->HBQ_GetIdx = 0;
    hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
    bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));

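    /*
     * Each HBQE below receives a buffer tag (HBQ id plus entry index)
     * and the DMA address of a freshly allocated receive buffer. The
     * HBQE words are passed through BE_SWAP32(), presumably so the
     * entries appear in the big-endian layout the adapter expects
     * (the swap is a no-op on big-endian hosts).
     */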
    /* Fill in the POST buffers in the HBQEs */
    hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
    for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
        /* Allocate buffer to post */
        if ((mp = (MATCHMAP *)emlxs_mem_get(hba,
            seg)) == 0) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
                "hbq_setup: Unable to allocate HBQ buffer. "
                "cnt=%d", j);
            emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
            emlxs_hbq_free_all(hba, hbq_id);
            return (1);
        }

        hbq->HBQ_PostBufs[j] = mp;

        hbqE->unt.ext.HBQ_tag = hbq_id;
        hbqE->unt.ext.HBQE_tag = j;
        hbqE->bde.tus.f.bdeSize = size;
        hbqE->bde.tus.f.bdeFlags = 0;
        hbqE->unt.w = BE_SWAP32(hbqE->unt.w);
        hbqE->bde.tus.w = BE_SWAP32(hbqE->bde.tus.w);
        hbqE->bde.addrLow =
            BE_SWAP32(PADDR_LO(mp->phys));
        hbqE->bde.addrHigh =
            BE_SWAP32(PADDR_HI(mp->phys));
    }

    /* Issue CONFIG_HBQ */
    emlxs_mb_config_hbq(hba, mbq, hbq_id);
    if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "hbq_setup: Unable to config HBQ. cmd=%x status=%x",
            mb->mbxCommand, mb->mbxStatus);

        emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
        emlxs_hbq_free_all(hba, hbq_id);
        return (1);
    }

    /* Set up the HBQ put index in SLIM */
    ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
        (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
    WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, hbq->HBQ_PutIdx);

    hba->sli.sli3.hbq_count++;

    emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);

#ifdef FMA_SUPPORT
    /* Access handle validation */
    if (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
        != DDI_FM_OK) {
        EMLXS_MSGF(EMLXS_CONTEXT,
            &emlxs_invalid_access_handle_msg, NULL);
        emlxs_hbq_free_all(hba, hbq_id);
        return (1);
    }
#endif /* FMA_SUPPORT */

    return (0);

} /* emlxs_hbq_setup() */


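/*
 * emlxs_hbq_free_all
 *
 * Releases every receive buffer posted to the given HBQ back to its
 * memory segment and frees the DMA buffer holding the HBQ entries
 * themselves.
 */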
extern void
emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
{
    HBQ_INIT_t *hbq;
    MBUF_INFO *buf_info;
    MBUF_INFO bufinfo;
    uint32_t seg;
    uint32_t j;

    switch (hbq_id) {
    case EMLXS_ELS_HBQ_ID:
        seg = MEM_ELSBUF;
        HBASTATS.ElsUbPosted = 0;
        break;

    case EMLXS_IP_HBQ_ID:
        seg = MEM_IPBUF;
        HBASTATS.IpUbPosted = 0;
        break;

    case EMLXS_CT_HBQ_ID:
        seg = MEM_CTBUF;
        HBASTATS.CtUbPosted = 0;
        break;

#ifdef SFCT_SUPPORT
    case EMLXS_FCT_HBQ_ID:
        seg = MEM_FCTBUF;
        HBASTATS.FctUbPosted = 0;
        break;
#endif /* SFCT_SUPPORT */

    default:
        return;
    }

    hbq = &hba->sli.sli3.hbq_table[hbq_id];

    if (hbq->HBQ_host_buf.virt != 0) {
        for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
            emlxs_mem_put(hba, seg,
                (void *)hbq->HBQ_PostBufs[j]);
            hbq->HBQ_PostBufs[j] = NULL;
        }
        hbq->HBQ_PostBufCnt = 0;

        buf_info = &bufinfo;
        bzero(buf_info, sizeof (MBUF_INFO));

        buf_info->size = hbq->HBQ_host_buf.size;
        buf_info->virt = hbq->HBQ_host_buf.virt;
        buf_info->phys = hbq->HBQ_host_buf.phys;
        buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
        buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
        buf_info->flags = FC_MBUF_DMA;

        emlxs_mem_free(hba, buf_info);

        hbq->HBQ_host_buf.virt = NULL;
    }

    return;

} /* emlxs_hbq_free_all() */


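/*
 * emlxs_update_HBQ_index
 *
 * Advances the host put index for the given HBQ after a buffer has
 * been reposted and makes the new index visible to the adapter by
 * writing it to SLIM.
 */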
extern void
emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
{
#ifdef FMA_SUPPORT
    emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
    void *ioa2;
    uint32_t status;
    uint32_t HBQ_PortGetIdx;
    HBQ_INIT_t *hbq;

    switch (hbq_id) {
    case EMLXS_ELS_HBQ_ID:
        HBASTATS.ElsUbPosted++;
        break;

    case EMLXS_IP_HBQ_ID:
        HBASTATS.IpUbPosted++;
        break;

    case EMLXS_CT_HBQ_ID:
        HBASTATS.CtUbPosted++;
        break;

#ifdef SFCT_SUPPORT
    case EMLXS_FCT_HBQ_ID:
        HBASTATS.FctUbPosted++;
        break;
#endif /* SFCT_SUPPORT */

    default:
        return;
    }

    hbq = &hba->sli.sli3.hbq_table[hbq_id];

    hbq->HBQ_PutIdx =
        (hbq->HBQ_PutIdx + 1 >=
        hbq->HBQ_numEntries) ? 0 : hbq->HBQ_PutIdx + 1;

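    /*
     * If the new put index has caught up with our cached get index,
     * refresh the get index from the adapter's copy in SLIM2. If the
     * queue is still full, skip the SLIM update; the adapter has not
     * consumed any entries yet.
     */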
    if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
        HBQ_PortGetIdx =
            BE_SWAP32(((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.
            HBQ_PortGetIdx[hbq_id]);

        hbq->HBQ_GetIdx = HBQ_PortGetIdx;

        if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
            return;
        }
    }

    ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
        (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
    status = hbq->HBQ_PutIdx;
    WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, status);

#ifdef FMA_SUPPORT
    /* Access handle validation */
    EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
#endif /* FMA_SUPPORT */

    return;

} /* emlxs_update_HBQ_index() */


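/*
 * emlxs_sli3_enable_intr
 *
 * Builds the Host Control register mask (mailbox and error attention
 * interrupts plus one ring interrupt enable per configured ring),
 * caches it in hc_copy, and writes it to the HC register.
 */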
static void
emlxs_sli3_enable_intr(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
    emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
    uint32_t status;

    /* Enable mailbox and error attention interrupts */
    status = (uint32_t)(HC_MBINT_ENA | HC_ERINT_ENA);

    /* Enable ring interrupts */
    if (hba->sli.sli3.ring_count >= 4) {
        status |=
            (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
            HC_R0INT_ENA);
    } else if (hba->sli.sli3.ring_count == 3) {
        status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
    } else if (hba->sli.sli3.ring_count == 2) {
        status |= (HC_R1INT_ENA | HC_R0INT_ENA);
    } else if (hba->sli.sli3.ring_count == 1) {
        status |= (HC_R0INT_ENA);
    }

    hba->sli.sli3.hc_copy = status;
    WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);

#ifdef FMA_SUPPORT
    /* Access handle validation */
    EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */

} /* emlxs_sli3_enable_intr() */


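/*
 * emlxs_enable_latt
 *
 * Enables the link attention interrupt by OR-ing HC_LAINT_ENA into
 * the cached Host Control value under EMLXS_PORT_LOCK and writing it
 * back to the HC register.
 */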
static void
emlxs_enable_latt(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
    emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */

    mutex_enter(&EMLXS_PORT_LOCK);
    hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
    WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
    /* Access handle validation */
    EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */
    mutex_exit(&EMLXS_PORT_LOCK);

} /* emlxs_enable_latt() */


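/*
 * emlxs_sli3_disable_intr
 *
 * Replaces the Host Control register contents with 'att', turning
 * off all interrupt enables except any attention bits the caller
 * wants to keep set.
 */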
static void
emlxs_sli3_disable_intr(emlxs_hba_t *hba, uint32_t att)
{
#ifdef FMA_SUPPORT
    emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */

    /* Disable all adapter interrupts, except any bits passed in att */
    hba->sli.sli3.hc_copy = att;
    WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
    /* Access handle validation */
    EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */

} /* emlxs_sli3_disable_intr() */


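/*
 * emlxs_check_attention
 *
 * Reads and returns the current contents of the Host Attention (HA)
 * register.
 */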
static uint32_t
emlxs_check_attention(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
    emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
    uint32_t ha_copy;

    ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
#ifdef FMA_SUPPORT
    /* Access handle validation */
    EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */
    return (ha_copy);

} /* emlxs_check_attention() */


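/*
 * emlxs_sli3_poll_erratt
 *
 * Polls the Host Attention register for an adapter error (HA_ERATT)
 * and, if one is latched, hands it off to emlxs_handle_ff_error().
 */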
static void
emlxs_sli3_poll_erratt(emlxs_hba_t *hba)
{
    uint32_t ha_copy;

    ha_copy = emlxs_check_attention(hba);

    /* Adapter error */
    if (ha_copy & HA_ERATT) {
        HBASTATS.IntrEvent[6]++;
        emlxs_handle_ff_error(hba);
    }

} /* emlxs_sli3_poll_erratt() */


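/*
 * emlxs_sli3_reg_did_mbcmpl
 *
 * Completion handler for the REG_LOGIN64 mailbox command issued by
 * emlxs_sli3_reg_did(). On success it creates (or updates) the node
 * for the registered DID/RPI; if node creation fails, the RPI is
 * unregistered again. A fabric login completion additionally
 * triggers VPI registration and, when NPIV is active, brings the
 * vports online.
 */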
static uint32_t
emlxs_sli3_reg_did_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
    emlxs_port_t *port = (emlxs_port_t *)mbq->port;
    MAILBOXQ *mboxq;
    MAILBOX *mb;
    MATCHMAP *mp;
    NODELIST *ndlp;
    emlxs_port_t *vport;
    SERV_PARM *sp;
    int32_t i;
    uint32_t control;
    uint32_t ldata;
    uint32_t ldid;
    uint16_t lrpi;
    uint16_t lvpi;
    uint32_t rval;

    mb = (MAILBOX *)mbq;

    if (mb->mbxStatus) {
        if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
            control = mb->un.varRegLogin.un.sp.bdeSize;
            if (control == 0) {
                /* Special handling for vport PLOGI */
                if (mbq->iocbq == (uint8_t *)1) {
                    mbq->iocbq = NULL;
                }
                return (0);
            }
            emlxs_mb_retry(hba, mbq);
            return (1);
        }
        if (mb->mbxStatus == MBXERR_RPI_FULL) {
            EMLXS_MSGF(EMLXS_CONTEXT,
                &emlxs_node_create_failed_msg,
                "Limit reached. count=%d", port->node_count);
        }

        /* Special handling for vport PLOGI */
        if (mbq->iocbq == (uint8_t *)1) {
            mbq->iocbq = NULL;
        }

        return (0);
    }

    mp = (MATCHMAP *)mbq->bp;
    if (!mp) {
        return (0);
    }

    ldata = mb->un.varWords[5];
    lvpi = (ldata & 0xffff);
    port = &VPORT(lvpi);

    /* First copy the command data */
    ldata = mb->un.varWords[0];     /* get rpi */
    lrpi = ldata & 0xffff;

    ldata = mb->un.varWords[1];     /* get did */
    ldid = ldata & MASK_DID;

    sp = (SERV_PARM *)mp->virt;

    /* Create or update the node */
    ndlp = emlxs_node_create(port, ldid, lrpi, sp);

    if (ndlp == NULL) {
        emlxs_ub_priv_t *ub_priv;

        /*
         * Fake a mailbox error, so that mbox_fini
         * can take the appropriate action
         */
        mb->mbxStatus = MBXERR_RPI_FULL;
        if (mbq->ubp) {
            ub_priv = ((fc_unsol_buf_t *)mbq->ubp)->ub_fca_private;
            ub_priv->flags |= EMLXS_UB_REPLY;
        }

        /* This must be the (0xFFFFFE) DID that was registered */
        /* by the vport */
        if (lrpi == 0) {
            return (0);
        }

        if (!(mboxq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
                "reg_did_mbcmpl:failed. Unable to allocate mbox");
            return (0);
        }

        mb = (MAILBOX *)mboxq->mbox;
        mb->un.varUnregLogin.rpi = lrpi;
        mb->un.varUnregLogin.vpi = lvpi;

        mb->mbxCommand = MBX_UNREG_LOGIN;
        mb->mbxOwner = OWN_HOST;
        mboxq->sbp = NULL;
        mboxq->ubp = NULL;
        mboxq->iocbq = NULL;
        mboxq->mbox_cmpl = NULL;
        mboxq->context = NULL;
        mboxq->port = (void *)port;

        rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mboxq, MBX_NOWAIT, 0);
        if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
                "reg_did_mbcmpl:failed. Unable to send request.");

            emlxs_mem_put(hba, MEM_MBOX, (void *)mboxq);
            return (0);
        }

        return (0);
    }

    if (ndlp->nlp_DID == FABRIC_DID) {
        /* FLOGI/FDISC successfully completed on this port */
        mutex_enter(&EMLXS_PORT_LOCK);
        port->flag |= EMLXS_PORT_FLOGI_CMPL;
        mutex_exit(&EMLXS_PORT_LOCK);

        /* If CLEAR_LA has been sent, then attempt to */
        /* register the vpi now */
        if (hba->state == FC_READY) {
            (void) emlxs_mb_reg_vpi(port, NULL);
        }

        /*
         * If NPIV Fabric support has just been established on
         * the physical port, then notify the vports of the
         * link up
         */
        if ((lvpi == 0) &&
            (hba->flag & FC_NPIV_ENABLED) &&
            (hba->flag & FC_NPIV_SUPPORTED)) {
            /* Skip the physical port */
            for (i = 1; i < MAX_VPORTS; i++) {
                vport = &VPORT(i);

                if (!(vport->flag & EMLXS_PORT_BOUND) ||
                    !(vport->flag &
                    EMLXS_PORT_ENABLED)) {
                    continue;
                }

                emlxs_port_online(vport);
            }
        }
    }

    /* Check for special restricted login flag */
    if (mbq->iocbq == (uint8_t *)1) {
        mbq->iocbq = NULL;
        (void) EMLXS_SLI_UNREG_NODE(port, ndlp, NULL, NULL, NULL);
        return (0);
    }

    /* Needed for FCT trigger in emlxs_mb_deferred_cmpl */
    if (mbq->sbp) {
        ((emlxs_buf_t *)mbq->sbp)->node = ndlp;
    }

#ifdef DHCHAP_SUPPORT
    if (mbq->sbp || mbq->ubp) {
        if (emlxs_dhc_auth_start(port, ndlp, mbq->sbp,
            mbq->ubp) == 0) {
            /* Auth started - auth completion will */
            /* handle sbp and ubp now */
            mbq->sbp = NULL;
            mbq->ubp = NULL;
        }
    }
#endif /* DHCHAP_SUPPORT */

    return (0);

} /* emlxs_sli3_reg_did_mbcmpl() */


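/*
 * emlxs_sli3_reg_did
 *
 * Validates the service parameters for the given DID and issues a
 * REG_LOGIN64 mailbox command to register the remote port, deferring
 * node creation to emlxs_sli3_reg_did_mbcmpl(). Returns 0 if the
 * mailbox command was issued, 1 on failure.
 */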
static uint32_t
emlxs_sli3_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
    emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
    emlxs_hba_t *hba = HBA;
    MATCHMAP *mp;
    MAILBOXQ *mbq;
    MAILBOX *mb;
    uint32_t rval;

    /* Check for invalid node ids to register */
    if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
        return (1);
    }

    if (did & 0xff000000) {
        return (1);
    }

    if ((rval = emlxs_mb_check_sparm(hba, param))) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
            "Invalid service parameters. did=%06x rval=%d", did,
            rval);

        return (1);
    }

    /* Check if the node limit has been reached */
    if (port->node_count >= hba->max_nodes) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
            "Limit reached. did=%06x count=%d", did,
            port->node_count);

        return (1);
    }

    if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
            "Unable to allocate mailbox. did=%x", did);

        return (1);
    }
    mb = (MAILBOX *)mbq->mbox;
    bzero((void *)mb, MAILBOX_CMD_BSIZE);

    /* Build the login request */
    if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
        emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
            "Unable to allocate buffer. did=%x", did);
        return (1);
    }
    bcopy((void *)param, (void *)mp->virt, sizeof (SERV_PARM));

    mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM);
    mb->un.varRegLogin.un.sp64.addrHigh = PADDR_HI(mp->phys);
    mb->un.varRegLogin.un.sp64.addrLow = PADDR_LO(mp->phys);
    mb->un.varRegLogin.did = did;
    mb->un.varWords[30] = 0;    /* flags */
    mb->mbxCommand = MBX_REG_LOGIN64;
    mb->mbxOwner = OWN_HOST;
    mb->un.varRegLogin.vpi = port->vpi;
    mb->un.varRegLogin.rpi = 0;

    mbq->sbp = (void *)sbp;
    mbq->ubp = (void *)ubp;
    mbq->iocbq = (void *)iocbq;
    mbq->bp = (void *)mp;
    mbq->mbox_cmpl = emlxs_sli3_reg_did_mbcmpl;
    mbq->context = NULL;
    mbq->port = (void *)port;

    rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
    if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
        emlxs_mem_put(hba, MEM_BUF, (void *)mp);
        emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
            "Unable to send mbox. did=%x", did);
        return (1);
    }

    return (0);

} /* emlxs_sli3_reg_did() */


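/*
 * emlxs_sli3_unreg_node_mbcmpl
 *
 * Completion handler for UNREG_LOGIN. On success it removes the node
 * referenced by the mailbox context, or destroys all of the port's
 * nodes when no specific node was given.
 */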
/*ARGSUSED*/
static uint32_t
emlxs_sli3_unreg_node_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
    emlxs_port_t *port = (emlxs_port_t *)mbq->port;
    MAILBOX *mb;
    NODELIST *node;
    uint16_t rpi;

    node = (NODELIST *)mbq->context;
    mb = (MAILBOX *)mbq;
    rpi = (node) ? node->nlp_Rpi : 0xffff;

    if (mb->mbxStatus) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
            "unreg_node_mbcmpl:failed. node=%p rpi=%d status=%x",
            node, rpi, mb->mbxStatus);

        return (0);
    }

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
        "unreg_node_mbcmpl: node=%p rpi=%d",
        node, rpi);

    if (node) {
        emlxs_node_rm(port, node);

    } else {    /* All nodes */
        emlxs_node_destroy_all(port);
    }

    return (0);

} /* emlxs_sli3_unreg_node_mbcmpl() */


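/*
 * emlxs_sli3_unreg_node
 *
 * Issues an UNREG_LOGIN mailbox command for the given node, or for
 * all nodes when 'node' is NULL (rpi=0xffff). The base node is only
 * flushed locally and is never unregistered with the adapter.
 * Returns 0 if the request was handled via mailbox; nonzero
 * otherwise.
 */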
static uint32_t
emlxs_sli3_unreg_node(emlxs_port_t *port, NODELIST *node, emlxs_buf_t *sbp,
    fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
    emlxs_hba_t *hba = HBA;
    MAILBOXQ *mbq;
    MAILBOX *mb;
    uint16_t rpi;
    uint32_t rval;

    if (node) {
        /* Check for base node */
        if (node == &port->node_base) {
            /* Just flush the base node */
            (void) emlxs_tx_node_flush(port, &port->node_base,
                0, 0, 0);
            (void) emlxs_chipq_node_flush(port, 0,
                &port->node_base, 0);

            port->did = 0;

            /* Return now */
            return (1);
        }

        rpi = (uint16_t)node->nlp_Rpi;

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
            "unreg_node:%p rpi=%d", node, rpi);

        /* This node must be the (0xFFFFFE) DID, which was */
        /* registered by the vport */
        if (rpi == 0) {
            emlxs_node_rm(port, node);
            return (0);
        }

    } else {    /* Unreg all nodes */
        rpi = 0xffff;

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
            "unreg_node: All");
    }

    if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
            "unreg_node:failed. Unable to allocate mbox");
        return (1);
    }

    mb = (MAILBOX *)mbq->mbox;
    mb->un.varUnregLogin.rpi = rpi;
    mb->un.varUnregLogin.vpi = port->vpip->VPI;

    mb->mbxCommand = MBX_UNREG_LOGIN;
    mb->mbxOwner = OWN_HOST;
    mbq->sbp = (void *)sbp;
    mbq->ubp = (void *)ubp;
    mbq->iocbq = (void *)iocbq;
    mbq->mbox_cmpl = emlxs_sli3_unreg_node_mbcmpl;
    mbq->context = (void *)node;
    mbq->port = (void *)port;

    rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
    if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
            "unreg_node:failed. Unable to send request.");

        emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
        return (1);
    }

    return (0);

} /* emlxs_sli3_unreg_node() */