Print this page
NEX-8705 Drivers for ATTO Celerity FC-162E Gen 5 and Celerity FC-162P Gen 6 16Gb FC cards support
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
NEX-1878 update emlxs from source provided by Emulex
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli3.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli3.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at
9 9 * http://www.opensource.org/licenses/cddl1.txt.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #include <emlxs.h>
28 28
29 29 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
30 30 EMLXS_MSG_DEF(EMLXS_SLI3_C);
31 31
/*
 * Forward declarations for routines private to this file.  Most of
 * these are exported only indirectly, through the emlxs_sli3_api
 * dispatch table defined below.
 */

/* IOCB ring / event handling */
32 32 static void emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq);
33 33 static void emlxs_sli3_handle_link_event(emlxs_hba_t *hba);
34 34 static void emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
35 35 	uint32_t ha_copy);
36 36 #ifdef SFCT_SUPPORT
37 37 static uint32_t emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
38 38 #endif /* SFCT_SUPPORT */
39 39
40 40 static uint32_t emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp);
41 41
/* Tunable: non-zero disables the "traffic cop" IOCB scheduling logic */
42 42 static uint32_t emlxs_disable_traffic_cop = 1;
43 43
/* Hardware map / bring-up / tear-down */
44 44 static int			emlxs_sli3_map_hdw(emlxs_hba_t *hba);
45 45
46 46 static void			emlxs_sli3_unmap_hdw(emlxs_hba_t *hba);
47 47
48 48 static int32_t			emlxs_sli3_online(emlxs_hba_t *hba);
49 49
50 50 static void			emlxs_sli3_offline(emlxs_hba_t *hba,
51 51 					uint32_t reset_requested);
52 52
53 53 static uint32_t			emlxs_sli3_hba_reset(emlxs_hba_t *hba,
54 54 					uint32_t restart, uint32_t skip_post,
55 55 					uint32_t quiesce);
56 56
57 57 static void			emlxs_sli3_hba_kill(emlxs_hba_t *hba);
58 58 static void			emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba);
59 59 static uint32_t			emlxs_sli3_hba_init(emlxs_hba_t *hba);
60 60
/* Buffer descriptor (BDE) setup — separate SLI2 and SLI3 variants */
61 61 static uint32_t			emlxs_sli2_bde_setup(emlxs_port_t *port,
62 62 					emlxs_buf_t *sbp);
63 63 static uint32_t			emlxs_sli3_bde_setup(emlxs_port_t *port,
64 64 					emlxs_buf_t *sbp);
65 65 static uint32_t			emlxs_sli2_fct_bde_setup(emlxs_port_t *port,
66 66 					emlxs_buf_t *sbp);
67 67 static uint32_t			emlxs_sli3_fct_bde_setup(emlxs_port_t *port,
68 68 					emlxs_buf_t *sbp);
69 69
70 70
/* Command issue paths: IOCB and mailbox */
71 71 static void			emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba,
72 72 					CHANNEL *rp, IOCBQ *iocb_cmd);
73 73
74 74
75 75 static uint32_t			emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba,
76 76 					MAILBOXQ *mbq, int32_t flg,
77 77 					uint32_t tmo);
78 78
79 79
/* Per-protocol IOCB preparation (FCT target mode is conditional) */
80 80 #ifdef SFCT_SUPPORT
81 81 static uint32_t			emlxs_sli3_prep_fct_iocb(emlxs_port_t *port,
82 82 					emlxs_buf_t *cmd_sbp, int channel);
83 83
84 84 #endif /* SFCT_SUPPORT */
85 85
86 86 static uint32_t			emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port,
87 87 					emlxs_buf_t *sbp, int ring);
88 88
89 89 static uint32_t			emlxs_sli3_prep_ip_iocb(emlxs_port_t *port,
90 90 					emlxs_buf_t *sbp);
91 91
92 92 static uint32_t			emlxs_sli3_prep_els_iocb(emlxs_port_t *port,
93 93 					emlxs_buf_t *sbp);
94 94
95 95
96 96 static uint32_t			emlxs_sli3_prep_ct_iocb(emlxs_port_t *port,
97 97 					emlxs_buf_t *sbp);
98 98
99 99
/* Interrupt service: polled, INTx, and (optionally) MSI */
100 100 static void			emlxs_sli3_poll_intr(emlxs_hba_t *hba);
101 101
102 102 static int32_t			emlxs_sli3_intx_intr(char *arg);
103 103 #ifdef MSI_SUPPORT
104 104 static uint32_t			emlxs_sli3_msi_intr(char *arg1, char *arg2);
105 105 #endif /* MSI_SUPPORT */
106 106
107 107 static void			emlxs_sli3_enable_intr(emlxs_hba_t *hba);
108 108
109 109 static void			emlxs_sli3_disable_intr(emlxs_hba_t *hba,
110 110 					uint32_t att);
111 111
112 112
/* Error, mailbox-event, and attention processing helpers */
113 113 static void			emlxs_handle_ff_error(emlxs_hba_t *hba);
114 114
115 115 static uint32_t			emlxs_handle_mb_event(emlxs_hba_t *hba);
116 116
117 117 static void			emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba);
118 118
119 119 static uint32_t			emlxs_mb_config_port(emlxs_hba_t *hba,
120 120 					MAILBOXQ *mbq, uint32_t sli_mode,
121 121 					uint32_t hbainit);
122 122 static void			emlxs_enable_latt(emlxs_hba_t *hba);
123 123
124 124 static uint32_t			emlxs_check_attention(emlxs_hba_t *hba);
125 125
126 126 static uint32_t			emlxs_get_attention(emlxs_hba_t *hba,
127 127 					int32_t msgid);
128 128 static void			emlxs_proc_attention(emlxs_hba_t *hba,
129 129 					uint32_t ha_copy);
130 130 /* static int		emlxs_handle_rcv_seq(emlxs_hba_t *hba, */
131 131 /* CHANNEL *cp, IOCBQ *iocbq); */
132 132 /* static void	emlxs_update_HBQ_index(emlxs_hba_t *hba, */
133 133 /* uint32_t hbq_id); */
134 134 /* static void	emlxs_hbq_free_all(emlxs_hba_t *hba, */
135 135 /* uint32_t hbq_id); */
136 136 static uint32_t			emlxs_hbq_setup(emlxs_hba_t *hba,
137 137 					uint32_t hbq_id);
/* Timer / error-attention polling */
138 138 static void			emlxs_sli3_timer(emlxs_hba_t *hba);
139 139
140 140 static void			emlxs_sli3_poll_erratt(emlxs_hba_t *hba);
141 141
/* Fabric registration: DID registration and node unregistration */
142 142 static uint32_t			emlxs_sli3_reg_did(emlxs_port_t *port,
143 143 					uint32_t did, SERV_PARM *param,
144 144 					emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
145 145 					IOCBQ *iocbq);
146 146
147 147 static uint32_t			emlxs_sli3_unreg_node(emlxs_port_t *port,
148 148 					NODELIST *node, emlxs_buf_t *sbp,
149 149 					fc_unsol_buf_t *ubp, IOCBQ *iocbq);
150 150
151 151
151 151
/*
 * SLI3 entry-point dispatch table.
 *
 * This is a positional initializer, so the slot order below must match
 * the member layout of emlxs_sli_api_t exactly.  When SFCT_SUPPORT is
 * not compiled in, the fct-IOCB-prep slot is filled with NULL; callers
 * of that slot are presumably also compiled out in that configuration
 * — verify before relying on it.
 */
152 152 /* Define SLI3 API functions */
153 153 emlxs_sli_api_t emlxs_sli3_api = {
154 154 	emlxs_sli3_map_hdw,
155 155 	emlxs_sli3_unmap_hdw,
156 156 	emlxs_sli3_online,
157 157 	emlxs_sli3_offline,
158 158 	emlxs_sli3_hba_reset,
159 159 	emlxs_sli3_hba_kill,
160 160 	emlxs_sli3_issue_iocb_cmd,
161 161 	emlxs_sli3_issue_mbox_cmd,
162 162 #ifdef SFCT_SUPPORT
163 163 	emlxs_sli3_prep_fct_iocb,
164 164 #else
165 165 	NULL,
166 166 #endif /* SFCT_SUPPORT */
167 167 	emlxs_sli3_prep_fcp_iocb,
168 168 	emlxs_sli3_prep_ip_iocb,
169 169 	emlxs_sli3_prep_els_iocb,
170 170 	emlxs_sli3_prep_ct_iocb,
171 171 	emlxs_sli3_poll_intr,
172 172 	emlxs_sli3_intx_intr,
173 173 	emlxs_sli3_msi_intr,
174 174 	emlxs_sli3_disable_intr,
175 175 	emlxs_sli3_timer,
176 176 	emlxs_sli3_poll_erratt,
177 177 	emlxs_sli3_reg_did,
178 178 	emlxs_sli3_unreg_node
179 179 };
180 180
181 181
182 182 /*
183 183 * emlxs_sli3_online()
184 184 *
185 185 * This routine will start initialization of the SLI2/3 HBA.
186 186 */
187 187 static int32_t
188 188 emlxs_sli3_online(emlxs_hba_t *hba)
189 189 {
190 190 emlxs_port_t *port = &PPORT;
191 191 emlxs_config_t *cfg;
192 192 emlxs_vpd_t *vpd;
193 193 MAILBOX *mb = NULL;
194 194 MAILBOXQ *mbq = NULL;
195 195 RING *rp;
196 196 CHANNEL *cp;
197 197 MATCHMAP *mp = NULL;
198 198 MATCHMAP *mp1 = NULL;
199 199 uint8_t *inptr;
200 200 uint8_t *outptr;
201 201 uint32_t status;
202 202 uint16_t i;
203 203 uint32_t j;
204 204 uint32_t read_rev_reset;
205 205 uint32_t key = 0;
206 206 uint32_t fw_check;
207 207 uint32_t kern_update = 0;
208 208 uint32_t rval = 0;
209 209 uint32_t offset;
210 210 uint8_t vpd_data[DMP_VPD_SIZE];
211 211 uint32_t MaxRbusSize;
212 212 uint32_t MaxIbusSize;
213 213 uint32_t sli_mode;
214 214 uint32_t sli_mode_mask;
215 215
216 216 cfg = &CFG;
217 217 vpd = &VPD;
218 218 MaxRbusSize = 0;
219 219 MaxIbusSize = 0;
220 220 read_rev_reset = 0;
221 221 hba->chan_count = MAX_RINGS;
222 222
223 223 if (hba->bus_type == SBUS_FC) {
224 224 (void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
225 225 }
226 226
227 227 /* Set the fw_check flag */
228 228 fw_check = cfg[CFG_FW_CHECK].current;
229 229
230 230 if ((fw_check & 0x04) ||
231 231 (hba->fw_flag & FW_UPDATE_KERNEL)) {
232 232 kern_update = 1;
233 233 }
234 234
235 235 hba->mbox_queue_flag = 0;
236 236 hba->sli.sli3.hc_copy = 0;
237 237 hba->fc_edtov = FF_DEF_EDTOV;
238 238 hba->fc_ratov = FF_DEF_RATOV;
239 239 hba->fc_altov = FF_DEF_ALTOV;
240 240 hba->fc_arbtov = FF_DEF_ARBTOV;
241 241
242 242 /*
243 243 * Get a buffer which will be used repeatedly for mailbox commands
244 244 */
245 245 mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
246 246
247 247 mb = (MAILBOX *)mbq;
248 248
249 249 /* Initialize sli mode based on configuration parameter */
250 250 switch (cfg[CFG_SLI_MODE].current) {
251 251 case 2: /* SLI2 mode */
252 252 sli_mode = EMLXS_HBA_SLI2_MODE;
253 253 sli_mode_mask = EMLXS_SLI2_MASK;
254 254 break;
255 255
256 256 case 3: /* SLI3 mode */
257 257 sli_mode = EMLXS_HBA_SLI3_MODE;
258 258 sli_mode_mask = EMLXS_SLI3_MASK;
259 259 break;
260 260
261 261 case 0: /* Best available */
262 262 case 1: /* Best available */
263 263 default:
264 264 if (hba->model_info.sli_mask & EMLXS_SLI3_MASK) {
265 265 sli_mode = EMLXS_HBA_SLI3_MODE;
266 266 sli_mode_mask = EMLXS_SLI3_MASK;
267 267 } else if (hba->model_info.sli_mask & EMLXS_SLI2_MASK) {
268 268 sli_mode = EMLXS_HBA_SLI2_MODE;
269 269 sli_mode_mask = EMLXS_SLI2_MASK;
270 270 } else {
271 271 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
272 272 "No SLI mode available.");
273 273 rval = EIO;
274 274 goto failed;
275 275 }
276 276 break;
277 277 }
278 278 /* SBUS adapters only available in SLI2 */
279 279 if (hba->bus_type == SBUS_FC) {
280 280 sli_mode = EMLXS_HBA_SLI2_MODE;
281 281 sli_mode_mask = EMLXS_SLI2_MASK;
282 282 }
283 283
284 284 reset:
285 285 /* Reset & Initialize the adapter */
286 286 if (emlxs_sli3_hba_init(hba)) {
287 287 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
288 288 "Unable to init hba.");
289 289
290 290 rval = EIO;
291 291 goto failed;
292 292 }
293 293
294 294 #ifdef FMA_SUPPORT
295 295 /* Access handle validation */
296 296 if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
297 297 != DDI_FM_OK) ||
298 298 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
299 299 != DDI_FM_OK) ||
300 300 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
301 301 != DDI_FM_OK)) {
302 302 EMLXS_MSGF(EMLXS_CONTEXT,
303 303 &emlxs_invalid_access_handle_msg, NULL);
304 304
305 305 rval = EIO;
306 306 goto failed;
307 307 }
308 308 #endif /* FMA_SUPPORT */
309 309
310 310 /* Check for PEGASUS (This is a special case) */
311 311 /* We need to check for dual channel adapter */
312 312 if (hba->model_info.device_id == PCI_DEVICE_ID_PEGASUS) {
313 313 /* Try to determine if this is a DC adapter */
314 314 if (emlxs_get_max_sram(hba, &MaxRbusSize, &MaxIbusSize) == 0) {
315 315 if (MaxRbusSize == REDUCED_SRAM_CFG) {
316 316 /* LP9802DC */
317 317 for (i = 1; i < emlxs_pci_model_count; i++) {
318 318 if (emlxs_pci_model[i].id == LP9802DC) {
319 319 bcopy(&emlxs_pci_model[i],
320 320 &hba->model_info,
321 321 sizeof (emlxs_model_t));
322 322 break;
323 323 }
324 324 }
325 325 } else if (hba->model_info.id != LP9802) {
326 326 /* LP9802 */
327 327 for (i = 1; i < emlxs_pci_model_count; i++) {
328 328 if (emlxs_pci_model[i].id == LP9802) {
329 329 bcopy(&emlxs_pci_model[i],
330 330 &hba->model_info,
331 331 sizeof (emlxs_model_t));
332 332 break;
333 333 }
334 334 }
335 335 }
336 336 }
337 337 }
338 338
339 339 /*
340 340 * Setup and issue mailbox READ REV command
341 341 */
342 342 vpd->opFwRev = 0;
343 343 vpd->postKernRev = 0;
344 344 vpd->sli1FwRev = 0;
345 345 vpd->sli2FwRev = 0;
346 346 vpd->sli3FwRev = 0;
347 347 vpd->sli4FwRev = 0;
348 348
349 349 vpd->postKernName[0] = 0;
350 350 vpd->opFwName[0] = 0;
351 351 vpd->sli1FwName[0] = 0;
352 352 vpd->sli2FwName[0] = 0;
353 353 vpd->sli3FwName[0] = 0;
354 354 vpd->sli4FwName[0] = 0;
355 355
356 356 vpd->opFwLabel[0] = 0;
357 357 vpd->sli1FwLabel[0] = 0;
358 358 vpd->sli2FwLabel[0] = 0;
359 359 vpd->sli3FwLabel[0] = 0;
360 360 vpd->sli4FwLabel[0] = 0;
361 361
362 362 /* Sanity check */
363 363 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
364 364 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
365 365 "Adapter / SLI mode mismatch mask:x%x",
366 366 hba->model_info.sli_mask);
367 367
368 368 rval = EIO;
369 369 goto failed;
370 370 }
371 371
372 372 EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
373 373 emlxs_mb_read_rev(hba, mbq, 0);
374 374 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
375 375 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
376 376 "Unable to read rev. Mailbox cmd=%x status=%x",
377 377 mb->mbxCommand, mb->mbxStatus);
378 378
379 379 rval = EIO;
380 380 goto failed;
381 381 }
382 382
383 383 if (mb->un.varRdRev.rr == 0) {
384 384 /* Old firmware */
385 385 if (read_rev_reset == 0) {
386 386 read_rev_reset = 1;
387 387
388 388 goto reset;
389 389 } else {
390 390 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
391 391 "Outdated firmware detected.");
392 392 }
393 393
394 394 vpd->rBit = 0;
395 395 } else {
396 396 if (mb->un.varRdRev.un.b.ProgType != FUNC_FIRMWARE) {
397 397 if (read_rev_reset == 0) {
398 398 read_rev_reset = 1;
399 399
400 400 goto reset;
401 401 } else {
402 402 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
403 403 "Non-operational firmware detected. "
404 404 "type=%x",
405 405 mb->un.varRdRev.un.b.ProgType);
406 406 }
407 407 }
408 408
409 409 vpd->rBit = 1;
410 410 vpd->sli1FwRev = mb->un.varRdRev.sliFwRev1;
411 411 bcopy((char *)mb->un.varRdRev.sliFwName1, vpd->sli1FwLabel,
412 412 16);
413 413 vpd->sli2FwRev = mb->un.varRdRev.sliFwRev2;
414 414 bcopy((char *)mb->un.varRdRev.sliFwName2, vpd->sli2FwLabel,
415 415 16);
416 416
417 417 /*
418 418 * Lets try to read the SLI3 version
419 419 * Setup and issue mailbox READ REV(v3) command
420 420 */
421 421 EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
422 422
423 423 /* Reuse mbq from previous mbox */
424 424 bzero(mbq, sizeof (MAILBOXQ));
425 425
426 426 emlxs_mb_read_rev(hba, mbq, 1);
427 427
428 428 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
429 429 MBX_SUCCESS) {
430 430 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
431 431 "Unable to read rev (v3). Mailbox cmd=%x status=%x",
432 432 mb->mbxCommand, mb->mbxStatus);
433 433
434 434 rval = EIO;
435 435 goto failed;
436 436 }
437 437
438 438 if (mb->un.varRdRev.rf3) {
439 439 /*
440 440 * vpd->sli2FwRev = mb->un.varRdRev.sliFwRev1;
441 441 * Not needed
442 442 */
443 443 vpd->sli3FwRev = mb->un.varRdRev.sliFwRev2;
444 444 bcopy((char *)mb->un.varRdRev.sliFwName2,
445 445 vpd->sli3FwLabel, 16);
446 446 }
447 447 }
448 448
449 449 if ((sli_mode == EMLXS_HBA_SLI3_MODE) && (vpd->sli3FwRev == 0)) {
450 450 if (vpd->sli2FwRev) {
451 451 sli_mode = EMLXS_HBA_SLI2_MODE;
452 452 sli_mode_mask = EMLXS_SLI2_MASK;
453 453 } else {
454 454 sli_mode = 0;
455 455 sli_mode_mask = 0;
456 456 }
457 457 }
458 458
459 459 else if ((sli_mode == EMLXS_HBA_SLI2_MODE) && (vpd->sli2FwRev == 0)) {
460 460 if (vpd->sli3FwRev) {
461 461 sli_mode = EMLXS_HBA_SLI3_MODE;
462 462 sli_mode_mask = EMLXS_SLI3_MASK;
463 463 } else {
464 464 sli_mode = 0;
465 465 sli_mode_mask = 0;
466 466 }
467 467 }
468 468
469 469 if (!(hba->model_info.sli_mask & sli_mode_mask)) {
470 470 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
471 471 "Firmware not available. sli-mode=%d",
472 472 cfg[CFG_SLI_MODE].current);
473 473
474 474 rval = EIO;
475 475 goto failed;
476 476 }
477 477
478 478 /* Save information as VPD data */
479 479 vpd->postKernRev = mb->un.varRdRev.postKernRev;
480 480 vpd->opFwRev = mb->un.varRdRev.opFwRev;
481 481 bcopy((char *)mb->un.varRdRev.opFwName, vpd->opFwLabel, 16);
482 482 vpd->biuRev = mb->un.varRdRev.biuRev;
483 483 vpd->smRev = mb->un.varRdRev.smRev;
484 484 vpd->smFwRev = mb->un.varRdRev.un.smFwRev;
485 485 vpd->endecRev = mb->un.varRdRev.endecRev;
486 486 vpd->fcphHigh = mb->un.varRdRev.fcphHigh;
487 487 vpd->fcphLow = mb->un.varRdRev.fcphLow;
488 488 vpd->feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
489 489 vpd->feaLevelLow = mb->un.varRdRev.feaLevelLow;
490 490
491 491 /* Decode FW names */
492 492 emlxs_decode_version(vpd->postKernRev, vpd->postKernName,
493 493 sizeof (vpd->postKernName));
494 494 emlxs_decode_version(vpd->opFwRev, vpd->opFwName,
495 495 sizeof (vpd->opFwName));
496 496 emlxs_decode_version(vpd->sli1FwRev, vpd->sli1FwName,
497 497 sizeof (vpd->sli1FwName));
498 498 emlxs_decode_version(vpd->sli2FwRev, vpd->sli2FwName,
499 499 sizeof (vpd->sli2FwName));
500 500 emlxs_decode_version(vpd->sli3FwRev, vpd->sli3FwName,
501 501 sizeof (vpd->sli3FwName));
502 502 emlxs_decode_version(vpd->sli4FwRev, vpd->sli4FwName,
503 503 sizeof (vpd->sli4FwName));
504 504
505 505 /* Decode FW labels */
506 506 emlxs_decode_label(vpd->opFwLabel, vpd->opFwLabel, 1,
507 507 sizeof (vpd->opFwLabel));
508 508 emlxs_decode_label(vpd->sli1FwLabel, vpd->sli1FwLabel, 1,
509 509 sizeof (vpd->sli1FwLabel));
510 510 emlxs_decode_label(vpd->sli2FwLabel, vpd->sli2FwLabel, 1,
511 511 sizeof (vpd->sli2FwLabel));
512 512 emlxs_decode_label(vpd->sli3FwLabel, vpd->sli3FwLabel, 1,
513 513 sizeof (vpd->sli3FwLabel));
514 514 emlxs_decode_label(vpd->sli4FwLabel, vpd->sli4FwLabel, 1,
515 515 sizeof (vpd->sli4FwLabel));
516 516
517 517 /* Reuse mbq from previous mbox */
518 518 bzero(mbq, sizeof (MAILBOXQ));
519 519
520 520 key = emlxs_get_key(hba, mbq);
521 521
522 522 /* Get adapter VPD information */
523 523 offset = 0;
524 524 bzero(vpd_data, sizeof (vpd_data));
525 525 vpd->port_index = (uint32_t)-1;
526 526
527 527 while (offset < DMP_VPD_SIZE) {
528 528 /* Reuse mbq from previous mbox */
529 529 bzero(mbq, sizeof (MAILBOXQ));
530 530
531 531 emlxs_mb_dump_vpd(hba, mbq, offset);
532 532 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
533 533 MBX_SUCCESS) {
534 534 /*
535 535 * Let it go through even if failed.
536 536 * Not all adapter's have VPD info and thus will
537 537 * fail here. This is not a problem
538 538 */
539 539
540 540 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
541 541 "No VPD found. offset=%x status=%x", offset,
542 542 mb->mbxStatus);
543 543 break;
544 544 } else {
545 545 if (mb->un.varDmp.ra == 1) {
546 546 uint32_t *lp1, *lp2;
547 547 uint32_t bsize;
548 548 uint32_t wsize;
549 549
550 550 /*
551 551 * mb->un.varDmp.word_cnt is actually byte
552 552 * count for the dump reply
553 553 */
554 554 bsize = mb->un.varDmp.word_cnt;
555 555
556 556 /* Stop if no data was received */
557 557 if (bsize == 0) {
558 558 break;
559 559 }
560 560
561 561 /* Check limit on byte size */
562 562 bsize = (bsize >
563 563 (sizeof (vpd_data) - offset)) ?
564 564 (sizeof (vpd_data) - offset) : bsize;
565 565
566 566 /*
567 567 * Convert size from bytes to words with
568 568 * minimum of 1 word
569 569 */
570 570 wsize = (bsize > 4) ? (bsize >> 2) : 1;
571 571
572 572 /*
573 573 * Transfer data into vpd_data buffer one
574 574 * word at a time
575 575 */
576 576 lp1 = (uint32_t *)&mb->un.varDmp.resp_offset;
577 577 lp2 = (uint32_t *)&vpd_data[offset];
578 578
579 579 for (i = 0; i < wsize; i++) {
580 580 status = *lp1++;
581 581 *lp2++ = BE_SWAP32(status);
582 582 }
583 583
584 584 /* Increment total byte count saved */
585 585 offset += (wsize << 2);
586 586
587 587 /*
588 588 * Stop if less than a full transfer was
589 589 * received
590 590 */
591 591 if (wsize < DMP_VPD_DUMP_WCOUNT) {
592 592 break;
593 593 }
594 594
595 595 } else {
596 596 EMLXS_MSGF(EMLXS_CONTEXT,
597 597 &emlxs_init_debug_msg,
598 598 "No VPD acknowledgment. offset=%x",
599 599 offset);
600 600 break;
601 601 }
602 602 }
603 603
604 604 }
605 605
606 606 if (vpd_data[0]) {
607 607 (void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data, offset);
608 608
609 609 /*
610 610 * If there is a VPD part number, and it does not
611 611 * match the current default HBA model info,
612 612 * replace the default data with an entry that
613 613 * does match.
614 614 *
615 615 * After emlxs_parse_vpd model holds the VPD value
616 616 * for V2 and part_num hold the value for PN. These
617 617 * 2 values are NOT necessarily the same.
618 618 */
619 619
620 620 rval = 0;
621 621 if ((vpd->model[0] != 0) &&
622 622 (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
623 623
624 624 /* First scan for a V2 match */
625 625
626 626 for (i = 1; i < emlxs_pci_model_count; i++) {
627 627 if (strcmp(&vpd->model[0],
628 628 emlxs_pci_model[i].model) == 0) {
629 629 bcopy(&emlxs_pci_model[i],
630 630 &hba->model_info,
631 631 sizeof (emlxs_model_t));
632 632 rval = 1;
633 633 break;
634 634 }
635 635 }
636 636 }
637 637
638 638 if (!rval && (vpd->part_num[0] != 0) &&
639 639 (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
640 640
641 641 /* Next scan for a PN match */
642 642
643 643 for (i = 1; i < emlxs_pci_model_count; i++) {
644 644 if (strcmp(&vpd->part_num[0],
645 645 emlxs_pci_model[i].model) == 0) {
646 646 bcopy(&emlxs_pci_model[i],
647 647 &hba->model_info,
648 648 sizeof (emlxs_model_t));
649 649 break;
650 650 }
651 651 }
652 652 }
653 653
654 654 /*
655 655 * Now lets update hba->model_info with the real
656 656 * VPD data, if any.
657 657 */
658 658
659 659 /*
660 660 * Replace the default model description with vpd data
661 661 */
662 662 if (vpd->model_desc[0] != 0) {
663 663 (void) strncpy(hba->model_info.model_desc,
664 664 vpd->model_desc,
665 665 (sizeof (hba->model_info.model_desc)-1));
666 666 }
667 667
668 668 /* Replace the default model with vpd data */
669 669 if (vpd->model[0] != 0) {
670 670 (void) strncpy(hba->model_info.model, vpd->model,
671 671 (sizeof (hba->model_info.model)-1));
672 672 }
673 673
674 674 /* Replace the default program types with vpd data */
675 675 if (vpd->prog_types[0] != 0) {
676 676 emlxs_parse_prog_types(hba, vpd->prog_types);
677 677 }
678 678 }
679 679
680 680 /*
681 681 * Since the adapter model may have changed with the vpd data
682 682 * lets double check if adapter is not supported
683 683 */
684 684 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
685 685 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
686 686 "Unsupported adapter found. "
687 687 "Id:%d Device id:0x%x SSDID:0x%x Model:%s",
688 688 hba->model_info.id, hba->model_info.device_id,
689 689 hba->model_info.ssdid, hba->model_info.model);
690 690
691 691 rval = EIO;
692 692 goto failed;
693 693 }
694 694
695 695 /* Read the adapter's wakeup parms */
696 696 (void) emlxs_read_wakeup_parms(hba, &hba->wakeup_parms, 1);
697 697 emlxs_decode_version(hba->wakeup_parms.u0.boot_bios_wd[0],
698 698 vpd->boot_version, sizeof (vpd->boot_version));
699 699
700 700 /* Get fcode version property */
701 701 emlxs_get_fcode_version(hba);
702 702
703 703 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
704 704 "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
705 705 vpd->opFwRev, vpd->sli1FwRev);
706 706
707 707 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
708 708 "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
709 709 vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
710 710
711 711 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
712 712 "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
713 713
714 714 /*
715 715 * If firmware checking is enabled and the adapter model indicates
716 716 * a firmware image, then perform firmware version check
717 717 */
718 718 hba->fw_flag = 0;
719 719 hba->fw_timer = 0;
720 720
721 721 if (((fw_check & 0x1) &&
722 722 (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
723 723 hba->model_info.fwid) || ((fw_check & 0x2) &&
724 724 hba->model_info.fwid)) {
725 725 emlxs_firmware_t *fw;
726 726
727 727 /* Find firmware image indicated by adapter model */
728 728 fw = NULL;
729 729 for (i = 0; i < emlxs_fw_count; i++) {
730 730 if (emlxs_fw_table[i].id == hba->model_info.fwid) {
731 731 fw = &emlxs_fw_table[i];
732 732 break;
733 733 }
734 734 }
735 735
736 736 /*
737 737 * If the image was found, then verify current firmware
738 738 * versions of adapter
739 739 */
740 740 if (fw) {
741 741 if (!kern_update &&
742 742 ((fw->kern && (vpd->postKernRev != fw->kern)) ||
743 743 (fw->stub && (vpd->opFwRev != fw->stub)))) {
744 744
745 745 hba->fw_flag |= FW_UPDATE_NEEDED;
746 746
747 747 } else if ((fw->kern && (vpd->postKernRev !=
748 748 fw->kern)) ||
749 749 (fw->stub && (vpd->opFwRev != fw->stub)) ||
750 750 (fw->sli1 && (vpd->sli1FwRev != fw->sli1)) ||
751 751 (fw->sli2 && (vpd->sli2FwRev != fw->sli2)) ||
752 752 (fw->sli3 && (vpd->sli3FwRev != fw->sli3)) ||
753 753 (fw->sli4 && (vpd->sli4FwRev != fw->sli4))) {
754 754 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
755 755 "Firmware update needed. "
756 756 "Updating. id=%d fw=%d",
757 757 hba->model_info.id, hba->model_info.fwid);
758 758
759 759 #ifdef MODFW_SUPPORT
760 760 /*
761 761 * Load the firmware image now
762 762 * If MODFW_SUPPORT is not defined, the
763 763 * firmware image will already be defined
764 764 * in the emlxs_fw_table
765 765 */
766 766 emlxs_fw_load(hba, fw);
767 767 #endif /* MODFW_SUPPORT */
768 768
769 769 if (fw->image && fw->size) {
770 770 uint32_t rc;
771 771
772 772 rc = emlxs_fw_download(hba,
773 773 (char *)fw->image, fw->size, 0);
774 774 if ((rc != FC_SUCCESS) &&
775 775 (rc != EMLXS_REBOOT_REQUIRED)) {
776 776 EMLXS_MSGF(EMLXS_CONTEXT,
777 777 &emlxs_init_msg,
778 778 "Firmware update failed.");
779 779 hba->fw_flag |=
780 780 FW_UPDATE_NEEDED;
781 781 }
782 782 #ifdef MODFW_SUPPORT
783 783 /*
784 784 * Unload the firmware image from
785 785 * kernel memory
786 786 */
787 787 emlxs_fw_unload(hba, fw);
788 788 #endif /* MODFW_SUPPORT */
789 789
790 790 fw_check = 0;
791 791
792 792 goto reset;
793 793 }
794 794
795 795 hba->fw_flag |= FW_UPDATE_NEEDED;
796 796
797 797 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
798 798 "Firmware image unavailable.");
799 799 } else {
800 800 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
801 801 "Firmware update not needed.");
802 802 }
803 803 } else {
804 804 /* This should not happen */
805 805
806 806 /*
807 807 * This means either the adapter database is not
808 808 * correct or a firmware image is missing from the
809 809 * compile
810 810 */
811 811 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
812 812 "Firmware image unavailable. id=%d fw=%d",
813 813 hba->model_info.id, hba->model_info.fwid);
814 814 }
815 815 }
816 816
817 817 /*
818 818 * Add our interrupt routine to kernel's interrupt chain & enable it
819 819 * If MSI is enabled this will cause Solaris to program the MSI address
820 820 * and data registers in PCI config space
821 821 */
822 822 if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
823 823 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
824 824 "Unable to add interrupt(s).");
825 825
826 826 rval = EIO;
827 827 goto failed;
828 828 }
829 829
830 830 EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
831 831
832 832 /* Reuse mbq from previous mbox */
833 833 bzero(mbq, sizeof (MAILBOXQ));
834 834
835 835 (void) emlxs_mb_config_port(hba, mbq, sli_mode, key);
836 836 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
837 837 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
838 838 "Unable to configure port. "
839 839 "Mailbox cmd=%x status=%x slimode=%d key=%x",
840 840 mb->mbxCommand, mb->mbxStatus, sli_mode, key);
841 841
842 842 for (sli_mode--; sli_mode > 0; sli_mode--) {
843 843 /* Check if sli_mode is supported by this adapter */
844 844 if (hba->model_info.sli_mask &
845 845 EMLXS_SLI_MASK(sli_mode)) {
846 846 sli_mode_mask = EMLXS_SLI_MASK(sli_mode);
847 847 break;
848 848 }
849 849 }
850 850
851 851 if (sli_mode) {
852 852 fw_check = 0;
853 853
854 854 goto reset;
855 855 }
856 856
857 857 hba->flag &= ~FC_SLIM2_MODE;
858 858
859 859 rval = EIO;
860 860 goto failed;
861 861 }
862 862
863 863 /* Check if SLI3 mode was achieved */
864 864 if (mb->un.varCfgPort.rMA &&
865 865 (mb->un.varCfgPort.sli_mode == EMLXS_HBA_SLI3_MODE)) {
866 866
867 867 if (mb->un.varCfgPort.vpi_max > 1) {
868 868 hba->flag |= FC_NPIV_ENABLED;
869 869
870 870 if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
871 871 hba->vpi_max =
872 872 min(mb->un.varCfgPort.vpi_max,
873 873 MAX_VPORTS - 1);
874 874 } else {
875 875 hba->vpi_max =
876 876 min(mb->un.varCfgPort.vpi_max,
877 877 MAX_VPORTS_LIMITED - 1);
878 878 }
879 879 }
880 880
881 881 #if (EMLXS_MODREV >= EMLXS_MODREV5)
882 882 hba->fca_tran->fca_num_npivports =
883 883 (cfg[CFG_NPIV_ENABLE].current) ? hba->vpi_max : 0;
884 884 #endif /* >= EMLXS_MODREV5 */
885 885
886 886 if (mb->un.varCfgPort.gerbm && mb->un.varCfgPort.max_hbq) {
887 887 hba->flag |= FC_HBQ_ENABLED;
888 888 }
889 889
890 890 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
891 891 "SLI3 mode: flag=%x vpi_max=%d", hba->flag, hba->vpi_max);
892 892 } else {
893 893 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
894 894 "SLI2 mode: flag=%x", hba->flag);
895 895 sli_mode = EMLXS_HBA_SLI2_MODE;
896 896 sli_mode_mask = EMLXS_SLI2_MASK;
897 897 hba->sli_mode = sli_mode;
898 898 #if (EMLXS_MODREV >= EMLXS_MODREV5)
899 899 hba->fca_tran->fca_num_npivports = 0;
900 900 #endif /* >= EMLXS_MODREV5 */
901 901
902 902 }
903 903
904 904 /* Get and save the current firmware version (based on sli_mode) */
905 905 emlxs_decode_firmware_rev(hba, vpd);
906 906
907 907 emlxs_pcix_mxr_update(hba, 0);
908 908
909 909 /* Reuse mbq from previous mbox */
910 910 bzero(mbq, sizeof (MAILBOXQ));
911 911
912 912 emlxs_mb_read_config(hba, mbq);
913 913 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
914 914 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
915 915 "Unable to read configuration. Mailbox cmd=%x status=%x",
916 916 mb->mbxCommand, mb->mbxStatus);
917 917
918 918 rval = EIO;
919 919 goto failed;
920 920 }
921 921
922 922 /* Save the link speed capabilities */
923 923 vpd->link_speed = (uint16_t)mb->un.varRdConfig.lmt;
924 924 emlxs_process_link_speed(hba);
925 925
926 926 /* Set the max node count */
927 927 if (cfg[CFG_NUM_NODES].current > 0) {
928 928 hba->max_nodes =
929 929 min(cfg[CFG_NUM_NODES].current,
930 930 mb->un.varRdConfig.max_rpi);
931 931 } else {
932 932 hba->max_nodes = mb->un.varRdConfig.max_rpi;
933 933 }
934 934
935 935 /* Set the io throttle */
936 936 hba->io_throttle = mb->un.varRdConfig.max_xri - IO_THROTTLE_RESERVE;
937 937
938 938 /* Set max_iotag */
939 939 if (cfg[CFG_NUM_IOTAGS].current) {
940 940 hba->max_iotag = (uint16_t)cfg[CFG_NUM_IOTAGS].current;
941 941 } else {
942 942 hba->max_iotag = mb->un.varRdConfig.max_xri;
943 943 }
944 944
945 945 /* Set out-of-range iotag base */
946 946 hba->fc_oor_iotag = hba->max_iotag;
947 947
948 948 /*
949 949 * Allocate some memory for buffers
950 950 */
951 951 if (emlxs_mem_alloc_buffer(hba) == 0) {
952 952 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
953 953 "Unable to allocate memory buffers.");
954 954
955 955 EMLXS_STATE_CHANGE(hba, FC_ERROR);
956 956 return (ENOMEM);
957 957 }
958 958
959 959 /*
960 960 * Setup and issue mailbox RUN BIU DIAG command Setup test buffers
961 961 */
962 962 if (((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) ||
963 963 ((mp1 = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0)) {
964 964 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
965 965 "Unable to allocate diag buffers.");
966 966
967 967 rval = ENOMEM;
968 968 goto failed;
969 969 }
970 970
971 971 bcopy((caddr_t)&emlxs_diag_pattern[0], (caddr_t)mp->virt,
972 972 MEM_ELSBUF_SIZE);
973 973 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, MEM_ELSBUF_SIZE,
974 974 DDI_DMA_SYNC_FORDEV);
975 975
976 976 bzero(mp1->virt, MEM_ELSBUF_SIZE);
977 977 EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
978 978 DDI_DMA_SYNC_FORDEV);
979 979
980 980 /* Reuse mbq from previous mbox */
981 981 bzero(mbq, sizeof (MAILBOXQ));
982 982
983 983 (void) emlxs_mb_run_biu_diag(hba, mbq, mp->phys, mp1->phys);
984 984
985 985 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
986 986 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
987 987 "Unable to run BIU diag. Mailbox cmd=%x status=%x",
988 988 mb->mbxCommand, mb->mbxStatus);
989 989
990 990 rval = EIO;
991 991 goto failed;
992 992 }
993 993
994 994 EMLXS_MPDATA_SYNC(mp1->dma_handle, 0, MEM_ELSBUF_SIZE,
995 995 DDI_DMA_SYNC_FORKERNEL);
996 996
997 997 #ifdef FMA_SUPPORT
998 998 if (mp->dma_handle) {
999 999 if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
1000 1000 != DDI_FM_OK) {
1001 1001 EMLXS_MSGF(EMLXS_CONTEXT,
1002 1002 &emlxs_invalid_dma_handle_msg,
1003 1003 "sli3_online: hdl=%p",
1004 1004 mp->dma_handle);
1005 1005 rval = EIO;
1006 1006 goto failed;
1007 1007 }
1008 1008 }
1009 1009
1010 1010 if (mp1->dma_handle) {
1011 1011 if (emlxs_fm_check_dma_handle(hba, mp1->dma_handle)
1012 1012 != DDI_FM_OK) {
1013 1013 EMLXS_MSGF(EMLXS_CONTEXT,
1014 1014 &emlxs_invalid_dma_handle_msg,
1015 1015 "sli3_online: hdl=%p",
1016 1016 mp1->dma_handle);
1017 1017 rval = EIO;
1018 1018 goto failed;
1019 1019 }
1020 1020 }
1021 1021 #endif /* FMA_SUPPORT */
1022 1022
1023 1023 outptr = mp->virt;
1024 1024 inptr = mp1->virt;
1025 1025
1026 1026 for (i = 0; i < MEM_ELSBUF_SIZE; i++) {
1027 1027 if (*outptr++ != *inptr++) {
1028 1028 outptr--;
1029 1029 inptr--;
1030 1030
1031 1031 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1032 1032 "BIU diagnostic failed. "
1033 1033 "offset %x value %x should be %x.",
1034 1034 i, (uint32_t)*inptr, (uint32_t)*outptr);
1035 1035
1036 1036 rval = EIO;
1037 1037 goto failed;
1038 1038 }
1039 1039 }
1040 1040
1041 1041 /* Free the buffers since we were polling */
1042 1042 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1043 1043 mp = NULL;
1044 1044 emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
1045 1045 mp1 = NULL;
1046 1046
1047 1047 hba->channel_fcp = FC_FCP_RING;
1048 1048 hba->channel_els = FC_ELS_RING;
1049 1049 hba->channel_ip = FC_IP_RING;
1050 1050 hba->channel_ct = FC_CT_RING;
1051 1051 hba->sli.sli3.ring_count = MAX_RINGS;
1052 1052
1053 1053 hba->channel_tx_count = 0;
1054 1054 hba->io_count = 0;
1055 1055 hba->fc_iotag = 1;
1056 1056
1057 1057 for (i = 0; i < hba->chan_count; i++) {
1058 1058 cp = &hba->chan[i];
1059 1059
1060 1060 /* 1 to 1 mapping between ring and channel */
1061 1061 cp->iopath = (void *)&hba->sli.sli3.ring[i];
1062 1062
1063 1063 cp->hba = hba;
1064 1064 cp->channelno = i;
1065 1065 }
1066 1066
1067 1067 /*
1068 1068 * Setup and issue mailbox CONFIGURE RING command
1069 1069 */
1070 1070 for (i = 0; i < (uint32_t)hba->sli.sli3.ring_count; i++) {
1071 1071 /*
1072 1072 * Initialize cmd/rsp ring pointers
1073 1073 */
1074 1074 rp = &hba->sli.sli3.ring[i];
1075 1075
1076 1076 /* 1 to 1 mapping between ring and channel */
1077 1077 rp->channelp = &hba->chan[i];
1078 1078
1079 1079 rp->hba = hba;
1080 1080 rp->ringno = (uint8_t)i;
1081 1081
1082 1082 rp->fc_cmdidx = 0;
1083 1083 rp->fc_rspidx = 0;
1084 1084 EMLXS_STATE_CHANGE(hba, FC_INIT_CFGRING);
1085 1085
1086 1086 /* Reuse mbq from previous mbox */
1087 1087 bzero(mbq, sizeof (MAILBOXQ));
1088 1088
1089 1089 emlxs_mb_config_ring(hba, i, mbq);
1090 1090 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1091 1091 MBX_SUCCESS) {
1092 1092 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1093 1093 "Unable to configure ring. "
1094 1094 "Mailbox cmd=%x status=%x",
1095 1095 mb->mbxCommand, mb->mbxStatus);
1096 1096
1097 1097 rval = EIO;
1098 1098 goto failed;
1099 1099 }
1100 1100 }
1101 1101
1102 1102 /*
1103 1103 * Setup link timers
1104 1104 */
1105 1105 EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1106 1106
1107 1107 /* Reuse mbq from previous mbox */
1108 1108 bzero(mbq, sizeof (MAILBOXQ));
1109 1109
1110 1110 emlxs_mb_config_link(hba, mbq);
1111 1111 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1112 1112 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1113 1113 "Unable to configure link. Mailbox cmd=%x status=%x",
1114 1114 mb->mbxCommand, mb->mbxStatus);
1115 1115
1116 1116 rval = EIO;
1117 1117 goto failed;
1118 1118 }
1119 1119
1120 1120 #ifdef MAX_RRDY_SUPPORT
1121 1121 /* Set MAX_RRDY if one is provided */
1122 1122 if (cfg[CFG_MAX_RRDY].current) {
1123 1123
1124 1124 /* Reuse mbq from previous mbox */
1125 1125 bzero(mbq, sizeof (MAILBOXQ));
1126 1126
1127 1127 emlxs_mb_set_var(hba, (MAILBOX *)mbq, 0x00060412,
1128 1128 cfg[CFG_MAX_RRDY].current);
1129 1129
1130 1130 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1131 1131 MBX_SUCCESS) {
1132 1132 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1133 1133 "MAX_RRDY: Unable to set. status=%x " \
1134 1134 "value=%d",
1135 1135 mb->mbxStatus, cfg[CFG_MAX_RRDY].current);
1136 1136 } else {
1137 1137 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1138 1138 "MAX_RRDY: %d", cfg[CFG_MAX_RRDY].current);
1139 1139 }
1140 1140 }
1141 1141 #endif /* MAX_RRDY_SUPPORT */
1142 1142
1143 1143 /* Reuse mbq from previous mbox */
1144 1144 bzero(mbq, sizeof (MAILBOXQ));
1145 1145
1146 1146 /*
1147 1147 * We need to get login parameters for NID
1148 1148 */
1149 1149 (void) emlxs_mb_read_sparam(hba, mbq);
1150 1150 mp = (MATCHMAP *)mbq->bp;
1151 1151 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1152 1152 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1153 1153 "Unable to read parameters. Mailbox cmd=%x status=%x",
1154 1154 mb->mbxCommand, mb->mbxStatus);
1155 1155
1156 1156 rval = EIO;
1157 1157 goto failed;
1158 1158 }
1159 1159
1160 1160 /* Free the buffer since we were polling */
1161 1161 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1162 1162 mp = NULL;
1163 1163
1164 1164 /* If no serial number in VPD data, then use the WWPN */
1165 1165 if (vpd->serial_num[0] == 0) {
1166 1166 outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1167 1167 for (i = 0; i < 12; i++) {
1168 1168 status = *outptr++;
1169 1169 j = ((status & 0xf0) >> 4);
1170 1170 if (j <= 9) {
1171 1171 vpd->serial_num[i] =
1172 1172 (char)((uint8_t)'0' + (uint8_t)j);
1173 1173 } else {
1174 1174 vpd->serial_num[i] =
1175 1175 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1176 1176 }
1177 1177
1178 1178 i++;
1179 1179 j = (status & 0xf);
1180 1180 if (j <= 9) {
1181 1181 vpd->serial_num[i] =
1182 1182 (char)((uint8_t)'0' + (uint8_t)j);
1183 1183 } else {
1184 1184 vpd->serial_num[i] =
1185 1185 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1186 1186 }
1187 1187 }
1188 1188
1189 1189 /*
1190 1190 * Set port number and port index to zero
1191 1191 * The WWN's are unique to each port and therefore port_num
1192 1192 * must equal zero. This effects the hba_fru_details structure
1193 1193 * in fca_bind_port()
1194 1194 */
1195 1195 vpd->port_num[0] = 0;
1196 1196 vpd->port_index = 0;
1197 1197 }
1198 1198
1199 1199 /*
1200 1200 * Make first attempt to set a port index
1201 1201 * Check if this is a multifunction adapter
1202 1202 */
1203 1203 if ((vpd->port_index == (uint32_t)-1) &&
1204 1204 (hba->model_info.chip >= EMLXS_THOR_CHIP)) {
1205 1205 char *buffer;
1206 1206 int32_t i;
1207 1207
1208 1208 /*
1209 1209 * The port address looks like this:
1210 1210 * 1 - for port index 0
1211 1211 * 1,1 - for port index 1
1212 1212 * 1,2 - for port index 2
1213 1213 */
1214 1214 buffer = ddi_get_name_addr(hba->dip);
1215 1215
1216 1216 if (buffer) {
1217 1217 vpd->port_index = 0;
1218 1218
1219 1219 /* Reverse scan for a comma */
1220 1220 for (i = strlen(buffer) - 1; i > 0; i--) {
1221 1221 if (buffer[i] == ',') {
1222 1222 /* Comma found - set index now */
1223 1223 vpd->port_index =
1224 1224 emlxs_strtol(&buffer[i + 1], 10);
1225 1225 break;
1226 1226 }
1227 1227 }
1228 1228 }
1229 1229 }
1230 1230
1231 1231 /* Make final attempt to set a port index */
1232 1232 if (vpd->port_index == (uint32_t)-1) {
1233 1233 dev_info_t *p_dip;
1234 1234 dev_info_t *c_dip;
1235 1235
1236 1236 p_dip = ddi_get_parent(hba->dip);
1237 1237 c_dip = ddi_get_child(p_dip);
1238 1238
1239 1239 vpd->port_index = 0;
1240 1240 while (c_dip && (hba->dip != c_dip)) {
1241 1241 c_dip = ddi_get_next_sibling(c_dip);
1242 1242 vpd->port_index++;
1243 1243 }
1244 1244 }
1245 1245
1246 1246 if (vpd->port_num[0] == 0) {
1247 1247 if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
1248 1248 (void) snprintf(vpd->port_num,
1249 1249 (sizeof (vpd->port_num)-1),
1250 1250 "%d", vpd->port_index);
1251 1251 }
1252 1252 }
1253 1253
1254 1254 if (vpd->id[0] == 0) {
1255 1255 (void) strncpy(vpd->id, hba->model_info.model_desc,
1256 1256 (sizeof (vpd->id)-1));
1257 1257 }
1258 1258
1259 1259 if (vpd->manufacturer[0] == 0) {
1260 1260 (void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
1261 1261 (sizeof (vpd->manufacturer)-1));
1262 1262 }
1263 1263
1264 1264 if (vpd->part_num[0] == 0) {
1265 1265 (void) strncpy(vpd->part_num, hba->model_info.model,
1266 1266 (sizeof (vpd->part_num)-1));
1267 1267 }
1268 1268
1269 1269 if (vpd->model_desc[0] == 0) {
1270 1270 (void) strncpy(vpd->model_desc, hba->model_info.model_desc,
1271 1271 (sizeof (vpd->model_desc)-1));
1272 1272 }
1273 1273
1274 1274 if (vpd->model[0] == 0) {
|
↓ open down ↓ |
1274 lines elided |
↑ open up ↑ |
1275 1275 (void) strncpy(vpd->model, hba->model_info.model,
1276 1276 (sizeof (vpd->model)-1));
1277 1277 }
1278 1278
1279 1279 if (vpd->prog_types[0] == 0) {
1280 1280 emlxs_build_prog_types(hba, vpd);
1281 1281 }
1282 1282
1283 1283 /* Create the symbolic names */
1284 1284 (void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1285 - "Emulex %s FV%s DV%s %s",
1286 - hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1285 + "%s %s FV%s DV%s %s",
1286 + hba->model_info.manufacturer, hba->model_info.model,
1287 + hba->vpd.fw_version, emlxs_version,
1287 1288 (char *)utsname.nodename);
1288 1289
1289 1290 (void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1290 - "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1291 + "%s PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1292 + hba->model_info.manufacturer,
1291 1293 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1292 1294 hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1293 1295 hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1294 1296
1295 1297 if (cfg[CFG_NETWORK_ON].current) {
1296 1298 if ((hba->sparam.portName.nameType != NAME_IEEE) ||
1297 1299 (hba->sparam.portName.IEEEextMsn != 0) ||
1298 1300 (hba->sparam.portName.IEEEextLsb != 0)) {
1299 1301
1300 1302 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1301 1303 "WWPN doesn't conform to IP profile: "
1302 1304 "nameType=%x. Disabling networking.",
1303 1305 hba->sparam.portName.nameType);
1304 1306
1305 1307 cfg[CFG_NETWORK_ON].current = 0;
1306 1308 }
1307 1309 }
1308 1310
1309 1311 if (cfg[CFG_NETWORK_ON].current) {
1310 1312 /* Reuse mbq from previous mbox */
1311 1313 bzero(mbq, sizeof (MAILBOXQ));
1312 1314
1313 1315 /* Issue CONFIG FARP */
1314 1316 emlxs_mb_config_farp(hba, mbq);
1315 1317 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1316 1318 MBX_SUCCESS) {
1317 1319 /*
1318 1320 * Let it go through even if failed.
1319 1321 */
1320 1322 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
1321 1323 "Unable to configure FARP. "
1322 1324 "Mailbox cmd=%x status=%x",
1323 1325 mb->mbxCommand, mb->mbxStatus);
1324 1326 }
1325 1327 }
1326 1328 #ifdef MSI_SUPPORT
1327 1329 /* Configure MSI map if required */
1328 1330 if (hba->intr_count > 1) {
1329 1331
1330 1332 if (hba->intr_type == DDI_INTR_TYPE_MSIX) {
1331 1333 /* always start from 0 */
1332 1334 hba->last_msiid = 0;
1333 1335 }
1334 1336
1335 1337 /* Reuse mbq from previous mbox */
1336 1338 bzero(mbq, sizeof (MAILBOXQ));
1337 1339
1338 1340 emlxs_mb_config_msix(hba, mbq, hba->intr_map, hba->intr_count);
1339 1341
1340 1342 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1341 1343 MBX_SUCCESS) {
1342 1344 goto msi_configured;
1343 1345 }
1344 1346
1345 1347 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1346 1348 "Unable to config MSIX. Mailbox cmd=0x%x status=0x%x",
1347 1349 mb->mbxCommand, mb->mbxStatus);
1348 1350
1349 1351 /* Reuse mbq from previous mbox */
1350 1352 bzero(mbq, sizeof (MAILBOXQ));
1351 1353
1352 1354 emlxs_mb_config_msi(hba, mbq, hba->intr_map, hba->intr_count);
1353 1355
1354 1356 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) ==
1355 1357 MBX_SUCCESS) {
1356 1358 goto msi_configured;
1357 1359 }
1358 1360
1359 1361
1360 1362 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1361 1363 "Unable to config MSI. Mailbox cmd=0x%x status=0x%x",
1362 1364 mb->mbxCommand, mb->mbxStatus);
1363 1365
1364 1366 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1365 1367 "Attempting single interrupt mode...");
1366 1368
1367 1369 /* First cleanup old interrupts */
1368 1370 (void) emlxs_msi_remove(hba);
1369 1371 (void) emlxs_msi_uninit(hba);
1370 1372
1371 1373 status = emlxs_msi_init(hba, 1);
1372 1374
1373 1375 if (status != DDI_SUCCESS) {
1374 1376 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1375 1377 "Unable to initialize interrupt. status=%d",
1376 1378 status);
1377 1379
1378 1380 rval = EIO;
1379 1381 goto failed;
1380 1382 }
1381 1383
1382 1384 /*
1383 1385 * Reset adapter - The adapter needs to be reset because
1384 1386 * the bus cannot handle the MSI change without handshaking
1385 1387 * with the adapter again
1386 1388 */
1387 1389
1388 1390 (void) emlxs_mem_free_buffer(hba);
1389 1391 fw_check = 0;
1390 1392 goto reset;
1391 1393 }
1392 1394
1393 1395 msi_configured:
1394 1396
1395 1397
1396 1398 if ((hba->intr_count >= 1) &&
1397 1399 (hba->sli_mode == EMLXS_HBA_SLI3_MODE)) {
1398 1400 /* intr_count is a sequence of msi id */
1399 1401 /* Setup msi2chan[msi_id] */
1400 1402 for (i = 0; i < hba->intr_count; i ++) {
1401 1403 hba->msi2chan[i] = i;
1402 1404 if (i >= hba->chan_count)
1403 1405 hba->msi2chan[i] = (i - hba->chan_count);
1404 1406 }
1405 1407 }
1406 1408 #endif /* MSI_SUPPORT */
1407 1409
1408 1410 /*
1409 1411 * We always disable the firmware traffic cop feature
1410 1412 */
1411 1413 if (emlxs_disable_traffic_cop) {
1412 1414 /* Reuse mbq from previous mbox */
1413 1415 bzero(mbq, sizeof (MAILBOXQ));
1414 1416
1415 1417 emlxs_disable_tc(hba, mbq);
1416 1418 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1417 1419 MBX_SUCCESS) {
1418 1420 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1419 1421 "Unable to disable traffic cop. "
1420 1422 "Mailbox cmd=%x status=%x",
1421 1423 mb->mbxCommand, mb->mbxStatus);
1422 1424
1423 1425 rval = EIO;
1424 1426 goto failed;
1425 1427 }
1426 1428 }
1427 1429
1428 1430
1429 1431 /* Reuse mbq from previous mbox */
1430 1432 bzero(mbq, sizeof (MAILBOXQ));
1431 1433
1432 1434 /* Register for async events */
1433 1435 emlxs_mb_async_event(hba, mbq);
1434 1436 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1435 1437 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1436 1438 "Async events disabled. Mailbox status=%x",
1437 1439 mb->mbxStatus);
1438 1440 } else {
1439 1441 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1440 1442 "Async events enabled.");
1441 1443 hba->flag |= FC_ASYNC_EVENTS;
1442 1444 }
1443 1445
1444 1446 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1445 1447
1446 1448 emlxs_sli3_enable_intr(hba);
1447 1449
1448 1450 if (hba->flag & FC_HBQ_ENABLED) {
1449 1451 if (port->flag & EMLXS_TGT_ENABLED) {
1450 1452 if (emlxs_hbq_setup(hba, EMLXS_FCT_HBQ_ID)) {
1451 1453 EMLXS_MSGF(EMLXS_CONTEXT,
1452 1454 &emlxs_init_failed_msg,
1453 1455 "Unable to setup FCT HBQ.");
1454 1456
1455 1457 rval = ENOMEM;
1456 1458
1457 1459 #ifdef SFCT_SUPPORT
1458 1460 /* Check if we can fall back to just */
1459 1461 /* initiator mode */
1460 1462 if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
1461 1463 (port->flag & EMLXS_INI_ENABLED) &&
1462 1464 (cfg[CFG_DTM_ENABLE].current == 1) &&
1463 1465 (cfg[CFG_TARGET_MODE].current == 0)) {
1464 1466
1465 1467 cfg[CFG_DTM_ENABLE].current = 0;
1466 1468
1467 1469 EMLXS_MSGF(EMLXS_CONTEXT,
1468 1470 &emlxs_init_failed_msg,
1469 1471 "Disabling dynamic target mode. "
1470 1472 "Enabling initiator mode only.");
1471 1473
1472 1474 /* This will trigger the driver to */
1473 1475 /* reattach */
1474 1476 rval = EAGAIN;
1475 1477 }
1476 1478 #endif /* SFCT_SUPPORT */
1477 1479 goto failed;
1478 1480 }
1479 1481 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1480 1482 "FCT Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1481 1483 }
1482 1484
1483 1485 if (cfg[CFG_NETWORK_ON].current) {
1484 1486 if (emlxs_hbq_setup(hba, EMLXS_IP_HBQ_ID)) {
1485 1487 EMLXS_MSGF(EMLXS_CONTEXT,
1486 1488 &emlxs_init_failed_msg,
1487 1489 "Unable to setup IP HBQ.");
1488 1490
1489 1491 rval = ENOMEM;
1490 1492 goto failed;
1491 1493 }
1492 1494 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1493 1495 "IP Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1494 1496 }
1495 1497
1496 1498 if (emlxs_hbq_setup(hba, EMLXS_ELS_HBQ_ID)) {
1497 1499 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1498 1500 "Unable to setup ELS HBQ.");
1499 1501 rval = ENOMEM;
1500 1502 goto failed;
1501 1503 }
1502 1504 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1503 1505 "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1504 1506
1505 1507 if (emlxs_hbq_setup(hba, EMLXS_CT_HBQ_ID)) {
1506 1508 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1507 1509 "Unable to setup CT HBQ.");
1508 1510
1509 1511 rval = ENOMEM;
1510 1512 goto failed;
1511 1513 }
1512 1514 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1513 1515 "CT Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1514 1516 } else {
1515 1517 if (port->flag & EMLXS_TGT_ENABLED) {
1516 1518 /* Post the FCT unsol buffers */
1517 1519 rp = &hba->sli.sli3.ring[FC_FCT_RING];
1518 1520 for (j = 0; j < MEM_FCTBUF_COUNT; j += 2) {
1519 1521 (void) emlxs_post_buffer(hba, rp, 2);
1520 1522 }
1521 1523 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1522 1524 "FCP Ring: Posted %d buffers.", MEM_FCTBUF_COUNT);
1523 1525 }
1524 1526
1525 1527 if (cfg[CFG_NETWORK_ON].current) {
1526 1528 /* Post the IP unsol buffers */
1527 1529 rp = &hba->sli.sli3.ring[FC_IP_RING];
1528 1530 for (j = 0; j < MEM_IPBUF_COUNT; j += 2) {
1529 1531 (void) emlxs_post_buffer(hba, rp, 2);
1530 1532 }
1531 1533 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1532 1534 "IP Ring: Posted %d buffers.", MEM_IPBUF_COUNT);
1533 1535 }
1534 1536
1535 1537 /* Post the ELS unsol buffers */
1536 1538 rp = &hba->sli.sli3.ring[FC_ELS_RING];
1537 1539 for (j = 0; j < MEM_ELSBUF_COUNT; j += 2) {
1538 1540 (void) emlxs_post_buffer(hba, rp, 2);
1539 1541 }
1540 1542 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1541 1543 "ELS Ring: Posted %d buffers.", MEM_ELSBUF_COUNT);
1542 1544
1543 1545
1544 1546 /* Post the CT unsol buffers */
1545 1547 rp = &hba->sli.sli3.ring[FC_CT_RING];
1546 1548 for (j = 0; j < MEM_CTBUF_COUNT; j += 2) {
1547 1549 (void) emlxs_post_buffer(hba, rp, 2);
1548 1550 }
1549 1551 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1550 1552 "CT Ring: Posted %d buffers.", MEM_CTBUF_COUNT);
1551 1553 }
1552 1554
1553 1555 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1554 1556
1555 1557 /* Check persist-linkdown */
1556 1558 if (cfg[CFG_PERSIST_LINKDOWN].current) {
1557 1559 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1558 1560 return (0);
1559 1561 }
1560 1562
1561 1563 #ifdef SFCT_SUPPORT
1562 1564 if ((port->mode == MODE_TARGET) &&
1563 1565 !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1564 1566 emlxs_enable_latt(hba);
1565 1567 return (0);
1566 1568 }
1567 1569 #endif /* SFCT_SUPPORT */
1568 1570
1569 1571 /*
1570 1572 * Setup and issue mailbox INITIALIZE LINK command
1571 1573 * At this point, the interrupt will be generated by the HW
1572 1574 */
1573 1575 mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX);
1574 1576 if (mbq == NULL) {
1575 1577 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1576 1578 "Unable to allocate mailbox buffer.");
1577 1579
1578 1580 rval = EIO;
1579 1581 goto failed;
1580 1582 }
1581 1583 mb = (MAILBOX *)mbq;
1582 1584
1583 1585 emlxs_mb_init_link(hba, mbq, cfg[CFG_TOPOLOGY].current,
1584 1586 cfg[CFG_LINK_SPEED].current);
1585 1587
1586 1588 rval = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1587 1589 if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1588 1590 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1589 1591 "Unable to initialize link. " \
1590 1592 "Mailbox cmd=%x status=%x",
1591 1593 mb->mbxCommand, mb->mbxStatus);
1592 1594
1593 1595 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
1594 1596 mbq = NULL;
1595 1597 rval = EIO;
1596 1598 goto failed;
1597 1599 }
1598 1600
1599 1601 /*
1600 1602 * Enable link attention interrupt
1601 1603 */
1602 1604 emlxs_enable_latt(hba);
1603 1605
1604 1606 /* Wait for link to come up */
1605 1607 i = cfg[CFG_LINKUP_DELAY].current;
1606 1608 while (i && (hba->state < FC_LINK_UP)) {
1607 1609 /* Check for hardware error */
1608 1610 if (hba->state == FC_ERROR) {
1609 1611 EMLXS_MSGF(EMLXS_CONTEXT,
1610 1612 &emlxs_init_failed_msg,
1611 1613 "Adapter error.");
1612 1614
1613 1615 mbq = NULL;
1614 1616 rval = EIO;
1615 1617 goto failed;
1616 1618 }
1617 1619
1618 1620 BUSYWAIT_MS(1000);
1619 1621 i--;
1620 1622 }
1621 1623
1622 1624 /*
1623 1625 * The leadvile driver will now handle the FLOGI at the driver level
1624 1626 */
1625 1627
1626 1628 return (0);
1627 1629
1628 1630 failed:
1629 1631
1630 1632 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1631 1633
1632 1634 if (hba->intr_flags & EMLXS_MSI_ADDED) {
1633 1635 (void) EMLXS_INTR_REMOVE(hba);
1634 1636 }
1635 1637
1636 1638 if (mp) {
1637 1639 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1638 1640 mp = NULL;
1639 1641 }
1640 1642
1641 1643 if (mp1) {
1642 1644 emlxs_mem_put(hba, MEM_BUF, (void *)mp1);
1643 1645 mp1 = NULL;
1644 1646 }
1645 1647
1646 1648 (void) emlxs_mem_free_buffer(hba);
1647 1649
1648 1650 if (mbq) {
1649 1651 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1650 1652 mbq = NULL;
1651 1653 mb = NULL;
1652 1654 }
1653 1655
1654 1656 if (rval == 0) {
1655 1657 rval = EIO;
1656 1658 }
1657 1659
1658 1660 return (rval);
1659 1661
1660 1662 } /* emlxs_sli3_online() */
1661 1663
1662 1664
/*
 * emlxs_sli3_offline() - Take an SLI3 adapter offline.
 *
 * Undoes emlxs_sli3_online(): kills the adapter and then releases the
 * driver's shared memory buffers.  The reset_requested argument is
 * accepted for interface symmetry but not used here (hence ARGSUSED).
 * NOTE(review): whether a subsequent online is expected after this call
 * is not visible from this function alone -- confirm against callers.
 */
1663 1665 /*ARGSUSED*/
1664 1666 static void
1665 1667 emlxs_sli3_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1666 1668 {
1667 1669 	/* Reverse emlxs_sli3_online */
1668 1670 
1669 1671 	/* Kill the adapter */
1670 1672 	emlxs_sli3_hba_kill(hba);
1671 1673 
1672 1674 	/* Free driver shared memory */
1673 1675 	(void) emlxs_mem_free_buffer(hba);
1674 1676 
1675 1677 } /* emlxs_sli3_offline() */
1676 1678
1677 1679
/*
 * emlxs_sli3_map_hdw() - Map adapter register spaces and allocate SLIM2.
 *
 * For SBUS_FC adapters, maps five register sets: SLIM, DFLY CSR, Fcode
 * flash, TITAN core and TITAN CSR.  For PCI adapters, maps SLIM and the
 * memory-mapped CSR set.  Each mapping is skipped if its access handle
 * is already non-zero, so the routine is safe to re-enter after a
 * partial setup.  It then allocates a zeroed, page-aligned SLIM2 DMA
 * buffer (host-memory SLIM) and derives the HA/CA/HS/HC/BC register
 * pointers from the CSR base (plus SHC/SHS/SHU TITAN pointers on SBUS).
 *
 * Returns 0 on success.  On any failure it unwinds everything via
 * emlxs_sli3_unmap_hdw() and returns ENOMEM.
 */
1678 1680 static int
1679 1681 emlxs_sli3_map_hdw(emlxs_hba_t *hba)
1680 1682 {
1681 1683 emlxs_port_t *port = &PPORT;
1682 1684 dev_info_t *dip;
1683 1685 ddi_device_acc_attr_t dev_attr;
1684 1686 int status;
1685 1687
1686 1688 dip = (dev_info_t *)hba->dip;
1687 1689 dev_attr = emlxs_dev_acc_attr;
1688 1690
1689 1691 if (hba->bus_type == SBUS_FC) {
1690 1692
/* SLIM (shared-memory interface) registers */
1691 1693 if (hba->sli.sli3.slim_acc_handle == 0) {
1692 1694 status = ddi_regs_map_setup(dip,
1693 1695 SBUS_DFLY_SLIM_RINDEX,
1694 1696 (caddr_t *)&hba->sli.sli3.slim_addr,
1695 1697 0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
1696 1698 if (status != DDI_SUCCESS) {
1697 1699 EMLXS_MSGF(EMLXS_CONTEXT,
1698 1700 &emlxs_attach_failed_msg,
1699 1701 "(SBUS) ddi_regs_map_setup SLIM failed. "
1700 1702 "status=%x", status);
1701 1703 goto failed;
1702 1704 }
1703 1705 }
/* DFLY control/status registers */
1704 1706 if (hba->sli.sli3.csr_acc_handle == 0) {
1705 1707 status = ddi_regs_map_setup(dip,
1706 1708 SBUS_DFLY_CSR_RINDEX,
1707 1709 (caddr_t *)&hba->sli.sli3.csr_addr,
1708 1710 0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
1709 1711 if (status != DDI_SUCCESS) {
1710 1712 EMLXS_MSGF(EMLXS_CONTEXT,
1711 1713 &emlxs_attach_failed_msg,
1712 1714 "(SBUS) ddi_regs_map_setup DFLY CSR "
1713 1715 "failed. status=%x", status);
1714 1716 goto failed;
1715 1717 }
1716 1718 }
/* Fcode flash (read/write) window */
1717 1719 if (hba->sli.sli3.sbus_flash_acc_handle == 0) {
1718 1720 status = ddi_regs_map_setup(dip, SBUS_FLASH_RDWR,
1719 1721 (caddr_t *)&hba->sli.sli3.sbus_flash_addr, 0, 0,
1720 1722 &dev_attr, &hba->sli.sli3.sbus_flash_acc_handle);
1721 1723 if (status != DDI_SUCCESS) {
1722 1724 EMLXS_MSGF(EMLXS_CONTEXT,
1723 1725 &emlxs_attach_failed_msg,
1724 1726 "(SBUS) ddi_regs_map_setup Fcode Flash "
1725 1727 "failed. status=%x", status);
1726 1728 goto failed;
1727 1729 }
1728 1730 }
/* TITAN core registers */
1729 1731 if (hba->sli.sli3.sbus_core_acc_handle == 0) {
1730 1732 status = ddi_regs_map_setup(dip, SBUS_TITAN_CORE_RINDEX,
1731 1733 (caddr_t *)&hba->sli.sli3.sbus_core_addr, 0, 0,
1732 1734 &dev_attr, &hba->sli.sli3.sbus_core_acc_handle);
1733 1735 if (status != DDI_SUCCESS) {
1734 1736 EMLXS_MSGF(EMLXS_CONTEXT,
1735 1737 &emlxs_attach_failed_msg,
1736 1738 "(SBUS) ddi_regs_map_setup TITAN CORE "
1737 1739 "failed. status=%x", status);
1738 1740 goto failed;
1739 1741 }
1740 1742 }
1741 1743
/* TITAN control/status registers */
1742 1744 if (hba->sli.sli3.sbus_csr_handle == 0) {
1743 1745 status = ddi_regs_map_setup(dip, SBUS_TITAN_CSR_RINDEX,
1744 1746 (caddr_t *)&hba->sli.sli3.sbus_csr_addr,
1745 1747 0, 0, &dev_attr, &hba->sli.sli3.sbus_csr_handle);
1746 1748 if (status != DDI_SUCCESS) {
1747 1749 EMLXS_MSGF(EMLXS_CONTEXT,
1748 1750 &emlxs_attach_failed_msg,
1749 1751 "(SBUS) ddi_regs_map_setup TITAN CSR "
1750 1752 "failed. status=%x", status);
1751 1753 goto failed;
1752 1754 }
1753 1755 }
1754 1756 } else { /* ****** PCI ****** */
1755 1757
1756 1758 if (hba->sli.sli3.slim_acc_handle == 0) {
1757 1759 status = ddi_regs_map_setup(dip, PCI_SLIM_RINDEX,
1758 1760 (caddr_t *)&hba->sli.sli3.slim_addr,
1759 1761 0, 0, &dev_attr, &hba->sli.sli3.slim_acc_handle);
1760 1762 if (status != DDI_SUCCESS) {
1761 1763 EMLXS_MSGF(EMLXS_CONTEXT,
1762 1764 &emlxs_attach_failed_msg,
1763 1765 "(PCI) ddi_regs_map_setup SLIM failed. "
1764 1766 "stat=%d mem=%p attr=%p hdl=%p",
1765 1767 status, &hba->sli.sli3.slim_addr, &dev_attr,
1766 1768 &hba->sli.sli3.slim_acc_handle);
1767 1769 goto failed;
1768 1770 }
1769 1771 }
1770 1772
1771 1773 /*
1772 1774 * Map in control registers, using memory-mapped version of
1773 1775 * the registers rather than the I/O space-mapped registers.
1774 1776 */
1775 1777 if (hba->sli.sli3.csr_acc_handle == 0) {
1776 1778 status = ddi_regs_map_setup(dip, PCI_CSR_RINDEX,
1777 1779 (caddr_t *)&hba->sli.sli3.csr_addr,
1778 1780 0, 0, &dev_attr, &hba->sli.sli3.csr_acc_handle);
1779 1781 if (status != DDI_SUCCESS) {
1780 1782 EMLXS_MSGF(EMLXS_CONTEXT,
1781 1783 &emlxs_attach_failed_msg,
1782 1784 "ddi_regs_map_setup CSR failed. status=%x",
1783 1785 status);
1784 1786 goto failed;
1785 1787 }
1786 1788 }
1787 1789 }
1788 1790
/*
 * Allocate the host-memory SLIM2 DMA buffer: a single-segment,
 * 32-bit-addressable, page-aligned region of SLI_SLIM2_SIZE bytes,
 * zeroed before use.  Skipped if already allocated.
 */
1789 1791 if (hba->sli.sli3.slim2.virt == 0) {
1790 1792 MBUF_INFO *buf_info;
1791 1793 MBUF_INFO bufinfo;
1792 1794
1793 1795 buf_info = &bufinfo;
1794 1796
1795 1797 bzero(buf_info, sizeof (MBUF_INFO));
1796 1798 buf_info->size = SLI_SLIM2_SIZE;
1797 1799 buf_info->flags =
1798 1800 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
1799 1801 buf_info->align = ddi_ptob(dip, 1L);
1800 1802
1801 1803 (void) emlxs_mem_alloc(hba, buf_info);
1802 1804
1803 1805 if (buf_info->virt == NULL) {
1804 1806 goto failed;
1805 1807 }
1806 1808
1807 1809 hba->sli.sli3.slim2.virt = buf_info->virt;
1808 1810 hba->sli.sli3.slim2.phys = buf_info->phys;
1809 1811 hba->sli.sli3.slim2.size = SLI_SLIM2_SIZE;
1810 1812 hba->sli.sli3.slim2.data_handle = buf_info->data_handle;
1811 1813 hba->sli.sli3.slim2.dma_handle = buf_info->dma_handle;
1812 1814 bzero((char *)hba->sli.sli3.slim2.virt, SLI_SLIM2_SIZE);
1813 1815 }
1814 1816
1815 1817 /* offset from beginning of register space */
1816 1818 hba->sli.sli3.ha_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1817 1819 (sizeof (uint32_t) * HA_REG_OFFSET));
1818 1820 hba->sli.sli3.ca_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1819 1821 (sizeof (uint32_t) * CA_REG_OFFSET));
1820 1822 hba->sli.sli3.hs_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1821 1823 (sizeof (uint32_t) * HS_REG_OFFSET));
1822 1824 hba->sli.sli3.hc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1823 1825 (sizeof (uint32_t) * HC_REG_OFFSET));
1824 1826 hba->sli.sli3.bc_reg_addr = (uint32_t *)(hba->sli.sli3.csr_addr +
1825 1827 (sizeof (uint32_t) * BC_REG_OFFSET));
1826 1828
1827 1829 if (hba->bus_type == SBUS_FC) {
1828 1830 /* offset from beginning of register space */
1829 1831 /* for TITAN registers */
1830 1832 hba->sli.sli3.shc_reg_addr =
1831 1833 (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
1832 1834 (sizeof (uint32_t) * SBUS_CTRL_REG_OFFSET));
1833 1835 hba->sli.sli3.shs_reg_addr =
1834 1836 (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
1835 1837 (sizeof (uint32_t) * SBUS_STAT_REG_OFFSET));
1836 1838 hba->sli.sli3.shu_reg_addr =
1837 1839 (uint32_t *)(hba->sli.sli3.sbus_csr_addr +
1838 1840 (sizeof (uint32_t) * SBUS_UPDATE_REG_OFFSET));
1839 1841 }
1840 1842 hba->chan_count = MAX_RINGS;
1841 1843
1842 1844 return (0);
1843 1845
/* Unwind any partial mappings/allocations before reporting failure */
1844 1846 failed:
1845 1847
1846 1848 emlxs_sli3_unmap_hdw(hba);
1847 1849 return (ENOMEM);
1848 1850
1849 1851 } /* emlxs_sli3_map_hdw() */
1850 1852
1851 1853
/*
 * emlxs_sli3_unmap_hdw() - Release all register mappings and SLIM2.
 *
 * Reverse of emlxs_sli3_map_hdw().  Frees each register access handle
 * that is currently mapped (CSR, SLIM, SBUS flash, TITAN core, TITAN
 * CSR) and zeroes the handle so the routine is idempotent and safe to
 * call after a partial map.  Finally frees the SLIM2 DMA buffer if it
 * was allocated, supplying the DMA handles only when a physical
 * address exists (i.e. the DMA binding actually happened).
 */
1852 1854 static void
1853 1855 emlxs_sli3_unmap_hdw(emlxs_hba_t *hba)
1854 1856 {
1855 1857 MBUF_INFO bufinfo;
1856 1858 MBUF_INFO *buf_info = &bufinfo;
1857 1859
1858 1860 if (hba->sli.sli3.csr_acc_handle) {
1859 1861 ddi_regs_map_free(&hba->sli.sli3.csr_acc_handle);
1860 1862 hba->sli.sli3.csr_acc_handle = 0;
1861 1863 }
1862 1864
1863 1865 if (hba->sli.sli3.slim_acc_handle) {
1864 1866 ddi_regs_map_free(&hba->sli.sli3.slim_acc_handle);
1865 1867 hba->sli.sli3.slim_acc_handle = 0;
1866 1868 }
1867 1869
1868 1870 if (hba->sli.sli3.sbus_flash_acc_handle) {
1869 1871 ddi_regs_map_free(&hba->sli.sli3.sbus_flash_acc_handle);
1870 1872 hba->sli.sli3.sbus_flash_acc_handle = 0;
1871 1873 }
1872 1874
1873 1875 if (hba->sli.sli3.sbus_core_acc_handle) {
1874 1876 ddi_regs_map_free(&hba->sli.sli3.sbus_core_acc_handle);
1875 1877 hba->sli.sli3.sbus_core_acc_handle = 0;
1876 1878 }
1877 1879
1878 1880 if (hba->sli.sli3.sbus_csr_handle) {
1879 1881 ddi_regs_map_free(&hba->sli.sli3.sbus_csr_handle);
1880 1882 hba->sli.sli3.sbus_csr_handle = 0;
1881 1883 }
1882 1884
/* Free the SLIM2 DMA buffer allocated by emlxs_sli3_map_hdw() */
1883 1885 if (hba->sli.sli3.slim2.virt) {
1884 1886 bzero(buf_info, sizeof (MBUF_INFO));
1885 1887
/* Only pass DMA handles if the buffer was DMA-bound */
1886 1888 if (hba->sli.sli3.slim2.phys) {
1887 1889 buf_info->phys = hba->sli.sli3.slim2.phys;
1888 1890 buf_info->data_handle = hba->sli.sli3.slim2.data_handle;
1889 1891 buf_info->dma_handle = hba->sli.sli3.slim2.dma_handle;
1890 1892 buf_info->flags = FC_MBUF_DMA;
1891 1893 }
1892 1894
1893 1895 buf_info->virt = hba->sli.sli3.slim2.virt;
1894 1896 buf_info->size = hba->sli.sli3.slim2.size;
1895 1897 emlxs_mem_free(hba, buf_info);
1896 1898
1897 1899 hba->sli.sli3.slim2.virt = NULL;
1898 1900 }
1899 1901
1900 1902
1901 1903 return;
1902 1904
1903 1905 } /* emlxs_sli3_unmap_hdw() */
1904 1906
1905 1907
1906 1908 static uint32_t
1907 1909 emlxs_sli3_hba_init(emlxs_hba_t *hba)
1908 1910 {
1909 1911 emlxs_port_t *port = &PPORT;
1910 1912 emlxs_port_t *vport;
1911 1913 emlxs_config_t *cfg;
1912 1914 uint16_t i;
1913 1915 VPIobj_t *vpip;
1914 1916
1915 1917 cfg = &CFG;
1916 1918 i = 0;
1917 1919
1918 1920 /* Restart the adapter */
1919 1921 if (emlxs_sli3_hba_reset(hba, 1, 0, 0)) {
1920 1922 return (1);
1921 1923 }
1922 1924
1923 1925 hba->channel_fcp = FC_FCP_RING;
1924 1926 hba->channel_els = FC_ELS_RING;
1925 1927 hba->channel_ip = FC_IP_RING;
1926 1928 hba->channel_ct = FC_CT_RING;
1927 1929 hba->chan_count = MAX_RINGS;
1928 1930 hba->sli.sli3.ring_count = MAX_RINGS;
1929 1931
1930 1932 /*
1931 1933 * WARNING: There is a max of 6 ring masks allowed
1932 1934 */
1933 1935 /* RING 0 - FCP */
1934 1936 if (port->flag & EMLXS_TGT_ENABLED) {
1935 1937 hba->sli.sli3.ring_masks[FC_FCP_RING] = 1;
1936 1938 hba->sli.sli3.ring_rval[i] = FC_FCP_CMND;
1937 1939 hba->sli.sli3.ring_rmask[i] = 0;
1938 1940 hba->sli.sli3.ring_tval[i] = FC_TYPE_SCSI_FCP;
1939 1941 hba->sli.sli3.ring_tmask[i++] = 0xFF;
1940 1942 } else {
1941 1943 hba->sli.sli3.ring_masks[FC_FCP_RING] = 0;
1942 1944 }
1943 1945
1944 1946 hba->sli.sli3.ring[FC_FCP_RING].fc_numCiocb = SLIM_IOCB_CMD_R0_ENTRIES;
1945 1947 hba->sli.sli3.ring[FC_FCP_RING].fc_numRiocb = SLIM_IOCB_RSP_R0_ENTRIES;
1946 1948
1947 1949 /* RING 1 - IP */
1948 1950 if (cfg[CFG_NETWORK_ON].current) {
1949 1951 hba->sli.sli3.ring_masks[FC_IP_RING] = 1;
1950 1952 hba->sli.sli3.ring_rval[i] = FC_UNSOL_DATA; /* Unsol Data */
1951 1953 hba->sli.sli3.ring_rmask[i] = 0xFF;
1952 1954 hba->sli.sli3.ring_tval[i] = FC_TYPE_IS8802_SNAP; /* LLC/SNAP */
1953 1955 hba->sli.sli3.ring_tmask[i++] = 0xFF;
1954 1956 } else {
1955 1957 hba->sli.sli3.ring_masks[FC_IP_RING] = 0;
1956 1958 }
1957 1959
1958 1960 hba->sli.sli3.ring[FC_IP_RING].fc_numCiocb = SLIM_IOCB_CMD_R1_ENTRIES;
1959 1961 hba->sli.sli3.ring[FC_IP_RING].fc_numRiocb = SLIM_IOCB_RSP_R1_ENTRIES;
1960 1962
1961 1963 /* RING 2 - ELS */
1962 1964 hba->sli.sli3.ring_masks[FC_ELS_RING] = 1;
1963 1965 hba->sli.sli3.ring_rval[i] = FC_ELS_REQ; /* ELS request/rsp */
1964 1966 hba->sli.sli3.ring_rmask[i] = 0xFE;
1965 1967 hba->sli.sli3.ring_tval[i] = FC_TYPE_EXTENDED_LS; /* ELS */
1966 1968 hba->sli.sli3.ring_tmask[i++] = 0xFF;
1967 1969
1968 1970 hba->sli.sli3.ring[FC_ELS_RING].fc_numCiocb = SLIM_IOCB_CMD_R2_ENTRIES;
1969 1971 hba->sli.sli3.ring[FC_ELS_RING].fc_numRiocb = SLIM_IOCB_RSP_R2_ENTRIES;
1970 1972
1971 1973 /* RING 3 - CT */
1972 1974 hba->sli.sli3.ring_masks[FC_CT_RING] = 1;
1973 1975 hba->sli.sli3.ring_rval[i] = FC_UNSOL_CTL; /* CT request/rsp */
1974 1976 hba->sli.sli3.ring_rmask[i] = 0xFE;
1975 1977 hba->sli.sli3.ring_tval[i] = FC_TYPE_FC_SERVICES; /* CT */
1976 1978 hba->sli.sli3.ring_tmask[i++] = 0xFF;
1977 1979
1978 1980 hba->sli.sli3.ring[FC_CT_RING].fc_numCiocb = SLIM_IOCB_CMD_R3_ENTRIES;
1979 1981 hba->sli.sli3.ring[FC_CT_RING].fc_numRiocb = SLIM_IOCB_RSP_R3_ENTRIES;
1980 1982
1981 1983 if (i > 6) {
1982 1984 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
1983 1985 "hba_init: Too many ring masks defined. cnt=%d", i);
1984 1986 return (1);
1985 1987 }
1986 1988
1987 1989 /* Initialize all the port objects */
1988 1990 hba->vpi_max = 0;
1989 1991 for (i = 0; i < MAX_VPORTS; i++) {
1990 1992 vport = &VPORT(i);
1991 1993 vport->hba = hba;
1992 1994 vport->vpi = i;
1993 1995
1994 1996 vpip = &vport->VPIobj;
1995 1997 vpip->index = i;
1996 1998 vpip->VPI = i;
1997 1999 vpip->port = vport;
1998 2000 vpip->state = VPI_STATE_OFFLINE;
1999 2001 vport->vpip = vpip;
2000 2002 }
2001 2003
2002 2004 /*
2003 2005 * Initialize the max_node count to a default value if needed
2004 2006 * This determines how many node objects we preallocate in the pool
2005 2007 * The actual max_nodes will be set later based on adapter info
2006 2008 */
2007 2009 if (hba->max_nodes == 0) {
2008 2010 if (cfg[CFG_NUM_NODES].current > 0) {
2009 2011 hba->max_nodes = cfg[CFG_NUM_NODES].current;
2010 2012 } else if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
2011 2013 hba->max_nodes = 4096;
2012 2014 } else {
2013 2015 hba->max_nodes = 512;
2014 2016 }
2015 2017 }
2016 2018
2017 2019 return (0);
2018 2020
2019 2021 } /* emlxs_sli3_hba_init() */
2020 2022
2021 2023
2022 2024 /*
2023 2025 * 0: quiesce indicates the call is not from quiesce routine.
2024 2026 * 1: quiesce indicates the call is from quiesce routine.
2025 2027 */
2026 2028 static uint32_t
2027 2029 emlxs_sli3_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
2028 2030 uint32_t quiesce)
2029 2031 {
2030 2032 emlxs_port_t *port = &PPORT;
2031 2033 MAILBOX *swpmb;
2032 2034 MAILBOX *mb;
2033 2035 uint32_t word0;
2034 2036 uint16_t cfg_value;
2035 2037 uint32_t status = 0;
2036 2038 uint32_t status1;
2037 2039 uint32_t status2;
2038 2040 uint32_t i;
2039 2041 uint32_t ready;
2040 2042 emlxs_port_t *vport;
2041 2043 RING *rp;
2042 2044 emlxs_config_t *cfg = &CFG;
2043 2045
2044 2046 if (!cfg[CFG_RESET_ENABLE].current) {
2045 2047 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2046 2048 "Adapter reset disabled.");
2047 2049 EMLXS_STATE_CHANGE(hba, FC_ERROR);
2048 2050
2049 2051 return (1);
2050 2052 }
2051 2053
2052 2054 /* Kill the adapter first */
2053 2055 if (quiesce == 0) {
2054 2056 emlxs_sli3_hba_kill(hba);
2055 2057 } else {
2056 2058 emlxs_sli3_hba_kill4quiesce(hba);
2057 2059 }
2058 2060
2059 2061 if (restart) {
2060 2062 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2061 2063 "Restarting.");
2062 2064 EMLXS_STATE_CHANGE(hba, FC_INIT_START);
2063 2065
2064 2066 ready = (HS_FFRDY | HS_MBRDY);
2065 2067 } else {
2066 2068 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2067 2069 "Resetting.");
2068 2070 EMLXS_STATE_CHANGE(hba, FC_WARM_START);
2069 2071
2070 2072 ready = HS_MBRDY;
2071 2073 }
2072 2074
2073 2075 hba->flag &= ~(FC_SLIM2_MODE | FC_HARDWARE_ERROR);
2074 2076
2075 2077 mb = FC_SLIM1_MAILBOX(hba);
2076 2078 swpmb = (MAILBOX *)&word0;
2077 2079
2078 2080 reset:
2079 2081
2080 2082 i = 0;
2081 2083
2082 2084 /* Save reset time */
2083 2085 HBASTATS.ResetTime = hba->timer_tics;
2084 2086
2085 2087 if (restart) {
2086 2088 /* First put restart command in mailbox */
2087 2089 word0 = 0;
2088 2090 swpmb->mbxCommand = MBX_RESTART;
2089 2091 swpmb->mbxHc = 1;
2090 2092 WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb), word0);
2091 2093
2092 2094 /* Only skip post after emlxs_sli3_online is completed */
2093 2095 if (skip_post) {
2094 2096 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
2095 2097 1);
2096 2098 } else {
2097 2099 WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb) + 1),
2098 2100 0);
2099 2101 }
2100 2102
2101 2103 }
2102 2104
2103 2105 /*
2104 2106 * Turn off SERR, PERR in PCI cmd register
2105 2107 */
2106 2108 cfg_value = ddi_get16(hba->pci_acc_handle,
2107 2109 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER));
2108 2110
2109 2111 ddi_put16(hba->pci_acc_handle,
2110 2112 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
2111 2113 (uint16_t)(cfg_value & ~(CMD_PARITY_CHK | CMD_SERR_ENBL)));
2112 2114
2113 2115 hba->sli.sli3.hc_copy = HC_INITFF;
2114 2116 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
2115 2117
2116 2118 /* Wait 1 msec before restoring PCI config */
2117 2119 BUSYWAIT_MS(1);
2118 2120
2119 2121 /* Restore PCI cmd register */
2120 2122 ddi_put16(hba->pci_acc_handle,
2121 2123 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
2122 2124 (uint16_t)cfg_value);
2123 2125
2124 2126 /* Wait 3 seconds before checking */
2125 2127 BUSYWAIT_MS(3000);
2126 2128 i += 3;
2127 2129
2128 2130 /* Wait for reset completion */
2129 2131 while (i < 30) {
2130 2132 /* Check status register to see what current state is */
2131 2133 status = READ_CSR_REG(hba, FC_HS_REG(hba));
2132 2134
2133 2135 /* Check to see if any errors occurred during init */
2134 2136 if (status & HS_FFERM) {
2135 2137 status1 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2136 2138 hba->sli.sli3.slim_addr + 0xa8));
2137 2139 status2 = READ_SLIM_ADDR(hba, ((volatile uint8_t *)
2138 2140 hba->sli.sli3.slim_addr + 0xac));
2139 2141
2140 2142 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2141 2143 "HS_FFERM: status=0x%x status1=0x%x status2=0x%x",
2142 2144 status, status1, status2);
2143 2145
2144 2146 EMLXS_STATE_CHANGE(hba, FC_ERROR);
2145 2147 return (1);
2146 2148 }
2147 2149
2148 2150 if ((status & ready) == ready) {
2149 2151 /* Reset Done !! */
2150 2152 goto done;
2151 2153 }
2152 2154
2153 2155 /*
2154 2156 * Check every 1 second for 15 seconds, then reset board
2155 2157 * again (w/post), then check every 1 second for 15 * seconds.
2156 2158 */
2157 2159 BUSYWAIT_MS(1000);
2158 2160 i++;
2159 2161
2160 2162 /* Reset again (w/post) at 15 seconds */
2161 2163 if (i == 15) {
2162 2164 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
2163 2165 "Reset failed. Retrying...");
2164 2166
2165 2167 goto reset;
2166 2168 }
2167 2169 }
2168 2170
2169 2171 #ifdef FMA_SUPPORT
2170 2172 reset_fail:
2171 2173 #endif /* FMA_SUPPORT */
2172 2174
2173 2175 /* Timeout occurred */
2174 2176 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2175 2177 "Timeout: status=0x%x", status);
2176 2178 EMLXS_STATE_CHANGE(hba, FC_ERROR);
2177 2179
2178 2180 /* Log a dump event */
2179 2181 emlxs_log_dump_event(port, NULL, 0);
2180 2182
2181 2183 return (1);
2182 2184
2183 2185 done:
2184 2186
2185 2187 /* Initialize hc_copy */
2186 2188 hba->sli.sli3.hc_copy = READ_CSR_REG(hba, FC_HC_REG(hba));
2187 2189
2188 2190 #ifdef FMA_SUPPORT
2189 2191 /* Access handle validation */
2190 2192 if ((emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2191 2193 != DDI_FM_OK) ||
2192 2194 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
2193 2195 != DDI_FM_OK) ||
2194 2196 (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
2195 2197 != DDI_FM_OK)) {
2196 2198 EMLXS_MSGF(EMLXS_CONTEXT,
2197 2199 &emlxs_invalid_access_handle_msg, NULL);
2198 2200 goto reset_fail;
2199 2201 }
2200 2202 #endif /* FMA_SUPPORT */
2201 2203
2202 2204 /* Reset the hba structure */
2203 2205 hba->flag &= FC_RESET_MASK;
2204 2206 hba->channel_tx_count = 0;
2205 2207 hba->io_count = 0;
2206 2208 hba->iodone_count = 0;
2207 2209 hba->topology = 0;
2208 2210 hba->linkspeed = 0;
2209 2211 hba->heartbeat_active = 0;
2210 2212 hba->discovery_timer = 0;
2211 2213 hba->linkup_timer = 0;
2212 2214 hba->loopback_tics = 0;
2213 2215
2214 2216 /* Reset the ring objects */
2215 2217 for (i = 0; i < MAX_RINGS; i++) {
2216 2218 rp = &hba->sli.sli3.ring[i];
2217 2219 rp->fc_mpon = 0;
2218 2220 rp->fc_mpoff = 0;
2219 2221 }
2220 2222
2221 2223 /* Reset the port objects */
2222 2224 for (i = 0; i < MAX_VPORTS; i++) {
2223 2225 vport = &VPORT(i);
2224 2226
2225 2227 vport->flag &= EMLXS_PORT_RESET_MASK;
2226 2228 vport->did = 0;
2227 2229 vport->prev_did = 0;
2228 2230 vport->lip_type = 0;
2229 2231 bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
2230 2232 bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));
2231 2233
2232 2234 bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
2233 2235 vport->node_base.nlp_Rpi = 0;
2234 2236 vport->node_base.nlp_DID = 0xffffff;
2235 2237 vport->node_base.nlp_list_next = NULL;
2236 2238 vport->node_base.nlp_list_prev = NULL;
2237 2239 vport->node_base.nlp_active = 1;
2238 2240 vport->node_count = 0;
2239 2241
2240 2242 if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
2241 2243 vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
2242 2244 }
2243 2245 }
2244 2246
2245 2247 return (0);
2246 2248
2247 2249 } /* emlxs_sli3_hba_reset */
2248 2250
2249 2251
#define	BPL_CMD		0
#define	BPL_RESP	1
#define	BPL_DATA	2

/*
 * emlxs_pkt_to_bpl() - Append buffer descriptor entries (BDEs) for one
 * payload of an fc_packet to a buffer pointer list (BPL).
 *
 * pkt:      transport packet supplying the DMA cookies and lengths.
 * bpl:      next free entry in the caller's BPL.
 * bpl_type: BPL_CMD, BPL_RESP or BPL_DATA - selects which payload of
 *           the packet (command, response or data) is mapped.
 *
 * Returns a pointer to the next free BPL entry after the ones written,
 * or NULL if bpl_type is not recognized.  The caller uses the returned
 * pointer to append further payloads and to compute the BPL size.
 */
static ULP_BDE64 *
emlxs_pkt_to_bpl(fc_packet_t *pkt, ULP_BDE64 *bpl, uint32_t bpl_type)
{
	ddi_dma_cookie_t *cp;
	uint_t i;
	int32_t size;
	uint_t cookie_cnt;
	uint8_t bdeFlags;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* MODREV3+ packets carry full DMA cookie arrays per payload */
	switch (bpl_type) {
	case BPL_CMD:
		cp = pkt->pkt_cmd_cookie;
		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
		size = (int32_t)pkt->pkt_cmdlen;
		bdeFlags = 0;
		break;

	case BPL_RESP:
		/* BUFF_USE_RCV: the adapter writes into this buffer */
		cp = pkt->pkt_resp_cookie;
		cookie_cnt = pkt->pkt_resp_cookie_cnt;
		size = (int32_t)pkt->pkt_rsplen;
		bdeFlags = BUFF_USE_RCV;
		break;


	case BPL_DATA:
		/* Inbound (FCP read) data is received by the adapter */
		cp = pkt->pkt_data_cookie;
		cookie_cnt = pkt->pkt_data_cookie_cnt;
		size = (int32_t)pkt->pkt_datalen;
		bdeFlags = (pkt->pkt_tran_type == FC_PKT_FCP_READ) ?
		    BUFF_USE_RCV : 0;
		break;

	default:
		return (NULL);
	}

#else
	/* Pre-MODREV3 packets carry a single DMA cookie per payload */
	switch (bpl_type) {
	case BPL_CMD:
		cp = &pkt->pkt_cmd_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_cmdlen;
		bdeFlags = 0;
		break;

	case BPL_RESP:
		/* BUFF_USE_RCV: the adapter writes into this buffer */
		cp = &pkt->pkt_resp_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_rsplen;
		bdeFlags = BUFF_USE_RCV;
		break;


	case BPL_DATA:
		/* Inbound (FCP read) data is received by the adapter */
		cp = &pkt->pkt_data_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_datalen;
		bdeFlags = (pkt->pkt_tran_type == FC_PKT_FCP_READ) ?
		    BUFF_USE_RCV : 0;
		break;

	default:
		return (NULL);
	}
#endif	/* >= EMLXS_MODREV3 */

	/*
	 * Emit one BDE per cookie until the payload length is consumed.
	 * Address and size words are byte-swapped (BE_SWAP32) into the
	 * adapter's expected byte order; the last BDE is clamped with
	 * MIN() so the total never exceeds the payload length.
	 */
	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
		bpl->addrHigh =
		    BE_SWAP32(PADDR_HI(cp->dmac_laddress));
		bpl->addrLow =
		    BE_SWAP32(PADDR_LO(cp->dmac_laddress));
		bpl->tus.f.bdeSize = MIN(size, cp->dmac_size);
		bpl->tus.f.bdeFlags = bdeFlags;
		bpl->tus.w = BE_SWAP32(bpl->tus.w);

		bpl++;
		size -= cp->dmac_size;
	}

	return (bpl);

} /* emlxs_pkt_to_bpl */
2338 2340
2339 2341
/*
 * emlxs_sli2_bde_setup() - Map a packet's payloads into an external
 * buffer pointer list (BPL) and point the IOCB's BDL descriptor at it.
 *
 * A BPL buffer is taken from the preallocated per-iotag table when one
 * exists, otherwise from the MEM_BPL pool.  Which payloads are added
 * (command, response, data) depends on the ring the packet targets and
 * on the packet's transfer type.
 *
 * Returns 0 on success; 1 if no BPL buffer was available or a payload
 * could not be mapped.
 */
static uint32_t
emlxs_sli2_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	MATCHMAP *bmp;
	ULP_BDE64 *bpl;
	uint64_t bp;
	IOCB *iocb;
	IOCBQ *iocbq;
	CHANNEL *cp;
	uint32_t data_cookie_cnt;
	uint32_t channelno;

	cp = sbp->channel;
	iocb = (IOCB *) & sbp->iocbq;
	pkt = PRIV2PKT(sbp);

	/* Prefer the BPL preallocated for this iotag, if a table exists */
	if (hba->sli.sli3.bpl_table) {
		bmp = hba->sli.sli3.bpl_table[sbp->iotag];
	} else {
		bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL);
	}

	if (!bmp) {
		return (1);
	}

	/*
	 * sbp->bmp records the BPL attached to this I/O.
	 * NOTE(review): on the error returns below the BPL is not freed
	 * here - presumably the packet cleanup path releases it through
	 * sbp->bmp; confirm against the completion code.
	 */
	sbp->bmp = bmp;
	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	iocbq = &sbp->iocbq;

	/* FCP commands may arrive on any channel; map them to the FCP ring */
	channelno = (iocbq->flag & IOCB_FCP_CMD)? FC_FCP_RING:cp->channelno;
	switch (channelno) {
	case FC_FCP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
		if (! bpl) {
			return (1);
		}

		/* Check if response & data payloads are needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
		if (! bpl) {
			return (1);
		}

		/* Check if data payload is needed */
		if ((pkt->pkt_datalen == 0) ||
		    (data_cookie_cnt == 0)) {
			break;
		}

		/* DATA payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_DATA);
		if (! bpl) {
			return (1);
		}
		break;

	case FC_IP_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
		if (! bpl) {
			return (1);
		}
		break;

	case FC_ELS_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
		if (! bpl) {
			return (1);
		}

		/* Check if response payload is needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
		if (! bpl) {
			return (1);
		}
		break;

	case FC_CT_RING:

		/* CMD payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_CMD);
		if (! bpl) {
			return (1);
		}

		/* Check if response payload is needed */
		/* (Menlo-typed CT frames always carry a response) */
		if ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) &&
		    (pkt->pkt_cmd_fhdr.type != EMLXS_MENLO_TYPE)) {
			break;
		}

		/* RSP payload */
		bpl = emlxs_pkt_to_bpl(pkt, bpl, BPL_RESP);
		if (! bpl) {
			return (1);
		}
		break;

	}

	/*
	 * Point the IOCB's BDL at the BPL just built.  bdeSize is the
	 * number of bytes of BPL entries actually written.
	 */
	iocb->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BDL;
	iocb->un.genreq64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.genreq64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.genreq64.bdl.bdeSize =
	    (uint32_t)(((uintptr_t)bpl - (uintptr_t)bmp->virt) & 0xFFFFFFFF);
	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;

	return (0);

} /* emlxs_sli2_bde_setup */
2477 2479
2478 2480
/*
 * emlxs_sli3_bde_setup() - Build the IOCB using SLI-3 embedded BDEs
 * (no external BPL) when the payload cookie layout allows it.
 *
 * Falls back to emlxs_sli2_bde_setup() when the command or response
 * payload spans more than one DMA cookie, or when the total cookie
 * count exceeds SLI3_MAX_BDE.
 *
 * Returns 0 on success, 1 on failure (from the SLI-2 fallback path).
 */
static uint32_t
emlxs_sli3_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	ddi_dma_cookie_t *cp_data;
	fc_packet_t *pkt;
	ULP_BDE64 *bde;
	int data_cookie_cnt;
	uint32_t i;
	uint32_t channelno;
	IOCB *iocb;
	IOCBQ *iocbq;
	CHANNEL *cp;

	pkt = PRIV2PKT(sbp);
#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* Too many cookies for embedded BDEs: use the external BPL path */
	if ((pkt->pkt_cmd_cookie_cnt > 1) ||
	    (pkt->pkt_resp_cookie_cnt > 1) ||
	    ((pkt->pkt_cmd_cookie_cnt + pkt->pkt_resp_cookie_cnt +
	    pkt->pkt_data_cookie_cnt) > SLI3_MAX_BDE)) {
		i = emlxs_sli2_bde_setup(port, sbp);
		return (i);
	}

	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
	cp_data = pkt->pkt_data_cookie;
	data_cookie_cnt = pkt->pkt_data_cookie_cnt;
#else
	/* Pre-MODREV3 packets always have a single cookie per payload */
	cp_cmd = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
	cp_data = &pkt->pkt_data_cookie;
	data_cookie_cnt = 1;
#endif	/* >= EMLXS_MODREV3 */

	cp = sbp->channel;
	iocbq = &sbp->iocbq;
	iocb = (IOCB *)iocbq;
	iocb->unsli3.ext_iocb.ebde_count = 0;

	/* FCP commands may arrive on any channel; map them to the FCP ring */
	channelno = (iocbq->flag & IOCB_FCP_CMD)? FC_FCP_RING:cp->channelno;
	switch (channelno) {
	case FC_FCP_RING:
		/* CMD payload */
		iocb->un.fcpi64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.fcpi64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.fcpi64.bdl.bdeFlags = 0;

		/* Check if a response & data payload are needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload */
		iocb->unsli3.ext_iocb.ebde1.addrHigh =
		    PADDR_HI(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.addrLow =
		    PADDR_LO(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = 0;
		iocb->unsli3.ext_iocb.ebde_count = 1;

		/* Check if a data payload is needed */
		if ((pkt->pkt_datalen == 0) ||
		    (data_cookie_cnt == 0)) {
			break;
		}

		/* DATA payload: one extended BDE per data cookie */
		bde = (ULP_BDE64 *)&iocb->unsli3.ext_iocb.ebde2;
		for (i = 0; i < data_cookie_cnt; i++) {
			bde->addrHigh = PADDR_HI(cp_data->dmac_laddress);
			bde->addrLow = PADDR_LO(cp_data->dmac_laddress);
			bde->tus.f.bdeSize = cp_data->dmac_size;
			bde->tus.f.bdeFlags = 0;
			cp_data++;
			bde++;
		}
		iocb->unsli3.ext_iocb.ebde_count += data_cookie_cnt;

		break;

	case FC_IP_RING:
		/* CMD payload */
		iocb->un.xseq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.xseq64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.xseq64.bdl.bdeFlags = 0;

		break;

	case FC_ELS_RING:

		/* CMD payload */
		iocb->un.elsreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.elsreq64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.elsreq64.bdl.bdeFlags = 0;

		/* Check if a response payload is needed */
		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
			break;
		}

		/* RSP payload (BUFF_USE_RCV: adapter receives into it) */
		iocb->unsli3.ext_iocb.ebde1.addrHigh =
		    PADDR_HI(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.addrLow =
		    PADDR_LO(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = BUFF_USE_RCV;
		iocb->unsli3.ext_iocb.ebde_count = 1;
		break;

	case FC_CT_RING:

		/* CMD payload */
		iocb->un.genreq64.bdl.addrHigh =
		    PADDR_HI(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.addrLow =
		    PADDR_LO(cp_cmd->dmac_laddress);
		iocb->un.genreq64.bdl.bdeSize = pkt->pkt_cmdlen;
		iocb->un.genreq64.bdl.bdeFlags = 0;

		/* Check if a response payload is needed */
		/* (Menlo-typed CT frames always carry a response) */
		if ((pkt->pkt_tran_type == FC_PKT_OUTBOUND) &&
		    (pkt->pkt_cmd_fhdr.type != EMLXS_MENLO_TYPE)) {
			break;
		}

		/* RSP payload (BUFF_USE_RCV: adapter receives into it) */
		iocb->unsli3.ext_iocb.ebde1.addrHigh =
		    PADDR_HI(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.addrLow =
		    PADDR_LO(cp_resp->dmac_laddress);
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeSize = pkt->pkt_rsplen;
		iocb->unsli3.ext_iocb.ebde1.tus.f.bdeFlags = BUFF_USE_RCV;
		iocb->unsli3.ext_iocb.ebde_count = 1;
		break;
	}

	/* No external BPL in use, so no BDL entry to count or link */
	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_bde_setup */
2634 2636
2635 2637
/* Only used for FCP Data xfers */
#ifdef SFCT_SUPPORT
/*
 * emlxs_sli2_fct_bde_setup() - Build the SLI-2 BPL for a target-mode
 * (SFCT/COMSTAR) FCP data transfer and link the FCP target IOCB to it.
 *
 * When the command has no data buffer (sbp->fct_buf == NULL) the BDL
 * and length fields are zeroed and 0 is returned.  fcpt_Length is set
 * only for write (inbound) data per TF_WRITE_DATA.
 *
 * Returns 0 on success, 1 if no BPL buffer could be obtained.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli2_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	scsi_task_t *fct_task;
	MATCHMAP *bmp;
	ULP_BDE64 *bpl;
	uint64_t bp;
	uint8_t bdeFlags;
	IOCB *iocb;
	uint32_t size;
	MATCHMAP *mp;

	iocb = (IOCB *)&sbp->iocbq.iocb;
	sbp->bmp = NULL;

	/* No data phase: clear the BDL and length fields */
	if (!sbp->fct_buf) {
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (0);
	}

	/* Prefer the BPL preallocated for this iotag, if a table exists */
	if (hba->sli.sli3.bpl_table) {
		bmp = hba->sli.sli3.bpl_table[sbp->iotag];
	} else {
		bmp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BPL);
	}

	if (!bmp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_sli2_bde_setup: Unable to BPL buffer. iotag=%d",
		    sbp->iotag);

		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 1;
		return (1);
	}

	bpl = (ULP_BDE64 *)bmp->virt;
	bp = bmp->phys;

	fct_task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;

	size = sbp->fct_buf->db_data_size;
	mp = (MATCHMAP *)sbp->fct_buf->db_port_private;

	/* Writes are inbound to the adapter: mark the buffer receive */
	bdeFlags = (fct_task->task_flags & TF_WRITE_DATA) ? BUFF_USE_RCV : 0;

	/* Init the buffer list (single byte-swapped BDE for the data) */
	bpl->addrHigh = BE_SWAP32(PADDR_HI(mp->phys));
	bpl->addrLow = BE_SWAP32(PADDR_LO(mp->phys));
	bpl->tus.f.bdeSize = size;
	bpl->tus.f.bdeFlags = bdeFlags;
	bpl->tus.w = BE_SWAP32(bpl->tus.w);

	/* Init the IOCB: its BDL points at the single-entry BPL */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(bp);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(bp);
	iocb->un.fcpt64.bdl.bdeSize = sizeof (ULP_BDE64);
	iocb->un.fcpt64.bdl.bdeFlags = BUFF_TYPE_BDL;

	iocb->un.fcpt64.fcpt_Length =
	    (fct_task->task_flags & TF_WRITE_DATA) ? size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 1;
	iocb->ULPLE = 1;
	/* Record the BPL so the completion path can release it */
	sbp->bmp = bmp;

	return (0);

} /* emlxs_sli2_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2724 2726
2725 2727
#ifdef SFCT_SUPPORT
/*
 * emlxs_sli3_fct_bde_setup() - Build the SLI-3 embedded BDE for a
 * target-mode (SFCT/COMSTAR) FCP data transfer.
 *
 * Unlike the SLI-2 variant this needs no external BPL: the single data
 * buffer is described directly in the IOCB's BDL field.  When there is
 * no data buffer (sbp->fct_buf == NULL) all descriptor and length
 * fields are zeroed.  fcpt_Length is set only for write (inbound)
 * data per TF_WRITE_DATA.
 *
 * Always returns 0.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli3_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	IOCB *iocb = (IOCB *)&sbp->iocbq;
	scsi_task_t *task;
	MATCHMAP *dmap;
	uint32_t xfer_size;
	uint32_t is_write;

	if (sbp->fct_buf == NULL) {
		/* No data phase: clear the descriptor and length fields */
		iocb->un.fcpt64.bdl.addrHigh = 0;
		iocb->un.fcpt64.bdl.addrLow = 0;
		iocb->un.fcpt64.bdl.bdeSize = 0;
		iocb->un.fcpt64.bdl.bdeFlags = 0;
		iocb->un.fcpt64.fcpt_Offset = 0;
		iocb->un.fcpt64.fcpt_Length = 0;
		iocb->ULPBDECOUNT = 0;
		iocb->ULPLE = 0;
		iocb->unsli3.ext_iocb.ebde_count = 0;
		return (0);
	}

	task = (scsi_task_t *)sbp->fct_cmd->cmd_specific;
	dmap = (MATCHMAP *)sbp->fct_buf->db_port_private;
	xfer_size = sbp->fct_buf->db_data_size;
	is_write = (task->task_flags & TF_WRITE_DATA) ? 1 : 0;

	/* First (and only) BDE points directly at the data buffer; */
	/* writes are inbound to the adapter, so mark them receive. */
	iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(dmap->phys);
	iocb->un.fcpt64.bdl.addrLow = PADDR_LO(dmap->phys);
	iocb->un.fcpt64.bdl.bdeSize = xfer_size;
	iocb->un.fcpt64.bdl.bdeFlags = is_write ? BUFF_USE_RCV : 0;

	iocb->unsli3.ext_iocb.ebde_count = 0;
	iocb->un.fcpt64.fcpt_Length = is_write ? xfer_size : 0;
	iocb->un.fcpt64.fcpt_Offset = 0;

	iocb->ULPBDECOUNT = 0;
	iocb->ULPLE = 0;

	return (0);

} /* emlxs_sli3_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2777 2779
2778 2780
2779 2781 static void
2780 2782 emlxs_sli3_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
2781 2783 {
2782 2784 #ifdef FMA_SUPPORT
2783 2785 emlxs_port_t *port = &PPORT;
2784 2786 #endif /* FMA_SUPPORT */
2785 2787 PGP *pgp;
2786 2788 emlxs_buf_t *sbp;
2787 2789 SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
2788 2790 RING *rp;
2789 2791 uint32_t nextIdx;
2790 2792 uint32_t status;
2791 2793 void *ioa2;
2792 2794 off_t offset;
2793 2795 uint32_t count = 0;
2794 2796 uint32_t flag;
2795 2797 uint32_t channelno;
2796 2798 int32_t throttle;
2797 2799 #ifdef NODE_THROTTLE_SUPPORT
2798 2800 int32_t node_throttle;
2799 2801 NODELIST *marked_node = NULL;
2800 2802 #endif /* NODE_THROTTLE_SUPPORT */
2801 2803
2802 2804 channelno = cp->channelno;
2803 2805 rp = (RING *)cp->iopath;
2804 2806
2805 2807 throttle = 0;
2806 2808
2807 2809 /* Check if FCP ring and adapter is not ready */
2808 2810 /* We may use any ring for FCP_CMD */
2809 2811 if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
2810 2812 if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
2811 2813 (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
2812 2814 emlxs_tx_put(iocbq, 1);
2813 2815 return;
2814 2816 }
2815 2817 }
2816 2818
2817 2819 /* Attempt to acquire CMD_RING lock */
2818 2820 if (mutex_tryenter(&EMLXS_CMD_RING_LOCK(channelno)) == 0) {
2819 2821 /* Queue it for later */
2820 2822 if (iocbq) {
2821 2823 if ((hba->io_count -
2822 2824 hba->channel_tx_count) > 10) {
2823 2825 emlxs_tx_put(iocbq, 1);
2824 2826 return;
2825 2827 } else {
2826 2828
2827 2829 /*
2828 2830 * EMLXS_MSGF(EMLXS_CONTEXT,
2829 2831 * &emlxs_ring_watchdog_msg,
2830 2832 * "%s host=%d port=%d cnt=%d,%d RACE
2831 2833 * CONDITION3 DETECTED.",
2832 2834 * emlxs_ring_xlate(channelno),
2833 2835 * rp->fc_cmdidx, rp->fc_port_cmdidx,
2834 2836 * hba->channel_tx_count,
2835 2837 * hba->io_count);
2836 2838 */
2837 2839 mutex_enter(&EMLXS_CMD_RING_LOCK(channelno));
2838 2840 }
2839 2841 } else {
2840 2842 return;
2841 2843 }
2842 2844 }
2843 2845 /* CMD_RING_LOCK acquired */
2844 2846
2845 2847 /* Throttle check only applies to non special iocb */
2846 2848 if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
2847 2849 /* Check if HBA is full */
2848 2850 throttle = hba->io_throttle - hba->io_active;
2849 2851 if (throttle <= 0) {
2850 2852 /* Hitting adapter throttle limit */
2851 2853 /* Queue it for later */
2852 2854 if (iocbq) {
2853 2855 emlxs_tx_put(iocbq, 1);
2854 2856 }
2855 2857
2856 2858 goto busy;
2857 2859 }
2858 2860 }
2859 2861
2860 2862 /* Read adapter's get index */
2861 2863 pgp = (PGP *)
2862 2864 &((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.port[channelno];
2863 2865 offset =
2864 2866 (off_t)((uint64_t)((unsigned long)&(pgp->cmdGetInx)) -
2865 2867 (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
2866 2868 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
2867 2869 DDI_DMA_SYNC_FORKERNEL);
2868 2870 rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
2869 2871
2870 2872 /* Calculate the next put index */
2871 2873 nextIdx =
2872 2874 (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;
2873 2875
2874 2876 /* Check if ring is full */
2875 2877 if (nextIdx == rp->fc_port_cmdidx) {
2876 2878 /* Try one more time */
2877 2879 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
2878 2880 DDI_DMA_SYNC_FORKERNEL);
2879 2881 rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
2880 2882
2881 2883 if (nextIdx == rp->fc_port_cmdidx) {
2882 2884 /* Queue it for later */
2883 2885 if (iocbq) {
2884 2886 emlxs_tx_put(iocbq, 1);
2885 2887 }
2886 2888
2887 2889 goto busy;
2888 2890 }
2889 2891 }
2890 2892
2891 2893 /*
2892 2894 * We have a command ring slot available
2893 2895 * Make sure we have an iocb to send
2894 2896 */
2895 2897 if (iocbq) {
2896 2898 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2897 2899
2898 2900 /* Check if the ring already has iocb's waiting */
2899 2901 if (cp->nodeq.q_first != NULL) {
2900 2902 /* Put the current iocbq on the tx queue */
2901 2903 emlxs_tx_put(iocbq, 0);
2902 2904
2903 2905 /*
2904 2906 * Attempt to replace it with the next iocbq
2905 2907 * in the tx queue
2906 2908 */
2907 2909 iocbq = emlxs_tx_get(cp, 0);
2908 2910 }
2909 2911
2910 2912 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2911 2913 } else {
2912 2914 /* Try to get the next iocb on the tx queue */
2913 2915 iocbq = emlxs_tx_get(cp, 1);
2914 2916 }
2915 2917
2916 2918 sendit:
2917 2919 count = 0;
2918 2920
2919 2921 /* Process each iocbq */
2920 2922 while (iocbq) {
2921 2923 sbp = iocbq->sbp;
2922 2924
2923 2925 #ifdef NODE_THROTTLE_SUPPORT
2924 2926 if (sbp && sbp->node && sbp->node->io_throttle) {
2925 2927 node_throttle = sbp->node->io_throttle -
2926 2928 sbp->node->io_active;
2927 2929 if (node_throttle <= 0) {
2928 2930 /* Node is busy */
2929 2931 /* Queue this iocb and get next iocb from */
2930 2932 /* channel */
2931 2933
2932 2934 if (!marked_node) {
2933 2935 marked_node = sbp->node;
2934 2936 }
2935 2937
2936 2938 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2937 2939 emlxs_tx_put(iocbq, 0);
2938 2940
2939 2941 if (cp->nodeq.q_first == marked_node) {
2940 2942 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2941 2943 goto busy;
2942 2944 }
2943 2945
2944 2946 iocbq = emlxs_tx_get(cp, 0);
2945 2947 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2946 2948 continue;
2947 2949 }
2948 2950 }
2949 2951 marked_node = 0;
2950 2952 #endif /* NODE_THROTTLE_SUPPORT */
2951 2953
2952 2954 if (sbp && (sbp->pkt_flags & PACKET_DELAY_REQUIRED)) {
2953 2955 /*
2954 2956 * Update adapter if needed, since we are about to
2955 2957 * delay here
2956 2958 */
2957 2959 if (count) {
2958 2960 count = 0;
2959 2961
2960 2962 /* Update the adapter's cmd put index */
2961 2963 if (hba->bus_type == SBUS_FC) {
2962 2964 slim2p->mbx.us.s2.host[channelno].
2963 2965 cmdPutInx =
2964 2966 BE_SWAP32(rp->fc_cmdidx);
2965 2967
2966 2968 /* DMA sync the index for the adapter */
2967 2969 offset = (off_t)
2968 2970 ((uint64_t)
2969 2971 ((unsigned long)&(slim2p->mbx.us.
2970 2972 s2.host[channelno].cmdPutInx)) -
2971 2973 (uint64_t)((unsigned long)slim2p));
2972 2974 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.
2973 2975 dma_handle, offset, 4,
2974 2976 DDI_DMA_SYNC_FORDEV);
2975 2977 } else {
2976 2978 ioa2 = (void *)
2977 2979 ((char *)hba->sli.sli3.slim_addr +
2978 2980 hba->sli.sli3.hgp_ring_offset +
2979 2981 ((channelno * 2) *
2980 2982 sizeof (uint32_t)));
2981 2983 WRITE_SLIM_ADDR(hba,
2982 2984 (volatile uint32_t *)ioa2,
2983 2985 rp->fc_cmdidx);
2984 2986 }
2985 2987
2986 2988 status = (CA_R0ATT << (channelno * 4));
2987 2989 WRITE_CSR_REG(hba, FC_CA_REG(hba),
2988 2990 (volatile uint32_t)status);
2989 2991
2990 2992 }
2991 2993 /* Perform delay */
2992 2994 if ((channelno == FC_ELS_RING) &&
2993 2995 !(iocbq->flag & IOCB_FCP_CMD)) {
2994 2996 drv_usecwait(100000);
2995 2997 } else {
2996 2998 drv_usecwait(20000);
2997 2999 }
2998 3000 }
2999 3001
3000 3002 /*
3001 3003 * At this point, we have a command ring slot available
3002 3004 * and an iocb to send
3003 3005 */
3004 3006 flag = iocbq->flag;
3005 3007
3006 3008 /* Send the iocb */
3007 3009 emlxs_sli3_issue_iocb(hba, rp, iocbq);
3008 3010 /*
3009 3011 * After this, the sbp / iocb should not be
3010 3012 * accessed in the xmit path.
3011 3013 */
3012 3014
3013 3015 count++;
3014 3016 if (iocbq && (!(flag & IOCB_SPECIAL))) {
3015 3017 /* Check if HBA is full */
3016 3018 throttle = hba->io_throttle - hba->io_active;
3017 3019 if (throttle <= 0) {
3018 3020 goto busy;
3019 3021 }
3020 3022 }
3021 3023
3022 3024 /* Calculate the next put index */
3023 3025 nextIdx =
3024 3026 (rp->fc_cmdidx + 1 >=
3025 3027 rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;
3026 3028
3027 3029 /* Check if ring is full */
3028 3030 if (nextIdx == rp->fc_port_cmdidx) {
3029 3031 /* Try one more time */
3030 3032 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3031 3033 offset, 4, DDI_DMA_SYNC_FORKERNEL);
3032 3034 rp->fc_port_cmdidx = BE_SWAP32(pgp->cmdGetInx);
3033 3035
3034 3036 if (nextIdx == rp->fc_port_cmdidx) {
3035 3037 goto busy;
3036 3038 }
3037 3039 }
3038 3040
3039 3041 /* Get the next iocb from the tx queue if there is one */
3040 3042 iocbq = emlxs_tx_get(cp, 1);
3041 3043 }
3042 3044
3043 3045 if (count) {
3044 3046 /* Update the adapter's cmd put index */
3045 3047 if (hba->bus_type == SBUS_FC) {
3046 3048 slim2p->mbx.us.s2.host[channelno].
3047 3049 cmdPutInx = BE_SWAP32(rp->fc_cmdidx);
3048 3050
3049 3051 /* DMA sync the index for the adapter */
3050 3052 offset = (off_t)
3051 3053 ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
3052 3054 host[channelno].cmdPutInx)) -
3053 3055 (uint64_t)((unsigned long)slim2p));
3054 3056 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3055 3057 offset, 4, DDI_DMA_SYNC_FORDEV);
3056 3058 } else {
3057 3059 ioa2 =
3058 3060 (void *)((char *)hba->sli.sli3.slim_addr +
3059 3061 hba->sli.sli3.hgp_ring_offset +
3060 3062 ((channelno * 2) * sizeof (uint32_t)));
3061 3063 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
3062 3064 rp->fc_cmdidx);
3063 3065 }
3064 3066
3065 3067 status = (CA_R0ATT << (channelno * 4));
3066 3068 WRITE_CSR_REG(hba, FC_CA_REG(hba),
3067 3069 (volatile uint32_t)status);
3068 3070
3069 3071 /* Check tx queue one more time before releasing */
3070 3072 if ((iocbq = emlxs_tx_get(cp, 1))) {
3071 3073 /*
3072 3074 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_watchdog_msg,
3073 3075 * "%s host=%d port=%d RACE CONDITION1
3074 3076 * DETECTED.", emlxs_ring_xlate(channelno),
3075 3077 * rp->fc_cmdidx, rp->fc_port_cmdidx);
3076 3078 */
3077 3079 goto sendit;
3078 3080 }
3079 3081 }
3080 3082
3081 3083 #ifdef FMA_SUPPORT
3082 3084 /* Access handle validation */
3083 3085 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
3084 3086 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
3085 3087 #endif /* FMA_SUPPORT */
3086 3088
3087 3089 mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
3088 3090
3089 3091 return;
3090 3092
3091 3093 busy:
3092 3094
3093 3095 /*
3094 3096 * Set ring to SET R0CE_REQ in Chip Att register.
3095 3097 * Chip will tell us when an entry is freed.
3096 3098 */
3097 3099 if (count) {
3098 3100 /* Update the adapter's cmd put index */
3099 3101 if (hba->bus_type == SBUS_FC) {
3100 3102 slim2p->mbx.us.s2.host[channelno].cmdPutInx =
3101 3103 BE_SWAP32(rp->fc_cmdidx);
3102 3104
3103 3105 /* DMA sync the index for the adapter */
3104 3106 offset = (off_t)
3105 3107 ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
3106 3108 host[channelno].cmdPutInx)) -
3107 3109 (uint64_t)((unsigned long)slim2p));
3108 3110 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3109 3111 offset, 4, DDI_DMA_SYNC_FORDEV);
3110 3112 } else {
3111 3113 ioa2 =
3112 3114 (void *)((char *)hba->sli.sli3.slim_addr +
3113 3115 hba->sli.sli3.hgp_ring_offset +
3114 3116 ((channelno * 2) * sizeof (uint32_t)));
3115 3117 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
3116 3118 rp->fc_cmdidx);
3117 3119 }
3118 3120 }
3119 3121
3120 3122 status = ((CA_R0ATT | CA_R0CE_REQ) << (channelno * 4));
3121 3123 WRITE_CSR_REG(hba, FC_CA_REG(hba), (volatile uint32_t)status);
3122 3124
3123 3125 if (throttle <= 0) {
3124 3126 HBASTATS.IocbThrottled++;
3125 3127 } else {
3126 3128 HBASTATS.IocbRingFull[channelno]++;
3127 3129 }
3128 3130
3129 3131 #ifdef FMA_SUPPORT
3130 3132 /* Access handle validation */
3131 3133 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
3132 3134 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
3133 3135 #endif /* FMA_SUPPORT */
3134 3136
3135 3137 mutex_exit(&EMLXS_CMD_RING_LOCK(channelno));
3136 3138
3137 3139 return;
3138 3140
3139 3141 } /* emlxs_sli3_issue_iocb_cmd() */
3140 3142
3141 3143
3142 3144 /* MBX_NOWAIT - returns MBX_BUSY or MBX_SUCCESS or MBX_HARDWARE_ERROR */
3143 3145 /* MBX_WAIT - returns MBX_TIMEOUT or mailbox_status */
3144 3146 /* MBX_SLEEP - returns MBX_TIMEOUT or mailbox_status */
3145 3147 /* MBX_POLL - returns MBX_TIMEOUT or mailbox_status */
3146 3148
3147 3149 static uint32_t
3148 3150 emlxs_sli3_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3149 3151     uint32_t tmo)
3150 3152 {
	/*
	 * Issue a mailbox command to the adapter through the SLIM
	 * (SLI-2 host memory mailbox or SLI-1 on-board mailbox).
	 *
	 * flag selects the completion model (see the table above this
	 * function): MBX_NOWAIT queues/fires and returns immediately;
	 * MBX_SLEEP blocks on the mailbox condition variable until the
	 * interrupt path marks MBQ_COMPLETED; MBX_POLL spins reading
	 * mailbox word 0 until the chip releases ownership.
	 *
	 * tmo is the caller's timeout in seconds; it is raised to a
	 * minimum below (300s for flash-writing commands, 30s otherwise).
	 *
	 * Returns MBX_SUCCESS/MBX_BUSY/MBX_TIMEOUT/MBX_HARDWARE_ERROR or
	 * the adapter's mailbox completion status (mb->mbxStatus).
	 */
3151 3153 	emlxs_port_t *port;
3152 3154 	SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
3153 3155 	MAILBOX *mbox;
3154 3156 	MAILBOX *mb;
3155 3157 	volatile uint32_t word0;
3156 3158 	volatile uint32_t ldata;
3157 3159 	off_t offset;
3158 3160 	MATCHMAP *mbox_bp;
3159 3161 	uint32_t tmo_local;
3160 3162 	MAILBOX *swpmb;
3161 3163 
3162 3164 	if (!mbq->port) {
3163 3165 		mbq->port = &PPORT;
3164 3166 	}
3165 3167 
3166 3168 	port = (emlxs_port_t *)mbq->port;
3167 3169 
3168 3170 	mb = (MAILBOX *)mbq;
	/* swpmb overlays word0 so the polled first mailbox word can be */
	/* examined through the MAILBOX header fields (mbxOwner, etc.) */
3169 3171 	swpmb = (MAILBOX *)&word0;
3170 3172 
3171 3173 	mb->mbxStatus = MBX_SUCCESS;
3172 3174 
3173 3175 	/* Check for minimum timeouts */
3174 3176 	switch (mb->mbxCommand) {
3175 3177 	/* Mailbox commands that erase/write flash */
3176 3178 	case MBX_DOWN_LOAD:
3177 3179 	case MBX_UPDATE_CFG:
3178 3180 	case MBX_LOAD_AREA:
3179 3181 	case MBX_LOAD_EXP_ROM:
3180 3182 	case MBX_WRITE_NV:
3181 3183 	case MBX_FLASH_WR_ULA:
3182 3184 	case MBX_DEL_LD_ENTRY:
3183 3185 	case MBX_LOAD_SM:
3184 3186 		if (tmo < 300) {
3185 3187 			tmo = 300;
3186 3188 		}
3187 3189 		break;
3188 3190 
3189 3191 	default:
3190 3192 		if (tmo < 30) {
3191 3193 			tmo = 30;
3192 3194 		}
3193 3195 		break;
3194 3196 	}
3195 3197 
3196 3198 	/* Convert tmo seconds to 10 millisecond tics */
3197 3199 	tmo_local = tmo * 100;
3198 3200 
3199 3201 	/* Adjust wait flag */
3200 3202 	if (flag != MBX_NOWAIT) {
3201 3203 		/* If interrupt is enabled, use sleep, otherwise poll */
3202 3204 		if (hba->sli.sli3.hc_copy & HC_MBINT_ENA) {
3203 3205 			flag = MBX_SLEEP;
3204 3206 		} else {
3205 3207 			flag = MBX_POLL;
3206 3208 		}
3207 3209 	}
3208 3210 
3209 3211 	mutex_enter(&EMLXS_PORT_LOCK);
3210 3212 
3211 3213 	/* Check for hardware error */
3212 3214 	if (hba->flag & FC_HARDWARE_ERROR) {
3213 3215 		mb->mbxStatus = (hba->flag & FC_OVERTEMP_EVENT) ?
3214 3216 		    MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;
3215 3217 
3216 3218 		mutex_exit(&EMLXS_PORT_LOCK);
3217 3219 
3218 3220 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3219 3221 		    "Hardware error reported. %s failed. status=%x mb=%p",
3220 3222 		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);
3221 3223 
3222 3224 		return (MBX_HARDWARE_ERROR);
3223 3225 	}
3224 3226 
	/* Only one mailbox command may be outstanding at a time; */
	/* mbox_queue_flag indicates one is already in flight. */
3225 3227 	if (hba->mbox_queue_flag) {
3226 3228 		/* If we are not polling, then queue it for later */
3227 3229 		if (flag == MBX_NOWAIT) {
3228 3230 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3229 3231 			    "Busy. %s: mb=%p NoWait.",
3230 3232 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3231 3233 
3232 3234 			emlxs_mb_put(hba, mbq);
3233 3235 
3234 3236 			HBASTATS.MboxBusy++;
3235 3237 
3236 3238 			mutex_exit(&EMLXS_PORT_LOCK);
3237 3239 
3238 3240 			return (MBX_BUSY);
3239 3241 		}
3240 3242 
	/* Busy-wait (10ms per tick) for the in-flight mailbox to drain */
3241 3243 		while (hba->mbox_queue_flag) {
3242 3244 			mutex_exit(&EMLXS_PORT_LOCK);
3243 3245 
3244 3246 			if (tmo_local-- == 0) {
3245 3247 				EMLXS_MSGF(EMLXS_CONTEXT,
3246 3248 				    &emlxs_mbox_event_msg,
3247 3249 				    "Timeout. %s: mb=%p tmo=%d Waiting.",
3248 3250 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3249 3251 				    tmo);
3250 3252 
3251 3253 				/* Non-lethalStatus mailbox timeout */
3252 3254 				/* Does not indicate a hardware error */
3253 3255 				mb->mbxStatus = MBX_TIMEOUT;
3254 3256 				return (MBX_TIMEOUT);
3255 3257 			}
3256 3258 
3257 3259 			BUSYWAIT_MS(10);
3258 3260 			mutex_enter(&EMLXS_PORT_LOCK);
3259 3261 
3260 3262 			/* Check for hardware error */
3261 3263 			if (hba->flag & FC_HARDWARE_ERROR) {
3262 3264 				mb->mbxStatus =
3263 3265 				    (hba->flag & FC_OVERTEMP_EVENT) ?
3264 3266 				    MBX_OVERTEMP_ERROR : MBX_HARDWARE_ERROR;
3265 3267 
3266 3268 				mutex_exit(&EMLXS_PORT_LOCK);
3267 3269 
3268 3270 				EMLXS_MSGF(EMLXS_CONTEXT,
3269 3271 				    &emlxs_mbox_detail_msg,
3270 3272 				    "Hardware error reported. %s failed. "
3271 3273 				    "status=%x mb=%p",
3272 3274 				    emlxs_mb_cmd_xlate(mb->mbxCommand),
3273 3275 				    mb->mbxStatus, mb);
3274 3276 
3275 3277 				return (MBX_HARDWARE_ERROR);
3276 3278 			}
3277 3279 		}
3278 3280 	}
3279 3281 
3280 3282 	/* Initialize mailbox area */
3281 3283 	emlxs_mb_init(hba, mbq, flag, tmo);
3282 3284 
	/* Log the send (quiet for high-frequency DOWN_LOAD/DUMP_MEMORY) */
3283 3285 	switch (flag) {
3284 3286 	case MBX_NOWAIT:
3285 3287 
3286 3288 		if (mb->mbxCommand != MBX_HEARTBEAT) {
3287 3289 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3288 3290 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3289 3291 				EMLXS_MSGF(EMLXS_CONTEXT,
3290 3292 				    &emlxs_mbox_detail_msg,
3291 3293 				    "Sending. %s: mb=%p NoWait.",
3292 3294 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3293 3295 			}
3294 3296 		}
3295 3297 
3296 3298 		break;
3297 3299 
3298 3300 	case MBX_SLEEP:
3299 3301 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3300 3302 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3301 3303 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3302 3304 			    "Sending. %s: mb=%p Sleep.",
3303 3305 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3304 3306 		}
3305 3307 
3306 3308 		break;
3307 3309 
3308 3310 	case MBX_POLL:
3309 3311 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3310 3312 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3311 3313 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3312 3314 			    "Sending. %s: mb=%p Polled.",
3313 3315 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);
3314 3316 		}
3315 3317 		break;
3316 3318 	}
3317 3319 
3318 3320 	mb->mbxOwner = OWN_CHIP;
3319 3321 
3320 3322 	/* Clear the attention bit */
3321 3323 	WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3322 3324 
3323 3325 	if (hba->flag & FC_SLIM2_MODE) {
3324 3326 		/* First copy command data */
3325 3327 		mbox = FC_SLIM2_MAILBOX(hba);
3326 3328 		offset =
3327 3329 		    (off_t)((uint64_t)((unsigned long)mbox)
3328 3330 		    - (uint64_t)((unsigned long)slim2p));
3329 3331 
3330 3332 #ifdef MBOX_EXT_SUPPORT
3331 3333 		if (mbq->extbuf) {
3332 3334 			uint32_t *mbox_ext =
3333 3335 			    (uint32_t *)((uint8_t *)mbox +
3334 3336 			    MBOX_EXTENSION_OFFSET);
3335 3337 			off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;
3336 3338 
3337 3339 			BE_SWAP32_BCOPY((uint8_t *)mbq->extbuf,
3338 3340 			    (uint8_t *)mbox_ext, mbq->extsize);
3339 3341 
3340 3342 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3341 3343 			    offset_ext, mbq->extsize,
3342 3344 			    DDI_DMA_SYNC_FORDEV);
3343 3345 		}
3344 3346 #endif /* MBOX_EXT_SUPPORT */
3345 3347 
3346 3348 		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mbox,
3347 3349 		    MAILBOX_CMD_BSIZE);
3348 3350 
3349 3351 		EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
3350 3352 		    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
3351 3353 	} else {	/* SLIM 1 */
3352 3354 
3353 3355 		mbox = FC_SLIM1_MAILBOX(hba);
3354 3356 
3355 3357 #ifdef MBOX_EXT_SUPPORT
3356 3358 		if (mbq->extbuf) {
3357 3359 			uint32_t *mbox_ext =
3358 3360 			    (uint32_t *)((uint8_t *)mbox +
3359 3361 			    MBOX_EXTENSION_OFFSET);
3360 3362 			WRITE_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
3361 3363 			    mbox_ext, (mbq->extsize / 4));
3362 3364 		}
3363 3365 #endif /* MBOX_EXT_SUPPORT */
3364 3366 
3365 3367 		/* First copy command data */
3366 3368 		WRITE_SLIM_COPY(hba, &mb->un.varWords, &mbox->un.varWords,
3367 3369 		    (MAILBOX_CMD_WSIZE - 1));
3368 3370 
	/* Word 0 (which carries mbxOwner = OWN_CHIP) is written last so */
	/* the chip does not see a partially-copied mailbox. */
3369 3371 		/* copy over last word, with mbxOwner set */
3370 3372 		ldata = *((volatile uint32_t *)mb);
3371 3373 		WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mbox), ldata);
3372 3374 	}
3373 3375 
3374 3376 	/* Interrupt board to do it right away */
3375 3377 	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);
3376 3378 
3377 3379 	mutex_exit(&EMLXS_PORT_LOCK);
3378 3380 
3379 3381 #ifdef FMA_SUPPORT
3380 3382 	/* Access handle validation */
3381 3383 	if ((emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
3382 3384 	    != DDI_FM_OK) ||
3383 3385 	    (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.csr_acc_handle)
3384 3386 	    != DDI_FM_OK)) {
3385 3387 		EMLXS_MSGF(EMLXS_CONTEXT,
3386 3388 		    &emlxs_invalid_access_handle_msg, NULL);
3387 3389 		return (MBX_HARDWARE_ERROR);
3388 3390 	}
3389 3391 #endif  /* FMA_SUPPORT */
3390 3392 
3391 3393 	switch (flag) {
3392 3394 	case MBX_NOWAIT:
3393 3395 		return (MBX_SUCCESS);
3394 3396 
3395 3397 	case MBX_SLEEP:
3396 3398 
3397 3399 		/* Wait for completion */
3398 3400 		/* The driver clock is timing the mailbox. */
3399 3401 		/* emlxs_mb_fini() will be called externally. */
3400 3402 
3401 3403 		mutex_enter(&EMLXS_MBOX_LOCK);
3402 3404 		while (!(mbq->flag & MBQ_COMPLETED)) {
3403 3405 			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
3404 3406 		}
3405 3407 		mutex_exit(&EMLXS_MBOX_LOCK);
3406 3408 
3407 3409 		if (mb->mbxStatus == MBX_TIMEOUT) {
3408 3410 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3409 3411 			    "Timeout. %s: mb=%p tmo=%d. Sleep.",
3410 3412 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3411 3413 		} else {
3412 3414 			if (mb->mbxCommand != MBX_DOWN_LOAD &&
3413 3415 			    mb->mbxCommand != MBX_DUMP_MEMORY) {
3414 3416 				EMLXS_MSGF(EMLXS_CONTEXT,
3415 3417 				    &emlxs_mbox_detail_msg,
3416 3418 				    "Completed. %s: mb=%p status=%x Sleep.",
3417 3419 				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3418 3420 				    mb->mbxStatus);
3419 3421 			}
3420 3422 		}
3421 3423 
3422 3424 		break;
3423 3425 
3424 3426 	case MBX_POLL:
3425 3427 
3426 3428 		/* Convert tmo seconds to 500 usec tics */
3427 3429 		tmo_local = tmo * 2000;
3428 3430 
3429 3431 		/* Get first word of mailbox */
3430 3432 		if (hba->flag & FC_SLIM2_MODE) {
3431 3433 			mbox = FC_SLIM2_MAILBOX(hba);
3432 3434 			offset = (off_t)((uint64_t)((unsigned long)mbox) -
3433 3435 			    (uint64_t)((unsigned long)slim2p));
3434 3436 
3435 3437 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3436 3438 			    offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3437 3439 			word0 = *((volatile uint32_t *)mbox);
3438 3440 			word0 = BE_SWAP32(word0);
3439 3441 		} else {
3440 3442 			mbox = FC_SLIM1_MAILBOX(hba);
3441 3443 			word0 =
3442 3444 			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
3443 3445 		}
3444 3446 
	/* Poll until the chip releases ownership of word 0 or the */
	/* interrupt path marks the mailbox completed. If the driver */
	/* timer is running (hba->timer_id), it owns timeout detection; */
	/* otherwise we self-time with the 500us tick counter. */
3445 3447 		/* Wait for command to complete */
3446 3448 		while ((swpmb->mbxOwner == OWN_CHIP) &&
3447 3449 		    !(mbq->flag & MBQ_COMPLETED)) {
3448 3450 			if (!hba->timer_id && (tmo_local-- == 0)) {
3449 3451 				/* self time */
3450 3452 				EMLXS_MSGF(EMLXS_CONTEXT,
3451 3453 				    &emlxs_mbox_timeout_msg,
3452 3454 				    "%s: mb=%p tmo=%d Polled.",
3453 3455 				    emlxs_mb_cmd_xlate(mb->mbxCommand),
3454 3456 				    mb, tmo);
3455 3457 
3456 3458 				hba->flag |= FC_MBOX_TIMEOUT;
3457 3459 				EMLXS_STATE_CHANGE(hba, FC_ERROR);
3458 3460 				emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3459 3461 
3460 3462 				break;
3461 3463 			}
3462 3464 
3463 3465 			BUSYWAIT_US(500);
3464 3466 
3465 3467 			/* Get first word of mailbox */
3466 3468 			if (hba->flag & FC_SLIM2_MODE) {
3467 3469 				EMLXS_MPDATA_SYNC(
3468 3470 				    hba->sli.sli3.slim2.dma_handle, offset,
3469 3471 				    sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
3470 3472 				word0 = *((volatile uint32_t *)mbox);
3471 3473 				word0 = BE_SWAP32(word0);
3472 3474 			} else {
3473 3475 				word0 =
3474 3476 				    READ_SLIM_ADDR(hba,
3475 3477 				    ((volatile uint32_t *)mbox));
3476 3478 			}
3477 3479 
3478 3480 		}	/* while */
3479 3481 
3480 3482 		if (mb->mbxStatus == MBX_TIMEOUT) {
3481 3483 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_event_msg,
3482 3484 			    "Timeout. %s: mb=%p tmo=%d. Polled.",
3483 3485 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo);
3484 3486 
3485 3487 			break;
3486 3488 		}
3487 3489 
	/* A successful CONFIG_PORT switches the adapter into SLIM2 */
	/* mode; completion data must then be read from host memory. */
3488 3490 		/* Check for config port command */
3489 3491 		if ((swpmb->mbxCommand == MBX_CONFIG_PORT) &&
3490 3492 		    (swpmb->mbxStatus == MBX_SUCCESS)) {
3491 3493 			/* Setup host mbox for cmpl */
3492 3494 			mbox = FC_SLIM2_MAILBOX(hba);
3493 3495 			offset = (off_t)((uint64_t)((unsigned long)mbox)
3494 3496 			    - (uint64_t)((unsigned long)slim2p));
3495 3497 
3496 3498 			hba->flag |= FC_SLIM2_MODE;
3497 3499 		}
3498 3500 
3499 3501 		/* copy results back to user */
3500 3502 		if (hba->flag & FC_SLIM2_MODE) {
3501 3503 			EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
3502 3504 			    offset, MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
3503 3505 
3504 3506 			BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
3505 3507 			    MAILBOX_CMD_BSIZE);
3506 3508 		} else {
3507 3509 			READ_SLIM_COPY(hba, (uint32_t *)mb,
3508 3510 			    (uint32_t *)mbox, MAILBOX_CMD_WSIZE);
3509 3511 		}
3510 3512 
3511 3513 #ifdef MBOX_EXT_SUPPORT
3512 3514 		if (mbq->extbuf) {
3513 3515 			uint32_t *mbox_ext =
3514 3516 			    (uint32_t *)((uint8_t *)mbox +
3515 3517 			    MBOX_EXTENSION_OFFSET);
3516 3518 			off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;
3517 3519 
3518 3520 			if (hba->flag & FC_SLIM2_MODE) {
3519 3521 				EMLXS_MPDATA_SYNC(
3520 3522 				    hba->sli.sli3.slim2.dma_handle, offset_ext,
3521 3523 				    mbq->extsize, DDI_DMA_SYNC_FORKERNEL);
3522 3524 
3523 3525 				BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
3524 3526 				    (uint8_t *)mbq->extbuf, mbq->extsize);
3525 3527 			} else {
3526 3528 				READ_SLIM_COPY(hba,
3527 3529 				    (uint32_t *)mbq->extbuf, mbox_ext,
3528 3530 				    (mbq->extsize / 4));
3529 3531 			}
3530 3532 		}
3531 3533 #endif /* MBOX_EXT_SUPPORT */
3532 3534 
3533 3535 		/* Sync the memory buffer */
3534 3536 		if (mbq->bp) {
3535 3537 			mbox_bp = (MATCHMAP *)mbq->bp;
3536 3538 			EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0,
3537 3539 			    mbox_bp->size, DDI_DMA_SYNC_FORKERNEL);
3538 3540 		}
3539 3541 
3540 3542 		if (mb->mbxCommand != MBX_DOWN_LOAD &&
3541 3543 		    mb->mbxCommand != MBX_DUMP_MEMORY) {
3542 3544 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3543 3545 			    "Completed. %s: mb=%p status=%x Polled.",
3544 3546 			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
3545 3547 			    mb->mbxStatus);
3546 3548 		}
3547 3549 
3548 3550 		/* Process the result */
3549 3551 		if (!(mbq->flag & MBQ_PASSTHRU)) {
3550 3552 			if (mbq->mbox_cmpl) {
3551 3553 				(void) (mbq->mbox_cmpl)(hba, mbq);
3552 3554 			}
3553 3555 		}
3554 3556 
3555 3557 		/* Clear the attention bit */
3556 3558 		WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_MBATT);
3557 3559 
3558 3560 		/* Clean up the mailbox area */
3559 3561 		emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3560 3562 
3561 3563 		break;
3562 3564 
3563 3565 	}	/* switch (flag) */
3564 3566 
3565 3567 	return (mb->mbxStatus);
3566 3568 
3567 3569 } /* emlxs_sli3_issue_mbox_cmd() */
3568 3570
3569 3571
3570 3572 #ifdef SFCT_SUPPORT
3571 3573 /*ARGSUSED*/
3572 3574 static uint32_t
3573 3575 emlxs_sli3_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp,
3574 3576     int channel)
3575 3577 {
	/*
	 * Prepare an SLI-3 target-mode (COMSTAR/FCT) IOCB for cmd_sbp.
	 * Three cases, keyed on the pre-set iocb->ULPCOMMAND:
	 *   1. CMD_ABORT_XRI_CX  - build an abort (link up) or close
	 *      (link down) IOCB for an existing exchange.
	 *   2. CMD_FCP_TRSP64_CX - build the FCP response frame IOCB.
	 *   3. otherwise         - build the data-phase IOCB
	 *      (TRECEIVE64 for writes, TSEND64 for reads).
	 * Registers the packet to obtain an iotag in all cases.
	 * Returns IOERR_SUCCESS, IOERR_NO_RESOURCES (no iotag) or
	 * IOERR_INTERNAL_ERROR (BDE setup failed).
	 */
3576 3578 	emlxs_hba_t *hba = HBA;
3577 3579 	emlxs_config_t *cfg = &CFG;
3578 3580 	fct_cmd_t *fct_cmd;
3579 3581 	stmf_data_buf_t *dbuf;
3580 3582 	scsi_task_t *fct_task;
3581 3583 	fc_packet_t *pkt;
3582 3584 	uint32_t did;
3583 3585 	IOCBQ *iocbq;
3584 3586 	IOCB *iocb;
3585 3587 	uint32_t timeout;
3586 3588 	uint32_t iotag;
3587 3589 	emlxs_node_t *ndlp;
3588 3590 	CHANNEL *cp;
3589 3591 	ddi_dma_cookie_t *cp_cmd;
3590 3592 
3591 3593 	pkt = PRIV2PKT(cmd_sbp);
3592 3594 
3593 3595 	cp = (CHANNEL *)cmd_sbp->channel;
3594 3596 
3595 3597 	iocbq = &cmd_sbp->iocbq;
3596 3598 	iocb = &iocbq->iocb;
3597 3599 
3598 3600 
3599 3601 	/* Get the iotag by registering the packet */
3600 3602 	iotag = emlxs_register_pkt(cp, cmd_sbp);
3601 3603 
3602 3604 	if (!iotag) {
3603 3605 		/* No more command slots available, retry later */
3604 3606 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3605 3607 		    "Adapter Busy. Unable to allocate iotag. did=0x%x",
3606 3608 		    cmd_sbp->did);
3607 3609 
3608 3610 		return (IOERR_NO_RESOURCES);
3609 3611 	}
3610 3612 
3611 3613 
3612 3614 	/* Point of no return */
3613 3615 
3614 3616 	if (iocb->ULPCOMMAND == CMD_ABORT_XRI_CX) {
3615 3617 
3616 3618 		ndlp = cmd_sbp->node;
3617 3619 		cp->ulpSendCmd++;
3618 3620 
3619 3621 		/* Initalize iocbq */
3620 3622 		iocbq->port = (void *)port;
3621 3623 		iocbq->node = (void *)ndlp;
3622 3624 		iocbq->channel = (void *)cp;
3623 3625 
3624 3626 		/*
3625 3627 		 * Don't give the abort priority, we want the IOCB
3626 3628 		 * we are aborting to be processed first.
3627 3629 		 */
3628 3630 		iocbq->flag |= IOCB_SPECIAL;
3629 3631 
3630 3632 		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
3631 3633 		iocb->ULPIOTAG = (uint16_t)iotag;
3632 3634 		iocb->ULPLE = 1;
3633 3635 		iocb->ULPCLASS = cmd_sbp->class;
3634 3636 		iocb->ULPOWNER = OWN_CHIP;
3635 3637 
	/* ABTS requires a live link; use CLOSE_XRI when link is down */
3636 3638 		if (hba->state >= FC_LINK_UP) {
3637 3639 			/* Create the abort IOCB */
3638 3640 			iocb->un.acxri.abortType = ABORT_TYPE_ABTS;
3639 3641 			iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
3640 3642 
3641 3643 		} else {
3642 3644 			/* Create the close IOCB */
3643 3645 			iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
3644 3646 
3645 3647 		}
3646 3648 
3647 3649 		iocb->ULPRSVDBYTE =
3648 3650 		    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3649 3651 		/* Set the pkt timer */
3650 3652 		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
3651 3653 		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
3652 3654 
3653 3655 		return (IOERR_SUCCESS);
3654 3656 
3655 3657 	} else if (iocb->ULPCOMMAND == CMD_FCP_TRSP64_CX) {
3656 3658 
3657 3659 		ndlp = cmd_sbp->node;
3658 3660 		cp->ulpSendCmd++;
3659 3661 
3660 3662 		/* Initalize iocbq */
3661 3663 		iocbq->port = (void *)port;
3662 3664 		iocbq->node = (void *)ndlp;
3663 3665 		iocbq->channel = (void *)cp;
3664 3666 
	/* MODREV3+ stores the cmd cookie as a pointer, older as a value */
3665 3667 #if (EMLXS_MODREV >= EMLXS_MODREV3)
3666 3668 		cp_cmd = pkt->pkt_cmd_cookie;
3667 3669 #else
3668 3670 		cp_cmd = &pkt->pkt_cmd_cookie;
3669 3671 #endif	/* >= EMLXS_MODREV3 */
3670 3672 
	/* Single BDE pointing at the FCP response payload */
3671 3673 		iocb->un.fcpt64.bdl.addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
3672 3674 		iocb->un.fcpt64.bdl.addrLow = PADDR_LO(cp_cmd->dmac_laddress);
3673 3675 		iocb->un.fcpt64.bdl.bdeSize = pkt->pkt_cmdlen;
3674 3676 		iocb->un.fcpt64.bdl.bdeFlags = 0;
3675 3677 
3676 3678 		if (hba->sli_mode < 3) {
3677 3679 			iocb->ULPBDECOUNT = 1;
3678 3680 			iocb->ULPLE = 1;
3679 3681 		} else {	/* SLI3 */
3680 3682 
3681 3683 			iocb->ULPBDECOUNT = 0;
3682 3684 			iocb->ULPLE = 0;
3683 3685 			iocb->unsli3.ext_iocb.ebde_count = 0;
3684 3686 		}
3685 3687 
3686 3688 		/* Initalize iocb */
3687 3689 		iocb->ULPCONTEXT = (uint16_t)pkt->pkt_cmd_fhdr.rx_id;
3688 3690 		iocb->ULPIOTAG = (uint16_t)iotag;
3689 3691 		iocb->ULPRSVDBYTE =
3690 3692 		    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3691 3693 		iocb->ULPOWNER = OWN_CHIP;
3692 3694 		iocb->ULPCLASS = cmd_sbp->class;
3693 3695 		iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
3694 3696 
3695 3697 		/* Set the pkt timer */
3696 3698 		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
3697 3699 		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
3698 3700 
3699 3701 		if (pkt->pkt_cmdlen) {
3700 3702 			EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
3701 3703 			    DDI_DMA_SYNC_FORDEV);
3702 3704 		}
3703 3705 
3704 3706 		return (IOERR_SUCCESS);
3705 3707 	}
3706 3708 
	/* Data-phase IOCB (TRECEIVE64/TSEND64) */
3707 3709 	dbuf = cmd_sbp->fct_buf;
3708 3710 	fct_cmd = cmd_sbp->fct_cmd;
3709 3711 	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
3710 3712 	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
3711 3713 	did = fct_cmd->cmd_rportid;
3712 3714 
3713 3715 	iocbq->channel = (void *)cmd_sbp->channel;
3714 3716 
3715 3717 	if (emlxs_fct_bde_setup(port, cmd_sbp)) {
3716 3718 		/* Unregister the packet */
3717 3719 		(void) emlxs_unregister_pkt(cmd_sbp->channel, iotag, 0);
3718 3720 
3719 3721 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3720 3722 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3721 3723 
3722 3724 		return (IOERR_INTERNAL_ERROR);
3723 3725 	}
3724 3726 
	/* 0x80000000 effectively disables the pkt timer when the */
	/* timeout-enable config is off */
3725 3727 	if (cfg[CFG_TIMEOUT_ENABLE].current) {
3726 3728 		timeout =
3727 3729 		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
3728 3730 	} else {
3729 3731 		timeout = 0x80000000;
3730 3732 	}
3731 3733 
3732 3734 	cmd_sbp->ticks =
3733 3735 	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);
3734 3736 
3735 3737 	/* Initalize iocbq */
3736 3738 	iocbq->port = (void *)port;
3737 3739 	iocbq->node = (void *)ndlp;
3738 3740 
3739 3741 	/* Initalize iocb */
3740 3742 	iocb->ULPCONTEXT = (uint16_t)fct_cmd->cmd_rxid;
3741 3743 	iocb->ULPIOTAG = (uint16_t)iotag;
3742 3744 	iocb->ULPRSVDBYTE = ((timeout > 0xff) ? 0 : timeout);
3743 3745 	iocb->ULPOWNER = OWN_CHIP;
3744 3746 	iocb->ULPCLASS = cmd_sbp->class;
3745 3747 
3746 3748 	iocb->ULPPU = 1;	/* Wd4 is relative offset */
3747 3749 	iocb->un.fcpt64.fcpt_Offset = dbuf->db_relative_offset;
3748 3750 
3749 3751 	if (fct_task->task_flags & TF_WRITE_DATA) {
3750 3752 		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
3751 3753 	} else {	/* TF_READ_DATA */
3752 3754 
3753 3755 		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;
3754 3756 
	/* In SLI3, a TSEND covering the full expected transfer can */
	/* carry the auto-response so a separate TRSP is not needed */
3755 3757 		if ((hba->sli_mode == EMLXS_HBA_SLI3_MODE) &&
3756 3758 		    (dbuf->db_data_size >=
3757 3759 		    fct_task->task_expected_xfer_length)) {
3758 3760 			iocb->ULPCT = 0x1;
3759 3761 			/* enable auto-rsp AP feature */
3760 3762 		}
3761 3763 	}
3762 3764 
3763 3765 	return (IOERR_SUCCESS);
3764 3766 
3765 3767 } /* emlxs_sli3_prep_fct_iocb() */
3766 3768 #endif /* SFCT_SUPPORT */
3767 3769
3768 3770 /* ARGSUSED */
3769 3771 static uint32_t
3770 3772 emlxs_sli3_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
3771 3773 {
	/*
	 * Prepare an SLI-3 FCP initiator command IOCB for sbp.
	 * Registers the packet on the FCP ring to get an iotag, builds
	 * the buffer descriptor list, then fills in the IOCB header
	 * (RPI context, iotag, class, FC-TAPE recovery bit) and selects
	 * the command by data direction: ICMND64 (no data), IREAD64
	 * (read) or IWRITE64 (write).
	 * Returns FC_SUCCESS, or FC_TRAN_BUSY if no iotag/BDE resources.
	 * The 'channel' argument is unused (see ARGSUSED above).
	 */
3772 3774 	emlxs_hba_t *hba = HBA;
3773 3775 	fc_packet_t *pkt;
3774 3776 	CHANNEL *cp;
3775 3777 	IOCBQ *iocbq;
3776 3778 	IOCB *iocb;
3777 3779 	NODELIST *ndlp;
3778 3780 	uint16_t iotag;
3779 3781 	uint32_t did;
3780 3782 
3781 3783 	pkt = PRIV2PKT(sbp);
3782 3784 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3783 3785 	cp = &hba->chan[FC_FCP_RING];
3784 3786 
3785 3787 	iocbq = &sbp->iocbq;
3786 3788 	iocb = &iocbq->iocb;
3787 3789 
3788 3790 	/* Find target node object */
3789 3791 	ndlp = (NODELIST *)iocbq->node;
3790 3792 
3791 3793 	/* Get the iotag by registering the packet */
3792 3794 	iotag = emlxs_register_pkt(cp, sbp);
3793 3795 
3794 3796 	if (!iotag) {
3795 3797 		/*
3796 3798 		 * No more command slots available, retry later
3797 3799 		 */
3798 3800 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3799 3801 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3800 3802 
3801 3803 		return (FC_TRAN_BUSY);
3802 3804 	}
3803 3805 
3804 3806 	/* Initalize iocbq */
3805 3807 	iocbq->port = (void *) port;
3806 3808 	iocbq->channel = (void *) cp;
3807 3809 
3808 3810 	/* Indicate this is a FCP cmd */
3809 3811 	iocbq->flag |= IOCB_FCP_CMD;
3810 3812 
3811 3813 	if (emlxs_bde_setup(port, sbp)) {
3812 3814 		/* Unregister the packet */
3813 3815 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3814 3816 
3815 3817 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3816 3818 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3817 3819 
3818 3820 		return (FC_TRAN_BUSY);
3819 3821 	}
3820 3822 	/* Point of no return */
3821 3823 
3822 3824 	/* Initalize iocb */
3823 3825 	iocb->ULPCONTEXT = ndlp->nlp_Rpi;
3824 3826 	iocb->ULPIOTAG = iotag;
	/* Timeouts > 255 don't fit in the reserved byte; 0 = no HBA timer */
3825 3827 	iocb->ULPRSVDBYTE =
3826 3828 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3827 3829 	iocb->ULPOWNER = OWN_CHIP;
3828 3830 
3829 3831 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3830 3832 	case FC_TRAN_CLASS1:
3831 3833 		iocb->ULPCLASS = CLASS1;
3832 3834 		break;
3833 3835 	case FC_TRAN_CLASS2:
3834 3836 		iocb->ULPCLASS = CLASS2;
3835 3837 		/* iocb->ULPCLASS = CLASS3; */
3836 3838 		break;
3837 3839 	case FC_TRAN_CLASS3:
3838 3840 	default:
3839 3841 		iocb->ULPCLASS = CLASS3;
3840 3842 		break;
3841 3843 	}
3842 3844 
3843 3845 	/* if device is FCP-2 device, set the following bit */
3844 3846 	/* that says to run the FC-TAPE protocol. */
3845 3847 	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
3846 3848 		iocb->ULPFCP2RCVY = 1;
3847 3849 	}
3848 3850 
3849 3851 	if (pkt->pkt_datalen == 0) {
3850 3852 		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
3851 3853 	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
3852 3854 		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
	/* For reads, have the chip verify the transfer length */
3853 3855 		iocb->ULPPU = PARM_XFER_CHECK;
3854 3856 		iocb->un.fcpi64.fcpi_parm = pkt->pkt_datalen;
3855 3857 	} else {
3856 3858 		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
3857 3859 	}
3858 3860 
3859 3861 	return (FC_SUCCESS);
3860 3862 
3861 3863 } /* emlxs_sli3_prep_fcp_iocb() */
3862 3864
3863 3865
3864 3866 static uint32_t
3865 3867 emlxs_sli3_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
3866 3868 {
	/*
	 * Prepare an SLI-3 IP-over-FC transmit IOCB for sbp on the IP
	 * ring. Registers the packet for an iotag, builds the buffer
	 * list, copies the sequence header fields (F_CTL bits, DF_CTL,
	 * R_CTL, TYPE) from the packet's FC frame header, then issues
	 * either XMIT_BCAST64 (broadcast) or XMIT_SEQUENCE64 (directed,
	 * using the node's XRI as context).
	 * Returns FC_SUCCESS, or FC_TRAN_BUSY if no iotag/BDE resources.
	 */
3867 3869 	emlxs_hba_t *hba = HBA;
3868 3870 	fc_packet_t *pkt;
3869 3871 	IOCBQ *iocbq;
3870 3872 	IOCB *iocb;
3871 3873 	CHANNEL *cp;
3872 3874 	NODELIST *ndlp;
3873 3875 	uint16_t iotag;
3874 3876 	uint32_t did;
3875 3877 
3876 3878 	pkt = PRIV2PKT(sbp);
3877 3879 	cp = &hba->chan[FC_IP_RING];
3878 3880 	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
3879 3881 
3880 3882 	iocbq = &sbp->iocbq;
3881 3883 	iocb = &iocbq->iocb;
3882 3884 	ndlp = (NODELIST *)iocbq->node;
3883 3885 
3884 3886 	/* Get the iotag by registering the packet */
3885 3887 	iotag = emlxs_register_pkt(cp, sbp);
3886 3888 
3887 3889 	if (!iotag) {
3888 3890 		/*
3889 3891 		 * No more command slots available, retry later
3890 3892 		 */
3891 3893 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3892 3894 		    "Adapter Busy. Unable to allocate iotag: did=0x%x", did);
3893 3895 
3894 3896 		return (FC_TRAN_BUSY);
3895 3897 	}
3896 3898 
3897 3899 	/* Initalize iocbq */
3898 3900 	iocbq->port = (void *) port;
3899 3901 	iocbq->channel = (void *) cp;
3900 3902 
3901 3903 	if (emlxs_bde_setup(port, sbp)) {
3902 3904 		/* Unregister the packet */
3903 3905 		(void) emlxs_unregister_pkt(cp, iotag, 0);
3904 3906 
3905 3907 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
3906 3908 		    "Adapter Busy. Unable to setup buffer list. did=%x", did);
3907 3909 
3908 3910 		return (FC_TRAN_BUSY);
3909 3911 	}
3910 3912 	/* Point of no return */
3911 3913 
3912 3914 	/* Initalize iocb */
3913 3915 	iocb->un.xseq64.w5.hcsw.Fctl = 0;
3914 3916 
	/* Carry over the relevant F_CTL bits from the frame header */
3915 3917 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_FIRST_SEQ) {
3916 3918 		iocb->un.xseq64.w5.hcsw.Fctl |= FSEQ;
3917 3919 	}
3918 3920 	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
3919 3921 		iocb->un.xseq64.w5.hcsw.Fctl |= SI;
3920 3922 	}
3921 3923 
3922 3924 	/* network headers */
3923 3925 	iocb->un.xseq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
3924 3926 	iocb->un.xseq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
3925 3927 	iocb->un.xseq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
3926 3928 
3927 3929 	iocb->ULPIOTAG = iotag;
3928 3930 	iocb->ULPRSVDBYTE =
3929 3931 	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
3930 3932 	iocb->ULPOWNER = OWN_CHIP;
3931 3933 
3932 3934 	if (pkt->pkt_tran_type == FC_PKT_BROADCAST) {
3933 3935 		HBASTATS.IpBcastIssued++;
3934 3936 
3935 3937 		iocb->ULPCOMMAND = CMD_XMIT_BCAST64_CN;
3936 3938 		iocb->ULPCONTEXT = 0;
3937 3939 
	/* In SLI3 the broadcast context carries the virtual port index */
3938 3940 		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
3939 3941 			if (hba->topology != TOPOLOGY_LOOP) {
3940 3942 				iocb->ULPCT = 0x1;
3941 3943 			}
3942 3944 			iocb->ULPCONTEXT = port->vpi;
3943 3945 		}
3944 3946 	} else {
3945 3947 		HBASTATS.IpSeqIssued++;
3946 3948 
3947 3949 		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
3948 3950 		iocb->ULPCONTEXT = ndlp->nlp_Xri;
3949 3951 	}
3950 3952 
3951 3953 	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
3952 3954 	case FC_TRAN_CLASS1:
3953 3955 		iocb->ULPCLASS = CLASS1;
3954 3956 		break;
3955 3957 	case FC_TRAN_CLASS2:
3956 3958 		iocb->ULPCLASS = CLASS2;
3957 3959 		break;
3958 3960 	case FC_TRAN_CLASS3:
3959 3961 	default:
3960 3962 		iocb->ULPCLASS = CLASS3;
3961 3963 		break;
3962 3964 	}
3963 3965 
3964 3966 	return (FC_SUCCESS);
3965 3967 
3966 3968 } /* emlxs_sli3_prep_ip_iocb() */
3967 3969
3968 3970
/*
 * emlxs_sli3_prep_els_iocb()
 *
 * Prepare an ELS request or response IOCB for transmit on the ELS ring.
 * Registers the packet to obtain an iotag, builds the buffer descriptor
 * list, then fills in the IOCB command, context, timeout and class of
 * service fields from the fc_packet header.
 *
 * Returns FC_SUCCESS once the IOCB is ready to issue, or FC_TRAN_BUSY
 * if no iotag or BDE resources are currently available (caller retries).
 */
static uint32_t
emlxs_sli3_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	uint16_t iotag;
	uint32_t did;
	uint32_t cmd;

	pkt = PRIV2PKT(sbp);
	cp = &hba->chan[FC_ELS_RING];
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;


	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, sbp);

	if (!iotag) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Initialize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	if (emlxs_bde_setup(port, sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cp, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (FC_TRAN_BUSY);
	}
	/* Point of no return */

	/* Initialize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* ELS Response: echo the exchange id we are replying to */
		iocb->ULPCONTEXT = (volatile uint16_t) pkt->pkt_cmd_fhdr.rx_id;
		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
	} else {
		/* ELS Request */
		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
		iocb->ULPCONTEXT =
		    (did == BCAST_DID) ? pkt->pkt_cmd_fhdr.seq_id : 0;
		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;

		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
			if (hba->topology != TOPOLOGY_LOOP) {
				/* Peek at the ELS command word to pick CT */
				cmd = *((uint32_t *)pkt->pkt_cmd);
				cmd &= ELS_CMD_MASK;

				/* FLOGI/FDISC address the fabric itself */
				if ((cmd == ELS_CMD_FLOGI) ||
				    (cmd == ELS_CMD_FDISC)) {
					iocb->ULPCT = 0x2;
				} else {
					iocb->ULPCT = 0x1;
				}
			}
			/* SLI3 requests carry the VPI; overrides above */
			iocb->ULPCONTEXT = port->vpi;
		}
	}
	iocb->ULPIOTAG = iotag;
	/* Timeouts wider than 8 bits are treated as "no timeout" */
	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	iocb->ULPOWNER = OWN_CHIP;

	/* Map the transport class of service; default is class 3 */
	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS1:
		iocb->ULPCLASS = CLASS1;
		break;
	case FC_TRAN_CLASS2:
		iocb->ULPCLASS = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		iocb->ULPCLASS = CLASS3;
		break;
	}
	sbp->class = iocb->ULPCLASS;

	return (FC_SUCCESS);

} /* emlxs_sli3_prep_els_iocb() */
4065 4067
4066 4068
/*
 * emlxs_sli3_prep_ct_iocb()
 *
 * Prepare a CT (Common Transport / name server) request or response
 * IOCB for transmit on the CT ring.  Registers the packet for an iotag,
 * builds the buffer descriptor list, then fills in the IOCB frame
 * control, command, context, timeout and class of service fields.
 *
 * Returns FC_SUCCESS once the IOCB is ready to issue, or FC_TRAN_BUSY
 * if no iotag or BDE resources are currently available (caller retries).
 */
static uint32_t
emlxs_sli3_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	NODELIST *ndlp;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	cp = &hba->chan[FC_CT_RING];

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	/*
	 * NOTE(review): ndlp is dereferenced below (nlp_Rpi) for the CT
	 * request path without a NULL check - assumes callers always set
	 * iocbq->node before issuing a CT request; confirm with callers.
	 */
	ndlp = (NODELIST *)iocbq->node;

	/* Get the iotag by registering the packet */
	iotag = emlxs_register_pkt(cp, sbp);

	if (!iotag) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate iotag. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	if (emlxs_bde_setup(port, sbp)) {
		/* Unregister the packet */
		(void) emlxs_unregister_pkt(cp, iotag, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup buffer list. did=%x", did);

		return (FC_TRAN_BUSY);
	}

	/* Point of no return */

	/* Initialize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	/* Fill in rest of iocb: frame control bits from the FC header */
	iocb->un.genreq64.w5.hcsw.Fctl = LA;

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
		iocb->un.genreq64.w5.hcsw.Fctl |= LSEQ;
	}
	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
		iocb->un.genreq64.w5.hcsw.Fctl |= SI;
	}

	/* Initialize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* CT Response: continue the exchange identified by rx_id */
		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
		iocb->un.genreq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
	} else {
		/* CT Request: addressed via the node's registered RPI */
		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
		iocb->un.genreq64.w5.hcsw.Dfctl = 0;
		iocb->ULPCONTEXT = ndlp->nlp_Rpi;
	}

	iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
	iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;

	iocb->ULPIOTAG = iotag;
	/* Timeouts wider than 8 bits are treated as "no timeout" */
	iocb->ULPRSVDBYTE =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
	iocb->ULPOWNER = OWN_CHIP;

	/* Map the transport class of service; default is class 3 */
	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS1:
		iocb->ULPCLASS = CLASS1;
		break;
	case FC_TRAN_CLASS2:
		iocb->ULPCLASS = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		iocb->ULPCLASS = CLASS3;
		break;
	}

	return (FC_SUCCESS);

} /* emlxs_sli3_prep_ct_iocb() */
4163 4165
4164 4166
#ifdef SFCT_SUPPORT
/*
 * Build the buffer descriptor list for a target-mode (FCT) I/O,
 * dispatching to the SLI2 or SLI3 specific routine based on the
 * adapter's current SLI mode.  Only a single scatter/gather list
 * entry is supported; anything larger is rejected.
 *
 * Returns 0 on success, nonzero on failure.
 */
static uint32_t
emlxs_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;

	/* Reject multi-entry scatter/gather lists up front */
	if (sbp->fct_buf->db_sglist_length != 1) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_bde_setup: Only 1 sglist entry supported: %d",
		    sbp->fct_buf->db_sglist_length);
		return (1);
	}

	/* Dispatch on the adapter's SLI mode */
	return ((hba->sli_mode < EMLXS_HBA_SLI3_MODE) ?
	    emlxs_sli2_fct_bde_setup(port, sbp) :
	    emlxs_sli3_fct_bde_setup(port, sbp));

} /* emlxs_fct_bde_setup() */
#endif /* SFCT_SUPPORT */
4189 4191
4190 4192
4191 4193 static uint32_t
4192 4194 emlxs_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
4193 4195 {
4194 4196 uint32_t rval;
4195 4197 emlxs_hba_t *hba = HBA;
4196 4198
4197 4199 if (hba->sli_mode < EMLXS_HBA_SLI3_MODE) {
4198 4200 rval = emlxs_sli2_bde_setup(port, sbp);
4199 4201 } else {
4200 4202 rval = emlxs_sli3_bde_setup(port, sbp);
4201 4203 }
4202 4204
4203 4205 return (rval);
4204 4206
4205 4207 } /* emlxs_bde_setup() */
4206 4208
4207 4209
4208 4210 static void
4209 4211 emlxs_sli3_poll_intr(emlxs_hba_t *hba)
4210 4212 {
4211 4213 uint32_t ha_copy;
4212 4214
4213 4215 /* Check attention bits once and process if required */
4214 4216
4215 4217 ha_copy = emlxs_check_attention(hba);
4216 4218
4217 4219 if (ha_copy == 0) {
4218 4220 return;
4219 4221 }
4220 4222
4221 4223 mutex_enter(&EMLXS_PORT_LOCK);
4222 4224 ha_copy = emlxs_get_attention(hba, -1);
4223 4225 mutex_exit(&EMLXS_PORT_LOCK);
4224 4226
4225 4227 emlxs_proc_attention(hba, ha_copy);
4226 4228
4227 4229 return;
4228 4230
4229 4231 } /* emlxs_sli3_poll_intr() */
4230 4232
4231 4233
#ifdef MSI_SUPPORT
/*
 * emlxs_sli3_msi_intr()
 *
 * MSI/MSI-X interrupt service routine.  arg1 is the hba, arg2 encodes
 * the MSI message id.  If the driver actually fell back to fixed
 * interrupts (DDI_INTR_TYPE_FIXED), this routine degrades to the
 * legacy INTx handling, including the spurious-interrupt tracking.
 *
 * For true MSI/MSI-X: the per-vector interrupt lock serializes
 * servicing of one message id; the attention bits for the vector are
 * claimed under EMLXS_PORT_LOCK, then processed with no locks held.
 *
 * Always returns DDI_INTR_CLAIMED for MSI vectors; fixed interrupts
 * may return DDI_INTR_UNCLAIMED (except on SBUS, which always claims).
 */
static uint32_t
emlxs_sli3_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	uint16_t msgid;
	uint32_t hc_copy;
	uint32_t ha_copy;
	uint32_t restore = 0;	/* nonzero if HC interrupts were masked */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	 * "sli3_msi_intr: arg1=%p arg2=%p", arg1, arg2);
	 */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (hba->flag & FC_OFFLINE_MODE) {
			mutex_exit(&EMLXS_PORT_LOCK);

			/* SBUS adapters always claim their interrupt */
			if (hba->bus_type == SBUS_FC) {
				return (DDI_INTR_CLAIMED);
			} else {
				return (DDI_INTR_UNCLAIMED);
			}
		}

		/* Get host attention bits */
		ha_copy = emlxs_get_attention(hba, -1);

		if (ha_copy == 0) {
			/* Second consecutive empty read: stop claiming */
			if (hba->intr_unclaimed) {
				mutex_exit(&EMLXS_PORT_LOCK);
				return (DDI_INTR_UNCLAIMED);
			}

			hba->intr_unclaimed = 1;
		} else {
			hba->intr_unclaimed = 0;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Process the interrupt */
		emlxs_proc_attention(hba, ha_copy);

		return (DDI_INTR_CLAIMED);
	}

	/* DDI_INTR_TYPE_MSI */
	/* DDI_INTR_TYPE_MSIX */

	/* Get MSI message id */
	msgid = (uint16_t)((unsigned long)arg2);

	/* Validate the message id; out-of-range ids map to vector 0 */
	if (msgid >= hba->intr_count) {
		msgid = 0;
	}

	mutex_enter(&EMLXS_INTR_LOCK(msgid));

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check if adapter is offline */
	if (hba->flag & FC_OFFLINE_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);
		mutex_exit(&EMLXS_INTR_LOCK(msgid));

		/* Always claim an MSI interrupt */
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * Disable interrupts associated with this msgid.
	 * NOTE(review): this masking appears to be a Zephyr-chip-specific
	 * workaround for the default vector only - confirm against the
	 * adapter errata before changing.
	 */
	if (msgid == 0 && (hba->model_info.chip == EMLXS_ZEPHYR_CHIP)) {
		hc_copy = hba->sli.sli3.hc_copy & ~hba->intr_mask;
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hc_copy);
		restore = 1;
	}

	/* Get host attention bits */
	ha_copy = emlxs_get_attention(hba, msgid);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Process the interrupt */
	emlxs_proc_attention(hba, ha_copy);

	/* Restore interrupts masked above */
	if (restore) {
		mutex_enter(&EMLXS_PORT_LOCK);
		WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_INTR_LOCK(msgid));

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli3_msi_intr() */
#endif /* MSI_SUPPORT */
4342 4344
4343 4345
4344 4346 static int
4345 4347 emlxs_sli3_intx_intr(char *arg)
4346 4348 {
4347 4349 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
4348 4350 uint32_t ha_copy = 0;
4349 4351
4350 4352 mutex_enter(&EMLXS_PORT_LOCK);
4351 4353
4352 4354 if (hba->flag & FC_OFFLINE_MODE) {
4353 4355 mutex_exit(&EMLXS_PORT_LOCK);
4354 4356
4355 4357 if (hba->bus_type == SBUS_FC) {
4356 4358 return (DDI_INTR_CLAIMED);
4357 4359 } else {
4358 4360 return (DDI_INTR_UNCLAIMED);
4359 4361 }
4360 4362 }
4361 4363
4362 4364 /* Get host attention bits */
4363 4365 ha_copy = emlxs_get_attention(hba, -1);
4364 4366
4365 4367 if (ha_copy == 0) {
4366 4368 if (hba->intr_unclaimed) {
4367 4369 mutex_exit(&EMLXS_PORT_LOCK);
4368 4370 return (DDI_INTR_UNCLAIMED);
4369 4371 }
4370 4372
4371 4373 hba->intr_unclaimed = 1;
4372 4374 } else {
4373 4375 hba->intr_unclaimed = 0;
4374 4376 }
4375 4377
4376 4378 mutex_exit(&EMLXS_PORT_LOCK);
4377 4379
4378 4380 /* Process the interrupt */
4379 4381 emlxs_proc_attention(hba, ha_copy);
4380 4382
4381 4383 return (DDI_INTR_CLAIMED);
4382 4384
4383 4385 } /* emlxs_sli3_intx_intr() */
4384 4386
4385 4387
4386 4388 /* EMLXS_PORT_LOCK must be held when call this routine */
4387 4389 static uint32_t
4388 4390 emlxs_get_attention(emlxs_hba_t *hba, int32_t msgid)
4389 4391 {
4390 4392 #ifdef FMA_SUPPORT
4391 4393 emlxs_port_t *port = &PPORT;
4392 4394 #endif /* FMA_SUPPORT */
4393 4395 uint32_t ha_copy = 0;
4394 4396 uint32_t ha_copy2;
4395 4397 uint32_t mask = hba->sli.sli3.hc_copy;
4396 4398
4397 4399 #ifdef MSI_SUPPORT
4398 4400
4399 4401 read_ha_register:
4400 4402
4401 4403 /* Check for default MSI interrupt */
4402 4404 if (msgid == 0) {
4403 4405 /* Read host attention register to determine interrupt source */
4404 4406 ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
4405 4407
4406 4408 /* Filter out MSI non-default attention bits */
4407 4409 ha_copy2 &= ~(hba->intr_cond);
4408 4410 }
4409 4411
4410 4412 /* Check for polled or fixed type interrupt */
4411 4413 else if (msgid == -1) {
4412 4414 /* Read host attention register to determine interrupt source */
4413 4415 ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
4414 4416 }
4415 4417
4416 4418 /* Otherwise, assume a mapped MSI interrupt */
4417 4419 else {
4418 4420 /* Convert MSI msgid to mapped attention bits */
4419 4421 ha_copy2 = hba->intr_map[msgid];
4420 4422 }
4421 4423
4422 4424 #else /* !MSI_SUPPORT */
4423 4425
4424 4426 /* Read host attention register to determine interrupt source */
4425 4427 ha_copy2 = READ_CSR_REG(hba, FC_HA_REG(hba));
4426 4428
4427 4429 #endif /* MSI_SUPPORT */
4428 4430
4429 4431 /* Check if Hardware error interrupt is enabled */
4430 4432 if ((ha_copy2 & HA_ERATT) && !(mask & HC_ERINT_ENA)) {
4431 4433 ha_copy2 &= ~HA_ERATT;
4432 4434 }
4433 4435
4434 4436 /* Check if link interrupt is enabled */
4435 4437 if ((ha_copy2 & HA_LATT) && !(mask & HC_LAINT_ENA)) {
4436 4438 ha_copy2 &= ~HA_LATT;
4437 4439 }
4438 4440
4439 4441 /* Check if Mailbox interrupt is enabled */
4440 4442 if ((ha_copy2 & HA_MBATT) && !(mask & HC_MBINT_ENA)) {
4441 4443 ha_copy2 &= ~HA_MBATT;
4442 4444 }
4443 4445
4444 4446 /* Check if ring0 interrupt is enabled */
4445 4447 if ((ha_copy2 & HA_R0ATT) && !(mask & HC_R0INT_ENA)) {
4446 4448 ha_copy2 &= ~HA_R0ATT;
4447 4449 }
4448 4450
4449 4451 /* Check if ring1 interrupt is enabled */
4450 4452 if ((ha_copy2 & HA_R1ATT) && !(mask & HC_R1INT_ENA)) {
4451 4453 ha_copy2 &= ~HA_R1ATT;
4452 4454 }
4453 4455
4454 4456 /* Check if ring2 interrupt is enabled */
4455 4457 if ((ha_copy2 & HA_R2ATT) && !(mask & HC_R2INT_ENA)) {
4456 4458 ha_copy2 &= ~HA_R2ATT;
4457 4459 }
4458 4460
4459 4461 /* Check if ring3 interrupt is enabled */
4460 4462 if ((ha_copy2 & HA_R3ATT) && !(mask & HC_R3INT_ENA)) {
4461 4463 ha_copy2 &= ~HA_R3ATT;
4462 4464 }
4463 4465
4464 4466 /* Accumulate attention bits */
4465 4467 ha_copy |= ha_copy2;
4466 4468
4467 4469 /* Clear attentions except for error, link, and autoclear(MSIX) */
4468 4470 ha_copy2 &= ~(HA_ERATT | HA_LATT); /* | hba->intr_autoClear */
4469 4471
4470 4472 if (ha_copy2) {
4471 4473 WRITE_CSR_REG(hba, FC_HA_REG(hba), ha_copy2);
4472 4474 }
4473 4475
4474 4476 #ifdef FMA_SUPPORT
4475 4477 /* Access handle validation */
4476 4478 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4477 4479 #endif /* FMA_SUPPORT */
4478 4480
4479 4481 return (ha_copy);
4480 4482
4481 4483 } /* emlxs_get_attention() */
4482 4484
4483 4485
/*
 * emlxs_proc_attention()
 *
 * Dispatch a set of already-claimed host attention bits to their
 * handlers: adapter error (terminal - handled and returned), mailbox
 * completion, link attention, and the four ring event attentions.
 * ha_copy must already be filtered by emlxs_get_attention(); no locks
 * are held on entry.
 */
static void
emlxs_proc_attention(emlxs_hba_t *hba, uint32_t ha_copy)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */

	/* ha_copy should be pre-filtered */

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "proc_attention: ha_copy=%x", ha_copy);
	 */

	/* Ignore attentions until the adapter is at least warm-started */
	if (hba->state < FC_WARM_START) {
		return;
	}

	if (!ha_copy) {
		return;
	}

	if (hba->bus_type == SBUS_FC) {
		(void) READ_SBUS_CSR_REG(hba, FC_SHS_REG(hba));
	}

	/* Adapter error: handle and stop - nothing else is serviceable */
	if (ha_copy & HA_ERATT) {
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
		return;
	}

	/* Mailbox interrupt */
	if (ha_copy & HA_MBATT) {
		HBASTATS.IntrEvent[5]++;
		(void) emlxs_handle_mb_event(hba);
	}

	/* Link Attention interrupt */
	if (ha_copy & HA_LATT) {
		HBASTATS.IntrEvent[4]++;
		emlxs_sli3_handle_link_event(hba);
	}

	/* event on ring 0 - FCP Ring */
	if (ha_copy & HA_R0ATT) {
		HBASTATS.IntrEvent[0]++;
		emlxs_sli3_handle_ring_event(hba, 0, ha_copy);
	}

	/* event on ring 1 - IP Ring */
	if (ha_copy & HA_R1ATT) {
		HBASTATS.IntrEvent[1]++;
		emlxs_sli3_handle_ring_event(hba, 1, ha_copy);
	}

	/* event on ring 2 - ELS Ring */
	if (ha_copy & HA_R2ATT) {
		HBASTATS.IntrEvent[2]++;
		emlxs_sli3_handle_ring_event(hba, 2, ha_copy);
	}

	/* event on ring 3 - CT Ring */
	if (ha_copy & HA_R3ATT) {
		HBASTATS.IntrEvent[3]++;
		emlxs_sli3_handle_ring_event(hba, 3, ha_copy);
	}

	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), SBUS_STAT_IP);
	}

	/* Set heartbeat flag to show activity */
	hba->heartbeat_flag = 1;

#ifdef FMA_SUPPORT
	if (hba->bus_type == SBUS_FC) {
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
	}
#endif /* FMA_SUPPORT */

	return;

} /* emlxs_proc_attention() */
4570 4572
4571 4573
4572 4574 /*
4573 4575 * emlxs_handle_ff_error()
4574 4576 *
4575 4577 * Description: Processes a FireFly error
4576 4578 * Runs at Interrupt level
4577 4579 */
4578 4580 static void
4579 4581 emlxs_handle_ff_error(emlxs_hba_t *hba)
4580 4582 {
4581 4583 emlxs_port_t *port = &PPORT;
4582 4584 uint32_t status;
4583 4585 uint32_t status1;
4584 4586 uint32_t status2;
4585 4587 int i = 0;
4586 4588
4587 4589 /* do what needs to be done, get error from STATUS REGISTER */
4588 4590 status = READ_CSR_REG(hba, FC_HS_REG(hba));
4589 4591
4590 4592 /* Clear Chip error bit */
4591 4593 WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_ERATT);
4592 4594
4593 4595 /* If HS_FFER1 is set, then wait until the HS_FFER1 bit clears */
4594 4596 if (status & HS_FFER1) {
4595 4597
4596 4598 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4597 4599 "HS_FFER1 received");
4598 4600 EMLXS_STATE_CHANGE(hba, FC_ERROR);
4599 4601 (void) emlxs_offline(hba, 1);
4600 4602 while ((status & HS_FFER1) && (i < 300)) {
4601 4603 status =
4602 4604 READ_CSR_REG(hba, FC_HS_REG(hba));
4603 4605 BUSYWAIT_MS(1000);
4604 4606 i++;
4605 4607 }
4606 4608 }
4607 4609
4608 4610 if (i == 300) {
4609 4611 /* 5 minutes is up, shutdown HBA */
4610 4612 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4611 4613 "HS_FFER1 clear timeout");
4612 4614
4613 4615 EMLXS_STATE_CHANGE(hba, FC_ERROR);
4614 4616 emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
4615 4617
4616 4618 goto done;
4617 4619 }
4618 4620
4619 4621 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4620 4622 "HS_FFER1 cleared");
4621 4623
4622 4624 if (status & HS_OVERTEMP) {
4623 4625 status1 =
4624 4626 READ_SLIM_ADDR(hba,
4625 4627 ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xb0));
4626 4628
4627 4629 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4628 4630 "Maximum adapter temperature exceeded (%d °C).", status1);
4629 4631
4630 4632 hba->temperature = status1;
4631 4633 hba->flag |= FC_OVERTEMP_EVENT;
4632 4634
4633 4635 EMLXS_STATE_CHANGE(hba, FC_ERROR);
4634 4636 emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4635 4637 NULL, NULL);
4636 4638
4637 4639 } else {
4638 4640 status1 =
4639 4641 READ_SLIM_ADDR(hba,
4640 4642 ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xa8));
4641 4643 status2 =
4642 4644 READ_SLIM_ADDR(hba,
4643 4645 ((volatile uint8_t *)hba->sli.sli3.slim_addr + 0xac));
4644 4646
4645 4647 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
4646 4648 "Host Error Attention: "
4647 4649 "status=0x%x status1=0x%x status2=0x%x",
4648 4650 status, status1, status2);
4649 4651
4650 4652 EMLXS_STATE_CHANGE(hba, FC_ERROR);
4651 4653
4652 4654 if (status & HS_FFER6) {
4653 4655 emlxs_thread_spawn(hba, emlxs_restart_thread,
4654 4656 NULL, NULL);
4655 4657 } else {
4656 4658 emlxs_thread_spawn(hba, emlxs_shutdown_thread,
4657 4659 NULL, NULL);
4658 4660 }
4659 4661 }
4660 4662
4661 4663 done:
4662 4664 #ifdef FMA_SUPPORT
4663 4665 /* Access handle validation */
4664 4666 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
4665 4667 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4666 4668 #endif /* FMA_SUPPORT */
4667 4669
4668 4670 return;
4669 4671
4670 4672 } /* emlxs_handle_ff_error() */
4671 4673
4672 4674
4673 4675 /*
4674 4676 * emlxs_sli3_handle_link_event()
4675 4677 *
4676 4678 * Description: Process a Link Attention.
4677 4679 */
4678 4680 static void
4679 4681 emlxs_sli3_handle_link_event(emlxs_hba_t *hba)
4680 4682 {
4681 4683 emlxs_port_t *port = &PPORT;
4682 4684 MAILBOXQ *mbq;
4683 4685 int rc;
4684 4686
4685 4687 HBASTATS.LinkEvent++;
4686 4688
4687 4689 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_event_msg, "event=%x",
4688 4690 HBASTATS.LinkEvent);
4689 4691
4690 4692 /* Make sure link is declared down */
4691 4693 emlxs_linkdown(hba);
4692 4694
4693 4695 /* Get a buffer which will be used for mailbox commands */
4694 4696 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
4695 4697 /* Get link attention message */
4696 4698 if (emlxs_mb_read_la(hba, mbq) == 0) {
4697 4699 rc = emlxs_sli3_issue_mbox_cmd(hba, mbq,
4698 4700 MBX_NOWAIT, 0);
4699 4701 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
4700 4702 emlxs_mem_put(hba, MEM_MBOX,
4701 4703 (void *)mbq);
4702 4704 }
4703 4705
4704 4706 mutex_enter(&EMLXS_PORT_LOCK);
4705 4707
4706 4708 /*
4707 4709 * Clear Link Attention in HA REG
4708 4710 */
4709 4711 WRITE_CSR_REG(hba, FC_HA_REG(hba), HA_LATT);
4710 4712
4711 4713 #ifdef FMA_SUPPORT
4712 4714 /* Access handle validation */
4713 4715 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
4714 4716 #endif /* FMA_SUPPORT */
4715 4717
4716 4718 mutex_exit(&EMLXS_PORT_LOCK);
4717 4719 } else {
4718 4720 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4719 4721 }
4720 4722 }
4721 4723
4722 4724 } /* emlxs_sli3_handle_link_event() */
4723 4725
4724 4726
4725 4727 /*
4726 4728 * emlxs_sli3_handle_ring_event()
4727 4729 *
4728 4730 * Description: Process a Ring Attention.
4729 4731 */
4730 4732 static void
4731 4733 emlxs_sli3_handle_ring_event(emlxs_hba_t *hba, int32_t ring_no,
4732 4734 uint32_t ha_copy)
4733 4735 {
4734 4736 emlxs_port_t *port = &PPORT;
4735 4737 SLIM2 *slim2p = (SLIM2 *)hba->sli.sli3.slim2.virt;
4736 4738 CHANNEL *cp;
4737 4739 RING *rp;
4738 4740 IOCB *entry;
4739 4741 IOCBQ *iocbq;
4740 4742 IOCBQ local_iocbq;
4741 4743 PGP *pgp;
4742 4744 uint32_t count;
4743 4745 volatile uint32_t chipatt;
4744 4746 void *ioa2;
4745 4747 uint32_t reg;
4746 4748 uint32_t channel_no;
4747 4749 off_t offset;
4748 4750 IOCBQ *rsp_head = NULL;
4749 4751 IOCBQ *rsp_tail = NULL;
4750 4752 emlxs_buf_t *sbp = NULL;
4751 4753
4752 4754 count = 0;
4753 4755 rp = &hba->sli.sli3.ring[ring_no];
4754 4756 cp = rp->channelp;
4755 4757 channel_no = cp->channelno;
4756 4758
4757 4759 /*
4758 4760 * Isolate this ring's host attention bits
4759 4761 * This makes all ring attention bits equal
4760 4762 * to Ring0 attention bits
4761 4763 */
4762 4764 reg = (ha_copy >> (ring_no * 4)) & 0x0f;
4763 4765
4764 4766 /*
4765 4767 * Gather iocb entries off response ring.
4766 4768 * Ensure entry is owned by the host.
4767 4769 */
4768 4770 pgp = (PGP *)&slim2p->mbx.us.s2.port[ring_no];
4769 4771 offset =
4770 4772 (off_t)((uint64_t)((unsigned long)&(pgp->rspPutInx)) -
4771 4773 (uint64_t)((unsigned long)slim2p));
4772 4774 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, 4,
4773 4775 DDI_DMA_SYNC_FORKERNEL);
4774 4776 rp->fc_port_rspidx = BE_SWAP32(pgp->rspPutInx);
4775 4777
4776 4778 /* While ring is not empty */
4777 4779 while (rp->fc_rspidx != rp->fc_port_rspidx) {
4778 4780 HBASTATS.IocbReceived[channel_no]++;
4779 4781
4780 4782 /* Get the next response ring iocb */
4781 4783 entry =
4782 4784 (IOCB *)(((char *)rp->fc_rspringaddr +
4783 4785 (rp->fc_rspidx * hba->sli.sli3.iocb_rsp_size)));
4784 4786
4785 4787 /* DMA sync the response ring iocb for the adapter */
4786 4788 offset = (off_t)((uint64_t)((unsigned long)entry)
4787 4789 - (uint64_t)((unsigned long)slim2p));
4788 4790 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
4789 4791 hba->sli.sli3.iocb_rsp_size, DDI_DMA_SYNC_FORKERNEL);
4790 4792
4791 4793 count++;
4792 4794
4793 4795 /* Copy word6 and word7 to local iocb for now */
4794 4796 iocbq = &local_iocbq;
4795 4797
4796 4798 BE_SWAP32_BCOPY((uint8_t *)entry + (sizeof (uint32_t) * 6),
4797 4799 (uint8_t *)iocbq + (sizeof (uint32_t) * 6),
4798 4800 (sizeof (uint32_t) * 2));
4799 4801
4800 4802 /* when LE is not set, entire Command has not been received */
4801 4803 if (!iocbq->iocb.ULPLE) {
4802 4804 /* This should never happen */
4803 4805 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ring_error_msg,
4804 4806 "ulpLE is not set. "
4805 4807 "ring=%d iotag=%d cmd=%x status=%x",
4806 4808 channel_no, iocbq->iocb.ULPIOTAG,
4807 4809 iocbq->iocb.ULPCOMMAND, iocbq->iocb.ULPSTATUS);
4808 4810
4809 4811 goto next;
4810 4812 }
4811 4813
4812 4814 sbp = NULL;
4813 4815 switch (iocbq->iocb.ULPCOMMAND) {
4814 4816 #ifdef SFCT_SUPPORT
4815 4817 case CMD_CLOSE_XRI_CX:
4816 4818 case CMD_CLOSE_XRI_CN:
4817 4819 case CMD_ABORT_XRI_CX:
4818 4820 if (port->mode == MODE_TARGET) {
4819 4821 sbp = emlxs_unregister_pkt(cp,
4820 4822 iocbq->iocb.ULPIOTAG, 0);
4821 4823 }
4822 4824 break;
4823 4825 #endif /* SFCT_SUPPORT */
4824 4826
4825 4827 /* Ring 0 registered commands */
4826 4828 case CMD_FCP_ICMND_CR:
4827 4829 case CMD_FCP_ICMND_CX:
4828 4830 case CMD_FCP_IREAD_CR:
4829 4831 case CMD_FCP_IREAD_CX:
4830 4832 case CMD_FCP_IWRITE_CR:
4831 4833 case CMD_FCP_IWRITE_CX:
4832 4834 case CMD_FCP_ICMND64_CR:
4833 4835 case CMD_FCP_ICMND64_CX:
4834 4836 case CMD_FCP_IREAD64_CR:
4835 4837 case CMD_FCP_IREAD64_CX:
4836 4838 case CMD_FCP_IWRITE64_CR:
4837 4839 case CMD_FCP_IWRITE64_CX:
4838 4840 #ifdef SFCT_SUPPORT
4839 4841 case CMD_FCP_TSEND_CX:
4840 4842 case CMD_FCP_TSEND64_CX:
4841 4843 case CMD_FCP_TRECEIVE_CX:
4842 4844 case CMD_FCP_TRECEIVE64_CX:
4843 4845 case CMD_FCP_TRSP_CX:
4844 4846 case CMD_FCP_TRSP64_CX:
4845 4847 #endif /* SFCT_SUPPORT */
4846 4848
4847 4849 /* Ring 1 registered commands */
4848 4850 case CMD_XMIT_BCAST_CN:
4849 4851 case CMD_XMIT_BCAST_CX:
4850 4852 case CMD_XMIT_SEQUENCE_CX:
4851 4853 case CMD_XMIT_SEQUENCE_CR:
4852 4854 case CMD_XMIT_BCAST64_CN:
4853 4855 case CMD_XMIT_BCAST64_CX:
4854 4856 case CMD_XMIT_SEQUENCE64_CX:
4855 4857 case CMD_XMIT_SEQUENCE64_CR:
4856 4858 case CMD_CREATE_XRI_CR:
4857 4859 case CMD_CREATE_XRI_CX:
4858 4860
4859 4861 /* Ring 2 registered commands */
4860 4862 case CMD_ELS_REQUEST_CR:
4861 4863 case CMD_ELS_REQUEST_CX:
4862 4864 case CMD_XMIT_ELS_RSP_CX:
4863 4865 case CMD_ELS_REQUEST64_CR:
4864 4866 case CMD_ELS_REQUEST64_CX:
4865 4867 case CMD_XMIT_ELS_RSP64_CX:
4866 4868
4867 4869 /* Ring 3 registered commands */
4868 4870 case CMD_GEN_REQUEST64_CR:
4869 4871 case CMD_GEN_REQUEST64_CX:
4870 4872
4871 4873 sbp =
4872 4874 emlxs_unregister_pkt(cp, iocbq->iocb.ULPIOTAG, 0);
4873 4875 break;
4874 4876 }
4875 4877
4876 4878 /* If packet is stale, then drop it. */
4877 4879 if (sbp == STALE_PACKET) {
4878 4880 cp->hbaCmplCmd_sbp++;
4879 4881 /* Copy entry to the local iocbq */
4880 4882 BE_SWAP32_BCOPY((uint8_t *)entry,
4881 4883 (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4882 4884
4883 4885 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_stale_msg,
4884 4886 "channelno=%d iocb=%p cmd=%x status=%x "
4885 4887 "error=%x iotag=%d context=%x info=%x",
4886 4888 channel_no, iocbq, (uint8_t)iocbq->iocb.ULPCOMMAND,
4887 4889 iocbq->iocb.ULPSTATUS,
4888 4890 (uint8_t)iocbq->iocb.un.grsp.perr.statLocalError,
4889 4891 (uint16_t)iocbq->iocb.ULPIOTAG,
4890 4892 (uint16_t)iocbq->iocb.ULPCONTEXT,
4891 4893 (uint8_t)iocbq->iocb.ULPRSVDBYTE);
4892 4894
4893 4895 goto next;
4894 4896 }
4895 4897
4896 4898 /*
4897 4899 * If a packet was found, then queue the packet's
4898 4900 * iocb for deferred processing
4899 4901 */
4900 4902 else if (sbp) {
4901 4903 #ifdef SFCT_SUPPORT
4902 4904 fct_cmd_t *fct_cmd;
4903 4905 emlxs_buf_t *cmd_sbp;
4904 4906
4905 4907 fct_cmd = sbp->fct_cmd;
4906 4908 if (fct_cmd) {
4907 4909 cmd_sbp =
4908 4910 (emlxs_buf_t *)fct_cmd->cmd_fca_private;
4909 4911 mutex_enter(&cmd_sbp->fct_mtx);
4910 4912 EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp,
4911 4913 EMLXS_FCT_IOCB_COMPLETE);
4912 4914 mutex_exit(&cmd_sbp->fct_mtx);
4913 4915 }
4914 4916 #endif /* SFCT_SUPPORT */
4915 4917 cp->hbaCmplCmd_sbp++;
4916 4918 atomic_dec_32(&hba->io_active);
4917 4919 #ifdef NODE_THROTTLE_SUPPORT
4918 4920 if (sbp->node) {
4919 4921 atomic_dec_32(&sbp->node->io_active);
4920 4922 }
4921 4923 #endif /* NODE_THROTTLE_SUPPORT */
4922 4924
4923 4925 /* Copy entry to sbp's iocbq */
4924 4926 iocbq = &sbp->iocbq;
4925 4927 BE_SWAP32_BCOPY((uint8_t *)entry,
4926 4928 (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4927 4929
4928 4930 iocbq->next = NULL;
4929 4931
4930 4932 /*
4931 4933 * If this is NOT a polled command completion
4932 4934 * or a driver allocated pkt, then defer pkt
4933 4935 * completion.
4934 4936 */
4935 4937 if (!(sbp->pkt_flags &
4936 4938 (PACKET_POLLED | PACKET_ALLOCATED))) {
4937 4939 /* Add the IOCB to the local list */
4938 4940 if (!rsp_head) {
4939 4941 rsp_head = iocbq;
4940 4942 } else {
4941 4943 rsp_tail->next = iocbq;
4942 4944 }
4943 4945
4944 4946 rsp_tail = iocbq;
4945 4947
4946 4948 goto next;
4947 4949 }
4948 4950 } else {
4949 4951 cp->hbaCmplCmd++;
4950 4952 /* Copy entry to the local iocbq */
4951 4953 BE_SWAP32_BCOPY((uint8_t *)entry,
4952 4954 (uint8_t *)iocbq, hba->sli.sli3.iocb_rsp_size);
4953 4955
4954 4956 iocbq->next = NULL;
4955 4957 iocbq->bp = NULL;
4956 4958 iocbq->port = &PPORT;
4957 4959 iocbq->channel = cp;
4958 4960 iocbq->node = NULL;
4959 4961 iocbq->sbp = NULL;
4960 4962 iocbq->flag = 0;
4961 4963 }
4962 4964
4963 4965 /* process the channel event now */
4964 4966 emlxs_proc_channel_event(hba, cp, iocbq);
4965 4967
4966 4968 next:
4967 4969 /* Increment the driver's local response get index */
4968 4970 if (++rp->fc_rspidx >= rp->fc_numRiocb) {
4969 4971 rp->fc_rspidx = 0;
4970 4972 }
4971 4973
4972 4974 } /* while (TRUE) */
4973 4975
4974 4976 if (rsp_head) {
4975 4977 mutex_enter(&cp->rsp_lock);
4976 4978 if (cp->rsp_head == NULL) {
4977 4979 cp->rsp_head = rsp_head;
4978 4980 cp->rsp_tail = rsp_tail;
4979 4981 } else {
4980 4982 cp->rsp_tail->next = rsp_head;
4981 4983 cp->rsp_tail = rsp_tail;
4982 4984 }
4983 4985 mutex_exit(&cp->rsp_lock);
4984 4986
4985 4987 emlxs_thread_trigger2(&cp->intr_thread, emlxs_proc_channel, cp);
4986 4988 }
4987 4989
4988 4990 /* Check if at least one response entry was processed */
4989 4991 if (count) {
4990 4992 /* Update response get index for the adapter */
4991 4993 if (hba->bus_type == SBUS_FC) {
4992 4994 slim2p->mbx.us.s2.host[channel_no].rspGetInx
4993 4995 = BE_SWAP32(rp->fc_rspidx);
4994 4996
4995 4997 /* DMA sync the index for the adapter */
4996 4998 offset = (off_t)
4997 4999 ((uint64_t)((unsigned long)&(slim2p->mbx.us.s2.
4998 5000 host[channel_no].rspGetInx))
4999 5001 - (uint64_t)((unsigned long)slim2p));
5000 5002 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5001 5003 offset, 4, DDI_DMA_SYNC_FORDEV);
5002 5004 } else {
5003 5005 ioa2 =
5004 5006 (void *)((char *)hba->sli.sli3.slim_addr +
5005 5007 hba->sli.sli3.hgp_ring_offset + (((channel_no * 2) +
5006 5008 1) * sizeof (uint32_t)));
5007 5009 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2,
5008 5010 rp->fc_rspidx);
5009 5011 #ifdef FMA_SUPPORT
5010 5012 /* Access handle validation */
5011 5013 EMLXS_CHK_ACC_HANDLE(hba,
5012 5014 hba->sli.sli3.slim_acc_handle);
5013 5015 #endif /* FMA_SUPPORT */
5014 5016 }
5015 5017
5016 5018 if (reg & HA_R0RE_REQ) {
5017 5019 /* HBASTATS.chipRingFree++; */
5018 5020
5019 5021 mutex_enter(&EMLXS_PORT_LOCK);
5020 5022
5021 5023 /* Tell the adapter we serviced the ring */
5022 5024 chipatt = ((CA_R0ATT | CA_R0RE_RSP) <<
5023 5025 (channel_no * 4));
5024 5026 WRITE_CSR_REG(hba, FC_CA_REG(hba), chipatt);
5025 5027
5026 5028 #ifdef FMA_SUPPORT
5027 5029 /* Access handle validation */
5028 5030 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
5029 5031 #endif /* FMA_SUPPORT */
5030 5032
5031 5033 mutex_exit(&EMLXS_PORT_LOCK);
5032 5034 }
5033 5035 }
5034 5036
5035 5037 if ((reg & HA_R0CE_RSP) || hba->channel_tx_count) {
5036 5038 /* HBASTATS.hostRingFree++; */
5037 5039
5038 5040 /* Cmd ring may be available. Try sending more iocbs */
5039 5041 emlxs_sli3_issue_iocb_cmd(hba, cp, 0);
5040 5042 }
5041 5043
5042 5044 /* HBASTATS.ringEvent++; */
5043 5045
5044 5046 return;
5045 5047
5046 5048 } /* emlxs_sli3_handle_ring_event() */
5047 5049
5048 5050
/*
 * emlxs_handle_rcv_seq
 *
 * Process one unsolicited receive-sequence IOCB from the given channel.
 * Selects the per-ring statistics counters and buffer pool, maps the
 * receive buffer (by HBQ tag when HBQs are enabled, otherwise by BDE
 * physical address), validates it, and dispatches the frame to the
 * FCT/IP/ELS/CT unsolicited handler for the owning port.  On any
 * validation failure the frame is counted as dropped (or as an error
 * for adapter-reported ULPSTATUS failures) and the buffer is recycled
 * at "done:".
 *
 * Returns 0 on normal completion, 1 if the channel number is invalid.
 */
extern int
emlxs_handle_rcv_seq(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;
	IOCB *iocb;
	RING *rp;
	MATCHMAP *mp = NULL;
	uint64_t bdeAddr;
	uint32_t vpi = 0;
	uint32_t channelno;
	uint32_t size = 0;
	uint32_t *RcvError;
	uint32_t *RcvDropped;
	uint32_t *UbPosted;
	emlxs_msg_t *dropped_msg;
	char error_str[64];
	uint32_t buf_type;
	uint32_t *word;

	channelno = cp->channelno;
	rp = &hba->sli.sli3.ring[channelno];

	iocb = &iocbq->iocb;
	/* Raw word view of the IOCB, used for diagnostic messages */
	word = (uint32_t *)iocb;

	/* Bind per-channel counters, drop-message id and buffer pool */
	switch (channelno) {
#ifdef SFCT_SUPPORT
	case FC_FCT_RING:
		HBASTATS.FctRingEvent++;
		RcvError = &HBASTATS.FctRingError;
		RcvDropped = &HBASTATS.FctRingDropped;
		UbPosted = &HBASTATS.FctUbPosted;
		dropped_msg = &emlxs_fct_detail_msg;
		buf_type = MEM_FCTBUF;
		break;
#endif /* SFCT_SUPPORT */

	case FC_IP_RING:
		HBASTATS.IpRcvEvent++;
		/* Note: the IP ring counts errors and drops in the */
		/* same IpDropped counter */
		RcvError = &HBASTATS.IpDropped;
		RcvDropped = &HBASTATS.IpDropped;
		UbPosted = &HBASTATS.IpUbPosted;
		dropped_msg = &emlxs_unsol_ip_dropped_msg;
		buf_type = MEM_IPBUF;
		break;

	case FC_ELS_RING:
		HBASTATS.ElsRcvEvent++;
		RcvError = &HBASTATS.ElsRcvError;
		RcvDropped = &HBASTATS.ElsRcvDropped;
		UbPosted = &HBASTATS.ElsUbPosted;
		dropped_msg = &emlxs_unsol_els_dropped_msg;
		buf_type = MEM_ELSBUF;
		break;

	case FC_CT_RING:
		HBASTATS.CtRcvEvent++;
		RcvError = &HBASTATS.CtRcvError;
		RcvDropped = &HBASTATS.CtRcvDropped;
		UbPosted = &HBASTATS.CtUbPosted;
		dropped_msg = &emlxs_unsol_ct_dropped_msg;
		buf_type = MEM_CTBUF;
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_iocb_invalid_msg,
		    "channel=%d cmd=%x %s %x %x %x %x",
		    channelno, iocb->ULPCOMMAND,
		    emlxs_state_xlate(iocb->ULPSTATUS), word[4], word[5],
		    word[6], word[7]);
		return (1);
	}

	/* Classify adapter-reported receive errors.  Buffer-shortage */
	/* conditions clear ULPBDECOUNT so no buffer is recycled below; */
	/* "waiting" and "need buffer" cases are not counted as failures */
	if (iocb->ULPSTATUS) {
		if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
		    (iocb->un.grsp.perr.statLocalError ==
		    IOERR_RCV_BUFFER_TIMEOUT)) {
			(void) strlcpy(error_str, "Out of posted buffers:",
			    sizeof (error_str));
			iocb->ULPBDECOUNT = 0;
		} else if ((iocb->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
		    (iocb->un.grsp.perr.statLocalError ==
		    IOERR_RCV_BUFFER_WAITING)) {
			(void) strlcpy(error_str, "Buffer waiting:",
			    sizeof (error_str));
			iocb->ULPBDECOUNT = 0;
			goto done;
		} else if (iocb->ULPSTATUS == IOSTAT_NEED_BUFF_ENTRY) {
			(void) strlcpy(error_str, "Need Buffer Entry:",
			    sizeof (error_str));
			iocb->ULPBDECOUNT = 0;
			goto done;
		} else {
			(void) strlcpy(error_str, "General error:",
			    sizeof (error_str));
		}

		goto failed;
	}

	/* Locate the receive buffer for this IOCB */
	if (hba->flag & FC_HBQ_ENABLED) {
		/* HBQ mode: the IOCB carries an HBQ id and entry tag */
		/* that index a previously posted buffer */
		HBQ_INIT_t *hbq;
		HBQE_t *hbqE;
		uint32_t hbqe_tag;
		uint32_t hbq_id;

		(*UbPosted)--;

		hbqE = (HBQE_t *)iocb;
		hbq_id = hbqE->unt.ext.HBQ_tag;
		hbqe_tag = hbqE->unt.ext.HBQE_tag;

		hbq = &hba->sli.sli3.hbq_table[hbq_id];

		if (hbqe_tag >= hbq->HBQ_numEntries) {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid HBQE iotag=%d:", hbqe_tag);
			goto dropped;
		}

		mp = hba->sli.sli3.hbq_table[hbq_id].HBQ_PostBufs[hbqe_tag];

		size = iocb->unsli3.ext_rcv.seq_len;
	} else {
		/* Non-HBQ mode: map the BDE physical address back to */
		/* the driver's buffer descriptor */
		bdeAddr =
		    PADDR(iocb->un.cont64[0].addrHigh,
		    iocb->un.cont64[0].addrLow);

		/* Check for invalid buffer */
		if (iocb->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID) {
			(void) strlcpy(error_str, "Invalid buffer:",
			    sizeof (error_str));
			goto dropped;
		}

		mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);

		size = iocb->un.rcvseq64.rcvBde.tus.f.bdeSize;
	}

	if (!mp) {
		(void) strlcpy(error_str, "Buffer not mapped:",
		    sizeof (error_str));
		goto dropped;
	}

#ifdef FMA_SUPPORT
	if (mp->dma_handle) {
		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "handle_rcv_seq: hdl=%p",
			    mp->dma_handle);
			goto dropped;
		}
	}
#endif /* FMA_SUPPORT */

	if (!size) {
		(void) strlcpy(error_str, "Buffer empty:", sizeof (error_str));
		goto dropped;
	}

	/* To avoid we drop the broadcast packets */
	if (channelno != FC_IP_RING) {
		/* Get virtual port */
		if (hba->flag & FC_NPIV_ENABLED) {
			vpi = iocb->unsli3.ext_rcv.vpi;
			if (vpi >= hba->vpi_max) {
				(void) snprintf(error_str, sizeof (error_str),
				    "Invalid VPI=%d:", vpi);
				goto dropped;
			}

			port = &VPORT(vpi);
		}
	}

	/* Process request */
	/* Dispatch the frame to the handler matching the ring and the */
	/* port mode; mode mismatches are dropped with a reason string */
	switch (channelno) {
	case FC_FCT_RING:
		if (port->mode == MODE_INITIATOR) {
			(void) strlcpy(error_str, "Target mode disabled:",
			    sizeof (error_str));
			goto dropped;
#ifdef SFCT_SUPPORT
		} else if (port->mode == MODE_TARGET) {
			(void) emlxs_fct_handle_unsol_req(port, cp, iocbq, mp,
			    size);
#endif /* SFCT_SUPPORT */
		} else {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid mode=%x:", port->mode);
			goto dropped;
		}
		break;

	case FC_IP_RING:
		if (port->mode == MODE_INITIATOR) {
			(void) emlxs_ip_handle_unsol_req(port, cp, iocbq,
			    mp, size);
#ifdef SFCT_SUPPORT
		} else if (port->mode == MODE_TARGET) {
			(void) strlcpy(error_str, "Initiator mode disabled:",
			    sizeof (error_str));
			goto dropped;
#endif /* SFCT_SUPPORT */
		} else {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid mode=%x:", port->mode);
			goto dropped;
		}
		break;

	case FC_ELS_RING:
		if (port->mode == MODE_INITIATOR) {
			(void) emlxs_els_handle_unsol_req(port, cp, iocbq, mp,
			    size);
#ifdef SFCT_SUPPORT
		} else if (port->mode == MODE_TARGET) {
			(void) emlxs_fct_handle_unsol_els(port, cp, iocbq, mp,
			    size);
#endif /* SFCT_SUPPORT */
		} else {
			(void) snprintf(error_str, sizeof (error_str),
			    "Invalid mode=%x:", port->mode);
			goto dropped;
		}
		break;

	case FC_CT_RING:
		(void) emlxs_ct_handle_unsol_req(port, cp, iocbq, mp, size);
		break;
	}

	goto done;

dropped:
	(*RcvDropped)++;

	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
	    "%s: cmd=%x %s %x %x %x %x",
	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
	    word[4], word[5], word[6], word[7]);

	/* For a dropped FCT frame, LOGO the sender so it retries login */
	if (channelno == FC_FCT_RING) {
		uint32_t sid;

		if (hba->sli_mode == EMLXS_HBA_SLI3_MODE) {
			emlxs_node_t *ndlp;
			/* NOTE(review): ULPIOTAG appears to carry the RPI */
			/* here for the node lookup — confirm against the */
			/* SLI-3 unsolicited IOCB layout */
			ndlp = emlxs_node_find_rpi(port, iocb->ULPIOTAG);
			if (! ndlp) {
				goto done;
			}
			sid = ndlp->nlp_DID;
		} else {
			sid = iocb->un.ulpWord[4] & 0xFFFFFF;
		}

		emlxs_send_logo(port, sid);
	}

	goto done;

failed:
	(*RcvError)++;

	EMLXS_MSGF(EMLXS_CONTEXT, dropped_msg,
	    "%s: cmd=%x %s %x %x %x %x hba:%x %x",
	    error_str, iocb->ULPCOMMAND, emlxs_state_xlate(iocb->ULPSTATUS),
	    word[4], word[5], word[6], word[7], hba->state, hba->flag);

done:

	/* Recycle the receive buffer: advance the HBQ, or return the */
	/* buffer to its pool and repost a replacement on the ring */
	if (hba->flag & FC_HBQ_ENABLED) {
		if (iocb->ULPBDECOUNT) {
			HBQE_t *hbqE;
			uint32_t hbq_id;

			hbqE = (HBQE_t *)iocb;
			hbq_id = hbqE->unt.ext.HBQ_tag;

			emlxs_update_HBQ_index(hba, hbq_id);
		}
	} else {
		if (mp) {
			emlxs_mem_put(hba, buf_type, (void *)mp);
		}

		if (iocb->ULPBDECOUNT) {
			(void) emlxs_post_buffer(hba, rp, 1);
		}
	}

	return (0);

} /* emlxs_handle_rcv_seq() */
5347 5349
5348 5350
/* EMLXS_CMD_RING_LOCK must be held when calling this function */
/*
 * emlxs_sli3_issue_iocb
 *
 * Copy one IOCB from the local queue entry into the next available
 * slot of the ring's command ring in SLIM2 memory and DMA-sync it for
 * the adapter.  For ULP packet requests (iocbq->sbp set) the packet is
 * marked in-chip and the active I/O counters are bumped; otherwise the
 * driver-allocated IOCBQ is freed after the copy.  The caller is
 * responsible for ringing the chip's doorbell.
 */
static void
emlxs_sli3_issue_iocb(emlxs_hba_t *hba, RING *rp, IOCBQ *iocbq)
{
	emlxs_port_t *port;
	IOCB *icmd;
	IOCB *iocb;
	emlxs_buf_t *sbp;
	off_t offset;
	uint32_t ringno;

	ringno = rp->ringno;
	sbp = iocbq->sbp;
	icmd = &iocbq->iocb;
	port = iocbq->port;

	HBASTATS.IocbIssued[ringno]++;

	/* Check for ULP pkt request */
	if (sbp) {
		mutex_enter(&sbp->mtx);

		if (sbp->node == NULL) {
			/* Set node to base node by default */
			iocbq->node = (void *)&port->node_base;
			sbp->node = (void *)&port->node_base;
		}

		/* Packet is now owned by the chip queue */
		sbp->pkt_flags |= PACKET_IN_CHIPQ;
		mutex_exit(&sbp->mtx);

		atomic_inc_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
		if (sbp->node) {
			atomic_inc_32(&sbp->node->io_active);
		}
#endif /* NODE_THROTTLE_SUPPORT */

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_ISSUED);
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    icmd->ULPCOMMAND);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		rp->channelp->hbaSendCmd_sbp++;
		iocbq->channel = rp->channelp;
	} else {
		rp->channelp->hbaSendCmd++;
	}

	/* get the next available command ring iocb */
	iocb =
	    (IOCB *)(((char *)rp->fc_cmdringaddr +
	    (rp->fc_cmdidx * hba->sli.sli3.iocb_cmd_size)));

	/* Copy the local iocb to the command ring iocb */
	BE_SWAP32_BCOPY((uint8_t *)icmd, (uint8_t *)iocb,
	    hba->sli.sli3.iocb_cmd_size);

	/* DMA sync the command ring iocb for the adapter */
	offset = (off_t)((uint64_t)((unsigned long)iocb)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
	    hba->sli.sli3.iocb_cmd_size, DDI_DMA_SYNC_FORDEV);

	/*
	 * After this, the sbp / iocb should not be
	 * accessed in the xmit path.
	 */

	/* Free the local iocb if there is no sbp tracking it */
	if (!sbp) {
		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
	}

	/* update local ring index to next available ring index */
	rp->fc_cmdidx =
	    (rp->fc_cmdidx + 1 >= rp->fc_numCiocb) ? 0 : rp->fc_cmdidx + 1;


	return;

} /* emlxs_sli3_issue_iocb() */
5437 5439
5438 5440
/*
 * emlxs_sli3_hba_kill
 *
 * Perform the adapter interlock sequence to kill (halt) the board.
 * Waits for any active mailbox to drain, disables host interrupts,
 * then issues MBX_KILL_BOARD — first through the SLIM2 mailbox (when
 * in SLIM2 mode), falling back to the SLIM1 mailbox, with one retry.
 * Leaves the HBA state set to FC_KILLED on exit.
 */
static void
emlxs_sli3_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	uint32_t j;
	uint32_t interlock_failed;
	uint32_t ha_copy;
	uint32_t value;
	off_t offset;
	uint32_t size;

	/* Perform adapter interlock to kill adapter */
	interlock_failed = 0;

	mutex_enter(&EMLXS_PORT_LOCK);
	if (hba->flag & FC_INTERLOCKED) {
		/* Already interlocked; just mark the state and leave */
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/* Wait up to ~1s (10000 x 100us) for the mailbox to go idle */
	j = 0;
	while (j++ < 10000) {
		if (hba->mbox_queue_flag == 0) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		BUSYWAIT_US(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	if (hba->mbox_queue_flag != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Mailbox busy.");
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Claim the mailbox for the duration of the kill sequence */
	hba->flag |= FC_INTERLOCKED;
	hba->mbox_queue_flag = 1;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	/* swpmb overlays word0 so mailbox fields can be poked/read */
	/* through a single 32-bit SLIM access */
	swpmb = (MAILBOX *)&word0;

	if (!(hba->flag & FC_SLIM2_MODE)) {
		goto mode_B;
	}

mode_A:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM2 Interlock...");

interlock_A:

	/* Handshake: host writes 0x55555555; adapter acknowledges */
	/* acceptance by changing it to 0xAAAAAAAA */
	value = 0x55555555;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}

		BUSYWAIT_US(50);
	}

	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			BUSYWAIT_US(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */
	interlock_failed = 1;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg, "Interlock failed.");

mode_B:

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "Attempting SLIM1 Interlock...");

interlock_B:

	value = 0x55555555;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write KILL BOARD to mailbox */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, ((volatile uint32_t *)mb1), word0);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}

		BUSYWAIT_US(50);
	}

	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));

			if (swpmb->mbxOwner == 0) {
				break;
			}

			BUSYWAIT_US(50);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock succeeded.");

		goto done;
	}

	/* Interlock failed !!! */

	/* If this is the first time then try again */
	if (interlock_failed == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Retrying...");

		/* Try again */
		interlock_failed = 1;
		goto interlock_B;
	}

	/*
	 * Now check for error attention to indicate the board has
	 * been killed
	 */
	j = 0;
	while (j++ < 10000) {
		ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));

		if (ha_copy & HA_ERATT) {
			break;
		}

		BUSYWAIT_US(50);
	}

	if (ha_copy & HA_ERATT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board killed.");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Interlock failed. Board not killed.");
	}

done:

	hba->mbox_queue_flag = 0;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */

	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_sli3_hba_kill() */
5672 5674
5673 5675
/*
 * emlxs_sli3_hba_kill4quiesce
 *
 * Streamlined variant of emlxs_sli3_hba_kill() used during quiesce:
 * disables host interrupts and issues a single MBX_KILL_BOARD via the
 * SLIM2 mailbox without the mailbox-drain wait, interlock flags, or
 * SLIM1 fallback/retry logic.  Always leaves the state at FC_KILLED.
 */
static void
emlxs_sli3_hba_kill4quiesce(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX *swpmb;
	MAILBOX *mb2;
	MAILBOX *mb1;
	uint32_t word0;
	off_t offset;
	uint32_t j;
	uint32_t value;
	uint32_t size;

	/* Disable all host interrupts */
	hba->sli.sli3.hc_copy = 0;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
	WRITE_CSR_REG(hba, FC_HA_REG(hba), 0xffffffff);

	mb2 = FC_SLIM2_MAILBOX(hba);
	mb1 = FC_SLIM1_MAILBOX(hba);
	/* swpmb overlays word0 for single-word mailbox field access */
	swpmb = (MAILBOX *)&word0;

	/* Handshake: host writes 0x55555555; adapter acknowledges */
	/* acceptance by changing it to 0xAAAAAAAA */
	value = 0x55555555;
	word0 = 0;
	swpmb->mbxCommand = MBX_KILL_BOARD;
	swpmb->mbxOwner = OWN_CHIP;

	/* Write value to SLIM */
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1), value);
	WRITE_SLIM_ADDR(hba, (((volatile uint32_t *)mb1)), word0);

	/* Send Kill board request */
	mb2->un.varWords[0] = value;
	mb2->mbxCommand = MBX_KILL_BOARD;
	mb2->mbxOwner = OWN_CHIP;

	/* Sync the memory */
	offset = (off_t)((uint64_t)((unsigned long)mb2)
	    - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	size = (sizeof (uint32_t) * 2);

	BE_SWAP32_BCOPY((uint8_t *)mb2, (uint8_t *)mb2, size);

	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset, size,
	    DDI_DMA_SYNC_FORDEV);

	/* interrupt board to do it right away */
	WRITE_CSR_REG(hba, FC_CA_REG(hba), CA_MBATT);

	/* First wait for command acceptance */
	j = 0;
	while (j++ < 1000) {
		value = READ_SLIM_ADDR(hba, (((volatile uint32_t *)mb1) + 1));

		if (value == 0xAAAAAAAA) {
			break;
		}
		BUSYWAIT_US(50);
	}
	if (value == 0xAAAAAAAA) {
		/* Now wait for mailbox ownership to clear */
		while (j++ < 10000) {
			word0 =
			    READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb1));
			if (swpmb->mbxOwner == 0) {
				break;
			}
			BUSYWAIT_US(50);
		}
		goto done;
	}

done:
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif /* FMA_SUPPORT */
	return;

} /* emlxs_sli3_hba_kill4quiesce */
5757 5759
5758 5760
5759 5761
5760 5762
5761 5763 /*
5762 5764 * emlxs_handle_mb_event
5763 5765 *
5764 5766 * Description: Process a Mailbox Attention.
5765 5767 * Called from host_interrupt to process MBATT
5766 5768 *
5767 5769 * Returns:
5768 5770 *
5769 5771 */
5770 5772 static uint32_t
5771 5773 emlxs_handle_mb_event(emlxs_hba_t *hba)
5772 5774 {
5773 5775 emlxs_port_t *port = &PPORT;
5774 5776 MAILBOX *mb;
5775 5777 MAILBOX *swpmb;
5776 5778 MAILBOX *mbox;
5777 5779 MAILBOXQ *mbq = NULL;
5778 5780 volatile uint32_t word0;
5779 5781 MATCHMAP *mbox_bp;
5780 5782 off_t offset;
5781 5783 uint32_t i;
5782 5784 int rc;
5783 5785
5784 5786 swpmb = (MAILBOX *)&word0;
5785 5787
5786 5788 mutex_enter(&EMLXS_PORT_LOCK);
5787 5789 switch (hba->mbox_queue_flag) {
5788 5790 case 0:
5789 5791 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5790 5792 "No mailbox active.");
5791 5793
5792 5794 mutex_exit(&EMLXS_PORT_LOCK);
5793 5795 return (0);
5794 5796
5795 5797 case MBX_POLL:
5796 5798
5797 5799 /* Mark mailbox complete, this should wake up any polling */
5798 5800 /* threads. This can happen if interrupts are enabled while */
5799 5801 /* a polled mailbox command is outstanding. If we don't set */
5800 5802 /* MBQ_COMPLETED here, the polling thread may wait until */
5801 5803 /* timeout error occurs */
5802 5804
5803 5805 mutex_enter(&EMLXS_MBOX_LOCK);
5804 5806 mbq = (MAILBOXQ *)hba->mbox_mbq;
5805 5807 if (mbq) {
5806 5808 port = (emlxs_port_t *)mbq->port;
5807 5809 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5808 5810 "Mailbox event. Completing Polled command.");
5809 5811 mbq->flag |= MBQ_COMPLETED;
5810 5812 }
5811 5813 mutex_exit(&EMLXS_MBOX_LOCK);
5812 5814
5813 5815 mutex_exit(&EMLXS_PORT_LOCK);
5814 5816 return (0);
5815 5817
5816 5818 case MBX_SLEEP:
5817 5819 case MBX_NOWAIT:
5818 5820 /* Check mbox_timer, it acts as a service flag too */
5819 5821 /* The first to service the mbox queue will clear the timer */
5820 5822 if (hba->mbox_timer) {
5821 5823 hba->mbox_timer = 0;
5822 5824
5823 5825 mutex_enter(&EMLXS_MBOX_LOCK);
5824 5826 mbq = (MAILBOXQ *)hba->mbox_mbq;
5825 5827 mutex_exit(&EMLXS_MBOX_LOCK);
5826 5828 }
5827 5829
5828 5830 if (!mbq) {
5829 5831 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5830 5832 "Mailbox event. No service required.");
5831 5833 mutex_exit(&EMLXS_PORT_LOCK);
5832 5834 return (0);
5833 5835 }
5834 5836
5835 5837 mb = (MAILBOX *)mbq;
5836 5838 mutex_exit(&EMLXS_PORT_LOCK);
5837 5839 break;
5838 5840
5839 5841 default:
5840 5842 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
5841 5843 "Invalid Mailbox flag (%x).");
5842 5844
5843 5845 mutex_exit(&EMLXS_PORT_LOCK);
5844 5846 return (0);
5845 5847 }
5846 5848
5847 5849 /* Set port context */
5848 5850 port = (emlxs_port_t *)mbq->port;
5849 5851
5850 5852 /* Get first word of mailbox */
5851 5853 if (hba->flag & FC_SLIM2_MODE) {
5852 5854 mbox = FC_SLIM2_MAILBOX(hba);
5853 5855 offset = (off_t)((uint64_t)((unsigned long)mbox)
5854 5856 - (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
5855 5857
5856 5858 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5857 5859 sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5858 5860 word0 = *((volatile uint32_t *)mbox);
5859 5861 word0 = BE_SWAP32(word0);
5860 5862 } else {
5861 5863 mbox = FC_SLIM1_MAILBOX(hba);
5862 5864 word0 = READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5863 5865 }
5864 5866
5865 5867 i = 0;
5866 5868 while (swpmb->mbxOwner == OWN_CHIP) {
5867 5869 if (i++ > 10000) {
5868 5870 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
5869 5871 "OWN_CHIP: %s: status=%x",
5870 5872 emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5871 5873 swpmb->mbxStatus);
5872 5874
5873 5875 return (1);
5874 5876 }
5875 5877
5876 5878 /* Get first word of mailbox */
5877 5879 if (hba->flag & FC_SLIM2_MODE) {
5878 5880 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5879 5881 offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
5880 5882 word0 = *((volatile uint32_t *)mbox);
5881 5883 word0 = BE_SWAP32(word0);
5882 5884 } else {
5883 5885 word0 =
5884 5886 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mbox));
5885 5887 }
5886 5888 }
5887 5889
5888 5890 /* Now that we are the owner, DMA Sync entire mailbox if needed */
5889 5891 if (hba->flag & FC_SLIM2_MODE) {
5890 5892 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, offset,
5891 5893 MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORKERNEL);
5892 5894
5893 5895 BE_SWAP32_BCOPY((uint8_t *)mbox, (uint8_t *)mb,
5894 5896 MAILBOX_CMD_BSIZE);
5895 5897 } else {
5896 5898 READ_SLIM_COPY(hba, (uint32_t *)mb, (uint32_t *)mbox,
5897 5899 MAILBOX_CMD_WSIZE);
5898 5900 }
5899 5901
5900 5902 #ifdef MBOX_EXT_SUPPORT
5901 5903 if (mbq->extbuf) {
5902 5904 uint32_t *mbox_ext =
5903 5905 (uint32_t *)((uint8_t *)mbox + MBOX_EXTENSION_OFFSET);
5904 5906 off_t offset_ext = offset + MBOX_EXTENSION_OFFSET;
5905 5907
5906 5908 if (hba->flag & FC_SLIM2_MODE) {
5907 5909 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
5908 5910 offset_ext, mbq->extsize,
5909 5911 DDI_DMA_SYNC_FORKERNEL);
5910 5912 BE_SWAP32_BCOPY((uint8_t *)mbox_ext,
5911 5913 (uint8_t *)mbq->extbuf, mbq->extsize);
5912 5914 } else {
5913 5915 READ_SLIM_COPY(hba, (uint32_t *)mbq->extbuf,
5914 5916 mbox_ext, (mbq->extsize / 4));
5915 5917 }
5916 5918 }
5917 5919 #endif /* MBOX_EXT_SUPPORT */
5918 5920
5919 5921 #ifdef FMA_SUPPORT
5920 5922 if (!(hba->flag & FC_SLIM2_MODE)) {
5921 5923 /* Access handle validation */
5922 5924 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
5923 5925 }
5924 5926 #endif /* FMA_SUPPORT */
5925 5927
5926 5928 /* Now sync the memory buffer if one was used */
5927 5929 if (mbq->bp) {
5928 5930 mbox_bp = (MATCHMAP *)mbq->bp;
5929 5931 EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
5930 5932 DDI_DMA_SYNC_FORKERNEL);
5931 5933 }
5932 5934
5933 5935 /* Mailbox has been completely received at this point */
5934 5936
5935 5937 if (mb->mbxCommand == MBX_HEARTBEAT) {
5936 5938 hba->heartbeat_active = 0;
5937 5939 goto done;
5938 5940 }
5939 5941
5940 5942 if (hba->mbox_queue_flag == MBX_SLEEP) {
5941 5943 if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5942 5944 swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5943 5945 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5944 5946 "Received. %s: status=%x Sleep.",
5945 5947 emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5946 5948 swpmb->mbxStatus);
5947 5949 }
5948 5950 } else {
5949 5951 if (swpmb->mbxCommand != MBX_DOWN_LOAD &&
5950 5952 swpmb->mbxCommand != MBX_DUMP_MEMORY) {
5951 5953 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5952 5954 "Completed. %s: status=%x",
5953 5955 emlxs_mb_cmd_xlate(swpmb->mbxCommand),
5954 5956 swpmb->mbxStatus);
5955 5957 }
5956 5958 }
5957 5959
5958 5960 /* Filter out passthru mailbox */
5959 5961 if (mbq->flag & MBQ_PASSTHRU) {
5960 5962 goto done;
5961 5963 }
5962 5964
5963 5965 if (mb->mbxStatus) {
5964 5966 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
5965 5967 "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
5966 5968 (uint32_t)mb->mbxStatus);
5967 5969 }
5968 5970
5969 5971 if (mbq->mbox_cmpl) {
5970 5972 rc = (mbq->mbox_cmpl)(hba, mbq);
5971 5973 /* If mbox was retried, return immediately */
5972 5974 if (rc) {
5973 5975 return (0);
5974 5976 }
5975 5977 }
5976 5978
5977 5979 done:
5978 5980
5979 5981 /* Clean up the mailbox area */
5980 5982 emlxs_mb_fini(hba, mb, mb->mbxStatus);
5981 5983
5982 5984 mbq = (MAILBOXQ *)emlxs_mb_get(hba);
5983 5985 if (mbq) {
5984 5986 /* Attempt to send pending mailboxes */
5985 5987 rc = emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
5986 5988 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
5987 5989 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
5988 5990 }
5989 5991 }
5990 5992 return (0);
5991 5993
5992 5994 } /* emlxs_handle_mb_event() */
5993 5995
5994 5996
/*
 * emlxs_sli3_timer
 *
 * SLI3-specific periodic timer entry point.  The only SLI3-level
 * periodic check currently performed is for mailbox command timeouts.
 */
static void
emlxs_sli3_timer(emlxs_hba_t *hba)
{
	/* Perform SLI3 level timer checks */

	/* Detect and recover mailbox commands that never completed */
	emlxs_sli3_timer_check_mbox(hba);

} /* emlxs_sli3_timer() */
6003 6005
6004 6006
6005 6007 static void
6006 6008 emlxs_sli3_timer_check_mbox(emlxs_hba_t *hba)
6007 6009 {
6008 6010 emlxs_port_t *port = &PPORT;
6009 6011 emlxs_config_t *cfg = &CFG;
6010 6012 MAILBOX *mb = NULL;
6011 6013 uint32_t word0;
6012 6014 uint32_t offset;
6013 6015 uint32_t ha_copy = 0;
6014 6016
6015 6017 if (!cfg[CFG_TIMEOUT_ENABLE].current) {
6016 6018 return;
6017 6019 }
6018 6020
6019 6021 mutex_enter(&EMLXS_PORT_LOCK);
6020 6022
6021 6023 /* Return if timer hasn't expired */
6022 6024 if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
6023 6025 mutex_exit(&EMLXS_PORT_LOCK);
6024 6026 return;
6025 6027 }
6026 6028
6027 6029 /* Mailbox timed out, first check for error attention */
6028 6030 ha_copy = emlxs_check_attention(hba);
6029 6031
6030 6032 if (ha_copy & HA_ERATT) {
6031 6033 hba->mbox_timer = 0;
6032 6034 mutex_exit(&EMLXS_PORT_LOCK);
6033 6035 emlxs_handle_ff_error(hba);
6034 6036 return;
6035 6037 }
6036 6038
6037 6039 if (hba->mbox_queue_flag) {
6038 6040 /* Get first word of mailbox */
6039 6041 if (hba->flag & FC_SLIM2_MODE) {
6040 6042 mb = FC_SLIM2_MAILBOX(hba);
6041 6043 offset =
6042 6044 (off_t)((uint64_t)((unsigned long)mb) - (uint64_t)
6043 6045 ((unsigned long)hba->sli.sli3.slim2.virt));
6044 6046
6045 6047 EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle,
6046 6048 offset, sizeof (uint32_t), DDI_DMA_SYNC_FORKERNEL);
6047 6049 word0 = *((volatile uint32_t *)mb);
6048 6050 word0 = BE_SWAP32(word0);
6049 6051 } else {
6050 6052 mb = FC_SLIM1_MAILBOX(hba);
6051 6053 word0 =
6052 6054 READ_SLIM_ADDR(hba, ((volatile uint32_t *)mb));
6053 6055 #ifdef FMA_SUPPORT
6054 6056 /* Access handle validation */
6055 6057 EMLXS_CHK_ACC_HANDLE(hba,
6056 6058 hba->sli.sli3.slim_acc_handle);
6057 6059 #endif /* FMA_SUPPORT */
6058 6060 }
6059 6061
6060 6062 mb = (MAILBOX *)&word0;
6061 6063
6062 6064 /* Check if mailbox has actually completed */
6063 6065 if (mb->mbxOwner == OWN_HOST) {
6064 6066 /* Read host attention register to determine */
6065 6067 /* interrupt source */
6066 6068 uint32_t ha_copy = emlxs_check_attention(hba);
6067 6069
6068 6070 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
6069 6071 "Mailbox attention missed: %s. Forcing event. "
6070 6072 "hc=%x ha=%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
6071 6073 hba->sli.sli3.hc_copy, ha_copy);
6072 6074
6073 6075 mutex_exit(&EMLXS_PORT_LOCK);
6074 6076
6075 6077 (void) emlxs_handle_mb_event(hba);
6076 6078
6077 6079 return;
6078 6080 }
6079 6081
6080 6082 /* The first to service the mbox queue will clear the timer */
6081 6083 /* We will service the mailbox here */
6082 6084 hba->mbox_timer = 0;
6083 6085
6084 6086 mutex_enter(&EMLXS_MBOX_LOCK);
6085 6087 mb = (MAILBOX *)hba->mbox_mbq;
6086 6088 mutex_exit(&EMLXS_MBOX_LOCK);
6087 6089 }
6088 6090
6089 6091 if (mb) {
6090 6092 switch (hba->mbox_queue_flag) {
6091 6093 case MBX_NOWAIT:
6092 6094 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6093 6095 "%s: Nowait.",
6094 6096 emlxs_mb_cmd_xlate(mb->mbxCommand));
6095 6097 break;
6096 6098
6097 6099 case MBX_SLEEP:
6098 6100 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6099 6101 "%s: mb=%p Sleep.",
6100 6102 emlxs_mb_cmd_xlate(mb->mbxCommand),
6101 6103 mb);
6102 6104 break;
6103 6105
6104 6106 case MBX_POLL:
6105 6107 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6106 6108 "%s: mb=%p Polled.",
6107 6109 emlxs_mb_cmd_xlate(mb->mbxCommand),
6108 6110 mb);
6109 6111 break;
6110 6112
6111 6113 default:
6112 6114 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
6113 6115 "%s: mb=%p (%d).",
6114 6116 emlxs_mb_cmd_xlate(mb->mbxCommand),
6115 6117 mb, hba->mbox_queue_flag);
6116 6118 break;
6117 6119 }
6118 6120 } else {
6119 6121 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
6120 6122 }
6121 6123
6122 6124 hba->flag |= FC_MBOX_TIMEOUT;
6123 6125 EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);
6124 6126
6125 6127 mutex_exit(&EMLXS_PORT_LOCK);
6126 6128
6127 6129 /* Perform mailbox cleanup */
6128 6130 /* This will wake any sleeping or polling threads */
6129 6131 emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
6130 6132
6131 6133 /* Trigger adapter shutdown */
6132 6134 emlxs_thread_spawn(hba, emlxs_shutdown_thread, NULL, NULL);
6133 6135
6134 6136 return;
6135 6137
6136 6138 } /* emlxs_sli3_timer_check_mbox() */
6137 6139
6138 6140
6139 6141 /*
6140 6142 * emlxs_mb_config_port Issue a CONFIG_PORT mailbox command
6141 6143 */
/*
 * emlxs_mb_config_port
 *
 * Build a CONFIG_PORT mailbox command in 'mbq' and initialize the
 * host-side Port Control Block (PCB) in the SLIM2 DMA area: ring
 * descriptors, host/port group pointer addresses, and the mailbox
 * address.  Always returns 0; the command itself is issued by the
 * caller.
 *
 * NOTE: 'slim' and 'mbox' stay NULL throughout and are only used in
 * expressions like &(slim->pcb) to compute structure-field offsets
 * (an offsetof idiom), which are then added to the SLIM2 physical
 * base address.
 */
static uint32_t
emlxs_mb_config_port(emlxs_hba_t *hba, MAILBOXQ *mbq, uint32_t sli_mode,
    uint32_t hbainit)
{
	MAILBOX *mb = (MAILBOX *)mbq;
	emlxs_vpd_t *vpd = &VPD;
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	RING *rp;
	uint64_t pcb;
	uint64_t mbx;
	uint64_t hgp;
	uint64_t pgp;
	uint64_t rgp;
	MAILBOX *mbox;
	SLIM2 *slim;
	SLI2_RDSC *rdsc;
	uint64_t offset;
	uint32_t Laddr;
	uint32_t i;

	cfg = &CFG;
	bzero((void *)mb, MAILBOX_CMD_BSIZE);
	/* NULL bases used only for offset computation below */
	mbox = NULL;
	slim = NULL;

	mb->mbxCommand = MBX_CONFIG_PORT;
	mb->mbxOwner = OWN_HOST;
	mbq->mbox_cmpl = NULL;

	mb->un.varCfgPort.pcbLen = sizeof (PCB);
	mb->un.varCfgPort.hbainit[0] = hbainit;

	/* Physical address of the PCB within the SLIM2 DMA buffer */
	pcb = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(slim->pcb));
	mb->un.varCfgPort.pcbLow = PADDR_LO(pcb);
	mb->un.varCfgPort.pcbHigh = PADDR_HI(pcb);

	/* Set Host pointers in SLIM flag */
	mb->un.varCfgPort.hps = 1;

	/* Initialize hba structure for assumed default SLI2 mode */
	/* If config port succeeds, then we will update it then */
	hba->sli_mode = sli_mode;
	hba->vpi_max = 0;
	hba->flag &= ~FC_NPIV_ENABLED;

	if (sli_mode == EMLXS_HBA_SLI3_MODE) {
		/* SLI3 adds error-recovery buffer mgmt (HBQs) and NPIV */
		mb->un.varCfgPort.sli_mode = EMLXS_HBA_SLI3_MODE;
		mb->un.varCfgPort.cerbm = 1;
		mb->un.varCfgPort.max_hbq = EMLXS_NUM_HBQ;

		if (cfg[CFG_NPIV_ENABLE].current) {
			if (vpd->feaLevelHigh >= 0x09) {
				/* Saturn and later chips support the */
				/* full vport count */
				if (hba->model_info.chip >= EMLXS_SATURN_CHIP) {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS - 1;
				} else {
					mb->un.varCfgPort.vpi_max =
					    MAX_VPORTS_LIMITED - 1;
				}

				mb->un.varCfgPort.cmv = 1;
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_debug_msg,
				    "CFGPORT: Firmware does not support NPIV. "
				    "level=%d", vpd->feaLevelHigh);
			}

		}
	}

	/*
	 * Now setup pcb
	 */
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.type = TYPE_NATIVE_SLI2;
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.feature = FEATURE_INITIAL_SLI2;
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.maxRing =
	    (hba->sli.sli3.ring_count - 1);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mailBoxSize =
	    sizeof (MAILBOX) + MBOX_EXTENSION_SIZE;

	/* Physical address of the mailbox within the SLIM2 buffer */
	mbx = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(slim->mbx));
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrHigh = PADDR_HI(mbx);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.mbAddrLow = PADDR_LO(mbx);


	/*
	 * Set up HGP - Port Memory
	 *
	 * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
	 * RR0Get 0xc4 0x84
	 * CR1Put 0xc8 0x88
	 * RR1Get 0xcc 0x8c
	 * CR2Put 0xd0 0x90
	 * RR2Get 0xd4 0x94
	 * CR3Put 0xd8 0x98
	 * RR3Get 0xdc 0x9c
	 *
	 * Reserved 0xa0-0xbf
	 *
	 * If HBQs configured:
	 * HBQ 0 Put ptr 0xc0
	 * HBQ 1 Put ptr 0xc4
	 * HBQ 2 Put ptr 0xc8
	 * ...
	 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
	 */

	if (sli_mode >= EMLXS_HBA_SLI3_MODE) {
		/* ERBM is enabled */
		hba->sli.sli3.hgp_ring_offset = 0x80;
		hba->sli.sli3.hgp_hbq_offset = 0xC0;

		hba->sli.sli3.iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		hba->sli.sli3.iocb_rsp_size = SLI3_IOCB_RSP_SIZE;

	} else { /* SLI2 */
		/* ERBM is disabled */
		hba->sli.sli3.hgp_ring_offset = 0xC0;
		hba->sli.sli3.hgp_hbq_offset = 0;

		hba->sli.sli3.iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		hba->sli.sli3.iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
	}

	/* The Sbus card uses Host Memory. The PCI card uses SLIM POINTER */
	if (hba->bus_type == SBUS_FC) {
		hgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(mbox->us.s2.host));
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
		    PADDR_HI(hgp);
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
		    PADDR_LO(hgp);
	} else {
		/* PCI: host group pointers live in the adapter's SLIM */
		/* BAR; read the BAR addresses from PCI config space */
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrHigh =
		    (uint32_t)ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_1_REGISTER));

		Laddr =
		    ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCI_BAR_0_REGISTER));
		/* Mask off the BAR type/flag bits */
		Laddr &= ~0x4;
		((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.hgpAddrLow =
		    (uint32_t)(Laddr + hba->sli.sli3.hgp_ring_offset);

#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif	/* FMA_SUPPORT */

	}

	/* Port group pointers are in host (SLIM2 DMA) memory */
	pgp = hba->sli.sli3.slim2.phys +
	    (uint64_t)((unsigned long)&(mbox->us.s2.port));
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrHigh =
	    PADDR_HI(pgp);
	((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.pgpAddrLow =
	    PADDR_LO(pgp);

	/* Lay out each ring's command and response IOCB areas */
	/* consecutively in the SLIM2 IOCB region */
	offset = 0;
	for (i = 0; i < 4; i++) {
		rp = &hba->sli.sli3.ring[i];
		rdsc = &((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb.rdsc[i];

		/* Setup command ring */
		rgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
		rdsc->cmdAddrHigh = PADDR_HI(rgp);
		rdsc->cmdAddrLow = PADDR_LO(rgp);
		rdsc->cmdEntries = rp->fc_numCiocb;

		rp->fc_cmdringaddr =
		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
		offset += rdsc->cmdEntries * hba->sli.sli3.iocb_cmd_size;

		/* Setup response ring */
		rgp = hba->sli.sli3.slim2.phys +
		    (uint64_t)((unsigned long)&(slim->IOCBs[offset]));
		rdsc->rspAddrHigh = PADDR_HI(rgp);
		rdsc->rspAddrLow = PADDR_LO(rgp);
		rdsc->rspEntries = rp->fc_numRiocb;

		rp->fc_rspringaddr =
		    (void *)&((SLIM2 *)hba->sli.sli3.slim2.virt)->IOCBs[offset];
		offset += rdsc->rspEntries * hba->sli.sli3.iocb_rsp_size;
	}

	/* Byte-swap the PCB in place for the big-endian adapter */
	BE_SWAP32_BCOPY((uint8_t *)
	    (&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
	    (uint8_t *)(&((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb),
	    sizeof (PCB));

	/* Flush the PCB to the device before the command is issued */
	offset = ((uint64_t)((unsigned long)
	    &(((SLIM2 *)hba->sli.sli3.slim2.virt)->pcb)) -
	    (uint64_t)((unsigned long)hba->sli.sli3.slim2.virt));
	EMLXS_MPDATA_SYNC(hba->sli.sli3.slim2.dma_handle, (off_t)offset,
	    sizeof (PCB), DDI_DMA_SYNC_FORDEV);

	return (0);

} /* emlxs_mb_config_port() */
6346 6348
6347 6349
6348 6350 static uint32_t
6349 6351 emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
6350 6352 {
6351 6353 emlxs_port_t *port = &PPORT;
6352 6354 HBQ_INIT_t *hbq;
6353 6355 MATCHMAP *mp;
6354 6356 HBQE_t *hbqE;
6355 6357 MAILBOX *mb;
6356 6358 MAILBOXQ *mbq;
6357 6359 void *ioa2;
6358 6360 uint32_t j;
6359 6361 uint32_t count;
6360 6362 uint32_t size;
6361 6363 uint32_t ringno;
6362 6364 uint32_t seg;
6363 6365
6364 6366 switch (hbq_id) {
6365 6367 case EMLXS_ELS_HBQ_ID:
6366 6368 count = MEM_ELSBUF_COUNT;
6367 6369 size = MEM_ELSBUF_SIZE;
6368 6370 ringno = FC_ELS_RING;
6369 6371 seg = MEM_ELSBUF;
6370 6372 HBASTATS.ElsUbPosted = count;
6371 6373 break;
6372 6374
6373 6375 case EMLXS_IP_HBQ_ID:
6374 6376 count = MEM_IPBUF_COUNT;
6375 6377 size = MEM_IPBUF_SIZE;
6376 6378 ringno = FC_IP_RING;
6377 6379 seg = MEM_IPBUF;
6378 6380 HBASTATS.IpUbPosted = count;
6379 6381 break;
6380 6382
6381 6383 case EMLXS_CT_HBQ_ID:
6382 6384 count = MEM_CTBUF_COUNT;
6383 6385 size = MEM_CTBUF_SIZE;
6384 6386 ringno = FC_CT_RING;
6385 6387 seg = MEM_CTBUF;
6386 6388 HBASTATS.CtUbPosted = count;
6387 6389 break;
6388 6390
6389 6391 #ifdef SFCT_SUPPORT
6390 6392 case EMLXS_FCT_HBQ_ID:
6391 6393 count = MEM_FCTBUF_COUNT;
6392 6394 size = MEM_FCTBUF_SIZE;
6393 6395 ringno = FC_FCT_RING;
6394 6396 seg = MEM_FCTBUF;
6395 6397 HBASTATS.FctUbPosted = count;
6396 6398 break;
6397 6399 #endif /* SFCT_SUPPORT */
6398 6400
6399 6401 default:
6400 6402 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6401 6403 "hbq_setup: Invalid HBQ id. (%x)", hbq_id);
6402 6404 return (1);
6403 6405 }
6404 6406
6405 6407 /* Configure HBQ */
6406 6408 hbq = &hba->sli.sli3.hbq_table[hbq_id];
6407 6409 hbq->HBQ_numEntries = count;
6408 6410
6409 6411 /* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */
6410 6412 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == 0) {
6411 6413 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6412 6414 "hbq_setup: Unable to get mailbox.");
6413 6415 return (1);
6414 6416 }
6415 6417 mb = (MAILBOX *)mbq;
6416 6418
6417 6419 /* Allocate HBQ Host buffer and Initialize the HBQEs */
6418 6420 if (emlxs_hbq_alloc(hba, hbq_id)) {
6419 6421 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6420 6422 "hbq_setup: Unable to allocate HBQ.");
6421 6423 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6422 6424 return (1);
6423 6425 }
6424 6426
6425 6427 hbq->HBQ_recvNotify = 1;
6426 6428 hbq->HBQ_num_mask = 0; /* Bind to ring */
6427 6429 hbq->HBQ_profile = 0; /* Selection profile */
6428 6430 /* 0=all, 7=logentry */
6429 6431 hbq->HBQ_ringMask = 1 << ringno; /* b0100 * ringno - Binds */
6430 6432 /* HBQ to a ring */
6431 6433 /* Ring0=b0001, Ring1=b0010, */
6432 6434 /* Ring2=b0100 */
6433 6435 hbq->HBQ_headerLen = 0; /* 0 if not profile 4 or 5 */
6434 6436 hbq->HBQ_logEntry = 0; /* Set to 1 if this HBQ will */
6435 6437 /* be used for */
6436 6438 hbq->HBQ_id = hbq_id;
6437 6439 hbq->HBQ_PutIdx_next = 0;
6438 6440 hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
6439 6441 hbq->HBQ_GetIdx = 0;
6440 6442 hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
6441 6443 bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));
6442 6444
6443 6445 /* Fill in POST BUFFERs in HBQE */
6444 6446 hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
6445 6447 for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
6446 6448 /* Allocate buffer to post */
6447 6449 if ((mp = (MATCHMAP *)emlxs_mem_get(hba,
6448 6450 seg)) == 0) {
6449 6451 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
6450 6452 "hbq_setup: Unable to allocate HBQ buffer. "
6451 6453 "cnt=%d", j);
6452 6454 emlxs_hbq_free_all(hba, hbq_id);
6453 6455 return (1);
6454 6456 }
6455 6457
6456 6458 hbq->HBQ_PostBufs[j] = mp;
6457 6459
6458 6460 hbqE->unt.ext.HBQ_tag = hbq_id;
6459 6461 hbqE->unt.ext.HBQE_tag = j;
6460 6462 hbqE->bde.tus.f.bdeSize = size;
6461 6463 hbqE->bde.tus.f.bdeFlags = 0;
6462 6464 hbqE->unt.w = BE_SWAP32(hbqE->unt.w);
6463 6465 hbqE->bde.tus.w = BE_SWAP32(hbqE->bde.tus.w);
6464 6466 hbqE->bde.addrLow =
6465 6467 BE_SWAP32(PADDR_LO(mp->phys));
6466 6468 hbqE->bde.addrHigh =
6467 6469 BE_SWAP32(PADDR_HI(mp->phys));
6468 6470 }
6469 6471
6470 6472 /* Issue CONFIG_HBQ */
6471 6473 emlxs_mb_config_hbq(hba, mbq, hbq_id);
6472 6474 if (emlxs_sli3_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
6473 6475 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
6474 6476 "hbq_setup: Unable to config HBQ. cmd=%x status=%x",
6475 6477 mb->mbxCommand, mb->mbxStatus);
6476 6478
6477 6479 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6478 6480 emlxs_hbq_free_all(hba, hbq_id);
6479 6481 return (1);
6480 6482 }
6481 6483
6482 6484 /* Setup HBQ Get/Put indexes */
6483 6485 ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6484 6486 (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6485 6487 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, hbq->HBQ_PutIdx);
6486 6488
6487 6489 hba->sli.sli3.hbq_count++;
6488 6490
6489 6491 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
6490 6492
6491 6493 #ifdef FMA_SUPPORT
6492 6494 /* Access handle validation */
6493 6495 if (emlxs_fm_check_acc_handle(hba, hba->sli.sli3.slim_acc_handle)
6494 6496 != DDI_FM_OK) {
6495 6497 EMLXS_MSGF(EMLXS_CONTEXT,
6496 6498 &emlxs_invalid_access_handle_msg, NULL);
6497 6499 emlxs_hbq_free_all(hba, hbq_id);
6498 6500 return (1);
6499 6501 }
6500 6502 #endif /* FMA_SUPPORT */
6501 6503
6502 6504 return (0);
6503 6505
6504 6506 } /* emlxs_hbq_setup() */
6505 6507
6506 6508
6507 6509 extern void
6508 6510 emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
6509 6511 {
6510 6512 HBQ_INIT_t *hbq;
6511 6513 MBUF_INFO *buf_info;
6512 6514 MBUF_INFO bufinfo;
6513 6515 uint32_t seg;
6514 6516 uint32_t j;
6515 6517
6516 6518 switch (hbq_id) {
6517 6519 case EMLXS_ELS_HBQ_ID:
6518 6520 seg = MEM_ELSBUF;
6519 6521 HBASTATS.ElsUbPosted = 0;
6520 6522 break;
6521 6523
6522 6524 case EMLXS_IP_HBQ_ID:
6523 6525 seg = MEM_IPBUF;
6524 6526 HBASTATS.IpUbPosted = 0;
6525 6527 break;
6526 6528
6527 6529 case EMLXS_CT_HBQ_ID:
6528 6530 seg = MEM_CTBUF;
6529 6531 HBASTATS.CtUbPosted = 0;
6530 6532 break;
6531 6533
6532 6534 #ifdef SFCT_SUPPORT
6533 6535 case EMLXS_FCT_HBQ_ID:
6534 6536 seg = MEM_FCTBUF;
6535 6537 HBASTATS.FctUbPosted = 0;
6536 6538 break;
6537 6539 #endif /* SFCT_SUPPORT */
6538 6540
6539 6541 default:
6540 6542 return;
6541 6543 }
6542 6544
6543 6545
6544 6546 hbq = &hba->sli.sli3.hbq_table[hbq_id];
6545 6547
6546 6548 if (hbq->HBQ_host_buf.virt != 0) {
6547 6549 for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
6548 6550 emlxs_mem_put(hba, seg,
6549 6551 (void *)hbq->HBQ_PostBufs[j]);
6550 6552 hbq->HBQ_PostBufs[j] = NULL;
6551 6553 }
6552 6554 hbq->HBQ_PostBufCnt = 0;
6553 6555
6554 6556 buf_info = &bufinfo;
6555 6557 bzero(buf_info, sizeof (MBUF_INFO));
6556 6558
6557 6559 buf_info->size = hbq->HBQ_host_buf.size;
6558 6560 buf_info->virt = hbq->HBQ_host_buf.virt;
6559 6561 buf_info->phys = hbq->HBQ_host_buf.phys;
6560 6562 buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
6561 6563 buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
6562 6564 buf_info->flags = FC_MBUF_DMA;
6563 6565
6564 6566 emlxs_mem_free(hba, buf_info);
6565 6567
6566 6568 hbq->HBQ_host_buf.virt = NULL;
6567 6569 }
6568 6570
6569 6571 return;
6570 6572
6571 6573 } /* emlxs_hbq_free_all() */
6572 6574
6573 6575
6574 6576 extern void
6575 6577 emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
6576 6578 {
6577 6579 #ifdef FMA_SUPPORT
6578 6580 emlxs_port_t *port = &PPORT;
6579 6581 #endif /* FMA_SUPPORT */
6580 6582 void *ioa2;
6581 6583 uint32_t status;
6582 6584 uint32_t HBQ_PortGetIdx;
6583 6585 HBQ_INIT_t *hbq;
6584 6586
6585 6587 switch (hbq_id) {
6586 6588 case EMLXS_ELS_HBQ_ID:
6587 6589 HBASTATS.ElsUbPosted++;
6588 6590 break;
6589 6591
6590 6592 case EMLXS_IP_HBQ_ID:
6591 6593 HBASTATS.IpUbPosted++;
6592 6594 break;
6593 6595
6594 6596 case EMLXS_CT_HBQ_ID:
6595 6597 HBASTATS.CtUbPosted++;
6596 6598 break;
6597 6599
6598 6600 #ifdef SFCT_SUPPORT
6599 6601 case EMLXS_FCT_HBQ_ID:
6600 6602 HBASTATS.FctUbPosted++;
6601 6603 break;
6602 6604 #endif /* SFCT_SUPPORT */
6603 6605
6604 6606 default:
6605 6607 return;
6606 6608 }
6607 6609
6608 6610 hbq = &hba->sli.sli3.hbq_table[hbq_id];
6609 6611
6610 6612 hbq->HBQ_PutIdx =
6611 6613 (hbq->HBQ_PutIdx + 1 >=
6612 6614 hbq->HBQ_numEntries) ? 0 : hbq->HBQ_PutIdx + 1;
6613 6615
6614 6616 if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
6615 6617 HBQ_PortGetIdx =
6616 6618 BE_SWAP32(((SLIM2 *)hba->sli.sli3.slim2.virt)->mbx.us.s2.
6617 6619 HBQ_PortGetIdx[hbq_id]);
6618 6620
6619 6621 hbq->HBQ_GetIdx = HBQ_PortGetIdx;
6620 6622
6621 6623 if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
6622 6624 return;
6623 6625 }
6624 6626 }
6625 6627
6626 6628 ioa2 = (void *)((char *)hba->sli.sli3.slim_addr +
6627 6629 (hba->sli.sli3.hgp_hbq_offset + (hbq_id * sizeof (uint32_t))));
6628 6630 status = hbq->HBQ_PutIdx;
6629 6631 WRITE_SLIM_ADDR(hba, (volatile uint32_t *)ioa2, status);
6630 6632
6631 6633 #ifdef FMA_SUPPORT
6632 6634 /* Access handle validation */
6633 6635 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.slim_acc_handle);
6634 6636 #endif /* FMA_SUPPORT */
6635 6637
6636 6638 return;
6637 6639
6638 6640 } /* emlxs_update_HBQ_index() */
6639 6641
6640 6642
6641 6643 static void
6642 6644 emlxs_sli3_enable_intr(emlxs_hba_t *hba)
6643 6645 {
6644 6646 #ifdef FMA_SUPPORT
6645 6647 emlxs_port_t *port = &PPORT;
6646 6648 #endif /* FMA_SUPPORT */
6647 6649 uint32_t status;
6648 6650
6649 6651 /* Enable mailbox, error attention interrupts */
6650 6652 status = (uint32_t)(HC_MBINT_ENA);
6651 6653
6652 6654 /* Enable ring interrupts */
6653 6655 if (hba->sli.sli3.ring_count >= 4) {
6654 6656 status |=
6655 6657 (HC_R3INT_ENA | HC_R2INT_ENA | HC_R1INT_ENA |
6656 6658 HC_R0INT_ENA);
6657 6659 } else if (hba->sli.sli3.ring_count == 3) {
6658 6660 status |= (HC_R2INT_ENA | HC_R1INT_ENA | HC_R0INT_ENA);
6659 6661 } else if (hba->sli.sli3.ring_count == 2) {
6660 6662 status |= (HC_R1INT_ENA | HC_R0INT_ENA);
6661 6663 } else if (hba->sli.sli3.ring_count == 1) {
6662 6664 status |= (HC_R0INT_ENA);
6663 6665 }
6664 6666
6665 6667 hba->sli.sli3.hc_copy = status;
6666 6668 WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
6667 6669
6668 6670 #ifdef FMA_SUPPORT
6669 6671 /* Access handle validation */
6670 6672 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
6671 6673 #endif /* FMA_SUPPORT */
6672 6674
6673 6675 } /* emlxs_sli3_enable_intr() */
6674 6676
6675 6677
/*
 * emlxs_enable_latt
 *
 * Enable the link attention interrupt by setting HC_LAINT_ENA in the
 * cached host control value and writing it to the HC register.
 * EMLXS_PORT_LOCK serializes updates to hc_copy and the register.
 */
static void
emlxs_enable_latt(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif	/* FMA_SUPPORT */

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->sli.sli3.hc_copy |= HC_LAINT_ENA;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif	/* FMA_SUPPORT */
	mutex_exit(&EMLXS_PORT_LOCK);

} /* emlxs_enable_latt() */
6693 6695
6694 6696
/*
 * emlxs_sli3_disable_intr
 *
 * Replace the host control register (and the cached hc_copy) with
 * 'att'.  Passing 0 disables all adapter interrupts; a nonzero value
 * leaves only the given enable bits set.
 */
static void
emlxs_sli3_disable_intr(emlxs_hba_t *hba, uint32_t att)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif	/* FMA_SUPPORT */

	/* Disable all adapter interrupts */
	hba->sli.sli3.hc_copy = att;
	WRITE_CSR_REG(hba, FC_HC_REG(hba), hba->sli.sli3.hc_copy);
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif	/* FMA_SUPPORT */

} /* emlxs_sli3_disable_intr() */
6711 6713
6712 6714
/*
 * emlxs_check_attention
 *
 * Read and return the current contents of the host attention (HA)
 * register.
 */
static uint32_t
emlxs_check_attention(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif	/* FMA_SUPPORT */
	uint32_t ha_copy;

	ha_copy = READ_CSR_REG(hba, FC_HA_REG(hba));
#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.csr_acc_handle);
#endif	/* FMA_SUPPORT */
	return (ha_copy);

} /* emlxs_check_attention() */
6729 6731
6730 6732
/*
 * emlxs_sli3_poll_erratt
 *
 * Poll the host attention register for an adapter error attention
 * (HA_ERATT) and, if present, count it and start error recovery.
 * Used when error attentions must be detected without interrupts.
 */
static void
emlxs_sli3_poll_erratt(emlxs_hba_t *hba)
{
	uint32_t ha_copy;

	ha_copy = emlxs_check_attention(hba);

	/* Adapter error */
	if (ha_copy & HA_ERATT) {
		/* IntrEvent[6] tallies error attention events */
		HBASTATS.IntrEvent[6]++;
		emlxs_handle_ff_error(hba);
	}

} /* emlxs_sli3_poll_erratt() */
6745 6747
6746 6748
/*
 * emlxs_sli3_reg_did_mbcmpl
 *
 * Mailbox completion handler for REG_LOGIN64 commands issued by
 * emlxs_sli3_reg_did().  On success, creates/updates the node for the
 * registered DID and performs follow-up work (fabric login handling,
 * restricted-login unregistration, DHCHAP authentication start).  On
 * failure, may retry the mailbox or clean up.
 *
 * Returns 1 if the mailbox was retried (caller must not free it),
 * 0 otherwise.
 */
static uint32_t
emlxs_sli3_reg_did_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = (emlxs_port_t *)mbq->port;
	MAILBOXQ *mboxq;
	MAILBOX *mb;
	MATCHMAP *mp;
	NODELIST *ndlp;
	emlxs_port_t *vport;
	SERV_PARM *sp;
	int32_t i;
	uint32_t control;
	uint32_t ldata;
	uint32_t ldid;
	uint16_t lrpi;
	uint16_t lvpi;
	uint32_t rval;

	mb = (MAILBOX *)mbq;

	if (mb->mbxStatus) {
		if (mb->mbxStatus == MBXERR_NO_RESOURCES) {
			/* control==0 appears to mark a non-retryable */
			/* case -- TODO confirm against firmware docs */
			control = mb->un.varRegLogin.un.sp.bdeSize;
			if (control == 0) {
				/* Special handle for vport PLOGI */
				if (mbq->iocbq == (uint8_t *)1) {
					mbq->iocbq = NULL;
				}
				return (0);
			}
			/* Resource shortage is transient; retry */
			emlxs_mb_retry(hba, mbq);
			return (1);
		}
		if (mb->mbxStatus == MBXERR_RPI_FULL) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_node_create_failed_msg,
			    "Limit reached. count=%d", port->node_count);
		}

		/* Special handle for vport PLOGI */
		if (mbq->iocbq == (uint8_t *)1) {
			mbq->iocbq = NULL;
		}

		return (0);
	}

	/* The service parameter buffer attached by reg_did */
	mp = (MATCHMAP *)mbq->bp;
	if (!mp) {
		return (0);
	}

	/* Word 5 carries the VPI; switch to that vport's context */
	ldata = mb->un.varWords[5];
	lvpi = (ldata & 0xffff);
	port = &VPORT(lvpi);

	/* First copy command data */
	ldata = mb->un.varWords[0]; /* get rpi */
	lrpi = ldata & 0xffff;

	ldata = mb->un.varWords[1]; /* get did */
	ldid = ldata & MASK_DID;

	sp = (SERV_PARM *)mp->virt;

	/* Create or update the node */
	ndlp = emlxs_node_create(port, ldid, lrpi, sp);

	if (ndlp == NULL) {
		emlxs_ub_priv_t *ub_priv;

		/*
		 * Fake a mailbox error, so the mbox_fini
		 * can take appropriate action
		 */
		mb->mbxStatus = MBXERR_RPI_FULL;
		if (mbq->ubp) {
			ub_priv = ((fc_unsol_buf_t *)mbq->ubp)->ub_fca_private;
			ub_priv->flags |= EMLXS_UB_REPLY;
		}

		/* This must be (0xFFFFFE) which was registered by vport */
		if (lrpi == 0) {
			return (0);
		}

		/* Node creation failed: unregister the RPI the */
		/* adapter just assigned so it isn't leaked */
		if (!(mboxq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "reg_did_mbcmpl:failed. Unable to allocate mbox");
			return (0);
		}

		mb = (MAILBOX *)mboxq->mbox;
		mb->un.varUnregLogin.rpi = lrpi;
		mb->un.varUnregLogin.vpi = lvpi;

		mb->mbxCommand = MBX_UNREG_LOGIN;
		mb->mbxOwner = OWN_HOST;
		mboxq->sbp = NULL;
		mboxq->ubp = NULL;
		mboxq->iocbq = NULL;
		mboxq->mbox_cmpl = NULL;
		mboxq->context = NULL;
		mboxq->port = (void *)port;

		rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mboxq, MBX_NOWAIT, 0);
		if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "reg_did_mbcmpl:failed. Unable to send request.");

			emlxs_mem_put(hba, MEM_MBOX, (void *)mboxq);
			return (0);
		}

		return (0);
	}

	if (ndlp->nlp_DID == FABRIC_DID) {
		/* FLOGI/FDISC successfully completed on this port */
		mutex_enter(&EMLXS_PORT_LOCK);
		port->flag |= EMLXS_PORT_FLOGI_CMPL;
		mutex_exit(&EMLXS_PORT_LOCK);

		/* If CLEAR_LA has been sent, then attempt to */
		/* register the vpi now */
		if (hba->state == FC_READY) {
			(void) emlxs_mb_reg_vpi(port, NULL);
		}

		/*
		 * If NPIV Fabric support has just been established on
		 * the physical port, then notify the vports of the
		 * link up
		 */
		if ((lvpi == 0) &&
		    (hba->flag & FC_NPIV_ENABLED) &&
		    (hba->flag & FC_NPIV_SUPPORTED)) {
			/* Skip the physical port */
			for (i = 1; i < MAX_VPORTS; i++) {
				vport = &VPORT(i);

				if (!(vport->flag & EMLXS_PORT_BOUND) ||
				    !(vport->flag &
				    EMLXS_PORT_ENABLED)) {
					continue;
				}

				emlxs_port_online(vport);
			}
		}
	}

	/* Check for special restricted login flag */
	if (mbq->iocbq == (uint8_t *)1) {
		mbq->iocbq = NULL;
		(void) EMLXS_SLI_UNREG_NODE(port, ndlp, NULL, NULL, NULL);
		return (0);
	}

	/* Needed for FCT trigger in emlxs_mb_deferred_cmpl */
	if (mbq->sbp) {
		((emlxs_buf_t *)mbq->sbp)->node = ndlp;
	}

#ifdef DHCHAP_SUPPORT
	if (mbq->sbp || mbq->ubp) {
		if (emlxs_dhc_auth_start(port, ndlp, mbq->sbp,
		    mbq->ubp) == 0) {
			/* Auth started - auth completion will */
			/* handle sbp and ubp now */
			mbq->sbp = NULL;
			mbq->ubp = NULL;
		}
	}
#endif	/* DHCHAP_SUPPORT */

	return (0);

} /* emlxs_sli3_reg_did_mbcmpl() */
6926 6928
6927 6929
/*
 * emlxs_sli3_reg_did
 *
 * Register a remote port's DID with the adapter by issuing a
 * REG_LOGIN64 mailbox command carrying the remote port's service
 * parameters.  Completion is handled by emlxs_sli3_reg_did_mbcmpl(),
 * which creates the node.  sbp/ubp/iocbq are passed through to the
 * completion for deferred handling.
 *
 * Returns 0 if the command was issued, 1 on validation or resource
 * failure (all allocations are released on failure).
 */
static uint32_t
emlxs_sli3_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
    emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	emlxs_hba_t *hba = HBA;
	MATCHMAP *mp;
	MAILBOXQ *mbq;
	MAILBOX *mb;
	uint32_t rval;

	/* Check for invalid node ids to register */
	if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
		return (1);
	}

	/* DIDs are 24-bit; reject anything with high bits set */
	if (did & 0xff000000) {
		return (1);
	}

	if ((rval = emlxs_mb_check_sparm(hba, param))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Invalid service parameters. did=%06x rval=%d", did,
		    rval);

		return (1);
	}

	/* Check if the node limit has been reached */
	if (port->node_count >= hba->max_nodes) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Limit reached. did=%06x count=%d", did,
		    port->node_count);

		return (1);
	}

	if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Unable to allocate mailbox. did=%x", did);

		return (1);
	}
	mb = (MAILBOX *)mbq->mbox;
	bzero((void *)mb, MAILBOX_CMD_BSIZE);

	/* Build login request */
	if ((mp = (MATCHMAP *)emlxs_mem_get(hba, MEM_BUF)) == 0) {
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Unable to allocate buffer. did=%x", did);
		return (1);
	}
	/* The adapter DMAs the service parameters from this buffer */
	bcopy((void *)param, (void *)mp->virt, sizeof (SERV_PARM));

	mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (SERV_PARM);
	mb->un.varRegLogin.un.sp64.addrHigh = PADDR_HI(mp->phys);
	mb->un.varRegLogin.un.sp64.addrLow = PADDR_LO(mp->phys);
	mb->un.varRegLogin.did = did;
	mb->un.varWords[30] = 0;	/* flags */
	mb->mbxCommand = MBX_REG_LOGIN64;
	mb->mbxOwner = OWN_HOST;
	mb->un.varRegLogin.vpi = port->vpi;
	mb->un.varRegLogin.rpi = 0;

	/* Attach context for the completion handler */
	mbq->sbp = (void *)sbp;
	mbq->ubp = (void *)ubp;
	mbq->iocbq = (void *)iocbq;
	mbq->bp = (void *)mp;
	mbq->mbox_cmpl = emlxs_sli3_reg_did_mbcmpl;
	mbq->context = NULL;
	mbq->port = (void *)port;

	rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
	if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
		emlxs_mem_put(hba, MEM_BUF, (void *)mp);
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Unable to send mbox. did=%x", did);
		return (1);
	}

	return (0);

} /* emlxs_sli3_reg_did() */
7014 7016
7015 7017
7016 7018 /*ARGSUSED*/
7017 7019 static uint32_t
7018 7020 emlxs_sli3_unreg_node_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
7019 7021 {
7020 7022 emlxs_port_t *port = (emlxs_port_t *)mbq->port;
7021 7023 MAILBOX *mb;
7022 7024 NODELIST *node;
7023 7025 uint16_t rpi;
7024 7026
7025 7027 node = (NODELIST *)mbq->context;
7026 7028 mb = (MAILBOX *)mbq;
7027 7029 rpi = (node)? node->nlp_Rpi:0xffff;
7028 7030
7029 7031 if (mb->mbxStatus) {
7030 7032 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7031 7033 "unreg_node_mbcmpl:failed. node=%p rpi=%d status=%x",
7032 7034 node, rpi, mb->mbxStatus);
7033 7035
7034 7036 return (0);
7035 7037 }
7036 7038
7037 7039 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7038 7040 "unreg_node_mbcmpl: node=%p rpi=%d",
7039 7041 node, rpi);
7040 7042
7041 7043 if (node) {
7042 7044 emlxs_node_rm(port, node);
7043 7045
7044 7046 } else { /* All nodes */
7045 7047 emlxs_node_destroy_all(port);
7046 7048 }
7047 7049
7048 7050 return (0);
7049 7051
7050 7052 } /* emlxs_sli3_unreg_node_mbcmpl */
7051 7053
7052 7054
7053 7055 static uint32_t
7054 7056 emlxs_sli3_unreg_node(emlxs_port_t *port, NODELIST *node, emlxs_buf_t *sbp,
7055 7057 fc_unsol_buf_t *ubp, IOCBQ *iocbq)
7056 7058 {
7057 7059 emlxs_hba_t *hba = HBA;
7058 7060 MAILBOXQ *mbq;
7059 7061 MAILBOX *mb;
7060 7062 uint16_t rpi;
7061 7063 uint32_t rval;
7062 7064
7063 7065 if (node) {
7064 7066 /* Check for base node */
7065 7067 if (node == &port->node_base) {
7066 7068 /* just flush base node */
7067 7069 (void) emlxs_tx_node_flush(port, &port->node_base,
7068 7070 0, 0, 0);
7069 7071 (void) emlxs_chipq_node_flush(port, 0,
7070 7072 &port->node_base, 0);
7071 7073
7072 7074 port->did = 0;
7073 7075
7074 7076 /* Return now */
7075 7077 return (1);
7076 7078 }
7077 7079
7078 7080 rpi = (uint16_t)node->nlp_Rpi;
7079 7081
7080 7082 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7081 7083 "unreg_node:%p rpi=%d", node, rpi);
7082 7084
7083 7085 /* This node must be (0xFFFFFE) which registered by vport */
7084 7086 if (rpi == 0) {
7085 7087 emlxs_node_rm(port, node);
7086 7088 return (0);
7087 7089 }
7088 7090
7089 7091 } else { /* Unreg all nodes */
7090 7092 rpi = 0xffff;
7091 7093
7092 7094 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7093 7095 "unreg_node: All");
7094 7096 }
7095 7097
7096 7098 if (!(mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
7097 7099 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7098 7100 "unreg_node:failed. Unable to allocate mbox");
7099 7101 return (1);
7100 7102 }
7101 7103
7102 7104 mb = (MAILBOX *)mbq->mbox;
7103 7105 mb->un.varUnregLogin.rpi = rpi;
7104 7106 mb->un.varUnregLogin.vpi = port->vpip->VPI;
7105 7107
7106 7108 mb->mbxCommand = MBX_UNREG_LOGIN;
7107 7109 mb->mbxOwner = OWN_HOST;
7108 7110 mbq->sbp = (void *)sbp;
7109 7111 mbq->ubp = (void *)ubp;
7110 7112 mbq->iocbq = (void *)iocbq;
7111 7113 mbq->mbox_cmpl = emlxs_sli3_unreg_node_mbcmpl;
7112 7114 mbq->context = (void *)node;
7113 7115 mbq->port = (void *)port;
7114 7116
7115 7117 rval = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
7116 7118 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
7117 7119 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7118 7120 "unreg_node:failed. Unable to send request.");
7119 7121
7120 7122 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
7121 7123 return (1);
7122 7124 }
7123 7125
7124 7126 return (0);
7125 7127
7126 7128 } /* emlxs_sli3_unreg_node() */
|
↓ open down ↓ |
5826 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX