/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at
 * http://www.opensource.org/licenses/cddl1.txt.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004-2012 Emulex. All rights reserved.
 * Use is subject to license terms.
 */

#include <emlxs.h>

/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_FCP_C);

#define	EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
	PADDR(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));

static void	emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
		    Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);

#define	SCSI3_PERSISTENT_RESERVE_IN	0x5e
#define	SCSI_INQUIRY			0x12
#define	SCSI_RX_DIAG			0x1C


/*
 * emlxs_handle_fcp_event
 *
 * Description: Process an FCP Rsp Ring completion
 *
 */
/* ARGSUSED */
extern void
emlxs_handle_fcp_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	IOCB *cmd;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt = NULL;
#ifdef SAN_DIAG_SUPPORT
	NODELIST *ndlp;
#endif
	uint32_t iostat;
	uint8_t localstat;
	fcp_rsp_t *rsp;
	uint32_t rsp_data_resid;
	uint32_t check_underrun;
	uint8_t asc;
	uint8_t ascq;
	uint8_t scsi_status;
	uint8_t sense;
	uint32_t did;
	uint32_t fix_it;
	uint8_t *scsi_cmd;
	uint8_t scsi_opcode;
	uint16_t scsi_dl;
	uint32_t data_rx;
	uint32_t length;

	cmd = &iocbq->iocb;

	/* Initialize the status */
	iostat = cmd->ULPSTATUS;
	localstat = 0;
	scsi_status = 0;
	asc = 0;
	ascq = 0;
	sense = 0;
	check_underrun = 0;
	fix_it = 0;

	HBASTATS.FcpEvent++;

	sbp = (emlxs_buf_t *)iocbq->sbp;

	if (!sbp) {
		/* completion with missing xmit command */
		HBASTATS.FcpStray++;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
		    "cmd=%x iotag=%d", cmd->ULPCOMMAND, cmd->ULPIOTAG);

		return;
	}

	HBASTATS.FcpCompleted++;

#ifdef SAN_DIAG_SUPPORT
	emlxs_update_sd_bucket(sbp);
#endif /* SAN_DIAG_SUPPORT */

	pkt = PRIV2PKT(sbp);

	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
	scsi_opcode = scsi_cmd[12];
	data_rx = 0;

	/* Sync data in data buffer only on FC_PKT_FCP_READ */
	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
		    DDI_DMA_SYNC_FORKERNEL);

#ifdef TEST_SUPPORT
		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
		    (pkt->pkt_datalen >= 512)) {
			hba->underrun_counter--;
			iostat = IOSTAT_FCP_RSP_ERROR;

			/* Report 512 bytes missing by adapter */
			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;

			/* Corrupt 512 bytes of Data buffer */
			bzero((uint8_t *)pkt->pkt_data, 512);

			/* Set FCP response to STATUS_GOOD */
			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
		}
#endif /* TEST_SUPPORT */
	}

	/* Process the pkt */
	mutex_enter(&sbp->mtx);

	/* Check for immediate return */
	if ((iostat == IOSTAT_SUCCESS) &&
	    (pkt->pkt_comp) &&
	    !(sbp->pkt_flags &
	    (PACKET_ULP_OWNED | PACKET_COMPLETED |
	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
	    PACKET_IN_ABORT | PACKET_POLLED))) {
		HBASTATS.FcpGood++;

		sbp->pkt_flags |=
		    (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
		    PACKET_COMPLETED | PACKET_ULP_OWNED);
		mutex_exit(&sbp->mtx);

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
		emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

#ifdef FMA_SUPPORT
		emlxs_check_dma(hba, sbp);
#endif /* FMA_SUPPORT */

		cp->ulpCmplCmd++;
		(*pkt->pkt_comp) (pkt);

#ifdef FMA_SUPPORT
		if (hba->flag & FC_DMA_CHECK_ERROR) {
			emlxs_thread_spawn(hba, emlxs_restart_thread,
			    NULL, NULL);
		}
#endif /* FMA_SUPPORT */

		return;
	}

	/*
	 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
	 * is reported.
	 */

	/* Check if a response buffer was not provided */
	if ((iostat != IOSTAT_FCP_RSP_ERROR) || (pkt->pkt_rsplen == 0)) {
		goto done;
	}

	EMLXS_MPDATA_SYNC(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
	    DDI_DMA_SYNC_FORKERNEL);

	/* Get the response buffer pointer */
	rsp = (fcp_rsp_t *)pkt->pkt_resp;

	/* Validate the response payload */
	if (!rsp->fcp_u.fcp_status.resid_under &&
	    !rsp->fcp_u.fcp_status.resid_over) {
		rsp->fcp_resid = 0;
	}

	if (!rsp->fcp_u.fcp_status.rsp_len_set) {
		rsp->fcp_response_len = 0;
	}

	if (!rsp->fcp_u.fcp_status.sense_len_set) {
		rsp->fcp_sense_len = 0;
	}

	length = sizeof (fcp_rsp_t) + LE_SWAP32(rsp->fcp_response_len) +
	    LE_SWAP32(rsp->fcp_sense_len);

	if (length > pkt->pkt_rsplen) {
		iostat = IOSTAT_RSP_INVALID;
		pkt->pkt_data_resid = pkt->pkt_datalen;
		goto done;
	}

	/* Set the valid response flag */
	sbp->pkt_flags |= PACKET_FCP_RSP_VALID;

	scsi_status = rsp->fcp_u.fcp_status.scsi_status;

#ifdef SAN_DIAG_SUPPORT
	ndlp = (NODELIST *)iocbq->node;
	if (scsi_status == SCSI_STAT_QUE_FULL) {
		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
	} else if (scsi_status == SCSI_STAT_BUSY) {
		emlxs_log_sd_scsi_event(port,
		    SD_SCSI_SUBCATEGORY_DEVBSY,
		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
	}
#endif

	/*
	 * Convert a task abort to a check condition with no data
	 * transferred. We saw a data corruption when Solaris received
	 * a Task Abort from a tape.
	 */

	if (scsi_status == SCSI_STAT_TASK_ABORT) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_fcp_completion_error_msg,
		    "Task Abort. "
		    "Fixed. did=0x%06x sbp=%p cmd=%02x dl=%d",
		    did, sbp, scsi_opcode, pkt->pkt_datalen);

		rsp->fcp_u.fcp_status.scsi_status =
		    SCSI_STAT_CHECK_COND;
		rsp->fcp_u.fcp_status.rsp_len_set = 0;
		rsp->fcp_u.fcp_status.sense_len_set = 0;
		rsp->fcp_u.fcp_status.resid_over = 0;

		if (pkt->pkt_datalen) {
			rsp->fcp_u.fcp_status.resid_under = 1;
			rsp->fcp_resid =
			    LE_SWAP32(pkt->pkt_datalen);
		} else {
			rsp->fcp_u.fcp_status.resid_under = 0;
			rsp->fcp_resid = 0;
		}

		scsi_status = SCSI_STAT_CHECK_COND;
	}

	/*
	 * We only need to check underrun if data could
	 * have been sent
	 */

	/* Always check underrun if status is good */
	if (scsi_status == SCSI_STAT_GOOD) {
		check_underrun = 1;
	}
	/* Check the sense codes if this is a check condition */
	else if (scsi_status == SCSI_STAT_CHECK_COND) {
		check_underrun = 1;

		/* Check if sense data was provided */
		if (LE_SWAP32(rsp->fcp_sense_len) >= 14) {
			sense = *((uint8_t *)rsp + 32 + 2);
			asc = *((uint8_t *)rsp + 32 + 12);
			ascq = *((uint8_t *)rsp + 32 + 13);
		}

#ifdef SAN_DIAG_SUPPORT
		emlxs_log_sd_scsi_check_event(port,
		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
		    scsi_opcode, sense, asc, ascq);
#endif
	}
	/* Status is not good and this is not a check condition */
	/* No data should have been sent */
	else {
		check_underrun = 0;
	}

	/* Initialize the resids */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* Check if no data was to be transferred */
	if (pkt->pkt_datalen == 0) {
		goto done;
	}

	/* Get the residual underrun count reported by the SCSI reply */
	rsp_data_resid = (rsp->fcp_u.fcp_status.resid_under) ?
	    LE_SWAP32(rsp->fcp_resid) : 0;

	/* Set pkt_data_resid to the residual reported by the SCSI response */
	pkt->pkt_data_resid = rsp_data_resid;

	/* Adjust the pkt_data_resid field if needed */
	if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
		/*
		 * Get the residual underrun count reported by
		 * our adapter
		 */
		pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;

#ifdef SAN_DIAG_SUPPORT
		if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
			emlxs_log_sd_fc_rdchk_event(port,
			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
			    scsi_opcode, pkt->pkt_data_resid);
		}
#endif

		/* Get the actual amount of data transferred */
		data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;

		/*
		 * If the residual being reported by the adapter is
		 * greater than the residual being reported in the
		 * reply, then we have a true underrun.
		 */
		if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
			switch (scsi_opcode) {
			case SCSI_INQUIRY:
				scsi_dl = scsi_cmd[16];
				break;

			case SCSI_RX_DIAG:
				scsi_dl =
				    (scsi_cmd[15] * 0x100) +
				    scsi_cmd[16];
				break;

			default:
				scsi_dl = pkt->pkt_datalen;
			}

#ifdef FCP_UNDERRUN_PATCH1
			if (cfg[CFG_ENABLE_PATCH].current &
			    FCP_UNDERRUN_PATCH1) {
				/*
				 * If status is not good and no data was
				 * actually transferred, then we must fix
				 * the issue
				 */
				if ((scsi_status != SCSI_STAT_GOOD) &&
				    (data_rx == 0)) {
					fix_it = 1;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_fcp_completion_error_msg,
					    "Underrun(1). Fixed. "
					    "did=0x%06x sbp=%p cmd=%02x "
					    "dl=%d,%d rx=%d rsp=%d",
					    did, sbp, scsi_opcode,
					    pkt->pkt_datalen, scsi_dl,
					    (pkt->pkt_datalen -
					    pkt->pkt_data_resid),
					    rsp_data_resid);

				}
			}
#endif /* FCP_UNDERRUN_PATCH1 */


#ifdef FCP_UNDERRUN_PATCH2
			if (cfg[CFG_ENABLE_PATCH].current &
			    FCP_UNDERRUN_PATCH2) {
				if (scsi_status == SCSI_STAT_GOOD) {
					emlxs_msg_t *msg;

					msg = &emlxs_fcp_completion_error_msg;
					/*
					 * If status is good and this is an
					 * inquiry request and the amount of
					 * data requested <= data received,
					 * then we must fix the issue.
					 */

					if ((scsi_opcode == SCSI_INQUIRY) &&
					    (pkt->pkt_datalen >= data_rx) &&
					    (scsi_dl <= data_rx)) {
						fix_it = 1;

						EMLXS_MSGF(EMLXS_CONTEXT, msg,
						    "Underrun(2). Fixed. "
						    "did=0x%06x sbp=%p "
						    "cmd=%02x dl=%d,%d "
						    "rx=%d rsp=%d",
						    did, sbp, scsi_opcode,
						    pkt->pkt_datalen, scsi_dl,
						    data_rx, rsp_data_resid);

					}

					/*
					 * If status is good and this is an
					 * inquiry request and the amount of
					 * data requested >= 128 bytes, but
					 * only 128 bytes were received,
					 * then we must fix the issue.
					 */
					else if ((scsi_opcode ==
					    SCSI_INQUIRY) &&
					    (pkt->pkt_datalen >= 128) &&
					    (scsi_dl >= 128) &&
					    (data_rx == 128)) {
						fix_it = 1;

						EMLXS_MSGF(EMLXS_CONTEXT, msg,
						    "Underrun(3). Fixed. "
						    "did=0x%06x sbp=%p "
						    "cmd=%02x dl=%d,%d "
						    "rx=%d rsp=%d",
						    did, sbp, scsi_opcode,
						    pkt->pkt_datalen, scsi_dl,
						    data_rx, rsp_data_resid);

					}
				}
			}
#endif /* FCP_UNDERRUN_PATCH2 */

			/*
			 * Check if SCSI response payload should be
			 * fixed or if a DATA_UNDERRUN should be
			 * reported
			 */
			if (fix_it) {
				/*
				 * Fix the SCSI response payload itself
				 */
				rsp->fcp_u.fcp_status.resid_under = 1;
				rsp->fcp_resid =
				    LE_SWAP32(pkt->pkt_data_resid);
			} else {
				/*
				 * Change the status from
				 * IOSTAT_FCP_RSP_ERROR to
				 * IOSTAT_DATA_UNDERRUN
				 */
				iostat = IOSTAT_DATA_UNDERRUN;
				pkt->pkt_data_resid =
				    pkt->pkt_datalen;
			}
		}

		/*
		 * If the residual being reported by the adapter is
		 * less than the residual being reported in the reply,
		 * then we have a true overrun. Since we don't know
		 * where the extra data came from or went to then we
		 * cannot trust anything we received
		 */
		else if (rsp_data_resid > pkt->pkt_data_resid) {
			/*
			 * Change the status from
			 * IOSTAT_FCP_RSP_ERROR to
			 * IOSTAT_DATA_OVERRUN
			 */
			iostat = IOSTAT_DATA_OVERRUN;
			pkt->pkt_data_resid = pkt->pkt_datalen;
		}

	} else if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
	    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
		/*
		 * Get the residual underrun count reported by
		 * our adapter
		 */
		pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;

#ifdef SAN_DIAG_SUPPORT
		if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
			emlxs_log_sd_fc_rdchk_event(port,
			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
			    scsi_opcode, pkt->pkt_data_resid);
		}
#endif /* SAN_DIAG_SUPPORT */

		/* Get the actual amount of data transferred */
		data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;

		/*
		 * If the residual being reported by the adapter is
		 * greater than the residual being reported in the
		 * reply, then we have a true underrun.
		 */
		if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {

			scsi_dl = pkt->pkt_datalen;

#ifdef FCP_UNDERRUN_PATCH1
			if (cfg[CFG_ENABLE_PATCH].current &
			    FCP_UNDERRUN_PATCH1) {
				/*
				 * If status is not good and no data was
				 * actually transferred, then we must fix
				 * the issue
				 */
				if ((scsi_status != SCSI_STAT_GOOD) &&
				    (data_rx == 0)) {
					fix_it = 1;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_fcp_completion_error_msg,
					    "Underrun(1). Fixed. "
					    "did=0x%06x sbp=%p cmd=%02x "
					    "dl=%d,%d rx=%d rsp=%d",
					    did, sbp, scsi_opcode,
					    pkt->pkt_datalen, scsi_dl,
					    (pkt->pkt_datalen -
					    pkt->pkt_data_resid),
					    rsp_data_resid);

				}
			}
#endif /* FCP_UNDERRUN_PATCH1 */

			/*
			 * Check if SCSI response payload should be
			 * fixed or if a DATA_UNDERRUN should be
			 * reported
			 */
			if (fix_it) {
				/*
				 * Fix the SCSI response payload itself
				 */
				rsp->fcp_u.fcp_status.resid_under = 1;
				rsp->fcp_resid =
				    LE_SWAP32(pkt->pkt_data_resid);
			} else {
				/*
				 * Change the status from
				 * IOSTAT_FCP_RSP_ERROR to
				 * IOSTAT_DATA_UNDERRUN
				 */
				iostat = IOSTAT_DATA_UNDERRUN;
				pkt->pkt_data_resid =
				    pkt->pkt_datalen;
			}
		}

		/*
		 * If the residual being reported by the adapter is
		 * less than the residual being reported in the reply,
		 * then we have a true overrun. Since we don't know
		 * where the extra data came from or went to then we
		 * cannot trust anything we received
		 */
		else if (rsp_data_resid > pkt->pkt_data_resid) {
			/*
			 * Change the status from
			 * IOSTAT_FCP_RSP_ERROR to
			 * IOSTAT_DATA_OVERRUN
			 */
			iostat = IOSTAT_DATA_OVERRUN;
			pkt->pkt_data_resid = pkt->pkt_datalen;
		}
	}

done:

	/* Print completion message */
	switch (iostat) {
	case IOSTAT_SUCCESS:
		/* Build SCSI GOOD status */
		if (pkt->pkt_rsplen) {
			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
		}
		break;

	case IOSTAT_FCP_RSP_ERROR:
		break;

	case IOSTAT_REMOTE_STOP:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_LOCAL_REJECT:
		localstat = cmd->un.grsp.perr.statLocalError;

		switch (localstat) {
		case IOERR_SEQUENCE_TIMEOUT:
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_fcp_completion_error_msg,
			    "Local reject. "
			    "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
			    emlxs_error_xlate(localstat), did, sbp,
			    scsi_opcode, pkt->pkt_timeout);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_fcp_completion_error_msg,
			    "Local reject. %s 0x%06x %p %02x (%x)(%x)",
			    emlxs_error_xlate(localstat), did, sbp,
			    scsi_opcode, (uint16_t)cmd->ULPIOTAG,
			    (uint16_t)cmd->ULPCONTEXT);
		}

		break;

	case IOSTAT_NPORT_RJT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_FABRIC_RJT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_NPORT_BSY:
#ifdef SAN_DIAG_SUPPORT
		ndlp = (NODELIST *)iocbq->node;
		emlxs_log_sd_fc_bsy_event(port,
		    (HBA_WWN *)&ndlp->nlp_portname);
#endif

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_FABRIC_BSY:
#ifdef SAN_DIAG_SUPPORT
		ndlp = (NODELIST *)iocbq->node;
		emlxs_log_sd_fc_bsy_event(port, NULL);
#endif

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_INTERMED_RSP:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
		    sbp, scsi_opcode);
		break;

	case IOSTAT_LS_RJT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_DATA_UNDERRUN:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Underrun. did=0x%06x sbp=%p cmd=%02x "
		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
		    rsp_data_resid, scsi_status, sense, asc, ascq);
		break;

	case IOSTAT_DATA_OVERRUN:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Overrun. did=0x%06x sbp=%p cmd=%02x "
		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
		    rsp_data_resid, scsi_status, sense, asc, ascq);
		break;

	case IOSTAT_RSP_INVALID:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Rsp Invalid. did=0x%06x sbp=%p cmd=%02x dl=%d rl=%d"
		    "(%d, %d, %d)",
		    did, sbp, scsi_opcode, pkt->pkt_datalen, pkt->pkt_rsplen,
		    LE_SWAP32(rsp->fcp_resid),
		    LE_SWAP32(rsp->fcp_sense_len),
		    LE_SWAP32(rsp->fcp_response_len));
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
		    iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
		    scsi_opcode);
		break;
	}

	if (iostat == IOSTAT_SUCCESS) {
		HBASTATS.FcpGood++;
	} else {
		HBASTATS.FcpError++;
	}

	mutex_exit(&sbp->mtx);

	emlxs_pkt_complete(sbp, iostat, localstat, 0);

	return;

} /* emlxs_handle_fcp_event() */


/*
 * emlxs_post_buffer
 *
 * This routine will post count buffers to the
 * ring with the QUE_RING_BUF64_CN command. This
 * allows up to 2 buffers to be posted per command.
 * Returns the number of buffers NOT posted.
 */
/* SLI3 */
extern int
emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
{
	emlxs_port_t *port = &PPORT;
	IOCB *icmd;
	IOCBQ *iocbq;
	MATCHMAP *mp;
	uint16_t tag;
	uint32_t maxqbuf;
	int32_t i;
	int32_t j;
	uint32_t seg;
	uint32_t size;

	mp = 0;
	maxqbuf = 2;
	tag = (uint16_t)cnt;
	cnt += rp->fc_missbufcnt;

	if (rp->ringno == hba->channel_els) {
		seg = MEM_BUF;
		size = MEM_ELSBUF_SIZE;
	} else if (rp->ringno == hba->channel_ip) {
		seg = MEM_IPBUF;
		size = MEM_IPBUF_SIZE;
	} else if (rp->ringno == hba->channel_ct) {
		seg = MEM_CTBUF;
		size = MEM_CTBUF_SIZE;
	}
#ifdef SFCT_SUPPORT
	else if (rp->ringno == hba->CHANNEL_FCT) {
		seg = MEM_FCTBUF;
		size = MEM_FCTBUF_SIZE;
	}
#endif /* SFCT_SUPPORT */
	else {
		return (0);
	}

	/*
	 * While there are buffers to post
	 */
	while (cnt) {
		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
			rp->fc_missbufcnt = cnt;
			return (cnt);
		}

		iocbq->channel = (void *)&hba->chan[rp->ringno];
		iocbq->port = (void *)port;
		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

		icmd = &iocbq->iocb;

		/*
		 * Max buffers can be posted per command
		 */
		for (i = 0; i < maxqbuf; i++) {
			if (cnt <= 0)
				break;

			/* fill in BDEs for command */
			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg))
			    == 0) {
				icmd->ULPBDECOUNT = i;
				for (j = 0; j < i; j++) {
					mp = EMLXS_GET_VADDR(hba, rp, icmd);
					if (mp) {
						emlxs_mem_put(hba, seg,
						    (void *)mp);
					}
				}

				rp->fc_missbufcnt = cnt + i;

				emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);

				return (cnt + i);
			}

			/*
			 * map that page and save the address pair for lookup
			 * later
			 */
			emlxs_mem_map_vaddr(hba,
			    rp,
			    mp,
			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
			    (uint32_t *)&icmd->un.cont64[i].addrLow);

			icmd->un.cont64[i].tus.f.bdeSize = size;
			icmd->ULPCOMMAND = CMD_QUE_RING_BUF64_CN;

			/*
			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			 * "UB Post: ring=%d addr=%08x%08x size=%d",
			 * rp->ringno, icmd->un.cont64[i].addrHigh,
			 * icmd->un.cont64[i].addrLow, size);
			 */

			cnt--;
		}

		icmd->ULPIOTAG = tag;
		icmd->ULPBDECOUNT = i;
		icmd->ULPLE = 1;
		icmd->ULPOWNER = OWN_CHIP;
		/* used for delimiter between commands */
		iocbq->bp = (void *)mp;

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[rp->ringno], iocbq);
	}

	rp->fc_missbufcnt = 0;

	return (0);

} /* emlxs_post_buffer() */


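/*
 * emlxs_fcp_tag_nodes
 *
 * Tag every node in the port's node hash table. The tagged nodes
 * are then processed one at a time via emlxs_find_tagged_node().
 */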
static void
emlxs_fcp_tag_nodes(emlxs_port_t *port)
{
	NODELIST *nlp;
	int i;

	/* We will process all nodes with this tag later */
	rw_enter(&port->node_rwlock, RW_READER);
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			nlp->nlp_tag = 1;
			nlp = nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);
}


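/*
 * emlxs_find_tagged_node
 *
 * Return the first tagged node in the port's node hash table and
 * clear its tag. Fabric nodes (FABRIC_RPI) are untagged but skipped.
 * Returns NULL when no tagged nodes remain.
 */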
static NODELIST *
emlxs_find_tagged_node(emlxs_port_t *port)
{
	NODELIST *nlp;
	NODELIST *tagged;
	int i;

	/* Find first node */
	rw_enter(&port->node_rwlock, RW_READER);
	tagged = 0;
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			if (!nlp->nlp_tag) {
				nlp = nlp->nlp_list_next;
				continue;
			}
			nlp->nlp_tag = 0;

			if (nlp->nlp_Rpi == FABRIC_RPI) {
				nlp = nlp->nlp_list_next;
				continue;
			}
			tagged = nlp;
			break;
		}
		if (tagged) {
			break;
		}
	}
	rw_exit(&port->node_rwlock);
	return (tagged);
}


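/*
 * emlxs_port_offline
 *
 * Take the port offline for the affected-ID scope provided. Depending
 * on the scope this is a hard (link down) or soft (e.g. RSCN) shutdown,
 * and IO to each matching node is flushed, held, or left alone based
 * on the configured ADISC support level.
 */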
extern int
emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg;
	NODELIST *nlp;
	fc_affected_id_t *aid;
	uint32_t mask;
	uint32_t aff_d_id;
	uint32_t linkdown;
	uint32_t vlinkdown;
	uint32_t action;
	int i;
	uint32_t unreg_vpi;
	uint32_t update;
	uint32_t adisc_support;
	uint32_t clear_all;
	uint8_t format;

	/* Target mode only uses this routine for linkdowns */
	if ((port->mode == MODE_TARGET) && (scope != 0xffffffff) &&
	    (scope != 0xfeffffff) && (scope != 0xfdffffff)) {
		return (0);
	}

	cfg = &CFG;
	aid = (fc_affected_id_t *)&scope;
	linkdown = 0;
	vlinkdown = 0;
	unreg_vpi = 0;
	update = 0;
	clear_all = 0;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return (0);
	}

	format = aid->aff_format;

	switch (format) {
	case 0:	/* Port */
		mask = 0x00ffffff;
		break;

	case 1:	/* Area */
		mask = 0x00ffff00;
		break;

	case 2:	/* Domain */
		mask = 0x00ff0000;
		break;

	case 3:	/* Network */
		mask = 0x00000000;
		break;

#ifdef DHCHAP_SUPPORT
	case 0xfe:	/* Virtual link down */
		mask = 0x00000000;
		vlinkdown = 1;
		break;
#endif /* DHCHAP_SUPPORT */

	case 0xff:	/* link is down */
		mask = 0x00000000;
		linkdown = 1;
		break;

	case 0xfd:	/* New fabric */
	default:
		mask = 0x00000000;
		linkdown = 1;
		clear_all = 1;
		break;
	}

	aff_d_id = aid->aff_d_id & mask;


	/*
	 * If link is down then this is a hard shutdown and flush
	 * If link not down then this is a soft shutdown and flush
	 * (e.g. RSCN)
	 */
	if (linkdown) {
		hba->flag &= ~FC_GPIO_LINK_UP;

		mutex_enter(&EMLXS_PORT_LOCK);

		port->flag &= EMLXS_PORT_LINKDOWN_MASK;

		if (port->ulp_statec != FC_STATE_OFFLINE) {
			port->ulp_statec = FC_STATE_OFFLINE;

			port->prev_did = port->did;
			port->did = 0;
			port->rdid = 0;

			bcopy(&port->fabric_sparam, &port->prev_fabric_sparam,
			    sizeof (SERV_PARM));
			bzero(&port->fabric_sparam, sizeof (SERV_PARM));

			update = 1;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		emlxs_timer_cancel_clean_address(port);

		/* Tell ULP about it */
		if (update) {
			if (port->flag & EMLXS_PORT_BOUND) {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg, NULL);
				}

				if (port->mode == MODE_INITIATOR) {
					emlxs_fca_link_down(port);
				}
#ifdef SFCT_SUPPORT
				else if (port->mode == MODE_TARGET) {
					emlxs_fct_link_down(port);
				}
#endif /* SFCT_SUPPORT */

			} else {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg, "*");
				}
			}


		}

		unreg_vpi = 1;

#ifdef DHCHAP_SUPPORT
		/* Stop authentication with all nodes */
		emlxs_dhc_auth_stop(port, NULL);
#endif /* DHCHAP_SUPPORT */

		/* Flush the base node */
		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);

		/* Flush any pending ub buffers */
		emlxs_ub_flush(port);
	}
#ifdef DHCHAP_SUPPORT
	/* virtual link down */
	else if (vlinkdown) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (port->ulp_statec != FC_STATE_OFFLINE) {
			port->ulp_statec = FC_STATE_OFFLINE;
			update = 1;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		emlxs_timer_cancel_clean_address(port);

		/* Tell ULP about it */
		if (update) {
			if (port->flag & EMLXS_PORT_BOUND) {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg,
					    "Switch authentication failed.");
				}

				if (port->mode == MODE_INITIATOR) {
					emlxs_fca_link_down(port);
				}
#ifdef SFCT_SUPPORT
				else if (port->mode == MODE_TARGET) {
					emlxs_fct_link_down(port);
				}
#endif /* SFCT_SUPPORT */
			} else {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg,
					    "Switch authentication failed. *");
				}
			}


		}

		/* Flush the base node */
		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
	}
#endif /* DHCHAP_SUPPORT */
	else {
		emlxs_timer_cancel_clean_address(port);
	}

	if (port->mode == MODE_TARGET) {
		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			/* Set the node tags */
			emlxs_fcp_tag_nodes(port);
			unreg_vpi = 0;
			while ((nlp = emlxs_find_tagged_node(port))) {
				(void) emlxs_rpi_pause_notify(port,
				    nlp->rpip);
				/*
				 * In port_online we need to resume
				 * these RPIs before we can use them.
				 */
			}
		}
		goto done;
	}

	/* Set the node tags */
	emlxs_fcp_tag_nodes(port);

	if (!clear_all && (hba->flag & FC_ONLINE_MODE)) {
		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
	} else {
		adisc_support = 0;
	}

	/* Check ADISC support level */
	switch (adisc_support) {
	case 0:	/* No support - Flush all IO to all matching nodes */

		for (;;) {
			/*
			 * We need to hold the locks this way because
			 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
			 * same locks. Also, when we release the lock the list
			 * can change out from under us.
			 */

			/* Find first node */
			rw_enter(&port->node_rwlock, RW_READER);
			action = 0;
			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
				nlp = port->node_table[i];
				while (nlp != NULL) {
					if (!nlp->nlp_tag) {
						nlp = nlp->nlp_list_next;
						continue;
					}
					nlp->nlp_tag = 0;

					/*
					 * Check for any device that matches
					 * our mask
					 */
					if ((nlp->nlp_DID & mask) ==
					    aff_d_id) {
						if (linkdown) {
							action = 1;
							break;
						} else { /* Must be an RSCN */

							action = 2;
							break;
						}
					}
					nlp = nlp->nlp_list_next;
				}

				if (action) {
					break;
				}
			}
			rw_exit(&port->node_rwlock);


			/* Check if nothing was found */
			if (action == 0) {
				break;
			} else if (action == 1) {
				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
				    NULL, NULL, NULL);
			} else if (action == 2) {
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

#ifdef DHCHAP_SUPPORT
				emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

				/*
				 * Close the node for any further normal IO
				 * A PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, 60);
				emlxs_node_close(port, nlp,
				    hba->channel_ip, 60);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
			}

		}

		break;

	case 1:	/* Partial support - Flush IO for non-FCP2 matching nodes */

		for (;;) {

			/*
			 * We need to hold the locks this way because
			 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
			 * same locks. Also, when we release the lock the list
			 * can change out from under us.
			 */
			rw_enter(&port->node_rwlock, RW_READER);
			action = 0;
			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
				nlp = port->node_table[i];
				while (nlp != NULL) {
					if (!nlp->nlp_tag) {
						nlp = nlp->nlp_list_next;
						continue;
					}
					nlp->nlp_tag = 0;

					/*
					 * Check for special FCP2 target device
					 * that matches our mask
					 */
					if ((nlp->nlp_fcp_info &
					    NLP_FCP_TGT_DEVICE) &&
					    (nlp->nlp_fcp_info &
					    NLP_FCP_2_DEVICE) &&
					    (nlp->nlp_DID & mask) ==
					    aff_d_id) {
						action = 3;
						break;
					}

					/*
					 * Check for any other device that
					 * matches our mask
					 */
					else if ((nlp->nlp_DID & mask) ==
					    aff_d_id) {
						if (linkdown) {
							action = 1;
							break;
						} else { /* Must be an RSCN */

							action = 2;
							break;
						}
					}

					nlp = nlp->nlp_list_next;
				}

				if (action) {
					break;
				}
			}
			rw_exit(&port->node_rwlock);

			/* Check if nothing was found */
			if (action == 0) {
				break;
			} else if (action == 1) {
				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
				    NULL, NULL, NULL);
			} else if (action == 2) {
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

#ifdef DHCHAP_SUPPORT
				emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

				/*
				 * Close the node for any further normal IO
				 * A PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, 60);
				emlxs_node_close(port, nlp,
				    hba->channel_ip, 60);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);

			} else if (action == 3) {	/* FCP2 devices */
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

				unreg_vpi = 0;

				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
					(void) emlxs_rpi_pause_notify(port,
					    nlp->rpip);
				}

#ifdef DHCHAP_SUPPORT
				emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

				/*
				 * Close the node for any further normal IO
				 * An ADISC or a PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, -1);
				emlxs_node_close(port, nlp, hba->channel_ip,
				    ((linkdown) ? 0 : 60));

				/* Flush tx queues except for FCP ring */
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_ct], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_els], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_ip], 0, 0);

				/* Flush chip queues except for FCP ring */
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_ct], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_els], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_ip], nlp, 0);
			}
		}
		break;

	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */

		if (!linkdown && !vlinkdown) {
			break;
		}

		for (;;) {
			/*
			 * We need to hold the locks this way because
			 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
			 * same locks. Also, when we release the lock the list
			 * can change out from under us.
			 */
			rw_enter(&port->node_rwlock, RW_READER);
			action = 0;
			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
				nlp = port->node_table[i];
				while (nlp != NULL) {
					if (!nlp->nlp_tag) {
						nlp = nlp->nlp_list_next;
						continue;
					}
					nlp->nlp_tag = 0;

					/*
					 * Check for FCP target device that
					 * matches our mask
					 */
					if ((nlp->nlp_fcp_info &
					    NLP_FCP_TGT_DEVICE) &&
					    (nlp->nlp_DID & mask) ==
					    aff_d_id) {
						action = 3;
						break;
					}

					/*
					 * Check for any other device that
					 * matches our mask
					 */
					else if ((nlp->nlp_DID & mask) ==
					    aff_d_id) {
						if (linkdown) {
							action = 1;
							break;
						} else { /* Must be an RSCN */

							action = 2;
							break;
						}
					}

					nlp = nlp->nlp_list_next;
				}
				if (action) {
					break;
				}
			}
			rw_exit(&port->node_rwlock);

			/* Check if nothing was found */
			if (action == 0) {
				break;
			} else if (action == 1) {
				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
				    NULL, NULL, NULL);
			} else if (action == 2) {
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

				/*
				 * Close the node for any further normal IO
				 * A PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, 60);
				emlxs_node_close(port, nlp,
				    hba->channel_ip, 60);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);

			} else if (action == 3) {	/* FCP2 devices */
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

				unreg_vpi = 0;

				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
					(void) emlxs_rpi_pause_notify(port,
					    nlp->rpip);
				}

				/*
				 * Close the node for any further normal IO
				 * An ADISC or a PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, -1);
				emlxs_node_close(port, nlp, hba->channel_ip,
				    ((linkdown) ? 0 : 60));

				/* Flush tx queues except for FCP ring */
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_ct], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_els], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_ip], 0, 0);

				/* Flush chip queues except for FCP ring */
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_ct], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_els], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_ip], nlp, 0);
			}
		}

		break;

	} /* switch() */

done:

	if (unreg_vpi) {
		(void) emlxs_mb_unreg_vpi(port);
	}

	return (0);

} /* emlxs_port_offline() */


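/*
 * emlxs_port_online
 *
 * Report a link up to the ULP for this port, logging the link speed,
 * topology and mode. For SLI4 target mode, resume the RPIs that were
 * paused in emlxs_port_offline().
 */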
extern void
emlxs_port_online(emlxs_port_t *vport)
{
	emlxs_hba_t *hba = vport->hba;
	emlxs_port_t *port = &PPORT;
	NODELIST *nlp;
	uint32_t state;
	uint32_t update;
	uint32_t npiv_linkup;
	char topology[32];
	char linkspeed[32];
	char mode[32];

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
	 * "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
	 */

	if ((vport->vpi > 0) &&
	    (!(hba->flag & FC_NPIV_ENABLED) ||
	    !(hba->flag & FC_NPIV_SUPPORTED))) {
		return;
	}

	if (!(vport->flag & EMLXS_PORT_BOUND) ||
	    !(vport->flag & EMLXS_PORT_ENABLED)) {
		return;
	}

	/* Check for mode */
	if (port->mode == MODE_TARGET) {
		(void) strlcpy(mode, ", target", sizeof (mode));

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			/* Set the node tags */
			emlxs_fcp_tag_nodes(vport);
			while ((nlp = emlxs_find_tagged_node(vport))) {
				/* The RPI was paused in port_offline */
				(void) emlxs_rpi_resume_notify(vport,
				    nlp->rpip, 0);
			}
		}
	} else if (port->mode == MODE_INITIATOR) {
		(void) strlcpy(mode, ", initiator", sizeof (mode));
	} else {
		(void) strlcpy(mode, "unknown", sizeof (mode));
	}
	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check for loop topology */
	if (hba->topology == TOPOLOGY_LOOP) {
		state = FC_STATE_LOOP;
		(void) strlcpy(topology, ", loop", sizeof (topology));
	} else {
		state = FC_STATE_ONLINE;
		(void) strlcpy(topology, ", fabric", sizeof (topology));
	}

	/* Set the link speed */
	switch (hba->linkspeed) {
	case 0:
		(void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
		state |= FC_STATE_1GBIT_SPEED;
		break;

	case LA_1GHZ_LINK:
		(void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
		state |= FC_STATE_1GBIT_SPEED;
		break;
	case LA_2GHZ_LINK:
		(void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
		state |= FC_STATE_2GBIT_SPEED;
		break;
	case LA_4GHZ_LINK:
		(void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
		state |= FC_STATE_4GBIT_SPEED;
		break;
	case LA_8GHZ_LINK:
		(void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
		state |= FC_STATE_8GBIT_SPEED;
		break;
	case LA_10GHZ_LINK:
		(void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
		state |= FC_STATE_10GBIT_SPEED;
		break;
	case LA_16GHZ_LINK:
		(void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
		state |= FC_STATE_16GBIT_SPEED;
		break;
	case LA_32GHZ_LINK:
		(void) strlcpy(linkspeed, "32Gb", sizeof (linkspeed));
		state |= FC_STATE_32GBIT_SPEED;
		break;
	default:
		(void) snprintf(linkspeed, sizeof (linkspeed), "unknown(0x%x)",
		    hba->linkspeed);
		break;
	}

	npiv_linkup = 0;
	update = 0;

	if ((hba->state >= FC_LINK_UP) &&
	    !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
		update = 1;
		vport->ulp_statec = state;

		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
			hba->flag |= FC_NPIV_LINKUP;
			npiv_linkup = 1;
		}
	}

	mutex_exit(&EMLXS_PORT_LOCK);

	if (update) {
		if (vport->flag & EMLXS_PORT_BOUND) {
			if (vport->vpi == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
				    "%s%s%s", linkspeed, topology, mode);

			} else if (npiv_linkup) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_npiv_link_up_msg, "%s%s%s",
				    linkspeed, topology, mode);
			}

			if (vport->mode == MODE_INITIATOR) {
				emlxs_fca_link_up(vport);
			}
#ifdef SFCT_SUPPORT
			else if (vport->mode == MODE_TARGET) {
				emlxs_fct_link_up(vport);
			}
#endif /* SFCT_SUPPORT */
		} else {
			if (vport->vpi == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
				    "%s%s%s *", linkspeed, topology, mode);

			} else if (npiv_linkup) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_npiv_link_up_msg, "%s%s%s *",
				    linkspeed, topology, mode);
			}
		}

		/* Check for waiting threads */
		if (vport->vpi == 0) {
			mutex_enter(&EMLXS_LINKUP_LOCK);
			if (hba->linkup_wait_flag == TRUE) {
				hba->linkup_wait_flag = FALSE;
				cv_broadcast(&EMLXS_LINKUP_CV);
			}
			mutex_exit(&EMLXS_LINKUP_LOCK);
		}

		/* Flush any pending ub buffers */
		emlxs_ub_flush(vport);
	}

	hba->flag |= FC_GPIO_LINK_UP;

	return;

} /* emlxs_port_online() */


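/*
 * emlxs_linkdown
 *
 * Declare the physical link down: move the HBA state to FC_LINK_DOWN
 * and take every bound port offline with the appropriate scope.
 */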
/* SLI3 */
extern void
emlxs_linkdown(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	int i;
	uint32_t scope;

	mutex_enter(&EMLXS_PORT_LOCK);

	if (hba->state > FC_LINK_DOWN) {
		HBASTATS.LinkDown++;
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_DOWN);
	}

	/* Set scope */
	scope = (hba->flag & FC_NEW_FABRIC) ? 0xFDFFFFFF : 0xFFFFFFFF;

	/* Filter hba flags */
	hba->flag &= FC_LINKDOWN_MASK;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;

	mutex_exit(&EMLXS_PORT_LOCK);

	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}

		(void) emlxs_port_offline(port, scope);

	}

	emlxs_log_link_event(port);

	return;

} /* emlxs_linkdown() */


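/*
 * emlxs_linkup
 *
 * Declare the physical link up: move the HBA state to FC_LINK_UP and,
 * unless the adapter is in Menlo mode, start the linkup and discovery
 * timers.
 */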
/* SLI3 */
extern void
emlxs_linkup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check for any mode changes */
	emlxs_mode_set(hba);

	HBASTATS.LinkUp++;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_UP);

#ifdef MENLO_SUPPORT
	if (hba->flag & FC_MENLO_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);

		/*
		 * Trigger linkup CV and don't start linkup & discovery
		 * timers
		 */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		cv_broadcast(&EMLXS_LINKUP_CV);
		mutex_exit(&EMLXS_LINKUP_LOCK);

		emlxs_log_link_event(port);

		return;
	}
#endif /* MENLO_SUPPORT */

	/* Set the linkup & discovery timers */
	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
	hba->discovery_timer =
	    hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
	    cfg[CFG_DISC_TIMEOUT].current;

	mutex_exit(&EMLXS_PORT_LOCK);

	emlxs_log_link_event(port);

	return;

} /* emlxs_linkup() */


/*
 * emlxs_reset_link
 *
 * Description:
 * Called to reset the link with an init_link
 *
 * Returns: 0 on success, 1 on failure
 *
 */
extern int
emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup, uint32_t wait)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	MAILBOXQ *mbq = NULL;
	MAILBOX *mb = NULL;
	int rval = 0;
	int tmo;
	int rc;

	/*
	 * Get a buffer to use for the mailbox command
	 */
	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
	    == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
		    "Unable to allocate mailbox buffer.");
		rval = 1;
		goto reset_link_fail;
	}

	if (linkup) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
		    "Resetting link...");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
		    "Disabling link...");
	}

	mb = (MAILBOX *)mbq;

	/* Bring link down first */
	emlxs_mb_down_link(hba, mbq);

#define	MBXERR_LINK_DOWN	0x33

	if (wait) {
		wait = MBX_WAIT;
	} else {
		wait = MBX_NOWAIT;
	}
	rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS) &&
	    (rc != MBXERR_LINK_DOWN)) {
		rval = 1;
		goto reset_link_fail;
	}

	tmo = 120;
	do {
		delay(drv_usectohz(500000));
		tmo--;

		if (!tmo) {
			rval = 1;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
			    "Linkdown timeout.");

			goto reset_link_fail;
		}
	} while ((hba->state >= FC_LINK_UP) && (hba->state != FC_ERROR));

	if (linkup) {
		/*
		 * Setup and issue mailbox INITIALIZE LINK command
		 */

		if (wait == MBX_NOWAIT) {
			if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
			    == NULL) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_link_reset_failed_msg,
				    "Unable to allocate mailbox buffer.");
				rval = 1;
				goto reset_link_fail;
			}
			mb = (MAILBOX *)mbq;
		} else {
			/* Reuse mbq from previous mbox */
			mb = (MAILBOX *)mbq;
		}
		cfg = &CFG;

		emlxs_mb_init_link(hba, mbq,
		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);

		mb->un.varInitLnk.lipsr_AL_PA = 0;

		/* Clear the loopback mode */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag &= ~FC_LOOPBACK_MODE;
		hba->loopback_tics = 0;
		mutex_exit(&EMLXS_PORT_LOCK);

		rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
			rval = 1;
			goto reset_link_fail;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
	}

reset_link_fail:

	if ((wait == MBX_WAIT) && mbq) {
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
	}

	return (rval);
} /* emlxs_reset_link() */


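/*
 * emlxs_online
 *
 * Bring the adapter online. Waits up to 30 seconds for the adapter
 * to reach the offline state before starting the transition.
 * Returns 0 on success.
 */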
extern int
emlxs_online(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	int32_t rval = 0;
	uint32_t i = 0;

	/* Make sure adapter is offline or exit trying (30 seconds) */
	while (i++ < 30) {
		/* Check if adapter is already going online */
		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
			return (0);
		}

		mutex_enter(&EMLXS_PORT_LOCK);

		/* Check again */
		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (0);
		}

		/* Check if adapter is offline */
		if (hba->flag & FC_OFFLINE_MODE) {
			/* Mark it going online */
			hba->flag &= ~FC_OFFLINE_MODE;
			hba->flag |= FC_ONLINING_MODE;

			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
			mutex_exit(&EMLXS_PORT_LOCK);
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		BUSYWAIT_MS(1000);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
	    "Going online...");

	if (rval = EMLXS_SLI_ONLINE(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
		    rval);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);

		/* Set FC_OFFLINE_MODE */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag |= FC_OFFLINE_MODE;
		hba->flag &= ~FC_ONLINING_MODE;
		mutex_exit(&EMLXS_PORT_LOCK);

		return (rval);
	}

	/* Start the timer */
	emlxs_timer_start(hba);

	/* Set FC_ONLINE_MODE */
	mutex_enter(&EMLXS_PORT_LOCK);
	hba->flag |= FC_ONLINE_MODE;
	hba->flag &= ~FC_ONLINING_MODE;
	mutex_exit(&EMLXS_PORT_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);

#ifdef SFCT_SUPPORT
	if (port->flag & EMLXS_TGT_ENABLED) {
		(void) emlxs_fct_port_initialize(port);
	}
#endif /* SFCT_SUPPORT */

	return (rval);

} /* emlxs_online() */


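/*
 * emlxs_offline
 *
 * Take the adapter offline: declare the link down, flush all
 * outstanding IO, then shut down the adapter interface. Waits up to
 * 30 seconds for the adapter to reach the online state first.
 * Returns 0 on success.
 */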
extern int
emlxs_offline(emlxs_hba_t *hba, uint32_t reset_requested)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i = 0;
	int rval = 1;

	/* Make sure adapter is online or exit trying (30 seconds) */
	while (i++ < 30) {
		/* Check if adapter is already going offline */
		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
			return (0);
		}

		mutex_enter(&EMLXS_PORT_LOCK);

		/* Check again */
		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (0);
		}

		/* Check if adapter is online */
		if (hba->flag & FC_ONLINE_MODE) {
			/* Mark it going offline */
			hba->flag &= ~FC_ONLINE_MODE;
			hba->flag |= FC_OFFLINING_MODE;

			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
			mutex_exit(&EMLXS_PORT_LOCK);
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		BUSYWAIT_MS(1000);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
	    "Going offline...");

	/* Declare link down */
	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		(void) emlxs_fcf_shutdown_notify(port, 1);
	} else {
		emlxs_linkdown(hba);
	}

#ifdef SFCT_SUPPORT
	if (port->flag & EMLXS_TGT_ENABLED) {
		(void) emlxs_fct_port_shutdown(port);
	}
#endif /* SFCT_SUPPORT */

	/* Check if adapter was shutdown */
	if (hba->flag & FC_HARDWARE_ERROR) {
		/*
		 * Force mailbox cleanup
		 * This will wake any sleeping or polling threads
		 */
		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
	}

	/* Pause here for the IO to settle */
	delay(drv_usectohz(1000000));	/* 1 sec */

	/* Unregister all nodes */
	emlxs_ffcleanup(hba);

	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), 0x9A);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
#endif /* FMA_SUPPORT */
	}

	/* Stop the timer */
	emlxs_timer_stop(hba);

	/* For safety flush every iotag list */
	if (emlxs_iotag_flush(hba)) {
		/* Pause here for the IO to flush */
		delay(drv_usectohz(1000));
	}

	/* Wait for poll command request to settle */
	while (hba->io_poll_count > 0) {
		delay(drv_usectohz(2000000));	/* 2 sec */
	}

	/* Shutdown the adapter interface */
	EMLXS_SLI_OFFLINE(hba, reset_requested);

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->flag |= FC_OFFLINE_MODE;
	hba->flag &= ~FC_OFFLINING_MODE;
	mutex_exit(&EMLXS_PORT_LOCK);

	rval = 0;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);

done:

	return (rval);

} /* emlxs_offline() */


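/*
 * emlxs_power_down
 *
 * Take the adapter offline and reset it in preparation for a
 * power-down. Returns 0 on success.
 */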
extern int
emlxs_power_down(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	int32_t rval = 0;

	if ((rval = emlxs_offline(hba, 0))) {
		return (rval);
	}
	EMLXS_SLI_HBA_RESET(hba, 1, 1, 0);


#ifdef FMA_SUPPORT
	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);
		return (1);
	}
#endif /* FMA_SUPPORT */

	return (0);

} /* End emlxs_power_down */


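/*
 * emlxs_power_up
 *
 * Bring the adapter back online after a power-down. If the online
 * attempt fails and the device supports PCI power management, the
 * chip is put back into the D3hot state. Returns 0 on success.
 */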
extern int
emlxs_power_up(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	int32_t rval = 0;


#ifdef FMA_SUPPORT
	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);
		return (1);
	}
#endif /* FMA_SUPPORT */

	/* Bring adapter online */
	if ((rval = emlxs_online(hba))) {
		if (hba->pci_cap_offset[PCI_CAP_ID_PM]) {
			/* Put chip in D3 state */
			(void) ddi_put8(hba->pci_acc_handle,
			    (uint8_t *)(hba->pci_addr +
			    hba->pci_cap_offset[PCI_CAP_ID_PM] +
			    PCI_PMCSR),
			    (uint8_t)PCI_PMCSR_D3HOT);
		}
		return (rval);
	}

	return (rval);

} /* emlxs_power_up() */


/*
 *
 * NAME: emlxs_ffcleanup
 *
 * FUNCTION: Cleanup all the Firefly resources used by configuring the adapter
 *
 * EXECUTION ENVIRONMENT: process only
 *
 * CALLED FROM: CFG_TERM
 *
 * INPUT: hba - pointer to the dev_ctl area.
 *
 * RETURNS: none
 */
extern void
emlxs_ffcleanup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;

	/* Disable all but the mailbox interrupt */
	EMLXS_SLI_DISABLE_INTR(hba, HC_MBINT_ENA);

	/* Make sure all port nodes are destroyed */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (port->node_count) {
			(void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
		}
	}

	/* Clear all interrupt enable conditions */
	EMLXS_SLI_DISABLE_INTR(hba, 0);

	return;

} /* emlxs_ffcleanup() */


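/*
 * emlxs_register_pkt
 *
 * Assign a free iotag to the packet and register it in the channel's
 * fc_table. Returns the iotag, or 0 if no free iotag is available.
 */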
extern uint16_t
emlxs_register_pkt(CHANNEL *cp, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	uint16_t iotag;
	uint32_t i;

	hba = cp->hba;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	if (sbp->iotag != 0) {
		port = &PPORT;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Pkt already registered! channel=%d iotag=%d sbp=%p",
		    sbp->channel, sbp->iotag, sbp);
	}

	iotag = 0;
	for (i = 0; i < hba->max_iotag; i++) {
		if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
			hba->fc_iotag = 1;
		}
		iotag = hba->fc_iotag++;

		if (hba->fc_table[iotag] == 0 ||
		    hba->fc_table[iotag] == STALE_PACKET) {
			hba->io_count++;
			hba->fc_table[iotag] = sbp;

			sbp->iotag = iotag;
			sbp->channel = cp;

			break;
		}
		iotag = 0;
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "register_pkt: channel=%d iotag=%d sbp=%p",
	 * cp->channelno, iotag, sbp);
	 */

	return (iotag);

} /* emlxs_register_pkt() */


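/*
 * emlxs_unregister_pkt
 *
 * Remove the iotag's packet from the channel's fc_table and clean it
 * up. If 'forced', the table slot is marked STALE_PACKET instead of
 * being cleared. Returns the packet, or NULL if the iotag is invalid.
 */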
2199 extern emlxs_buf_t *
2200 emlxs_unregister_pkt(CHANNEL *cp, uint16_t iotag, uint32_t forced)
2201 {
2202 emlxs_hba_t *hba;
2203 emlxs_buf_t *sbp;
2204
2205 sbp = NULL;
2206 hba = cp->hba;
2207
2208 /* Check the iotag range */
2209 if ((iotag == 0) || (iotag >= hba->max_iotag)) {
2210 return (NULL);
2211 }
2212
2213 /* Remove the sbp from the table */
2214 mutex_enter(&EMLXS_FCTAB_LOCK);
2215 sbp = hba->fc_table[iotag];
2216
2217 if (!sbp || (sbp == STALE_PACKET)) {
2218 mutex_exit(&EMLXS_FCTAB_LOCK);
2219 return (sbp);
2220 }
2221
2222 hba->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
2223 hba->io_count--;
2224 sbp->iotag = 0;
2225
2226 mutex_exit(&EMLXS_FCTAB_LOCK);
2227
2228
2229 /* Clean up the sbp */
2230 mutex_enter(&sbp->mtx);
2231
2232 if (sbp->pkt_flags & PACKET_IN_TXQ) {
2233 sbp->pkt_flags &= ~PACKET_IN_TXQ;
2234 hba->channel_tx_count--;
2235 }
2236
2237 if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
2238 sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
2239 }
2240
2241 if (sbp->bmp) {
2242 emlxs_mem_put(hba, MEM_BPL, (void *)sbp->bmp);
2243 sbp->bmp = 0;
2244 }
2245
2246 mutex_exit(&sbp->mtx);
2247
2248 return (sbp);
2249
2250 } /* emlxs_unregister_pkt() */



/* Flush all IO's to all nodes for a given IO Channel */
extern uint32_t
emlxs_tx_channel_flush(emlxs_hba_t *hba, CHANNEL *cp, emlxs_buf_t *fpkt)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCBQ *next;
	IOCB *iocb;
	uint32_t channelno;
	Q abort;
	NODELIST *ndlp;
	IOCB *icmd;
	MATCHMAP *mp;
	uint32_t i;
	uint8_t flag[MAX_CHANNEL];

	channelno = cp->channelno;
	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	/* While a node needs servicing */
	while (cp->nodeq.q_first) {
		ndlp = (NODELIST *)cp->nodeq.q_first;

		/* Check if priority queue is not empty */
		if (ndlp->nlp_ptx[channelno].q_first) {
			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first =
				    ndlp->nlp_ptx[channelno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
			}
			flag[channelno] = 1;

			abort.q_last = ndlp->nlp_ptx[channelno].q_last;
			abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
		}

		/* Check if tx queue is not empty */
		if (ndlp->nlp_tx[channelno].q_first) {
			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first = ndlp->nlp_tx[channelno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
			}

			abort.q_last = ndlp->nlp_tx[channelno].q_last;
			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
		}

		/* Clear the queue pointers */
		ndlp->nlp_ptx[channelno].q_first = NULL;
		ndlp->nlp_ptx[channelno].q_last = NULL;
		ndlp->nlp_ptx[channelno].q_cnt = 0;

		ndlp->nlp_tx[channelno].q_first = NULL;
		ndlp->nlp_tx[channelno].q_last = NULL;
		ndlp->nlp_tx[channelno].q_cnt = 0;

		/* Remove node from service queue */

		/* If this is the last node on list */
		if (cp->nodeq.q_last == (void *)ndlp) {
			cp->nodeq.q_last = NULL;
			cp->nodeq.q_first = NULL;
			cp->nodeq.q_cnt = 0;
		} else {
			/* Remove node from head */
			cp->nodeq.q_first = ndlp->nlp_next[channelno];
			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
			    cp->nodeq.q_first;
			cp->nodeq.q_cnt--;
		}

		/* Clear node */
		ndlp->nlp_next[channelno] = NULL;
	}

	/* First cleanup the iocb's while still holding the lock */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp = iocbq->sbp;
			if (sbp) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			}
		} else {
			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
			    iocb->ULPIOTAG, 0);
		}

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);

			sbp->pkt_flags |= PACKET_IN_FLUSH;
			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}

			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;
	} /* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	/* Now abort the iocb's */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}
		/* Free the iocb and its associated buffers */
		else {
			icmd = &iocbq->iocb;

			/* SLI3 */
			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
					/* HBA is detaching or offlining */
					if (icmd->ULPCOMMAND !=
					    CMD_QUE_RING_LIST64_CN) {
						void *tmp;
						RING *rp;

						rp = &hba->sli.sli3.
						    ring[channelno];
						for (i = 0;
						    i < icmd->ULPBDECOUNT;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							tmp = (void *)mp;
							if (mp) {
								emlxs_mem_put(
								    hba,
								    MEM_BUF,
								    tmp);
							}
						}
					}

					emlxs_mem_put(hba, MEM_IOCB,
					    (void *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp,
					    iocbq);
				}
			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {

				emlxs_tx_put(iocbq, 1);
			}
		}

		iocbq = next;

	} /* end of while */

	/* Now trigger channel service */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
	}

	return (abort.q_cnt);

} /* emlxs_tx_channel_flush() */
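
/*
 * Pattern note, applying to all of the tx flush routines here: iocbs are
 * first collected onto a local abort queue and marked PACKET_IN_FLUSH while
 * EMLXS_TX_CHANNEL_LOCK is held, and only after the lock is dropped are
 * they completed back to the ULP, presumably because completion callbacks
 * can re-enter the driver's transmit path.
 */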


/* Flush all IO's on all or a given ring for a given node */
extern uint32_t
emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan,
    uint32_t shutdown, emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	uint32_t channelno;
	CHANNEL *cp;
	IOCB *icmd;
	IOCBQ *iocbq;
	NODELIST *prev;
	IOCBQ *next;
	IOCB *iocb;
	Q abort;
	uint32_t i;
	MATCHMAP *mp;
	uint8_t flag[MAX_CHANNEL];

	bzero((void *)&abort, sizeof (Q));

	/* flag[] drives the trigger loop below; it must start zeroed */
	bzero((void *)flag, sizeof (flag));

	/* Flush all I/O's on tx queue to this target */
	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	if (!ndlp->nlp_base && shutdown) {
		ndlp->nlp_active = 0;
	}

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		if (chan && cp != chan) {
			continue;
		}

		if (!ndlp->nlp_base || shutdown) {
			/* Check if priority queue is not empty */
			if (ndlp->nlp_ptx[channelno].q_first) {
				/* Transfer all iocb's to local queue */
				if (abort.q_first == 0) {
					abort.q_first =
					    ndlp->nlp_ptx[channelno].q_first;
				} else {
					((IOCBQ *)(abort.q_last))->next =
					    (IOCBQ *)ndlp->nlp_ptx[channelno].
					    q_first;
				}

				flag[channelno] = 1;

				abort.q_last = ndlp->nlp_ptx[channelno].q_last;
				abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
			}
		}

		/* Check if tx queue is not empty */
		if (ndlp->nlp_tx[channelno].q_first) {

			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first = ndlp->nlp_tx[channelno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
			}

			abort.q_last = ndlp->nlp_tx[channelno].q_last;
			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
		}

		/* Clear the queue pointers */
		ndlp->nlp_ptx[channelno].q_first = NULL;
		ndlp->nlp_ptx[channelno].q_last = NULL;
		ndlp->nlp_ptx[channelno].q_cnt = 0;

		ndlp->nlp_tx[channelno].q_first = NULL;
		ndlp->nlp_tx[channelno].q_last = NULL;
		ndlp->nlp_tx[channelno].q_cnt = 0;

		/* If this node was on the channel queue, remove it */
		if (ndlp->nlp_next[channelno]) {
			/* If this is the only node on list */
			if (cp->nodeq.q_first == (void *)ndlp &&
			    cp->nodeq.q_last == (void *)ndlp) {
				cp->nodeq.q_last = NULL;
				cp->nodeq.q_first = NULL;
				cp->nodeq.q_cnt = 0;
			} else if (cp->nodeq.q_first == (void *)ndlp) {
				cp->nodeq.q_first = ndlp->nlp_next[channelno];
				((NODELIST *)cp->nodeq.q_last)->
				    nlp_next[channelno] = cp->nodeq.q_first;
				cp->nodeq.q_cnt--;
			} else {
				/*
				 * This is a little more difficult: find the
				 * previous node in the circular channel queue
				 */
				prev = ndlp;
				while (prev->nlp_next[channelno] != ndlp) {
					prev = prev->nlp_next[channelno];
				}

				prev->nlp_next[channelno] =
				    ndlp->nlp_next[channelno];

				if (cp->nodeq.q_last == (void *)ndlp) {
					cp->nodeq.q_last = (void *)prev;
				}
				cp->nodeq.q_cnt--;

			}

			/* Clear node */
			ndlp->nlp_next[channelno] = NULL;
		}

	}

	/* First cleanup the iocb's while still holding the lock */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp = iocbq->sbp;
			if (sbp) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			}
		} else {
			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
			    iocb->ULPIOTAG, 0);
		}

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_FLUSH;
			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}

			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;

	} /* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	/* Now abort the iocb's outside the locks */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}
		/* Free the iocb and its associated buffers */
		else {
			/* CMD_CLOSE_XRI_CN should also free the memory */
			icmd = &iocbq->iocb;

			/* SLI3 */
			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
					/* HBA is detaching or offlining */
					if (icmd->ULPCOMMAND !=
					    CMD_QUE_RING_LIST64_CN) {
						void *tmp;
						RING *rp;
						int ch;

						ch = ((CHANNEL *)
						    iocbq->channel)->channelno;
						rp = &hba->sli.sli3.ring[ch];
						for (i = 0;
						    i < icmd->ULPBDECOUNT;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							tmp = (void *)mp;
							if (mp) {
								emlxs_mem_put(
								    hba,
								    MEM_BUF,
								    tmp);
							}
						}
					}

					emlxs_mem_put(hba, MEM_IOCB,
					    (void *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
					    (CHANNEL *)iocbq->channel, iocbq);
				}
			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
				/*
				 * Resend the abort iocbq if any
				 */
				emlxs_tx_put(iocbq, 1);
			}
		}

		iocbq = next;

	} /* end of while */

	/* Now trigger channel service */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
	}

	return (abort.q_cnt);

} /* emlxs_tx_node_flush() */
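
/*
 * The per-channel service queue walked above is a circular, singly linked
 * list threaded through nlp_next[channelno], so deleting an arbitrary node
 * means walking to its predecessor. A minimal sketch of that removal, with
 * illustrative types (not the driver's own):
 *
 *	struct cnode { struct cnode *next; };
 *
 *	void
 *	circ_remove(struct cnode **head, struct cnode **tail, struct cnode *n)
 *	{
 *		struct cnode *prev = n;
 *
 *		if (*head == n && *tail == n) {
 *			*head = *tail = NULL;	only node on the list
 *			return;
 *		}
 *		while (prev->next != n)		find the predecessor
 *			prev = prev->next;
 *		prev->next = n->next;
 *		if (*head == n)
 *			*head = n->next;
 *		if (*tail == n)
 *			*tail = prev;
 *	}
 */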


/* Check for IO's on all or a given ring for a given node */
extern uint32_t
emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan)
{
	emlxs_hba_t *hba = HBA;
	uint32_t channelno;
	CHANNEL *cp;
	uint32_t count;

	count = 0;

	/* Count the I/O's on the tx queues to this target */
	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		if (chan && cp != chan) {
			continue;
		}

		/* Check if priority queue is not empty */
		if (ndlp->nlp_ptx[channelno].q_first) {
			count += ndlp->nlp_ptx[channelno].q_cnt;
		}

		/* Check if tx queue is not empty */
		if (ndlp->nlp_tx[channelno].q_first) {
			count += ndlp->nlp_tx[channelno].q_cnt;
		}

	}

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	return (count);

} /* emlxs_tx_node_check() */



/* Flush all IO's on any ring for a given node's lun */
extern uint32_t
emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
    emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	uint32_t channelno;
	IOCBQ *iocbq;
	IOCBQ *prev;
	IOCBQ *next;
	IOCB *iocb;
	IOCB *icmd;
	Q abort;
	uint32_t i;
	MATCHMAP *mp;
	uint8_t flag[MAX_CHANNEL];

	if (lun == EMLXS_LUN_NONE) {
		return (0);
	}

	bzero((void *)&abort, sizeof (Q));

	/* flag[] drives the trigger loop below; it must start zeroed */
	bzero((void *)flag, sizeof (flag));

	/* Flush I/O's on txQ to this target's lun */
	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	for (channelno = 0; channelno < hba->chan_count; channelno++) {

		/* Scan the priority queue first */
		prev = NULL;
		iocbq = (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;

		while (iocbq) {
			next = (IOCBQ *)iocbq->next;
			iocb = &iocbq->iocb;
			sbp = (emlxs_buf_t *)iocbq->sbp;

			/* Check if this IO is for our lun */
			if (sbp && (sbp->lun == lun)) {
				/* Remove iocb from the node's ptx queue */
				if (next == 0) {
					ndlp->nlp_ptx[channelno].q_last =
					    (uint8_t *)prev;
				}

				if (prev == 0) {
					ndlp->nlp_ptx[channelno].q_first =
					    (uint8_t *)next;
				} else {
					prev->next = next;
				}

				iocbq->next = NULL;
				ndlp->nlp_ptx[channelno].q_cnt--;

				/*
				 * Add this iocb to our local abort Q
				 */
				if (abort.q_first) {
					((IOCBQ *)abort.q_last)->next = iocbq;
					abort.q_last = (uint8_t *)iocbq;
					abort.q_cnt++;
				} else {
					abort.q_first = (uint8_t *)iocbq;
					abort.q_last = (uint8_t *)iocbq;
					abort.q_cnt = 1;
				}
				flag[channelno] = 1;

			} else {
				prev = iocbq;
			}

			iocbq = next;

		} /* while (iocbq) */


		/* Scan the regular queue */
		prev = NULL;
		iocbq = (IOCBQ *)ndlp->nlp_tx[channelno].q_first;

		while (iocbq) {
			next = (IOCBQ *)iocbq->next;
			iocb = &iocbq->iocb;
			sbp = (emlxs_buf_t *)iocbq->sbp;

			/* Check if this IO is for our lun */
			if (sbp && (sbp->lun == lun)) {
				/* Remove iocb from the node's tx queue */
				if (next == 0) {
					ndlp->nlp_tx[channelno].q_last =
					    (uint8_t *)prev;
				}

				if (prev == 0) {
					ndlp->nlp_tx[channelno].q_first =
					    (uint8_t *)next;
				} else {
					prev->next = next;
				}

				iocbq->next = NULL;
				ndlp->nlp_tx[channelno].q_cnt--;

				/*
				 * Add this iocb to our local abort Q
				 */
				if (abort.q_first) {
					((IOCBQ *)abort.q_last)->next = iocbq;
					abort.q_last = (uint8_t *)iocbq;
					abort.q_cnt++;
				} else {
					abort.q_first = (uint8_t *)iocbq;
					abort.q_last = (uint8_t *)iocbq;
					abort.q_cnt = 1;
				}
			} else {
				prev = iocbq;
			}

			iocbq = next;

		} /* while (iocbq) */
	} /* for loop */

	/* First cleanup the iocb's while still holding the lock */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp = iocbq->sbp;
			if (sbp) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			}
		} else {
			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
			    iocb->ULPIOTAG, 0);
		}

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_FLUSH;
			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}

			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;

	} /* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	/* Now abort the iocb's outside the locks */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}
		}

		/* Free the iocb and its associated buffers */
		else {
			/* Should never happen! */
			icmd = &iocbq->iocb;

			/* SLI3 */
			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
					/* HBA is detaching or offlining */
					if (icmd->ULPCOMMAND !=
					    CMD_QUE_RING_LIST64_CN) {
						void *tmp;
						RING *rp;
						int ch;

						ch = ((CHANNEL *)
						    iocbq->channel)->channelno;
						rp = &hba->sli.sli3.ring[ch];
						for (i = 0;
						    i < icmd->ULPBDECOUNT;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							tmp = (void *)mp;
							if (mp) {
								emlxs_mem_put(
								    hba,
								    MEM_BUF,
								    tmp);
							}
						}
					}

					emlxs_mem_put(hba, MEM_IOCB,
					    (void *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
					    (CHANNEL *)iocbq->channel, iocbq);
				}
			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
				/*
				 * Resend the abort iocbq if any
				 */
				emlxs_tx_put(iocbq, 1);
			}
		}

		iocbq = next;

	} /* end of while */

	/* Now trigger channel service */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
	}

	return (abort.q_cnt);

} /* emlxs_tx_lun_flush() */


extern void
emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	uint32_t channelno;
	NODELIST *nlp;
	CHANNEL *cp;
	emlxs_buf_t *sbp;

	port = (emlxs_port_t *)iocbq->port;
	hba = HBA;
	cp = (CHANNEL *)iocbq->channel;
	nlp = (NODELIST *)iocbq->node;
	channelno = cp->channelno;
	sbp = (emlxs_buf_t *)iocbq->sbp;

	if (nlp == NULL) {
		/* Set node to base node by default */
		nlp = &port->node_base;

		iocbq->node = (void *)nlp;

		if (sbp) {
			sbp->node = (void *)nlp;
		}
	}

	if (lock) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
	}

	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
		if (sbp) {
			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_FLUSH;
			mutex_exit(&sbp->mtx);

			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			} else {
				(void) emlxs_unregister_pkt(cp, sbp->iotag, 0);
			}

			if (lock) {
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
			}

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}
			return;
		} else {
			if (lock) {
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
			}

			emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
		}

		return;
	}

	if (sbp) {

		mutex_enter(&sbp->mtx);

		if (sbp->pkt_flags &
		    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
			mutex_exit(&sbp->mtx);
			if (lock) {
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
			}
			return;
		}

		sbp->pkt_flags |= PACKET_IN_TXQ;
		hba->channel_tx_count++;

		mutex_exit(&sbp->mtx);
	}


	/* Check iocbq priority */
	/* Some IOCBs, such as reset or close XRI, have high priority */
	if (iocbq->flag & IOCB_PRIORITY) {
		/* Add the iocb to the bottom of the node's ptx queue */
		if (nlp->nlp_ptx[channelno].q_first) {
			((IOCBQ *)nlp->nlp_ptx[channelno].q_last)->next = iocbq;
			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
			nlp->nlp_ptx[channelno].q_cnt++;
		} else {
			nlp->nlp_ptx[channelno].q_first = (uint8_t *)iocbq;
			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
			nlp->nlp_ptx[channelno].q_cnt = 1;
		}

		iocbq->next = NULL;
	} else {	/* Normal priority */


		/* Add the iocb to the bottom of the node's tx queue */
		if (nlp->nlp_tx[channelno].q_first) {
			((IOCBQ *)nlp->nlp_tx[channelno].q_last)->next = iocbq;
			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
			nlp->nlp_tx[channelno].q_cnt++;
		} else {
			nlp->nlp_tx[channelno].q_first = (uint8_t *)iocbq;
			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
			nlp->nlp_tx[channelno].q_cnt = 1;
		}

		iocbq->next = NULL;
	}


	/*
	 * Check if the node is not already on channel queue and
	 * (is not closed or is a priority request)
	 */
	if (!nlp->nlp_next[channelno] &&
	    (!(nlp->nlp_flag[channelno] & NLP_CLOSED) ||
	    (iocbq->flag & IOCB_PRIORITY))) {
		/* If so, then add it to the channel queue */
		if (cp->nodeq.q_first) {
			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
			    (uint8_t *)nlp;
			nlp->nlp_next[channelno] = cp->nodeq.q_first;

			/*
			 * If this is not the base node then add it
			 * to the tail
			 */
			if (!nlp->nlp_base) {
				cp->nodeq.q_last = (uint8_t *)nlp;
			} else {	/* Otherwise, add it to the head */

				/* The command node always gets priority */
				cp->nodeq.q_first = (uint8_t *)nlp;
			}

			cp->nodeq.q_cnt++;
		} else {
			cp->nodeq.q_first = (uint8_t *)nlp;
			cp->nodeq.q_last = (uint8_t *)nlp;
			nlp->nlp_next[channelno] = nlp;
			cp->nodeq.q_cnt = 1;
		}
	}

	HBASTATS.IocbTxPut[channelno]++;

	/* Adjust the channel timeout timer */
	cp->timeout = hba->timer_tics + 5;

	if (lock) {
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	}

	return;

} /* emlxs_tx_put() */
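
/*
 * Queueing rules implemented by emlxs_tx_put(), in summary: priority iocbs
 * (reset, close XRI, etc.) go on the node's ptx queue and everything else
 * on its tx queue; a node not yet on the channel's circular service queue
 * is linked in at the tail, except for the base (command) node, which is
 * linked in at the head so that it is always serviced first.
 */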


extern IOCBQ *
emlxs_tx_get(CHANNEL *cp, uint32_t lock)
{
	emlxs_hba_t *hba;
	uint32_t channelno;
	IOCBQ *iocbq;
	NODELIST *nlp;
	emlxs_buf_t *sbp;

	hba = cp->hba;
	channelno = cp->channelno;

	if (lock) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
	}

begin:

	iocbq = NULL;

	/* Check if a node needs servicing */
	if (cp->nodeq.q_first) {
		nlp = (NODELIST *)cp->nodeq.q_first;

		/* Get next iocb from node's priority queue */

		if (nlp->nlp_ptx[channelno].q_first) {
			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;

			/* Check if this is last entry */
			if (nlp->nlp_ptx[channelno].q_last == (void *)iocbq) {
				nlp->nlp_ptx[channelno].q_first = NULL;
				nlp->nlp_ptx[channelno].q_last = NULL;
				nlp->nlp_ptx[channelno].q_cnt = 0;
			} else {
				/* Remove iocb from head */
				nlp->nlp_ptx[channelno].q_first =
				    (void *)iocbq->next;
				nlp->nlp_ptx[channelno].q_cnt--;
			}

			iocbq->next = NULL;
		}

		/* Get next iocb from node tx queue if node not closed */
		else if (nlp->nlp_tx[channelno].q_first &&
		    !(nlp->nlp_flag[channelno] & NLP_CLOSED)) {
			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;

			/* Check if this is last entry */
			if (nlp->nlp_tx[channelno].q_last == (void *)iocbq) {
				nlp->nlp_tx[channelno].q_first = NULL;
				nlp->nlp_tx[channelno].q_last = NULL;
				nlp->nlp_tx[channelno].q_cnt = 0;
			} else {
				/* Remove iocb from head */
				nlp->nlp_tx[channelno].q_first =
				    (void *)iocbq->next;
				nlp->nlp_tx[channelno].q_cnt--;
			}

			iocbq->next = NULL;
		}

		/* Now deal with node itself */

		/* Check if node still needs servicing */
		if ((nlp->nlp_ptx[channelno].q_first) ||
		    (nlp->nlp_tx[channelno].q_first &&
		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {

			/*
			 * If this is the base node, then don't shift the
			 * pointers. We want to drain the base node before
			 * moving on
			 */
			if (!nlp->nlp_base) {
				/*
				 * Just shift channel queue pointers to next
				 * node
				 */
				cp->nodeq.q_last = (void *)nlp;
				cp->nodeq.q_first = nlp->nlp_next[channelno];
			}
		} else {
			/* Remove node from channel queue */

			/* If this is the last node on list */
			if (cp->nodeq.q_last == (void *)nlp) {
				cp->nodeq.q_last = NULL;
				cp->nodeq.q_first = NULL;
				cp->nodeq.q_cnt = 0;
			} else {
				/* Remove node from head */
				cp->nodeq.q_first = nlp->nlp_next[channelno];
				((NODELIST *)cp->nodeq.q_last)->
				    nlp_next[channelno] = cp->nodeq.q_first;
				cp->nodeq.q_cnt--;

			}

			/* Clear node */
			nlp->nlp_next[channelno] = NULL;
		}

		/*
		 * If no iocbq was found on this node, then it will have
		 * been removed. So try again.
		 */
		if (!iocbq) {
			goto begin;
		}

		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			/*
			 * Check flags before we enter mutex in case this
			 * has been flushed and destroyed
			 */
			if ((sbp->pkt_flags &
			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
				goto begin;
			}

			mutex_enter(&sbp->mtx);

			if ((sbp->pkt_flags &
			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
				mutex_exit(&sbp->mtx);
				goto begin;
			}

			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->channel_tx_count--;

			mutex_exit(&sbp->mtx);
		}
	}

	if (iocbq) {
		HBASTATS.IocbTxGet[channelno]++;
	}

	/* Adjust the ring timeout timer */
	cp->timeout = (cp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;

	if (lock) {
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	}

	return (iocbq);

} /* emlxs_tx_get() */
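
/*
 * emlxs_tx_get() thus implements a simple round robin over nodes: after one
 * iocb is taken from the head node, the head/tail pointers rotate so the
 * next node is serviced, except that the base node is drained completely
 * before rotation resumes. The "goto begin" retries cover the cases where
 * the head node yielded no iocb (e.g. its tx queue was closed) or where the
 * chosen packet was flushed out from under us before its flags were locked.
 */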


/*
 * Move all FCP commands from from_chan's txq toward to_chan's txq for ndlp.
 * The old IoTag has to be released and a new one allocated; nothing else
 * changes. Called with the TX_CHANNEL lock held, unless 'lock' is set.
 */
extern void
emlxs_tx_move(NODELIST *ndlp, CHANNEL *from_chan, CHANNEL *to_chan,
    uint32_t cmd, emlxs_buf_t *fpkt, uint32_t lock)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	uint32_t fchanno, tchanno, i;

	IOCBQ *iocbq;
	IOCBQ *prev;
	IOCBQ *next;
	IOCB *iocb, *icmd;
	Q tbm;		/* To Be Moved Q */
	MATCHMAP *mp;

	NODELIST *nlp = ndlp;
	emlxs_buf_t *sbp;

	NODELIST *n_prev = NULL;
	NODELIST *n_next = NULL;
	uint16_t count = 0;

	hba = from_chan->hba;
	port = &PPORT;
	cmd = cmd;	/* To pass lint */

	fchanno = from_chan->channelno;
	tchanno = to_chan->channelno;

	if (lock) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
	}

	bzero((void *)&tbm, sizeof (Q));

	/* Scan the ndlp's fchanno txq for FCP command iocbs */
	prev = NULL;
	iocbq = (IOCBQ *)nlp->nlp_tx[fchanno].q_first;

	while (iocbq) {
		next = (IOCBQ *)iocbq->next;
		/* Check if this iocb is an fcp cmd */
		iocb = &iocbq->iocb;

		switch (iocb->ULPCOMMAND) {
		/* FCP commands */
		case CMD_FCP_ICMND_CR:
		case CMD_FCP_ICMND_CX:
		case CMD_FCP_IREAD_CR:
		case CMD_FCP_IREAD_CX:
		case CMD_FCP_IWRITE_CR:
		case CMD_FCP_IWRITE_CX:
		case CMD_FCP_ICMND64_CR:
		case CMD_FCP_ICMND64_CX:
		case CMD_FCP_IREAD64_CR:
		case CMD_FCP_IREAD64_CX:
		case CMD_FCP_IWRITE64_CR:
		case CMD_FCP_IWRITE64_CX:
			/* We found an fcp cmd */
			break;
		default:
			/* This is not an fcp cmd; continue */
			prev = iocbq;
			iocbq = next;
			continue;
		}

		/* Found an fcp cmd iocb in fchanno txq; now dequeue it */
		if (next == NULL) {
			/* This is the last iocbq */
			nlp->nlp_tx[fchanno].q_last =
			    (uint8_t *)prev;
		}

		if (prev == NULL) {
			/* This is the first one; remove it from the head */
			nlp->nlp_tx[fchanno].q_first =
			    (uint8_t *)next;
		} else {
			prev->next = next;
		}

		iocbq->next = NULL;
		nlp->nlp_tx[fchanno].q_cnt--;

		/* Add this iocb to our local to-be-moved queue */
		/* This way we do not hold the TX_CHANNEL lock too long */

		if (tbm.q_first) {
			((IOCBQ *)tbm.q_last)->next = iocbq;
			tbm.q_last = (uint8_t *)iocbq;
			tbm.q_cnt++;
		} else {
			tbm.q_first = (uint8_t *)iocbq;
			tbm.q_last = (uint8_t *)iocbq;
			tbm.q_cnt = 1;
		}

		iocbq = next;

	} /* While (iocbq) */

	if ((tchanno == hba->channel_fcp) && (tbm.q_cnt != 0)) {

		/* from_chan->nodeq.q_first must be non NULL */
		if (from_chan->nodeq.q_first) {

			/* nodeq is not empty, now deal with the node itself */
			if ((nlp->nlp_tx[fchanno].q_first)) {

				if (!nlp->nlp_base) {
					from_chan->nodeq.q_last =
					    (void *)nlp;
					from_chan->nodeq.q_first =
					    nlp->nlp_next[fchanno];
				}

			} else {
				n_prev = (NODELIST *)from_chan->nodeq.q_first;
				count = from_chan->nodeq.q_cnt;

				if (n_prev == nlp) {

					/* If this is the only node on list */
					if (from_chan->nodeq.q_last ==
					    (void *)nlp) {
						from_chan->nodeq.q_last = NULL;
						from_chan->nodeq.q_first = NULL;
						from_chan->nodeq.q_cnt = 0;
					} else {
						from_chan->nodeq.q_first =
						    nlp->nlp_next[fchanno];
						((NODELIST *)from_chan->
						    nodeq.q_last)->
						    nlp_next[fchanno] =
						    from_chan->nodeq.q_first;
						from_chan->nodeq.q_cnt--;
					}
					/* Clear node */
					nlp->nlp_next[fchanno] = NULL;
				} else {
					count--;
					do {
						n_next =
						    n_prev->nlp_next[fchanno];
						if (n_next == nlp) {
							break;
						}
						n_prev = n_next;
					} while (count--);

					if (count != 0) {

						if (n_next ==
						    (NODELIST *)from_chan->
						    nodeq.q_last) {
							n_prev->
							    nlp_next[fchanno] =
							    ((NODELIST *)
							    from_chan->
							    nodeq.q_last)->
							    nlp_next[fchanno];
							from_chan->
							    nodeq.q_last =
							    (uint8_t *)n_prev;
						} else {
							n_prev->
							    nlp_next[fchanno] =
							    n_next->
							    nlp_next[fchanno];
						}
						from_chan->nodeq.q_cnt--;
						/* Clear node */
						nlp->nlp_next[fchanno] = NULL;
					}
				}
			}
		}
	}

	/* Now cleanup the iocb's */
	prev = NULL;
	iocbq = (IOCBQ *)tbm.q_first;

	while (iocbq) {

		next = (IOCBQ *)iocbq->next;

		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp = iocbq->sbp;
			if (sbp) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			}
		} else {
			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
			    iocb->ULPIOTAG, 0);
		}

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_FLUSH;

			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}
			mutex_exit(&sbp->mtx);
		}
		iocbq = next;

	} /* end of while */

	iocbq = (IOCBQ *)tbm.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}
		/* Free the iocb and its associated buffers */
		else {
			icmd = &iocbq->iocb;

			/* SLI3 */
			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
					/* HBA is detaching or offlining */
					if (icmd->ULPCOMMAND !=
					    CMD_QUE_RING_LIST64_CN) {
						void *tmp;
						RING *rp;
						int ch;

						ch = from_chan->channelno;
						rp = &hba->sli.sli3.ring[ch];

						for (i = 0;
						    i < icmd->ULPBDECOUNT;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							tmp = (void *)mp;
							if (mp) {
								emlxs_mem_put(
								    hba,
								    MEM_BUF,
								    tmp);
							}
						}

					}

					emlxs_mem_put(hba, MEM_IOCB,
					    (void *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
					    from_chan, iocbq);
				}
			}
		}

		iocbq = next;

	} /* end of while */

	/* Now flush the chipq if any */
	if (!(nlp->nlp_flag[fchanno] & NLP_CLOSED)) {

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

		(void) emlxs_chipq_node_flush(port, from_chan, nlp, 0);

		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
	}

	if (lock) {
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	}

	return;

} /* emlxs_tx_move */
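
/*
 * Note that emlxs_tx_move() does not literally re-queue iocbs on to_chan:
 * the FCP commands gathered on the local "tbm" queue are completed back to
 * the ULP with IOERR_ABORT_REQUESTED (or IOERR_LINK_DOWN), and the node's
 * chipq is flushed as well; the apparent intent is that the ULP retries the
 * commands and the retries are then routed to the new channel.
 */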


extern uint32_t
emlxs_chipq_node_flush(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp,
    emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCBQ *next;
	Q abort;
	CHANNEL *cp;
	uint32_t channelno;
	uint8_t flag[MAX_CHANNEL];
	uint32_t iotag;

	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		if (chan && cp != chan) {
			continue;
		}

		mutex_enter(&EMLXS_FCTAB_LOCK);

		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
			sbp = hba->fc_table[iotag];

			if (sbp && (sbp != STALE_PACKET) &&
			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
			    (sbp->node == ndlp) &&
			    (sbp->channel == cp) &&
			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
				emlxs_sbp_abort_add(port, sbp, &abort, flag,
				    fpkt);
			}

		}
		mutex_exit(&EMLXS_FCTAB_LOCK);

	} /* for */

	/* Now put the iocb's on the tx queue */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Send this iocbq */
		emlxs_tx_put(iocbq, 1);

		iocbq = next;
	}

	/* Now trigger channel service */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
	}

	return (abort.q_cnt);

} /* emlxs_chipq_node_flush() */


/* Flush all IO's left on all iotag lists */
extern uint32_t
emlxs_iotag_flush(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCB *iocb;
	Q abort;
	CHANNEL *cp;
	uint32_t channelno;
	uint32_t iotag;
	uint32_t count;

	count = 0;
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		bzero((void *)&abort, sizeof (Q));

		mutex_enter(&EMLXS_FCTAB_LOCK);

		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
			sbp = hba->fc_table[iotag];

			/* Check if the slot is empty */
			if (!sbp || (sbp == STALE_PACKET)) {
				continue;
			}

			/* We are building an abort list per channel */
			if (sbp->channel != cp) {
				continue;
			}

			hba->fc_table[iotag] = STALE_PACKET;
			hba->io_count--;

			/* Check if IO is valid */
			if (!(sbp->pkt_flags & PACKET_VALID) ||
			    (sbp->pkt_flags & (PACKET_ULP_OWNED|
			    PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
				    "iotag_flush: Invalid IO found. iotag=%d",
				    iotag);

				continue;
			}

			sbp->iotag = 0;

			/* Set IOCB status */
			iocbq = &sbp->iocbq;
			iocb = &iocbq->iocb;

			iocb->ULPSTATUS = IOSTAT_LOCAL_REJECT;
			iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
			iocb->ULPLE = 1;
			iocbq->next = NULL;

			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
				if (sbp->xrip) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_debug_msg,
					    "iotag_flush: iotag=%d sbp=%p "
					    "xrip=%p state=%x flag=%x",
					    iotag, sbp, sbp->xrip,
					    sbp->xrip->state, sbp->xrip->flag);
				} else {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_debug_msg,
					    "iotag_flush: iotag=%d sbp=%p "
					    "xrip=NULL", iotag, sbp);
				}

				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
			} else {
				/* Clean up the sbp */
				mutex_enter(&sbp->mtx);

				if (sbp->pkt_flags & PACKET_IN_TXQ) {
					sbp->pkt_flags &= ~PACKET_IN_TXQ;
					hba->channel_tx_count--;
				}

				if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
					sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
				}

				if (sbp->bmp) {
					emlxs_mem_put(hba, MEM_BPL,
					    (void *)sbp->bmp);
					sbp->bmp = 0;
				}

				mutex_exit(&sbp->mtx);
			}

			/* At this point all nodes are assumed destroyed */
			mutex_enter(&sbp->mtx);
			sbp->node = 0;
			mutex_exit(&sbp->mtx);

			/* Add this iocb to our local abort Q */
			if (abort.q_first) {
				((IOCBQ *)abort.q_last)->next = iocbq;
				abort.q_last = (uint8_t *)iocbq;
				abort.q_cnt++;
			} else {
				abort.q_first = (uint8_t *)iocbq;
				abort.q_last = (uint8_t *)iocbq;
				abort.q_cnt = 1;
			}
		}

		mutex_exit(&EMLXS_FCTAB_LOCK);

		/* Trigger deferred completion */
		if (abort.q_first) {
			mutex_enter(&cp->rsp_lock);
			if (cp->rsp_head == NULL) {
				cp->rsp_head = (IOCBQ *)abort.q_first;
				cp->rsp_tail = (IOCBQ *)abort.q_last;
			} else {
				cp->rsp_tail->next = (IOCBQ *)abort.q_first;
				cp->rsp_tail = (IOCBQ *)abort.q_last;
			}
			mutex_exit(&cp->rsp_lock);

			emlxs_thread_trigger2(&cp->intr_thread,
			    emlxs_proc_channel, cp);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "iotag_flush: channel=%d count=%d",
			    channelno, abort.q_cnt);

			count += abort.q_cnt;
		}
	}

	return (count);

} /* emlxs_iotag_flush() */
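
/*
 * Unlike the other flush routines, emlxs_iotag_flush() never calls
 * emlxs_pkt_complete() itself: each channel's stale iocbs are chained onto
 * cp->rsp_head/rsp_tail and the channel's interrupt thread is kicked via
 * emlxs_thread_trigger2(), so the completions run in the same context as
 * normal response processing.
 */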



/* Checks for IO's on all or a given channel for a given node */
extern uint32_t
emlxs_chipq_node_check(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	CHANNEL *cp;
	uint32_t channelno;
	uint32_t count;
	uint32_t iotag;

	count = 0;

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		if (chan && cp != chan) {
			continue;
		}

		mutex_enter(&EMLXS_FCTAB_LOCK);

		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
			sbp = hba->fc_table[iotag];

			if (sbp && (sbp != STALE_PACKET) &&
			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
			    (sbp->node == ndlp) &&
			    (sbp->channel == cp) &&
			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
				count++;
			}

		}
		mutex_exit(&EMLXS_FCTAB_LOCK);

	} /* for */

	return (count);

} /* emlxs_chipq_node_check() */



/* Flush all IO's for a given node's lun (on any channel) */
extern uint32_t
emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
    uint32_t lun, emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCBQ *next;
	Q abort;
	uint32_t iotag;
	uint8_t flag[MAX_CHANNEL];
	uint32_t channelno;

	if (lun == EMLXS_LUN_NONE) {
		return (0);
	}

	bzero((void *)flag, sizeof (flag));
	bzero((void *)&abort, sizeof (Q));

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
		sbp = hba->fc_table[iotag];

		if (sbp && (sbp != STALE_PACKET) &&
		    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
		    (sbp->node == ndlp) &&
		    (sbp->lun == lun) &&
		    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
			emlxs_sbp_abort_add(port, sbp,
			    &abort, flag, fpkt);
		}
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Now put the iocb's on the tx queue */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Send this iocbq */
		emlxs_tx_put(iocbq, 1);

		iocbq = next;
	}

	/* Now trigger channel service */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
	}

	return (abort.q_cnt);

} /* emlxs_chipq_lun_flush() */



/*
 * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
 * This must be called while holding the EMLXS_FCTAB_LOCK
 */
extern IOCBQ *
emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
    uint16_t iotag, CHANNEL *cp, uint8_t class, int32_t flag)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	emlxs_buf_t *sbp;
	uint16_t abort_iotag;

	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
		return (NULL);
	}

	iocbq->channel = (void *)cp;
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;
	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

	/*
	 * Set up an iotag using the special abort iotag range
	 */
	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
		hba->fc_oor_iotag = hba->max_iotag;
	}
	abort_iotag = hba->fc_oor_iotag++;


	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		wqe = &iocbq->wqe;
		sbp = hba->fc_table[iotag];

		/* Try to issue abort by XRI if possible */
		if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
			wqe->AbortTag = iotag;
		} else {
			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
			wqe->AbortTag = sbp->xrip->XRI;
		}
		wqe->un.Abort.IA = 0;
		wqe->RequestTag = abort_iotag;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = (uint16_t)0xffff;	/* default CQ for response */
		wqe->CmdType = WQE_TYPE_ABORT;
	} else {
		iocb = &iocbq->iocb;
		iocb->ULPIOTAG = abort_iotag;
		iocb->un.acxri.abortType = flag;
		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
		iocb->un.acxri.abortIoTag = iotag;
		iocb->ULPLE = 1;
		iocb->ULPCLASS = class;
		iocb->ULPCOMMAND = CMD_ABORT_XRI_CN;
		iocb->ULPOWNER = OWN_CHIP;
	}

	return (iocbq);

} /* emlxs_create_abort_xri_cn() */
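
/*
 * On SLI4 the abort WQE built above can name its victim two ways: by the
 * firmware-assigned exchange (ABORT_XRI_TAG) when an xrip is attached to
 * the sbp, or by the driver's own request tag (ABORT_REQ_TAG) otherwise;
 * the fallback apparently covers I/Os registered in fc_table[] before an
 * XRI has been bound to them.
 */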


/* This must be called while holding the EMLXS_FCTAB_LOCK */
extern IOCBQ *
emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
    CHANNEL *cp, uint8_t class, int32_t flag)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	uint16_t abort_iotag;

	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
		return (NULL);
	}

	iocbq->channel = (void *)cp;
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;
	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

	/*
	 * Set up an iotag using the special abort iotag range
	 */
	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
		hba->fc_oor_iotag = hba->max_iotag;
	}
	abort_iotag = hba->fc_oor_iotag++;

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		wqe = &iocbq->wqe;
		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
		wqe->un.Abort.IA = 0;
		wqe->RequestTag = abort_iotag;
		wqe->AbortTag = xid;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = (uint16_t)0xffff;	/* default CQ for response */
		wqe->CmdType = WQE_TYPE_ABORT;
	} else {
		iocb = &iocbq->iocb;
		iocb->ULPCONTEXT = xid;
		iocb->ULPIOTAG = abort_iotag;
		iocb->un.acxri.abortType = flag;
		iocb->ULPLE = 1;
		iocb->ULPCLASS = class;
		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
		iocb->ULPOWNER = OWN_CHIP;
	}

	return (iocbq);

} /* emlxs_create_abort_xri_cx() */



/* This must be called while holding the EMLXS_FCTAB_LOCK */
extern IOCBQ *
emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
    uint16_t iotag, CHANNEL *cp)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	emlxs_buf_t *sbp;
	uint16_t abort_iotag;

	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
		return (NULL);
	}

	iocbq->channel = (void *)cp;
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;
	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

	/*
	 * Set up an iotag using the special abort iotag range
	 */
	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
		hba->fc_oor_iotag = hba->max_iotag;
	}
	abort_iotag = hba->fc_oor_iotag++;

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		wqe = &iocbq->wqe;
		sbp = hba->fc_table[iotag];

		/* Try to issue close by XRI if possible */
		if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
			wqe->AbortTag = iotag;
		} else {
			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
			wqe->AbortTag = sbp->xrip->XRI;
		}
		wqe->un.Abort.IA = 1;
		wqe->RequestTag = abort_iotag;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = (uint16_t)0xffff;	/* default CQ for response */
		wqe->CmdType = WQE_TYPE_ABORT;
	} else {
		iocb = &iocbq->iocb;
		iocb->ULPIOTAG = abort_iotag;
		iocb->un.acxri.abortType = 0;
		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
		iocb->un.acxri.abortIoTag = iotag;
		iocb->ULPLE = 1;
		iocb->ULPCLASS = 0;
		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CN;
		iocb->ULPOWNER = OWN_CHIP;
	}

	return (iocbq);

} /* emlxs_create_close_xri_cn() */


/* This must be called while holding the EMLXS_FCTAB_LOCK */
extern IOCBQ *
emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
    CHANNEL *cp)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	uint16_t abort_iotag;

	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
		return (NULL);
	}

	iocbq->channel = (void *)cp;
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;
	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

	/*
	 * Set up an iotag using the special abort iotag range
	 */
	if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
		hba->fc_oor_iotag = hba->max_iotag;
	}
	abort_iotag = hba->fc_oor_iotag++;

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		wqe = &iocbq->wqe;
		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
		wqe->un.Abort.IA = 1;
		wqe->RequestTag = abort_iotag;
		wqe->AbortTag = xid;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = (uint16_t)0xffff;	/* default CQ for response */
		wqe->CmdType = WQE_TYPE_ABORT;
	} else {
		iocb = &iocbq->iocb;
		iocb->ULPCONTEXT = xid;
		iocb->ULPIOTAG = abort_iotag;
		iocb->ULPLE = 1;
		iocb->ULPCLASS = 0;
		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
		iocb->ULPOWNER = OWN_CHIP;
	}

	return (iocbq);

} /* emlxs_create_close_xri_cx() */
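
/*
 * The four emlxs_create_{abort,close}_xri_{cn,cx}() helpers above differ in
 * just two dimensions: abort vs. close (on SLI4 the close variants set
 * wqe->un.Abort.IA = 1, which appears to suppress the on-wire ABTS, while
 * the abort variants set IA = 0), and how the victim is named (_cn variants
 * take a driver iotag, _cx variants take an exchange id directly). All must
 * be called with EMLXS_FCTAB_LOCK held, and each returns an iocbq that the
 * caller is still responsible for issuing.
 */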


void
emlxs_close_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
{
	CHANNEL *cp;
	IOCBQ *iocbq;
	IOCB *iocb;

	if (rxid == 0 || rxid == 0xFFFF) {
		return;
	}

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
		    "Closing ELS exchange: xid=%x", rxid);

		if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
			return;
		}
	}

	cp = &hba->chan[hba->channel_els];

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Create the close IOCB */
	iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);

	mutex_exit(&EMLXS_FCTAB_LOCK);

	if (iocbq) {
		iocb = &iocbq->iocb;
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
		    "Closing ELS exchange: xid=%x iotag=%d", rxid,
		    iocb->ULPIOTAG);

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
	}

} /* emlxs_close_els_exchange() */


void
emlxs_abort_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
{
	CHANNEL *cp;
	IOCBQ *iocbq;
	IOCB *iocb;

	if (rxid == 0 || rxid == 0xFFFF) {
		return;
	}

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
		    "Aborting ELS exchange: xid=%x", rxid);

		if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
			/*
			 * We have no way to abort unsolicited exchanges
			 * that we have not responded to at this time,
			 * so we will return for now.
			 */
			return;
		}
	}

	cp = &hba->chan[hba->channel_els];

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Create the abort IOCB */
	if (hba->state >= FC_LINK_UP) {
		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
		    CLASS3, ABORT_TYPE_ABTS);
	} else {
		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	if (iocbq) {
		iocb = &iocbq->iocb;
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
		    "Aborting ELS exchange: xid=%x iotag=%d", rxid,
		    iocb->ULPIOTAG);

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
	}

} /* emlxs_abort_els_exchange() */


void
emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
{
	CHANNEL *cp;
	IOCBQ *iocbq;
	IOCB *iocb;

	if (rxid == 0 || rxid == 0xFFFF) {
		return;
	}

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
		    "Aborting CT exchange: xid=%x", rxid);

		if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
			/*
			 * We have no way to abort unsolicited exchanges
			 * that we have not responded to at this time,
			 * so we will return for now.
			 */
			return;
		}
	}

	cp = &hba->chan[hba->channel_ct];

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Create the abort IOCB */
	if (hba->state >= FC_LINK_UP) {
		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
		    CLASS3, ABORT_TYPE_ABTS);
	} else {
		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	if (iocbq) {
		iocb = &iocbq->iocb;
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
		    "Aborting CT exchange: xid=%x iotag=%d", rxid,
		    iocb->ULPIOTAG);

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
	}

} /* emlxs_abort_ct_exchange() */


/* This must be called while holding the EMLXS_FCTAB_LOCK */
static void
emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
    uint8_t *flag, emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	CHANNEL *cp;
	NODELIST *ndlp;

	cp = (CHANNEL *)sbp->channel;
	ndlp = sbp->node;

	/* Create the abort or close XRI IOCB */
	if (hba->state >= FC_LINK_UP) {
		iocbq = emlxs_create_abort_xri_cn(port, ndlp, sbp->iotag, cp,
		    CLASS3, ABORT_TYPE_ABTS);
	} else {
		iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, cp);
	}
	/*
	 * Add this iocb to our local abort Q
	 * This way we don't hold the EMLXS_FCTAB_LOCK too long
	 */
	if (iocbq) {
		if (abort->q_first) {
			((IOCBQ *)abort->q_last)->next = iocbq;
			abort->q_last = (uint8_t *)iocbq;
			abort->q_cnt++;
		} else {
			abort->q_first = (uint8_t *)iocbq;
			abort->q_last = (uint8_t *)iocbq;
			abort->q_cnt = 1;
		}
		iocbq->next = NULL;
	}

	/* set the flags */
	mutex_enter(&sbp->mtx);

	sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);

	sbp->ticks = hba->timer_tics + 10;
	sbp->abort_attempts++;

	flag[cp->channelno] = 1;

	/*
	 * If the fpkt is already set, then we will leave it alone.
	 * This ensures that this pkt is only accounted for on one
	 * fpkt->flush_count
	 */
	if (!sbp->fpkt && fpkt) {
		mutex_enter(&fpkt->mtx);
		sbp->fpkt = fpkt;
		fpkt->flush_count++;
		mutex_exit(&fpkt->mtx);
	}

	mutex_exit(&sbp->mtx);

	return;

} /* emlxs_sbp_abort_add() */