NEX-8705 Driver support for ATTO Celerity FC-162E Gen 5 and Celerity FC-162P Gen 6 16Gb FC cards
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
NEX-1878 update emlxs from source provided by Emulex
--- old/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_fcp.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_fcp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at
9 9 * http://www.opensource.org/licenses/cddl1.txt.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #include <emlxs.h>
28 28
29 29 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
30 30 EMLXS_MSG_DEF(EMLXS_FCP_C);
31 31
32 32 #define EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
33 33 PADDR(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));
34 34
35 35 static void emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
36 36 Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);
37 37
38 38 #define SCSI3_PERSISTENT_RESERVE_IN 0x5e
39 39 #define SCSI_INQUIRY 0x12
40 40 #define SCSI_RX_DIAG 0x1C
41 41
42 42
43 43 /*
44 44 * emlxs_handle_fcp_event
45 45 *
46 46 * Description: Process an FCP Rsp Ring completion
47 47 *
48 48 */
49 49 /* ARGSUSED */
50 50 extern void
51 51 emlxs_handle_fcp_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
52 52 {
53 53 emlxs_port_t *port = &PPORT;
54 54 emlxs_config_t *cfg = &CFG;
55 55 IOCB *cmd;
56 56 emlxs_buf_t *sbp;
57 57 fc_packet_t *pkt = NULL;
58 58 #ifdef SAN_DIAG_SUPPORT
59 59 NODELIST *ndlp;
60 60 #endif
61 61 uint32_t iostat;
62 62 uint8_t localstat;
63 63 fcp_rsp_t *rsp;
64 64 uint32_t rsp_data_resid;
65 65 uint32_t check_underrun;
66 66 uint8_t asc;
67 67 uint8_t ascq;
68 68 uint8_t scsi_status;
69 69 uint8_t sense;
70 70 uint32_t did;
71 71 uint32_t fix_it;
72 72 uint8_t *scsi_cmd;
73 73 uint8_t scsi_opcode;
74 74 uint16_t scsi_dl;
75 75 uint32_t data_rx;
76 76 uint32_t length;
77 77
78 78 cmd = &iocbq->iocb;
79 79
80 80 /* Initialize the status */
81 81 iostat = cmd->ULPSTATUS;
82 82 localstat = 0;
83 83 scsi_status = 0;
84 84 asc = 0;
85 85 ascq = 0;
86 86 sense = 0;
87 87 check_underrun = 0;
88 88 fix_it = 0;
89 89
90 90 HBASTATS.FcpEvent++;
91 91
92 92 sbp = (emlxs_buf_t *)iocbq->sbp;
93 93
94 94 if (!sbp) {
95 95 /* completion with missing xmit command */
96 96 HBASTATS.FcpStray++;
97 97
98 98 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
99 99 "cmd=%x iotag=%d", cmd->ULPCOMMAND, cmd->ULPIOTAG);
100 100
101 101 return;
102 102 }
103 103
104 104 HBASTATS.FcpCompleted++;
105 105
106 106 #ifdef SAN_DIAG_SUPPORT
107 107 emlxs_update_sd_bucket(sbp);
108 108 #endif /* SAN_DIAG_SUPPORT */
109 109
110 110 pkt = PRIV2PKT(sbp);
111 111
112 112 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
113 113 scsi_cmd = (uint8_t *)pkt->pkt_cmd;
114 114 scsi_opcode = scsi_cmd[12];
115 115 data_rx = 0;
116 116
117 117 /* Sync data in data buffer only on FC_PKT_FCP_READ */
118 118 if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
119 119 EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
120 120 DDI_DMA_SYNC_FORKERNEL);
121 121
122 122 #ifdef TEST_SUPPORT
123 123 if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
124 124 (pkt->pkt_datalen >= 512)) {
125 125 hba->underrun_counter--;
126 126 iostat = IOSTAT_FCP_RSP_ERROR;
127 127
128 128 /* Report 512 bytes missing by adapter */
129 129 cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;
130 130
131 131 /* Corrupt 512 bytes of Data buffer */
132 132 bzero((uint8_t *)pkt->pkt_data, 512);
133 133
134 134 /* Set FCP response to STATUS_GOOD */
135 135 bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
136 136 }
137 137 #endif /* TEST_SUPPORT */
138 138 }
139 139
140 140 /* Process the pkt */
141 141 mutex_enter(&sbp->mtx);
142 142
143 143 /* Check for immediate return */
144 144 if ((iostat == IOSTAT_SUCCESS) &&
145 145 (pkt->pkt_comp) &&
146 146 !(sbp->pkt_flags &
147 147 (PACKET_ULP_OWNED | PACKET_COMPLETED |
148 148 PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
149 149 PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
150 150 PACKET_IN_ABORT | PACKET_POLLED))) {
151 151 HBASTATS.FcpGood++;
152 152
153 153 sbp->pkt_flags |=
154 154 (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
155 155 PACKET_COMPLETED | PACKET_ULP_OWNED);
156 156 mutex_exit(&sbp->mtx);
157 157
158 158 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
159 159 emlxs_unswap_pkt(sbp);
160 160 #endif /* EMLXS_MODREV2X */
161 161
162 162 #ifdef FMA_SUPPORT
163 163 emlxs_check_dma(hba, sbp);
164 164 #endif /* FMA_SUPPORT */
165 165
166 166 cp->ulpCmplCmd++;
167 167 (*pkt->pkt_comp) (pkt);
168 168
169 169 #ifdef FMA_SUPPORT
170 170 if (hba->flag & FC_DMA_CHECK_ERROR) {
171 171 emlxs_thread_spawn(hba, emlxs_restart_thread,
172 172 NULL, NULL);
173 173 }
174 174 #endif /* FMA_SUPPORT */
175 175
176 176 return;
177 177 }
178 178
179 179 /*
180 180 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
181 181 * is reported.
182 182 */
183 183
 184  184  	/* Skip if no FCP response error was reported or no response buffer was provided */
185 185 if ((iostat != IOSTAT_FCP_RSP_ERROR) || (pkt->pkt_rsplen == 0)) {
186 186 goto done;
187 187 }
188 188
189 189 EMLXS_MPDATA_SYNC(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
190 190 DDI_DMA_SYNC_FORKERNEL);
191 191
192 192 /* Get the response buffer pointer */
193 193 rsp = (fcp_rsp_t *)pkt->pkt_resp;
194 194
195 195 /* Validate the response payload */
196 196 if (!rsp->fcp_u.fcp_status.resid_under &&
197 197 !rsp->fcp_u.fcp_status.resid_over) {
198 198 rsp->fcp_resid = 0;
199 199 }
200 200
201 201 if (!rsp->fcp_u.fcp_status.rsp_len_set) {
202 202 rsp->fcp_response_len = 0;
203 203 }
204 204
205 205 if (!rsp->fcp_u.fcp_status.sense_len_set) {
206 206 rsp->fcp_sense_len = 0;
207 207 }
208 208
209 209 length = sizeof (fcp_rsp_t) + LE_SWAP32(rsp->fcp_response_len) +
210 210 LE_SWAP32(rsp->fcp_sense_len);
211 211
212 212 if (length > pkt->pkt_rsplen) {
213 213 iostat = IOSTAT_RSP_INVALID;
214 214 pkt->pkt_data_resid = pkt->pkt_datalen;
215 215 goto done;
216 216 }
217 217
218 218 /* Set the valid response flag */
219 219 sbp->pkt_flags |= PACKET_FCP_RSP_VALID;
220 220
221 221 scsi_status = rsp->fcp_u.fcp_status.scsi_status;
222 222
223 223 #ifdef SAN_DIAG_SUPPORT
224 224 ndlp = (NODELIST *)iocbq->node;
225 225 if (scsi_status == SCSI_STAT_QUE_FULL) {
226 226 emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
227 227 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
228 228 } else if (scsi_status == SCSI_STAT_BUSY) {
229 229 emlxs_log_sd_scsi_event(port,
230 230 SD_SCSI_SUBCATEGORY_DEVBSY,
231 231 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
232 232 }
233 233 #endif
234 234
235 235 /*
236 236 * Convert a task abort to a check condition with no data
237 237 * transferred. We saw a data corruption when Solaris received
238 238 * a Task Abort from a tape.
239 239 */
240 240
241 241 if (scsi_status == SCSI_STAT_TASK_ABORT) {
242 242 EMLXS_MSGF(EMLXS_CONTEXT,
243 243 &emlxs_fcp_completion_error_msg,
244 244 "Task Abort. "
245 245 "Fixed. did=0x%06x sbp=%p cmd=%02x dl=%d",
246 246 did, sbp, scsi_opcode, pkt->pkt_datalen);
247 247
248 248 rsp->fcp_u.fcp_status.scsi_status =
249 249 SCSI_STAT_CHECK_COND;
250 250 rsp->fcp_u.fcp_status.rsp_len_set = 0;
251 251 rsp->fcp_u.fcp_status.sense_len_set = 0;
252 252 rsp->fcp_u.fcp_status.resid_over = 0;
253 253
254 254 if (pkt->pkt_datalen) {
255 255 rsp->fcp_u.fcp_status.resid_under = 1;
256 256 rsp->fcp_resid =
257 257 LE_SWAP32(pkt->pkt_datalen);
258 258 } else {
259 259 rsp->fcp_u.fcp_status.resid_under = 0;
260 260 rsp->fcp_resid = 0;
261 261 }
262 262
263 263 scsi_status = SCSI_STAT_CHECK_COND;
264 264 }
265 265
266 266 /*
267 267 * We only need to check underrun if data could
268 268 * have been sent
269 269 */
270 270
271 271 /* Always check underrun if status is good */
272 272 if (scsi_status == SCSI_STAT_GOOD) {
273 273 check_underrun = 1;
274 274 }
275 275 /* Check the sense codes if this is a check condition */
276 276 else if (scsi_status == SCSI_STAT_CHECK_COND) {
277 277 check_underrun = 1;
278 278
279 279 /* Check if sense data was provided */
280 280 if (LE_SWAP32(rsp->fcp_sense_len) >= 14) {
281 281 sense = *((uint8_t *)rsp + 32 + 2);
282 282 asc = *((uint8_t *)rsp + 32 + 12);
283 283 ascq = *((uint8_t *)rsp + 32 + 13);
284 284 }
285 285
286 286 #ifdef SAN_DIAG_SUPPORT
287 287 emlxs_log_sd_scsi_check_event(port,
288 288 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
289 289 scsi_opcode, sense, asc, ascq);
290 290 #endif
291 291 }
292 292 /* Status is not good and this is not a check condition */
293 293 /* No data should have been sent */
294 294 else {
295 295 check_underrun = 0;
296 296 }
297 297
298 298 /* Initialize the resids */
299 299 pkt->pkt_resp_resid = 0;
300 300 pkt->pkt_data_resid = 0;
301 301
302 302 /* Check if no data was to be transferred */
303 303 if (pkt->pkt_datalen == 0) {
304 304 goto done;
305 305 }
306 306
307 307 /* Get the residual underrun count reported by the SCSI reply */
308 308 rsp_data_resid = (rsp->fcp_u.fcp_status.resid_under) ?
309 309 LE_SWAP32(rsp->fcp_resid) : 0;
310 310
 311  311  	/* Set pkt_data_resid to the resid reported in the SCSI response */
312 312 pkt->pkt_data_resid = rsp_data_resid;
313 313
314 314 /* Adjust the pkt_data_resid field if needed */
315 315 if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
316 316 /*
317 317 * Get the residual underrun count reported by
318 318 * our adapter
319 319 */
320 320 pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
321 321
322 322 #ifdef SAN_DIAG_SUPPORT
323 323 if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
324 324 emlxs_log_sd_fc_rdchk_event(port,
325 325 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
326 326 scsi_opcode, pkt->pkt_data_resid);
327 327 }
328 328 #endif
329 329
330 330 /* Get the actual amount of data transferred */
331 331 data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
332 332
333 333 /*
334 334 * If the residual being reported by the adapter is
335 335 * greater than the residual being reported in the
336 336 * reply, then we have a true underrun.
337 337 */
338 338 if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
339 339 switch (scsi_opcode) {
340 340 case SCSI_INQUIRY:
341 341 scsi_dl = scsi_cmd[16];
342 342 break;
343 343
344 344 case SCSI_RX_DIAG:
345 345 scsi_dl =
346 346 (scsi_cmd[15] * 0x100) +
347 347 scsi_cmd[16];
348 348 break;
349 349
350 350 default:
351 351 scsi_dl = pkt->pkt_datalen;
352 352 }
353 353
354 354 #ifdef FCP_UNDERRUN_PATCH1
355 355 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
356 356 /*
357 357 * If status is not good and no data was
358 358 * actually transferred, then we must fix
359 359 * the issue
360 360 */
361 361 if ((scsi_status != SCSI_STAT_GOOD) && (data_rx == 0)) {
362 362 fix_it = 1;
363 363
364 364 EMLXS_MSGF(EMLXS_CONTEXT,
365 365 &emlxs_fcp_completion_error_msg,
366 366 "Underrun(1). Fixed. "
367 367 "did=0x%06x sbp=%p cmd=%02x "
368 368 "dl=%d,%d rx=%d rsp=%d",
369 369 did, sbp, scsi_opcode,
370 370 pkt->pkt_datalen, scsi_dl,
371 371 (pkt->pkt_datalen -
372 372 pkt->pkt_data_resid),
373 373 rsp_data_resid);
374 374
375 375 }
376 376 }
377 377 #endif /* FCP_UNDERRUN_PATCH1 */
378 378
379 379
380 380 #ifdef FCP_UNDERRUN_PATCH2
381 381 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH2) {
382 382 if (scsi_status == SCSI_STAT_GOOD) {
383 383 emlxs_msg_t *msg;
384 384
385 385 msg = &emlxs_fcp_completion_error_msg;
 386  386  			/*
 387  387  			 * If status is good and this is
 388  388  			 * an inquiry request and the
 389  389  			 * amount of data requested is
 390  390  			 * less than or equal to the data
 391  391  			 * received, then we must fix the
 392  392  			 * issue.
 393  393  			 */
 394  394  
395 395
396 396 if ((scsi_opcode == SCSI_INQUIRY) &&
397 397 (pkt->pkt_datalen >= data_rx) &&
398 398 (scsi_dl <= data_rx)) {
399 399 fix_it = 1;
400 400
401 401 EMLXS_MSGF(EMLXS_CONTEXT, msg,
402 402 "Underrun(2). Fixed. "
403 403 "did=0x%06x sbp=%p "
404 404 "cmd=%02x dl=%d,%d "
405 405 "rx=%d rsp=%d",
406 406 did, sbp, scsi_opcode,
407 407 pkt->pkt_datalen, scsi_dl,
408 408 data_rx, rsp_data_resid);
409 409
410 410 }
411 411
412 412 /*
413 413 * If status is good and this is an
414 414 * inquiry request and the amount of
415 415 * data requested >= 128 bytes, but
416 416 * only 128 bytes were received,
417 417 * then we must fix the issue.
418 418 */
419 419 else if ((scsi_opcode == SCSI_INQUIRY) &&
420 420 (pkt->pkt_datalen >= 128) &&
421 421 (scsi_dl >= 128) && (data_rx == 128)) {
422 422 fix_it = 1;
423 423
424 424 EMLXS_MSGF(EMLXS_CONTEXT, msg,
425 425 "Underrun(3). Fixed. "
426 426 "did=0x%06x sbp=%p "
427 427 "cmd=%02x dl=%d,%d "
428 428 "rx=%d rsp=%d",
429 429 did, sbp, scsi_opcode,
430 430 pkt->pkt_datalen, scsi_dl,
431 431 data_rx, rsp_data_resid);
432 432
433 433 }
434 434 }
435 435 }
436 436 #endif /* FCP_UNDERRUN_PATCH2 */
437 437
438 438 /*
439 439 * Check if SCSI response payload should be
440 440 * fixed or if a DATA_UNDERRUN should be
441 441 * reported
442 442 */
443 443 if (fix_it) {
444 444 /*
445 445 * Fix the SCSI response payload itself
446 446 */
447 447 rsp->fcp_u.fcp_status.resid_under = 1;
448 448 rsp->fcp_resid =
449 449 LE_SWAP32(pkt->pkt_data_resid);
450 450 } else {
451 451 /*
452 452 * Change the status from
453 453 * IOSTAT_FCP_RSP_ERROR to
454 454 * IOSTAT_DATA_UNDERRUN
455 455 */
456 456 iostat = IOSTAT_DATA_UNDERRUN;
457 457 pkt->pkt_data_resid =
458 458 pkt->pkt_datalen;
459 459 }
460 460 }
461 461
462 462 /*
463 463 * If the residual being reported by the adapter is
464 464 * less than the residual being reported in the reply,
465 465 * then we have a true overrun. Since we don't know
466 466 * where the extra data came from or went to then we
467 467 * cannot trust anything we received
468 468 */
469 469 else if (rsp_data_resid > pkt->pkt_data_resid) {
470 470 /*
471 471 * Change the status from
472 472 * IOSTAT_FCP_RSP_ERROR to
473 473 * IOSTAT_DATA_OVERRUN
474 474 */
475 475 iostat = IOSTAT_DATA_OVERRUN;
476 476 pkt->pkt_data_resid = pkt->pkt_datalen;
477 477 }
478 478
479 479 } else if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
480 480 (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
481 481 /*
482 482 * Get the residual underrun count reported by
483 483 * our adapter
484 484 */
485 485 pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
486 486
487 487 #ifdef SAN_DIAG_SUPPORT
488 488 if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
489 489 emlxs_log_sd_fc_rdchk_event(port,
490 490 (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
491 491 scsi_opcode, pkt->pkt_data_resid);
492 492 }
493 493 #endif /* SAN_DIAG_SUPPORT */
494 494
495 495 /* Get the actual amount of data transferred */
496 496 data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;
497 497
498 498 /*
499 499 * If the residual being reported by the adapter is
500 500 * greater than the residual being reported in the
501 501 * reply, then we have a true underrun.
502 502 */
503 503 if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
504 504
505 505 scsi_dl = pkt->pkt_datalen;
506 506
507 507 #ifdef FCP_UNDERRUN_PATCH1
508 508 if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
509 509 /*
510 510 * If status is not good and no data was
511 511 * actually transferred, then we must fix
512 512 * the issue
513 513 */
514 514 if ((scsi_status != SCSI_STAT_GOOD) && (data_rx == 0)) {
515 515 fix_it = 1;
516 516
517 517 EMLXS_MSGF(EMLXS_CONTEXT,
518 518 &emlxs_fcp_completion_error_msg,
519 519 "Underrun(1). Fixed. "
520 520 "did=0x%06x sbp=%p cmd=%02x "
521 521 "dl=%d,%d rx=%d rsp=%d",
522 522 did, sbp, scsi_opcode,
523 523 pkt->pkt_datalen, scsi_dl,
524 524 (pkt->pkt_datalen -
525 525 pkt->pkt_data_resid),
526 526 rsp_data_resid);
527 527
528 528 }
529 529 }
530 530 #endif /* FCP_UNDERRUN_PATCH1 */
531 531
532 532 /*
533 533 * Check if SCSI response payload should be
534 534 * fixed or if a DATA_UNDERRUN should be
535 535 * reported
536 536 */
537 537 if (fix_it) {
538 538 /*
539 539 * Fix the SCSI response payload itself
540 540 */
541 541 rsp->fcp_u.fcp_status.resid_under = 1;
542 542 rsp->fcp_resid =
543 543 LE_SWAP32(pkt->pkt_data_resid);
544 544 } else {
545 545 /*
546 546 * Change the status from
547 547 * IOSTAT_FCP_RSP_ERROR to
548 548 * IOSTAT_DATA_UNDERRUN
549 549 */
550 550 iostat = IOSTAT_DATA_UNDERRUN;
551 551 pkt->pkt_data_resid =
552 552 pkt->pkt_datalen;
553 553 }
554 554 }
555 555
556 556 /*
557 557 * If the residual being reported by the adapter is
558 558 * less than the residual being reported in the reply,
559 559 * then we have a true overrun. Since we don't know
560 560 * where the extra data came from or went to then we
561 561 * cannot trust anything we received
562 562 */
563 563 else if (rsp_data_resid > pkt->pkt_data_resid) {
564 564 /*
565 565 * Change the status from
566 566 * IOSTAT_FCP_RSP_ERROR to
567 567 * IOSTAT_DATA_OVERRUN
568 568 */
569 569 iostat = IOSTAT_DATA_OVERRUN;
570 570 pkt->pkt_data_resid = pkt->pkt_datalen;
571 571 }
572 572 }
573 573
574 574 done:
575 575
576 576 /* Print completion message */
577 577 switch (iostat) {
578 578 case IOSTAT_SUCCESS:
579 579 /* Build SCSI GOOD status */
580 580 if (pkt->pkt_rsplen) {
581 581 bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
582 582 }
583 583 break;
584 584
585 585 case IOSTAT_FCP_RSP_ERROR:
586 586 break;
587 587
588 588 case IOSTAT_REMOTE_STOP:
589 589 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
590 590 "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
591 591 scsi_opcode);
592 592 break;
593 593
594 594 case IOSTAT_LOCAL_REJECT:
595 595 localstat = cmd->un.grsp.perr.statLocalError;
596 596
597 597 switch (localstat) {
598 598 case IOERR_SEQUENCE_TIMEOUT:
599 599 EMLXS_MSGF(EMLXS_CONTEXT,
600 600 &emlxs_fcp_completion_error_msg,
601 601 "Local reject. "
602 602 "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
603 603 emlxs_error_xlate(localstat), did, sbp,
604 604 scsi_opcode, pkt->pkt_timeout);
605 605 break;
606 606
607 607 default:
608 608 EMLXS_MSGF(EMLXS_CONTEXT,
609 609 &emlxs_fcp_completion_error_msg,
610 610 "Local reject. %s 0x%06x %p %02x (%x)(%x)",
611 611 emlxs_error_xlate(localstat), did, sbp,
612 612 scsi_opcode, (uint16_t)cmd->ULPIOTAG,
613 613 (uint16_t)cmd->ULPCONTEXT);
614 614 }
615 615
616 616 break;
617 617
618 618 case IOSTAT_NPORT_RJT:
619 619 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
620 620 "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
621 621 scsi_opcode);
622 622 break;
623 623
624 624 case IOSTAT_FABRIC_RJT:
625 625 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
626 626 "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
627 627 scsi_opcode);
628 628 break;
629 629
630 630 case IOSTAT_NPORT_BSY:
631 631 #ifdef SAN_DIAG_SUPPORT
632 632 ndlp = (NODELIST *)iocbq->node;
633 633 emlxs_log_sd_fc_bsy_event(port, (HBA_WWN *)&ndlp->nlp_portname);
634 634 #endif
635 635
636 636 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
637 637 "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
638 638 scsi_opcode);
639 639 break;
640 640
641 641 case IOSTAT_FABRIC_BSY:
642 642 #ifdef SAN_DIAG_SUPPORT
643 643 ndlp = (NODELIST *)iocbq->node;
644 644 emlxs_log_sd_fc_bsy_event(port, NULL);
645 645 #endif
646 646
647 647 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
648 648 "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
649 649 scsi_opcode);
650 650 break;
651 651
652 652 case IOSTAT_INTERMED_RSP:
653 653 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
654 654 "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
655 655 sbp, scsi_opcode);
656 656 break;
657 657
658 658 case IOSTAT_LS_RJT:
659 659 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
660 660 "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
661 661 scsi_opcode);
662 662 break;
663 663
664 664 case IOSTAT_DATA_UNDERRUN:
665 665 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
666 666 "Underrun. did=0x%06x sbp=%p cmd=%02x "
667 667 "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
668 668 did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
669 669 rsp_data_resid, scsi_status, sense, asc, ascq);
670 670 break;
671 671
672 672 case IOSTAT_DATA_OVERRUN:
673 673 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
674 674 "Overrun. did=0x%06x sbp=%p cmd=%02x "
675 675 "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
676 676 did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
677 677 rsp_data_resid, scsi_status, sense, asc, ascq);
678 678 break;
679 679
680 680 case IOSTAT_RSP_INVALID:
681 681 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
 682  682  		    "Rsp Invalid. did=0x%06x sbp=%p cmd=%02x dl=%d rl=%d "
683 683 "(%d, %d, %d)",
684 684 did, sbp, scsi_opcode, pkt->pkt_datalen, pkt->pkt_rsplen,
685 685 LE_SWAP32(rsp->fcp_resid),
686 686 LE_SWAP32(rsp->fcp_sense_len),
687 687 LE_SWAP32(rsp->fcp_response_len));
688 688 break;
689 689
690 690 default:
691 691 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
692 692 "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
693 693 iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
694 694 scsi_opcode);
695 695 break;
696 696 }
697 697
698 698 if (iostat == IOSTAT_SUCCESS) {
699 699 HBASTATS.FcpGood++;
700 700 } else {
701 701 HBASTATS.FcpError++;
702 702 }
703 703
704 704 mutex_exit(&sbp->mtx);
705 705
706 706 emlxs_pkt_complete(sbp, iostat, localstat, 0);
707 707
708 708 return;
709 709
710 710 } /* emlxs_handle_fcp_event() */
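
The underrun/overrun reconciliation above is the core of this handler and is easy to lose in diff form. A minimal sketch of the rule, with illustrative names only (adapter_resid corresponds to cmd->un.fcpi.fcpi_parm, rsp_resid to the FCP_RSP residual); this helper is not in the source:

    /* Illustrative sketch only -- not in emlxs_fcp.c. */
    enum resid_verdict { RESID_AGREE, RESID_UNDERRUN, RESID_OVERRUN };

    static enum resid_verdict
    classify_resid(uint32_t adapter_resid, uint32_t rsp_resid)
    {
    	/* Adapter reports more data missing than the RSP does: true underrun */
    	if (adapter_resid > rsp_resid)
    		return (RESID_UNDERRUN);

    	/* RSP claims more missing than the adapter saw: extra data arrived, true overrun */
    	if (rsp_resid > adapter_resid)
    		return (RESID_OVERRUN);

    	return (RESID_AGREE);
    }

In the handler itself, SCSI_INQUIRY and SCSI_RX_DIAG additionally pull the expected length from the CDB before the FCP_UNDERRUN_PATCH heuristics decide whether to patch the response payload or report IOSTAT_DATA_UNDERRUN.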
711 711
712 712
713 713 /*
714 714 * emlxs_post_buffer
715 715 *
716 716 * This routine will post count buffers to the
717 717 * ring with the QUE_RING_BUF_CN command. This
718 718 * allows 2 buffers / command to be posted.
719 719 * Returns the number of buffers NOT posted.
720 720 */
721 721 /* SLI3 */
722 722 extern int
723 723 emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
724 724 {
725 725 emlxs_port_t *port = &PPORT;
726 726 IOCB *icmd;
727 727 IOCBQ *iocbq;
728 728 MATCHMAP *mp;
729 729 uint16_t tag;
730 730 uint32_t maxqbuf;
731 731 int32_t i;
732 732 int32_t j;
733 733 uint32_t seg;
734 734 uint32_t size;
735 735
736 736 mp = 0;
737 737 maxqbuf = 2;
738 738 tag = (uint16_t)cnt;
739 739 cnt += rp->fc_missbufcnt;
740 740
741 741 if (rp->ringno == hba->channel_els) {
742 742 seg = MEM_BUF;
743 743 size = MEM_ELSBUF_SIZE;
744 744 } else if (rp->ringno == hba->channel_ip) {
745 745 seg = MEM_IPBUF;
746 746 size = MEM_IPBUF_SIZE;
747 747 } else if (rp->ringno == hba->channel_ct) {
748 748 seg = MEM_CTBUF;
749 749 size = MEM_CTBUF_SIZE;
750 750 }
751 751 #ifdef SFCT_SUPPORT
752 752 else if (rp->ringno == hba->CHANNEL_FCT) {
753 753 seg = MEM_FCTBUF;
754 754 size = MEM_FCTBUF_SIZE;
755 755 }
756 756 #endif /* SFCT_SUPPORT */
757 757 else {
758 758 return (0);
759 759 }
760 760
761 761 /*
762 762 * While there are buffers to post
763 763 */
764 764 while (cnt) {
765 765 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
766 766 rp->fc_missbufcnt = cnt;
767 767 return (cnt);
768 768 }
769 769
770 770 iocbq->channel = (void *)&hba->chan[rp->ringno];
771 771 iocbq->port = (void *)port;
772 772 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
773 773
774 774 icmd = &iocbq->iocb;
775 775
776 776 /*
777 777 * Max buffers can be posted per command
778 778 */
779 779 for (i = 0; i < maxqbuf; i++) {
780 780 if (cnt <= 0)
781 781 break;
782 782
783 783 /* fill in BDEs for command */
784 784 if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg))
785 785 == 0) {
786 786 icmd->ULPBDECOUNT = i;
787 787 for (j = 0; j < i; j++) {
788 788 mp = EMLXS_GET_VADDR(hba, rp, icmd);
789 789 if (mp) {
790 790 emlxs_mem_put(hba, seg,
791 791 (void *)mp);
792 792 }
793 793 }
794 794
795 795 rp->fc_missbufcnt = cnt + i;
796 796
797 797 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
798 798
799 799 return (cnt + i);
800 800 }
801 801
802 802 /*
803 803 * map that page and save the address pair for lookup
804 804 * later
805 805 */
806 806 emlxs_mem_map_vaddr(hba,
807 807 rp,
808 808 mp,
809 809 (uint32_t *)&icmd->un.cont64[i].addrHigh,
810 810 (uint32_t *)&icmd->un.cont64[i].addrLow);
811 811
812 812 icmd->un.cont64[i].tus.f.bdeSize = size;
813 813 icmd->ULPCOMMAND = CMD_QUE_RING_BUF64_CN;
814 814
815 815 /*
816 816 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
817 817 * "UB Post: ring=%d addr=%08x%08x size=%d",
818 818 * rp->ringno, icmd->un.cont64[i].addrHigh,
819 819 * icmd->un.cont64[i].addrLow, size);
820 820 */
821 821
822 822 cnt--;
823 823 }
824 824
825 825 icmd->ULPIOTAG = tag;
826 826 icmd->ULPBDECOUNT = i;
827 827 icmd->ULPLE = 1;
828 828 icmd->ULPOWNER = OWN_CHIP;
 829  829  		/* used as a delimiter between commands */
830 830 iocbq->bp = (void *)mp;
831 831
832 832 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[rp->ringno], iocbq);
833 833 }
834 834
835 835 rp->fc_missbufcnt = 0;
836 836
837 837 return (0);
838 838
839 839 } /* emlxs_post_buffer() */
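
Because the routine returns the number of buffers it could not post and records the same deficit in rp->fc_missbufcnt (folded back into cnt on the next call), callers can simply retry later. A hypothetical caller, assuming the names above:

    /* Hypothetical caller -- illustrative only. */
    static void
    repost_ring_buffers(emlxs_hba_t *hba, RING *rp, int16_t wanted)
    {
    	int missed;

    	missed = emlxs_post_buffer(hba, rp, wanted);
    	if (missed != 0) {
    		/*
    		 * IOCB or buffer pool exhausted; the deficit is saved
    		 * in rp->fc_missbufcnt and added to the count when
    		 * emlxs_post_buffer() is next called.
    		 */
    	}
    }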
840 840
841 841
842 842 static void
843 843 emlxs_fcp_tag_nodes(emlxs_port_t *port)
844 844 {
845 845 NODELIST *nlp;
846 846 int i;
847 847
848 848 /* We will process all nodes with this tag later */
849 849 rw_enter(&port->node_rwlock, RW_READER);
850 850 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
851 851 nlp = port->node_table[i];
852 852 while (nlp != NULL) {
853 853 nlp->nlp_tag = 1;
854 854 nlp = nlp->nlp_list_next;
855 855 }
856 856 }
857 857 rw_exit(&port->node_rwlock);
858 858 }
859 859
860 860
861 861 static NODELIST *
862 862 emlxs_find_tagged_node(emlxs_port_t *port)
863 863 {
864 864 NODELIST *nlp;
865 865 NODELIST *tagged;
866 866 int i;
867 867
868 868 /* Find first node */
869 869 rw_enter(&port->node_rwlock, RW_READER);
870 870 tagged = 0;
871 871 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
872 872 nlp = port->node_table[i];
873 873 while (nlp != NULL) {
874 874 if (!nlp->nlp_tag) {
875 875 nlp = nlp->nlp_list_next;
876 876 continue;
877 877 }
878 878 nlp->nlp_tag = 0;
879 879
880 880 if (nlp->nlp_Rpi == FABRIC_RPI) {
881 881 nlp = nlp->nlp_list_next;
882 882 continue;
883 883 }
884 884 tagged = nlp;
885 885 break;
886 886 }
887 887 if (tagged) {
888 888 break;
889 889 }
890 890 }
891 891 rw_exit(&port->node_rwlock);
892 892 return (tagged);
893 893 }
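
Together these two helpers form a tag-and-drain iterator: tag every node under a brief node_rwlock hold, then pull one tagged node at a time so the lock is never held across the per-node work and the table may safely change in between. The usage pattern, as it appears below in emlxs_port_offline() and emlxs_port_online():

    emlxs_fcp_tag_nodes(port);
    while ((nlp = emlxs_find_tagged_node(port)) != NULL) {
    	/* per-node work; the finder has already cleared nlp's tag */
    }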
894 894
895 895
896 896 extern int
897 897 emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
898 898 {
899 899 emlxs_hba_t *hba = HBA;
900 900 emlxs_config_t *cfg;
901 901 NODELIST *nlp;
902 902 fc_affected_id_t *aid;
903 903 uint32_t mask;
904 904 uint32_t aff_d_id;
905 905 uint32_t linkdown;
906 906 uint32_t vlinkdown;
907 907 uint32_t action;
908 908 int i;
909 909 uint32_t unreg_vpi;
910 910 uint32_t update;
911 911 uint32_t adisc_support;
912 912 uint32_t clear_all;
913 913 uint8_t format;
914 914
915 915 /* Target mode only uses this routine for linkdowns */
916 916 if ((port->mode == MODE_TARGET) && (scope != 0xffffffff) &&
917 917 (scope != 0xfeffffff) && (scope != 0xfdffffff)) {
918 918 return (0);
919 919 }
920 920
921 921 cfg = &CFG;
922 922 aid = (fc_affected_id_t *)&scope;
923 923 linkdown = 0;
924 924 vlinkdown = 0;
925 925 unreg_vpi = 0;
926 926 update = 0;
927 927 clear_all = 0;
928 928
929 929 if (!(port->flag & EMLXS_PORT_BOUND)) {
930 930 return (0);
931 931 }
932 932
933 933 format = aid->aff_format;
934 934
935 935 switch (format) {
936 936 case 0: /* Port */
937 937 mask = 0x00ffffff;
938 938 break;
939 939
940 940 case 1: /* Area */
941 941 mask = 0x00ffff00;
942 942 break;
943 943
944 944 case 2: /* Domain */
945 945 mask = 0x00ff0000;
946 946 break;
947 947
948 948 case 3: /* Network */
949 949 mask = 0x00000000;
950 950 break;
951 951
952 952 #ifdef DHCHAP_SUPPORT
953 953 case 0xfe: /* Virtual link down */
954 954 mask = 0x00000000;
955 955 vlinkdown = 1;
956 956 break;
957 957 #endif /* DHCHAP_SUPPORT */
958 958
959 959 case 0xff: /* link is down */
960 960 mask = 0x00000000;
961 961 linkdown = 1;
962 962 break;
963 963
964 964 case 0xfd: /* New fabric */
965 965 default:
966 966 mask = 0x00000000;
967 967 linkdown = 1;
968 968 clear_all = 1;
969 969 break;
970 970 }
971 971
972 972 aff_d_id = aid->aff_d_id & mask;
973 973
974 974
975 975 /*
976 976 * If link is down then this is a hard shutdown and flush
977 977 * If link not down then this is a soft shutdown and flush
978 978 * (e.g. RSCN)
979 979 */
980 980 if (linkdown) {
981 + hba->flag &= ~FC_GPIO_LINK_UP;
982 +
981 983 mutex_enter(&EMLXS_PORT_LOCK);
982 984
983 985 port->flag &= EMLXS_PORT_LINKDOWN_MASK;
984 986
985 987 if (port->ulp_statec != FC_STATE_OFFLINE) {
986 988 port->ulp_statec = FC_STATE_OFFLINE;
987 989
988 990 port->prev_did = port->did;
989 991 port->did = 0;
990 992 port->rdid = 0;
991 993
992 994 bcopy(&port->fabric_sparam, &port->prev_fabric_sparam,
993 995 sizeof (SERV_PARM));
994 996 bzero(&port->fabric_sparam, sizeof (SERV_PARM));
995 997
996 998 update = 1;
997 999 }
998 1000
999 1001 mutex_exit(&EMLXS_PORT_LOCK);
1000 1002
1001 1003 emlxs_timer_cancel_clean_address(port);
1002 1004
1003 1005 /* Tell ULP about it */
1004 1006 if (update) {
1005 1007 if (port->flag & EMLXS_PORT_BOUND) {
1006 1008 if (port->vpi == 0) {
1007 1009 EMLXS_MSGF(EMLXS_CONTEXT,
1008 1010 &emlxs_link_down_msg, NULL);
1009 1011 }
1010 1012
1011 1013 if (port->mode == MODE_INITIATOR) {
1012 1014 emlxs_fca_link_down(port);
1013 1015 }
1014 1016 #ifdef SFCT_SUPPORT
1015 1017 else if (port->mode == MODE_TARGET) {
1016 1018 emlxs_fct_link_down(port);
1017 1019 }
1018 1020 #endif /* SFCT_SUPPORT */
1019 1021
1020 1022 } else {
1021 1023 if (port->vpi == 0) {
1022 1024 EMLXS_MSGF(EMLXS_CONTEXT,
1023 1025 &emlxs_link_down_msg, "*");
1024 1026 }
1025 1027 }
1026 1028
1027 1029
1028 1030 }
1029 1031
1030 1032 unreg_vpi = 1;
1031 1033
1032 1034 #ifdef DHCHAP_SUPPORT
1033 1035 /* Stop authentication with all nodes */
1034 1036 emlxs_dhc_auth_stop(port, NULL);
1035 1037 #endif /* DHCHAP_SUPPORT */
1036 1038
1037 1039 /* Flush the base node */
1038 1040 (void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
1039 1041 (void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
1040 1042
1041 1043 /* Flush any pending ub buffers */
1042 1044 emlxs_ub_flush(port);
1043 1045 }
1044 1046 #ifdef DHCHAP_SUPPORT
1045 1047 /* virtual link down */
1046 1048 else if (vlinkdown) {
1047 1049 mutex_enter(&EMLXS_PORT_LOCK);
1048 1050
1049 1051 if (port->ulp_statec != FC_STATE_OFFLINE) {
1050 1052 port->ulp_statec = FC_STATE_OFFLINE;
1051 1053 update = 1;
1052 1054 }
1053 1055
1054 1056 mutex_exit(&EMLXS_PORT_LOCK);
1055 1057
1056 1058 emlxs_timer_cancel_clean_address(port);
1057 1059
1058 1060 /* Tell ULP about it */
1059 1061 if (update) {
1060 1062 if (port->flag & EMLXS_PORT_BOUND) {
1061 1063 if (port->vpi == 0) {
1062 1064 EMLXS_MSGF(EMLXS_CONTEXT,
1063 1065 &emlxs_link_down_msg,
1064 1066 "Switch authentication failed.");
1065 1067 }
1066 1068
1067 1069 if (port->mode == MODE_INITIATOR) {
1068 1070 emlxs_fca_link_down(port);
1069 1071 }
1070 1072 #ifdef SFCT_SUPPORT
1071 1073 else if (port->mode == MODE_TARGET) {
1072 1074 emlxs_fct_link_down(port);
1073 1075 }
1074 1076 #endif /* SFCT_SUPPORT */
1075 1077 } else {
1076 1078 if (port->vpi == 0) {
1077 1079 EMLXS_MSGF(EMLXS_CONTEXT,
1078 1080 &emlxs_link_down_msg,
1079 1081 "Switch authentication failed. *");
1080 1082 }
1081 1083 }
1082 1084
1083 1085
1084 1086 }
1085 1087
1086 1088 /* Flush the base node */
1087 1089 (void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
1088 1090 (void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
1089 1091 }
1090 1092 #endif /* DHCHAP_SUPPORT */
1091 1093 else {
1092 1094 emlxs_timer_cancel_clean_address(port);
1093 1095 }
1094 1096
1095 1097 if (port->mode == MODE_TARGET) {
1096 1098 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1097 1099 /* Set the node tags */
1098 1100 emlxs_fcp_tag_nodes(port);
1099 1101 unreg_vpi = 0;
1100 1102 while ((nlp = emlxs_find_tagged_node(port))) {
1101 1103 (void) emlxs_rpi_pause_notify(port,
1102 1104 nlp->rpip);
1103 1105 /*
1104 1106 * In port_online we need to resume
1105 1107 * these RPIs before we can use them.
1106 1108 */
1107 1109 }
1108 1110 }
1109 1111 goto done;
1110 1112 }
1111 1113
1112 1114 /* Set the node tags */
1113 1115 emlxs_fcp_tag_nodes(port);
1114 1116
1115 1117 if (!clear_all && (hba->flag & FC_ONLINE_MODE)) {
1116 1118 adisc_support = cfg[CFG_ADISC_SUPPORT].current;
1117 1119 } else {
1118 1120 adisc_support = 0;
1119 1121 }
1120 1122
1121 1123 /* Check ADISC support level */
1122 1124 switch (adisc_support) {
1123 1125 case 0: /* No support - Flush all IO to all matching nodes */
1124 1126
1125 1127 for (;;) {
1126 1128 /*
1127 1129 * We need to hold the locks this way because
1128 1130 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1129 1131 * same locks. Also, when we release the lock the list
1130 1132 * can change out from under us.
1131 1133 */
1132 1134
1133 1135 /* Find first node */
1134 1136 rw_enter(&port->node_rwlock, RW_READER);
1135 1137 action = 0;
1136 1138 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1137 1139 nlp = port->node_table[i];
1138 1140 while (nlp != NULL) {
1139 1141 if (!nlp->nlp_tag) {
1140 1142 nlp = nlp->nlp_list_next;
1141 1143 continue;
1142 1144 }
1143 1145 nlp->nlp_tag = 0;
1144 1146
1145 1147 /*
1146 1148 * Check for any device that matches
1147 1149 * our mask
1148 1150 */
1149 1151 if ((nlp->nlp_DID & mask) == aff_d_id) {
1150 1152 if (linkdown) {
1151 1153 action = 1;
1152 1154 break;
1153 1155  				} else { /* Must be an RSCN */
1154 1156
1155 1157 action = 2;
1156 1158 break;
1157 1159 }
1158 1160 }
1159 1161 nlp = nlp->nlp_list_next;
1160 1162 }
1161 1163
1162 1164 if (action) {
1163 1165 break;
1164 1166 }
1165 1167 }
1166 1168 rw_exit(&port->node_rwlock);
1167 1169
1168 1170
1169 1171 /* Check if nothing was found */
1170 1172 if (action == 0) {
1171 1173 break;
1172 1174 } else if (action == 1) {
1173 1175 (void) EMLXS_SLI_UNREG_NODE(port, nlp,
1174 1176 NULL, NULL, NULL);
1175 1177 } else if (action == 2) {
1176 1178 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1177 1179
1178 1180 #ifdef DHCHAP_SUPPORT
1179 1181 emlxs_dhc_auth_stop(port, nlp);
1180 1182 #endif /* DHCHAP_SUPPORT */
1181 1183
1182 1184 /*
1183 1185 * Close the node for any further normal IO
1184 1186  				 * A PLOGI will reopen the node
1185 1187 */
1186 1188 emlxs_node_close(port, nlp,
1187 1189 hba->channel_fcp, 60);
1188 1190 emlxs_node_close(port, nlp,
1189 1191 hba->channel_ip, 60);
1190 1192
1191 1193 /* Flush tx queue */
1192 1194 (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1193 1195
1194 1196 /* Flush chip queue */
1195 1197 (void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1196 1198 }
1197 1199
1198 1200 }
1199 1201
1200 1202 break;
1201 1203
1202 1204 case 1: /* Partial support - Flush IO for non-FCP2 matching nodes */
1203 1205
1204 1206 for (;;) {
1205 1207
1206 1208 /*
1207 1209 * We need to hold the locks this way because
1208 1210 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1209 1211 * same locks. Also, when we release the lock the list
1210 1212 * can change out from under us.
1211 1213 */
1212 1214 rw_enter(&port->node_rwlock, RW_READER);
1213 1215 action = 0;
1214 1216 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1215 1217 nlp = port->node_table[i];
1216 1218 while (nlp != NULL) {
1217 1219 if (!nlp->nlp_tag) {
1218 1220 nlp = nlp->nlp_list_next;
1219 1221 continue;
1220 1222 }
1221 1223 nlp->nlp_tag = 0;
1222 1224
1223 1225 /*
1224 1226 * Check for special FCP2 target device
1225 1227 * that matches our mask
1226 1228 */
1227 1229 if ((nlp->nlp_fcp_info &
1228 1230 NLP_FCP_TGT_DEVICE) &&
1229 1231  				    (nlp->nlp_fcp_info &
1230 1232 NLP_FCP_2_DEVICE) &&
1231 1233 (nlp->nlp_DID & mask) ==
1232 1234 aff_d_id) {
1233 1235 action = 3;
1234 1236 break;
1235 1237 }
1236 1238
1237 1239 /*
1238 1240 * Check for any other device that
1239 1241 * matches our mask
1240 1242 */
1241 1243 else if ((nlp->nlp_DID & mask) ==
1242 1244 aff_d_id) {
1243 1245 if (linkdown) {
1244 1246 action = 1;
1245 1247 break;
1246 1248 } else { /* Must be an RSCN */
1247 1249
1248 1250 action = 2;
1249 1251 break;
1250 1252 }
1251 1253 }
1252 1254
1253 1255 nlp = nlp->nlp_list_next;
1254 1256 }
1255 1257
1256 1258 if (action) {
1257 1259 break;
1258 1260 }
1259 1261 }
1260 1262 rw_exit(&port->node_rwlock);
1261 1263
1262 1264 /* Check if nothing was found */
1263 1265 if (action == 0) {
1264 1266 break;
1265 1267 } else if (action == 1) {
1266 1268 (void) EMLXS_SLI_UNREG_NODE(port, nlp,
1267 1269 NULL, NULL, NULL);
1268 1270 } else if (action == 2) {
1269 1271 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1270 1272
1271 1273 #ifdef DHCHAP_SUPPORT
1272 1274 emlxs_dhc_auth_stop(port, nlp);
1273 1275 #endif /* DHCHAP_SUPPORT */
1274 1276
1275 1277 /*
1276 1278 * Close the node for any further normal IO
1277 1279  				 * A PLOGI will reopen the node
1278 1280 */
1279 1281 emlxs_node_close(port, nlp,
1280 1282 hba->channel_fcp, 60);
1281 1283 emlxs_node_close(port, nlp,
1282 1284 hba->channel_ip, 60);
1283 1285
1284 1286 /* Flush tx queue */
1285 1287 (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1286 1288
1287 1289 /* Flush chip queue */
1288 1290 (void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1289 1291
1290 1292 } else if (action == 3) { /* FCP2 devices */
1291 1293 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1292 1294
1293 1295 unreg_vpi = 0;
1294 1296
1295 1297 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1296 1298 (void) emlxs_rpi_pause_notify(port,
1297 1299 nlp->rpip);
1298 1300 }
1299 1301
1300 1302 #ifdef DHCHAP_SUPPORT
1301 1303 emlxs_dhc_auth_stop(port, nlp);
1302 1304 #endif /* DHCHAP_SUPPORT */
1303 1305
1304 1306 /*
1305 1307 * Close the node for any further normal IO
1306 1308  				 * An ADISC or a PLOGI will reopen the node
1307 1309 */
1308 1310 emlxs_node_close(port, nlp,
1309 1311 hba->channel_fcp, -1);
1310 1312 emlxs_node_close(port, nlp, hba->channel_ip,
1311 1313 ((linkdown) ? 0 : 60));
1312 1314
1313 1315 /* Flush tx queues except for FCP ring */
1314 1316 (void) emlxs_tx_node_flush(port, nlp,
1315 1317 &hba->chan[hba->channel_ct], 0, 0);
1316 1318 (void) emlxs_tx_node_flush(port, nlp,
1317 1319 &hba->chan[hba->channel_els], 0, 0);
1318 1320 (void) emlxs_tx_node_flush(port, nlp,
1319 1321 &hba->chan[hba->channel_ip], 0, 0);
1320 1322
1321 1323 /* Flush chip queues except for FCP ring */
1322 1324 (void) emlxs_chipq_node_flush(port,
1323 1325 &hba->chan[hba->channel_ct], nlp, 0);
1324 1326 (void) emlxs_chipq_node_flush(port,
1325 1327 &hba->chan[hba->channel_els], nlp, 0);
1326 1328 (void) emlxs_chipq_node_flush(port,
1327 1329 &hba->chan[hba->channel_ip], nlp, 0);
1328 1330 }
1329 1331 }
1330 1332 break;
1331 1333
1332 1334 case 2: /* Full support - Hold FCP IO to FCP target matching nodes */
1333 1335
1334 1336 if (!linkdown && !vlinkdown) {
1335 1337 break;
1336 1338 }
1337 1339
1338 1340 for (;;) {
1339 1341 /*
1340 1342 * We need to hold the locks this way because
1341 1343 * EMLXS_SLI_UNREG_NODE and the flush routines enter the
1342 1344 * same locks. Also, when we release the lock the list
1343 1345 * can change out from under us.
1344 1346 */
1345 1347 rw_enter(&port->node_rwlock, RW_READER);
1346 1348 action = 0;
1347 1349 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
1348 1350 nlp = port->node_table[i];
1349 1351 while (nlp != NULL) {
1350 1352 if (!nlp->nlp_tag) {
1351 1353 nlp = nlp->nlp_list_next;
1352 1354 continue;
1353 1355 }
1354 1356 nlp->nlp_tag = 0;
1355 1357
1356 1358 /*
1357 1359 * Check for FCP target device that
1358 1360 * matches our mask
1359 1361 */
1360 1362  					if ((nlp->nlp_fcp_info &
1361 1363 NLP_FCP_TGT_DEVICE) &&
1362 1364 (nlp->nlp_DID & mask) ==
1363 1365 aff_d_id) {
1364 1366 action = 3;
1365 1367 break;
1366 1368 }
1367 1369
1368 1370 /*
1369 1371 * Check for any other device that
1370 1372 * matches our mask
1371 1373 */
1372 1374 else if ((nlp->nlp_DID & mask) ==
1373 1375 aff_d_id) {
1374 1376 if (linkdown) {
1375 1377 action = 1;
1376 1378 break;
1377 1379 } else { /* Must be an RSCN */
1378 1380
1379 1381 action = 2;
1380 1382 break;
1381 1383 }
1382 1384 }
1383 1385
1384 1386 nlp = nlp->nlp_list_next;
1385 1387 }
1386 1388 if (action) {
1387 1389 break;
1388 1390 }
1389 1391 }
1390 1392 rw_exit(&port->node_rwlock);
1391 1393
1392 1394 /* Check if nothing was found */
1393 1395 if (action == 0) {
1394 1396 break;
1395 1397 } else if (action == 1) {
1396 1398 (void) EMLXS_SLI_UNREG_NODE(port, nlp,
1397 1399 NULL, NULL, NULL);
1398 1400 } else if (action == 2) {
1399 1401 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1400 1402
1401 1403 /*
1402 1404 * Close the node for any further normal IO
1403 1405  				 * A PLOGI will reopen the node
1404 1406 */
1405 1407 emlxs_node_close(port, nlp,
1406 1408 hba->channel_fcp, 60);
1407 1409 emlxs_node_close(port, nlp,
1408 1410 hba->channel_ip, 60);
1409 1411
1410 1412 /* Flush tx queue */
1411 1413 (void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);
1412 1414
1413 1415 /* Flush chip queue */
1414 1416 (void) emlxs_chipq_node_flush(port, 0, nlp, 0);
1415 1417
1416 1418 } else if (action == 3) { /* FCP2 devices */
1417 1419 EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);
1418 1420
1419 1421 unreg_vpi = 0;
1420 1422
1421 1423 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1422 1424 (void) emlxs_rpi_pause_notify(port,
1423 1425 nlp->rpip);
1424 1426 }
1425 1427
1426 1428 /*
1427 1429 * Close the node for any further normal IO
1428 1430  				 * An ADISC or a PLOGI will reopen the node
1429 1431 */
1430 1432 emlxs_node_close(port, nlp,
1431 1433 hba->channel_fcp, -1);
1432 1434 emlxs_node_close(port, nlp, hba->channel_ip,
1433 1435 ((linkdown) ? 0 : 60));
1434 1436
1435 1437 /* Flush tx queues except for FCP ring */
1436 1438 (void) emlxs_tx_node_flush(port, nlp,
1437 1439 &hba->chan[hba->channel_ct], 0, 0);
1438 1440 (void) emlxs_tx_node_flush(port, nlp,
1439 1441 &hba->chan[hba->channel_els], 0, 0);
1440 1442 (void) emlxs_tx_node_flush(port, nlp,
1441 1443 &hba->chan[hba->channel_ip], 0, 0);
1442 1444
1443 1445 /* Flush chip queues except for FCP ring */
1444 1446 (void) emlxs_chipq_node_flush(port,
1445 1447 &hba->chan[hba->channel_ct], nlp, 0);
1446 1448 (void) emlxs_chipq_node_flush(port,
1447 1449 &hba->chan[hba->channel_els], nlp, 0);
1448 1450 (void) emlxs_chipq_node_flush(port,
1449 1451 &hba->chan[hba->channel_ip], nlp, 0);
1450 1452 }
1451 1453 }
1452 1454
1453 1455 break;
1454 1456
1455 1457 } /* switch() */
1456 1458
1457 1459 done:
1458 1460
1459 1461 if (unreg_vpi) {
1460 1462 (void) emlxs_mb_unreg_vpi(port);
1461 1463 }
1462 1464
1463 1465 return (0);
1464 1466
1465 1467 } /* emlxs_port_offline() */
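
The scope argument encodes an affected-ID format plus D_ID (fc_affected_id_t); the format switch at the top of the routine boils down to a mask lookup. A condensed sketch of that mapping (hypothetical helper, not in the source):

    /* Hypothetical helper -- equivalent to the format switch above. */
    static uint32_t
    affected_id_mask(uint8_t format)
    {
    	switch (format) {
    	case 0:  return (0x00ffffff);	/* single N_Port */
    	case 1:  return (0x00ffff00);	/* area */
    	case 2:  return (0x00ff0000);	/* domain */
    	default: return (0x00000000);	/* network / link events: match all */
    	}
    }

    /* A node is affected when (nlp->nlp_DID & mask) == (aid->aff_d_id & mask). */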
1466 1468
1467 1469
1468 1470 extern void
1469 1471 emlxs_port_online(emlxs_port_t *vport)
1470 1472 {
1471 1473 emlxs_hba_t *hba = vport->hba;
1472 1474 emlxs_port_t *port = &PPORT;
1473 1475 NODELIST *nlp;
1474 1476 uint32_t state;
1475 1477 uint32_t update;
1476 1478 uint32_t npiv_linkup;
1477 1479 char topology[32];
1478 1480 char linkspeed[32];
1479 1481 char mode[32];
1480 1482
1481 1483 /*
1482 1484 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1483 1485 * "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
1484 1486 */
1485 1487
1486 1488 if ((vport->vpi > 0) &&
1487 1489 (!(hba->flag & FC_NPIV_ENABLED) ||
1488 1490 !(hba->flag & FC_NPIV_SUPPORTED))) {
1489 1491 return;
1490 1492 }
1491 1493
1492 1494 if (!(vport->flag & EMLXS_PORT_BOUND) ||
1493 1495 !(vport->flag & EMLXS_PORT_ENABLED)) {
1494 1496 return;
1495 1497 }
1496 1498
1497 1499 /* Check for mode */
1498 1500 if (port->mode == MODE_TARGET) {
1499 1501 (void) strlcpy(mode, ", target", sizeof (mode));
1500 1502
1501 1503 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1502 1504 /* Set the node tags */
1503 1505 emlxs_fcp_tag_nodes(vport);
1504 1506 while ((nlp = emlxs_find_tagged_node(vport))) {
1505 1507 /* The RPI was paused in port_offline */
1506 1508 (void) emlxs_rpi_resume_notify(vport,
1507 1509 nlp->rpip, 0);
1508 1510 }
1509 1511 }
1510 1512 } else if (port->mode == MODE_INITIATOR) {
1511 1513 (void) strlcpy(mode, ", initiator", sizeof (mode));
1512 1514 } else {
1513 1515 (void) strlcpy(mode, "unknown", sizeof (mode));
1514 1516 }
1515 1517 mutex_enter(&EMLXS_PORT_LOCK);
1516 1518
1517 1519 /* Check for loop topology */
1518 1520 if (hba->topology == TOPOLOGY_LOOP) {
1519 1521 state = FC_STATE_LOOP;
1520 1522 (void) strlcpy(topology, ", loop", sizeof (topology));
1521 1523 } else {
1522 1524 state = FC_STATE_ONLINE;
1523 1525 (void) strlcpy(topology, ", fabric", sizeof (topology));
1524 1526 }
1525 1527
1526 1528 /* Set the link speed */
1527 1529 switch (hba->linkspeed) {
1528 1530 case 0:
1529 1531 (void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
1530 1532 state |= FC_STATE_1GBIT_SPEED;
1531 1533 break;
1532 1534
1533 1535 case LA_1GHZ_LINK:
1534 1536 (void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
1535 1537 state |= FC_STATE_1GBIT_SPEED;
1536 1538 break;
1537 1539 case LA_2GHZ_LINK:
1538 1540 (void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
1539 1541 state |= FC_STATE_2GBIT_SPEED;
1540 1542 break;
1541 1543 case LA_4GHZ_LINK:
1542 1544 (void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
1543 1545 state |= FC_STATE_4GBIT_SPEED;
1544 1546 break;
1545 1547 case LA_8GHZ_LINK:
1546 1548 (void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
1547 1549 state |= FC_STATE_8GBIT_SPEED;
1548 1550 break;
1549 1551 case LA_10GHZ_LINK:
1550 1552 (void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
1551 1553 state |= FC_STATE_10GBIT_SPEED;
1552 1554 break;
1553 1555 case LA_16GHZ_LINK:
1554 1556 (void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
1555 1557 state |= FC_STATE_16GBIT_SPEED;
1556 1558 break;
1559 + case LA_32GHZ_LINK:
1560 + (void) strlcpy(linkspeed, "32Gb", sizeof (linkspeed));
1561 + state |= FC_STATE_32GBIT_SPEED;
1562 + break;
1557 1563 default:
1558 1564 (void) snprintf(linkspeed, sizeof (linkspeed), "unknown(0x%x)",
1559 1565 hba->linkspeed);
1560 1566 break;
1561 1567 }
1562 1568
1563 1569 npiv_linkup = 0;
1564 1570 update = 0;
1565 1571
1566 1572 if ((hba->state >= FC_LINK_UP) &&
1567 1573 !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
1568 1574 update = 1;
1569 1575 vport->ulp_statec = state;
1570 1576
1571 1577 if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
1572 1578 hba->flag |= FC_NPIV_LINKUP;
1573 1579 npiv_linkup = 1;
1574 1580 }
1575 1581 }
1576 1582
1577 1583 mutex_exit(&EMLXS_PORT_LOCK);
1578 1584
1579 1585 if (update) {
1580 1586 if (vport->flag & EMLXS_PORT_BOUND) {
1581 1587 if (vport->vpi == 0) {
1582 1588 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1583 1589 "%s%s%s", linkspeed, topology, mode);
1584 1590
1585 1591 } else if (npiv_linkup) {
1586 1592 EMLXS_MSGF(EMLXS_CONTEXT,
1587 1593 &emlxs_npiv_link_up_msg, "%s%s%s",
1588 1594 linkspeed, topology, mode);
1589 1595 }
1590 1596
1591 1597 if (vport->mode == MODE_INITIATOR) {
1592 1598 emlxs_fca_link_up(vport);
1593 1599 }
1594 1600 #ifdef SFCT_SUPPORT
1595 1601 else if (vport->mode == MODE_TARGET) {
1596 1602 emlxs_fct_link_up(vport);
1597 1603 }
1598 1604 #endif /* SFCT_SUPPORT */
1599 1605 } else {
1600 1606 if (vport->vpi == 0) {
1601 1607 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
1602 1608 "%s%s%s *", linkspeed, topology, mode);
1603 1609
1604 1610 } else if (npiv_linkup) {
1605 1611 EMLXS_MSGF(EMLXS_CONTEXT,
1606 1612 &emlxs_npiv_link_up_msg, "%s%s%s *",
1607 1613 linkspeed, topology, mode);
1608 1614 }
1609 1615 }
1610 1616
1611 1617 /* Check for waiting threads */
1612 1618 if (vport->vpi == 0) {
1613 1619 mutex_enter(&EMLXS_LINKUP_LOCK);
1614 1620 if (hba->linkup_wait_flag == TRUE) {
1615 1621 hba->linkup_wait_flag = FALSE;
1616 1622 cv_broadcast(&EMLXS_LINKUP_CV);
1617 1623 }
1618 1624 mutex_exit(&EMLXS_LINKUP_LOCK);
1619 1625 }
1620 1626
1621 1627 /* Flush any pending ub buffers */
1622 1628 emlxs_ub_flush(vport);
1623 1629 }
1624 1630
1631 + hba->flag |= FC_GPIO_LINK_UP;
1632 +
1625 1633 return;
1626 1634
1627 1635 } /* emlxs_port_online() */
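
The FC_GPIO_LINK_UP bit introduced by this change simply mirrors link state: cleared on the linkdown path in emlxs_port_offline() and set here once the port is up. A hypothetical consumer, assuming (as the flag name suggests) GPIO/LED signaling elsewhere in the driver:

    /* Hypothetical consumer of the new flag -- illustrative only. */
    if (hba->flag & FC_GPIO_LINK_UP) {
    	/* e.g. drive the link LED on via GPIO */
    } else {
    	/* e.g. drive the link LED off */
    }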
1628 1636
1629 1637
1630 1638 /* SLI3 */
1631 1639 extern void
1632 1640 emlxs_linkdown(emlxs_hba_t *hba)
1633 1641 {
1634 1642 emlxs_port_t *port = &PPORT;
1635 1643 int i;
1636 1644 uint32_t scope;
1637 1645
1638 1646 mutex_enter(&EMLXS_PORT_LOCK);
1639 1647
1640 1648 if (hba->state > FC_LINK_DOWN) {
1641 1649 HBASTATS.LinkDown++;
1642 1650 EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_DOWN);
1643 1651 }
1644 1652
1645 1653 /* Set scope */
1646 1654 scope = (hba->flag & FC_NEW_FABRIC)? 0xFDFFFFFF:0xFFFFFFFF;
1647 1655
1648 1656 /* Filter hba flags */
1649 1657 hba->flag &= FC_LINKDOWN_MASK;
1650 1658 hba->discovery_timer = 0;
1651 1659 hba->linkup_timer = 0;
1652 1660
1653 1661 mutex_exit(&EMLXS_PORT_LOCK);
1654 1662
1655 1663 for (i = 0; i < MAX_VPORTS; i++) {
1656 1664 port = &VPORT(i);
1657 1665
1658 1666 if (!(port->flag & EMLXS_PORT_BOUND)) {
1659 1667 continue;
1660 1668 }
1661 1669
1662 1670 (void) emlxs_port_offline(port, scope);
1663 1671
1664 1672 }
1665 1673
1666 1674 emlxs_log_link_event(port);
1667 1675
1668 1676 return;
1669 1677
1670 1678 } /* emlxs_linkdown() */
1671 1679
1672 1680
1673 1681 /* SLI3 */
1674 1682 extern void
1675 1683 emlxs_linkup(emlxs_hba_t *hba)
1676 1684 {
1677 1685 emlxs_port_t *port = &PPORT;
1678 1686 emlxs_config_t *cfg = &CFG;
1679 1687
1680 1688 mutex_enter(&EMLXS_PORT_LOCK);
1681 1689
1682 1690 /* Check for any mode changes */
1683 1691 emlxs_mode_set(hba);
1684 1692
1685 1693 HBASTATS.LinkUp++;
1686 1694 EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_UP);
1687 1695
1688 1696 #ifdef MENLO_SUPPORT
1689 1697 if (hba->flag & FC_MENLO_MODE) {
1690 1698 mutex_exit(&EMLXS_PORT_LOCK);
1691 1699
1692 1700 /*
1693 1701 * Trigger linkup CV and don't start linkup & discovery
1694 1702 * timers
1695 1703 */
1696 1704 mutex_enter(&EMLXS_LINKUP_LOCK);
1697 1705 cv_broadcast(&EMLXS_LINKUP_CV);
1698 1706 mutex_exit(&EMLXS_LINKUP_LOCK);
1699 1707
1700 1708 emlxs_log_link_event(port);
1701 1709
1702 1710 return;
1703 1711 }
1704 1712 #endif /* MENLO_SUPPORT */
1705 1713
1706 1714 /* Set the linkup & discovery timers */
1707 1715 hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
1708 1716 hba->discovery_timer =
1709 1717 hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
1710 1718 cfg[CFG_DISC_TIMEOUT].current;
1711 1719
1712 1720 mutex_exit(&EMLXS_PORT_LOCK);
1713 1721
1714 1722 emlxs_log_link_event(port);
1715 1723
1716 1724 return;
1717 1725
1718 1726 } /* emlxs_linkup() */
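
The two timers armed above stagger discovery behind the linkup window. Worked out in timer tics:

    /*
     * Sketch of the arithmetic above:
     *
     *   linkup_timer    = timer_tics + linkup_timeout
     *   discovery_timer = timer_tics + linkup_timeout + disc_timeout
     *
     * so the discovery timer always expires disc_timeout tics after
     * the linkup window closes, never before it.
     */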
1719 1727
1720 1728
1721 1729 /*
1722 1730 * emlxs_reset_link
1723 1731 *
1724 1732 * Description:
1725 1733 * Called to reset the link with an init_link
1726 1734 *
1727 1735 * Returns:
1728 1736 *
1729 1737 */
1730 1738 extern int
1731 1739 emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup, uint32_t wait)
1732 1740 {
1733 1741 emlxs_port_t *port = &PPORT;
1734 1742 emlxs_config_t *cfg;
1735 1743 MAILBOXQ *mbq = NULL;
1736 1744 MAILBOX *mb = NULL;
1737 1745 int rval = 0;
1738 1746 int tmo;
1739 1747 int rc;
1740 1748
1741 1749 /*
1742 1750 * Get a buffer to use for the mailbox command
1743 1751 */
1744 1752 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
1745 1753 == NULL) {
1746 1754 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
1747 1755 "Unable to allocate mailbox buffer.");
1748 1756 rval = 1;
1749 1757 goto reset_link_fail;
1750 1758 }
1751 1759
1752 1760 if (linkup) {
1753 1761 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1754 1762 "Resetting link...");
1755 1763 } else {
1756 1764 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1757 1765 "Disabling link...");
1758 1766 }
1759 1767
1760 1768 mb = (MAILBOX *)mbq;
1761 1769
1762 1770 /* Bring link down first */
1763 1771 emlxs_mb_down_link(hba, mbq);
1764 1772
1765 1773 #define MBXERR_LINK_DOWN 0x33
1766 1774
1767 1775 if (wait) {
1768 1776 wait = MBX_WAIT;
1769 1777 } else {
1770 1778 wait = MBX_NOWAIT;
1771 1779 }
1772 1780 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1773 1781 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS) &&
1774 1782 (rc != MBXERR_LINK_DOWN)) {
1775 1783 rval = 1;
1776 1784 goto reset_link_fail;
1777 1785 }
1778 1786
1779 1787 tmo = 120;
1780 1788 do {
1781 1789 delay(drv_usectohz(500000));
1782 1790 tmo--;
1783 1791
1784 1792 if (!tmo) {
1785 1793 rval = 1;
1786 1794
1787 1795 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
1788 1796 "Linkdown timeout.");
1789 1797
1790 1798 goto reset_link_fail;
1791 1799 }
1792 1800 } while ((hba->state >= FC_LINK_UP) && (hba->state != FC_ERROR));
1793 1801
1794 1802 if (linkup) {
1795 1803 /*
1796 1804 * Setup and issue mailbox INITIALIZE LINK command
1797 1805 */
1798 1806
1799 1807 if (wait == MBX_NOWAIT) {
1800 1808 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
1801 1809 == NULL) {
1802 1810 EMLXS_MSGF(EMLXS_CONTEXT,
1803 1811 &emlxs_link_reset_failed_msg,
1804 1812 "Unable to allocate mailbox buffer.");
1805 1813 rval = 1;
1806 1814 goto reset_link_fail;
1807 1815 }
1808 1816 mb = (MAILBOX *)mbq;
1809 1817 } else {
1810 1818 /* Reuse mbq from previous mbox */
1811 1819 mb = (MAILBOX *)mbq;
1812 1820 }
1813 1821 cfg = &CFG;
1814 1822
1815 1823 emlxs_mb_init_link(hba, mbq,
1816 1824 cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1817 1825
1818 1826 mb->un.varInitLnk.lipsr_AL_PA = 0;
1819 1827
1820 1828 /* Clear the loopback mode */
1821 1829 mutex_enter(&EMLXS_PORT_LOCK);
1822 1830 hba->flag &= ~FC_LOOPBACK_MODE;
1823 1831 hba->loopback_tics = 0;
1824 1832 mutex_exit(&EMLXS_PORT_LOCK);
1825 1833
1826 1834 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
1827 1835 if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
1828 1836 rval = 1;
1829 1837 goto reset_link_fail;
1830 1838 }
1831 1839
1832 1840 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
1833 1841 }
1834 1842
1835 1843 reset_link_fail:
1836 1844
1837 1845 if ((wait == MBX_WAIT) && mbq) {
1838 1846 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
1839 1847 }
1840 1848
1841 1849 return (rval);
1842 1850 } /* emlxs_reset_link() */
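
In outline the routine is a three-step sequence, with a polling budget for the down transition of 120 iterations at 500 ms each, i.e. 60 seconds. A condensed sketch (illustrative only, error handling omitted):

    /* Condensed outline of emlxs_reset_link() -- illustrative only. */
    emlxs_mb_down_link(hba, mbq);			/* 1. force the link down */

    /* 2. poll until hba->state < FC_LINK_UP: 120 * 500 ms = 60 s budget */

    if (linkup) {
    	emlxs_mb_init_link(hba, mbq,		/* 3. bring it back up */
    	    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
    }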
1843 1851
1844 1852
1845 1853 extern int
1846 1854 emlxs_online(emlxs_hba_t *hba)
1847 1855 {
1848 1856 emlxs_port_t *port = &PPORT;
1849 1857 int32_t rval = 0;
1850 1858 uint32_t i = 0;
1851 1859
1852 1860 /* Make sure adapter is offline or exit trying (30 seconds) */
1853 1861 while (i++ < 30) {
1854 1862 /* Check if adapter is already going online */
1855 1863 if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1856 1864 return (0);
1857 1865 }
1858 1866
1859 1867 mutex_enter(&EMLXS_PORT_LOCK);
1860 1868
1861 1869 /* Check again */
1862 1870 if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
1863 1871 mutex_exit(&EMLXS_PORT_LOCK);
1864 1872 return (0);
1865 1873 }
1866 1874
1867 1875 /* Check if adapter is offline */
1868 1876 if (hba->flag & FC_OFFLINE_MODE) {
1869 1877 /* Mark it going online */
1870 1878 hba->flag &= ~FC_OFFLINE_MODE;
1871 1879 hba->flag |= FC_ONLINING_MODE;
1872 1880
1873 1881 /* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1874 1882 mutex_exit(&EMLXS_PORT_LOCK);
1875 1883 break;
1876 1884 }
1877 1885
1878 1886 mutex_exit(&EMLXS_PORT_LOCK);
1879 1887
1880 1888 BUSYWAIT_MS(1000);
1881 1889 }
1882 1890
1883 1891 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1884 1892 "Going online...");
1885 1893
1886 1894 if (rval = EMLXS_SLI_ONLINE(hba)) {
1887 1895 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
1888 1896 rval);
1889 1897 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
1890 1898
1891 1899 /* Set FC_OFFLINE_MODE */
1892 1900 mutex_enter(&EMLXS_PORT_LOCK);
1893 1901 hba->flag |= FC_OFFLINE_MODE;
1894 1902 hba->flag &= ~FC_ONLINING_MODE;
1895 1903 mutex_exit(&EMLXS_PORT_LOCK);
1896 1904
1897 1905 return (rval);
1898 1906 }
1899 1907
1900 1908 /* Start the timer */
1901 1909 emlxs_timer_start(hba);
1902 1910
1903 1911 /* Set FC_ONLINE_MODE */
1904 1912 mutex_enter(&EMLXS_PORT_LOCK);
1905 1913 hba->flag |= FC_ONLINE_MODE;
1906 1914 hba->flag &= ~FC_ONLINING_MODE;
1907 1915 mutex_exit(&EMLXS_PORT_LOCK);
1908 1916
1909 1917 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);
1910 1918
1911 1919 #ifdef SFCT_SUPPORT
1912 1920 if (port->flag & EMLXS_TGT_ENABLED) {
1913 1921 (void) emlxs_fct_port_initialize(port);
1914 1922 }
1915 1923 #endif /* SFCT_SUPPORT */
1916 1924
1917 1925 return (rval);
1918 1926
1919 1927 } /* emlxs_online() */
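
Editor's note: emlxs_online() and emlxs_offline() share one transition pattern: test the state flags lock-free, re-test under the lock, and only then claim the transitional ONLINING/OFFLINING state before dropping the lock to do the slow work. A sketch of that double-checked transition, with invented names and a pthread mutex standing in for EMLXS_PORT_LOCK:

#include <pthread.h>

enum {
	ONLINE_MODE	= 0x1,
	OFFLINE_MODE	= 0x2,
	ONLINING_MODE	= 0x4,
	OFFLINING_MODE	= 0x8
};

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int hba_flag = OFFLINE_MODE;

int
go_online_sketch(void)
{
	/* Cheap unlocked check: someone else may already have won */
	if (hba_flag & (ONLINE_MODE | ONLINING_MODE))
		return (0);

	pthread_mutex_lock(&port_lock);

	/* Re-check under the lock; the flags may have changed */
	if (hba_flag & (ONLINE_MODE | ONLINING_MODE)) {
		pthread_mutex_unlock(&port_lock);
		return (0);
	}

	/* Claim the transitional state, then drop the lock to work */
	hba_flag &= ~OFFLINE_MODE;
	hba_flag |= ONLINING_MODE;
	pthread_mutex_unlock(&port_lock);

	/* ... bring the hardware up here, outside the lock ... */

	pthread_mutex_lock(&port_lock);
	hba_flag |= ONLINE_MODE;
	hba_flag &= ~ONLINING_MODE;
	pthread_mutex_unlock(&port_lock);

	return (0);
}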
1920 1928
1921 1929
1922 1930 extern int
1923 1931 emlxs_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1924 1932 {
1925 1933 emlxs_port_t *port = &PPORT;
1926 1934 uint32_t i = 0;
1927 1935 int rval = 1;
1928 1936
1929 1937 	/* Make sure adapter is online or quit trying (30 seconds) */
1930 1938 while (i++ < 30) {
1931 1939 /* Check if adapter is already going offline */
1932 1940 if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1933 1941 return (0);
1934 1942 }
1935 1943
1936 1944 mutex_enter(&EMLXS_PORT_LOCK);
1937 1945
1938 1946 /* Check again */
1939 1947 if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
1940 1948 mutex_exit(&EMLXS_PORT_LOCK);
1941 1949 return (0);
1942 1950 }
1943 1951
1944 1952 /* Check if adapter is online */
1945 1953 if (hba->flag & FC_ONLINE_MODE) {
1946 1954 /* Mark it going offline */
1947 1955 hba->flag &= ~FC_ONLINE_MODE;
1948 1956 hba->flag |= FC_OFFLINING_MODE;
1949 1957
1950 1958 /* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
1951 1959 mutex_exit(&EMLXS_PORT_LOCK);
1952 1960 break;
1953 1961 }
1954 1962
1955 1963 mutex_exit(&EMLXS_PORT_LOCK);
1956 1964
1957 1965 BUSYWAIT_MS(1000);
1958 1966 }
1959 1967
1960 1968 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
1961 1969 "Going offline...");
1962 1970
1963 1971 /* Declare link down */
1964 1972 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
1965 1973 (void) emlxs_fcf_shutdown_notify(port, 1);
1966 1974 } else {
1967 1975 emlxs_linkdown(hba);
1968 1976 }
1969 1977
1970 1978 #ifdef SFCT_SUPPORT
1971 1979 if (port->flag & EMLXS_TGT_ENABLED) {
1972 1980 (void) emlxs_fct_port_shutdown(port);
1973 1981 }
1974 1982 #endif /* SFCT_SUPPORT */
1975 1983
1976 1984 /* Check if adapter was shutdown */
1977 1985 if (hba->flag & FC_HARDWARE_ERROR) {
1978 1986 /*
1979 1987 * Force mailbox cleanup
1980 1988 * This will wake any sleeping or polling threads
1981 1989 */
1982 1990 emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
1983 1991 }
1984 1992
1985 1993 /* Pause here for the IO to settle */
1986 1994 delay(drv_usectohz(1000000)); /* 1 sec */
1987 1995
1988 1996 /* Unregister all nodes */
1989 1997 emlxs_ffcleanup(hba);
1990 1998
1991 1999 if (hba->bus_type == SBUS_FC) {
1992 2000 WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), 0x9A);
1993 2001 #ifdef FMA_SUPPORT
1994 2002 /* Access handle validation */
1995 2003 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
1996 2004 #endif /* FMA_SUPPORT */
1997 2005 }
1998 2006
1999 2007 /* Stop the timer */
2000 2008 emlxs_timer_stop(hba);
2001 2009
2002 2010 	/* For safety, flush every iotag list */
2003 2011 if (emlxs_iotag_flush(hba)) {
2004 2012 /* Pause here for the IO to flush */
2005 2013 delay(drv_usectohz(1000));
2006 2014 }
2007 2015
2008 2016 /* Wait for poll command request to settle */
2009 2017 while (hba->io_poll_count > 0) {
2010 2018 delay(drv_usectohz(2000000)); /* 2 sec */
2011 2019 }
2012 2020
2013 2021 /* Shutdown the adapter interface */
2014 2022 EMLXS_SLI_OFFLINE(hba, reset_requested);
2015 2023
2016 2024 mutex_enter(&EMLXS_PORT_LOCK);
2017 2025 hba->flag |= FC_OFFLINE_MODE;
2018 2026 hba->flag &= ~FC_OFFLINING_MODE;
2019 2027 mutex_exit(&EMLXS_PORT_LOCK);
2020 2028
2021 2029 rval = 0;
2022 2030
2023 2031 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);
2024 2032
2025 2033 done:
2026 2034
2027 2035 return (rval);
2028 2036
2029 2037 } /* emlxs_offline() */
2030 2038
2031 2039
2032 2040
2033 2041 extern int
2034 2042 emlxs_power_down(emlxs_hba_t *hba)
2035 2043 {
2036 2044 #ifdef FMA_SUPPORT
2037 2045 emlxs_port_t *port = &PPORT;
2038 2046 #endif /* FMA_SUPPORT */
2039 2047 int32_t rval = 0;
2040 2048
2041 2049 if ((rval = emlxs_offline(hba, 0))) {
2042 2050 return (rval);
2043 2051 }
2044 2052 EMLXS_SLI_HBA_RESET(hba, 1, 1, 0);
2045 2053
2046 2054
2047 2055 #ifdef FMA_SUPPORT
2048 2056 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2049 2057 != DDI_FM_OK) {
2050 2058 EMLXS_MSGF(EMLXS_CONTEXT,
2051 2059 &emlxs_invalid_access_handle_msg, NULL);
2052 2060 return (1);
2053 2061 }
2054 2062 #endif /* FMA_SUPPORT */
2055 2063
2056 2064 return (0);
2057 2065
2058 2066 } /* End emlxs_power_down */
2059 2067
2060 2068
2061 2069 extern int
2062 2070 emlxs_power_up(emlxs_hba_t *hba)
2063 2071 {
2064 2072 #ifdef FMA_SUPPORT
2065 2073 emlxs_port_t *port = &PPORT;
2066 2074 #endif /* FMA_SUPPORT */
2067 2075 int32_t rval = 0;
2068 2076
2069 2077
2070 2078 #ifdef FMA_SUPPORT
2071 2079 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
2072 2080 != DDI_FM_OK) {
2073 2081 EMLXS_MSGF(EMLXS_CONTEXT,
2074 2082 &emlxs_invalid_access_handle_msg, NULL);
2075 2083 return (1);
2076 2084 }
2077 2085 #endif /* FMA_SUPPORT */
2078 2086
2079 2087 /* Bring adapter online */
2080 2088 if ((rval = emlxs_online(hba))) {
2081 2089 if (hba->pci_cap_offset[PCI_CAP_ID_PM]) {
2082 2090 /* Put chip in D3 state */
2083 2091 (void) ddi_put8(hba->pci_acc_handle,
2084 2092 (uint8_t *)(hba->pci_addr +
2085 2093 hba->pci_cap_offset[PCI_CAP_ID_PM] +
2086 2094 PCI_PMCSR),
2087 2095 (uint8_t)PCI_PMCSR_D3HOT);
2088 2096 }
2089 2097 return (rval);
2090 2098 }
2091 2099
2092 2100 return (rval);
2093 2101
2094 2102 } /* emlxs_power_up() */
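
Editor's note: the D3hot write in emlxs_power_up()'s failure path targets the PMCSR register inside the PCI power-management capability found at attach time. The sketch below assumes a flat config-space byte array in place of ddi_put8() and the real access handle; per the PCI PM spec, PMCSR sits at offset 4 within the capability and D3hot is power-state value 0x3.

#include <stdint.h>

#define	PMCSR_OFF	4	/* PMCSR offset within the PM capability */
#define	PMCSR_D3HOT	0x03	/* PowerState field value for D3hot */

/* Put the device in D3hot given its PM capability offset (0 = none) */
static void
set_d3hot_sketch(uint8_t *cfg_space, uint16_t pm_cap_offset)
{
	if (pm_cap_offset != 0)
		cfg_space[pm_cap_offset + PMCSR_OFF] = PMCSR_D3HOT;
}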
2095 2103
2096 2104
2097 2105 /*
2098 2106 *
2099 2107 * NAME: emlxs_ffcleanup
2100 2108 *
2101 2109  * FUNCTION: Clean up all the Firefly resources used in configuring the adapter
2102 2110 *
2103 2111 * EXECUTION ENVIRONMENT: process only
2104 2112 *
2105 2113 * CALLED FROM: CFG_TERM
2106 2114 *
2107 2115 * INPUT: hba - pointer to the dev_ctl area.
2108 2116 *
2109 2117 * RETURNS: none
2110 2118 */
2111 2119 extern void
2112 2120 emlxs_ffcleanup(emlxs_hba_t *hba)
2113 2121 {
2114 2122 emlxs_port_t *port = &PPORT;
2115 2123 uint32_t i;
2116 2124
2117 2125 /* Disable all but the mailbox interrupt */
2118 2126 EMLXS_SLI_DISABLE_INTR(hba, HC_MBINT_ENA);
2119 2127
2120 2128 /* Make sure all port nodes are destroyed */
2121 2129 for (i = 0; i < MAX_VPORTS; i++) {
2122 2130 port = &VPORT(i);
2123 2131
2124 2132 if (port->node_count) {
2125 2133 (void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
2126 2134 }
2127 2135 }
2128 2136
2129 2137 /* Clear all interrupt enable conditions */
2130 2138 EMLXS_SLI_DISABLE_INTR(hba, 0);
2131 2139
2132 2140 return;
2133 2141
2134 2142 } /* emlxs_ffcleanup() */
2135 2143
2136 2144
2137 2145 extern uint16_t
2138 2146 emlxs_register_pkt(CHANNEL *cp, emlxs_buf_t *sbp)
2139 2147 {
2140 2148 emlxs_hba_t *hba;
2141 2149 emlxs_port_t *port;
2142 2150 uint16_t iotag;
2143 2151 uint32_t i;
2144 2152
2145 2153 hba = cp->hba;
2146 2154
2147 2155 mutex_enter(&EMLXS_FCTAB_LOCK);
2148 2156
2149 2157 if (sbp->iotag != 0) {
2150 2158 port = &PPORT;
2151 2159
2152 2160 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2153 2161 "Pkt already registered! channel=%d iotag=%d sbp=%p",
2154 2162 sbp->channel, sbp->iotag, sbp);
2155 2163 }
2156 2164
2157 2165 iotag = 0;
2158 2166 for (i = 0; i < hba->max_iotag; i++) {
2159 2167 if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
2160 2168 hba->fc_iotag = 1;
2161 2169 }
2162 2170 iotag = hba->fc_iotag++;
2163 2171
2164 2172 if (hba->fc_table[iotag] == 0 ||
2165 2173 hba->fc_table[iotag] == STALE_PACKET) {
2166 2174 hba->io_count++;
2167 2175 hba->fc_table[iotag] = sbp;
2168 2176
2169 2177 sbp->iotag = iotag;
2170 2178 sbp->channel = cp;
2171 2179
2172 2180 break;
2173 2181 }
2174 2182 iotag = 0;
2175 2183 }
2176 2184
2177 2185 mutex_exit(&EMLXS_FCTAB_LOCK);
2178 2186
2179 2187 /*
2180 2188 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2181 2189 * "register_pkt: channel=%d iotag=%d sbp=%p",
2182 2190 * cp->channelno, iotag, sbp);
2183 2191 */
2184 2192
2185 2193 return (iotag);
2186 2194
2187 2195 } /* emlxs_register_pkt() */
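
Editor's note: the allocator above keeps a rotor (hba->fc_iotag) that remembers where the last search ended, reserves tag 0 to mean "no tag", and gives up with 0 after one full pass finds no free (NULL or stale) slot. A self-contained sketch of the same rotor scan, with invented names and sizes, locking omitted:

#include <stddef.h>
#include <stdint.h>

#define	MAX_TAG		64
#define	STALE		((void *)-1)	/* tombstone from forced removals */

static void *table[MAX_TAG];
static uint16_t rotor = 1;

uint16_t
alloc_iotag_sketch(void *pkt)
{
	uint16_t tag;
	uint32_t i;

	for (i = 0; i < MAX_TAG; i++) {
		/* Wrap the rotor, skipping reserved tag 0 */
		if (rotor == 0 || rotor >= MAX_TAG)
			rotor = 1;
		tag = rotor++;

		if (table[tag] == NULL || table[tag] == STALE) {
			table[tag] = pkt;	/* claim the slot */
			return (tag);
		}
	}
	return (0);		/* table full */
}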
2188 2196
2189 2197
2190 2198
2191 2199 extern emlxs_buf_t *
2192 2200 emlxs_unregister_pkt(CHANNEL *cp, uint16_t iotag, uint32_t forced)
2193 2201 {
2194 2202 emlxs_hba_t *hba;
2195 2203 emlxs_buf_t *sbp;
2196 2204
2197 2205 sbp = NULL;
2198 2206 hba = cp->hba;
2199 2207
2200 2208 /* Check the iotag range */
2201 2209 if ((iotag == 0) || (iotag >= hba->max_iotag)) {
2202 2210 return (NULL);
2203 2211 }
2204 2212
2205 2213 /* Remove the sbp from the table */
2206 2214 mutex_enter(&EMLXS_FCTAB_LOCK);
2207 2215 sbp = hba->fc_table[iotag];
2208 2216
2209 2217 if (!sbp || (sbp == STALE_PACKET)) {
2210 2218 mutex_exit(&EMLXS_FCTAB_LOCK);
2211 2219 return (sbp);
2212 2220 }
2213 2221
2214 2222 hba->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
2215 2223 hba->io_count--;
2216 2224 sbp->iotag = 0;
2217 2225
2218 2226 mutex_exit(&EMLXS_FCTAB_LOCK);
2219 2227
2220 2228
2221 2229 /* Clean up the sbp */
2222 2230 mutex_enter(&sbp->mtx);
2223 2231
2224 2232 if (sbp->pkt_flags & PACKET_IN_TXQ) {
2225 2233 sbp->pkt_flags &= ~PACKET_IN_TXQ;
2226 2234 hba->channel_tx_count--;
2227 2235 }
2228 2236
2229 2237 if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
2230 2238 sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
2231 2239 }
2232 2240
2233 2241 if (sbp->bmp) {
2234 2242 emlxs_mem_put(hba, MEM_BPL, (void *)sbp->bmp);
2235 2243 sbp->bmp = 0;
2236 2244 }
2237 2245
2238 2246 mutex_exit(&sbp->mtx);
2239 2247
2240 2248 return (sbp);
2241 2249
2242 2250 } /* emlxs_unregister_pkt() */
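
Editor's note: the forced flag above decides whether the freed slot becomes NULL or STALE_PACKET; the tombstone lets a hardware completion that arrives after a forced abort be recognized as stale and dropped instead of dereferencing a recycled pointer. A sketch of just that rule (invented names, no locking shown):

#include <stddef.h>
#include <stdint.h>

#define	MAX_TAG		64
#define	STALE		((void *)-1)	/* tombstone left by forced removals */

static void *table[MAX_TAG];

void *
release_iotag_sketch(uint16_t tag, int forced)
{
	void *pkt;

	if (tag == 0 || tag >= MAX_TAG)
		return (NULL);		/* out of range */

	pkt = table[tag];
	if (pkt == NULL || pkt == STALE)
		return (pkt);		/* nothing live in the slot */

	/* Forced: leave a tombstone so a late completion is detectable */
	table[tag] = forced ? STALE : NULL;
	return (pkt);
}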
2243 2251
2244 2252
2245 2253
2246 2254 /* Flush all IO's to all nodes for a given IO Channel */
2247 2255 extern uint32_t
2248 2256 emlxs_tx_channel_flush(emlxs_hba_t *hba, CHANNEL *cp, emlxs_buf_t *fpkt)
2249 2257 {
2250 2258 emlxs_port_t *port = &PPORT;
2251 2259 emlxs_buf_t *sbp;
2252 2260 IOCBQ *iocbq;
2253 2261 IOCBQ *next;
2254 2262 IOCB *iocb;
2255 2263 uint32_t channelno;
2256 2264 Q abort;
2257 2265 NODELIST *ndlp;
2258 2266 IOCB *icmd;
2259 2267 MATCHMAP *mp;
2260 2268 uint32_t i;
2261 2269 uint8_t flag[MAX_CHANNEL];
2262 2270
2263 2271 channelno = cp->channelno;
2264 2272 bzero((void *)&abort, sizeof (Q));
2265 2273 bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));
2266 2274
2267 2275 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2268 2276
2269 2277 /* While a node needs servicing */
2270 2278 while (cp->nodeq.q_first) {
2271 2279 ndlp = (NODELIST *) cp->nodeq.q_first;
2272 2280
2273 2281 /* Check if priority queue is not empty */
2274 2282 if (ndlp->nlp_ptx[channelno].q_first) {
2275 2283 /* Transfer all iocb's to local queue */
2276 2284 if (abort.q_first == 0) {
2277 2285 abort.q_first =
2278 2286 ndlp->nlp_ptx[channelno].q_first;
2279 2287 } else {
2280 2288 ((IOCBQ *)abort.q_last)->next =
2281 2289 (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
2282 2290 }
2283 2291 flag[channelno] = 1;
2284 2292
2285 2293 abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2286 2294 abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2287 2295 }
2288 2296
2289 2297 /* Check if tx queue is not empty */
2290 2298 if (ndlp->nlp_tx[channelno].q_first) {
2291 2299 /* Transfer all iocb's to local queue */
2292 2300 if (abort.q_first == 0) {
2293 2301 abort.q_first = ndlp->nlp_tx[channelno].q_first;
2294 2302 } else {
2295 2303 ((IOCBQ *)abort.q_last)->next =
2296 2304 (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2297 2305 }
2298 2306
2299 2307 abort.q_last = ndlp->nlp_tx[channelno].q_last;
2300 2308 abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2301 2309 }
2302 2310
2303 2311 /* Clear the queue pointers */
2304 2312 ndlp->nlp_ptx[channelno].q_first = NULL;
2305 2313 ndlp->nlp_ptx[channelno].q_last = NULL;
2306 2314 ndlp->nlp_ptx[channelno].q_cnt = 0;
2307 2315
2308 2316 ndlp->nlp_tx[channelno].q_first = NULL;
2309 2317 ndlp->nlp_tx[channelno].q_last = NULL;
2310 2318 ndlp->nlp_tx[channelno].q_cnt = 0;
2311 2319
2312 2320 /* Remove node from service queue */
2313 2321
2314 2322 /* If this is the last node on list */
2315 2323 if (cp->nodeq.q_last == (void *)ndlp) {
2316 2324 cp->nodeq.q_last = NULL;
2317 2325 cp->nodeq.q_first = NULL;
2318 2326 cp->nodeq.q_cnt = 0;
2319 2327 } else {
2320 2328 /* Remove node from head */
2321 2329 cp->nodeq.q_first = ndlp->nlp_next[channelno];
2322 2330 ((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
2323 2331 cp->nodeq.q_first;
2324 2332 cp->nodeq.q_cnt--;
2325 2333 }
2326 2334
2327 2335 /* Clear node */
2328 2336 ndlp->nlp_next[channelno] = NULL;
2329 2337 }
2330 2338
2331 2339 /* First cleanup the iocb's while still holding the lock */
2332 2340 iocbq = (IOCBQ *) abort.q_first;
2333 2341 while (iocbq) {
2334 2342 /* Free the IoTag and the bmp */
2335 2343 iocb = &iocbq->iocb;
2336 2344
2337 2345 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2338 2346 sbp = iocbq->sbp;
2339 2347 if (sbp) {
2340 2348 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2341 2349 }
2342 2350 } else {
2343 2351 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2344 2352 iocb->ULPIOTAG, 0);
2345 2353 }
2346 2354
2347 2355 if (sbp && (sbp != STALE_PACKET)) {
2348 2356 mutex_enter(&sbp->mtx);
2349 2357
2350 2358 sbp->pkt_flags |= PACKET_IN_FLUSH;
2351 2359 /*
2352 2360 * If the fpkt is already set, then we will leave it
2353 2361 * alone. This ensures that this pkt is only accounted
2354 2362 * for on one fpkt->flush_count
2355 2363 */
2356 2364 if (!sbp->fpkt && fpkt) {
2357 2365 mutex_enter(&fpkt->mtx);
2358 2366 sbp->fpkt = fpkt;
2359 2367 fpkt->flush_count++;
2360 2368 mutex_exit(&fpkt->mtx);
2361 2369 }
2362 2370
2363 2371 mutex_exit(&sbp->mtx);
2364 2372 }
2365 2373
2366 2374 iocbq = (IOCBQ *)iocbq->next;
2367 2375 } /* end of while */
2368 2376
2369 2377 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2370 2378
2371 2379 /* Now abort the iocb's */
2372 2380 iocbq = (IOCBQ *)abort.q_first;
2373 2381 while (iocbq) {
2374 2382 /* Save the next iocbq for now */
2375 2383 next = (IOCBQ *)iocbq->next;
2376 2384
2377 2385 /* Unlink this iocbq */
2378 2386 iocbq->next = NULL;
2379 2387
2380 2388 /* Get the pkt */
2381 2389 sbp = (emlxs_buf_t *)iocbq->sbp;
2382 2390
2383 2391 if (sbp) {
2384 2392 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2385 2393 "tx: sbp=%p node=%p", sbp, sbp->node);
2386 2394
2387 2395 if (hba->state >= FC_LINK_UP) {
2388 2396 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2389 2397 IOERR_ABORT_REQUESTED, 1);
2390 2398 } else {
2391 2399 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2392 2400 IOERR_LINK_DOWN, 1);
2393 2401 }
2394 2402
2395 2403 }
2396 2404 /* Free the iocb and its associated buffers */
2397 2405 else {
2398 2406 icmd = &iocbq->iocb;
2399 2407
2400 2408 /* SLI3 */
2401 2409 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2402 2410 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2403 2411 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2404 2412 if ((hba->flag &
2405 2413 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2406 2414 /* HBA is detaching or offlining */
2407 2415 if (icmd->ULPCOMMAND !=
2408 2416 CMD_QUE_RING_LIST64_CN) {
2409 2417 void *tmp;
2410 2418 RING *rp;
2411 2419
2412 2420 rp = &hba->sli.sli3.
2413 2421 ring[channelno];
2414 2422 for (i = 0;
2415 2423 i < icmd->ULPBDECOUNT;
2416 2424 i++) {
2417 2425 mp = EMLXS_GET_VADDR(
2418 2426 hba, rp, icmd);
2419 2427
2420 2428 tmp = (void *)mp;
2421 2429 if (mp) {
2422 2430 emlxs_mem_put(
2423 2431 hba, MEM_BUF, tmp);
2424 2432 }
2425 2433 }
2426 2434 }
2427 2435
2428 2436 emlxs_mem_put(hba, MEM_IOCB,
2429 2437 (void *)iocbq);
2430 2438 } else {
2431 2439 /* repost the unsolicited buffer */
2432 2440 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp,
2433 2441 iocbq);
2434 2442 }
2435 2443 } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2436 2444 icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2437 2445
2438 2446 emlxs_tx_put(iocbq, 1);
2439 2447 }
2440 2448 }
2441 2449
2442 2450 iocbq = next;
2443 2451
2444 2452 } /* end of while */
2445 2453
2446 2454 /* Now trigger channel service */
2447 2455 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2448 2456 if (!flag[channelno]) {
2449 2457 continue;
2450 2458 }
2451 2459
2452 2460 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2453 2461 }
2454 2462
2455 2463 return (abort.q_cnt);
2456 2464
2457 2465 } /* emlxs_tx_channel_flush() */
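
Editor's note: every flush routine in this file follows the same locking discipline: splice whole per-node queues onto a local abort list while holding EMLXS_TX_CHANNEL_LOCK, then drop the lock and complete each entry, since completion may sleep or re-enter the driver. A sketch of that splice-then-drain shape, with a pthread mutex and an invented singly linked queue type:

#include <stddef.h>
#include <pthread.h>

typedef struct ent { struct ent *next; } ent_t;
typedef struct q { ent_t *first; ent_t *last; unsigned cnt; } q_t;

static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Append all of src to dst and empty src; caller holds txq_lock */
static void
splice(q_t *dst, q_t *src)
{
	if (src->first == NULL)
		return;
	if (dst->first == NULL)
		dst->first = src->first;
	else
		dst->last->next = src->first;
	dst->last = src->last;
	dst->cnt += src->cnt;
	src->first = src->last = NULL;
	src->cnt = 0;
}

unsigned
flush_sketch(q_t *nodeq, void (*complete)(ent_t *))
{
	q_t abort = { NULL, NULL, 0 };
	ent_t *e, *next;

	pthread_mutex_lock(&txq_lock);
	splice(&abort, nodeq);		/* O(1) per queue, under lock */
	pthread_mutex_unlock(&txq_lock);

	/* Complete outside the lock; completion may sleep or re-lock */
	for (e = abort.first; e != NULL; e = next) {
		next = e->next;
		e->next = NULL;
		complete(e);
	}
	return (abort.cnt);
}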
2458 2466
2459 2467
2460 2468 /* Flush all IO's on all or a given ring for a given node */
2461 2469 extern uint32_t
2462 2470 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan,
2463 2471 uint32_t shutdown, emlxs_buf_t *fpkt)
2464 2472 {
2465 2473 emlxs_hba_t *hba = HBA;
2466 2474 emlxs_buf_t *sbp;
2467 2475 uint32_t channelno;
2468 2476 CHANNEL *cp;
2469 2477 IOCB *icmd;
2470 2478 IOCBQ *iocbq;
2471 2479 NODELIST *prev;
2472 2480 IOCBQ *next;
2473 2481 IOCB *iocb;
2474 2482 Q abort;
2475 2483 uint32_t i;
2476 2484 MATCHMAP *mp;
2477 2485 uint8_t flag[MAX_CHANNEL];
2478 2486
2479 2487 	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));	/* flag[] is tested below; clear it */
2480 2488
2481 2489 /* Flush all I/O's on tx queue to this target */
2482 2490 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2483 2491
2484 2492 if (!ndlp->nlp_base && shutdown) {
2485 2493 ndlp->nlp_active = 0;
2486 2494 }
2487 2495
2488 2496 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2489 2497 cp = &hba->chan[channelno];
2490 2498
2491 2499 if (chan && cp != chan) {
2492 2500 continue;
2493 2501 }
2494 2502
2495 2503 if (!ndlp->nlp_base || shutdown) {
2496 2504 /* Check if priority queue is not empty */
2497 2505 if (ndlp->nlp_ptx[channelno].q_first) {
2498 2506 /* Transfer all iocb's to local queue */
2499 2507 if (abort.q_first == 0) {
2500 2508 abort.q_first =
2501 2509 ndlp->nlp_ptx[channelno].q_first;
2502 2510 } else {
2503 2511 ((IOCBQ *)(abort.q_last))->next =
2504 2512 (IOCBQ *)ndlp->nlp_ptx[channelno].
2505 2513 q_first;
2506 2514 }
2507 2515
2508 2516 flag[channelno] = 1;
2509 2517
2510 2518 abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2511 2519 abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2512 2520 }
2513 2521 }
2514 2522
2515 2523 /* Check if tx queue is not empty */
2516 2524 if (ndlp->nlp_tx[channelno].q_first) {
2517 2525
2518 2526 /* Transfer all iocb's to local queue */
2519 2527 if (abort.q_first == 0) {
2520 2528 abort.q_first = ndlp->nlp_tx[channelno].q_first;
2521 2529 } else {
2522 2530 ((IOCBQ *)abort.q_last)->next =
2523 2531 (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2524 2532 }
2525 2533
2526 2534 abort.q_last = ndlp->nlp_tx[channelno].q_last;
2527 2535 abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2528 2536 }
2529 2537
2530 2538 /* Clear the queue pointers */
2531 2539 ndlp->nlp_ptx[channelno].q_first = NULL;
2532 2540 ndlp->nlp_ptx[channelno].q_last = NULL;
2533 2541 ndlp->nlp_ptx[channelno].q_cnt = 0;
2534 2542
2535 2543 ndlp->nlp_tx[channelno].q_first = NULL;
2536 2544 ndlp->nlp_tx[channelno].q_last = NULL;
2537 2545 ndlp->nlp_tx[channelno].q_cnt = 0;
2538 2546
2539 2547 /* If this node was on the channel queue, remove it */
2540 2548 if (ndlp->nlp_next[channelno]) {
2541 2549 /* If this is the only node on list */
2542 2550 if (cp->nodeq.q_first == (void *)ndlp &&
2543 2551 cp->nodeq.q_last == (void *)ndlp) {
2544 2552 cp->nodeq.q_last = NULL;
2545 2553 cp->nodeq.q_first = NULL;
2546 2554 cp->nodeq.q_cnt = 0;
2547 2555 } else if (cp->nodeq.q_first == (void *)ndlp) {
2548 2556 cp->nodeq.q_first = ndlp->nlp_next[channelno];
2549 2557 ((NODELIST *) cp->nodeq.q_last)->
2550 2558 nlp_next[channelno] = cp->nodeq.q_first;
2551 2559 cp->nodeq.q_cnt--;
2552 2560 } else {
2553 2561 /*
2554 2562 				 * This is a little more difficult: find the
2555 2563 				 * previous node in the circular channel queue
2556 2564 */
2557 2565 prev = ndlp;
2558 2566 while (prev->nlp_next[channelno] != ndlp) {
2559 2567 prev = prev->nlp_next[channelno];
2560 2568 }
2561 2569
2562 2570 prev->nlp_next[channelno] =
2563 2571 ndlp->nlp_next[channelno];
2564 2572
2565 2573 if (cp->nodeq.q_last == (void *)ndlp) {
2566 2574 cp->nodeq.q_last = (void *)prev;
2567 2575 }
2568 2576 cp->nodeq.q_cnt--;
2569 2577
2570 2578 }
2571 2579
2572 2580 /* Clear node */
2573 2581 ndlp->nlp_next[channelno] = NULL;
2574 2582 }
2575 2583
2576 2584 }
2577 2585
2578 2586 /* First cleanup the iocb's while still holding the lock */
2579 2587 iocbq = (IOCBQ *) abort.q_first;
2580 2588 while (iocbq) {
2581 2589 /* Free the IoTag and the bmp */
2582 2590 iocb = &iocbq->iocb;
2583 2591
2584 2592 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2585 2593 sbp = iocbq->sbp;
2586 2594 if (sbp) {
2587 2595 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2588 2596 }
2589 2597 } else {
2590 2598 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2591 2599 iocb->ULPIOTAG, 0);
2592 2600 }
2593 2601
2594 2602 if (sbp && (sbp != STALE_PACKET)) {
2595 2603 mutex_enter(&sbp->mtx);
2596 2604 sbp->pkt_flags |= PACKET_IN_FLUSH;
2597 2605 /*
2598 2606 * If the fpkt is already set, then we will leave it
2599 2607 * alone. This ensures that this pkt is only accounted
2600 2608 * for on one fpkt->flush_count
2601 2609 */
2602 2610 if (!sbp->fpkt && fpkt) {
2603 2611 mutex_enter(&fpkt->mtx);
2604 2612 sbp->fpkt = fpkt;
2605 2613 fpkt->flush_count++;
2606 2614 mutex_exit(&fpkt->mtx);
2607 2615 }
2608 2616
2609 2617 mutex_exit(&sbp->mtx);
2610 2618 }
2611 2619
2612 2620 iocbq = (IOCBQ *) iocbq->next;
2613 2621
2614 2622 } /* end of while */
2615 2623
2616 2624 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2617 2625
2618 2626 /* Now abort the iocb's outside the locks */
2619 2627 iocbq = (IOCBQ *)abort.q_first;
2620 2628 while (iocbq) {
2621 2629 /* Save the next iocbq for now */
2622 2630 next = (IOCBQ *)iocbq->next;
2623 2631
2624 2632 /* Unlink this iocbq */
2625 2633 iocbq->next = NULL;
2626 2634
2627 2635 /* Get the pkt */
2628 2636 sbp = (emlxs_buf_t *)iocbq->sbp;
2629 2637
2630 2638 if (sbp) {
2631 2639 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2632 2640 "tx: sbp=%p node=%p", sbp, sbp->node);
2633 2641
2634 2642 if (hba->state >= FC_LINK_UP) {
2635 2643 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2636 2644 IOERR_ABORT_REQUESTED, 1);
2637 2645 } else {
2638 2646 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2639 2647 IOERR_LINK_DOWN, 1);
2640 2648 }
2641 2649
2642 2650 }
2643 2651 /* Free the iocb and its associated buffers */
2644 2652 else {
2645 2653 /* CMD_CLOSE_XRI_CN should also free the memory */
2646 2654 icmd = &iocbq->iocb;
2647 2655
2648 2656 /* SLI3 */
2649 2657 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2650 2658 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2651 2659 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2652 2660 if ((hba->flag &
2653 2661 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2654 2662 /* HBA is detaching or offlining */
2655 2663 if (icmd->ULPCOMMAND !=
2656 2664 CMD_QUE_RING_LIST64_CN) {
2657 2665 void *tmp;
2658 2666 RING *rp;
2659 2667 int ch;
2660 2668
2661 2669 ch = ((CHANNEL *)
2662 2670 iocbq->channel)->channelno;
2663 2671 rp = &hba->sli.sli3.ring[ch];
2664 2672 for (i = 0;
2665 2673 i < icmd->ULPBDECOUNT;
2666 2674 i++) {
2667 2675 mp = EMLXS_GET_VADDR(
2668 2676 hba, rp, icmd);
2669 2677
2670 2678 tmp = (void *)mp;
2671 2679 if (mp) {
2672 2680 emlxs_mem_put(
2673 2681 hba, MEM_BUF, tmp);
2674 2682 }
2675 2683 }
2676 2684 }
2677 2685
2678 2686 emlxs_mem_put(hba, MEM_IOCB,
2679 2687 (void *)iocbq);
2680 2688 } else {
2681 2689 /* repost the unsolicited buffer */
2682 2690 EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2683 2691 (CHANNEL *)iocbq->channel, iocbq);
2684 2692 }
2685 2693 } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2686 2694 icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2687 2695 /*
2688 2696 * Resend the abort iocbq if any
2689 2697 */
2690 2698 emlxs_tx_put(iocbq, 1);
2691 2699 }
2692 2700 }
2693 2701
2694 2702 iocbq = next;
2695 2703
2696 2704 } /* end of while */
2697 2705
2698 2706 /* Now trigger channel service */
2699 2707 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2700 2708 if (!flag[channelno]) {
2701 2709 continue;
2702 2710 }
2703 2711
2704 2712 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2705 2713 }
2706 2714
2707 2715 return (abort.q_cnt);
2708 2716
2709 2717 } /* emlxs_tx_node_flush() */
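
Editor's note: the channel's node queue is a circular, singly linked list, so removing a node has three cases: it is the only node, it is the head, or it is interior or the tail, which needs a full walk to find the predecessor. A standalone sketch of those three cases, with invented types:

#include <stddef.h>

typedef struct node {
	struct node *next;
} node_t;

typedef struct {
	node_t *first;	/* head of circular list */
	node_t *last;	/* tail; last->next == first */
	unsigned cnt;
} nodeq_t;

void
remove_node_sketch(nodeq_t *q, node_t *n)
{
	node_t *prev;

	if (q->first == n && q->last == n) {		/* only node */
		q->first = q->last = NULL;
		q->cnt = 0;
	} else if (q->first == n) {			/* head node */
		q->first = n->next;
		q->last->next = q->first;		/* keep it circular */
		q->cnt--;
	} else {					/* interior or tail */
		prev = n;
		while (prev->next != n)			/* walk the circle */
			prev = prev->next;
		prev->next = n->next;
		if (q->last == n)
			q->last = prev;
		q->cnt--;
	}
	n->next = NULL;
}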
2710 2718
2711 2719
2712 2720 /* Check for IO's on all or a given ring for a given node */
2713 2721 extern uint32_t
2714 2722 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan)
2715 2723 {
2716 2724 emlxs_hba_t *hba = HBA;
2717 2725 uint32_t channelno;
2718 2726 CHANNEL *cp;
2719 2727 uint32_t count;
2720 2728
2721 2729 count = 0;
2722 2730
2723 2731 /* Flush all I/O's on tx queue to this target */
2724 2732 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2725 2733
2726 2734 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2727 2735 cp = &hba->chan[channelno];
2728 2736
2729 2737 if (chan && cp != chan) {
2730 2738 continue;
2731 2739 }
2732 2740
2733 2741 /* Check if priority queue is not empty */
2734 2742 if (ndlp->nlp_ptx[channelno].q_first) {
2735 2743 count += ndlp->nlp_ptx[channelno].q_cnt;
2736 2744 }
2737 2745
2738 2746 /* Check if tx queue is not empty */
2739 2747 if (ndlp->nlp_tx[channelno].q_first) {
2740 2748 count += ndlp->nlp_tx[channelno].q_cnt;
2741 2749 }
2742 2750
2743 2751 }
2744 2752
2745 2753 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2746 2754
2747 2755 return (count);
2748 2756
2749 2757 } /* emlxs_tx_node_check() */
2750 2758
2751 2759
2752 2760
2753 2761 /* Flush all IO's on any ring for a given node's lun */
2754 2762 extern uint32_t
2755 2763 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
2756 2764 emlxs_buf_t *fpkt)
2757 2765 {
2758 2766 emlxs_hba_t *hba = HBA;
2759 2767 emlxs_buf_t *sbp;
2760 2768 uint32_t channelno;
2761 2769 IOCBQ *iocbq;
2762 2770 IOCBQ *prev;
2763 2771 IOCBQ *next;
2764 2772 IOCB *iocb;
2765 2773 IOCB *icmd;
2766 2774 Q abort;
2767 2775 uint32_t i;
2768 2776 MATCHMAP *mp;
2769 2777 uint8_t flag[MAX_CHANNEL];
2770 2778
2771 2779 if (lun == EMLXS_LUN_NONE) {
2772 2780 return (0);
2773 2781 }
2774 2782
2775 2783 	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));	/* flag[] is tested below; clear it */
2776 2784
2777 2785 /* Flush I/O's on txQ to this target's lun */
2778 2786 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2779 2787
2780 2788 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2781 2789
2782 2790 /* Scan the priority queue first */
2783 2791 prev = NULL;
2784 2792 iocbq = (IOCBQ *) ndlp->nlp_ptx[channelno].q_first;
2785 2793
2786 2794 while (iocbq) {
2787 2795 next = (IOCBQ *)iocbq->next;
2788 2796 iocb = &iocbq->iocb;
2789 2797 sbp = (emlxs_buf_t *)iocbq->sbp;
2790 2798
2791 2799 /* Check if this IO is for our lun */
2792 2800 if (sbp && (sbp->lun == lun)) {
2793 2801 /* Remove iocb from the node's ptx queue */
2794 2802 if (next == 0) {
2795 2803 ndlp->nlp_ptx[channelno].q_last =
2796 2804 (uint8_t *)prev;
2797 2805 }
2798 2806
2799 2807 if (prev == 0) {
2800 2808 ndlp->nlp_ptx[channelno].q_first =
2801 2809 (uint8_t *)next;
2802 2810 } else {
2803 2811 prev->next = next;
2804 2812 }
2805 2813
2806 2814 iocbq->next = NULL;
2807 2815 ndlp->nlp_ptx[channelno].q_cnt--;
2808 2816
2809 2817 /*
2810 2818 * Add this iocb to our local abort Q
2811 2819 */
2812 2820 if (abort.q_first) {
2813 2821 ((IOCBQ *)abort.q_last)->next = iocbq;
2814 2822 abort.q_last = (uint8_t *)iocbq;
2815 2823 abort.q_cnt++;
2816 2824 } else {
2817 2825 abort.q_first = (uint8_t *)iocbq;
2818 2826 abort.q_last = (uint8_t *)iocbq;
2819 2827 abort.q_cnt = 1;
2820 2828 }
2821 2829 iocbq->next = NULL;
2822 2830 flag[channelno] = 1;
2823 2831
2824 2832 } else {
2825 2833 prev = iocbq;
2826 2834 }
2827 2835
2828 2836 iocbq = next;
2829 2837
2830 2838 } /* while (iocbq) */
2831 2839
2832 2840
2833 2841 /* Scan the regular queue */
2834 2842 prev = NULL;
2835 2843 iocbq = (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2836 2844
2837 2845 while (iocbq) {
2838 2846 next = (IOCBQ *)iocbq->next;
2839 2847 iocb = &iocbq->iocb;
2840 2848 sbp = (emlxs_buf_t *)iocbq->sbp;
2841 2849
2842 2850 /* Check if this IO is for our lun */
2843 2851 if (sbp && (sbp->lun == lun)) {
2844 2852 /* Remove iocb from the node's tx queue */
2845 2853 if (next == 0) {
2846 2854 ndlp->nlp_tx[channelno].q_last =
2847 2855 (uint8_t *)prev;
2848 2856 }
2849 2857
2850 2858 if (prev == 0) {
2851 2859 ndlp->nlp_tx[channelno].q_first =
2852 2860 (uint8_t *)next;
2853 2861 } else {
2854 2862 prev->next = next;
2855 2863 }
2856 2864
2857 2865 iocbq->next = NULL;
2858 2866 ndlp->nlp_tx[channelno].q_cnt--;
2859 2867
2860 2868 /*
2861 2869 * Add this iocb to our local abort Q
2862 2870 */
2863 2871 if (abort.q_first) {
2864 2872 ((IOCBQ *) abort.q_last)->next = iocbq;
2865 2873 abort.q_last = (uint8_t *)iocbq;
2866 2874 abort.q_cnt++;
2867 2875 } else {
2868 2876 abort.q_first = (uint8_t *)iocbq;
2869 2877 abort.q_last = (uint8_t *)iocbq;
2870 2878 abort.q_cnt = 1;
2871 2879 }
2872 2880 iocbq->next = NULL;
2873 2881 } else {
2874 2882 prev = iocbq;
2875 2883 }
2876 2884
2877 2885 iocbq = next;
2878 2886
2879 2887 } /* while (iocbq) */
2880 2888 } /* for loop */
2881 2889
2882 2890 /* First cleanup the iocb's while still holding the lock */
2883 2891 iocbq = (IOCBQ *)abort.q_first;
2884 2892 while (iocbq) {
2885 2893 /* Free the IoTag and the bmp */
2886 2894 iocb = &iocbq->iocb;
2887 2895
2888 2896 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2889 2897 sbp = iocbq->sbp;
2890 2898 if (sbp) {
2891 2899 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2892 2900 }
2893 2901 } else {
2894 2902 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2895 2903 iocb->ULPIOTAG, 0);
2896 2904 }
2897 2905
2898 2906 if (sbp && (sbp != STALE_PACKET)) {
2899 2907 mutex_enter(&sbp->mtx);
2900 2908 sbp->pkt_flags |= PACKET_IN_FLUSH;
2901 2909 /*
2902 2910 * If the fpkt is already set, then we will leave it
2903 2911 * alone. This ensures that this pkt is only accounted
2904 2912 * for on one fpkt->flush_count
2905 2913 */
2906 2914 if (!sbp->fpkt && fpkt) {
2907 2915 mutex_enter(&fpkt->mtx);
2908 2916 sbp->fpkt = fpkt;
2909 2917 fpkt->flush_count++;
2910 2918 mutex_exit(&fpkt->mtx);
2911 2919 }
2912 2920
2913 2921 mutex_exit(&sbp->mtx);
2914 2922 }
2915 2923
2916 2924 iocbq = (IOCBQ *) iocbq->next;
2917 2925
2918 2926 } /* end of while */
2919 2927
2920 2928 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2921 2929
2922 2930 /* Now abort the iocb's outside the locks */
2923 2931 iocbq = (IOCBQ *)abort.q_first;
2924 2932 while (iocbq) {
2925 2933 /* Save the next iocbq for now */
2926 2934 next = (IOCBQ *)iocbq->next;
2927 2935
2928 2936 /* Unlink this iocbq */
2929 2937 iocbq->next = NULL;
2930 2938
2931 2939 /* Get the pkt */
2932 2940 sbp = (emlxs_buf_t *)iocbq->sbp;
2933 2941
2934 2942 if (sbp) {
2935 2943 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2936 2944 "tx: sbp=%p node=%p", sbp, sbp->node);
2937 2945
2938 2946 if (hba->state >= FC_LINK_UP) {
2939 2947 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2940 2948 IOERR_ABORT_REQUESTED, 1);
2941 2949 } else {
2942 2950 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2943 2951 IOERR_LINK_DOWN, 1);
2944 2952 }
2945 2953 }
2946 2954
2947 2955 /* Free the iocb and its associated buffers */
2948 2956 else {
2949 2957 /* Should never happen! */
2950 2958 icmd = &iocbq->iocb;
2951 2959
2952 2960 /* SLI3 */
2953 2961 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2954 2962 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2955 2963 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2956 2964 if ((hba->flag &
2957 2965 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2958 2966 /* HBA is detaching or offlining */
2959 2967 if (icmd->ULPCOMMAND !=
2960 2968 CMD_QUE_RING_LIST64_CN) {
2961 2969 void *tmp;
2962 2970 RING *rp;
2963 2971 int ch;
2964 2972
2965 2973 ch = ((CHANNEL *)
2966 2974 iocbq->channel)->channelno;
2967 2975 rp = &hba->sli.sli3.ring[ch];
2968 2976 for (i = 0;
2969 2977 i < icmd->ULPBDECOUNT;
2970 2978 i++) {
2971 2979 mp = EMLXS_GET_VADDR(
2972 2980 hba, rp, icmd);
2973 2981
2974 2982 tmp = (void *)mp;
2975 2983 if (mp) {
2976 2984 emlxs_mem_put(
2977 2985 hba, MEM_BUF, tmp);
2978 2986 }
2979 2987 }
2980 2988 }
2981 2989
2982 2990 emlxs_mem_put(hba, MEM_IOCB,
2983 2991 (void *)iocbq);
2984 2992 } else {
2985 2993 /* repost the unsolicited buffer */
2986 2994 EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2987 2995 (CHANNEL *)iocbq->channel, iocbq);
2988 2996 }
2989 2997 } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2990 2998 icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2991 2999 /*
2992 3000 * Resend the abort iocbq if any
2993 3001 */
2994 3002 emlxs_tx_put(iocbq, 1);
2995 3003 }
2996 3004 }
2997 3005
2998 3006 iocbq = next;
2999 3007
3000 3008 } /* end of while */
3001 3009
3002 3010 /* Now trigger channel service */
3003 3011 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3004 3012 if (!flag[channelno]) {
3005 3013 continue;
3006 3014 }
3007 3015
3008 3016 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3009 3017 }
3010 3018
3011 3019 return (abort.q_cnt);
3012 3020
3013 3021 } /* emlxs_tx_lun_flush() */
3014 3022
3015 3023
3016 3024 extern void
3017 3025 emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
3018 3026 {
3019 3027 emlxs_hba_t *hba;
3020 3028 emlxs_port_t *port;
3021 3029 uint32_t channelno;
3022 3030 NODELIST *nlp;
3023 3031 CHANNEL *cp;
3024 3032 emlxs_buf_t *sbp;
3025 3033
3026 3034 port = (emlxs_port_t *)iocbq->port;
3027 3035 hba = HBA;
3028 3036 cp = (CHANNEL *)iocbq->channel;
3029 3037 nlp = (NODELIST *)iocbq->node;
3030 3038 channelno = cp->channelno;
3031 3039 sbp = (emlxs_buf_t *)iocbq->sbp;
3032 3040
3033 3041 if (nlp == NULL) {
3034 3042 /* Set node to base node by default */
3035 3043 nlp = &port->node_base;
3036 3044
3037 3045 iocbq->node = (void *)nlp;
3038 3046
3039 3047 if (sbp) {
3040 3048 sbp->node = (void *)nlp;
3041 3049 }
3042 3050 }
3043 3051
3044 3052 if (lock) {
3045 3053 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3046 3054 }
3047 3055
3048 3056 if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
3049 3057 if (sbp) {
3050 3058 mutex_enter(&sbp->mtx);
3051 3059 sbp->pkt_flags |= PACKET_IN_FLUSH;
3052 3060 mutex_exit(&sbp->mtx);
3053 3061
3054 3062 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3055 3063 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3056 3064 } else {
3057 3065 (void) emlxs_unregister_pkt(cp, sbp->iotag, 0);
3058 3066 }
3059 3067
3060 3068 if (lock) {
3061 3069 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3062 3070 }
3063 3071
3064 3072 if (hba->state >= FC_LINK_UP) {
3065 3073 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3066 3074 IOERR_ABORT_REQUESTED, 1);
3067 3075 } else {
3068 3076 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3069 3077 IOERR_LINK_DOWN, 1);
3070 3078 }
3071 3079 return;
3072 3080 } else {
3073 3081 if (lock) {
3074 3082 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3075 3083 }
3076 3084
3077 3085 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
3078 3086 }
3079 3087
3080 3088 return;
3081 3089 }
3082 3090
3083 3091 if (sbp) {
3084 3092
3085 3093 mutex_enter(&sbp->mtx);
3086 3094
3087 3095 if (sbp->pkt_flags &
3088 3096 (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
3089 3097 mutex_exit(&sbp->mtx);
3090 3098 if (lock) {
3091 3099 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3092 3100 }
3093 3101 return;
3094 3102 }
3095 3103
3096 3104 sbp->pkt_flags |= PACKET_IN_TXQ;
3097 3105 hba->channel_tx_count++;
3098 3106
3099 3107 mutex_exit(&sbp->mtx);
3100 3108 }
3101 3109
3102 3110
3103 3111 /* Check iocbq priority */
3104 3112 	/* Some IOCBs have high priority, like reset/close xri etc. */
3105 3113 if (iocbq->flag & IOCB_PRIORITY) {
3106 3114 /* Add the iocb to the bottom of the node's ptx queue */
3107 3115 if (nlp->nlp_ptx[channelno].q_first) {
3108 3116 ((IOCBQ *)nlp->nlp_ptx[channelno].q_last)->next = iocbq;
3109 3117 nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
3110 3118 nlp->nlp_ptx[channelno].q_cnt++;
3111 3119 } else {
3112 3120 nlp->nlp_ptx[channelno].q_first = (uint8_t *)iocbq;
3113 3121 nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
3114 3122 nlp->nlp_ptx[channelno].q_cnt = 1;
3115 3123 }
3116 3124
3117 3125 iocbq->next = NULL;
3118 3126 } else { /* Normal priority */
3119 3127
3120 3128
3121 3129 /* Add the iocb to the bottom of the node's tx queue */
3122 3130 if (nlp->nlp_tx[channelno].q_first) {
3123 3131 ((IOCBQ *)nlp->nlp_tx[channelno].q_last)->next = iocbq;
3124 3132 nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
3125 3133 nlp->nlp_tx[channelno].q_cnt++;
3126 3134 } else {
3127 3135 nlp->nlp_tx[channelno].q_first = (uint8_t *)iocbq;
3128 3136 nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
3129 3137 nlp->nlp_tx[channelno].q_cnt = 1;
3130 3138 }
3131 3139
3132 3140 iocbq->next = NULL;
3133 3141 }
3134 3142
3135 3143
3136 3144 /*
3137 3145 * Check if the node is not already on channel queue and
3138 3146 * (is not closed or is a priority request)
3139 3147 */
3140 3148 if (!nlp->nlp_next[channelno] &&
3141 3149 (!(nlp->nlp_flag[channelno] & NLP_CLOSED) ||
3142 3150 (iocbq->flag & IOCB_PRIORITY))) {
3143 3151 /* If so, then add it to the channel queue */
3144 3152 if (cp->nodeq.q_first) {
3145 3153 ((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
3146 3154 (uint8_t *)nlp;
3147 3155 nlp->nlp_next[channelno] = cp->nodeq.q_first;
3148 3156
3149 3157 /*
3150 3158 * If this is not the base node then add it
3151 3159 * to the tail
3152 3160 */
3153 3161 if (!nlp->nlp_base) {
3154 3162 cp->nodeq.q_last = (uint8_t *)nlp;
3155 3163 } else { /* Otherwise, add it to the head */
3156 3164
3157 3165 /* The command node always gets priority */
3158 3166 cp->nodeq.q_first = (uint8_t *)nlp;
3159 3167 }
3160 3168
3161 3169 cp->nodeq.q_cnt++;
3162 3170 } else {
3163 3171 cp->nodeq.q_first = (uint8_t *)nlp;
3164 3172 cp->nodeq.q_last = (uint8_t *)nlp;
3165 3173 nlp->nlp_next[channelno] = nlp;
3166 3174 cp->nodeq.q_cnt = 1;
3167 3175 }
3168 3176 }
3169 3177
3170 3178 HBASTATS.IocbTxPut[channelno]++;
3171 3179
3172 3180 /* Adjust the channel timeout timer */
3173 3181 cp->timeout = hba->timer_tics + 5;
3174 3182
3175 3183 if (lock) {
3176 3184 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3177 3185 }
3178 3186
3179 3187 return;
3180 3188
3181 3189 } /* emlxs_tx_put() */
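
Editor's note: emlxs_tx_put() enforces two orderings: each node keeps a FIFO priority queue and a FIFO normal queue, and when a node joins the channel's circular service list the base (command) node is placed at the head while ordinary nodes go to the tail. A sketch of both rules with invented types; locking and the closed-node check are omitted:

#include <stddef.h>

typedef struct io { struct io *next; } io_t;
typedef struct { io_t *first; io_t *last; unsigned cnt; } ioq_t;

typedef struct node {
	struct node *next;		/* circular service list */
	int is_base;			/* the command (base) node */
} node_t;

typedef struct { node_t *first; node_t *last; } chanq_t;

/* FIFO append to either the priority or the normal queue */
static void
append(ioq_t *q, io_t *io)
{
	io->next = NULL;
	if (q->first == NULL)
		q->first = io;
	else
		q->last->next = io;
	q->last = io;
	q->cnt++;
}

void
tx_put_sketch(ioq_t *ptxq, ioq_t *txq, io_t *io, int priority)
{
	/* Reset/close-type requests jump to the priority queue */
	append(priority ? ptxq : txq, io);
}

/* Link a node into the circular service list; base node gets the head */
void
enqueue_node_sketch(chanq_t *cq, node_t *n)
{
	if (cq->first == NULL) {
		cq->first = cq->last = n;
		n->next = n;			/* single-node circle */
		return;
	}
	cq->last->next = n;
	n->next = cq->first;
	if (n->is_base)
		cq->first = n;			/* served next */
	else
		cq->last = n;			/* served last */
}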
3182 3190
3183 3191
3184 3192 extern IOCBQ *
3185 3193 emlxs_tx_get(CHANNEL *cp, uint32_t lock)
3186 3194 {
3187 3195 emlxs_hba_t *hba;
3188 3196 uint32_t channelno;
3189 3197 IOCBQ *iocbq;
3190 3198 NODELIST *nlp;
3191 3199 emlxs_buf_t *sbp;
3192 3200
3193 3201 hba = cp->hba;
3194 3202 channelno = cp->channelno;
3195 3203
3196 3204 if (lock) {
3197 3205 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3198 3206 }
3199 3207
3200 3208 begin:
3201 3209
3202 3210 iocbq = NULL;
3203 3211
3204 3212 /* Check if a node needs servicing */
3205 3213 if (cp->nodeq.q_first) {
3206 3214 nlp = (NODELIST *)cp->nodeq.q_first;
3207 3215
3208 3216 /* Get next iocb from node's priority queue */
3209 3217
3210 3218 if (nlp->nlp_ptx[channelno].q_first) {
3211 3219 iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;
3212 3220
3213 3221 /* Check if this is last entry */
3214 3222 if (nlp->nlp_ptx[channelno].q_last == (void *)iocbq) {
3215 3223 nlp->nlp_ptx[channelno].q_first = NULL;
3216 3224 nlp->nlp_ptx[channelno].q_last = NULL;
3217 3225 nlp->nlp_ptx[channelno].q_cnt = 0;
3218 3226 } else {
3219 3227 /* Remove iocb from head */
3220 3228 nlp->nlp_ptx[channelno].q_first =
3221 3229 (void *)iocbq->next;
3222 3230 nlp->nlp_ptx[channelno].q_cnt--;
3223 3231 }
3224 3232
3225 3233 iocbq->next = NULL;
3226 3234 }
3227 3235
3228 3236 /* Get next iocb from node tx queue if node not closed */
3229 3237 else if (nlp->nlp_tx[channelno].q_first &&
3230 3238 !(nlp->nlp_flag[channelno] & NLP_CLOSED)) {
3231 3239 iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;
3232 3240
3233 3241 /* Check if this is last entry */
3234 3242 if (nlp->nlp_tx[channelno].q_last == (void *)iocbq) {
3235 3243 nlp->nlp_tx[channelno].q_first = NULL;
3236 3244 nlp->nlp_tx[channelno].q_last = NULL;
3237 3245 nlp->nlp_tx[channelno].q_cnt = 0;
3238 3246 } else {
3239 3247 /* Remove iocb from head */
3240 3248 nlp->nlp_tx[channelno].q_first =
3241 3249 (void *)iocbq->next;
3242 3250 nlp->nlp_tx[channelno].q_cnt--;
3243 3251 }
3244 3252
3245 3253 iocbq->next = NULL;
3246 3254 }
3247 3255
3248 3256 /* Now deal with node itself */
3249 3257
3250 3258 /* Check if node still needs servicing */
3251 3259 if ((nlp->nlp_ptx[channelno].q_first) ||
3252 3260 (nlp->nlp_tx[channelno].q_first &&
3253 3261 !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3254 3262
3255 3263 /*
3256 3264 * If this is the base node, then don't shift the
3257 3265 * pointers. We want to drain the base node before
3258 3266 * moving on
3259 3267 */
3260 3268 if (!nlp->nlp_base) {
3261 3269 /*
3262 3270 * Just shift channel queue pointers to next
3263 3271 * node
3264 3272 */
3265 3273 cp->nodeq.q_last = (void *)nlp;
3266 3274 cp->nodeq.q_first = nlp->nlp_next[channelno];
3267 3275 }
3268 3276 } else {
3269 3277 /* Remove node from channel queue */
3270 3278
3271 3279 /* If this is the last node on list */
3272 3280 if (cp->nodeq.q_last == (void *)nlp) {
3273 3281 cp->nodeq.q_last = NULL;
3274 3282 cp->nodeq.q_first = NULL;
3275 3283 cp->nodeq.q_cnt = 0;
3276 3284 } else {
3277 3285 /* Remove node from head */
3278 3286 cp->nodeq.q_first = nlp->nlp_next[channelno];
3279 3287 ((NODELIST *)cp->nodeq.q_last)->
3280 3288 nlp_next[channelno] = cp->nodeq.q_first;
3281 3289 cp->nodeq.q_cnt--;
3282 3290
3283 3291 }
3284 3292
3285 3293 /* Clear node */
3286 3294 nlp->nlp_next[channelno] = NULL;
3287 3295 }
3288 3296
3289 3297 /*
3290 3298 * If no iocbq was found on this node, then it will have
3291 3299 * been removed. So try again.
3292 3300 */
3293 3301 if (!iocbq) {
3294 3302 goto begin;
3295 3303 }
3296 3304
3297 3305 sbp = (emlxs_buf_t *)iocbq->sbp;
3298 3306
3299 3307 if (sbp) {
3300 3308 /*
3301 3309 * Check flags before we enter mutex in case this
3302 3310 * has been flushed and destroyed
3303 3311 */
3304 3312 if ((sbp->pkt_flags &
3305 3313 (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3306 3314 !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3307 3315 goto begin;
3308 3316 }
3309 3317
3310 3318 mutex_enter(&sbp->mtx);
3311 3319
3312 3320 if ((sbp->pkt_flags &
3313 3321 (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3314 3322 !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3315 3323 mutex_exit(&sbp->mtx);
3316 3324 goto begin;
3317 3325 }
3318 3326
3319 3327 sbp->pkt_flags &= ~PACKET_IN_TXQ;
3320 3328 hba->channel_tx_count--;
3321 3329
3322 3330 mutex_exit(&sbp->mtx);
3323 3331 }
3324 3332 }
3325 3333
3326 3334 if (iocbq) {
3327 3335 HBASTATS.IocbTxGet[channelno]++;
3328 3336 }
3329 3337
3330 3338 /* Adjust the ring timeout timer */
3331 3339 cp->timeout = (cp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;
3332 3340
3333 3341 if (lock) {
3334 3342 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3335 3343 }
3336 3344
3337 3345 return (iocbq);
3338 3346
3339 3347 } /* emlxs_tx_get() */
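
Editor's note: the scheduling rule in emlxs_tx_get() is: serve the head node's priority queue first, fall back to its normal queue only if the node is not closed, then either rotate the circular list (an ordinary node with work left) or leave the head in place (the base node drains completely before others are served); a drained node is unlinked. A sketch with invented types, locking and the retry loop omitted:

#include <stddef.h>

typedef struct io { struct io *next; } io_t;
typedef struct { io_t *first; unsigned cnt; } ioq_t;

typedef struct node {
	struct node *next;		/* circular service list */
	ioq_t ptx, tx;
	int closed, is_base;
} node_t;

typedef struct { node_t *first; node_t *last; } chanq_t;

static io_t *
pop(ioq_t *q)
{
	io_t *io = q->first;

	if (io != NULL) {
		q->first = io->next;
		io->next = NULL;
		q->cnt--;
	}
	return (io);
}

io_t *
tx_get_sketch(chanq_t *cq)
{
	node_t *n = cq->first;
	io_t *io;

	if (n == NULL)
		return (NULL);

	/* Priority traffic first; normal queue only if not closed */
	if ((io = pop(&n->ptx)) == NULL && !n->closed)
		io = pop(&n->tx);

	if (n->ptx.first != NULL || (n->tx.first != NULL && !n->closed)) {
		/* Node still has work: rotate, unless it is the base node */
		if (!n->is_base) {
			cq->last = n;
			cq->first = n->next;
		}
	} else {
		/* Node drained: unlink it from the circular list */
		if (cq->last == n) {
			cq->first = cq->last = NULL;	/* it was alone */
		} else {
			cq->first = n->next;
			cq->last->next = cq->first;
		}
		n->next = NULL;
	}
	return (io);
}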
3340 3348
3341 3349
3342 3350 /*
3343 3351  * Move all cmds for ndlp from from_chan's txq to to_chan's txq.
3344 3352  * The old IoTag has to be released and a new one has to be
3345 3353  * allocated. Nothing else changes.
3346 3354 * TX_CHANNEL lock is held
3347 3355 */
3348 3356 extern void
3349 3357 emlxs_tx_move(NODELIST *ndlp, CHANNEL *from_chan, CHANNEL *to_chan,
3350 3358 uint32_t cmd, emlxs_buf_t *fpkt, uint32_t lock)
3351 3359 {
3352 3360 emlxs_hba_t *hba;
3353 3361 emlxs_port_t *port;
3354 3362 uint32_t fchanno, tchanno, i;
3355 3363
3356 3364 IOCBQ *iocbq;
3357 3365 IOCBQ *prev;
3358 3366 IOCBQ *next;
3359 3367 IOCB *iocb, *icmd;
3360 3368 Q tbm; /* To Be Moved Q */
3361 3369 MATCHMAP *mp;
3362 3370
3363 3371 NODELIST *nlp = ndlp;
3364 3372 emlxs_buf_t *sbp;
3365 3373
3366 3374 NODELIST *n_prev = NULL;
3367 3375 NODELIST *n_next = NULL;
3368 3376 uint16_t count = 0;
3369 3377
3370 3378 hba = from_chan->hba;
3371 3379 port = &PPORT;
3372 3380 cmd = cmd; /* To pass lint */
3373 3381
3374 3382 fchanno = from_chan->channelno;
3375 3383 tchanno = to_chan->channelno;
3376 3384
3377 3385 if (lock) {
3378 3386 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3379 3387 }
3380 3388
3381 3389 bzero((void *)&tbm, sizeof (Q));
3382 3390
3383 3391 	/* Scan the ndlp's fchanno txq to find the fcp cmd iocbs */
3384 3392 prev = NULL;
3385 3393 iocbq = (IOCBQ *)nlp->nlp_tx[fchanno].q_first;
3386 3394
3387 3395 while (iocbq) {
3388 3396 next = (IOCBQ *)iocbq->next;
3389 3397 /* Check if this iocb is fcp cmd */
3390 3398 iocb = &iocbq->iocb;
3391 3399
3392 3400 switch (iocb->ULPCOMMAND) {
3393 3401 /* FCP commands */
3394 3402 case CMD_FCP_ICMND_CR:
3395 3403 case CMD_FCP_ICMND_CX:
3396 3404 case CMD_FCP_IREAD_CR:
3397 3405 case CMD_FCP_IREAD_CX:
3398 3406 case CMD_FCP_IWRITE_CR:
3399 3407 case CMD_FCP_IWRITE_CX:
3400 3408 case CMD_FCP_ICMND64_CR:
3401 3409 case CMD_FCP_ICMND64_CX:
3402 3410 case CMD_FCP_IREAD64_CR:
3403 3411 case CMD_FCP_IREAD64_CX:
3404 3412 case CMD_FCP_IWRITE64_CR:
3405 3413 case CMD_FCP_IWRITE64_CX:
3406 3414 			/* We found an fcp cmd */
3407 3415 break;
3408 3416 default:
3409 3417 			/* this is not an fcp cmd, continue */
3410 3418 prev = iocbq;
3411 3419 iocbq = next;
3412 3420 continue;
3413 3421 }
3414 3422
3415 3423 		/* found an fcp cmd iocb in fchanno txq, now dequeue it */
3416 3424 if (next == NULL) {
3417 3425 /* This is the last iocbq */
3418 3426 nlp->nlp_tx[fchanno].q_last =
3419 3427 (uint8_t *)prev;
3420 3428 }
3421 3429
3422 3430 if (prev == NULL) {
3423 3431 /* This is the first one then remove it from head */
3424 3432 nlp->nlp_tx[fchanno].q_first =
3425 3433 (uint8_t *)next;
3426 3434 } else {
3427 3435 prev->next = next;
3428 3436 }
3429 3437
3430 3438 iocbq->next = NULL;
3431 3439 nlp->nlp_tx[fchanno].q_cnt--;
3432 3440
3433 3441 		/* Add this iocb to our local to-be-moved queue (tbm) */
3434 3442 		/* This way we do not hold the TX_CHANNEL lock too long */
3435 3443
3436 3444 if (tbm.q_first) {
3437 3445 ((IOCBQ *)tbm.q_last)->next = iocbq;
3438 3446 tbm.q_last = (uint8_t *)iocbq;
3439 3447 tbm.q_cnt++;
3440 3448 } else {
3441 3449 tbm.q_first = (uint8_t *)iocbq;
3442 3450 tbm.q_last = (uint8_t *)iocbq;
3443 3451 tbm.q_cnt = 1;
3444 3452 }
3445 3453
3446 3454 iocbq = next;
3447 3455
3448 3456 } /* While (iocbq) */
3449 3457
3450 3458 if ((tchanno == hba->channel_fcp) && (tbm.q_cnt != 0)) {
3451 3459
3452 3460 		/* from_chan->nodeq.q_first must be non-NULL */
3453 3461 if (from_chan->nodeq.q_first) {
3454 3462
3455 3463 /* nodeq is not empty, now deal with the node itself */
3456 3464 if ((nlp->nlp_tx[fchanno].q_first)) {
3457 3465
3458 3466 if (!nlp->nlp_base) {
3459 3467 from_chan->nodeq.q_last =
3460 3468 (void *)nlp;
3461 3469 from_chan->nodeq.q_first =
3462 3470 nlp->nlp_next[fchanno];
3463 3471 }
3464 3472
3465 3473 } else {
3466 3474 n_prev = (NODELIST *)from_chan->nodeq.q_first;
3467 3475 count = from_chan->nodeq.q_cnt;
3468 3476
3469 3477 if (n_prev == nlp) {
3470 3478
3471 3479 /* If this is the only node on list */
3472 3480 if (from_chan->nodeq.q_last ==
3473 3481 (void *)nlp) {
3474 3482 from_chan->nodeq.q_last =
3475 3483 NULL;
3476 3484 from_chan->nodeq.q_first =
3477 3485 NULL;
3478 3486 from_chan->nodeq.q_cnt = 0;
3479 3487 } else {
3480 3488 from_chan->nodeq.q_first =
3481 3489 nlp->nlp_next[fchanno];
3482 3490 ((NODELIST *)from_chan->
3483 3491 nodeq.q_last)->
3484 3492 nlp_next[fchanno] =
3485 3493 from_chan->nodeq.q_first;
3486 3494 from_chan->nodeq.q_cnt--;
3487 3495 }
3488 3496 /* Clear node */
3489 3497 nlp->nlp_next[fchanno] = NULL;
3490 3498 } else {
3491 3499 count--;
3492 3500 do {
3493 3501 n_next =
3494 3502 n_prev->nlp_next[fchanno];
3495 3503 if (n_next == nlp) {
3496 3504 break;
3497 3505 }
3498 3506 n_prev = n_next;
3499 3507 } while (count--);
3500 3508
3501 3509 if (count != 0) {
3502 3510
3503 3511 if (n_next ==
3504 3512 (NODELIST *)from_chan->
3505 3513 nodeq.q_last) {
3506 3514 n_prev->
3507 3515 nlp_next[fchanno]
3508 3516 =
3509 3517 ((NODELIST *)
3510 3518 from_chan->
3511 3519 nodeq.q_last)->
3512 3520 nlp_next
3513 3521 [fchanno];
3514 3522 from_chan->nodeq.q_last
3515 3523 = (uint8_t *)n_prev;
3516 3524 } else {
3517 3525
3518 3526 n_prev->
3519 3527 nlp_next[fchanno]
3520 3528 =
3521 3529 					    n_next->nlp_next
3522 3530 [fchanno];
3523 3531 }
3524 3532 from_chan->nodeq.q_cnt--;
3525 3533 /* Clear node */
3526 3534 nlp->nlp_next[fchanno] =
3527 3535 NULL;
3528 3536 }
3529 3537 }
3530 3538 }
3531 3539 }
3532 3540 }
3533 3541
3534 3542 /* Now cleanup the iocb's */
3535 3543 prev = NULL;
3536 3544 iocbq = (IOCBQ *)tbm.q_first;
3537 3545
3538 3546 while (iocbq) {
3539 3547
3540 3548 next = (IOCBQ *)iocbq->next;
3541 3549
3542 3550 /* Free the IoTag and the bmp */
3543 3551 iocb = &iocbq->iocb;
3544 3552
3545 3553 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3546 3554 sbp = iocbq->sbp;
3547 3555 if (sbp) {
3548 3556 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3549 3557 }
3550 3558 } else {
3551 3559 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
3552 3560 iocb->ULPIOTAG, 0);
3553 3561 }
3554 3562
3555 3563 if (sbp && (sbp != STALE_PACKET)) {
3556 3564 mutex_enter(&sbp->mtx);
3557 3565 sbp->pkt_flags |= PACKET_IN_FLUSH;
3558 3566
3559 3567 /*
3560 3568 * If the fpkt is already set, then we will leave it
3561 3569 * alone. This ensures that this pkt is only accounted
3562 3570 * for on one fpkt->flush_count
3563 3571 */
3564 3572 if (!sbp->fpkt && fpkt) {
3565 3573 mutex_enter(&fpkt->mtx);
3566 3574 sbp->fpkt = fpkt;
3567 3575 fpkt->flush_count++;
3568 3576 mutex_exit(&fpkt->mtx);
3569 3577 }
3570 3578 mutex_exit(&sbp->mtx);
3571 3579 }
3572 3580 iocbq = next;
3573 3581
3574 3582 } /* end of while */
3575 3583
3576 3584 iocbq = (IOCBQ *)tbm.q_first;
3577 3585 while (iocbq) {
3578 3586 /* Save the next iocbq for now */
3579 3587 next = (IOCBQ *)iocbq->next;
3580 3588
3581 3589 /* Unlink this iocbq */
3582 3590 iocbq->next = NULL;
3583 3591
3584 3592 /* Get the pkt */
3585 3593 sbp = (emlxs_buf_t *)iocbq->sbp;
3586 3594
3587 3595 if (sbp) {
3588 3596 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
3589 3597 "tx: sbp=%p node=%p", sbp, sbp->node);
3590 3598
3591 3599 if (hba->state >= FC_LINK_UP) {
3592 3600 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3593 3601 IOERR_ABORT_REQUESTED, 1);
3594 3602 } else {
3595 3603 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3596 3604 IOERR_LINK_DOWN, 1);
3597 3605 }
3598 3606
3599 3607 }
3600 3608 /* Free the iocb and its associated buffers */
3601 3609 else {
3602 3610 icmd = &iocbq->iocb;
3603 3611
3604 3612 /* SLI3 */
3605 3613 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
3606 3614 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
3607 3615 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
3608 3616 if ((hba->flag &
3609 3617 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
3610 3618 /* HBA is detaching or offlining */
3611 3619 if (icmd->ULPCOMMAND !=
3612 3620 CMD_QUE_RING_LIST64_CN) {
3613 3621 void *tmp;
3614 3622 RING *rp;
3615 3623 int ch;
3616 3624
3617 3625 ch = from_chan->channelno;
3618 3626 rp = &hba->sli.sli3.ring[ch];
3619 3627
3620 3628 for (i = 0;
3621 3629 i < icmd->ULPBDECOUNT;
3622 3630 i++) {
3623 3631 mp = EMLXS_GET_VADDR(
3624 3632 hba, rp, icmd);
3625 3633
3626 3634 tmp = (void *)mp;
3627 3635 if (mp) {
3628 3636 emlxs_mem_put(
3629 3637 hba,
3630 3638 MEM_BUF,
3631 3639 tmp);
3632 3640 }
3633 3641 }
3634 3642
3635 3643 }
3636 3644
3637 3645 emlxs_mem_put(hba, MEM_IOCB,
3638 3646 (void *)iocbq);
3639 3647 } else {
3640 3648 /* repost the unsolicited buffer */
3641 3649 EMLXS_SLI_ISSUE_IOCB_CMD(hba,
3642 3650 from_chan, iocbq);
3643 3651 }
3644 3652 }
3645 3653 }
3646 3654
3647 3655 iocbq = next;
3648 3656
3649 3657 } /* end of while */
3650 3658
3651 3659 /* Now flush the chipq if any */
3652 3660 if (!(nlp->nlp_flag[fchanno] & NLP_CLOSED)) {
3653 3661
3654 3662 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3655 3663
3656 3664 (void) emlxs_chipq_node_flush(port, from_chan, nlp, 0);
3657 3665
3658 3666 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3659 3667 }
3660 3668
3661 3669 if (lock) {
3662 3670 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3663 3671 }
3664 3672
3665 3673 return;
3666 3674
3667 3675 } /* emlxs_tx_move */
3668 3676
3669 3677
3670 3678 extern uint32_t
3671 3679 emlxs_chipq_node_flush(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp,
3672 3680 emlxs_buf_t *fpkt)
3673 3681 {
3674 3682 emlxs_hba_t *hba = HBA;
3675 3683 emlxs_buf_t *sbp;
3676 3684 IOCBQ *iocbq;
3677 3685 IOCBQ *next;
3678 3686 Q abort;
3679 3687 CHANNEL *cp;
3680 3688 uint32_t channelno;
3681 3689 uint8_t flag[MAX_CHANNEL];
3682 3690 uint32_t iotag;
3683 3691
3684 3692 bzero((void *)&abort, sizeof (Q));
3685 3693 bzero((void *)flag, sizeof (flag));
3686 3694
3687 3695 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3688 3696 cp = &hba->chan[channelno];
3689 3697
3690 3698 if (chan && cp != chan) {
3691 3699 continue;
3692 3700 }
3693 3701
3694 3702 mutex_enter(&EMLXS_FCTAB_LOCK);
3695 3703
3696 3704 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3697 3705 sbp = hba->fc_table[iotag];
3698 3706
3699 3707 if (sbp && (sbp != STALE_PACKET) &&
3700 3708 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3701 3709 (sbp->node == ndlp) &&
3702 3710 (sbp->channel == cp) &&
3703 3711 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3704 3712 emlxs_sbp_abort_add(port, sbp, &abort, flag,
3705 3713 fpkt);
3706 3714 }
3707 3715
3708 3716 }
3709 3717 mutex_exit(&EMLXS_FCTAB_LOCK);
3710 3718
3711 3719 } /* for */
3712 3720
3713 3721 /* Now put the iocb's on the tx queue */
3714 3722 iocbq = (IOCBQ *)abort.q_first;
3715 3723 while (iocbq) {
3716 3724 /* Save the next iocbq for now */
3717 3725 next = (IOCBQ *)iocbq->next;
3718 3726
3719 3727 /* Unlink this iocbq */
3720 3728 iocbq->next = NULL;
3721 3729
3722 3730 /* Send this iocbq */
3723 3731 emlxs_tx_put(iocbq, 1);
3724 3732
3725 3733 iocbq = next;
3726 3734 }
3727 3735
3728 3736 /* Now trigger channel service */
3729 3737 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3730 3738 if (!flag[channelno]) {
3731 3739 continue;
3732 3740 }
3733 3741
3734 3742 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3735 3743 }
3736 3744
3737 3745 return (abort.q_cnt);
3738 3746
3739 3747 } /* emlxs_chipq_node_flush() */
3740 3748
3741 3749
3742 3750 /* Flush all IO's left on all iotag lists */
3743 3751 extern uint32_t
3744 3752 emlxs_iotag_flush(emlxs_hba_t *hba)
3745 3753 {
3746 3754 emlxs_port_t *port = &PPORT;
3747 3755 emlxs_buf_t *sbp;
3748 3756 IOCBQ *iocbq;
3749 3757 IOCB *iocb;
3750 3758 Q abort;
3751 3759 CHANNEL *cp;
3752 3760 uint32_t channelno;
3753 3761 uint32_t iotag;
3754 3762 uint32_t count;
3755 3763
3756 3764 count = 0;
3757 3765 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3758 3766 cp = &hba->chan[channelno];
3759 3767
3760 3768 bzero((void *)&abort, sizeof (Q));
3761 3769
3762 3770 mutex_enter(&EMLXS_FCTAB_LOCK);
3763 3771
3764 3772 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3765 3773 sbp = hba->fc_table[iotag];
3766 3774
3767 3775 /* Check if the slot is empty */
3768 3776 if (!sbp || (sbp == STALE_PACKET)) {
3769 3777 continue;
3770 3778 }
3771 3779
3772 3780 /* We are building an abort list per channel */
3773 3781 if (sbp->channel != cp) {
3774 3782 continue;
3775 3783 }
3776 3784
3777 3785 hba->fc_table[iotag] = STALE_PACKET;
3778 3786 hba->io_count--;
3779 3787
3780 3788 /* Check if IO is valid */
3781 3789 if (!(sbp->pkt_flags & PACKET_VALID) ||
3782 3790 (sbp->pkt_flags & (PACKET_ULP_OWNED|
3783 3791 PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
3784 3792 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
3785 3793 "iotag_flush: Invalid IO found. iotag=%d",
3786 3794 iotag);
3787 3795
3788 3796 continue;
3789 3797 }
3790 3798
3791 3799 sbp->iotag = 0;
3792 3800
3793 3801 /* Set IOCB status */
3794 3802 iocbq = &sbp->iocbq;
3795 3803 iocb = &iocbq->iocb;
3796 3804
3797 3805 iocb->ULPSTATUS = IOSTAT_LOCAL_REJECT;
3798 3806 iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
3799 3807 iocb->ULPLE = 1;
3800 3808 iocbq->next = NULL;
3801 3809
3802 3810 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3803 3811 if (sbp->xrip) {
3804 3812 EMLXS_MSGF(EMLXS_CONTEXT,
3805 3813 &emlxs_sli_debug_msg,
3806 3814 "iotag_flush: iotag=%d sbp=%p "
3807 3815 "xrip=%p state=%x flag=%x",
3808 3816 iotag, sbp, sbp->xrip,
3809 3817 sbp->xrip->state, sbp->xrip->flag);
3810 3818 } else {
3811 3819 EMLXS_MSGF(EMLXS_CONTEXT,
3812 3820 &emlxs_sli_debug_msg,
3813 3821 "iotag_flush: iotag=%d sbp=%p "
3814 3822 "xrip=NULL", iotag, sbp);
3815 3823 }
3816 3824
3817 3825 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
3818 3826 } else {
3819 3827 /* Clean up the sbp */
3820 3828 mutex_enter(&sbp->mtx);
3821 3829
3822 3830 if (sbp->pkt_flags & PACKET_IN_TXQ) {
3823 3831 sbp->pkt_flags &= ~PACKET_IN_TXQ;
3824 3832 hba->channel_tx_count --;
3825 3833 }
3826 3834
3827 3835 if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3828 3836 sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3829 3837 }
3830 3838
3831 3839 if (sbp->bmp) {
3832 3840 emlxs_mem_put(hba, MEM_BPL,
3833 3841 (void *)sbp->bmp);
3834 3842 sbp->bmp = 0;
3835 3843 }
3836 3844
3837 3845 mutex_exit(&sbp->mtx);
3838 3846 }
3839 3847
3840 3848 /* At this point all nodes are assumed destroyed */
3841 3849 mutex_enter(&sbp->mtx);
3842 3850 sbp->node = 0;
3843 3851 mutex_exit(&sbp->mtx);
3844 3852
3845 3853 /* Add this iocb to our local abort Q */
3846 3854 if (abort.q_first) {
3847 3855 ((IOCBQ *)abort.q_last)->next = iocbq;
3848 3856 abort.q_last = (uint8_t *)iocbq;
3849 3857 abort.q_cnt++;
3850 3858 } else {
3851 3859 abort.q_first = (uint8_t *)iocbq;
3852 3860 abort.q_last = (uint8_t *)iocbq;
3853 3861 abort.q_cnt = 1;
3854 3862 }
3855 3863 }
3856 3864
3857 3865 mutex_exit(&EMLXS_FCTAB_LOCK);
3858 3866
3859 3867 /* Trigger deferred completion */
3860 3868 if (abort.q_first) {
3861 3869 mutex_enter(&cp->rsp_lock);
3862 3870 if (cp->rsp_head == NULL) {
3863 3871 cp->rsp_head = (IOCBQ *)abort.q_first;
3864 3872 cp->rsp_tail = (IOCBQ *)abort.q_last;
3865 3873 } else {
3866 3874 cp->rsp_tail->next = (IOCBQ *)abort.q_first;
3867 3875 cp->rsp_tail = (IOCBQ *)abort.q_last;
3868 3876 }
3869 3877 mutex_exit(&cp->rsp_lock);
3870 3878
3871 3879 emlxs_thread_trigger2(&cp->intr_thread,
3872 3880 emlxs_proc_channel, cp);
3873 3881
3874 3882 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
3875 3883 "iotag_flush: channel=%d count=%d",
3876 3884 channelno, abort.q_cnt);
3877 3885
3878 3886 count += abort.q_cnt;
3879 3887 }
3880 3888 }
3881 3889
3882 3890 return (count);
3883 3891
3884 3892 } /* emlxs_iotag_flush() */
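/*
 * Note: emlxs_iotag_flush() does not complete the flushed IOs inline.
 * Each sbp is unlinked from fc_table under EMLXS_FCTAB_LOCK, stamped
 * IOSTAT_LOCAL_REJECT/IOERR_LINK_DOWN, and chained on a local abort Q;
 * the whole chain is then spliced onto the channel's rsp_head/rsp_tail
 * list and emlxs_proc_channel() is triggered to complete the IOs from
 * the channel's interrupt thread. A minimal, self-contained sketch of
 * the two queue idioms follows; everything in it (struct node,
 * struct q, q_append, q_splice) is hypothetical and not driver code.
 */

#include <stddef.h>

struct node {
	struct node *next;
};

struct q {
	struct node *first;
	struct node *last;
	unsigned int cnt;
};

/* Append one entry, as done for the local abort Q above. */
static void
q_append(struct q *q, struct node *n)
{
	n->next = NULL;
	if (q->first == NULL) {
		q->first = n;
		q->last = n;
		q->cnt = 1;
	} else {
		q->last->next = n;
		q->last = n;
		q->cnt++;
	}
}

/* Splice an entire local queue onto a consumer list in O(1), */
/* mirroring the rsp_head/rsp_tail hand-off above. */
static void
q_splice(struct q *dst, struct q *src)
{
	if (src->first == NULL) {
		return;
	}
	if (dst->first == NULL) {
		dst->first = src->first;
	} else {
		dst->last->next = src->first;
	}
	dst->last = src->last;
	dst->cnt += src->cnt;
	src->first = NULL;
	src->last = NULL;
	src->cnt = 0;
}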
3885 3893
3886 3894
3887 3895
3888 3896 /* Check for IOs on all channels, or a given channel, for a given node */
3889 3897 extern uint32_t
3890 3898 emlxs_chipq_node_check(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp)
3891 3899 {
3892 3900 emlxs_hba_t *hba = HBA;
3893 3901 emlxs_buf_t *sbp;
3894 3902 CHANNEL *cp;
3895 3903 uint32_t channelno;
3896 3904 uint32_t count;
3897 3905 uint32_t iotag;
3898 3906
3899 3907 count = 0;
3900 3908
3901 3909 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3902 3910 cp = &hba->chan[channelno];
3903 3911
3904 3912 if (chan && cp != chan) {
3905 3913 continue;
3906 3914 }
3907 3915
3908 3916 mutex_enter(&EMLXS_FCTAB_LOCK);
3909 3917
3910 3918 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3911 3919 sbp = hba->fc_table[iotag];
3912 3920
3913 3921 if (sbp && (sbp != STALE_PACKET) &&
3914 3922 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3915 3923 (sbp->node == ndlp) &&
3916 3924 (sbp->channel == cp) &&
3917 3925 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3918 3926 count++;
3919 3927 }
3920 3928
3921 3929 }
3922 3930 mutex_exit(&EMLXS_FCTAB_LOCK);
3923 3931
3924 3932 } /* for */
3925 3933
3926 3934 return (count);
3927 3935
3928 3936 } /* emlxs_chipq_node_check() */
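/*
 * Note: emlxs_chipq_node_check() rescans the entire iotag table once
 * per channel, so its cost is O(chan_count * max_iotag). An IO counts
 * as outstanding only if it is on the chip queue (PACKET_IN_CHIPQ),
 * belongs to this node and channel, and its XRI has not already been
 * closed (PACKET_XRI_CLOSED).
 */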
3929 3937
3930 3938
3931 3939
3932 3940 /* Flush all IOs for a given node's lun (on any channel) */
3933 3941 extern uint32_t
3934 3942 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
3935 3943 uint32_t lun, emlxs_buf_t *fpkt)
3936 3944 {
3937 3945 emlxs_hba_t *hba = HBA;
3938 3946 emlxs_buf_t *sbp;
3939 3947 IOCBQ *iocbq;
3940 3948 IOCBQ *next;
3941 3949 Q abort;
3942 3950 uint32_t iotag;
3943 3951 uint8_t flag[MAX_CHANNEL];
3944 3952 uint32_t channelno;
3945 3953
3946 3954 if (lun == EMLXS_LUN_NONE) {
3947 3955 return (0);
3948 3956 }
3949 3957
3950 3958 bzero((void *)flag, sizeof (flag));
3951 3959 bzero((void *)&abort, sizeof (Q));
3952 3960
3953 3961 mutex_enter(&EMLXS_FCTAB_LOCK);
3954 3962 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3955 3963 sbp = hba->fc_table[iotag];
3956 3964
3957 3965 if (sbp && (sbp != STALE_PACKET) &&
3958 3966 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3959 3967 (sbp->node == ndlp) &&
3960 3968 (sbp->lun == lun) &&
3961 3969 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3962 3970 emlxs_sbp_abort_add(port, sbp,
3963 3971 &abort, flag, fpkt);
3964 3972 }
3965 3973 }
3966 3974 mutex_exit(&EMLXS_FCTAB_LOCK);
3967 3975
3968 3976 /* Now put the iocbs on the tx queue */
3969 3977 iocbq = (IOCBQ *)abort.q_first;
3970 3978 while (iocbq) {
3971 3979 /* Save the next iocbq for now */
3972 3980 next = (IOCBQ *)iocbq->next;
3973 3981
3974 3982 /* Unlink this iocbq */
3975 3983 iocbq->next = NULL;
3976 3984
3977 3985 /* Send this iocbq */
3978 3986 emlxs_tx_put(iocbq, 1);
3979 3987
3980 3988 iocbq = next;
3981 3989 }
3982 3990
3983 3991 /* Now trigger channel service */
3984 3992 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3985 3993 if (!flag[channelno]) {
3986 3994 continue;
3987 3995 }
3988 3996
3989 3997 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3990 3998 }
3991 3999
3992 4000 return (abort.q_cnt);
3993 4001
3994 4002 } /* emlxs_chipq_lun_flush() */
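/*
 * Note: emlxs_chipq_lun_flush() is a two-phase flush. Phase one walks
 * fc_table under EMLXS_FCTAB_LOCK and has emlxs_sbp_abort_add() build
 * abort/close IOCBs on a local Q, marking each affected channel in
 * flag[]. Phase two runs with the lock dropped: the IOCBs are moved
 * to the transmit queue with emlxs_tx_put(), and only the flagged
 * channels are kicked with EMLXS_SLI_ISSUE_IOCB_CMD().
 */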
3995 4003
3996 4004
3997 4005
3998 4006 /*
3999 4007 * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
4000 4008 * This must be called while holding the EMLXS_FCTAB_LOCK.
4001 4009 */
4002 4010 extern IOCBQ *
4003 4011 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
4004 4012 uint16_t iotag, CHANNEL *cp, uint8_t class, int32_t flag)
4005 4013 {
4006 4014 emlxs_hba_t *hba = HBA;
4007 4015 IOCBQ *iocbq;
4008 4016 IOCB *iocb;
4009 4017 emlxs_wqe_t *wqe;
4010 4018 emlxs_buf_t *sbp;
4011 4019 uint16_t abort_iotag;
4012 4020
4013 4021 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4014 4022 return (NULL);
4015 4023 }
4016 4024
4017 4025 iocbq->channel = (void *)cp;
4018 4026 iocbq->port = (void *)port;
4019 4027 iocbq->node = (void *)ndlp;
4020 4028 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4021 4029
4022 4030 /*
4023 4031 * Set up an iotag from the special abort iotag range.
4024 4032 */
4025 4033 if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4026 4034 hba->fc_oor_iotag = hba->max_iotag;
4027 4035 }
4028 4036 abort_iotag = hba->fc_oor_iotag++;
4029 4037
4030 4038
4031 4039 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4032 4040 wqe = &iocbq->wqe;
4033 4041 sbp = hba->fc_table[iotag];
4034 4042
4035 4043 /* Try to issue abort by XRI if possible */
4036 4044 if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
4037 4045 wqe->un.Abort.Criteria = ABORT_REQ_TAG;
4038 4046 wqe->AbortTag = iotag;
4039 4047 } else {
4040 4048 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4041 4049 wqe->AbortTag = sbp->xrip->XRI;
4042 4050 }
4043 4051 wqe->un.Abort.IA = 0;
4044 4052 wqe->RequestTag = abort_iotag;
4045 4053 wqe->Command = CMD_ABORT_XRI_CX;
4046 4054 wqe->Class = CLASS3;
4047 4055 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4048 4056 wqe->CmdType = WQE_TYPE_ABORT;
4049 4057 } else {
4050 4058 iocb = &iocbq->iocb;
4051 4059 iocb->ULPIOTAG = abort_iotag;
4052 4060 iocb->un.acxri.abortType = flag;
4053 4061 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
4054 4062 iocb->un.acxri.abortIoTag = iotag;
4055 4063 iocb->ULPLE = 1;
4056 4064 iocb->ULPCLASS = class;
4057 4065 iocb->ULPCOMMAND = CMD_ABORT_XRI_CN;
4058 4066 iocb->ULPOWNER = OWN_CHIP;
4059 4067 }
4060 4068
4061 4069 return (iocbq);
4062 4070
4063 4071 } /* emlxs_create_abort_xri_cn() */
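/*
 * Note: on SLI4 the abort WQE above targets the exchange directly
 * (ABORT_XRI_TAG with the XRI from sbp->xrip) when an XRI is attached,
 * and falls back to the request tag (ABORT_REQ_TAG with the iotag)
 * otherwise. IA is left 0 here, while the close variants below set
 * IA to 1, which appears to suppress the on-wire ABTS. The abort
 * request itself is tagged from the reserved range
 * [max_iotag, EMLXS_MAX_ABORT_TAG) so it cannot collide with a normal
 * iotag. A sketch of that wrapping allocator, with hypothetical names:
 */

/* Hand out tags from [base, limit), wrapping back to base; a sketch */
/* of the fc_oor_iotag scheme, not driver code. */
static unsigned int
oor_tag_next(unsigned int *cursor, unsigned int base, unsigned int limit)
{
	if ((*cursor < base) || (*cursor >= limit)) {
		*cursor = base;
	}
	return ((*cursor)++);
}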
4064 4072
4065 4073
4066 4074 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4067 4075 extern IOCBQ *
4068 4076 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
4069 4077 CHANNEL *cp, uint8_t class, int32_t flag)
4070 4078 {
4071 4079 emlxs_hba_t *hba = HBA;
4072 4080 IOCBQ *iocbq;
4073 4081 IOCB *iocb;
4074 4082 emlxs_wqe_t *wqe;
4075 4083 uint16_t abort_iotag;
4076 4084
4077 4085 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4078 4086 return (NULL);
4079 4087 }
4080 4088
4081 4089 iocbq->channel = (void *)cp;
4082 4090 iocbq->port = (void *)port;
4083 4091 iocbq->node = (void *)ndlp;
4084 4092 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4085 4093
4086 4094 /*
4087 4095 * Set up an iotag from the special abort iotag range.
4088 4096 */
4089 4097 if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4090 4098 hba->fc_oor_iotag = hba->max_iotag;
4091 4099 }
4092 4100 abort_iotag = hba->fc_oor_iotag++;
4093 4101
4094 4102 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4095 4103 wqe = &iocbq->wqe;
4096 4104 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4097 4105 wqe->un.Abort.IA = 0;
4098 4106 wqe->RequestTag = abort_iotag;
4099 4107 wqe->AbortTag = xid;
4100 4108 wqe->Command = CMD_ABORT_XRI_CX;
4101 4109 wqe->Class = CLASS3;
4102 4110 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4103 4111 wqe->CmdType = WQE_TYPE_ABORT;
4104 4112 } else {
4105 4113 iocb = &iocbq->iocb;
4106 4114 iocb->ULPCONTEXT = xid;
4107 4115 iocb->ULPIOTAG = abort_iotag;
4108 4116 iocb->un.acxri.abortType = flag;
4109 4117 iocb->ULPLE = 1;
4110 4118 iocb->ULPCLASS = class;
4111 4119 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
4112 4120 iocb->ULPOWNER = OWN_CHIP;
4113 4121 }
4114 4122
4115 4123 return (iocbq);
4116 4124
4117 4125 } /* emlxs_create_abort_xri_cx() */
4118 4126
4119 4127
4120 4128
4121 4129 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4122 4130 extern IOCBQ *
4123 4131 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
4124 4132 uint16_t iotag, CHANNEL *cp)
4125 4133 {
4126 4134 emlxs_hba_t *hba = HBA;
4127 4135 IOCBQ *iocbq;
4128 4136 IOCB *iocb;
4129 4137 emlxs_wqe_t *wqe;
4130 4138 emlxs_buf_t *sbp;
4131 4139 uint16_t abort_iotag;
4132 4140
4133 4141 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4134 4142 return (NULL);
4135 4143 }
4136 4144
4137 4145 iocbq->channel = (void *)cp;
4138 4146 iocbq->port = (void *)port;
4139 4147 iocbq->node = (void *)ndlp;
4140 4148 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4141 4149
4142 4150 /*
4143 4151 * Set up an iotag from the special abort iotag range.
4144 4152 */
4145 4153 if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4146 4154 hba->fc_oor_iotag = hba->max_iotag;
4147 4155 }
4148 4156 abort_iotag = hba->fc_oor_iotag++;
4149 4157
4150 4158 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4151 4159 wqe = &iocbq->wqe;
4152 4160 sbp = hba->fc_table[iotag];
4153 4161
4154 4162 /* Try to issue close by XRI if possible */
4155 4163 if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
4156 4164 wqe->un.Abort.Criteria = ABORT_REQ_TAG;
4157 4165 wqe->AbortTag = iotag;
4158 4166 } else {
4159 4167 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4160 4168 wqe->AbortTag = sbp->xrip->XRI;
4161 4169 }
4162 4170 wqe->un.Abort.IA = 1;
4163 4171 wqe->RequestTag = abort_iotag;
4164 4172 wqe->Command = CMD_ABORT_XRI_CX;
4165 4173 wqe->Class = CLASS3;
4166 4174 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4167 4175 wqe->CmdType = WQE_TYPE_ABORT;
4168 4176 } else {
4169 4177 iocb = &iocbq->iocb;
4170 4178 iocb->ULPIOTAG = abort_iotag;
4171 4179 iocb->un.acxri.abortType = 0;
4172 4180 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
4173 4181 iocb->un.acxri.abortIoTag = iotag;
4174 4182 iocb->ULPLE = 1;
4175 4183 iocb->ULPCLASS = 0;
4176 4184 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CN;
4177 4185 iocb->ULPOWNER = OWN_CHIP;
4178 4186 }
4179 4187
4180 4188 return (iocbq);
4181 4189
4182 4190 } /* emlxs_create_close_xri_cn() */
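/*
 * Note: emlxs_create_close_xri_cn() mirrors the abort variant above.
 * The differences are that the SLI4 WQE sets IA to 1, and the SLI3
 * IOCB uses CMD_CLOSE_XRI_CN with abortType and class zeroed, i.e.
 * the exchange is torn down without sending an ABTS on the link.
 */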
4183 4191
4184 4192
4185 4193 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4186 4194 extern IOCBQ *
4187 4195 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
4188 4196 CHANNEL *cp)
4189 4197 {
4190 4198 emlxs_hba_t *hba = HBA;
4191 4199 IOCBQ *iocbq;
4192 4200 IOCB *iocb;
4193 4201 emlxs_wqe_t *wqe;
4194 4202 uint16_t abort_iotag;
4195 4203
4196 4204 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4197 4205 return (NULL);
4198 4206 }
4199 4207
4200 4208 iocbq->channel = (void *)cp;
4201 4209 iocbq->port = (void *)port;
4202 4210 iocbq->node = (void *)ndlp;
4203 4211 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4204 4212
4205 4213 /*
4206 4214 * Set up an iotag from the special abort iotag range.
4207 4215 */
4208 4216 if (hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG) {
4209 4217 hba->fc_oor_iotag = hba->max_iotag;
4210 4218 }
4211 4219 abort_iotag = hba->fc_oor_iotag++;
4212 4220
4213 4221 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4214 4222 wqe = &iocbq->wqe;
4215 4223 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4216 4224 wqe->un.Abort.IA = 1;
4217 4225 wqe->RequestTag = abort_iotag;
4218 4226 wqe->AbortTag = xid;
4219 4227 wqe->Command = CMD_ABORT_XRI_CX;
4220 4228 wqe->Class = CLASS3;
4221 4229 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4222 4230 wqe->CmdType = WQE_TYPE_ABORT;
4223 4231 } else {
4224 4232 iocb = &iocbq->iocb;
4225 4233 iocb->ULPCONTEXT = xid;
4226 4234 iocb->ULPIOTAG = abort_iotag;
4227 4235 iocb->ULPLE = 1;
4228 4236 iocb->ULPCLASS = 0;
4229 4237 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
4230 4238 iocb->ULPOWNER = OWN_CHIP;
4231 4239 }
4232 4240
4233 4241 return (iocbq);
4234 4242
4235 4243 } /* emlxs_create_close_xri_cx() */
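/*
 * Note: the _cn constructors take a driver iotag plus the node's RPI
 * and are used for IOs the driver owns in fc_table; the _cx
 * constructors target an exchange id (xid) directly via ULPCONTEXT
 * (or the XRI AbortTag on SLI4) and are used below for unsolicited
 * ELS/CT exchanges that have no local sbp.
 */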
4236 4244
4237 4245
4238 4246 void
4239 4247 emlxs_close_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4240 4248 {
4241 4249 CHANNEL *cp;
4242 4250 IOCBQ *iocbq;
4243 4251 IOCB *iocb;
4244 4252
4245 4253 if (rxid == 0 || rxid == 0xFFFF) {
4246 4254 return;
4247 4255 }
4248 4256
4249 4257 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4250 4258 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4251 4259 "Closing ELS exchange: xid=%x", rxid);
4252 4260
4253 4261 if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4254 4262 return;
4255 4263 }
4256 4264 }
4257 4265
4258 4266 cp = &hba->chan[hba->channel_els];
4259 4267
4260 4268 mutex_enter(&EMLXS_FCTAB_LOCK);
4261 4269
4262 4270 /* Create the close IOCB */
4263 4271 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4264 4272
4265 4273 mutex_exit(&EMLXS_FCTAB_LOCK);
4266 4274
4267 4275 if (iocbq) {
4268 4276 iocb = &iocbq->iocb;
4269 4277 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4270 4278 "Closing ELS exchange: xid=%x iotag=%d", rxid,
4271 4279 iocb->ULPIOTAG);
4272 4280
4273 4281 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4274 4282 }
4275 4283
4276 4284 } /* emlxs_close_els_exchange() */
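/*
 * Note: on SLI4, emlxs_sli4_unreserve_xri() returning 0 is treated as
 * "nothing more to do" above, presumably because the XRI was only
 * reserved and could be released without putting a close on the wire;
 * only a nonzero return falls through to build a CLOSE_XRI_CX IOCB on
 * the ELS channel.
 */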
4277 4285
4278 4286
4279 4287 void
4280 4288 emlxs_abort_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4281 4289 {
4282 4290 CHANNEL *cp;
4283 4291 IOCBQ *iocbq;
4284 4292 IOCB *iocb;
4285 4293
4286 4294 if (rxid == 0 || rxid == 0xFFFF) {
4287 4295 return;
4288 4296 }
4289 4297
4290 4298 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4291 4299
4292 4300 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4293 4301 "Aborting ELS exchange: xid=%x", rxid);
4294 4302
4295 4303 if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4296 4304 /* We have no way to abort an unsolicited */
4297 4305 /* exchange that we have not yet responded to, */
4298 4306 /* so simply return for now. */
4299 4307 return;
4300 4308 }
4301 4309 }
4302 4310
4303 4311 cp = &hba->chan[hba->channel_els];
4304 4312
4305 4313 mutex_enter(&EMLXS_FCTAB_LOCK);
4306 4314
4307 4315 /* Create the abort IOCB */
4308 4316 if (hba->state >= FC_LINK_UP) {
4309 4317 iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4310 4318 CLASS3, ABORT_TYPE_ABTS);
4311 4319 } else {
4312 4320 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4313 4321 }
4314 4322
4315 4323 mutex_exit(&EMLXS_FCTAB_LOCK);
4316 4324
4317 4325 if (iocbq) {
4318 4326 iocb = &iocbq->iocb;
4319 4327 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4320 4328 "Aborting ELS exchange: xid=%x iotag=%d", rxid,
4321 4329 iocb->ULPIOTAG);
4322 4330
4323 4331 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4324 4332 }
4325 4333
4326 4334 } /* emlxs_abort_els_exchange() */
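/*
 * Note: the abort/close choice above keys off link state: with the
 * link up an ABORT_XRI_CX (ABTS) is issued, otherwise the exchange is
 * closed locally with CLOSE_XRI_CX, since an ABTS cannot be delivered
 * on a down link. emlxs_abort_ct_exchange() below and
 * emlxs_sbp_abort_add() apply the same pattern.
 */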
4327 4335
4328 4336
4329 4337 void
4330 4338 emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4331 4339 {
4332 4340 CHANNEL *cp;
4333 4341 IOCBQ *iocbq;
4334 4342 IOCB *iocb;
4335 4343
4336 4344 if (rxid == 0 || rxid == 0xFFFF) {
4337 4345 return;
4338 4346 }
4339 4347
4340 4348 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4341 4349 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
4342 4350 "Aborting CT exchange: xid=%x", rxid);
4343 4351
4344 4352 if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4345 4353 /* We have no way to abort an unsolicited */
4346 4354 /* exchange that we have not yet responded to, */
4347 4355 /* so simply return for now. */
4348 4356 return;
4349 4357 }
4350 4358 }
4351 4359
4352 4360 cp = &hba->chan[hba->channel_ct];
4353 4361
4354 4362 mutex_enter(&EMLXS_FCTAB_LOCK);
4355 4363
4356 4364 /* Create the abort IOCB */
4357 4365 if (hba->state >= FC_LINK_UP) {
4358 4366 iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4359 4367 CLASS3, ABORT_TYPE_ABTS);
4360 4368 } else {
4361 4369 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4362 4370 }
4363 4371
4364 4372 mutex_exit(&EMLXS_FCTAB_LOCK);
4365 4373
4366 4374 if (iocbq) {
4367 4375 iocb = &iocbq->iocb;
4368 4376 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
4369 4377 "Aborting CT exchange: xid=%x iotag=%d", rxid,
4370 4378 iocb->ULPIOTAG);
4371 4379
4372 4380 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4373 4381 }
4374 4382
4375 4383 } /* emlxs_abort_ct_exchange() */
4376 4384
4377 4385
4378 4386 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4379 4387 static void
4380 4388 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
4381 4389 uint8_t *flag, emlxs_buf_t *fpkt)
4382 4390 {
4383 4391 emlxs_hba_t *hba = HBA;
4384 4392 IOCBQ *iocbq;
4385 4393 CHANNEL *cp;
4386 4394 NODELIST *ndlp;
4387 4395
4388 4396 cp = (CHANNEL *)sbp->channel;
4389 4397 ndlp = sbp->node;
4390 4398
4391 4399 /* Create the abort or close XRI IOCB */
4392 4400 if (hba->state >= FC_LINK_UP) {
4393 4401 iocbq = emlxs_create_abort_xri_cn(port, ndlp, sbp->iotag, cp,
4394 4402 CLASS3, ABORT_TYPE_ABTS);
4395 4403 } else {
4396 4404 iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, cp);
4397 4405 }
4398 4406 /*
4399 4407 * Add this iocb to our local abort Q.
4400 4408 * This way we don't hold the CHIPQ lock too long.
4401 4409 */
4402 4410 if (iocbq) {
4403 4411 if (abort->q_first) {
4404 4412 ((IOCBQ *)abort->q_last)->next = iocbq;
4405 4413 abort->q_last = (uint8_t *)iocbq;
4406 4414 abort->q_cnt++;
4407 4415 } else {
4408 4416 abort->q_first = (uint8_t *)iocbq;
4409 4417 abort->q_last = (uint8_t *)iocbq;
4410 4418 abort->q_cnt = 1;
4411 4419 }
4412 4420 iocbq->next = NULL;
4413 4421 }
4414 4422
4415 4423 /* Set the flags */
4416 4424 mutex_enter(&sbp->mtx);
4417 4425
4418 4426 sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
4419 4427
4420 4428 sbp->ticks = hba->timer_tics + 10;
4421 4429 sbp->abort_attempts++;
4422 4430
4423 4431 flag[cp->channelno] = 1;
4424 4432
4425 4433 /*
4426 4434 * If the fpkt is already set, then we will leave it alone.
4427 4435 * This ensures that this pkt is only accounted for on one
4428 4436 * fpkt->flush_count.
4429 4437 */
4430 4438 if (!sbp->fpkt && fpkt) {
4431 4439 mutex_enter(&fpkt->mtx);
4432 4440 sbp->fpkt = fpkt;
4433 4441 fpkt->flush_count++;
4434 4442 mutex_exit(&fpkt->mtx);
4435 4443 }
4436 4444
4437 4445 mutex_exit(&sbp->mtx);
4438 4446
4439 4447 return;
4440 4448
4441 4449 } /* emlxs_sbp_abort_add() */
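/*
 * Note: besides queuing the abort/close IOCB, emlxs_sbp_abort_add()
 * arms a roughly 10-tick deadline (sbp->ticks = timer_tics + 10),
 * which the driver timer presumably uses, together with
 * abort_attempts, to escalate a stalled abort. It also links the
 * victim to the flushing packet: fpkt->flush_count is incremented
 * exactly once per victim, letting the flusher wait until every
 * dependent IO has drained before it completes.
 */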
2807 lines elided