Print this page
NEX-8705 Drivers for ATTO Celerity FC-162E Gen 5 and Celerity FC-162P Gen 6 16Gb FC cards support
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
NEX-1878 update emlxs from source provided by Emulex
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_event.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_event.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at
9 9 * http://www.opensource.org/licenses/cddl1.txt.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #define DEF_EVENT_STRUCT /* Needed for emlxs_events.h in emlxs_event.h */
28 28 #include <emlxs.h>
29 29
30 30
31 31 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
32 32 EMLXS_MSG_DEF(EMLXS_EVENT_C);
33 33
34 34
35 35 static uint32_t emlxs_event_check(emlxs_port_t *port, emlxs_event_t *evt);
36 36 static void emlxs_event_destroy(emlxs_hba_t *hba, emlxs_event_entry_t *entry);
37 37
/*
 * No-op placeholder stored in the event tables' notification slots.
 * emlxs_event_destroy() compares an event's destroy handler against
 * this function to detect events that have no real destroy callback,
 * so it is never actually invoked with arguments.
 */
extern void
emlxs_null_func() {}
40 40
41 41
42 42 static uint32_t
43 43 emlxs_event_check(emlxs_port_t *port, emlxs_event_t *evt)
44 44 {
45 45 emlxs_hba_t *hba = HBA;
46 46
47 47 /* Check if the event is being requested */
48 48 if ((hba->event_mask & evt->mask)) {
49 49 return (1);
50 50 }
51 51
52 52 #ifdef SAN_DIAG_SUPPORT
53 53 if ((port->sd_event_mask & evt->mask)) {
54 54 return (1);
55 55 }
56 56 #endif /* SAN_DIAG_SUPPORT */
57 57
58 58 return (0);
59 59
60 60 } /* emlxs_event_check() */
61 61
62 62
/*
 * emlxs_event_queue_create() - Initialize the HBA's event queue.
 *
 * Zeroes the queue, creates its condition variable, and creates the
 * queue mutex at the proper interrupt priority: for fixed interrupts
 * the DDI interrupt block cookie is used, for MSI (when compiled with
 * MSI_SUPPORT) the priority comes from DDI_INTR_PRI().
 *
 * Always returns 1 (success).
 */
extern uint32_t
emlxs_event_queue_create(emlxs_hba_t *hba)
{
	emlxs_event_queue_t *eventq = &EVENTQ;
	ddi_iblock_cookie_t iblock;

	/* Clear the queue */
	bzero(eventq, sizeof (emlxs_event_queue_t));

	cv_init(&eventq->lock_cv, NULL, CV_DRIVER, NULL);

	if (!(hba->intr_flags & EMLXS_MSI_ENABLED)) {
		/* Get the current interrupt block cookie */
		(void) ddi_get_iblock_cookie(hba->dip, (uint_t)EMLXS_INUMBER,
		    &iblock);

		/* Create the mutex lock at interrupt-block priority */
		mutex_init(&eventq->lock, NULL, MUTEX_DRIVER, (void *)iblock);
	}
#ifdef MSI_SUPPORT
	else {
		/* Create event mutex lock at the MSI interrupt priority */
		mutex_init(&eventq->lock, NULL, MUTEX_DRIVER,
		    DDI_INTR_PRI(hba->intr_arg));
	}
#endif

	return (1);

} /* emlxs_event_queue_create() */
93 93
94 94
/*
 * emlxs_event_queue_destroy() - Tear down the HBA's event queue.
 *
 * Clears the driver-wide and per-vport event registration masks, wakes
 * any threads sleeping in emlxs_get_dfc_event() so they can notice the
 * cleared masks and exit, destroys every remaining queued event, and
 * finally destroys the queue lock and condition variable.
 *
 * Must only be called during detach, when no new events can be logged.
 */
extern void
emlxs_event_queue_destroy(emlxs_hba_t *hba)
{
	emlxs_port_t *vport;
	emlxs_event_queue_t *eventq = &EVENTQ;
	uint32_t i;
	uint32_t wakeup = 0;

	mutex_enter(&eventq->lock);

	/* Clear all event masks and broadcast a wakeup */
	/* to clear any sleeping threads */
	if (hba->event_mask) {
		hba->event_mask = 0;
		hba->event_timer = 0;
		wakeup = 1;
	}

	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		if (vport->sd_event_mask) {
			vport->sd_event_mask = 0;
			wakeup = 1;
		}
	}

	if (wakeup) {
		cv_broadcast(&eventq->lock_cv);

		/* Briefly drop the lock so woken threads can run, */
		/* observe the cleared masks, and return */
		mutex_exit(&eventq->lock);
		BUSYWAIT_MS(10);
		mutex_enter(&eventq->lock);
	}

	/* Destroy the remaining events (lock is held, as required */
	/* by emlxs_event_destroy()) */
	while (eventq->first) {
		emlxs_event_destroy(hba, eventq->first);
	}

	mutex_exit(&eventq->lock);

	/* Destroy the queue lock */
	mutex_destroy(&eventq->lock);
	cv_destroy(&eventq->lock_cv);

	/* Clear the queue */
	bzero(eventq, sizeof (emlxs_event_queue_t));

	return;

} /* emlxs_event_queue_destroy() */
147 147
148 148
/*
 * emlxs_event_destroy() - Unlink one event from the queue and free it.
 *
 * Removes the entry from the doubly-linked event queue, accounts for
 * link/RSCN events that were never retrieved by DFCLIB (counted as
 * "missed" HBA events), invokes the event's destroy handler if one is
 * registered, then frees the context buffer and the entry itself.
 *
 * Event queue lock must be held.
 */
static void
emlxs_event_destroy(emlxs_hba_t *hba, emlxs_event_entry_t *entry)
{
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_port_t *port;
	uint32_t missed = 0;

	port = (emlxs_port_t *)entry->port;

	/* Unlink the entry from the queue */
	eventq->count--;
	if (eventq->count == 0) {
		eventq->first = NULL;
		eventq->last = NULL;
	} else {
		if (entry->prev) {
			entry->prev->next = entry->next;
		}
		if (entry->next) {
			entry->next->prev = entry->prev;
		}
		if (eventq->first == entry) {
			eventq->first = entry->next;
		}
		if (eventq->last == entry) {
			eventq->last = entry->prev;
		}
	}

	entry->prev = NULL;
	entry->next = NULL;

	/* A link or RSCN event destroyed before DFCLIB picked it up */
	/* counts as a missed HBA event */
	if ((entry->evt->mask == EVT_LINK) ||
	    (entry->evt->mask == EVT_RSCN)) {
		if (!(entry->flag & EMLXS_DFC_EVENT_DONE)) {
			hba->hba_event.missed++;
			missed = 1;
		}
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_dequeued_msg,
	    "%s[%d]: flag=%x missed=%d cnt=%d",
	    entry->evt->label, entry->id, entry->flag, missed, eventq->count);

	/* Call notification handler (emlxs_null_func means "none") */
	if (entry->evt->destroy != emlxs_null_func) {
		entry->evt->destroy(entry);
	}

	/* Free context buffer */
	if (entry->bp && entry->size) {
		kmem_free(entry->bp, entry->size);
	}

	/* Free entry buffer */
	kmem_free(entry, sizeof (emlxs_event_entry_t));

	return;

} /* emlxs_event_destroy() */
209 209
210 210
/*
 * emlxs_event() - Queue a new event and wake any waiters.
 *
 * Takes ownership of the caller-supplied context buffer 'bp' of
 * length 'size': if the event is not being requested or the entry
 * allocation fails, the buffer is freed here; otherwise it is freed
 * later by emlxs_event_destroy().  Assigns the event a monotonically
 * increasing non-zero id, records it in the per-mask-bit last_id
 * table, appends the entry to the tail of the queue, and broadcasts
 * on the queue condition variable to wake sleeping consumers.
 */
extern void
emlxs_event(emlxs_port_t *port, emlxs_event_t *evt, void *bp, uint32_t size)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t i;
	uint32_t mask;

	if (emlxs_event_check(port, evt) == 0) {
		goto failed;
	}

	/* Create event entry */
	if (!(entry = (emlxs_event_entry_t *)kmem_alloc(
	    sizeof (emlxs_event_entry_t), KM_NOSLEEP))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
		    "%s: Unable to allocate event entry.", evt->label);

		goto failed;
	}

	/* Initialize */
	bzero(entry, sizeof (emlxs_event_entry_t));

	entry->evt = evt;
	entry->port = (void *)port;
	entry->bp = bp;
	entry->size = size;

	mutex_enter(&eventq->lock);

	/* Set the event timer (0 timeout means the event never expires) */
	entry->timestamp = hba->timer_tics;
	if (evt->timeout) {
		entry->timer = entry->timestamp + evt->timeout;
	}

	/* Eventq id starts with 1; 0 is reserved as "no event" */
	if (eventq->next_id == 0) {
		eventq->next_id = 1;
	}

	/* Set the event id */
	entry->id = eventq->next_id++;

	/* Record this id as the latest for every mask bit the event sets */
	mask = evt->mask;
	for (i = 0; i < 32; i++) {
		if (mask & 0x01) {
			eventq->last_id[i] = entry->id;
		}
		mask >>= 1;
	}

	/* Put event on bottom of queue */
	entry->next = NULL;
	if (eventq->count == 0) {
		entry->prev = NULL;
		eventq->first = entry;
		eventq->last = entry;
	} else {
		entry->prev = eventq->last;
		entry->prev->next = entry;
		eventq->last = entry;
	}
	eventq->count++;

	/* Link/RSCN events feed the HBA-event accounting used by */
	/* emlxs_get_dfc_eventinfo() */
	if ((entry->evt->mask == EVT_LINK) ||
	    (entry->evt->mask == EVT_RSCN)) {
		hba->hba_event.new++;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_queued_msg,
	    "%s[%d]: bp=%p size=%d cnt=%d", entry->evt->label,
	    entry->id, bp, size, eventq->count);

	/* Broadcast the event */
	cv_broadcast(&eventq->lock_cv);

	mutex_exit(&eventq->lock);

	return;

failed:

	/* Event not queued; free the caller's buffer here */
	if (bp && size) {
		kmem_free(bp, size);
	}

	return;

} /* emlxs_event() */
304 304
305 305
/*
 * emlxs_timer_check_events() - Periodic timer callback that expires
 * timed-out events.
 *
 * Runs at most once per EMLXS_EVENT_PERIOD timer ticks (gated by
 * hba->event_timer) and only when the CFG_TIMEOUT_ENABLE parameter is
 * set.  Walks the event queue and destroys every entry whose timer
 * deadline has passed; entries with timer == 0 never expire.
 */
extern void
emlxs_timer_check_events(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	emlxs_event_entry_t *next;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	if ((hba->event_timer > hba->timer_tics)) {
		return;
	}

	/* Unlocked peek at count; the queue is re-examined under the */
	/* lock before any entry is touched */
	if (eventq->count) {
		mutex_enter(&eventq->lock);

		entry = eventq->first;
		while (entry) {
			if ((!entry->timer) ||
			    (entry->timer > hba->timer_tics)) {
				entry = entry->next;
				continue;
			}

			/* Event timed out, destroy it */
			/* (save next first; destroy frees the entry) */
			next = entry->next;
			emlxs_event_destroy(hba, entry);
			entry = next;
		}

		mutex_exit(&eventq->lock);
	}

	/* Set next event timer check */
	hba->event_timer = hba->timer_tics + EMLXS_EVENT_PERIOD;

	return;

} /* emlxs_timer_check_events() */
348 348
349 349
350 350 extern void
351 351 emlxs_log_rscn_event(emlxs_port_t *port, uint8_t *payload, uint32_t size)
352 352 {
353 353 uint8_t *bp;
354 354 uint32_t *ptr;
355 355
356 356 /* Check if the event is being requested */
357 357 if (emlxs_event_check(port, &emlxs_rscn_event) == 0) {
358 358 return;
359 359 }
360 360
361 361 if (size > MAX_RSCN_PAYLOAD) {
362 362 size = MAX_RSCN_PAYLOAD;
363 363 }
364 364
365 365 size += sizeof (uint32_t);
366 366
367 367 /* Save a copy of the payload for the event log */
368 368 if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
369 369 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
370 370 "%s: Unable to allocate buffer.", emlxs_rscn_event.label);
371 371
372 372 return;
373 373 }
374 374
375 375 /*
376 376 * Buffer Format:
377 377 * word[0] = DID of the RSCN
378 378 * word[1] = RSCN Payload
379 379 */
380 380 ptr = (uint32_t *)bp;
381 381 *ptr++ = port->did;
382 382 bcopy(payload, (char *)ptr, (size - sizeof (uint32_t)));
383 383
384 384 emlxs_event(port, &emlxs_rscn_event, bp, size);
385 385
386 386 return;
387 387
388 388 } /* emlxs_log_rscn_event() */
389 389
390 390
391 391 extern void
392 392 emlxs_log_vportrscn_event(emlxs_port_t *port, uint8_t *payload, uint32_t size)
393 393 {
394 394 uint8_t *bp;
395 395 uint8_t *ptr;
396 396
397 397 /* Check if the event is being requested */
398 398 if (emlxs_event_check(port, &emlxs_vportrscn_event) == 0) {
399 399 return;
400 400 }
401 401
402 402 if (size > MAX_RSCN_PAYLOAD) {
403 403 size = MAX_RSCN_PAYLOAD;
404 404 }
405 405
406 406 size += sizeof (NAME_TYPE);
407 407
408 408 /* Save a copy of the payload for the event log */
409 409 if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
410 410 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
411 411 "%s: Unable to allocate buffer.",
412 412 emlxs_vportrscn_event.label);
413 413
414 414 return;
415 415 }
416 416
417 417 /*
418 418 * Buffer Format:
419 419 * word[0 - 4] = WWPN of the RSCN
420 420 * word[5] = RSCN Payload
421 421 */
422 422 ptr = bp;
423 423 bcopy(&port->wwpn, ptr, sizeof (NAME_TYPE));
424 424 ptr += sizeof (NAME_TYPE);
425 425 bcopy(payload, ptr, (size - sizeof (NAME_TYPE)));
426 426
427 427 emlxs_event(port, &emlxs_vportrscn_event, bp, size);
428 428
429 429 return;
430 430
431 431 } /* emlxs_log_vportrscn_event() */
432 432
433 433
434 434 extern uint32_t
435 435 emlxs_flush_ct_event(emlxs_port_t *port, uint32_t rxid)
436 436 {
437 437 emlxs_hba_t *hba = HBA;
438 438 emlxs_event_queue_t *eventq = &EVENTQ;
439 439 emlxs_event_entry_t *entry;
440 440 uint32_t *ptr;
441 441 uint32_t found = 0;
442 442
443 443 mutex_enter(&eventq->lock);
444 444
445 445 for (entry = eventq->first; entry != NULL; entry = entry->next) {
446 446 if ((entry->port != port) ||
447 447 (entry->evt != &emlxs_ct_event)) {
448 448 continue;
449 449 }
450 450
451 451 ptr = (uint32_t *)entry->bp;
452 452 if (rxid == *ptr) {
453 453 /* This will prevent a CT exchange abort */
454 454 /* in emlxs_ct_event_destroy() */
455 455 entry->flag |= EMLXS_DFC_EVENT_DONE;
456 456
457 457 emlxs_event_destroy(hba, entry);
458 458 found = 1;
459 459 break;
460 460 }
461 461 }
462 462
463 463 mutex_exit(&eventq->lock);
464 464
465 465 return (found);
466 466
467 467 } /* emlxs_flush_ct_event() */
468 468
469 469
470 470 extern uint32_t
471 471 emlxs_log_ct_event(emlxs_port_t *port, uint8_t *payload, uint32_t size,
472 472 uint32_t rxid)
473 473 {
474 474 uint8_t *bp;
475 475 uint32_t *ptr;
476 476
477 477 /* Check if the event is being requested */
478 478 if (emlxs_event_check(port, &emlxs_ct_event) == 0) {
479 479 return (1);
480 480 }
481 481
482 482 if (size > MAX_CT_PAYLOAD) {
483 483 size = MAX_CT_PAYLOAD;
484 484 }
485 485
486 486 size += sizeof (uint32_t);
487 487
488 488 /* Save a copy of the payload for the event log */
489 489 if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
490 490 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
491 491 "%s: Unable to allocate buffer.", emlxs_ct_event.label);
492 492
493 493 return (1);
494 494 }
495 495
496 496 /*
497 497 * Buffer Format:
498 498 * word[0] = RXID tag for outgoing reply to this CT request
499 499 * word[1] = CT Payload
500 500 */
501 501 ptr = (uint32_t *)bp;
502 502 *ptr++ = rxid;
503 503 bcopy(payload, (char *)ptr, (size - sizeof (uint32_t)));
504 504
505 505 emlxs_event(port, &emlxs_ct_event, bp, size);
506 506
507 507 return (0);
508 508
509 509 } /* emlxs_log_ct_event() */
510 510
511 511
512 512 extern void
513 513 emlxs_ct_event_destroy(emlxs_event_entry_t *entry)
514 514 {
515 515 emlxs_port_t *port = (emlxs_port_t *)entry->port;
516 516 emlxs_hba_t *hba = HBA;
517 517 uint32_t rxid;
518 518
519 519 if (!(entry->flag & EMLXS_DFC_EVENT_DONE)) {
520 520
521 521 rxid = *(uint32_t *)entry->bp;
522 522
523 523 /* Abort exchange */
524 524 emlxs_thread_spawn(hba, emlxs_abort_ct_exchange,
525 525 entry->port, (void *)(unsigned long)rxid);
526 526 }
527 527
528 528 return;
529 529
530 530 } /* emlxs_ct_event_destroy() */
531 531
532 532
/*
 * emlxs_log_link_event() - Log a link state change event.
 *
 * Builds a buffer containing the current link speed, LIP type, and a
 * dfc_linkinfo_t snapshot (event tag, up/down counters, link state,
 * topology, DID, and ALPA map for loop topologies), then queues it.
 * The queued buffer is owned and later freed by the event framework.
 */
extern void
emlxs_log_link_event(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	uint8_t *bp;
	dfc_linkinfo_t *linkinfo;
	uint8_t *byte;
	uint8_t *linkspeed;
	uint8_t *liptype;
	uint8_t *resv1;
	uint8_t *resv2;
	uint32_t size;

	/* Check if the event is being requested */
	if (emlxs_event_check(port, &emlxs_link_event) == 0) {
		return;
	}

	size = sizeof (dfc_linkinfo_t) + sizeof (uint32_t);

	/* Save a copy of the buffer for the event log */
	if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
		    "%s: Unable to allocate buffer.", emlxs_link_event.label);

		return;
	}

	/*
	 * Buffer Format:
	 *	word[0] = Linkspeed:8
	 *	word[0] = LIP_type:8
	 *	word[0] = resv1:8
	 *	word[0] = resv2:8
	 *	word[1] = dfc_linkinfo_t data
	 */
	byte = (uint8_t *)bp;
	linkspeed = &byte[0];
	liptype = &byte[1];
	resv1 = &byte[2];
	resv2 = &byte[3];
	linkinfo = (dfc_linkinfo_t *)&byte[4];

	*resv1 = 0;
	*resv2 = 0;

	if (hba->state <= FC_LINK_DOWN) {
		/* No speed/LIP info while the link is down */
		*linkspeed = 0;
		*liptype = 0;
	} else {
		/* Map the adapter's link speed to the HBA API constant; */
		/* anything unrecognized is reported as 1Gb */
		if (hba->linkspeed == LA_2GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_2GBIT;
		} else if (hba->linkspeed == LA_4GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_4GBIT;
		} else if (hba->linkspeed == LA_8GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_8GBIT;
		} else if (hba->linkspeed == LA_10GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_10GBIT;
		} else if (hba->linkspeed == LA_16GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_16GBIT;
		} else if (hba->linkspeed == LA_32GHZ_LINK) {
			*linkspeed = HBA_PORTSPEED_32GBIT;
		} else {
			*linkspeed = HBA_PORTSPEED_1GBIT;
		}

		/* Set LIP type */
		*liptype = port->lip_type;
	}

	bzero(linkinfo, sizeof (dfc_linkinfo_t));

	linkinfo->a_linkEventTag = hba->link_event_tag;
	linkinfo->a_linkUp = HBASTATS.LinkUp;
	linkinfo->a_linkDown = HBASTATS.LinkDown;
	linkinfo->a_linkMulti = HBASTATS.LinkMultiEvent;

	if (hba->state <= FC_LINK_DOWN) {
		linkinfo->a_linkState = LNK_DOWN;
		linkinfo->a_DID = port->prev_did;
	} else if (hba->state < FC_READY) {
		linkinfo->a_linkState = LNK_DISCOVERY;
	} else {
		linkinfo->a_linkState = LNK_READY;
	}

	if (linkinfo->a_linkState != LNK_DOWN) {
		if (hba->topology == TOPOLOGY_LOOP) {
			if (hba->flag & FC_FABRIC_ATTACHED) {
				linkinfo->a_topology = LNK_PUBLIC_LOOP;
			} else {
				linkinfo->a_topology = LNK_LOOP;
			}

			linkinfo->a_alpa = port->did & 0xff;
			linkinfo->a_DID = linkinfo->a_alpa;
			/* alpa_map[0] holds the count of valid entries */
			linkinfo->a_alpaCnt = port->alpa_map[0];

			if (linkinfo->a_alpaCnt > 127) {
				linkinfo->a_alpaCnt = 127;
			}

			bcopy((void *)&port->alpa_map[1], linkinfo->a_alpaMap,
			    linkinfo->a_alpaCnt);
		} else {
			if (port->node_count == 1) {
				linkinfo->a_topology = LNK_PT2PT;
			} else {
				linkinfo->a_topology = LNK_FABRIC;
			}

			linkinfo->a_DID = port->did;
		}
	}

	bcopy(&hba->wwpn, linkinfo->a_wwpName, 8);
	bcopy(&hba->wwnn, linkinfo->a_wwnName, 8);

	emlxs_event(port, &emlxs_link_event, bp, size);

	return;

} /* emlxs_log_link_event() */
655 657
656 658
657 659 extern void
658 660 emlxs_log_dump_event(emlxs_port_t *port, uint8_t *buffer, uint32_t size)
659 661 {
660 662 emlxs_hba_t *hba = HBA;
661 663 uint8_t *bp;
662 664
663 665 /* Check if the event is being requested */
664 666 if (emlxs_event_check(port, &emlxs_dump_event) == 0) {
665 667 #ifdef DUMP_SUPPORT
666 668 /* Schedule a dump thread */
667 669 emlxs_dump(hba, EMLXS_DRV_DUMP, 0, 0);
668 670 #endif /* DUMP_SUPPORT */
669 671 return;
670 672 }
671 673
672 674 if (buffer && size) {
673 675 /* Save a copy of the buffer for the event log */
674 676 if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
675 677 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
676 678 "%s: Unable to allocate buffer.",
677 679 emlxs_dump_event.label);
678 680
679 681 return;
680 682 }
681 683
682 684 bcopy(buffer, bp, size);
683 685 } else {
684 686 bp = NULL;
685 687 size = 0;
686 688 }
687 689
688 690 emlxs_event(port, &emlxs_dump_event, bp, size);
689 691
690 692 return;
691 693
692 694 } /* emlxs_log_dump_event() */
693 695
694 696
695 697 extern void
696 698 emlxs_log_temp_event(emlxs_port_t *port, uint32_t type, uint32_t temp)
697 699 {
698 700 emlxs_hba_t *hba = HBA;
699 701 uint32_t *bp;
700 702 uint32_t size;
701 703
702 704 /* Check if the event is being requested */
703 705 if (emlxs_event_check(port, &emlxs_temp_event) == 0) {
704 706 #ifdef DUMP_SUPPORT
705 707 /* Schedule a dump thread */
706 708 emlxs_dump(hba, EMLXS_TEMP_DUMP, type, temp);
707 709 #endif /* DUMP_SUPPORT */
708 710 return;
709 711 }
710 712
711 713 size = 2 * sizeof (uint32_t);
712 714
713 715 if (!(bp = (uint32_t *)kmem_alloc(size, KM_NOSLEEP))) {
714 716 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
715 717 "%s: Unable to allocate buffer.", emlxs_temp_event.label);
716 718
717 719 return;
718 720 }
719 721
720 722 bp[0] = type;
721 723 bp[1] = temp;
722 724
723 725 emlxs_event(port, &emlxs_temp_event, bp, size);
724 726
725 727 return;
726 728
727 729 } /* emlxs_log_temp_event() */
728 730
729 731
730 732
731 733 extern void
732 734 emlxs_log_fcoe_event(emlxs_port_t *port, menlo_init_rsp_t *init_rsp)
733 735 {
734 736 emlxs_hba_t *hba = HBA;
735 737 uint8_t *bp;
736 738 uint32_t size;
737 739
738 740 /* Check if the event is being requested */
739 741 if (emlxs_event_check(port, &emlxs_fcoe_event) == 0) {
740 742 return;
741 743 }
742 744
743 745 /* Check if this is a FCOE adapter */
744 746 if (hba->model_info.device_id != PCI_DEVICE_ID_HORNET) {
745 747 return;
746 748 }
747 749
748 750 size = sizeof (menlo_init_rsp_t);
749 751
750 752 if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
751 753 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
752 754 "%s: Unable to allocate buffer.", emlxs_fcoe_event.label);
753 755
754 756 return;
755 757 }
756 758
757 759 bcopy((uint8_t *)init_rsp, bp, size);
758 760
759 761 emlxs_event(port, &emlxs_fcoe_event, bp, size);
760 762
761 763 return;
762 764
763 765 } /* emlxs_log_fcoe_event() */
764 766
765 767
766 768 extern void
767 769 emlxs_log_async_event(emlxs_port_t *port, IOCB *iocb)
768 770 {
769 771 uint8_t *bp;
770 772 uint32_t size;
771 773
772 774 if (emlxs_event_check(port, &emlxs_async_event) == 0) {
773 775 return;
774 776 }
775 777
776 778 /* ASYNC_STATUS_CN response size */
777 779 size = 64;
778 780
779 781 if (!(bp = (uint8_t *)kmem_alloc(size, KM_NOSLEEP))) {
780 782 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
781 783 "%s: Unable to allocate buffer.", emlxs_async_event.label);
782 784
783 785 return;
784 786 }
785 787
786 788 bcopy((uint8_t *)iocb, bp, size);
787 789
788 790 emlxs_event(port, &emlxs_async_event, bp, size);
789 791
790 792 return;
791 793
792 794 } /* emlxs_log_async_event() */
793 795
794 796
/*
 * emlxs_get_dfc_eventinfo() - Harvest queued link/RSCN events into an
 * HBA API (SNIA) HBA_EVENTINFO array.
 *
 * On entry *eventcount holds the capacity of 'eventinfo'; on return
 * it holds the number of events copied out.  *missed returns (and
 * resets) the count of link/RSCN events destroyed before retrieval.
 * Only events newer than hba->hba_event.last_id are considered, and
 * last_id is advanced as entries are consumed.
 *
 * Returns 0 on success or DFC_ARG_NULL if any output pointer is NULL.
 */
extern uint32_t
emlxs_get_dfc_eventinfo(emlxs_port_t *port, HBA_EVENTINFO *eventinfo,
    uint32_t *eventcount, uint32_t *missed)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t max_events;
	dfc_linkinfo_t *linkinfo;
	uint32_t *word;
	uint8_t *byte;
	uint8_t linkspeed;
	uint8_t liptype;
	fc_affected_id_t *aid;
	uint32_t events;
	uint8_t format;

	if (!eventinfo || !eventcount || !missed) {
		return (DFC_ARG_NULL);
	}

	max_events = *eventcount;
	*eventcount = 0;
	*missed = 0;

	mutex_enter(&eventq->lock);

	/* Account for missed events */
	if (hba->hba_event.new > hba->hba_event.missed) {
		hba->hba_event.new -= hba->hba_event.missed;
	} else {
		hba->hba_event.new = 0;
	}

	*missed = hba->hba_event.missed;
	hba->hba_event.missed = 0;

	if (!hba->hba_event.new) {
		/* Nothing new; mark everything up to now as seen */
		hba->hba_event.last_id = eventq->next_id - 1;
		mutex_exit(&eventq->lock);
		return (0);
	}

	/* A new event has occurred since last acquisition */

	events = 0;
	entry = eventq->first;
	while (entry && (events < max_events)) {

		/* Skip old events */
		if (entry->id <= hba->hba_event.last_id) {
			entry = entry->next;
			continue;
		}

		/* Process this entry (only EVT_LINK and EVT_RSCN map to */
		/* HBA API events; others are skipped but marked seen) */
		switch (entry->evt->mask) {
		case EVT_LINK:
			/* Buffer layout set by emlxs_log_link_event() */
			byte = (uint8_t *)entry->bp;
			linkspeed = byte[0];
			liptype = byte[1];
			linkinfo = (dfc_linkinfo_t *)&byte[4];

			if (linkinfo->a_linkState == LNK_DOWN) {
				eventinfo->EventCode =
				    HBA_EVENT_LINK_DOWN;
				eventinfo->Event.Link_EventInfo.
				    PortFcId = linkinfo->a_DID;
				eventinfo->Event.Link_EventInfo.
				    Reserved[0] = 0;
				eventinfo->Event.Link_EventInfo.
				    Reserved[1] = 0;
				eventinfo->Event.Link_EventInfo.
				    Reserved[2] = 0;
			} else {
				eventinfo->EventCode =
				    HBA_EVENT_LINK_UP;
				eventinfo->Event.Link_EventInfo.
				    PortFcId = linkinfo->a_DID;

				/* Reserved[0]: 2 = loop, 1 = non-loop */
				if ((linkinfo->a_topology ==
				    LNK_PUBLIC_LOOP) ||
				    (linkinfo->a_topology ==
				    LNK_LOOP)) {
					eventinfo->Event.
					    Link_EventInfo.
					    Reserved[0] = 2;
				} else {
					eventinfo->Event.
					    Link_EventInfo.
					    Reserved[0] = 1;
				}

				eventinfo->Event.Link_EventInfo.
				    Reserved[1] = liptype;
				eventinfo->Event.Link_EventInfo.
				    Reserved[2] = linkspeed;
			}

			eventinfo++;
			events++;
			hba->hba_event.new--;
			break;

		case EVT_RSCN:
			/* Buffer layout set by emlxs_log_rscn_event() */
			word = (uint32_t *)entry->bp;
			eventinfo->EventCode = HBA_EVENT_RSCN;
			eventinfo->Event.RSCN_EventInfo.PortFcId =
			    word[0] & 0xFFFFFF;
			/* word[1] is the RSCN payload command */

			aid = (fc_affected_id_t *)&word[2];
			format = aid->aff_format;

			/* Mask the affected D_ID by its address format */
			switch (format) {
			case 0:	/* Port */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage =
				    aid->aff_d_id & 0x00ffffff;
				break;

			case 1:	/* Area */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage =
				    aid->aff_d_id & 0x00ffff00;
				break;

			case 2:	/* Domain */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage =
				    aid->aff_d_id & 0x00ff0000;
				break;

			case 3:	/* Network */
				eventinfo->Event.RSCN_EventInfo.
				    NPortPage = 0;
				break;
			}

			eventinfo->Event.RSCN_EventInfo.Reserved[0] =
			    0;
			eventinfo->Event.RSCN_EventInfo.Reserved[1] =
			    0;

			eventinfo++;
			events++;
			hba->hba_event.new--;
			break;
		}

		hba->hba_event.last_id = entry->id;
		entry = entry->next;
	}

	/* Return number of events acquired */
	*eventcount = events;

	mutex_exit(&eventq->lock);

	return (0);

} /* emlxs_get_dfc_eventinfo() */
957 959
958 960
/*
 * emlxs_get_dfc_event() - Retrieve the next DFC event of a given type.
 *
 * dfc_event->event is a single-bit event mask; dfc_event->last_id is
 * the id of the last event the caller has seen.  If no newer event of
 * that type exists and 'sleep' is set, the thread blocks on the event
 * queue condition variable until a new event is logged, the event
 * registration is cleared (e.g. by emlxs_kill_dfc_event() or queue
 * teardown), or the thread receives a signal.
 *
 * If dfc_event->dataout/size are set, the matching event's context
 * buffer is copied out (truncated to the smaller size), the entry is
 * marked EMLXS_DFC_EVENT_DONE, and dfc_event->size is set to the
 * number of bytes copied.  dfc_event->last_id is always advanced.
 */
void
emlxs_get_dfc_event(emlxs_port_t *port, emlxs_dfc_event_t *dfc_event,
    uint32_t sleep)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t found;
	uint32_t mask;
	uint32_t i;
	uint32_t size = 0;
	uint32_t rc;

	if (dfc_event->dataout && dfc_event->size) {
		size = dfc_event->size;
	}
	dfc_event->size = 0;

	/* Calculate the event index (bit position of the event mask) */
	mask = dfc_event->event;
	for (i = 0; i < 32; i++) {
		if (mask & 0x01) {
			break;
		}

		mask >>= 1;
	}

	if (i == 32) {
		/* No bit set; nothing to wait for */
		return;
	}

	mutex_enter(&eventq->lock);

wait_for_event:

	/* Check if no new event has occurred */
	if (dfc_event->last_id == eventq->last_id[i]) {
		if (!sleep) {
			mutex_exit(&eventq->lock);
			return;
		}

		/* While event is still active and */
		/* no new event has been logged */
		while ((dfc_event->event & hba->event_mask) &&
		    (dfc_event->last_id == eventq->last_id[i])) {

			rc = cv_wait_sig(&eventq->lock_cv, &eventq->lock);

			/* Check if thread was killed by kernel */
			if (rc == 0) {
				dfc_event->pid = 0;
				dfc_event->event = 0;
				mutex_exit(&eventq->lock);
				return;
			}
		}

		/* If the event is no longer registered then */
		/* return immediately */
		if (!(dfc_event->event & hba->event_mask)) {
			mutex_exit(&eventq->lock);
			return;
		}
	}

	/* !!! An event has occurred since last_id !!! */

	/* Check if event data is not being requested */
	if (!size) {
		/* If so, then just return the last event id */
		dfc_event->last_id = eventq->last_id[i];

		mutex_exit(&eventq->lock);
		return;
	}

	/* !!! The requester wants the next event buffer !!! */

	found = 0;
	entry = eventq->first;
	while (entry) {
		if ((entry->id > dfc_event->last_id) &&
		    (entry->evt->mask == dfc_event->event)) {
			found = 1;
			break;
		}

		entry = entry->next;
	}

	if (!found) {
		/* The event fired but its entry was already destroyed */
		/* (e.g. timed out); update last_id and wait again */
		dfc_event->last_id = eventq->last_id[i];

		/* Try waiting again if we can */
		goto wait_for_event;
	}

	/* !!! Next event found !!! */

	/* Copy the context buffer to the buffer provided */
	if (entry->bp && entry->size) {
		if (entry->size < size) {
			size = entry->size;
		}

		bcopy((void *)entry->bp, dfc_event->dataout, size);

		/* Event has been retrieved by DFCLIB */
		entry->flag |= EMLXS_DFC_EVENT_DONE;

		dfc_event->size = size;
	}

	dfc_event->last_id = entry->id;

	mutex_exit(&eventq->lock);

	return;

} /* emlxs_get_dfc_event() */
1082 1084
1083 1085
/*
 * emlxs_kill_dfc_event() - Deactivate a DFC event registration.
 *
 * Clears the registration's pid and event mask under the queue lock
 * and broadcasts on the condition variable so any thread blocked in
 * emlxs_get_dfc_event() wakes, sees the cleared event, and returns.
 *
 * Always returns 0.
 */
uint32_t
emlxs_kill_dfc_event(emlxs_port_t *port, emlxs_dfc_event_t *dfc_event)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;

	mutex_enter(&eventq->lock);
	dfc_event->pid = 0;
	dfc_event->event = 0;
	cv_broadcast(&eventq->lock_cv);
	mutex_exit(&eventq->lock);

	return (0);

} /* emlxs_kill_dfc_event() */
1099 1101
1100 1102
1101 1103 #ifdef SAN_DIAG_SUPPORT
1102 1104 extern void
1103 1105 emlxs_log_sd_basic_els_event(emlxs_port_t *port, uint32_t subcat,
1104 1106 HBA_WWN *portname, HBA_WWN *nodename)
1105 1107 {
1106 1108 struct sd_plogi_rcv_v0 *bp;
1107 1109 uint32_t size;
1108 1110
1109 1111 /* Check if the event is being requested */
1110 1112 if (emlxs_event_check(port, &emlxs_sd_els_event) == 0) {
1111 1113 return;
1112 1114 }
1113 1115
1114 1116 size = sizeof (struct sd_plogi_rcv_v0);
1115 1117
1116 1118 if (!(bp = (struct sd_plogi_rcv_v0 *)kmem_alloc(size, KM_NOSLEEP))) {
1117 1119 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1118 1120 "%s: Unable to allocate buffer.", emlxs_sd_els_event.label);
1119 1121
1120 1122 return;
1121 1123 }
1122 1124
1123 1125 /*
1124 1126 * we are using version field to store subtype, libdfc
1125 1127 * will fix this up before returning data to app.
1126 1128 */
1127 1129 bp->sd_plogir_version = subcat;
1128 1130 bcopy((uint8_t *)portname, (uint8_t *)&bp->sd_plogir_portname,
1129 1131 sizeof (HBA_WWN));
1130 1132 bcopy((uint8_t *)nodename, (uint8_t *)&bp->sd_plogir_nodename,
1131 1133 sizeof (HBA_WWN));
1132 1134
1133 1135 emlxs_event(port, &emlxs_sd_els_event, bp, size);
1134 1136
1135 1137 return;
1136 1138
1137 1139 } /* emlxs_log_sd_basic_els_event() */
1138 1140
1139 1141
1140 1142 extern void
1141 1143 emlxs_log_sd_prlo_event(emlxs_port_t *port, HBA_WWN *remoteport)
1142 1144 {
1143 1145 struct sd_prlo_rcv_v0 *bp;
1144 1146 uint32_t size;
1145 1147
1146 1148 /* Check if the event is being requested */
1147 1149 if (emlxs_event_check(port, &emlxs_sd_els_event) == 0) {
1148 1150 return;
1149 1151 }
1150 1152
1151 1153 size = sizeof (struct sd_prlo_rcv_v0);
1152 1154
1153 1155 if (!(bp = (struct sd_prlo_rcv_v0 *)kmem_alloc(size,
1154 1156 KM_NOSLEEP))) {
1155 1157 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1156 1158 "%s PRLO: Unable to allocate buffer.",
1157 1159 emlxs_sd_els_event.label);
1158 1160
1159 1161 return;
1160 1162 }
1161 1163
1162 1164 /*
1163 1165 * we are using version field to store subtype, libdfc
1164 1166 * will fix this up before returning data to app.
1165 1167 */
1166 1168 bp->sd_prlor_version = SD_ELS_SUBCATEGORY_PRLO_RCV;
1167 1169 bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_prlor_remoteport,
1168 1170 sizeof (HBA_WWN));
1169 1171
1170 1172 emlxs_event(port, &emlxs_sd_els_event, bp, size);
1171 1173
1172 1174 return;
1173 1175
1174 1176 } /* emlxs_log_sd_prlo_event() */
1175 1177
1176 1178
1177 1179 extern void
1178 1180 emlxs_log_sd_lsrjt_event(emlxs_port_t *port, HBA_WWN *remoteport,
1179 1181 uint32_t orig_cmd, uint32_t reason, uint32_t reason_expl)
1180 1182 {
1181 1183 struct sd_lsrjt_rcv_v0 *bp;
1182 1184 uint32_t size;
1183 1185
1184 1186 /* Check if the event is being requested */
1185 1187 if (emlxs_event_check(port, &emlxs_sd_els_event) == 0) {
1186 1188 return;
1187 1189 }
1188 1190
1189 1191 size = sizeof (struct sd_lsrjt_rcv_v0);
1190 1192
1191 1193 if (!(bp = (struct sd_lsrjt_rcv_v0 *)kmem_alloc(size,
1192 1194 KM_NOSLEEP))) {
1193 1195 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1194 1196 "%s LSRJT: Unable to allocate buffer.",
1195 1197 emlxs_sd_els_event.label);
1196 1198
1197 1199 return;
1198 1200 }
1199 1201
1200 1202 /*
1201 1203 * we are using version field to store subtype, libdfc
1202 1204 * will fix this up before returning data to app.
1203 1205 */
1204 1206 bp->sd_lsrjtr_version = SD_ELS_SUBCATEGORY_LSRJT_RCV;
1205 1207 bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_lsrjtr_remoteport,
1206 1208 sizeof (HBA_WWN));
1207 1209 bp->sd_lsrjtr_original_cmd = orig_cmd;
1208 1210 bp->sd_lsrjtr_reasoncode = reason;
1209 1211 bp->sd_lsrjtr_reasoncodeexpl = reason_expl;
1210 1212
1211 1213 emlxs_event(port, &emlxs_sd_els_event, bp, size);
1212 1214
1213 1215 return;
1214 1216
1215 1217 } /* emlxs_log_sd_lsrjt_event() */
1216 1218
1217 1219
1218 1220 extern void
1219 1221 emlxs_log_sd_fc_bsy_event(emlxs_port_t *port, HBA_WWN *remoteport)
1220 1222 {
1221 1223 struct sd_pbsy_rcv_v0 *bp;
1222 1224 uint32_t size;
1223 1225
1224 1226 /* Check if the event is being requested */
1225 1227 if (emlxs_event_check(port, &emlxs_sd_fabric_event) == 0) {
1226 1228 return;
1227 1229 }
1228 1230
1229 1231 size = sizeof (struct sd_pbsy_rcv_v0);
1230 1232
1231 1233 if (!(bp = (struct sd_pbsy_rcv_v0 *)kmem_alloc(size,
1232 1234 KM_NOSLEEP))) {
1233 1235 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1234 1236 "%s BSY: Unable to allocate buffer.",
1235 1237 emlxs_sd_fabric_event.label);
1236 1238
1237 1239 return;
1238 1240 }
1239 1241
1240 1242 /*
1241 1243 * we are using version field to store subtype, libdfc
1242 1244 * will fix this up before returning data to app.
1243 1245 */
1244 1246 if (remoteport == NULL)
1245 1247 bp->sd_pbsyr_evt_version = SD_FABRIC_SUBCATEGORY_FABRIC_BUSY;
1246 1248 else
1247 1249 {
1248 1250 bp->sd_pbsyr_evt_version = SD_FABRIC_SUBCATEGORY_PORT_BUSY;
1249 1251 bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_pbsyr_rport,
1250 1252 sizeof (HBA_WWN));
1251 1253 }
1252 1254
1253 1255 emlxs_event(port, &emlxs_sd_fabric_event, bp, size);
1254 1256
1255 1257 return;
1256 1258
1257 1259 } /* emlxs_log_sd_fc_bsy_event() */
1258 1260
1259 1261
/*
 * Log a SAN Diag FCP read-check error event, recording the LUN, the SCSI
 * opcode, and the FCP parameter value associated with the failure.
 *
 * The event buffer is allocated here and ownership passes to
 * emlxs_event(); on allocation failure the event is logged and dropped.
 */
extern void
emlxs_log_sd_fc_rdchk_event(emlxs_port_t *port, HBA_WWN *remoteport,
    uint32_t lun, uint32_t opcode, uint32_t fcp_param)
{
	struct sd_fcprdchkerr_v0 *bp;
	uint32_t size;

	/* Check if the event is being requested */
	if (emlxs_event_check(port, &emlxs_sd_fabric_event) == 0) {
		return;
	}

	size = sizeof (struct sd_fcprdchkerr_v0);

	/* KM_NOSLEEP: may be called from a context that cannot block */
	if (!(bp = (struct sd_fcprdchkerr_v0 *)kmem_alloc(size,
	    KM_NOSLEEP))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
		    "%s RDCHK: Unable to allocate buffer.",
		    emlxs_sd_fabric_event.label);

		return;
	}

	/*
	 * we are using version field to store subtype, libdfc
	 * will fix this up before returning data to app.
	 */
	bp->sd_fcprdchkerr_version = SD_FABRIC_SUBCATEGORY_FCPRDCHKERR;
	bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_fcprdchkerr_rport,
	    sizeof (HBA_WWN));
	bp->sd_fcprdchkerr_lun = lun;
	bp->sd_fcprdchkerr_opcode = opcode;
	bp->sd_fcprdchkerr_fcpiparam = fcp_param;

	emlxs_event(port, &emlxs_sd_fabric_event, bp, size);

	return;

} /* emlxs_log_sd_fc_rdchk_event() */
1299 1301
1300 1302
1301 1303 extern void
1302 1304 emlxs_log_sd_scsi_event(emlxs_port_t *port, uint32_t type,
1303 1305 HBA_WWN *remoteport, int32_t lun)
1304 1306 {
1305 1307 struct sd_scsi_generic_v0 *bp;
1306 1308 uint32_t size;
1307 1309
1308 1310 /* Check if the event is being requested */
1309 1311 if (emlxs_event_check(port, &emlxs_sd_scsi_event) == 0) {
1310 1312 return;
1311 1313 }
1312 1314
1313 1315 size = sizeof (struct sd_scsi_generic_v0);
1314 1316
1315 1317 if (!(bp = (struct sd_scsi_generic_v0 *)kmem_alloc(size,
1316 1318 KM_NOSLEEP))) {
1317 1319 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1318 1320 "%s: Unable to allocate buffer.",
1319 1321 emlxs_sd_scsi_event.label);
1320 1322
1321 1323 return;
1322 1324 }
1323 1325
1324 1326 /*
1325 1327 * we are using version field to store subtype, libdfc
1326 1328 * will fix this up before returning data to app.
1327 1329 */
1328 1330 bp->sd_scsi_generic_version = type;
1329 1331 bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_scsi_generic_rport,
1330 1332 sizeof (HBA_WWN));
1331 1333 bp->sd_scsi_generic_lun = lun;
1332 1334
1333 1335 emlxs_event(port, &emlxs_sd_scsi_event, bp, size);
1334 1336
1335 1337 return;
1336 1338
1337 1339 } /* emlxs_log_sd_scsi_event() */
1338 1340
1339 1341
1340 1342 extern void
1341 1343 emlxs_log_sd_scsi_check_event(emlxs_port_t *port, HBA_WWN *remoteport,
1342 1344 uint32_t lun, uint32_t cmdcode, uint32_t sensekey,
1343 1345 uint32_t asc, uint32_t ascq)
1344 1346 {
1345 1347 struct sd_scsi_checkcond_v0 *bp;
1346 1348 uint32_t size;
1347 1349
1348 1350 /* Check if the event is being requested */
1349 1351 if (emlxs_event_check(port, &emlxs_sd_scsi_event) == 0) {
1350 1352 return;
1351 1353 }
1352 1354
1353 1355 size = sizeof (struct sd_scsi_checkcond_v0);
1354 1356
1355 1357 if (!(bp = (struct sd_scsi_checkcond_v0 *)kmem_alloc(size,
1356 1358 KM_NOSLEEP))) {
1357 1359 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
1358 1360 "%s CHECK: Unable to allocate buffer.",
1359 1361 emlxs_sd_scsi_event.label);
1360 1362
1361 1363 return;
1362 1364 }
1363 1365
1364 1366 /*
1365 1367 * we are using version field to store subtype, libdfc
1366 1368 * will fix this up before returning data to app.
1367 1369 */
1368 1370 bp->sd_scsi_checkcond_version = SD_SCSI_SUBCATEGORY_CHECKCONDITION;
1369 1371 bcopy((uint8_t *)remoteport, (uint8_t *)&bp->sd_scsi_checkcond_rport,
1370 1372 sizeof (HBA_WWN));
1371 1373 bp->sd_scsi_checkcond_lun = lun;
1372 1374 bp->sd_scsi_checkcond_cmdcode = cmdcode;
1373 1375 bp->sd_scsi_checkcond_sensekey = sensekey;
1374 1376 bp->sd_scsi_checkcond_asc = asc;
1375 1377 bp->sd_scsi_checkcond_ascq = ascq;
1376 1378
1377 1379 emlxs_event(port, &emlxs_sd_scsi_event, bp, size);
1378 1380
1379 1381 return;
1380 1382
1381 1383 } /* emlxs_log_sd_scsi_check_event() */
1382 1384
1383 1385
/*
 * Retrieve the next SAN Diag event matching dfc_event->event for this
 * port.
 *
 * On entry, dfc_event->event is a single-bit mask selecting the event
 * type, dfc_event->last_id is the last event id the caller has seen,
 * and dfc_event->dataout/size (optional) describe a buffer for the
 * event payload.  On return, dfc_event->last_id is advanced and, if a
 * payload was copied, dfc_event->size holds the number of bytes copied
 * (otherwise 0).
 *
 * If 'sleep' is nonzero and no new event is available, the thread
 * blocks on the event queue condition variable until a new event is
 * logged, the registration is cancelled (see emlxs_kill_dfc_event()),
 * or the wait is interrupted by a signal.
 */
void
emlxs_get_sd_event(emlxs_port_t *port, emlxs_dfc_event_t *dfc_event,
    uint32_t sleep)
{
	emlxs_hba_t *hba = HBA;
	emlxs_event_queue_t *eventq = &EVENTQ;
	emlxs_event_entry_t *entry;
	uint32_t found;
	uint32_t mask;
	uint32_t i;
	uint32_t size = 0;
	uint32_t rc;

	/* Payload is copied out only if the caller supplied a buffer */
	if (dfc_event->dataout && dfc_event->size) {
		size = dfc_event->size;
	}
	dfc_event->size = 0;

	/* Calculate the event index (bit position of the event mask) */
	mask = dfc_event->event;
	for (i = 0; i < 32; i++) {
		if (mask & 0x01) {
			break;
		}

		mask >>= 1;
	}

	/* No event bit set; nothing to wait for */
	if (i == 32) {
		return;
	}

	mutex_enter(&eventq->lock);

wait_for_event:

	/* Check if no new event has occurred */
	if (dfc_event->last_id == eventq->last_id[i]) {
		if (!sleep) {
			mutex_exit(&eventq->lock);
			return;
		}

		/* While event is active and no new event has been logged */
		while ((dfc_event->event & port->sd_event_mask) &&
		    (dfc_event->last_id == eventq->last_id[i])) {
			rc = cv_wait_sig(&eventq->lock_cv, &eventq->lock);

			/* Check if thread was killed by kernel */
			if (rc == 0) {
				dfc_event->pid = 0;
				dfc_event->event = 0;
				mutex_exit(&eventq->lock);
				return;
			}
		}

		/* If the event is no longer registered then return */
		if (!(dfc_event->event & port->sd_event_mask)) {
			mutex_exit(&eventq->lock);
			return;
		}
	}

	/* !!! An event has occurred since last_id !!! */

	/* Check if event data is not being requested */
	if (!size) {
		/* If so, then just return the last event id */
		dfc_event->last_id = eventq->last_id[i];

		mutex_exit(&eventq->lock);
		return;
	}

	/* !!! The requester wants the next event buffer !!! */

	/* Scan the queue for the first unseen entry of this type/port */
	found = 0;
	entry = eventq->first;
	while (entry) {
		if ((entry->id > dfc_event->last_id) &&
		    (entry->port == (void *)port) &&
		    (entry->evt->mask == dfc_event->event)) {
			found = 1;
			break;
		}

		entry = entry->next;
	}

	if (!found) {
		/*
		 * The event counter advanced but the entry has already
		 * aged out of the queue.  Update last_id to the last
		 * known event and wait for the next one.
		 */
		dfc_event->last_id = eventq->last_id[i];

		/* Try waiting again if we can */
		goto wait_for_event;
	}

	/* !!! Next event found !!! */

	/* Copy the context buffer to the buffer provided */
	if (entry->bp && entry->size) {
		/* Never copy more than the caller's buffer can hold */
		if (entry->size < size) {
			size = entry->size;
		}

		bcopy((void *)entry->bp, dfc_event->dataout, size);

		/* Event has been retrieved by SANDIAG */
		entry->flag |= EMLXS_SD_EVENT_DONE;

		dfc_event->size = size;
	}

	dfc_event->last_id = entry->id;

	mutex_exit(&eventq->lock);

	return;

} /* emlxs_get_sd_event */
1505 1507 #endif /* SAN_DIAG_SUPPORT */
|
↓ open down ↓ |
902 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX