1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #define DEF_ICFG 1
28
29 #include <emlxs.h>
30 #include <emlxs_version.h>
31
32
33 static char emlxs_copyright[] = EMLXS_COPYRIGHT;
34 char emlxs_revision[] = EMLXS_REVISION;
35 char emlxs_version[] = EMLXS_VERSION;
36 char emlxs_name[] = EMLXS_NAME;
37 char emlxs_label[] = EMLXS_LABEL;
38
39 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
40 EMLXS_MSG_DEF(EMLXS_SOLARIS_C);
41
42 #ifdef MENLO_SUPPORT
43 static int32_t emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp);
44 #endif /* MENLO_SUPPORT */
45
46 static void emlxs_fca_attach(emlxs_hba_t *hba);
47 static void emlxs_fca_detach(emlxs_hba_t *hba);
48 static void emlxs_drv_banner(emlxs_hba_t *hba);
49
50 static int32_t emlxs_get_props(emlxs_hba_t *hba);
51 static int32_t emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp,
52 uint32_t *pkt_flags);
53 static int32_t emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp);
54 static int32_t emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp);
55 static int32_t emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp);
56 static int32_t emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp);
57 static int32_t emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
58 static int32_t emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp);
59 static int32_t emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp);
60 static uint32_t emlxs_add_instance(int32_t ddiinst);
61 static void emlxs_iodone(emlxs_buf_t *sbp);
62 static int emlxs_pm_lower_power(dev_info_t *dip);
63 static int emlxs_pm_raise_power(dev_info_t *dip);
64 static void emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag,
65 uint32_t failed);
66 static void emlxs_iodone_server(void *arg1, void *arg2, void *arg3);
67 static uint32_t emlxs_integrity_check(emlxs_hba_t *hba);
68 static uint32_t emlxs_test(emlxs_hba_t *hba, uint32_t test_code,
69 uint32_t args, uint32_t *arg);
70
71 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
72 static void emlxs_read_vport_prop(emlxs_hba_t *hba);
73 #endif /* EMLXS_MODREV3 && EMLXS_MODREV4 */
74
75 static void emlxs_mode_init_masks(emlxs_hba_t *hba);
76
77
78 extern int
79 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id);
80 extern int
81 emlxs_select_msiid(emlxs_hba_t *hba);
82 extern void
83 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
84
85 /*
86 * Driver Entry Routines.
87 */
88 static int32_t emlxs_detach(dev_info_t *, ddi_detach_cmd_t);
89 static int32_t emlxs_attach(dev_info_t *, ddi_attach_cmd_t);
90 static int32_t emlxs_open(dev_t *, int32_t, int32_t, cred_t *);
91 static int32_t emlxs_close(dev_t, int32_t, int32_t, cred_t *);
92 static int32_t emlxs_ioctl(dev_t, int32_t, intptr_t, int32_t,
93 cred_t *, int32_t *);
94 static int32_t emlxs_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
95
96
97 /*
98 * FC_AL Transport Functions.
99 */
100 static opaque_t emlxs_fca_bind_port(dev_info_t *, fc_fca_port_info_t *,
101 fc_fca_bind_info_t *);
102 static void emlxs_fca_unbind_port(opaque_t);
103 static void emlxs_initialize_pkt(emlxs_port_t *, emlxs_buf_t *);
104 static int32_t emlxs_fca_get_cap(opaque_t, char *, void *);
105 static int32_t emlxs_fca_set_cap(opaque_t, char *, void *);
106 static int32_t emlxs_fca_get_map(opaque_t, fc_lilpmap_t *);
107 static int32_t emlxs_fca_ub_alloc(opaque_t, uint64_t *, uint32_t,
108 uint32_t *, uint32_t);
109 static int32_t emlxs_fca_ub_free(opaque_t, uint32_t, uint64_t *);
110
111 static opaque_t emlxs_fca_get_device(opaque_t, fc_portid_t);
112 static int32_t emlxs_fca_notify(opaque_t, uint32_t);
113 static void emlxs_ub_els_reject(emlxs_port_t *, fc_unsol_buf_t *);
114
115 /*
116 * Driver Internal Functions.
117 */
118
119 static void emlxs_poll(emlxs_port_t *, emlxs_buf_t *);
120 static int32_t emlxs_power(dev_info_t *, int32_t, int32_t);
121 #ifdef EMLXS_I386
122 #ifdef S11
123 static int32_t emlxs_quiesce(dev_info_t *);
124 #endif /* S11 */
125 #endif /* EMLXS_I386 */
126 static int32_t emlxs_hba_resume(dev_info_t *);
127 static int32_t emlxs_hba_suspend(dev_info_t *);
128 static int32_t emlxs_hba_detach(dev_info_t *);
129 static int32_t emlxs_hba_attach(dev_info_t *);
130 static void emlxs_lock_destroy(emlxs_hba_t *);
131 static void emlxs_lock_init(emlxs_hba_t *);
132
/*
 * Power-management component description strings, exported to the DDI
 * power-management framework as the "pm-components" property.
 * One component with two levels: 0 = D3 (off), 1 = D0 (full power).
 */
char *emlxs_pm_components[] = {
    "NAME=" DRIVER_NAME "000",
    "0=Device D3 State",
    "1=Device D0 State"
};
138
139
140 /*
141 * Default emlx dma limits
142 */
143 ddi_dma_lim_t emlxs_dma_lim = {
144 (uint32_t)0, /* dlim_addr_lo */
145 (uint32_t)0xffffffff, /* dlim_addr_hi */
146 (uint_t)0x00ffffff, /* dlim_cntr_max */
147 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* dlim_burstsizes */
148 1, /* dlim_minxfer */
149 0x00ffffff /* dlim_dmaspeed */
150 };
151
152 /*
153 * Be careful when using these attributes; the defaults listed below are
154 * (almost) the most general case, permitting allocation in almost any
155 * way supported by the LightPulse family. The sole exception is the
156 * alignment specified as requiring memory allocation on a 4-byte boundary;
157 * the Lightpulse can DMA memory on any byte boundary.
158 *
159 * The LightPulse family currently is limited to 16M transfers;
160 * this restriction affects the dma_attr_count_max and dma_attr_maxxfer fields.
161 */
/* General-purpose DMA attributes: full 64-bit addressing, 16MB max xfer. */
ddi_dma_attr_t emlxs_dma_attr = {
    DMA_ATTR_V0,                            /* dma_attr_version */
    (uint64_t)0,                            /* dma_attr_addr_lo */
    (uint64_t)0xffffffffffffffff,           /* dma_attr_addr_hi */
    (uint64_t)0x00ffffff,                   /* dma_attr_count_max */
    1,                                      /* dma_attr_align */
    DEFAULT_BURSTSIZE | BURST32 | BURST64,  /* dma_attr_burstsizes */
    1,                                      /* dma_attr_minxfer */
    (uint64_t)0x00ffffff,                   /* dma_attr_maxxfer */
    (uint64_t)0xffffffff,                   /* dma_attr_seg */
    1,                                      /* dma_attr_sgllen */
    1,                                      /* dma_attr_granular */
    0                                       /* dma_attr_flags */
};
176
/*
 * Same as emlxs_dma_attr, but with DDI_DMA_RELAXED_ORDERING set;
 * used for FCP data buffers where strict PCI write ordering is not needed.
 */
ddi_dma_attr_t emlxs_dma_attr_ro = {
    DMA_ATTR_V0,                            /* dma_attr_version */
    (uint64_t)0,                            /* dma_attr_addr_lo */
    (uint64_t)0xffffffffffffffff,           /* dma_attr_addr_hi */
    (uint64_t)0x00ffffff,                   /* dma_attr_count_max */
    1,                                      /* dma_attr_align */
    DEFAULT_BURSTSIZE | BURST32 | BURST64,  /* dma_attr_burstsizes */
    1,                                      /* dma_attr_minxfer */
    (uint64_t)0x00ffffff,                   /* dma_attr_maxxfer */
    (uint64_t)0xffffffff,                   /* dma_attr_seg */
    1,                                      /* dma_attr_sgllen */
    1,                                      /* dma_attr_granular */
    DDI_DMA_RELAXED_ORDERING                /* dma_attr_flags */
};
191
/*
 * DMA attributes for buffers that must be a single contiguous segment
 * (FCP/FCIP/FCSM command and response buffers).
 * NOTE(review): fields are currently identical to emlxs_dma_attr
 * (dma_attr_sgllen == 1 in both); kept separate for intent/tuning.
 */
ddi_dma_attr_t emlxs_dma_attr_1sg = {
    DMA_ATTR_V0,                            /* dma_attr_version */
    (uint64_t)0,                            /* dma_attr_addr_lo */
    (uint64_t)0xffffffffffffffff,           /* dma_attr_addr_hi */
    (uint64_t)0x00ffffff,                   /* dma_attr_count_max */
    1,                                      /* dma_attr_align */
    DEFAULT_BURSTSIZE | BURST32 | BURST64,  /* dma_attr_burstsizes */
    1,                                      /* dma_attr_minxfer */
    (uint64_t)0x00ffffff,                   /* dma_attr_maxxfer */
    (uint64_t)0xffffffff,                   /* dma_attr_seg */
    1,                                      /* dma_attr_sgllen */
    1,                                      /* dma_attr_granular */
    0                                       /* dma_attr_flags */
};
206
#if (EMLXS_MODREV >= EMLXS_MODREV3)
/* DMA attributes for FC-IP response buffers (MODREV3+ transport only). */
ddi_dma_attr_t emlxs_dma_attr_fcip_rsp = {
    DMA_ATTR_V0,                            /* dma_attr_version */
    (uint64_t)0,                            /* dma_attr_addr_lo */
    (uint64_t)0xffffffffffffffff,           /* dma_attr_addr_hi */
    (uint64_t)0x00ffffff,                   /* dma_attr_count_max */
    1,                                      /* dma_attr_align */
    DEFAULT_BURSTSIZE | BURST32 | BURST64,  /* dma_attr_burstsizes */
    1,                                      /* dma_attr_minxfer */
    (uint64_t)0x00ffffff,                   /* dma_attr_maxxfer */
    (uint64_t)0xffffffff,                   /* dma_attr_seg */
    1,                                      /* dma_attr_sgllen */
    1,                                      /* dma_attr_granular */
    0                                       /* dma_attr_flags */
};
#endif /* >= EMLXS_MODREV3 */
223
224 /*
225 * DDI access attributes for device
226 */
227 ddi_device_acc_attr_t emlxs_dev_acc_attr = {
228 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */
229 DDI_STRUCTURE_LE_ACC, /* PCI is Little Endian */
230 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */
231 DDI_DEFAULT_ACC /* devacc_attr_access */
232 };
233
234 /*
235 * DDI access attributes for data
236 */
237 ddi_device_acc_attr_t emlxs_data_acc_attr = {
238 DDI_DEVICE_ATTR_V1, /* devacc_attr_version */
239 DDI_NEVERSWAP_ACC, /* don't swap for Data */
240 DDI_STRICTORDER_ACC, /* devacc_attr_dataorder */
241 DDI_DEFAULT_ACC /* devacc_attr_access */
242 };
243
244 /*
245 * Fill in the FC Transport structure,
246 * as defined in the Fibre Channel Transport Programmming Guide.
247 */
#if (EMLXS_MODREV == EMLXS_MODREV5)
/* MODREV5 FCA transport vector: adds SUN NPIV support (vport count, WWPN). */
static fc_fca_tran_t emlxs_fca_tran = {
    FCTL_FCA_MODREV_5,          /* fca_version, with SUN NPIV support */
    MAX_VPORTS,                 /* fca number of ports */
    sizeof (emlxs_buf_t),       /* fca pkt size */
    2048,                       /* fca cmd max */
    &emlxs_dma_lim,             /* fca dma limits */
    0,                          /* fca iblock, to be filled in later */
    &emlxs_dma_attr,            /* fca dma attributes */
    &emlxs_dma_attr_1sg,        /* fca dma fcp cmd attributes */
    &emlxs_dma_attr_1sg,        /* fca dma fcp rsp attributes */
    &emlxs_dma_attr_ro,         /* fca dma fcp data attributes */
    &emlxs_dma_attr_1sg,        /* fca dma fcip cmd attributes */
    &emlxs_dma_attr_fcip_rsp,   /* fca dma fcip rsp attributes */
    &emlxs_dma_attr_1sg,        /* fca dma fcsm cmd attributes */
    &emlxs_dma_attr,            /* fca dma fcsm rsp attributes */
    &emlxs_data_acc_attr,       /* fca access attributes */
    0,                          /* fca_num_npivports */
    {0, 0, 0, 0, 0, 0, 0, 0},   /* Physical port WWPN */
    emlxs_fca_bind_port,
    emlxs_fca_unbind_port,
    emlxs_fca_pkt_init,
    emlxs_fca_pkt_uninit,
    emlxs_fca_transport,
    emlxs_fca_get_cap,
    emlxs_fca_set_cap,
    emlxs_fca_get_map,
    emlxs_fca_transport,
    emlxs_fca_ub_alloc,
    emlxs_fca_ub_free,
    emlxs_fca_ub_release,
    emlxs_fca_pkt_abort,
    emlxs_fca_reset,
    emlxs_fca_port_manage,
    emlxs_fca_get_device,
    emlxs_fca_notify
};
#endif /* EMLXS_MODREV5 */
286
287
#if (EMLXS_MODREV == EMLXS_MODREV4)
/* MODREV4 FCA transport vector: per-class DMA attributes, no NPIV fields. */
static fc_fca_tran_t emlxs_fca_tran = {
    FCTL_FCA_MODREV_4,          /* fca_version */
    MAX_VPORTS,                 /* fca number of ports */
    sizeof (emlxs_buf_t),       /* fca pkt size */
    2048,                       /* fca cmd max */
    &emlxs_dma_lim,             /* fca dma limits */
    0,                          /* fca iblock, to be filled in later */
    &emlxs_dma_attr,            /* fca dma attributes */
    &emlxs_dma_attr_1sg,        /* fca dma fcp cmd attributes */
    &emlxs_dma_attr_1sg,        /* fca dma fcp rsp attributes */
    &emlxs_dma_attr_ro,         /* fca dma fcp data attributes */
    &emlxs_dma_attr_1sg,        /* fca dma fcip cmd attributes */
    &emlxs_dma_attr_fcip_rsp,   /* fca dma fcip rsp attributes */
    &emlxs_dma_attr_1sg,        /* fca dma fcsm cmd attributes */
    &emlxs_dma_attr,            /* fca dma fcsm rsp attributes */
    &emlxs_data_acc_attr,       /* fca access attributes */
    emlxs_fca_bind_port,
    emlxs_fca_unbind_port,
    emlxs_fca_pkt_init,
    emlxs_fca_pkt_uninit,
    emlxs_fca_transport,
    emlxs_fca_get_cap,
    emlxs_fca_set_cap,
    emlxs_fca_get_map,
    emlxs_fca_transport,
    emlxs_fca_ub_alloc,
    emlxs_fca_ub_free,
    emlxs_fca_ub_release,
    emlxs_fca_pkt_abort,
    emlxs_fca_reset,
    emlxs_fca_port_manage,
    emlxs_fca_get_device,
    emlxs_fca_notify
};
#endif /* EMLXS_MODREV4 */
324
325
#if (EMLXS_MODREV == EMLXS_MODREV3)
/* MODREV3 FCA transport vector: same layout as MODREV4 at this level. */
static fc_fca_tran_t emlxs_fca_tran = {
    FCTL_FCA_MODREV_3,          /* fca_version */
    MAX_VPORTS,                 /* fca number of ports */
    sizeof (emlxs_buf_t),       /* fca pkt size */
    2048,                       /* fca cmd max */
    &emlxs_dma_lim,             /* fca dma limits */
    0,                          /* fca iblock, to be filled in later */
    &emlxs_dma_attr,            /* fca dma attributes */
    &emlxs_dma_attr_1sg,        /* fca dma fcp cmd attributes */
    &emlxs_dma_attr_1sg,        /* fca dma fcp rsp attributes */
    &emlxs_dma_attr_ro,         /* fca dma fcp data attributes */
    &emlxs_dma_attr_1sg,        /* fca dma fcip cmd attributes */
    &emlxs_dma_attr_fcip_rsp,   /* fca dma fcip rsp attributes */
    &emlxs_dma_attr_1sg,        /* fca dma fcsm cmd attributes */
    &emlxs_dma_attr,            /* fca dma fcsm rsp attributes */
    &emlxs_data_acc_attr,       /* fca access attributes */
    emlxs_fca_bind_port,
    emlxs_fca_unbind_port,
    emlxs_fca_pkt_init,
    emlxs_fca_pkt_uninit,
    emlxs_fca_transport,
    emlxs_fca_get_cap,
    emlxs_fca_set_cap,
    emlxs_fca_get_map,
    emlxs_fca_transport,
    emlxs_fca_ub_alloc,
    emlxs_fca_ub_free,
    emlxs_fca_ub_release,
    emlxs_fca_pkt_abort,
    emlxs_fca_reset,
    emlxs_fca_port_manage,
    emlxs_fca_get_device,
    emlxs_fca_notify
};
#endif /* EMLXS_MODREV3 */
362
363
#if (EMLXS_MODREV == EMLXS_MODREV2)
/* MODREV2 FCA transport vector: single DMA attribute, no per-class attrs. */
static fc_fca_tran_t emlxs_fca_tran = {
    FCTL_FCA_MODREV_2,      /* fca_version */
    MAX_VPORTS,             /* number of ports */
    sizeof (emlxs_buf_t),   /* pkt size */
    2048,                   /* max cmds */
    &emlxs_dma_lim,         /* DMA limits */
    0,                      /* iblock, to be filled in later */
    &emlxs_dma_attr,        /* dma attributes */
    &emlxs_data_acc_attr,   /* access attributes */
    emlxs_fca_bind_port,
    emlxs_fca_unbind_port,
    emlxs_fca_pkt_init,
    emlxs_fca_pkt_uninit,
    emlxs_fca_transport,
    emlxs_fca_get_cap,
    emlxs_fca_set_cap,
    emlxs_fca_get_map,
    emlxs_fca_transport,
    emlxs_fca_ub_alloc,
    emlxs_fca_ub_free,
    emlxs_fca_ub_release,
    emlxs_fca_pkt_abort,
    emlxs_fca_reset,
    emlxs_fca_port_manage,
    emlxs_fca_get_device,
    emlxs_fca_notify
};
#endif /* EMLXS_MODREV2 */
393
394
395 /*
396 * state pointer which the implementation uses as a place to
397 * hang a set of per-driver structures;
398 *
399 */
400 void *emlxs_soft_state = NULL;
401
402 /*
403 * Driver Global variables.
404 */
405 int32_t emlxs_scsi_reset_delay = 3000; /* milliseconds */
406
407 emlxs_device_t emlxs_device;
408
409 uint32_t emlxs_instance[MAX_FC_BRDS]; /* uses emlxs_device.lock */
410 uint32_t emlxs_instance_count = 0; /* uses emlxs_device.lock */
411 uint32_t emlxs_instance_flag = 0; /* uses emlxs_device.lock */
412 #define EMLXS_FW_SHOW 0x00000001
413
414
415 /*
416 * CB ops vector. Used for administration only.
417 */
418 static struct cb_ops emlxs_cb_ops = {
419 emlxs_open, /* cb_open */
420 emlxs_close, /* cb_close */
421 nodev, /* cb_strategy */
422 nodev, /* cb_print */
423 nodev, /* cb_dump */
424 nodev, /* cb_read */
425 nodev, /* cb_write */
426 emlxs_ioctl, /* cb_ioctl */
427 nodev, /* cb_devmap */
428 nodev, /* cb_mmap */
429 nodev, /* cb_segmap */
430 nochpoll, /* cb_chpoll */
431 ddi_prop_op, /* cb_prop_op */
432 0, /* cb_stream */
433 #ifdef _LP64
434 D_64BIT | D_HOTPLUG | D_MP | D_NEW, /* cb_flag */
435 #else
436 D_HOTPLUG | D_MP | D_NEW, /* cb_flag */
437 #endif
438 CB_REV, /* rev */
439 nodev, /* cb_aread */
440 nodev /* cb_awrite */
441 };
442
/*
 * Device operations vector.  The quiesce entry is compiled in only for
 * x86 Solaris 11 builds; other builds end the initializer at power ops.
 */
static struct dev_ops emlxs_ops = {
    DEVO_REV,       /* rev */
    0,              /* refcnt */
    emlxs_info,     /* getinfo */
    nulldev,        /* identify */
    nulldev,        /* probe */
    emlxs_attach,   /* attach */
    emlxs_detach,   /* detach */
    nodev,          /* reset */
    &emlxs_cb_ops,  /* devo_cb_ops */
    NULL,           /* devo_bus_ops */
    emlxs_power,    /* power ops */
#ifdef EMLXS_I386
#ifdef S11
    emlxs_quiesce,  /* quiesce */
#endif /* S11 */
#endif /* EMLXS_I386 */
};
461
462 #include <sys/modctl.h>
463 extern struct mod_ops mod_driverops;
464
465 #ifdef SAN_DIAG_SUPPORT
466 extern kmutex_t emlxs_sd_bucket_mutex;
467 extern sd_bucket_info_t emlxs_sd_bucket;
468 #endif /* SAN_DIAG_SUPPORT */
469
470 /*
471 * Module linkage information for the kernel.
472 */
473 static struct modldrv emlxs_modldrv = {
474 &mod_driverops, /* module type - driver */
475 emlxs_name, /* module name */
476 &emlxs_ops, /* driver ops */
477 };
478
479
480 /*
481 * Driver module linkage structure
482 */
483 static struct modlinkage emlxs_modlinkage = {
484 MODREV_1, /* ml_rev - must be MODREV_1 */
485 &emlxs_modldrv, /* ml_linkage */
486 NULL /* end of driver linkage */
487 };
488
489
/*
 * Translation table: firmware IOSTAT completion codes -> Leadville
 * packet state/reason/explanation/action tuples.
 *
 * We only need to add entries for non-default return codes.
 * Entries do not need to be in order.
 * Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
 *          FC_EXPLN_NONE, FC_ACTION_RETRYABLE
 */
emlxs_xlat_err_t emlxs_iostat_tbl[] = {
/* {f/w code, pkt_state, pkt_reason, */
/* pkt_expln, pkt_action} */

    /* 0x00 - Do not remove */
    {IOSTAT_SUCCESS, FC_PKT_SUCCESS, FC_REASON_NONE,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x01 - Do not remove */
    {IOSTAT_FCP_RSP_ERROR, FC_PKT_SUCCESS, FC_REASON_NONE,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x02 */
    {IOSTAT_REMOTE_STOP, FC_PKT_REMOTE_STOP, FC_REASON_ABTS,
        FC_EXPLN_NONE, FC_ACTION_NON_RETRYABLE},

    /*
     * This is a default entry.
     * The real codes are written dynamically in emlxs_els.c
     */
    /* 0x09 */
    {IOSTAT_LS_RJT, FC_PKT_LS_RJT, FC_REASON_CMD_UNABLE,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* Special error code */
    /* 0x10 */
    {IOSTAT_DATA_OVERRUN, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* Special error code */
    /* 0x11 */
    {IOSTAT_DATA_UNDERRUN, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* Special error code */
    /* 0x12 */
    {IOSTAT_RSP_INVALID, FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* CLASS 2 only */
    /* 0x04 */
    {IOSTAT_NPORT_RJT, FC_PKT_NPORT_RJT, FC_REASON_PROTOCOL_ERROR,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* CLASS 2 only */
    /* 0x05 */
    {IOSTAT_FABRIC_RJT, FC_PKT_FABRIC_RJT, FC_REASON_PROTOCOL_ERROR,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* CLASS 2 only */
    /* 0x06 */
    {IOSTAT_NPORT_BSY, FC_PKT_NPORT_BSY, FC_REASON_PHYSICAL_BUSY,
        FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},

    /* CLASS 2 only */
    /* 0x07 */
    {IOSTAT_FABRIC_BSY, FC_PKT_FABRIC_BSY, FC_REASON_FABRIC_BSY,
        FC_EXPLN_NONE, FC_ACTION_SEQ_TERM_RETRY},
};

/* Number of entries in emlxs_iostat_tbl. */
#define IOSTAT_MAX (sizeof (emlxs_iostat_tbl)/sizeof (emlxs_xlat_err_t))
556
557
/*
 * Translation table: firmware local-reject IOERR codes -> Leadville
 * packet state/reason/explanation/action tuples.
 *
 * We only need to add entries for non-default return codes.
 * Entries do not need to be in order.
 * Default: FC_PKT_TRAN_ERROR, FC_REASON_ABORTED,
 *          FC_EXPLN_NONE, FC_ACTION_RETRYABLE
 */
emlxs_xlat_err_t emlxs_ioerr_tbl[] = {
/* {f/w code, pkt_state, pkt_reason, */
/* pkt_expln, pkt_action} */

    /* 0x01 */
    {IOERR_MISSING_CONTINUE, FC_PKT_TRAN_ERROR, FC_REASON_OVERRUN,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x02 */
    {IOERR_SEQUENCE_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x04 */
    {IOERR_INVALID_RPI, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x05 */
    {IOERR_NO_XRI, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x06 */
    {IOERR_ILLEGAL_COMMAND, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x07 */
    {IOERR_XCHG_DROPPED, FC_PKT_LOCAL_RJT, FC_REASON_XCHG_DROPPED,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x08 */
    {IOERR_ILLEGAL_FIELD, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_REQ,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x0B */
    {IOERR_RCV_BUFFER_WAITING, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x0D */
    {IOERR_TX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x0E */
    {IOERR_RX_DMA_FAILED, FC_PKT_LOCAL_RJT, FC_REASON_DMA_ERROR,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x0F */
    {IOERR_ILLEGAL_FRAME, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_FRAME,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x11 */
    {IOERR_NO_RESOURCES, FC_PKT_LOCAL_RJT, FC_REASON_NOMEM,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x13 */
    {IOERR_ILLEGAL_LENGTH, FC_PKT_LOCAL_RJT, FC_REASON_ILLEGAL_LENGTH,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x14 */
    {IOERR_UNSUPPORTED_FEATURE, FC_PKT_LOCAL_RJT, FC_REASON_UNSUPPORTED,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x15 */
    {IOERR_ABORT_IN_PROGRESS, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x16 */
    {IOERR_ABORT_REQUESTED, FC_PKT_LOCAL_RJT, FC_REASON_ABORTED,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x17 */
    {IOERR_RCV_BUFFER_TIMEOUT, FC_PKT_LOCAL_RJT, FC_REASON_RX_BUF_TIMEOUT,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x18 */
    {IOERR_LOOP_OPEN_FAILURE, FC_PKT_LOCAL_RJT, FC_REASON_FCAL_OPN_FAIL,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x1A */
    {IOERR_LINK_DOWN, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0x21 */
    {IOERR_BAD_HOST_ADDRESS, FC_PKT_LOCAL_RJT, FC_REASON_BAD_SID,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* Occurs at link down */
    /* 0x28 */
    {IOERR_BUFFER_SHORTAGE, FC_PKT_PORT_OFFLINE, FC_REASON_OFFLINE,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},

    /* 0xF0 */
    {IOERR_ABORT_TIMEOUT, FC_PKT_TIMEOUT, FC_REASON_SEQ_TIMEOUT,
        FC_EXPLN_NONE, FC_ACTION_RETRYABLE},
};

/* Number of entries in emlxs_ioerr_tbl. */
#define IOERR_MAX (sizeof (emlxs_ioerr_tbl)/sizeof (emlxs_xlat_err_t))
658
659
660
661 emlxs_table_t emlxs_error_table[] = {
662 {IOERR_SUCCESS, "No error."},
663 {IOERR_MISSING_CONTINUE, "Missing continue."},
664 {IOERR_SEQUENCE_TIMEOUT, "Sequence timeout."},
665 {IOERR_INTERNAL_ERROR, "Internal error."},
666 {IOERR_INVALID_RPI, "Invalid RPI."},
667 {IOERR_NO_XRI, "No XRI."},
668 {IOERR_ILLEGAL_COMMAND, "Illegal command."},
669 {IOERR_XCHG_DROPPED, "Exchange dropped."},
670 {IOERR_ILLEGAL_FIELD, "Illegal field."},
671 {IOERR_RCV_BUFFER_WAITING, "RX buffer waiting."},
672 {IOERR_TX_DMA_FAILED, "TX DMA failed."},
673 {IOERR_RX_DMA_FAILED, "RX DMA failed."},
674 {IOERR_ILLEGAL_FRAME, "Illegal frame."},
675 {IOERR_NO_RESOURCES, "No resources."},
676 {IOERR_ILLEGAL_LENGTH, "Illegal length."},
677 {IOERR_UNSUPPORTED_FEATURE, "Unsupported feature."},
678 {IOERR_ABORT_IN_PROGRESS, "Abort in progess."},
679 {IOERR_ABORT_REQUESTED, "Abort requested."},
680 {IOERR_RCV_BUFFER_TIMEOUT, "RX buffer timeout."},
681 {IOERR_LOOP_OPEN_FAILURE, "Loop open failed."},
682 {IOERR_RING_RESET, "Ring reset."},
683 {IOERR_LINK_DOWN, "Link down."},
684 {IOERR_CORRUPTED_DATA, "Corrupted data."},
685 {IOERR_CORRUPTED_RPI, "Corrupted RPI."},
686 {IOERR_OUT_OF_ORDER_DATA, "Out-of-order data."},
687 {IOERR_OUT_OF_ORDER_ACK, "Out-of-order ack."},
688 {IOERR_DUP_FRAME, "Duplicate frame."},
689 {IOERR_LINK_CONTROL_FRAME, "Link control frame."},
690 {IOERR_BAD_HOST_ADDRESS, "Bad host address."},
691 {IOERR_RCV_HDRBUF_WAITING, "RX header buffer waiting."},
692 {IOERR_MISSING_HDR_BUFFER, "Missing header buffer."},
693 {IOERR_MSEQ_CHAIN_CORRUPTED, "MSEQ chain corrupted."},
694 {IOERR_ABORTMULT_REQUESTED, "Abort multiple requested."},
695 {IOERR_BUFFER_SHORTAGE, "Buffer shortage."},
696 {IOERR_XRIBUF_WAITING, "XRI buffer shortage"},
697 {IOERR_XRIBUF_MISSING, "XRI buffer missing"},
698 {IOERR_ROFFSET_INVAL, "Relative offset invalid."},
699 {IOERR_ROFFSET_MISSING, "Relative offset missing."},
700 {IOERR_INSUF_BUFFER, "Buffer too small."},
701 {IOERR_MISSING_SI, "ELS frame missing SI"},
702 {IOERR_MISSING_ES, "Exhausted burst without ES"},
703 {IOERR_INCOMP_XFER, "Transfer incomplete."},
704 {IOERR_ABORT_TIMEOUT, "Abort timeout."}
705
706 }; /* emlxs_error_table */
707
708
/*
 * Human-readable description strings for firmware IOSTAT completion
 * states, used when formatting log/diagnostic messages.
 */
emlxs_table_t emlxs_state_table[] = {
    {IOSTAT_SUCCESS, "Success."},
    {IOSTAT_FCP_RSP_ERROR, "FCP response error."},
    {IOSTAT_REMOTE_STOP, "Remote stop."},
    {IOSTAT_LOCAL_REJECT, "Local reject."},
    {IOSTAT_NPORT_RJT, "NPort reject."},
    {IOSTAT_FABRIC_RJT, "Fabric reject."},
    {IOSTAT_NPORT_BSY, "Nport busy."},
    {IOSTAT_FABRIC_BSY, "Fabric busy."},
    {IOSTAT_INTERMED_RSP, "Intermediate response."},
    {IOSTAT_LS_RJT, "LS reject."},
    {IOSTAT_CMD_REJECT, "Cmd reject."},
    {IOSTAT_FCP_TGT_LENCHK, "TGT length check."},
    {IOSTAT_NEED_BUFF_ENTRY, "Need buffer entry."},
    {IOSTAT_DATA_UNDERRUN, "Data underrun."},
    {IOSTAT_DATA_OVERRUN, "Data overrun."},
    {IOSTAT_RSP_INVALID, "Response Invalid."},

}; /* emlxs_state_table */
728
729
730 #ifdef MENLO_SUPPORT
/* Menlo (FCoE converged adapter) command code -> name strings for logging. */
emlxs_table_t emlxs_menlo_cmd_table[] = {
    {MENLO_CMD_INITIALIZE, "MENLO_INIT"},
    {MENLO_CMD_FW_DOWNLOAD, "MENLO_FW_DOWNLOAD"},
    {MENLO_CMD_READ_MEMORY, "MENLO_READ_MEM"},
    {MENLO_CMD_WRITE_MEMORY, "MENLO_WRITE_MEM"},
    {MENLO_CMD_FTE_INSERT, "MENLO_FTE_INSERT"},
    {MENLO_CMD_FTE_DELETE, "MENLO_FTE_DELETE"},

    {MENLO_CMD_GET_INIT, "MENLO_GET_INIT"},
    {MENLO_CMD_GET_CONFIG, "MENLO_GET_CONFIG"},
    {MENLO_CMD_GET_PORT_STATS, "MENLO_GET_PORT_STATS"},
    {MENLO_CMD_GET_LIF_STATS, "MENLO_GET_LIF_STATS"},
    {MENLO_CMD_GET_ASIC_STATS, "MENLO_GET_ASIC_STATS"},
    {MENLO_CMD_GET_LOG_CONFIG, "MENLO_GET_LOG_CFG"},
    {MENLO_CMD_GET_LOG_DATA, "MENLO_GET_LOG_DATA"},
    {MENLO_CMD_GET_PANIC_LOG, "MENLO_GET_PANIC_LOG"},
    {MENLO_CMD_GET_LB_MODE, "MENLO_GET_LB_MODE"},

    {MENLO_CMD_SET_PAUSE, "MENLO_SET_PAUSE"},
    {MENLO_CMD_SET_FCOE_COS, "MENLO_SET_FCOE_COS"},
    {MENLO_CMD_SET_UIF_PORT_TYPE, "MENLO_SET_UIF_TYPE"},

    {MENLO_CMD_DIAGNOSTICS, "MENLO_DIAGNOSTICS"},
    {MENLO_CMD_LOOPBACK, "MENLO_LOOPBACK"},

    {MENLO_CMD_RESET, "MENLO_RESET"},
    {MENLO_CMD_SET_MODE, "MENLO_SET_MODE"}

}; /* emlxs_menlo_cmd_table */
760
/* Menlo response/error code -> name strings for logging. */
emlxs_table_t emlxs_menlo_rsp_table[] = {
    {MENLO_RSP_SUCCESS, "SUCCESS"},
    {MENLO_ERR_FAILED, "FAILED"},
    {MENLO_ERR_INVALID_CMD, "INVALID_CMD"},
    {MENLO_ERR_INVALID_CREDIT, "INVALID_CREDIT"},
    {MENLO_ERR_INVALID_SIZE, "INVALID_SIZE"},
    {MENLO_ERR_INVALID_ADDRESS, "INVALID_ADDRESS"},
    {MENLO_ERR_INVALID_CONTEXT, "INVALID_CONTEXT"},
    {MENLO_ERR_INVALID_LENGTH, "INVALID_LENGTH"},
    {MENLO_ERR_INVALID_TYPE, "INVALID_TYPE"},
    {MENLO_ERR_INVALID_DATA, "INVALID_DATA"},
    {MENLO_ERR_INVALID_VALUE1, "INVALID_VALUE1"},
    {MENLO_ERR_INVALID_VALUE2, "INVALID_VALUE2"},
    {MENLO_ERR_INVALID_MASK, "INVALID_MASK"},
    {MENLO_ERR_CHECKSUM, "CHECKSUM_ERROR"},
    {MENLO_ERR_UNKNOWN_FCID, "UNKNOWN_FCID"},
    {MENLO_ERR_UNKNOWN_WWN, "UNKNOWN_WWN"},
    {MENLO_ERR_BUSY, "BUSY"},

}; /* emlxs_menlo_rsp_table */
781
782 #endif /* MENLO_SUPPORT */
783
784
/* FC Management Service (FDMI/MS) CT command code -> name strings. */
emlxs_table_t emlxs_mscmd_table[] = {
    {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
    {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
    {MS_GTIN, "MS_GTIN"},
    {MS_GIEL, "MS_GIEL"},
    {MS_GIET, "MS_GIET"},
    {MS_GDID, "MS_GDID"},
    {MS_GMID, "MS_GMID"},
    {MS_GFN, "MS_GFN"},
    {MS_GIELN, "MS_GIELN"},
    {MS_GMAL, "MS_GMAL"},
    {MS_GIEIL, "MS_GIEIL"},
    {MS_GPL, "MS_GPL"},
    {MS_GPT, "MS_GPT"},
    {MS_GPPN, "MS_GPPN"},
    {MS_GAPNL, "MS_GAPNL"},
    {MS_GPS, "MS_GPS"},
    {MS_GPSC, "MS_GPSC"},
    {MS_GATIN, "MS_GATIN"},
    {MS_GSES, "MS_GSES"},
    {MS_GPLNL, "MS_GPLNL"},
    {MS_GPLT, "MS_GPLT"},
    {MS_GPLML, "MS_GPLML"},
    {MS_GPAB, "MS_GPAB"},
    {MS_GNPL, "MS_GNPL"},
    {MS_GPNL, "MS_GPNL"},
    {MS_GPFCP, "MS_GPFCP"},
    {MS_GPLI, "MS_GPLI"},
    {MS_GNID, "MS_GNID"},
    {MS_RIELN, "MS_RIELN"},
    {MS_RPL, "MS_RPL"},
    {MS_RPLN, "MS_RPLN"},
    {MS_RPLT, "MS_RPLT"},
    {MS_RPLM, "MS_RPLM"},
    {MS_RPAB, "MS_RPAB"},
    {MS_RPFCP, "MS_RPFCP"},
    {MS_RPLI, "MS_RPLI"},
    {MS_DPL, "MS_DPL"},
    {MS_DPLN, "MS_DPLN"},
    {MS_DPLM, "MS_DPLM"},
    {MS_DPLML, "MS_DPLML"},
    {MS_DPLI, "MS_DPLI"},
    {MS_DPAB, "MS_DPAB"},
    {MS_DPALL, "MS_DPALL"}

}; /* emlxs_mscmd_table */
831
832
/* FC Name Server CT command code -> name strings for logging. */
emlxs_table_t emlxs_ctcmd_table[] = {
    {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
    {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
    {SLI_CTNS_GA_NXT, "GA_NXT"},
    {SLI_CTNS_GPN_ID, "GPN_ID"},
    {SLI_CTNS_GNN_ID, "GNN_ID"},
    {SLI_CTNS_GCS_ID, "GCS_ID"},
    {SLI_CTNS_GFT_ID, "GFT_ID"},
    {SLI_CTNS_GSPN_ID, "GSPN_ID"},
    {SLI_CTNS_GPT_ID, "GPT_ID"},
    {SLI_CTNS_GID_PN, "GID_PN"},
    {SLI_CTNS_GID_NN, "GID_NN"},
    {SLI_CTNS_GIP_NN, "GIP_NN"},
    {SLI_CTNS_GIPA_NN, "GIPA_NN"},
    {SLI_CTNS_GSNN_NN, "GSNN_NN"},
    {SLI_CTNS_GNN_IP, "GNN_IP"},
    {SLI_CTNS_GIPA_IP, "GIPA_IP"},
    {SLI_CTNS_GID_FT, "GID_FT"},
    {SLI_CTNS_GID_PT, "GID_PT"},
    {SLI_CTNS_RPN_ID, "RPN_ID"},
    {SLI_CTNS_RNN_ID, "RNN_ID"},
    {SLI_CTNS_RCS_ID, "RCS_ID"},
    {SLI_CTNS_RFT_ID, "RFT_ID"},
    {SLI_CTNS_RSPN_ID, "RSPN_ID"},
    {SLI_CTNS_RPT_ID, "RPT_ID"},
    {SLI_CTNS_RIP_NN, "RIP_NN"},
    {SLI_CTNS_RIPA_NN, "RIPA_NN"},
    {SLI_CTNS_RSNN_NN, "RSNN_NN"},
    {SLI_CTNS_DA_ID, "DA_ID"},
    {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */

}; /* emlxs_ctcmd_table */
865
866
867
/* Emulex remote-management CT command code -> name strings for logging. */
emlxs_table_t emlxs_rmcmd_table[] = {
    {SLI_CT_RESPONSE_FS_ACC, "CT_ACC"},
    {SLI_CT_RESPONSE_FS_RJT, "CT_RJT"},
    {CT_OP_GSAT, "RM_GSAT"},
    {CT_OP_GHAT, "RM_GHAT"},
    {CT_OP_GPAT, "RM_GPAT"},
    {CT_OP_GDAT, "RM_GDAT"},
    {CT_OP_GPST, "RM_GPST"},
    {CT_OP_GDP, "RM_GDP"},
    {CT_OP_GDPG, "RM_GDPG"},
    {CT_OP_GEPS, "RM_GEPS"},
    {CT_OP_GLAT, "RM_GLAT"},
    {CT_OP_SSAT, "RM_SSAT"},
    {CT_OP_SHAT, "RM_SHAT"},
    {CT_OP_SPAT, "RM_SPAT"},
    {CT_OP_SDAT, "RM_SDAT"},
    {CT_OP_SDP, "RM_SDP"},
    {CT_OP_SBBS, "RM_SBBS"},
    {CT_OP_RPST, "RM_RPST"},
    {CT_OP_VFW, "RM_VFW"},
    {CT_OP_DFW, "RM_DFW"},
    {CT_OP_RES, "RM_RES"},
    {CT_OP_RHD, "RM_RHD"},
    {CT_OP_UFW, "RM_UFW"},
    {CT_OP_RDP, "RM_RDP"},
    {CT_OP_GHDR, "RM_GHDR"},
    {CT_OP_CHD, "RM_CHD"},
    {CT_OP_SSR, "RM_SSR"},
    {CT_OP_RSAT, "RM_RSAT"},
    {CT_OP_WSAT, "RM_WSAT"},
    {CT_OP_RSAH, "RM_RSAH"},
    {CT_OP_WSAH, "RM_WSAH"},
    {CT_OP_RACT, "RM_RACT"},
    {CT_OP_WACT, "RM_WACT"},
    {CT_OP_RKT, "RM_RKT"},
    {CT_OP_WKT, "RM_WKT"},
    {CT_OP_SSC, "RM_SSC"},
    {CT_OP_QHBA, "RM_QHBA"},
    {CT_OP_GST, "RM_GST"},
    {CT_OP_GFTM, "RM_GFTM"},
    {CT_OP_SRL, "RM_SRL"},
    {CT_OP_SI, "RM_SI"},
    {CT_OP_SRC, "RM_SRC"},
    {CT_OP_GPB, "RM_GPB"},
    {CT_OP_SPB, "RM_SPB"},
    {CT_OP_RPB, "RM_RPB"},
    {CT_OP_RAPB, "RM_RAPB"},
    {CT_OP_GBC, "RM_GBC"},
    {CT_OP_GBS, "RM_GBS"},
    {CT_OP_SBS, "RM_SBS"},
    {CT_OP_GANI, "RM_GANI"},
    {CT_OP_GRV, "RM_GRV"},
    {CT_OP_GAPBS, "RM_GAPBS"},
    {CT_OP_APBC, "RM_APBC"},
    {CT_OP_GDT, "RM_GDT"},
    {CT_OP_GDLMI, "RM_GDLMI"},
    {CT_OP_GANA, "RM_GANA"},
    {CT_OP_GDLV, "RM_GDLV"},
    {CT_OP_GWUP, "RM_GWUP"},
    {CT_OP_GLM, "RM_GLM"},
    {CT_OP_GABS, "RM_GABS"},
    {CT_OP_SABS, "RM_SABS"},
    {CT_OP_RPR, "RM_RPR"},
    {SLI_CT_LOOPBACK, "LOOPBACK"} /* Driver special */

}; /* emlxs_rmcmd_table */
934
935
/* Extended Link Service (ELS) command code -> name strings for logging. */
emlxs_table_t emlxs_elscmd_table[] = {
    {ELS_CMD_ACC, "ACC"},
    {ELS_CMD_LS_RJT, "LS_RJT"},
    {ELS_CMD_PLOGI, "PLOGI"},
    {ELS_CMD_FLOGI, "FLOGI"},
    {ELS_CMD_LOGO, "LOGO"},
    {ELS_CMD_ABTX, "ABTX"},
    {ELS_CMD_RCS, "RCS"},
    {ELS_CMD_RES, "RES"},
    {ELS_CMD_RSS, "RSS"},
    {ELS_CMD_RSI, "RSI"},
    {ELS_CMD_ESTS, "ESTS"},
    {ELS_CMD_ESTC, "ESTC"},
    {ELS_CMD_ADVC, "ADVC"},
    {ELS_CMD_RTV, "RTV"},
    {ELS_CMD_RLS, "RLS"},
    {ELS_CMD_ECHO, "ECHO"},
    {ELS_CMD_TEST, "TEST"},
    {ELS_CMD_RRQ, "RRQ"},
    {ELS_CMD_REC, "REC"},
    {ELS_CMD_PRLI, "PRLI"},
    {ELS_CMD_PRLO, "PRLO"},
    {ELS_CMD_SCN, "SCN"},
    {ELS_CMD_TPLS, "TPLS"},
    {ELS_CMD_GPRLO, "GPRLO"},
    {ELS_CMD_GAID, "GAID"},
    {ELS_CMD_FACT, "FACT"},
    {ELS_CMD_FDACT, "FDACT"},
    {ELS_CMD_NACT, "NACT"},
    {ELS_CMD_NDACT, "NDACT"},
    {ELS_CMD_QoSR, "QoSR"},
    {ELS_CMD_RVCS, "RVCS"},
    {ELS_CMD_PDISC, "PDISC"},
    {ELS_CMD_FDISC, "FDISC"},
    {ELS_CMD_ADISC, "ADISC"},
    {ELS_CMD_FARP, "FARP"},
    {ELS_CMD_FARPR, "FARPR"},
    {ELS_CMD_FAN, "FAN"},
    {ELS_CMD_RSCN, "RSCN"},
    {ELS_CMD_SCR, "SCR"},
    {ELS_CMD_LINIT, "LINIT"},
    {ELS_CMD_RNID, "RNID"},
    {ELS_CMD_AUTH, "AUTH"}

}; /* emlxs_elscmd_table */
981
982
/*
 * Lookup table mapping port mode flags (MODE_*) to printable names
 * for log messages.
 */
emlxs_table_t emlxs_mode_table[] = {
	{MODE_NONE, "NONE"},
	{MODE_INITIATOR, "INITIATOR"},
	{MODE_TARGET, "TARGET"},
	{MODE_ALL, "INITIATOR | TARGET"}
}; /* emlxs_mode_table */
989
990 /*
991 *
992 * Device Driver Entry Routines
993 *
994 */
995
996 #ifdef MODSYM_SUPPORT
997 static void emlxs_fca_modclose();
998 static int emlxs_fca_modopen();
999 emlxs_modsym_t emlxs_modsym; /* uses emlxs_device.lock */
1000
1001 static int
1002 emlxs_fca_modopen()
1003 {
1004 int err;
1005
1006 if (emlxs_modsym.mod_fctl) {
1007 return (0);
1008 }
1009
1010 /* Leadville (fctl) */
1011 err = 0;
1012 emlxs_modsym.mod_fctl =
1013 ddi_modopen("misc/fctl", KRTLD_MODE_FIRST, &err);
1014 if (!emlxs_modsym.mod_fctl) {
1015 cmn_err(CE_WARN,
1016 "?%s: misc/fctl: ddi_modopen misc/fctl failed: error=%d",
1017 DRIVER_NAME, err);
1018
1019 goto failed;
1020 }
1021
1022 err = 0;
1023 /* Check if the fctl fc_fca_attach is present */
1024 emlxs_modsym.fc_fca_attach =
1025 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_attach",
1026 &err);
1027 if ((void *)emlxs_modsym.fc_fca_attach == NULL) {
1028 cmn_err(CE_WARN,
1029 "?%s: misc/fctl: fc_fca_attach not present", DRIVER_NAME);
1030 goto failed;
1031 }
1032
1033 err = 0;
1034 /* Check if the fctl fc_fca_detach is present */
1035 emlxs_modsym.fc_fca_detach =
1036 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_detach",
1037 &err);
1038 if ((void *)emlxs_modsym.fc_fca_detach == NULL) {
1039 cmn_err(CE_WARN,
1040 "?%s: misc/fctl: fc_fca_detach not present", DRIVER_NAME);
1041 goto failed;
1042 }
1043
1044 err = 0;
1045 /* Check if the fctl fc_fca_init is present */
1046 emlxs_modsym.fc_fca_init =
1047 (int (*)())ddi_modsym(emlxs_modsym.mod_fctl, "fc_fca_init", &err);
1048 if ((void *)emlxs_modsym.fc_fca_init == NULL) {
1049 cmn_err(CE_WARN,
1050 "?%s: misc/fctl: fc_fca_init not present", DRIVER_NAME);
1051 goto failed;
1052 }
1053
1054 return (0);
1055
1056 failed:
1057
1058 emlxs_fca_modclose();
1059
1060 return (1);
1061
1062
1063 } /* emlxs_fca_modopen() */
1064
1065
1066 static void
1067 emlxs_fca_modclose()
1068 {
1069 if (emlxs_modsym.mod_fctl) {
1070 (void) ddi_modclose(emlxs_modsym.mod_fctl);
1071 emlxs_modsym.mod_fctl = 0;
1072 }
1073
1074 emlxs_modsym.fc_fca_attach = NULL;
1075 emlxs_modsym.fc_fca_detach = NULL;
1076 emlxs_modsym.fc_fca_init = NULL;
1077
1078 return;
1079
1080 } /* emlxs_fca_modclose() */
1081
1082 #endif /* MODSYM_SUPPORT */
1083
1084
1085
1086 /*
1087 * Global driver initialization, called once when driver is loaded
1088 */
1089 int
1090 _init(void)
1091 {
1092 int ret;
1093
1094 /*
1095 * First init call for this driver,
1096 * so initialize the emlxs_dev_ctl structure.
1097 */
1098 bzero(&emlxs_device, sizeof (emlxs_device));
1099
1100 #ifdef MODSYM_SUPPORT
1101 bzero(&emlxs_modsym, sizeof (emlxs_modsym_t));
1102 #endif /* MODSYM_SUPPORT */
1103
1104 mutex_init(&emlxs_device.lock, NULL, MUTEX_DRIVER, NULL);
1105
1106 (void) drv_getparm(LBOLT, &emlxs_device.log_timestamp);
1107 emlxs_device.drv_timestamp = ddi_get_time();
1108
1109 for (ret = 0; ret < MAX_FC_BRDS; ret++) {
1110 emlxs_instance[ret] = (uint32_t)-1;
1111 }
1112
1113 /*
1114 * Provide for one ddiinst of the emlxs_dev_ctl structure
1115 * for each possible board in the system.
1116 */
1117 if ((ret = ddi_soft_state_init(&emlxs_soft_state,
1118 sizeof (emlxs_hba_t), MAX_FC_BRDS)) != 0) {
1119 cmn_err(CE_WARN,
1120 "?%s: _init: ddi_soft_state_init failed. rval=%x",
1121 DRIVER_NAME, ret);
1122
1123 return (ret);
1124 }
1125
1126 #ifdef MODSYM_SUPPORT
1127 /* Open SFS */
1128 (void) emlxs_fca_modopen();
1129 #endif /* MODSYM_SUPPORT */
1130
1131 /* Setup devops for SFS */
1132 MODSYM(fc_fca_init)(&emlxs_ops);
1133
1134 if ((ret = mod_install(&emlxs_modlinkage)) != 0) {
1135 (void) ddi_soft_state_fini(&emlxs_soft_state);
1136 #ifdef MODSYM_SUPPORT
1137 /* Close SFS */
1138 emlxs_fca_modclose();
1139 #endif /* MODSYM_SUPPORT */
1140
1141 return (ret);
1142 }
1143
1144 #ifdef SAN_DIAG_SUPPORT
1145 mutex_init(&emlxs_sd_bucket_mutex, NULL, MUTEX_DRIVER, NULL);
1146 #endif /* SAN_DIAG_SUPPORT */
1147
1148 return (ret);
1149
1150 } /* _init() */
1151
1152
1153 /*
1154 * Called when driver is unloaded.
1155 */
1156 int
1157 _fini(void)
1158 {
1159 int ret;
1160
1161 if ((ret = mod_remove(&emlxs_modlinkage)) != 0) {
1162 return (ret);
1163 }
1164 #ifdef MODSYM_SUPPORT
1165 /* Close SFS */
1166 emlxs_fca_modclose();
1167 #endif /* MODSYM_SUPPORT */
1168
1169 /*
1170 * Destroy the soft state structure
1171 */
1172 (void) ddi_soft_state_fini(&emlxs_soft_state);
1173
1174 /* Destroy the global device lock */
1175 mutex_destroy(&emlxs_device.lock);
1176
1177 #ifdef SAN_DIAG_SUPPORT
1178 mutex_destroy(&emlxs_sd_bucket_mutex);
1179 #endif /* SAN_DIAG_SUPPORT */
1180
1181 return (ret);
1182
1183 } /* _fini() */
1184
1185
1186
1187 int
1188 _info(struct modinfo *modinfop)
1189 {
1190
1191 return (mod_info(&emlxs_modlinkage, modinfop));
1192
1193 } /* _info() */
1194
1195
1196 /*
1197 * Attach an ddiinst of an emlx host adapter.
1198 * Allocate data structures, initialize the adapter and we're ready to fly.
1199 */
1200 static int
1201 emlxs_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1202 {
1203 emlxs_hba_t *hba;
1204 int ddiinst;
1205 int emlxinst;
1206 int rval;
1207
1208 switch (cmd) {
1209 case DDI_ATTACH:
1210 /* If successful this will set EMLXS_PM_IN_ATTACH */
1211 rval = emlxs_hba_attach(dip);
1212 break;
1213
1214 case DDI_RESUME:
1215 /* This will resume the driver */
1216 rval = emlxs_hba_resume(dip);
1217 break;
1218
1219 default:
1220 rval = DDI_FAILURE;
1221 }
1222
1223 if (rval == DDI_SUCCESS) {
1224 ddiinst = ddi_get_instance(dip);
1225 emlxinst = emlxs_get_instance(ddiinst);
1226 hba = emlxs_device.hba[emlxinst];
1227
1228 if ((hba != NULL) && (hba != (emlxs_hba_t *)-1)) {
1229
1230 /* Enable driver dump feature */
1231 mutex_enter(&EMLXS_PORT_LOCK);
1232 hba->flag |= FC_DUMP_SAFE;
1233 mutex_exit(&EMLXS_PORT_LOCK);
1234 }
1235 }
1236
1237 return (rval);
1238
1239 } /* emlxs_attach() */
1240
1241
1242 /*
1243 * Detach/prepare driver to unload (see detach(9E)).
1244 */
static int
emlxs_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int ddiinst;
	int emlxinst;
	int rval;

	/* Map the DDI instance back to our per-board HBA structure */
	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_get_instance(ddiinst);
	hba = emlxs_device.hba[emlxinst];

	if (hba == NULL) {
		cmn_err(CE_WARN, "?%s: Detach: NULL device.", DRIVER_NAME);

		return (DDI_FAILURE);
	}

	/* -1 marks a slot whose attach previously failed */
	if (hba == (emlxs_hba_t *)-1) {
		cmn_err(CE_WARN, "?%s: Detach: Device attach failed.",
		    DRIVER_NAME);

		return (DDI_FAILURE);
	}

	port = &PPORT;
	rval = DDI_SUCCESS;

	/* Check driver dump */
	mutex_enter(&EMLXS_PORT_LOCK);

	/* Refuse to detach while a firmware/driver dump is in progress */
	if (hba->flag & FC_DUMP_ACTIVE) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "detach: Driver busy. Driver dump active.");

		return (DDI_FAILURE);
	}

#ifdef SFCT_SUPPORT
	/* Refuse to detach while target mode is still active/unacked */
	if ((port->flag & EMLXS_TGT_BOUND) &&
	    ((port->fct_flags & FCT_STATE_PORT_ONLINE) ||
	    (port->fct_flags & FCT_STATE_NOT_ACKED))) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "detach: Driver busy. Target mode active.");

		return (DDI_FAILURE);
	}
#endif /* SFCT_SUPPORT */

	/* Refuse to detach while still bound to the initiator transport */
	if (port->flag & EMLXS_INI_BOUND) {
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
		    "detach: Driver busy. Initiator mode active.");

		return (DDI_FAILURE);
	}

	/* Block driver dumps for the duration of the detach attempt */
	hba->flag &= ~FC_DUMP_SAFE;

	mutex_exit(&EMLXS_PORT_LOCK);

	switch (cmd) {
	case DDI_DETACH:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_DETACH");

		rval = emlxs_hba_detach(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to detach.");
		}
		break;

	case DDI_SUSPEND:

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg,
		    "DDI_SUSPEND");

		/* Suspend the driver */
		rval = emlxs_hba_suspend(dip);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
			    "Unable to suspend driver.");
		}
		break;

	default:
		cmn_err(CE_WARN, "?%s: Detach: Unknown cmd received. cmd=%x",
		    DRIVER_NAME, cmd);
		rval = DDI_FAILURE;
	}

	if (rval == DDI_FAILURE) {
		/* Re-Enable driver dump feature */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag |= FC_DUMP_SAFE;
		mutex_exit(&EMLXS_PORT_LOCK);
	}

	return (rval);

} /* emlxs_detach() */
1356
1357
1358 /* EMLXS_PORT_LOCK must be held when calling this */
/* EMLXS_PORT_LOCK must be held when calling this */
extern void
emlxs_port_init(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;

	/* Initialize the base node */
	bzero((caddr_t)&port->node_base, sizeof (NODELIST));
	port->node_base.nlp_Rpi = 0;
	/* Base node uses the broadcast DID (0xffffff) */
	port->node_base.nlp_DID = 0xffffff;
	port->node_base.nlp_list_next = NULL;
	port->node_base.nlp_list_prev = NULL;
	port->node_base.nlp_active = 1;
	port->node_base.nlp_base = 1;
	port->node_count = 0;

	/*
	 * Port not yet enabled: stamp placeholder (all-FF) world wide
	 * names until real WWNs are assigned.
	 */
	if (!(port->flag & EMLXS_PORT_ENABLED)) {
		uint8_t dummy_wwn[8] =
		    { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)dummy_wwn, (caddr_t)&port->wwpn,
		    sizeof (NAME_TYPE));
	}

	/*
	 * Port not individually configured: inherit the HBA's symbolic
	 * node/port names.  NOTE(review): strncpy with size-1 does not
	 * guarantee NUL termination if hba->snn/spn fill the buffer --
	 * presumably port->snn/spn were zeroed beforehand; confirm.
	 */
	if (!(port->flag & EMLXS_PORT_CONFIG)) {
		(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
		    (sizeof (port->snn)-1));
		(void) strncpy((caddr_t)port->spn, (caddr_t)hba->spn,
		    (sizeof (port->spn)-1));
	}

	/* Start from the HBA service parameters, then overlay this */
	/* port's own node/port names */
	bcopy((caddr_t)&hba->sparam, (caddr_t)&port->sparam,
	    sizeof (SERV_PARM));
	bcopy((caddr_t)&port->wwnn, (caddr_t)&port->sparam.nodeName,
	    sizeof (NAME_TYPE));
	bcopy((caddr_t)&port->wwpn, (caddr_t)&port->sparam.portName,
	    sizeof (NAME_TYPE));

	return;

} /* emlxs_port_init() */
1401
1402
1403 void
1404 emlxs_disable_pcie_ce_err(emlxs_hba_t *hba)
1405 {
1406 uint16_t reg;
1407
1408 if (!hba->pci_cap_offset[PCI_CAP_ID_PCI_E]) {
1409 return;
1410 }
1411
1412 /* Turn off the Correctable Error Reporting */
1413 /* (the Device Control Register, bit 0). */
1414 reg = ddi_get16(hba->pci_acc_handle,
1415 (uint16_t *)(hba->pci_addr +
1416 hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1417 PCIE_DEVCTL));
1418
1419 reg &= ~1;
1420
1421 (void) ddi_put16(hba->pci_acc_handle,
1422 (uint16_t *)(hba->pci_addr +
1423 hba->pci_cap_offset[PCI_CAP_ID_PCI_E] +
1424 PCIE_DEVCTL),
1425 reg);
1426
1427 return;
1428
1429 } /* emlxs_disable_pcie_ce_err() */
1430
1431
1432 /*
1433 * emlxs_fca_bind_port
1434 *
1435 * Arguments:
1436 *
1437 * dip: the dev_info pointer for the ddiinst
1438 * port_info: pointer to info handed back to the transport
1439 * bind_info: pointer to info from the transport
1440 *
1441 * Return values: a port handle for this port, NULL for failure
1442 *
1443 */
static opaque_t
emlxs_fca_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
    fc_fca_bind_info_t *bind_info)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	emlxs_port_t *pport;
	emlxs_port_t *vport;
	int ddiinst;
	emlxs_vpd_t *vpd;
	emlxs_config_t *cfg;
	char *dptr;
	char buffer[16];
	uint32_t length;
	uint32_t len;
	char topology[32];
	char linkspeed[32];
	uint32_t linkstate;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;
	pport = &PPORT;

	ddiinst = hba->ddiinst;
	vpd = &VPD;
	cfg = &CFG;

	mutex_enter(&EMLXS_PORT_LOCK);

	/*
	 * A non-zero port_num is a virtual (NPIV) port; validate it
	 * against NPIV support.  The upper bound check differs by
	 * transport revision.
	 */
	if (bind_info->port_num > 0) {
#if (EMLXS_MODREV >= EMLXS_MODREV5)
		if (!(hba->flag & FC_NPIV_ENABLED) ||
		    !(bind_info->port_npiv) ||
		    (bind_info->port_num > hba->vpi_max))
#elif (EMLXS_MODREV >= EMLXS_MODREV3)
		if (!(hba->flag & FC_NPIV_ENABLED) ||
		    (bind_info->port_num > hba->vpi_high))
#endif
		{
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_bind_port: Port %d not supported.",
			    bind_info->port_num);

			mutex_exit(&EMLXS_PORT_LOCK);

			port_info->pi_error = FC_OUTOFBOUNDS;
			return (NULL);
		}
	}

	/* Get true port pointer */
	port = &VPORT(bind_info->port_num);

	/* Make sure the port is not already bound to the transport */
	if (port->flag & EMLXS_INI_BOUND) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_bind_port: Port %d already bound. flag=%x",
		    bind_info->port_num, port->flag);

		mutex_exit(&EMLXS_PORT_LOCK);

		port_info->pi_error = FC_ALREADY;
		return (NULL);
	}

	if (!(pport->flag & EMLXS_INI_ENABLED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_bind_port: Physical port does not support "
		    "initiator mode.");

		mutex_exit(&EMLXS_PORT_LOCK);

		port_info->pi_error = FC_OUTOFBOUNDS;
		return (NULL);
	}

	/* Make sure port enable flag is set */
	/* Just in case fca_port_unbind is called just prior to fca_port_bind */
	/* without a driver attach or resume operation */
	port->flag |= EMLXS_PORT_ENABLED;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_bind_port: Port %d: port_info=%p bind_info=%p",
	    bind_info->port_num, port_info, bind_info);

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	if (bind_info->port_npiv) {
		/* Leadville is telling us about a new virtual port */
		bcopy((caddr_t)&bind_info->port_nwwn, (caddr_t)&port->wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)&bind_info->port_pwwn, (caddr_t)&port->wwpn,
		    sizeof (NAME_TYPE));
		/* Default symbolic node name: inherit from the HBA */
		if (port->snn[0] == 0) {
			(void) strncpy((caddr_t)port->snn, (caddr_t)hba->snn,
			    (sizeof (port->snn)-1));

		}

		/* Default symbolic port name: HBA name + VPort index */
		if (port->spn[0] == 0) {
			(void) snprintf((caddr_t)port->spn,
			    (sizeof (port->spn)-1), "%s VPort-%d",
			    (caddr_t)hba->spn, port->vpi);
		}
		port->flag |= EMLXS_PORT_CONFIG;
	}
#endif /* >= EMLXS_MODREV5 */

	/*
	 * Restricted login should apply both physical and
	 * virtual ports.
	 */
	if (cfg[CFG_VPORT_RESTRICTED].current) {
		port->flag |= EMLXS_PORT_RESTRICTED;
	}

	/* Perform generic port initialization */
	emlxs_port_init(port);

	/* Perform SFS specific initialization */
	port->ulp_handle = bind_info->port_handle;
	port->ulp_statec_cb = bind_info->port_statec_cb;
	port->ulp_unsol_cb = bind_info->port_unsol_cb;

	/* Set the bound flag */
	port->flag |= EMLXS_INI_BOUND;
	hba->num_of_ports++;

	/*
	 * SLI4: notify the VPI layer of the bind.  The port lock is
	 * dropped across the notify call and reacquired afterward.
	 */
	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);
		(void) emlxs_vpi_port_bind_notify(port);
		mutex_enter(&EMLXS_PORT_LOCK);

		linkstate = (port->vpip->state == VPI_STATE_PORT_ONLINE)?
		    FC_LINK_UP:FC_LINK_DOWN;
	} else {
		linkstate = hba->state;
	}

	/* Update the port info structure */

	/* Set the topology and state */
	if (port->mode == MODE_TARGET) {
		port_info->pi_port_state = FC_STATE_OFFLINE;
		port_info->pi_topology = FC_TOP_UNKNOWN;
	} else if ((linkstate < FC_LINK_UP) ||
	    ((port->vpi > 0) && (!(port->flag & EMLXS_PORT_ENABLED) ||
	    !(hba->flag & FC_NPIV_SUPPORTED)))) {
		port_info->pi_port_state = FC_STATE_OFFLINE;
		port_info->pi_topology = FC_TOP_UNKNOWN;
	}
#ifdef MENLO_SUPPORT
	else if (hba->flag & FC_MENLO_MODE) {
		port_info->pi_port_state = FC_STATE_OFFLINE;
		port_info->pi_topology = FC_TOP_UNKNOWN;
	}
#endif /* MENLO_SUPPORT */
	else {
		/* Check for loop topology */
		if (hba->topology == TOPOLOGY_LOOP) {
			port_info->pi_port_state = FC_STATE_LOOP;
			(void) strlcpy(topology, ", loop", sizeof (topology));

			if (hba->flag & FC_FABRIC_ATTACHED) {
				port_info->pi_topology = FC_TOP_PUBLIC_LOOP;
			} else {
				port_info->pi_topology = FC_TOP_PRIVATE_LOOP;
			}
		} else {
			port_info->pi_topology = FC_TOP_FABRIC;
			port_info->pi_port_state = FC_STATE_ONLINE;
			(void) strlcpy(topology, ", fabric", sizeof (topology));
		}

		/* Set the link speed */
		switch (hba->linkspeed) {
		case 0:
			(void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
			break;

		case LA_1GHZ_LINK:
			(void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_1GBIT_SPEED;
			break;
		case LA_2GHZ_LINK:
			(void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_2GBIT_SPEED;
			break;
		case LA_4GHZ_LINK:
			(void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_4GBIT_SPEED;
			break;
		case LA_8GHZ_LINK:
			(void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_8GBIT_SPEED;
			break;
		case LA_10GHZ_LINK:
			(void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_10GBIT_SPEED;
			break;
		case LA_16GHZ_LINK:
			(void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_16GBIT_SPEED;
			break;
		case LA_32GHZ_LINK:
			(void) strlcpy(linkspeed, "32Gb", sizeof (linkspeed));
			port_info->pi_port_state |= FC_STATE_32GBIT_SPEED;
			break;
		default:
			(void) snprintf(linkspeed, sizeof (linkspeed),
			    "unknown(0x%x)", hba->linkspeed);
			break;
		}

		if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
			/* Adjusting port context for link up messages */
			vport = port;
			port = &PPORT;
			if (vport->vpi == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
				    "%s%s, initiator",
				    linkspeed, topology);
			} else if (!(hba->flag & FC_NPIV_LINKUP)) {
				hba->flag |= FC_NPIV_LINKUP;
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_npiv_link_up_msg,
				    "%s%s, initiator", linkspeed, topology);
			}
			port = vport;
		}
	}

	/* PCIE Correctable Error Reporting workaround */
	if (((hba->model_info.chip == EMLXS_BE2_CHIP) ||
	    (hba->model_info.chip == EMLXS_BE3_CHIP)) &&
	    (bind_info->port_num == 0)) {
		emlxs_disable_pcie_ce_err(hba);
	}

	/* Save initial state */
	port->ulp_statec = port_info->pi_port_state;

	/*
	 * The transport needs a copy of the common service parameters
	 * for this port. The transport can get any updates through
	 * the getcap entry point.
	 */
	bcopy((void *) &port->sparam,
	    (void *) &port_info->pi_login_params.common_service,
	    sizeof (SERV_PARM));

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	/* Swap the service parameters for ULP */
	emlxs_swap_service_params((SERV_PARM *)&port_info->pi_login_params.
	    common_service);
#endif /* EMLXS_MODREV2X */

	port_info->pi_login_params.common_service.btob_credit = 0xffff;

	bcopy((void *) &port->wwnn,
	    (void *) &port_info->pi_login_params.node_ww_name,
	    sizeof (NAME_TYPE));

	bcopy((void *) &port->wwpn,
	    (void *) &port_info->pi_login_params.nport_ww_name,
	    sizeof (NAME_TYPE));

	/*
	 * We need to turn off CLASS2 support.
	 * Otherwise, FC transport will use CLASS2 as default class
	 * and never try with CLASS3.
	 */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
#if (EMLXS_MODREVX >= EMLXS_MODREV3X)
	if ((port_info->pi_login_params.class_1.class_opt) & 0x0080) {
		port_info->pi_login_params.class_1.class_opt &= ~0x0080;
	}

	if ((port_info->pi_login_params.class_2.class_opt) & 0x0080) {
		port_info->pi_login_params.class_2.class_opt &= ~0x0080;
	}
#else	/* EMLXS_SPARC or EMLXS_MODREV2X */
	if ((port_info->pi_login_params.class_1.class_opt) & 0x8000) {
		port_info->pi_login_params.class_1.class_opt &= ~0x8000;
	}

	if ((port_info->pi_login_params.class_2.class_opt) & 0x8000) {
		port_info->pi_login_params.class_2.class_opt &= ~0x8000;
	}
#endif	/* >= EMLXS_MODREV3X */
#endif	/* >= EMLXS_MODREV3 */


#if (EMLXS_MODREV <= EMLXS_MODREV2)
	if ((port_info->pi_login_params.class_1.data[0]) & 0x80) {
		port_info->pi_login_params.class_1.data[0] &= ~0x80;
	}

	if ((port_info->pi_login_params.class_2.data[0]) & 0x80) {
		port_info->pi_login_params.class_2.data[0] &= ~0x80;
	}
#endif	/* <= EMLXS_MODREV2 */

	/* Additional parameters */
	port_info->pi_s_id.port_id = port->did;
	port_info->pi_s_id.priv_lilp_posit = 0;
	port_info->pi_hard_addr.hard_addr = cfg[CFG_ASSIGN_ALPA].current;

	/* Initialize the RNID parameters */
	bzero(&port_info->pi_rnid_params, sizeof (port_info->pi_rnid_params));

	(void) snprintf((char *)port_info->pi_rnid_params.params.global_id,
	    (sizeof (port_info->pi_rnid_params.params.global_id)-1),
	    "%01x%01x%02x%02x%02x%02x%02x%02x%02x", hba->wwpn.nameType,
	    hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
	    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
	    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);

	port_info->pi_rnid_params.params.unit_type = RNID_HBA;
	port_info->pi_rnid_params.params.port_id = port->did;
	port_info->pi_rnid_params.params.ip_version = RNID_IPV4;

	/* Initialize the port attributes */
	bzero(&port_info->pi_attrs, sizeof (port_info->pi_attrs));

	(void) strncpy(port_info->pi_attrs.manufacturer,
	    hba->model_info.manufacturer,
	    (sizeof (port_info->pi_attrs.manufacturer)-1));

	port_info->pi_rnid_params.status = FC_SUCCESS;

	(void) strncpy(port_info->pi_attrs.serial_number, vpd->serial_num,
	    (sizeof (port_info->pi_attrs.serial_number)-1));

	(void) snprintf(port_info->pi_attrs.firmware_version,
	    (sizeof (port_info->pi_attrs.firmware_version)-1), "%s (%s)",
	    vpd->fw_version, vpd->fw_label);

#ifdef EMLXS_I386
	(void) snprintf(port_info->pi_attrs.option_rom_version,
	    (sizeof (port_info->pi_attrs.option_rom_version)-1),
	    "Boot:%s", vpd->boot_version);
#else	/* EMLXS_SPARC */
	(void) snprintf(port_info->pi_attrs.option_rom_version,
	    (sizeof (port_info->pi_attrs.option_rom_version)-1),
	    "Boot:%s Fcode:%s", vpd->boot_version, vpd->fcode_version);
#endif	/* EMLXS_I386 */

	(void) snprintf(port_info->pi_attrs.driver_version,
	    (sizeof (port_info->pi_attrs.driver_version)-1), "%s (%s)",
	    emlxs_version, emlxs_revision);

	(void) strncpy(port_info->pi_attrs.driver_name, DRIVER_NAME,
	    (sizeof (port_info->pi_attrs.driver_name)-1));

	port_info->pi_attrs.vendor_specific_id =
	    ((hba->model_info.device_id << 16) | PCI_VENDOR_ID_EMULEX);

	port_info->pi_attrs.supported_cos = LE_SWAP32(FC_NS_CLASS3);

	port_info->pi_attrs.max_frame_size = FF_FRAME_SIZE;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	port_info->pi_rnid_params.params.num_attached = 0;

	/*
	 * Fill the FRU details "buffer": either the WWPN rendered as
	 * 16 hex characters (Lancer chips, or NPIV-enabled on
	 * MODREV3/4), or the right-justified tail of the serial
	 * number.  NOTE(review): the nibble-to-hex loop below is
	 * duplicated in the NPIV branch; candidate for a helper.
	 */
	if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
		uint8_t		byte;
		uint8_t		*wwpn;
		uint32_t	i;
		uint32_t	j;

		/* Copy the WWPN as a string into the local buffer */
		wwpn = (uint8_t *)&hba->wwpn;
		for (i = 0; i < 16; i++) {
			byte = *wwpn++;
			j = ((byte & 0xf0) >> 4);
			if (j <= 9) {
				buffer[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				buffer[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j -
				    10));
			}

			i++;
			j = (byte & 0xf);
			if (j <= 9) {
				buffer[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				buffer[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j -
				    10));
			}
		}

		port_info->pi_attrs.hba_fru_details.port_index = 0;
#if ((EMLXS_MODREV == EMLXS_MODREV3) || (EMLXS_MODREV == EMLXS_MODREV4))

	} else if (hba->flag & FC_NPIV_ENABLED) {
		uint8_t		byte;
		uint8_t		*wwpn;
		uint32_t	i;
		uint32_t	j;

		/* Copy the WWPN as a string into the local buffer */
		wwpn = (uint8_t *)&hba->wwpn;
		for (i = 0; i < 16; i++) {
			byte = *wwpn++;
			j = ((byte & 0xf0) >> 4);
			if (j <= 9) {
				buffer[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				buffer[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j -
				    10));
			}

			i++;
			j = (byte & 0xf);
			if (j <= 9) {
				buffer[i] =
				    (char)((uint8_t)'0' + (uint8_t)j);
			} else {
				buffer[i] =
				    (char)((uint8_t)'A' + (uint8_t)(j -
				    10));
			}
		}

		port_info->pi_attrs.hba_fru_details.port_index = port->vpi;
#endif	/* == EMLXS_MODREV3 || EMLXS_MODREV4 */

	} else {
		/* Copy the serial number string (right most 16 chars) */
		/* into the right justified local buffer */
		bzero(buffer, sizeof (buffer));
		length = strlen(vpd->serial_num);
		len = (length > 16) ? 16 : length;
		bcopy(&vpd->serial_num[(length - len)],
		    &buffer[(sizeof (buffer) - len)], len);

		port_info->pi_attrs.hba_fru_details.port_index =
		    vpd->port_index;
	}

	/* Pack buffer[0..7] into fru_details.high (byte-swapped) */
	dptr = (char *)&port_info->pi_attrs.hba_fru_details.high;
	dptr[0] = buffer[0];
	dptr[1] = buffer[1];
	dptr[2] = buffer[2];
	dptr[3] = buffer[3];
	dptr[4] = buffer[4];
	dptr[5] = buffer[5];
	dptr[6] = buffer[6];
	dptr[7] = buffer[7];
	port_info->pi_attrs.hba_fru_details.high =
	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.high);

	/* Pack buffer[8..15] into fru_details.low (byte-swapped) */
	dptr = (char *)&port_info->pi_attrs.hba_fru_details.low;
	dptr[0] = buffer[8];
	dptr[1] = buffer[9];
	dptr[2] = buffer[10];
	dptr[3] = buffer[11];
	dptr[4] = buffer[12];
	dptr[5] = buffer[13];
	dptr[6] = buffer[14];
	dptr[7] = buffer[15];
	port_info->pi_attrs.hba_fru_details.low =
	    LE_SWAP64(port_info->pi_attrs.hba_fru_details.low);

#endif	/* >= EMLXS_MODREV3 */

#if (EMLXS_MODREV >= EMLXS_MODREV4)
	(void) strncpy((caddr_t)port_info->pi_attrs.sym_node_name,
	    (caddr_t)port->snn, FCHBA_SYMB_NAME_LEN);
	(void) strncpy((caddr_t)port_info->pi_attrs.sym_port_name,
	    (caddr_t)port->spn, FCHBA_SYMB_NAME_LEN);
#endif	/* >= EMLXS_MODREV4 */

	(void) snprintf(port_info->pi_attrs.hardware_version,
	    (sizeof (port_info->pi_attrs.hardware_version)-1),
	    "%x", vpd->biuRev);

	/* Set the hba speed limit */
	if (vpd->link_speed & LMT_16GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |=
		    FC_HBA_PORTSPEED_16GBIT;
	}
	if (vpd->link_speed & LMT_10GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |=
		    FC_HBA_PORTSPEED_10GBIT;
	}
	if (vpd->link_speed & LMT_8GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_8GBIT;
	}
	if (vpd->link_speed & LMT_4GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_4GBIT;
	}
	if (vpd->link_speed & LMT_2GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_2GBIT;
	}
	if (vpd->link_speed & LMT_1GB_CAPABLE) {
		port_info->pi_attrs.supported_speed |= FC_HBA_PORTSPEED_1GBIT;
	}

	/* Set the hba model info */
	(void) strncpy(port_info->pi_attrs.model, hba->model_info.model,
	    (sizeof (port_info->pi_attrs.model)-1));
	(void) strncpy(port_info->pi_attrs.model_description,
	    hba->model_info.model_desc,
	    (sizeof (port_info->pi_attrs.model_description)-1));


	/* Log information */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Bind info: port_num           = %d", bind_info->port_num);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Bind info: port_handle        = %p", bind_info->port_handle);

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Bind info: port_npiv          = %d", bind_info->port_npiv);
#endif /* >= EMLXS_MODREV5 */

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: pi_topology        = %x", port_info->pi_topology);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: pi_error           = %x", port_info->pi_error);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: pi_port_state      = %x", port_info->pi_port_state);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: port_id            = %x", port_info->pi_s_id.port_id);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: priv_lilp_posit    = %x",
	    port_info->pi_s_id.priv_lilp_posit);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: hard_addr          = %x",
	    port_info->pi_hard_addr.hard_addr);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.status        = %x",
	    port_info->pi_rnid_params.status);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.global_id     = %16s",
	    port_info->pi_rnid_params.params.global_id);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.unit_type     = %x",
	    port_info->pi_rnid_params.params.unit_type);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.port_id       = %x",
	    port_info->pi_rnid_params.params.port_id);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.num_attached  = %x",
	    port_info->pi_rnid_params.params.num_attached);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.ip_version    = %x",
	    port_info->pi_rnid_params.params.ip_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.udp_port      = %x",
	    port_info->pi_rnid_params.params.udp_port);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.ip_addr       = %16s",
	    port_info->pi_rnid_params.params.ip_addr);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.spec_id_resv  = %x",
	    port_info->pi_rnid_params.params.specific_id_resv);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: rnid.topo_flags    = %x",
	    port_info->pi_rnid_params.params.topo_flags);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: manufacturer       = %s",
	    port_info->pi_attrs.manufacturer);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: serial_num         = %s",
	    port_info->pi_attrs.serial_number);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: model              = %s", port_info->pi_attrs.model);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: model_description  = %s",
	    port_info->pi_attrs.model_description);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: hardware_version   = %s",
	    port_info->pi_attrs.hardware_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: driver_version     = %s",
	    port_info->pi_attrs.driver_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: option_rom_version = %s",
	    port_info->pi_attrs.option_rom_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: firmware_version   = %s",
	    port_info->pi_attrs.firmware_version);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: driver_name        = %s",
	    port_info->pi_attrs.driver_name);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: vendor_specific_id = %x",
	    port_info->pi_attrs.vendor_specific_id);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: supported_cos      = %x",
	    port_info->pi_attrs.supported_cos);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: supported_speed    = %x",
	    port_info->pi_attrs.supported_speed);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: max_frame_size     = %x",
	    port_info->pi_attrs.max_frame_size);

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: fru_port_index     = %x",
	    port_info->pi_attrs.hba_fru_details.port_index);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: fru_high           = %llx",
	    port_info->pi_attrs.hba_fru_details.high);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: fru_low            = %llx",
	    port_info->pi_attrs.hba_fru_details.low);
#endif	/* >= EMLXS_MODREV3 */

#if (EMLXS_MODREV >= EMLXS_MODREV4)
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: sym_node_name      = %s",
	    port_info->pi_attrs.sym_node_name);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Port info: sym_port_name      = %s",
	    port_info->pi_attrs.sym_port_name);
#endif	/* >= EMLXS_MODREV4 */

	mutex_exit(&EMLXS_PORT_LOCK);

#ifdef SFCT_SUPPORT
	if (port->flag & EMLXS_TGT_ENABLED) {
		emlxs_fct_bind_port(port);
	}
#endif /* SFCT_SUPPORT */

	return ((opaque_t)port);

} /* emlxs_fca_bind_port() */
2091
2092
/*
 * emlxs_fca_unbind_port()
 *
 * FCA entry point: detach the ULP (fctl) from this port.  Unregisters
 * nodes, clears the ULP callback state, and (if target mode was bound)
 * unbinds the FCT side as well.  Safe to call on an unbound port.
 */
static void
emlxs_fca_unbind_port(opaque_t fca_port_handle)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_unbind_port: port=%p", port);

	/* Nothing to do if the port was never bound */
	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return;
	}

	/* SLI4 uses the VPI state machine for unbind notification */
	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		(void) emlxs_vpi_port_unbind_notify(port, 1);
	}

	/* Destroy & flush all port nodes, if they exist */
	if (port->node_count) {
		(void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
	}

#if (EMLXS_MODREV >= EMLXS_MODREV5)
	/* SLI3-or-earlier NPIV ports must explicitly unregister their VPI */
	if ((hba->sli_mode <= EMLXS_HBA_SLI3_MODE) &&
	    (hba->flag & FC_NPIV_ENABLED) &&
	    (port->flag & (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED))) {
		(void) emlxs_mb_unreg_vpi(port);
	}
#endif

	mutex_enter(&EMLXS_PORT_LOCK);
	if (port->flag & EMLXS_INI_BOUND) {
#if (EMLXS_MODREV >= EMLXS_MODREV5)
		port->flag &= ~(EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);
#endif
		port->flag &= ~EMLXS_INI_BOUND;
		hba->num_of_ports--;

		/* Wait until ulp callback interface is idle */
		/* The lock is dropped while sleeping so the callback */
		/* path can make progress and clear ulp_busy */
		while (port->ulp_busy) {
			mutex_exit(&EMLXS_PORT_LOCK);
			delay(drv_usectohz(500000));
			mutex_enter(&EMLXS_PORT_LOCK);
		}

		/* Clear all ULP callback state under the lock */
		port->ulp_handle = 0;
		port->ulp_statec = FC_STATE_OFFLINE;
		port->ulp_statec_cb = NULL;
		port->ulp_unsol_cb = NULL;
	}
	mutex_exit(&EMLXS_PORT_LOCK);

#ifdef SFCT_SUPPORT
	/* Check if port was target bound */
	if (port->flag & EMLXS_TGT_BOUND) {
		emlxs_fct_unbind_port(port);
	}
#endif /* SFCT_SUPPORT */

	return;

} /* emlxs_fca_unbind_port() */
2155
2156
2157 /*ARGSUSED*/
2158 extern int
2159 emlxs_fca_pkt_init(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
2160 {
2161 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2162 emlxs_hba_t *hba = HBA;
2163 emlxs_buf_t *sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
2164
2165 if (!sbp) {
2166 return (FC_FAILURE);
2167 }
2168 bzero((void *)sbp, sizeof (emlxs_buf_t));
2169
2170 mutex_init(&sbp->mtx, NULL, MUTEX_DRIVER, DDI_INTR_PRI(hba->intr_arg));
2171 sbp->pkt_flags =
2172 PACKET_VALID | PACKET_ULP_OWNED;
2173 sbp->port = port;
2174 sbp->pkt = pkt;
2175 sbp->iocbq.sbp = sbp;
2176
2177 return (FC_SUCCESS);
2178
2179 } /* emlxs_fca_pkt_init() */
2180
2181
2182
2183 static void
2184 emlxs_initialize_pkt(emlxs_port_t *port, emlxs_buf_t *sbp)
2185 {
2186 emlxs_hba_t *hba = HBA;
2187 emlxs_config_t *cfg = &CFG;
2188 fc_packet_t *pkt = PRIV2PKT(sbp);
2189
2190 mutex_enter(&sbp->mtx);
2191
2192 /* Reinitialize */
2193 sbp->pkt = pkt;
2194 sbp->port = port;
2195 sbp->bmp = NULL;
2196 sbp->pkt_flags &= (PACKET_VALID | PACKET_ALLOCATED);
2197 sbp->iotag = 0;
2198 sbp->ticks = 0;
2199 sbp->abort_attempts = 0;
2200 sbp->fpkt = NULL;
2201 sbp->flush_count = 0;
2202 sbp->next = NULL;
2203
2204 if (port->mode == MODE_INITIATOR) {
2205 sbp->node = NULL;
2206 sbp->did = 0;
2207 sbp->lun = EMLXS_LUN_NONE;
2208 sbp->class = 0;
2209 sbp->channel = NULL;
2210 }
2211
2212 bzero((void *)&sbp->iocbq, sizeof (IOCBQ));
2213 sbp->iocbq.sbp = sbp;
2214
2215 if ((pkt->pkt_tran_flags & FC_TRAN_NO_INTR) || !pkt->pkt_comp ||
2216 ddi_in_panic()) {
2217 sbp->pkt_flags |= PACKET_POLLED;
2218 }
2219
2220 /* Prepare the fc packet */
2221 pkt->pkt_state = FC_PKT_SUCCESS;
2222 pkt->pkt_reason = 0;
2223 pkt->pkt_action = 0;
2224 pkt->pkt_expln = 0;
2225 pkt->pkt_data_resid = 0;
2226 pkt->pkt_resp_resid = 0;
2227
2228 /* Make sure all pkt's have a proper timeout */
2229 if (!cfg[CFG_TIMEOUT_ENABLE].current) {
2230 /* This disables all IOCB on chip timeouts */
2231 pkt->pkt_timeout = 0x80000000;
2232 } else if (pkt->pkt_timeout == 0 || pkt->pkt_timeout == 0xffffffff) {
2233 pkt->pkt_timeout = 60;
2234 }
2235
2236 /* Clear the response buffer */
2237 if (pkt->pkt_rsplen) {
2238 bzero(pkt->pkt_resp, pkt->pkt_rsplen);
2239 }
2240
2241 mutex_exit(&sbp->mtx);
2242
2243 return;
2244
2245 } /* emlxs_initialize_pkt() */
2246
2247
2248
2249 /*
2250 * We may not need this routine
2251 */
2252 /*ARGSUSED*/
2253 extern int
2254 emlxs_fca_pkt_uninit(opaque_t fca_port_handle, fc_packet_t *pkt)
2255 {
2256 emlxs_buf_t *sbp = PKT2PRIV(pkt);
2257
2258 if (!sbp) {
2259 return (FC_FAILURE);
2260 }
2261
2262 if (!(sbp->pkt_flags & PACKET_VALID)) {
2263 return (FC_FAILURE);
2264 }
2265 sbp->pkt_flags &= ~PACKET_VALID;
2266 mutex_destroy(&sbp->mtx);
2267
2268 return (FC_SUCCESS);
2269
2270 } /* emlxs_fca_pkt_uninit() */
2271
2272
/*
 * emlxs_fca_get_cap()
 *
 * FCA entry point: report a named capability to the FC transport.
 * Writes the capability value through 'ptr' and returns FC_CAP_FOUND,
 * or FC_CAP_ERROR for unknown capabilities or an unbound port.
 */
static int
emlxs_fca_get_cap(opaque_t fca_port_handle, char *cap, void *ptr)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	int32_t rval;
	emlxs_config_t *cfg = &CFG;

	/* Capabilities are only reported for a bound initiator port */
	if (!(port->flag & EMLXS_INI_BOUND)) {
		return (FC_CAP_ERROR);
	}

	if (strcmp(cap, FC_NODE_WWN) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_NODE_WWN");

		/* Return the adapter node WWN */
		bcopy((void *)&hba->wwnn, (void *)ptr, sizeof (NAME_TYPE));
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_LOGIN_PARAMS");

		/*
		 * We need to turn off CLASS2 support.
		 * Otherwise, FC transport will use CLASS2 as default class
		 * and never try with CLASS3.
		 */
		hba->sparam.cls2.classValid = 0;

		bcopy((void *)&hba->sparam, (void *)ptr, sizeof (SERV_PARM));

		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
		int32_t *num_bufs;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_UNSOL_BUF (%d)",
		    cfg[CFG_UB_BUFS].current);

		num_bufs = (int32_t *)ptr;

		/* We multiply by MAX_VPORTS because ULP uses a */
		/* formula to calculate ub bufs from this */
		*num_bufs = (cfg[CFG_UB_BUFS].current * MAX_VPORTS);

		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
		int32_t *size;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_PAYLOAD_SIZE");

		/* -1: no driver-imposed payload size limit */
		size = (int32_t *)ptr;
		*size = -1;
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
		fc_reset_action_t *action;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_POST_RESET_BEHAVIOR");

		/* After a reset, all outstanding packets are returned */
		action = (fc_reset_action_t *)ptr;
		*action = FC_RESET_RETURN_ALL;
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
		fc_dma_behavior_t *behavior;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_NOSTREAM_ON_UNALIGN_BUF");

		/* DMA streaming is allowed even for unaligned buffers */
		behavior = (fc_dma_behavior_t *)ptr;
		*behavior = FC_ALLOW_STREAMING;
		rval = FC_CAP_FOUND;

	} else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
		fc_fcp_dma_t *fcp_dma;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: FC_CAP_FCP_DMA");

		fcp_dma = (fc_fcp_dma_t *)ptr;
		*fcp_dma = FC_DVMA_SPACE;
		rval = FC_CAP_FOUND;

	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_get_cap: Unknown capability. [%s]", cap);

		rval = FC_CAP_ERROR;

	}

	return (rval);

} /* emlxs_fca_get_cap() */
2373
2374
2375
/*
 * emlxs_fca_set_cap()
 *
 * FCA entry point: no capabilities are settable on this driver;
 * the request is logged and FC_CAP_ERROR is always returned.
 */
static int
emlxs_fca_set_cap(opaque_t fca_port_handle, char *cap, void *ptr)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_set_cap: cap=[%s] arg=%p", cap, ptr);

	return (FC_CAP_ERROR);

} /* emlxs_fca_set_cap() */
2387
2388
/*
 * emlxs_fca_get_device()
 *
 * FCA entry point: device handle lookup by D_ID.  Not implemented;
 * the request is logged and NULL is always returned.
 */
static opaque_t
emlxs_fca_get_device(opaque_t fca_port_handle, fc_portid_t d_id)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_get_device: did=%x", d_id.port_id);

	return (NULL);

} /* emlxs_fca_get_device() */
2400
2401
/*
 * emlxs_fca_notify()
 *
 * FCA entry point: ULP notification hook.  The command is logged
 * only; no action is taken and FC_SUCCESS is always returned.
 */
static int32_t
emlxs_fca_notify(opaque_t fca_port_handle, uint32_t cmd)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg, "fca_notify: cmd=%x",
	    cmd);

	return (FC_SUCCESS);

} /* emlxs_fca_notify */
2413
2414
2415
2416 static int
2417 emlxs_fca_get_map(opaque_t fca_port_handle, fc_lilpmap_t *mapbuf)
2418 {
2419 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
2420 emlxs_hba_t *hba = HBA;
2421 uint32_t lilp_length;
2422
2423 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
2424 "fca_get_map: mapbuf=%p length=%d (%X,%X,%X,%X)", mapbuf,
2425 port->alpa_map[0], port->alpa_map[1], port->alpa_map[2],
2426 port->alpa_map[3], port->alpa_map[4]);
2427
2428 if (!(port->flag & EMLXS_INI_BOUND)) {
2429 return (FC_NOMAP);
2430 }
2431
2432 if (hba->topology != TOPOLOGY_LOOP) {
2433 return (FC_NOMAP);
2434 }
2435
2436 /* Check if alpa map is available */
2437 if (port->alpa_map[0] != 0) {
2438 mapbuf->lilp_magic = MAGIC_LILP;
2439 } else { /* No LILP map available */
2440
2441 /* Set lilp_magic to MAGIC_LISA and this will */
2442 /* trigger an ALPA scan in ULP */
2443 mapbuf->lilp_magic = MAGIC_LISA;
2444 }
2445
2446 mapbuf->lilp_myalpa = port->did;
2447
2448 /* The first byte of the alpa_map is the lilp map length */
2449 /* Add one to include the lilp length byte itself */
2450 lilp_length = (uint32_t)port->alpa_map[0] + 1;
2451
2452 /* Make sure the max transfer is 128 bytes */
2453 if (lilp_length > 128) {
2454 lilp_length = 128;
2455 }
2456
2457 /* We start copying from the lilp_length field */
2458 /* in order to get a word aligned address */
2459 bcopy((void *)&port->alpa_map, (void *)&mapbuf->lilp_length,
2460 lilp_length);
2461
2462 return (FC_SUCCESS);
2463
2464 } /* emlxs_fca_get_map() */
2465
2466
2467
/*
 * emlxs_fca_transport()
 *
 * FCA entry point: transport a ULP packet.  Validates adapter, port
 * and packet state, prepares the packet, then dispatches it to the
 * proper send routine based on pkt_tran_type (and pkt_cmd_fhdr.type
 * for exchange/outbound packets).  If the packet was marked polled,
 * this routine waits for its completion before returning.
 */
extern int
emlxs_fca_transport(opaque_t fca_port_handle, fc_packet_t *pkt)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	uint32_t rval;
	uint32_t pkt_flags;

	/* Validate packet */
	sbp = PKT2PRIV(pkt);

	/* Make sure adapter is online */
	if (!(hba->flag & FC_ONLINE_MODE) &&
	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Adapter offline.");

		/* Adapter still coming online: ask the ULP to retry */
		rval = (hba->flag & FC_ONLINING_MODE) ?
		    FC_TRAN_BUSY : FC_OFFLINE;
		return (rval);
	}

	/* Make sure ULP was told that the port was online */
	if ((port->ulp_statec == FC_STATE_OFFLINE) &&
	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Port offline.");

		return (FC_OFFLINE);
	}

	/* The private area must reference this port */
	if (sbp->port != port) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Invalid port handle. sbp=%p port=%p flags=%x", sbp,
		    sbp->port, sbp->pkt_flags);
		return (FC_BADPACKET);
	}

	/* Packet must be initialized and owned by the ULP */
	if (!(sbp->pkt_flags & (PACKET_VALID | PACKET_ULP_OWNED))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Invalid packet flags. sbp=%p port=%p flags=%x", sbp,
		    sbp->port, sbp->pkt_flags);
		return (FC_BADPACKET);
	}

#ifdef SFCT_SUPPORT
	/* Target-mode ports only accept FCT commands or driver packets */
	if ((port->mode == MODE_TARGET) && !sbp->fct_cmd &&
	    !(sbp->pkt_flags & PACKET_ALLOCATED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Packet blocked. Target mode.");
		return (FC_TRANSPORT_ERROR);
	}
#endif /* SFCT_SUPPORT */

#ifdef IDLE_TIMER
	emlxs_pm_busy_component(hba);
#endif /* IDLE_TIMER */

	/* Prepare the packet for transport */
	emlxs_initialize_pkt(port, sbp);

	/* Save a copy of the pkt flags. */
	/* We will check the polling flag later */
	pkt_flags = sbp->pkt_flags;

	/* Send the packet */
	switch (pkt->pkt_tran_type) {
	case FC_PKT_FCP_READ:
	case FC_PKT_FCP_WRITE:
		rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
		break;

	case FC_PKT_IP_WRITE:
	case FC_PKT_BROADCAST:
		rval = emlxs_send_ip(port, sbp);
		break;

	case FC_PKT_EXCHANGE:
		/* Route by FC-4 type in the frame header */
		switch (pkt->pkt_cmd_fhdr.type) {
		case FC_TYPE_SCSI_FCP:
			rval = emlxs_send_fcp_cmd(port, sbp, &pkt_flags);
			break;

		case FC_TYPE_FC_SERVICES:
			rval = emlxs_send_ct(port, sbp);
			break;

#ifdef MENLO_SUPPORT
		case EMLXS_MENLO_TYPE:
			rval = emlxs_send_menlo(port, sbp);
			break;
#endif /* MENLO_SUPPORT */

		default:
			rval = emlxs_send_els(port, sbp);
		}
		break;

	case FC_PKT_OUTBOUND:
		/* Responses: route by FC-4 type in the frame header */
		switch (pkt->pkt_cmd_fhdr.type) {
#ifdef SFCT_SUPPORT
		case FC_TYPE_SCSI_FCP:
			rval = emlxs_send_fct_status(port, sbp);
			break;

		case FC_TYPE_BASIC_LS:
			rval = emlxs_send_fct_abort(port, sbp);
			break;
#endif /* SFCT_SUPPORT */

		case FC_TYPE_FC_SERVICES:
			rval = emlxs_send_ct_rsp(port, sbp);
			break;
#ifdef MENLO_SUPPORT
		case EMLXS_MENLO_TYPE:
			rval = emlxs_send_menlo(port, sbp);
			break;
#endif /* MENLO_SUPPORT */

		default:
			rval = emlxs_send_els_rsp(port, sbp);
		}
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
		    "Unsupported pkt_tran_type. type=%x", pkt->pkt_tran_type);
		rval = FC_TRANSPORT_ERROR;
		break;
	}

	/* Check if send was not successful */
	if (rval != FC_SUCCESS) {
		/* Return packet to ULP */
		mutex_enter(&sbp->mtx);
		sbp->pkt_flags |= PACKET_ULP_OWNED;
		mutex_exit(&sbp->mtx);

		return (rval);
	}

	/* Check if this packet should be polled for completion before */
	/* returning. This check must be done with a saved copy of the */
	/* pkt_flags because the packet itself could already be freed from */
	/* memory if it was not polled. */
	if (pkt_flags & PACKET_POLLED) {
		emlxs_poll(port, sbp);
	}

	return (FC_SUCCESS);

} /* emlxs_fca_transport() */
2621
2622
2623
/*
 * emlxs_poll()
 *
 * Wait for completion of a polled packet.  In a panic, the chip is
 * polled directly (no interrupts or timers run); otherwise the
 * completion condition variable is waited on.  For FCP reset packets,
 * outstanding IOs are flushed (with retries, then a link reset, then
 * an HBA reset as escalation) before the packet is handed back to the
 * ULP via its completion callback.
 */
static void
emlxs_poll(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt = PRIV2PKT(sbp);
	clock_t timeout;
	clock_t time;
	CHANNEL *cp;
	int in_panic = 0;

	/* Track the number of active pollers on this adapter */
	mutex_enter(&EMLXS_PORT_LOCK);
	hba->io_poll_count++;
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Check for panic situation */
	cp = (CHANNEL *)sbp->channel;

	if (ddi_in_panic()) {
		in_panic = 1;
		/*
		 * In panic situations there will be one thread with
		 * no interrrupts (hard or soft) and no timers
		 */

		/*
		 * We must manually poll everything in this thread
		 * to keep the driver going.
		 */

		/* Keep polling the chip until our IO is completed */
		/* Driver's timer will not function during panics. */
		/* Therefore, timer checks must be performed manually. */
		(void) drv_getparm(LBOLT, &time);
		timeout = time + drv_usectohz(1000000);
		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
			EMLXS_SLI_POLL_INTR(hba);
			(void) drv_getparm(LBOLT, &time);

			/* Trigger timer checks periodically */
			/* (roughly once per second) */
			if (time >= timeout) {
				emlxs_timer_checks(hba);
				timeout = time + drv_usectohz(1000000);
			}
		}
	} else {
		/* Wait for IO completion */
		/* The driver's timer will detect */
		/* any timeout and abort the I/O. */
		mutex_enter(&EMLXS_PKT_LOCK);
		while (!(sbp->pkt_flags & PACKET_COMPLETED)) {
			cv_wait(&EMLXS_PKT_CV, &EMLXS_PKT_LOCK);
		}
		mutex_exit(&EMLXS_PKT_LOCK);
	}

	/* Check for fcp reset pkt */
	if (sbp->pkt_flags & PACKET_FCP_RESET) {
		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
			/* Flush the IO's on the chipq */
			(void) emlxs_chipq_node_flush(port,
			    &hba->chan[hba->channel_fcp],
			    sbp->node, sbp);
		} else {
			/* Flush the IO's on the chipq for this lun */
			(void) emlxs_chipq_lun_flush(port,
			    sbp->node, sbp->lun, sbp);
		}

		if (sbp->flush_count == 0) {
			emlxs_node_open(port, sbp->node, hba->channel_fcp);
			goto done;
		}

		/* Set the timeout so the flush has time to complete */
		timeout = emlxs_timeout(hba, 60);
		(void) drv_getparm(LBOLT, &time);
		while ((time < timeout) && sbp->flush_count > 0) {
			delay(drv_usectohz(500000));
			(void) drv_getparm(LBOLT, &time);
		}

		if (sbp->flush_count == 0) {
			emlxs_node_open(port, sbp->node, hba->channel_fcp);
			goto done;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
		    "sbp=%p flush_count=%d. Waiting...", sbp,
		    sbp->flush_count);

		/* Let's try this one more time */

		if (sbp->pkt_flags & PACKET_FCP_TGT_RESET) {
			/* Flush the IO's on the chipq */
			(void) emlxs_chipq_node_flush(port,
			    &hba->chan[hba->channel_fcp],
			    sbp->node, sbp);
		} else {
			/* Flush the IO's on the chipq for this lun */
			(void) emlxs_chipq_lun_flush(port,
			    sbp->node, sbp->lun, sbp);
		}

		/* Reset the timeout so the flush has time to complete */
		timeout = emlxs_timeout(hba, 60);
		(void) drv_getparm(LBOLT, &time);
		while ((time < timeout) && sbp->flush_count > 0) {
			delay(drv_usectohz(500000));
			(void) drv_getparm(LBOLT, &time);
		}

		if (sbp->flush_count == 0) {
			emlxs_node_open(port, sbp->node, hba->channel_fcp);
			goto done;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
		    "sbp=%p flush_count=%d. Resetting link.", sbp,
		    sbp->flush_count);

		/* Let's first try to reset the link */
		(void) emlxs_reset(port, FC_FCA_LINK_RESET);

		if (sbp->flush_count == 0) {
			goto done;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
		    "sbp=%p flush_count=%d. Resetting HBA.", sbp,
		    sbp->flush_count);

		/* If that doesn't work, reset the adapter */
		(void) emlxs_reset(port, FC_FCA_RESET);

		if (sbp->flush_count != 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_flush_timeout_msg,
			    "sbp=%p flush_count=%d. Giving up.", sbp,
			    sbp->flush_count);
		}

	}
	/* PACKET_FCP_RESET */
done:

	/* Packet has been declared completed and is now ready to be returned */

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

	/* Return ownership of the packet to the ULP */
	mutex_enter(&sbp->mtx);
	sbp->pkt_flags |= PACKET_ULP_OWNED;
	mutex_exit(&sbp->mtx);

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->io_poll_count--;
	mutex_exit(&EMLXS_PORT_LOCK);

#ifdef FMA_SUPPORT
	/* FMA DMA checks are skipped in panic context */
	if (!in_panic) {
		emlxs_check_dma(hba, sbp);
	}
#endif

	/* Make ULP completion callback if required */
	if (pkt->pkt_comp) {
		cp->ulpCmplCmd++;
		(*pkt->pkt_comp) (pkt);
	}

#ifdef FMA_SUPPORT
	/* A DMA error detected above triggers an adapter restart */
	if (hba->flag & FC_DMA_CHECK_ERROR) {
		emlxs_thread_spawn(hba, emlxs_restart_thread,
		    NULL, NULL);
	}
#endif

	return;

} /* emlxs_poll() */
2804
2805
/*
 * emlxs_fca_ub_alloc()
 *
 * FCA entry point: allocate a pool of unsolicited buffers of the given
 * size for the given FC-4 type and return their tokens to the ULP.
 * A duplicate (type, size) pool request is rejected.  On success the
 * new pool is linked at the head of port->ub_pool and per-channel
 * post counts are updated.
 *
 * NOTE(review): 'count' is dereferenced at several points (the unbound
 * message, the bzero of 'tokens', pool sizing) but is only NULL-checked
 * in the EMLXS_MAX_UBUFS test below — presumably the ULP always passes
 * a non-NULL count; confirm against the fctl caller.
 */
static int
emlxs_fca_ub_alloc(opaque_t fca_port_handle, uint64_t tokens[], uint32_t size,
    uint32_t *count, uint32_t type)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	char *err = NULL;
	emlxs_unsol_buf_t *pool = NULL;
	emlxs_unsol_buf_t *new_pool = NULL;
	emlxs_config_t *cfg = &CFG;
	int32_t i;
	int result;
	uint32_t free_resv;
	uint32_t free;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	int rc;

	/* Initiator mode disabled: report success with no tokens */
	if (!(port->flag & EMLXS_INI_ENABLED)) {
		if (tokens && count) {
			bzero(tokens, (sizeof (uint64_t) * (*count)));
		}
		return (FC_SUCCESS);
	}

	if (!(port->flag & EMLXS_INI_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_ub_alloc failed: Port not bound! size=%x count=%d "
		    "type=%x", size, *count, type);

		return (FC_FAILURE);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_ub_alloc: size=%x count=%d type=%x", size, *count, type);

	if (count && (*count > EMLXS_MAX_UBUFS)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "fca_ub_alloc failed: Too many unsolicted buffers "
		    "requested. count=%x", *count);

		return (FC_FAILURE);

	}

	if (tokens == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "fca_ub_alloc failed: Token array is NULL.");

		return (FC_FAILURE);
	}

	/* Clear the token array */
	bzero(tokens, (sizeof (uint64_t) * (*count)));

	/* Determine the free/reserved split and a type name for messages */
	free_resv = 0;
	free = *count;
	switch (type) {
	case FC_TYPE_BASIC_LS:
		err = "BASIC_LS";
		break;
	case FC_TYPE_EXTENDED_LS:
		err = "EXTENDED_LS";
		free = *count / 2;	/* Hold 50% for normal use */
		free_resv = *count - free;	/* Reserve 50% for RSCN use */
		break;
	case FC_TYPE_IS8802:
		err = "IS8802";
		break;
	case FC_TYPE_IS8802_SNAP:
		err = "IS8802_SNAP";

		/* IP buffers require the network config option */
		if (cfg[CFG_NETWORK_ON].current == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_ub_alloc failed: IP support is disabled.");

			return (FC_FAILURE);
		}
		break;
	case FC_TYPE_SCSI_FCP:
		err = "SCSI_FCP";
		break;
	case FC_TYPE_SCSI_GPP:
		err = "SCSI_GPP";
		break;
	case FC_TYPE_HIPP_FP:
		err = "HIPP_FP";
		break;
	case FC_TYPE_IPI3_MASTER:
		err = "IPI3_MASTER";
		break;
	case FC_TYPE_IPI3_SLAVE:
		err = "IPI3_SLAVE";
		break;
	case FC_TYPE_IPI3_PEER:
		err = "IPI3_PEER";
		break;
	case FC_TYPE_FC_SERVICES:
		err = "FC_SERVICES";
		break;
	}

	mutex_enter(&EMLXS_UB_LOCK);

	/*
	 * Walk through the list of the unsolicited buffers
	 * for this ddiinst of emlx.
	 */

	pool = port->ub_pool;

	/*
	 * The emlxs_fca_ub_alloc() can be called more than once with different
	 * size. We will reject the call if there are
	 * duplicate size with the same FC-4 type.
	 */
	while (pool) {
		if ((pool->pool_type == type) &&
		    (pool->pool_buf_size == size)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
			    "fca_ub_alloc failed: Unsolicited buffer pool "
			    "for %s of size 0x%x bytes already exists.",
			    err, size);

			result = FC_FAILURE;
			goto fail;
		}

		pool = pool->pool_next;
	}

	/* Drop the lock across the (sleeping) allocations below */
	mutex_exit(&EMLXS_UB_LOCK);

	new_pool = (emlxs_unsol_buf_t *)kmem_zalloc(sizeof (emlxs_unsol_buf_t),
	    KM_SLEEP);

	new_pool->pool_next = NULL;
	new_pool->pool_type = type;
	new_pool->pool_buf_size = size;
	new_pool->pool_nentries = *count;
	new_pool->pool_available = new_pool->pool_nentries;
	new_pool->pool_free = free;
	new_pool->pool_free_resv = free_resv;
	new_pool->fc_ubufs =
	    kmem_zalloc((sizeof (fc_unsol_buf_t) * (*count)), KM_SLEEP);

	new_pool->pool_first_token = port->ub_count;
	new_pool->pool_last_token = port->ub_count + new_pool->pool_nentries;

	for (i = 0; i < new_pool->pool_nentries; i++) {
		ubp = (fc_unsol_buf_t *)&new_pool->fc_ubufs[i];
		ubp->ub_port_handle = port->ulp_handle;
		/* The token is the buffer object's own address */
		ubp->ub_token = (uint64_t)((unsigned long)ubp);
		ubp->ub_bufsize = size;
		ubp->ub_class = FC_TRAN_CLASS3;
		ubp->ub_port_private = NULL;
		ubp->ub_fca_private =
		    (emlxs_ub_priv_t *)kmem_zalloc(sizeof (emlxs_ub_priv_t),
		    KM_SLEEP);

		/*
		 * Initialize emlxs_ub_priv_t
		 */
		ub_priv = ubp->ub_fca_private;
		ub_priv->ubp = ubp;
		ub_priv->port = port;
		ub_priv->flags = EMLXS_UB_FREE;
		ub_priv->available = 1;
		ub_priv->pool = new_pool;
		ub_priv->time = 0;
		ub_priv->timeout = 0;
		ub_priv->token = port->ub_count;
		ub_priv->cmd = 0;

		/* Allocate the actual buffer */
		ubp->ub_buffer = (caddr_t)kmem_zalloc(size, KM_SLEEP);


		tokens[i] = (uint64_t)((unsigned long)ubp);
		port->ub_count++;
	}

	mutex_enter(&EMLXS_UB_LOCK);

	/* Add the pool to the top of the pool list */
	new_pool->pool_prev = NULL;
	new_pool->pool_next = port->ub_pool;

	if (port->ub_pool) {
		port->ub_pool->pool_prev = new_pool;
	}
	port->ub_pool = new_pool;

	/* Set the post counts */
	if (type == FC_TYPE_IS8802_SNAP) {
		MAILBOXQ *mbox;

		port->ub_post[hba->channel_ip] += new_pool->pool_nentries;

		/* Configure FARP now that IP buffers exist; a failed */
		/* mailbox issue returns the mailbox to the pool */
		if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
		    MEM_MBOX))) {
			emlxs_mb_config_farp(hba, mbox);
			rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
			    mbox, MBX_NOWAIT, 0);
			if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
				emlxs_mem_put(hba, MEM_MBOX, (void *)mbox);
			}
		}
		port->flag |= EMLXS_PORT_IP_UP;
	} else if (type == FC_TYPE_EXTENDED_LS) {
		port->ub_post[hba->channel_els] += new_pool->pool_nentries;
	} else if (type == FC_TYPE_FC_SERVICES) {
		port->ub_post[hba->channel_ct] += new_pool->pool_nentries;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "%d unsolicited buffers allocated for %s of size 0x%x bytes.",
	    *count, err, size);

	return (FC_SUCCESS);

fail:

	/* Clean the pool */
	/* (tokens[] was zeroed above, so this stops at the first */
	/* unallocated slot) */
	for (i = 0; tokens[i] != NULL; i++) {
		/* Get the buffer object */
		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "fca_ub_alloc failed: Freed buffer=%p token=%x size=%x "
		    "type=%x ", ubp, ub_priv->token, ubp->ub_bufsize, type);

		/* Free the actual buffer */
		kmem_free(ubp->ub_buffer, ubp->ub_bufsize);

		/* Free the private area of the buffer object */
		kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));

		tokens[i] = 0;
		port->ub_count--;
	}

	if (new_pool) {
		/* Free the array of buffer objects in the pool */
		kmem_free((caddr_t)new_pool->fc_ubufs,
		    (sizeof (fc_unsol_buf_t) * new_pool->pool_nentries));

		/* Free the pool object */
		kmem_free((caddr_t)new_pool, sizeof (emlxs_unsol_buf_t));
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (result);

} /* emlxs_fca_ub_alloc() */
3065
3066
/*
 * emlxs_ub_els_reject()
 *
 * Send an LS_RJT (reason LSRJT_UNABLE_TPC) for an unsolicited ELS
 * request the ULP released without replying to.  If the link is down
 * or a packet cannot be allocated, the ELS exchange is aborted
 * instead.  The reject is queued for transmission in another thread.
 */
static void
emlxs_ub_els_reject(emlxs_port_t *port, fc_unsol_buf_t *ubp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_ub_priv_t *ub_priv;
	fc_packet_t *pkt;
	ELS_PKT *els;
	uint32_t sid;

	ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

	/* No link: just abort the exchange */
	if (hba->state <= FC_LINK_DOWN) {
		emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
		return;
	}

	/* Allocate a packet large enough for the ELS code + LS_RJT payload */
	if (!(pkt = emlxs_pkt_alloc(port, sizeof (uint32_t) +
	    sizeof (LS_RJT), 0, 0, KM_NOSLEEP))) {
		emlxs_abort_els_exchange(hba, port, ubp->ub_frame.rx_id);
		return;
	}

	sid = LE_SWAP24_LO(ubp->ub_frame.s_id);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
	    "%s dropped: sid=%x. Rejecting.",
	    emlxs_elscmd_xlate(ub_priv->cmd), sid);

	pkt->pkt_tran_type = FC_PKT_OUTBOUND;
	pkt->pkt_timeout = (2 * hba->fc_ratov);

	/* Reply in the same class the request arrived in */
	if ((uint32_t)ubp->ub_class == FC_TRAN_CLASS2) {
		pkt->pkt_tran_flags &= ~FC_TRAN_CLASS3;
		pkt->pkt_tran_flags |= FC_TRAN_CLASS2;
	}

	/* Build the fc header */
	/* Destination is the original sender; exchange ids echo the */
	/* unsolicited frame so the reject closes that exchange */
	pkt->pkt_cmd_fhdr.d_id = ubp->ub_frame.s_id;
	pkt->pkt_cmd_fhdr.r_ctl =
	    R_CTL_EXTENDED_SVC | R_CTL_SOLICITED_CONTROL;
	pkt->pkt_cmd_fhdr.s_id = LE_SWAP24_LO(port->did);
	pkt->pkt_cmd_fhdr.type = FC_TYPE_EXTENDED_LS;
	pkt->pkt_cmd_fhdr.f_ctl =
	    F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
	pkt->pkt_cmd_fhdr.seq_id = 0;
	pkt->pkt_cmd_fhdr.df_ctl = 0;
	pkt->pkt_cmd_fhdr.seq_cnt = 0;
	pkt->pkt_cmd_fhdr.ox_id = (ub_priv->cmd >> ELS_CMD_SHIFT) & 0xff;
	pkt->pkt_cmd_fhdr.rx_id = ubp->ub_frame.rx_id;
	pkt->pkt_cmd_fhdr.ro = 0;

	/* Build the command */
	els = (ELS_PKT *) pkt->pkt_cmd;
	els->elsCode = 0x01;	/* LS_RJT */
	els->un.lsRjt.un.b.lsRjtRsvd0 = 0;
	els->un.lsRjt.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
	els->un.lsRjt.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
	els->un.lsRjt.un.b.vendorUnique = 0x02;

	/* Send the pkt later in another thread */
	(void) emlxs_pkt_send(pkt, 0);

	return;

} /* emlxs_ub_els_reject() */
3132
/*
 * emlxs_fca_ub_release()
 *
 * FCA entry point: the ULP returns ownership of unsolicited buffers.
 * Each token is validated, any dropped ELS request is rejected on the
 * wire, the buffer is zeroed and marked free, and the owning pool's
 * free counters are updated.  A fully drained pool pending destruction
 * (pool_available == 0) is destroyed here.  Runs under EMLXS_UB_LOCK.
 */
extern int
emlxs_fca_ub_release(opaque_t fca_port_handle, uint32_t count,
    uint64_t tokens[])
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t i;
	uint32_t time;
	emlxs_unsol_buf_t *pool;

	if (count == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_ub_release: Nothing to do. count=%d", count);

		return (FC_SUCCESS);
	}

	if (!(port->flag & EMLXS_INI_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_ub_release failed: Port not bound. count=%d "
		    "token[0]=%p",
		    count, tokens[0]);

		return (FC_UNBOUND);
	}

	mutex_enter(&EMLXS_UB_LOCK);

	if (!port->ub_pool) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_ub_release failed: No pools! count=%d token[0]=%p",
		    count, tokens[0]);

		mutex_exit(&EMLXS_UB_LOCK);
		return (FC_UB_BADTOKEN);
	}

	for (i = 0; i < count; i++) {
		/* The token is the buffer object's address */
		ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);

		if (!ubp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_ub_release failed: count=%d tokens[%d]=0",
			    count, i);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;

		if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_ub_release failed: Dead buffer found. ubp=%p",
			    ubp);

			mutex_exit(&EMLXS_UB_LOCK);
			return (FC_UB_BADTOKEN);
		}

		/* Double release is tolerated, not an error */
		if (ub_priv->flags == EMLXS_UB_FREE) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_ub_release: Buffer already free! ubp=%p "
			    "token=%x",
			    ubp, ub_priv->token);

			continue;
		}

		/* Check for dropped els buffer */
		/* ULP will do this sometimes without sending a reply */
		if ((ubp->ub_frame.r_ctl == FC_ELS_REQ) &&
		    !(ub_priv->flags & EMLXS_UB_REPLY)) {
			emlxs_ub_els_reject(port, ubp);
		}

		/* Mark the buffer free */
		ub_priv->flags = EMLXS_UB_FREE;
		bzero(ubp->ub_buffer, ubp->ub_bufsize);

		/* Time (in timer tics) the ULP held this buffer */
		time = hba->timer_tics - ub_priv->time;
		ub_priv->time = 0;
		ub_priv->timeout = 0;

		pool = ub_priv->pool;

		/* Return the buffer to its free or reserved count */
		if (ub_priv->flags & EMLXS_UB_RESV) {
			pool->pool_free_resv++;
		} else {
			pool->pool_free++;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
		    "fca_ub_release: ubp=%p token=%x time=%d av=%d "
		    "(%d,%d,%d,%d)",
		    ubp, ub_priv->token, time, ub_priv->available,
		    pool->pool_nentries, pool->pool_available,
		    pool->pool_free, pool->pool_free_resv);

		/* Check if pool can be destroyed now */
		/* (no buffers available and all entries back on the */
		/* free/reserved lists) */
		if ((pool->pool_available == 0) &&
		    (pool->pool_free + pool->pool_free_resv ==
		    pool->pool_nentries)) {
			emlxs_ub_destroy(port, pool);
		}
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (FC_SUCCESS);

} /* emlxs_fca_ub_release() */
3247
3248
3249 static int
3250 emlxs_fca_ub_free(opaque_t fca_port_handle, uint32_t count, uint64_t tokens[])
3251 {
3252 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
3253 emlxs_unsol_buf_t *pool;
3254 fc_unsol_buf_t *ubp;
3255 emlxs_ub_priv_t *ub_priv;
3256 uint32_t i;
3257
3258 if (!(port->flag & EMLXS_INI_ENABLED)) {
3259 return (FC_SUCCESS);
3260 }
3261
3262 if (count == 0) {
3263 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3264 "fca_ub_free: Nothing to do. count=%d token[0]=%p", count,
3265 tokens[0]);
3266
3267 return (FC_SUCCESS);
3268 }
3269
3270 if (!(port->flag & EMLXS_INI_BOUND)) {
3271 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3272 "fca_ub_free: Port not bound. count=%d token[0]=%p", count,
3273 tokens[0]);
3274
3275 return (FC_SUCCESS);
3276 }
3277
3278 mutex_enter(&EMLXS_UB_LOCK);
3279
3280 if (!port->ub_pool) {
3281 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3282 "fca_ub_free failed: No pools! count=%d token[0]=%p", count,
3283 tokens[0]);
3284
3285 mutex_exit(&EMLXS_UB_LOCK);
3286 return (FC_UB_BADTOKEN);
3287 }
3288
3289 /* Process buffer list */
3290 for (i = 0; i < count; i++) {
3291 ubp = (fc_unsol_buf_t *)((unsigned long)tokens[i]);
3292
3293 if (!ubp) {
3294 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3295 "fca_ub_free failed: count=%d tokens[%d]=0", count,
3296 i);
3297
3298 mutex_exit(&EMLXS_UB_LOCK);
3299 return (FC_UB_BADTOKEN);
3300 }
3301
3302 /* Mark buffer unavailable */
3303 ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
3304
3305 if (!ub_priv || (ub_priv == (emlxs_ub_priv_t *)DEAD_PTR)) {
3306 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3307 "fca_ub_free failed: Dead buffer found. ubp=%p",
3308 ubp);
3309
3310 mutex_exit(&EMLXS_UB_LOCK);
3311 return (FC_UB_BADTOKEN);
3312 }
3313
3314 ub_priv->available = 0;
3315
3316 /* Mark one less buffer available in the parent pool */
3317 pool = ub_priv->pool;
3318
3319 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
3320 "fca_ub_free: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
3321 ub_priv->token, pool->pool_nentries,
3322 pool->pool_available - 1, pool->pool_free,
3323 pool->pool_free_resv);
3324
3325 if (pool->pool_available) {
3326 pool->pool_available--;
3327
3328 /* Check if pool can be destroyed */
3329 if ((pool->pool_available == 0) &&
3330 (pool->pool_free + pool->pool_free_resv ==
3331 pool->pool_nentries)) {
3332 emlxs_ub_destroy(port, pool);
3333 }
3334 }
3335 }
3336
3337 mutex_exit(&EMLXS_UB_LOCK);
3338
3339 return (FC_SUCCESS);
3340
3341 } /* emlxs_fca_ub_free() */
3342
3343
3344 /* EMLXS_UB_LOCK must be held when calling this routine */
3345 extern void
3346 emlxs_ub_destroy(emlxs_port_t *port, emlxs_unsol_buf_t *pool)
3347 {
3348 emlxs_hba_t *hba = HBA;
3349 emlxs_unsol_buf_t *next;
3350 emlxs_unsol_buf_t *prev;
3351 fc_unsol_buf_t *ubp;
3352 uint32_t i;
3353
3354 /* Remove the pool object from the pool list */
3355 next = pool->pool_next;
3356 prev = pool->pool_prev;
3357
3358 if (port->ub_pool == pool) {
3359 port->ub_pool = next;
3360 }
3361
3362 if (prev) {
3363 prev->pool_next = next;
3364 }
3365
3366 if (next) {
3367 next->pool_prev = prev;
3368 }
3369
3370 pool->pool_prev = NULL;
3371 pool->pool_next = NULL;
3372
3373 /* Clear the post counts */
3374 switch (pool->pool_type) {
3375 case FC_TYPE_IS8802_SNAP:
3376 port->ub_post[hba->channel_ip] -= pool->pool_nentries;
3377 break;
3378
3379 case FC_TYPE_EXTENDED_LS:
3380 port->ub_post[hba->channel_els] -= pool->pool_nentries;
3381 break;
3382
3383 case FC_TYPE_FC_SERVICES:
3384 port->ub_post[hba->channel_ct] -= pool->pool_nentries;
3385 break;
3386 }
3387
3388 /* Now free the pool memory */
3389 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
3390 "ub_destroy: pool=%p type=%d size=%d count=%d", pool,
3391 pool->pool_type, pool->pool_buf_size, pool->pool_nentries);
3392
3393 /* Process the array of buffer objects in the pool */
3394 for (i = 0; i < pool->pool_nentries; i++) {
3395 /* Get the buffer object */
3396 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
3397
3398 /* Free the memory the buffer object represents */
3399 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
3400
3401 /* Free the private area of the buffer object */
3402 kmem_free(ubp->ub_fca_private, sizeof (emlxs_ub_priv_t));
3403 }
3404
3405 /* Free the array of buffer objects in the pool */
3406 kmem_free((caddr_t)pool->fc_ubufs,
3407 (sizeof (fc_unsol_buf_t)*pool->pool_nentries));
3408
3409 /* Free the pool object */
3410 kmem_free((caddr_t)pool, sizeof (emlxs_unsol_buf_t));
3411
3412 return;
3413
3414 } /* emlxs_ub_destroy() */
3415
3416
/*
 * FCA entry point: abort a single outstanding packet.
 *
 * fca_port_handle	Our emlxs_port_t for this FCA port.
 * pkt			The fc_packet_t to abort.
 * sleep		Nonzero permits this thread to delay/retry when
 *			the packet cannot be located immediately.
 *
 * The packet is looked for in three places, in order:
 *   1. the owning node's transmit queue (priority or normal) — unlink
 *	it and complete it locally with IOERR_ABORT_REQUESTED;
 *   2. the chip queue (fc_table) — issue an ABTS or close-XRI IOCB;
 *   3. nowhere — retry up to 5 passes (if sleep), then force completion.
 * In all cases the routine then waits/polls until the packet has been
 * completed back before returning, as the ULP requires.
 *
 * Returns FC_UNBOUND, FC_OFFLINE, FC_FAILURE or FC_SUCCESS (with
 * ULP_PATCH5 enabled, FC_FAILURE is always returned — see bottom).
 */
/*ARGSUSED*/
extern int
emlxs_fca_pkt_abort(opaque_t fca_port_handle, fc_packet_t *pkt, int32_t sleep)
{
	emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;

	emlxs_buf_t *sbp;
	NODELIST *nlp;
	NODELIST *prev_nlp;
	uint8_t channelno;
	CHANNEL *cp;
	clock_t pkt_timeout;
	clock_t timer;
	clock_t time;
	int32_t pkt_ret;
	IOCBQ *iocbq;
	IOCBQ *next;
	IOCBQ *prev;
	uint32_t found;
	uint32_t pass = 0;	/* retry pass counter for the txq search */

	sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
	iocbq = &sbp->iocbq;
	nlp = (NODELIST *)sbp->node;
	cp = (CHANNEL *)sbp->channel;
	channelno = (cp) ? cp->channelno : 0;

	if (!(port->flag & EMLXS_INI_BOUND)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Port not bound.");
		return (FC_UNBOUND);
	}

	if (!(hba->flag & FC_ONLINE_MODE)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Adapter offline.");
		return (FC_OFFLINE);
	}

	/* ULP requires the aborted pkt to be completed */
	/* back to ULP before returning from this call. */
	/* SUN knows of problems with this call so they suggested that we */
	/* always return a FC_FAILURE for this call, until it is worked out. */

	/* Check if pkt is no good */
	if (!(sbp->pkt_flags & PACKET_VALID) ||
	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Bad sbp. flags=%x", sbp->pkt_flags);
		return (FC_FAILURE);
	}

	/* Tag this now */
	/* This will prevent any thread except ours from completing it */
	mutex_enter(&sbp->mtx);

	/* Check again if we still own this */
	if (!(sbp->pkt_flags & PACKET_VALID) ||
	    (sbp->pkt_flags & PACKET_ULP_OWNED)) {
		mutex_exit(&sbp->mtx);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Bad sbp. flags=%x", sbp->pkt_flags);
		return (FC_FAILURE);
	}

	/* Check if pkt is a real polled command */
	if (!(sbp->pkt_flags & PACKET_IN_ABORT) &&
	    (sbp->pkt_flags & PACKET_POLLED)) {
		mutex_exit(&sbp->mtx);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
		    "Attempting to abort a polled I/O. sbp=%p flags=%x", sbp,
		    sbp->pkt_flags);
		return (FC_FAILURE);
	}

	/* From here on this thread polls the pkt to completion itself */
	sbp->pkt_flags |= PACKET_POLLED;
	sbp->pkt_flags |= PACKET_IN_ABORT;

	if (sbp->pkt_flags & (PACKET_IN_COMPLETION | PACKET_IN_FLUSH |
	    PACKET_IN_TIMEOUT)) {
		mutex_exit(&sbp->mtx);

		/* Do nothing, pkt already on its way out */
		goto done;
	}

	mutex_exit(&sbp->mtx);

begin:
	pass++;

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	if (sbp->pkt_flags & PACKET_IN_TXQ) {
		/* Find it on the queue */
		found = 0;
		if (iocbq->flag & IOCB_PRIORITY) {
			/* Search the priority queue */
			prev = NULL;
			next = (IOCBQ *) nlp->nlp_ptx[channelno].q_first;

			while (next) {
				if (next == iocbq) {
					/* Remove it */
					if (prev) {
						prev->next = iocbq->next;
					}

					/* Fix up q_last/q_first if the */
					/* removed entry was at either end */
					if (nlp->nlp_ptx[channelno].q_last ==
					    (void *)iocbq) {
						nlp->nlp_ptx[channelno].q_last =
						    (void *)prev;
					}

					if (nlp->nlp_ptx[channelno].q_first ==
					    (void *)iocbq) {
						nlp->nlp_ptx[channelno].
						    q_first =
						    (void *)iocbq->next;
					}

					nlp->nlp_ptx[channelno].q_cnt--;
					iocbq->next = NULL;
					found = 1;
					break;
				}

				prev = next;
				next = next->next;
			}
		} else {
			/* Search the normal queue */
			prev = NULL;
			next = (IOCBQ *) nlp->nlp_tx[channelno].q_first;

			while (next) {
				if (next == iocbq) {
					/* Remove it */
					if (prev) {
						prev->next = iocbq->next;
					}

					/* Fix up q_last/q_first if the */
					/* removed entry was at either end */
					if (nlp->nlp_tx[channelno].q_last ==
					    (void *)iocbq) {
						nlp->nlp_tx[channelno].q_last =
						    (void *)prev;
					}

					if (nlp->nlp_tx[channelno].q_first ==
					    (void *)iocbq) {
						nlp->nlp_tx[channelno].q_first =
						    (void *)iocbq->next;
					}

					nlp->nlp_tx[channelno].q_cnt--;
					iocbq->next = NULL;
					found = 1;
					break;
				}

				prev = next;
				next = (IOCBQ *) next->next;
			}
		}

		if (!found) {
			mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_abort_failed_msg,
			    "I/O not found in driver. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
			goto done;
		}

		/* Check if node still needs servicing */
		if ((nlp->nlp_ptx[channelno].q_first) ||
		    (nlp->nlp_tx[channelno].q_first &&
		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {

			/*
			 * If this is the base node,
			 * then don't shift the pointers
			 */
			/* We want to drain the base node before moving on */
			if (!nlp->nlp_base) {
				/* Just shift channel queue */
				/* pointers to next node */
				cp->nodeq.q_last = (void *) nlp;
				cp->nodeq.q_first = nlp->nlp_next[channelno];
			}
		} else {
			/* Remove node from channel queue */

			/* If this is the only node on list */
			if (cp->nodeq.q_first == (void *)nlp &&
			    cp->nodeq.q_last == (void *)nlp) {
				cp->nodeq.q_last = NULL;
				cp->nodeq.q_first = NULL;
				cp->nodeq.q_cnt = 0;
			} else if (cp->nodeq.q_first == (void *)nlp) {
				/* Node is at the head of the circular list */
				cp->nodeq.q_first = nlp->nlp_next[channelno];
				((NODELIST *) cp->nodeq.q_last)->
				    nlp_next[channelno] = cp->nodeq.q_first;
				cp->nodeq.q_cnt--;
			} else {
				/*
				 * This is a little more difficult find the
				 * previous node in the circular channel queue
				 */
				prev_nlp = nlp;
				while (prev_nlp->nlp_next[channelno] != nlp) {
					prev_nlp = prev_nlp->
					    nlp_next[channelno];
				}

				prev_nlp->nlp_next[channelno] =
				    nlp->nlp_next[channelno];

				if (cp->nodeq.q_last == (void *)nlp) {
					cp->nodeq.q_last = (void *)prev_nlp;
				}
				cp->nodeq.q_cnt--;

			}

			/* Clear node */
			nlp->nlp_next[channelno] = NULL;
		}

		/* Free the ULPIOTAG and the bmp */
		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
		} else {
			(void) emlxs_unregister_pkt(cp, sbp->iotag, 1);
		}


		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

		/* Complete locally; the pkt never reached the chip */
		emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_ABORT_REQUESTED, 1);

		goto done;
	}

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);


	/* Check the chip queue */
	mutex_enter(&EMLXS_FCTAB_LOCK);

	if ((sbp->pkt_flags & PACKET_IN_CHIPQ) &&
	    !(sbp->pkt_flags & PACKET_XRI_CLOSED) &&
	    (sbp == hba->fc_table[sbp->iotag])) {

		/* Create the abort IOCB */
		if (hba->state >= FC_LINK_UP) {
			/* Link is up: send a real ABTS on the wire */
			iocbq =
			    emlxs_create_abort_xri_cn(port, sbp->node,
			    sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);

			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_XRI_CLOSED;
			sbp->ticks =
			    hba->timer_tics + (4 * hba->fc_ratov) + 10;
			sbp->abort_attempts++;
			mutex_exit(&sbp->mtx);
		} else {
			/* Link is down: just close the exchange locally */
			iocbq =
			    emlxs_create_close_xri_cn(port, sbp->node,
			    sbp->iotag, cp);

			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_XRI_CLOSED;
			sbp->ticks = hba->timer_tics + 30;
			sbp->abort_attempts++;
			mutex_exit(&sbp->mtx);
		}

		mutex_exit(&EMLXS_FCTAB_LOCK);

		/* Send this iocbq */
		if (iocbq) {
			EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
			iocbq = NULL;
		}

		goto done;
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Pkt was not on any queues */

	/* Check again if we still own this */
	if (!(sbp->pkt_flags & PACKET_VALID) ||
	    (sbp->pkt_flags &
	    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
	    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
		goto done;
	}

	if (!sleep) {
		return (FC_FAILURE);
	}

	/* Apparently the pkt was not found. Let's delay and try again */
	if (pass < 5) {
		delay(drv_usectohz(5000000));	/* 5 seconds */

		/* Check again if we still own this */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags &
		    (PACKET_ULP_OWNED | PACKET_IN_COMPLETION |
		    PACKET_IN_FLUSH | PACKET_IN_TIMEOUT))) {
			goto done;
		}

		goto begin;
	}

force_it:

	/* Force the completion now */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "Abort: Completing an IO thats not outstanding: %x", sbp->iotag);

	/* Now complete it */
	emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, IOERR_ABORT_REQUESTED,
	    1);

done:

	/* Now wait for the pkt to complete */
	if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
		/* Set thread timeout */
		pkt_timeout = emlxs_timeout(hba, 30);

		/* Check for panic situation */
		if (ddi_in_panic()) {

			/*
			 * In panic situations there will be one thread with no
			 * interrrupts (hard or soft) and no timers
			 */

			/*
			 * We must manually poll everything in this thread
			 * to keep the driver going.
			 */

			/* Keep polling the chip until our IO is completed */
			(void) drv_getparm(LBOLT, &time);
			timer = time + drv_usectohz(1000000);
			while ((time < pkt_timeout) &&
			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
				EMLXS_SLI_POLL_INTR(hba);
				(void) drv_getparm(LBOLT, &time);

				/* Trigger timer checks periodically */
				if (time >= timer) {
					emlxs_timer_checks(hba);
					timer = time + drv_usectohz(1000000);
				}
			}
		} else {
			/* Wait for IO completion or pkt_timeout */
			mutex_enter(&EMLXS_PKT_LOCK);
			pkt_ret = 0;
			while ((pkt_ret != -1) &&
			    !(sbp->pkt_flags & PACKET_COMPLETED)) {
				pkt_ret =
				    cv_timedwait(&EMLXS_PKT_CV,
				    &EMLXS_PKT_LOCK, pkt_timeout);
			}
			mutex_exit(&EMLXS_PKT_LOCK);
		}

		/* Check if pkt_timeout occured. This is not good. */
		/* Something happened to our IO. */
		if (!(sbp->pkt_flags & PACKET_COMPLETED)) {
			/* Force the completion now */
			goto force_it;
		}
	}
#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_unswap_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

	/* Check again if we still own this */
	if ((sbp->pkt_flags & PACKET_VALID) &&
	    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
		mutex_enter(&sbp->mtx);
		if ((sbp->pkt_flags & PACKET_VALID) &&
		    !(sbp->pkt_flags & PACKET_ULP_OWNED)) {
			/* Hand ownership of the pkt back to the ULP */
			sbp->pkt_flags |= PACKET_ULP_OWNED;
		}
		mutex_exit(&sbp->mtx);
	}

#ifdef ULP_PATCH5
	/* See the SUN note near the top: report failure unconditionally */
	if (cfg[CFG_ENABLE_PATCH].current & ULP_PATCH5) {
		return (FC_FAILURE);
	}
#endif /* ULP_PATCH5 */

	return (FC_SUCCESS);

} /* emlxs_fca_pkt_abort() */
3828
3829
3830 static void
3831 emlxs_abort_all(emlxs_hba_t *hba, uint32_t *tx, uint32_t *chip)
3832 {
3833 emlxs_port_t *port = &PPORT;
3834 fc_packet_t *pkt;
3835 emlxs_buf_t *sbp;
3836 uint32_t i;
3837 uint32_t flg;
3838 uint32_t rc;
3839 uint32_t txcnt;
3840 uint32_t chipcnt;
3841
3842 txcnt = 0;
3843 chipcnt = 0;
3844
3845 mutex_enter(&EMLXS_FCTAB_LOCK);
3846 for (i = 0; i < hba->max_iotag; i++) {
3847 sbp = hba->fc_table[i];
3848 if (sbp == NULL || sbp == STALE_PACKET) {
3849 continue;
3850 }
3851 flg = (sbp->pkt_flags & PACKET_IN_CHIPQ);
3852 pkt = PRIV2PKT(sbp);
3853 mutex_exit(&EMLXS_FCTAB_LOCK);
3854 rc = emlxs_fca_pkt_abort(port, pkt, 0);
3855 if (rc == FC_SUCCESS) {
3856 if (flg) {
3857 chipcnt++;
3858 } else {
3859 txcnt++;
3860 }
3861 }
3862 mutex_enter(&EMLXS_FCTAB_LOCK);
3863 }
3864 mutex_exit(&EMLXS_FCTAB_LOCK);
3865 *tx = txcnt;
3866 *chip = chipcnt;
3867 } /* emlxs_abort_all() */
3868
3869
/*
 * Perform a reset operation on behalf of a port.
 *
 * port	The requesting port.
 * cmd	One of FC_FCA_LINK_RESET, FC_FCA_CORE, FC_FCA_RESET,
 *	FC_FCA_RESET_CORE, EMLXS_DFC_RESET_ALL or
 *	EMLXS_DFC_RESET_ALL_FORCE_DUMP.
 *
 * Reset-in-progress state is tracked in hba->reset_state /
 * hba->reset_request under EMLXS_PORT_LOCK; overlapping resets are
 * rejected with FC_FAILURE.
 *
 * Returns FC_SUCCESS, FC_FAILURE, or FC_DEVICE_BUSY.
 */
extern int32_t
emlxs_reset(emlxs_port_t *port, uint32_t cmd)
{
	emlxs_hba_t *hba = HBA;
	int rval;
	int i = 0;
	int ret;
	clock_t timeout;

	switch (cmd) {
	case FC_FCA_LINK_RESET:

		mutex_enter(&EMLXS_PORT_LOCK);
		/* Nothing to reset if we are offline or the link is down */
		if (!(hba->flag & FC_ONLINE_MODE) ||
		    (hba->state <= FC_LINK_DOWN)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (FC_SUCCESS);
		}

		if (hba->reset_state &
		    (FC_LINK_RESET_INP | FC_PORT_RESET_INP)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (FC_FAILURE);
		}

		hba->reset_state |= FC_LINK_RESET_INP;
		hba->reset_request |= FC_LINK_RESET;
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "Resetting Link.");

		/* Arm the link-up wait flag before triggering the reset */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		hba->linkup_wait_flag = TRUE;
		mutex_exit(&EMLXS_LINKUP_LOCK);

		if (emlxs_reset_link(hba, 1, 1)) {
			/* Reset failed: undo the wait flag and INP state */
			mutex_enter(&EMLXS_LINKUP_LOCK);
			hba->linkup_wait_flag = FALSE;
			mutex_exit(&EMLXS_LINKUP_LOCK);

			mutex_enter(&EMLXS_PORT_LOCK);
			hba->reset_state &= ~FC_LINK_RESET_INP;
			hba->reset_request &= ~FC_LINK_RESET;
			mutex_exit(&EMLXS_PORT_LOCK);

			return (FC_FAILURE);
		}

		/* Wait (up to 60s) for the link to come back up */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		timeout = emlxs_timeout(hba, 60);
		ret = 0;
		while ((ret != -1) && (hba->linkup_wait_flag == TRUE)) {
			ret =
			    cv_timedwait(&EMLXS_LINKUP_CV, &EMLXS_LINKUP_LOCK,
			    timeout);
		}

		hba->linkup_wait_flag = FALSE;
		mutex_exit(&EMLXS_LINKUP_LOCK);

		mutex_enter(&EMLXS_PORT_LOCK);
		hba->reset_state &= ~FC_LINK_RESET_INP;
		hba->reset_request &= ~FC_LINK_RESET;
		mutex_exit(&EMLXS_PORT_LOCK);

		/* cv_timedwait returns -1 on timeout */
		if (ret == -1) {
			return (FC_FAILURE);
		}

		return (FC_SUCCESS);

	case FC_FCA_CORE:
#ifdef DUMP_SUPPORT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "Dumping Core.");

		/* Schedule a USER dump */
		emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);

		/* Wait for dump to complete */
		emlxs_dump_wait(hba);

		return (FC_SUCCESS);
#endif /* DUMP_SUPPORT */
		/* NOTE: without DUMP_SUPPORT this case falls through */
		/* to FC_FCA_RESET below — presumably intentional */

	case FC_FCA_RESET:
	case FC_FCA_RESET_CORE:

		mutex_enter(&EMLXS_PORT_LOCK);
		if (hba->reset_state & FC_PORT_RESET_INP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (FC_FAILURE);
		}

		hba->reset_state |= FC_PORT_RESET_INP;
		hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);

		/* wait for any pending link resets to complete */
		/* (poll up to ~1000 x 1ms, dropping the lock each pass) */
		while ((hba->reset_state & FC_LINK_RESET_INP) &&
		    (i++ < 1000)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			delay(drv_usectohz(1000));
			mutex_enter(&EMLXS_PORT_LOCK);
		}

		if (hba->reset_state & FC_LINK_RESET_INP) {
			/* Link reset never finished; give up */
			hba->reset_state &= ~FC_PORT_RESET_INP;
			hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
			mutex_exit(&EMLXS_PORT_LOCK);
			return (FC_FAILURE);
		}
		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "Resetting Adapter.");

		rval = FC_SUCCESS;

		/* Bounce the adapter: offline then back online */
		if (emlxs_offline(hba, 0) == 0) {
			(void) emlxs_online(hba);
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "Adapter reset failed. Device busy.");

			rval = FC_DEVICE_BUSY;
		}

		mutex_enter(&EMLXS_PORT_LOCK);
		hba->reset_state &= ~FC_PORT_RESET_INP;
		hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
		mutex_exit(&EMLXS_PORT_LOCK);

		return (rval);

	case EMLXS_DFC_RESET_ALL:
	case EMLXS_DFC_RESET_ALL_FORCE_DUMP:

		mutex_enter(&EMLXS_PORT_LOCK);
		if (hba->reset_state & FC_PORT_RESET_INP) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (FC_FAILURE);
		}

		hba->reset_state |= FC_PORT_RESET_INP;
		hba->reset_request |= (FC_PORT_RESET | FC_LINK_RESET);

		/* wait for any pending link resets to complete */
		while ((hba->reset_state & FC_LINK_RESET_INP) &&
		    (i++ < 1000)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			delay(drv_usectohz(1000));
			mutex_enter(&EMLXS_PORT_LOCK);
		}

		if (hba->reset_state & FC_LINK_RESET_INP) {
			hba->reset_state &= ~FC_PORT_RESET_INP;
			hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
			mutex_exit(&EMLXS_PORT_LOCK);
			return (FC_FAILURE);
		}
		mutex_exit(&EMLXS_PORT_LOCK);

		rval = FC_SUCCESS;

		/* Second arg selects whether a firmware dump is forced */
		if (cmd == EMLXS_DFC_RESET_ALL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "Resetting Adapter (All Firmware Reset).");

			emlxs_sli4_hba_reset_all(hba, 0);
		} else {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "Resetting Adapter "
			    "(All Firmware Reset, Force Dump).");

			emlxs_sli4_hba_reset_all(hba, 1);
		}

		mutex_enter(&EMLXS_PORT_LOCK);
		hba->reset_state &= ~FC_PORT_RESET_INP;
		hba->reset_request &= ~(FC_PORT_RESET | FC_LINK_RESET);
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Wait for the timer thread to detect the error condition */
		delay(drv_usectohz(1000000));

		/* Wait for the HBA to re-initialize */
		/* (poll up to ~30 x 1s for FC_ONLINE_MODE) */
		i = 0;
		mutex_enter(&EMLXS_PORT_LOCK);
		while (!(hba->flag & FC_ONLINE_MODE) && (i++ < 30)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			delay(drv_usectohz(1000000));
			mutex_enter(&EMLXS_PORT_LOCK);
		}

		if (!(hba->flag & FC_ONLINE_MODE)) {
			rval = FC_FAILURE;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		return (rval);

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "reset: Unknown command. cmd=%x", cmd);

		break;
	}

	return (FC_FAILURE);

} /* emlxs_reset() */
4083
4084
4085 extern int32_t
4086 emlxs_fca_reset(opaque_t fca_port_handle, uint32_t cmd)
4087 {
4088 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
4089 emlxs_hba_t *hba = HBA;
4090 int32_t rval;
4091
4092 if (port->mode != MODE_INITIATOR) {
4093 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4094 "fca_reset failed. Port is not in initiator mode.");
4095
4096 return (FC_FAILURE);
4097 }
4098
4099 if (!(port->flag & EMLXS_INI_BOUND)) {
4100 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4101 "fca_reset: Port not bound.");
4102
4103 return (FC_UNBOUND);
4104 }
4105
4106 switch (cmd) {
4107 case FC_FCA_LINK_RESET:
4108 if (hba->fw_flag & FW_UPDATE_NEEDED) {
4109 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4110 "fca_reset: FC_FCA_LINK_RESET -> FC_FCA_RESET");
4111 cmd = FC_FCA_RESET;
4112 } else {
4113 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4114 "fca_reset: FC_FCA_LINK_RESET");
4115 }
4116 break;
4117
4118 case FC_FCA_CORE:
4119 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4120 "fca_reset: FC_FCA_CORE");
4121 break;
4122
4123 case FC_FCA_RESET:
4124 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4125 "fca_reset: FC_FCA_RESET");
4126 break;
4127
4128 case FC_FCA_RESET_CORE:
4129 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4130 "fca_reset: FC_FCA_RESET_CORE");
4131 break;
4132
4133 default:
4134 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4135 "fca_reset: Unknown command. cmd=%x", cmd);
4136 return (FC_FAILURE);
4137 }
4138
4139 if (hba->fw_flag & FW_UPDATE_NEEDED) {
4140 hba->fw_flag |= FW_UPDATE_KERNEL;
4141 }
4142
4143 rval = emlxs_reset(port, cmd);
4144
4145 return (rval);
4146
4147 } /* emlxs_fca_reset() */
4148
4149
4150 extern int
4151 emlxs_fca_port_manage(opaque_t fca_port_handle, fc_fca_pm_t *pm)
4152 {
4153 emlxs_port_t *port = (emlxs_port_t *)fca_port_handle;
4154 emlxs_hba_t *hba = HBA;
4155 int32_t ret;
4156 emlxs_vpd_t *vpd = &VPD;
4157
4158 ret = FC_SUCCESS;
4159
4160 #ifdef IDLE_TIMER
4161 emlxs_pm_busy_component(hba);
4162 #endif /* IDLE_TIMER */
4163
4164 switch (pm->pm_cmd_code) {
4165
4166 case FC_PORT_GET_FW_REV:
4167 {
4168 char buffer[128];
4169
4170 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4171 "fca_port_manage: FC_PORT_GET_FW_REV");
4172
4173 (void) snprintf(buffer, (sizeof (buffer)-1),
4174 "%s %s", hba->model_info.model,
4175 vpd->fw_version);
4176 bzero(pm->pm_data_buf, pm->pm_data_len);
4177
4178 if (pm->pm_data_len < strlen(buffer) + 1) {
4179 ret = FC_NOMEM;
4180
4181 break;
4182 }
4183
4184 (void) strncpy(pm->pm_data_buf, buffer,
4185 (pm->pm_data_len-1));
4186 break;
4187 }
4188
4189 case FC_PORT_GET_FCODE_REV:
4190 {
4191 char buffer[128];
4192
4193 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4194 "fca_port_manage: FC_PORT_GET_FCODE_REV");
4195
4196 /* Force update here just to be sure */
4197 emlxs_get_fcode_version(hba);
4198
4199 (void) snprintf(buffer, (sizeof (buffer)-1),
4200 "%s %s", hba->model_info.model,
4201 vpd->fcode_version);
4202 bzero(pm->pm_data_buf, pm->pm_data_len);
4203
4204 if (pm->pm_data_len < strlen(buffer) + 1) {
4205 ret = FC_NOMEM;
4206 break;
4207 }
4208
4209 (void) strncpy(pm->pm_data_buf, buffer,
4210 (pm->pm_data_len-1));
4211 break;
4212 }
4213
4214 case FC_PORT_GET_DUMP_SIZE:
4215 {
4216 #ifdef DUMP_SUPPORT
4217 uint32_t dump_size = 0;
4218
4219 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4220 "fca_port_manage: FC_PORT_GET_DUMP_SIZE");
4221
4222 if (pm->pm_data_len < sizeof (uint32_t)) {
4223 ret = FC_NOMEM;
4224 break;
4225 }
4226
4227 (void) emlxs_get_dump(hba, NULL, &dump_size);
4228
4229 *((uint32_t *)pm->pm_data_buf) = dump_size;
4230
4231 #else
4232 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4233 "fca_port_manage: FC_PORT_GET_DUMP_SIZE unsupported.");
4234
4235 #endif /* DUMP_SUPPORT */
4236
4237 break;
4238 }
4239
4240 case FC_PORT_GET_DUMP:
4241 {
4242 #ifdef DUMP_SUPPORT
4243 uint32_t dump_size = 0;
4244
4245 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4246 "fca_port_manage: FC_PORT_GET_DUMP");
4247
4248 (void) emlxs_get_dump(hba, NULL, &dump_size);
4249
4250 if (pm->pm_data_len < dump_size) {
4251 ret = FC_NOMEM;
4252 break;
4253 }
4254
4255 (void) emlxs_get_dump(hba, (uint8_t *)pm->pm_data_buf,
4256 (uint32_t *)&dump_size);
4257 #else
4258 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4259 "fca_port_manage: FC_PORT_GET_DUMP unsupported.");
4260
4261 #endif /* DUMP_SUPPORT */
4262
4263 break;
4264 }
4265
4266 case FC_PORT_FORCE_DUMP:
4267 {
4268 #ifdef DUMP_SUPPORT
4269 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4270 "fca_port_manage: FC_PORT_FORCE_DUMP");
4271
4272 /* Schedule a USER dump */
4273 emlxs_dump(hba, EMLXS_USER_DUMP, 0, 0);
4274
4275 /* Wait for dump to complete */
4276 emlxs_dump_wait(hba);
4277 #else
4278 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4279 "fca_port_manage: FC_PORT_FORCE_DUMP unsupported.");
4280
4281 #endif /* DUMP_SUPPORT */
4282 break;
4283 }
4284
4285 case FC_PORT_LINK_STATE:
4286 {
4287 uint32_t *link_state;
4288
4289 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4290 "fca_port_manage: FC_PORT_LINK_STATE");
4291
4292 if (pm->pm_stat_len != sizeof (*link_state)) {
4293 ret = FC_NOMEM;
4294 break;
4295 }
4296
4297 if (pm->pm_cmd_buf != NULL) {
4298 /*
4299 * Can't look beyond the FCA port.
4300 */
4301 ret = FC_INVALID_REQUEST;
4302 break;
4303 }
4304
4305 link_state = (uint32_t *)pm->pm_stat_buf;
4306
4307 /* Set the state */
4308 if (hba->state >= FC_LINK_UP) {
4309 /* Check for loop topology */
4310 if (hba->topology == TOPOLOGY_LOOP) {
4311 *link_state = FC_STATE_LOOP;
4312 } else {
4313 *link_state = FC_STATE_ONLINE;
4314 }
4315
4316 /* Set the link speed */
4317 switch (hba->linkspeed) {
4318 case LA_2GHZ_LINK:
4319 *link_state |= FC_STATE_2GBIT_SPEED;
4320 break;
4321 case LA_4GHZ_LINK:
4322 *link_state |= FC_STATE_4GBIT_SPEED;
4323 break;
4324 case LA_8GHZ_LINK:
4325 *link_state |= FC_STATE_8GBIT_SPEED;
4326 break;
4327 case LA_10GHZ_LINK:
4328 *link_state |= FC_STATE_10GBIT_SPEED;
4329 break;
4330 case LA_16GHZ_LINK:
4331 *link_state |= FC_STATE_16GBIT_SPEED;
4332 break;
4333 case LA_32GHZ_LINK:
4334 *link_state |= FC_STATE_32GBIT_SPEED;
4335 break;
4336 case LA_1GHZ_LINK:
4337 default:
4338 *link_state |= FC_STATE_1GBIT_SPEED;
4339 break;
4340 }
4341 } else {
4342 *link_state = FC_STATE_OFFLINE;
4343 }
4344
4345 break;
4346 }
4347
4348
4349 case FC_PORT_ERR_STATS:
4350 case FC_PORT_RLS:
4351 {
4352 MAILBOXQ *mbq;
4353 MAILBOX *mb;
4354 fc_rls_acc_t *bp;
4355
4356 if (!(hba->flag & FC_ONLINE_MODE)) {
4357 return (FC_OFFLINE);
4358 }
4359 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4360 "fca_port_manage: FC_PORT_RLS / FC_PORT_ERR_STATS");
4361
4362 if (pm->pm_data_len < sizeof (fc_rls_acc_t)) {
4363 ret = FC_NOMEM;
4364 break;
4365 }
4366
4367 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4368 MEM_MBOX)) == 0) {
4369 ret = FC_NOMEM;
4370 break;
4371 }
4372 mb = (MAILBOX *)mbq;
4373
4374 emlxs_mb_read_lnk_stat(hba, mbq);
4375 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
4376 != MBX_SUCCESS) {
4377 ret = FC_PBUSY;
4378 } else {
4379 bp = (fc_rls_acc_t *)pm->pm_data_buf;
4380
4381 bp->rls_link_fail = mb->un.varRdLnk.linkFailureCnt;
4382 bp->rls_sync_loss = mb->un.varRdLnk.lossSyncCnt;
4383 bp->rls_sig_loss = mb->un.varRdLnk.lossSignalCnt;
4384 bp->rls_prim_seq_err = mb->un.varRdLnk.primSeqErrCnt;
4385 bp->rls_invalid_word =
4386 mb->un.varRdLnk.invalidXmitWord;
4387 bp->rls_invalid_crc = mb->un.varRdLnk.crcCnt;
4388 }
4389
4390 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4391 break;
4392 }
4393
4394 case FC_PORT_DOWNLOAD_FW:
4395 if (!(hba->flag & FC_ONLINE_MODE)) {
4396 return (FC_OFFLINE);
4397 }
4398 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4399 "fca_port_manage: FC_PORT_DOWNLOAD_FW");
4400 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4401 pm->pm_data_len, 1);
4402 break;
4403
4404 case FC_PORT_DOWNLOAD_FCODE:
4405 if (!(hba->flag & FC_ONLINE_MODE)) {
4406 return (FC_OFFLINE);
4407 }
4408 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4409 "fca_port_manage: FC_PORT_DOWNLOAD_FCODE");
4410 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4411 pm->pm_data_len, 1);
4412 break;
4413
4414 case FC_PORT_DIAG:
4415 {
4416 uint32_t errno = 0;
4417 uint32_t did = 0;
4418 uint32_t pattern = 0;
4419
4420 switch (pm->pm_cmd_flags) {
4421 case EMLXS_DIAG_BIU:
4422
4423 if (!(hba->flag & FC_ONLINE_MODE)) {
4424 return (FC_OFFLINE);
4425 }
4426 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4427 "fca_port_manage: DIAG_BIU");
4428
4429 if (pm->pm_data_len) {
4430 pattern = *((uint32_t *)pm->pm_data_buf);
4431 }
4432
4433 errno = emlxs_diag_biu_run(hba, pattern);
4434
4435 if (pm->pm_stat_len == sizeof (errno)) {
4436 *(int *)pm->pm_stat_buf = errno;
4437 }
4438
4439 break;
4440
4441
4442 case EMLXS_DIAG_POST:
4443
4444 if (!(hba->flag & FC_ONLINE_MODE)) {
4445 return (FC_OFFLINE);
4446 }
4447 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4448 "fca_port_manage: DIAG_POST");
4449
4450 errno = emlxs_diag_post_run(hba);
4451
4452 if (pm->pm_stat_len == sizeof (errno)) {
4453 *(int *)pm->pm_stat_buf = errno;
4454 }
4455
4456 break;
4457
4458
4459 case EMLXS_DIAG_ECHO:
4460
4461 if (!(hba->flag & FC_ONLINE_MODE)) {
4462 return (FC_OFFLINE);
4463 }
4464 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4465 "fca_port_manage: DIAG_ECHO");
4466
4467 if (pm->pm_cmd_len != sizeof (uint32_t)) {
4468 ret = FC_INVALID_REQUEST;
4469 break;
4470 }
4471
4472 did = *((uint32_t *)pm->pm_cmd_buf);
4473
4474 if (pm->pm_data_len) {
4475 pattern = *((uint32_t *)pm->pm_data_buf);
4476 }
4477
4478 errno = emlxs_diag_echo_run(port, did, pattern);
4479
4480 if (pm->pm_stat_len == sizeof (errno)) {
4481 *(int *)pm->pm_stat_buf = errno;
4482 }
4483
4484 break;
4485
4486
		case EMLXS_PARM_GET_NUM:
		{
			uint32_t *num;
			emlxs_config_t *cfg;
			uint32_t i;
			uint32_t count;

			/* Report how many non-hidden config parameters exist */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: PARM_GET_NUM");

			/* Response buffer must hold one 32-bit count */
			if (pm->pm_stat_len < sizeof (uint32_t)) {
				ret = FC_NOMEM;
				break;
			}

			num = (uint32_t *)pm->pm_stat_buf;
			count = 0;
			cfg = &CFG;
			/* Count entries not flagged PARM_HIDDEN */
			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
				if (!(cfg->flags & PARM_HIDDEN)) {
					count++;
				}

			}

			*num = count;

			break;
		}
4515
		case EMLXS_PARM_GET_LIST:
		{
			emlxs_parm_t *parm;
			emlxs_config_t *cfg;
			uint32_t i;
			uint32_t max_count;

			/*
			 * Fill the response buffer with descriptors for as
			 * many non-hidden config parameters as will fit.
			 */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: PARM_GET_LIST");

			/* Need room for at least one descriptor */
			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
				ret = FC_NOMEM;
				break;
			}

			/* Whole descriptors that fit in the response buffer */
			max_count = pm->pm_stat_len / sizeof (emlxs_parm_t);

			parm = (emlxs_parm_t *)pm->pm_stat_buf;
			cfg = &CFG;
			for (i = 0; i < NUM_CFG_PARAM && max_count; i++,
			    cfg++) {
				if (!(cfg->flags & PARM_HIDDEN)) {
					/*
					 * NOTE(review): strncpy with size-1
					 * does not NUL-terminate when the
					 * source fills the field; presumably
					 * the caller's buffer is zeroed -
					 * confirm.
					 */
					(void) strncpy(parm->label, cfg->string,
					    (sizeof (parm->label)-1));
					parm->min = cfg->low;
					parm->max = cfg->hi;
					parm->def = cfg->def;
					parm->current = cfg->current;
					parm->flags = cfg->flags;
					(void) strncpy(parm->help, cfg->help,
					    (sizeof (parm->help)-1));
					parm++;
					max_count--;
				}
			}

			break;
		}
4554
		case EMLXS_PARM_GET:
		{
			emlxs_parm_t *parm_in;
			emlxs_parm_t *parm_out;
			emlxs_config_t *cfg;
			uint32_t i;
			uint32_t len;

			/* Request buffer must hold one parameter descriptor */
			if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: PARM_GET. "
				    "inbuf too small.");

				ret = FC_BADCMD;
				break;
			}

			/* Response buffer receives the matched descriptor */
			if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sfs_debug_msg,
				    "fca_port_manage: PARM_GET. "
				    "outbuf too small");

				ret = FC_BADCMD;
				break;
			}

			parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
			parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
			len = strlen(parm_in->label);
			cfg = &CFG;
			/* Default: no config parameter matched the label */
			ret = FC_BADOBJECT;

			/* Log requested value in both hex and decimal */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: PARM_GET: %s=0x%x,%d",
			    parm_in->label, parm_in->current,
			    parm_in->current);

			/* Linear search of the config table by exact label */
			for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
				if (len == strlen(cfg->string) &&
				    (strcmp(parm_in->label,
				    cfg->string) == 0)) {
					(void) strncpy(parm_out->label,
					    cfg->string,
					    (sizeof (parm_out->label)-1));
					parm_out->min = cfg->low;
					parm_out->max = cfg->hi;
					parm_out->def = cfg->def;
					parm_out->current = cfg->current;
					parm_out->flags = cfg->flags;
					(void) strncpy(parm_out->help,
					    cfg->help,
					    (sizeof (parm_out->help)-1));

					ret = FC_SUCCESS;
					break;
				}
			}

			break;
		}
4617
4618 case EMLXS_PARM_SET:
4619 {
4620 emlxs_parm_t *parm_in;
4621 emlxs_parm_t *parm_out;
4622 emlxs_config_t *cfg;
4623 uint32_t i;
4624 uint32_t len;
4625
4626 if (pm->pm_cmd_len < sizeof (emlxs_parm_t)) {
4627 EMLXS_MSGF(EMLXS_CONTEXT,
4628 &emlxs_sfs_debug_msg,
4629 "fca_port_manage: PARM_GET. "
4630 "inbuf too small.");
4631
4632 ret = FC_BADCMD;
4633 break;
4634 }
4635
4636 if (pm->pm_stat_len < sizeof (emlxs_parm_t)) {
4637 EMLXS_MSGF(EMLXS_CONTEXT,
4638 &emlxs_sfs_debug_msg,
4639 "fca_port_manage: PARM_GET. "
4640 "outbuf too small");
4641 ret = FC_BADCMD;
4642 break;
4643 }
4644
4645 parm_in = (emlxs_parm_t *)pm->pm_cmd_buf;
4646 parm_out = (emlxs_parm_t *)pm->pm_stat_buf;
4647 len = strlen(parm_in->label);
4648 cfg = &CFG;
4649 ret = FC_BADOBJECT;
4650
4651 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4652 "fca_port_manage: PARM_SET: %s=0x%x,%d",
4653 parm_in->label, parm_in->current,
4654 parm_in->current);
4655
4656 for (i = 0; i < NUM_CFG_PARAM; i++, cfg++) {
4657 /* Find matching parameter string */
4658 if (len == strlen(cfg->string) &&
4659 (strcmp(parm_in->label,
4660 cfg->string) == 0)) {
4661 /* Attempt to update parameter */
4662 if (emlxs_set_parm(hba, i,
4663 parm_in->current) == FC_SUCCESS) {
4664 (void) strncpy(parm_out->label,
4665 cfg->string,
4666 (sizeof (parm_out->label)-
4667 1));
4668 parm_out->min = cfg->low;
4669 parm_out->max = cfg->hi;
4670 parm_out->def = cfg->def;
4671 parm_out->current =
4672 cfg->current;
4673 parm_out->flags = cfg->flags;
4674 (void) strncpy(parm_out->help,
4675 cfg->help,
4676 (sizeof (parm_out->help)-
4677 1));
4678
4679 ret = FC_SUCCESS;
4680 }
4681
4682 break;
4683 }
4684 }
4685
4686 break;
4687 }
4688
		case EMLXS_LOG_GET:
		{
			emlxs_log_req_t *req;
			emlxs_log_resp_t *resp;
			uint32_t len;

			/* Check command size */
			if (pm->pm_cmd_len < sizeof (emlxs_log_req_t)) {
				ret = FC_BADCMD;
				break;
			}

			/* Get the request */
			req = (emlxs_log_req_t *)pm->pm_cmd_buf;

			/*
			 * Calculate the response length from the request.
			 * NOTE(review): req->count originates from the
			 * ioctl caller; an extreme count could wrap this
			 * 32-bit multiply - confirm an upper bound is
			 * enforced upstream.
			 */
			len = sizeof (emlxs_log_resp_t) +
			    (req->count * MAX_LOG_MSG_LENGTH);

			/* Check the response buffer length */
			if (pm->pm_stat_len < len) {
				ret = FC_BADCMD;
				break;
			}

			/* Get the response pointer */
			resp = (emlxs_log_resp_t *)pm->pm_stat_buf;

			/* Copy the requested log entries into the response */
			(void) emlxs_msg_log_get(hba, req, resp);

			ret = FC_SUCCESS;
			break;
		}
4723
		case EMLXS_GET_BOOT_REV:
		{
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: GET_BOOT_REV");

			/*
			 * NOTE(review): this size check does not account
			 * for the "<model> " prefix written below; snprintf
			 * still bounds the write, so the worst case is a
			 * truncated revision string, not an overflow.
			 */
			if (pm->pm_stat_len < strlen(vpd->boot_version)) {
				ret = FC_NOMEM;
				break;
			}

			/* Report "<model> <boot_version>", NUL terminated */
			bzero(pm->pm_stat_buf, pm->pm_stat_len);
			(void) snprintf(pm->pm_stat_buf, pm->pm_stat_len,
			    "%s %s", hba->model_info.model, vpd->boot_version);

			break;
		}
4740
4741 case EMLXS_DOWNLOAD_BOOT:
4742 if (!(hba->flag & FC_ONLINE_MODE)) {
4743 return (FC_OFFLINE);
4744 }
4745 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4746 "fca_port_manage: DOWNLOAD_BOOT");
4747
4748 ret = emlxs_fw_download(hba, pm->pm_data_buf,
4749 pm->pm_data_len, 1);
4750 break;
4751
4752 case EMLXS_DOWNLOAD_CFL:
4753 {
4754 uint32_t *buffer;
4755 uint32_t region;
4756 uint32_t length;
4757
4758 if (!(hba->flag & FC_ONLINE_MODE)) {
4759 return (FC_OFFLINE);
4760 }
4761
4762 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4763 "fca_port_manage: DOWNLOAD_CFL");
4764
4765 /* Extract the region number from the first word. */
4766 buffer = (uint32_t *)pm->pm_data_buf;
4767 region = *buffer++;
4768
4769 /* Adjust the image length for the header word */
4770 length = pm->pm_data_len - 4;
4771
4772 ret =
4773 emlxs_cfl_download(hba, region, (caddr_t)buffer,
4774 length);
4775 break;
4776 }
4777
4778 case EMLXS_VPD_GET:
4779 {
4780 emlxs_vpd_desc_t *vpd_out;
4781
4782 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4783 "fca_port_manage: VPD_GET");
4784
4785 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_t)) {
4786 ret = FC_BADCMD;
4787 break;
4788 }
4789
4790 vpd_out = (emlxs_vpd_desc_t *)pm->pm_stat_buf;
4791 bzero(vpd_out, pm->pm_stat_len);
4792
4793 (void) strncpy(vpd_out->id, vpd->id,
4794 (sizeof (vpd_out->id)-1));
4795 (void) strncpy(vpd_out->part_num, vpd->part_num,
4796 (sizeof (vpd_out->part_num)-1));
4797 (void) strncpy(vpd_out->eng_change, vpd->eng_change,
4798 (sizeof (vpd_out->eng_change)-1));
4799 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4800 (sizeof (vpd_out->manufacturer)-1));
4801 (void) strncpy(vpd_out->serial_num, vpd->serial_num,
4802 (sizeof (vpd_out->serial_num)-1));
4803 (void) strncpy(vpd_out->model, vpd->model,
4804 (sizeof (vpd_out->model)-1));
4805 (void) strncpy(vpd_out->model_desc, vpd->model_desc,
4806 (sizeof (vpd_out->model_desc)-1));
4807 (void) strncpy(vpd_out->port_num, vpd->port_num,
4808 (sizeof (vpd_out->port_num)-1));
4809 (void) strncpy(vpd_out->prog_types, vpd->prog_types,
4810 (sizeof (vpd_out->prog_types)-1));
4811
4812 ret = FC_SUCCESS;
4813
4814 break;
4815 }
4816
4817 case EMLXS_VPD_GET_V2:
4818 {
4819 emlxs_vpd_desc_v2_t *vpd_out;
4820
4821 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4822 "fca_port_manage: VPD_GET_V2");
4823
4824 if (pm->pm_stat_len < sizeof (emlxs_vpd_desc_v2_t)) {
4825 ret = FC_BADCMD;
4826 break;
4827 }
4828
4829 vpd_out = (emlxs_vpd_desc_v2_t *)pm->pm_stat_buf;
4830 bzero(vpd_out, pm->pm_stat_len);
4831
4832 (void) strncpy(vpd_out->id, vpd->id,
4833 (sizeof (vpd_out->id)-1));
4834 (void) strncpy(vpd_out->part_num, vpd->part_num,
4835 (sizeof (vpd_out->part_num)-1));
4836 (void) strncpy(vpd_out->eng_change, vpd->eng_change,
4837 (sizeof (vpd_out->eng_change)-1));
4838 (void) strncpy(vpd_out->manufacturer, vpd->manufacturer,
4839 (sizeof (vpd_out->manufacturer)-1));
4840 (void) strncpy(vpd_out->serial_num, vpd->serial_num,
4841 (sizeof (vpd_out->serial_num)-1));
4842 (void) strncpy(vpd_out->model, vpd->model,
4843 (sizeof (vpd_out->model)-1));
4844 (void) strncpy(vpd_out->model_desc, vpd->model_desc,
4845 (sizeof (vpd_out->model_desc)-1));
4846 (void) strncpy(vpd_out->port_num, vpd->port_num,
4847 (sizeof (vpd_out->port_num)-1));
4848 (void) strncpy(vpd_out->prog_types, vpd->prog_types,
4849 (sizeof (vpd_out->prog_types)-1));
4850
4851 ret = FC_SUCCESS;
4852
4853 break;
4854 }
4855
4856 case EMLXS_PHY_GET:
4857 {
4858 emlxs_phy_desc_t *phy_out;
4859 MAILBOXQ *mbq;
4860 MAILBOX4 *mb;
4861 IOCTL_COMMON_GET_PHY_DETAILS *phy;
4862 mbox_req_hdr_t *hdr_req;
4863
4864 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4865 "fca_port_manage: EMLXS_PHY_GET");
4866
4867 if (pm->pm_stat_len < sizeof (emlxs_phy_desc_t)) {
4868 ret = FC_BADCMD;
4869 break;
4870 }
4871
4872 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
4873 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4874 "Invalid sli_mode. mode=%d", hba->sli_mode);
4875 ret = FC_BADCMD;
4876 break;
4877 }
4878
4879 phy_out = (emlxs_phy_desc_t *)pm->pm_stat_buf;
4880 bzero(phy_out, sizeof (emlxs_phy_desc_t));
4881
4882 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba,
4883 MEM_MBOX)) == 0) {
4884 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4885 "Unable to allocate mailbox buffer.");
4886 ret = FC_NOMEM;
4887 break;
4888 }
4889
4890 mb = (MAILBOX4*)mbq;
4891
4892 bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
4893
4894 mb->un.varSLIConfig.be.embedded = 1;
4895 mbq->mbox_cmpl = NULL;
4896
4897 mb->mbxCommand = MBX_SLI_CONFIG;
4898 mb->mbxOwner = OWN_HOST;
4899
4900 hdr_req = (mbox_req_hdr_t *)
4901 &mb->un.varSLIConfig.be.un_hdr.hdr_req;
4902 hdr_req->subsystem = IOCTL_SUBSYSTEM_COMMON;
4903 hdr_req->opcode = COMMON_OPCODE_GET_PHY_DETAILS;
4904 hdr_req->timeout = 0;
4905 hdr_req->req_length =
4906 sizeof (IOCTL_COMMON_GET_PHY_DETAILS);
4907
4908 phy = (IOCTL_COMMON_GET_PHY_DETAILS *)(hdr_req + 1);
4909
4910 /* Send read request */
4911 if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0) !=
4912 MBX_SUCCESS) {
4913 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4914 "Unable to get PHY details. status=%x",
4915 mb->mbxStatus);
4916
4917 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4918
4919 ret = FC_FAILURE;
4920 break;
4921 }
4922
4923 phy_out->phy_type = phy->params.response.phy_type;
4924 phy_out->interface_type =
4925 phy->params.response.interface_type;
4926 phy_out->misc_params = phy->params.response.misc_params;
4927 phy_out->rsvd[0] = phy->params.response.rsvd[0];
4928 phy_out->rsvd[1] = phy->params.response.rsvd[1];
4929 phy_out->rsvd[2] = phy->params.response.rsvd[2];
4930 phy_out->rsvd[3] = phy->params.response.rsvd[3];
4931
4932 emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
4933
4934 ret = FC_SUCCESS;
4935 break;
4936 }
4937
4938 #ifdef NODE_THROTTLE_SUPPORT
4939 case EMLXS_SET_THROTTLE:
4940 {
4941 emlxs_node_t *node;
4942 uint32_t scope = 0;
4943 uint32_t i;
4944 char buf1[32];
4945 emlxs_throttle_desc_t *desc;
4946
4947 if ((pm->pm_data_buf == NULL) ||
4948 (pm->pm_data_len !=
4949 sizeof (emlxs_throttle_desc_t))) {
4950 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4951 "fca_port_manage: EMLXS_SET_THROTTLE: "
4952 "Descriptor buffer not valid. %d",
4953 pm->pm_data_len);
4954 ret = FC_BADCMD;
4955 break;
4956 }
4957
4958 if ((pm->pm_cmd_buf != NULL) &&
4959 (pm->pm_cmd_len == sizeof (uint32_t))) {
4960 scope = *(uint32_t *)pm->pm_cmd_buf;
4961 }
4962
4963 desc = (emlxs_throttle_desc_t *)pm->pm_data_buf;
4964 desc->throttle = MIN(desc->throttle, MAX_NODE_THROTTLE);
4965
4966 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
4967 "fca_port_manage: EMLXS_SET_THROTTLE: scope=%d "
4968 "depth=%d",
4969 scope, desc->throttle);
4970
4971 rw_enter(&port->node_rwlock, RW_WRITER);
4972 switch (scope) {
4973 case 1: /* all */
4974 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
4975 node = port->node_table[i];
4976 while (node != NULL) {
4977 node->io_throttle = desc->throttle;
4978
4979 EMLXS_MSGF(EMLXS_CONTEXT,
4980 &emlxs_sfs_debug_msg,
4981 "EMLXS_SET_THROTTLE: wwpn=%s "
4982 "depth=%d",
4983 emlxs_wwn_xlate(buf1, sizeof (buf1),
4984 (uint8_t *)&node->nlp_portname),
4985 node->io_throttle);
4986
4987 node = (NODELIST *)node->nlp_list_next;
4988 }
4989 }
4990 break;
4991
4992 case 2: /* FCP */
4993 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
4994 node = port->node_table[i];
4995 while (node != NULL) {
4996 if (!(node->nlp_fcp_info &
4997 NLP_FCP_TGT_DEVICE)) {
4998 node = (NODELIST *)
4999 node->nlp_list_next;
5000 continue;
5001 }
5002
5003 node->io_throttle = desc->throttle;
5004
5005 EMLXS_MSGF(EMLXS_CONTEXT,
5006 &emlxs_sfs_debug_msg,
5007 "EMLXS_SET_THROTTLE: wwpn=%s "
5008 "depth=%d",
5009 emlxs_wwn_xlate(buf1, sizeof (buf1),
5010 (uint8_t *)&node->nlp_portname),
5011 node->io_throttle);
5012
5013 node = (NODELIST *)node->nlp_list_next;
5014 }
5015 }
5016 break;
5017
5018 case 0: /* WWPN */
5019 default:
5020 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5021 node = port->node_table[i];
5022 while (node != NULL) {
5023 if (bcmp((caddr_t)&node->nlp_portname,
5024 desc->wwpn, 8)) {
5025 node = (NODELIST *)
5026 node->nlp_list_next;
5027 continue;
5028 }
5029
5030 node->io_throttle = desc->throttle;
5031
5032 EMLXS_MSGF(EMLXS_CONTEXT,
5033 &emlxs_sfs_debug_msg,
5034 "EMLXS_SET_THROTTLE: wwpn=%s "
5035 "depth=%d",
5036 emlxs_wwn_xlate(buf1, sizeof (buf1),
5037 (uint8_t *)&node->nlp_portname),
5038 node->io_throttle);
5039
5040 goto set_throttle_done;
5041 }
5042 }
5043 set_throttle_done:
5044 break;
5045 }
5046
5047 rw_exit(&port->node_rwlock);
5048 ret = FC_SUCCESS;
5049
5050 break;
5051 }
5052
5053 case EMLXS_GET_THROTTLE:
5054 {
5055 emlxs_node_t *node;
5056 uint32_t i;
5057 uint32_t j;
5058 char buf1[32];
5059 uint32_t count;
5060 emlxs_throttle_desc_t *desc;
5061
5062 if (pm->pm_stat_len == sizeof (uint32_t)) {
5063 count = emlxs_nport_count(port);
5064 *(uint32_t *)pm->pm_stat_buf = count;
5065
5066 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5067 "fca_port_manage: EMLXS_GET_THROTTLE: "
5068 "count=%d",
5069 count);
5070
5071 ret = FC_SUCCESS;
5072 break;
5073 }
5074
5075 if ((pm->pm_stat_buf == NULL) ||
5076 (pm->pm_stat_len <
5077 sizeof (emlxs_throttle_desc_t))) {
5078 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5079 "fca_port_manage: EMLXS_GET_THROTTLE: "
5080 "Descriptor buffer too small. %d",
5081 pm->pm_data_len);
5082 ret = FC_BADCMD;
5083 break;
5084 }
5085
5086 count = pm->pm_stat_len /
5087 sizeof (emlxs_throttle_desc_t);
5088 desc = (emlxs_throttle_desc_t *)pm->pm_stat_buf;
5089
5090 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5091 "fca_port_manage: EMLXS_GET_THROTTLE: max=%d",
5092 count);
5093
5094 rw_enter(&port->node_rwlock, RW_READER);
5095 j = 0;
5096 for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
5097 node = port->node_table[i];
5098 while (node != NULL) {
5099 if ((node->nlp_DID & 0xFFF000) ==
5100 0xFFF000) {
5101 node = (NODELIST *)
5102 node->nlp_list_next;
5103 continue;
5104 }
5105
5106 bcopy((uint8_t *)&node->nlp_portname,
5107 desc[j].wwpn, 8);
5108 desc[j].throttle = node->io_throttle;
5109
5110 EMLXS_MSGF(EMLXS_CONTEXT,
5111 &emlxs_sfs_debug_msg,
5112 "EMLXS_GET_THROTTLE: wwpn=%s "
5113 "depth=%d",
5114 emlxs_wwn_xlate(buf1, sizeof (buf1),
5115 desc[j].wwpn),
5116 desc[j].throttle);
5117
5118 j++;
5119 if (j >= count) {
5120 goto get_throttle_done;
5121 }
5122
5123 node = (NODELIST *)node->nlp_list_next;
5124 }
5125 }
5126 get_throttle_done:
5127 rw_exit(&port->node_rwlock);
5128 ret = FC_SUCCESS;
5129
5130 break;
5131 }
5132 #endif /* NODE_THROTTLE_SUPPORT */
5133
		case EMLXS_GET_FCIO_REV:
		{
			/* Return the FCIO interface revision as a uint32_t */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: GET_FCIO_REV");

			if (pm->pm_stat_len < sizeof (uint32_t)) {
				ret = FC_NOMEM;
				break;
			}

			bzero(pm->pm_stat_buf, pm->pm_stat_len);
			*(uint32_t *)pm->pm_stat_buf = FCIO_REV;

			break;
		}
5149
		case EMLXS_GET_DFC_REV:
		{
			/* Return the DFC interface revision as a uint32_t */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "fca_port_manage: GET_DFC_REV");

			if (pm->pm_stat_len < sizeof (uint32_t)) {
				ret = FC_NOMEM;
				break;
			}

			bzero(pm->pm_stat_buf, pm->pm_stat_len);
			*(uint32_t *)pm->pm_stat_buf = DFC_REV;

			break;
		}
5165
5166 case EMLXS_SET_BOOT_STATE:
5167 case EMLXS_SET_BOOT_STATE_old:
5168 {
5169 uint32_t state;
5170
5171 if (!(hba->flag & FC_ONLINE_MODE)) {
5172 return (FC_OFFLINE);
5173 }
5174 if (pm->pm_cmd_len < sizeof (uint32_t)) {
5175 EMLXS_MSGF(EMLXS_CONTEXT,
5176 &emlxs_sfs_debug_msg,
5177 "fca_port_manage: SET_BOOT_STATE");
5178 ret = FC_BADCMD;
5179 break;
5180 }
5181
5182 state = *(uint32_t *)pm->pm_cmd_buf;
5183
5184 if (state == 0) {
5185 EMLXS_MSGF(EMLXS_CONTEXT,
5186 &emlxs_sfs_debug_msg,
5187 "fca_port_manage: SET_BOOT_STATE: "
5188 "Disable");
5189 ret = emlxs_boot_code_disable(hba);
5190 } else {
5191 EMLXS_MSGF(EMLXS_CONTEXT,
5192 &emlxs_sfs_debug_msg,
5193 "fca_port_manage: SET_BOOT_STATE: "
5194 "Enable");
5195 ret = emlxs_boot_code_enable(hba);
5196 }
5197
5198 break;
5199 }
5200
5201 case EMLXS_GET_BOOT_STATE:
5202 case EMLXS_GET_BOOT_STATE_old:
5203 {
5204 if (!(hba->flag & FC_ONLINE_MODE)) {
5205 return (FC_OFFLINE);
5206 }
5207 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5208 "fca_port_manage: GET_BOOT_STATE");
5209
5210 if (pm->pm_stat_len < sizeof (uint32_t)) {
5211 ret = FC_NOMEM;
5212 break;
5213 }
5214 bzero(pm->pm_stat_buf, pm->pm_stat_len);
5215
5216 ret = emlxs_boot_code_state(hba);
5217
5218 if (ret == FC_SUCCESS) {
5219 *(uint32_t *)pm->pm_stat_buf = 1;
5220 ret = FC_SUCCESS;
5221 } else if (ret == FC_FAILURE) {
5222 ret = FC_SUCCESS;
5223 }
5224
5225 break;
5226 }
5227
5228 case EMLXS_HW_ERROR_TEST:
5229 {
5230 /*
5231 * This command is used for simulating HW ERROR
5232 * on SLI4 only.
5233 */
5234 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5235 ret = FC_INVALID_REQUEST;
5236 break;
5237 }
5238 hba->sli.sli4.flag |= EMLXS_SLI4_HW_ERROR;
5239 break;
5240 }
5241
5242 case EMLXS_MB_TIMEOUT_TEST:
5243 {
5244 if (!(hba->flag & FC_ONLINE_MODE)) {
5245 return (FC_OFFLINE);
5246 }
5247
5248 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5249 "fca_port_manage: HW_ERROR_TEST");
5250
5251 /* Trigger a mailbox timeout */
5252 hba->mbox_timer = hba->timer_tics;
5253
5254 break;
5255 }
5256
5257 case EMLXS_TEST_CODE:
5258 {
5259 uint32_t *cmd;
5260
5261 if (!(hba->flag & FC_ONLINE_MODE)) {
5262 return (FC_OFFLINE);
5263 }
5264
5265 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5266 "fca_port_manage: TEST_CODE");
5267
5268 if (pm->pm_cmd_len < sizeof (uint32_t)) {
5269 EMLXS_MSGF(EMLXS_CONTEXT,
5270 &emlxs_sfs_debug_msg,
5271 "fca_port_manage: TEST_CODE. "
5272 "inbuf to small.");
5273
5274 ret = FC_BADCMD;
5275 break;
5276 }
5277
5278 cmd = (uint32_t *)pm->pm_cmd_buf;
5279
5280 ret = emlxs_test(hba, cmd[0],
5281 (pm->pm_cmd_len/sizeof (uint32_t)) - 1, &cmd[1]);
5282
5283 break;
5284 }
5285
5286 case EMLXS_BAR_IO:
5287 {
5288 uint32_t *cmd;
5289 uint32_t *datap;
5290 FCIO_Q_STAT_t *qp;
5291 clock_t time;
5292 uint32_t offset;
5293 caddr_t addr;
5294 uint32_t i;
5295 uint32_t tx_cnt;
5296 uint32_t chip_cnt;
5297
5298 cmd = (uint32_t *)pm->pm_cmd_buf;
5299 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5300 "fca_port_manage: BAR_IO %x %x %x",
5301 cmd[0], cmd[1], cmd[2]);
5302
5303 offset = cmd[1];
5304
5305 ret = FC_SUCCESS;
5306
5307 switch (cmd[0]) {
5308 case 2: /* bar1read */
5309 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5310 return (FC_BADCMD);
5311 }
5312
5313 /* Registers in this range are invalid */
5314 if ((offset >= 0x4C00) && (offset < 0x5000)) {
5315 return (FC_BADCMD);
5316 }
5317 if ((offset >= 0x5800) || (offset & 0x3)) {
5318 return (FC_BADCMD);
5319 }
5320 datap = (uint32_t *)pm->pm_stat_buf;
5321
5322 for (i = 0; i < pm->pm_stat_len;
5323 i += sizeof (uint32_t)) {
5324 if ((offset >= 0x4C00) &&
5325 (offset < 0x5000)) {
5326 pm->pm_stat_len = i;
5327 break;
5328 }
5329 if (offset >= 0x5800) {
5330 pm->pm_stat_len = i;
5331 break;
5332 }
5333 addr = hba->sli.sli4.bar1_addr + offset;
5334 *datap = READ_BAR1_REG(hba, addr);
5335 datap++;
5336 offset += sizeof (uint32_t);
5337 }
5338 #ifdef FMA_SUPPORT
5339 /* Access handle validation */
5340 EMLXS_CHK_ACC_HANDLE(hba,
5341 hba->sli.sli4.bar1_acc_handle);
5342 #endif /* FMA_SUPPORT */
5343 break;
5344 case 3: /* bar2read */
5345 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5346 return (FC_BADCMD);
5347 }
5348 if ((offset >= 0x1000) || (offset & 0x3)) {
5349 return (FC_BADCMD);
5350 }
5351 datap = (uint32_t *)pm->pm_stat_buf;
5352
5353 for (i = 0; i < pm->pm_stat_len;
5354 i += sizeof (uint32_t)) {
5355 *datap = READ_BAR2_REG(hba,
5356 hba->sli.sli4.bar2_addr + offset);
5357 datap++;
5358 offset += sizeof (uint32_t);
5359 }
5360 #ifdef FMA_SUPPORT
5361 /* Access handle validation */
5362 EMLXS_CHK_ACC_HANDLE(hba,
5363 hba->sli.sli4.bar2_acc_handle);
5364 #endif /* FMA_SUPPORT */
5365 break;
5366 case 4: /* bar1write */
5367 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5368 return (FC_BADCMD);
5369 }
5370 WRITE_BAR1_REG(hba, hba->sli.sli4.bar1_addr +
5371 offset, cmd[2]);
5372 #ifdef FMA_SUPPORT
5373 /* Access handle validation */
5374 EMLXS_CHK_ACC_HANDLE(hba,
5375 hba->sli.sli4.bar1_acc_handle);
5376 #endif /* FMA_SUPPORT */
5377 break;
5378 case 5: /* bar2write */
5379 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5380 return (FC_BADCMD);
5381 }
5382 WRITE_BAR2_REG(hba, hba->sli.sli4.bar2_addr +
5383 offset, cmd[2]);
5384 #ifdef FMA_SUPPORT
5385 /* Access handle validation */
5386 EMLXS_CHK_ACC_HANDLE(hba,
5387 hba->sli.sli4.bar2_acc_handle);
5388 #endif /* FMA_SUPPORT */
5389 break;
5390 case 6: /* dumpbsmbox */
5391 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5392 return (FC_BADCMD);
5393 }
5394 if (offset != 0) {
5395 return (FC_BADCMD);
5396 }
5397
5398 bcopy((caddr_t)hba->sli.sli4.bootstrapmb.virt,
5399 (caddr_t)pm->pm_stat_buf, 256);
5400 break;
5401 case 7: /* pciread */
5402 if ((offset >= 0x200) || (offset & 0x3)) {
5403 return (FC_BADCMD);
5404 }
5405 datap = (uint32_t *)pm->pm_stat_buf;
5406 for (i = 0; i < pm->pm_stat_len;
5407 i += sizeof (uint32_t)) {
5408 *datap = ddi_get32(hba->pci_acc_handle,
5409 (uint32_t *)(hba->pci_addr +
5410 offset));
5411 datap++;
5412 offset += sizeof (uint32_t);
5413 }
5414 #ifdef FMA_SUPPORT
5415 /* Access handle validation */
5416 EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
5417 #endif /* FMA_SUPPORT */
5418 break;
5419 case 8: /* abortall */
5420 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5421 return (FC_BADCMD);
5422 }
5423 emlxs_abort_all(hba, &tx_cnt, &chip_cnt);
5424 datap = (uint32_t *)pm->pm_stat_buf;
5425 *datap++ = tx_cnt;
5426 *datap = chip_cnt;
5427 break;
5428 case 9: /* get_q_info */
5429 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5430 return (FC_BADCMD);
5431 }
5432 qp = (FCIO_Q_STAT_t *)pm->pm_stat_buf;
5433 for (i = 0; i < FCIO_MAX_EQS; i++) {
5434 addr = hba->sli.sli4.eq[i].addr.virt;
5435 qp->eq[i].host_index =
5436 hba->sli.sli4.eq[i].host_index;
5437 qp->eq[i].max_index =
5438 hba->sli.sli4.eq[i].max_index;
5439 qp->eq[i].qid =
5440 hba->sli.sli4.eq[i].qid;
5441 qp->eq[i].msix_vector =
5442 hba->sli.sli4.eq[i].msix_vector;
5443 qp->eq[i].phys =
5444 hba->sli.sli4.eq[i].addr.phys;
5445 qp->eq[i].virt = PADDR_LO(
5446 (uintptr_t)addr);
5447 qp->eq[i].virt_hi = PADDR_HI(
5448 (uintptr_t)addr);
5449 qp->eq[i].max_proc =
5450 hba->sli.sli4.eq[i].max_proc;
5451 qp->eq[i].isr_count =
5452 hba->sli.sli4.eq[i].isr_count;
5453 qp->eq[i].num_proc =
5454 hba->sli.sli4.eq[i].num_proc;
5455 }
5456 for (i = 0; i < FCIO_MAX_CQS; i++) {
5457 addr = hba->sli.sli4.cq[i].addr.virt;
5458 qp->cq[i].host_index =
5459 hba->sli.sli4.cq[i].host_index;
5460 qp->cq[i].max_index =
5461 hba->sli.sli4.cq[i].max_index;
5462 qp->cq[i].qid =
5463 hba->sli.sli4.cq[i].qid;
5464 qp->cq[i].eqid =
5465 hba->sli.sli4.cq[i].eqid;
5466 qp->cq[i].type =
5467 hba->sli.sli4.cq[i].type;
5468 qp->cq[i].phys =
5469 hba->sli.sli4.cq[i].addr.phys;
5470 qp->cq[i].virt = PADDR_LO(
5471 (uintptr_t)addr);
5472 qp->cq[i].virt_hi = PADDR_HI(
5473 (uintptr_t)addr);
5474 qp->cq[i].max_proc =
5475 hba->sli.sli4.cq[i].max_proc;
5476 qp->cq[i].isr_count =
5477 hba->sli.sli4.cq[i].isr_count;
5478 qp->cq[i].num_proc =
5479 hba->sli.sli4.cq[i].num_proc;
5480 }
5481 for (i = 0; i < FCIO_MAX_WQS; i++) {
5482 addr = hba->sli.sli4.wq[i].addr.virt;
5483 qp->wq[i].host_index =
5484 hba->sli.sli4.wq[i].host_index;
5485 qp->wq[i].max_index =
5486 hba->sli.sli4.wq[i].max_index;
5487 qp->wq[i].port_index =
5488 hba->sli.sli4.wq[i].port_index;
5489 qp->wq[i].release_depth =
5490 hba->sli.sli4.wq[i].release_depth;
5491 qp->wq[i].qid =
5492 hba->sli.sli4.wq[i].qid;
5493 qp->wq[i].cqid =
5494 hba->sli.sli4.wq[i].cqid;
5495 qp->wq[i].phys =
5496 hba->sli.sli4.wq[i].addr.phys;
5497 qp->wq[i].virt = PADDR_LO(
5498 (uintptr_t)addr);
5499 qp->wq[i].virt_hi = PADDR_HI(
5500 (uintptr_t)addr);
5501 qp->wq[i].num_proc =
5502 hba->sli.sli4.wq[i].num_proc;
5503 qp->wq[i].num_busy =
5504 hba->sli.sli4.wq[i].num_busy;
5505 }
5506 for (i = 0; i < FCIO_MAX_RQS; i++) {
5507 addr = hba->sli.sli4.rq[i].addr.virt;
5508 qp->rq[i].qid =
5509 hba->sli.sli4.rq[i].qid;
5510 qp->rq[i].cqid =
5511 hba->sli.sli4.rq[i].cqid;
5512 qp->rq[i].host_index =
5513 hba->sli.sli4.rq[i].host_index;
5514 qp->rq[i].max_index =
5515 hba->sli.sli4.rq[i].max_index;
5516 qp->rq[i].phys =
5517 hba->sli.sli4.rq[i].addr.phys;
5518 qp->rq[i].virt = PADDR_LO(
5519 (uintptr_t)addr);
5520 qp->rq[i].virt_hi = PADDR_HI(
5521 (uintptr_t)addr);
5522 qp->rq[i].num_proc =
5523 hba->sli.sli4.rq[i].num_proc;
5524 }
5525 qp->que_start_timer =
5526 hba->sli.sli4.que_stat_timer;
5527 (void) drv_getparm(LBOLT, &time);
5528 qp->que_current_timer = (uint32_t)time;
5529 qp->intr_count = hba->intr_count;
5530 break;
5531 case 10: /* zero_q_stat */
5532 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
5533 return (FC_BADCMD);
5534 }
5535 emlxs_sli4_zero_queue_stat(hba);
5536 break;
5537 default:
5538 ret = FC_BADCMD;
5539 break;
5540 }
5541 break;
5542 }
5543
5544 default:
5545
5546 ret = FC_INVALID_REQUEST;
5547 break;
5548 }
5549
5550 break;
5551
5552 }
5553
5554 case FC_PORT_INITIALIZE:
5555 if (!(hba->flag & FC_ONLINE_MODE)) {
5556 return (FC_OFFLINE);
5557 }
5558 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5559 "fca_port_manage: FC_PORT_INITIALIZE");
5560 break;
5561
5562 case FC_PORT_LOOPBACK:
5563 if (!(hba->flag & FC_ONLINE_MODE)) {
5564 return (FC_OFFLINE);
5565 }
5566 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5567 "fca_port_manage: FC_PORT_LOOPBACK");
5568 break;
5569
5570 case FC_PORT_BYPASS:
5571 if (!(hba->flag & FC_ONLINE_MODE)) {
5572 return (FC_OFFLINE);
5573 }
5574 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5575 "fca_port_manage: FC_PORT_BYPASS");
5576 ret = FC_INVALID_REQUEST;
5577 break;
5578
5579 case FC_PORT_UNBYPASS:
5580 if (!(hba->flag & FC_ONLINE_MODE)) {
5581 return (FC_OFFLINE);
5582 }
5583 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5584 "fca_port_manage: FC_PORT_UNBYPASS");
5585 ret = FC_INVALID_REQUEST;
5586 break;
5587
	case FC_PORT_GET_NODE_ID:
	{
		fc_rnid_t *rnid;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "fca_port_manage: FC_PORT_GET_NODE_ID");

		/*
		 * Clear the caller's buffer up front (safe: bzero uses the
		 * caller-supplied length even before the size check below).
		 */
		bzero(pm->pm_data_buf, pm->pm_data_len);

		if (pm->pm_data_len < sizeof (fc_rnid_t)) {
			ret = FC_NOMEM;
			break;
		}

		rnid = (fc_rnid_t *)pm->pm_data_buf;

		/* Render the port WWN as a hex string into global_id */
		(void) snprintf((char *)rnid->global_id,
		    (sizeof (rnid->global_id)-1),
		    "%01x%01x%02x%02x%02x%02x%02x%02x%02x",
		    hba->wwpn.nameType, hba->wwpn.IEEEextMsn,
		    hba->wwpn.IEEEextLsb, hba->wwpn.IEEE[0],
		    hba->wwpn.IEEE[1], hba->wwpn.IEEE[2], hba->wwpn.IEEE[3],
		    hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);

		rnid->unit_type = RNID_HBA;
		rnid->port_id = port->did;
		rnid->ip_version = RNID_IPV4;

		/* Dump each RNID field for SFS debugging */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: wwpn: %s", rnid->global_id);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: unit_type: 0x%x", rnid->unit_type);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: port_id: 0x%x", rnid->port_id);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: num_attach: %d", rnid->num_attached);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: udp_port: 0x%x", rnid->udp_port);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: ip_addr: %s", rnid->ip_addr);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "GET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);

		ret = FC_SUCCESS;
		break;
	}
5638
5639 case FC_PORT_SET_NODE_ID:
5640 {
5641 fc_rnid_t *rnid;
5642
5643 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5644 "fca_port_manage: FC_PORT_SET_NODE_ID");
5645
5646 if (pm->pm_data_len < sizeof (fc_rnid_t)) {
5647 ret = FC_NOMEM;
5648 break;
5649 }
5650
5651 rnid = (fc_rnid_t *)pm->pm_data_buf;
5652
5653 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5654 "SET_NODE_ID: wwpn: %s", rnid->global_id);
5655 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5656 "SET_NODE_ID: unit_type: 0x%x", rnid->unit_type);
5657 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5658 "SET_NODE_ID: port_id: 0x%x", rnid->port_id);
5659 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5660 "SET_NODE_ID: num_attach: %d", rnid->num_attached);
5661 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5662 "SET_NODE_ID: ip_version: 0x%x", rnid->ip_version);
5663 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5664 "SET_NODE_ID: udp_port: 0x%x", rnid->udp_port);
5665 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5666 "SET_NODE_ID: ip_addr: %s", rnid->ip_addr);
5667 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5668 "SET_NODE_ID: resv: 0x%x", rnid->specific_id_resv);
5669 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5670 "SET_NODE_ID: topo_flags: 0x%x", rnid->topo_flags);
5671
5672 ret = FC_SUCCESS;
5673 break;
5674 }
5675
5676 #ifdef S11
5677 case FC_PORT_GET_P2P_INFO:
5678 {
5679 fc_fca_p2p_info_t *p2p_info;
5680 NODELIST *ndlp;
5681
5682 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5683 "fca_port_manage: FC_PORT_GET_P2P_INFO");
5684
5685 bzero(pm->pm_data_buf, pm->pm_data_len);
5686
5687 if (pm->pm_data_len < sizeof (fc_fca_p2p_info_t)) {
5688 ret = FC_NOMEM;
5689 break;
5690 }
5691
5692 p2p_info = (fc_fca_p2p_info_t *)pm->pm_data_buf;
5693
5694 if (hba->state >= FC_LINK_UP) {
5695 if ((hba->topology == TOPOLOGY_PT_PT) &&
5696 (hba->flag & FC_PT_TO_PT)) {
5697 p2p_info->fca_d_id = port->did;
5698 p2p_info->d_id = port->rdid;
5699
5700 ndlp = emlxs_node_find_did(port,
5701 port->rdid, 1);
5702
5703 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5704 "FC_PORT_GET_P2P_INFO: fca_d_id: 0x%x, "
5705 "d_id: 0x%x, ndlp: 0x%p", port->did,
5706 port->rdid, ndlp);
5707 if (ndlp) {
5708 bcopy(&ndlp->nlp_portname,
5709 (caddr_t)&p2p_info->pwwn,
5710 sizeof (la_wwn_t));
5711 bcopy(&ndlp->nlp_nodename,
5712 (caddr_t)&p2p_info->nwwn,
5713 sizeof (la_wwn_t));
5714
5715 ret = FC_SUCCESS;
5716 break;
5717
5718 }
5719 }
5720 }
5721
5722 ret = FC_FAILURE;
5723 break;
5724 }
5725 #endif /* S11 */
5726
5727 default:
5728 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5729 "fca_port_manage: code=%x", pm->pm_cmd_code);
5730 ret = FC_INVALID_REQUEST;
5731 break;
5732
5733 }
5734
5735 return (ret);
5736
5737 } /* emlxs_fca_port_manage() */
5738
5739
5740 /*ARGSUSED*/
5741 static uint32_t
5742 emlxs_test(emlxs_hba_t *hba, uint32_t test_code, uint32_t args,
5743 uint32_t *arg)
5744 {
5745 uint32_t rval = 0;
5746 emlxs_port_t *port = &PPORT;
5747
5748 switch (test_code) {
5749 #ifdef TEST_SUPPORT
5750 case 1: /* SCSI underrun */
5751 {
5752 hba->underrun_counter = (args)? arg[0]:1;
5753 break;
5754 }
5755 #endif /* TEST_SUPPORT */
5756
5757 default:
5758 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
5759 "test: Unsupported test code. (0x%x)", test_code);
5760 rval = FC_INVALID_REQUEST;
5761 }
5762
5763 return (rval);
5764
5765 } /* emlxs_test() */
5766
5767
5768 /*
5769 * Given the device number, return the devinfo pointer or the ddiinst number.
5770 * Note: this routine must be successful on DDI_INFO_DEVT2INSTANCE even
5771 * before attach.
5772 *
5773 * Translate "dev_t" to a pointer to the associated "dev_info_t".
5774 */
5775 /*ARGSUSED*/
5776 static int
5777 emlxs_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
5778 {
5779 emlxs_hba_t *hba;
5780 int32_t ddiinst;
5781
5782 ddiinst = getminor((dev_t)arg);
5783
5784 switch (infocmd) {
5785 case DDI_INFO_DEVT2DEVINFO:
5786 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5787 if (hba)
5788 *result = hba->dip;
5789 else
5790 *result = NULL;
5791 break;
5792
5793 case DDI_INFO_DEVT2INSTANCE:
5794 *result = (void *)((unsigned long)ddiinst);
5795 break;
5796
5797 default:
5798 return (DDI_FAILURE);
5799 }
5800
5801 return (DDI_SUCCESS);
5802
5803 } /* emlxs_info() */
5804
5805
/*
 * power(9e) entry point.
 *
 * Raises or lowers the power level of the adapter component.  Only the
 * EMLXS_PM_ADAPTER component is supported; level is EMLXS_PM_ADAPTER_UP
 * or EMLXS_PM_ADAPTER_DOWN.  All pm_level/pm_state transitions are made
 * under EMLXS_PM_LOCK, which also satisfies the lock requirement of
 * emlxs_hba_resume()/emlxs_hba_suspend() called below.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int32_t
emlxs_power(dev_info_t *dip, int32_t comp, int32_t level)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	int32_t ddiinst;
	int rval = DDI_SUCCESS;

	ddiinst = ddi_get_instance(dip);
	hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
	port = &PPORT;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "fca_power: comp=%x level=%x", comp, level);

	/* Only the adapter component is power-manageable. */
	if (hba == NULL || comp != EMLXS_PM_ADAPTER) {
		return (DDI_FAILURE);
	}

	mutex_enter(&EMLXS_PM_LOCK);

	/* If we are already at the proper level then return success */
	if (hba->pm_level == level) {
		mutex_exit(&EMLXS_PM_LOCK);
		return (DDI_SUCCESS);
	}

	switch (level) {
	case EMLXS_PM_ADAPTER_UP:

		/*
		 * If we are already in emlxs_attach,
		 * let emlxs_hba_attach take care of things
		 */
		if (hba->pm_state & EMLXS_PM_IN_ATTACH) {
			hba->pm_level = EMLXS_PM_ADAPTER_UP;
			break;
		}

		/* Check if adapter is suspended */
		if (hba->pm_state & EMLXS_PM_SUSPENDED) {
			hba->pm_level = EMLXS_PM_ADAPTER_UP;

			/* Try to resume the port */
			rval = emlxs_hba_resume(dip);

			/* On failure, roll pm_level back down. */
			if (rval != DDI_SUCCESS) {
				hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
			}
			break;
		}

		/* Set adapter up */
		hba->pm_level = EMLXS_PM_ADAPTER_UP;
		break;

	case EMLXS_PM_ADAPTER_DOWN:


		/*
		 * If we are already in emlxs_detach,
		 * let emlxs_hba_detach take care of things
		 */
		if (hba->pm_state & EMLXS_PM_IN_DETACH) {
			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
			break;
		}

		/* Check if adapter is not suspended */
		if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
			hba->pm_level = EMLXS_PM_ADAPTER_DOWN;

			/* Try to suspend the port */
			rval = emlxs_hba_suspend(dip);

			/* On failure, roll pm_level back up. */
			if (rval != DDI_SUCCESS) {
				hba->pm_level = EMLXS_PM_ADAPTER_UP;
			}

			break;
		}

		/* Set adapter down */
		hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
		break;

	default:
		rval = DDI_FAILURE;
		break;

	}

	mutex_exit(&EMLXS_PM_LOCK);

	return (rval);

} /* emlxs_power() */
5903
5904
5905 #ifdef EMLXS_I386
5906 #ifdef S11
5907 /*
5908 * quiesce(9E) entry point.
5909 *
 * This function is called when the system is single-threaded at high PIL
 * with preemption disabled. Therefore, this function must not block.
5912 *
5913 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
5914 * DDI_FAILURE indicates an error condition and should almost never happen.
5915 */
5916 static int
5917 emlxs_quiesce(dev_info_t *dip)
5918 {
5919 emlxs_hba_t *hba;
5920 emlxs_port_t *port;
5921 int32_t ddiinst;
5922 int rval = DDI_SUCCESS;
5923
5924 ddiinst = ddi_get_instance(dip);
5925 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5926 port = &PPORT;
5927
5928 if (hba == NULL || port == NULL) {
5929 return (DDI_FAILURE);
5930 }
5931
5932 /* The fourth arg 1 indicates the call is from quiesce */
5933 if (EMLXS_SLI_HBA_RESET(hba, 1, 1, 1) == 0) {
5934 return (rval);
5935 } else {
5936 return (DDI_FAILURE);
5937 }
5938
5939 } /* emlxs_quiesce */
5940 #endif /* S11 */
5941 #endif /* EMLXS_I386 */
5942
5943
5944 static int
5945 emlxs_open(dev_t *dev_p, int32_t flag, int32_t otype, cred_t *cred_p)
5946 {
5947 emlxs_hba_t *hba;
5948 emlxs_port_t *port;
5949 int ddiinst;
5950
5951 ddiinst = getminor(*dev_p);
5952 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
5953
5954 if (hba == NULL) {
5955 return (ENXIO);
5956 }
5957
5958 port = &PPORT;
5959
5960 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
5961 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
5962 "open failed: Driver suspended.");
5963 return (ENXIO);
5964 }
5965
5966 if (otype != OTYP_CHR) {
5967 return (EINVAL);
5968 }
5969
5970 if (drv_priv(cred_p)) {
5971 return (EPERM);
5972 }
5973
5974 mutex_enter(&EMLXS_IOCTL_LOCK);
5975
5976 if (hba->ioctl_flags & EMLXS_OPEN_EXCLUSIVE) {
5977 mutex_exit(&EMLXS_IOCTL_LOCK);
5978 return (EBUSY);
5979 }
5980
5981 if (flag & FEXCL) {
5982 if (hba->ioctl_flags & EMLXS_OPEN) {
5983 mutex_exit(&EMLXS_IOCTL_LOCK);
5984 return (EBUSY);
5985 }
5986
5987 hba->ioctl_flags |= EMLXS_OPEN_EXCLUSIVE;
5988 }
5989
5990 hba->ioctl_flags |= EMLXS_OPEN;
5991
5992 mutex_exit(&EMLXS_IOCTL_LOCK);
5993
5994 return (0);
5995
5996 } /* emlxs_open() */
5997
5998
5999 /*ARGSUSED*/
6000 static int
6001 emlxs_close(dev_t dev, int32_t flag, int32_t otype, cred_t *cred_p)
6002 {
6003 emlxs_hba_t *hba;
6004 int ddiinst;
6005
6006 ddiinst = getminor(dev);
6007 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6008
6009 if (hba == NULL) {
6010 return (ENXIO);
6011 }
6012
6013 if (otype != OTYP_CHR) {
6014 return (EINVAL);
6015 }
6016
6017 mutex_enter(&EMLXS_IOCTL_LOCK);
6018
6019 if (!(hba->ioctl_flags & EMLXS_OPEN)) {
6020 mutex_exit(&EMLXS_IOCTL_LOCK);
6021 return (ENODEV);
6022 }
6023
6024 hba->ioctl_flags &= ~EMLXS_OPEN;
6025 hba->ioctl_flags &= ~EMLXS_OPEN_EXCLUSIVE;
6026
6027 mutex_exit(&EMLXS_IOCTL_LOCK);
6028
6029 return (0);
6030
6031 } /* emlxs_close() */
6032
6033
6034 /*ARGSUSED*/
6035 static int
6036 emlxs_ioctl(dev_t dev, int32_t cmd, intptr_t arg, int32_t mode,
6037 cred_t *cred_p, int32_t *rval_p)
6038 {
6039 emlxs_hba_t *hba;
6040 emlxs_port_t *port;
6041 int rval = 0; /* return code */
6042 int ddiinst;
6043
6044 ddiinst = getminor(dev);
6045 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6046
6047 if (hba == NULL) {
6048 return (ENXIO);
6049 }
6050
6051 port = &PPORT;
6052
6053 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
6054 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
6055 "ioctl failed: Driver suspended.");
6056
6057 return (ENXIO);
6058 }
6059
6060 mutex_enter(&EMLXS_IOCTL_LOCK);
6061 if (!(hba->ioctl_flags & EMLXS_OPEN)) {
6062 mutex_exit(&EMLXS_IOCTL_LOCK);
6063 return (ENXIO);
6064 }
6065 mutex_exit(&EMLXS_IOCTL_LOCK);
6066
6067 #ifdef IDLE_TIMER
6068 emlxs_pm_busy_component(hba);
6069 #endif /* IDLE_TIMER */
6070
6071 switch (cmd) {
6072 case EMLXS_DFC_COMMAND:
6073 rval = emlxs_dfc_manage(hba, (void *)arg, mode);
6074 break;
6075
6076 default:
6077 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ioctl_detail_msg,
6078 "ioctl: Invalid command received. cmd=%x", cmd);
6079 rval = EINVAL;
6080 }
6081
6082 done:
6083 return (rval);
6084
6085 } /* emlxs_ioctl() */
6086
6087
6088
6089 /*
6090 *
6091 * Device Driver Common Routines
6092 *
6093 */
6094
6095 /* EMLXS_PM_LOCK must be held for this call */
6096 static int
6097 emlxs_hba_resume(dev_info_t *dip)
6098 {
6099 emlxs_hba_t *hba;
6100 emlxs_port_t *port;
6101 int ddiinst;
6102
6103 ddiinst = ddi_get_instance(dip);
6104 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6105 port = &PPORT;
6106
6107 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_msg, NULL);
6108
6109 if (!(hba->pm_state & EMLXS_PM_SUSPENDED)) {
6110 return (DDI_SUCCESS);
6111 }
6112
6113 hba->pm_state &= ~EMLXS_PM_SUSPENDED;
6114
6115 /* Re-enable the physical port on this HBA */
6116 port->flag |= EMLXS_PORT_ENABLED;
6117
6118 /* Take the adapter online */
6119 if (emlxs_power_up(hba)) {
6120 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_resume_failed_msg,
6121 "Unable to take adapter online.");
6122
6123 hba->pm_state |= EMLXS_PM_SUSPENDED;
6124
6125 return (DDI_FAILURE);
6126 }
6127
6128 return (DDI_SUCCESS);
6129
6130 } /* emlxs_hba_resume() */
6131
6132
6133 /* EMLXS_PM_LOCK must be held for this call */
6134 static int
6135 emlxs_hba_suspend(dev_info_t *dip)
6136 {
6137 emlxs_hba_t *hba;
6138 emlxs_port_t *port;
6139 int ddiinst;
6140
6141 ddiinst = ddi_get_instance(dip);
6142 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
6143 port = &PPORT;
6144
6145 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_msg, NULL);
6146
6147 if (hba->pm_state & EMLXS_PM_SUSPENDED) {
6148 return (DDI_SUCCESS);
6149 }
6150
6151 hba->pm_state |= EMLXS_PM_SUSPENDED;
6152
6153 /* Take the adapter offline */
6154 if (emlxs_power_down(hba)) {
6155 hba->pm_state &= ~EMLXS_PM_SUSPENDED;
6156
6157 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_suspend_failed_msg,
6158 "Unable to take adapter offline.");
6159
6160 return (DDI_FAILURE);
6161 }
6162
6163 return (DDI_SUCCESS);
6164
6165 } /* emlxs_hba_suspend() */
6166
6167
6168
6169 static void
6170 emlxs_lock_init(emlxs_hba_t *hba)
6171 {
6172 emlxs_port_t *port = &PPORT;
6173 uint32_t i;
6174
6175 /* Initialize the power management */
6176 mutex_init(&EMLXS_PM_LOCK, NULL, MUTEX_DRIVER,
6177 DDI_INTR_PRI(hba->intr_arg));
6178
6179 mutex_init(&EMLXS_TIMER_LOCK, NULL, MUTEX_DRIVER,
6180 DDI_INTR_PRI(hba->intr_arg));
6181
6182 cv_init(&hba->timer_lock_cv, NULL, CV_DRIVER, NULL);
6183
6184 mutex_init(&EMLXS_PORT_LOCK, NULL, MUTEX_DRIVER,
6185 DDI_INTR_PRI(hba->intr_arg));
6186
6187 mutex_init(&EMLXS_MBOX_LOCK, NULL, MUTEX_DRIVER,
6188 DDI_INTR_PRI(hba->intr_arg));
6189
6190 cv_init(&EMLXS_MBOX_CV, NULL, CV_DRIVER, NULL);
6191
6192 mutex_init(&EMLXS_LINKUP_LOCK, NULL, MUTEX_DRIVER,
6193 DDI_INTR_PRI(hba->intr_arg));
6194
6195 cv_init(&EMLXS_LINKUP_CV, NULL, CV_DRIVER, NULL);
6196
6197 mutex_init(&EMLXS_TX_CHANNEL_LOCK, NULL, MUTEX_DRIVER,
6198 DDI_INTR_PRI(hba->intr_arg));
6199
6200 for (i = 0; i < MAX_RINGS; i++) {
6201 mutex_init(&EMLXS_CMD_RING_LOCK(i), NULL, MUTEX_DRIVER,
6202 DDI_INTR_PRI(hba->intr_arg));
6203 }
6204
6205
6206 for (i = 0; i < EMLXS_MAX_WQS; i++) {
6207 mutex_init(&EMLXS_QUE_LOCK(i), NULL, MUTEX_DRIVER,
6208 DDI_INTR_PRI(hba->intr_arg));
6209 }
6210
6211 mutex_init(&EMLXS_MSIID_LOCK, NULL, MUTEX_DRIVER,
6212 DDI_INTR_PRI(hba->intr_arg));
6213
6214 mutex_init(&EMLXS_FCTAB_LOCK, NULL, MUTEX_DRIVER,
6215 DDI_INTR_PRI(hba->intr_arg));
6216
6217 mutex_init(&EMLXS_MEMGET_LOCK, NULL, MUTEX_DRIVER,
6218 DDI_INTR_PRI(hba->intr_arg));
6219
6220 mutex_init(&EMLXS_MEMPUT_LOCK, NULL, MUTEX_DRIVER,
6221 DDI_INTR_PRI(hba->intr_arg));
6222
6223 mutex_init(&EMLXS_IOCTL_LOCK, NULL, MUTEX_DRIVER,
6224 DDI_INTR_PRI(hba->intr_arg));
6225
6226 #ifdef DUMP_SUPPORT
6227 mutex_init(&EMLXS_DUMP_LOCK, NULL, MUTEX_DRIVER,
6228 DDI_INTR_PRI(hba->intr_arg));
6229 #endif /* DUMP_SUPPORT */
6230
6231 mutex_init(&EMLXS_SPAWN_LOCK, NULL, MUTEX_DRIVER,
6232 DDI_INTR_PRI(hba->intr_arg));
6233
6234 /* Create per port locks */
6235 for (i = 0; i < MAX_VPORTS; i++) {
6236 port = &VPORT(i);
6237
6238 rw_init(&port->node_rwlock, NULL, RW_DRIVER, NULL);
6239
6240 if (i == 0) {
6241 mutex_init(&EMLXS_PKT_LOCK, NULL, MUTEX_DRIVER,
6242 DDI_INTR_PRI(hba->intr_arg));
6243
6244 cv_init(&EMLXS_PKT_CV, NULL, CV_DRIVER, NULL);
6245
6246 mutex_init(&EMLXS_UB_LOCK, NULL, MUTEX_DRIVER,
6247 DDI_INTR_PRI(hba->intr_arg));
6248 } else {
6249 mutex_init(&EMLXS_PKT_LOCK, NULL, MUTEX_DRIVER,
6250 DDI_INTR_PRI(hba->intr_arg));
6251
6252 cv_init(&EMLXS_PKT_CV, NULL, CV_DRIVER, NULL);
6253
6254 mutex_init(&EMLXS_UB_LOCK, NULL, MUTEX_DRIVER,
6255 DDI_INTR_PRI(hba->intr_arg));
6256 }
6257 }
6258
6259 return;
6260
6261 } /* emlxs_lock_init() */
6262
6263
6264
6265 static void
6266 emlxs_lock_destroy(emlxs_hba_t *hba)
6267 {
6268 emlxs_port_t *port = &PPORT;
6269 uint32_t i;
6270
6271 mutex_destroy(&EMLXS_TIMER_LOCK);
6272 cv_destroy(&hba->timer_lock_cv);
6273
6274 mutex_destroy(&EMLXS_PORT_LOCK);
6275
6276 cv_destroy(&EMLXS_MBOX_CV);
6277 cv_destroy(&EMLXS_LINKUP_CV);
6278
6279 mutex_destroy(&EMLXS_LINKUP_LOCK);
6280 mutex_destroy(&EMLXS_MBOX_LOCK);
6281
6282 mutex_destroy(&EMLXS_TX_CHANNEL_LOCK);
6283
6284 for (i = 0; i < MAX_RINGS; i++) {
6285 mutex_destroy(&EMLXS_CMD_RING_LOCK(i));
6286 }
6287
6288 for (i = 0; i < EMLXS_MAX_WQS; i++) {
6289 mutex_destroy(&EMLXS_QUE_LOCK(i));
6290 }
6291
6292 mutex_destroy(&EMLXS_MSIID_LOCK);
6293
6294 mutex_destroy(&EMLXS_FCTAB_LOCK);
6295 mutex_destroy(&EMLXS_MEMGET_LOCK);
6296 mutex_destroy(&EMLXS_MEMPUT_LOCK);
6297 mutex_destroy(&EMLXS_IOCTL_LOCK);
6298 mutex_destroy(&EMLXS_SPAWN_LOCK);
6299 mutex_destroy(&EMLXS_PM_LOCK);
6300
6301 #ifdef DUMP_SUPPORT
6302 mutex_destroy(&EMLXS_DUMP_LOCK);
6303 #endif /* DUMP_SUPPORT */
6304
6305 /* Destroy per port locks */
6306 for (i = 0; i < MAX_VPORTS; i++) {
6307 port = &VPORT(i);
6308 rw_destroy(&port->node_rwlock);
6309 mutex_destroy(&EMLXS_PKT_LOCK);
6310 cv_destroy(&EMLXS_PKT_CV);
6311 mutex_destroy(&EMLXS_UB_LOCK);
6312 }
6313
6314 return;
6315
6316 } /* emlxs_lock_destroy() */
6317
6318
6319 /* init_flag values */
6320 #define ATTACH_SOFT_STATE 0x00000001
6321 #define ATTACH_FCA_TRAN 0x00000002
6322 #define ATTACH_HBA 0x00000004
6323 #define ATTACH_LOG 0x00000008
6324 #define ATTACH_MAP_BUS 0x00000010
6325 #define ATTACH_INTR_INIT 0x00000020
6326 #define ATTACH_PROP 0x00000040
6327 #define ATTACH_LOCK 0x00000080
6328 #define ATTACH_THREAD 0x00000100
6329 #define ATTACH_INTR_ADD 0x00000200
6330 #define ATTACH_ONLINE 0x00000400
6331 #define ATTACH_NODE 0x00000800
6332 #define ATTACH_FCT 0x00001000
6333 #define ATTACH_FCA 0x00002000
6334 #define ATTACH_KSTAT 0x00004000
6335 #define ATTACH_DHCHAP 0x00008000
6336 #define ATTACH_FM 0x00010000
6337 #define ATTACH_MAP_SLI 0x00020000
6338 #define ATTACH_SPAWN 0x00040000
6339 #define ATTACH_EVENTS 0x00080000
6340
/*
 * Undo attach-time initialization.
 *
 * init_flag is a bitmask of the ATTACH_* flags above recording which
 * attach steps completed; only those steps are undone, roughly in
 * reverse order of setup.  failed is nonzero when called from a failed
 * attach, in which case the device table slot is marked -1 rather than
 * cleared so the instance is not reused.
 */
static void
emlxs_driver_remove(dev_info_t *dip, uint32_t init_flag, uint32_t failed)
{
	emlxs_hba_t *hba = NULL;
	int ddiinst;

	ddiinst = ddi_get_instance(dip);

	if (init_flag & ATTACH_HBA) {
		hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);

		if (init_flag & ATTACH_SPAWN) {
			emlxs_thread_spawn_destroy(hba);
		}

		if (init_flag & ATTACH_EVENTS) {
			(void) emlxs_event_queue_destroy(hba);
		}

		if (init_flag & ATTACH_ONLINE) {
			(void) emlxs_offline(hba, 1);
		}

		if (init_flag & ATTACH_INTR_ADD) {
			(void) EMLXS_INTR_REMOVE(hba);
		}
#ifdef SFCT_SUPPORT
		if (init_flag & ATTACH_FCT) {
			emlxs_fct_detach(hba);
			emlxs_fct_modclose();
		}
#endif /* SFCT_SUPPORT */

#ifdef DHCHAP_SUPPORT
		if (init_flag & ATTACH_DHCHAP) {
			emlxs_dhc_detach(hba);
		}
#endif /* DHCHAP_SUPPORT */

		if (init_flag & ATTACH_KSTAT) {
			kstat_delete(hba->kstat);
		}

		if (init_flag & ATTACH_FCA) {
			emlxs_fca_detach(hba);
		}

		if (init_flag & ATTACH_NODE) {
			(void) ddi_remove_minor_node(hba->dip, "devctl");
		}

		if (init_flag & ATTACH_THREAD) {
			emlxs_thread_destroy(&hba->iodone_thread);
		}

		if (init_flag & ATTACH_PROP) {
			(void) ddi_prop_remove_all(hba->dip);
		}

		if (init_flag & ATTACH_LOCK) {
			emlxs_lock_destroy(hba);
		}

		if (init_flag & ATTACH_INTR_INIT) {
			(void) EMLXS_INTR_UNINIT(hba);
		}

		if (init_flag & ATTACH_MAP_BUS) {
			emlxs_unmap_bus(hba);
		}

		if (init_flag & ATTACH_MAP_SLI) {
			EMLXS_SLI_UNMAP_HDW(hba);
		}

#ifdef FMA_SUPPORT
		if (init_flag & ATTACH_FM) {
			emlxs_fm_fini(hba);
		}
#endif /* FMA_SUPPORT */

		if (init_flag & ATTACH_LOG) {
			emlxs_msg_log_destroy(hba);
		}

		if (init_flag & ATTACH_FCA_TRAN) {
			(void) ddi_set_driver_private(hba->dip, NULL);
			kmem_free(hba->fca_tran, sizeof (fc_fca_tran_t));
			hba->fca_tran = NULL;
		}

		/* Always true inside this guard; kept for symmetry. */
		if (init_flag & ATTACH_HBA) {
			emlxs_device.log[hba->emlxinst] = 0;
			/* -1 marks a failed attach so the slot isn't reused */
			emlxs_device.hba[hba->emlxinst] =
			    (emlxs_hba_t *)((unsigned long)((failed) ? -1 : 0));
#ifdef DUMP_SUPPORT
			emlxs_device.dump_txtfile[hba->emlxinst] = 0;
			emlxs_device.dump_dmpfile[hba->emlxinst] = 0;
			emlxs_device.dump_ceefile[hba->emlxinst] = 0;
#endif /* DUMP_SUPPORT */

		}
	}

	if (init_flag & ATTACH_SOFT_STATE) {
		(void) ddi_soft_state_free(emlxs_soft_state, ddiinst);
	}

	return;

} /* emlxs_driver_remove() */
6452
6453
6454 /* This determines which ports will be initiator mode */
6455 static uint32_t
6456 emlxs_fca_init(emlxs_hba_t *hba)
6457 {
6458 emlxs_port_t *port = &PPORT;
6459
6460 /* Check if SFS present */
6461 if (((void *)MODSYM(fc_fca_init) == NULL) ||
6462 ((void *)MODSYM(fc_fca_attach) == NULL)) {
6463 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6464 "SFS not present.");
6465 return (1);
6466 }
6467
6468 /* Check if our SFS driver interface matches the current SFS stack */
6469 if (MODSYM(fc_fca_attach) (hba->dip, hba->fca_tran) != DDI_SUCCESS) {
6470 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
6471 "SFS/FCA version mismatch. FCA=0x%x",
6472 hba->fca_tran->fca_version);
6473 return (1);
6474 }
6475
6476 return (0);
6477
6478 } /* emlxs_fca_init() */
6479
6480
6481 /* This determines which ports will be initiator or target mode */
/*
 * Decide which modes (initiator/target) each port will run in.
 *
 * Builds the per-port mode masks, tries to bring up the FCA (initiator)
 * and, under SFCT_SUPPORT, the COMSTAR FCT (target) interfaces, then
 * latches the results into the port flags under EMLXS_PORT_LOCK.
 * Finally re-validates mode-dependent config parameters.
 */
static void
emlxs_mode_init(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	emlxs_port_t *vport;
	uint32_t i;
	uint32_t mode_mask;

	/* Initialize mode masks */
	(void) emlxs_mode_init_masks(hba);

	if (!(port->mode_mask & MODE_INITIATOR)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Initiator mode not enabled.");

#ifdef SFCT_SUPPORT
		/* Disable dynamic target mode */
		cfg[CFG_DTM_ENABLE].current = 0;
#endif /* SFCT_SUPPORT */

		goto done1;
	}

	/* Try to initialize fca interface */
	if (emlxs_fca_init(hba) != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Initiator mode disabled.");

		/* Disable initiator mode */
		port->mode_mask &= ~MODE_INITIATOR;

#ifdef SFCT_SUPPORT
		/* Disable dynamic target mode */
		cfg[CFG_DTM_ENABLE].current = 0;
#endif /* SFCT_SUPPORT */

		goto done1;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Initiator mode enabled.");

done1:

#ifdef SFCT_SUPPORT
	if (!(port->mode_mask & MODE_TARGET)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Target mode not enabled.");

		/* Disable target modes */
		cfg[CFG_DTM_ENABLE].current = 0;
		cfg[CFG_TARGET_MODE].current = 0;

		goto done2;
	}

	/* Try to open the COMSTAR module */
	if (emlxs_fct_modopen() != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Target mode disabled.");

		/* Disable target modes */
		port->mode_mask &= ~MODE_TARGET;
		cfg[CFG_DTM_ENABLE].current = 0;
		cfg[CFG_TARGET_MODE].current = 0;

		goto done2;
	}

	/* Try to initialize fct interface */
	if (emlxs_fct_init(hba) != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Target mode disabled.");

		/* Disable target modes */
		port->mode_mask &= ~MODE_TARGET;
		cfg[CFG_DTM_ENABLE].current = 0;
		cfg[CFG_TARGET_MODE].current = 0;

		goto done2;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "Target mode enabled.");

done2:
	/* Adjust target mode parameter flags */
	if (cfg[CFG_DTM_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Dynamic target mode enabled.");

		cfg[CFG_TARGET_MODE].flags |= PARM_DYNAMIC;
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "Dynamic target mode disabled.");

		cfg[CFG_TARGET_MODE].flags &= ~PARM_DYNAMIC;
	}
#endif /* SFCT_SUPPORT */

	/* Now set port flags */
	mutex_enter(&EMLXS_PORT_LOCK);

	/* Set flags for physical port */
	if (port->mode_mask & MODE_INITIATOR) {
		port->flag |= EMLXS_INI_ENABLED;
	} else {
		port->flag &= ~EMLXS_INI_ENABLED;
	}

	if (port->mode_mask & MODE_TARGET) {
		port->flag |= EMLXS_TGT_ENABLED;
	} else {
		port->flag &= ~EMLXS_TGT_ENABLED;
	}

	/* Virtual ports may only use modes the physical port allows. */
	for (i = 1; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		/* Physical port mask has only allowable bits */
		mode_mask = vport->mode_mask & port->mode_mask;

		/* Set flags for physical port */
		if (mode_mask & MODE_INITIATOR) {
			vport->flag |= EMLXS_INI_ENABLED;
		} else {
			vport->flag &= ~EMLXS_INI_ENABLED;
		}

		if (mode_mask & MODE_TARGET) {
			vport->flag |= EMLXS_TGT_ENABLED;
		} else {
			vport->flag &= ~EMLXS_TGT_ENABLED;
		}
	}

	/* Set initial driver mode */
	emlxs_mode_set(hba);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Recheck possible mode dependent parameters */
	/* in case conditions have changed. */
	/* NOTE: cfg is re-pointed here; it no longer aliases &CFG. */
	if (port->mode != MODE_NONE) {
		for (i = 0; i < NUM_CFG_PARAM; i++) {
			cfg = &hba->config[i];
			cfg->current = emlxs_check_parm(hba, i, cfg->current);
		}
	}

	return;

} /* emlxs_mode_init() */
6636
6637
6638 /* This must be called while holding the EMLXS_PORT_LOCK */
extern void
emlxs_mode_set(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
#ifdef SFCT_SUPPORT
	emlxs_config_t *cfg = &CFG;
#endif /* SFCT_SUPPORT */
	emlxs_port_t *vport;
	uint32_t i;
	uint32_t cfg_tgt_mode = 0;

	/* mutex_enter(&EMLXS_PORT_LOCK); */

#ifdef SFCT_SUPPORT
	cfg_tgt_mode = cfg[CFG_TARGET_MODE].current;
#endif /* SFCT_SUPPORT */

	/*
	 * Set each port's active mode from its enable flags.  NOTE: the
	 * "else" branch below lives inside the #ifdef; without
	 * SFCT_SUPPORT, cfg_tgt_mode is always 0 and the closing brace
	 * after the #endif terminates the "if" instead of the "else".
	 */
	/* Initiator mode requested */
	if (!cfg_tgt_mode) {
		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);
			vport->mode = (vport->flag & EMLXS_INI_ENABLED)?
			    MODE_INITIATOR:MODE_NONE;
		}
#ifdef SFCT_SUPPORT
	/* Target mode requested */
	} else {
		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);
			vport->mode = (vport->flag & EMLXS_TGT_ENABLED)?
			    MODE_TARGET:MODE_NONE;
		}
#endif /* SFCT_SUPPORT */
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "MODE: %s", emlxs_mode_xlate(port->mode));

	/* mutex_exit(&EMLXS_PORT_LOCK); */

	return;

} /* emlxs_mode_set() */
6682
6683
/*
 * Compute the allowable mode mask (MODE_INITIATOR/MODE_TARGET bits) for
 * the physical port and every virtual port.  With SFCT_SUPPORT the
 * masks are derived from the target-mode and dynamic-target-mode config
 * parameters plus per-vport "*-mode-mask" .conf properties; otherwise
 * everything is initiator-only.
 */
static void
emlxs_mode_init_masks(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	uint32_t i;

#ifdef SFCT_SUPPORT
	emlxs_config_t *cfg = &CFG;
	uint32_t vport_mode_mask;
	uint32_t cfg_vport_mode_mask;
	uint32_t mode_mask;
	char string[256];

	port->mode_mask = 0;

	/* Initiator allowed unless target-only (no dynamic switching). */
	if (!cfg[CFG_TARGET_MODE].current ||
	    cfg[CFG_DTM_ENABLE].current) {
		port->mode_mask |= MODE_INITIATOR;
	}

	/* Target allowed if target mode or dynamic switching enabled. */
	if (cfg[CFG_TARGET_MODE].current ||
	    cfg[CFG_DTM_ENABLE].current) {
		port->mode_mask |= MODE_TARGET;
	}

	/* Physical port mask has only allowable bits */
	vport_mode_mask = port->mode_mask;
	cfg_vport_mode_mask = cfg[CFG_VPORT_MODE_MASK].current;

	/* Check dynamic target mode value for virtual ports */
	if (cfg[CFG_DTM_ENABLE].current == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "%s = 0: Virtual target ports are not supported.",
		    cfg[CFG_DTM_ENABLE].string);

		vport_mode_mask &= ~MODE_TARGET;
	}

	cfg_vport_mode_mask &= vport_mode_mask;

	/* Record any clamping of the configured vport mask. */
	if (cfg[CFG_VPORT_MODE_MASK].current != cfg_vport_mode_mask) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "%s: Changing 0x%x --> 0x%x",
		    cfg[CFG_VPORT_MODE_MASK].string,
		    cfg[CFG_VPORT_MODE_MASK].current,
		    cfg_vport_mode_mask);

		cfg[CFG_VPORT_MODE_MASK].current = cfg_vport_mode_mask;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "pport-mode-mask: %s", emlxs_mode_xlate(port->mode_mask));

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
	    "vport-mode-mask: %s", emlxs_mode_xlate(cfg_vport_mode_mask));

	/* Per-vport override: emlxsN-vportM-mode-mask .conf property. */
	for (i = 1; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		(void) snprintf(string, sizeof (string),
		    "%s%d-vport%d-mode-mask", DRIVER_NAME, hba->ddiinst, i);

		mode_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
		    (void *)hba->dip, DDI_PROP_DONTPASS, string,
		    cfg_vport_mode_mask);

		vport->mode_mask = mode_mask & vport_mode_mask;

		if (vport->mode_mask != cfg_vport_mode_mask) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "vport%d-mode-mask: %s",
			    i, emlxs_mode_xlate(vport->mode_mask));
		}
	}
#else
	/* Without SFCT target support, everything is initiator-only. */
	port->mode_mask = MODE_INITIATOR;
	for (i = 1; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);
		vport->mode_mask = MODE_INITIATOR;
	}
#endif /* SFCT_SUPPORT */

	return;

} /* emlxs_mode_init_masks() */
6770
6771
6772 static void
6773 emlxs_fca_attach(emlxs_hba_t *hba)
6774 {
6775 emlxs_port_t *port;
6776 uint32_t i;
6777
6778 /* Update our transport structure */
6779 hba->fca_tran->fca_iblock = (ddi_iblock_cookie_t *)&hba->intr_arg;
6780 hba->fca_tran->fca_cmd_max = hba->io_throttle;
6781
6782 for (i = 0; i < MAX_VPORTS; i++) {
6783 port = &VPORT(i);
6784 port->ub_count = EMLXS_UB_TOKEN_OFFSET;
6785 port->ub_pool = NULL;
6786 }
6787
6788 #if (EMLXS_MODREV >= EMLXS_MODREV5)
6789 bcopy((caddr_t)&hba->wwpn, (caddr_t)&hba->fca_tran->fca_perm_pwwn,
6790 sizeof (NAME_TYPE));
6791 #endif /* >= EMLXS_MODREV5 */
6792
6793 return;
6794
6795 } /* emlxs_fca_attach() */
6796
6797
6798 static void
6799 emlxs_fca_detach(emlxs_hba_t *hba)
6800 {
6801 emlxs_port_t *port = &PPORT;
6802 uint32_t i;
6803 emlxs_port_t *vport;
6804
6805 if (!(port->flag & EMLXS_INI_ENABLED)) {
6806 return;
6807 }
6808
6809 if ((void *)MODSYM(fc_fca_detach) != NULL) {
6810 MODSYM(fc_fca_detach)(hba->dip);
6811 }
6812
6813 /* Disable INI mode for all ports */
6814 for (i = 0; i < MAX_VPORTS; i++) {
6815 vport = &VPORT(i);
6816 vport->flag &= ~EMLXS_INI_ENABLED;
6817 }
6818
6819 return;
6820
6821 } /* emlxs_fca_detach() */
6822
6823
/*
 * Log the attach-time banner: driver/firmware versions, adapter model,
 * SLI/MSI/NPIV configuration, and the WWPN/WWNN of the physical port
 * and every configured virtual port.  The firmware library banner is
 * printed only once across all driver instances.
 */
static void
emlxs_drv_banner(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;
	char sli_mode[16];
	char msi_mode[16];
	char npiv_mode[16];
	emlxs_vpd_t *vpd = &VPD;
	uint8_t *wwpn;
	uint8_t *wwnn;
	uint32_t fw_show = 0;

	/* Display firmware library one time for all driver instances */
	mutex_enter(&emlxs_device.lock);
	if (!(emlxs_instance_flag & EMLXS_FW_SHOW)) {
		emlxs_instance_flag |= EMLXS_FW_SHOW;
		fw_show = 1;
	}
	mutex_exit(&emlxs_device.lock);

	if (fw_show) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s",
		    emlxs_copyright);
		emlxs_fw_show(hba);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s (%s)", emlxs_label,
	    emlxs_revision);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "%s Dev_id:%x Sub_id:%x Id:%d", hba->model_info.model,
	    hba->model_info.device_id, hba->model_info.ssdid,
	    hba->model_info.id);

#ifdef EMLXS_I386

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "Firmware:%s (%s) Boot:%s", vpd->fw_version, vpd->fw_label,
	    vpd->boot_version);

#else	/* EMLXS_SPARC */

	/* SPARC additionally reports the FCode version. */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "Firmware:%s (%s) Boot:%s Fcode:%s", vpd->fw_version,
	    vpd->fw_label, vpd->boot_version, vpd->fcode_version);

#endif /* EMLXS_I386 */

	/* SLI4 and later also report FIP capability. */
	if (hba->sli_mode > 3) {
		(void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d(%s)",
		    hba->sli_mode,
		    ((hba->flag & FC_FIP_SUPPORTED) ? "FIP" : "nonFIP"));
	} else {
		(void) snprintf(sli_mode, sizeof (sli_mode), "SLI:%d",
		    hba->sli_mode);
	}

	(void) strlcpy(msi_mode, " INTX:1", sizeof (msi_mode));

#ifdef MSI_SUPPORT
	if (hba->intr_flags & EMLXS_MSI_ENABLED) {
		switch (hba->intr_type) {
		case DDI_INTR_TYPE_FIXED:
			(void) strlcpy(msi_mode, " MSI:0", sizeof (msi_mode));
			break;

		case DDI_INTR_TYPE_MSI:
			(void) snprintf(msi_mode, sizeof (msi_mode), " MSI:%d",
			    hba->intr_count);
			break;

		case DDI_INTR_TYPE_MSIX:
			(void) snprintf(msi_mode, sizeof (msi_mode), " MSIX:%d",
			    hba->intr_count);
			break;
		}
	}
#endif /* MSI_SUPPORT */

	(void) strlcpy(npiv_mode, "", sizeof (npiv_mode));

	if (hba->flag & FC_NPIV_ENABLED) {
		(void) snprintf(npiv_mode, sizeof (npiv_mode), " NPIV:%d",
		    hba->vpi_max+1);
	} else {
		(void) strlcpy(npiv_mode, " NPIV:0", sizeof (npiv_mode));
	}

	/* SLI4 banners additionally distinguish FCoE from native FC. */
	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s%s",
		    sli_mode, msi_mode, npiv_mode,
		    ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
		    ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""),
		    ((SLI4_FCOE_MODE)? " FCoE":" FC"));
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg, "%s%s%s%s%s",
		    sli_mode, msi_mode, npiv_mode,
		    ((port->flag & EMLXS_INI_ENABLED)? " FCA":""),
		    ((port->flag & EMLXS_TGT_ENABLED)? " FCT":""));
	}

	/* Physical port world-wide names. */
	wwpn = (uint8_t *)&hba->wwpn;
	wwnn = (uint8_t *)&hba->wwnn;
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
	    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
	    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
	    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6],
	    wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3], wwnn[4], wwnn[5],
	    wwnn[6], wwnn[7]);

	/* Repeat for each configured virtual port. */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_CONFIG)) {
			continue;
		}

		wwpn = (uint8_t *)&port->wwpn;
		wwnn = (uint8_t *)&port->wwnn;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_msg,
		    "WWPN:%02X%02X%02X%02X%02X%02X%02X%02X "
		    "WWNN:%02X%02X%02X%02X%02X%02X%02X%02X",
		    wwpn[0], wwpn[1], wwpn[2], wwpn[3], wwpn[4], wwpn[5],
		    wwpn[6], wwpn[7], wwnn[0], wwnn[1], wwnn[2], wwnn[3],
		    wwnn[4], wwnn[5], wwnn[6], wwnn[7]);
	}

	/*
	 * Announce the device: ddi_report_dev() prints a banner at boot time,
	 * announcing the device pointed to by dip.
	 */
	(void) ddi_report_dev(hba->dip);

	return;

} /* emlxs_drv_banner() */
6962
6963
6964 extern void
6965 emlxs_get_fcode_version(emlxs_hba_t *hba)
6966 {
6967 emlxs_vpd_t *vpd = &VPD;
6968 char *prop_str;
6969 int status;
6970
6971 /* Setup fcode version property */
6972 prop_str = NULL;
6973 status =
6974 ddi_prop_lookup_string(DDI_DEV_T_ANY, (dev_info_t *)hba->dip, 0,
6975 "fcode-version", (char **)&prop_str);
6976
6977 if (status == DDI_PROP_SUCCESS) {
6978 bcopy(prop_str, vpd->fcode_version, strlen(prop_str));
6979 (void) ddi_prop_free((void *)prop_str);
6980 } else {
6981 (void) strncpy(vpd->fcode_version, "none",
6982 (sizeof (vpd->fcode_version)-1));
6983 }
6984
6985 return;
6986
6987 } /* emlxs_get_fcode_version() */
6988
6989
/*
 * emlxs_hba_attach() - DDI_ATTACH handler for one adapter instance.
 *
 * Allocates the per-instance soft state, maps the adapter registers,
 * initializes interrupts, locks, threads, and power management, brings
 * the adapter online, and attaches the FCA (initiator) and, optionally,
 * FCT (target) interfaces.
 *
 * Error-handling model: each successful setup step ORs an ATTACH_* bit
 * into init_flag.  On any failure the code jumps to "failed:", where
 * emlxs_driver_remove(dip, init_flag, 1) unwinds exactly the steps that
 * completed.  Returns DDI_SUCCESS or DDI_FAILURE.
 */
static int
emlxs_hba_attach(dev_info_t *dip)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	emlxs_config_t *cfg;
	char *prop_str;
	int ddiinst;
	int32_t emlxinst;
	int status;
	uint32_t rval;
	uint32_t init_flag = 0;	/* accumulates ATTACH_* bits for unwind */
	char local_pm_components[32];
	uint32_t i;

	ddiinst = ddi_get_instance(dip);
	emlxinst = emlxs_add_instance(ddiinst);

	if (emlxinst >= MAX_FC_BRDS) {
		cmn_err(CE_WARN,
		    "?%s: fca_hba_attach failed. Too many driver ddiinsts. "
		    "inst=%x", DRIVER_NAME, ddiinst);
		return (DDI_FAILURE);
	}

	/* (emlxs_hba_t *)-1 marks an instance that must not be reused */
	if (emlxs_device.hba[emlxinst] == (emlxs_hba_t *)-1) {
		return (DDI_FAILURE);
	}

	/* Already attached; nothing to do */
	if (emlxs_device.hba[emlxinst]) {
		return (DDI_SUCCESS);
	}

	/* An adapter can accidentally be plugged into a slave-only PCI slot */
	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Device in slave-only slot.",
		    DRIVER_NAME, ddiinst);
		return (DDI_FAILURE);
	}

	/* Allocate emlxs_dev_ctl structure. */
	if (ddi_soft_state_zalloc(emlxs_soft_state, ddiinst) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to allocate soft "
		    "state.", DRIVER_NAME, ddiinst);
		return (DDI_FAILURE);
	}
	init_flag |= ATTACH_SOFT_STATE;

	if ((hba = (emlxs_hba_t *)ddi_get_soft_state(emlxs_soft_state,
	    ddiinst)) == NULL) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to get soft state.",
		    DRIVER_NAME, ddiinst);
		goto failed;
	}
	bzero((char *)hba, sizeof (emlxs_hba_t));

	/* Publish this instance in the global device table */
	emlxs_device.hba[emlxinst] = hba;
	emlxs_device.log[emlxinst] = &hba->log;

#ifdef DUMP_SUPPORT
	emlxs_device.dump_txtfile[emlxinst] = &hba->dump_txtfile;
	emlxs_device.dump_dmpfile[emlxinst] = &hba->dump_dmpfile;
	emlxs_device.dump_ceefile[emlxinst] = &hba->dump_ceefile;
#endif /* DUMP_SUPPORT */

	hba->dip = dip;
	hba->emlxinst = emlxinst;
	hba->ddiinst = ddiinst;

	init_flag |= ATTACH_HBA;

	/* Enable the physical port on this HBA */
	port = &PPORT;
	port->hba = hba;
	port->vpi = 0;
	port->flag |= EMLXS_PORT_ENABLED;

	/* Allocate a transport structure */
	hba->fca_tran =
	    (fc_fca_tran_t *)kmem_zalloc(sizeof (fc_fca_tran_t), KM_NOSLEEP);
	if (hba->fca_tran == NULL) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to allocate fca_tran "
		    "memory.", DRIVER_NAME, ddiinst);
		goto failed;
	}
	bcopy((caddr_t)&emlxs_fca_tran, (caddr_t)hba->fca_tran,
	    sizeof (fc_fca_tran_t));

	/*
	 * Copy the global ddi_dma_attr to the local hba fields
	 */
	bcopy((caddr_t)&emlxs_dma_attr, (caddr_t)&hba->dma_attr,
	    sizeof (ddi_dma_attr_t));
	bcopy((caddr_t)&emlxs_dma_attr_ro, (caddr_t)&hba->dma_attr_ro,
	    sizeof (ddi_dma_attr_t));
	bcopy((caddr_t)&emlxs_dma_attr_1sg, (caddr_t)&hba->dma_attr_1sg,
	    sizeof (ddi_dma_attr_t));
	bcopy((caddr_t)&emlxs_dma_attr_fcip_rsp,
	    (caddr_t)&hba->dma_attr_fcip_rsp, sizeof (ddi_dma_attr_t));

	/* Reset the fca_tran dma_attr fields to the per-hba copies */
	hba->fca_tran->fca_dma_attr = &hba->dma_attr;
	hba->fca_tran->fca_dma_fcp_cmd_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcp_rsp_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcp_data_attr = &hba->dma_attr_ro;
	hba->fca_tran->fca_dma_fcip_cmd_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcip_rsp_attr = &hba->dma_attr_fcip_rsp;
	hba->fca_tran->fca_dma_fcsm_cmd_attr = &hba->dma_attr_1sg;
	hba->fca_tran->fca_dma_fcsm_rsp_attr = &hba->dma_attr;

	/* Set the transport structure pointer in our dip */
	/* SFS may panic if we are in target only mode */
	/* We will update the transport structure later */
	(void) ddi_set_driver_private(dip, (caddr_t)&emlxs_fca_tran);
	init_flag |= ATTACH_FCA_TRAN;

	/* Perform driver integrity check */
	rval = emlxs_integrity_check(hba);
	if (rval) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Driver integrity check "
		    "failed. %d error(s) found.", DRIVER_NAME, ddiinst, rval);
		goto failed;
	}

	cfg = &CFG;

	/* Seed the per-instance config table from the global defaults */
	bcopy((uint8_t *)&emlxs_cfg, (uint8_t *)cfg, sizeof (emlxs_cfg));
#ifdef MSI_SUPPORT
	/* Probe for the presence of the DDI interrupt framework symbol */
	if ((void *)&ddi_intr_get_supported_types != NULL) {
		hba->intr_flags |= EMLXS_MSI_ENABLED;
	}
#endif /* MSI_SUPPORT */


	/* Create the msg log file */
	if (emlxs_msg_log_create(hba) == 0) {
		cmn_err(CE_WARN,
		    "?%s%d: fca_hba_attach failed. Unable to create message "
		    "log", DRIVER_NAME, ddiinst);
		goto failed;

	}
	init_flag |= ATTACH_LOG;

	/* We can begin to use EMLXS_MSGF from this point on */

	/*
	 * Find the I/O bus type If it is not a SBUS card,
	 * then it is a PCI card. Default is PCI_FC (0).
	 */
	prop_str = NULL;
	status = ddi_prop_lookup_string(DDI_DEV_T_ANY,
	    (dev_info_t *)dip, 0, "name", (char **)&prop_str);

	if (status == DDI_PROP_SUCCESS) {
		/* "lpfs" device nodes are the SBUS variant */
		if (strncmp(prop_str, "lpfs", 4) == 0) {
			hba->bus_type = SBUS_FC;
		}

		(void) ddi_prop_free((void *)prop_str);
	}

	/*
	 * Copy DDS from the config method and update configuration parameters
	 */
	(void) emlxs_get_props(hba);

#ifdef FMA_SUPPORT
	hba->fm_caps = cfg[CFG_FM_CAPS].current;

	emlxs_fm_init(hba);

	init_flag |= ATTACH_FM;
#endif /* FMA_SUPPORT */

	if (emlxs_map_bus(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to map memory");
		goto failed;

	}
	init_flag |= ATTACH_MAP_BUS;

	/* Attempt to identify the adapter */
	rval = emlxs_init_adapter_info(hba);

	if (rval == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to get adapter info. Id:%d Device id:0x%x "
		    "Model:%s", hba->model_info.id,
		    hba->model_info.device_id, hba->model_info.model);
		goto failed;
	}
#define	FILTER_ORACLE_BRANDED
#ifdef FILTER_ORACLE_BRANDED

	/* Oracle branded adapters are not supported in this driver */
	if (hba->model_info.flags & EMLXS_ORACLE_BRANDED) {
		hba->model_info.flags |= EMLXS_NOT_SUPPORTED;
	}
#endif /* FILTER_ORACLE_BRANDED */

	/* Check if adapter is not supported */
	if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unsupported adapter found. Id:%d Device id:0x%x "
		    "SSDID:0x%x Model:%s", hba->model_info.id,
		    hba->model_info.device_id,
		    hba->model_info.ssdid, hba->model_info.model);
		goto failed;
	}

	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
		hba->sli.sli4.mem_sgl_size = MEM_SGL_SIZE;

#ifdef EMLXS_I386
		/*
		 * TigerShark has 64K limit for SG element size
		 * Do this for x86 alone. For SPARC, the driver
		 * breaks up the single SGE later on.
		 */
		hba->dma_attr_ro.dma_attr_count_max = 0xffff;

		i = cfg[CFG_MAX_XFER_SIZE].current;
		/* Update SGL size based on max_xfer_size */
		if (i > 516096) {
			/* 516096 = (((2048 / 16) - 2) * 4096) */
			hba->sli.sli4.mem_sgl_size = 4096;
		} else if (i > 253952) {
			/* 253952 = (((1024 / 16) - 2) * 4096) */
			hba->sli.sli4.mem_sgl_size = 2048;
		} else {
			hba->sli.sli4.mem_sgl_size = 1024;
		}
#endif /* EMLXS_I386 */

		i = SGL_TO_SGLLEN(hba->sli.sli4.mem_sgl_size);
	} else {
		hba->sli.sli3.mem_bpl_size = MEM_BPL_SIZE;

#ifdef EMLXS_I386
		i = cfg[CFG_MAX_XFER_SIZE].current;
		/* Update BPL size based on max_xfer_size */
		if (i > 688128) {
			/* 688128 = (((2048 / 12) - 2) * 4096) */
			hba->sli.sli3.mem_bpl_size = 4096;
		} else if (i > 339968) {
			/* 339968 = (((1024 / 12) - 2) * 4096) */
			hba->sli.sli3.mem_bpl_size = 2048;
		} else {
			hba->sli.sli3.mem_bpl_size = 1024;
		}
#endif /* EMLXS_I386 */

		i = BPL_TO_SGLLEN(hba->sli.sli3.mem_bpl_size);
	}

	/* Update dma_attr_sgllen based on true SGL length */
	hba->dma_attr.dma_attr_sgllen = i;
	hba->dma_attr_ro.dma_attr_sgllen = i;
	hba->dma_attr_fcip_rsp.dma_attr_sgllen = i;

	if (EMLXS_SLI_MAP_HDW(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to map memory");
		goto failed;

	}
	init_flag |= ATTACH_MAP_SLI;

	/* Initialize the interrupts. But don't add them yet */
	status = EMLXS_INTR_INIT(hba, 0);
	if (status != DDI_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to initalize interrupt(s).");
		goto failed;

	}
	init_flag |= ATTACH_INTR_INIT;

	/* Initialize LOCKs */
	emlxs_msg_lock_reinit(hba);
	emlxs_lock_init(hba);
	init_flag |= ATTACH_LOCK;

	/* Create the event queue */
	if (emlxs_event_queue_create(hba) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to create event queue");

		goto failed;

	}
	init_flag |= ATTACH_EVENTS;

	/* Initialize the power management */
	mutex_enter(&EMLXS_PM_LOCK);
	hba->pm_state = EMLXS_PM_IN_ATTACH;
	hba->pm_level = EMLXS_PM_ADAPTER_DOWN;
	hba->pm_busy = 0;
#ifdef IDLE_TIMER
	hba->pm_active = 1;
	hba->pm_idle_timer = 0;
#endif /* IDLE_TIMER */
	mutex_exit(&EMLXS_PM_LOCK);

	/* Set the pm component name */
	(void) snprintf(local_pm_components, sizeof (local_pm_components),
	    "NAME=%s%d", DRIVER_NAME, ddiinst);
	emlxs_pm_components[0] = local_pm_components;

	/* Check if power management support is enabled */
	if (cfg[CFG_PM_SUPPORT].current) {
		if (ddi_prop_update_string_array(DDI_DEV_T_NONE, dip,
		    "pm-components", emlxs_pm_components,
		    sizeof (emlxs_pm_components) /
		    sizeof (emlxs_pm_components[0])) !=
		    DDI_PROP_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
			    "Unable to create pm components.");
			goto failed;
		}
	}

	/* Needed for suspend and resume support */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "pm-hardware-state",
	    "needs-suspend-resume");
	init_flag |= ATTACH_PROP;

	emlxs_thread_spawn_create(hba);
	init_flag |= ATTACH_SPAWN;

	emlxs_thread_create(hba, &hba->iodone_thread);

	init_flag |= ATTACH_THREAD;

	/* emlxs_online() below may request a retry (EAGAIN) back to here */
retry:
	/* Setup initiator / target ports */
	emlxs_mode_init(hba);

	/* If driver did not attach to either stack, */
	/* then driver attach fails */
	if (port->mode == MODE_NONE) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Driver interfaces not enabled.");
		goto failed;
	}

	/*
	 * Initialize HBA
	 */

	/* Set initial state */
	mutex_enter(&EMLXS_PORT_LOCK);
	hba->flag |= FC_OFFLINE_MODE;
	hba->flag &= ~(FC_ONLINE_MODE | FC_ONLINING_MODE | FC_OFFLINING_MODE);
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Note: intentional assignment-in-condition */
	if (status = emlxs_online(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to initialize adapter.");

		if (status == EAGAIN) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
			    "Retrying adapter initialization ...");
			goto retry;
		}
		goto failed;
	}
	init_flag |= ATTACH_ONLINE;

	/* This is to ensure that the model property is properly set */
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
	    hba->model_info.model);

	/* Create the device node. */
	if (ddi_create_minor_node(dip, "devctl", S_IFCHR, ddiinst, NULL, 0) ==
	    DDI_FAILURE) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_failed_msg,
		    "Unable to create device node.");
		goto failed;
	}
	init_flag |= ATTACH_NODE;

	/* Attach initiator now */
	/* This must come after emlxs_online() */
	emlxs_fca_attach(hba);
	init_flag |= ATTACH_FCA;

	/* Initialize kstat information */
	hba->kstat = kstat_create(DRIVER_NAME,
	    ddiinst, "statistics", "controller",
	    KSTAT_TYPE_RAW, sizeof (emlxs_stats_t),
	    KSTAT_FLAG_VIRTUAL);

	if (hba->kstat == NULL) {
		/* Non-fatal: attach continues without statistics */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
		    "kstat_create failed.");
	} else {
		hba->kstat->ks_data = (void *)&hba->stats;
		kstat_install(hba->kstat);
		init_flag |= ATTACH_KSTAT;
	}

#if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
	/* Setup virtual port properties */
	emlxs_read_vport_prop(hba);
#endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */


#ifdef DHCHAP_SUPPORT
	emlxs_dhc_attach(hba);
	init_flag |= ATTACH_DHCHAP;
#endif /* DHCHAP_SUPPORT */

	/* Display the driver banner now */
	emlxs_drv_banner(hba);

	/* Raise the power level */

	/*
	 * This will not execute emlxs_hba_resume because
	 * EMLXS_PM_IN_ATTACH is set
	 */
	if (emlxs_pm_raise_power(dip) != DDI_SUCCESS) {
		/* Set power up anyway. This should not happen! */
		mutex_enter(&EMLXS_PM_LOCK);
		hba->pm_level = EMLXS_PM_ADAPTER_UP;
		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
		mutex_exit(&EMLXS_PM_LOCK);
	} else {
		mutex_enter(&EMLXS_PM_LOCK);
		hba->pm_state &= ~EMLXS_PM_IN_ATTACH;
		mutex_exit(&EMLXS_PM_LOCK);
	}

#ifdef SFCT_SUPPORT
	if (port->flag & EMLXS_TGT_ENABLED) {
		/* Do this last */
		emlxs_fct_attach(hba);
		init_flag |= ATTACH_FCT;
	}
#endif /* SFCT_SUPPORT */

	return (DDI_SUCCESS);

failed:

	/* Unwind only the steps recorded in init_flag */
	emlxs_driver_remove(dip, init_flag, 1);

	return (DDI_FAILURE);

} /* emlxs_hba_attach() */
7448
7449
7450 static int
7451 emlxs_hba_detach(dev_info_t *dip)
7452 {
7453 emlxs_hba_t *hba;
7454 emlxs_port_t *port;
7455 int ddiinst;
7456 int count;
7457 uint32_t init_flag = (uint32_t)-1;
7458
7459 ddiinst = ddi_get_instance(dip);
7460 hba = ddi_get_soft_state(emlxs_soft_state, ddiinst);
7461 port = &PPORT;
7462
7463 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_debug_msg, NULL);
7464
7465 mutex_enter(&EMLXS_PM_LOCK);
7466 hba->pm_state |= EMLXS_PM_IN_DETACH;
7467 mutex_exit(&EMLXS_PM_LOCK);
7468
7469 /* Lower the power level */
7470 /*
7471 * This will not suspend the driver since the
7472 * EMLXS_PM_IN_DETACH has been set
7473 */
7474 if (emlxs_pm_lower_power(dip) != DDI_SUCCESS) {
7475 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
7476 "Unable to lower power.");
7477
7478 mutex_enter(&EMLXS_PM_LOCK);
7479 hba->pm_state &= ~EMLXS_PM_IN_DETACH;
7480 mutex_exit(&EMLXS_PM_LOCK);
7481
7482 return (DDI_FAILURE);
7483 }
7484
7485 /* Take the adapter offline first, if not already */
7486 if (emlxs_offline(hba, 1) != 0) {
7487 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_detach_failed_msg,
7488 "Unable to take adapter offline.");
7489
7490 mutex_enter(&EMLXS_PM_LOCK);
7491 hba->pm_state &= ~EMLXS_PM_IN_DETACH;
7492 mutex_exit(&EMLXS_PM_LOCK);
7493
7494 (void) emlxs_pm_raise_power(dip);
7495
7496 return (DDI_FAILURE);
7497 }
7498 /* Check ub buffer pools */
7499 if (port->ub_pool) {
7500 mutex_enter(&EMLXS_UB_LOCK);
7501
7502 /* Wait up to 10 seconds for all ub pools to be freed */
7503 count = 10 * 2;
7504 while (port->ub_pool && count) {
7505 mutex_exit(&EMLXS_UB_LOCK);
7506 delay(drv_usectohz(500000)); /* half second wait */
7507 count--;
7508 mutex_enter(&EMLXS_UB_LOCK);
7509 }
7510
7511 if (port->ub_pool) {
7512 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
7513 "fca_unbind_port: Unsolicited buffers still "
7514 "active. port=%p. Destroying...", port);
7515
7516 /* Destroy all pools */
7517 while (port->ub_pool) {
7518 emlxs_ub_destroy(port, port->ub_pool);
7519 }
7520 }
7521
7522 mutex_exit(&EMLXS_UB_LOCK);
7523 }
7524 init_flag &= ~ATTACH_ONLINE;
7525
7526 /* Remove the driver instance */
7527 emlxs_driver_remove(dip, init_flag, 0);
7528
7529 return (DDI_SUCCESS);
7530
7531 } /* emlxs_hba_detach() */
7532
7533
7534 extern int
7535 emlxs_map_bus(emlxs_hba_t *hba)
7536 {
7537 emlxs_port_t *port = &PPORT;
7538 dev_info_t *dip;
7539 ddi_device_acc_attr_t dev_attr;
7540 int status;
7541
7542 dip = (dev_info_t *)hba->dip;
7543 dev_attr = emlxs_dev_acc_attr;
7544
7545 if (hba->bus_type == SBUS_FC) {
7546 if (hba->pci_acc_handle == 0) {
7547 status = ddi_regs_map_setup(dip,
7548 SBUS_DFLY_PCI_CFG_RINDEX,
7549 (caddr_t *)&hba->pci_addr,
7550 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
7551 if (status != DDI_SUCCESS) {
7552 EMLXS_MSGF(EMLXS_CONTEXT,
7553 &emlxs_attach_failed_msg,
7554 "(SBUS) ddi_regs_map_setup PCI failed. "
7555 "status=%x", status);
7556 goto failed;
7557 }
7558 }
7559
7560 if (hba->sbus_pci_handle == 0) {
7561 status = ddi_regs_map_setup(dip,
7562 SBUS_TITAN_PCI_CFG_RINDEX,
7563 (caddr_t *)&hba->sbus_pci_addr,
7564 0, 0, &dev_attr, &hba->sbus_pci_handle);
7565 if (status != DDI_SUCCESS) {
7566 EMLXS_MSGF(EMLXS_CONTEXT,
7567 &emlxs_attach_failed_msg,
7568 "(SBUS) ddi_regs_map_setup TITAN PCI "
7569 "failed. status=%x", status);
7570 goto failed;
7571 }
7572 }
7573
7574 } else { /* ****** PCI ****** */
7575
7576 if (hba->pci_acc_handle == 0) {
7577 status = ddi_regs_map_setup(dip,
7578 PCI_CFG_RINDEX,
7579 (caddr_t *)&hba->pci_addr,
7580 0, 0, &emlxs_dev_acc_attr, &hba->pci_acc_handle);
7581 if (status != DDI_SUCCESS) {
7582 EMLXS_MSGF(EMLXS_CONTEXT,
7583 &emlxs_attach_failed_msg,
7584 "(PCI) ddi_regs_map_setup PCI failed. "
7585 "status=%x", status);
7586 goto failed;
7587 }
7588 }
7589 #ifdef EMLXS_I386
7590 /* Setting up PCI configure space */
7591 (void) ddi_put16(hba->pci_acc_handle,
7592 (uint16_t *)(hba->pci_addr + PCI_COMMAND_REGISTER),
7593 CMD_CFG_VALUE | CMD_IO_ENBL);
7594
7595 #ifdef FMA_SUPPORT
7596 if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
7597 != DDI_FM_OK) {
7598 EMLXS_MSGF(EMLXS_CONTEXT,
7599 &emlxs_invalid_access_handle_msg, NULL);
7600 goto failed;
7601 }
7602 #endif /* FMA_SUPPORT */
7603
7604 #endif /* EMLXS_I386 */
7605
7606 }
7607 return (0);
7608
7609 failed:
7610
7611 emlxs_unmap_bus(hba);
7612 return (ENOMEM);
7613
7614 } /* emlxs_map_bus() */
7615
7616
7617 extern void
7618 emlxs_unmap_bus(emlxs_hba_t *hba)
7619 {
7620 if (hba->pci_acc_handle) {
7621 (void) ddi_regs_map_free(&hba->pci_acc_handle);
7622 hba->pci_acc_handle = 0;
7623 }
7624
7625 if (hba->sbus_pci_handle) {
7626 (void) ddi_regs_map_free(&hba->sbus_pci_handle);
7627 hba->sbus_pci_handle = 0;
7628 }
7629
7630 return;
7631
7632 } /* emlxs_unmap_bus() */
7633
7634
7635 static int
7636 emlxs_get_props(emlxs_hba_t *hba)
7637 {
7638 emlxs_config_t *cfg;
7639 uint32_t i;
7640 char string[256];
7641 uint32_t new_value;
7642
7643 /* Initialize each parameter */
7644 for (i = 0; i < NUM_CFG_PARAM; i++) {
7645 cfg = &hba->config[i];
7646
7647 /* Ensure strings are terminated */
7648 cfg->string[(EMLXS_CFG_STR_SIZE-1)] = 0;
7649 cfg->help[(EMLXS_CFG_HELP_SIZE-1)] = 0;
7650
7651 /* Set the current value to the default value */
7652 new_value = cfg->def;
7653
7654 /* First check for the global setting */
7655 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
7656 (void *)hba->dip, DDI_PROP_DONTPASS,
7657 cfg->string, new_value);
7658
7659 /* Now check for the per adapter ddiinst setting */
7660 (void) snprintf(string, sizeof (string), "%s%d-%s", DRIVER_NAME,
7661 hba->ddiinst, cfg->string);
7662
7663 new_value = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
7664 (void *)hba->dip, DDI_PROP_DONTPASS, string, new_value);
7665
7666 /* Now check the parameter */
7667 cfg->current = emlxs_check_parm(hba, i, new_value);
7668 }
7669
7670 return (0);
7671
7672 } /* emlxs_get_props() */
7673
7674
/*
 * emlxs_check_parm() - Validate a proposed value for config parameter
 * "index".
 *
 * A value outside [cfg->low, cfg->hi] is replaced by cfg->def.  Several
 * parameters then get additional semantic checks (see the switch below),
 * which may further adjust the value.  Returns the (possibly corrected)
 * value to use; for an invalid index the input is returned unchanged.
 */
extern uint32_t
emlxs_check_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;
	emlxs_config_t *cfg;
	emlxs_vpd_t *vpd = &VPD;

	if (index >= NUM_CFG_PARAM) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "check_parm failed. Invalid index = %d", index);

		return (new_value);
	}

	cfg = &hba->config[index];

	/* Out-of-range values fall back to the compiled-in default */
	if (new_value > cfg->hi) {
		new_value = cfg->def;
	} else if (new_value < cfg->low) {
		new_value = cfg->def;
	}

	/* Perform additional checks */
	switch (index) {
#ifdef SFCT_SUPPORT
	case CFG_NPIV_ENABLE:
		/* NPIV requires initiator support (dual/initiator mode) */
		if (hba->config[CFG_TARGET_MODE].current &&
		    hba->config[CFG_DTM_ENABLE].current == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
			    "enable-npiv: Not supported in pure target mode. "
			    "Disabling.");

			new_value = 0;
		}
		break;
#endif /* SFCT_SUPPORT */


	case CFG_NUM_NODES:
		switch (new_value) {
		case 1:
		case 2:
			/* Must have at least 3 if not 0 */
			return (3);

		default:
			break;
		}
		break;

	case CFG_FW_CHECK:
		/* The 0x2 bit implies the 0x1 bit will also be set */
		if (new_value & 0x2) {
			new_value |= 0x1;
		}

		/* The 0x4 bit should not be set if 0x1 or 0x2 is not set */
		if (!(new_value & 0x3) && (new_value & 0x4)) {
			new_value &= ~0x4;
		}
		break;

	case CFG_LINK_SPEED:
		/* Speeds above 8Gb are incompatible with loop topology */
		if ((new_value > 8) &&
		    (hba->config[CFG_TOPOLOGY].current == 4)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
			    "link-speed: %dGb not supported in loop topology. "
			    "Switching to auto detect.",
			    new_value);

			new_value = 0;
			break;
		}

		/* If the VPD reports speed capabilities, check against them */
		if (vpd->link_speed) {
			switch (new_value) {
			case 0:
				/* 0 = auto detect; always allowed */
				break;

			case 1:
				if (!(vpd->link_speed & LMT_1GB_CAPABLE)) {
					new_value = 0;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_init_msg,
					    "link-speed: 1Gb not supported "
					    "by adapter. Switching to auto "
					    "detect.");
				}
				break;

			case 2:
				if (!(vpd->link_speed & LMT_2GB_CAPABLE)) {
					new_value = 0;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_init_msg,
					    "link-speed: 2Gb not supported "
					    "by adapter. Switching to auto "
					    "detect.");
				}
				break;

			case 4:
				if (!(vpd->link_speed & LMT_4GB_CAPABLE)) {
					new_value = 0;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_init_msg,
					    "link-speed: 4Gb not supported "
					    "by adapter. Switching to auto "
					    "detect.");
				}
				break;

			case 8:
				if (!(vpd->link_speed & LMT_8GB_CAPABLE)) {
					new_value = 0;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_init_msg,
					    "link-speed: 8Gb not supported "
					    "by adapter. Switching to auto "
					    "detect.");
				}
				break;

			case 16:
				if (!(vpd->link_speed & LMT_16GB_CAPABLE)) {
					new_value = 0;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_init_msg,
					    "link-speed: 16Gb not supported "
					    "by adapter. Switching to auto "
					    "detect.");
				}
				break;

			default:
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
				    "link-speed: Invalid value=%d provided. "
				    "Switching to auto detect.",
				    new_value);

				new_value = 0;
			}
		} else {	/* Perform basic validity check */

			/* Perform additional check on link speed */
			switch (new_value) {
			case 0:
			case 1:
			case 2:
			case 4:
			case 8:
			case 16:
				/* link-speed is a valid choice */
				break;

			default:
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
				    "link-speed: Invalid value=%d provided. "
				    "Switching to auto detect.",
				    new_value);

				new_value = 0;
			}
		}
		break;

	case CFG_TOPOLOGY:
		/* Mirror of the link-speed check above: loop caps at 8Gb */
		if ((new_value == 4) &&
		    (hba->config[CFG_LINK_SPEED].current > 8)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
			    "topology: Loop topology not supported "
			    "with link speeds greater than 8Gb. "
			    "Switching to auto detect.");

			new_value = 0;
			break;
		}

		/* Perform additional check on topology */
		switch (new_value) {
		case 0:
		case 2:
		case 4:
		case 6:
			/* topology is a valid choice */
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
			    "topology: Invalid value=%d provided. "
			    "Switching to auto detect.",
			    new_value);

			new_value = 0;
			break;
		}
		break;

#ifdef DHCHAP_SUPPORT
	case CFG_AUTH_TYPE:
	{
		uint32_t shift;
		uint32_t mask;

		/*
		 * The value encodes a 4-entry priority list, one nibble
		 * per entry (high nibble first).  Any nibble above
		 * DFC_AUTH_TYPE_MAX invalidates the whole value.
		 */
		shift = 12;
		mask = 0xF000;
		for (i = 0; i < 4; i++) {
			if (((new_value & mask) >> shift) > DFC_AUTH_TYPE_MAX) {
				return (cfg->def);
			}

			shift -= 4;
			mask >>= 4;
		}
		break;
	}

	case CFG_AUTH_HASH:
	{
		uint32_t shift;
		uint32_t mask;

		/* Same nibble-priority encoding as CFG_AUTH_TYPE */
		shift = 12;
		mask = 0xF000;
		for (i = 0; i < 4; i++) {
			if (((new_value & mask) >> shift) > DFC_AUTH_HASH_MAX) {
				return (cfg->def);
			}

			shift -= 4;
			mask >>= 4;
		}
		break;
	}

	case CFG_AUTH_GROUP:
	{
		uint32_t shift;
		uint32_t mask;

		/* 8-entry nibble priority list spanning all 32 bits */
		shift = 28;
		mask = 0xF0000000;
		for (i = 0; i < 8; i++) {
			if (((new_value & mask) >> shift) >
			    DFC_AUTH_GROUP_MAX) {
				return (cfg->def);
			}

			shift -= 4;
			mask >>= 4;
		}
		break;
	}

	case CFG_AUTH_INTERVAL:
		/* Enforce a minimum re-authentication interval of 10 */
		if (new_value < 10) {
			return (10);
		}
		break;


#endif /* DHCHAP_SUPPORT */

	} /* switch */

	return (new_value);

} /* emlxs_check_parm() */
7952
7953
/*
 * emlxs_set_parm() - Change a dynamic configuration parameter at runtime.
 *
 * The value is validated via emlxs_check_parm() and stored in
 * cfg->current, after which parameter-specific side effects are applied
 * (e.g. link reset, auth-config refresh).  Only parameters flagged
 * PARM_DYNAMIC may be changed.
 *
 * Returns FC_SUCCESS on success (including a no-op change),
 * (uint32_t)FC_FAILURE on an invalid/non-dynamic parameter or an SLI
 * downgrade on an SLI4 adapter, or 2 when the change is blocked because
 * virtual ports are still enabled (in which case cfg->current is
 * restored to its previous value).
 */
extern uint32_t
emlxs_set_parm(emlxs_hba_t *hba, uint32_t index, uint32_t new_value)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	uint32_t vpi;
	emlxs_config_t *cfg;
	uint32_t old_value;

	if (index >= NUM_CFG_PARAM) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "set_parm failed. Invalid index = %d", index);

		return ((uint32_t)FC_FAILURE);
	}

	cfg = &hba->config[index];

	if (!(cfg->flags & PARM_DYNAMIC)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "set_parm failed. %s is not dynamic.", cfg->string);

		return ((uint32_t)FC_FAILURE);
	}

	/* Check new value */
	old_value = new_value;
	new_value = emlxs_check_parm(hba, index, new_value);

	/* Log if check_parm had to adjust the requested value */
	if (old_value != new_value) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "set_parm: %s invalid. 0x%x --> 0x%x",
		    cfg->string, old_value, new_value);
	}

	/* Return now if no actual change */
	if (new_value == cfg->current) {
		return (FC_SUCCESS);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
	    "set_parm: %s changing. 0x%x --> 0x%x",
	    cfg->string, cfg->current, new_value);

	/* Commit; old_value is kept for possible rollback below */
	old_value = cfg->current;
	cfg->current = new_value;

	/* React to change if needed */
	switch (index) {

	case CFG_PCI_MAX_READ:
		/* Update MXR */
		emlxs_pcix_mxr_update(hba, 1);
		break;

#ifdef SFCT_SUPPORT
	case CFG_TARGET_MODE:
		/* Mode change takes effect via a link reset */
		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
		break;
#endif /* SFCT_SUPPORT */

	case CFG_SLI_MODE:
		/* Check SLI mode */
		if ((hba->sli_mode == 3) && (new_value == 2)) {
			/* All vports must be disabled first */
			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
				vport = &VPORT(vpi);

				if (vport->flag & EMLXS_PORT_ENABLED) {
					/* Reset current value */
					cfg->current = old_value;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sfs_debug_msg,
					    "set_parm failed. %s: vpi=%d "
					    "still enabled. Value restored to "
					    "0x%x.", cfg->string, vpi,
					    old_value);

					return (2);
				}
			}
		}

		if ((hba->sli_mode >= 4) && (new_value < 4)) {
			/*
			 * Not allow to set to SLI 2 or 3 if HBA supports SLI4
			 */
			cfg->current = old_value;
			return ((uint32_t)FC_FAILURE);
		}

		break;

	case CFG_NPIV_ENABLE:
		/* Check if NPIV is being disabled */
		if ((old_value == 1) && (new_value == 0)) {
			/* All vports must be disabled first */
			for (vpi = 1; vpi < MAX_VPORTS; vpi++) {
				vport = &VPORT(vpi);

				if (vport->flag & EMLXS_PORT_ENABLED) {
					/* Reset current value */
					cfg->current = old_value;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sfs_debug_msg,
					    "set_parm failed. %s: vpi=%d "
					    "still enabled. Value restored to "
					    "0x%x.", cfg->string, vpi,
					    old_value);

					return (2);
				}
			}
		}

		/* Trigger adapter reset */
		/* (void) emlxs_reset(port, FC_FCA_RESET); */

		break;


	case CFG_VPORT_RESTRICTED:
		/* Propagate the restriction flag to all configured vports */
		for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
			vport = &VPORT(vpi);

			if (!(vport->flag & EMLXS_PORT_CONFIG)) {
				continue;
			}

			/* Ports with explicit per-port options are skipped */
			if (vport->options & EMLXS_OPT_RESTRICT_MASK) {
				continue;
			}

			if (new_value) {
				vport->flag |= EMLXS_PORT_RESTRICTED;
			} else {
				vport->flag &= ~EMLXS_PORT_RESTRICTED;
			}
		}

		break;

#ifdef DHCHAP_SUPPORT
	case CFG_AUTH_ENABLE:
		(void) emlxs_reset(port, FC_FCA_LINK_RESET);
		break;

	case CFG_AUTH_TMO:
		hba->auth_cfg.authentication_timeout = cfg->current;
		break;

	case CFG_AUTH_MODE:
		hba->auth_cfg.authentication_mode = cfg->current;
		break;

	case CFG_AUTH_BIDIR:
		hba->auth_cfg.bidirectional = cfg->current;
		break;

	case CFG_AUTH_TYPE:
		/* Unpack the 4-nibble priority list (high nibble first) */
		hba->auth_cfg.authentication_type_priority[0] =
		    (cfg->current & 0xF000) >> 12;
		hba->auth_cfg.authentication_type_priority[1] =
		    (cfg->current & 0x0F00) >> 8;
		hba->auth_cfg.authentication_type_priority[2] =
		    (cfg->current & 0x00F0) >> 4;
		hba->auth_cfg.authentication_type_priority[3] =
		    (cfg->current & 0x000F);
		break;

	case CFG_AUTH_HASH:
		/* Unpack the 4-nibble hash priority list */
		hba->auth_cfg.hash_priority[0] =
		    (cfg->current & 0xF000) >> 12;
		hba->auth_cfg.hash_priority[1] = (cfg->current & 0x0F00)>>8;
		hba->auth_cfg.hash_priority[2] = (cfg->current & 0x00F0)>>4;
		hba->auth_cfg.hash_priority[3] = (cfg->current & 0x000F);
		break;

	case CFG_AUTH_GROUP:
		/* Unpack the 8-nibble DH group priority list */
		hba->auth_cfg.dh_group_priority[0] =
		    (cfg->current & 0xF0000000) >> 28;
		hba->auth_cfg.dh_group_priority[1] =
		    (cfg->current & 0x0F000000) >> 24;
		hba->auth_cfg.dh_group_priority[2] =
		    (cfg->current & 0x00F00000) >> 20;
		hba->auth_cfg.dh_group_priority[3] =
		    (cfg->current & 0x000F0000) >> 16;
		hba->auth_cfg.dh_group_priority[4] =
		    (cfg->current & 0x0000F000) >> 12;
		hba->auth_cfg.dh_group_priority[5] =
		    (cfg->current & 0x00000F00) >> 8;
		hba->auth_cfg.dh_group_priority[6] =
		    (cfg->current & 0x000000F0) >> 4;
		hba->auth_cfg.dh_group_priority[7] =
		    (cfg->current & 0x0000000F);
		break;

	case CFG_AUTH_INTERVAL:
		hba->auth_cfg.reauthenticate_time_interval = cfg->current;
		break;
#endif /* DHCHAP_SUPPORT */

	}

	return (FC_SUCCESS);

} /* emlxs_set_parm() */
8163
8164
8165 /*
8166 * emlxs_mem_alloc OS specific routine for memory allocation / mapping
8167 *
8168 * The buf_info->flags field describes the memory operation requested.
8169 *
8170 * FC_MBUF_PHYSONLY set requests a supplied virtual address be mapped for DMA
8171 * Virtual address is supplied in buf_info->virt
8172 * DMA mapping flag is in buf_info->align
8173 * (DMA_READ_ONLY, DMA_WRITE_ONLY, DMA_READ_WRITE)
8174 * The mapped physical address is returned buf_info->phys
8175 *
8176 * FC_MBUF_PHYSONLY cleared requests memory be allocated for driver use and
8177 * if FC_MBUF_DMA is set the memory is also mapped for DMA
8178 * The byte alignment of the memory request is supplied in buf_info->align
8179 * The byte size of the memory request is supplied in buf_info->size
8180 * The virtual address is returned buf_info->virt
8181 * The mapped physical address is returned buf_info->phys (for FC_MBUF_DMA)
8182 */
extern uint8_t *
emlxs_mem_alloc(emlxs_hba_t *hba, MBUF_INFO *buf_info)
{
	emlxs_port_t *port = &PPORT;
	ddi_dma_attr_t dma_attr;
	ddi_device_acc_attr_t dev_attr;
	uint_t cookie_count;
	size_t dma_reallen;
	ddi_dma_cookie_t dma_cookie;
	uint_t dma_flag;
	int status;

	/* Start from the adapter's single-SGE DMA attributes and */
	/* tighten them per the caller's request flags below */
	dma_attr = hba->dma_attr_1sg;
	dev_attr = emlxs_data_acc_attr;

	if (buf_info->flags & FC_MBUF_SNGLSG) {
		/* Restrict the mapping to a single scatter/gather entry */
		dma_attr.dma_attr_sgllen = 1;
	}

	if (buf_info->flags & FC_MBUF_DMA32) {
		/* Restrict the mapping to 32-bit DMA address space */
		dma_attr.dma_attr_addr_hi = (uint64_t)0xffffffff;
	}

	if (buf_info->flags & FC_MBUF_PHYSONLY) {
		/* Map a caller-supplied virtual address for DMA; */
		/* no memory is allocated on this path */

		if (buf_info->virt == NULL) {
			goto done;
		}

		/*
		 * Allocate the DMA handle for this DMA object
		 */
		status = ddi_dma_alloc_handle((void *)hba->dip,
		    &dma_attr, DDI_DMA_DONTWAIT,
		    NULL, (ddi_dma_handle_t *)&buf_info->dma_handle);
		if (status != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_alloc_handle failed: size=%x align=%x "
			    "flags=%x", buf_info->size, buf_info->align,
			    buf_info->flags);

			buf_info->phys = 0;
			buf_info->dma_handle = 0;
			goto done;
		}

		/* For PHYSONLY requests buf_info->align carries the DMA */
		/* direction flag, not a byte alignment (see block comment) */
		switch (buf_info->align) {
		case DMA_READ_WRITE:
			dma_flag = (DDI_DMA_RDWR | DDI_DMA_CONSISTENT);
			break;
		case DMA_READ_ONLY:
			dma_flag = (DDI_DMA_READ | DDI_DMA_CONSISTENT);
			break;
		case DMA_WRITE_ONLY:
			dma_flag = (DDI_DMA_WRITE | DDI_DMA_CONSISTENT);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "Invalid DMA flag");
			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);
			buf_info->phys = 0;
			buf_info->dma_handle = 0;
			return ((uint8_t *)buf_info->virt);
		}

		/* Map this page of memory */
		status = ddi_dma_addr_bind_handle(
		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
		    dma_flag, DDI_DMA_DONTWAIT, NULL, &dma_cookie,
		    &cookie_count);

		/* A single cookie is required because only one physical */
		/* address can be returned through buf_info->phys */
		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_addr_bind_handle failed: status=%x "
			    "count=%x flags=%x", status, cookie_count,
			    buf_info->flags);

			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);
			buf_info->phys = 0;
			buf_info->dma_handle = 0;
			goto done;
		}

		if (hba->bus_type == SBUS_FC) {
			/* Enable 64-bit SBus transfers using the burst */
			/* sizes the platform reports for this handle */

			int32_t burstsizes_limit = 0xff;
			int32_t ret_burst;

			ret_burst = ddi_dma_burstsizes(
			    buf_info->dma_handle) & burstsizes_limit;
			if (ddi_dma_set_sbus64(buf_info->dma_handle,
			    ret_burst) == DDI_FAILURE) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mem_alloc_failed_msg,
				    "ddi_dma_set_sbus64 failed.");
			}
		}

		/* Save Physical address */
		buf_info->phys = dma_cookie.dmac_laddress;

		/*
		 * Just to be sure, let's add this
		 */
		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);

	} else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
		/* Allocate fresh memory and map it for DMA.  Here */
		/* buf_info->align is a true byte alignment. */

		dma_attr.dma_attr_align = buf_info->align;

		/*
		 * Allocate the DMA handle for this DMA object
		 */
		status = ddi_dma_alloc_handle((void *)hba->dip, &dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    (ddi_dma_handle_t *)&buf_info->dma_handle);
		if (status != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_alloc_handle failed: size=%x align=%x "
			    "flags=%x", buf_info->size, buf_info->align,
			    buf_info->flags);

			buf_info->virt = NULL;
			buf_info->phys = 0;
			buf_info->data_handle = 0;
			buf_info->dma_handle = 0;
			goto done;
		}

		status = ddi_dma_mem_alloc(
		    (ddi_dma_handle_t)buf_info->dma_handle,
		    (size_t)buf_info->size, &dev_attr, DDI_DMA_CONSISTENT,
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&buf_info->virt,
		    &dma_reallen, (ddi_acc_handle_t *)&buf_info->data_handle);

		/* Fail if the allocation came back smaller than requested */
		if ((status != DDI_SUCCESS) || (buf_info->size > dma_reallen)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_mem_alloc failed: size=%x align=%x "
			    "flags=%x", buf_info->size, buf_info->align,
			    buf_info->flags);

			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);

			buf_info->virt = NULL;
			buf_info->phys = 0;
			buf_info->data_handle = 0;
			buf_info->dma_handle = 0;
			goto done;
		}

		/* Map this page of memory */
		status = ddi_dma_addr_bind_handle(
		    (ddi_dma_handle_t)buf_info->dma_handle, NULL,
		    (caddr_t)buf_info->virt, (size_t)buf_info->size,
		    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
		    &dma_cookie, &cookie_count);

		/* A single cookie is required (see comment above) */
		if (status != DDI_DMA_MAPPED || (cookie_count > 1)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "ddi_dma_addr_bind_handle failed: status=%x "
			    "count=%d size=%x align=%x flags=%x", status,
			    cookie_count, buf_info->size, buf_info->align,
			    buf_info->flags);

			/* Unwind in reverse order of acquisition */
			(void) ddi_dma_mem_free(
			    (ddi_acc_handle_t *)&buf_info->data_handle);
			(void) ddi_dma_free_handle(
			    (ddi_dma_handle_t *)&buf_info->dma_handle);

			buf_info->virt = NULL;
			buf_info->phys = 0;
			buf_info->dma_handle = 0;
			buf_info->data_handle = 0;
			goto done;
		}

		if (hba->bus_type == SBUS_FC) {
			/* Enable 64-bit SBus transfers (see comment above) */
			int32_t burstsizes_limit = 0xff;
			int32_t ret_burst;

			ret_burst =
			    ddi_dma_burstsizes(buf_info->
			    dma_handle) & burstsizes_limit;
			if (ddi_dma_set_sbus64(buf_info->dma_handle,
			    ret_burst) == DDI_FAILURE) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mem_alloc_failed_msg,
				    "ddi_dma_set_sbus64 failed.");
			}
		}

		/* Save Physical address */
		buf_info->phys = dma_cookie.dmac_laddress;

		/* Just to be sure, let's add this */
		EMLXS_MPDATA_SYNC((ddi_dma_handle_t)buf_info->dma_handle,
		    (off_t)0, (size_t)buf_info->size, DDI_DMA_SYNC_FORDEV);

	} else {	/* allocate virtual memory */

		/* Plain kernel memory; no DMA handles are created */
		buf_info->virt =
		    kmem_zalloc((size_t)buf_info->size, KM_NOSLEEP);
		buf_info->phys = 0;
		buf_info->data_handle = 0;
		buf_info->dma_handle = 0;

		if (buf_info->virt == (uint32_t *)0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "size=%x flags=%x", buf_info->size,
			    buf_info->flags);
		}

	}

done:

	/* NULL on failure for the allocation paths; callers must check */
	return ((uint8_t *)buf_info->virt);

} /* emlxs_mem_alloc() */
8407
8408
8409
8410 /*
8411 * emlxs_mem_free:
8412 *
8413 * OS specific routine for memory de-allocation / unmapping
8414 *
8415 * The buf_info->flags field describes the memory operation requested.
8416 *
8417 * FC_MBUF_PHYSONLY set requests a supplied virtual address be unmapped
8418 * for DMA, but not freed. The mapped physical address to be unmapped is in
8419 * buf_info->phys
8420 *
8421 * FC_MBUF_PHYSONLY cleared requests memory be freed and unmapped for DMA only
8422 * if FC_MBUF_DMA is set. The mapped physical address to be unmapped is in
8423 * buf_info->phys. The virtual address to be freed is in buf_info->virt
8424 */
8425 /*ARGSUSED*/
8426 extern void
8427 emlxs_mem_free(emlxs_hba_t *hba, MBUF_INFO *buf_info)
8428 {
8429 if (buf_info->flags & FC_MBUF_PHYSONLY) {
8430
8431 if (buf_info->dma_handle) {
8432 (void) ddi_dma_unbind_handle(buf_info->dma_handle);
8433 (void) ddi_dma_free_handle(
8434 (ddi_dma_handle_t *)&buf_info->dma_handle);
8435 buf_info->dma_handle = NULL;
8436 }
8437
8438 } else if (buf_info->flags & (FC_MBUF_DMA|FC_MBUF_DMA32)) {
8439
8440 if (buf_info->dma_handle) {
8441 (void) ddi_dma_unbind_handle(buf_info->dma_handle);
8442 (void) ddi_dma_mem_free(
8443 (ddi_acc_handle_t *)&buf_info->data_handle);
8444 (void) ddi_dma_free_handle(
8445 (ddi_dma_handle_t *)&buf_info->dma_handle);
8446 buf_info->dma_handle = NULL;
8447 buf_info->data_handle = NULL;
8448 }
8449
8450 } else { /* allocate virtual memory */
8451
8452 if (buf_info->virt) {
8453 kmem_free(buf_info->virt, (size_t)buf_info->size);
8454 buf_info->virt = NULL;
8455 }
8456 }
8457
8458 } /* emlxs_mem_free() */
8459
8460
8461 static int
8462 emlxs_select_fcp_channel(emlxs_hba_t *hba, NODELIST *ndlp, int reset)
8463 {
8464 int channel;
8465 int msi_id;
8466
8467
8468 /* IO to FCP2 device or a device reset always use fcp channel */
8469 if ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) || reset) {
8470 return (hba->channel_fcp);
8471 }
8472
8473
8474 msi_id = emlxs_select_msiid(hba);
8475 channel = emlxs_msiid_to_chan(hba, msi_id);
8476
8477
8478
8479 /* If channel is closed, then try fcp channel */
8480 if (ndlp->nlp_flag[channel] & NLP_CLOSED) {
8481 channel = hba->channel_fcp;
8482 }
8483 return (channel);
8484
8485 } /* emlxs_select_fcp_channel() */
8486
8487
static int32_t
emlxs_fast_target_reset(emlxs_port_t *port, emlxs_buf_t *sbp, NODELIST *ndlp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	emlxs_config_t *cfg;
	MAILBOXQ *mbq;
	MAILBOX *mb;
	uint32_t rc;

	/*
	 * This routine provides an alternative target reset processing
	 * method. Instead of sending an actual target reset to the
	 * NPort, we will first unreg the login to that NPort. This
	 * will cause all the outstanding IOs to quickly complete with
	 * a NO RPI local error. Next we will force the ULP to relogin
	 * to the NPort by sending an RSCN (for that NPort) to the
	 * upper layer. This method should result in a fast target
	 * reset, as far as IOs completing; however, since an actual
	 * target reset is not sent to the NPort, it is not 100%
	 * compatible. Things like reservations will not be broken.
	 * By default this option is DISABLED, and its only enabled thru
	 * a hidden configuration parameter (fast-tgt-reset).
	 */
	rc = FC_TRAN_BUSY;	/* returned only if no mailbox is available */
	pkt = PRIV2PKT(sbp);
	cfg = &CFG;

	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))) {
		/* issue the mbox cmd to the sli */
		/* Build an UNREG_LOGIN for this node's RPI */
		mb = (MAILBOX *) mbq->mbox;
		bzero((void *) mb, MAILBOX_CMD_BSIZE);
		mb->un.varUnregLogin.rpi = (uint16_t)ndlp->nlp_Rpi;
#ifdef SLI3_SUPPORT
		mb->un.varUnregLogin.vpi = port->vpi;
#endif	/* SLI3_SUPPORT */
		mb->mbxCommand = MBX_UNREG_LOGIN;
		mb->mbxOwner = OWN_HOST;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Fast Target Reset: unreg rpi=%d tmr=%d", ndlp->nlp_Rpi,
		    cfg[CFG_FAST_TGT_RESET_TMR].current);

		if (EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_WAIT, 0)
		    == MBX_SUCCESS) {

			ndlp->nlp_Rpi = 0;

			mutex_enter(&sbp->mtx);
			sbp->node = (void *)ndlp;
			sbp->did = ndlp->nlp_DID;
			mutex_exit(&sbp->mtx);

			/* Present a clean (all zero) FCP response */
			if (pkt->pkt_rsplen) {
				bzero((uint8_t *)pkt->pkt_resp,
				    pkt->pkt_rsplen);
			}
			/* Schedule the forced RSCN that makes the ULP */
			/* relogin to this NPort */
			if (cfg[CFG_FAST_TGT_RESET_TMR].current) {
				ndlp->nlp_force_rscn = hba->timer_tics +
				    cfg[CFG_FAST_TGT_RESET_TMR].current;
			}

			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 0);
		}

		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		/*
		 * NOTE(review): FC_SUCCESS is returned even when the
		 * UNREG_LOGIN mailbox command fails (in which case the
		 * packet was not completed above) -- confirm this is the
		 * intended behavior.
		 */
		rc = FC_SUCCESS;
	}
	return (rc);
} /* emlxs_fast_target_reset() */
8558
/*
 * Issue an FCP command packet to the adapter.  Snoops the CDB control
 * byte for target/lun reset task management requests, which are always
 * sent on the default fcp channel; normal commands are spread across
 * channels by interrupt affinity.  Returns FC_SUCCESS, FC_TRAN_BUSY,
 * FC_BADPACKET, or an SLI prep error code.
 */
static int32_t
emlxs_send_fcp_cmd(emlxs_port_t *port, emlxs_buf_t *sbp, uint32_t *pkt_flags)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	emlxs_config_t *cfg;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	NODELIST *ndlp;
	char *cmd;
	uint16_t lun;
	FCP_CMND *fcp_cmd;
	uint32_t did;
	uint32_t reset = 0;
	int channel;
	int32_t rval;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	/* Find target node object */
	ndlp = emlxs_node_find_did(port, did, 1);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=%x", did);

		return (FC_BADPACKET);
	}

	/* When the fcp channel is closed we stop accepting any FCP cmd */
	if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
		return (FC_TRAN_BUSY);
	}

	/* Snoop for target or lun reset first */
	/* We always use FCP channel to send out target/lun reset fcp cmds */
	/* interrupt affinity only applies to non tgt lun reset fcp cmd */

	/* First 2 bytes of the FCP_CMND payload hold the lun */
	cmd = (char *)pkt->pkt_cmd;
	lun = *((uint16_t *)cmd);
	lun = LE_SWAP16(lun);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	iocbq->node = (void *) ndlp;

	/* Check for target reset */
	/* (byte 10 of FCP_CMND holds the task management flags) */
	if (cmd[10] & 0x20) {
		/* prepare iocb */
		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
		    hba->channel_fcp)) != FC_SUCCESS) {

			/* 0xff means the packet was completed locally */
			if (rval == 0xff) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    0, 1);
				rval = FC_SUCCESS;
			}

			return (rval);
		}

		mutex_enter(&sbp->mtx);
		sbp->pkt_flags |= PACKET_FCP_TGT_RESET;
		sbp->pkt_flags |= PACKET_POLLED;
		*pkt_flags = sbp->pkt_flags;
		mutex_exit(&sbp->mtx);

#ifdef SAN_DIAG_SUPPORT
		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_TGTRESET,
		    (HBA_WWN *)&ndlp->nlp_portname, -1);
#endif	/* SAN_DIAG_SUPPORT */

		iocbq->flag |= IOCB_PRIORITY;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Target Reset: did=%x", did);

		/* Optionally handle via the fast (unreg-login based) */
		/* reset path; on success we are done */
		cfg = &CFG;
		if (cfg[CFG_FAST_TGT_RESET].current) {
			if (emlxs_fast_target_reset(port, sbp, ndlp) ==
			    FC_SUCCESS) {
				return (FC_SUCCESS);
			}
		}

		/* Close the node for any further normal IO */
		emlxs_node_close(port, ndlp, hba->channel_fcp,
		    pkt->pkt_timeout);

		/* Flush the IO's on the tx queues */
		(void) emlxs_tx_node_flush(port, ndlp,
		    &hba->chan[hba->channel_fcp], 0, sbp);

		/* This is the target reset fcp cmd */
		reset = 1;
	}

	/* Check for lun reset */
	else if (cmd[10] & 0x10) {
		/* prepare iocb */
		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
		    hba->channel_fcp)) != FC_SUCCESS) {

			/* 0xff means the packet was completed locally */
			if (rval == 0xff) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    0, 1);
				rval = FC_SUCCESS;
			}

			return (rval);
		}

		mutex_enter(&sbp->mtx);
		sbp->pkt_flags |= PACKET_FCP_LUN_RESET;
		sbp->pkt_flags |= PACKET_POLLED;
		*pkt_flags = sbp->pkt_flags;
		mutex_exit(&sbp->mtx);

#ifdef SAN_DIAG_SUPPORT
		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_LUNRESET,
		    (HBA_WWN *)&ndlp->nlp_portname, lun);
#endif	/* SAN_DIAG_SUPPORT */

		iocbq->flag |= IOCB_PRIORITY;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "LUN Reset: did=%x lun=%02x LUN=%02x02x", did, lun,
		    cmd[0], cmd[1]);

		/* Flush the IO's on the tx queues for this lun */
		(void) emlxs_tx_lun_flush(port, ndlp, lun, sbp);

		/* This is the lun reset fcp cmd */
		reset = 1;
	}

	/* Resets stay on the fcp channel; otherwise use affinity */
	channel = emlxs_select_fcp_channel(hba, ndlp, reset);

#ifdef SAN_DIAG_SUPPORT
	sbp->sd_start_time = gethrtime();
#endif /* SAN_DIAG_SUPPORT */

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_fcp_pkt(sbp);
#endif	/* EMLXS_MODREV2X */

	/* Untagged queuing is mapped to simple queuing */
	fcp_cmd = (FCP_CMND *) pkt->pkt_cmd;

	if (fcp_cmd->fcpCntl1 == FCP_QTYPE_UNTAGGED) {
		fcp_cmd->fcpCntl1 = FCP_QTYPE_SIMPLE;
	}

	if (reset == 0) {
		/*
		 * tgt lun reset fcp cmd has been prepared
		 * separately in the beginning
		 */
		if ((rval = EMLXS_SLI_PREP_FCP_IOCB(port, sbp,
		    channel)) != FC_SUCCESS) {

			/* 0xff means the packet was completed locally */
			if (rval == 0xff) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    0, 1);
				rval = FC_SUCCESS;
			}

			return (rval);
		}
	}

	cp = &hba->chan[channel];
	cp->ulpSendCmd++;

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	/* Pad short ULP timeouts by 10 tics; see similar logic in */
	/* the other send routines */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = lun;
	sbp->class = iocb->ULPCLASS;
	sbp->did = ndlp->nlp_DID;
	mutex_exit(&sbp->mtx);

	/* Flush command (and write data) buffers to the device */
	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	if (pkt->pkt_datalen && pkt->pkt_tran_type == FC_PKT_FCP_WRITE) {
		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.FcpIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
	return (FC_SUCCESS);

} /* emlxs_send_fcp_cmd() */
8760
8761
8762
8763
8764 /*
8765 * We have to consider this setup works for INTX, MSI, and MSIX
8766 * For INTX, intr_count is always 1
8767 * For MSI, intr_count is always 2 by default
8768 * For MSIX, intr_count is configurable (1, 2, 4, 8) for now.
8769 */
8770 extern int
8771 emlxs_select_msiid(emlxs_hba_t *hba)
8772 {
8773 int msiid = 0;
8774
8775 /* We use round-robin */
8776 mutex_enter(&EMLXS_MSIID_LOCK);
8777 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8778 msiid = hba->last_msiid;
8779 hba->last_msiid ++;
8780 if (hba->last_msiid >= hba->intr_count) {
8781 hba->last_msiid = 0;
8782 }
8783 } else {
8784 /* This should work for INTX and MSI also */
8785 /* For SLI3 the chan_count is always 4 */
8786 /* For SLI3 the msiid is limited to chan_count */
8787 msiid = hba->last_msiid;
8788 hba->last_msiid ++;
8789 if (hba->intr_count > hba->chan_count) {
8790 if (hba->last_msiid >= hba->chan_count) {
8791 hba->last_msiid = 0;
8792 }
8793 } else {
8794 if (hba->last_msiid >= hba->intr_count) {
8795 hba->last_msiid = 0;
8796 }
8797 }
8798 }
8799 mutex_exit(&EMLXS_MSIID_LOCK);
8800
8801 return (msiid);
8802 } /* emlxs_select_msiid */
8803
8804
8805 /*
8806 * A channel has a association with a msi id.
8807 * One msi id could be associated with multiple channels.
8808 */
8809 extern int
8810 emlxs_msiid_to_chan(emlxs_hba_t *hba, int msi_id)
8811 {
8812 emlxs_config_t *cfg = &CFG;
8813 EQ_DESC_t *eqp;
8814 int chan;
8815 int num_wq;
8816
8817 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
8818 /* For SLI4 round robin all WQs associated with the msi_id */
8819 eqp = &hba->sli.sli4.eq[msi_id];
8820
8821 mutex_enter(&eqp->lastwq_lock);
8822 chan = eqp->lastwq;
8823 eqp->lastwq++;
8824 num_wq = cfg[CFG_NUM_WQ].current;
8825 if (eqp->lastwq >= ((msi_id + 1) * num_wq)) {
8826 eqp->lastwq -= num_wq;
8827 }
8828 mutex_exit(&eqp->lastwq_lock);
8829
8830 return (chan);
8831 } else {
8832 /* This is for SLI3 mode */
8833 return (hba->msi2chan[msi_id]);
8834 }
8835
8836 } /* emlxs_msiid_to_chan */
8837
8838
8839 #ifdef SFCT_SUPPORT
8840 static int32_t
8841 emlxs_send_fct_status(emlxs_port_t *port, emlxs_buf_t *sbp)
8842 {
8843 emlxs_hba_t *hba = HBA;
8844 IOCBQ *iocbq;
8845 IOCB *iocb;
8846 NODELIST *ndlp;
8847 CHANNEL *cp;
8848 uint32_t did;
8849
8850 did = sbp->did;
8851 ndlp = sbp->node;
8852 cp = (CHANNEL *)sbp->channel;
8853
8854 iocbq = &sbp->iocbq;
8855 iocb = &iocbq->iocb;
8856
8857 /* Make sure node is still active */
8858 if (!ndlp->nlp_active) {
8859 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8860 "*Node not found. did=%x", did);
8861
8862 return (FC_BADPACKET);
8863 }
8864
8865 /* If gate is closed */
8866 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8867 return (FC_TRAN_BUSY);
8868 }
8869
8870 iocb->ULPCOMMAND = CMD_FCP_TRSP64_CX;
8871 if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
8872 IOERR_SUCCESS) {
8873 return (FC_TRAN_BUSY);
8874 }
8875
8876 HBASTATS.FcpIssued++;
8877
8878 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
8879
8880 return (FC_SUCCESS);
8881
8882 } /* emlxs_send_fct_status() */
8883
8884
8885 static int32_t
8886 emlxs_send_fct_abort(emlxs_port_t *port, emlxs_buf_t *sbp)
8887 {
8888 emlxs_hba_t *hba = HBA;
8889 IOCBQ *iocbq;
8890 IOCB *iocb;
8891 NODELIST *ndlp;
8892 CHANNEL *cp;
8893 uint32_t did;
8894
8895 did = sbp->did;
8896 ndlp = sbp->node;
8897 cp = (CHANNEL *)sbp->channel;
8898
8899 iocbq = &sbp->iocbq;
8900 iocb = &iocbq->iocb;
8901
8902 /* Make sure node is still active */
8903 if ((ndlp == NULL) || (!ndlp->nlp_active)) {
8904 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
8905 "*Node not found. did=%x", did);
8906
8907 return (FC_BADPACKET);
8908 }
8909
8910 /* If gate is closed */
8911 if (ndlp->nlp_flag[hba->channel_fcp] & NLP_CLOSED) {
8912 return (FC_TRAN_BUSY);
8913 }
8914
8915 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
8916 if (EMLXS_SLI_PREP_FCT_IOCB(port, sbp, cp->channelno) !=
8917 IOERR_SUCCESS) {
8918 return (FC_TRAN_BUSY);
8919 }
8920
8921 EMLXS_SLI_ISSUE_IOCB_CMD(hba, sbp->channel, iocbq);
8922
8923 return (FC_SUCCESS);
8924
8925 } /* emlxs_send_fct_abort() */
8926
8927 #endif /* SFCT_SUPPORT */
8928
8929
/*
 * Issue an IP-over-FC packet to the adapter on the ip channel.
 * Returns FC_SUCCESS, FC_TRAN_BUSY (gate closed or no exchange yet),
 * FC_BADPACKET (node missing/inactive), or an SLI prep error code.
 */
static int32_t
emlxs_send_ip(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	uint32_t i;
	NODELIST *ndlp;
	uint32_t did;
	int32_t rval;

	pkt = PRIV2PKT(sbp);
	cp = &hba->chan[hba->channel_ip];
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	/* Check if node exists */
	/* Broadcast did is always a success */
	ndlp = emlxs_node_find_did(port, did, 1);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=0x%x", did);

		return (FC_BADPACKET);
	}

	/* Check if gate is temporarily closed */
	if (ndlp->nlp_flag[hba->channel_ip] & NLP_CLOSED) {
		return (FC_TRAN_BUSY);
	}

	/* Check if an exchange has been created */
	if ((ndlp->nlp_Xri == 0) && (did != BCAST_DID)) {
		/* No exchange. Try creating one */
		/* The XRI is created asynchronously; return busy so the */
		/* ULP retries this packet once the exchange exists */
		(void) emlxs_create_xri(port, cp, ndlp);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Exchange not found. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* ULP PATCH: pkt_cmdlen was found to be set to zero */
	/* on BROADCAST commands */
	if (pkt->pkt_cmdlen == 0) {
		/* Set the pkt_cmdlen to the cookie size */
#if (EMLXS_MODREV >= EMLXS_MODREV3)
		for (i = 0; i < pkt->pkt_cmd_cookie_cnt; i++) {
			pkt->pkt_cmdlen += pkt->pkt_cmd_cookie[i].dmac_size;
		}
#else
		pkt->pkt_cmdlen = pkt->pkt_cmd_cookie.dmac_size;
#endif	/* >= EMLXS_MODREV3 */

	}

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	iocbq->node = (void *)ndlp;
	if ((rval = EMLXS_SLI_PREP_IP_IOCB(port, sbp)) != FC_SUCCESS) {

		/* 0xff means the packet was completed locally */
		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp->ulpSendCmd++;

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	/* Pad short ULP timeouts by 10 tics */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = EMLXS_LUN_NONE;
	sbp->class = iocb->ULPCLASS;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	/* Flush the command buffer to the device */
	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_ip() */
9024
9025
9026 static int32_t
9027 emlxs_send_els(emlxs_port_t *port, emlxs_buf_t *sbp)
9028 {
9029 emlxs_hba_t *hba = HBA;
9030 emlxs_port_t *vport;
9031 fc_packet_t *pkt;
9032 IOCBQ *iocbq;
9033 CHANNEL *cp;
9034 SERV_PARM *sp;
9035 uint32_t cmd;
9036 int i;
9037 ELS_PKT *els_pkt;
9038 NODELIST *ndlp;
9039 uint32_t did;
9040 char fcsp_msg[32];
9041 int rc;
9042 int32_t rval;
9043 emlxs_config_t *cfg = &CFG;
9044
9045 fcsp_msg[0] = 0;
9046 pkt = PRIV2PKT(sbp);
9047 els_pkt = (ELS_PKT *)pkt->pkt_cmd;
9048 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9049
9050 iocbq = &sbp->iocbq;
9051
9052 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9053 emlxs_swap_els_pkt(sbp);
9054 #endif /* EMLXS_MODREV2X */
9055
9056 cmd = *((uint32_t *)pkt->pkt_cmd);
9057 cmd &= ELS_CMD_MASK;
9058
9059 /* Point of no return, except for ADISC & PLOGI */
9060
9061 /* Check node */
9062 switch (cmd) {
9063 case ELS_CMD_FLOGI:
9064 case ELS_CMD_FDISC:
9065 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9066
9067 if (emlxs_vpi_logi_notify(port, sbp)) {
9068 pkt->pkt_state = FC_PKT_LOCAL_RJT;
9069 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9070 emlxs_unswap_pkt(sbp);
9071 #endif /* EMLXS_MODREV2X */
9072 return (FC_FAILURE);
9073 }
9074 } else {
9075 /*
9076 * If FLOGI is already complete, then we
9077 * should not be receiving another FLOGI.
9078 * Reset the link to recover.
9079 */
9080 if (port->flag & EMLXS_PORT_FLOGI_CMPL) {
9081 pkt->pkt_state = FC_PKT_LOCAL_RJT;
9082 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9083 emlxs_unswap_pkt(sbp);
9084 #endif /* EMLXS_MODREV2X */
9085
9086 (void) emlxs_reset(port, FC_FCA_LINK_RESET);
9087 return (FC_FAILURE);
9088 }
9089
9090 if (port->vpi > 0) {
9091 *((uint32_t *)pkt->pkt_cmd) = ELS_CMD_FDISC;
9092 }
9093 }
9094
9095 /* Command may have been changed */
9096 cmd = *((uint32_t *)pkt->pkt_cmd);
9097 cmd &= ELS_CMD_MASK;
9098
9099 if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
9100 sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
9101 }
9102
9103 ndlp = NULL;
9104
9105 /* We will process these cmds at the bottom of this routine */
9106 break;
9107
9108 case ELS_CMD_PLOGI:
9109 /* Make sure we don't log into ourself */
9110 for (i = 0; i < MAX_VPORTS; i++) {
9111 vport = &VPORT(i);
9112
9113 if (!(vport->flag & EMLXS_INI_BOUND)) {
9114 continue;
9115 }
9116
9117 if (did == vport->did) {
9118 pkt->pkt_state = FC_PKT_NPORT_RJT;
9119
9120 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
9121 emlxs_unswap_pkt(sbp);
9122 #endif /* EMLXS_MODREV2X */
9123
9124 return (FC_FAILURE);
9125 }
9126 }
9127
9128 ndlp = NULL;
9129
9130 if (hba->flag & FC_PT_TO_PT) {
9131 MAILBOXQ *mbox;
9132
9133 /* ULP bug fix */
9134 if (pkt->pkt_cmd_fhdr.s_id == 0) {
9135 pkt->pkt_cmd_fhdr.s_id = FP_DEFAULT_SID;
9136 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg,
9137 "PLOGI: P2P Fix. sid=0-->%x did=%x",
9138 pkt->pkt_cmd_fhdr.s_id,
9139 pkt->pkt_cmd_fhdr.d_id);
9140 }
9141
9142 mutex_enter(&EMLXS_PORT_LOCK);
9143 port->did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.s_id);
9144 port->rdid = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
9145 mutex_exit(&EMLXS_PORT_LOCK);
9146
9147 if (hba->sli_mode <= EMLXS_HBA_SLI3_MODE) {
9148 /* Update our service parms */
9149 if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
9150 MEM_MBOX))) {
9151 emlxs_mb_config_link(hba, mbox);
9152
9153 rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba,
9154 mbox, MBX_NOWAIT, 0);
9155 if ((rc != MBX_BUSY) &&
9156 (rc != MBX_SUCCESS)) {
9157 emlxs_mem_put(hba, MEM_MBOX,
9158 (void *)mbox);
9159 }
9160 }
9161 }
9162 }
9163
9164 /* We will process these cmds at the bottom of this routine */
9165 break;
9166
9167 default:
9168 ndlp = emlxs_node_find_did(port, did, 1);
9169
9170 /* If an ADISC is being sent and we have no node, */
9171 /* then we must fail the ADISC now */
9172 if (!ndlp && (cmd == ELS_CMD_ADISC) &&
9173 (port->mode == MODE_INITIATOR)) {
9174
9175 /* Build the LS_RJT response */
9176 els_pkt = (ELS_PKT *)pkt->pkt_resp;
9177 els_pkt->elsCode = 0x01;
9178 els_pkt->un.lsRjt.un.b.lsRjtRsvd0 = 0;
9179 els_pkt->un.lsRjt.un.b.lsRjtRsnCode =
9180 LSRJT_LOGICAL_ERR;
9181 els_pkt->un.lsRjt.un.b.lsRjtRsnCodeExp =
9182 LSEXP_NOTHING_MORE;
9183 els_pkt->un.lsRjt.un.b.vendorUnique = 0x03;
9184
9185 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
9186 "ADISC Rejected. Node not found. did=0x%x", did);
9187
9188 if (sbp->channel == NULL) {
9189 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
9190 sbp->channel =
9191 &hba->chan[hba->channel_els];
9192 } else {
9193 sbp->channel =
9194 &hba->chan[FC_ELS_RING];
9195 }
9196 }
9197
9198 /* Return this as rejected by the target */
9199 emlxs_pkt_complete(sbp, IOSTAT_LS_RJT, 0, 1);
9200
9201 return (FC_SUCCESS);
9202 }
9203 }
9204
9205 /* DID == BCAST_DID is special case to indicate that */
9206 /* RPI is being passed in seq_id field */
9207 /* This is used by emlxs_send_logo() for target mode */
9208
9209 /* Initalize iocbq */
9210 iocbq->node = (void *)ndlp;
9211 if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {
9212
9213 if (rval == 0xff) {
9214 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
9215 rval = FC_SUCCESS;
9216 }
9217
9218 return (rval);
9219 }
9220
9221 cp = &hba->chan[hba->channel_els];
9222 cp->ulpSendCmd++;
9223 sp = (SERV_PARM *)&els_pkt->un.logi;
9224
9225 /* Check cmd */
9226 switch (cmd) {
9227 case ELS_CMD_PRLI:
9228 /*
9229 * if our firmware version is 3.20 or later,
9230 * set the following bits for FC-TAPE support.
9231 */
9232 if ((port->mode == MODE_INITIATOR) &&
9233 (hba->vpd.feaLevelHigh >= 0x02) &&
9234 (cfg[CFG_ADISC_SUPPORT].current != 0)) {
9235 els_pkt->un.prli.ConfmComplAllowed = 1;
9236 els_pkt->un.prli.Retry = 1;
9237 els_pkt->un.prli.TaskRetryIdReq = 1;
9238 } else {
9239 els_pkt->un.prli.ConfmComplAllowed = 0;
9240 els_pkt->un.prli.Retry = 0;
9241 els_pkt->un.prli.TaskRetryIdReq = 0;
9242 }
9243
9244 break;
9245
9246 /* This is a patch for the ULP stack. */
9247
9248 /*
9249 * ULP only reads our service parameters once during bind_port,
9250 * but the service parameters change due to topology.
9251 */
9252 case ELS_CMD_FLOGI:
9253 case ELS_CMD_FDISC:
9254 case ELS_CMD_PLOGI:
9255 case ELS_CMD_PDISC:
9256 /* Copy latest service parameters to payload */
9257 bcopy((void *) &port->sparam, (void *)sp, sizeof (SERV_PARM));
9258
9259 if ((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) {
9260
9261 /* Clear support for virtual fabrics */
9262 /* randomOffset bit controls this for FLOGI */
9263 sp->cmn.randomOffset = 0;
9264
9265 /* Set R_A_TOV to current value */
9266 sp->cmn.w2.r_a_tov =
9267 LE_SWAP32((hba->fc_ratov * 1000));
9268 }
9269
9270 if ((hba->flag & FC_NPIV_ENABLED) &&
9271 (hba->flag & FC_NPIV_SUPPORTED) &&
9272 (cmd == ELS_CMD_PLOGI)) {
9273 emlxs_vvl_fmt_t *vvl;
9274
9275 sp->VALID_VENDOR_VERSION = 1;
9276 vvl = (emlxs_vvl_fmt_t *)&sp->vendorVersion[0];
9277 vvl->un0.w0.oui = 0x0000C9;
9278 vvl->un0.word0 = LE_SWAP32(vvl->un0.word0);
9279 vvl->un1.w1.vport = (port->vpi > 0) ? 1 : 0;
9280 vvl->un1.word1 = LE_SWAP32(vvl->un1.word1);
9281 }
9282
9283 #ifdef DHCHAP_SUPPORT
9284 emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
9285 #endif /* DHCHAP_SUPPORT */
9286
9287 break;
9288 }
9289
9290 /* Initialize the sbp */
9291 mutex_enter(&sbp->mtx);
9292 sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
9293 ((pkt->pkt_timeout > 0xff) ? 0 : 10);
9294 sbp->node = (void *)ndlp;
9295 sbp->lun = EMLXS_LUN_NONE;
9296 sbp->did = did;
9297 mutex_exit(&sbp->mtx);
9298
9299 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_send_msg, "%s: sid=%x did=%x %s",
9300 emlxs_elscmd_xlate(cmd), port->did, did, fcsp_msg);
9301
9302 if (pkt->pkt_cmdlen) {
9303 EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
9304 DDI_DMA_SYNC_FORDEV);
9305 }
9306
9307 /* Check node */
9308 switch (cmd) {
9309 case ELS_CMD_FLOGI:
9310 case ELS_CMD_FDISC:
9311 if (port->mode == MODE_INITIATOR) {
9312 /* Make sure fabric node is destroyed */
9313 /* It should already have been destroyed at link down */
9314 if (hba->sli_mode != EMLXS_HBA_SLI4_MODE) {
9315 ndlp = emlxs_node_find_did(port, FABRIC_DID, 1);
9316 if (ndlp) {
9317 if (EMLXS_SLI_UNREG_NODE(port, ndlp,
9318 NULL, NULL, iocbq) == 0) {
9319 /* Deferring iocb tx until */
9320 /* completion of unreg */
9321 return (FC_SUCCESS);
9322 }
9323 }
9324 }
9325 }
9326 break;
9327
9328 case ELS_CMD_PLOGI:
9329
9330 ndlp = emlxs_node_find_did(port, did, 1);
9331
9332 if (ndlp && ndlp->nlp_active) {
9333 /* Close the node for any further normal IO */
9334 emlxs_node_close(port, ndlp, hba->channel_fcp,
9335 pkt->pkt_timeout + 10);
9336 emlxs_node_close(port, ndlp, hba->channel_ip,
9337 pkt->pkt_timeout + 10);
9338
9339 /* Flush tx queues */
9340 (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);
9341
9342 /* Flush chip queues */
9343 (void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
9344 }
9345
9346 break;
9347
9348 case ELS_CMD_PRLI:
9349
9350 ndlp = emlxs_node_find_did(port, did, 1);
9351
9352 if (ndlp && ndlp->nlp_active) {
9353 /*
9354 * Close the node for any further FCP IO;
9355 * Flush all outstanding I/O only if
9356 * "Establish Image Pair" bit is set.
9357 */
9358 emlxs_node_close(port, ndlp, hba->channel_fcp,
9359 pkt->pkt_timeout + 10);
9360
9361 if (els_pkt->un.prli.estabImagePair) {
9362 /* Flush tx queues */
9363 (void) emlxs_tx_node_flush(port, ndlp,
9364 &hba->chan[hba->channel_fcp], 0, 0);
9365
9366 /* Flush chip queues */
9367 (void) emlxs_chipq_node_flush(port,
9368 &hba->chan[hba->channel_fcp], ndlp, 0);
9369 }
9370 }
9371
9372 break;
9373
9374 }
9375
9376 HBASTATS.ElsCmdIssued++;
9377
9378 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
9379
9380 return (FC_SUCCESS);
9381
9382 } /* emlxs_send_els() */
9383
9384
9385
9386
/*
 * emlxs_send_els_rsp()
 *
 * Transmit an ELS response (a reply to a previously received unsolicited
 * ELS command) on the ELS channel.
 *
 * The unsolicited command being answered is located one of two ways:
 * - ox_id < EMLXS_UB_TOKEN_OFFSET: an "auto reply" where the ELS command
 *   code itself is encoded in the ox_id field (no unsolicited buffer).
 * - otherwise: ox_id is a token into the port's unsolicited buffer (ub)
 *   pool; the matching ub supplies the original command and payload.
 *
 * Several ULP_PATCH* interception cases drop replies the driver has
 * already generated/answered internally, completing the pkt with
 * IOSTAT_SUCCESS instead of sending it.
 *
 * Returns FC_SUCCESS, FC_BADPACKET (unknown ub token), or the failure
 * code from EMLXS_SLI_PREP_ELS_IOCB().
 */
static int32_t
emlxs_send_els_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *ndlp;
	CHANNEL *cp;
	int i;
	uint32_t cmd;		/* ELS command code of this reply (ACC/RJT) */
	uint32_t ucmd;		/* ELS command code of the original request */
	ELS_PKT *els_pkt;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t did;
	char fcsp_msg[32];
	uint8_t *ub_buffer;
	int32_t rval;

	fcsp_msg[0] = 0;
	pkt = PRIV2PKT(sbp);
	els_pkt = (ELS_PKT *)pkt->pkt_cmd;
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	/* Acquire the unsolicited command this pkt is replying to */
	if (pkt->pkt_cmd_fhdr.ox_id < EMLXS_UB_TOKEN_OFFSET) {
		/* This is for auto replies when no ub's are used */
		ucmd = pkt->pkt_cmd_fhdr.ox_id << ELS_CMD_SHIFT;
		ubp = NULL;
		ub_priv = NULL;
		ub_buffer = NULL;

#ifdef SFCT_SUPPORT
		/* Target mode: the original payload lives in the fct cmd */
		if (sbp->fct_cmd) {
			fct_els_t *els =
			    (fct_els_t *)sbp->fct_cmd->cmd_specific;
			ub_buffer = (uint8_t *)els->els_req_payload;
		}
#endif /* SFCT_SUPPORT */

	} else {
		/* Find the ub buffer that goes with this reply */
		if (!(ubp = emlxs_ub_find(port, pkt->pkt_cmd_fhdr.ox_id))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_error_msg,
			    "ELS reply: Invalid oxid=%x",
			    pkt->pkt_cmd_fhdr.ox_id);
			return (FC_BADPACKET);
		}

		ub_buffer = (uint8_t *)ubp->ub_buffer;
		ub_priv = ubp->ub_fca_private;
		ucmd = ub_priv->cmd;

		ub_priv->flags |= EMLXS_UB_REPLY;

		/* Reset oxid to ELS command */
		/* We do this because the ub is only valid */
		/* until we return from this thread */
		pkt->pkt_cmd_fhdr.ox_id = (ucmd >> ELS_CMD_SHIFT) & 0xff;
	}

	/* Save the result */
	sbp->ucmd = ucmd;

	/* Default the channel if the caller did not set one */
	if (sbp->channel == NULL) {
		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp->channel = &hba->chan[hba->channel_els];
		} else {
			sbp->channel = &hba->chan[FC_ELS_RING];
		}
	}

	/* Check for interceptions */
	switch (ucmd) {

#ifdef ULP_PATCH2
	case ELS_CMD_LOGO:
		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH2)) {
			break;
		}

		/* Check if this was generated by ULP and not us */
		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {

			/*
			 * Since we replied to this already,
			 * we won't need to send this now
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
#endif /* ULP_PATCH2 */

#ifdef ULP_PATCH3
	case ELS_CMD_PRLI:
		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH3)) {
			break;
		}

		/* Check if this was generated by ULP and not us */
		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {

			/*
			 * Since we replied to this already,
			 * we won't need to send this now
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
#endif /* ULP_PATCH3 */


#ifdef ULP_PATCH4
	case ELS_CMD_PRLO:
		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH4)) {
			break;
		}

		/* Check if this was generated by ULP and not us */
		if (!(sbp->pkt_flags & PACKET_ALLOCATED)) {
			/*
			 * Since we replied to this already,
			 * we won't need to send this now
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
#endif /* ULP_PATCH4 */

#ifdef ULP_PATCH6
	case ELS_CMD_RSCN:
		if (!(cfg[CFG_ENABLE_PATCH].current & ULP_PATCH6)) {
			break;
		}

		/* Check if this RSCN was generated by us */
		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd = LE_SWAP32(cmd);
			cmd &= ELS_CMD_MASK;

			/*
			 * If ULP is accepting this,
			 * then close affected node
			 */
			if ((port->mode == MODE_INITIATOR) && ub_buffer &&
			    cmd == ELS_CMD_ACC) {
				fc_rscn_t *rscn;
				uint32_t count;
				uint32_t *lp;

				/*
				 * Only the Leadville code path will
				 * come thru here. The RSCN data is NOT
				 * swapped properly for the Comstar code
				 * path.
				 */
				lp = (uint32_t *)ub_buffer;
				rscn = (fc_rscn_t *)lp++;
				count =
				    ((rscn->rscn_payload_len - 4) / 4);

				/* Close affected ports */
				for (i = 0; i < count; i++, lp++) {
					(void) emlxs_port_offline(port,
					    *lp);
				}
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
			    "RSCN %s: did=%x oxid=%x rxid=%x. "
			    "Intercepted.", emlxs_elscmd_xlate(cmd),
			    did, pkt->pkt_cmd_fhdr.ox_id,
			    pkt->pkt_cmd_fhdr.rx_id);

			/*
			 * Since we generated this RSCN,
			 * we won't need to send this reply
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
#endif /* ULP_PATCH6 */

	case ELS_CMD_PLOGI:
		/* Check if this PLOGI was generated by us */
		if (ub_priv && (ub_priv->flags & EMLXS_UB_INTERCEPT)) {
			cmd = *((uint32_t *)pkt->pkt_cmd);
			cmd = LE_SWAP32(cmd);
			cmd &= ELS_CMD_MASK;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
			    "PLOGI %s: did=%x oxid=%x rxid=%x. "
			    "Intercepted.", emlxs_elscmd_xlate(cmd),
			    did, pkt->pkt_cmd_fhdr.ox_id,
			    pkt->pkt_cmd_fhdr.rx_id);

			/*
			 * Since we generated this PLOGI,
			 * we won't need to send this reply
			 */
			emlxs_pkt_complete(sbp, IOSTAT_SUCCESS, 0, 1);

			return (FC_SUCCESS);
		}

		break;
	}

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_els_pkt(sbp);
#endif /* EMLXS_MODREV2X */


	/* Reply payload is in native order past this point */
	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Check if modifications are needed */
	switch (ucmd) {
	case (ELS_CMD_PRLI):

		if (cmd == ELS_CMD_ACC) {
			/* This is a patch for the ULP stack. */
			/* ULP does not keep track of FCP2 support */
			if ((port->mode == MODE_INITIATOR) &&
			    (hba->vpd.feaLevelHigh >= 0x02) &&
			    (cfg[CFG_ADISC_SUPPORT].current != 0)) {
				els_pkt->un.prli.ConfmComplAllowed = 1;
				els_pkt->un.prli.Retry = 1;
				els_pkt->un.prli.TaskRetryIdReq = 1;
			} else {
				els_pkt->un.prli.ConfmComplAllowed = 0;
				els_pkt->un.prli.Retry = 0;
				els_pkt->un.prli.TaskRetryIdReq = 0;
			}
		}

		break;

	case ELS_CMD_FLOGI:
	case ELS_CMD_FDISC:
		if (cmd == ELS_CMD_ACC) {
			SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi;

			/* This is a patch for the ULP stack. */

			/*
			 * ULP only reads our service parameters
			 * once during bind_port, but the service
			 * parameters change due to topology.
			 */

			/* Copy latest service parameters to payload */
			bcopy((void *)&port->sparam,
			    (void *)sp, sizeof (SERV_PARM));

			/* We are in pt-to-pt mode. Set R_A_TOV to default */
			sp->cmn.w2.r_a_tov =
			    LE_SWAP32((FF_DEF_RATOV * 1000));

			/* Clear support for virtual fabrics */
			/* randomOffset bit controls this for FLOGI */
			sp->cmn.randomOffset = 0;
#ifdef DHCHAP_SUPPORT
			/* Advertise FC-SP (DHCHAP) support if configured */
			emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
#endif /* DHCHAP_SUPPORT */
		}
		break;

	case ELS_CMD_PLOGI:
	case ELS_CMD_PDISC:
		if (cmd == ELS_CMD_ACC) {
			SERV_PARM *sp = (SERV_PARM *)&els_pkt->un.logi;

			/* This is a patch for the ULP stack. */

			/*
			 * ULP only reads our service parameters
			 * once during bind_port, but the service
			 * parameters change due to topology.
			 */

			/* Copy latest service parameters to payload */
			bcopy((void *)&port->sparam,
			    (void *)sp, sizeof (SERV_PARM));

#ifdef DHCHAP_SUPPORT
			emlxs_dhc_init_sp(port, did, sp, (char **)&fcsp_msg);
#endif /* DHCHAP_SUPPORT */
		}
		break;

	}

	/* Initalize iocbq */
	iocbq->node = (void *)NULL;
	if ((rval = EMLXS_SLI_PREP_ELS_IOCB(port, sbp)) != FC_SUCCESS) {

		/* 0xff means the prep routine already failed the pkt */
		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp = &hba->chan[hba->channel_els];
	cp->ulpSendCmd++;

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	/* Timeout pad: +10s unless the caller timeout exceeds 255s */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *) NULL;
	sbp->lun = EMLXS_LUN_NONE;
	sbp->class = iocb->ULPCLASS;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_els_reply_msg,
	    "%s %s: did=%x oxid=%x rxid=%x %s", emlxs_elscmd_xlate(ucmd),
	    emlxs_elscmd_xlate(cmd), did, pkt->pkt_cmd_fhdr.ox_id,
	    pkt->pkt_cmd_fhdr.rx_id, fcsp_msg);

	/* Process nodes */
	switch (ucmd) {
	case ELS_CMD_RSCN:
		/* Same node handling as the ULP_PATCH6 intercept above, */
		/* but for RSCNs that are actually sent to the wire */
		if ((port->mode == MODE_INITIATOR) && ub_buffer &&
		    cmd == ELS_CMD_ACC) {
			fc_rscn_t *rscn;
			uint32_t count;
			uint32_t *lp = NULL;

			/*
			 * Only the Leadville code path will come thru
			 * here. The RSCN data is NOT swapped properly
			 * for the Comstar code path.
			 */
			lp = (uint32_t *)ub_buffer;
			rscn = (fc_rscn_t *)lp++;
			count = ((rscn->rscn_payload_len - 4) / 4);

			/* Close affected ports */
			for (i = 0; i < count; i++, lp++) {
				(void) emlxs_port_offline(port, *lp);
			}
		}
		break;

	case ELS_CMD_PLOGI:
		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did, 1);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp, hba->channel_fcp,
				    pkt->pkt_timeout + 10);
				emlxs_node_close(port, ndlp, hba->channel_ip,
				    pkt->pkt_timeout + 10);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
			}
		}
		break;

	case ELS_CMD_PRLI:
		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did, 1);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp, hba->channel_fcp,
				    pkt->pkt_timeout + 10);

				/* Flush tx queues */
				(void) emlxs_tx_node_flush(port, ndlp,
				    &hba->chan[hba->channel_fcp], 0, 0);

				/* Flush chip queues */
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_fcp], ndlp, 0);
			}
		}
		break;

	case ELS_CMD_PRLO:
		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did, 1);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp,
				    hba->channel_fcp, 60);

				/* Flush tx queues */
				(void) emlxs_tx_node_flush(port, ndlp,
				    &hba->chan[hba->channel_fcp], 0, 0);

				/* Flush chip queues */
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_fcp], ndlp, 0);
			}
		}

		break;

	case ELS_CMD_LOGO:
		if (cmd == ELS_CMD_ACC) {
			ndlp = emlxs_node_find_did(port, did, 1);

			if (ndlp && ndlp->nlp_active) {
				/* Close the node for any further normal IO */
				emlxs_node_close(port, ndlp,
				    hba->channel_fcp, 60);
				emlxs_node_close(port, ndlp,
				    hba->channel_ip, 60);

				/* Flush tx queues */
				(void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

				/* Flush chip queues */
				(void) emlxs_chipq_node_flush(port, 0, ndlp, 0);
			}
		}

		break;
	}

	/* Push the payload to the device before issuing the IOCB */
	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.ElsRspIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_els_rsp() */
9848
9849
9850 #ifdef MENLO_SUPPORT
/*
 * emlxs_send_menlo()
 *
 * Send a Menlo management command (CT pass-through to the fixed
 * EMLXS_MENLO_DID address) on the CT channel.
 *
 * Two phases are distinguished by pkt_tran_type:
 * - FC_PKT_EXCHANGE: command phase of a Menlo exchange.
 * - otherwise (FC_PKT_OUTBOUND): the data phase of a
 *   MENLO_CMD_FW_DOWNLOAD sequence, continuing the exchange
 *   identified by rx_id.
 *
 * Returns FC_SUCCESS, FC_BADPACKET (Menlo node not found), or the
 * failure code from EMLXS_SLI_PREP_CT_IOCB().
 */
static int32_t
emlxs_send_menlo(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	CHANNEL *cp;
	NODELIST *ndlp;
	uint32_t did;
	uint32_t *lp;		/* raw command payload words (big endian) */
	int32_t rval;

	pkt = PRIV2PKT(sbp);
	did = EMLXS_MENLO_DID;
	lp = (uint32_t *)pkt->pkt_cmd;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	ndlp = emlxs_node_find_did(port, did, 1);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=0x%x", did);

		return (FC_BADPACKET);
	}

	iocbq->node = (void *) ndlp;
	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {

		/* 0xff means the prep routine already failed the pkt */
		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp = &hba->chan[hba->channel_ct];
	cp->ulpSendCmd++;

	if (pkt->pkt_tran_type == FC_PKT_EXCHANGE) {
		/* Cmd phase */

		/* Initalize iocb */
		iocb->un.genreq64.param = pkt->pkt_cmd_fhdr.d_id;
		iocb->ULPCONTEXT = 0;
		iocb->ULPPU = 3;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: [%08x,%08x,%08x,%08x]",
		    emlxs_menlo_cmd_xlate(BE_SWAP32(lp[0])), BE_SWAP32(lp[1]),
		    BE_SWAP32(lp[2]), BE_SWAP32(lp[3]), BE_SWAP32(lp[4]));

	} else {	/* FC_PKT_OUTBOUND */

		/* MENLO_CMD_FW_DOWNLOAD Data Phase */
		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;

		/* Initalize iocb */
		iocb->un.genreq64.param = 0;
		/* Continue the exchange opened by the command phase */
		iocb->ULPCONTEXT = pkt->pkt_cmd_fhdr.rx_id;
		iocb->ULPPU = 1;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: Data: rxid=0x%x size=%d",
		    emlxs_menlo_cmd_xlate(MENLO_CMD_FW_DOWNLOAD),
		    pkt->pkt_cmd_fhdr.rx_id, pkt->pkt_cmdlen);
	}

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	/* Timeout pad: +10s unless the caller timeout exceeds 255s */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *) ndlp;
	sbp->lun = EMLXS_LUN_NONE;
	sbp->class = iocb->ULPCLASS;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	/* Push the payload to the device before issuing the IOCB */
	EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
	    DDI_DMA_SYNC_FORDEV);

	HBASTATS.CtCmdIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_menlo() */
9943 #endif /* MENLO_SUPPORT */
9944
9945
/*
 * emlxs_send_ct()
 *
 * Send a CT (Common Transport) request on the CT channel.  Used for
 * nameserver (NAMESERVER_DID), management server/FDMI (FDMI_DID), and
 * other CT destinations; the only difference between those cases here
 * is which xlate helper decodes the command for the trace message.
 *
 * Returns FC_SUCCESS, FC_BADPACKET (destination node not logged in),
 * or the failure code from EMLXS_SLI_PREP_CT_IOCB().
 */
static int32_t
emlxs_send_ct(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *ndlp;
	uint32_t did;
	CHANNEL *cp;
	int32_t rval;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

	ndlp = emlxs_node_find_did(port, did, 1);

	if (!ndlp || !ndlp->nlp_active) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
		    "Node not found. did=0x%x", did);

		return (FC_BADPACKET);
	}

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_ct_pkt(sbp);
#endif /* EMLXS_MODREV2X */

	iocbq->node = (void *)ndlp;
	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {

		/* 0xff means the prep routine already failed the pkt */
		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp = &hba->chan[hba->channel_ct];
	cp->ulpSendCmd++;

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	/* Timeout pad: +10s unless the caller timeout exceeds 255s */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = (void *)ndlp;
	sbp->lun = EMLXS_LUN_NONE;
	sbp->class = iocb->ULPCLASS;
	sbp->did = did;
	mutex_exit(&sbp->mtx);

	if (did == NAMESERVER_DID) {
		SLI_CT_REQUEST *CtCmd;
		uint32_t *lp0;

		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
		lp0 = (uint32_t *)pkt->pkt_cmd;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: did=%x [%08x,%08x]",
		    emlxs_ctcmd_xlate(
		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));

		/* Pace nameserver traffic while NPIV logins settle */
		if (hba->flag & FC_NPIV_DELAY_REQUIRED) {
			sbp->pkt_flags |= PACKET_DELAY_REQUIRED;
		}

	} else if (did == FDMI_DID) {
		SLI_CT_REQUEST *CtCmd;
		uint32_t *lp0;

		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
		lp0 = (uint32_t *)pkt->pkt_cmd;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: did=%x [%08x,%08x]",
		    emlxs_mscmd_xlate(
		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
	} else {
		SLI_CT_REQUEST *CtCmd;
		uint32_t *lp0;

		CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
		lp0 = (uint32_t *)pkt->pkt_cmd;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_send_msg,
		    "%s: did=%x [%08x,%08x]",
		    emlxs_rmcmd_xlate(
		    LE_SWAP16(CtCmd->CommandResponse.bits.CmdRsp)),
		    did, LE_SWAP32(lp0[4]), LE_SWAP32(lp0[5]));
	}

	/* Push the payload to the device before issuing the IOCB */
	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.CtCmdIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_ct() */
10056
10057
/*
 * emlxs_send_ct_rsp()
 *
 * Send a CT response (a reply to a previously received unsolicited CT
 * request) on the CT channel.  Unlike the command path, no node lookup
 * is done; the response is matched to the request by rx_id inside the
 * prep routine.
 *
 * Returns FC_SUCCESS or the failure code from EMLXS_SLI_PREP_CT_IOCB().
 */
static int32_t
emlxs_send_ct_rsp(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	CHANNEL *cp;
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t *cmd;
	SLI_CT_REQUEST *CtCmd;
	int32_t rval;

	pkt = PRIV2PKT(sbp);
	CtCmd = (SLI_CT_REQUEST *)pkt->pkt_cmd;
	cmd = (uint32_t *)pkt->pkt_cmd;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	emlxs_swap_ct_pkt(sbp);
#endif /* EMLXS_MODREV2X */

	if ((rval = EMLXS_SLI_PREP_CT_IOCB(port, sbp)) != FC_SUCCESS) {

		/* 0xff means the prep routine already failed the pkt */
		if (rval == 0xff) {
			emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT, 0, 1);
			rval = FC_SUCCESS;
		}

		return (rval);
	}

	cp = &hba->chan[hba->channel_ct];
	cp->ulpSendCmd++;

	/* Initalize sbp */
	mutex_enter(&sbp->mtx);
	/* Timeout pad: +10s unless the caller timeout exceeds 255s */
	sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
	    ((pkt->pkt_timeout > 0xff) ? 0 : 10);
	sbp->node = NULL;
	sbp->lun = EMLXS_LUN_NONE;
	sbp->class = iocb->ULPCLASS;
	mutex_exit(&sbp->mtx);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ct_reply_msg,
	    "%s: Rsn=%x Exp=%x [%08x,%08x] rxid=%x ",
	    emlxs_rmcmd_xlate(LE_SWAP16(
	    CtCmd->CommandResponse.bits.CmdRsp)),
	    CtCmd->ReasonCode, CtCmd->Explanation,
	    LE_SWAP32(cmd[4]), LE_SWAP32(cmd[5]),
	    pkt->pkt_cmd_fhdr.rx_id);

	/* Push the payload to the device before issuing the IOCB */
	if (pkt->pkt_cmdlen) {
		EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
		    DDI_DMA_SYNC_FORDEV);
	}

	HBASTATS.CtRspIssued++;

	EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

	return (FC_SUCCESS);

} /* emlxs_send_ct_rsp() */
10123
10124
10125 /*
10126 * emlxs_get_instance()
10127 * Given a ddi ddiinst, return a Fibre Channel (emlx) ddiinst.
10128 */
10129 extern uint32_t
10130 emlxs_get_instance(int32_t ddiinst)
10131 {
10132 uint32_t i;
10133 uint32_t inst;
10134
10135 mutex_enter(&emlxs_device.lock);
10136
10137 inst = MAX_FC_BRDS;
10138 for (i = 0; i < emlxs_instance_count; i++) {
10139 if (emlxs_instance[i] == ddiinst) {
10140 inst = i;
10141 break;
10142 }
10143 }
10144
10145 mutex_exit(&emlxs_device.lock);
10146
10147 return (inst);
10148
10149 } /* emlxs_get_instance() */
10150
10151
10152 /*
10153 * emlxs_add_instance()
10154 * Given a ddi ddiinst, create a Fibre Channel (emlx) ddiinst.
10155 * emlx ddiinsts are the order that emlxs_attach gets called, starting at 0.
10156 */
10157 static uint32_t
10158 emlxs_add_instance(int32_t ddiinst)
10159 {
10160 uint32_t i;
10161
10162 mutex_enter(&emlxs_device.lock);
10163
10164 /* First see if the ddiinst already exists */
10165 for (i = 0; i < emlxs_instance_count; i++) {
10166 if (emlxs_instance[i] == ddiinst) {
10167 break;
10168 }
10169 }
10170
10171 /* If it doesn't already exist, add it */
10172 if (i >= emlxs_instance_count) {
10173 if ((i = emlxs_instance_count) < MAX_FC_BRDS) {
10174 emlxs_instance[i] = ddiinst;
10175 emlxs_instance_count++;
10176 emlxs_device.hba_count = emlxs_instance_count;
10177 }
10178 }
10179
10180 mutex_exit(&emlxs_device.lock);
10181
10182 return (i);
10183
10184 } /* emlxs_add_instance() */
10185
10186
10187 /*ARGSUSED*/
/*
 * emlxs_pkt_complete()
 *
 * Central I/O completion entry point.  Validates the packet's state
 * flags, records the completion status, adjusts any parent "flush"
 * packet's outstanding count, and then routes the packet to one of
 * three completion paths:
 * - polled packets: mark completed and wake the polling thread;
 * - driver-internal packets (PACKET_ALLOCATED): complete inline;
 * - everything else: queue on the HBA done queue for the iodone
 *   server thread (preserves completion ordering).
 *
 * iostat/localstat are only applied if the state was not already set.
 * The doneq argument is unused (ARGSUSED).
 */
extern void
emlxs_pkt_complete(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
    uint32_t doneq)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	emlxs_buf_t *fpkt;

	port = sbp->port;

	if (!port) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_completion_error_msg,
		    "NULL port found. sbp=%p flags=%x", sbp, sbp->pkt_flags);

		return;
	}

	hba = HBA;

	/* SLI4: an XRI still attached at completion time is a leak; */
	/* reclaim it before finishing the packet */
	if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
	    (sbp->iotag)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
		    "WARNING: Completing IO with iotag. sbp=%p iotag=%d "
		    "xri_flags=%x",
		    sbp, sbp->iotag, ((sbp->xrip)? sbp->xrip->flag:0));

		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
	}

	mutex_enter(&sbp->mtx);

	/* Check for error conditions */
	if (sbp->pkt_flags & (PACKET_ULP_OWNED | PACKET_COMPLETED |
	    PACKET_IN_DONEQ | PACKET_IN_COMPLETION |
	    PACKET_IN_TXQ | PACKET_IN_CHIPQ)) {
		if (sbp->pkt_flags & PACKET_ULP_OWNED) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet already returned. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_COMPLETED) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet already completed. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_DONEQ) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Pkt already on done queue. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_COMPLETION) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet already in completion. sbp=%p flags=%x",
			    sbp, sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet still on chip queue. sbp=%p flags=%x",
			    sbp, sbp->pkt_flags);
		}

		else if (sbp->pkt_flags & PACKET_IN_TXQ) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_pkt_completion_error_msg,
			    "Packet still on tx queue. sbp=%p flags=%x", sbp,
			    sbp->pkt_flags);
		}

		mutex_exit(&sbp->mtx);
		return;
	}

	/* Packet is now in completion */
	sbp->pkt_flags |= PACKET_IN_COMPLETION;

	/* Set the state if not already set */
	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
		emlxs_set_pkt_state(sbp, iostat, localstat, 0);
	}

	/* Check for parent flush packet */
	/* If pkt has a parent flush packet then adjust its count now */
	fpkt = sbp->fpkt;
	if (fpkt) {
		/*
		 * We will try to NULL sbp->fpkt inside the
		 * fpkt's mutex if possible
		 */

		if (!(fpkt->pkt_flags & PACKET_ULP_OWNED)) {
			mutex_enter(&fpkt->mtx);
			if (fpkt->flush_count) {
				fpkt->flush_count--;
			}
			sbp->fpkt = NULL;
			mutex_exit(&fpkt->mtx);
		} else {	/* fpkt has been returned already */

			sbp->fpkt = NULL;
		}
	}

	/* If pkt is polled, then wake up sleeping thread */
	if (sbp->pkt_flags & PACKET_POLLED) {
		/* Don't set the PACKET_ULP_OWNED flag here */
		/* because the polling thread will do it */
		sbp->pkt_flags |= PACKET_COMPLETED;
		mutex_exit(&sbp->mtx);

		/* Wake up sleeping thread */
		mutex_enter(&EMLXS_PKT_LOCK);
		cv_broadcast(&EMLXS_PKT_CV);
		mutex_exit(&EMLXS_PKT_LOCK);
	}

	/* If packet was generated by our driver, */
	/* then complete it immediately */
	else if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);

		emlxs_iodone(sbp);
	}

	/* Put the pkt on the done queue for callback */
	/* completion in another thread */
	else {
		sbp->pkt_flags |= PACKET_IN_DONEQ;
		sbp->next = NULL;
		mutex_exit(&sbp->mtx);

		/* Put pkt on doneq, so I/O's will be completed in order */
		mutex_enter(&EMLXS_PORT_LOCK);
		if (hba->iodone_tail == NULL) {
			hba->iodone_list = sbp;
			hba->iodone_count = 1;
		} else {
			hba->iodone_tail->next = sbp;
			hba->iodone_count++;
		}
		hba->iodone_tail = sbp;
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Trigger a thread to service the doneq */
		emlxs_thread_trigger1(&hba->iodone_thread,
		    emlxs_iodone_server);
	}

	return;

} /* emlxs_pkt_complete() */
10347
10348
10349 #ifdef SAN_DIAG_SUPPORT
10350 /*
10351 * This routine is called with EMLXS_PORT_LOCK held so we can just increment
10352 * normally. Don't have to use atomic operations.
10353 */
10354 extern void
10355 emlxs_update_sd_bucket(emlxs_buf_t *sbp)
10356 {
10357 emlxs_port_t *vport;
10358 fc_packet_t *pkt;
10359 uint32_t did;
10360 hrtime_t t;
10361 hrtime_t delta_time;
10362 int i;
10363 NODELIST *ndlp;
10364
10365 vport = sbp->port;
10366
10367 if ((emlxs_sd_bucket.search_type == 0) ||
10368 (vport->sd_io_latency_state != SD_COLLECTING)) {
10369 return;
10370 }
10371
10372 /* Compute the iolatency time in microseconds */
10373 t = gethrtime();
10374 delta_time = t - sbp->sd_start_time;
10375 pkt = PRIV2PKT(sbp);
10376 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
10377 ndlp = emlxs_node_find_did(vport, did, 1);
10378
10379 if (!ndlp) {
10380 return;
10381 }
10382
10383 if (delta_time >=
10384 emlxs_sd_bucket.values[SD_IO_LATENCY_MAX_BUCKETS - 1]) {
10385 ndlp->sd_dev_bucket[SD_IO_LATENCY_MAX_BUCKETS - 1].
10386 count++;
10387 } else if (delta_time <= emlxs_sd_bucket.values[0]) {
10388 ndlp->sd_dev_bucket[0].count++;
10389 } else {
10390 for (i = 1; i < SD_IO_LATENCY_MAX_BUCKETS; i++) {
10391 if ((delta_time > emlxs_sd_bucket.values[i-1]) &&
10392 (delta_time <= emlxs_sd_bucket.values[i])) {
10393 ndlp->sd_dev_bucket[i].count++;
10394 break;
10395 }
10396 }
10397 }
10398
10399 return;
10400
10401 } /* emlxs_update_sd_bucket() */
10402 #endif /* SAN_DIAG_SUPPORT */
10403
10404 /*ARGSUSED*/
/*
 * emlxs_iodone_server()
 *
 * Done-queue service thread body (triggered via emlxs_thread_trigger1
 * from emlxs_pkt_complete).  Drains the HBA done queue one packet at a
 * time, dropping EMLXS_PORT_LOCK around each emlxs_iodone() call so
 * ULP completion callbacks never run under the port lock.  arg2/arg3
 * are unused (ARGSUSED).
 */
static void
emlxs_iodone_server(void *arg1, void *arg2, void *arg3)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
	emlxs_buf_t *sbp;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Remove one pkt from the doneq head and complete it */
	while ((sbp = hba->iodone_list) != NULL) {
		if ((hba->iodone_list = sbp->next) == NULL) {
			hba->iodone_tail = NULL;
			hba->iodone_count = 0;
		} else {
			hba->iodone_count--;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		/* Prepare the pkt for completion */
		mutex_enter(&sbp->mtx);
		sbp->next = NULL;
		sbp->pkt_flags &= ~PACKET_IN_DONEQ;
		mutex_exit(&sbp->mtx);

		/* Complete the IO now */
		emlxs_iodone(sbp);

		/* Reacquire lock and check if more work is to be done */
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	mutex_exit(&EMLXS_PORT_LOCK);

#ifdef FMA_SUPPORT
	/* A DMA fault was detected during completion; restart the HBA */
	if (hba->flag & FC_DMA_CHECK_ERROR) {
		emlxs_thread_spawn(hba, emlxs_restart_thread,
		    NULL, NULL);
	}
#endif /* FMA_SUPPORT */

	return;

} /* End emlxs_iodone_server */
10449
10450
/*
 * emlxs_iodone()
 *
 * Final completion step for a packet: marks it COMPLETED and
 * ULP_OWNED, then invokes the ULP's pkt_comp callback (if any).
 * After the flags are set the packet belongs to the ULP and must not
 * be touched again by the driver.  Safe to call more than once; a
 * packet already returned to the ULP is ignored.
 */
static void
emlxs_iodone(emlxs_buf_t *sbp)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = sbp->port;
	emlxs_hba_t *hba = port->hba;
#endif  /* FMA_SUPPORT */

	fc_packet_t *pkt;
	CHANNEL *cp;

	pkt = PRIV2PKT(sbp);

	/* Check one more time that the  pkt has not already been returned */
	if (sbp->pkt_flags & PACKET_ULP_OWNED) {
		return;
	}

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
	/* Restore payload byte order before handing back to the ULP */
	emlxs_unswap_pkt(sbp);
#endif  /* EMLXS_MODREV2X */

	mutex_enter(&sbp->mtx);
	sbp->pkt_flags |= (PACKET_COMPLETED | PACKET_ULP_OWNED);
	mutex_exit(&sbp->mtx);

	if (pkt->pkt_comp) {
#ifdef FMA_SUPPORT
		emlxs_check_dma(hba, sbp);
#endif  /* FMA_SUPPORT */

		/* Account the completion on the issuing channel */
		if (sbp->channel) {
			cp = (CHANNEL *)sbp->channel;
			cp->ulpCmplCmd++;
		}

		(*pkt->pkt_comp) (pkt);
	}

	return;

} /* emlxs_iodone() */
10493
10494
10495
10496 extern fc_unsol_buf_t *
10497 emlxs_ub_find(emlxs_port_t *port, uint32_t token)
10498 {
10499 emlxs_unsol_buf_t *pool;
10500 fc_unsol_buf_t *ubp;
10501 emlxs_ub_priv_t *ub_priv;
10502
10503 /* Check if this is a valid ub token */
10504 if (token < EMLXS_UB_TOKEN_OFFSET) {
10505 return (NULL);
10506 }
10507
10508 mutex_enter(&EMLXS_UB_LOCK);
10509
10510 pool = port->ub_pool;
10511 while (pool) {
10512 /* Find a pool with the proper token range */
10513 if (token >= pool->pool_first_token &&
10514 token <= pool->pool_last_token) {
10515 ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[(token -
10516 pool->pool_first_token)];
10517 ub_priv = ubp->ub_fca_private;
10518
10519 if (ub_priv->token != token) {
10520 EMLXS_MSGF(EMLXS_CONTEXT,
10521 &emlxs_sfs_debug_msg,
10522 "ub_find: Invalid token=%x", ubp, token,
10523 ub_priv->token);
10524
10525 ubp = NULL;
10526 }
10527
10528 else if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
10529 EMLXS_MSGF(EMLXS_CONTEXT,
10530 &emlxs_sfs_debug_msg,
10531 "ub_find: Buffer not in use. buffer=%p "
10532 "token=%x", ubp, token);
10533
10534 ubp = NULL;
10535 }
10536
10537 mutex_exit(&EMLXS_UB_LOCK);
10538
10539 return (ubp);
10540 }
10541
10542 pool = pool->pool_next;
10543 }
10544
10545 mutex_exit(&EMLXS_UB_LOCK);
10546
10547 return (NULL);
10548
10549 } /* emlxs_ub_find() */
10550
10551
10552
/*
 * emlxs_ub_get() - Allocate an unsolicited buffer from the port's pools.
 *
 * Scans the pool list for a pool of the requested 'type' whose buffers
 * are at least 'size' bytes.  If 'reserve' is set, the request draws on
 * the pool's reserve count first and falls back to the normal free
 * count when no reserve buffers remain.  A successful allocation stamps
 * the buffer with a 5 minute timeout and marks it EMLXS_UB_IN_USE.
 * Returns NULL if no suitable buffer is available.
 */
extern fc_unsol_buf_t *
emlxs_ub_get(emlxs_port_t *port, uint32_t size, uint32_t type,
    uint32_t reserve)
{
	emlxs_hba_t *hba = HBA;
	emlxs_unsol_buf_t *pool;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	uint32_t i;
	uint32_t resv_flag;
	uint32_t pool_free;
	uint32_t pool_free_resv;

	mutex_enter(&EMLXS_UB_LOCK);

	pool = port->ub_pool;
	while (pool) {
		/* Find a pool of the appropriate type and size */
		if ((pool->pool_available == 0) ||
		    (pool->pool_type != type) ||
		    (pool->pool_buf_size < size)) {
			goto next_pool;
		}


		/* Adjust free counts based on availability */
		/* The free reserve count gets first priority */
		pool_free_resv =
		    min(pool->pool_free_resv, pool->pool_available);
		pool_free =
		    min(pool->pool_free,
		    (pool->pool_available - pool_free_resv));

		/* Initialize reserve flag */
		resv_flag = reserve;

		if (resv_flag) {
			if (pool_free_resv == 0) {
				/* Reserve exhausted; fall back to the */
				/* normal free count if any remain */
				if (pool_free == 0) {
					goto next_pool;
				}
				resv_flag = 0;
			}
		} else if (pool_free == 0) {
			goto next_pool;
		}

		/* Find next available free buffer in this pool */
		for (i = 0; i < pool->pool_nentries; i++) {
			ubp = (fc_unsol_buf_t *)&pool->fc_ubufs[i];
			ub_priv = ubp->ub_fca_private;

			if (!ub_priv->available ||
			    ub_priv->flags != EMLXS_UB_FREE) {
				continue;
			}

			/* Stamp allocation time for timeout tracking */
			ub_priv->time = hba->timer_tics;

			/* Timeout in 5 minutes */
			ub_priv->timeout = (5 * 60);

			ub_priv->flags = EMLXS_UB_IN_USE;

			/* Alloc the buffer from the pool */
			if (resv_flag) {
				ub_priv->flags |= EMLXS_UB_RESV;
				pool->pool_free_resv--;
			} else {
				pool->pool_free--;
			}

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_detail_msg,
			    "ub_get: ubp=%p token=%x (%d,%d,%d,%d)", ubp,
			    ub_priv->token, pool->pool_nentries,
			    pool->pool_available, pool->pool_free,
			    pool->pool_free_resv);

			mutex_exit(&EMLXS_UB_LOCK);

			return (ubp);
		}
next_pool:

		pool = pool->pool_next;
	}

	mutex_exit(&EMLXS_UB_LOCK);

	return (NULL);

} /* emlxs_ub_get() */
10645
10646
10647
/*
 * emlxs_set_pkt_state() - Translate a firmware I/O status into the
 * ULP-visible fc_packet state/reason/explanation/action fields.
 *
 * 'iostat' selects an entry in emlxs_iostat_tbl, except for
 * IOSTAT_LOCAL_REJECT where 'localstat' selects from emlxs_ioerr_tbl
 * instead.  The translation is applied only once per packet (guarded
 * by PACKET_STATE_VALID).  If no response frame was received from the
 * chip, residual counts -- and for successful FCP commands a minimal
 * response frame -- are synthesized here.  'lock' selects whether
 * sbp->mtx is acquired around the update.  Safe no-op for FCT sbp's
 * that carry no fc_packet.
 */
extern void
emlxs_set_pkt_state(emlxs_buf_t *sbp, uint32_t iostat, uint8_t localstat,
    uint32_t lock)
{
	fc_packet_t *pkt;
	fcp_rsp_t *fcp_rsp;
	uint32_t i;
	emlxs_xlat_err_t *tptr;
	emlxs_xlat_err_t *entry;


	pkt = PRIV2PKT(sbp);

	/* Warning: Some FCT sbp's don't have */
	/* fc_packet objects, so just return */
	if (!pkt) {
		return;
	}

	if (lock) {
		mutex_enter(&sbp->mtx);
	}

	if (!(sbp->pkt_flags & PACKET_STATE_VALID)) {
		sbp->pkt_flags |= PACKET_STATE_VALID;

		/* Perform table lookup */
		entry = NULL;
		if (iostat != IOSTAT_LOCAL_REJECT) {
			tptr = emlxs_iostat_tbl;
			for (i = 0; i < IOSTAT_MAX; i++, tptr++) {
				if (iostat == tptr->emlxs_status) {
					entry = tptr;
					break;
				}
			}
		} else {	/* iostat == IOSTAT_LOCAL_REJECT */

			tptr = emlxs_ioerr_tbl;
			for (i = 0; i < IOERR_MAX; i++, tptr++) {
				if (localstat == tptr->emlxs_status) {
					entry = tptr;
					break;
				}
			}
		}

		if (entry) {
			pkt->pkt_state = entry->pkt_state;
			pkt->pkt_reason = entry->pkt_reason;
			pkt->pkt_expln = entry->pkt_expln;
			pkt->pkt_action = entry->pkt_action;
		} else {
			/* Set defaults */
			pkt->pkt_state = FC_PKT_TRAN_ERROR;
			pkt->pkt_reason = FC_REASON_ABORTED;
			pkt->pkt_expln = FC_EXPLN_NONE;
			pkt->pkt_action = FC_ACTION_RETRYABLE;
		}


		/* Set the residual counts and response frame */
		/* Check if response frame was received from the chip */
		/* If so, then the residual counts will already be set */
		if (!(sbp->pkt_flags & (PACKET_FCP_RSP_VALID |
		    PACKET_CT_RSP_VALID | PACKET_ELS_RSP_VALID))) {
			/* We have to create the response frame */
			if (iostat == IOSTAT_SUCCESS) {
				pkt->pkt_resp_resid = 0;
				pkt->pkt_data_resid = 0;

				/* Successful FCP commands get a minimal */
				/* good-status response frame */
				if ((pkt->pkt_cmd_fhdr.type ==
				    FC_TYPE_SCSI_FCP) && pkt->pkt_rsplen &&
				    pkt->pkt_resp) {
					fcp_rsp = (fcp_rsp_t *)pkt->pkt_resp;

					fcp_rsp->fcp_u.fcp_status.
					    rsp_len_set = 1;
					fcp_rsp->fcp_response_len = 8;
				}
			} else {
				/* Otherwise assume no data */
				/* and no response received */
				pkt->pkt_data_resid = pkt->pkt_datalen;
				pkt->pkt_resp_resid = pkt->pkt_rsplen;
			}
		}
	}

	if (lock) {
		mutex_exit(&sbp->mtx);
	}

	return;

} /* emlxs_set_pkt_state() */
10744
10745
10746 #if (EMLXS_MODREVX == EMLXS_MODREV2X)
10747
10748 extern void
10749 emlxs_swap_service_params(SERV_PARM *sp)
10750 {
10751 uint16_t *p;
10752 int size;
10753 int i;
10754
10755 size = (sizeof (CSP) - 4) / 2;
10756 p = (uint16_t *)&sp->cmn;
10757 for (i = 0; i < size; i++) {
10758 p[i] = LE_SWAP16(p[i]);
10759 }
10760 sp->cmn.e_d_tov = LE_SWAP32(sp->cmn.e_d_tov);
10761
10762 size = sizeof (CLASS_PARMS) / 2;
10763 p = (uint16_t *)&sp->cls1;
10764 for (i = 0; i < size; i++, p++) {
10765 *p = LE_SWAP16(*p);
10766 }
10767
10768 size = sizeof (CLASS_PARMS) / 2;
10769 p = (uint16_t *)&sp->cls2;
10770 for (i = 0; i < size; i++, p++) {
10771 *p = LE_SWAP16(*p);
10772 }
10773
10774 size = sizeof (CLASS_PARMS) / 2;
10775 p = (uint16_t *)&sp->cls3;
10776 for (i = 0; i < size; i++, p++) {
10777 *p = LE_SWAP16(*p);
10778 }
10779
10780 size = sizeof (CLASS_PARMS) / 2;
10781 p = (uint16_t *)&sp->cls4;
10782 for (i = 0; i < size; i++, p++) {
10783 *p = LE_SWAP16(*p);
10784 }
10785
10786 return;
10787
10788 } /* emlxs_swap_service_params() */
10789
10790 extern void
10791 emlxs_unswap_pkt(emlxs_buf_t *sbp)
10792 {
10793 if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
10794 emlxs_swap_fcp_pkt(sbp);
10795 }
10796
10797 else if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
10798 emlxs_swap_els_pkt(sbp);
10799 }
10800
10801 else if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
10802 emlxs_swap_ct_pkt(sbp);
10803 }
10804
10805 } /* emlxs_unswap_pkt() */
10806
10807
/*
 * emlxs_swap_fcp_pkt() - Toggle the endian swap on an FCP packet's
 * command (and, when valid, response) payload.
 *
 * PACKET_FCP_SWAPPED tracks the current state: the flag is flipped
 * and the (self-inverse) byte swap applied, so this one routine both
 * swaps and unswaps.  Packets marked PACKET_ALLOCATED are skipped.
 */
extern void
emlxs_swap_fcp_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	FCP_CMND *cmd;
	fcp_rsp_t *rsp;
	uint16_t *lunp;
	uint32_t i;

	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	/* Toggle the swap state flag */
	if (sbp->pkt_flags & PACKET_FCP_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_FCP_SWAPPED;
	} else {
		sbp->pkt_flags |= PACKET_FCP_SWAPPED;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (FCP_CMND *)pkt->pkt_cmd;
	/* Only touch the response if one was actually received */
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_FCP_RSP_VALID)) ?
	    (fcp_rsp_t *)pkt->pkt_resp : NULL;

	/* The size of data buffer needs to be swapped. */
	cmd->fcpDl = LE_SWAP32(cmd->fcpDl);

	/*
	 * Swap first 2 words of FCP CMND payload.
	 */
	lunp = (uint16_t *)&cmd->fcpLunMsl;
	for (i = 0; i < 4; i++) {
		lunp[i] = LE_SWAP16(lunp[i]);
	}

	if (rsp) {
		rsp->fcp_resid = LE_SWAP32(rsp->fcp_resid);
		rsp->fcp_sense_len = LE_SWAP32(rsp->fcp_sense_len);
		rsp->fcp_response_len = LE_SWAP32(rsp->fcp_response_len);
	}

	return;

} /* emlxs_swap_fcp_pkt() */
10859
10860
/*
 * emlxs_swap_els_pkt() - Toggle the endian swap on an ELS packet's
 * command (and, when valid, response) payload.
 *
 * PACKET_ELS_SWAPPED tracks direction: 'swapped' records whether we
 * are undoing a prior swap.  The ELS command code is always extracted
 * from the swapped (wire) form of word 0 -- after swapping when
 * converting to wire order, before swapping when converting back --
 * then command-specific words are swapped per ELS opcode.  Packets
 * marked PACKET_ALLOCATED are skipped.
 */
extern void
emlxs_swap_els_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	uint32_t *cmd;
	uint32_t *rsp;
	uint32_t command;
	uint16_t *c;
	uint32_t i;
	uint32_t swapped;

	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	/* Toggle the swap state flag; remember which direction */
	if (sbp->pkt_flags & PACKET_ELS_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_ELS_SWAPPED;
		swapped = 1;
	} else {
		sbp->pkt_flags |= PACKET_ELS_SWAPPED;
		swapped = 0;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (uint32_t *)pkt->pkt_cmd;
	/* Only touch the response if one was actually received */
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_ELS_RSP_VALID)) ?
	    (uint32_t *)pkt->pkt_resp : NULL;

	/* Extract the command code from the swapped form of word 0 */
	if (!swapped) {
		cmd[0] = LE_SWAP32(cmd[0]);
		command = cmd[0] & ELS_CMD_MASK;
	} else {
		command = cmd[0] & ELS_CMD_MASK;
		cmd[0] = LE_SWAP32(cmd[0]);
	}

	if (rsp) {
		rsp[0] = LE_SWAP32(rsp[0]);
	}

	/* Swap the command-specific payload words */
	switch (command) {
	case ELS_CMD_ACC:
		if (sbp->ucmd == ELS_CMD_ADISC) {
			/* Hard address of originator */
			cmd[1] = LE_SWAP32(cmd[1]);

			/* N_Port ID of originator */
			cmd[6] = LE_SWAP32(cmd[6]);
		}
		break;

	case ELS_CMD_PLOGI:
	case ELS_CMD_FLOGI:
	case ELS_CMD_FDISC:
		if (rsp) {
			emlxs_swap_service_params((SERV_PARM *) & rsp[1]);
		}
		break;

	case ELS_CMD_LOGO:
		cmd[1] = LE_SWAP32(cmd[1]);	/* N_Port ID */
		break;

	case ELS_CMD_RLS:
		cmd[1] = LE_SWAP32(cmd[1]);

		if (rsp) {
			/* Link error status block (6 words) */
			for (i = 0; i < 6; i++) {
				rsp[1 + i] = LE_SWAP32(rsp[1 + i]);
			}
		}
		break;

	case ELS_CMD_ADISC:
		cmd[1] = LE_SWAP32(cmd[1]);	/* Hard address of originator */
		cmd[6] = LE_SWAP32(cmd[6]);	/* N_Port ID of originator */
		break;

	case ELS_CMD_PRLI:
		c = (uint16_t *)&cmd[1];
		c[1] = LE_SWAP16(c[1]);

		cmd[4] = LE_SWAP32(cmd[4]);

		if (rsp) {
			rsp[4] = LE_SWAP32(rsp[4]);
		}
		break;

	case ELS_CMD_SCR:
		cmd[1] = LE_SWAP32(cmd[1]);
		break;

	case ELS_CMD_LINIT:
		if (rsp) {
			rsp[1] = LE_SWAP32(rsp[1]);
		}
		break;

	default:
		break;
	}

	return;

} /* emlxs_swap_els_pkt() */
10974
10975
/*
 * emlxs_swap_ct_pkt() - Toggle the endian swap on a CT packet's
 * command (and, when valid, response) payload.
 *
 * PACKET_CT_SWAPPED tracks direction.  The CT command code (word 2) is
 * always read in host order: before the swap when converting to wire
 * order, after the swap when converting back.  Words 0-3 (the CT
 * header) are always swapped; additional words are swapped per command.
 * Packets marked PACKET_ALLOCATED are skipped.
 *
 * NOTE(review): when swapping to wire order, word 0 is unconditionally
 * overwritten with 0x01000000 (presumably forcing the CT revision
 * field) before being swapped -- confirm this matches the intended CT
 * header layout.
 */
extern void
emlxs_swap_ct_pkt(emlxs_buf_t *sbp)
{
	fc_packet_t *pkt;
	uint32_t *cmd;
	uint32_t *rsp;
	uint32_t command;
	uint32_t i;
	uint32_t swapped;

	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_ALLOCATED) {
		mutex_exit(&sbp->mtx);
		return;
	}

	/* Toggle the swap state flag; remember which direction */
	if (sbp->pkt_flags & PACKET_CT_SWAPPED) {
		sbp->pkt_flags &= ~PACKET_CT_SWAPPED;
		swapped = 1;
	} else {
		sbp->pkt_flags |= PACKET_CT_SWAPPED;
		swapped = 0;
	}

	mutex_exit(&sbp->mtx);

	pkt = PRIV2PKT(sbp);

	cmd = (uint32_t *)pkt->pkt_cmd;
	/* Only touch the response if one was actually received */
	rsp = (pkt->pkt_rsplen &&
	    (sbp->pkt_flags & PACKET_CT_RSP_VALID)) ?
	    (uint32_t *)pkt->pkt_resp : NULL;

	/* Capture the command code in host order before swapping */
	if (!swapped) {
		cmd[0] = 0x01000000;
		command = cmd[2];
	}

	/* Swap the 4-word CT header */
	cmd[0] = LE_SWAP32(cmd[0]);
	cmd[1] = LE_SWAP32(cmd[1]);
	cmd[2] = LE_SWAP32(cmd[2]);
	cmd[3] = LE_SWAP32(cmd[3]);

	/* When unswapping, the host-order code is available after */
	if (swapped) {
		command = cmd[2];
	}

	/* Swap command-specific payload words (code in upper 16 bits) */
	switch ((command >> 16)) {
	case SLI_CTNS_GA_NXT:
		cmd[4] = LE_SWAP32(cmd[4]);
		break;

	case SLI_CTNS_GPN_ID:
	case SLI_CTNS_GNN_ID:
	case SLI_CTNS_RPN_ID:
	case SLI_CTNS_RNN_ID:
	case SLI_CTNS_RSPN_ID:
		cmd[4] = LE_SWAP32(cmd[4]);
		break;

	case SLI_CTNS_RCS_ID:
	case SLI_CTNS_RPT_ID:
		cmd[4] = LE_SWAP32(cmd[4]);
		cmd[5] = LE_SWAP32(cmd[5]);
		break;

	case SLI_CTNS_RFT_ID:
		cmd[4] = LE_SWAP32(cmd[4]);

		/* Swap FC4 types */
		for (i = 0; i < 8; i++) {
			cmd[5 + i] = LE_SWAP32(cmd[5 + i]);
		}
		break;

	case SLI_CTNS_GFT_ID:
		if (rsp) {
			/* Swap FC4 types */
			for (i = 0; i < 8; i++) {
				rsp[4 + i] = LE_SWAP32(rsp[4 + i]);
			}
		}
		break;

	case SLI_CTNS_GCS_ID:
	case SLI_CTNS_GSPN_ID:
	case SLI_CTNS_GSNN_NN:
	case SLI_CTNS_GIP_NN:
	case SLI_CTNS_GIPA_NN:

	case SLI_CTNS_GPT_ID:
	case SLI_CTNS_GID_NN:
	case SLI_CTNS_GNN_IP:
	case SLI_CTNS_GIPA_IP:
	case SLI_CTNS_GID_FT:
	case SLI_CTNS_GID_PT:
	case SLI_CTNS_GID_PN:
	case SLI_CTNS_RIP_NN:
	case SLI_CTNS_RIPA_NN:
	case SLI_CTNS_RSNN_NN:
	case SLI_CTNS_DA_ID:
	case SLI_CT_RESPONSE_FS_RJT:
	case SLI_CT_RESPONSE_FS_ACC:

	default:
		break;
	}
	return;

} /* emlxs_swap_ct_pkt() */
11087
11088
/*
 * emlxs_swap_els_ub() - Byte swap the payload of an unsolicited ELS
 * buffer before it is handed to the ULP.
 *
 * Only RSCN payloads and login service parameters are swapped here;
 * other ELS commands are either handled by the ULP or need no swap.
 */
extern void
emlxs_swap_els_ub(fc_unsol_buf_t *ubp)
{
	emlxs_ub_priv_t *ub_priv;
	fc_rscn_t *rscn;
	uint32_t count;
	uint32_t i;
	uint32_t *lp;
	la_els_logi_t *logi;

	ub_priv = ubp->ub_fca_private;

	switch (ub_priv->cmd) {
	case ELS_CMD_RSCN:
		rscn = (fc_rscn_t *)ubp->ub_buffer;

		rscn->rscn_payload_len = LE_SWAP16(rscn->rscn_payload_len);

		/* Swap each affected-port page after the 4-byte header */
		count = ((rscn->rscn_payload_len - 4) / 4);
		lp = (uint32_t *)ubp->ub_buffer + 1;
		for (i = 0; i < count; i++, lp++) {
			*lp = LE_SWAP32(*lp);
		}

		break;

	case ELS_CMD_FLOGI:
	case ELS_CMD_PLOGI:
	case ELS_CMD_FDISC:
	case ELS_CMD_PDISC:
		logi = (la_els_logi_t *)ubp->ub_buffer;
		emlxs_swap_service_params(
		    (SERV_PARM *)&logi->common_service);
		break;

	/* ULP handles this */
	case ELS_CMD_LOGO:
	case ELS_CMD_PRLI:
	case ELS_CMD_PRLO:
	case ELS_CMD_ADISC:
	default:
		break;
	}

	return;

} /* emlxs_swap_els_ub() */
11136
11137
11138 #endif /* EMLXS_MODREV2X */
11139
11140
11141 extern char *
11142 emlxs_mode_xlate(uint32_t mode)
11143 {
11144 static char buffer[32];
11145 uint32_t i;
11146 uint32_t count;
11147
11148 count = sizeof (emlxs_mode_table) / sizeof (emlxs_table_t);
11149 for (i = 0; i < count; i++) {
11150 if (mode == emlxs_mode_table[i].code) {
11151 return (emlxs_mode_table[i].string);
11152 }
11153 }
11154
11155 (void) snprintf(buffer, sizeof (buffer), "Unknown (%x)", mode);
11156 return (buffer);
11157
11158 } /* emlxs_mode_xlate() */
11159
11160
11161 extern char *
11162 emlxs_elscmd_xlate(uint32_t elscmd)
11163 {
11164 static char buffer[32];
11165 uint32_t i;
11166 uint32_t count;
11167
11168 count = sizeof (emlxs_elscmd_table) / sizeof (emlxs_table_t);
11169 for (i = 0; i < count; i++) {
11170 if (elscmd == emlxs_elscmd_table[i].code) {
11171 return (emlxs_elscmd_table[i].string);
11172 }
11173 }
11174
11175 (void) snprintf(buffer, sizeof (buffer), "ELS=0x%x", elscmd);
11176 return (buffer);
11177
11178 } /* emlxs_elscmd_xlate() */
11179
11180
11181 extern char *
11182 emlxs_ctcmd_xlate(uint32_t ctcmd)
11183 {
11184 static char buffer[32];
11185 uint32_t i;
11186 uint32_t count;
11187
11188 count = sizeof (emlxs_ctcmd_table) / sizeof (emlxs_table_t);
11189 for (i = 0; i < count; i++) {
11190 if (ctcmd == emlxs_ctcmd_table[i].code) {
11191 return (emlxs_ctcmd_table[i].string);
11192 }
11193 }
11194
11195 (void) snprintf(buffer, sizeof (buffer), "cmd=0x%x", ctcmd);
11196 return (buffer);
11197
11198 } /* emlxs_ctcmd_xlate() */
11199
11200
11201 #ifdef MENLO_SUPPORT
11202 extern char *
11203 emlxs_menlo_cmd_xlate(uint32_t cmd)
11204 {
11205 static char buffer[32];
11206 uint32_t i;
11207 uint32_t count;
11208
11209 count = sizeof (emlxs_menlo_cmd_table) / sizeof (emlxs_table_t);
11210 for (i = 0; i < count; i++) {
11211 if (cmd == emlxs_menlo_cmd_table[i].code) {
11212 return (emlxs_menlo_cmd_table[i].string);
11213 }
11214 }
11215
11216 (void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", cmd);
11217 return (buffer);
11218
11219 } /* emlxs_menlo_cmd_xlate() */
11220
11221 extern char *
11222 emlxs_menlo_rsp_xlate(uint32_t rsp)
11223 {
11224 static char buffer[32];
11225 uint32_t i;
11226 uint32_t count;
11227
11228 count = sizeof (emlxs_menlo_rsp_table) / sizeof (emlxs_table_t);
11229 for (i = 0; i < count; i++) {
11230 if (rsp == emlxs_menlo_rsp_table[i].code) {
11231 return (emlxs_menlo_rsp_table[i].string);
11232 }
11233 }
11234
11235 (void) snprintf(buffer, sizeof (buffer), "Rsp=0x%x", rsp);
11236 return (buffer);
11237
11238 } /* emlxs_menlo_rsp_xlate() */
11239
11240 #endif /* MENLO_SUPPORT */
11241
11242
11243 extern char *
11244 emlxs_rmcmd_xlate(uint32_t rmcmd)
11245 {
11246 static char buffer[32];
11247 uint32_t i;
11248 uint32_t count;
11249
11250 count = sizeof (emlxs_rmcmd_table) / sizeof (emlxs_table_t);
11251 for (i = 0; i < count; i++) {
11252 if (rmcmd == emlxs_rmcmd_table[i].code) {
11253 return (emlxs_rmcmd_table[i].string);
11254 }
11255 }
11256
11257 (void) snprintf(buffer, sizeof (buffer), "RM=0x%x", rmcmd);
11258 return (buffer);
11259
11260 } /* emlxs_rmcmd_xlate() */
11261
11262
11263
11264 extern char *
11265 emlxs_mscmd_xlate(uint16_t mscmd)
11266 {
11267 static char buffer[32];
11268 uint32_t i;
11269 uint32_t count;
11270
11271 count = sizeof (emlxs_mscmd_table) / sizeof (emlxs_table_t);
11272 for (i = 0; i < count; i++) {
11273 if (mscmd == emlxs_mscmd_table[i].code) {
11274 return (emlxs_mscmd_table[i].string);
11275 }
11276 }
11277
11278 (void) snprintf(buffer, sizeof (buffer), "Cmd=0x%x", mscmd);
11279 return (buffer);
11280
11281 } /* emlxs_mscmd_xlate() */
11282
11283
11284 extern char *
11285 emlxs_state_xlate(uint8_t state)
11286 {
11287 static char buffer[32];
11288 uint32_t i;
11289 uint32_t count;
11290
11291 count = sizeof (emlxs_state_table) / sizeof (emlxs_table_t);
11292 for (i = 0; i < count; i++) {
11293 if (state == emlxs_state_table[i].code) {
11294 return (emlxs_state_table[i].string);
11295 }
11296 }
11297
11298 (void) snprintf(buffer, sizeof (buffer), "State=0x%x", state);
11299 return (buffer);
11300
11301 } /* emlxs_state_xlate() */
11302
11303
11304 extern char *
11305 emlxs_error_xlate(uint8_t errno)
11306 {
11307 static char buffer[32];
11308 uint32_t i;
11309 uint32_t count;
11310
11311 count = sizeof (emlxs_error_table) / sizeof (emlxs_table_t);
11312 for (i = 0; i < count; i++) {
11313 if (errno == emlxs_error_table[i].code) {
11314 return (emlxs_error_table[i].string);
11315 }
11316 }
11317
11318 (void) snprintf(buffer, sizeof (buffer), "Errno=0x%x", errno);
11319 return (buffer);
11320
11321 } /* emlxs_error_xlate() */
11322
11323
11324 static int
11325 emlxs_pm_lower_power(dev_info_t *dip)
11326 {
11327 int ddiinst;
11328 int emlxinst;
11329 emlxs_config_t *cfg;
11330 int32_t rval;
11331 emlxs_hba_t *hba;
11332
11333 ddiinst = ddi_get_instance(dip);
11334 emlxinst = emlxs_get_instance(ddiinst);
11335 hba = emlxs_device.hba[emlxinst];
11336 cfg = &CFG;
11337
11338 rval = DDI_SUCCESS;
11339
11340 /* Lower the power level */
11341 if (cfg[CFG_PM_SUPPORT].current) {
11342 rval =
11343 pm_lower_power(dip, EMLXS_PM_ADAPTER,
11344 EMLXS_PM_ADAPTER_DOWN);
11345 } else {
11346 /* We do not have kernel support of power management enabled */
11347 /* therefore, call our power management routine directly */
11348 rval =
11349 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_DOWN);
11350 }
11351
11352 return (rval);
11353
11354 } /* emlxs_pm_lower_power() */
11355
11356
11357 static int
11358 emlxs_pm_raise_power(dev_info_t *dip)
11359 {
11360 int ddiinst;
11361 int emlxinst;
11362 emlxs_config_t *cfg;
11363 int32_t rval;
11364 emlxs_hba_t *hba;
11365
11366 ddiinst = ddi_get_instance(dip);
11367 emlxinst = emlxs_get_instance(ddiinst);
11368 hba = emlxs_device.hba[emlxinst];
11369 cfg = &CFG;
11370
11371 /* Raise the power level */
11372 if (cfg[CFG_PM_SUPPORT].current) {
11373 rval =
11374 pm_raise_power(dip, EMLXS_PM_ADAPTER,
11375 EMLXS_PM_ADAPTER_UP);
11376 } else {
11377 /* We do not have kernel support of power management enabled */
11378 /* therefore, call our power management routine directly */
11379 rval =
11380 emlxs_power(dip, EMLXS_PM_ADAPTER, EMLXS_PM_ADAPTER_UP);
11381 }
11382
11383 return (rval);
11384
11385 } /* emlxs_pm_raise_power() */
11386
11387
11388 #ifdef IDLE_TIMER
11389
/*
 * emlxs_pm_busy_component() - Mark the adapter busy for power
 * management so it is not powered down while I/O is active.
 *
 * Uses an unlocked fast-path check of hba->pm_busy followed by a
 * locked re-check so only one caller performs the busy transition.
 * On failure to notify the PM framework, pm_busy is rolled back so a
 * later call can retry.
 *
 * NOTE(review): 'dip' is not declared locally; presumably a macro
 * resolving to this hba's dev_info pointer -- confirm in the emlxs
 * headers.
 */
extern int
emlxs_pm_busy_component(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;
	int rval;

	/* Record activity for the idle timer */
	hba->pm_active = 1;

	/* Unlocked fast path: already marked busy */
	if (hba->pm_busy) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&EMLXS_PM_LOCK);

	/* Re-check under the lock in case we raced another caller */
	if (hba->pm_busy) {
		mutex_exit(&EMLXS_PM_LOCK);
		return (DDI_SUCCESS);
	}
	hba->pm_busy = 1;

	mutex_exit(&EMLXS_PM_LOCK);

	/* Attempt to notify system that we are busy */
	if (cfg[CFG_PM_SUPPORT].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "pm_busy_component.");

		rval = pm_busy_component(dip, EMLXS_PM_ADAPTER);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "pm_busy_component failed. ret=%d", rval);

			/* If this attempt failed then clear our flags */
			mutex_enter(&EMLXS_PM_LOCK);
			hba->pm_busy = 0;
			mutex_exit(&EMLXS_PM_LOCK);

			return (rval);
		}
	}

	return (DDI_SUCCESS);

} /* emlxs_pm_busy_component() */
11435
11436
/*
 * emlxs_pm_idle_component() - Mark the adapter idle for power
 * management.
 *
 * Mirror of emlxs_pm_busy_component(): unlocked fast-path check of
 * hba->pm_busy, then a locked re-check so only one caller performs
 * the idle transition.  On failure to notify the PM framework,
 * pm_busy is restored so a later call can retry.
 *
 * NOTE(review): 'dip' is not declared locally; presumably a macro
 * resolving to this hba's dev_info pointer -- confirm in the emlxs
 * headers.
 */
extern int
emlxs_pm_idle_component(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;
	int rval;

	/* Unlocked fast path: already idle */
	if (!hba->pm_busy) {
		return (DDI_SUCCESS);
	}

	mutex_enter(&EMLXS_PM_LOCK);

	/* Re-check under the lock in case we raced another caller */
	if (!hba->pm_busy) {
		mutex_exit(&EMLXS_PM_LOCK);
		return (DDI_SUCCESS);
	}
	hba->pm_busy = 0;

	mutex_exit(&EMLXS_PM_LOCK);

	if (cfg[CFG_PM_SUPPORT].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
		    "pm_idle_component.");

		rval = pm_idle_component(dip, EMLXS_PM_ADAPTER);

		if (rval != DDI_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
			    "pm_idle_component failed. ret=%d", rval);

			/* If this attempt failed then */
			/* reset our flags for another attempt */
			mutex_enter(&EMLXS_PM_LOCK);
			hba->pm_busy = 1;
			mutex_exit(&EMLXS_PM_LOCK);

			return (rval);
		}
	}

	return (DDI_SUCCESS);

} /* emlxs_pm_idle_component() */
11480
11481
/*
 * emlxs_pm_idle_timer() - Periodic PM housekeeping, driven by the
 * driver timer.
 *
 * If there was activity since the last tick (pm_active set by
 * emlxs_pm_busy_component()), clear the flag and rearm the idle
 * deadline.  Otherwise, once the deadline has passed, declare the
 * component idle and rearm.
 *
 * NOTE(review): pm_active and pm_idle_timer are read outside
 * EMLXS_PM_LOCK; the race appears benign (worst case one extra
 * rearm) -- confirm.
 */
extern void
emlxs_pm_idle_timer(emlxs_hba_t *hba)
{
	emlxs_config_t *cfg = &CFG;

	if (hba->pm_active) {
		/* Clear active flag and reset idle timer */
		mutex_enter(&EMLXS_PM_LOCK);
		hba->pm_active = 0;
		hba->pm_idle_timer =
		    hba->timer_tics + cfg[CFG_PM_IDLE].current;
		mutex_exit(&EMLXS_PM_LOCK);
	}

	/* Check for idle timeout */
	else if (hba->timer_tics >= hba->pm_idle_timer) {
		if (emlxs_pm_idle_component(hba) == DDI_SUCCESS) {
			mutex_enter(&EMLXS_PM_LOCK);
			hba->pm_idle_timer =
			    hba->timer_tics + cfg[CFG_PM_IDLE].current;
			mutex_exit(&EMLXS_PM_LOCK);
		}
	}

	return;

} /* emlxs_pm_idle_timer() */
11509
11510 #endif /* IDLE_TIMER */
11511
11512
11513 #if (EMLXS_MODREV >= EMLXS_MODREV3) && (EMLXS_MODREV <= EMLXS_MODREV4)
/*
 * emlxs_read_vport_prop() - Read and apply the driver's "vport"
 * configuration property.
 *
 * Each property entry is a string of the form
 *     <pwwpn>:<wwnn>:<wwpn>:<vpi>
 * where the three names are hex strings (two hex digits per NAME_TYPE
 * byte) and vpi is a decimal number.  The per-adapter property
 * "<driver><inst>-vport" takes precedence over the global "vport"
 * property.  Entries whose physical-port WWPN matches this adapter's
 * WWPN configure hba->port[vpi] with the given names, default symbolic
 * names, and the CONFIG/ENABLED (and optionally RESTRICTED) flags.
 * Malformed name bytes skip the entry; malformed delimiters or VPI
 * fields abort all remaining entries (goto out).
 */
static void
emlxs_read_vport_prop(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	char **arrayp;
	uint8_t *s;
	uint8_t *np;
	NAME_TYPE pwwpn;
	NAME_TYPE wwnn;
	NAME_TYPE wwpn;
	uint32_t vpi;
	uint32_t cnt;
	uint32_t rval;
	uint32_t i;
	uint32_t j;
	uint32_t c1;
	uint32_t sum;
	uint32_t errors;
	char buffer[64];

	/* Check for the per adapter vport setting */
	(void) snprintf(buffer, sizeof (buffer), "%s%d-vport", DRIVER_NAME,
	    hba->ddiinst);
	cnt = 0;
	arrayp = NULL;
	rval =
	    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
	    (DDI_PROP_DONTPASS), buffer, &arrayp, &cnt);

	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
		/* Check for the global vport setting */
		cnt = 0;
		arrayp = NULL;
		rval =
		    ddi_prop_lookup_string_array(DDI_DEV_T_ANY, hba->dip,
		    (DDI_PROP_DONTPASS), "vport", &arrayp, &cnt);
	}

	if ((rval != DDI_PROP_SUCCESS) || !cnt || !arrayp) {
		return;
	}

	for (i = 0; i < cnt; i++) {
		errors = 0;
		s = (uint8_t *)arrayp[i];

		if (!s) {
			break;
		}

		/* Parse the physical port WWPN (two hex chars per byte) */
		np = (uint8_t *)&pwwpn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			/* High nibble */
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid PWWPN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);
				errors++;
			}

			/* Low nibble */
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid PWWPN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);
				errors++;
			}

			*np++ = (uint8_t)sum;
		}

		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after PWWPN. "
			    "entry=%d", i);
			goto out;
		}

		/* Parse the vport WWNN */
		np = (uint8_t *)&wwnn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWNN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);
				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWNN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);
				errors++;
			}

			*np++ = (uint8_t)sum;
		}

		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after WWNN. "
			    "entry=%d", i);
			goto out;
		}

		/* Parse the vport WWPN */
		np = (uint8_t *)&wwpn;
		for (j = 0; j < sizeof (NAME_TYPE); j++) {
			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum = ((c1 - '0') << 4);
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum = ((c1 - 'a' + 10) << 4);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum = ((c1 - 'A' + 10) << 4);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWPN found. "
				    "entry=%d byte=%d hi_nibble=%c",
				    i, j, c1);

				errors++;
			}

			c1 = *s++;
			if ((c1 >= '0') && (c1 <= '9')) {
				sum |= (c1 - '0');
			} else if ((c1 >= 'a') && (c1 <= 'f')) {
				sum |= (c1 - 'a' + 10);
			} else if ((c1 >= 'A') && (c1 <= 'F')) {
				sum |= (c1 - 'A' + 10);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid WWPN found. "
				    "entry=%d byte=%d lo_nibble=%c",
				    i, j, c1);

				errors++;
			}

			*np++ = (uint8_t)sum;
		}

		if (*s++ != ':') {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_attach_debug_msg,
			    "Config error: Invalid delimiter after WWPN. "
			    "entry=%d", i);

			goto out;
		}

		/* Parse the decimal VPI terminating the entry */
		sum = 0;
		do {
			c1 = *s++;
			if ((c1 < '0') || (c1 > '9')) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_debug_msg,
				    "Config error: Invalid VPI found. "
				    "entry=%d c=%c vpi=%d", i, c1, sum);

				goto out;
			}

			sum = (sum * 10) + (c1 - '0');

		} while (*s != 0);

		vpi = sum;

		/* Skip entries with bad name bytes */
		if (errors) {
			continue;
		}

		/* Entry has been read */

		/* Check if the physical port wwpn */
		/* matches our physical port wwpn */
		if (bcmp((caddr_t)&hba->wwpn, (caddr_t)&pwwpn, 8)) {
			continue;
		}

		/* Check vpi range */
		if ((vpi == 0) || (vpi >= MAX_VPORTS)) {
			continue;
		}

		/* Check if port has already been configured */
		if (hba->port[vpi].flag & EMLXS_PORT_CONFIG) {
			continue;
		}

		/* Set the highest configured vpi */
		if (vpi > hba->vpi_high) {
			hba->vpi_high = vpi;
		}

		bcopy((caddr_t)&wwnn, (caddr_t)&hba->port[vpi].wwnn,
		    sizeof (NAME_TYPE));
		bcopy((caddr_t)&wwpn, (caddr_t)&hba->port[vpi].wwpn,
		    sizeof (NAME_TYPE));

		/* Default symbolic node name from the physical port */
		if (hba->port[vpi].snn[0] == 0) {
			(void) strncpy((caddr_t)hba->port[vpi].snn,
			    (caddr_t)hba->snn,
			    (sizeof (hba->port[vpi].snn)-1));
		}

		/* Default symbolic port name tagged with the vpi */
		if (hba->port[vpi].spn[0] == 0) {
			(void) snprintf((caddr_t)hba->port[vpi].spn,
			    sizeof (hba->port[vpi].spn),
			    "%s VPort-%d",
			    (caddr_t)hba->spn, vpi);
		}

		hba->port[vpi].flag |=
		    (EMLXS_PORT_CONFIG | EMLXS_PORT_ENABLED);

		if (cfg[CFG_VPORT_RESTRICTED].current) {
			hba->port[vpi].flag |= EMLXS_PORT_RESTRICTED;
		}
	}

out:

	/* Release the property array allocated by the DDI lookup */
	(void) ddi_prop_free((void *) arrayp);
	return;

} /* emlxs_read_vport_prop() */
11777 #endif /* EMLXS_MODREV3 || EMLXS_MODREV4 */
11778
11779
/*
 * emlxs_wwn_xlate() - Format an 8-byte WWN as 16 lowercase hex digits.
 *
 * The output is NUL-terminated and truncated if 'len' is too small.
 * Returns 'buffer' for caller convenience.
 */
extern char *
emlxs_wwn_xlate(char *buffer, size_t len, uint8_t *wwn)
{
	char hex[17];
	uint32_t i;

	/* Render each byte as two hex digits into a local scratch */
	for (i = 0; i < 8; i++) {
		(void) snprintf(&hex[i * 2], 3, "%02x", wwn[i] & 0xff);
	}

	/* Copy out with the caller's length limit applied */
	(void) snprintf(buffer, len, "%s", hex);

	return (buffer);

} /* emlxs_wwn_xlate() */
11790
11791
/*
 * emlxs_wwn_cmp() - Lexicographically compare two 8-byte WWNs.
 *
 * Returns 1 if wwn1 > wwn2, -1 if wwn1 < wwn2, and 0 if equal.
 */
extern int32_t
emlxs_wwn_cmp(uint8_t *wwn1, uint8_t *wwn2)
{
	int diff;

	/* memcmp compares as unsigned bytes, matching the original */
	/* byte-by-byte ordering; normalize the result to -1/0/1 */
	diff = memcmp(wwn1, wwn2, 8);

	return ((diff > 0) - (diff < 0));

} /* emlxs_wwn_cmp() */
11809
11810
/*
 * emlxs_ub_flush() - Drain the port's unsolicited-buffer wait queue.
 *
 * Called at port online and offline.  The whole queue is detached
 * under EMLXS_PORT_LOCK, then each buffer is either delivered to the
 * ULP (if the ULP is online) or released back to its pool.
 */
extern void
emlxs_ub_flush(emlxs_port_t *port)
{
	emlxs_hba_t *hba = HBA;
	fc_unsol_buf_t *ubp;
	emlxs_ub_priv_t *ub_priv;
	emlxs_ub_priv_t *next;

	/* Return if nothing to do */
	if (!port->ub_wait_head) {
		return;
	}

	/* Detach the entire wait list under the lock */
	mutex_enter(&EMLXS_PORT_LOCK);
	ub_priv = port->ub_wait_head;
	port->ub_wait_head = NULL;
	port->ub_wait_tail = NULL;
	mutex_exit(&EMLXS_PORT_LOCK);

	while (ub_priv) {
		next = ub_priv->next;
		ubp = ub_priv->ubp;

		/* Check if ULP is online and we have a callback function */
		if (port->ulp_statec != FC_STATE_OFFLINE) {
			/* Send ULP the ub buffer */
			emlxs_ulp_unsol_cb(port, ubp);
		} else {	/* Drop the buffer */
			(void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
		}

		ub_priv = next;

	}	/* while () */

	return;

} /* emlxs_ub_flush() */
11850
11851
/*
 * Complete an unsolicited buffer toward the ULP.
 *
 * If the ULP is online the buffer is delivered immediately; otherwise,
 * while the link is still up, the buffer is parked on the port's
 * ub_wait list (flushed later by emlxs_ub_flush()).  With the link
 * down the buffer is simply returned to the pool.
 *
 * NOTE(review): ub_priv->next is not cleared when appending to the
 * tail here — presumably it is NULL/reset elsewhere in the buffer
 * lifecycle; confirm before relying on list termination.
 */
extern void
emlxs_ub_callback(emlxs_port_t *port, fc_unsol_buf_t *ubp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_ub_priv_t *ub_priv;

	ub_priv = ubp->ub_fca_private;

	/* Check if ULP is online */
	if (port->ulp_statec != FC_STATE_OFFLINE) {
		emlxs_ulp_unsol_cb(port, ubp);

	} else { /* ULP offline */

		if (hba->state >= FC_LINK_UP) {
			/* Add buffer to queue tail */
			mutex_enter(&EMLXS_PORT_LOCK);

			if (port->ub_wait_tail) {
				port->ub_wait_tail->next = ub_priv;
			}
			port->ub_wait_tail = ub_priv;

			/* First entry also becomes the head */
			if (!port->ub_wait_head) {
				port->ub_wait_head = ub_priv;
			}

			mutex_exit(&EMLXS_PORT_LOCK);
		} else {
			/* Link down: no point queueing; release the buffer */
			(void) emlxs_fca_ub_release(port, 1, &ubp->ub_token);
		}
	}

	return;

} /* emlxs_ub_callback() */
11888
11889
11890 extern void
11891 emlxs_fca_link_up(emlxs_port_t *port)
11892 {
11893 emlxs_ulp_statec_cb(port, port->ulp_statec);
11894 return;
11895
11896 } /* emlxs_fca_link_up() */
11897
11898
11899 extern void
11900 emlxs_fca_link_down(emlxs_port_t *port)
11901 {
11902 emlxs_ulp_statec_cb(port, FC_STATE_OFFLINE);
11903 return;
11904
11905 } /* emlxs_fca_link_down() */
11906
11907
11908 static uint32_t
11909 emlxs_integrity_check(emlxs_hba_t *hba)
11910 {
11911 uint32_t size;
11912 uint32_t errors = 0;
11913 int ddiinst = hba->ddiinst;
11914
11915 size = 16;
11916 if (sizeof (ULP_BDL) != size) {
11917 cmn_err(CE_WARN, "?%s%d: ULP_BDL size incorrect. %d != 16",
11918 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDL));
11919
11920 errors++;
11921 }
11922 size = 8;
11923 if (sizeof (ULP_BDE) != size) {
11924 cmn_err(CE_WARN, "?%s%d: ULP_BDE size incorrect. %d != 8",
11925 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE));
11926
11927 errors++;
11928 }
11929 size = 12;
11930 if (sizeof (ULP_BDE64) != size) {
11931 cmn_err(CE_WARN, "?%s%d: ULP_BDE64 size incorrect. %d != 12",
11932 DRIVER_NAME, ddiinst, (int)sizeof (ULP_BDE64));
11933
11934 errors++;
11935 }
11936 size = 16;
11937 if (sizeof (HBQE_t) != size) {
11938 cmn_err(CE_WARN, "?%s%d: HBQE size incorrect. %d != 16",
11939 DRIVER_NAME, ddiinst, (int)sizeof (HBQE_t));
11940
11941 errors++;
11942 }
11943 size = 8;
11944 if (sizeof (HGP) != size) {
11945 cmn_err(CE_WARN, "?%s%d: HGP size incorrect. %d != 8",
11946 DRIVER_NAME, ddiinst, (int)sizeof (HGP));
11947
11948 errors++;
11949 }
11950 if (sizeof (PGP) != size) {
11951 cmn_err(CE_WARN, "?%s%d: PGP size incorrect. %d != 8",
11952 DRIVER_NAME, ddiinst, (int)sizeof (PGP));
11953
11954 errors++;
11955 }
11956 size = 4;
11957 if (sizeof (WORD5) != size) {
11958 cmn_err(CE_WARN, "?%s%d: WORD5 size incorrect. %d != 4",
11959 DRIVER_NAME, ddiinst, (int)sizeof (WORD5));
11960
11961 errors++;
11962 }
11963 size = 124;
11964 if (sizeof (MAILVARIANTS) != size) {
11965 cmn_err(CE_WARN, "?%s%d: MAILVARIANTS size incorrect. "
11966 "%d != 124", DRIVER_NAME, ddiinst,
11967 (int)sizeof (MAILVARIANTS));
11968
11969 errors++;
11970 }
11971 size = 128;
11972 if (sizeof (SLI1_DESC) != size) {
11973 cmn_err(CE_WARN, "?%s%d: SLI1_DESC size incorrect. %d != 128",
11974 DRIVER_NAME, ddiinst, (int)sizeof (SLI1_DESC));
11975
11976 errors++;
11977 }
11978 if (sizeof (SLI2_DESC) != size) {
11979 cmn_err(CE_WARN, "?%s%d: SLI2_DESC size incorrect. %d != 128",
11980 DRIVER_NAME, ddiinst, (int)sizeof (SLI2_DESC));
11981
11982 errors++;
11983 }
11984 size = MBOX_SIZE;
11985 if (sizeof (MAILBOX) != size) {
11986 cmn_err(CE_WARN, "?%s%d: MAILBOX size incorrect. %d != %d",
11987 DRIVER_NAME, ddiinst, (int)sizeof (MAILBOX), MBOX_SIZE);
11988
11989 errors++;
11990 }
11991 size = PCB_SIZE;
11992 if (sizeof (PCB) != size) {
11993 cmn_err(CE_WARN, "?%s%d: PCB size incorrect. %d != %d",
11994 DRIVER_NAME, ddiinst, (int)sizeof (PCB), PCB_SIZE);
11995
11996 errors++;
11997 }
11998 size = 260;
11999 if (sizeof (ATTRIBUTE_ENTRY) != size) {
12000 cmn_err(CE_WARN, "?%s%d: ATTRIBUTE_ENTRY size incorrect. "
12001 "%d != 260", DRIVER_NAME, ddiinst,
12002 (int)sizeof (ATTRIBUTE_ENTRY));
12003
12004 errors++;
12005 }
12006 size = SLI_SLIM1_SIZE;
12007 if (sizeof (SLIM1) != size) {
12008 cmn_err(CE_WARN, "?%s%d: SLIM1 size incorrect. %d != %d",
12009 DRIVER_NAME, ddiinst, (int)sizeof (SLIM1), SLI_SLIM1_SIZE);
12010
12011 errors++;
12012 }
12013 size = SLI3_IOCB_CMD_SIZE;
12014 if (sizeof (IOCB) != size) {
12015 cmn_err(CE_WARN, "?%s%d: IOCB size incorrect. %d != %d",
12016 DRIVER_NAME, ddiinst, (int)sizeof (IOCB),
12017 SLI3_IOCB_CMD_SIZE);
12018
12019 errors++;
12020 }
12021
12022 size = SLI_SLIM2_SIZE;
12023 if (sizeof (SLIM2) != size) {
12024 cmn_err(CE_WARN, "?%s%d: SLIM2 size incorrect. %d != %d",
12025 DRIVER_NAME, ddiinst, (int)sizeof (SLIM2),
12026 SLI_SLIM2_SIZE);
12027
12028 errors++;
12029 }
12030 return (errors);
12031
12032 } /* emlxs_integrity_check() */
12033
12034
12035 #ifdef FMA_SUPPORT
12036 /*
12037 * FMA support
12038 */
12039
/*
 * Initialize FMA support for this adapter instance based on the
 * capability bits in hba->fm_caps.
 *
 * Order matters: the access/DMA attribute templates are adjusted
 * before ddi_fm_init() registers with the framework, so that every
 * handle allocated afterwards inherits the flag-error settings.
 */
extern void
emlxs_fm_init(emlxs_hba_t *hba)
{
	ddi_iblock_cookie_t iblk;

	/* FMA disabled for this instance; nothing to set up */
	if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
		return;
	}

	/* Enable flag-based error checking on register/data access handles */
	if (DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
		emlxs_dev_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
		emlxs_data_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	}

	/*
	 * Propagate (or strip) DDI_DMA_FLAGERR on every DMA attribute
	 * template this driver uses, so future DMA handles match the
	 * negotiated DMA error-report capability.
	 */
	if (DDI_FM_DMA_ERR_CAP(hba->fm_caps)) {
		hba->dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
		hba->dma_attr_ro.dma_attr_flags |= DDI_DMA_FLAGERR;
		hba->dma_attr_1sg.dma_attr_flags |= DDI_DMA_FLAGERR;
		hba->dma_attr_fcip_rsp.dma_attr_flags |= DDI_DMA_FLAGERR;
	} else {
		hba->dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		hba->dma_attr_ro.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		hba->dma_attr_1sg.dma_attr_flags &= ~DDI_DMA_FLAGERR;
		hba->dma_attr_fcip_rsp.dma_attr_flags &= ~DDI_DMA_FLAGERR;
	}

	/* Register with the FM framework (may reduce hba->fm_caps) */
	ddi_fm_init(hba->dip, &hba->fm_caps, &iblk);

	/* PCI ereport setup is needed for both ereport and callback caps */
	if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
	    DDI_FM_ERRCB_CAP(hba->fm_caps)) {
		pci_ereport_setup(hba->dip);
	}

	/* Register the per-instance error-handling callback */
	if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
		ddi_fm_handler_register(hba->dip, emlxs_fm_error_cb,
		    (void *)hba);
	}

} /* emlxs_fm_init() */
12079
12080
12081 extern void
12082 emlxs_fm_fini(emlxs_hba_t *hba)
12083 {
12084 if (hba->fm_caps == DDI_FM_NOT_CAPABLE) {
12085 return;
12086 }
12087
12088 if (DDI_FM_EREPORT_CAP(hba->fm_caps) ||
12089 DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12090 pci_ereport_teardown(hba->dip);
12091 }
12092
12093 if (DDI_FM_ERRCB_CAP(hba->fm_caps)) {
12094 ddi_fm_handler_unregister(hba->dip);
12095 }
12096
12097 (void) ddi_fm_fini(hba->dip);
12098
12099 } /* emlxs_fm_fini() */
12100
12101
/*
 * Return (and clear) the FMA error status of a DDI access handle.
 *
 * Returns DDI_FM_OK when access-error reporting is not enabled, or
 * the handle's fme_status otherwise.  The runtime probes below guard
 * against older Solaris 10 releases with incomplete FMA support.
 */
extern int
emlxs_fm_check_acc_handle(emlxs_hba_t *hba, ddi_acc_handle_t handle)
{
	ddi_fm_error_t err;

	if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
		return (DDI_FM_OK);
	}

	/* Some S10 versions do not define the ahi_err structure */
	if (((ddi_acc_impl_t *)handle)->ahi_err == NULL) {
		return (DDI_FM_OK);
	}

	err.fme_status = DDI_FM_OK;
	(void) ddi_fm_acc_err_get(handle, &err, DDI_FME_VERSION);

	/* Some S10 versions do not define the ddi_fm_acc_err_clear function */
	/* NOTE(review): this relies on weak-symbol resolution making the */
	/* function address NULL when absent — confirm linkage assumptions */
	if ((void *)&ddi_fm_acc_err_clear != NULL) {
		(void) ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	}

	return (err.fme_status);

} /* emlxs_fm_check_acc_handle() */
12127
12128
12129 extern int
12130 emlxs_fm_check_dma_handle(emlxs_hba_t *hba, ddi_dma_handle_t handle)
12131 {
12132 ddi_fm_error_t err;
12133
12134 if (!DDI_FM_ACC_ERR_CAP(hba->fm_caps)) {
12135 return (DDI_FM_OK);
12136 }
12137
12138 err.fme_status = DDI_FM_OK;
12139 (void) ddi_fm_dma_err_get(handle, &err, DDI_FME_VERSION);
12140
12141 return (err.fme_status);
12142
12143 } /* emlxs_fm_check_dma_handle() */
12144
12145
12146 extern void
12147 emlxs_fm_ereport(emlxs_hba_t *hba, char *detail)
12148 {
12149 uint64_t ena;
12150 char buf[FM_MAX_CLASS];
12151
12152 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
12153 return;
12154 }
12155
12156 if (detail == NULL) {
12157 return;
12158 }
12159
12160 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
12161 ena = fm_ena_generate(0, FM_ENA_FMT1);
12162
12163 ddi_fm_ereport_post(hba->dip, buf, ena, DDI_NOSLEEP,
12164 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
12165
12166 } /* emlxs_fm_ereport() */
12167
12168
12169 extern void
12170 emlxs_fm_service_impact(emlxs_hba_t *hba, int impact)
12171 {
12172 if (!DDI_FM_EREPORT_CAP(hba->fm_caps)) {
12173 return;
12174 }
12175
12176 if (impact == NULL) {
12177 return;
12178 }
12179
12180 if ((hba->pm_state & EMLXS_PM_IN_DETACH) &&
12181 (impact == DDI_SERVICE_DEGRADED)) {
12182 impact = DDI_SERVICE_UNAFFECTED;
12183 }
12184
12185 ddi_fm_service_impact(hba->dip, impact);
12186
12187 return;
12188
12189 } /* emlxs_fm_service_impact() */
12190
12191
12192 /*
12193 * The I/O fault service error handling callback function
12194 */
12195 /*ARGSUSED*/
12196 extern int
12197 emlxs_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
12198 const void *impl_data)
12199 {
12200 /*
12201 * as the driver can always deal with an error
12202 * in any dma or access handle, we can just return
12203 * the fme_status value.
12204 */
12205 pci_ereport_post(dip, err, NULL);
12206 return (err->fme_status);
12207
12208 } /* emlxs_fm_error_cb() */
12209
12210 extern void
12211 emlxs_check_dma(emlxs_hba_t *hba, emlxs_buf_t *sbp)
12212 {
12213 emlxs_port_t *port = sbp->port;
12214 fc_packet_t *pkt = PRIV2PKT(sbp);
12215
12216 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
12217 if (emlxs_fm_check_dma_handle(hba,
12218 hba->sli.sli4.slim2.dma_handle)
12219 != DDI_FM_OK) {
12220 EMLXS_MSGF(EMLXS_CONTEXT,
12221 &emlxs_invalid_dma_handle_msg,
12222 "slim2: hdl=%p",
12223 hba->sli.sli4.slim2.dma_handle);
12224
12225 mutex_enter(&EMLXS_PORT_LOCK);
12226 hba->flag |= FC_DMA_CHECK_ERROR;
12227 mutex_exit(&EMLXS_PORT_LOCK);
12228 }
12229 } else {
12230 if (emlxs_fm_check_dma_handle(hba,
12231 hba->sli.sli3.slim2.dma_handle)
12232 != DDI_FM_OK) {
12233 EMLXS_MSGF(EMLXS_CONTEXT,
12234 &emlxs_invalid_dma_handle_msg,
12235 "slim2: hdl=%p",
12236 hba->sli.sli3.slim2.dma_handle);
12237
12238 mutex_enter(&EMLXS_PORT_LOCK);
12239 hba->flag |= FC_DMA_CHECK_ERROR;
12240 mutex_exit(&EMLXS_PORT_LOCK);
12241 }
12242 }
12243
12244 if (hba->flag & FC_DMA_CHECK_ERROR) {
12245 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12246 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12247 pkt->pkt_expln = FC_EXPLN_NONE;
12248 pkt->pkt_action = FC_ACTION_RETRYABLE;
12249 return;
12250 }
12251
12252 if (pkt->pkt_cmdlen) {
12253 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_cmd_dma)
12254 != DDI_FM_OK) {
12255 EMLXS_MSGF(EMLXS_CONTEXT,
12256 &emlxs_invalid_dma_handle_msg,
12257 "pkt_cmd_dma: hdl=%p",
12258 pkt->pkt_cmd_dma);
12259
12260 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12261 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12262 pkt->pkt_expln = FC_EXPLN_NONE;
12263 pkt->pkt_action = FC_ACTION_RETRYABLE;
12264
12265 return;
12266 }
12267 }
12268
12269 if (pkt->pkt_rsplen) {
12270 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_resp_dma)
12271 != DDI_FM_OK) {
12272 EMLXS_MSGF(EMLXS_CONTEXT,
12273 &emlxs_invalid_dma_handle_msg,
12274 "pkt_resp_dma: hdl=%p",
12275 pkt->pkt_resp_dma);
12276
12277 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12278 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12279 pkt->pkt_expln = FC_EXPLN_NONE;
12280 pkt->pkt_action = FC_ACTION_RETRYABLE;
12281
12282 return;
12283 }
12284 }
12285
12286 if (pkt->pkt_datalen) {
12287 if (emlxs_fm_check_dma_handle(hba, pkt->pkt_data_dma)
12288 != DDI_FM_OK) {
12289 EMLXS_MSGF(EMLXS_CONTEXT,
12290 &emlxs_invalid_dma_handle_msg,
12291 "pkt_data_dma: hdl=%p",
12292 pkt->pkt_data_dma);
12293
12294 pkt->pkt_state = FC_PKT_TRAN_ERROR;
12295 pkt->pkt_reason = FC_REASON_DMA_ERROR;
12296 pkt->pkt_expln = FC_EXPLN_NONE;
12297 pkt->pkt_action = FC_ACTION_RETRYABLE;
12298
12299 return;
12300 }
12301 }
12302
12303 return;
12304
12305 }
12306 #endif /* FMA_SUPPORT */
12307
12308
/*
 * Byte-swap a buffer in place, one 32-bit word at a time.
 * size must be a multiple of 4 (enforced by VERIFY).
 */
extern void
emlxs_swap32_buffer(uint8_t *buffer, uint32_t size)
{
	uint32_t *wptr;
	uint32_t count;
	uint32_t w;

	VERIFY((size % 4) == 0);

	wptr = (uint32_t *)buffer;

	for (count = size / 4; count != 0; count--) {
		w = *wptr;
		*wptr++ = SWAP32(w);
	}

} /* emlxs_swap32_buffer() */
12328
12329
/*
 * Copy src to dst, byte-swapping each 32-bit word along the way.
 * The regions must not overlap; size must be a multiple of 4
 * (enforced by VERIFY).
 */
extern void
emlxs_swap32_bcopy(uint8_t *src, uint8_t *dst, uint32_t size)
{
	uint32_t *sptr;
	uint32_t *dptr;
	uint32_t count;
	uint32_t w;

	VERIFY((size % 4) == 0);

	sptr = (uint32_t *)src;
	dptr = (uint32_t *)dst;

	for (count = size / 4; count != 0; count--) {
		w = *sptr++;
		*dptr++ = SWAP32(w);
	}

} /* emlxs_swap32_bcopy() */
12351
12352
/*
 * Uppercase an ASCII string in place (only 'a'-'z' are changed).
 * Returns str for call-chaining convenience.
 */
extern char *
emlxs_strtoupper(char *str)
{
	char *p;

	for (p = str; *p != '\0'; p++) {
		if ((*p >= 'a') && (*p <= 'z')) {
			*p = (char)(*p - ('a' - 'A'));
		}
	}

	return (str);

} /* emlxs_strtoupper() */
12368
12369
/*
 * Deliver a link state-change notification to the bound ULP.
 *
 * The ulp_busy count is raised under EMLXS_PORT_LOCK for the duration
 * of the callback so that emlxs_fca_unbind_port() can coordinate with
 * in-flight callbacks (presumably it waits for ulp_busy to drain —
 * confirm against the unbind path).
 */
extern void
emlxs_ulp_statec_cb(emlxs_port_t *port, uint32_t statec)
{
	emlxs_hba_t *hba = HBA;

	/* This routine coordinates protection with emlxs_fca_unbind_port() */

	/* Bail out if no initiator ULP is currently bound to this port */
	mutex_enter(&EMLXS_PORT_LOCK);
	if (!(port->flag & EMLXS_INI_BOUND)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	port->ulp_busy++;
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Invoke the ULP callback with the port lock dropped */
	port->ulp_statec_cb(port->ulp_handle, statec);

	mutex_enter(&EMLXS_PORT_LOCK);
	port->ulp_busy--;
	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_ulp_statec_cb() */
12394
12395
/*
 * Deliver an unsolicited buffer to the bound ULP.
 *
 * Mirrors emlxs_ulp_statec_cb(): the ulp_busy count is raised under
 * EMLXS_PORT_LOCK while the callback runs so that
 * emlxs_fca_unbind_port() can coordinate with in-flight callbacks.
 */
extern void
emlxs_ulp_unsol_cb(emlxs_port_t *port, fc_unsol_buf_t *ubp)
{
	emlxs_hba_t *hba = HBA;

	/* This routine coordinates protection with emlxs_fca_unbind_port() */

	/* Bail out if no initiator ULP is currently bound to this port */
	mutex_enter(&EMLXS_PORT_LOCK);
	if (!(port->flag & EMLXS_INI_BOUND)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}
	port->ulp_busy++;
	mutex_exit(&EMLXS_PORT_LOCK);

	/* Invoke the ULP callback with the port lock dropped */
	port->ulp_unsol_cb(port->ulp_handle, ubp, ubp->ub_frame.type);

	mutex_enter(&EMLXS_PORT_LOCK);
	port->ulp_busy--;
	mutex_exit(&EMLXS_PORT_LOCK);

	return;

} /* emlxs_ulp_unsol_cb() */