1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
26 */
27
28 #include <emlxs.h>
29
30
31 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
32 EMLXS_MSG_DEF(EMLXS_SLI4_C);
33
34 static int emlxs_sli4_init_extents(emlxs_hba_t *hba,
35 MAILBOXQ *mbq);
36 static uint32_t emlxs_sli4_read_status(emlxs_hba_t *hba);
37
38 static int emlxs_init_bootstrap_mb(emlxs_hba_t *hba);
39
40 static uint32_t emlxs_sli4_read_sema(emlxs_hba_t *hba);
41
42 static uint32_t emlxs_sli4_read_mbdb(emlxs_hba_t *hba);
43
44 static void emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint32_t value);
45
46 static void emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint32_t value);
47
48 static void emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint32_t value);
49
50 static void emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint32_t value);
51
52 static void emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint32_t value);
53
54 static int emlxs_sli4_create_queues(emlxs_hba_t *hba,
55 MAILBOXQ *mbq);
56 static int emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
57 MAILBOXQ *mbq);
58 static int emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
59 MAILBOXQ *mbq);
60
61 static int emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
62
63 static int emlxs_sli4_map_hdw(emlxs_hba_t *hba);
64
65 static void emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
66
67 static int32_t emlxs_sli4_online(emlxs_hba_t *hba);
68
69 static void emlxs_sli4_offline(emlxs_hba_t *hba,
70 uint32_t reset_requested);
71
72 static uint32_t emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
73 uint32_t skip_post, uint32_t quiesce);
74 static void emlxs_sli4_hba_kill(emlxs_hba_t *hba);
75
76 static uint32_t emlxs_sli4_hba_init(emlxs_hba_t *hba);
77
78 static uint32_t emlxs_sli4_bde_setup(emlxs_port_t *port,
79 emlxs_buf_t *sbp);
80
81 static void emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
82 CHANNEL *cp, IOCBQ *iocb_cmd);
83 static uint32_t emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
84 MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
85 static uint32_t emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
86 MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
87 #ifdef SFCT_SUPPORT
88 static uint32_t emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
89 emlxs_buf_t *cmd_sbp, int channel);
90 static uint32_t emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
91 emlxs_buf_t *sbp);
92 #endif /* SFCT_SUPPORT */
93
94 static uint32_t emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
95 emlxs_buf_t *sbp, int ring);
96 static uint32_t emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
97 emlxs_buf_t *sbp);
98 static uint32_t emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
99 emlxs_buf_t *sbp);
100 static uint32_t emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
101 emlxs_buf_t *sbp);
102 static void emlxs_sli4_poll_intr(emlxs_hba_t *hba);
103 static int32_t emlxs_sli4_intx_intr(char *arg);
104
105 #ifdef MSI_SUPPORT
106 static uint32_t emlxs_sli4_msi_intr(char *arg1, char *arg2);
107 #endif /* MSI_SUPPORT */
108
109 static void emlxs_sli4_resource_free(emlxs_hba_t *hba);
110
111 static int emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
112 extern void emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
113
114 static XRIobj_t *emlxs_sli4_alloc_xri(emlxs_port_t *port,
115 emlxs_buf_t *sbp, RPIobj_t *rpip,
116 uint32_t type);
117 static void emlxs_sli4_enable_intr(emlxs_hba_t *hba);
118
119 static void emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
120
121 static void emlxs_sli4_timer(emlxs_hba_t *hba);
122
123 static void emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
124
125 static void emlxs_sli4_gpio_timer_start(emlxs_hba_t *hba);
126
127 static void emlxs_sli4_gpio_timer_stop(emlxs_hba_t *hba);
128
129 static void emlxs_sli4_gpio_timer(void *arg);
130
131 static void emlxs_sli4_check_gpio(emlxs_hba_t *hba);
132
133 static uint32_t emlxs_sli4_fix_gpio(emlxs_hba_t *hba,
134 uint8_t *pin, uint8_t *pinval);
135
136 static uint32_t emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq);
137
138 static void emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
139
140 extern XRIobj_t *emlxs_sli4_reserve_xri(emlxs_port_t *port,
141 RPIobj_t *rpip, uint32_t type, uint16_t rx_id);
142 static int emlxs_check_hdw_ready(emlxs_hba_t *);
143
144 static uint32_t emlxs_sli4_reg_did(emlxs_port_t *port,
145 uint32_t did, SERV_PARM *param,
146 emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
147 IOCBQ *iocbq);
148
149 static uint32_t emlxs_sli4_unreg_node(emlxs_port_t *port,
150 emlxs_node_t *node, emlxs_buf_t *sbp,
151 fc_unsol_buf_t *ubp, IOCBQ *iocbq);
152
153 static void emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba,
154 CQE_ASYNC_t *cqe);
155 static void emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba,
156 CQE_ASYNC_t *cqe);
157
158
159 static uint16_t emlxs_sli4_rqid_to_index(emlxs_hba_t *hba,
160 uint16_t rqid);
161 static uint16_t emlxs_sli4_wqid_to_index(emlxs_hba_t *hba,
162 uint16_t wqid);
163 static uint16_t emlxs_sli4_cqid_to_index(emlxs_hba_t *hba,
164 uint16_t cqid);
165
166 /* Define SLI4 API functions */
167 emlxs_sli_api_t emlxs_sli4_api = {
168 emlxs_sli4_map_hdw,
169 emlxs_sli4_unmap_hdw,
170 emlxs_sli4_online,
171 emlxs_sli4_offline,
172 emlxs_sli4_hba_reset,
173 emlxs_sli4_hba_kill,
174 emlxs_sli4_issue_iocb_cmd,
175 emlxs_sli4_issue_mbox_cmd,
176 #ifdef SFCT_SUPPORT
177 emlxs_sli4_prep_fct_iocb,
178 #else
179 NULL,
180 #endif /* SFCT_SUPPORT */
181 emlxs_sli4_prep_fcp_iocb,
182 emlxs_sli4_prep_ip_iocb,
183 emlxs_sli4_prep_els_iocb,
184 emlxs_sli4_prep_ct_iocb,
185 emlxs_sli4_poll_intr,
186 emlxs_sli4_intx_intr,
187 emlxs_sli4_msi_intr,
188 emlxs_sli4_disable_intr,
189 emlxs_sli4_timer,
190 emlxs_sli4_poll_erratt,
191 emlxs_sli4_reg_did,
192 emlxs_sli4_unreg_node
193 };
194
195
196 /* ************************************************************************** */
197
198 static void
199 emlxs_sli4_set_default_params(emlxs_hba_t *hba)
200 {
201 emlxs_port_t *port = &PPORT;
202
203 bzero((char *)&hba->sli.sli4.param, sizeof (sli_params_t));
204
205 hba->sli.sli4.param.ProtocolType = 0x3; /* FC/FCoE */
206
207 hba->sli.sli4.param.SliHint2 = 0;
208 hba->sli.sli4.param.SliHint1 = 0;
209 hba->sli.sli4.param.IfType = 0;
210 hba->sli.sli4.param.SliFamily = 0;
211 hba->sli.sli4.param.Revision = 0x4; /* SLI4 */
212 hba->sli.sli4.param.FT = 0;
213
214 hba->sli.sli4.param.EqeCntMethod = 0x1; /* Bit pattern */
215 hba->sli.sli4.param.EqPageSize = 0x1; /* 4096 */
216 hba->sli.sli4.param.EqeSize = 0x1; /* 4 byte */
217 hba->sli.sli4.param.EqPageCnt = 8;
218 hba->sli.sli4.param.EqeCntMask = 0x1F; /* 256-4096 elements */
219
220 hba->sli.sli4.param.CqeCntMethod = 0x1; /* Bit pattern */
221 hba->sli.sli4.param.CqPageSize = 0x1; /* 4096 */
222 hba->sli.sli4.param.CQV = 0;
223 hba->sli.sli4.param.CqeSize = 0x3; /* 16 byte */
224 hba->sli.sli4.param.CqPageCnt = 4;
225 hba->sli.sli4.param.CqeCntMask = 0x70; /* 256-1024 elements */
226
227 hba->sli.sli4.param.MqeCntMethod = 0x1; /* Bit pattern */
228 hba->sli.sli4.param.MqPageSize = 0x1; /* 4096 */
229 hba->sli.sli4.param.MQV = 0;
230 hba->sli.sli4.param.MqPageCnt = 8;
231 hba->sli.sli4.param.MqeCntMask = 0x0F; /* 16-128 elements */
232
233 hba->sli.sli4.param.WqeCntMethod = 0; /* Page Count */
234 hba->sli.sli4.param.WqPageSize = 0x1; /* 4096 */
235 hba->sli.sli4.param.WQV = 0;
236 hba->sli.sli4.param.WqeSize = 0x5; /* 64 byte */
237 hba->sli.sli4.param.WqPageCnt = 4;
238 hba->sli.sli4.param.WqeCntMask = 0x10; /* 256 elements */
239
240 hba->sli.sli4.param.RqeCntMethod = 0; /* Page Count */
241 hba->sli.sli4.param.RqPageSize = 0x1; /* 4096 */
242 hba->sli.sli4.param.RQV = 0;
243 hba->sli.sli4.param.RqeSize = 0x2; /* 8 byte */
244 hba->sli.sli4.param.RqPageCnt = 8;
245 hba->sli.sli4.param.RqDbWin = 1;
246 hba->sli.sli4.param.RqeCntMask = 0x100; /* 4096 elements */
247
248 hba->sli.sli4.param.Loopback = 0xf; /* unsupported */
249 hba->sli.sli4.param.PHWQ = 0;
250 hba->sli.sli4.param.PHON = 0;
251 hba->sli.sli4.param.TRIR = 0;
252 hba->sli.sli4.param.TRTY = 0;
253 hba->sli.sli4.param.TCCA = 0;
254 hba->sli.sli4.param.MWQE = 0;
255 hba->sli.sli4.param.ASSI = 0;
256 hba->sli.sli4.param.TERP = 0;
257 hba->sli.sli4.param.TGT = 0;
258 hba->sli.sli4.param.AREG = 0;
259 hba->sli.sli4.param.FBRR = 0;
260 hba->sli.sli4.param.SGLR = 1;
261 hba->sli.sli4.param.HDRR = 1;
262 hba->sli.sli4.param.EXT = 0;
263 hba->sli.sli4.param.FCOE = 1;
264
265 hba->sli.sli4.param.SgeLength = (64 * 1024);
266 hba->sli.sli4.param.SglAlign = 0x7 /* 4096 */;
267 hba->sli.sli4.param.SglPageSize = 0x1; /* 4096 */
268 hba->sli.sli4.param.SglPageCnt = 2;
269
270 hba->sli.sli4.param.MinRqSize = 128;
271 hba->sli.sli4.param.MaxRqSize = 2048;
272
273 hba->sli.sli4.param.RPIMax = 0x3ff;
274 hba->sli.sli4.param.XRIMax = 0x3ff;
275 hba->sli.sli4.param.VFIMax = 0xff;
276 hba->sli.sli4.param.VPIMax = 0xff;
277
278 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
279 "Default SLI4 parameters set.");
280
281 } /* emlxs_sli4_set_default_params() */
282
283
284 /*
285 * emlxs_sli4_online()
286 *
287 * This routine will start initialization of the SLI4 HBA.
288 */
289 static int32_t
290 emlxs_sli4_online(emlxs_hba_t *hba)
291 {
292 emlxs_port_t *port = &PPORT;
293 emlxs_config_t *cfg;
294 emlxs_vpd_t *vpd;
295 MAILBOXQ *mbq = NULL;
296 MAILBOX4 *mb = NULL;
297 MATCHMAP *mp = NULL;
298 uint32_t i;
299 uint32_t j;
300 uint32_t rval = 0;
301 uint8_t *vpd_data;
302 uint32_t sli_mode;
303 uint8_t *outptr;
304 uint32_t status;
305 uint32_t fw_check;
306 uint32_t kern_update = 0;
307 emlxs_firmware_t hba_fw;
308 emlxs_firmware_t *fw;
309 uint16_t ssvid;
310 char buf[64];
311
312 cfg = &CFG;
313 vpd = &VPD;
314
315 sli_mode = EMLXS_HBA_SLI4_MODE;
316 hba->sli_mode = sli_mode;
317
318 /* Set the fw_check flag */
319 fw_check = cfg[CFG_FW_CHECK].current;
320
321 if ((fw_check & 0x04) ||
322 (hba->fw_flag & FW_UPDATE_KERNEL)) {
323 kern_update = 1;
324 }
325
326 hba->mbox_queue_flag = 0;
327 hba->fc_edtov = FF_DEF_EDTOV;
328 hba->fc_ratov = FF_DEF_RATOV;
329 hba->fc_altov = FF_DEF_ALTOV;
330 hba->fc_arbtov = FF_DEF_ARBTOV;
331
332 /* Networking not supported */
333 if (cfg[CFG_NETWORK_ON].current) {
334 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
335 "Networking is not supported in SLI4, turning it off");
336 cfg[CFG_NETWORK_ON].current = 0;
337 }
338
339 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
340 if (hba->chan_count > MAX_CHANNEL) {
341 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
342 "Max channels exceeded, dropping num-wq from %d to 1",
343 cfg[CFG_NUM_WQ].current);
344 cfg[CFG_NUM_WQ].current = 1;
345 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
346 }
347 hba->channel_fcp = 0; /* First channel */
348
349 /* Gen6 chips only support P2P topologies */
350 if ((hba->model_info.flags & EMLXS_FC_GEN6) &&
351 cfg[CFG_TOPOLOGY].current != 2) {
352 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
353 "Loop topologies are not supported by this HBA. "
354 "Forcing topology to P2P.");
355 cfg[CFG_TOPOLOGY].current = 2;
356 }
357
358 /* Default channel for everything else is the last channel */
359 hba->channel_ip = hba->chan_count - 1;
360 hba->channel_els = hba->chan_count - 1;
361 hba->channel_ct = hba->chan_count - 1;
362
363 hba->fc_iotag = 1;
364 hba->io_count = 0;
365 hba->channel_tx_count = 0;
366
367 /* Specific to ATTO G5 boards */
368 if (hba->model_info.flags & EMLXS_GPIO_LEDS) {
369 /* Set hard-coded GPIO pins */
370 if (hba->pci_function_number) {
371 hba->gpio_pin[EMLXS_GPIO_PIN_LO] = 27;
372 hba->gpio_pin[EMLXS_GPIO_PIN_HI] = 28;
373 hba->gpio_pin[EMLXS_GPIO_PIN_ACT] = 29;
374 hba->gpio_pin[EMLXS_GPIO_PIN_LASER] = 8;
375 } else {
376 hba->gpio_pin[EMLXS_GPIO_PIN_LO] = 13;
377 hba->gpio_pin[EMLXS_GPIO_PIN_HI] = 25;
378 hba->gpio_pin[EMLXS_GPIO_PIN_ACT] = 26;
379 hba->gpio_pin[EMLXS_GPIO_PIN_LASER] = 12;
380 }
381 }
382
383 /* Initialize the local dump region buffer */
384 bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
385 hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
386 hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG
387 | FC_MBUF_DMA32;
388 hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
389
390 (void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
391
392 if (hba->sli.sli4.dump_region.virt == NULL) {
393 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
394 "Unable to allocate dump region buffer.");
395
396 return (ENOMEM);
397 }
398
399 /*
400 * Get a buffer which will be used repeatedly for mailbox commands
401 */
402 mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
403
404 mb = (MAILBOX4 *)mbq;
405
406 reset:
407 /* Reset & Initialize the adapter */
408 if (emlxs_sli4_hba_init(hba)) {
409 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
410 "Unable to init hba.");
411
412 rval = EIO;
413 goto failed1;
414 }
415
416 #ifdef FMA_SUPPORT
417 /* Access handle validation */
418 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
419 case SLI_INTF_IF_TYPE_2:
420 if ((emlxs_fm_check_acc_handle(hba,
421 hba->pci_acc_handle) != DDI_FM_OK) ||
422 (emlxs_fm_check_acc_handle(hba,
423 hba->sli.sli4.bar0_acc_handle) != DDI_FM_OK)) {
424 EMLXS_MSGF(EMLXS_CONTEXT,
425 &emlxs_invalid_access_handle_msg, NULL);
426
427 rval = EIO;
428 goto failed1;
429 }
430 break;
431
432 default :
433 if ((emlxs_fm_check_acc_handle(hba,
434 hba->pci_acc_handle) != DDI_FM_OK) ||
435 (emlxs_fm_check_acc_handle(hba,
436 hba->sli.sli4.bar1_acc_handle) != DDI_FM_OK) ||
437 (emlxs_fm_check_acc_handle(hba,
438 hba->sli.sli4.bar2_acc_handle) != DDI_FM_OK)) {
439 EMLXS_MSGF(EMLXS_CONTEXT,
440 &emlxs_invalid_access_handle_msg, NULL);
441
442 rval = EIO;
443 goto failed1;
444 }
445 break;
446 }
447 #endif /* FMA_SUPPORT */
448
449 /*
450 * Setup and issue mailbox READ REV command
451 */
452 vpd->opFwRev = 0;
453 vpd->postKernRev = 0;
454 vpd->sli1FwRev = 0;
455 vpd->sli2FwRev = 0;
456 vpd->sli3FwRev = 0;
457 vpd->sli4FwRev = 0;
458
459 vpd->postKernName[0] = 0;
460 vpd->opFwName[0] = 0;
461 vpd->sli1FwName[0] = 0;
462 vpd->sli2FwName[0] = 0;
463 vpd->sli3FwName[0] = 0;
464 vpd->sli4FwName[0] = 0;
465
466 vpd->opFwLabel[0] = 0;
467 vpd->sli1FwLabel[0] = 0;
468 vpd->sli2FwLabel[0] = 0;
469 vpd->sli3FwLabel[0] = 0;
470 vpd->sli4FwLabel[0] = 0;
471
472 EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
473
474 emlxs_mb_get_sli4_params(hba, mbq);
475 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
476 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
477 "Unable to read parameters. Mailbox cmd=%x status=%x",
478 mb->mbxCommand, mb->mbxStatus);
479
480 /* Set param defaults */
481 emlxs_sli4_set_default_params(hba);
482
483 } else {
484 /* Save parameters */
485 bcopy((char *)&mb->un.varSLIConfig.payload,
486 (char *)&hba->sli.sli4.param, sizeof (sli_params_t));
487
488 emlxs_data_dump(port, "SLI_PARMS",
489 (uint32_t *)&hba->sli.sli4.param,
490 sizeof (sli_params_t), 0);
491 }
492
493 /* Reuse mbq from previous mbox */
494 bzero(mbq, sizeof (MAILBOXQ));
495
496 emlxs_mb_get_port_name(hba, mbq);
497 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
498 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
499 "Unable to get port names. Mailbox cmd=%x status=%x",
500 mb->mbxCommand, mb->mbxStatus);
501
502 bzero(hba->sli.sli4.port_name,
503 sizeof (hba->sli.sli4.port_name));
504 } else {
505 /* Save port names */
506 bcopy((char *)&mb->un.varSLIConfig.payload,
507 (char *)&hba->sli.sli4.port_name,
508 sizeof (hba->sli.sli4.port_name));
509 }
510
511 /* Reuse mbq from previous mbox */
512 bzero(mbq, sizeof (MAILBOXQ));
513
514 emlxs_mb_read_rev(hba, mbq, 0);
515 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
516 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
517 "Unable to read rev. Mailbox cmd=%x status=%x",
518 mb->mbxCommand, mb->mbxStatus);
519
520 rval = EIO;
521 goto failed1;
522
523 }
524
525 emlxs_data_dump(port, "RD_REV", (uint32_t *)mb, 18, 0);
526 if (mb->un.varRdRev4.sliLevel != 4) {
527 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
528 "Invalid read rev Version for SLI4: 0x%x",
529 mb->un.varRdRev4.sliLevel);
530
531 rval = EIO;
532 goto failed1;
533 }
534
535 switch (mb->un.varRdRev4.dcbxMode) {
536 case EMLXS_DCBX_MODE_CIN: /* Mapped to nonFIP mode */
537 hba->flag &= ~FC_FIP_SUPPORTED;
538 break;
539
540 case EMLXS_DCBX_MODE_CEE: /* Mapped to FIP mode */
541 hba->flag |= FC_FIP_SUPPORTED;
542 break;
543
544 default:
545 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
546 "Invalid read rev dcbx mode for SLI4: 0x%x",
547 mb->un.varRdRev4.dcbxMode);
548
549 rval = EIO;
550 goto failed1;
551 }
552
553 /* Set FC/FCoE mode */
554 if (mb->un.varRdRev4.FCoE) {
555 hba->sli.sli4.flag |= EMLXS_SLI4_FCOE_MODE;
556 } else {
557 hba->sli.sli4.flag &= ~EMLXS_SLI4_FCOE_MODE;
558 }
559
560 /* Save information as VPD data */
561 vpd->rBit = 1;
562
563 vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
564 bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
565
566 vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
567 bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
568
569 vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
570 bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
571
572 vpd->biuRev = mb->un.varRdRev4.HwRev1;
573 vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
574 vpd->fcphLow = mb->un.varRdRev4.fcphLow;
575 vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
576 vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
577
578 /* Decode FW labels */
579 if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
580 bcopy(vpd->postKernName, vpd->sli4FwName, 16);
581 }
582 emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0,
583 sizeof (vpd->sli4FwName));
584 emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0,
585 sizeof (vpd->opFwName));
586 emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0,
587 sizeof (vpd->postKernName));
588
589 if (hba->model_info.chip == EMLXS_BE2_CHIP) {
590 (void) strlcpy(vpd->sli4FwLabel, "be2.ufi",
591 sizeof (vpd->sli4FwLabel));
592 } else if (hba->model_info.chip == EMLXS_BE3_CHIP) {
593 (void) strlcpy(vpd->sli4FwLabel, "be3.ufi",
594 sizeof (vpd->sli4FwLabel));
595 } else if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
596 (void) strlcpy(vpd->sli4FwLabel, "xe201.grp",
597 sizeof (vpd->sli4FwLabel));
598 } else {
599 (void) strlcpy(vpd->sli4FwLabel, "sli4.fw",
600 sizeof (vpd->sli4FwLabel));
601 }
602
603 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
604 "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
605 vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
606 vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
607 mb->un.varRdRev4.dcbxMode);
608
609 /* No key information is needed for SLI4 products */
610
611 /* Get adapter VPD information */
612 vpd->port_index = (uint32_t)-1;
613
614 /* Reuse mbq from previous mbox */
615 bzero(mbq, sizeof (MAILBOXQ));
616
617 emlxs_mb_dump_vpd(hba, mbq, 0);
618 vpd_data = hba->sli.sli4.dump_region.virt;
619
620 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
621 MBX_SUCCESS) {
622 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
623 "No VPD found. status=%x", mb->mbxStatus);
624 } else {
625 EMLXS_MSGF(EMLXS_CONTEXT,
626 &emlxs_init_debug_msg,
627 "VPD dumped. rsp_cnt=%d status=%x",
628 mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
629
630 if (mb->un.varDmp4.rsp_cnt) {
631 EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
632 0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
633
634 #ifdef FMA_SUPPORT
635 if (hba->sli.sli4.dump_region.dma_handle) {
636 if (emlxs_fm_check_dma_handle(hba,
637 hba->sli.sli4.dump_region.dma_handle)
638 != DDI_FM_OK) {
639 EMLXS_MSGF(EMLXS_CONTEXT,
640 &emlxs_invalid_dma_handle_msg,
641 "sli4_online: hdl=%p",
642 hba->sli.sli4.dump_region.
643 dma_handle);
644 rval = EIO;
645 goto failed1;
646 }
647 }
648 #endif /* FMA_SUPPORT */
649
650 }
651 }
652
653 if (vpd_data[0]) {
654 (void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
655 mb->un.varDmp4.rsp_cnt);
656
657 /*
658 * If there is a VPD part number, and it does not
659 * match the current default HBA model info,
660 * replace the default data with an entry that
661 * does match.
662 *
663 * After emlxs_parse_vpd model holds the VPD value
664 * for V2 and part_num hold the value for PN. These
665 * 2 values are NOT necessarily the same.
666 */
667
668 rval = 0;
669 if ((vpd->model[0] != 0) &&
670 (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
671
672 /* First scan for a V2 match */
673
674 for (i = 1; i < emlxs_pci_model_count; i++) {
675 if (strcmp(&vpd->model[0],
676 emlxs_pci_model[i].model) == 0) {
677 bcopy(&emlxs_pci_model[i],
678 &hba->model_info,
679 sizeof (emlxs_model_t));
680 rval = 1;
681 break;
682 }
683 }
684 }
685
686 if (!rval && (vpd->part_num[0] != 0) &&
687 (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
688
689 /* Next scan for a PN match */
690
691 for (i = 1; i < emlxs_pci_model_count; i++) {
692 if (strcmp(&vpd->part_num[0],
693 emlxs_pci_model[i].model) == 0) {
694 bcopy(&emlxs_pci_model[i],
695 &hba->model_info,
696 sizeof (emlxs_model_t));
697 break;
698 }
699 }
700 }
701
702 /* HP CNA port indices start at 1 instead of 0 */
703 if (hba->model_info.chip & EMLXS_BE_CHIPS) {
704 ssvid = ddi_get16(hba->pci_acc_handle,
705 (uint16_t *)(hba->pci_addr + PCI_SSVID_REGISTER));
706
707 if ((ssvid == PCI_SSVID_HP) && (vpd->port_index > 0)) {
708 vpd->port_index--;
709 }
710 }
711
712 /*
713 * Now lets update hba->model_info with the real
714 * VPD data, if any.
715 */
716
717 /*
718 * Replace the default model description with vpd data
719 */
720 if (vpd->model_desc[0] != 0) {
721 (void) strncpy(hba->model_info.model_desc,
722 vpd->model_desc,
723 (sizeof (hba->model_info.model_desc)-1));
724 }
725
726 /* Replace the default model with vpd data */
727 if (vpd->model[0] != 0) {
728 (void) strncpy(hba->model_info.model, vpd->model,
729 (sizeof (hba->model_info.model)-1));
730 }
731
732 /* Replace the default program types with vpd data */
733 if (vpd->prog_types[0] != 0) {
734 emlxs_parse_prog_types(hba, vpd->prog_types);
735 }
736 }
737
738 /*
739 * Since the adapter model may have changed with the vpd data
740 * lets double check if adapter is not supported
741 */
742 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
743 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
744 "Unsupported adapter found. "
745 "Id:%d Device id:0x%x SSDID:0x%x Model:%s",
746 hba->model_info.id, hba->model_info.device_id,
747 hba->model_info.ssdid, hba->model_info.model);
748
749 rval = EIO;
750 goto failed1;
751 }
752
753 (void) strncpy(vpd->boot_version, vpd->sli4FwName,
754 (sizeof (vpd->boot_version)-1));
755
756 /* Get fcode version property */
757 emlxs_get_fcode_version(hba);
758
759 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
760 "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
761 vpd->opFwRev, vpd->sli1FwRev);
762
763 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
764 "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
765 vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
766
767 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
768 "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
769
770 /*
771 * If firmware checking is enabled and the adapter model indicates
772 * a firmware image, then perform firmware version check
773 */
774 hba->fw_flag = 0;
775 hba->fw_timer = 0;
776
777 if (((fw_check & 0x1) &&
778 (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
779 hba->model_info.fwid) ||
780 ((fw_check & 0x2) && hba->model_info.fwid)) {
781
782 /* Find firmware image indicated by adapter model */
783 fw = NULL;
784 for (i = 0; i < emlxs_fw_count; i++) {
785 if (emlxs_fw_table[i].id == hba->model_info.fwid) {
786 fw = &emlxs_fw_table[i];
787 break;
788 }
789 }
790
791 /*
792 * If the image was found, then verify current firmware
793 * versions of adapter
794 */
795 if (fw) {
796 /* Obtain current firmware version info */
797 if (hba->model_info.chip & EMLXS_BE_CHIPS) {
798 (void) emlxs_be_read_fw_version(hba, &hba_fw);
799 } else {
800 hba_fw.kern = vpd->postKernRev;
801 hba_fw.stub = vpd->opFwRev;
802 hba_fw.sli1 = vpd->sli1FwRev;
803 hba_fw.sli2 = vpd->sli2FwRev;
804 hba_fw.sli3 = vpd->sli3FwRev;
805 hba_fw.sli4 = vpd->sli4FwRev;
806 }
807
808 if (!kern_update &&
809 ((fw->kern && (hba_fw.kern != fw->kern)) ||
810 (fw->stub && (hba_fw.stub != fw->stub)))) {
811
812 hba->fw_flag |= FW_UPDATE_NEEDED;
813
814 } else if ((fw->kern && (hba_fw.kern != fw->kern)) ||
815 (fw->stub && (hba_fw.stub != fw->stub)) ||
816 (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
817 (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
818 (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
819 (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
820
821 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
822 "Firmware update needed. "
823 "Updating. id=%d fw=%d",
824 hba->model_info.id, hba->model_info.fwid);
825
826 #ifdef MODFW_SUPPORT
827 /*
828 * Load the firmware image now
829 * If MODFW_SUPPORT is not defined, the
830 * firmware image will already be defined
831 * in the emlxs_fw_table
832 */
833 emlxs_fw_load(hba, fw);
834 #endif /* MODFW_SUPPORT */
835
836 if (fw->image && fw->size) {
837 uint32_t rc;
838
839 rc = emlxs_fw_download(hba,
840 (char *)fw->image, fw->size, 0);
841 if ((rc != FC_SUCCESS) &&
842 (rc != EMLXS_REBOOT_REQUIRED)) {
843 EMLXS_MSGF(EMLXS_CONTEXT,
844 &emlxs_init_msg,
845 "Firmware update failed.");
846 hba->fw_flag |=
847 FW_UPDATE_NEEDED;
848 }
849 #ifdef MODFW_SUPPORT
850 /*
851 * Unload the firmware image from
852 * kernel memory
853 */
854 emlxs_fw_unload(hba, fw);
855 #endif /* MODFW_SUPPORT */
856
857 fw_check = 0;
858
859 goto reset;
860 }
861
862 hba->fw_flag |= FW_UPDATE_NEEDED;
863
864 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
865 "Firmware image unavailable.");
866 } else {
867 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
868 "Firmware update not needed.");
869 }
870 } else {
871 /*
872 * This means either the adapter database is not
873 * correct or a firmware image is missing from the
874 * compile
875 */
876 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
877 "Firmware image unavailable. id=%d fw=%d",
878 hba->model_info.id, hba->model_info.fwid);
879 }
880 }
881
882 /* Reuse mbq from previous mbox */
883 bzero(mbq, sizeof (MAILBOXQ));
884
885 emlxs_mb_dump_fcoe(hba, mbq, 0);
886
887 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
888 MBX_SUCCESS) {
889 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
890 "No FCOE info found. status=%x", mb->mbxStatus);
891 } else {
892 EMLXS_MSGF(EMLXS_CONTEXT,
893 &emlxs_init_debug_msg,
894 "FCOE info dumped. rsp_cnt=%d status=%x",
895 mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
896 (void) emlxs_parse_fcoe(hba,
897 (uint8_t *)hba->sli.sli4.dump_region.virt,
898 mb->un.varDmp4.rsp_cnt);
899 }
900
901 /* Reuse mbq from previous mbox */
902 bzero(mbq, sizeof (MAILBOXQ));
903
904 status = 0;
905 if (port->flag & EMLXS_INI_ENABLED) {
906 status |= SLI4_FEATURE_FCP_INITIATOR;
907 }
908 if (port->flag & EMLXS_TGT_ENABLED) {
909 status |= SLI4_FEATURE_FCP_TARGET;
910 }
911 if (cfg[CFG_NPIV_ENABLE].current) {
912 status |= SLI4_FEATURE_NPIV;
913 }
914 if (cfg[CFG_RQD_MODE].current) {
915 status |= SLI4_FEATURE_RQD;
916 }
917 if (cfg[CFG_PERF_HINT].current) {
918 if (hba->sli.sli4.param.PHON) {
919 status |= SLI4_FEATURE_PERF_HINT;
920 }
921 }
922
923 emlxs_mb_request_features(hba, mbq, status);
924
925 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
926 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
927 "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
928 mb->mbxCommand, mb->mbxStatus);
929
930 rval = EIO;
931 goto failed1;
932 }
933 emlxs_data_dump(port, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
934
935 /* Check to see if we get the features we requested */
936 if (status != mb->un.varReqFeatures.featuresEnabled) {
937
938 /* Just report descrepencies, don't abort the attach */
939
940 outptr = (uint8_t *)emlxs_request_feature_xlate(
941 mb->un.varReqFeatures.featuresRequested);
942 (void) strlcpy(buf, (char *)outptr, sizeof (buf));
943
944 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
945 "REQUEST_FEATURES: wanted:%s got:%s",
946 &buf[0], emlxs_request_feature_xlate(
947 mb->un.varReqFeatures.featuresEnabled));
948
949 }
950
951 if ((port->flag & EMLXS_INI_ENABLED) &&
952 !(mb->un.varReqFeatures.featuresEnabled &
953 SLI4_FEATURE_FCP_INITIATOR)) {
954 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
955 "Initiator mode not supported by adapter.");
956
957 rval = EIO;
958
959 #ifdef SFCT_SUPPORT
960 /* Check if we can fall back to just target mode */
961 if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
962 (mb->un.varReqFeatures.featuresEnabled &
963 SLI4_FEATURE_FCP_TARGET) &&
964 (cfg[CFG_DTM_ENABLE].current == 1) &&
965 (cfg[CFG_TARGET_MODE].current == 1)) {
966
967 cfg[CFG_DTM_ENABLE].current = 0;
968
969 EMLXS_MSGF(EMLXS_CONTEXT,
970 &emlxs_init_failed_msg,
971 "Disabling dynamic target mode. "
972 "Enabling target mode only.");
973
974 /* This will trigger the driver to reattach */
975 rval = EAGAIN;
976 }
977 #endif /* SFCT_SUPPORT */
978 goto failed1;
979 }
980
981 if ((port->flag & EMLXS_TGT_ENABLED) &&
982 !(mb->un.varReqFeatures.featuresEnabled &
983 SLI4_FEATURE_FCP_TARGET)) {
984 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
985 "Target mode not supported by adapter.");
986
987 rval = EIO;
988
989 #ifdef SFCT_SUPPORT
990 /* Check if we can fall back to just initiator mode */
991 if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
992 (mb->un.varReqFeatures.featuresEnabled &
993 SLI4_FEATURE_FCP_INITIATOR) &&
994 (cfg[CFG_DTM_ENABLE].current == 1) &&
995 (cfg[CFG_TARGET_MODE].current == 0)) {
996
997 cfg[CFG_DTM_ENABLE].current = 0;
998
999 EMLXS_MSGF(EMLXS_CONTEXT,
1000 &emlxs_init_failed_msg,
1001 "Disabling dynamic target mode. "
1002 "Enabling initiator mode only.");
1003
1004 /* This will trigger the driver to reattach */
1005 rval = EAGAIN;
1006 }
1007 #endif /* SFCT_SUPPORT */
1008 goto failed1;
1009 }
1010
1011 if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
1012 hba->flag |= FC_NPIV_ENABLED;
1013 }
1014
1015 if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_PERF_HINT) {
1016 hba->sli.sli4.flag |= EMLXS_SLI4_PHON;
1017 if (hba->sli.sli4.param.PHWQ) {
1018 hba->sli.sli4.flag |= EMLXS_SLI4_PHWQ;
1019 }
1020 }
1021
1022 /* Reuse mbq from previous mbox */
1023 bzero(mbq, sizeof (MAILBOXQ));
1024
1025 emlxs_mb_read_config(hba, mbq);
1026 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1027 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1028 "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
1029 mb->mbxCommand, mb->mbxStatus);
1030
1031 rval = EIO;
1032 goto failed1;
1033 }
1034 emlxs_data_dump(port, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
1035
1036 /* Set default extents */
1037 hba->sli.sli4.XRICount = mb->un.varRdConfig4.XRICount;
1038 hba->sli.sli4.XRIExtCount = 1;
1039 hba->sli.sli4.XRIExtSize = hba->sli.sli4.XRICount;
1040 hba->sli.sli4.XRIBase[0] = mb->un.varRdConfig4.XRIBase;
1041
1042 hba->sli.sli4.RPICount = mb->un.varRdConfig4.RPICount;
1043 hba->sli.sli4.RPIExtCount = 1;
1044 hba->sli.sli4.RPIExtSize = hba->sli.sli4.RPICount;
1045 hba->sli.sli4.RPIBase[0] = mb->un.varRdConfig4.RPIBase;
1046
1047 hba->sli.sli4.VPICount = mb->un.varRdConfig4.VPICount;
1048 hba->sli.sli4.VPIExtCount = 1;
1049 hba->sli.sli4.VPIExtSize = hba->sli.sli4.VPICount;
1050 hba->sli.sli4.VPIBase[0] = mb->un.varRdConfig4.VPIBase;
1051
1052 hba->sli.sli4.VFICount = mb->un.varRdConfig4.VFICount;
1053 hba->sli.sli4.VFIExtCount = 1;
1054 hba->sli.sli4.VFIExtSize = hba->sli.sli4.VFICount;
1055 hba->sli.sli4.VFIBase[0] = mb->un.varRdConfig4.VFIBase;
1056
1057 hba->sli.sli4.FCFICount = mb->un.varRdConfig4.FCFICount;
1058
1059 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1060 "CONFIG: xri:%d rpi:%d vpi:%d vfi:%d fcfi:%d",
1061 hba->sli.sli4.XRICount,
1062 hba->sli.sli4.RPICount,
1063 hba->sli.sli4.VPICount,
1064 hba->sli.sli4.VFICount,
1065 hba->sli.sli4.FCFICount);
1066
1067 if ((hba->sli.sli4.XRICount == 0) ||
1068 (hba->sli.sli4.RPICount == 0) ||
1069 (hba->sli.sli4.VPICount == 0) ||
1070 (hba->sli.sli4.VFICount == 0) ||
1071 (hba->sli.sli4.FCFICount == 0)) {
1072 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1073 "Invalid extent value(s) - xri:%d rpi:%d vpi:%d "
1074 "vfi:%d fcfi:%d",
1075 hba->sli.sli4.XRICount,
1076 hba->sli.sli4.RPICount,
1077 hba->sli.sli4.VPICount,
1078 hba->sli.sli4.VFICount,
1079 hba->sli.sli4.FCFICount);
1080
1081 rval = EIO;
1082 goto failed1;
1083 }
1084
1085 if (mb->un.varRdConfig4.extents) {
1086 if (emlxs_sli4_init_extents(hba, mbq)) {
1087 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1088 "Unable to initialize extents.");
1089
1090 rval = EIO;
1091 goto failed1;
1092 }
1093 }
1094
1095 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1096 "CONFIG: port_name:%c %c %c %c",
1097 hba->sli.sli4.port_name[0],
1098 hba->sli.sli4.port_name[1],
1099 hba->sli.sli4.port_name[2],
1100 hba->sli.sli4.port_name[3]);
1101
1102 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1103 "CONFIG: ldv:%d link_type:%d link_number:%d",
1104 mb->un.varRdConfig4.ldv,
1105 mb->un.varRdConfig4.link_type,
1106 mb->un.varRdConfig4.link_number);
1107
1108 if (mb->un.varRdConfig4.ldv) {
1109 hba->sli.sli4.link_number = mb->un.varRdConfig4.link_number;
1110 } else {
1111 hba->sli.sli4.link_number = (uint32_t)-1;
1112 }
1113
1114 if (hba->sli.sli4.VPICount) {
1115 hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
1116 }
1117
1118 /* Set the max node count */
1119 if (cfg[CFG_NUM_NODES].current > 0) {
1120 hba->max_nodes =
1121 min(cfg[CFG_NUM_NODES].current,
1122 hba->sli.sli4.RPICount);
1123 } else {
1124 hba->max_nodes = hba->sli.sli4.RPICount;
1125 }
1126
1127 /* Set the io throttle */
1128 hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
1129
1130 /* Set max_iotag */
1131 /* We add 1 in case all XRI's are non-zero */
1132 hba->max_iotag = hba->sli.sli4.XRICount + 1;
1133
1134 if (cfg[CFG_NUM_IOTAGS].current) {
1135 hba->max_iotag = min(hba->max_iotag,
1136 (uint16_t)cfg[CFG_NUM_IOTAGS].current);
1137 }
1138
1139 /* Set out-of-range iotag base */
1140 hba->fc_oor_iotag = hba->max_iotag;
1141
1142 /* Save the link speed capabilities */
1143 vpd->link_speed = (uint16_t)mb->un.varRdConfig4.lmt;
1144 emlxs_process_link_speed(hba);
1145
1146 /*
1147 * Allocate some memory for buffers
1148 */
1149 if (emlxs_mem_alloc_buffer(hba) == 0) {
1150 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1151 "Unable to allocate memory buffers.");
1152
1153 rval = ENOMEM;
1154 goto failed1;
1155 }
1156
1157 if (emlxs_sli4_resource_alloc(hba)) {
1158 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1159 "Unable to allocate resources.");
1160
1161 rval = ENOMEM;
1162 goto failed2;
1163 }
1164 emlxs_data_dump(port, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
1165 emlxs_sli4_zero_queue_stat(hba);
1166
1167 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1168 if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
1169 hba->fca_tran->fca_num_npivports = hba->vpi_max;
1170 }
1171 #endif /* >= EMLXS_MODREV5 */
1172
1173 /* Reuse mbq from previous mbox */
1174 bzero(mbq, sizeof (MAILBOXQ));
1175
1176 if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
1177 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1178 "Unable to post sgl pages.");
1179
1180 rval = EIO;
1181 goto failed3;
1182 }
1183
1184 /* Reuse mbq from previous mbox */
1185 bzero(mbq, sizeof (MAILBOXQ));
1186
1187 if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
1188 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1189 "Unable to post header templates.");
1190
1191 rval = EIO;
1192 goto failed3;
1193 }
1194
1195 /*
1196 * Add our interrupt routine to kernel's interrupt chain & enable it
1197 * If MSI is enabled this will cause Solaris to program the MSI address
1198 * and data registers in PCI config space
1199 */
1200 if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
1201 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1202 "Unable to add interrupt(s).");
1203
1204 rval = EIO;
1205 goto failed3;
1206 }
1207
1208 /* Reuse mbq from previous mbox */
1209 bzero(mbq, sizeof (MAILBOXQ));
1210
1211 /* This MUST be done after EMLXS_INTR_ADD */
1212 if (emlxs_sli4_create_queues(hba, mbq)) {
1213 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1214 "Unable to create queues.");
1215
1216 rval = EIO;
1217 goto failed3;
1218 }
1219
1220 EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
1221
1222 /* Get and save the current firmware version (based on sli_mode) */
1223 emlxs_decode_firmware_rev(hba, vpd);
1224
1225
1226 EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1227
1228 if (SLI4_FC_MODE) {
1229 /* Reuse mbq from previous mbox */
1230 bzero(mbq, sizeof (MAILBOXQ));
1231
1232 emlxs_mb_config_link(hba, mbq);
1233 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1234 MBX_SUCCESS) {
1235 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1236 "Unable to configure link. Mailbox cmd=%x "
1237 "status=%x",
1238 mb->mbxCommand, mb->mbxStatus);
1239
1240 rval = EIO;
1241 goto failed3;
1242 }
1243 }
1244
1245 /* Reuse mbq from previous mbox */
1246 bzero(mbq, sizeof (MAILBOXQ));
1247
1248 /*
1249 * We need to get login parameters for NID
1250 */
1251 (void) emlxs_mb_read_sparam(hba, mbq);
1252 mp = (MATCHMAP *)mbq->bp;
1253 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1254 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1255 "Unable to read parameters. Mailbox cmd=%x status=%x",
1256 mb->mbxCommand, mb->mbxStatus);
1257
1258 rval = EIO;
1259 goto failed3;
1260 }
1261
1262 /* Free the buffer since we were polling */
1263 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1264 mp = NULL;
1265
1266 /* If no serial number in VPD data, then use the WWPN */
1267 if (vpd->serial_num[0] == 0) {
1268 outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1269 for (i = 0; i < 12; i++) {
1270 status = *outptr++;
1271 j = ((status & 0xf0) >> 4);
1272 if (j <= 9) {
1273 vpd->serial_num[i] =
1274 (char)((uint8_t)'0' + (uint8_t)j);
1275 } else {
1276 vpd->serial_num[i] =
1277 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1278 }
1279
1280 i++;
1281 j = (status & 0xf);
1282 if (j <= 9) {
1283 vpd->serial_num[i] =
1284 (char)((uint8_t)'0' + (uint8_t)j);
1285 } else {
1286 vpd->serial_num[i] =
1287 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1288 }
1289 }
1290
1291 /*
1292 * Set port number and port index to zero
1293 * The WWN's are unique to each port and therefore port_num
1294 * must equal zero. This effects the hba_fru_details structure
1295 * in fca_bind_port()
1296 */
1297 vpd->port_num[0] = 0;
1298 vpd->port_index = 0;
1299
1300 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1301 "CONFIG: WWPN: port_index=0");
1302 }
1303
1304 /* Make final attempt to set a port index */
1305 if (vpd->port_index == (uint32_t)-1) {
1306 dev_info_t *p_dip;
1307 dev_info_t *c_dip;
1308
1309 p_dip = ddi_get_parent(hba->dip);
1310 c_dip = ddi_get_child(p_dip);
1311
1312 vpd->port_index = 0;
1313 while (c_dip && (hba->dip != c_dip)) {
1314 c_dip = ddi_get_next_sibling(c_dip);
1315
1316 if (strcmp(ddi_get_name(c_dip), "ethernet") == 0) {
1317 continue;
1318 }
1319
1320 vpd->port_index++;
1321 }
1322
1323 EMLXS_MSGF(EMLXS_CONTEXT,
1324 &emlxs_init_debug_msg,
1325 "CONFIG: Device tree: port_index=%d",
1326 vpd->port_index);
1327 }
1328
1329 if (vpd->port_num[0] == 0) {
1330 if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
1331 (void) snprintf(vpd->port_num,
1332 (sizeof (vpd->port_num)-1),
1333 "%d", vpd->port_index);
1334 }
1335 }
1336
1337 if (vpd->id[0] == 0) {
1338 (void) snprintf(vpd->id, (sizeof (vpd->id)-1),
1339 "%s %d",
1340 hba->model_info.model_desc, vpd->port_index);
1341
1342 }
1343
1344 if (vpd->manufacturer[0] == 0) {
1345 (void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
1346 (sizeof (vpd->manufacturer)-1));
1347 }
1348
1349 if (vpd->part_num[0] == 0) {
1350 (void) strncpy(vpd->part_num, hba->model_info.model,
1351 (sizeof (vpd->part_num)-1));
1352 }
1353
1354 if (vpd->model_desc[0] == 0) {
1355 (void) snprintf(vpd->model_desc, (sizeof (vpd->model_desc)-1),
1356 "%s %d",
1357 hba->model_info.model_desc, vpd->port_index);
1358 }
1359
1360 if (vpd->model[0] == 0) {
1361 (void) strncpy(vpd->model, hba->model_info.model,
1362 (sizeof (vpd->model)-1));
1363 }
1364
1365 if (vpd->prog_types[0] == 0) {
1366 emlxs_build_prog_types(hba, vpd);
1367 }
1368
1369 /* Create the symbolic names */
1370 (void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1371 "%s %s FV%s DV%s %s",
1372 hba->model_info.manufacturer, hba->model_info.model,
1373 hba->vpd.fw_version, emlxs_version,
1374 (char *)utsname.nodename);
1375
1376 (void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1377 "%s PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1378 hba->model_info.manufacturer,
1379 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1380 hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1381 hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1382
1383
1384 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1385 emlxs_sli4_enable_intr(hba);
1386
1387 /* Check persist-linkdown */
1388 if (cfg[CFG_PERSIST_LINKDOWN].current) {
1389 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1390 goto done;
1391 }
1392
1393 #ifdef SFCT_SUPPORT
1394 if ((port->mode == MODE_TARGET) &&
1395 !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1396 goto done;
1397 }
1398 #endif /* SFCT_SUPPORT */
1399
1400 /* Reuse mbq from previous mbox */
1401 bzero(mbq, sizeof (MAILBOXQ));
1402
1403 /*
1404 * Setup and issue mailbox INITIALIZE LINK command
1405 * At this point, the interrupt will be generated by the HW
1406 */
1407 emlxs_mb_init_link(hba, mbq,
1408 cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1409
1410 rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1411 if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1412 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1413 "Unable to initialize link. "
1414 "Mailbox cmd=%x status=%x",
1415 mb->mbxCommand, mb->mbxStatus);
1416
1417 rval = EIO;
1418 goto failed3;
1419 }
1420
1421 /* Wait for link to come up */
1422 i = cfg[CFG_LINKUP_DELAY].current;
1423 while (i && (hba->state < FC_LINK_UP)) {
1424 /* Check for hardware error */
1425 if (hba->state == FC_ERROR) {
1426 EMLXS_MSGF(EMLXS_CONTEXT,
1427 &emlxs_init_failed_msg,
1428 "Adapter error.", mb->mbxCommand,
1429 mb->mbxStatus);
1430
1431 rval = EIO;
1432 goto failed3;
1433 }
1434
1435 BUSYWAIT_MS(1000);
1436 i--;
1437 }
1438
1439 done:
1440 /*
1441 * The leadville driver will now handle the FLOGI at the driver level
1442 */
1443
1444 if (mbq) {
1445 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1446 mbq = NULL;
1447 mb = NULL;
1448 }
1449
1450 if (hba->model_info.flags & EMLXS_GPIO_LEDS)
1451 emlxs_sli4_gpio_timer_start(hba);
1452
1453 return (0);
1454
1455 failed3:
1456 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1457
1458 if (mp) {
1459 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1460 mp = NULL;
1461 }
1462
1463
1464 if (hba->intr_flags & EMLXS_MSI_ADDED) {
1465 (void) EMLXS_INTR_REMOVE(hba);
1466 }
1467
1468 emlxs_sli4_resource_free(hba);
1469
1470 failed2:
1471 (void) emlxs_mem_free_buffer(hba);
1472
1473 failed1:
1474 if (mbq) {
1475 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1476 mbq = NULL;
1477 mb = NULL;
1478 }
1479
1480 if (hba->sli.sli4.dump_region.virt) {
1481 (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1482 }
1483
1484 if (rval == 0) {
1485 rval = EIO;
1486 }
1487
1488 return (rval);
1489
1490 } /* emlxs_sli4_online() */
1491
1492
1493 static void
1494 emlxs_sli4_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1495 {
1496 /* Reverse emlxs_sli4_online */
1497
1498 if (hba->model_info.flags & EMLXS_GPIO_LEDS)
1499 emlxs_sli4_gpio_timer_stop(hba);
1500
1501 mutex_enter(&EMLXS_PORT_LOCK);
1502 if (hba->flag & FC_INTERLOCKED) {
1503 mutex_exit(&EMLXS_PORT_LOCK);
1504 goto killed;
1505 }
1506 mutex_exit(&EMLXS_PORT_LOCK);
1507
1508 if (reset_requested) {
1509 (void) emlxs_sli4_hba_reset(hba, 0, 0, 0);
1510 }
1511
1512 /* Shutdown the adapter interface */
1513 emlxs_sli4_hba_kill(hba);
1514
1515 killed:
1516
1517 /* Free SLI shared memory */
1518 emlxs_sli4_resource_free(hba);
1519
1520 /* Free driver shared memory */
1521 (void) emlxs_mem_free_buffer(hba);
1522
1523 /* Free the host dump region buffer */
1524 (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1525
1526 } /* emlxs_sli4_offline() */
1527
1528
/*ARGSUSED*/
static int
emlxs_sli4_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	dev_info_t *dip;
	ddi_device_acc_attr_t dev_attr;
	int status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	/*
	 * Map the PCI BARs and compute the doorbell/status register
	 * addresses the driver uses to communicate with the adapter.
	 * The BAR layout depends on the SLI interface type:
	 *   if_type 0 - semaphore in BAR1, doorbells in BAR2, error
	 *               registers in PCI config space;
	 *   if_type 2 - all registers in BAR0.
	 * Also allocates the bootstrap mailbox DMA buffer on first call.
	 * Returns 0 on success, ENOMEM on failure (after unmapping any
	 * partial mappings).
	 */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar1_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar1_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR1 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar1_addr, &dev_attr,
				    &hba->sli.sli4.bar1_acc_handle);
				goto failed;
			}
		}

		if (hba->sli.sli4.bar2_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar2_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup BAR2 failed. status=%x",
				    status);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr +
		    CSR_MPU_EP_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);

		/* if_type 0 has no SLIPORT status/control registers */
		hba->sli.sli4.STATUS_reg_addr = 0;
		hba->sli.sli4.CNTL_reg_addr = 0;

		/* Error registers live in PCI config space on if_type 0 */
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET);

		hba->sli.sli4.PHYSDEV_reg_addr = 0;
		break;

	case SLI_INTF_IF_TYPE_2:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar0_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR0_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar0_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar0_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR0 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar0_addr, &dev_attr,
				    &hba->sli.sli4.bar0_acc_handle);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		/* On if_type 2 every register is reached through BAR0 */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_RQ_DB_OFFSET);

		hba->sli.sli4.STATUS_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_STATUS_OFFSET);
		hba->sli.sli4.CNTL_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_CONTROL_OFFSET);
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR1_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR2_OFFSET);
		hba->sli.sli4.PHYSDEV_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    PHYSDEV_CONTROL_OFFSET);

		break;

	case SLI_INTF_IF_TYPE_1:
	case SLI_INTF_IF_TYPE_3:
	default:
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_attach_failed_msg,
		    "Map hdw: Unsupported if_type %08x",
		    (hba->sli_intf & SLI_INTF_IF_TYPE_MASK));

		goto failed;
	}

	if (hba->sli.sli4.bootstrapmb.virt == 0) {
		MBUF_INFO *buf_info;
		MBUF_INFO bufinfo;

		buf_info = &bufinfo;

		/* Allocate the page-aligned DMA buffer used for the */
		/* bootstrap mailbox plus its command extension area */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli4.bootstrapmb.virt = buf_info->virt;
		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
		    MBOX_EXTENSION_SIZE;
		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
		/* NOTE(review): only the mailbox portion is zeroed here; */
		/* the extension area is left as allocated -- confirm */
		/* callers always initialize it before use */
		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
		    EMLXS_BOOTSTRAP_MB_SIZE);
	}

	hba->chan_count = MAX_CHANNEL;

	return (0);

failed:

	/* Release any partial mappings/allocations made above */
	emlxs_sli4_unmap_hdw(hba);
	return (ENOMEM);


} /* emlxs_sli4_map_hdw() */
1702
1703
1704 /*ARGSUSED*/
1705 static void
1706 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1707 {
1708 MBUF_INFO bufinfo;
1709 MBUF_INFO *buf_info = &bufinfo;
1710
1711
1712 if (hba->sli.sli4.bar0_acc_handle) {
1713 ddi_regs_map_free(&hba->sli.sli4.bar0_acc_handle);
1714 hba->sli.sli4.bar0_acc_handle = 0;
1715 }
1716
1717 if (hba->sli.sli4.bar1_acc_handle) {
1718 ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1719 hba->sli.sli4.bar1_acc_handle = 0;
1720 }
1721
1722 if (hba->sli.sli4.bar2_acc_handle) {
1723 ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1724 hba->sli.sli4.bar2_acc_handle = 0;
1725 }
1726
1727 if (hba->sli.sli4.bootstrapmb.virt) {
1728 bzero(buf_info, sizeof (MBUF_INFO));
1729
1730 if (hba->sli.sli4.bootstrapmb.phys) {
1731 buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1732 buf_info->data_handle =
1733 hba->sli.sli4.bootstrapmb.data_handle;
1734 buf_info->dma_handle =
1735 hba->sli.sli4.bootstrapmb.dma_handle;
1736 buf_info->flags = FC_MBUF_DMA;
1737 }
1738
1739 buf_info->virt = hba->sli.sli4.bootstrapmb.virt;
1740 buf_info->size = hba->sli.sli4.bootstrapmb.size;
1741 emlxs_mem_free(hba, buf_info);
1742
1743 hba->sli.sli4.bootstrapmb.virt = NULL;
1744 }
1745
1746 return;
1747
1748 } /* emlxs_sli4_unmap_hdw() */
1749
1750
static int
emlxs_check_hdw_ready(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t i = 0;
	uint32_t err1;
	uint32_t err2;

	/*
	 * Poll the adapter until its firmware reports ready, checking
	 * once per second for up to 30 seconds.  Returns 0 when ready;
	 * a non-zero code (1, 2 or 3) on error, unsupported interface
	 * type, or timeout, after moving the HBA to FC_ERROR state
	 * where appropriate.
	 */
	/* Wait for reset completion */
	while (i < 30) {

		switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
		case SLI_INTF_IF_TYPE_0:
			/* if_type 0 reports POST progress through the */
			/* MPU/EP semaphore register */
			status = emlxs_sli4_read_sema(hba);

			/* Check to see if any errors occurred during init */
			if (status & ARM_POST_FATAL) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "SEMA Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_UNRECOVERABLE_ERROR) ==
			    ARM_UNRECOVERABLE_ERROR) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_POST_MASK) == ARM_POST_READY) {
				/* ARM Ready !! */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sli_detail_msg,
				    "ARM Ready: status=%x", status);

				return (0);
			}
			break;

		case SLI_INTF_IF_TYPE_2:
			/* if_type 2 reports readiness through the */
			/* SLIPORT_STATUS register */
			status = emlxs_sli4_read_status(hba);

			if (status & SLI_STATUS_READY) {
				if (!(status & SLI_STATUS_ERROR)) {
					/* ARM Ready !! */
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready: status=%x", status);

					return (0);
				}

				/* Ready-with-error: capture the SLIPORT */
				/* error registers for the log messages */
				err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR1_reg_addr);
				err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR2_reg_addr);

				if (status & SLI_STATUS_RESET_NEEDED) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready (Reset Needed): "
					    "status=%x err1=%x "
					    "err2=%x",
					    status, err1, err2);

					return (1);
				}

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x err1=%x "
				    "err2=%x",
				    status, err1, err2);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (2);
			}

			break;

		default:
			/* Unsupported interface type */
			EMLXS_STATE_CHANGE(hba, FC_ERROR);

			return (3);
		}

		BUSYWAIT_MS(1000);
		i++;
	}

	/* Timeout occurred */
	/* Read the error registers from wherever this if_type keeps them */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		err1 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;

	default:
		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;
	}

	/*
	 * NOTE(review): 'status' here holds the last value read in the
	 * polling loop; on if_type 0 that is a semaphore value, so
	 * testing it against the SLIPORT SLI_STATUS_ERROR bit only
	 * selects which message is logged -- confirm this is intended.
	 */
	if (status & SLI_STATUS_ERROR) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: Port Error: status=%x err1=%x err2=%x",
		    status, err1, err2);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: status=%x err1=%x err2=%x",
		    status, err1, err2);
	}

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	return (3);

} /* emlxs_check_hdw_ready() */
1883
1884
1885 static uint32_t
1886 emlxs_sli4_read_status(emlxs_hba_t *hba)
1887 {
1888 #ifdef FMA_SUPPORT
1889 emlxs_port_t *port = &PPORT;
1890 #endif /* FMA_SUPPORT */
1891 uint32_t status;
1892
1893 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1894 case SLI_INTF_IF_TYPE_2:
1895 status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1896 hba->sli.sli4.STATUS_reg_addr);
1897 #ifdef FMA_SUPPORT
1898 /* Access handle validation */
1899 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1900 #endif /* FMA_SUPPORT */
1901 break;
1902 default:
1903 status = 0;
1904 break;
1905 }
1906
1907 return (status);
1908
1909 } /* emlxs_sli4_read_status() */
1910
1911
1912 static uint32_t
1913 emlxs_sli4_read_sema(emlxs_hba_t *hba)
1914 {
1915 #ifdef FMA_SUPPORT
1916 emlxs_port_t *port = &PPORT;
1917 #endif /* FMA_SUPPORT */
1918 uint32_t status;
1919
1920 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1921 case SLI_INTF_IF_TYPE_0:
1922 status = ddi_get32(hba->sli.sli4.bar1_acc_handle,
1923 hba->sli.sli4.MPUEPSemaphore_reg_addr);
1924 #ifdef FMA_SUPPORT
1925 /* Access handle validation */
1926 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1927 #endif /* FMA_SUPPORT */
1928 break;
1929
1930 case SLI_INTF_IF_TYPE_2:
1931 status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1932 hba->sli.sli4.MPUEPSemaphore_reg_addr);
1933 #ifdef FMA_SUPPORT
1934 /* Access handle validation */
1935 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1936 #endif /* FMA_SUPPORT */
1937 break;
1938 default:
1939 status = 0;
1940 break;
1941 }
1942
1943 return (status);
1944
1945 } /* emlxs_sli4_read_sema() */
1946
1947
1948 static uint32_t
1949 emlxs_sli4_read_mbdb(emlxs_hba_t *hba)
1950 {
1951 #ifdef FMA_SUPPORT
1952 emlxs_port_t *port = &PPORT;
1953 #endif /* FMA_SUPPORT */
1954 uint32_t status;
1955
1956 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1957 case SLI_INTF_IF_TYPE_0:
1958 status = ddi_get32(hba->sli.sli4.bar2_acc_handle,
1959 hba->sli.sli4.MBDB_reg_addr);
1960
1961 #ifdef FMA_SUPPORT
1962 /* Access handle validation */
1963 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1964 #endif /* FMA_SUPPORT */
1965 break;
1966
1967 case SLI_INTF_IF_TYPE_2:
1968 status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1969 hba->sli.sli4.MBDB_reg_addr);
1970 #ifdef FMA_SUPPORT
1971 /* Access handle validation */
1972 EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1973 #endif /* FMA_SUPPORT */
1974 break;
1975 default:
1976 status = 0;
1977 break;
1978 }
1979
1980 return (status);
1981
1982 } /* emlxs_sli4_read_mbdb() */
1983
1984
1985 static void
1986 emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint32_t value)
1987 {
1988 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1989 case SLI_INTF_IF_TYPE_0:
1990 ddi_put32(hba->sli.sli4.bar2_acc_handle,
1991 hba->sli.sli4.MBDB_reg_addr, value);
1992 break;
1993
1994 case SLI_INTF_IF_TYPE_2:
1995 ddi_put32(hba->sli.sli4.bar0_acc_handle,
1996 hba->sli.sli4.MBDB_reg_addr, value);
1997 break;
1998 }
1999
2000 } /* emlxs_sli4_write_mbdb() */
2001
2002
2003 static void
2004 emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint32_t value)
2005 {
2006 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2007 case SLI_INTF_IF_TYPE_0:
2008 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2009 hba->sli.sli4.CQDB_reg_addr, value);
2010 break;
2011
2012 case SLI_INTF_IF_TYPE_2:
2013 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2014 hba->sli.sli4.CQDB_reg_addr, value);
2015 break;
2016 }
2017
2018 } /* emlxs_sli4_write_cqdb() */
2019
2020
2021 static void
2022 emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint32_t value)
2023 {
2024 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2025 case SLI_INTF_IF_TYPE_0:
2026 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2027 hba->sli.sli4.RQDB_reg_addr, value);
2028 break;
2029
2030 case SLI_INTF_IF_TYPE_2:
2031 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2032 hba->sli.sli4.RQDB_reg_addr, value);
2033 break;
2034 }
2035
2036 } /* emlxs_sli4_write_rqdb() */
2037
2038
2039 static void
2040 emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint32_t value)
2041 {
2042 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2043 case SLI_INTF_IF_TYPE_0:
2044 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2045 hba->sli.sli4.MQDB_reg_addr, value);
2046 break;
2047
2048 case SLI_INTF_IF_TYPE_2:
2049 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2050 hba->sli.sli4.MQDB_reg_addr, value);
2051 break;
2052 }
2053
2054 } /* emlxs_sli4_write_mqdb() */
2055
2056
2057 static void
2058 emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint32_t value)
2059 {
2060 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2061 case SLI_INTF_IF_TYPE_0:
2062 ddi_put32(hba->sli.sli4.bar2_acc_handle,
2063 hba->sli.sli4.WQDB_reg_addr, value);
2064 break;
2065
2066 case SLI_INTF_IF_TYPE_2:
2067 ddi_put32(hba->sli.sli4.bar0_acc_handle,
2068 hba->sli.sli4.WQDB_reg_addr, value);
2069 break;
2070 }
2071
2072 } /* emlxs_sli4_write_wqdb() */
2073
2074
2075 static uint32_t
2076 emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
2077 {
2078 emlxs_port_t *port = &PPORT;
2079 uint32_t status = 0;
2080 uint32_t err1;
2081 uint32_t err2;
2082
2083 /* Wait for reset completion, tmo is in 10ms ticks */
2084 while (tmo) {
2085 status = emlxs_sli4_read_mbdb(hba);
2086
2087 /* Check to see if any errors occurred during init */
2088 if (status & BMBX_READY) {
2089 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2090 "BMBX Ready: status=0x%x", status);
2091
2092 return (tmo);
2093 }
2094
2095 BUSYWAIT_MS(10);
2096 tmo--;
2097 }
2098
2099 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2100 case SLI_INTF_IF_TYPE_0:
2101 err1 = ddi_get32(hba->pci_acc_handle,
2102 hba->sli.sli4.ERR1_reg_addr);
2103 err2 = ddi_get32(hba->pci_acc_handle,
2104 hba->sli.sli4.ERR2_reg_addr);
2105 break;
2106
2107 default:
2108 err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2109 hba->sli.sli4.ERR1_reg_addr);
2110 err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2111 hba->sli.sli4.ERR2_reg_addr);
2112 break;
2113 }
2114
2115 /* Timeout occurred */
2116 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2117 "Timeout waiting for BMailbox: status=%x err1=%x err2=%x",
2118 status, err1, err2);
2119
2120 EMLXS_STATE_CHANGE(hba, FC_ERROR);
2121
2122 return (0);
2123
2124 } /* emlxs_check_bootstrap_ready() */
2125
2126
static uint32_t
emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;
	uint32_t *iptr;
	uint32_t addr30;

	/*
	 * This routine assumes the bootstrap mbox is loaded
	 * with the mailbox command to be executed.
	 *
	 * First, load the high 30 bits of bootstrap mailbox
	 */
	/* Post the upper portion of the mailbox physical address, */
	/* tagged with BMBX_ADDR_HI, to the bootstrap doorbell */
	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>32) & 0xfffffffc);
	addr30 |= BMBX_ADDR_HI;
	emlxs_sli4_write_mbdb(hba, addr30);

	/* Wait for the port to acknowledge before posting the low half; */
	/* tmo is in 10ms ticks and is decremented by the wait */
	tmo = emlxs_check_bootstrap_ready(hba, tmo);
	if (tmo == 0) {
		return (0);
	}

	/* Load the low 30 bits of bootstrap mailbox */
	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>2) & 0xfffffffc);
	emlxs_sli4_write_mbdb(hba, addr30);

	/* Wait for the port to complete the mailbox command */
	tmo = emlxs_check_bootstrap_ready(hba, tmo);
	if (tmo == 0) {
		return (0);
	}

	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;

	/* Log the first words of the completed mailbox for debugging */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "BootstrapMB: %p Completed %08x %08x %08x",
	    hba->sli.sli4.bootstrapmb.virt,
	    *iptr, *(iptr+1), *(iptr+2));

	/* Non-zero return == success, with the remaining timeout ticks */
	return (tmo);

} /* emlxs_issue_bootstrap_mb() */
2168
2169
static int
emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	uint32_t *iptr;
	uint32_t tmo;

	/*
	 * One-time initialization of the bootstrap mailbox: wait for the
	 * hardware to become ready, then issue the FW_INITIALIZE command
	 * through the bootstrap mailbox.  Returns 0 on success (or if
	 * already initialized), 1 on failure.
	 *
	 * NOTE(review): 'port' is declared only under FMA_SUPPORT but is
	 * referenced unconditionally by the emlxs_data_dump() call below
	 * -- confirm this builds without FMA_SUPPORT.
	 */
	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	if (hba->flag & FC_BOOTSTRAPMB_INIT) {
		return (0); /* Already initialized */
	}

	/* NOTE: tmo is in 10ms ticks */
	tmo = emlxs_check_bootstrap_ready(hba, 3000);
	if (tmo == 0) {
		return (1);
	}

	/* Issue FW_INITIALIZE command */

	/* Special words to initialize bootstrap mbox MUST be little endian */
	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
	*iptr = LE_SWAP32(FW_INITIALIZE_WORD0);
	*(iptr+1) = LE_SWAP32(FW_INITIALIZE_WORD1);

	/* Flush the command words to memory before ringing the doorbell */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);

	emlxs_data_dump(port, "FW_INIT", (uint32_t *)iptr, 6, 0);
	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
		return (1);
	}

#ifdef FMA_SUPPORT
	/* Verify no DMA faults occurred during the bootstrap exchange */
	if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "init_bootstrap_mb: hdl=%p",
		    hba->sli.sli4.bootstrapmb.dma_handle);
		return (1);
	}
#endif
	hba->flag |= FC_BOOTSTRAPMB_INIT;
	return (0);

} /* emlxs_init_bootstrap_mb() */
2222
2223
2224
2225
2226 static uint32_t
2227 emlxs_sli4_hba_init(emlxs_hba_t *hba)
2228 {
2229 int rc;
2230 uint16_t i;
2231 emlxs_port_t *vport;
2232 emlxs_config_t *cfg = &CFG;
2233 CHANNEL *cp;
2234 VPIobj_t *vpip;
2235
2236 /* Restart the adapter */
2237 if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
2238 return (1);
2239 }
2240
2241 for (i = 0; i < hba->chan_count; i++) {
2242 cp = &hba->chan[i];
2243 cp->iopath = (void *)&hba->sli.sli4.wq[i];
2244 }
2245
2246 /* Initialize all the port objects */
2247 hba->vpi_max = 0;
2248 for (i = 0; i < MAX_VPORTS; i++) {
2249 vport = &VPORT(i);
2250 vport->hba = hba;
2251 vport->vpi = i;
2252
2253 vpip = &vport->VPIobj;
2254 vpip->index = i;
2255 vpip->VPI = i;
2256 vpip->port = vport;
2257 vpip->state = VPI_STATE_OFFLINE;
2258 vport->vpip = vpip;
2259 }
2260
2261 /* Set the max node count */
2262 if (hba->max_nodes == 0) {
2263 if (cfg[CFG_NUM_NODES].current > 0) {
2264 hba->max_nodes = cfg[CFG_NUM_NODES].current;
2265 } else {
2266 hba->max_nodes = 4096;
2267 }
2268 }
2269
2270 rc = emlxs_init_bootstrap_mb(hba);
2271 if (rc) {
2272 return (rc);
2273 }
2274
2275 hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
2276 hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
2277 hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;
2278
2279 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_0) {
2280 /* Cache the UE MASK registers value for UE error detection */
2281 hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
2282 (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
2283 hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
2284 (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
2285 }
2286
2287 return (0);
2288
2289 } /* emlxs_sli4_hba_init() */
2290
2291
/*
 * Reset the SLI4 adapter and clear driver soft state back to defaults.
 *
 * The reset mechanism depends on the SLI interface type:
 *  - IF_TYPE_0: kill the port, re-init the bootstrap mailbox, then issue
 *    a reset-port mailbox command (polled).
 *  - IF_TYPE_2: kill the port and write SLI_CNTL_INIT_PORT to the
 *    control register, after first sampling the ERR1/ERR2 registers to
 *    decide whether a dump event should be logged afterwards.
 * The 'quiesce' argument selects a gentler path that skips the port kill
 * and uses the quiesce-safe mailbox call.  'restart' and 'skip_post' are
 * unused here (hence ARGSUSED).
 *
 * Returns 0 on success, 1 on failure.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
	uint32_t quiesce)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	CHANNEL *cp;
	emlxs_config_t *cfg = &CFG;
	MAILBOXQ mboxq;
	uint32_t value;
	uint32_t i;
	uint32_t rc;
	uint16_t channelno;
	uint32_t status;
	uint32_t err1;
	uint32_t err2;
	uint8_t generate_event = 0;

	/* Honor the reset-enable config knob; refusing leaves us in ERROR */
	if (!cfg[CFG_RESET_ENABLE].current) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Adapter reset disabled.");
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		return (1);
	}

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);

			/*
			 * Initalize Hardware that will be used to bring
			 * SLI4 online.
			 */
			rc = emlxs_init_bootstrap_mb(hba);
			if (rc) {
				return (rc);
			}
		}

		bzero((void *)&mboxq, sizeof (MAILBOXQ));
		emlxs_mb_resetport(hba, &mboxq);

		if (quiesce == 0) {
			/* Normal path: polled reset mailbox command */
			if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				/* Timeout occurred */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Timeout: RESET");
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		} else {
			/* Quiesce path: use the quiesce-safe mailbox call */
			if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
			    MBX_POLL, 0) != MBX_SUCCESS) {
				EMLXS_STATE_CHANGE(hba, FC_ERROR);
				/* Log a dump event - not supported */
				return (1);
			}
		}
		emlxs_data_dump(port, "resetPort", (uint32_t *)&mboxq, 12, 0);
		break;

	case SLI_INTF_IF_TYPE_2:
		if (quiesce == 0) {
			emlxs_sli4_hba_kill(hba);
		}

		rc = emlxs_check_hdw_ready(hba);
		if (rc > 1) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "Adapter not ready for reset.");
			return (1);
		}

		if (rc == 1) {
			/* Hardware reported an error; sample error regs */
			err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* Don't generate an event if dump was forced */
			if ((err1 != 0x2) || (err2 != 0x2)) {
				generate_event = 1;
			}
		}

		/* Reset the port now */

		mutex_enter(&EMLXS_PORT_LOCK);
		value = SLI_CNTL_INIT_PORT;

		ddi_put32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.CNTL_reg_addr, value);
		mutex_exit(&EMLXS_PORT_LOCK);

		break;
	}

	/* Reset the hba structure */
	hba->flag &= FC_RESET_MASK;

	/* Re-seed each channel's identity */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];
		cp->hba = hba;
		cp->channelno = channelno;
	}

	/* Clear per-adapter counters and timers */
	hba->channel_tx_count = 0;
	hba->io_count = 0;
	hba->iodone_count = 0;
	hba->topology = 0;
	hba->linkspeed = 0;
	hba->heartbeat_active = 0;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;
	hba->loopback_tics = 0;

	/* Specific to ATTO G5 boards */
	if (hba->model_info.flags & EMLXS_GPIO_LEDS) {
		/* Assume the boot driver enabled all LEDs */
		hba->gpio_current =
		    EMLXS_GPIO_LO | EMLXS_GPIO_HI | EMLXS_GPIO_ACT;
		hba->gpio_desired = 0;
		hba->gpio_bit = 0;
	}

	/* Reset the port objects */
	for (i = 0; i < MAX_VPORTS; i++) {
		vport = &VPORT(i);

		vport->flag &= EMLXS_PORT_RESET_MASK;
		vport->did = 0;
		vport->prev_did = 0;
		vport->lip_type = 0;
		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));

		/* Re-create the base node (DID 0xffffff, always active) */
		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
		vport->node_base.nlp_Rpi = 0;
		vport->node_base.nlp_DID = 0xffffff;
		vport->node_base.nlp_list_next = NULL;
		vport->node_base.nlp_list_prev = NULL;
		vport->node_base.nlp_active = 1;
		vport->node_count = 0;

		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
		}
	}

	if (emlxs_check_hdw_ready(hba)) {
		return (1);
	}

	/* Only set for IF_TYPE_2 when the pre-reset error regs indicated */
	/* a non-forced dump */
	if (generate_event) {
		status = emlxs_sli4_read_status(hba);
		if (status & SLI_STATUS_DUMP_IMAGE_PRESENT) {
			emlxs_log_dump_event(port, NULL, 0);
		}
	}

	return (0);

} /* emlxs_sli4_hba_reset */
2461
2462
2463 #define SGL_CMD 0
2464 #define SGL_RESP 1
2465 #define SGL_DATA 2
2466 #define SGL_LAST 0x80
2467
/*
 * Convert one segment (CMD, RESP or DATA) of an fc_packet into SLI4 SGEs.
 *
 * Each DMA cookie of the selected segment is chopped into SGEs of at most
 * EMLXS_MAX_SGE_SIZE bytes, bounded by the segment length.  SGEs are
 * "staged" in a local copy and only flushed into the SGL (byte-swapped)
 * when the NEXT one is built, so that the final SGE can have its 'last'
 * bit set before being written out.  If SGL_LAST is or'ed into sgl_type,
 * the final SGE is marked as the end of the list.
 *
 * 'sge' points at the next free slot in the SGL; the advanced pointer is
 * returned (NULL on bad sgl_type).  If 'pcnt' is non-NULL it receives the
 * total byte count consumed.  For SGL_DATA the running byte count is also
 * written into each SGE's offset field.
 *
 * NOTE(review): if the segment is empty (no cookies or size == 0) the
 * trailing copy below still writes one SGE built from partially
 * initialized stage_sge (addr/length never set) -- callers appear to only
 * invoke this for non-empty segments; confirm before relying on it.
 */
/*ARGSUSED*/
static ULP_SGE64 *
emlxs_pkt_to_sgl(emlxs_port_t *port, fc_packet_t *pkt, ULP_SGE64 *sge,
	uint32_t sgl_type, uint32_t *pcnt)
{
#ifdef DEBUG_SGE
	emlxs_hba_t *hba = HBA;
#endif /* DEBUG_SGE */
	ddi_dma_cookie_t *cp;
	uint_t i;
	uint_t last;
	int32_t size;
	int32_t sge_size;
	uint64_t sge_addr;
	int32_t len;
	uint32_t cnt;
	uint_t cookie_cnt;
	ULP_SGE64 stage_sge;

	/* Split the terminator flag off from the segment selector */
	last = sgl_type & SGL_LAST;
	sgl_type &= ~SGL_LAST;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	/* MODREV3+: packets carry explicit cookie arrays and counts */
	switch (sgl_type) {
	case SGL_CMD:
		cp = pkt->pkt_cmd_cookie;
		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
		size = (int32_t)pkt->pkt_cmdlen;
		break;

	case SGL_RESP:
		cp = pkt->pkt_resp_cookie;
		cookie_cnt = pkt->pkt_resp_cookie_cnt;
		size = (int32_t)pkt->pkt_rsplen;
		break;


	case SGL_DATA:
		cp = pkt->pkt_data_cookie;
		cookie_cnt = pkt->pkt_data_cookie_cnt;
		size = (int32_t)pkt->pkt_datalen;
		break;

	default:
		return (NULL);
	}

#else
	/* Older MODREV: a single embedded cookie per segment */
	switch (sgl_type) {
	case SGL_CMD:
		cp = &pkt->pkt_cmd_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_cmdlen;
		break;

	case SGL_RESP:
		cp = &pkt->pkt_resp_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_rsplen;
		break;


	case SGL_DATA:
		cp = &pkt->pkt_data_cookie;
		cookie_cnt = 1;
		size = (int32_t)pkt->pkt_datalen;
		break;

	default:
		return (NULL);
	}
#endif /* >= EMLXS_MODREV3 */

	stage_sge.offset = 0;
	stage_sge.type = 0;
	stage_sge.last = 0;
	cnt = 0;
	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {

		sge_size = cp->dmac_size;
		sge_addr = cp->dmac_laddress;
		while (sge_size && size) {
			if (cnt) {
				/* Copy staged SGE before we build next one */
				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
				    (uint8_t *)sge, sizeof (ULP_SGE64));
				sge++;
			}
			/* Clamp to the SGE max, the cookie, and the segment */
			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
			len = MIN(size, len);

			stage_sge.addrHigh =
			    PADDR_HI(sge_addr);
			stage_sge.addrLow =
			    PADDR_LO(sge_addr);
			stage_sge.length = len;
			if (sgl_type == SGL_DATA) {
				/* Data SGEs record their relative offset */
				stage_sge.offset = cnt;
			}
#ifdef DEBUG_SGE
			emlxs_data_dump(port, "SGE", (uint32_t *)&stage_sge,
			    4, 0);
#endif /* DEBUG_SGE */
			sge_addr += len;
			sge_size -= len;

			cnt += len;
			size -= len;
		}
	}

	/* Terminate the list if requested, then flush the final staged SGE */
	if (last) {
		stage_sge.last = 1;
	}
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
	    sizeof (ULP_SGE64));

	sge++;

	if (pcnt) {
		*pcnt = cnt;
	}
	return (sge);

} /* emlxs_pkt_to_sgl */
2593
2594
2595 /*ARGSUSED*/
2596 uint32_t
2597 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2598 {
2599 emlxs_hba_t *hba = HBA;
2600 fc_packet_t *pkt;
2601 XRIobj_t *xrip;
2602 ULP_SGE64 *sge;
2603 emlxs_wqe_t *wqe;
2604 IOCBQ *iocbq;
2605 ddi_dma_cookie_t *cp_cmd;
2606 ddi_dma_cookie_t *cp_data;
2607 uint64_t sge_addr;
2608 uint32_t cmd_cnt;
2609 uint32_t resp_cnt;
2610
2611 iocbq = (IOCBQ *) &sbp->iocbq;
2612 wqe = &iocbq->wqe;
2613 pkt = PRIV2PKT(sbp);
2614 xrip = sbp->xrip;
2615 sge = xrip->SGList->virt;
2616
2617 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2618 cp_cmd = pkt->pkt_cmd_cookie;
2619 cp_data = pkt->pkt_data_cookie;
2620 #else
2621 cp_cmd = &pkt->pkt_cmd_cookie;
2622 cp_data = &pkt->pkt_data_cookie;
2623 #endif /* >= EMLXS_MODREV3 */
2624
2625 iocbq = &sbp->iocbq;
2626 if (iocbq->flag & IOCB_FCP_CMD) {
2627
2628 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2629 return (1);
2630 }
2631
2632 /* CMD payload */
2633 sge = emlxs_pkt_to_sgl(port, pkt, sge, SGL_CMD, &cmd_cnt);
2634 if (! sge) {
2635 return (1);
2636 }
2637
2638 /* DATA payload */
2639 if (pkt->pkt_datalen != 0) {
2640 /* RSP payload */
2641 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2642 SGL_RESP, &resp_cnt);
2643 if (! sge) {
2644 return (1);
2645 }
2646
2647 /* Data payload */
2648 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2649 SGL_DATA | SGL_LAST, 0);
2650 if (! sge) {
2651 return (1);
2652 }
2653 sgl_done:
2654 if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
2655 sge_addr = cp_data->dmac_laddress;
2656 wqe->FirstData.addrHigh = PADDR_HI(sge_addr);
2657 wqe->FirstData.addrLow = PADDR_LO(sge_addr);
2658 wqe->FirstData.tus.f.bdeSize =
2659 cp_data->dmac_size;
2660 }
2661 } else {
2662 /* RSP payload */
2663 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2664 SGL_RESP | SGL_LAST, &resp_cnt);
2665 if (! sge) {
2666 return (1);
2667 }
2668 }
2669
2670 wqe->un.FcpCmd.Payload.addrHigh =
2671 PADDR_HI(cp_cmd->dmac_laddress);
2672 wqe->un.FcpCmd.Payload.addrLow =
2673 PADDR_LO(cp_cmd->dmac_laddress);
2674 wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
2675 wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
2676
2677 } else {
2678
2679 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2680 /* CMD payload */
2681 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2682 SGL_CMD | SGL_LAST, &cmd_cnt);
2683 if (! sge) {
2684 return (1);
2685 }
2686 } else {
2687 /* CMD payload */
2688 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2689 SGL_CMD, &cmd_cnt);
2690 if (! sge) {
2691 return (1);
2692 }
2693
2694 /* RSP payload */
2695 sge = emlxs_pkt_to_sgl(port, pkt, sge,
2696 SGL_RESP | SGL_LAST, &resp_cnt);
2697 if (! sge) {
2698 return (1);
2699 }
2700 wqe->un.GenReq.PayloadLength = cmd_cnt;
2701 }
2702
2703 wqe->un.GenReq.Payload.addrHigh =
2704 PADDR_HI(cp_cmd->dmac_laddress);
2705 wqe->un.GenReq.Payload.addrLow =
2706 PADDR_LO(cp_cmd->dmac_laddress);
2707 wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
2708 }
2709 return (0);
2710 } /* emlxs_sli4_bde_setup */
2711
2712
2713
2714
#ifdef SFCT_SUPPORT
/*
 * Build the SGL and WQE payload descriptors for a target-mode (SFCT)
 * data transfer.
 *
 * For CMD_FCP_TRECEIVE64_CX a small XFER_RDY buffer is allocated and
 * becomes SGE 0 (the WQE payload BDE also points at it); for
 * CMD_FCP_TSEND64_CX SGE 0 is a SKIP entry and the payload BDE points at
 * the first chunk of the data buffer.  SGE 1 is always a SKIP entry.
 * The data buffer (single sglist entry only) is then chopped into DATA
 * SGEs of at most EMLXS_MAX_SGE_SIZE bytes, with the last one flagged.
 *
 * Returns 0 on success (or if there is no fct buffer), 1 on failure.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	IOCB *iocb;
	IOCBQ *iocbq;
	MATCHMAP *mp;
	MATCHMAP *fct_mp;
	XRIobj_t *xrip;
	uint64_t sge_addr;
	uint32_t sge_size;
	uint32_t cnt;
	uint32_t len;
	uint32_t size;
	uint32_t *xrdy_vaddr;
	stmf_data_buf_t *dbuf;

	iocbq = &sbp->iocbq;
	iocb = &iocbq->iocb;
	wqe = &iocbq->wqe;
	xrip = sbp->xrip;

	/* Nothing to map without a target data buffer */
	if (!sbp->fct_buf) {
		return (0);
	}

	size = sbp->fct_buf->db_data_size;

	/*
	 * The hardware will automaticlly round up
	 * to multiple of 4.
	 *
	 * if (size & 3) {
	 *	size = (size + 3) & 0xfffffffc;
	 * }
	 */
	fct_mp = (MATCHMAP *)sbp->fct_buf->db_port_private;

	/* Only a single contiguous buffer is supported */
	if (sbp->fct_buf->db_sglist_length != 1) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
		    "fct_bde_setup: Only 1 sglist entry supported: %d",
		    sbp->fct_buf->db_sglist_length);
		return (1);
	}

	sge = xrip->SGList->virt;

	if (iocb->ULPCOMMAND == CMD_FCP_TRECEIVE64_CX) {

		/* TRECEIVE needs an XFER_RDY payload; allocate it here */
		mp = emlxs_mem_buf_alloc(hba, EMLXS_XFER_RDY_SIZE);
		if (!mp || !mp->virt || !mp->phys) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
			    "fct_bde_setup: Cannot allocate XRDY memory");
			return (1);
		}
		/* Save the MATCHMAP info to free this memory later */
		iocbq->bp = mp;

		/* Point to XRDY payload */
		xrdy_vaddr = (uint32_t *)(mp->virt);

		/* Fill in burstsize in payload */
		*xrdy_vaddr++ = 0;
		*xrdy_vaddr++ = LE_SWAP32(size);
		*xrdy_vaddr = 0;

		/* First 2 SGEs are XRDY and SKIP */
		stage_sge.addrHigh = PADDR_HI(mp->phys);
		stage_sge.addrLow = PADDR_LO(mp->phys);
		stage_sge.length = EMLXS_XFER_RDY_SIZE;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 0;

		/* Words 0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = EMLXS_XFER_RDY_SIZE;
		wqe->un.FcpCmd.PayloadLength = EMLXS_XFER_RDY_SIZE;

	} else {	/* CMD_FCP_TSEND64_CX */
		/* First 2 SGEs are SKIP */
		stage_sge.addrHigh = 0;
		stage_sge.addrLow = 0;
		stage_sge.length = 0;
		stage_sge.offset = 0;
		stage_sge.type = EMLXS_SGE_TYPE_SKIP;
		stage_sge.last = 0;

		/* Words 0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = PADDR_HI(fct_mp->phys);
		wqe->un.FcpCmd.Payload.addrLow = PADDR_LO(fct_mp->phys);

		/* The BDE should match the contents of the first SGE payload */
		len = MIN(EMLXS_MAX_SGE_SIZE, size);
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = len;

		/* The PayloadLength should be set to 0 for TSEND64. */
		wqe->un.FcpCmd.PayloadLength = 0;
	}

	dbuf = sbp->fct_buf;
	/*
	 * TotalTransferCount equals to Relative Offset field (Word 4)
	 * in both TSEND64 and TRECEIVE64 WQE.
	 */
	wqe->un.FcpCmd.TotalTransferCount = dbuf->db_relative_offset;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	/* SGE 1 is always a SKIP entry */
	stage_sge.addrHigh = 0;
	stage_sge.addrLow = 0;
	stage_sge.length = 0;
	stage_sge.offset = 0;
	stage_sge.type = EMLXS_SGE_TYPE_SKIP;
	stage_sge.last = 0;

	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));
	sge++;

	sge_size = size;
	sge_addr = fct_mp->phys;
	cnt = 0;

	/*
	 * Build SGEs: each is staged and flushed when the next one is
	 * built, so the final SGE can be flagged 'last' before writing.
	 */
	while (sge_size) {
		if (cnt) {
			/* Copy staged SGE before we build next one */
			BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
			    (uint8_t *)sge, sizeof (ULP_SGE64));
			sge++;
		}

		len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);

		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = len;
		stage_sge.offset = cnt;
		stage_sge.type = EMLXS_SGE_TYPE_DATA;

		sge_addr += len;
		sge_size -= len;
		cnt += len;
	}

	stage_sge.last = 1;

	/* Performance hint: FirstData mirrors the last staged data SGE */
	if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
		wqe->FirstData.addrHigh = stage_sge.addrHigh;
		wqe->FirstData.addrLow = stage_sge.addrLow;
		wqe->FirstData.tus.f.bdeSize = stage_sge.length;
	}
	/* Copy staged SGE into SGL */
	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
	    (uint8_t *)sge, sizeof (ULP_SGE64));

	return (0);

} /* emlxs_sli4_fct_bde_setup */
#endif /* SFCT_SUPPORT */
2886
2887
/*
 * Submit an IOCB (as a SLI4 WQE) on the given channel's work queue.
 *
 * The iocbq argument may be NULL, meaning "drain whatever is pending on
 * this channel's tx queue".  The function enforces adapter-level and
 * (optionally) node-level I/O throttles, and requeues work via
 * emlxs_tx_put() whenever the adapter/queue is busy.  For each WQE that
 * is actually issued: the WQE is byte-swapped into the WQ slot, the slot
 * is DMA-synced to the device, and the WQ doorbell is rung.
 *
 * Locking: EMLXS_QUE_LOCK(channelno) is held across the submit loop
 * (acquired with tryenter on the fast path); EMLXS_TX_CHANNEL_LOCK is
 * taken briefly for tx-queue manipulation.
 */
static void
emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	uint32_t channelno;
	int32_t throttle;
	emlxs_wqe_t *wqe;
	emlxs_wqe_t *wqeslot;
	WQ_DESC_t *wq;
	uint32_t flag;
	uint32_t wqdb;
	uint16_t next_wqe;
	off_t offset;
#ifdef NODE_THROTTLE_SUPPORT
	int32_t node_throttle;
	NODELIST *marked_node = NULL;
#endif /* NODE_THROTTLE_SUPPORT */


	channelno = cp->channelno;
	wq = (WQ_DESC_t *)cp->iopath;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "ISSUE WQE channel: %x %p", channelno, wq);
#endif /* DEBUG_FASTPATH */

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
			/* Not ready: park the request on the tx queue */
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_QUE_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			/* Under light load it is cheaper to wait for the */
			/* lock than to queue and rely on a later drain */
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {

				mutex_enter(&EMLXS_QUE_LOCK(channelno));
			}
		} else {
			return;
		}
	}
	/* EMLXS_QUE_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Check to see if we have room for this WQE */
	next_wqe = wq->host_index + 1;
	if (next_wqe >= wq->max_index) {
		next_wqe = 0;
	}

	/* Queue full when advancing host_index would hit port_index */
	if (next_wqe == wq->port_index) {
		/* Queue it for later */
		if (iocbq) {
			emlxs_tx_put(iocbq, 1);
		}
		goto busy;
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		iocbq = emlxs_tx_get(cp, 1);
	}

	/* NOTE(review): 'sendit' label is currently unreferenced */
sendit:
	/* Process each iocbq */
	while (iocbq) {
		sbp = iocbq->sbp;

#ifdef NODE_THROTTLE_SUPPORT
		if (sbp && sbp->node && sbp->node->io_throttle) {
			node_throttle = sbp->node->io_throttle -
			    sbp->node->io_active;
			if (node_throttle <= 0) {
				/* Node is busy */
				/* Queue this iocb and get next iocb from */
				/* channel */

				if (!marked_node) {
					marked_node = sbp->node;
				}

				mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
				emlxs_tx_put(iocbq, 0);

				/* Stop when we wrap back to the first */
				/* node we marked busy */
				if (cp->nodeq.q_first == marked_node) {
					mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
					goto busy;
				}

				iocbq = emlxs_tx_get(cp, 0);
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
				continue;
			}
		}
		marked_node = 0;
#endif /* NODE_THROTTLE_SUPPORT */

		wqe = &iocbq->wqe;
#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ISSUE QID %d WQE iotag:%x xri:%d", wq->qid,
		    wqe->RequestTag, wqe->XRITag);
#endif /* DEBUG_FASTPATH */

		if (sbp) {
			/* If exchange removed after wqe was prep'ed, drop it */
			if (!(sbp->xrip)) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "Xmit WQE iotag:%x xri:%d aborted",
				    wqe->RequestTag, wqe->XRITag);

				/* Get next iocb from the tx queue */
				iocbq = emlxs_tx_get(cp, 1);
				continue;
			}

			if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {

				/* Perform delay */
				if ((channelno == hba->channel_els) &&
				    !(iocbq->flag & IOCB_FCP_CMD)) {
					drv_usecwait(100000);
				} else {
					drv_usecwait(20000);
				}
			}

			/* Check for ULP pkt request */
			mutex_enter(&sbp->mtx);

			if (sbp->node == NULL) {
				/* Set node to base node by default */
				iocbq->node = (void *)&port->node_base;
				sbp->node = (void *)&port->node_base;
			}

			sbp->pkt_flags |= PACKET_IN_CHIPQ;
			mutex_exit(&sbp->mtx);

			atomic_inc_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_inc_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */

			sbp->xrip->flag |= EMLXS_XRI_PENDING_IO;
#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
			/* NOTE(review): 'icmd' is not declared in this */
			/* function; FCT_IO_TRACE builds would not compile */
			if (sbp->fct_cmd) {
				emlxs_fct_io_trace(port, sbp->fct_cmd,
				    EMLXS_FCT_IOCB_ISSUED);
				emlxs_fct_io_trace(port, sbp->fct_cmd,
				    icmd->ULPCOMMAND);
			}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */
			cp->hbaSendCmd_sbp++;
			iocbq->channel = cp;
		} else {
			cp->hbaSendCmd++;
		}

		/* Snapshot flag: iocbq must not be touched after doorbell */
		flag = iocbq->flag;

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		wq->release_depth--;
		if (wq->release_depth == 0) {
			/* Request a WQE-release completion periodically */
			wq->release_depth = WQE_RELEASE_DEPTH;
			wqe->WQEC = 1;
		}

		HBASTATS.IocbIssued[channelno]++;
		wq->num_proc++;

		/* Send the iocb */
		wqeslot = (emlxs_wqe_t *)wq->addr.virt;
		wqeslot += wq->host_index;

		wqe->CQId = wq->cqid;
		if (hba->sli.sli4.param.PHWQ) {
			WQE_PHWQ_WQID(wqe, wq->qid);
		}
		/* Byte-swap the WQE into the work queue slot */
		BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
		    sizeof (emlxs_wqe_t));
#ifdef DEBUG_WQE
		emlxs_data_dump(port, "WQE", (uint32_t *)wqe, 18, 0);
#endif /* DEBUG_WQE */
		offset = (off_t)((uint64_t)((unsigned long)
		    wq->addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		/* Flush the WQ to the device before ringing the doorbell */
		EMLXS_MPDATA_SYNC(wq->addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		/* Ring the WQ Doorbell */
		wqdb = wq->qid;
		wqdb |= ((1 << 24) | (wq->host_index << 16));

		/*
		 * After this, the sbp / iocb / wqe should not be
		 * accessed in the xmit path.
		 */

		emlxs_sli4_write_wqdb(hba, wqdb);
		wq->host_index = next_wqe;

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "WQ RING: %08x", wqdb);
#endif /* DEBUG_FASTPATH */

		/* Driver-internal iocbs have no sbp; release them now */
		if (!sbp) {
			emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
		}

		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Check to see if we have room for another WQE */
		next_wqe++;
		if (next_wqe >= wq->max_index) {
			next_wqe = 0;
		}

		if (next_wqe == wq->port_index) {
			/* Queue it for later */
			goto busy;
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	mutex_exit(&EMLXS_QUE_LOCK(channelno));

	return;

busy:
	/* Busy path: count the reason (throttled vs. ring full) and bail */
	wq->num_busy++;
	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

	mutex_exit(&EMLXS_QUE_LOCK(channelno));

	return;

} /* emlxs_sli4_issue_iocb_cmd() */
3196
3197
/*
 * Issue a mailbox command through the SLI4 Mailbox Queue (MQ).
 *
 * Embedded commands (everything except non-embedded MBX_SLI_CONFIG) are
 * byte-swapped directly into the MQ entry 'mqe'.  Non-embedded
 * SLI_CONFIG commands instead place an SGE (address/length of the
 * external payload buffer mbq->nonembed) into the mailbox header and
 * sync that buffer to the device first.  In both cases the MQ entry is
 * DMA-synced before the MQ doorbell is rung.  Completion is reported
 * asynchronously; 'tmo' is unused here (ARGSUSED).
 *
 * Always returns MBX_SUCCESS (doorbell write itself cannot fail).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mq(emlxs_port_t *port, MAILBOX4 *mqe, MAILBOX *mb,
	uint32_t tmo)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ *mbq;
	MAILBOX4 *mb4;
	MATCHMAP *mp;
	uint32_t *iptr;
	uint32_t mqdb;
	off_t offset;

	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;
	/* Remember where the completion handler should look for the MQE */
	hba->mbox_mqe = (void *)mqe;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the mailbox area.
		 */
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
		    4096, DDI_DMA_SYNC_FORDEV);

		/* Heartbeats are too frequent to be worth dumping */
		if (mb->mbxCommand != MBX_HEARTBEAT) {
			emlxs_data_dump(port, "MBOX CMD", (uint32_t *)mqe,
			    18, 0);
		}
	} else {
		/* SLI_CONFIG and non-embedded */

		/*
		 * If this is not embedded, the MQ area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		/* External payload goes to the device big-endian */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		/* MQ lives inside the slim2 DMA area; compute its offset */
		offset = (off_t)((uint64_t)((unsigned long)
		    hba->sli.sli4.mq.addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", (uint32_t *)mqe, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}

	/* Ring the MQ Doorbell */
	mqdb = hba->sli.sli4.mq.qid;
	mqdb |= ((1 << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK);

	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MQ RING: %08x", mqdb);
	}

	emlxs_sli4_write_mqdb(hba, mqdb);

	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_mq() */
3285
3286
/*
 * Issue a mailbox command through the bootstrap mailbox (used before the
 * MQ exists, and for special commands).
 *
 * Embedded commands are byte-swapped into the bootstrap mailbox area;
 * non-embedded SLI_CONFIG commands additionally place an SGE for the
 * external payload (mbq->nonembed) into the mailbox header and sync that
 * buffer out.  The command is then issued synchronously via
 * emlxs_issue_bootstrap_mb() (tmo in 10ms ticks), after which the
 * completed mailbox (and external payload, if any) is synced back and
 * byte-swapped into 'mb'.
 *
 * Returns MBX_SUCCESS, MBX_TIMEOUT, or (FMA builds) MBXERR_DMA_ERROR.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
{
	emlxs_port_t *port = &PPORT;
	MAILBOXQ *mbq;
	MAILBOX4 *mb4;
	MATCHMAP *mp = NULL;
	uint32_t *iptr;
	int nonembed = 0;	/* set for the FMA check on mp below */

	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;
	/* Completion handler reads the response from the bootstrap area */
	hba->mbox_mqe = hba->sli.sli4.bootstrapmb.virt;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the bootstrap mailbox area.
		 */
		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
		emlxs_data_dump(port, "MBOX CMD", iptr, 18, 0);
	} else {
		/*
		 * If this is not embedded, the bootstrap mailbox area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		nonembed = 1;
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		/* External payload goes to the device big-endian */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
		    DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", iptr, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys,
		    (uint32_t *)((uint8_t *)mp->virt));
		iptr = (uint32_t *)((uint8_t *)mp->virt);
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}


	/* NOTE: tmo is in 10ms ticks */
	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
		return (MBX_TIMEOUT);
	}

	/* Command completed: sync back and unswap the response */
	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
		    MAILBOX_CMD_SLI4_BSIZE);

		emlxs_data_dump(port, "MBOX CMP", iptr, 18, 0);

	} else {
		EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
		    EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
		    DDI_DMA_SYNC_FORKERNEL);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORKERNEL);

		/* Restore host byte order in the external payload */
		BE_SWAP32_BUFFER(mp->virt, mp->size);

		iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
		    MAILBOX_CMD_SLI4_BSIZE);

		emlxs_data_dump(port, "MBOX CMP", iptr, 12, 0);
		iptr = (uint32_t *)((uint8_t *)mp->virt);
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
	}

#ifdef FMA_SUPPORT
	if (nonembed && mp) {
		if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_issue_bootstrap: mp_hdl=%p",
			    mp->dma_handle);
			return (MBXERR_DMA_ERROR);
		}
	}

	if (emlxs_fm_check_dma_handle(hba,
	    hba->sli.sli4.bootstrapmb.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "sli4_issue_bootstrap: hdl=%p",
		    hba->sli.sli4.bootstrapmb.dma_handle);
		return (MBXERR_DMA_ERROR);
	}
#endif

	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_bootstrap() */
3415
3416
/*
 * Issue a mailbox command to a SLI4 adapter.
 *
 * hba  - adapter state
 * mbq  - mailbox command to issue (mbq->port defaults to the physical port)
 * flag - MBX_NOWAIT for asynchronous issue; any other value is remapped
 *        below to MBX_SLEEP (interrupts enabled) or MBX_POLL (disabled)
 * tmo  - caller's timeout in seconds; raised to a per-command minimum
 *
 * Returns MBX_SUCCESS, MBX_BUSY (command queued behind an active mailbox),
 * MBX_TIMEOUT, MBX_HARDWARE_ERROR, or the command's completion status.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
    uint32_t tmo)
{
	emlxs_port_t *port;
	MAILBOX4 *mb4;
	MAILBOX *mb;
	mbox_rsp_hdr_t *hdr_rsp;
	MATCHMAP *mp;
	uint32_t *iptr;
	uint32_t rc;
	uint32_t i;
	uint32_t tmo_local;

	/* Default to the physical port if the caller did not set one */
	if (!mbq->port) {
		mbq->port = &PPORT;
	}

	port = (emlxs_port_t *)mbq->port;

	/* Same buffer viewed through the SLI4 and generic mailbox layouts */
	mb4 = (MAILBOX4 *)mbq;
	mb = (MAILBOX *)mbq;

	mb->mbxStatus = MBX_SUCCESS;
	rc = MBX_SUCCESS;

	/* Check for minimum timeouts */
	switch (mb->mbxCommand) {
	/* Mailbox commands that erase/write flash */
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_WRITE_NV:
	case MBX_FLASH_WR_ULA:
	case MBX_DEL_LD_ENTRY:
	case MBX_LOAD_SM:
	case MBX_DUMP_MEMORY:
	case MBX_WRITE_VPARMS:
	case MBX_ACCESS_VDATA:
		if (tmo < 300) {
			tmo = 300;
		}
		break;

	case MBX_SLI_CONFIG: {
		mbox_req_hdr_t *hdr_req;

		/* SLI_CONFIG minimums depend on the embedded sub-command */
		hdr_req = (mbox_req_hdr_t *)
		    &mb4->un.varSLIConfig.be.un_hdr.hdr_req;

		if (hdr_req->subsystem == IOCTL_SUBSYSTEM_COMMON) {
			switch (hdr_req->opcode) {
			/* Flash/object/profile opcodes get the long timeout */
			case COMMON_OPCODE_WRITE_OBJ:
			case COMMON_OPCODE_READ_OBJ:
			case COMMON_OPCODE_READ_OBJ_LIST:
			case COMMON_OPCODE_DELETE_OBJ:
			case COMMON_OPCODE_SET_BOOT_CFG:
			case COMMON_OPCODE_GET_PROFILE_CFG:
			case COMMON_OPCODE_SET_PROFILE_CFG:
			case COMMON_OPCODE_GET_PROFILE_LIST:
			case COMMON_OPCODE_SET_ACTIVE_PROFILE:
			case COMMON_OPCODE_GET_PROFILE_CAPS:
			case COMMON_OPCODE_GET_MR_PROFILE_CAPS:
			case COMMON_OPCODE_SET_MR_PROFILE_CAPS:
			case COMMON_OPCODE_SET_FACTORY_PROFILE_CFG:
			case COMMON_OPCODE_SEND_ACTIVATION:
			case COMMON_OPCODE_RESET_LICENSES:
			case COMMON_OPCODE_SET_PHYSICAL_LINK_CFG_V1:
			case COMMON_OPCODE_GET_VPD_DATA:
				if (tmo < 300) {
					tmo = 300;
				}
				break;
			default:
				if (tmo < 30) {
					tmo = 30;
				}
			}
		} else if (hdr_req->subsystem == IOCTL_SUBSYSTEM_FCOE) {
			switch (hdr_req->opcode) {
			case FCOE_OPCODE_SET_FCLINK_SETTINGS:
				if (tmo < 300) {
					tmo = 300;
				}
				break;
			default:
				if (tmo < 30) {
					tmo = 30;
				}
			}
		} else {
			if (tmo < 30) {
				tmo = 30;
			}
		}

		/*
		 * Also: VENDOR_MANAGE_FFV (0x13, 0x02) (not currently used)
		 */

		break;
	}
	default:
		if (tmo < 30) {
			tmo = 30;
		}
		break;
	}

	/* Convert tmo seconds to 10 millisecond tics */
	tmo_local = tmo * 100;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Adjust wait flag */
	if (flag != MBX_NOWAIT) {
		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
			flag = MBX_SLEEP;
		} else {
			flag = MBX_POLL;
		}
	} else {
		/* Must have interrupts enabled to perform MBX_NOWAIT */
		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {

			mb->mbxStatus = MBX_HARDWARE_ERROR;
			mutex_exit(&EMLXS_PORT_LOCK);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Interrupts disabled. %s failed.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));

			return (MBX_HARDWARE_ERROR);
		}
	}

	/* Check for hardware error ; special case SLI_CONFIG */
	/* A COMMON_OPCODE_RESET is still allowed through on a dead HBA */
	if ((hba->flag & FC_HARDWARE_ERROR) &&
	    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
	    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
	    COMMON_OPCODE_RESET))) {
		mb->mbxStatus = MBX_HARDWARE_ERROR;

		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Hardware error reported. %s failed. status=%x mb=%p",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);

		return (MBX_HARDWARE_ERROR);
	}

	/* Another mailbox command is already active */
	if (hba->mbox_queue_flag) {
		/* If we are not polling, then queue it for later */
		if (flag == MBX_NOWAIT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Busy. %s: mb=%p NoWait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

			emlxs_mb_put(hba, mbq);

			HBASTATS.MboxBusy++;

			mutex_exit(&EMLXS_PORT_LOCK);

			return (MBX_BUSY);
		}

		/*
		 * Busy-wait in 10ms steps for the active mailbox to finish.
		 * The port lock is dropped while waiting and re-taken before
		 * each re-check of mbox_queue_flag.
		 */
		while (hba->mbox_queue_flag) {
			mutex_exit(&EMLXS_PORT_LOCK);

			if (tmo_local-- == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_event_msg,
				    "Timeout. %s: mb=%p tmo=%d Waiting.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    tmo);

				/* Non-lethalStatus mailbox timeout */
				/* Does not indicate a hardware error */
				mb->mbxStatus = MBX_TIMEOUT;
				return (MBX_TIMEOUT);
			}

			BUSYWAIT_MS(10);
			mutex_enter(&EMLXS_PORT_LOCK);

			/* Check for hardware error ; special case SLI_CONFIG */
			if ((hba->flag & FC_HARDWARE_ERROR) &&
			    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
			    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
			    COMMON_OPCODE_RESET))) {
				mb->mbxStatus = MBX_HARDWARE_ERROR;

				mutex_exit(&EMLXS_PORT_LOCK);

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Hardware error reported. %s failed. "
				    "status=%x mb=%p",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    mb->mbxStatus, mb);

				return (MBX_HARDWARE_ERROR);
			}
		}
	}

	/* Initialize mailbox area */
	emlxs_mb_init(hba, mbq, flag, tmo);

	if (mb->mbxCommand == MBX_DOWN_LINK) {
		hba->sli.sli4.flag |= EMLXS_SLI4_DOWN_LINK;
	}

	mutex_exit(&EMLXS_PORT_LOCK);
	switch (flag) {

	case MBX_NOWAIT:
		/* Async issue through the MQ; completion via interrupt */
		if (mb->mbxCommand != MBX_HEARTBEAT) {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Sending. %s: mb=%p NoWait. embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}

		/* Claim the next MQ entry; host_index wraps at max_index */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		if (mbq->bp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "BDE virt %p phys %p size x%x",
			    ((MATCHMAP *)mbq->bp)->virt,
			    ((MATCHMAP *)mbq->bp)->phys,
			    ((MATCHMAP *)mbq->bp)->size);
			emlxs_data_dump(port, "DATA",
			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
		}
		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);
		break;

	case MBX_POLL:
		/* Synchronous issue through the bootstrap mailbox */
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending. %s: mb=%p Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);

		/* Clean up the mailbox area */
		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout. %s: mb=%p tmo=%x Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));

			/* Bootstrap timeout is treated as fatal */
			hba->flag |= FC_MBOX_TIMEOUT;
			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed. %s: mb=%p status=%x Poll. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}

			/* Process the result */
			if (!(mbq->flag & MBQ_PASSTHRU)) {
				if (mbq->mbox_cmpl) {
					(void) (mbq->mbox_cmpl)(hba, mbq);
				}
			}

			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
		}

		/* A non-embedded payload can carry its own error status */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		/* Attempt to send pending mailboxes */
		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
		if (mbq) {
			/* Free the queued mailbox if it cannot be issued */
			i = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
				emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
			}
		}
		break;

	case MBX_SLEEP:
		/* Issue through the MQ, then block on the completion cv */
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending. %s: mb=%p Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		/* Claim the next MQ entry; host_index wraps at max_index */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);

		if (rc != MBX_SUCCESS) {
			break;
		}

		/* Wait for completion */
		/* The driver clock is timing the mailbox. */

		mutex_enter(&EMLXS_MBOX_LOCK);
		while (!(mbq->flag & MBQ_COMPLETED)) {
			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		/* A non-embedded payload can carry its own error status */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout. %s: mb=%p tmo=%x Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed. %s: mb=%p status=%x Sleep. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}
		break;
	}

	return (rc);

} /* emlxs_sli4_issue_mbox_cmd() */
3812
3813
3814
3815 /*ARGSUSED*/
3816 static uint32_t
3817 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3818 uint32_t tmo)
3819 {
3820 emlxs_port_t *port = &PPORT;
3821 MAILBOX *mb;
3822 mbox_rsp_hdr_t *hdr_rsp;
3823 MATCHMAP *mp;
3824 uint32_t rc;
3825 uint32_t tmo_local;
3826
3827 mb = (MAILBOX *)mbq;
3828
3829 mb->mbxStatus = MBX_SUCCESS;
3830 rc = MBX_SUCCESS;
3831
3832 if (tmo < 30) {
3833 tmo = 30;
3834 }
3835
3836 /* Convert tmo seconds to 10 millisecond tics */
3837 tmo_local = tmo * 100;
3838
3839 flag = MBX_POLL;
3840
3841 /* Check for hardware error */
3842 if (hba->flag & FC_HARDWARE_ERROR) {
3843 mb->mbxStatus = MBX_HARDWARE_ERROR;
3844 return (MBX_HARDWARE_ERROR);
3845 }
3846
3847 /* Initialize mailbox area */
3848 emlxs_mb_init(hba, mbq, flag, tmo);
3849
3850 switch (flag) {
3851
3852 case MBX_POLL:
3853
3854 rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
3855
3856 /* Clean up the mailbox area */
3857 if (rc == MBX_TIMEOUT) {
3858 hba->flag |= FC_MBOX_TIMEOUT;
3859 EMLXS_STATE_CHANGE(hba, FC_ERROR);
3860 emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3861
3862 } else {
3863 /* Process the result */
3864 if (!(mbq->flag & MBQ_PASSTHRU)) {
3865 if (mbq->mbox_cmpl) {
3866 (void) (mbq->mbox_cmpl)(hba, mbq);
3867 }
3868 }
3869
3870 emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3871 }
3872
3873 mp = (MATCHMAP *)mbq->nonembed;
3874 if (mp) {
3875 hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
3876 if (hdr_rsp->status) {
3877 EMLXS_MSGF(EMLXS_CONTEXT,
3878 &emlxs_mbox_detail_msg,
3879 "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
3880 emlxs_mb_cmd_xlate(mb->mbxCommand),
3881 hdr_rsp->status, hdr_rsp->extra_status);
3882
3883 mb->mbxStatus = MBX_NONEMBED_ERROR;
3884 }
3885 }
3886 rc = mb->mbxStatus;
3887
3888 break;
3889 }
3890
3891 return (rc);
3892
3893 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
3894
3895
3896
#ifdef SFCT_SUPPORT
/*
 * Prepare an FCT (target-mode) request for issue on a SLI4 work queue.
 *
 * Three request types are handled, selected by the pre-set IOCB command:
 *  - CMD_ABORT_XRI_CX:  build an abort WQE for an existing exchange
 *  - CMD_FCP_TRSP64_CX: build an FCP response (status) WQE
 *  - otherwise:         build an FCP TSEND/TRECEIVE data-phase WQE from
 *                       the fct_cmd attached to cmd_sbp
 *
 * Returns IOERR_SUCCESS on success, FC_TRAN_BUSY when no exchange can be
 * allocated, or 0xff when rpi/xri lookup or registration fails (in which
 * case the packet state is set to a local-reject error).
 *
 * Fix: removed a redundant duplicate 'wqe = &iocbq->wqe;' assignment in
 * the abort path (it was assigned both before and after the bzero).
 */
/*ARGSUSED*/
extern uint32_t
emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	fc_packet_t *pkt;
	CHANNEL *cp;
	XRIobj_t *xrip;
	emlxs_node_t *ndlp;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;
	ULP_SGE64 *sge;
	RPIobj_t *rpip;
	int32_t sge_size;
	uint64_t sge_addr;
	uint32_t did;
	uint32_t timeout;

	ddi_dma_cookie_t *cp_cmd;

	pkt = PRIV2PKT(cmd_sbp);

	cp = (CHANNEL *)cmd_sbp->channel;

	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;

	did = cmd_sbp->did;
	if (iocb->ULPCOMMAND == CMD_ABORT_XRI_CX) {

		ndlp = cmd_sbp->node;
		rpip = EMLXS_NODE_TO_RPI(port, ndlp);

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, cmd_sbp, rpip,
		    EMLXS_XRI_SOL_BLS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "FCT Abort Request: xri=%d iotag=%d sbp=%p rxid=%x",
		    xrip->XRI, xrip->iotag, cmd_sbp, pkt->pkt_cmd_fhdr.rx_id);

		cmd_sbp->xrip = xrip;

		cp->ulpSendCmd++;

		/* Initialize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		/*
		 * Don't give the abort priority, we want the IOCB
		 * we are aborting to be processed first.
		 */
		iocbq->flag |= IOCB_SPECIAL;

		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
		wqe->RequestTag = xrip->iotag;
		wqe->AbortTag = pkt->pkt_cmd_fhdr.rx_id;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = 0xffff;
		wqe->CmdType = WQE_TYPE_ABORT;

		/* IA (issue-abort-inline) is set only when the link is down */
		if (hba->state >= FC_LINK_UP) {
			wqe->un.Abort.IA = 0;
		} else {
			wqe->un.Abort.IA = 1;
		}

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

		return (IOERR_SUCCESS);

	} else if (iocb->ULPCOMMAND == CMD_FCP_TRSP64_CX) {

		timeout = pkt->pkt_timeout;
		ndlp = cmd_sbp->node;
		if (!ndlp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Unable to find rpi. did=0x%x", did);

			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		cp->ulpSendCmd++;

		/* Initialize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		/* Bind this response to the exchange of the original cmd */
		xrip = emlxs_sli4_register_xri(port, cmd_sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Unable to register xri %x. did=0x%x",
			    pkt->pkt_cmd_fhdr.rx_id, did);

			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		cmd_sbp->iotag = xrip->iotag;
		cmd_sbp->channel = cp;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
		cp_cmd = pkt->pkt_cmd_cookie;
#else
		cp_cmd = &pkt->pkt_cmd_cookie;
#endif	/* >= EMLXS_MODREV3 */

		sge_size = pkt->pkt_cmdlen;
		/* Make size a multiple of 4 */
		if (sge_size & 3) {
			sge_size = (sge_size + 3) & 0xfffffffc;
		}
		sge_addr = cp_cmd->dmac_laddress;
		sge = xrip->SGList->virt;

		/* Stage the single response SGE, then swap-copy into place */
		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = sge_size;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 1;

		/* Copy staged SGE into SGL */
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
		    (uint8_t *)sge, sizeof (ULP_SGE64));

		/* Words 0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = sge_size;
		wqe->un.FcpCmd.PayloadLength = sge_size;

		/* Word 6 */
		wqe->ContextTag = ndlp->nlp_Rpi;
		wqe->XRITag = xrip->XRI;

		/* Word 7 */
		wqe->Command = iocb->ULPCOMMAND;
		wqe->Class = cmd_sbp->class;
		wqe->ContextType = WQE_RPI_CONTEXT;
		wqe->Timer = ((timeout > 0xff) ? 0 : timeout);

		/* Word 8 */
		wqe->AbortTag = 0;

		/* Word 9 */
		wqe->RequestTag = xrip->iotag;
		wqe->OXId = (uint16_t)xrip->rx_id;

		/* Word 10 */
		if (xrip->flag & EMLXS_XRI_BUSY) {
			wqe->XC = 1;
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->QOSd = 1;
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		/* Word 11 */
		wqe->CmdType = WQE_TYPE_TRSP;
		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + timeout +
		    ((timeout > 0xff) ? 0 : 10);

		if (pkt->pkt_cmdlen) {
			EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
			    DDI_DMA_SYNC_FORDEV);
		}

		return (IOERR_SUCCESS);
	}

	/* Data-phase request (TSEND/TRECEIVE) built from the fct_cmd */
	fct_cmd = cmd_sbp->fct_cmd;
	did = fct_cmd->cmd_rportid;
	dbuf = cmd_sbp->fct_buf;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	if (!ndlp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}


	/* Initialize iocbq */
	iocbq->port = (void *) port;
	iocbq->node = (void *)ndlp;
	iocbq->channel = (void *) cp;

	wqe = &iocbq->wqe;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));

	xrip = cmd_sbp->xrip;
	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find xri. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_NO_XRI, 0);
		return (0xff);
	}

	if (emlxs_sli4_register_xri(port, cmd_sbp,
	    xrip->XRI, ndlp->nlp_DID) == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to register xri. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_NO_XRI, 0);
		return (0xff);
	}
	cmd_sbp->iotag = xrip->iotag;
	cmd_sbp->channel = cp;

	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		timeout = 0x80000000;
	}
	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);


	iocb->ULPCT = 0;
	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
		wqe->CmdType = WQE_TYPE_TRECEIVE;		/* Word 11 */

	} else { /* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;
		wqe->CmdType = WQE_TYPE_TSEND;			/* Word 11 */

		if ((dbuf->db_data_size >=
		    fct_task->task_expected_xfer_length)) {
			/* enable auto-rsp AP feature */
			wqe->AR = 0x1;
			iocb->ULPCT = 0x1; /* for cmpl */
		}
	}

	(void) emlxs_sli4_fct_bde_setup(port, cmd_sbp);

	/* Word 6 */
	wqe->ContextTag = ndlp->nlp_Rpi;
	wqe->XRITag = xrip->XRI;

	/* Word 7 */
	wqe->Command = iocb->ULPCOMMAND;
	wqe->Class = cmd_sbp->class;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->Timer = ((timeout > 0xff) ? 0 : timeout);
	wqe->PU = 1;

	/* Word 8 */
	wqe->AbortTag = 0;

	/* Word 9 */
	wqe->RequestTag = xrip->iotag;
	wqe->OXId = (uint16_t)fct_cmd->cmd_oxid;

	/* Word 10 */
	if (xrip->flag & EMLXS_XRI_BUSY) {
		wqe->XC = 1;
	}

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->QOSd = 1;
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}

	/* Word 11 */
	wqe->CQId = (uint16_t)0xffff;	/* default CQ for response */

	/* Word 12 */
	wqe->CmdSpecific = dbuf->db_data_size;

	return (IOERR_SUCCESS);

} /* emlxs_sli4_prep_fct_iocb() */
#endif /* SFCT_SUPPORT */
4223
4224
/*
 * Prepare an initiator-mode FCP command for issue on a SLI4 work queue.
 *
 * Looks up the remote-port RPI from the node attached to the iocbq,
 * allocates an exchange (XRI), builds the SGL via emlxs_sli4_bde_setup(),
 * and fills in the WQE fields for an FCP command, read, or write.
 *
 * Returns FC_SUCCESS on success, FC_TRAN_BUSY when no exchange or SGL
 * resources are available, or 0xff when no RPI exists for the target
 * (packet state is set to a local-reject error in that case).
 */
/*ARGSUSED*/
extern uint32_t
emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	emlxs_wqe_t *wqe;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *node;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	/* 24-bit destination ID byte-swapped out of the frame header */
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	cp = &hba->chan[channel];

	iocbq = &sbp->iocbq;
	iocbq->channel = (void *) cp;
	iocbq->port = (void *) port;

	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	/* Find target node object */
	node = (NODELIST *)iocbq->node;
	rpip = EMLXS_NODE_TO_RPI(port, node);

	if (!rpip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}

	sbp->channel = cp;
	/* Next allocate an Exchange for this command */
	xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
	    EMLXS_XRI_SOL_FCP_TYPE);

	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	sbp->bmp = NULL;
	iotag = sbp->iotag;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: Prep xri=%d iotag=%d oxid=%x rpi=%d",
	    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
#endif /* DEBUG_FASTPATH */

	/* Indicate this is a FCP cmd */
	iocbq->flag |= IOCB_FCP_CMD;

	/* Build the SGL; on failure release the exchange just allocated */
	if (emlxs_sli4_bde_setup(port, sbp)) {
		emlxs_sli4_free_xri(port, sbp, xrip, 1);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* DEBUG */
#ifdef DEBUG_FCP
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: SGLaddr virt %p phys %p size %d", xrip->SGList->virt,
	    xrip->SGList->phys, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: SGL",
	    (uint32_t *)xrip->SGList->virt, 20, 0);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: CMD virt %p len %d:%d:%d",
	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
#endif /* DEBUG_FCP */

	/* Flush the SGL to the device before issuing the WQE */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	/* if device is FCP-2 device, set the following bit */
	/* that says to run the FC-TAPE protocol. */
	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		wqe->ERP = 1;
	}

	/* Select the command by transfer direction and payload size */
	if (pkt->pkt_datalen == 0) {
		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
		wqe->Command = CMD_FCP_ICMND64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
		wqe->Command = CMD_FCP_IREAD64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
		wqe->PU = PARM_XFER_CHECK;
	} else {
		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
		wqe->Command = CMD_FCP_IWRITE64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
	}
	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}
	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	/* Timer field holds 0 when the timeout exceeds one byte */
	wqe->Timer =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = iotag;
	wqe->CQId = (uint16_t)0xffff;	/* default CQ for response */

	return (FC_SUCCESS);
} /* emlxs_sli4_prep_fcp_iocb() */
4365
4366
4367 /*ARGSUSED*/
4368 static uint32_t
4369 emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
4370 {
4371 return (FC_TRAN_BUSY);
4372
4373 } /* emlxs_sli4_prep_ip_iocb() */
4374
4375
4376 /*ARGSUSED*/
4377 static uint32_t
4378 emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
4379 {
4380 emlxs_hba_t *hba = HBA;
4381 fc_packet_t *pkt;
4382 IOCBQ *iocbq;
4383 IOCB *iocb;
4384 emlxs_wqe_t *wqe;
4385 FCFIobj_t *fcfp;
4386 RPIobj_t *reserved_rpip = NULL;
4387 RPIobj_t *rpip = NULL;
4388 XRIobj_t *xrip;
4389 CHANNEL *cp;
4390 uint32_t did;
4391 uint32_t cmd;
4392 ULP_SGE64 stage_sge;
4393 ULP_SGE64 *sge;
4394 ddi_dma_cookie_t *cp_cmd;
4395 ddi_dma_cookie_t *cp_resp;
4396 emlxs_node_t *node;
4397
4398 pkt = PRIV2PKT(sbp);
4399 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
4400
4401 iocbq = &sbp->iocbq;
4402 wqe = &iocbq->wqe;
4403 iocb = &iocbq->iocb;
4404 bzero((void *)wqe, sizeof (emlxs_wqe_t));
4405 bzero((void *)iocb, sizeof (IOCB));
4406 cp = &hba->chan[hba->channel_els];
4407
4408 /* Initalize iocbq */
4409 iocbq->port = (void *) port;
4410 iocbq->channel = (void *) cp;
4411
4412 sbp->channel = cp;
4413 sbp->bmp = NULL;
4414
4415 #if (EMLXS_MODREV >= EMLXS_MODREV3)
4416 cp_cmd = pkt->pkt_cmd_cookie;
4417 cp_resp = pkt->pkt_resp_cookie;
4418 #else
4419 cp_cmd = &pkt->pkt_cmd_cookie;
4420 cp_resp = &pkt->pkt_resp_cookie;
4421 #endif /* >= EMLXS_MODREV3 */
4422
4423 /* CMD payload */
4424 sge = &stage_sge;
4425 sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
4426 sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
4427 sge->length = pkt->pkt_cmdlen;
4428 sge->offset = 0;
4429 sge->type = 0;
4430
4431 cmd = *((uint32_t *)pkt->pkt_cmd);
4432 cmd &= ELS_CMD_MASK;
4433
4434 /* Initalize iocb */
4435 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
4436 /* ELS Response */
4437
4438 sbp->xrip = 0;
4439 xrip = emlxs_sli4_register_xri(port, sbp,
4440 pkt->pkt_cmd_fhdr.rx_id, did);
4441
4442 if (!xrip) {
4443 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
4444 "Unable to find XRI. rxid=%x",
4445 pkt->pkt_cmd_fhdr.rx_id);
4446
4447 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
4448 IOERR_NO_XRI, 0);
4449 return (0xff);
4450 }
4451
4452 rpip = xrip->rpip;
4453
4454 if (!rpip) {
4455 /* This means that we had a node registered */
4456 /* when the unsol request came in but the node */
4457 /* has since been unregistered. */
4458 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
4459 "Unable to find RPI. rxid=%x",
4460 pkt->pkt_cmd_fhdr.rx_id);
4461
4462 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
4463 IOERR_INVALID_RPI, 0);
4464 return (0xff);
4465 }
4466
4467 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4468 "ELS: Prep xri=%d iotag=%d oxid=%x rpi=%d",
4469 xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
4470
4471 iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
4472 wqe->Command = CMD_XMIT_ELS_RSP64_CX;
4473 wqe->CmdType = WQE_TYPE_GEN;
4474 if (!(hba->sli.sli4.param.PHWQ)) {
4475 wqe->DBDE = 1; /* Data type for BDE 0 */
4476 }
4477
4478 wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
4479 wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
4480 wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
4481 wqe->un.ElsCmd.PayloadLength = pkt->pkt_cmdlen;
4482
4483 wqe->un.ElsRsp.RemoteId = did;
4484 wqe->PU = 0x3;
4485 wqe->OXId = xrip->rx_id;
4486
4487 sge->last = 1;
4488 /* Now sge is fully staged */
4489
4490 sge = xrip->SGList->virt;
4491 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
4492 sizeof (ULP_SGE64));
4493
4494 if (rpip->RPI == FABRIC_RPI) {
4495 wqe->ContextTag = port->vpip->VPI;
4496 wqe->ContextType = WQE_VPI_CONTEXT;
4497 } else {
4498 wqe->ContextTag = rpip->RPI;
4499 wqe->ContextType = WQE_RPI_CONTEXT;
4500 }
4501
4502 if ((cmd == ELS_CMD_ACC) && (sbp->ucmd == ELS_CMD_FLOGI)) {
4503 wqe->un.ElsCmd.SP = 1;
4504 wqe->un.ElsCmd.LocalId = 0xFFFFFE;
4505 }
4506
4507 } else {
4508 /* ELS Request */
4509
4510 fcfp = port->vpip->vfip->fcfp;
4511 node = (emlxs_node_t *)iocbq->node;
4512 rpip = EMLXS_NODE_TO_RPI(port, node);
4513
4514 if (!rpip) {
4515 /* Use the fabric rpi */
4516 rpip = port->vpip->fabric_rpip;
4517 }
4518
4519 /* Next allocate an Exchange for this command */
4520 xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
4521 EMLXS_XRI_SOL_ELS_TYPE);
4522
4523 if (!xrip) {
4524 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4525 "Adapter Busy. Unable to allocate exchange. "
4526 "did=0x%x", did);
4527
4528 return (FC_TRAN_BUSY);
4529 }
4530
4531 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4532 "ELS: Prep xri=%d iotag=%d rpi=%d",
4533 xrip->XRI, xrip->iotag, rpip->RPI);
4534
4535 iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
4536 wqe->Command = CMD_ELS_REQUEST64_CR;
4537 wqe->CmdType = WQE_TYPE_ELS;
4538 if (!(hba->sli.sli4.param.PHWQ)) {
4539 wqe->DBDE = 1; /* Data type for BDE 0 */
4540 }
4541
4542 wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
4543 wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
4544 wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
4545
4546 wqe->un.ElsCmd.RemoteId = did;
4547 wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4548
4549 /* setup for rsp */
4550 iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
4551 iocb->ULPPU = 1; /* Wd4 is relative offset */
4552
4553 sge->last = 0;
4554
4555 sge = xrip->SGList->virt;
4556 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
4557 sizeof (ULP_SGE64));
4558
4559 wqe->un.ElsCmd.PayloadLength =
4560 pkt->pkt_cmdlen; /* Byte offset of rsp data */
4561
4562 /* RSP payload */
4563 sge = &stage_sge;
4564 sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
4565 sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
4566 sge->length = pkt->pkt_rsplen;
4567 sge->offset = 0;
4568 sge->last = 1;
4569 /* Now sge is fully staged */
4570
4571 sge = xrip->SGList->virt;
4572 sge++;
4573 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
4574 sizeof (ULP_SGE64));
4575 #ifdef DEBUG_ELS
4576 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4577 "ELS: SGLaddr virt %p phys %p",
4578 xrip->SGList->virt, xrip->SGList->phys);
4579 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4580 "ELS: PAYLOAD virt %p phys %p",
4581 pkt->pkt_cmd, cp_cmd->dmac_laddress);
4582 emlxs_data_dump(port, "ELS: SGL",
4583 (uint32_t *)xrip->SGList->virt, 12, 0);
4584 #endif /* DEBUG_ELS */
4585
4586 switch (cmd) {
4587 case ELS_CMD_FLOGI:
4588 wqe->un.ElsCmd.SP = 1;
4589
4590 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) ==
4591 SLI_INTF_IF_TYPE_0) {
4592 wqe->ContextTag = fcfp->FCFI;
4593 wqe->ContextType = WQE_FCFI_CONTEXT;
4594 } else {
4595 wqe->ContextTag = port->vpip->VPI;
4596 wqe->ContextType = WQE_VPI_CONTEXT;
4597 }
4598
4599 if (hba->flag & FC_FIP_SUPPORTED) {
4600 wqe->CmdType |= WQE_TYPE_MASK_FIP;
4601 }
4602
4603 if (hba->topology == TOPOLOGY_LOOP) {
4604 wqe->un.ElsCmd.LocalId = port->did;
4605 }
4606
4607 wqe->ELSId = WQE_ELSID_FLOGI;
4608 break;
4609 case ELS_CMD_FDISC:
4610 wqe->un.ElsCmd.SP = 1;
4611 wqe->ContextTag = port->vpip->VPI;
4612 wqe->ContextType = WQE_VPI_CONTEXT;
4613
4614 if (hba->flag & FC_FIP_SUPPORTED) {
4615 wqe->CmdType |= WQE_TYPE_MASK_FIP;
4616 }
4617
4618 wqe->ELSId = WQE_ELSID_FDISC;
4619 break;
4620 case ELS_CMD_LOGO:
4621 if ((did == FABRIC_DID) &&
4622 (hba->flag & FC_FIP_SUPPORTED)) {
4623 wqe->CmdType |= WQE_TYPE_MASK_FIP;
4624 }
4625
4626 wqe->ContextTag = port->vpip->VPI;
4627 wqe->ContextType = WQE_VPI_CONTEXT;
4628 wqe->ELSId = WQE_ELSID_LOGO;
4629 break;
4630 case ELS_CMD_PLOGI:
4631 if (rpip->RPI == FABRIC_RPI) {
4632 if (hba->flag & FC_PT_TO_PT) {
4633 wqe->un.ElsCmd.SP = 1;
4634 wqe->un.ElsCmd.LocalId = port->did;
4635 }
4636
4637 wqe->ContextTag = port->vpip->VPI;
4638 wqe->ContextType = WQE_VPI_CONTEXT;
4639 } else {
4640 wqe->ContextTag = rpip->RPI;
4641 wqe->ContextType = WQE_RPI_CONTEXT;
4642 }
4643
4644 wqe->ELSId = WQE_ELSID_PLOGI;
4645 break;
4646 default:
4647 if (rpip->RPI == FABRIC_RPI) {
4648 wqe->ContextTag = port->vpip->VPI;
4649 wqe->ContextType = WQE_VPI_CONTEXT;
4650 } else {
4651 wqe->ContextTag = rpip->RPI;
4652 wqe->ContextType = WQE_RPI_CONTEXT;
4653 }
4654
4655 wqe->ELSId = WQE_ELSID_CMD;
4656 break;
4657 }
4658
4659 #ifdef SFCT_SUPPORT
4660 /* This allows fct to abort the request */
4661 if (sbp->fct_cmd) {
4662 sbp->fct_cmd->cmd_oxid = xrip->XRI;
4663 sbp->fct_cmd->cmd_rxid = 0xFFFF;
4664 }
4665 #endif /* SFCT_SUPPORT */
4666 }
4667
4668 if (wqe->ContextType == WQE_VPI_CONTEXT) {
4669 reserved_rpip = emlxs_rpi_reserve_notify(port, did, xrip);
4670
4671 if (!reserved_rpip) {
4672 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
4673 "Unable to alloc reserved RPI. rxid=%x. Rejecting.",
4674 pkt->pkt_cmd_fhdr.rx_id);
4675
4676 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
4677 IOERR_INVALID_RPI, 0);
4678 return (0xff);
4679 }
4680
4681 /* Store the reserved rpi */
4682 if (wqe->Command == CMD_ELS_REQUEST64_CR) {
4683 wqe->OXId = reserved_rpip->RPI;
4684 } else {
4685 wqe->CmdSpecific = reserved_rpip->RPI;
4686 }
4687 }
4688
4689 EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
4690 xrip->SGList->size, DDI_DMA_SYNC_FORDEV);
4691
4692 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
4693 wqe->CCPE = 1;
4694 wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
4695 }
4696
4697 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4698 case FC_TRAN_CLASS2:
4699 wqe->Class = CLASS2;
4700 break;
4701 case FC_TRAN_CLASS3:
4702 default:
4703 wqe->Class = CLASS3;
4704 break;
4705 }
4706 sbp->class = wqe->Class;
4707 wqe->XRITag = xrip->XRI;
4708 wqe->RequestTag = xrip->iotag;
4709 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4710 return (FC_SUCCESS);
4711
4712 } /* emlxs_sli4_prep_els_iocb() */
4713
4714
/*
 * emlxs_sli4_prep_ct_iocb() - Prepare a WQE/IOCB pair for a CT
 * (Common Transport / Generic Services) exchange.
 *
 * Two paths, selected by pkt_tran_type:
 *   FC_PKT_OUTBOUND - CT response: reuse the exchange (XRI) the
 *       unsolicited request arrived on and build an XMIT_SEQUENCE64 WQE.
 *   otherwise       - CT request: allocate a new XRI and build a
 *       GEN_REQUEST64 WQE.
 *
 * Returns:
 *   FC_SUCCESS   - WQE/IOCB staged; SGL has been DMA-synced for device.
 *   FC_TRAN_BUSY - transient resource shortage (no XRI or SGE setup
 *                  failed); caller may retry.
 *   0xff         - hard failure; pkt state already set to
 *                  IOSTAT_LOCAL_REJECT with an IOERR_* reason.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	NODELIST *node = NULL;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	/* Start from clean WQE and IOCB images in the sbp's iocbq */
	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	/* All CT traffic is issued on the dedicated CT channel */
	cp = &hba->chan[hba->channel_ct];

	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->bmp = NULL;
	sbp->channel = cp;

	/* Initialize wqe */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* CT Response */

		/* Bind this sbp to the XRI of the original unsol request, */
		/* looked up by the request's rx_id */
		sbp->xrip = 0;
		xrip = emlxs_sli4_register_xri(port, sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		/* Build the SGL for the response payload */
		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		if (!(hba->model_info.chip & EMLXS_BE_CHIPS)) {
			wqe->un.XmitSeq.Rsvd0 = 0; /* Word3 now reserved */
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		/* Response goes out as an XMIT_SEQUENCE on the same */
		/* exchange */
		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CR;
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_XMIT_SEQUENCE64_CR;
		wqe->LenLoc = 2;

		/* CT loopback echo keeps the exchange open (xo=1) */
		if (((SLI_CT_REQUEST *) pkt->pkt_cmd)->CommandResponse.bits.
		    CmdRsp == (LE_SWAP16(SLI_CT_LOOPBACK))) {
			wqe->un.XmitSeq.xo = 1;
		} else {
			wqe->un.XmitSeq.xo = 0;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
			wqe->un.XmitSeq.ls = 1;
		}

		if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
			wqe->un.XmitSeq.si = 1;
		}

		/* Propagate the FC header routing fields into the WQE */
		wqe->un.XmitSeq.DFctl = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.XmitSeq.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.XmitSeq.Type = pkt->pkt_cmd_fhdr.type;
		wqe->OXId = xrip->rx_id;
		wqe->XC = 0; /* xri_tag is a new exchange */
		wqe->CmdSpecific = wqe->un.GenReq.Payload.tus.f.bdeSize;

	} else {
		/* CT Request */

		/* Resolve the remote port's RPI from the node list */
		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find rpi. did=0x%x rpi=%d",
			    did, node->nlp_Rpi);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_CT_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		/* Build the SGL; on failure release the XRI we just took */
		if (emlxs_sli4_bde_setup(port, sbp)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

			emlxs_sli4_free_xri(port, sbp, xrip, 1);
			return (FC_TRAN_BUSY);
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		/* Request goes out as a GEN_REQUEST on the new exchange */
		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
		wqe->CmdType = WQE_TYPE_GEN;
		wqe->Command = CMD_GEN_REQUEST64_CR;
		wqe->un.GenReq.la = 1;
		wqe->un.GenReq.DFctl = pkt->pkt_cmd_fhdr.df_ctl;
		wqe->un.GenReq.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
		wqe->un.GenReq.Type = pkt->pkt_cmd_fhdr.type;

#ifdef DEBUG_CT
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: SGLaddr virt %p phys %p", xrip->SGList->virt,
		    xrip->SGList->phys);
		emlxs_data_dump(port, "CT: SGL", (uint32_t *)xrip->SGList->virt,
		    12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CT: CMD virt %p len %d:%d",
		    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
		emlxs_data_dump(port, "CT: DATA", (uint32_t *)pkt->pkt_cmd,
		    20, 0);
#endif /* DEBUG_CT */

#ifdef SFCT_SUPPORT
		/* This allows fct to abort the request */
		if (sbp->fct_cmd) {
			sbp->fct_cmd->cmd_oxid = xrip->XRI;
			sbp->fct_cmd->cmd_rxid = 0xFFFF;
		}
#endif /* SFCT_SUPPORT */
	}

	/* Setup for rsp */
	iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
	iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
	iocb->un.genreq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
	iocb->ULPPU = 1;	/* Wd4 is relative offset */

	/* Flush the SGL to memory before the hardware reads it */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	/* Common WQE context/tag setup for both paths */
	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	/* Timer field is 8 bits; oversize timeouts become 0 (no timeout) */
	wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_ct_iocb() */
4934
4935
4936 /*ARGSUSED*/
4937 static int
4938 emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
4939 {
4940 uint32_t *ptr;
4941 EQE_u eqe;
4942 int rc = 0;
4943 off_t offset;
4944
4945 mutex_enter(&EMLXS_PORT_LOCK);
4946
4947 ptr = eq->addr.virt;
4948 ptr += eq->host_index;
4949
4950 offset = (off_t)((uint64_t)((unsigned long)
4951 eq->addr.virt) -
4952 (uint64_t)((unsigned long)
4953 hba->sli.sli4.slim2.virt));
4954
4955 EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
4956 4096, DDI_DMA_SYNC_FORKERNEL);
4957
4958 eqe.word = *ptr;
4959 eqe.word = BE_SWAP32(eqe.word);
4960
4961 if (eqe.word & EQE_VALID) {
4962 rc = 1;
4963 }
4964
4965 mutex_exit(&EMLXS_PORT_LOCK);
4966
4967 return (rc);
4968
4969 } /* emlxs_sli4_read_eq */
4970
4971
4972 static void
4973 emlxs_sli4_poll_intr(emlxs_hba_t *hba)
4974 {
4975 int rc = 0;
4976 int i;
4977 char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
4978
4979 /* Check attention bits once and process if required */
4980
4981 for (i = 0; i < hba->intr_count; i++) {
4982 rc = emlxs_sli4_read_eq(hba, &hba->sli.sli4.eq[i]);
4983 if (rc == 1) {
4984 break;
4985 }
4986 }
4987
4988 if (rc != 1) {
4989 return;
4990 }
4991
4992 (void) emlxs_sli4_msi_intr((char *)hba,
4993 (char *)(unsigned long)arg[i]);
4994
4995 return;
4996
4997 } /* emlxs_sli4_poll_intr() */
4998
4999
5000 /*ARGSUSED*/
5001 static void
5002 emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
5003 {
5004 emlxs_port_t *port = &PPORT;
5005 uint8_t status;
5006
5007 /* Save the event tag */
5008 if (hba->link_event_tag == cqe->un.link.event_tag) {
5009 HBASTATS.LinkMultiEvent++;
5010 } else if (hba->link_event_tag + 1 < cqe->un.link.event_tag) {
5011 HBASTATS.LinkMultiEvent++;
5012 }
5013 hba->link_event_tag = cqe->un.link.event_tag;
5014
5015 switch (cqe->event_code) {
5016 case ASYNC_EVENT_CODE_FCOE_LINK_STATE:
5017 HBASTATS.LinkEvent++;
5018
5019 switch (cqe->un.link.link_status) {
5020 case ASYNC_EVENT_PHYS_LINK_UP:
5021 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5022 "Link Async Event: PHYS_LINK_UP. val=%d "
5023 "type=%x event=%x",
5024 cqe->valid, cqe->event_type, HBASTATS.LinkEvent);
5025 break;
5026
5027 case ASYNC_EVENT_LOGICAL_LINK_UP:
5028 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5029 "Link Async Event: LOGICAL_LINK_UP. val=%d "
5030 "type=%x event=%x",
5031 cqe->valid, cqe->event_type, HBASTATS.LinkEvent);
5032
5033 emlxs_sli4_handle_fcoe_link_event(hba, cqe);
5034 break;
5035
5036 case ASYNC_EVENT_PHYS_LINK_DOWN:
5037 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5038 "Link Async Event: PHYS_LINK_DOWN. val=%d "
5039 "type=%x event=%x",
5040 cqe->valid, cqe->event_type, HBASTATS.LinkEvent);
5041
5042 emlxs_sli4_handle_fcoe_link_event(hba, cqe);
5043 break;
5044
5045 case ASYNC_EVENT_LOGICAL_LINK_DOWN:
5046 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5047 "Link Async Event: LOGICAL_LINK_DOWN. val=%d "
5048 "type=%x event=%x",
5049 cqe->valid, cqe->event_type, HBASTATS.LinkEvent);
5050
5051 emlxs_sli4_handle_fcoe_link_event(hba, cqe);
5052 break;
5053 default:
5054 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5055 "Link Async Event: Unknown link status=%d event=%x",
5056 cqe->un.link.link_status, HBASTATS.LinkEvent);
5057 break;
5058 }
5059 break;
5060 case ASYNC_EVENT_CODE_FCOE_FIP:
5061 switch (cqe->un.fcoe.evt_type) {
5062 case ASYNC_EVENT_NEW_FCF_DISC:
5063 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5064 "FIP Async Event: FCF_FOUND %d:%d",
5065 cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);
5066
5067 (void) emlxs_fcf_found_notify(port,
5068 cqe->un.fcoe.ref_index);
5069 break;
5070 case ASYNC_EVENT_FCF_TABLE_FULL:
5071 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5072 "FIP Async Event: FCFTAB_FULL %d:%d",
5073 cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);
5074
5075 (void) emlxs_fcf_full_notify(port);
5076 break;
5077 case ASYNC_EVENT_FCF_DEAD:
5078 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5079 "FIP Async Event: FCF_LOST %d:%d",
5080 cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);
5081
5082 (void) emlxs_fcf_lost_notify(port,
5083 cqe->un.fcoe.ref_index);
5084 break;
5085 case ASYNC_EVENT_VIRT_LINK_CLEAR:
5086 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5087 "FIP Async Event: CVL %d",
5088 cqe->un.fcoe.ref_index);
5089
5090 (void) emlxs_fcf_cvl_notify(port,
5091 emlxs_sli4_vpi_to_index(hba,
5092 cqe->un.fcoe.ref_index));
5093 break;
5094
5095 case ASYNC_EVENT_FCF_MODIFIED:
5096 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5097 "FIP Async Event: FCF_CHANGED %d",
5098 cqe->un.fcoe.ref_index);
5099
5100 (void) emlxs_fcf_changed_notify(port,
5101 cqe->un.fcoe.ref_index);
5102 break;
5103 default:
5104 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5105 "FIP Async Event: Unknown event type=%d",
5106 cqe->un.fcoe.evt_type);
5107 break;
5108 }
5109 break;
5110 case ASYNC_EVENT_CODE_DCBX:
5111 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5112 "DCBX Async Event: type=%d. Not supported.",
5113 cqe->event_type);
5114 break;
5115 case ASYNC_EVENT_CODE_GRP_5:
5116 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5117 "Group 5 Async Event: type=%d.", cqe->event_type);
5118 if (cqe->event_type == ASYNC_EVENT_QOS_SPEED) {
5119 hba->qos_linkspeed = cqe->un.qos.qos_link_speed;
5120 }
5121 break;
5122 case ASYNC_EVENT_CODE_FC_EVENT:
5123 switch (cqe->event_type) {
5124 case ASYNC_EVENT_FC_LINK_ATT:
5125 HBASTATS.LinkEvent++;
5126
5127 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5128 "FC Async Event: Link Attention. event=%x",
5129 HBASTATS.LinkEvent);
5130
5131 emlxs_sli4_handle_fc_link_att(hba, cqe);
5132 break;
5133 case ASYNC_EVENT_FC_SHARED_LINK_ATT:
5134 HBASTATS.LinkEvent++;
5135
5136 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5137 "FC Async Event: Shared Link Attention. event=%x",
5138 HBASTATS.LinkEvent);
5139
5140 emlxs_sli4_handle_fc_link_att(hba, cqe);
5141 break;
5142 default:
5143 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5144 "FC Async Event: Unknown event. type=%d event=%x",
5145 cqe->event_type, HBASTATS.LinkEvent);
5146 }
5147 break;
5148 case ASYNC_EVENT_CODE_PORT:
5149 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5150 "SLI Port Async Event: type=%d", cqe->event_type);
5151
5152 switch (cqe->event_type) {
5153 case ASYNC_EVENT_PORT_OTEMP:
5154 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5155 "SLI Port Async Event: Temperature limit exceeded");
5156 cmn_err(CE_WARN,
5157 "^%s%d: Temperature limit exceeded. Fibre channel "
5158 "controller temperature %u degrees C",
5159 DRIVER_NAME, hba->ddiinst,
5160 BE_SWAP32(*(uint32_t *)cqe->un.port.link_status));
5161 break;
5162
5163 case ASYNC_EVENT_PORT_NTEMP:
5164 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5165 "SLI Port Async Event: Temperature returned to "
5166 "normal");
5167 cmn_err(CE_WARN,
5168 "^%s%d: Temperature returned to normal",
5169 DRIVER_NAME, hba->ddiinst);
5170 break;
5171
5172 case ASYNC_EVENT_MISCONFIG_PORT:
5173 *((uint32_t *)cqe->un.port.link_status) =
5174 BE_SWAP32(*((uint32_t *)cqe->un.port.link_status));
5175 status =
5176 cqe->un.port.link_status[hba->sli.sli4.link_number];
5177
5178 switch (status) {
5179 case 0 :
5180 break;
5181
5182 case 1 :
5183 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5184 "SLI Port Async Event: Physical media not "
5185 "detected");
5186 cmn_err(CE_WARN,
5187 "^%s%d: Optics faulted/incorrectly "
5188 "installed/not installed - Reseat optics, "
5189 "if issue not resolved, replace.",
5190 DRIVER_NAME, hba->ddiinst);
5191 break;
5192
5193 case 2 :
5194 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5195 "SLI Port Async Event: Wrong physical "
5196 "media detected");
5197 cmn_err(CE_WARN,
5198 "^%s%d: Optics of two types installed - "
5199 "Remove one optic or install matching"
5200 "pair of optics.",
5201 DRIVER_NAME, hba->ddiinst);
5202 break;
5203
5204 case 3 :
5205 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5206 "SLI Port Async Event: Unsupported "
5207 "physical media detected");
5208 cmn_err(CE_WARN,
5209 "^%s%d: Incompatible optics - Replace "
5210 "with compatible optics for card to "
5211 "function.",
5212 DRIVER_NAME, hba->ddiinst);
5213 break;
5214
5215 default :
5216 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5217 "SLI Port Async Event: Physical media "
5218 "error, status=%x", status);
5219 cmn_err(CE_WARN,
5220 "^%s%d: Misconfigured port: status=0x%x - "
5221 "Check optics on card.",
5222 DRIVER_NAME, hba->ddiinst, status);
5223 break;
5224 }
5225 break;
5226 }
5227
5228 break;
5229 case ASYNC_EVENT_CODE_VF:
5230 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5231 "VF Async Event: type=%d",
5232 cqe->event_type);
5233 break;
5234 case ASYNC_EVENT_CODE_MR:
5235 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5236 "MR Async Event: type=%d",
5237 cqe->event_type);
5238 break;
5239 default:
5240 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5241 "Unknown Async Event: code=%d type=%d.",
5242 cqe->event_code, cqe->event_type);
5243 break;
5244 }
5245
5246 } /* emlxs_sli4_process_async_event() */
5247
5248
/*
 * emlxs_sli4_process_mbox_event() - Complete the currently outstanding
 * mailbox command when its completion CQE arrives.
 *
 * Behavior by mailbox queue state:
 *   0          - stray interrupt; logged and dropped.
 *   MBX_POLL   - just mark MBQ_COMPLETED so the polling thread wakes.
 *   MBX_SLEEP /
 *   MBX_NOWAIT - claim servicing via mbox_timer, copy the MQE out of
 *                the MQ, sync/swap any attached buffers, run the
 *                completion callback, then finalize and kick off any
 *                queued mailbox commands.
 *
 * Lock order: EMLXS_PORT_LOCK is taken first and EMLXS_MBOX_LOCK is
 * nested inside it; both are dropped before the completion callback.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX4 *mb;
	MATCHMAP *mbox_bp;
	MATCHMAP *mbox_nonembed;
	MAILBOXQ *mbq = NULL;
	uint32_t size;
	uint32_t *iptr;
	int rc;
	off_t offset;

	/* A consumed-but-not-completed entry carries no status yet */
	if (cqe->consumed && !cqe->completed) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Mbox event. Entry consumed but not completed");
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);
	switch (hba->mbox_queue_flag) {
	case 0:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
		    "CQ ENTRY: Mbox event. No mailbox active.");

		mutex_exit(&EMLXS_PORT_LOCK);
		return;

	case MBX_POLL:

		/* Mark mailbox complete, this should wake up any polling */
		/* threads. This can happen if interrupts are enabled while */
		/* a polled mailbox command is outstanding. If we don't set */
		/* MBQ_COMPLETED here, the polling thread may wait until */
		/* timeout error occurs */

		mutex_enter(&EMLXS_MBOX_LOCK);
		mbq = (MAILBOXQ *)hba->mbox_mbq;
		if (mbq) {
			port = (emlxs_port_t *)mbq->port;
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "CQ ENTRY: Mbox event. Completing Polled command.");
			mbq->flag |= MBQ_COMPLETED;
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		mutex_exit(&EMLXS_PORT_LOCK);
		return;

	case MBX_SLEEP:
	case MBX_NOWAIT:
		/* Check mbox_timer, it acts as a service flag too */
		/* The first to service the mbox queue will clear the timer */
		if (hba->mbox_timer) {
			hba->mbox_timer = 0;

			mutex_enter(&EMLXS_MBOX_LOCK);
			mbq = (MAILBOXQ *)hba->mbox_mbq;
			mutex_exit(&EMLXS_MBOX_LOCK);
		}

		/* Someone else already serviced it (timer was clear) */
		if (!mbq) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox event. No service required.");
			mutex_exit(&EMLXS_PORT_LOCK);
			return;
		}

		/* The MAILBOX4 image lives at the head of the MAILBOXQ */
		mb = (MAILBOX4 *)mbq;
		mutex_exit(&EMLXS_PORT_LOCK);
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
		    "CQ ENTRY: Mbox event. Invalid Mailbox flag (%x).",
		    hba->mbox_queue_flag);

		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Set port context */
	port = (emlxs_port_t *)mbq->port;

	/* Offset of the MQ within the shared slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    hba->sli.sli4.mq.addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Now that we are the owner, DMA Sync entire MQ if needed */
	/* NOTE(review): sync direction is FORDEV although the MQE is */
	/* read below — confirm this is intended */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORDEV);

	/* Copy the completed MQE into mb, byte-swapping to host order */
	BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
	    MAILBOX_CMD_SLI4_BSIZE);

	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Mbox event. Mbox complete. status=%x cmd=%x",
		    mb->mbxStatus, mb->mbxCommand);

		emlxs_data_dump(port, "MBOX CMP", (uint32_t *)hba->mbox_mqe,
		    12, 0);
	}

	if (mb->mbxCommand == MBX_SLI_CONFIG) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Mbox sge_cnt: %d length: %d embed: %d",
		    mb->un.varSLIConfig.be.sge_cnt,
		    mb->un.varSLIConfig.be.payload_length,
		    mb->un.varSLIConfig.be.embedded);
	}

	/* Now sync the memory buffer if one was used */
	if (mbq->bp) {
		mbox_bp = (MATCHMAP *)mbq->bp;
		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
		    DDI_DMA_SYNC_FORKERNEL);
#ifdef FMA_SUPPORT
		if (emlxs_fm_check_dma_handle(hba, mbox_bp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_process_mbox_event: hdl=%p",
			    mbox_bp->dma_handle);

			/* Report the DMA fault through the mailbox status */
			mb->mbxStatus = MBXERR_DMA_ERROR;
		}
#endif
	}

	/* Now sync the non-embedded payload buffer if one was used */
	if (mbq->nonembed) {
		mbox_nonembed = (MATCHMAP *)mbq->nonembed;
		size = mbox_nonembed->size;
		EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Swap the payload to host order in place */
		iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);

#ifdef FMA_SUPPORT
		if (emlxs_fm_check_dma_handle(hba,
		    mbox_nonembed->dma_handle) != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_process_mbox_event: hdl=%p",
			    mbox_nonembed->dma_handle);

			mb->mbxStatus = MBXERR_DMA_ERROR;
		}
#endif
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
	}

	/* Mailbox has been completely received at this point */

	if (mb->mbxCommand == MBX_HEARTBEAT) {
		/* Heartbeat needs no callback; just clear the flag */
		hba->heartbeat_active = 0;
		goto done;
	}

	if (hba->mbox_queue_flag == MBX_SLEEP) {
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Received. %s: status=%x Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb->mbxStatus);
		}
	} else {
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Completed. %s: status=%x",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb->mbxStatus);
		}
	}

	/* Filter out passthru mailbox */
	if (mbq->flag & MBQ_PASSTHRU) {
		goto done;
	}

	if (mb->mbxStatus) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
		    (uint32_t)mb->mbxStatus);
	}

	if (mbq->mbox_cmpl) {
		rc = (mbq->mbox_cmpl)(hba, mbq);

		/* If mbox was retried, return immediately */
		if (rc) {
			return;
		}
	}

done:

	/* Clean up the mailbox area */
	emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);

	/* Attempt to send pending mailboxes */
	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
	if (mbq) {
		/* Attempt to send pending mailboxes */
		rc = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		}
	}
	return;

} /* emlxs_sli4_process_mbox_event() */
5466
5467
/*
 * emlxs_CQE_to_IOCB() - Translate a SLI-4 work queue completion CQE
 * back into the SLI-3 style IOCB stored in the sbp's iocbq, so the
 * common (SLI-version-independent) completion code can process it.
 *
 * Uses the original WQE (still in the iocbq) to recover the command
 * type and request context, then fills in the IOCB status fields and
 * maps each *_CR command code to its *_CX completion counterpart.
 */
/*ARGSUSED*/
static void
emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
{
#ifdef DEBUG_FASTPATH
	emlxs_port_t *port = &PPORT;
#endif /* DEBUG_FASTPATH */
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t *iptr;
	fc_packet_t *pkt;
	emlxs_wqe_t *wqe;

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQE to IOCB: cmd:%x tag:%x xri:%d", wqe->Command,
	    wqe->RequestTag, wqe->XRITag);
#endif /* DEBUG_FASTPATH */

	/* Common status/tag fields, copied for every command type */
	iocb->ULPSTATUS = cqe->Status;
	iocb->un.ulpWord[4] = cqe->Parameter;
	iocb->ULPIOTAG = cqe->RequestTag;
	iocb->ULPCONTEXT = wqe->XRITag;

	switch (wqe->Command) {

	case CMD_FCP_ICMND64_CR:
		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
		break;

	case CMD_FCP_IREAD64_CR:
		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
		iocb->ULPPU = PARM_XFER_CHECK;
		/* On FCP response error, report the residual (requested */
		/* minus actually transferred) in fcpi_parm */
		if (iocb->ULPSTATUS == IOSTAT_FCP_RSP_ERROR) {
			iocb->un.fcpi64.fcpi_parm =
			    wqe->un.FcpCmd.TotalTransferCount -
			    cqe->CmdSpecific;
		}
		break;

	case CMD_FCP_IWRITE64_CR:
		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
		if (iocb->ULPSTATUS == IOSTAT_FCP_RSP_ERROR) {
			/* Clamp the residual at 0 to avoid underflow */
			if (wqe->un.FcpCmd.TotalTransferCount >
			    cqe->CmdSpecific) {
				iocb->un.fcpi64.fcpi_parm =
				    wqe->un.FcpCmd.TotalTransferCount -
				    cqe->CmdSpecific;
			} else {
				iocb->un.fcpi64.fcpi_parm = 0;
			}
		}
		break;

	case CMD_ELS_REQUEST64_CR:
		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
		if (iocb->ULPSTATUS == 0) {
			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
		}
		if (iocb->ULPSTATUS == IOSTAT_LS_RJT) {
			/* For LS_RJT, the driver populates the rsp buffer */
			pkt = PRIV2PKT(sbp);
			iptr = (uint32_t *)pkt->pkt_resp;
			*iptr++ = ELS_CMD_LS_RJT;
			*iptr = cqe->Parameter;
		}
		break;

	case CMD_GEN_REQUEST64_CR:
		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
		break;

	case CMD_XMIT_SEQUENCE64_CR:
		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
		break;

	case CMD_ABORT_XRI_CX:
		/* Abort completions carry the aborted tag as context */
		iocb->ULPCONTEXT = wqe->AbortTag;
		break;

	case CMD_FCP_TRECEIVE64_CX:
		/* free memory for XRDY */
		if (iocbq->bp) {
			emlxs_mem_buf_free(hba, iocbq->bp);
			iocbq->bp = 0;
		}

		/*FALLTHROUGH*/

	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	default:
		/* Target-mode and unrecognized commands: keep the WQE's */
		/* command code unchanged */
		iocb->ULPCOMMAND = wqe->Command;

	}
} /* emlxs_CQE_to_IOCB() */
5570
5571
/*
 * emlxs_sli4_hba_flush_chipq() - Fail every I/O still outstanding in
 * the firmware's queues.
 *
 * Walks the whole fc_table, and for each live sbp fabricates a
 * LOCAL_REJECT/SEQUENCE_TIMEOUT completion CQE, converts it to an IOCB,
 * frees the exchange, and hands the IOCB to the normal completion path
 * (deferred to the channel's intr thread unless the packet is polled or
 * driver-allocated).  Used when the chip can no longer complete them
 * itself (e.g. reset/shutdown paths).
 *
 * Locking: EMLXS_FCTAB_LOCK is dropped and re-taken on every iteration
 * because the per-I/O processing acquires other locks; the slot is
 * marked STALE_PACKET before the lock is released so no one else can
 * claim it.
 */
/*ARGSUSED*/
static void
emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	CHANNEL *cp;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	uint16_t i;
	uint32_t trigger = 0;
	CQE_CmplWQ_t cqe;

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (i = 0; i < hba->max_iotag; i++) {
		sbp = hba->fc_table[i];
		if (sbp == NULL || sbp == STALE_PACKET) {
			continue;
		}
		/* Claim the slot before dropping the lock */
		hba->fc_table[i] = STALE_PACKET;
		hba->io_count--;
		sbp->iotag = 0;
		mutex_exit(&EMLXS_FCTAB_LOCK);

		/* Fabricate a local-reject completion for this I/O */
		cp = sbp->channel;
		bzero(&cqe, sizeof (CQE_CmplWQ_t));
		cqe.RequestTag = i;
		cqe.Status = IOSTAT_LOCAL_REJECT;
		cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;

		cp->hbaCmplCmd_sbp++;

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_COMPLETE);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		/* Undo the active-I/O accounting for on-chip packets */
		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			atomic_dec_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_dec_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */
		}

		/* Copy entry to sbp's iocbq */
		iocbq = &sbp->iocbq;
		emlxs_CQE_to_IOCB(hba, &cqe, sbp);

		iocbq->next = NULL;

		/* Exchange is no longer busy on-chip, free it */
		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);

		if (!(sbp->pkt_flags &
		    (PACKET_POLLED | PACKET_ALLOCATED))) {
			/* Add the IOCB to the channel list */
			mutex_enter(&cp->rsp_lock);
			if (cp->rsp_head == NULL) {
				cp->rsp_head = iocbq;
				cp->rsp_tail = iocbq;
			} else {
				cp->rsp_tail->next = iocbq;
				cp->rsp_tail = iocbq;
			}
			mutex_exit(&cp->rsp_lock);
			trigger = 1;
		} else {
			/* Polled/driver packets complete inline */
			emlxs_proc_channel_event(hba, cp, iocbq);
		}
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Wake the intr thread of every channel that got responses */
	if (trigger) {
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->rsp_head != NULL) {
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

} /* emlxs_sli4_hba_flush_chipq() */
5661
5662
5663 /*ARGSUSED*/
5664 static void
5665 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
5666 CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5667 {
5668 emlxs_port_t *port = &PPORT;
5669 CHANNEL *cp;
5670 uint16_t request_tag;
5671
5672 request_tag = cqe->RequestTag;
5673
5674 /* 1 to 1 mapping between CQ and channel */
5675 cp = cq->channelp;
5676
5677 cp->hbaCmplCmd++;
5678
5679 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5680 "CQ ENTRY: OOR Cmpl: iotag=%d", request_tag);
5681
5682 emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 4, 0);
5683
5684 } /* emlxs_sli4_process_oor_wqe_cmpl() */
5685
5686
/*
 * emlxs_sli4_process_wqe_cmpl() - Handle a normal work queue
 * completion CQE.
 *
 * Looks up the sbp by RequestTag in fc_table, validates it, updates
 * active-I/O accounting, converts the CQE into the sbp's IOCB and
 * releases (or marks busy) the exchange.  The finished IOCB is then
 * queued on the channel's response list for the intr thread, or
 * completed inline for polled/driver-allocated packets.
 *
 * Called with no locks held; takes EMLXS_FCTAB_LOCK across the
 * lookup and XRI bookkeeping.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	CHANNEL *cp;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	uint16_t request_tag;
#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
	fct_cmd_t *fct_cmd;
	emlxs_buf_t *cmd_sbp;
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

	request_tag = cqe->RequestTag;

	/* 1 to 1 mapping between CQ and channel */
	cp = cq->channelp;

	mutex_enter(&EMLXS_FCTAB_LOCK);
	sbp = hba->fc_table[request_tag];

	/* Drop completions whose I/O slot is empty */
	if (!sbp) {
		cp->hbaCmplCmd++;
		mutex_exit(&EMLXS_FCTAB_LOCK);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: NULL sbp. iotag=%d. Dropping...",
		    request_tag);
		return;
	}

	/* Drop completions for I/Os already flushed/timed out */
	if (sbp == STALE_PACKET) {
		cp->hbaCmplCmd_sbp++;
		mutex_exit(&EMLXS_FCTAB_LOCK);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Stale sbp. iotag=%d. Dropping...", request_tag);
		return;
	}

	/* This I/O is leaving the chip; undo active-I/O accounting */
	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
		atomic_add_32(&hba->io_active, -1);
#ifdef NODE_THROTTLE_SUPPORT
		if (sbp->node) {
			atomic_add_32(&sbp->node->io_active, -1);
		}
#endif /* NODE_THROTTLE_SUPPORT */
	}

	/* Drop completions with no exchange attached */
	if (!(sbp->xrip)) {
		cp->hbaCmplCmd++;
		mutex_exit(&EMLXS_FCTAB_LOCK);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: NULL sbp xrip %p. iotag=%d. Dropping...",
		    sbp, request_tag);
		return;
	}

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQ ENTRY: process wqe compl");
#endif /* DEBUG_FASTPATH */
	cp->hbaCmplCmd_sbp++;

	/* Copy entry to sbp's iocbq */
	iocbq = &sbp->iocbq;
	emlxs_CQE_to_IOCB(hba, cqe, sbp);

	iocbq->next = NULL;

	if (cqe->XB) {
		/* XB set: firmware still owns the exchange until the */
		/* abort/close completes.  Mark exchange as ABORT in */
		/* progress */
		sbp->xrip->flag &= ~EMLXS_XRI_PENDING_IO;
		sbp->xrip->flag |= EMLXS_XRI_BUSY;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: XRI BUSY: iotag=%d xri=%d", request_tag,
		    sbp->xrip->XRI);

		/* Detach sbp from the XRI but don't release the XRI */
		emlxs_sli4_free_xri(port, sbp, 0, 0);
	} else {
		/* Exchange is no longer busy on-chip, free it */
		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
	fct_cmd = sbp->fct_cmd;
	if (fct_cmd) {
		cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
		mutex_enter(&cmd_sbp->fct_mtx);
		EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
		mutex_exit(&cmd_sbp->fct_mtx);
	}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

	/*
	 * If this is NOT a polled command completion
	 * or a driver allocated pkt, then defer pkt
	 * completion.
	 */
	if (!(sbp->pkt_flags &
	    (PACKET_POLLED | PACKET_ALLOCATED))) {
		/* Add the IOCB to the channel list */
		mutex_enter(&cp->rsp_lock);
		if (cp->rsp_head == NULL) {
			cp->rsp_head = iocbq;
			cp->rsp_tail = iocbq;
		} else {
			cp->rsp_tail->next = iocbq;
			cp->rsp_tail = iocbq;
		}
		mutex_exit(&cp->rsp_lock);

		/* Delay triggering thread till end of ISR */
		cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
	} else {
		emlxs_proc_channel_event(hba, cp, iocbq);
	}

} /* emlxs_sli4_process_wqe_cmpl() */
5812
5813
5814 /*ARGSUSED*/
5815 static void
5816 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
5817 CQE_RelWQ_t *cqe)
5818 {
5819 emlxs_port_t *port = &PPORT;
5820 WQ_DESC_t *wq;
5821 CHANNEL *cp;
5822 uint32_t i;
5823 uint16_t wqi;
5824
5825 wqi = emlxs_sli4_wqid_to_index(hba, (uint16_t)cqe->WQid);
5826
5827 /* Verify WQ index */
5828 if (wqi == 0xffff) {
5829 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5830 "CQ ENTRY: Invalid WQid:%d. Dropping...",
5831 cqe->WQid);
5832 return;
5833 }
5834
5835 wq = &hba->sli.sli4.wq[wqi];
5836
5837 #ifdef DEBUG_FASTPATH
5838 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5839 "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
5840 cqe->WQindex);
5841 #endif /* DEBUG_FASTPATH */
5842
5843 wq->port_index = cqe->WQindex;
5844
5845 /* Cmd ring may be available. Try sending more iocbs */
5846 for (i = 0; i < hba->chan_count; i++) {
5847 cp = &hba->chan[i];
5848 if (wq == (WQ_DESC_t *)cp->iopath) {
5849 emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
5850 }
5851 }
5852
5853 } /* emlxs_sli4_process_release_wqe() */
5854
5855
5856 /*ARGSUSED*/
5857 emlxs_iocbq_t *
5858 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
5859 {
5860 emlxs_queue_t *q;
5861 emlxs_iocbq_t *iocbq;
5862 emlxs_iocbq_t *prev;
5863 fc_frame_hdr_t *fchdr2;
5864 RXQ_DESC_t *rxq;
5865
5866 switch (fchdr->type) {
5867 case 1: /* ELS */
5868 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
5869 break;
5870 case 0x20: /* CT */
5871 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
5872 break;
5873 default:
5874 return (NULL);
5875 }
5876
5877 mutex_enter(&rxq->lock);
5878
5879 q = &rxq->active;
5880 iocbq = (emlxs_iocbq_t *)q->q_first;
5881 prev = NULL;
5882
5883 while (iocbq) {
5884
5885 fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
5886
5887 if ((fchdr2->s_id == fchdr->s_id) &&
5888 (fchdr2->ox_id == fchdr->ox_id) &&
5889 (fchdr2->seq_id == fchdr->seq_id)) {
5890 /* Remove iocbq */
5891 if (prev) {
5892 prev->next = iocbq->next;
5893 }
5894 if (q->q_first == (uint8_t *)iocbq) {
5895 q->q_first = (uint8_t *)iocbq->next;
5896 }
5897 if (q->q_last == (uint8_t *)iocbq) {
5898 q->q_last = (uint8_t *)prev;
5899 }
5900 q->q_cnt--;
5901
5902 break;
5903 }
5904
5905 prev = iocbq;
5906 iocbq = iocbq->next;
5907 }
5908
5909 mutex_exit(&rxq->lock);
5910
5911 return (iocbq);
5912
5913 } /* emlxs_sli4_rxq_get() */
5914
5915
5916 /*ARGSUSED*/
5917 void
5918 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
5919 {
5920 emlxs_queue_t *q;
5921 fc_frame_hdr_t *fchdr;
5922 RXQ_DESC_t *rxq;
5923
5924 fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
5925
5926 switch (fchdr->type) {
5927 case 1: /* ELS */
5928 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
5929 break;
5930 case 0x20: /* CT */
5931 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
5932 break;
5933 default:
5934 return;
5935 }
5936
5937 mutex_enter(&rxq->lock);
5938
5939 q = &rxq->active;
5940
5941 if (q->q_last) {
5942 ((emlxs_iocbq_t *)q->q_last)->next = iocbq;
5943 q->q_cnt++;
5944 } else {
5945 q->q_first = (uint8_t *)iocbq;
5946 q->q_cnt = 1;
5947 }
5948
5949 q->q_last = (uint8_t *)iocbq;
5950 iocbq->next = NULL;
5951
5952 mutex_exit(&rxq->lock);
5953
5954 return;
5955
5956 } /* emlxs_sli4_rxq_put() */
5957
5958
5959 static void
5960 emlxs_sli4_rq_post(emlxs_port_t *port, uint16_t rqid)
5961 {
5962 emlxs_hba_t *hba = HBA;
5963 emlxs_rqdbu_t rqdb;
5964
5965 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5966 "RQ POST: rqid=%d count=1", rqid);
5967
5968 /* Ring the RQ doorbell once to repost the RQ buffer */
5969 rqdb.word = 0;
5970 rqdb.db.Qid = rqid;
5971 rqdb.db.NumPosted = 1;
5972
5973 emlxs_sli4_write_rqdb(hba, rqdb.word);
5974
5975 } /* emlxs_sli4_rq_post() */
5976
5977
/*ARGSUSED*/
/*
 * Process an unsolicited-receive CQE.
 *
 * Pulls the FC frame header (and payload, if present) out of the
 * header/data RQ buffer pair indexed by the CQE, assembles multi-frame
 * sequences through the driver rxq (emlxs_sli4_rxq_get/put), and when a
 * sequence completes, builds a receive IOCB and dispatches it by FC type:
 * BLS (sends a BLS ACC), ELS, FCT (target mode) or CT.
 *
 * Cleanup is via the 'done' label: the RQ buffer is always reposted
 * (if not already), and any temporary iocbq/sequence buffer is returned
 * to its pool.
 */
static void
emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
    CQE_UnsolRcv_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	RQ_DESC_t *hdr_rq;
	RQ_DESC_t *data_rq;
	MBUF_INFO *hdr_mp;
	MBUF_INFO *data_mp;
	MATCHMAP *seq_mp;
	uint32_t *data;
	fc_frame_hdr_t fchdr;
	uint16_t hdr_rqi;
	uint32_t host_index;
	emlxs_iocbq_t *iocbq = NULL;
	emlxs_iocb_t *iocb;
	emlxs_node_t *node = NULL;
	uint32_t i;
	uint32_t seq_len;	/* bytes accumulated for this sequence */
	uint32_t seq_cnt;	/* frames accumulated for this sequence */
	uint32_t buf_type;	/* pool the sequence buffer came from */
	char label[32];		/* tag used in log messages */
	emlxs_wqe_t *wqe;
	CHANNEL *cp;
	XRIobj_t *xrip;
	RPIobj_t *rpip = NULL;
	uint32_t cmd;
	uint32_t posted = 0;	/* nonzero once the RQ buffer is reposted */
	uint32_t abort = 1;	/* set to 0 once the frame is handled */
	off_t offset;
	uint32_t status;
	uint32_t data_size;
	uint16_t rqid;
	uint32_t hdr_size;
	fc_packet_t *pkt;
	emlxs_buf_t *sbp;

	/* V1 CQEs carry the same fields at different offsets */
	if (cqe->Code == CQE_TYPE_UNSOL_RCV_V1) {
		CQE_UnsolRcvV1_t *cqeV1 = (CQE_UnsolRcvV1_t *)cqe;

		status = cqeV1->Status;
		data_size = cqeV1->data_size;
		rqid = cqeV1->RQid;
		hdr_size = cqeV1->hdr_size;
	} else {
		status = cqe->Status;
		data_size = cqe->data_size;
		rqid = cqe->RQid;
		hdr_size = cqe->hdr_size;
	}

	/* Validate the CQE */

	/* Check status */
	switch (status) {
	case RQ_STATUS_SUCCESS: /* 0x10 */
		break;

	case RQ_STATUS_BUFLEN_EXCEEDED:  /* 0x11 */
		/* Truncated payload: log it but keep processing */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "CQ ENTRY: Unsol Rcv: Payload truncated.");
		break;

	case RQ_STATUS_NEED_BUFFER: /* 0x12 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
		return;

	case RQ_STATUS_FRAME_DISCARDED:  /* 0x13 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
		return;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
		    status);
		break;
	}

	/* Make sure there is a frame header */
	if (hdr_size < sizeof (fc_frame_hdr_t)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
		return;
	}

	hdr_rqi = emlxs_sli4_rqid_to_index(hba, rqid);

	/* Verify RQ index */
	if (hdr_rqi == 0xffff) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "CQ ENTRY: Unsol Rcv: Invalid RQID:%d. Dropping...",
		    rqid);
		return;
	}

	/* Header and data RQs are allocated as an adjacent pair */
	hdr_rq  = &hba->sli.sli4.rq[hdr_rqi];
	data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQ ENTRY: Unsol Rcv:%x rqid=%d,%d index=%d status=%x "
	    "hdr_size=%d data_size=%d",
	    cqe->Code, rqid, hdr_rqi, hdr_rq->host_index, status, hdr_size,
	    data_size);

	hdr_rq->num_proc++;

	/* Update host index */
	/* The data RQ index is kept in lock-step with the header RQ */
	mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
	host_index = hdr_rq->host_index;
	hdr_rq->host_index++;

	if (hdr_rq->host_index >= hdr_rq->max_index) {
		hdr_rq->host_index = 0;
	}
	data_rq->host_index = hdr_rq->host_index;
	mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);

	/* Get the next header rqb */
	hdr_mp = &hdr_rq->rqb[host_index];

	offset = (off_t)((uint64_t)((unsigned long)hdr_mp->virt) -
	    (uint64_t)((unsigned long)hba->sli.sli4.slim2.virt));

	/* Sync the DMA buffer before reading the frame header from it */
	EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, offset,
	    sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);

	LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
	    sizeof (fc_frame_hdr_t));

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "RQ HDR[%d]: rctl:%x type:%x "
	    "sid:%x did:%x oxid:%x rxid:%x",
	    host_index, fchdr.r_ctl, fchdr.type,
	    fchdr.s_id,  fchdr.d_id, fchdr.ox_id, fchdr.rx_id);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
	    host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
	    fchdr.df_ctl, fchdr.ro);

	/* Verify fc header type */
	switch (fchdr.type) {
	case 0: /* BLS */
		/* Only ABTS (r_ctl 0x81) is expected here */
		if (fchdr.r_ctl != 0x81) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: Unexpected FC rctl (0x%x) "
			    "received. Dropping...",
			    fchdr.r_ctl);

			goto done;
		}

		/* Make sure there is no payload */
		if (data_size != 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: ABTS payload provided. Dropping...");

			goto done;
		}

		buf_type = 0xFFFFFFFF;
		(void) strlcpy(label, "ABTS", sizeof (label));
		cp = &hba->chan[hba->channel_els];
		break;

	case 0x01: /* ELS */
		/* Make sure there is a payload */
		if (data_size == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: Unsol Rcv: No ELS payload provided. "
			    "Dropping...");

			goto done;
		}

		buf_type = MEM_ELSBUF;
		(void) strlcpy(label, "Unsol ELS", sizeof (label));
		cp = &hba->chan[hba->channel_els];
		break;

	case 0x20: /* CT */
		/* Make sure there is a payload */
		if (data_size == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: Unsol Rcv: No CT payload provided. "
			    "Dropping...");

			goto done;
		}

		buf_type = MEM_CTBUF;
		(void) strlcpy(label, "Unsol CT", sizeof (label));
		cp = &hba->chan[hba->channel_ct];
		break;

	case 0x08: /* FCT */
		/* Make sure there is a payload */
		if (data_size == 0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: Unsol Rcv: No FCP payload provided. "
			    "Dropping...");

			goto done;
		}

		buf_type = MEM_FCTBUF;
		(void) strlcpy(label, "Unsol FCT", sizeof (label));
		cp = &hba->chan[hba->CHANNEL_FCT];
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
		    fchdr.type);

		goto done;
	}
	/* Fc Header is valid */

	/* Check if this is an active sequence */
	iocbq = emlxs_sli4_rxq_get(hba, &fchdr);

	if (!iocbq) {
		/* Start of a new sequence: validate the first frame */
		if (fchdr.type != 0) {
			if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ ENTRY: %s: First of sequence not"
				    " set.  Dropping...",
				    label);

				goto done;
			}
		}

		if ((fchdr.type != 0) && (fchdr.seq_cnt != 0)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Sequence count not zero (%d).  "
			    "Dropping...",
			    label, fchdr.seq_cnt);

			goto done;
		}

		/* Find vport */
		for (i = 0; i < MAX_VPORTS; i++) {
			vport = &VPORT(i);

			if (vport->did == fchdr.d_id) {
				port = vport;
				break;
			}
		}

		if (i == MAX_VPORTS) {
			/* Allow unsol FLOGI & PLOGI for P2P */
			if ((fchdr.type != 1 /* ELS*/) ||
			    ((fchdr.d_id != FABRIC_DID) &&
			    !(hba->flag & FC_PT_TO_PT))) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ ENTRY: %s: Invalid did=%x. Dropping...",
				    label, fchdr.d_id);

				goto done;
			}
		}

		/* Allocate an IOCBQ */
		iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba, MEM_IOCB);

		if (!iocbq) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of IOCB "
			    "resources.  Dropping...",
			    label);

			goto done;
		}

		seq_mp = NULL;
		if (fchdr.type != 0) {
			/* Allocate a buffer */
			seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type);

			if (!seq_mp) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ ENTRY: %s: Out of buffer "
				    "resources.  Dropping...",
				    label);

				goto done;
			}

			iocbq->bp = (uint8_t *)seq_mp;
		}

		node = (void *)emlxs_node_find_did(port, fchdr.s_id, 1);
		if (node == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Node not found. sid=%x",
			    label, fchdr.s_id);
		}

		/* Initialize the iocbq */
		iocbq->port = port;
		iocbq->channel = cp;
		iocbq->node = node;

		iocb = &iocbq->iocb;
		iocb->RXSEQCNT = 0;
		iocb->RXSEQLEN = 0;

		seq_len = 0;
		seq_cnt = 0;

	} else {
		/* Continuation frame: restore in-progress sequence state */
		iocb = &iocbq->iocb;
		port = iocbq->port;
		node = (emlxs_node_t *)iocbq->node;

		seq_mp = (MATCHMAP *)iocbq->bp;
		seq_len = iocb->RXSEQLEN;
		seq_cnt = iocb->RXSEQCNT;

		/* Check sequence order */
		if (fchdr.seq_cnt != seq_cnt) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of order frame received "
			    "(%d != %d).  Dropping...",
			    label, fchdr.seq_cnt, seq_cnt);

			goto done;
		}
	}

	/* We now have an iocbq */

	if (!port->vpip->vfip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ ENTRY: %s: No fabric connection. "
		    "Dropping...",
		    label);

		goto done;
	}

	/* Save the frame data to our seq buffer */
	if (data_size && seq_mp) {
		/* Get the next data rqb */
		data_mp = &data_rq->rqb[host_index];

		offset = (off_t)((uint64_t)((unsigned long)
		    data_mp->virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(data_mp->dma_handle, offset,
		    data_size, DDI_DMA_SYNC_FORKERNEL);

		data = (uint32_t *)data_mp->virt;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
		    host_index, data[0], data[1], data[2], data[3],
		    data[4], data[5]);

		/* Check sequence length */
		if ((seq_len + data_size) > seq_mp->size) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "RQ ENTRY: %s: Sequence buffer overflow. "
			    "(%d > %d). Dropping...",
			    label, (seq_len + data_size), seq_mp->size);

			goto done;
		}

		/* Copy data to local receive buffer */
		bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
		    seq_len), data_size);

		seq_len += data_size;
	}

	/* If this is not the last frame of sequence, queue it. */
	if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
		/* Save sequence header */
		if (seq_cnt == 0) {
			bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
			    sizeof (fc_frame_hdr_t));
		}

		/* Update sequence info in iocb */
		iocb->RXSEQCNT = seq_cnt + 1;
		iocb->RXSEQLEN = seq_len;

		/* Queue iocbq for next frame */
		emlxs_sli4_rxq_put(hba, iocbq);

		/* Don't free resources */
		iocbq = NULL;

		/* No need to abort */
		abort = 0;

		goto done;
	}

	emlxs_sli4_rq_post(port, hdr_rq->qid);
	posted = 1;

	/* End of sequence found. Process request now. */

	if (seq_cnt > 0) {
		/* Retrieve first frame of sequence */
		bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
		    sizeof (fc_frame_hdr_t));

		bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
	}

	/* Build rcv iocb and process it */
	switch (fchdr.type) {
	case 0: /* BLS */

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ ENTRY: %s: oxid:%x rxid %x sid:%x. Sending BLS ACC...",
		    label, fchdr.ox_id, fchdr.rx_id, fchdr.s_id);

		/* Try to send abort response */
		if (!(pkt = emlxs_pkt_alloc(port, 0, 0, 0, KM_NOSLEEP))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Unable to alloc pkt. Dropping...",
			    label);
			goto done;
		}

		/* Setup sbp / iocb for driver initiated cmd */
		sbp = PKT2PRIV(pkt);

		/* Free the temporary iocbq */
		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);

		iocbq = (emlxs_iocbq_t *)&sbp->iocbq;
		iocbq->port = port;
		iocbq->channel = cp;
		iocbq->node = node;

		sbp->pkt_flags &= ~PACKET_ULP_OWNED;

		if (node) {
			sbp->node = node;
			sbp->did  = node->nlp_DID;
		}

		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

		/* BLS ACC Response */
		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		iocbq->iocb.ULPCOMMAND = CMD_XMIT_BLS_RSP64_CX;
		wqe->Command = CMD_XMIT_BLS_RSP64_CX;
		wqe->CmdType = WQE_TYPE_GEN;

		wqe->un.BlsRsp.Payload0 = 0x80;
		wqe->un.BlsRsp.Payload1 = fchdr.seq_id;

		wqe->un.BlsRsp.OXId = fchdr.ox_id;
		wqe->un.BlsRsp.RXId = fchdr.rx_id;

		wqe->un.BlsRsp.SeqCntLow = 0;
		wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;

		wqe->un.BlsRsp.XO = ((fchdr.f_ctl & F_CTL_XCHG_CONTEXT)? 1:0);
		wqe->un.BlsRsp.AR = 0;

		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (rpip) {
			wqe->ContextType = WQE_RPI_CONTEXT;
			wqe->ContextTag = rpip->RPI;
		} else {
			/* No RPI for this node: respond in VPI context */
			wqe->ContextType = WQE_VPI_CONTEXT;
			wqe->ContextTag = port->vpip->VPI;

			rpip = emlxs_rpi_reserve_notify(port, fchdr.s_id, 0);

			if (!rpip) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "RQ ENTRY: %s: Unable to alloc "
				    "reserved RPI. Dropping...",
				    label);

				goto done;
			}

			/* Store the reserved rpi */
			wqe->CmdSpecific = rpip->RPI;

			wqe->un.BlsRsp.RemoteId = fchdr.s_id;
			wqe->un.BlsRsp.LocalId = fchdr.d_id;
		}

		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
			wqe->CCPE = 1;
			wqe->CCP = fchdr.rsvd;
		}

		/* Allocate an exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_BLS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Unable to alloc XRI. Dropping...",
			    label);
			goto done;
		}

		wqe->XRITag = xrip->XRI;
		wqe->Class = CLASS3;
		wqe->RequestTag = xrip->iotag;
		wqe->CQId = (uint16_t)0xffff;  /* default CQ for response */

		sbp->ticks = hba->timer_tics + 30;

		emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);

		/* The temporary iocbq has been freed already */
		iocbq = NULL;

		break;

	case 1: /* ELS */
		cmd = *((uint32_t *)seq_mp->virt);
		cmd &= ELS_CMD_MASK;

		if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED)) {
			uint32_t dropit = 1;

			/* Allow for P2P handshaking */
			switch (cmd) {
			case ELS_CMD_FLOGI:
				dropit = 0;
				break;

			case ELS_CMD_PLOGI:
			case ELS_CMD_PRLI:
				if (hba->flag & FC_PT_TO_PT) {
					dropit = 0;
				}
				break;
			}

			if (dropit) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sli_detail_msg,
				    "RQ ENTRY: %s: Port not yet enabled. "
				    "Dropping...",
				    label);
				goto done;
			}
		}

		rpip = NULL;

		if (cmd != ELS_CMD_LOGO) {
			rpip = EMLXS_NODE_TO_RPI(port, node);
		}

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		xrip = emlxs_sli4_reserve_xri(port, rpip,
		    EMLXS_XRI_UNSOL_ELS_TYPE, fchdr.ox_id);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of exchange "
			    "resources.  Dropping...",
			    label);

			goto done;
		}

		/* Build CMD_RCV_ELS64_CX */
		iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
		iocb->un.rcvels64.elsReq.tus.f.bdeSize  = seq_len;
		iocb->un.rcvels64.elsReq.addrLow  = PADDR_LO(seq_mp->phys);
		iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
		iocb->ULPBDECOUNT = 1;

		iocb->un.rcvels64.remoteID = fchdr.s_id;
		iocb->un.rcvels64.parmRo = fchdr.d_id;

		iocb->ULPPU = 0x3;
		iocb->ULPCONTEXT = xrip->XRI;
		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
		iocb->ULPCLASS = CLASS3;
		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;

		iocb->unsli3.ext_rcv.seq_len = seq_len;
		iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;
		iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;

		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
			iocb->unsli3.ext_rcv.ccpe = 1;
			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
		}

		if (port->mode == MODE_INITIATOR) {
			(void) emlxs_els_handle_unsol_req(port, iocbq->channel,
			    iocbq, seq_mp, seq_len);
		}
#ifdef SFCT_SUPPORT
		else if (port->mode == MODE_TARGET) {
			(void) emlxs_fct_handle_unsol_els(port, iocbq->channel,
			    iocbq, seq_mp, seq_len);
		}
#endif /* SFCT_SUPPORT */
		break;

#ifdef SFCT_SUPPORT
	case 8: /* FCT */
		/*
		 * NOTE(review): the other cases reference port->vpip here;
		 * this branch uses port->VPIobj directly -- assumed to be
		 * the same object, confirm against emlxs_fc.h.
		 */
		if (!(port->VPIobj.flag & EMLXS_VPI_PORT_ENABLED)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Port not yet enabled. "
			    "Dropping...",
			    label);

			goto done;
		}

		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Port not logged in. "
			    "Dropping...",
			    label);

			goto done;
		}

		xrip = emlxs_sli4_reserve_xri(port, rpip,
		    EMLXS_XRI_UNSOL_FCP_TYPE, fchdr.ox_id);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of exchange "
			    "resources.  Dropping...",
			    label);

			goto done;
		}

		/* Build CMD_RCV_SEQUENCE64_CX */
		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
		iocb->ULPBDECOUNT = 1;

		iocb->ULPPU = 0x3;
		iocb->ULPCONTEXT = xrip->XRI;
		iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
		iocb->ULPCLASS = CLASS3;
		/*
		 * NOTE(review): the comment above says CMD_RCV_SEQUENCE64_CX
		 * but CMD_RCV_ELS64_CX is stored here -- confirm this is
		 * intended for the FCT path.
		 */
		iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;

		iocb->unsli3.ext_rcv.seq_len = seq_len;
		iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;
		iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;

		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
			iocb->unsli3.ext_rcv.ccpe = 1;
			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
		}

		/* pass xrip to FCT in the iocbq */
		iocbq->sbp = xrip;

		(void) emlxs_fct_handle_unsol_req(port, cp, iocbq,
		    seq_mp, seq_len);
		break;
#endif /* SFCT_SUPPORT */

	case 0x20: /* CT */
		if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED) &&
		    !(hba->flag & FC_LOOPBACK_MODE)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Port not yet enabled. "
			    "Dropping...",
			    label);

			goto done;
		}

		if (!node) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Node not found (did=%x).  "
			    "Dropping...",
			    label, fchdr.d_id);

			goto done;
		}

		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: RPI not found (did=%x rpi=%d).  "
			    "Dropping...",
			    label, fchdr.d_id, node->nlp_Rpi);

			goto done;
		}

		xrip = emlxs_sli4_reserve_xri(port, rpip,
		    EMLXS_XRI_UNSOL_CT_TYPE, fchdr.ox_id);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ ENTRY: %s: Out of exchange "
			    "resources.  Dropping...",
			    label);

			goto done;
		}

		/* Build CMD_RCV_SEQ64_CX */
		iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
		iocb->un.rcvseq64.rcvBde.tus.f.bdeSize  = seq_len;
		iocb->un.rcvseq64.rcvBde.addrLow  = PADDR_LO(seq_mp->phys);
		iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
		iocb->ULPBDECOUNT = 1;

		iocb->un.rcvseq64.xrsqRo = 0;
		iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
		iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
		iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
		iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;

		iocb->ULPPU = 0x3;
		iocb->ULPCONTEXT = xrip->XRI;
		iocb->ULPIOTAG = rpip->RPI;
		iocb->ULPCLASS = CLASS3;
		iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;

		iocb->unsli3.ext_rcv.seq_len = seq_len;
		iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;

		if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
			iocb->unsli3.ext_rcv.ccpe = 1;
			iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
		}

		(void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
		    iocbq, seq_mp, seq_len);

		break;
	}

	/* Sequence handled, no need to abort */
	abort = 0;

done:

	/* Repost the consumed RQ buffer if not already done */
	if (!posted) {
		emlxs_sli4_rq_post(port, hdr_rq->qid);
	}

	if (abort) {
		/* Send ABTS for this exchange */
		/* !!! Currently, we have no implementation for this !!! */
		abort = 0;
	}

	/* Return memory resources to pools */
	if (iocbq) {
		if (iocbq->bp) {
			emlxs_mem_put(hba, buf_type, (void *)iocbq->bp);
			iocbq->bp = 0;
		}

		emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
	}

#ifdef FMA_SUPPORT
	if (emlxs_fm_check_dma_handle(hba,
	    hba->sli.sli4.slim2.dma_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_dma_handle_msg,
		    "sli4_process_unsol_rcv: hdl=%p",
		    hba->sli.sli4.slim2.dma_handle);

		emlxs_thread_spawn(hba, emlxs_restart_thread,
		    0, 0);
	}
#endif /* FMA_SUPPORT */
	return;

} /* emlxs_sli4_process_unsol_rcv() */
6787
6788
6789 /*ARGSUSED*/
6790 static void
6791 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
6792 CQE_XRI_Abort_t *cqe)
6793 {
6794 emlxs_port_t *port = &PPORT;
6795 XRIobj_t *xrip;
6796
6797 mutex_enter(&EMLXS_FCTAB_LOCK);
6798
6799 xrip = emlxs_sli4_find_xri(port, cqe->XRI);
6800 if (xrip == NULL) {
6801 /* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg, */
6802 /* "CQ ENTRY: process xri aborted ignored"); */
6803
6804 mutex_exit(&EMLXS_FCTAB_LOCK);
6805 return;
6806 }
6807
6808 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6809 "CQ ENTRY: XRI Aborted: xri=%d IA=%d EO=%d BR=%d",
6810 cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
6811
6812 if (!(xrip->flag & EMLXS_XRI_BUSY)) {
6813 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6814 "CQ ENTRY: XRI Aborted: xri=%d flag=%x. Bad state.",
6815 xrip->XRI, xrip->flag);
6816
6817 mutex_exit(&EMLXS_FCTAB_LOCK);
6818 return;
6819 }
6820
6821 /* Exchange is no longer busy on-chip, free it */
6822 emlxs_sli4_free_xri(port, 0, xrip, 0);
6823
6824 mutex_exit(&EMLXS_FCTAB_LOCK);
6825
6826 return;
6827
6828 } /* emlxs_sli4_process_xri_aborted () */
6829
6830
/*ARGSUSED*/
/*
 * Drain one completion queue.
 *
 * Walks CQEs from cq->host_index while their valid bit is set, clears
 * each consumed entry, and dispatches it by CQ type: GROUP1 CQs carry
 * async/mailbox events, GROUP2 CQs carry WQ completions, WQ releases,
 * unsolicited receives and XRI aborts.  Finally rings the CQ doorbell
 * to pop the consumed entries and re-arm the queue.
 *
 * Caller must hold EMLXS_PORT_LOCK; the lock is dropped around each
 * per-entry handler and reacquired afterwards.
 */
static void
emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
{
	emlxs_port_t *port = &PPORT;
	CQE_u *cqe;
	CQE_u cq_entry;
	uint32_t cqdb;
	int num_entries = 0;
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	cqe = (CQE_u *)cq->addr.virt;
	cqe += cq->host_index;

	offset = (off_t)((uint64_t)((unsigned long)
	    cq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Sync the CQ DMA memory before reading any entries */
	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		/* Word 3 holds the valid bit; stop at first invalid CQE */
		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
		if (!(cq_entry.word[3] & CQE_VALID)) {
			break;
		}

		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);

#ifdef DEBUG_CQE
		emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 6, 0);
#endif /* DEBUG_CQE */
		num_entries++;
		/* Clear the valid word so this slot reads invalid next pass */
		cqe->word[3] = 0;

		/* Advance, wrapping at the end of the ring */
		cq->host_index++;
		if (cq->host_index >= cq->max_index) {
			cq->host_index = 0;
			cqe = (CQE_u *)cq->addr.virt;
		} else {
			cqe++;
		}
		/* Handlers below may block/log; drop the port lock */
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Now handle specific cq type */
		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
			if (cq_entry.cqAsyncEntry.async_evt) {
				emlxs_sli4_process_async_event(hba,
				    (CQE_ASYNC_t *)&cq_entry);
			} else {
				emlxs_sli4_process_mbox_event(hba,
				    (CQE_MBOX_t *)&cq_entry);
			}
		} else { /* EMLXS_CQ_TYPE_GROUP2 */
			switch (cq_entry.cqCmplEntry.Code) {
			case CQE_TYPE_WQ_COMPLETION:
				/* Out-of-range iotags get separate handling */
				if (cq_entry.cqCmplEntry.RequestTag <
				    hba->max_iotag) {
					emlxs_sli4_process_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				} else {
					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				}
				break;
			case CQE_TYPE_RELEASE_WQE:
				emlxs_sli4_process_release_wqe(hba, cq,
				    (CQE_RelWQ_t *)&cq_entry);
				break;
			case CQE_TYPE_UNSOL_RCV:
			case CQE_TYPE_UNSOL_RCV_V1:
				emlxs_sli4_process_unsol_rcv(hba, cq,
				    (CQE_UnsolRcv_t *)&cq_entry);
				break;
			case CQE_TYPE_XRI_ABORTED:
				emlxs_sli4_process_xri_aborted(hba, cq,
				    (CQE_XRI_Abort_t *)&cq_entry);
				break;
			default:
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "Invalid CQ entry %d: %08x %08x %08x %08x",
				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
				    cq_entry.word[1], cq_entry.word[2],
				    cq_entry.word[3]);
				break;
			}
		}

		mutex_enter(&EMLXS_PORT_LOCK);
	}

	/* Number of times this routine gets called for this CQ */
	cq->isr_count++;

	/* num_entries is the number of CQEs we process in this specific CQ */
	cq->num_proc += num_entries;
	if (cq->max_proc < num_entries)
		cq->max_proc = num_entries;

	/* Pop the consumed entries and re-arm the CQ via its doorbell */
	cqdb = cq->qid;
	cqdb |= CQ_DB_REARM;
	if (num_entries != 0) {
		cqdb |= ((num_entries << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK);
	}

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQE: CLEAR cqdb=%08x: pops=%d", cqdb, num_entries);
#endif /* DEBUG_FASTPATH */

	emlxs_sli4_write_cqdb(hba, cqdb);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

} /* emlxs_sli4_process_cq() */
6951
6952
/*ARGSUSED*/
/*
 * Service one Event Queue: consume all valid EQEs, dispatch each to its
 * Completion Queue handler, then clear+rearm the EQ doorbell.
 *
 * EMLXS_PORT_LOCK must be held on entry and is held again on exit;
 * emlxs_sli4_process_cq() drops and reacquires it internally.
 */
static void
emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
{
	emlxs_port_t *port = &PPORT;
	uint32_t eqdb;		/* doorbell value to write back */
	uint32_t *ptr;		/* current EQE slot in the ring */
	CHANNEL *cp;
	EQE_u eqe;
	uint32_t i;
	uint16_t cqi;		/* cq[] index resolved from the EQE's CQId */
	int num_entries = 0;	/* EQEs consumed this pass */
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	/* Marks the adapter busy so hba_kill() will wait for us */
	hba->intr_busy_cnt ++;

	ptr = eq->addr.virt;
	ptr += eq->host_index;

	/* EQ ring offset within the shared slim2 DMA area */
	offset = (off_t)((uint64_t)((unsigned long)
	    eq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Each EQ ring occupies one 4K page (see resource_alloc) */
	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		eqe.word = *ptr;
		eqe.word = BE_SWAP32(eqe.word);

		/* An invalid entry means the hardware has produced no more */
		if (!(eqe.word & EQE_VALID)) {
			break;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE00: %08x", eqe.word);
#endif /* DEBUG_FASTPATH */

		/* Zero the slot so it reads invalid on the next wrap */
		*ptr = 0;
		num_entries++;
		eq->host_index++;
		if (eq->host_index >= eq->max_index) {
			eq->host_index = 0;
			ptr = eq->addr.virt;
		} else {
			ptr++;
		}

		cqi = emlxs_sli4_cqid_to_index(hba, eqe.entry.CQId);

		/* Verify CQ index */
		if (cqi == 0xffff) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "EQE: Invalid CQid: %d. Dropping...",
			    eqe.entry.CQId);
			continue;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE: CQIndex:%x cqid:%x", cqi, eqe.entry.CQId);
#endif /* DEBUG_FASTPATH */

		/* Drops and reacquires EMLXS_PORT_LOCK internally */
		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[cqi]);
	}

	/* Number of times the ISR for this EQ gets called */
	eq->isr_count++;

	/* num_entries is the number of EQEs we process in this specific ISR */
	eq->num_proc += num_entries;
	if (eq->max_proc < num_entries) {
		eq->max_proc = num_entries;
	}

	eqdb = eq->qid;
	eqdb |= (EQ_DB_CLEAR | EQ_DB_EVENT | EQ_DB_REARM);

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "EQE: CLEAR eqdb=%08x pops=%d", eqdb, num_entries);
#endif /* DEBUG_FASTPATH */

	if (num_entries != 0) {
		eqdb |= ((num_entries << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK);
		/* Kick any channels that deferred work to their threads */
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

	/*
	 * NOTE(review): the EQ doorbell is written through the CQ doorbell
	 * routine — presumably EQ/CQ doorbells share one register on this
	 * hardware; enable_intr() does the same. Confirm against the SLI4
	 * register map.
	 */
	emlxs_sli4_write_cqdb(hba, eqdb);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

	hba->intr_busy_cnt --;

} /* emlxs_sli4_process_eq() */
7059
7060
7061 #ifdef MSI_SUPPORT
7062 /*ARGSUSED*/
7063 static uint32_t
7064 emlxs_sli4_msi_intr(char *arg1, char *arg2)
7065 {
7066 emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
7067 #ifdef DEBUG_FASTPATH
7068 emlxs_port_t *port = &PPORT;
7069 #endif /* DEBUG_FASTPATH */
7070 uint16_t msgid;
7071 int rc;
7072
7073 #ifdef DEBUG_FASTPATH
7074 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7075 "msiINTR arg1:%p arg2:%p", arg1, arg2);
7076 #endif /* DEBUG_FASTPATH */
7077
7078 /* Check for legacy interrupt handling */
7079 if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
7080 rc = emlxs_sli4_intx_intr(arg1);
7081 return (rc);
7082 }
7083
7084 /* Get MSI message id */
7085 msgid = (uint16_t)((unsigned long)arg2);
7086
7087 /* Validate the message id */
7088 if (msgid >= hba->intr_count) {
7089 msgid = 0;
7090 }
7091 mutex_enter(&EMLXS_PORT_LOCK);
7092
7093 if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
7094 mutex_exit(&EMLXS_PORT_LOCK);
7095 return (DDI_INTR_UNCLAIMED);
7096 }
7097
7098 /* The eq[] index == the MSI vector number */
7099 emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[msgid]);
7100
7101 mutex_exit(&EMLXS_PORT_LOCK);
7102 return (DDI_INTR_CLAIMED);
7103
7104 } /* emlxs_sli4_msi_intr() */
7105 #endif /* MSI_SUPPORT */
7106
7107
7108 /*ARGSUSED*/
7109 static int
7110 emlxs_sli4_intx_intr(char *arg)
7111 {
7112 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
7113 #ifdef DEBUG_FASTPATH
7114 emlxs_port_t *port = &PPORT;
7115 #endif /* DEBUG_FASTPATH */
7116
7117 #ifdef DEBUG_FASTPATH
7118 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7119 "intxINTR arg:%p", arg);
7120 #endif /* DEBUG_FASTPATH */
7121
7122 mutex_enter(&EMLXS_PORT_LOCK);
7123
7124 if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
7125 mutex_exit(&EMLXS_PORT_LOCK);
7126 return (DDI_INTR_UNCLAIMED);
7127 }
7128
7129 emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
7130
7131 mutex_exit(&EMLXS_PORT_LOCK);
7132 return (DDI_INTR_CLAIMED);
7133 } /* emlxs_sli4_intx_intr() */
7134
7135
/*
 * Quiesce and kill the adapter: wait (up to ~1s) for any in-flight
 * mailbox command or interrupt to drain, then interlock the board and
 * mark it FC_KILLED. If the adapter stays busy the kill is abandoned.
 */
static void
emlxs_sli4_hba_kill(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t j;

	mutex_enter(&EMLXS_PORT_LOCK);
	/* Already interlocked: just record the KILLED state */
	if (hba->flag & FC_INTERLOCKED) {
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

		mutex_exit(&EMLXS_PORT_LOCK);

		return;
	}

	/*
	 * Poll for mailbox/interrupt idle, dropping the lock around
	 * each 100us busy-wait so the activity we are waiting on can
	 * actually make progress (10000 * 100us = 1s max).
	 */
	j = 0;
	while (j++ < 10000) {
		if ((hba->mbox_queue_flag == 0) &&
		    (hba->intr_busy_cnt == 0)) {
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);
		BUSYWAIT_US(100);
		mutex_enter(&EMLXS_PORT_LOCK);
	}

	/* Still busy after the timeout: give up without interlocking */
	if ((hba->mbox_queue_flag != 0) || (hba->intr_busy_cnt > 0)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "Board kill failed. Adapter busy, %d, %d.",
		    hba->mbox_queue_flag, hba->intr_busy_cnt);
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	hba->flag |= FC_INTERLOCKED;

	EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);

	mutex_exit(&EMLXS_PORT_LOCK);

} /* emlxs_sli4_hba_kill() */
7178
7179
7180 extern void
7181 emlxs_sli4_hba_reset_all(emlxs_hba_t *hba, uint32_t flag)
7182 {
7183 emlxs_port_t *port = &PPORT;
7184 uint32_t value;
7185
7186 mutex_enter(&EMLXS_PORT_LOCK);
7187
7188 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) != SLI_INTF_IF_TYPE_2) {
7189 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
7190 "Reset All failed. Invalid Operation.");
7191 mutex_exit(&EMLXS_PORT_LOCK);
7192 return;
7193 }
7194
7195 /* Issue a Firmware Reset All Request */
7196 if (flag) {
7197 value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL | SLI_PHYDEV_DD;
7198 } else {
7199 value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL;
7200 }
7201
7202 ddi_put32(hba->sli.sli4.bar0_acc_handle,
7203 hba->sli.sli4.PHYSDEV_reg_addr, value);
7204
7205 mutex_exit(&EMLXS_PORT_LOCK);
7206
7207 } /* emlxs_sli4_hba_reset_all() */
7208
7209
7210 static void
7211 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
7212 {
7213 emlxs_config_t *cfg = &CFG;
7214 int i;
7215 int num_cq;
7216 uint32_t data;
7217
7218 hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
7219
7220 num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
7221 EMLXS_CQ_OFFSET_WQ;
7222
7223 /* ARM EQ / CQs */
7224 for (i = 0; i < num_cq; i++) {
7225 data = hba->sli.sli4.cq[i].qid;
7226 data |= CQ_DB_REARM;
7227 emlxs_sli4_write_cqdb(hba, data);
7228 }
7229 for (i = 0; i < hba->intr_count; i++) {
7230 data = hba->sli.sli4.eq[i].qid;
7231 data |= (EQ_DB_REARM | EQ_DB_EVENT);
7232 emlxs_sli4_write_cqdb(hba, data);
7233 }
7234 } /* emlxs_sli4_enable_intr() */
7235
7236
7237 static void
7238 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
7239 {
7240 if (att) {
7241 return;
7242 }
7243
7244 hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
7245
7246 /* Short of reset, we cannot disable interrupts */
7247 } /* emlxs_sli4_disable_intr() */
7248
/*
 * Tear down everything emlxs_sli4_resource_alloc() built: the XRI
 * table and its SGLs, all queue descriptors and their locks, and
 * finally the shared slim2 DMA area that backs the queues. Safe to
 * call when already freed (checked via slim2.virt).
 */
static void
emlxs_sli4_resource_free(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	MBUF_INFO *buf_info;
	uint32_t i;

	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt == 0) {
		/* Already free */
		return;
	}

	emlxs_fcf_fini(hba);

	/*
	 * The header templates are carved out of slim2, so only the
	 * descriptor is cleared here; the memory goes with slim2 below.
	 */
	buf_info = &hba->sli.sli4.HeaderTmplate;
	if (buf_info->virt) {
		bzero(buf_info, sizeof (MBUF_INFO));
	}

	if (hba->sli.sli4.XRIp) {
		XRIobj_t *xrip;

		/* In-use list should be empty (head is its own sentinel) */
		if ((hba->sli.sli4.XRIinuse_f !=
		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
		    (hba->sli.sli4.XRIinuse_b !=
		    (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "XRIs in use during free!: %p %p != %p\n",
			    hba->sli.sli4.XRIinuse_f,
			    hba->sli.sli4.XRIinuse_b,
			    &hba->sli.sli4.XRIinuse_f);
		}

		/* Return each XRI's SGL to its memory segment */
		xrip = hba->sli.sli4.XRIp;
		for (i = 0; i < hba->sli.sli4.XRICount; i++) {
			xrip->XRI = emlxs_sli4_index_to_xri(hba, i);

			/* XRI 0 never had an SGL allocated (see alloc) */
			if (xrip->XRI != 0)
				emlxs_mem_put(hba, xrip->SGSeg, xrip->SGList);

			xrip++;
		}

		kmem_free(hba->sli.sli4.XRIp,
		    (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
		hba->sli.sli4.XRIp = NULL;

		/* Reset the free list to an empty sentinel */
		hba->sli.sli4.XRIfree_f =
		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b =
		    (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.xrif_count = 0;
	}

	/* Destroy per-queue locks and invalidate all queue ids */
	for (i = 0; i < hba->intr_count; i++) {
		mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
		bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
		hba->sli.sli4.eq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_CQS; i++) {
		bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
		hba->sli.sli4.cq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_WQS; i++) {
		bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
		hba->sli.sli4.wq[i].qid = 0xffff;
	}
	for (i = 0; i < EMLXS_MAX_RXQS; i++) {
		mutex_destroy(&hba->sli.sli4.rxq[i].lock);
		bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
	}
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		mutex_destroy(&hba->sli.sli4.rq[i].lock);
		bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
		hba->sli.sli4.rq[i].qid = 0xffff;
	}

	/* Free the MQ */
	bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));

	/* Release the backing DMA area last; queues pointed into it */
	buf_info = &hba->sli.sli4.slim2;
	if (buf_info->virt) {
		buf_info->flags = FC_MBUF_DMA;
		emlxs_mem_free(hba, buf_info);
		bzero(buf_info, sizeof (MBUF_INFO));
	}

	/* GPIO lock */
	if (hba->model_info.flags & EMLXS_GPIO_LEDS)
		mutex_destroy(&hba->gpio_lock);

} /* emlxs_sli4_resource_free() */
7342
7343 static int
7344 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
7345 {
7346 emlxs_port_t *port = &PPORT;
7347 emlxs_config_t *cfg = &CFG;
7348 MBUF_INFO *buf_info;
7349 int num_eq;
7350 int num_wq;
7351 uint16_t i;
7352 uint32_t j;
7353 uint32_t k;
7354 uint16_t cq_depth;
7355 uint32_t cq_size;
7356 uint32_t word;
7357 XRIobj_t *xrip;
7358 RQE_t *rqe;
7359 MBUF_INFO *rqb;
7360 uint64_t phys;
7361 uint64_t tmp_phys;
7362 char *virt;
7363 char *tmp_virt;
7364 void *data_handle;
7365 void *dma_handle;
7366 int32_t size;
7367 off_t offset;
7368 uint32_t count = 0;
7369 uint32_t hddr_size = 0;
7370 uint32_t align;
7371 uint32_t iotag;
7372
7373 buf_info = &hba->sli.sli4.slim2;
7374 if (buf_info->virt) {
7375 /* Already allocated */
7376 return (0);
7377 }
7378
7379 emlxs_fcf_init(hba);
7380
7381 switch (hba->sli.sli4.param.CQV) {
7382 case 0:
7383 cq_depth = CQ_DEPTH;
7384 break;
7385 case 2:
7386 default:
7387 cq_depth = CQ_DEPTH_V2;
7388 break;
7389 }
7390 cq_size = (cq_depth * CQE_SIZE);
7391
7392 /* EQs - 1 per Interrupt vector */
7393 num_eq = hba->intr_count;
7394
7395 /* CQs - number of WQs + 1 for RQs + 1 for mbox/async events */
7396 num_wq = cfg[CFG_NUM_WQ].current * num_eq;
7397
7398 /* Calculate total dmable memory we need */
7399 /* WARNING: make sure each section is aligned on 4K boundary */
7400
7401 /* EQ */
7402 count += num_eq * 4096;
7403
7404 /* CQ */
7405 count += (num_wq + EMLXS_CQ_OFFSET_WQ) * cq_size;
7406
7407 /* WQ */
7408 count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);
7409
7410 /* MQ */
7411 count += EMLXS_MAX_MQS * 4096;
7412
7413 /* RQ */
7414 count += EMLXS_MAX_RQS * 4096;
7415
7416 /* RQB/E */
7417 count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
7418 count += (4096 - (count%4096)); /* Ensure 4K alignment */
7419
7420 /* RPI Header Templates */
7421 if (hba->sli.sli4.param.HDRR) {
7422 /* Bytes per extent */
7423 j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);
7424
7425 /* Pages required per extent (page == 4096 bytes) */
7426 k = (j/4096) + ((j%4096)? 1:0);
7427
7428 /* Total size */
7429 hddr_size = (k * hba->sli.sli4.RPIExtCount * 4096);
7430
7431 count += hddr_size;
7432 }
7433
7434 /* Allocate slim2 for SLI4 */
7435 buf_info = &hba->sli.sli4.slim2;
7436 buf_info->size = count;
7437 buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7438 buf_info->align = ddi_ptob(hba->dip, 1L);
7439
7440 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7441 "Allocating memory for slim2: %d", count);
7442
7443 (void) emlxs_mem_alloc(hba, buf_info);
7444
7445 if (buf_info->virt == NULL) {
7446 EMLXS_MSGF(EMLXS_CONTEXT,
7447 &emlxs_init_failed_msg,
7448 "Unable to allocate internal memory for SLI4: %d",
7449 count);
7450 goto failed;
7451 }
7452 bzero(buf_info->virt, buf_info->size);
7453 EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
7454 buf_info->size, DDI_DMA_SYNC_FORDEV);
7455
7456 /* Assign memory to Head Template, EQ, CQ, WQ, RQ and MQ */
7457 data_handle = buf_info->data_handle;
7458 dma_handle = buf_info->dma_handle;
7459 phys = buf_info->phys;
7460 virt = (char *)buf_info->virt;
7461
7462 /* Allocate space for queues */
7463
7464 /* EQ */
7465 size = 4096;
7466 for (i = 0; i < num_eq; i++) {
7467 bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
7468
7469 buf_info = &hba->sli.sli4.eq[i].addr;
7470 buf_info->size = size;
7471 buf_info->flags =
7472 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7473 buf_info->align = ddi_ptob(hba->dip, 1L);
7474 buf_info->phys = phys;
7475 buf_info->virt = (void *)virt;
7476 buf_info->data_handle = data_handle;
7477 buf_info->dma_handle = dma_handle;
7478
7479 phys += size;
7480 virt += size;
7481
7482 hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
7483 hba->sli.sli4.eq[i].qid = 0xffff;
7484
7485 mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, NULL,
7486 MUTEX_DRIVER, NULL);
7487 }
7488
7489
7490 /* CQ */
7491 for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
7492 bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
7493
7494 buf_info = &hba->sli.sli4.cq[i].addr;
7495 buf_info->size = cq_size;
7496 buf_info->flags =
7497 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7498 buf_info->align = ddi_ptob(hba->dip, 1L);
7499 buf_info->phys = phys;
7500 buf_info->virt = (void *)virt;
7501 buf_info->data_handle = data_handle;
7502 buf_info->dma_handle = dma_handle;
7503
7504 phys += cq_size;
7505 virt += cq_size;
7506
7507 hba->sli.sli4.cq[i].max_index = cq_depth;
7508 hba->sli.sli4.cq[i].qid = 0xffff;
7509 }
7510
7511
7512 /* WQ */
7513 size = 4096 * EMLXS_NUM_WQ_PAGES;
7514 for (i = 0; i < num_wq; i++) {
7515 bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
7516
7517 buf_info = &hba->sli.sli4.wq[i].addr;
7518 buf_info->size = size;
7519 buf_info->flags =
7520 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7521 buf_info->align = ddi_ptob(hba->dip, 1L);
7522 buf_info->phys = phys;
7523 buf_info->virt = (void *)virt;
7524 buf_info->data_handle = data_handle;
7525 buf_info->dma_handle = dma_handle;
7526
7527 phys += size;
7528 virt += size;
7529
7530 hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
7531 hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
7532 hba->sli.sli4.wq[i].qid = 0xFFFF;
7533 }
7534
7535
7536 /* MQ */
7537 size = 4096;
7538 bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
7539
7540 buf_info = &hba->sli.sli4.mq.addr;
7541 buf_info->size = size;
7542 buf_info->flags =
7543 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7544 buf_info->align = ddi_ptob(hba->dip, 1L);
7545 buf_info->phys = phys;
7546 buf_info->virt = (void *)virt;
7547 buf_info->data_handle = data_handle;
7548 buf_info->dma_handle = dma_handle;
7549
7550 phys += size;
7551 virt += size;
7552
7553 hba->sli.sli4.mq.max_index = MQ_DEPTH;
7554
7555
7556 /* RXQ */
7557 for (i = 0; i < EMLXS_MAX_RXQS; i++) {
7558 bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
7559
7560 mutex_init(&hba->sli.sli4.rxq[i].lock, NULL, MUTEX_DRIVER,
7561 NULL);
7562 }
7563
7564
7565 /* RQ */
7566 size = 4096;
7567 for (i = 0; i < EMLXS_MAX_RQS; i++) {
7568 bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
7569
7570 buf_info = &hba->sli.sli4.rq[i].addr;
7571 buf_info->size = size;
7572 buf_info->flags =
7573 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7574 buf_info->align = ddi_ptob(hba->dip, 1L);
7575 buf_info->phys = phys;
7576 buf_info->virt = (void *)virt;
7577 buf_info->data_handle = data_handle;
7578 buf_info->dma_handle = dma_handle;
7579
7580 phys += size;
7581 virt += size;
7582
7583 hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
7584 hba->sli.sli4.rq[i].qid = 0xFFFF;
7585
7586 mutex_init(&hba->sli.sli4.rq[i].lock, NULL, MUTEX_DRIVER, NULL);
7587 }
7588
7589
7590 /* RQB/E */
7591 for (i = 0; i < EMLXS_MAX_RQS; i++) {
7592 size = (i & 0x1) ? RQB_DATA_SIZE : RQB_HEADER_SIZE;
7593 tmp_phys = phys;
7594 tmp_virt = virt;
7595
7596 /* Initialize the RQEs */
7597 rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
7598 for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
7599 phys = tmp_phys;
7600 virt = tmp_virt;
7601 for (k = 0; k < RQB_COUNT; k++) {
7602 word = PADDR_HI(phys);
7603 rqe->AddrHi = BE_SWAP32(word);
7604
7605 word = PADDR_LO(phys);
7606 rqe->AddrLo = BE_SWAP32(word);
7607
7608 rqb = &hba->sli.sli4.rq[i].
7609 rqb[k + (j * RQB_COUNT)];
7610 rqb->size = size;
7611 rqb->flags = FC_MBUF_DMA |
7612 FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7613 rqb->align = ddi_ptob(hba->dip, 1L);
7614 rqb->phys = phys;
7615 rqb->virt = (void *)virt;
7616 rqb->data_handle = data_handle;
7617 rqb->dma_handle = dma_handle;
7618
7619 phys += size;
7620 virt += size;
7621 #ifdef DEBUG_RQE
7622 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7623 "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p iotag=%d",
7624 i, j, k, mp, mp->tag);
7625 #endif /* DEBUG_RQE */
7626
7627 rqe++;
7628 }
7629 }
7630
7631 offset = (off_t)((uint64_t)((unsigned long)
7632 hba->sli.sli4.rq[i].addr.virt) -
7633 (uint64_t)((unsigned long)
7634 hba->sli.sli4.slim2.virt));
7635
7636 /* Sync the RQ buffer list */
7637 EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
7638 hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
7639 }
7640
7641 /* 4K Alignment */
7642 align = (4096 - (phys%4096));
7643 phys += align;
7644 virt += align;
7645
7646 /* RPI Header Templates */
7647 if (hba->sli.sli4.param.HDRR) {
7648 buf_info = &hba->sli.sli4.HeaderTmplate;
7649 bzero(buf_info, sizeof (MBUF_INFO));
7650 buf_info->size = hddr_size;
7651 buf_info->flags = FC_MBUF_DMA | FC_MBUF_DMA32;
7652 buf_info->align = ddi_ptob(hba->dip, 1L);
7653 buf_info->phys = phys;
7654 buf_info->virt = (void *)virt;
7655 buf_info->data_handle = data_handle;
7656 buf_info->dma_handle = dma_handle;
7657 }
7658
7659 /* SGL */
7660
7661 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7662 "Allocating memory for %d SGLs: %d/%d",
7663 hba->sli.sli4.XRICount, sizeof (XRIobj_t), size);
7664
7665 /* Initialize double linked lists */
7666 hba->sli.sli4.XRIinuse_f =
7667 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7668 hba->sli.sli4.XRIinuse_b =
7669 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7670 hba->sli.sli4.xria_count = 0;
7671
7672 hba->sli.sli4.XRIfree_f =
7673 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7674 hba->sli.sli4.XRIfree_b =
7675 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7676 hba->sli.sli4.xrif_count = 0;
7677
7678 uint32_t mseg;
7679
7680 switch (hba->sli.sli4.mem_sgl_size) {
7681 case 1024:
7682 mseg = MEM_SGL1K;
7683 break;
7684 case 2048:
7685 mseg = MEM_SGL2K;
7686 break;
7687 case 4096:
7688 mseg = MEM_SGL4K;
7689 break;
7690 default:
7691 EMLXS_MSGF(EMLXS_CONTEXT,
7692 &emlxs_init_failed_msg,
7693 "Unsupported SGL Size: %d", hba->sli.sli4.mem_sgl_size);
7694 goto failed;
7695 }
7696
7697 hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
7698 (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
7699
7700 xrip = hba->sli.sli4.XRIp;
7701 iotag = 1;
7702
7703 for (i = 0; i < hba->sli.sli4.XRICount; i++) {
7704 xrip->XRI = emlxs_sli4_index_to_xri(hba, i);
7705
7706 /* We don't use XRI==0, since it also represents an */
7707 /* uninitialized exchange */
7708 if (xrip->XRI == 0) {
7709 xrip++;
7710 continue;
7711 }
7712
7713 xrip->iotag = iotag++;
7714 xrip->sge_count =
7715 (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
7716
7717 /* Add xrip to end of free list */
7718 xrip->_b = hba->sli.sli4.XRIfree_b;
7719 hba->sli.sli4.XRIfree_b->_f = xrip;
7720 xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7721 hba->sli.sli4.XRIfree_b = xrip;
7722 hba->sli.sli4.xrif_count++;
7723
7724 /* Allocate SGL for this xrip */
7725 xrip->SGSeg = mseg;
7726 xrip->SGList = emlxs_mem_get(hba, xrip->SGSeg);
7727
7728 if (xrip->SGList == NULL) {
7729 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
7730 "Unable to allocate memory for SGL %d", i);
7731 goto failed;
7732 }
7733
7734 EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
7735 xrip->SGList->size, DDI_DMA_SYNC_FORDEV);
7736
7737 xrip++;
7738 }
7739
7740 /* GPIO lock */
7741 if (hba->model_info.flags & EMLXS_GPIO_LEDS)
7742 mutex_init(&hba->gpio_lock, NULL, MUTEX_DRIVER, NULL);
7743
7744 #ifdef FMA_SUPPORT
7745 if (hba->sli.sli4.slim2.dma_handle) {
7746 if (emlxs_fm_check_dma_handle(hba,
7747 hba->sli.sli4.slim2.dma_handle)
7748 != DDI_FM_OK) {
7749 EMLXS_MSGF(EMLXS_CONTEXT,
7750 &emlxs_invalid_dma_handle_msg,
7751 "sli4_resource_alloc: hdl=%p",
7752 hba->sli.sli4.slim2.dma_handle);
7753 goto failed;
7754 }
7755 }
7756 #endif /* FMA_SUPPORT */
7757
7758 return (0);
7759
7760 failed:
7761
7762 (void) emlxs_sli4_resource_free(hba);
7763 return (ENOMEM);
7764
7765 } /* emlxs_sli4_resource_alloc */
7766
7767
7768 extern void
7769 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba)
7770 {
7771 uint32_t i;
7772 uint32_t num_wq;
7773 emlxs_config_t *cfg = &CFG;
7774 clock_t time;
7775
7776 /* EQ */
7777 for (i = 0; i < hba->intr_count; i++) {
7778 hba->sli.sli4.eq[i].num_proc = 0;
7779 hba->sli.sli4.eq[i].max_proc = 0;
7780 hba->sli.sli4.eq[i].isr_count = 0;
7781 }
7782 num_wq = cfg[CFG_NUM_WQ].current * hba->intr_count;
7783 /* CQ */
7784 for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
7785 hba->sli.sli4.cq[i].num_proc = 0;
7786 hba->sli.sli4.cq[i].max_proc = 0;
7787 hba->sli.sli4.cq[i].isr_count = 0;
7788 }
7789 /* WQ */
7790 for (i = 0; i < num_wq; i++) {
7791 hba->sli.sli4.wq[i].num_proc = 0;
7792 hba->sli.sli4.wq[i].num_busy = 0;
7793 }
7794 /* RQ */
7795 for (i = 0; i < EMLXS_MAX_RQS; i++) {
7796 hba->sli.sli4.rq[i].num_proc = 0;
7797 }
7798 (void) drv_getparm(LBOLT, &time);
7799 hba->sli.sli4.que_stat_timer = (uint32_t)time;
7800
7801 } /* emlxs_sli4_zero_queue_stat */
7802
7803
/*
 * Reserve an XRI (exchange) without registering an I/O against it:
 * take the first free XRI, mark it EMLXS_XRI_RESERVED, bind it to the
 * given RPI, and move it from the free list to the in-use list.
 * Returns the reserved XRI or NULL if none is available.
 */
extern XRIobj_t *
emlxs_sli4_reserve_xri(emlxs_port_t *port, RPIobj_t *rpip, uint32_t type,
    uint16_t rx_id)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;
	uint16_t iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = hba->sli.sli4.XRIfree_f;

	/* The list head is its own sentinel: equal means list empty */
	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "Unable to reserve XRI. type=%d",
		    type);

		return (NULL);
	}

	iotag = xrip->iotag;

	/* The iotag's fc_table slot must be empty (or stale) to proceed */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to reserve iotag. type=%d",
		    type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Reserved but not yet registered: no sbp attached */
	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = EMLXS_XRI_RESERVED;
	xrip->sbp = NULL;

	xrip->rpip = rpip;
	xrip->rx_id = rx_id;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);
	return (xrip);

} /* emlxs_sli4_reserve_xri() */
7869
7870
/*
 * Release a previously reserved (but unregistered) XRI back to the
 * free list. 'lock' selects whether this routine acquires
 * EMLXS_FCTAB_LOCK itself (non-zero) or the caller already holds it.
 *
 * Returns 0 on success (or already free), 1 if the XRI is actively
 * in use (registered) and cannot be unreserved.
 */
extern uint32_t
emlxs_sli4_unreserve_xri(emlxs_port_t *port, uint16_t xri, uint32_t lock)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	xrip = emlxs_sli4_find_xri(port, xri);

	if (!xrip || xrip->state == XRI_STATE_FREE) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d already freed.", xri);
		return (0);
	}

	/* Flush this unsolicited ct command */
	if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
		(void) emlxs_flush_ct_event(port, xrip->rx_id);
	}

	/* A registered XRI (reserve flag cleared) cannot be unreserved */
	if (!(xrip->flag & EMLXS_XRI_RESERVED)) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d in use. type=%d",
		    xrip->XRI, xrip->type);
		return (1);
	}

	/* A reserved XRI should have no packet: drop any stray entry */
	if (xrip->iotag &&
	    (hba->fc_table[xrip->iotag] != NULL) &&
	    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "sli4_unreserve_xri:%d sbp dropped:%p type=%d",
		    xrip->XRI, hba->fc_table[xrip->iotag], xrip->type);

		hba->fc_table[xrip->iotag] = NULL;
		hba->io_count--;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "sli4_unreserve_xri:%d unreserved. type=%d",
	    xrip->XRI, xrip->type);

	xrip->state = XRI_STATE_FREE;
	xrip->type = 0;

	/* Release RPI references held by this exchange */
	if (xrip->rpip) {
		xrip->rpip->xri_count--;
		xrip->rpip = NULL;
	}

	if (xrip->reserved_rpip) {
		xrip->reserved_rpip->xri_count--;
		xrip->reserved_rpip = NULL;
	}

	/* Take it off inuse list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xria_count--;

	/* Add it to end of free list */
	xrip->_b = hba->sli.sli4.XRIfree_b;
	hba->sli.sli4.XRIfree_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
	hba->sli.sli4.XRIfree_b = xrip;
	hba->sli.sli4.xrif_count++;

	if (lock) {
		mutex_exit(&EMLXS_FCTAB_LOCK);
	}

	return (0);

} /* emlxs_sli4_unreserve_xri() */
7958
7959
/*
 * Register an I/O packet (sbp) against a previously reserved XRI:
 * install the packet in fc_table under the XRI's iotag, clear the
 * RESERVED flag, and (if the exchange was reserved before the remote
 * port's RPI was registered) rebind it to the now-registered RPI.
 *
 * Returns the XRI on success, NULL if the XRI is missing, free, or
 * not in the reserved state.
 */
XRIobj_t *
emlxs_sli4_register_xri(emlxs_port_t *port, emlxs_buf_t *sbp, uint16_t xri,
    uint32_t did)
{
	emlxs_hba_t *hba = HBA;
	uint16_t iotag;
	XRIobj_t *xrip;
	emlxs_node_t *node;
	RPIobj_t *rpip;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = sbp->xrip;
	if (!xrip) {
		/* Packet not yet bound: look the XRI up by number */
		xrip = emlxs_sli4_find_xri(port, xri);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "sli4_register_xri:%d XRI not found.", xri);

			mutex_exit(&EMLXS_FCTAB_LOCK);
			return (NULL);
		}
	}

	/* Only a reserved (not free, not registered) XRI may register */
	if ((xrip->state == XRI_STATE_FREE) ||
	    !(xrip->flag & EMLXS_XRI_RESERVED)) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid XRI. xrip=%p "
		    "state=%x flag=%x",
		    xrip->XRI, xrip, xrip->state, xrip->flag);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	iotag = xrip->iotag;

	/* The iotag's fc_table slot must be empty (or stale) */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid fc_table entry. "
		    "iotag=%d entry=%p",
		    xrip->XRI, iotag, hba->fc_table[iotag]);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->flag &= ~EMLXS_XRI_RESERVED;
	xrip->sbp = sbp;

	/* If we did not have a registered RPI when we reserved */
	/* this exchange, check again now. */
	if (xrip->rpip && (xrip->rpip->RPI == FABRIC_RPI)) {
		node = emlxs_node_find_did(port, did, 1);
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (rpip && (rpip->RPI != FABRIC_RPI)) {
			/* Move the XRI to the new RPI */
			xrip->rpip->xri_count--;
			xrip->rpip = rpip;
			rpip->xri_count++;
		}
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_register_xri() */
8040
8041
/* Performs both reserve and register functions for XRI */
/*
 * Take the first free XRI, register the packet (sbp) against its
 * iotag, bind it to the RPI, and move it to the in-use list — a
 * combined reserve_xri() + register_xri() in one lock hold.
 * Returns the XRI or NULL if none is available.
 */
static XRIobj_t *
emlxs_sli4_alloc_xri(emlxs_port_t *port, emlxs_buf_t *sbp, RPIobj_t *rpip,
    uint32_t type)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;
	uint16_t iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	xrip = hba->sli.sli4.XRIfree_f;

	/* The list head is its own sentinel: equal means list empty */
	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		return (NULL);
	}

	/* Get the iotag by registering the packet */
	iotag = xrip->iotag;

	/* The iotag's fc_table slot must be empty (or stale) */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to alloc iotag:(0x%x)(%p) type=%d",
		    iotag, hba->fc_table[iotag], type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	/* Registered immediately: no RESERVED flag, sbp attached */
	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = 0;
	xrip->sbp = sbp;

	xrip->rpip = rpip;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_alloc_xri() */
8111
8112
8113 /* EMLXS_FCTAB_LOCK must be held to enter */
8114 extern XRIobj_t *
8115 emlxs_sli4_find_xri(emlxs_port_t *port, uint16_t xri)
8116 {
8117 emlxs_hba_t *hba = HBA;
8118 XRIobj_t *xrip;
8119
8120 xrip = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
8121 while (xrip != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
8122 if ((xrip->state >= XRI_STATE_ALLOCATED) &&
8123 (xrip->XRI == xri)) {
8124 return (xrip);
8125 }
8126 xrip = xrip->_f;
8127 }
8128
8129 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8130 "Unable to find XRI x%x", xri);
8131
8132 return (NULL);
8133
8134 } /* emlxs_sli4_find_xri() */
8135
8136
8137
8138
/*
 * Release an XRI and/or detach its I/O packet. Either argument may be
 * NULL: a non-NULL xrip is moved from the in-use list back to the free
 * list (dropping fc_table and RPI references); a non-NULL sbp has its
 * iotag/xrip bindings cleared and its TXQ/CHIPQ flags reset. 'lock'
 * selects whether EMLXS_FCTAB_LOCK is taken here or already held.
 */
extern void
emlxs_sli4_free_xri(emlxs_port_t *port, emlxs_buf_t *sbp, XRIobj_t *xrip,
    uint8_t lock)
{
	emlxs_hba_t *hba = HBA;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	if (xrip) {
		/* Double-free guard */
		if (xrip->state == XRI_STATE_FREE) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI:%x, Already freed. type=%d",
			    xrip->XRI, xrip->type);
			return;
		}

		/* Flush any pending unsolicited CT event for this exchange */
		if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
			(void) emlxs_flush_ct_event(port, xrip->rx_id);
		}

		/* Drop the fc_table entry held under this XRI's iotag */
		if (xrip->iotag &&
		    (hba->fc_table[xrip->iotag] != NULL) &&
		    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
			hba->fc_table[xrip->iotag] = NULL;
			hba->io_count--;
		}

		xrip->state = XRI_STATE_FREE;
		xrip->type = 0;
		xrip->flag = 0;

		/* Release RPI references held by this exchange */
		if (xrip->rpip) {
			xrip->rpip->xri_count--;
			xrip->rpip = NULL;
		}

		if (xrip->reserved_rpip) {
			xrip->reserved_rpip->xri_count--;
			xrip->reserved_rpip = NULL;
		}

		/* Take it off inuse list */
		(xrip->_b)->_f = xrip->_f;
		(xrip->_f)->_b = xrip->_b;
		xrip->_f = NULL;
		xrip->_b = NULL;
		hba->sli.sli4.xria_count--;

		/* Add it to end of free list */
		xrip->_b = hba->sli.sli4.XRIfree_b;
		hba->sli.sli4.XRIfree_b->_f = xrip;
		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b = xrip;
		hba->sli.sli4.xrif_count++;
	}

	if (sbp) {
		/* Don't touch packets the ULP owns or that completed */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags &
		    (PACKET_ULP_OWNED|PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI: sbp invalid. sbp=%p flags=%x xri=%d",
			    sbp, sbp->pkt_flags, ((xrip)? xrip->XRI:0));
			return;
		}

		/* Log (but tolerate) inconsistent sbp/XRI iotag pairing */
		if (xrip && (xrip->iotag != sbp->iotag)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "sbp/iotag mismatch %p iotag:%d %d", sbp,
			    sbp->iotag, xrip->iotag);
		}

		if (sbp->iotag) {
			if (sbp == hba->fc_table[sbp->iotag]) {
				hba->fc_table[sbp->iotag] = NULL;
				hba->io_count--;

				if (sbp->xrip) {
					/* Exchange is still reserved */
					sbp->xrip->flag |= EMLXS_XRI_RESERVED;
				}
			}
			sbp->iotag = 0;
		}

		if (xrip) {
			sbp->xrip = 0;
		}

		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		/* Clean up the sbp */
		mutex_enter(&sbp->mtx);

		if (sbp->pkt_flags & PACKET_IN_TXQ) {
			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->channel_tx_count--;
		}

		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
		}

		mutex_exit(&sbp->mtx);
	} else {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}
	}

} /* emlxs_sli4_free_xri() */
8260
8261
/*
 * emlxs_sli4_post_sgl_pages() - Register each allocated XRI's SGL page
 * with the firmware.
 *
 * Walks every XRI extent and issues non-embedded SLI_CONFIG mailbox
 * commands (FCOE_OPCODE_CFG_POST_SGL_PAGES), batching as many XRIs per
 * command as the non-embedded payload buffer allows.  The caller-supplied
 * mbq is reused for each command.
 *
 * Returns 0 on success, EIO on allocation or mailbox failure.
 */
static int
emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4 *mb = (MAILBOX4 *)mbq;
	emlxs_port_t *port = &PPORT;
	XRIobj_t *xrip;
	MATCHMAP *mp;
	mbox_req_hdr_t *hdr_req;
	uint32_t i;
	uint32_t cnt;		/* XRIs remaining in the current extent */
	uint32_t xri_cnt;	/* max XRIs that fit in one command */
	uint32_t j;
	uint32_t size;
	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;

	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
	mbq->bp = NULL;
	mbq->mbox_cmpl = NULL;

	/* Buffer for the non-embedded request payload (DMA-able) */
	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unable to POST_SGL. Mailbox cmd=%x ",
		    mb->mbxCommand);
		return (EIO);
	}
	mbq->nonembed = (void *)mp;

	/*
	 * Signifies a non embedded command
	 */
	mb->un.varSLIConfig.be.embedded = 0;
	mb->mbxCommand = MBX_SLI_CONFIG;
	mb->mbxOwner = OWN_HOST;

	/* Request header sits at the front of the payload buffer */
	hdr_req = (mbox_req_hdr_t *)mp->virt;
	post_sgl =
	    (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);

	xrip = hba->sli.sli4.XRIp;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.XRIExtCount; j++) {
		cnt = hba->sli.sli4.XRIExtSize;
		while (cnt) {
			/* XRI 0 appears to mark an unused slot -- skip it */
			if (xrip->XRI == 0) {
				cnt--;
				xrip++;
				continue;
			}

			/* Rebuild header + payload for this batch */
			bzero((void *) hdr_req, mp->size);
			size = mp->size - IOCTL_HEADER_SZ;

			mb->un.varSLIConfig.be.payload_length =
			    mp->size;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
			    IOCTL_SUBSYSTEM_FCOE;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
			    FCOE_OPCODE_CFG_POST_SGL_PAGES;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;

			hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
			hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
			hdr_req->timeout = 0;
			hdr_req->req_length = size;

			/* Batch starts at the current XRI */
			post_sgl->params.request.xri_count = 0;
			post_sgl->params.request.xri_start = xrip->XRI;

			/* How many page entries fit in the payload */
			xri_cnt = (size -
			    sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
			    sizeof (FCOE_SGL_PAGES);

			/* Fill page entries for consecutive XRIs */
			for (i = 0; (i < xri_cnt) && cnt; i++) {
				post_sgl->params.request.xri_count++;
				post_sgl->params.request.pages[i].\
sgl_page0.addrLow =
				    PADDR_LO(xrip->SGList->phys);
				post_sgl->params.request.pages[i].\
sgl_page0.addrHigh =
				    PADDR_HI(xrip->SGList->phys);

				cnt--;
				xrip++;
			}

			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "Unable to POST_SGL. Mailbox cmd=%x "
				    "status=%x XRI cnt:%d start:%d",
				    mb->mbxCommand, mb->mbxStatus,
				    post_sgl->params.request.xri_count,
				    post_sgl->params.request.xri_start);
				emlxs_mem_buf_free(hba, mp);
				mbq->nonembed = NULL;
				return (EIO);
			}
		}
	}

	emlxs_mem_buf_free(hba, mp);
	mbq->nonembed = NULL;
	return (0);

} /* emlxs_sli4_post_sgl_pages() */
8369
8370
/*
 * emlxs_sli4_post_hdr_tmplates() - Post RPI header templates to the
 * firmware, one embedded SLI_CONFIG mailbox command per RPI extent.
 *
 * No-op (returns 0) when the firmware does not require header templates
 * (param.HDRR clear).  Template memory was allocated contiguously at
 * hba->sli.sli4.HeaderTmplate; each extent consumes num_pages 4KB pages
 * of it, advancing 'addr' as it goes.
 *
 * Returns 0 on success, EIO on mailbox failure.
 */
static int
emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4 *mb = (MAILBOX4 *)mbq;
	emlxs_port_t *port = &PPORT;
	uint32_t j;	/* first holds byte count, then reused as loop index */
	uint32_t k;
	uint64_t addr;
	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
	uint16_t num_pages;

	if (!(hba->sli.sli4.param.HDRR)) {
		return (0);
	}

	/* Bytes per extent */
	j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);

	/* Pages required per extent (page == 4096 bytes), rounded up */
	num_pages = (j/4096) + ((j%4096)? 1:0);

	addr = hba->sli.sli4.HeaderTmplate.phys;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.RPIExtCount; j++) {
		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
		mbq->bp = NULL;
		mbq->mbox_cmpl = NULL;

		/*
		 * Signifies an embedded command
		 */
		mb->un.varSLIConfig.be.embedded = 1;

		mb->mbxCommand = MBX_SLI_CONFIG;
		mb->mbxOwner = OWN_HOST;
		mb->un.varSLIConfig.be.payload_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
		    IOCTL_SUBSYSTEM_FCOE;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
		    FCOE_OPCODE_POST_HDR_TEMPLATES;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);

		/* Payload is embedded directly in the mailbox */
		post_hdr =
		    (IOCTL_FCOE_POST_HDR_TEMPLATES *)
		    &mb->un.varSLIConfig.payload;
		post_hdr->params.request.num_pages = num_pages;
		post_hdr->params.request.rpi_offset = hba->sli.sli4.RPIBase[j];

		/* Hand the firmware this extent's template pages */
		for (k = 0; k < num_pages; k++) {
			post_hdr->params.request.pages[k].addrLow =
			    PADDR_LO(addr);
			post_hdr->params.request.pages[k].addrHigh =
			    PADDR_HI(addr);
			addr += 4096;
		}

		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x "
			    "status=%x ",
			    mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		emlxs_data_dump(port, "POST_HDR", (uint32_t *)mb, 18, 0);
	}

	return (0);

} /* emlxs_sli4_post_hdr_tmplates() */
8445
8446
8447 static int
8448 emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
8449 {
8450 MAILBOX4 *mb = (MAILBOX4 *)mbq;
8451 emlxs_port_t *port = &PPORT;
8452 emlxs_config_t *cfg = &CFG;
8453 IOCTL_COMMON_EQ_CREATE *eq;
8454 IOCTL_COMMON_CQ_CREATE *cq;
8455 IOCTL_FCOE_WQ_CREATE *wq;
8456 IOCTL_FCOE_RQ_CREATE *rq;
8457 IOCTL_COMMON_MQ_CREATE *mq;
8458 IOCTL_COMMON_MQ_CREATE_EXT *mq_ext;
8459 emlxs_rqdbu_t rqdb;
8460 uint16_t i, j;
8461 uint16_t num_cq, total_cq;
8462 uint16_t num_wq, total_wq;
8463
8464 /*
8465 * The first CQ is reserved for ASYNC events,
8466 * the second is reserved for unsol rcv, the rest
8467 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
8468 */
8469
8470 total_cq = 0;
8471 total_wq = 0;
8472
8473 /* Create EQ's */
8474 for (i = 0; i < hba->intr_count; i++) {
8475 emlxs_mb_eq_create(hba, mbq, i);
8476 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8477 MBX_SUCCESS) {
8478 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
8479 "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
8480 i, mb->mbxCommand, mb->mbxStatus);
8481 return (EIO);
8482 }
8483 eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
8484 hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
8485 hba->sli.sli4.eq[i].lastwq = total_wq;
8486 hba->sli.sli4.eq[i].msix_vector = i;
8487
8488 emlxs_data_dump(port, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
8489 num_wq = cfg[CFG_NUM_WQ].current;
8490 num_cq = num_wq;
8491 if (i == 0) {
8492 /* One for RQ handling, one for mbox/event handling */
8493 num_cq += EMLXS_CQ_OFFSET_WQ;
8494 }
8495
8496 /* Create CQ's */
8497 for (j = 0; j < num_cq; j++) {
8498 /* Reuse mbq from previous mbox */
8499 bzero(mbq, sizeof (MAILBOXQ));
8500
8501 hba->sli.sli4.cq[total_cq].eqid =
8502 hba->sli.sli4.eq[i].qid;
8503
8504 emlxs_mb_cq_create(hba, mbq, total_cq);
8505 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8506 MBX_SUCCESS) {
8507 EMLXS_MSGF(EMLXS_CONTEXT,
8508 &emlxs_init_failed_msg, "Unable to Create "
8509 "CQ %d: Mailbox cmd=%x status=%x ",
8510 total_cq, mb->mbxCommand, mb->mbxStatus);
8511 return (EIO);
8512 }
8513 cq = (IOCTL_COMMON_CQ_CREATE *)
8514 &mb->un.varSLIConfig.payload;
8515 hba->sli.sli4.cq[total_cq].qid =
8516 cq->params.response.CQId;
8517
8518 switch (total_cq) {
8519 case EMLXS_CQ_MBOX:
8520 /* First CQ is for async event handling */
8521 hba->sli.sli4.cq[total_cq].type =
8522 EMLXS_CQ_TYPE_GROUP1;
8523 break;
8524
8525 case EMLXS_CQ_RCV:
8526 /* Second CQ is for unsol receive handling */
8527 hba->sli.sli4.cq[total_cq].type =
8528 EMLXS_CQ_TYPE_GROUP2;
8529 break;
8530
8531 default:
8532 /* Setup CQ to channel mapping */
8533 hba->sli.sli4.cq[total_cq].type =
8534 EMLXS_CQ_TYPE_GROUP2;
8535 hba->sli.sli4.cq[total_cq].channelp =
8536 &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
8537 break;
8538 }
8539 emlxs_data_dump(port, "CQX_CREATE", (uint32_t *)mb,
8540 18, 0);
8541 total_cq++;
8542 }
8543
8544 /* Create WQ's */
8545 for (j = 0; j < num_wq; j++) {
8546 /* Reuse mbq from previous mbox */
8547 bzero(mbq, sizeof (MAILBOXQ));
8548
8549 hba->sli.sli4.wq[total_wq].cqid =
8550 hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;
8551
8552 emlxs_mb_wq_create(hba, mbq, total_wq);
8553 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8554 MBX_SUCCESS) {
8555 EMLXS_MSGF(EMLXS_CONTEXT,
8556 &emlxs_init_failed_msg, "Unable to Create "
8557 "WQ %d: Mailbox cmd=%x status=%x ",
8558 total_wq, mb->mbxCommand, mb->mbxStatus);
8559 return (EIO);
8560 }
8561 wq = (IOCTL_FCOE_WQ_CREATE *)
8562 &mb->un.varSLIConfig.payload;
8563 hba->sli.sli4.wq[total_wq].qid =
8564 wq->params.response.WQId;
8565
8566 hba->sli.sli4.wq[total_wq].cqid =
8567 hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
8568 emlxs_data_dump(port, "WQ_CREATE", (uint32_t *)mb,
8569 18, 0);
8570 total_wq++;
8571 }
8572 hba->last_msiid = i;
8573 }
8574
8575 /* We assume 1 RQ pair will handle ALL incoming data */
8576 /* Create RQs */
8577 for (i = 0; i < EMLXS_MAX_RQS; i++) {
8578 /* Personalize the RQ */
8579 switch (i) {
8580 case 0:
8581 hba->sli.sli4.rq[i].cqid =
8582 hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
8583 break;
8584 case 1:
8585 hba->sli.sli4.rq[i].cqid =
8586 hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
8587 break;
8588 default:
8589 hba->sli.sli4.rq[i].cqid = 0xffff;
8590 }
8591
8592 /* Reuse mbq from previous mbox */
8593 bzero(mbq, sizeof (MAILBOXQ));
8594
8595 emlxs_mb_rq_create(hba, mbq, i);
8596 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8597 MBX_SUCCESS) {
8598 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
8599 "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
8600 i, mb->mbxCommand, mb->mbxStatus);
8601 return (EIO);
8602 }
8603
8604 rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
8605 hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
8606 emlxs_data_dump(port, "RQ CREATE", (uint32_t *)mb, 18, 0);
8607
8608 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8609 "RQ CREATE: rq[%d].qid=%d cqid=%d",
8610 i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);
8611
8612 /* Initialize the host_index */
8613 hba->sli.sli4.rq[i].host_index = 0;
8614
8615 /* If Data queue was just created, */
8616 /* then post buffers using the header qid */
8617 if ((i & 0x1)) {
8618 /* Ring the RQ doorbell to post buffers */
8619 rqdb.word = 0;
8620 rqdb.db.Qid = hba->sli.sli4.rq[i-1].qid;
8621 rqdb.db.NumPosted = RQB_COUNT;
8622
8623 emlxs_sli4_write_rqdb(hba, rqdb.word);
8624
8625 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8626 "RQ CREATE: Doorbell rang: qid=%d count=%d",
8627 hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
8628 }
8629 }
8630
8631 /* Create MQ */
8632
8633 /* Personalize the MQ */
8634 hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;
8635
8636 /* Reuse mbq from previous mbox */
8637 bzero(mbq, sizeof (MAILBOXQ));
8638
8639 emlxs_mb_mq_create_ext(hba, mbq);
8640 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8641 MBX_SUCCESS) {
8642 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
8643 "Unable to Create MQ_EXT %d: Mailbox cmd=%x status=%x ",
8644 i, mb->mbxCommand, mb->mbxStatus);
8645
8646 /* Reuse mbq from previous mbox */
8647 bzero(mbq, sizeof (MAILBOXQ));
8648
8649 emlxs_mb_mq_create(hba, mbq);
8650 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8651 MBX_SUCCESS) {
8652 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
8653 "Unable to Create MQ %d: Mailbox cmd=%x status=%x ",
8654 i, mb->mbxCommand, mb->mbxStatus);
8655 return (EIO);
8656 }
8657
8658 mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
8659 hba->sli.sli4.mq.qid = mq->params.response.MQId;
8660 return (0);
8661 }
8662
8663 mq_ext = (IOCTL_COMMON_MQ_CREATE_EXT *)&mb->un.varSLIConfig.payload;
8664 hba->sli.sli4.mq.qid = mq_ext->params.response.MQId;
8665 return (0);
8666
8667 } /* emlxs_sli4_create_queues() */
8668
8669
8670 extern void
8671 emlxs_sli4_timer(emlxs_hba_t *hba)
8672 {
8673 /* Perform SLI4 level timer checks */
8674
8675 emlxs_fcf_timer_notify(hba);
8676
8677 emlxs_sli4_timer_check_mbox(hba);
8678
8679 return;
8680
8681 } /* emlxs_sli4_timer() */
8682
8683
/*
 * emlxs_sli4_timer_check_mbox() - Detect a timed-out mailbox command.
 *
 * If the mailbox timer has expired, logs the timeout (with detail
 * depending on how the command was issued), marks the HBA in error,
 * cleans up the mailbox state (waking any sleeping/polling threads),
 * and spawns the adapter shutdown thread.  No-op when timeout handling
 * is disabled via configuration or the timer has not expired.
 */
static void
emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* The first to service the mbox queue will clear the timer */
	hba->mbox_timer = 0;

	/* Pick up the active mailbox command, if any */
	if (hba->mbox_queue_flag) {
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	if (mb) {
		/* Log the timeout according to how the cmd was issued */
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	return;

} /* emlxs_sli4_timer_check_mbox() */
8760
8761 static void
8762 emlxs_sli4_gpio_timer_start(emlxs_hba_t *hba)
8763 {
8764 mutex_enter(&hba->gpio_lock);
8765
8766 if (!hba->gpio_timer) {
8767 hba->gpio_timer = timeout(emlxs_sli4_gpio_timer, (void *)hba,
8768 drv_usectohz(100000));
8769 }
8770
8771 mutex_exit(&hba->gpio_lock);
8772
8773 } /* emlxs_sli4_gpio_timer_start() */
8774
8775 static void
8776 emlxs_sli4_gpio_timer_stop(emlxs_hba_t *hba)
8777 {
8778 mutex_enter(&hba->gpio_lock);
8779
8780 if (hba->gpio_timer) {
8781 (void) untimeout(hba->gpio_timer);
8782 hba->gpio_timer = 0;
8783 }
8784
8785 mutex_exit(&hba->gpio_lock);
8786
8787 delay(drv_usectohz(300000));
8788 } /* emlxs_sli4_gpio_timer_stop() */
8789
8790 static void
8791 emlxs_sli4_gpio_timer(void *arg)
8792 {
8793 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
8794
8795 mutex_enter(&hba->gpio_lock);
8796
8797 if (hba->gpio_timer) {
8798 emlxs_sli4_check_gpio(hba);
8799 hba->gpio_timer = timeout(emlxs_sli4_gpio_timer, (void *)hba,
8800 drv_usectohz(100000));
8801 }
8802
8803 mutex_exit(&hba->gpio_lock);
8804 } /* emlxs_sli4_gpio_timer() */
8805
8806 static void
8807 emlxs_sli4_check_gpio(emlxs_hba_t *hba)
8808 {
8809 hba->gpio_desired = 0;
8810
8811 if (hba->flag & FC_GPIO_LINK_UP) {
8812 if (hba->io_active)
8813 hba->gpio_desired |= EMLXS_GPIO_ACT;
8814
8815 /* This is model specific to ATTO gen5 lancer cards */
8816
8817 switch (hba->linkspeed) {
8818 case LA_4GHZ_LINK:
8819 hba->gpio_desired |= EMLXS_GPIO_LO;
8820 break;
8821
8822 case LA_8GHZ_LINK:
8823 hba->gpio_desired |= EMLXS_GPIO_HI;
8824 break;
8825
8826 case LA_16GHZ_LINK:
8827 hba->gpio_desired |=
8828 EMLXS_GPIO_LO | EMLXS_GPIO_HI;
8829 break;
8830 }
8831 }
8832
8833 if (hba->gpio_current != hba->gpio_desired) {
8834 emlxs_port_t *port = &PPORT;
8835 uint8_t pin;
8836 uint8_t pinval;
8837 MAILBOXQ *mbq;
8838 uint32_t rval;
8839
8840 if (!emlxs_sli4_fix_gpio(hba, &pin, &pinval))
8841 return;
8842
8843 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
8844 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
8845 "Unable to allocate GPIO mailbox.");
8846
8847 hba->gpio_bit = 0;
8848 return;
8849 }
8850
8851 emlxs_mb_gpio_write(hba, mbq, pin, pinval);
8852 mbq->mbox_cmpl = emlxs_sli4_fix_gpio_mbcmpl;
8853
8854 rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
8855
8856 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
8857 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
8858 "Unable to start GPIO mailbox.");
8859
8860 hba->gpio_bit = 0;
8861 emlxs_mem_put(hba, MEM_MBOX, mbq);
8862 return;
8863 }
8864 }
8865 } /* emlxs_sli4_check_gpio */
8866
8867 static uint32_t
8868 emlxs_sli4_fix_gpio(emlxs_hba_t *hba, uint8_t *pin, uint8_t *pinval)
8869 {
8870 uint8_t dif = hba->gpio_desired ^ hba->gpio_current;
8871 uint8_t bit;
8872 uint8_t i;
8873
8874 /* Get out if no pins to set a GPIO request is pending */
8875
8876 if (dif == 0 || hba->gpio_bit)
8877 return (0);
8878
8879 /* Fix one pin at a time */
8880
8881 bit = dif & -dif;
8882 hba->gpio_bit = bit;
8883 dif = hba->gpio_current ^ bit;
8884
8885 for (i = EMLXS_GPIO_PIN_LO; bit > 1; ++i) {
8886 dif >>= 1;
8887 bit >>= 1;
8888 }
8889
8890 /* Pins are active low so invert the bit value */
8891
8892 *pin = hba->gpio_pin[i];
8893 *pinval = ~dif & bit;
8894
8895 return (1);
8896 } /* emlxs_sli4_fix_gpio */
8897
/*
 * emlxs_sli4_fix_gpio_mbcmpl() - Completion handler for a GPIO write
 * mailbox.
 *
 * On firmware success, commits the pending bit (hba->gpio_bit) into
 * gpio_current.  Then, if more pins still differ from the desired
 * state, issues the next GPIO write mailbox.  Always returns 0.
 */
static uint32_t
emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb;
	uint8_t pin;
	uint8_t pinval;

	mb = (MAILBOX *)mbq;

	mutex_enter(&hba->gpio_lock);

	/* Commit the pin change only if the firmware accepted it */
	if (mb->mbxStatus == 0)
		hba->gpio_current ^= hba->gpio_bit;

	hba->gpio_bit = 0;

	if (emlxs_sli4_fix_gpio(hba, &pin, &pinval)) {
		emlxs_port_t *port = &PPORT;
		MAILBOXQ *mbq;	/* NOTE: shadows the completed mbq above */
		uint32_t rval;

		/*
		 * We're not using the mb_retry routine here because for some
		 * reason it doesn't preserve the completion routine. Just let
		 * this mbox cmd fail to start here and run when the mailbox
		 * is no longer busy.
		 */

		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Unable to allocate GPIO mailbox.");

			hba->gpio_bit = 0;
			goto done;
		}

		emlxs_mb_gpio_write(hba, mbq, pin, pinval);
		mbq->mbox_cmpl = emlxs_sli4_fix_gpio_mbcmpl;

		rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);

		if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Unable to start GPIO mailbox.");

			hba->gpio_bit = 0;
			emlxs_mem_put(hba, MEM_MBOX, mbq);
			goto done;
		}
	}

done:
	mutex_exit(&hba->gpio_lock);

	return (0);
}
8954
8955 extern void
8956 emlxs_data_dump(emlxs_port_t *port, char *str, uint32_t *iptr, int cnt, int err)
8957 {
8958 void *msg;
8959
8960 if (!port || !str || !iptr || !cnt) {
8961 return;
8962 }
8963
8964 if (err) {
8965 msg = &emlxs_sli_err_msg;
8966 } else {
8967 msg = &emlxs_sli_detail_msg;
8968 }
8969
8970 if (cnt) {
8971 EMLXS_MSGF(EMLXS_CONTEXT, msg,
8972 "%s00: %08x %08x %08x %08x %08x %08x", str, *iptr,
8973 *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
8974 }
8975 if (cnt > 6) {
8976 EMLXS_MSGF(EMLXS_CONTEXT, msg,
8977 "%s06: %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
8978 *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
8979 }
8980 if (cnt > 12) {
8981 EMLXS_MSGF(EMLXS_CONTEXT, msg,
8982 "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
8983 *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
8984 }
8985 if (cnt > 18) {
8986 EMLXS_MSGF(EMLXS_CONTEXT, msg,
8987 "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
8988 *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
8989 }
8990 if (cnt > 24) {
8991 EMLXS_MSGF(EMLXS_CONTEXT, msg,
8992 "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
8993 *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
8994 }
8995 if (cnt > 30) {
8996 EMLXS_MSGF(EMLXS_CONTEXT, msg,
8997 "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
8998 *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
8999 }
9000 if (cnt > 36) {
9001 EMLXS_MSGF(EMLXS_CONTEXT, msg,
9002 "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
9003 *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
9004 }
9005
9006 } /* emlxs_data_dump() */
9007
9008
/*
 * emlxs_ue_dump() - Log the adapter's unrecoverable-error registers,
 * tagged with the caller-supplied string.
 *
 * The register set read depends on the SLI interface type:
 *   type 0: UE low/high via ERR1/ERR2 plus the two UE_STATUS_ONLINE
 *           words from PCI config space.
 *   type 2: the port STATUS register plus ERR1/ERR2 via BAR0.
 */
extern void
emlxs_ue_dump(emlxs_hba_t *hba, char *str)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t on1;
	uint32_t on2;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		/* Online (mask) words live in PCI config space */
		on1 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
		on2 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
		    ue_l, ue_h, on1, on2);
		break;

	case SLI_INTF_IF_TYPE_2:
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: status:%08x err1:%08x err2:%08x", str,
		    status, ue_l, ue_h);

		break;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_ue_dump() */
9058
9059
/*
 * emlxs_sli4_poll_erratt() - Poll the adapter error registers for an
 * unrecoverable error.
 *
 * Interface type 0 compares the UE registers against the UE masks;
 * type 2 inspects the port STATUS register.  On error the HBA state
 * is set to FC_ERROR, the chip queues are flushed, and either a
 * restart (error == 1, reset-needed) or a full shutdown (error == 2)
 * thread is spawned.  No-op if a hardware error was already latched.
 */
static void
emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t error = 0;	/* 0 = none, 1 = restart, 2 = shutdown */

	if (hba->flag & FC_HARDWARE_ERROR) {
		return;
	}

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		/* Any unmasked UE bit, or a latched SW error, is fatal */
		if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
		    (~hba->sli.sli4.ue_mask_hi & ue_h) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
			    "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
			    "maskHigh:%08x flag:%08x",
			    ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
			    hba->sli.sli4.ue_mask_hi, hba->sli.sli4.flag);

			error = 2;
		}
		break;

	case SLI_INTF_IF_TYPE_2:
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		if ((status & SLI_STATUS_ERROR) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* Reset-needed errors are recoverable via restart */
			error = (status & SLI_STATUS_RESET_NEEDED)? 1:2;

			if (error == 1) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_hardware_error_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			}
		}
		break;
	}

	if (error == 2) {
		/* Unrecoverable: take the adapter down */
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	} else if (error == 1) {
		/* Recoverable: restart the adapter */
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_restart_thread, 0, 0);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_sli4_poll_erratt() */
9143
9144
9145 static uint32_t
9146 emlxs_sli4_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
9147 emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
9148 {
9149 emlxs_hba_t *hba = HBA;
9150 NODELIST *node;
9151 RPIobj_t *rpip;
9152 uint32_t rval;
9153
9154 /* Check for invalid node ids to register */
9155 if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
9156 return (1);
9157 }
9158
9159 if (did & 0xff000000) {
9160 return (1);
9161 }
9162
9163 /* We don't register our own did */
9164 if ((did == port->did) && (!(hba->flag & FC_LOOPBACK_MODE))) {
9165 return (1);
9166 }
9167
9168 if (did != FABRIC_DID) {
9169 if ((rval = emlxs_mb_check_sparm(hba, param))) {
9170 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
9171 "Invalid service parameters. did=%06x rval=%d", did,
9172 rval);
9173
9174 return (1);
9175 }
9176 }
9177
9178 /* Check if the node limit has been reached */
9179 if (port->node_count >= hba->max_nodes) {
9180 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
9181 "Limit reached. did=%06x count=%d", did,
9182 port->node_count);
9183
9184 return (1);
9185 }
9186
9187 node = emlxs_node_find_did(port, did, 1);
9188 rpip = EMLXS_NODE_TO_RPI(port, node);
9189
9190 rval = emlxs_rpi_online_notify(port, rpip, did, param, (void *)sbp,
9191 (void *)ubp, (void *)iocbq);
9192
9193 return (rval);
9194
9195 } /* emlxs_sli4_reg_did() */
9196
9197
9198 static uint32_t
9199 emlxs_sli4_unreg_node(emlxs_port_t *port, emlxs_node_t *node,
9200 emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
9201 {
9202 RPIobj_t *rpip;
9203 uint32_t rval;
9204
9205 if (!node) {
9206 /* Unreg all nodes */
9207 (void) emlxs_sli4_unreg_all_nodes(port);
9208 return (1);
9209 }
9210
9211 /* Check for base node */
9212 if (node == &port->node_base) {
9213 /* Just flush base node */
9214 (void) emlxs_tx_node_flush(port, &port->node_base,
9215 0, 0, 0);
9216
9217 (void) emlxs_chipq_node_flush(port, 0,
9218 &port->node_base, 0);
9219
9220 port->did = 0;
9221
9222 /* Return now */
9223 return (1);
9224 }
9225
9226 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9227 "unreg_node:%p did=%x rpi=%d",
9228 node, node->nlp_DID, node->nlp_Rpi);
9229
9230 rpip = EMLXS_NODE_TO_RPI(port, node);
9231
9232 if (!rpip) {
9233 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9234 "unreg_node:%p did=%x rpi=%d. RPI not found.",
9235 node, node->nlp_DID, node->nlp_Rpi);
9236
9237 emlxs_node_rm(port, node);
9238 return (1);
9239 }
9240
9241 rval = emlxs_rpi_offline_notify(port, rpip, (void *)sbp, (void *)ubp,
9242 (void *)iocbq);
9243
9244 return (rval);
9245
9246 } /* emlxs_sli4_unreg_node() */
9247
9248
/*
 * emlxs_sli4_unreg_all_nodes() - Unregister every node on the port.
 *
 * Uses a tag-and-sweep scheme: first tag every node while holding the
 * node rwlock, then repeatedly locate one still-tagged node, drop the
 * lock, and unregister it.  This avoids holding node_rwlock across
 * emlxs_sli4_unreg_node().  Always returns 0.
 */
extern uint32_t
emlxs_sli4_unreg_all_nodes(emlxs_port_t *port)
{
	NODELIST *nlp;
	int i;
	uint32_t found;

	/* Set the node tags */
	/* We will process all nodes with this tag */
	rw_enter(&port->node_rwlock, RW_READER);
	found = 0;
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			found = 1;
			nlp->nlp_tag = 1;
			nlp = nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);

	/* No nodes at all: nothing to sweep */
	if (!found) {
		return (0);
	}

	/* Sweep: process one tagged node per pass until none remain */
	for (;;) {
		rw_enter(&port->node_rwlock, RW_READER);
		found = 0;
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if (!nlp->nlp_tag) {
					nlp = nlp->nlp_list_next;
					continue;
				}
				/* Untag so this node is visited only once */
				nlp->nlp_tag = 0;
				found = 1;
				break;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		if (!found) {
			break;
		}

		/*
		 * NOTE(review): nlp is used after node_rwlock is dropped;
		 * presumably only this unreg path removes nodes here --
		 * confirm against other node-removal paths.
		 */
		(void) emlxs_sli4_unreg_node(port, nlp, 0, 0, 0);
	}

	return (0);

} /* emlxs_sli4_unreg_all_nodes() */
9305
9306
9307 static void
9308 emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
9309 {
9310 emlxs_port_t *port = &PPORT;
9311
9312 /* Handle link down */
9313 if ((cqe->un.link.link_status == ASYNC_EVENT_LOGICAL_LINK_DOWN) ||
9314 (cqe->un.link.link_status == ASYNC_EVENT_PHYS_LINK_DOWN)) {
9315 (void) emlxs_fcf_linkdown_notify(port);
9316
9317 mutex_enter(&EMLXS_PORT_LOCK);
9318 hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9319 mutex_exit(&EMLXS_PORT_LOCK);
9320 return;
9321 }
9322
9323 /* Link is up */
9324
9325 /* Set linkspeed */
9326 switch (cqe->un.link.port_speed) {
9327 case PHY_1GHZ_LINK:
9328 hba->linkspeed = LA_1GHZ_LINK;
9329 break;
9330 case PHY_10GHZ_LINK:
9331 hba->linkspeed = LA_10GHZ_LINK;
9332 break;
9333 default:
9334 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9335 "sli4_handle_fcoe_link_event: Unknown link speed=%x.",
9336 cqe->un.link.port_speed);
9337 hba->linkspeed = 0;
9338 break;
9339 }
9340
9341 /* Set qos_linkspeed */
9342 hba->qos_linkspeed = cqe->un.link.qos_link_speed;
9343
9344 /* Set topology */
9345 hba->topology = TOPOLOGY_PT_PT;
9346
9347 mutex_enter(&EMLXS_PORT_LOCK);
9348 hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9349 mutex_exit(&EMLXS_PORT_LOCK);
9350
9351 (void) emlxs_fcf_linkup_notify(port);
9352
9353 return;
9354
9355 } /* emlxs_sli4_handle_fcoe_link_event() */
9356
9357
9358 static void
9359 emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
9360 {
9361 emlxs_port_t *port = &PPORT;
9362
9363 /* Handle link down */
9364 if (cqe->un.fc.att_type == ATT_TYPE_LINK_DOWN) {
9365 (void) emlxs_fcf_linkdown_notify(port);
9366
9367 mutex_enter(&EMLXS_PORT_LOCK);
9368 hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9369 mutex_exit(&EMLXS_PORT_LOCK);
9370 return;
9371 }
9372
9373 /* Link is up */
9374
9375 /* Set linkspeed */
9376 switch (cqe->un.fc.port_speed) {
9377 case 1:
9378 hba->linkspeed = LA_1GHZ_LINK;
9379 break;
9380 case 2:
9381 hba->linkspeed = LA_2GHZ_LINK;
9382 break;
9383 case 4:
9384 hba->linkspeed = LA_4GHZ_LINK;
9385 break;
9386 case 8:
9387 hba->linkspeed = LA_8GHZ_LINK;
9388 break;
9389 case 10:
9390 hba->linkspeed = LA_10GHZ_LINK;
9391 break;
9392 case 16:
9393 hba->linkspeed = LA_16GHZ_LINK;
9394 break;
9395 case 32:
9396 hba->linkspeed = LA_32GHZ_LINK;
9397 break;
9398 default:
9399 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9400 "sli4_handle_fc_link_att: Unknown link speed=%x.",
9401 cqe->un.fc.port_speed);
9402 hba->linkspeed = 0;
9403 break;
9404 }
9405
9406 /* Set qos_linkspeed */
9407 hba->qos_linkspeed = cqe->un.fc.link_speed;
9408
9409 /* Set topology */
9410 hba->topology = cqe->un.fc.topology;
9411
9412 mutex_enter(&EMLXS_PORT_LOCK);
9413 hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9414 mutex_exit(&EMLXS_PORT_LOCK);
9415
9416 (void) emlxs_fcf_linkup_notify(port);
9417
9418 return;
9419
9420 } /* emlxs_sli4_handle_fc_link_att() */
9421
9422
9423 static int
9424 emlxs_sli4_init_extents(emlxs_hba_t *hba, MAILBOXQ *mbq)
9425 {
9426 emlxs_port_t *port = &PPORT;
9427 MAILBOX4 *mb4;
9428 IOCTL_COMMON_EXTENTS *ep;
9429 uint32_t i;
9430 uint32_t ExtentCnt;
9431
9432 if (!(hba->sli.sli4.param.EXT)) {
9433 return (0);
9434 }
9435
9436 mb4 = (MAILBOX4 *) mbq;
9437
9438 /* Discover XRI Extents */
9439 bzero(mbq, sizeof (MAILBOXQ));
9440 emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_XRI);
9441
9442 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9443 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9444 "Unable to discover XRI extents. Mailbox cmd=%x status=%x",
9445 mb4->mbxCommand, mb4->mbxStatus);
9446
9447 return (EIO);
9448 }
9449
9450 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9451 hba->sli.sli4.XRIExtSize = ep->params.response.ExtentSize;
9452 ExtentCnt = ep->params.response.ExtentCnt;
9453
9454 /* Allocate XRI Extents */
9455 bzero(mbq, sizeof (MAILBOXQ));
9456 emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_XRI, ExtentCnt);
9457
9458 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9459 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9460 "Unable to allocate XRI extents. Mailbox cmd=%x status=%x",
9461 mb4->mbxCommand, mb4->mbxStatus);
9462
9463 return (EIO);
9464 }
9465 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9466
9467 bcopy((uint8_t *)ep->params.response.RscId,
9468 (uint8_t *)hba->sli.sli4.XRIBase,
9469 (ep->params.response.ExtentCnt * sizeof (uint16_t)));
9470
9471 hba->sli.sli4.XRIExtCount = ep->params.response.ExtentCnt;
9472 hba->sli.sli4.XRICount = hba->sli.sli4.XRIExtCount *
9473 hba->sli.sli4.XRIExtSize;
9474
9475 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9476 "XRI Ext: size=%d cnt=%d/%d",
9477 hba->sli.sli4.XRIExtSize,
9478 hba->sli.sli4.XRIExtCount, ExtentCnt);
9479
9480 for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
9481 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9482 "XRI Ext%d: %d, %d, %d, %d", i,
9483 hba->sli.sli4.XRIBase[i],
9484 hba->sli.sli4.XRIBase[i+1],
9485 hba->sli.sli4.XRIBase[i+2],
9486 hba->sli.sli4.XRIBase[i+3]);
9487 }
9488
9489
9490 /* Discover RPI Extents */
9491 bzero(mbq, sizeof (MAILBOXQ));
9492 emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_RPI);
9493
9494 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9495 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9496 "Unable to discover RPI extents. Mailbox cmd=%x status=%x",
9497 mb4->mbxCommand, mb4->mbxStatus);
9498
9499 return (EIO);
9500 }
9501
9502 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9503 hba->sli.sli4.RPIExtSize = ep->params.response.ExtentSize;
9504 ExtentCnt = ep->params.response.ExtentCnt;
9505
9506 /* Allocate RPI Extents */
9507 bzero(mbq, sizeof (MAILBOXQ));
9508 emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_RPI, ExtentCnt);
9509
9510 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9511 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9512 "Unable to allocate RPI extents. Mailbox cmd=%x status=%x",
9513 mb4->mbxCommand, mb4->mbxStatus);
9514
9515 return (EIO);
9516 }
9517 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9518
9519 bcopy((uint8_t *)ep->params.response.RscId,
9520 (uint8_t *)hba->sli.sli4.RPIBase,
9521 (ep->params.response.ExtentCnt * sizeof (uint16_t)));
9522
9523 hba->sli.sli4.RPIExtCount = ep->params.response.ExtentCnt;
9524 hba->sli.sli4.RPICount = hba->sli.sli4.RPIExtCount *
9525 hba->sli.sli4.RPIExtSize;
9526
9527 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9528 "RPI Ext: size=%d cnt=%d/%d",
9529 hba->sli.sli4.RPIExtSize,
9530 hba->sli.sli4.RPIExtCount, ExtentCnt);
9531
9532 for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
9533 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9534 "RPI Ext%d: %d, %d, %d, %d", i,
9535 hba->sli.sli4.RPIBase[i],
9536 hba->sli.sli4.RPIBase[i+1],
9537 hba->sli.sli4.RPIBase[i+2],
9538 hba->sli.sli4.RPIBase[i+3]);
9539 }
9540
9541
9542 /* Discover VPI Extents */
9543 bzero(mbq, sizeof (MAILBOXQ));
9544 emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VPI);
9545
9546 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9547 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9548 "Unable to discover VPI extents. Mailbox cmd=%x status=%x",
9549 mb4->mbxCommand, mb4->mbxStatus);
9550
9551 return (EIO);
9552 }
9553
9554 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9555 hba->sli.sli4.VPIExtSize = ep->params.response.ExtentSize;
9556 ExtentCnt = ep->params.response.ExtentCnt;
9557
9558 /* Allocate VPI Extents */
9559 bzero(mbq, sizeof (MAILBOXQ));
9560 emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VPI, ExtentCnt);
9561
9562 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9563 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9564 "Unable to allocate VPI extents. Mailbox cmd=%x status=%x",
9565 mb4->mbxCommand, mb4->mbxStatus);
9566
9567 return (EIO);
9568 }
9569 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9570
9571 bcopy((uint8_t *)ep->params.response.RscId,
9572 (uint8_t *)hba->sli.sli4.VPIBase,
9573 (ep->params.response.ExtentCnt * sizeof (uint16_t)));
9574
9575 hba->sli.sli4.VPIExtCount = ep->params.response.ExtentCnt;
9576 hba->sli.sli4.VPICount = hba->sli.sli4.VPIExtCount *
9577 hba->sli.sli4.VPIExtSize;
9578
9579 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9580 "VPI Ext: size=%d cnt=%d/%d",
9581 hba->sli.sli4.VPIExtSize,
9582 hba->sli.sli4.VPIExtCount, ExtentCnt);
9583
9584 for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
9585 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9586 "VPI Ext%d: %d, %d, %d, %d", i,
9587 hba->sli.sli4.VPIBase[i],
9588 hba->sli.sli4.VPIBase[i+1],
9589 hba->sli.sli4.VPIBase[i+2],
9590 hba->sli.sli4.VPIBase[i+3]);
9591 }
9592
9593 /* Discover VFI Extents */
9594 bzero(mbq, sizeof (MAILBOXQ));
9595 emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VFI);
9596
9597 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9598 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9599 "Unable to discover VFI extents. Mailbox cmd=%x status=%x",
9600 mb4->mbxCommand, mb4->mbxStatus);
9601
9602 return (EIO);
9603 }
9604
9605 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9606 hba->sli.sli4.VFIExtSize = ep->params.response.ExtentSize;
9607 ExtentCnt = ep->params.response.ExtentCnt;
9608
9609 /* Allocate VFI Extents */
9610 bzero(mbq, sizeof (MAILBOXQ));
9611 emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VFI, ExtentCnt);
9612
9613 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
9614 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
9615 "Unable to allocate VFI extents. Mailbox cmd=%x status=%x",
9616 mb4->mbxCommand, mb4->mbxStatus);
9617
9618 return (EIO);
9619 }
9620 ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
9621
9622 bcopy((uint8_t *)ep->params.response.RscId,
9623 (uint8_t *)hba->sli.sli4.VFIBase,
9624 (ep->params.response.ExtentCnt * sizeof (uint16_t)));
9625
9626 hba->sli.sli4.VFIExtCount = ep->params.response.ExtentCnt;
9627 hba->sli.sli4.VFICount = hba->sli.sli4.VFIExtCount *
9628 hba->sli.sli4.VFIExtSize;
9629
9630 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9631 "VFI Ext: size=%d cnt=%d/%d",
9632 hba->sli.sli4.VFIExtSize,
9633 hba->sli.sli4.VFIExtCount, ExtentCnt);
9634
9635 for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
9636 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
9637 "VFI Ext%d: %d, %d, %d, %d", i,
9638 hba->sli.sli4.VFIBase[i],
9639 hba->sli.sli4.VFIBase[i+1],
9640 hba->sli.sli4.VFIBase[i+2],
9641 hba->sli.sli4.VFIBase[i+3]);
9642 }
9643
9644 return (0);
9645
9646 } /* emlxs_sli4_init_extents() */
9647
9648
9649 extern uint32_t
9650 emlxs_sli4_index_to_rpi(emlxs_hba_t *hba, uint32_t index)
9651 {
9652 uint32_t i;
9653 uint32_t j;
9654 uint32_t rpi;
9655
9656 i = index / hba->sli.sli4.RPIExtSize;
9657 j = index % hba->sli.sli4.RPIExtSize;
9658 rpi = hba->sli.sli4.RPIBase[i] + j;
9659
9660 return (rpi);
9661
9662 } /* emlxs_sli4_index_to_rpi */
9663
9664
9665 extern uint32_t
9666 emlxs_sli4_rpi_to_index(emlxs_hba_t *hba, uint32_t rpi)
9667 {
9668 uint32_t i;
9669 uint32_t lo;
9670 uint32_t hi;
9671 uint32_t index = hba->sli.sli4.RPICount;
9672
9673 for (i = 0; i < hba->sli.sli4.RPIExtCount; i++) {
9674 lo = hba->sli.sli4.RPIBase[i];
9675 hi = lo + hba->sli.sli4.RPIExtSize;
9676
9677 if ((rpi < hi) && (rpi >= lo)) {
9678 index = (i * hba->sli.sli4.RPIExtSize) + (rpi - lo);
9679 break;
9680 }
9681 }
9682
9683 return (index);
9684
9685 } /* emlxs_sli4_rpi_to_index */
9686
9687
9688 extern uint32_t
9689 emlxs_sli4_index_to_xri(emlxs_hba_t *hba, uint32_t index)
9690 {
9691 uint32_t i;
9692 uint32_t j;
9693 uint32_t xri;
9694
9695 i = index / hba->sli.sli4.XRIExtSize;
9696 j = index % hba->sli.sli4.XRIExtSize;
9697 xri = hba->sli.sli4.XRIBase[i] + j;
9698
9699 return (xri);
9700
9701 } /* emlxs_sli4_index_to_xri */
9702
9703
9704
9705
9706 extern uint32_t
9707 emlxs_sli4_index_to_vpi(emlxs_hba_t *hba, uint32_t index)
9708 {
9709 uint32_t i;
9710 uint32_t j;
9711 uint32_t vpi;
9712
9713 i = index / hba->sli.sli4.VPIExtSize;
9714 j = index % hba->sli.sli4.VPIExtSize;
9715 vpi = hba->sli.sli4.VPIBase[i] + j;
9716
9717 return (vpi);
9718
9719 } /* emlxs_sli4_index_to_vpi */
9720
9721
9722 extern uint32_t
9723 emlxs_sli4_vpi_to_index(emlxs_hba_t *hba, uint32_t vpi)
9724 {
9725 uint32_t i;
9726 uint32_t lo;
9727 uint32_t hi;
9728 uint32_t index = hba->sli.sli4.VPICount;
9729
9730 for (i = 0; i < hba->sli.sli4.VPIExtCount; i++) {
9731 lo = hba->sli.sli4.VPIBase[i];
9732 hi = lo + hba->sli.sli4.VPIExtSize;
9733
9734 if ((vpi < hi) && (vpi >= lo)) {
9735 index = (i * hba->sli.sli4.VPIExtSize) + (vpi - lo);
9736 break;
9737 }
9738 }
9739
9740 return (index);
9741
9742 } /* emlxs_sli4_vpi_to_index */
9743
9744
9745
9746
9747 extern uint32_t
9748 emlxs_sli4_index_to_vfi(emlxs_hba_t *hba, uint32_t index)
9749 {
9750 uint32_t i;
9751 uint32_t j;
9752 uint32_t vfi;
9753
9754 i = index / hba->sli.sli4.VFIExtSize;
9755 j = index % hba->sli.sli4.VFIExtSize;
9756 vfi = hba->sli.sli4.VFIBase[i] + j;
9757
9758 return (vfi);
9759
9760 } /* emlxs_sli4_index_to_vfi */
9761
9762
9763 static uint16_t
9764 emlxs_sli4_rqid_to_index(emlxs_hba_t *hba, uint16_t rqid)
9765 {
9766 uint16_t i;
9767
9768 if (rqid < 0xffff) {
9769 for (i = 0; i < EMLXS_MAX_RQS; i++) {
9770 if (hba->sli.sli4.rq[i].qid == rqid) {
9771 return (i);
9772 }
9773 }
9774 }
9775
9776 return (0xffff);
9777
9778 } /* emlxs_sli4_rqid_to_index */
9779
9780
9781 static uint16_t
9782 emlxs_sli4_wqid_to_index(emlxs_hba_t *hba, uint16_t wqid)
9783 {
9784 uint16_t i;
9785
9786 if (wqid < 0xffff) {
9787 for (i = 0; i < EMLXS_MAX_WQS; i++) {
9788 if (hba->sli.sli4.wq[i].qid == wqid) {
9789 return (i);
9790 }
9791 }
9792 }
9793
9794 return (0xffff);
9795
9796 } /* emlxs_sli4_wqid_to_index */
9797
9798
9799 static uint16_t
9800 emlxs_sli4_cqid_to_index(emlxs_hba_t *hba, uint16_t cqid)
9801 {
9802 uint16_t i;
9803
9804 if (cqid < 0xffff) {
9805 for (i = 0; i < EMLXS_MAX_CQS; i++) {
9806 if (hba->sli.sli4.cq[i].qid == cqid) {
9807 return (i);
9808 }
9809 }
9810 }
9811
9812 return (0xffff);
9813
9814 } /* emlxs_sli4_cqid_to_index */