5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #include <emlxs.h>
28
29
30 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 EMLXS_MSG_DEF(EMLXS_SLI4_C);
32
33 static int emlxs_sli4_init_extents(emlxs_hba_t *hba,
34 MAILBOXQ *mbq);
35 static uint32_t emlxs_sli4_read_status(emlxs_hba_t *hba);
36
37 static int emlxs_init_bootstrap_mb(emlxs_hba_t *hba);
38
39 static uint32_t emlxs_sli4_read_sema(emlxs_hba_t *hba);
40
41 static uint32_t emlxs_sli4_read_mbdb(emlxs_hba_t *hba);
42
43 static void emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint32_t value);
44
104 #ifdef MSI_SUPPORT
105 static uint32_t emlxs_sli4_msi_intr(char *arg1, char *arg2);
106 #endif /* MSI_SUPPORT */
107
108 static void emlxs_sli4_resource_free(emlxs_hba_t *hba);
109
110 static int emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
111 extern void emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
112
113 static XRIobj_t *emlxs_sli4_alloc_xri(emlxs_port_t *port,
114 emlxs_buf_t *sbp, RPIobj_t *rpip,
115 uint32_t type);
116 static void emlxs_sli4_enable_intr(emlxs_hba_t *hba);
117
118 static void emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
119
120 static void emlxs_sli4_timer(emlxs_hba_t *hba);
121
122 static void emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
123
124 static void emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
125
126 extern XRIobj_t *emlxs_sli4_reserve_xri(emlxs_port_t *port,
127 RPIobj_t *rpip, uint32_t type, uint16_t rx_id);
128 static int emlxs_check_hdw_ready(emlxs_hba_t *);
129
130 static uint32_t emlxs_sli4_reg_did(emlxs_port_t *port,
131 uint32_t did, SERV_PARM *param,
132 emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
133 IOCBQ *iocbq);
134
135 static uint32_t emlxs_sli4_unreg_node(emlxs_port_t *port,
136 emlxs_node_t *node, emlxs_buf_t *sbp,
137 fc_unsol_buf_t *ubp, IOCBQ *iocbq);
138
139 static void emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba,
140 CQE_ASYNC_t *cqe);
141 static void emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba,
142 CQE_ASYNC_t *cqe);
143
315 hba->fc_altov = FF_DEF_ALTOV;
316 hba->fc_arbtov = FF_DEF_ARBTOV;
317
318 /* Networking not supported */
319 if (cfg[CFG_NETWORK_ON].current) {
320 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
321 "Networking is not supported in SLI4, turning it off");
322 cfg[CFG_NETWORK_ON].current = 0;
323 }
324
325 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
326 if (hba->chan_count > MAX_CHANNEL) {
327 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
328 "Max channels exceeded, dropping num-wq from %d to 1",
329 cfg[CFG_NUM_WQ].current);
330 cfg[CFG_NUM_WQ].current = 1;
331 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
332 }
333 hba->channel_fcp = 0; /* First channel */
334
335 /* Default channel for everything else is the last channel */
336 hba->channel_ip = hba->chan_count - 1;
337 hba->channel_els = hba->chan_count - 1;
338 hba->channel_ct = hba->chan_count - 1;
339
340 hba->fc_iotag = 1;
341 hba->io_count = 0;
342 hba->channel_tx_count = 0;
343
344 /* Initialize the local dump region buffer */
345 bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
346 hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
347 hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG
348 | FC_MBUF_DMA32;
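	/* ddi_ptob(dip, 1L) converts one page to bytes, so the dump */
	/* region is aligned on a system page boundary */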
349 hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
350
351 (void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
352
353 if (hba->sli.sli4.dump_region.virt == NULL) {
354 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
355 "Unable to allocate dump region buffer.");
356
357 return (ENOMEM);
358 }
359
360 /*
361 * Get a buffer which will be used repeatedly for mailbox commands
362 */
363 mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
1312 (sizeof (vpd->part_num)-1));
1313 }
1314
1315 if (vpd->model_desc[0] == 0) {
1316 (void) snprintf(vpd->model_desc, (sizeof (vpd->model_desc)-1),
1317 "%s %d",
1318 hba->model_info.model_desc, vpd->port_index);
1319 }
1320
1321 if (vpd->model[0] == 0) {
1322 (void) strncpy(vpd->model, hba->model_info.model,
1323 (sizeof (vpd->model)-1));
1324 }
1325
1326 if (vpd->prog_types[0] == 0) {
1327 emlxs_build_prog_types(hba, vpd);
1328 }
1329
1330 /* Create the symbolic names */
1331 (void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1332 "Emulex %s FV%s DV%s %s",
1333 hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1334 (char *)utsname.nodename);
1335
1336 (void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1337 "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1338 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1339 hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1340 hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1341
1342
1343 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1344 emlxs_sli4_enable_intr(hba);
1345
1346 /* Check persist-linkdown */
1347 if (cfg[CFG_PERSIST_LINKDOWN].current) {
1348 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1349 goto done;
1350 }
1351
1352 #ifdef SFCT_SUPPORT
1353 if ((port->mode == MODE_TARGET) &&
1354 !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1355 goto done;
1356 }
1357 #endif /* SFCT_SUPPORT */
1388 mb->mbxStatus);
1389
1390 rval = EIO;
1391 goto failed3;
1392 }
1393
1394 BUSYWAIT_MS(1000);
1395 i--;
1396 }
1397
1398 done:
1399 /*
1400	 * The Leadville driver will now handle the FLOGI at the driver level
1401 */
1402
1403 if (mbq) {
1404 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1405 mbq = NULL;
1406 mb = NULL;
1407 }
1408 return (0);
1409
1410 failed3:
1411 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1412
1413 if (mp) {
1414 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1415 mp = NULL;
1416 }
1417
1418
1419 if (hba->intr_flags & EMLXS_MSI_ADDED) {
1420 (void) EMLXS_INTR_REMOVE(hba);
1421 }
1422
1423 emlxs_sli4_resource_free(hba);
1424
1425 failed2:
1426 (void) emlxs_mem_free_buffer(hba);
1427
1433 }
1434
1435 if (hba->sli.sli4.dump_region.virt) {
1436 (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1437 }
1438
1439 if (rval == 0) {
1440 rval = EIO;
1441 }
1442
1443 return (rval);
1444
1445 } /* emlxs_sli4_online() */
1446
1447
1448 static void
1449 emlxs_sli4_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1450 {
1451 /* Reverse emlxs_sli4_online */
1452
1453 mutex_enter(&EMLXS_PORT_LOCK);
1454 if (hba->flag & FC_INTERLOCKED) {
1455 mutex_exit(&EMLXS_PORT_LOCK);
1456 goto killed;
1457 }
1458 mutex_exit(&EMLXS_PORT_LOCK);
1459
1460 if (reset_requested) {
1461 (void) emlxs_sli4_hba_reset(hba, 0, 0, 0);
1462 }
1463
1464 /* Shutdown the adapter interface */
1465 emlxs_sli4_hba_kill(hba);
1466
1467 killed:
1468
1469 /* Free SLI shared memory */
1470 emlxs_sli4_resource_free(hba);
1471
1472 /* Free driver shared memory */
2346
2347 /* Reset the hba structure */
2348 hba->flag &= FC_RESET_MASK;
2349
2350 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2351 cp = &hba->chan[channelno];
2352 cp->hba = hba;
2353 cp->channelno = channelno;
2354 }
2355
2356 hba->channel_tx_count = 0;
2357 hba->io_count = 0;
2358 hba->iodone_count = 0;
2359 hba->topology = 0;
2360 hba->linkspeed = 0;
2361 hba->heartbeat_active = 0;
2362 hba->discovery_timer = 0;
2363 hba->linkup_timer = 0;
2364 hba->loopback_tics = 0;
2365
2366 /* Reset the port objects */
2367 for (i = 0; i < MAX_VPORTS; i++) {
2368 vport = &VPORT(i);
2369
2370 vport->flag &= EMLXS_PORT_RESET_MASK;
2371 vport->did = 0;
2372 vport->prev_did = 0;
2373 vport->lip_type = 0;
2374 bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
2375 bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));
2376
2377 bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
2378 vport->node_base.nlp_Rpi = 0;
2379 vport->node_base.nlp_DID = 0xffffff;
2380 vport->node_base.nlp_list_next = NULL;
2381 vport->node_base.nlp_list_prev = NULL;
2382 vport->node_base.nlp_active = 1;
2383 vport->node_count = 0;
2384
2385 if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
2538 /*ARGSUSED*/
2539 uint32_t
2540 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2541 {
2542 emlxs_hba_t *hba = HBA;
2543 fc_packet_t *pkt;
2544 XRIobj_t *xrip;
2545 ULP_SGE64 *sge;
2546 emlxs_wqe_t *wqe;
2547 IOCBQ *iocbq;
2548 ddi_dma_cookie_t *cp_cmd;
2549 ddi_dma_cookie_t *cp_data;
2550 uint64_t sge_addr;
2551 uint32_t cmd_cnt;
2552 uint32_t resp_cnt;
2553
2554 iocbq = (IOCBQ *) &sbp->iocbq;
2555 wqe = &iocbq->wqe;
2556 pkt = PRIV2PKT(sbp);
2557 xrip = sbp->xrip;
2558 sge = xrip->SGList.virt;
2559
2560 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2561 cp_cmd = pkt->pkt_cmd_cookie;
2562 cp_data = pkt->pkt_data_cookie;
2563 #else
2564 cp_cmd = &pkt->pkt_cmd_cookie;
2565 cp_data = &pkt->pkt_data_cookie;
2566 #endif /* >= EMLXS_MODREV3 */
2567
2568 iocbq = &sbp->iocbq;
2569 if (iocbq->flag & IOCB_FCP_CMD) {
2570
2571 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2572 return (1);
2573 }
2574
2575 /* CMD payload */
2576 sge = emlxs_pkt_to_sgl(port, pkt, sge, SGL_CMD, &cmd_cnt);
2577 if (! sge) {
2578 return (1);
2688
2689 size = sbp->fct_buf->db_data_size;
2690
2691 /*
2692			 * The hardware will automatically round up
2693			 * to a multiple of 4.
2694 *
2695 * if (size & 3) {
2696 * size = (size + 3) & 0xfffffffc;
2697 * }
2698 */
2699 fct_mp = (MATCHMAP *)sbp->fct_buf->db_port_private;
2700
2701 if (sbp->fct_buf->db_sglist_length != 1) {
2702 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
2703 "fct_bde_setup: Only 1 sglist entry supported: %d",
2704 sbp->fct_buf->db_sglist_length);
2705 return (1);
2706 }
2707
2708 sge = xrip->SGList.virt;
2709
2710 if (iocb->ULPCOMMAND == CMD_FCP_TRECEIVE64_CX) {
2711
2712 mp = emlxs_mem_buf_alloc(hba, EMLXS_XFER_RDY_SIZE);
2713 if (!mp || !mp->virt || !mp->phys) {
2714 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
2715 "fct_bde_setup: Cannot allocate XRDY memory");
2716 return (1);
2717 }
2718 /* Save the MATCHMAP info to free this memory later */
2719 iocbq->bp = mp;
2720
2721 /* Point to XRDY payload */
2722 xrdy_vaddr = (uint32_t *)(mp->virt);
2723
2724 /* Fill in burstsize in payload */
2725 *xrdy_vaddr++ = 0;
2726 *xrdy_vaddr++ = LE_SWAP32(size);
2727 *xrdy_vaddr = 0;
2728
3971 emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
3972 IOERR_NO_XRI, 0);
3973 return (0xff);
3974 }
3975
3976 cmd_sbp->iotag = xrip->iotag;
3977 cmd_sbp->channel = cp;
3978
3979 #if (EMLXS_MODREV >= EMLXS_MODREV3)
3980 cp_cmd = pkt->pkt_cmd_cookie;
3981 #else
3982 cp_cmd = &pkt->pkt_cmd_cookie;
3983 #endif /* >= EMLXS_MODREV3 */
3984
3985 sge_size = pkt->pkt_cmdlen;
3986 /* Make size a multiple of 4 */
3987 if (sge_size & 3) {
3988 sge_size = (sge_size + 3) & 0xfffffffc;
3989 }
3990 sge_addr = cp_cmd->dmac_laddress;
3991 sge = xrip->SGList.virt;
3992
3993 stage_sge.addrHigh = PADDR_HI(sge_addr);
3994 stage_sge.addrLow = PADDR_LO(sge_addr);
3995 stage_sge.length = sge_size;
3996 stage_sge.offset = 0;
3997 stage_sge.type = 0;
3998 stage_sge.last = 1;
3999
4000 /* Copy staged SGE into SGL */
4001 BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
4002 (uint8_t *)sge, sizeof (ULP_SGE64));
4003
4004 /* Words 0-3 */
4005 wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
4006 wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
4007 wqe->un.FcpCmd.Payload.tus.f.bdeSize = sge_size;
4008 wqe->un.FcpCmd.PayloadLength = sge_size;
4009
4010 /* Word 6 */
4011 wqe->ContextTag = ndlp->nlp_Rpi;
4163
4164 } /* emlxs_sli4_prep_fct_iocb() */
4165 #endif /* SFCT_SUPPORT */
4166
4167
4168 /*ARGSUSED*/
4169 extern uint32_t
4170 emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
4171 {
4172 emlxs_hba_t *hba = HBA;
4173 fc_packet_t *pkt;
4174 CHANNEL *cp;
4175 RPIobj_t *rpip;
4176 XRIobj_t *xrip;
4177 emlxs_wqe_t *wqe;
4178 IOCBQ *iocbq;
4179 IOCB *iocb;
4180 NODELIST *node;
4181 uint16_t iotag;
4182 uint32_t did;
4183 off_t offset;
4184
4185 pkt = PRIV2PKT(sbp);
4186 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
4187 cp = &hba->chan[channel];
4188
4189 iocbq = &sbp->iocbq;
4190 iocbq->channel = (void *) cp;
4191 iocbq->port = (void *) port;
4192
4193 wqe = &iocbq->wqe;
4194 iocb = &iocbq->iocb;
4195 bzero((void *)wqe, sizeof (emlxs_wqe_t));
4196 bzero((void *)iocb, sizeof (IOCB));
4197
4198 /* Find target node object */
4199 node = (NODELIST *)iocbq->node;
4200 rpip = EMLXS_NODE_TO_RPI(port, node);
4201
4202 if (!rpip) {
4203 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4225 #ifdef DEBUG_FASTPATH
4226 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4227 "FCP: Prep xri=%d iotag=%d oxid=%x rpi=%d",
4228 xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
4229 #endif /* DEBUG_FASTPATH */
4230
4231 /* Indicate this is a FCP cmd */
4232 iocbq->flag |= IOCB_FCP_CMD;
4233
4234 if (emlxs_sli4_bde_setup(port, sbp)) {
4235 emlxs_sli4_free_xri(port, sbp, xrip, 1);
4236 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4237 "Adapter Busy. Unable to setup SGE. did=0x%x", did);
4238
4239 return (FC_TRAN_BUSY);
4240 }
4241
4242 /* DEBUG */
4243 #ifdef DEBUG_FCP
4244 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4245 "FCP: SGLaddr virt %p phys %p size %d", xrip->SGList.virt,
4246 xrip->SGList.phys, pkt->pkt_datalen);
4247 emlxs_data_dump(port, "FCP: SGL", (uint32_t *)xrip->SGList.virt, 20, 0);
4248 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4249 "FCP: CMD virt %p len %d:%d:%d",
4250 pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
4251 emlxs_data_dump(port, "FCP: CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
4252 #endif /* DEBUG_FCP */
4253
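	/* Compute this SGL's offset within the slim2 DMA region so that */
	/* only this SGL is synced out to the device below */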
4254 offset = (off_t)((uint64_t)((unsigned long)
4255 xrip->SGList.virt) -
4256 (uint64_t)((unsigned long)
4257 hba->sli.sli4.slim2.virt));
4258
4259 EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
4260 xrip->SGList.size, DDI_DMA_SYNC_FORDEV);
4261
4262	/* If the device is an FCP-2 device, set the following bit */
4263 /* that says to run the FC-TAPE protocol. */
4264 if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
4265 wqe->ERP = 1;
4266 }
4267
4268 if (pkt->pkt_datalen == 0) {
4269 iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
4270 wqe->Command = CMD_FCP_ICMND64_CR;
4271 wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
4272 } else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
4273 iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
4274 wqe->Command = CMD_FCP_IREAD64_CR;
4275 wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
4276 wqe->PU = PARM_XFER_CHECK;
4277 } else {
4278 iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
4279 wqe->Command = CMD_FCP_IWRITE64_CR;
4280 wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
4281 }
4325 static uint32_t
4326 emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
4327 {
4328 emlxs_hba_t *hba = HBA;
4329 fc_packet_t *pkt;
4330 IOCBQ *iocbq;
4331 IOCB *iocb;
4332 emlxs_wqe_t *wqe;
4333 FCFIobj_t *fcfp;
4334 RPIobj_t *reserved_rpip = NULL;
4335 RPIobj_t *rpip = NULL;
4336 XRIobj_t *xrip;
4337 CHANNEL *cp;
4338 uint32_t did;
4339 uint32_t cmd;
4340 ULP_SGE64 stage_sge;
4341 ULP_SGE64 *sge;
4342 ddi_dma_cookie_t *cp_cmd;
4343 ddi_dma_cookie_t *cp_resp;
4344 emlxs_node_t *node;
4345 off_t offset;
4346
4347 pkt = PRIV2PKT(sbp);
4348 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
4349
4350 iocbq = &sbp->iocbq;
4351 wqe = &iocbq->wqe;
4352 iocb = &iocbq->iocb;
4353 bzero((void *)wqe, sizeof (emlxs_wqe_t));
4354 bzero((void *)iocb, sizeof (IOCB));
4355 cp = &hba->chan[hba->channel_els];
4356
4357	/* Initialize iocbq */
4358 iocbq->port = (void *) port;
4359 iocbq->channel = (void *) cp;
4360
4361 sbp->channel = cp;
4362 sbp->bmp = NULL;
4363
4364 #if (EMLXS_MODREV >= EMLXS_MODREV3)
4365 cp_cmd = pkt->pkt_cmd_cookie;
4419
4420 iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
4421 wqe->Command = CMD_XMIT_ELS_RSP64_CX;
4422 wqe->CmdType = WQE_TYPE_GEN;
4423 if (!(hba->sli.sli4.param.PHWQ)) {
4424 wqe->DBDE = 1; /* Data type for BDE 0 */
4425 }
4426
4427 wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
4428 wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
4429 wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
4430 wqe->un.ElsCmd.PayloadLength = pkt->pkt_cmdlen;
4431
4432 wqe->un.ElsRsp.RemoteId = did;
4433 wqe->PU = 0x3;
4434 wqe->OXId = xrip->rx_id;
4435
4436 sge->last = 1;
4437 /* Now sge is fully staged */
4438
4439 sge = xrip->SGList.virt;
4440 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
4441 sizeof (ULP_SGE64));
4442
4443 if (rpip->RPI == FABRIC_RPI) {
4444 wqe->ContextTag = port->vpip->VPI;
4445 wqe->ContextType = WQE_VPI_CONTEXT;
4446 } else {
4447 wqe->ContextTag = rpip->RPI;
4448 wqe->ContextType = WQE_RPI_CONTEXT;
4449 }
4450
4451 if ((cmd == ELS_CMD_ACC) && (sbp->ucmd == ELS_CMD_FLOGI)) {
4452 wqe->un.ElsCmd.SP = 1;
4453 wqe->un.ElsCmd.LocalId = 0xFFFFFE;
4454 }
4455
4456 } else {
4457 /* ELS Request */
4458
4459 fcfp = port->vpip->vfip->fcfp;
4484 iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
4485 wqe->Command = CMD_ELS_REQUEST64_CR;
4486 wqe->CmdType = WQE_TYPE_ELS;
4487 if (!(hba->sli.sli4.param.PHWQ)) {
4488 wqe->DBDE = 1; /* Data type for BDE 0 */
4489 }
4490
4491 wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
4492 wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
4493 wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
4494
4495 wqe->un.ElsCmd.RemoteId = did;
4496 wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4497
4498 /* setup for rsp */
4499 iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
4500 iocb->ULPPU = 1; /* Wd4 is relative offset */
4501
4502 sge->last = 0;
4503
4504 sge = xrip->SGList.virt;
4505 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
4506 sizeof (ULP_SGE64));
4507
4508 wqe->un.ElsCmd.PayloadLength =
4509 pkt->pkt_cmdlen; /* Byte offset of rsp data */
4510
4511 /* RSP payload */
4512 sge = &stage_sge;
4513 sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
4514 sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
4515 sge->length = pkt->pkt_rsplen;
4516 sge->offset = 0;
4517 sge->last = 1;
4518 /* Now sge is fully staged */
4519
4520 sge = xrip->SGList.virt;
4521 sge++;
4522 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
4523 sizeof (ULP_SGE64));
4524 #ifdef DEBUG_ELS
4525 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4526 "ELS: SGLaddr virt %p phys %p",
4527 xrip->SGList.virt, xrip->SGList.phys);
4528 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4529 "ELS: PAYLOAD virt %p phys %p",
4530 pkt->pkt_cmd, cp_cmd->dmac_laddress);
4531 emlxs_data_dump(port, "ELS: SGL", (uint32_t *)xrip->SGList.virt,
4532 12, 0);
4533 #endif /* DEBUG_ELS */
4534
4535 switch (cmd) {
4536 case ELS_CMD_FLOGI:
4537 wqe->un.ElsCmd.SP = 1;
4538
4539 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) ==
4540 SLI_INTF_IF_TYPE_0) {
4541 wqe->ContextTag = fcfp->FCFI;
4542 wqe->ContextType = WQE_FCFI_CONTEXT;
4543 } else {
4544 wqe->ContextTag = port->vpip->VPI;
4545 wqe->ContextType = WQE_VPI_CONTEXT;
4546 }
4547
4548 if (hba->flag & FC_FIP_SUPPORTED) {
4549 wqe->CmdType |= WQE_TYPE_MASK_FIP;
4550 }
4551
4552 if (hba->topology == TOPOLOGY_LOOP) {
4618 reserved_rpip = emlxs_rpi_reserve_notify(port, did, xrip);
4619
4620 if (!reserved_rpip) {
4621 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
4622 "Unable to alloc reserved RPI. rxid=%x. Rejecting.",
4623 pkt->pkt_cmd_fhdr.rx_id);
4624
4625 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
4626 IOERR_INVALID_RPI, 0);
4627 return (0xff);
4628 }
4629
4630 /* Store the reserved rpi */
4631 if (wqe->Command == CMD_ELS_REQUEST64_CR) {
4632 wqe->OXId = reserved_rpip->RPI;
4633 } else {
4634 wqe->CmdSpecific = reserved_rpip->RPI;
4635 }
4636 }
4637
4638 offset = (off_t)((uint64_t)((unsigned long)
4639 xrip->SGList.virt) -
4640 (uint64_t)((unsigned long)
4641 hba->sli.sli4.slim2.virt));
4642
4643 EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
4644 xrip->SGList.size, DDI_DMA_SYNC_FORDEV);
4645
4646 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
4647 wqe->CCPE = 1;
4648 wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
4649 }
4650
4651 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4652 case FC_TRAN_CLASS2:
4653 wqe->Class = CLASS2;
4654 break;
4655 case FC_TRAN_CLASS3:
4656 default:
4657 wqe->Class = CLASS3;
4658 break;
4659 }
4660 sbp->class = wqe->Class;
4661 wqe->XRITag = xrip->XRI;
4662 wqe->RequestTag = xrip->iotag;
4663 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4664 return (FC_SUCCESS);
4665
4666 } /* emlxs_sli4_prep_els_iocb() */
4667
4668
4669 /*ARGSUSED*/
4670 static uint32_t
4671 emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
4672 {
4673 emlxs_hba_t *hba = HBA;
4674 fc_packet_t *pkt;
4675 IOCBQ *iocbq;
4676 IOCB *iocb;
4677 emlxs_wqe_t *wqe;
4678 NODELIST *node = NULL;
4679 CHANNEL *cp;
4680 RPIobj_t *rpip;
4681 XRIobj_t *xrip;
4682 uint32_t did;
4683 off_t offset;
4684
4685 pkt = PRIV2PKT(sbp);
4686 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
4687
4688 iocbq = &sbp->iocbq;
4689 wqe = &iocbq->wqe;
4690 iocb = &iocbq->iocb;
4691 bzero((void *)wqe, sizeof (emlxs_wqe_t));
4692 bzero((void *)iocb, sizeof (IOCB));
4693
4694 cp = &hba->chan[hba->channel_ct];
4695
4696 iocbq->port = (void *) port;
4697 iocbq->channel = (void *) cp;
4698
4699 sbp->bmp = NULL;
4700 sbp->channel = cp;
4701
4702	/* Initialize wqe */
4703 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
4815 "Adapter Busy. Unable to setup SGE. did=0x%x", did);
4816
4817 emlxs_sli4_free_xri(port, sbp, xrip, 1);
4818 return (FC_TRAN_BUSY);
4819 }
4820
4821 if (!(hba->sli.sli4.param.PHWQ)) {
4822 wqe->DBDE = 1; /* Data type for BDE 0 */
4823 }
4824
4825 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
4826 wqe->CmdType = WQE_TYPE_GEN;
4827 wqe->Command = CMD_GEN_REQUEST64_CR;
4828 wqe->un.GenReq.la = 1;
4829 wqe->un.GenReq.DFctl = pkt->pkt_cmd_fhdr.df_ctl;
4830 wqe->un.GenReq.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
4831 wqe->un.GenReq.Type = pkt->pkt_cmd_fhdr.type;
4832
4833 #ifdef DEBUG_CT
4834 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4835 "CT: SGLaddr virt %p phys %p", xrip->SGList.virt,
4836 xrip->SGList.phys);
4837 emlxs_data_dump(port, "CT: SGL", (uint32_t *)xrip->SGList.virt,
4838 12, 0);
4839 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4840 "CT: CMD virt %p len %d:%d",
4841 pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
4842 emlxs_data_dump(port, "CT: DATA", (uint32_t *)pkt->pkt_cmd,
4843 20, 0);
4844 #endif /* DEBUG_CT */
4845
4846 #ifdef SFCT_SUPPORT
4847 /* This allows fct to abort the request */
4848 if (sbp->fct_cmd) {
4849 sbp->fct_cmd->cmd_oxid = xrip->XRI;
4850 sbp->fct_cmd->cmd_rxid = 0xFFFF;
4851 }
4852 #endif /* SFCT_SUPPORT */
4853 }
4854
4855 /* Setup for rsp */
4856 iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
4857 iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
4858 iocb->un.genreq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
4859 iocb->ULPPU = 1; /* Wd4 is relative offset */
4860
4861 offset = (off_t)((uint64_t)((unsigned long)
4862 xrip->SGList.virt) -
4863 (uint64_t)((unsigned long)
4864 hba->sli.sli4.slim2.virt));
4865
4866 EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
4867 xrip->SGList.size, DDI_DMA_SYNC_FORDEV);
4868
4869 wqe->ContextTag = rpip->RPI;
4870 wqe->ContextType = WQE_RPI_CONTEXT;
4871 wqe->XRITag = xrip->XRI;
4872 wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4873
4874 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
4875 wqe->CCPE = 1;
4876 wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
4877 }
4878
4879 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4880 case FC_TRAN_CLASS2:
4881 wqe->Class = CLASS2;
4882 break;
4883 case FC_TRAN_CLASS3:
4884 default:
4885 wqe->Class = CLASS3;
4886 break;
4887 }
4888 sbp->class = wqe->Class;
5091 emlxs_sli4_handle_fc_link_att(hba, cqe);
5092 break;
5093 case ASYNC_EVENT_FC_SHARED_LINK_ATT:
5094 HBASTATS.LinkEvent++;
5095
5096 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5097 "FC Async Event: Shared Link Attention. event=%x",
5098 HBASTATS.LinkEvent);
5099
5100 emlxs_sli4_handle_fc_link_att(hba, cqe);
5101 break;
5102 default:
5103 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5104 "FC Async Event: Unknown event. type=%d event=%x",
5105 cqe->event_type, HBASTATS.LinkEvent);
5106 }
5107 break;
5108 case ASYNC_EVENT_CODE_PORT:
5109 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5110 "SLI Port Async Event: type=%d", cqe->event_type);
5111 if (cqe->event_type == ASYNC_EVENT_MISCONFIG_PORT) {
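			/* The link_status bytes arrive big-endian; swap them */
			/* in place, then index by this port's link number */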
5112 *((uint32_t *)cqe->un.port.link_status) =
5113 BE_SWAP32(*((uint32_t *)cqe->un.port.link_status));
5114 status =
5115 cqe->un.port.link_status[hba->sli.sli4.link_number];
5116
5117 switch (status) {
5118 case 0 :
5119 break;
5120
5121 case 1 :
5122 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5123 "SLI Port Async Event: Physical media not "
5124 "detected");
5125 cmn_err(CE_WARN,
5126 "^%s%d: Optics faulted/incorrectly "
5127 "installed/not installed - Reseat optics, "
5128 "if issue not resolved, replace.",
5129 DRIVER_NAME, hba->ddiinst);
5130 break;
5131
5144 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5145 "SLI Port Async Event: Unsupported "
5146 "physical media detected");
5147 cmn_err(CE_WARN,
5148 "^%s%d: Incompatible optics - Replace "
5149 "with compatible optics for card to "
5150 "function.",
5151 DRIVER_NAME, hba->ddiinst);
5152 break;
5153
5154 default :
5155 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5156 "SLI Port Async Event: Physical media "
5157 "error, status=%x", status);
5158 cmn_err(CE_WARN,
5159 "^%s%d: Misconfigured port: status=0x%x - "
5160 "Check optics on card.",
5161 DRIVER_NAME, hba->ddiinst, status);
5162 break;
5163 }
5164 }
5165 break;
5166 case ASYNC_EVENT_CODE_VF:
5167 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5168 "VF Async Event: type=%d",
5169 cqe->event_type);
5170 break;
5171 case ASYNC_EVENT_CODE_MR:
5172 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5173 "MR Async Event: type=%d",
5174 cqe->event_type);
5175 break;
5176 default:
5177 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5178 "Unknown Async Event: code=%d type=%d.",
5179 cqe->event_code, cqe->event_type);
5180 break;
5181 }
5182
5183 } /* emlxs_sli4_process_async_event() */
5184
6582 iocb->ULPBDECOUNT = 1;
6583
6584 iocb->ULPPU = 0x3;
6585 iocb->ULPCONTEXT = xrip->XRI;
6586 iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
6587 iocb->ULPCLASS = CLASS3;
6588 iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
6589
6590 iocb->unsli3.ext_rcv.seq_len = seq_len;
6591 iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;
6592 iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;
6593
6594 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6595 iocb->unsli3.ext_rcv.ccpe = 1;
6596 iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6597 }
6598
6599 /* pass xrip to FCT in the iocbq */
6600 iocbq->sbp = xrip;
6601
6602 #define EMLXS_FIX_CISCO_BUG1
6603 #ifdef EMLXS_FIX_CISCO_BUG1
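	/* Workaround for a bad CDB observed in unsolicited FCP frames on */
	/* some Cisco fabrics: when the byte pattern below is detected, */
	/* the first two payload bytes are zeroed before the frame is */
	/* passed to the target module */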
6604 {
6605 uint8_t *ptr;
6606 ptr = ((uint8_t *)seq_mp->virt);
6607	if ((*(ptr+12) != 0xa0) && (*(ptr+20) == 0x8) && (*(ptr+21) == 0x8)) {
6608 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6609 "RQ ENTRY: Bad CDB fixed");
6610 *ptr++ = 0;
6611 *ptr = 0;
6612 }
6613 }
6614 #endif /* EMLXS_FIX_CISCO_BUG1 */
6615 (void) emlxs_fct_handle_unsol_req(port, cp, iocbq,
6616 seq_mp, seq_len);
6617 break;
6618 #endif /* SFCT_SUPPORT */
6619
6620 case 0x20: /* CT */
6621 if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED) &&
6622 !(hba->flag & FC_LOOPBACK_MODE)) {
6623 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6624 "RQ ENTRY: %s: Port not yet enabled. "
6625 "Dropping...",
6626 label);
6627
6628 goto done;
6629 }
6630
6631 if (!node) {
6632 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6633 "RQ ENTRY: %s: Node not found (did=%x). "
6634 "Dropping...",
7179 for (i = 0; i < hba->intr_count; i++) {
7180 data = hba->sli.sli4.eq[i].qid;
7181 data |= (EQ_DB_REARM | EQ_DB_EVENT);
7182 emlxs_sli4_write_cqdb(hba, data);
7183 }
7184 } /* emlxs_sli4_enable_intr() */
7185
7186
7187 static void
7188 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
7189 {
7190 if (att) {
7191 return;
7192 }
7193
7194 hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
7195
7196 /* Short of reset, we cannot disable interrupts */
7197 } /* emlxs_sli4_disable_intr() */
7198
7199
7200 static void
7201 emlxs_sli4_resource_free(emlxs_hba_t *hba)
7202 {
7203 emlxs_port_t *port = &PPORT;
7204 MBUF_INFO *buf_info;
7205 uint32_t i;
7206
7207 buf_info = &hba->sli.sli4.slim2;
7208 if (buf_info->virt == 0) {
7209 /* Already free */
7210 return;
7211 }
7212
7213 emlxs_fcf_fini(hba);
7214
7215 buf_info = &hba->sli.sli4.HeaderTmplate;
7216 if (buf_info->virt) {
7217 bzero(buf_info, sizeof (MBUF_INFO));
7218 }
7219
7220 if (hba->sli.sli4.XRIp) {
7221 if ((hba->sli.sli4.XRIinuse_f !=
7222 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
7223 (hba->sli.sli4.XRIinuse_b !=
7224 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
7225 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
7226 "XRIs in use during free!: %p %p != %p\n",
7227 hba->sli.sli4.XRIinuse_f,
7228 hba->sli.sli4.XRIinuse_b,
7229 &hba->sli.sli4.XRIinuse_f);
7230 }
7231 kmem_free(hba->sli.sli4.XRIp,
7232 (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
7233 hba->sli.sli4.XRIp = NULL;
7234
7235 hba->sli.sli4.XRIfree_f =
7236 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7237 hba->sli.sli4.XRIfree_b =
7238 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7239 hba->sli.sli4.xrif_count = 0;
7240 }
7241
7242 for (i = 0; i < hba->intr_count; i++) {
7243 mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
7244 bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
7245 hba->sli.sli4.eq[i].qid = 0xffff;
7246 }
7247 for (i = 0; i < EMLXS_MAX_CQS; i++) {
7248 bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
7249 hba->sli.sli4.cq[i].qid = 0xffff;
7250 }
7255 for (i = 0; i < EMLXS_MAX_RXQS; i++) {
7256 mutex_destroy(&hba->sli.sli4.rxq[i].lock);
7257 bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
7258 }
7259 for (i = 0; i < EMLXS_MAX_RQS; i++) {
7260 mutex_destroy(&hba->sli.sli4.rq[i].lock);
7261 bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
7262 hba->sli.sli4.rq[i].qid = 0xffff;
7263 }
7264
7265 /* Free the MQ */
7266 bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
7267
7268 buf_info = &hba->sli.sli4.slim2;
7269 if (buf_info->virt) {
7270 buf_info->flags = FC_MBUF_DMA;
7271 emlxs_mem_free(hba, buf_info);
7272 bzero(buf_info, sizeof (MBUF_INFO));
7273 }
7274
7275 } /* emlxs_sli4_resource_free() */
7276
7277
7278 static int
7279 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
7280 {
7281 emlxs_port_t *port = &PPORT;
7282 emlxs_config_t *cfg = &CFG;
7283 MBUF_INFO *buf_info;
7284 int num_eq;
7285 int num_wq;
7286 uint16_t i;
7287 uint32_t j;
7288 uint32_t k;
7289 uint16_t cq_depth;
7290 uint32_t cq_size;
7291 uint32_t word;
7292 XRIobj_t *xrip;
7293 RQE_t *rqe;
7294 MBUF_INFO *rqb;
7295 uint64_t phys;
7296 uint64_t tmp_phys;
7297 char *virt;
7335
7336 /* EQ */
7337 count += num_eq * 4096;
7338
7339 /* CQ */
7340 count += (num_wq + EMLXS_CQ_OFFSET_WQ) * cq_size;
7341
7342 /* WQ */
7343 count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);
7344
7345 /* MQ */
7346 count += EMLXS_MAX_MQS * 4096;
7347
7348 /* RQ */
7349 count += EMLXS_MAX_RQS * 4096;
7350
7351 /* RQB/E */
7352 count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
7353 count += (4096 - (count%4096)); /* Ensure 4K alignment */
7354
7355 /* SGL */
7356 count += hba->sli.sli4.XRIExtSize * hba->sli.sli4.mem_sgl_size;
7357 count += (4096 - (count%4096)); /* Ensure 4K alignment */
7358
7359 /* RPI Header Templates */
7360 if (hba->sli.sli4.param.HDRR) {
7361 /* Bytes per extent */
7362 j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);
7363
7364 /* Pages required per extent (page == 4096 bytes) */
7365 k = (j/4096) + ((j%4096)? 1:0);
7366
7367 /* Total size */
7368 hddr_size = (k * hba->sli.sli4.RPIExtCount * 4096);
7369
7370 count += hddr_size;
7371 }
7372
7373 /* Allocate slim2 for SLI4 */
7374 buf_info = &hba->sli.sli4.slim2;
7375 buf_info->size = count;
7376 buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7377 buf_info->align = ddi_ptob(hba->dip, 1L);
7378
7379 (void) emlxs_mem_alloc(hba, buf_info);
7380
7381 if (buf_info->virt == NULL) {
7382 EMLXS_MSGF(EMLXS_CONTEXT,
7383 &emlxs_init_failed_msg,
7384 "Unable to allocate internal memory for SLI4: %d",
7385 count);
7386 goto failed;
7387 }
7388 bzero(buf_info->virt, buf_info->size);
7389 EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
7390 buf_info->size, DDI_DMA_SYNC_FORDEV);
7391
7392	/* Assign memory to SGL, Header Template, EQ, CQ, WQ, RQ and MQ */
7393 data_handle = buf_info->data_handle;
7394 dma_handle = buf_info->dma_handle;
7395 phys = buf_info->phys;
7396 virt = (char *)buf_info->virt;
7397
7398 /* Allocate space for queues */
7399
7400 /* EQ */
7401 size = 4096;
7402 for (i = 0; i < num_eq; i++) {
7403 bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
7404
7405 buf_info = &hba->sli.sli4.eq[i].addr;
7406 buf_info->size = size;
7407 buf_info->flags =
7408 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7409 buf_info->align = ddi_ptob(hba->dip, 1L);
7410 buf_info->phys = phys;
7411 buf_info->virt = (void *)virt;
7412 buf_info->data_handle = data_handle;
7562
7563 rqe++;
7564 }
7565 }
7566
7567 offset = (off_t)((uint64_t)((unsigned long)
7568 hba->sli.sli4.rq[i].addr.virt) -
7569 (uint64_t)((unsigned long)
7570 hba->sli.sli4.slim2.virt));
7571
7572 /* Sync the RQ buffer list */
7573 EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
7574 hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
7575 }
7576
7577 /* 4K Alignment */
7578 align = (4096 - (phys%4096));
7579 phys += align;
7580 virt += align;
7581
7582 /* SGL */
7583 /* Initialize double linked lists */
7584 hba->sli.sli4.XRIinuse_f =
7585 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7586 hba->sli.sli4.XRIinuse_b =
7587 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7588 hba->sli.sli4.xria_count = 0;
7589
7590 hba->sli.sli4.XRIfree_f =
7591 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7592 hba->sli.sli4.XRIfree_b =
7593 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7594 hba->sli.sli4.xria_count = 0;
7595
7596 hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
7597 (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
7598
7599 xrip = hba->sli.sli4.XRIp;
7600 size = hba->sli.sli4.mem_sgl_size;
7601 iotag = 1;
7602 for (i = 0; i < hba->sli.sli4.XRICount; i++) {
7603 xrip->XRI = emlxs_sli4_index_to_xri(hba, i);
7604
7605 /* We don't use XRI==0, since it also represents an */
7606 /* uninitialized exchange */
7607 if (xrip->XRI == 0) {
7608 xrip++;
7609 continue;
7610 }
7611
7612 xrip->iotag = iotag++;
7613 xrip->sge_count =
7614 (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
7615
7616 /* Add xrip to end of free list */
7617 xrip->_b = hba->sli.sli4.XRIfree_b;
7618 hba->sli.sli4.XRIfree_b->_f = xrip;
7619 xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7620 hba->sli.sli4.XRIfree_b = xrip;
7621 hba->sli.sli4.xrif_count++;
7622
7623 /* Allocate SGL for this xrip */
7624 buf_info = &xrip->SGList;
7625 buf_info->size = size;
7626 buf_info->flags =
7627 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7628 buf_info->align = size;
7629 buf_info->phys = phys;
7630 buf_info->virt = (void *)virt;
7631 buf_info->data_handle = data_handle;
7632 buf_info->dma_handle = dma_handle;
7633
7634 phys += size;
7635 virt += size;
7636
7637 xrip++;
7638 }
7639
7640 /* 4K Alignment */
7641 align = (4096 - (phys%4096));
7642 phys += align;
7643 virt += align;
7644
7645 /* RPI Header Templates */
7646 if (hba->sli.sli4.param.HDRR) {
7647 buf_info = &hba->sli.sli4.HeaderTmplate;
7648 bzero(buf_info, sizeof (MBUF_INFO));
7649 buf_info->size = hddr_size;
7650 buf_info->flags = FC_MBUF_DMA | FC_MBUF_DMA32;
7651 buf_info->align = ddi_ptob(hba->dip, 1L);
7652 buf_info->phys = phys;
7653 buf_info->virt = (void *)virt;
7654 buf_info->data_handle = data_handle;
7655 buf_info->dma_handle = dma_handle;
7656 }
7657
7658 #ifdef FMA_SUPPORT
7659 if (hba->sli.sli4.slim2.dma_handle) {
7660 if (emlxs_fm_check_dma_handle(hba,
7661 hba->sli.sli4.slim2.dma_handle)
7662 != DDI_FM_OK) {
7663 EMLXS_MSGF(EMLXS_CONTEXT,
7664 &emlxs_invalid_dma_handle_msg,
7665 "sli4_resource_alloc: hdl=%p",
7666 hba->sli.sli4.slim2.dma_handle);
7667 goto failed;
7668 }
7669 }
7670 #endif /* FMA_SUPPORT */
7671
7672 return (0);
7673
7674 failed:
7675
7676	emlxs_sli4_resource_free(hba);
7677 return (ENOMEM);
8234 FCOE_OPCODE_CFG_POST_SGL_PAGES;
8235 mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
8236 mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;
8237
8238 hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
8239 hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
8240 hdr_req->timeout = 0;
8241 hdr_req->req_length = size;
8242
8243 post_sgl->params.request.xri_count = 0;
8244 post_sgl->params.request.xri_start = xrip->XRI;
8245
8246 xri_cnt = (size -
8247 sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
8248 sizeof (FCOE_SGL_PAGES);
8249
8250 for (i = 0; (i < xri_cnt) && cnt; i++) {
8251 post_sgl->params.request.xri_count++;
8252 post_sgl->params.request.pages[i].\
8253 sgl_page0.addrLow =
8254 PADDR_LO(xrip->SGList.phys);
8255 post_sgl->params.request.pages[i].\
8256 sgl_page0.addrHigh =
8257 PADDR_HI(xrip->SGList.phys);
8258
8259 cnt--;
8260 xrip++;
8261 }
8262
8263 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8264 MBX_SUCCESS) {
8265 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8266 "Unable to POST_SGL. Mailbox cmd=%x "
8267 "status=%x XRI cnt:%d start:%d",
8268 mb->mbxCommand, mb->mbxStatus,
8269 post_sgl->params.request.xri_count,
8270 post_sgl->params.request.xri_start);
8271 emlxs_mem_buf_free(hba, mp);
8272 mbq->nonembed = NULL;
8273 return (EIO);
8274 }
8275 }
8276 }
8277
8655 } else {
8656 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
8657 }
8658
8659 hba->flag |= FC_MBOX_TIMEOUT;
8660 EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);
8661
8662 mutex_exit(&EMLXS_PORT_LOCK);
8663
8664 /* Perform mailbox cleanup */
8665 /* This will wake any sleeping or polling threads */
8666 emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
8667
8668 /* Trigger adapter shutdown */
8669 emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);
8670
8671 return;
8672
8673 } /* emlxs_sli4_timer_check_mbox() */
8674
8675
8676 extern void
8677 emlxs_data_dump(emlxs_port_t *port, char *str, uint32_t *iptr, int cnt, int err)
8678 {
8679 void *msg;
8680
8681 if (!port || !str || !iptr || !cnt) {
8682 return;
8683 }
8684
8685 if (err) {
8686 msg = &emlxs_sli_err_msg;
8687 } else {
8688 msg = &emlxs_sli_detail_msg;
8689 }
8690
8691 if (cnt) {
8692 EMLXS_MSGF(EMLXS_CONTEXT, msg,
8693 "%s00: %08x %08x %08x %08x %08x %08x", str, *iptr,
8694 *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
8695 }
9096 /* Set linkspeed */
9097 switch (cqe->un.fc.port_speed) {
9098 case 1:
9099 hba->linkspeed = LA_1GHZ_LINK;
9100 break;
9101 case 2:
9102 hba->linkspeed = LA_2GHZ_LINK;
9103 break;
9104 case 4:
9105 hba->linkspeed = LA_4GHZ_LINK;
9106 break;
9107 case 8:
9108 hba->linkspeed = LA_8GHZ_LINK;
9109 break;
9110 case 10:
9111 hba->linkspeed = LA_10GHZ_LINK;
9112 break;
9113 case 16:
9114 hba->linkspeed = LA_16GHZ_LINK;
9115 break;
9116 default:
9117 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9118 "sli4_handle_fc_link_att: Unknown link speed=%x.",
9119 cqe->un.fc.port_speed);
9120 hba->linkspeed = 0;
9121 break;
9122 }
9123
9124 /* Set qos_linkspeed */
9125 hba->qos_linkspeed = cqe->un.fc.link_speed;
9126
9127 /* Set topology */
9128 hba->topology = cqe->un.fc.topology;
9129
9130 mutex_enter(&EMLXS_PORT_LOCK);
9131 hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9132 mutex_exit(&EMLXS_PORT_LOCK);
9133
9134 (void) emlxs_fcf_linkup_notify(port);
9135
|
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
26 */
27
28 #include <emlxs.h>
29
30
31 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
32 EMLXS_MSG_DEF(EMLXS_SLI4_C);
33
34 static int emlxs_sli4_init_extents(emlxs_hba_t *hba,
35 MAILBOXQ *mbq);
36 static uint32_t emlxs_sli4_read_status(emlxs_hba_t *hba);
37
38 static int emlxs_init_bootstrap_mb(emlxs_hba_t *hba);
39
40 static uint32_t emlxs_sli4_read_sema(emlxs_hba_t *hba);
41
42 static uint32_t emlxs_sli4_read_mbdb(emlxs_hba_t *hba);
43
44 static void emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint32_t value);
45
105 #ifdef MSI_SUPPORT
106 static uint32_t emlxs_sli4_msi_intr(char *arg1, char *arg2);
107 #endif /* MSI_SUPPORT */
108
109 static void emlxs_sli4_resource_free(emlxs_hba_t *hba);
110
111 static int emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
112 extern void emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
113
114 static XRIobj_t *emlxs_sli4_alloc_xri(emlxs_port_t *port,
115 emlxs_buf_t *sbp, RPIobj_t *rpip,
116 uint32_t type);
117 static void emlxs_sli4_enable_intr(emlxs_hba_t *hba);
118
119 static void emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
120
121 static void emlxs_sli4_timer(emlxs_hba_t *hba);
122
123 static void emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
124
125 static void emlxs_sli4_gpio_timer_start(emlxs_hba_t *hba);
126
127 static void emlxs_sli4_gpio_timer_stop(emlxs_hba_t *hba);
128
129 static void emlxs_sli4_gpio_timer(void *arg);
130
131 static void emlxs_sli4_check_gpio(emlxs_hba_t *hba);
132
133 static uint32_t emlxs_sli4_fix_gpio(emlxs_hba_t *hba,
134 uint8_t *pin, uint8_t *pinval);
135
136 static uint32_t emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq);
137
138 static void emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
139
140 extern XRIobj_t *emlxs_sli4_reserve_xri(emlxs_port_t *port,
141 RPIobj_t *rpip, uint32_t type, uint16_t rx_id);
142 static int emlxs_check_hdw_ready(emlxs_hba_t *);
143
144 static uint32_t emlxs_sli4_reg_did(emlxs_port_t *port,
145 uint32_t did, SERV_PARM *param,
146 emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
147 IOCBQ *iocbq);
148
149 static uint32_t emlxs_sli4_unreg_node(emlxs_port_t *port,
150 emlxs_node_t *node, emlxs_buf_t *sbp,
151 fc_unsol_buf_t *ubp, IOCBQ *iocbq);
152
153 static void emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba,
154 CQE_ASYNC_t *cqe);
155 static void emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba,
156 CQE_ASYNC_t *cqe);
157
329 hba->fc_altov = FF_DEF_ALTOV;
330 hba->fc_arbtov = FF_DEF_ARBTOV;
331
332 /* Networking not supported */
333 if (cfg[CFG_NETWORK_ON].current) {
334 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
335 "Networking is not supported in SLI4, turning it off");
336 cfg[CFG_NETWORK_ON].current = 0;
337 }
338
339 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
340 if (hba->chan_count > MAX_CHANNEL) {
341 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
342 "Max channels exceeded, dropping num-wq from %d to 1",
343 cfg[CFG_NUM_WQ].current);
344 cfg[CFG_NUM_WQ].current = 1;
345 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
346 }
347 hba->channel_fcp = 0; /* First channel */
348
349 /* Gen6 chips only support P2P topologies */
350 if ((hba->model_info.flags & EMLXS_FC_GEN6) &&
351 cfg[CFG_TOPOLOGY].current != 2) {
352 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
353 "Loop topologies are not supported by this HBA. "
354 "Forcing topology to P2P.");
355 cfg[CFG_TOPOLOGY].current = 2;
356 }
357
358 /* Default channel for everything else is the last channel */
359 hba->channel_ip = hba->chan_count - 1;
360 hba->channel_els = hba->chan_count - 1;
361 hba->channel_ct = hba->chan_count - 1;
362
363 hba->fc_iotag = 1;
364 hba->io_count = 0;
365 hba->channel_tx_count = 0;
366
367 /* Specific to ATTO G5 boards */
368 if (hba->model_info.flags & EMLXS_GPIO_LEDS) {
369 /* Set hard-coded GPIO pins */
370 if (hba->pci_function_number) {
371 hba->gpio_pin[EMLXS_GPIO_PIN_LO] = 27;
372 hba->gpio_pin[EMLXS_GPIO_PIN_HI] = 28;
373 hba->gpio_pin[EMLXS_GPIO_PIN_ACT] = 29;
374 hba->gpio_pin[EMLXS_GPIO_PIN_LASER] = 8;
375 } else {
376 hba->gpio_pin[EMLXS_GPIO_PIN_LO] = 13;
377 hba->gpio_pin[EMLXS_GPIO_PIN_HI] = 25;
378 hba->gpio_pin[EMLXS_GPIO_PIN_ACT] = 26;
379 hba->gpio_pin[EMLXS_GPIO_PIN_LASER] = 12;
380 }
381 }
382
383 /* Initialize the local dump region buffer */
384 bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
385 hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
386 hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG
387 | FC_MBUF_DMA32;
388 hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
389
390 (void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
391
392 if (hba->sli.sli4.dump_region.virt == NULL) {
393 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
394 "Unable to allocate dump region buffer.");
395
396 return (ENOMEM);
397 }
398
399 /*
400 * Get a buffer which will be used repeatedly for mailbox commands
401 */
402 mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
1351 (sizeof (vpd->part_num)-1));
1352 }
1353
1354 if (vpd->model_desc[0] == 0) {
1355 (void) snprintf(vpd->model_desc, (sizeof (vpd->model_desc)-1),
1356 "%s %d",
1357 hba->model_info.model_desc, vpd->port_index);
1358 }
1359
1360 if (vpd->model[0] == 0) {
1361 (void) strncpy(vpd->model, hba->model_info.model,
1362 (sizeof (vpd->model)-1));
1363 }
1364
1365 if (vpd->prog_types[0] == 0) {
1366 emlxs_build_prog_types(hba, vpd);
1367 }
1368
1369 /* Create the symbolic names */
1370 (void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1371 "%s %s FV%s DV%s %s",
1372 hba->model_info.manufacturer, hba->model_info.model,
1373 hba->vpd.fw_version, emlxs_version,
1374 (char *)utsname.nodename);
1375
1376 (void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1377 "%s PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1378 hba->model_info.manufacturer,
1379 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1380 hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1381 hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1382
1383
1384 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1385 emlxs_sli4_enable_intr(hba);
1386
1387 /* Check persist-linkdown */
1388 if (cfg[CFG_PERSIST_LINKDOWN].current) {
1389 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1390 goto done;
1391 }
1392
1393 #ifdef SFCT_SUPPORT
1394 if ((port->mode == MODE_TARGET) &&
1395 !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1396 goto done;
1397 }
1398 #endif /* SFCT_SUPPORT */
1429 mb->mbxStatus);
1430
1431 rval = EIO;
1432 goto failed3;
1433 }
1434
1435 BUSYWAIT_MS(1000);
1436 i--;
1437 }
1438
1439 done:
1440 /*
1441	 * The Leadville driver will now handle the FLOGI at the driver level
1442 */
1443
1444 if (mbq) {
1445 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1446 mbq = NULL;
1447 mb = NULL;
1448 }
1449
1450 if (hba->model_info.flags & EMLXS_GPIO_LEDS)
1451 emlxs_sli4_gpio_timer_start(hba);
1452
1453 return (0);
1454
1455 failed3:
1456 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1457
1458 if (mp) {
1459 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1460 mp = NULL;
1461 }
1462
1463
1464 if (hba->intr_flags & EMLXS_MSI_ADDED) {
1465 (void) EMLXS_INTR_REMOVE(hba);
1466 }
1467
1468 emlxs_sli4_resource_free(hba);
1469
1470 failed2:
1471 (void) emlxs_mem_free_buffer(hba);
1472
1478 }
1479
1480 if (hba->sli.sli4.dump_region.virt) {
1481 (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1482 }
1483
1484 if (rval == 0) {
1485 rval = EIO;
1486 }
1487
1488 return (rval);
1489
1490 } /* emlxs_sli4_online() */
1491
1492
1493 static void
1494 emlxs_sli4_offline(emlxs_hba_t *hba, uint32_t reset_requested)
1495 {
1496 /* Reverse emlxs_sli4_online */
1497
1498 if (hba->model_info.flags & EMLXS_GPIO_LEDS)
1499 emlxs_sli4_gpio_timer_stop(hba);
1500
1501 mutex_enter(&EMLXS_PORT_LOCK);
1502 if (hba->flag & FC_INTERLOCKED) {
1503 mutex_exit(&EMLXS_PORT_LOCK);
1504 goto killed;
1505 }
1506 mutex_exit(&EMLXS_PORT_LOCK);
1507
1508 if (reset_requested) {
1509 (void) emlxs_sli4_hba_reset(hba, 0, 0, 0);
1510 }
1511
1512 /* Shutdown the adapter interface */
1513 emlxs_sli4_hba_kill(hba);
1514
1515 killed:
1516
1517 /* Free SLI shared memory */
1518 emlxs_sli4_resource_free(hba);
1519
1520 /* Free driver shared memory */
2394
2395 /* Reset the hba structure */
2396 hba->flag &= FC_RESET_MASK;
2397
2398 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2399 cp = &hba->chan[channelno];
2400 cp->hba = hba;
2401 cp->channelno = channelno;
2402 }
2403
2404 hba->channel_tx_count = 0;
2405 hba->io_count = 0;
2406 hba->iodone_count = 0;
2407 hba->topology = 0;
2408 hba->linkspeed = 0;
2409 hba->heartbeat_active = 0;
2410 hba->discovery_timer = 0;
2411 hba->linkup_timer = 0;
2412 hba->loopback_tics = 0;
2413
2414 /* Specific to ATTO G5 boards */
2415 if (hba->model_info.flags & EMLXS_GPIO_LEDS) {
2416 /* Assume the boot driver enabled all LEDs */
2417 hba->gpio_current =
2418 EMLXS_GPIO_LO | EMLXS_GPIO_HI | EMLXS_GPIO_ACT;
2419 hba->gpio_desired = 0;
2420 hba->gpio_bit = 0;
2421 }
2422
2423 /* Reset the port objects */
2424 for (i = 0; i < MAX_VPORTS; i++) {
2425 vport = &VPORT(i);
2426
2427 vport->flag &= EMLXS_PORT_RESET_MASK;
2428 vport->did = 0;
2429 vport->prev_did = 0;
2430 vport->lip_type = 0;
2431 bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
2432 bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));
2433
2434 bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
2435 vport->node_base.nlp_Rpi = 0;
2436 vport->node_base.nlp_DID = 0xffffff;
2437 vport->node_base.nlp_list_next = NULL;
2438 vport->node_base.nlp_list_prev = NULL;
2439 vport->node_base.nlp_active = 1;
2440 vport->node_count = 0;
2441
2442 if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
2595 /*ARGSUSED*/
2596 uint32_t
2597 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2598 {
2599 emlxs_hba_t *hba = HBA;
2600 fc_packet_t *pkt;
2601 XRIobj_t *xrip;
2602 ULP_SGE64 *sge;
2603 emlxs_wqe_t *wqe;
2604 IOCBQ *iocbq;
2605 ddi_dma_cookie_t *cp_cmd;
2606 ddi_dma_cookie_t *cp_data;
2607 uint64_t sge_addr;
2608 uint32_t cmd_cnt;
2609 uint32_t resp_cnt;
2610
2611 iocbq = (IOCBQ *) &sbp->iocbq;
2612 wqe = &iocbq->wqe;
2613 pkt = PRIV2PKT(sbp);
2614 xrip = sbp->xrip;
2615 sge = xrip->SGList->virt;
2616
2617 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2618 cp_cmd = pkt->pkt_cmd_cookie;
2619 cp_data = pkt->pkt_data_cookie;
2620 #else
2621 cp_cmd = &pkt->pkt_cmd_cookie;
2622 cp_data = &pkt->pkt_data_cookie;
2623 #endif /* >= EMLXS_MODREV3 */
2624
2625 iocbq = &sbp->iocbq;
2626 if (iocbq->flag & IOCB_FCP_CMD) {
2627
2628 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2629 return (1);
2630 }
2631
2632 /* CMD payload */
2633 sge = emlxs_pkt_to_sgl(port, pkt, sge, SGL_CMD, &cmd_cnt);
2634 if (! sge) {
2635 return (1);
2745
2746 size = sbp->fct_buf->db_data_size;
2747
2748 /*
2749			 * The hardware will automatically round up
2750			 * to a multiple of 4.
2751 *
2752 * if (size & 3) {
2753 * size = (size + 3) & 0xfffffffc;
2754 * }
2755 */
2756 fct_mp = (MATCHMAP *)sbp->fct_buf->db_port_private;
2757
2758 if (sbp->fct_buf->db_sglist_length != 1) {
2759 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
2760 "fct_bde_setup: Only 1 sglist entry supported: %d",
2761 sbp->fct_buf->db_sglist_length);
2762 return (1);
2763 }
2764
2765 sge = xrip->SGList->virt;
2766
2767 if (iocb->ULPCOMMAND == CMD_FCP_TRECEIVE64_CX) {
2768
2769 mp = emlxs_mem_buf_alloc(hba, EMLXS_XFER_RDY_SIZE);
2770 if (!mp || !mp->virt || !mp->phys) {
2771 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
2772 "fct_bde_setup: Cannot allocate XRDY memory");
2773 return (1);
2774 }
2775 /* Save the MATCHMAP info to free this memory later */
2776 iocbq->bp = mp;
2777
2778 /* Point to XRDY payload */
2779 xrdy_vaddr = (uint32_t *)(mp->virt);
2780
2781 /* Fill in burstsize in payload */
2782 *xrdy_vaddr++ = 0;
2783 *xrdy_vaddr++ = LE_SWAP32(size);
2784 *xrdy_vaddr = 0;
2785
4028 emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
4029 IOERR_NO_XRI, 0);
4030 return (0xff);
4031 }
4032
4033 cmd_sbp->iotag = xrip->iotag;
4034 cmd_sbp->channel = cp;
4035
4036 #if (EMLXS_MODREV >= EMLXS_MODREV3)
4037 cp_cmd = pkt->pkt_cmd_cookie;
4038 #else
4039 cp_cmd = &pkt->pkt_cmd_cookie;
4040 #endif /* >= EMLXS_MODREV3 */
4041
4042 sge_size = pkt->pkt_cmdlen;
4043 /* Make size a multiple of 4 */
4044 if (sge_size & 3) {
4045 sge_size = (sge_size + 3) & 0xfffffffc;
4046 }
4047 sge_addr = cp_cmd->dmac_laddress;
4048 sge = xrip->SGList->virt;
4049
4050 stage_sge.addrHigh = PADDR_HI(sge_addr);
4051 stage_sge.addrLow = PADDR_LO(sge_addr);
4052 stage_sge.length = sge_size;
4053 stage_sge.offset = 0;
4054 stage_sge.type = 0;
4055 stage_sge.last = 1;
4056
4057 /* Copy staged SGE into SGL */
4058 BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
4059 (uint8_t *)sge, sizeof (ULP_SGE64));
4060
4061 /* Words 0-3 */
4062 wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
4063 wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
4064 wqe->un.FcpCmd.Payload.tus.f.bdeSize = sge_size;
4065 wqe->un.FcpCmd.PayloadLength = sge_size;
4066
4067 /* Word 6 */
4068 wqe->ContextTag = ndlp->nlp_Rpi;
4220
4221 } /* emlxs_sli4_prep_fct_iocb() */
4222 #endif /* SFCT_SUPPORT */
4223
4224
4225 /*ARGSUSED*/
4226 extern uint32_t
4227 emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
4228 {
4229 emlxs_hba_t *hba = HBA;
4230 fc_packet_t *pkt;
4231 CHANNEL *cp;
4232 RPIobj_t *rpip;
4233 XRIobj_t *xrip;
4234 emlxs_wqe_t *wqe;
4235 IOCBQ *iocbq;
4236 IOCB *iocb;
4237 NODELIST *node;
4238 uint16_t iotag;
4239 uint32_t did;
4240
4241 pkt = PRIV2PKT(sbp);
4242 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
4243 cp = &hba->chan[channel];
4244
4245 iocbq = &sbp->iocbq;
4246 iocbq->channel = (void *) cp;
4247 iocbq->port = (void *) port;
4248
4249 wqe = &iocbq->wqe;
4250 iocb = &iocbq->iocb;
4251 bzero((void *)wqe, sizeof (emlxs_wqe_t));
4252 bzero((void *)iocb, sizeof (IOCB));
4253
4254 /* Find target node object */
4255 node = (NODELIST *)iocbq->node;
4256 rpip = EMLXS_NODE_TO_RPI(port, node);
4257
4258 if (!rpip) {
4259 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4281 #ifdef DEBUG_FASTPATH
4282 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4283 "FCP: Prep xri=%d iotag=%d oxid=%x rpi=%d",
4284 xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
4285 #endif /* DEBUG_FASTPATH */
4286
4287 /* Indicate this is a FCP cmd */
4288 iocbq->flag |= IOCB_FCP_CMD;
4289
4290 if (emlxs_sli4_bde_setup(port, sbp)) {
4291 emlxs_sli4_free_xri(port, sbp, xrip, 1);
4292 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4293 "Adapter Busy. Unable to setup SGE. did=0x%x", did);
4294
4295 return (FC_TRAN_BUSY);
4296 }
4297
4298 /* DEBUG */
4299 #ifdef DEBUG_FCP
4300 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4301 "FCP: SGLaddr virt %p phys %p size %d", xrip->SGList->virt,
4302 xrip->SGList->phys, pkt->pkt_datalen);
4303 emlxs_data_dump(port, "FCP: SGL",
4304 (uint32_t *)xrip->SGList->virt, 20, 0);
4305 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4306 "FCP: CMD virt %p len %d:%d:%d",
4307 pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
4308 emlxs_data_dump(port, "FCP: CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
4309 #endif /* DEBUG_FCP */
4310
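	/* Sync the entire SGL buffer to the device before posting the WQE */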
4311 EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
4312 xrip->SGList->size, DDI_DMA_SYNC_FORDEV);
4313
4315	/* If the device is an FCP-2 device, set the following bit */
4315 /* that says to run the FC-TAPE protocol. */
4316 if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
4317 wqe->ERP = 1;
4318 }
4319
4320 if (pkt->pkt_datalen == 0) {
4321 iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
4322 wqe->Command = CMD_FCP_ICMND64_CR;
4323 wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
4324 } else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
4325 iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
4326 wqe->Command = CMD_FCP_IREAD64_CR;
4327 wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
4328 wqe->PU = PARM_XFER_CHECK;
4329 } else {
4330 iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
4331 wqe->Command = CMD_FCP_IWRITE64_CR;
4332 wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
4333 }
4377 static uint32_t
4378 emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
4379 {
4380 emlxs_hba_t *hba = HBA;
4381 fc_packet_t *pkt;
4382 IOCBQ *iocbq;
4383 IOCB *iocb;
4384 emlxs_wqe_t *wqe;
4385 FCFIobj_t *fcfp;
4386 RPIobj_t *reserved_rpip = NULL;
4387 RPIobj_t *rpip = NULL;
4388 XRIobj_t *xrip;
4389 CHANNEL *cp;
4390 uint32_t did;
4391 uint32_t cmd;
4392 ULP_SGE64 stage_sge;
4393 ULP_SGE64 *sge;
4394 ddi_dma_cookie_t *cp_cmd;
4395 ddi_dma_cookie_t *cp_resp;
4396 emlxs_node_t *node;
4397
4398 pkt = PRIV2PKT(sbp);
4399 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
4400
4401 iocbq = &sbp->iocbq;
4402 wqe = &iocbq->wqe;
4403 iocb = &iocbq->iocb;
4404 bzero((void *)wqe, sizeof (emlxs_wqe_t));
4405 bzero((void *)iocb, sizeof (IOCB));
4406 cp = &hba->chan[hba->channel_els];
4407
4408 	/* Initialize iocbq */
4409 iocbq->port = (void *) port;
4410 iocbq->channel = (void *) cp;
4411
4412 sbp->channel = cp;
4413 sbp->bmp = NULL;
4414
4415 #if (EMLXS_MODREV >= EMLXS_MODREV3)
4416 cp_cmd = pkt->pkt_cmd_cookie;
4470
4471 iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
4472 wqe->Command = CMD_XMIT_ELS_RSP64_CX;
4473 wqe->CmdType = WQE_TYPE_GEN;
4474 if (!(hba->sli.sli4.param.PHWQ)) {
4475 wqe->DBDE = 1; /* Data type for BDE 0 */
4476 }
4477
4478 wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
4479 wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
4480 wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
4481 wqe->un.ElsCmd.PayloadLength = pkt->pkt_cmdlen;
4482
4483 wqe->un.ElsRsp.RemoteId = did;
4484 wqe->PU = 0x3;
4485 wqe->OXId = xrip->rx_id;
4486
4487 sge->last = 1;
4488 /* Now sge is fully staged */
4489
4490 sge = xrip->SGList->virt;
4491 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
4492 sizeof (ULP_SGE64));
4493
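		/*
		 * FABRIC_RPI indicates that no RPI has been registered
		 * for this destination, so the response is sent in VPI
		 * context; otherwise the node's RPI context is used.
		 */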
4494 if (rpip->RPI == FABRIC_RPI) {
4495 wqe->ContextTag = port->vpip->VPI;
4496 wqe->ContextType = WQE_VPI_CONTEXT;
4497 } else {
4498 wqe->ContextTag = rpip->RPI;
4499 wqe->ContextType = WQE_RPI_CONTEXT;
4500 }
4501
4502 if ((cmd == ELS_CMD_ACC) && (sbp->ucmd == ELS_CMD_FLOGI)) {
4503 wqe->un.ElsCmd.SP = 1;
4504 wqe->un.ElsCmd.LocalId = 0xFFFFFE;
4505 }
4506
4507 } else {
4508 /* ELS Request */
4509
4510 fcfp = port->vpip->vfip->fcfp;
4535 iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
4536 wqe->Command = CMD_ELS_REQUEST64_CR;
4537 wqe->CmdType = WQE_TYPE_ELS;
4538 if (!(hba->sli.sli4.param.PHWQ)) {
4539 wqe->DBDE = 1; /* Data type for BDE 0 */
4540 }
4541
4542 wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
4543 wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
4544 wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
4545
4546 wqe->un.ElsCmd.RemoteId = did;
4547 wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4548
4549 /* setup for rsp */
4550 iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
4551 iocb->ULPPU = 1; /* Wd4 is relative offset */
4552
4553 sge->last = 0;
4554
4555 sge = xrip->SGList->virt;
4556 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
4557 sizeof (ULP_SGE64));
4558
4559 wqe->un.ElsCmd.PayloadLength =
4560 pkt->pkt_cmdlen; /* Byte offset of rsp data */
4561
4562 /* RSP payload */
4563 sge = &stage_sge;
4564 sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
4565 sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
4566 sge->length = pkt->pkt_rsplen;
4567 sge->offset = 0;
4568 sge->last = 1;
4569 /* Now sge is fully staged */
4570
4571 sge = xrip->SGList->virt;
4572 sge++;
4573 BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
4574 sizeof (ULP_SGE64));
4575 #ifdef DEBUG_ELS
4576 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4577 "ELS: SGLaddr virt %p phys %p",
4578 xrip->SGList->virt, xrip->SGList->phys);
4579 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4580 "ELS: PAYLOAD virt %p phys %p",
4581 pkt->pkt_cmd, cp_cmd->dmac_laddress);
4582 emlxs_data_dump(port, "ELS: SGL",
4583 (uint32_t *)xrip->SGList->virt, 12, 0);
4584 #endif /* DEBUG_ELS */
4585
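		/*
		 * FLOGI is issued in FCFI context on if_type 0 ports and
		 * in VPI context otherwise; FIP-capable adapters also
		 * mark the WQE as a FIP command.
		 */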
4586 switch (cmd) {
4587 case ELS_CMD_FLOGI:
4588 wqe->un.ElsCmd.SP = 1;
4589
4590 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) ==
4591 SLI_INTF_IF_TYPE_0) {
4592 wqe->ContextTag = fcfp->FCFI;
4593 wqe->ContextType = WQE_FCFI_CONTEXT;
4594 } else {
4595 wqe->ContextTag = port->vpip->VPI;
4596 wqe->ContextType = WQE_VPI_CONTEXT;
4597 }
4598
4599 if (hba->flag & FC_FIP_SUPPORTED) {
4600 wqe->CmdType |= WQE_TYPE_MASK_FIP;
4601 }
4602
4603 if (hba->topology == TOPOLOGY_LOOP) {
4669 reserved_rpip = emlxs_rpi_reserve_notify(port, did, xrip);
4670
4671 if (!reserved_rpip) {
4672 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
4673 "Unable to alloc reserved RPI. rxid=%x. Rejecting.",
4674 pkt->pkt_cmd_fhdr.rx_id);
4675
4676 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
4677 IOERR_INVALID_RPI, 0);
4678 return (0xff);
4679 }
4680
4681 /* Store the reserved rpi */
4682 if (wqe->Command == CMD_ELS_REQUEST64_CR) {
4683 wqe->OXId = reserved_rpip->RPI;
4684 } else {
4685 wqe->CmdSpecific = reserved_rpip->RPI;
4686 }
4687 }
4688
4689 EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
4690 xrip->SGList->size, DDI_DMA_SYNC_FORDEV);
4691
4692 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
4693 wqe->CCPE = 1;
4694 wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
4695 }
4696
4697 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4698 case FC_TRAN_CLASS2:
4699 wqe->Class = CLASS2;
4700 break;
4701 case FC_TRAN_CLASS3:
4702 default:
4703 wqe->Class = CLASS3;
4704 break;
4705 }
4706 sbp->class = wqe->Class;
4707 wqe->XRITag = xrip->XRI;
4708 wqe->RequestTag = xrip->iotag;
4709 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4710 return (FC_SUCCESS);
4711
4712 } /* emlxs_sli4_prep_els_iocb() */
4713
4714
4715 /*ARGSUSED*/
4716 static uint32_t
4717 emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
4718 {
4719 emlxs_hba_t *hba = HBA;
4720 fc_packet_t *pkt;
4721 IOCBQ *iocbq;
4722 IOCB *iocb;
4723 emlxs_wqe_t *wqe;
4724 NODELIST *node = NULL;
4725 CHANNEL *cp;
4726 RPIobj_t *rpip;
4727 XRIobj_t *xrip;
4728 uint32_t did;
4729
4730 pkt = PRIV2PKT(sbp);
4731 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
4732
4733 iocbq = &sbp->iocbq;
4734 wqe = &iocbq->wqe;
4735 iocb = &iocbq->iocb;
4736 bzero((void *)wqe, sizeof (emlxs_wqe_t));
4737 bzero((void *)iocb, sizeof (IOCB));
4738
4739 cp = &hba->chan[hba->channel_ct];
4740
4741 iocbq->port = (void *) port;
4742 iocbq->channel = (void *) cp;
4743
4744 sbp->bmp = NULL;
4745 sbp->channel = cp;
4746
4747 	/* Initialize wqe */
4748 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
4860 "Adapter Busy. Unable to setup SGE. did=0x%x", did);
4861
4862 emlxs_sli4_free_xri(port, sbp, xrip, 1);
4863 return (FC_TRAN_BUSY);
4864 }
4865
4866 if (!(hba->sli.sli4.param.PHWQ)) {
4867 wqe->DBDE = 1; /* Data type for BDE 0 */
4868 }
4869
4870 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
4871 wqe->CmdType = WQE_TYPE_GEN;
4872 wqe->Command = CMD_GEN_REQUEST64_CR;
4873 wqe->un.GenReq.la = 1;
4874 wqe->un.GenReq.DFctl = pkt->pkt_cmd_fhdr.df_ctl;
4875 wqe->un.GenReq.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
4876 wqe->un.GenReq.Type = pkt->pkt_cmd_fhdr.type;
4877
4878 #ifdef DEBUG_CT
4879 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4880 "CT: SGLaddr virt %p phys %p", xrip->SGList->virt,
4881 xrip->SGList->phys);
4882 emlxs_data_dump(port, "CT: SGL", (uint32_t *)xrip->SGList->virt,
4883 12, 0);
4884 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4885 "CT: CMD virt %p len %d:%d",
4886 pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
4887 emlxs_data_dump(port, "CT: DATA", (uint32_t *)pkt->pkt_cmd,
4888 20, 0);
4889 #endif /* DEBUG_CT */
4890
4891 #ifdef SFCT_SUPPORT
4892 /* This allows fct to abort the request */
4893 if (sbp->fct_cmd) {
4894 sbp->fct_cmd->cmd_oxid = xrip->XRI;
4895 sbp->fct_cmd->cmd_rxid = 0xFFFF;
4896 }
4897 #endif /* SFCT_SUPPORT */
4898 }
4899
4900 /* Setup for rsp */
4901 iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
4902 iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
4903 iocb->un.genreq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
4904 iocb->ULPPU = 1; /* Wd4 is relative offset */
4905
4906 EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
4907 xrip->SGList->size, DDI_DMA_SYNC_FORDEV);
4908
4909 wqe->ContextTag = rpip->RPI;
4910 wqe->ContextType = WQE_RPI_CONTEXT;
4911 wqe->XRITag = xrip->XRI;
4912 wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4913
4914 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
4915 wqe->CCPE = 1;
4916 wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
4917 }
4918
4919 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4920 case FC_TRAN_CLASS2:
4921 wqe->Class = CLASS2;
4922 break;
4923 case FC_TRAN_CLASS3:
4924 default:
4925 wqe->Class = CLASS3;
4926 break;
4927 }
4928 sbp->class = wqe->Class;
5131 emlxs_sli4_handle_fc_link_att(hba, cqe);
5132 break;
5133 case ASYNC_EVENT_FC_SHARED_LINK_ATT:
5134 HBASTATS.LinkEvent++;
5135
5136 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5137 "FC Async Event: Shared Link Attention. event=%x",
5138 HBASTATS.LinkEvent);
5139
5140 emlxs_sli4_handle_fc_link_att(hba, cqe);
5141 break;
5142 default:
5143 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5144 "FC Async Event: Unknown event. type=%d event=%x",
5145 cqe->event_type, HBASTATS.LinkEvent);
5146 }
5147 break;
5148 case ASYNC_EVENT_CODE_PORT:
5149 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5150 "SLI Port Async Event: type=%d", cqe->event_type);
5151
5152 switch (cqe->event_type) {
5153 case ASYNC_EVENT_PORT_OTEMP:
5154 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5155 "SLI Port Async Event: Temperature limit exceeded");
5156 cmn_err(CE_WARN,
5157 "^%s%d: Temperature limit exceeded. Fibre channel "
5158 "controller temperature %u degrees C",
5159 DRIVER_NAME, hba->ddiinst,
5160 BE_SWAP32(*(uint32_t *)cqe->un.port.link_status));
5161 break;
5162
5163 case ASYNC_EVENT_PORT_NTEMP:
5164 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5165 "SLI Port Async Event: Temperature returned to "
5166 "normal");
5167 cmn_err(CE_WARN,
5168 "^%s%d: Temperature returned to normal",
5169 DRIVER_NAME, hba->ddiinst);
5170 break;
5171
5172 case ASYNC_EVENT_MISCONFIG_PORT:
5173 *((uint32_t *)cqe->un.port.link_status) =
5174 BE_SWAP32(*((uint32_t *)cqe->un.port.link_status));
5175 status =
5176 cqe->un.port.link_status[hba->sli.sli4.link_number];
5177
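			/*
			 * Decode the per-link misconfiguration status;
			 * zero means no error, non-zero values identify
			 * optics faults on this link.
			 */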
5178 switch (status) {
5179 case 0 :
5180 break;
5181
5182 case 1 :
5183 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5184 "SLI Port Async Event: Physical media not "
5185 "detected");
5186 cmn_err(CE_WARN,
5187 "^%s%d: Optics faulted/incorrectly "
5188 "installed/not installed - Reseat optics, "
5189 "if issue not resolved, replace.",
5190 DRIVER_NAME, hba->ddiinst);
5191 break;
5192
5205 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5206 "SLI Port Async Event: Unsupported "
5207 "physical media detected");
5208 cmn_err(CE_WARN,
5209 "^%s%d: Incompatible optics - Replace "
5210 "with compatible optics for card to "
5211 "function.",
5212 DRIVER_NAME, hba->ddiinst);
5213 break;
5214
5215 default :
5216 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5217 "SLI Port Async Event: Physical media "
5218 "error, status=%x", status);
5219 cmn_err(CE_WARN,
5220 "^%s%d: Misconfigured port: status=0x%x - "
5221 "Check optics on card.",
5222 DRIVER_NAME, hba->ddiinst, status);
5223 break;
5224 }
5225 break;
5226 }
5227
5228 break;
5229 case ASYNC_EVENT_CODE_VF:
5230 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5231 "VF Async Event: type=%d",
5232 cqe->event_type);
5233 break;
5234 case ASYNC_EVENT_CODE_MR:
5235 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5236 "MR Async Event: type=%d",
5237 cqe->event_type);
5238 break;
5239 default:
5240 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5241 "Unknown Async Event: code=%d type=%d.",
5242 cqe->event_code, cqe->event_type);
5243 break;
5244 }
5245
5246 } /* emlxs_sli4_process_async_event() */
5247
6645 iocb->ULPBDECOUNT = 1;
6646
6647 iocb->ULPPU = 0x3;
6648 iocb->ULPCONTEXT = xrip->XRI;
6649 iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
6650 iocb->ULPCLASS = CLASS3;
6651 iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
6652
6653 iocb->unsli3.ext_rcv.seq_len = seq_len;
6654 iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;
6655 iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;
6656
6657 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6658 iocb->unsli3.ext_rcv.ccpe = 1;
6659 iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6660 }
6661
6662 /* pass xrip to FCT in the iocbq */
6663 iocbq->sbp = xrip;
6664
6665 (void) emlxs_fct_handle_unsol_req(port, cp, iocbq,
6666 seq_mp, seq_len);
6667 break;
6668 #endif /* SFCT_SUPPORT */
6669
6670 case 0x20: /* CT */
6671 if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED) &&
6672 !(hba->flag & FC_LOOPBACK_MODE)) {
6673 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6674 "RQ ENTRY: %s: Port not yet enabled. "
6675 "Dropping...",
6676 label);
6677
6678 goto done;
6679 }
6680
6681 if (!node) {
6682 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6683 "RQ ENTRY: %s: Node not found (did=%x). "
6684 "Dropping...",
7229 for (i = 0; i < hba->intr_count; i++) {
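		/*
		 * Rearm each event queue doorbell so the port resumes
		 * generating events and interrupts for that EQ.
		 */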
7230 data = hba->sli.sli4.eq[i].qid;
7231 data |= (EQ_DB_REARM | EQ_DB_EVENT);
7232 emlxs_sli4_write_cqdb(hba, data);
7233 }
7234 } /* emlxs_sli4_enable_intr() */
7235
7236
7237 static void
7238 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
7239 {
7240 if (att) {
7241 return;
7242 }
7243
7244 hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
7245
7246 /* Short of reset, we cannot disable interrupts */
7247 } /* emlxs_sli4_disable_intr() */
7248
7249 static void
7250 emlxs_sli4_resource_free(emlxs_hba_t *hba)
7251 {
7252 emlxs_port_t *port = &PPORT;
7253 MBUF_INFO *buf_info;
7254 uint32_t i;
7255
7256 buf_info = &hba->sli.sli4.slim2;
7257 if (buf_info->virt == 0) {
7258 /* Already free */
7259 return;
7260 }
7261
7262 emlxs_fcf_fini(hba);
7263
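	/*
	 * The RPI header template buffer is carved out of slim2 in
	 * emlxs_sli4_resource_alloc(), so only its descriptor is
	 * cleared here; the backing DMA memory is freed with slim2
	 * below.
	 */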
7264 buf_info = &hba->sli.sli4.HeaderTmplate;
7265 if (buf_info->virt) {
7266 bzero(buf_info, sizeof (MBUF_INFO));
7267 }
7268
7269 if (hba->sli.sli4.XRIp) {
7270 XRIobj_t *xrip;
7271
7272 if ((hba->sli.sli4.XRIinuse_f !=
7273 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
7274 (hba->sli.sli4.XRIinuse_b !=
7275 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
7276 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
7277 			    "XRIs still in use during free: %p %p != %p",
7278 hba->sli.sli4.XRIinuse_f,
7279 hba->sli.sli4.XRIinuse_b,
7280 &hba->sli.sli4.XRIinuse_f);
7281 }
7282
7283 xrip = hba->sli.sli4.XRIp;
7284 for (i = 0; i < hba->sli.sli4.XRICount; i++) {
7285 xrip->XRI = emlxs_sli4_index_to_xri(hba, i);
7286
7287 if (xrip->XRI != 0)
7288 emlxs_mem_put(hba, xrip->SGSeg, xrip->SGList);
7289
7290 xrip++;
7291 }
7292
7293 kmem_free(hba->sli.sli4.XRIp,
7294 (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
7295 hba->sli.sli4.XRIp = NULL;
7296
7297 hba->sli.sli4.XRIfree_f =
7298 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7299 hba->sli.sli4.XRIfree_b =
7300 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7301 hba->sli.sli4.xrif_count = 0;
7302 }
7303
7304 for (i = 0; i < hba->intr_count; i++) {
7305 mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
7306 bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
7307 hba->sli.sli4.eq[i].qid = 0xffff;
7308 }
7309 for (i = 0; i < EMLXS_MAX_CQS; i++) {
7310 bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
7311 hba->sli.sli4.cq[i].qid = 0xffff;
7312 }
7317 for (i = 0; i < EMLXS_MAX_RXQS; i++) {
7318 mutex_destroy(&hba->sli.sli4.rxq[i].lock);
7319 bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
7320 }
7321 for (i = 0; i < EMLXS_MAX_RQS; i++) {
7322 mutex_destroy(&hba->sli.sli4.rq[i].lock);
7323 bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
7324 hba->sli.sli4.rq[i].qid = 0xffff;
7325 }
7326
7327 	/* Reset the MQ descriptor */
7328 bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
7329
7330 buf_info = &hba->sli.sli4.slim2;
7331 if (buf_info->virt) {
7332 buf_info->flags = FC_MBUF_DMA;
7333 emlxs_mem_free(hba, buf_info);
7334 bzero(buf_info, sizeof (MBUF_INFO));
7335 }
7336
7337 /* GPIO lock */
7338 if (hba->model_info.flags & EMLXS_GPIO_LEDS)
7339 mutex_destroy(&hba->gpio_lock);
7340
7341 } /* emlxs_sli4_resource_free() */
7342
7343 static int
7344 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
7345 {
7346 emlxs_port_t *port = &PPORT;
7347 emlxs_config_t *cfg = &CFG;
7348 MBUF_INFO *buf_info;
7349 int num_eq;
7350 int num_wq;
7351 uint16_t i;
7352 uint32_t j;
7353 uint32_t k;
7354 uint16_t cq_depth;
7355 uint32_t cq_size;
7356 uint32_t word;
7357 XRIobj_t *xrip;
7358 RQE_t *rqe;
7359 MBUF_INFO *rqb;
7360 uint64_t phys;
7361 uint64_t tmp_phys;
7362 char *virt;
7400
7401 /* EQ */
7402 count += num_eq * 4096;
7403
7404 /* CQ */
7405 count += (num_wq + EMLXS_CQ_OFFSET_WQ) * cq_size;
7406
7407 /* WQ */
7408 count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);
7409
7410 /* MQ */
7411 count += EMLXS_MAX_MQS * 4096;
7412
7413 /* RQ */
7414 count += EMLXS_MAX_RQS * 4096;
7415
7416 /* RQB/E */
7417 count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
7418 count += (4096 - (count%4096)); /* Ensure 4K alignment */
7419
7420 /* RPI Header Templates */
7421 if (hba->sli.sli4.param.HDRR) {
7422 /* Bytes per extent */
7423 j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);
7424
7425 /* Pages required per extent (page == 4096 bytes) */
7426 k = (j/4096) + ((j%4096)? 1:0);
7427
7428 /* Total size */
7429 hddr_size = (k * hba->sli.sli4.RPIExtCount * 4096);
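		/*
		 * Example with illustrative values only: a 64-byte
		 * RPIHdrTmplate_t and 256 RPIs per extent give
		 * j = 16384 bytes, k = 4 pages per extent; with 2
		 * extents, hddr_size = 4 * 2 * 4096 = 32768 bytes.
		 */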
7430
7431 count += hddr_size;
7432 }
7433
7434 /* Allocate slim2 for SLI4 */
7435 buf_info = &hba->sli.sli4.slim2;
7436 buf_info->size = count;
7437 buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7438 buf_info->align = ddi_ptob(hba->dip, 1L);
7439
7440 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7441 "Allocating memory for slim2: %d", count);
7442
7443 (void) emlxs_mem_alloc(hba, buf_info);
7444
7445 if (buf_info->virt == NULL) {
7446 EMLXS_MSGF(EMLXS_CONTEXT,
7447 &emlxs_init_failed_msg,
7448 "Unable to allocate internal memory for SLI4: %d",
7449 count);
7450 goto failed;
7451 }
7452 bzero(buf_info->virt, buf_info->size);
7453 EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
7454 buf_info->size, DDI_DMA_SYNC_FORDEV);
7455
7456 /* Assign memory to Head Template, EQ, CQ, WQ, RQ and MQ */
7457 data_handle = buf_info->data_handle;
7458 dma_handle = buf_info->dma_handle;
7459 phys = buf_info->phys;
7460 virt = (char *)buf_info->virt;
7461
7462 /* Allocate space for queues */
7463
7464 /* EQ */
7465 size = 4096;
7466 for (i = 0; i < num_eq; i++) {
7467 bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
7468
7469 buf_info = &hba->sli.sli4.eq[i].addr;
7470 buf_info->size = size;
7471 buf_info->flags =
7472 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7473 buf_info->align = ddi_ptob(hba->dip, 1L);
7474 buf_info->phys = phys;
7475 buf_info->virt = (void *)virt;
7476 buf_info->data_handle = data_handle;
7626
7627 rqe++;
7628 }
7629 }
7630
7631 offset = (off_t)((uint64_t)((unsigned long)
7632 hba->sli.sli4.rq[i].addr.virt) -
7633 (uint64_t)((unsigned long)
7634 hba->sli.sli4.slim2.virt));
7635
7636 /* Sync the RQ buffer list */
7637 EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
7638 hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
7639 }
7640
7641 /* 4K Alignment */
7642 align = (4096 - (phys%4096));
7643 phys += align;
7644 virt += align;
7645
7646 /* RPI Header Templates */
7647 if (hba->sli.sli4.param.HDRR) {
7648 buf_info = &hba->sli.sli4.HeaderTmplate;
7649 bzero(buf_info, sizeof (MBUF_INFO));
7650 buf_info->size = hddr_size;
7651 buf_info->flags = FC_MBUF_DMA | FC_MBUF_DMA32;
7652 buf_info->align = ddi_ptob(hba->dip, 1L);
7653 buf_info->phys = phys;
7654 buf_info->virt = (void *)virt;
7655 buf_info->data_handle = data_handle;
7656 buf_info->dma_handle = dma_handle;
7657 }
7658
7659 /* SGL */
7660
7661 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7662 "Allocating memory for %d SGLs: %d/%d",
7663 hba->sli.sli4.XRICount, sizeof (XRIobj_t), size);
7664
7665 	/* Initialize doubly linked lists */
7666 hba->sli.sli4.XRIinuse_f =
7667 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7668 hba->sli.sli4.XRIinuse_b =
7669 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7670 hba->sli.sli4.xria_count = 0;
7671
7672 hba->sli.sli4.XRIfree_f =
7673 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7674 hba->sli.sli4.XRIfree_b =
7675 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7676 hba->sli.sli4.xrif_count = 0;
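	/*
	 * Both XRI lists are circular and use the list head itself,
	 * cast to an XRIobj_t pointer, as the sentinel node; an empty
	 * list is one whose forward and backward pointers both point
	 * back at the head address.
	 */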
7677
7678 uint32_t mseg;
7679
7680 switch (hba->sli.sli4.mem_sgl_size) {
7681 case 1024:
7682 mseg = MEM_SGL1K;
7683 break;
7684 case 2048:
7685 mseg = MEM_SGL2K;
7686 break;
7687 case 4096:
7688 mseg = MEM_SGL4K;
7689 break;
7690 default:
7691 EMLXS_MSGF(EMLXS_CONTEXT,
7692 &emlxs_init_failed_msg,
7693 "Unsupported SGL Size: %d", hba->sli.sli4.mem_sgl_size);
7694 goto failed;
7695 }
7696
7697 hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
7698 (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
7699
7700 xrip = hba->sli.sli4.XRIp;
7701 iotag = 1;
7702
7703 for (i = 0; i < hba->sli.sli4.XRICount; i++) {
7704 xrip->XRI = emlxs_sli4_index_to_xri(hba, i);
7705
7706 /* We don't use XRI==0, since it also represents an */
7707 /* uninitialized exchange */
7708 if (xrip->XRI == 0) {
7709 xrip++;
7710 continue;
7711 }
7712
7713 xrip->iotag = iotag++;
7714 xrip->sge_count =
7715 (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
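		/*
		 * For example, assuming a 16-byte ULP_SGE64, a 4096-byte
		 * SGL buffer yields 256 SGEs per XRI.
		 */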
7716
7717 /* Add xrip to end of free list */
7718 xrip->_b = hba->sli.sli4.XRIfree_b;
7719 hba->sli.sli4.XRIfree_b->_f = xrip;
7720 xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7721 hba->sli.sli4.XRIfree_b = xrip;
7722 hba->sli.sli4.xrif_count++;
7723
7724 /* Allocate SGL for this xrip */
7725 xrip->SGSeg = mseg;
7726 xrip->SGList = emlxs_mem_get(hba, xrip->SGSeg);
7727
7728 if (xrip->SGList == NULL) {
7729 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
7730 "Unable to allocate memory for SGL %d", i);
7731 goto failed;
7732 }
7733
7734 EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
7735 xrip->SGList->size, DDI_DMA_SYNC_FORDEV);
7736
7737 xrip++;
7738 }
7739
7740 /* GPIO lock */
7741 if (hba->model_info.flags & EMLXS_GPIO_LEDS)
7742 mutex_init(&hba->gpio_lock, NULL, MUTEX_DRIVER, NULL);
7743
7744 #ifdef FMA_SUPPORT
7745 if (hba->sli.sli4.slim2.dma_handle) {
7746 if (emlxs_fm_check_dma_handle(hba,
7747 hba->sli.sli4.slim2.dma_handle)
7748 != DDI_FM_OK) {
7749 EMLXS_MSGF(EMLXS_CONTEXT,
7750 &emlxs_invalid_dma_handle_msg,
7751 "sli4_resource_alloc: hdl=%p",
7752 hba->sli.sli4.slim2.dma_handle);
7753 goto failed;
7754 }
7755 }
7756 #endif /* FMA_SUPPORT */
7757
7758 return (0);
7759
7760 failed:
7761
7762 (void) emlxs_sli4_resource_free(hba);
7763 return (ENOMEM);
8320 FCOE_OPCODE_CFG_POST_SGL_PAGES;
8321 mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
8322 mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;
8323
8324 hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
8325 hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
8326 hdr_req->timeout = 0;
8327 hdr_req->req_length = size;
8328
8329 post_sgl->params.request.xri_count = 0;
8330 post_sgl->params.request.xri_start = xrip->XRI;
8331
8332 xri_cnt = (size -
8333 sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
8334 sizeof (FCOE_SGL_PAGES);
8335
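			/*
			 * Post as many SGL page addresses as fit in this
			 * mailbox payload, one page entry per XRI.
			 */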
8336 for (i = 0; (i < xri_cnt) && cnt; i++) {
8337 post_sgl->params.request.xri_count++;
8338 post_sgl->params.request.pages[i].\
8339 sgl_page0.addrLow =
8340 PADDR_LO(xrip->SGList->phys);
8341 post_sgl->params.request.pages[i].\
8342 sgl_page0.addrHigh =
8343 PADDR_HI(xrip->SGList->phys);
8344
8345 cnt--;
8346 xrip++;
8347 }
8348
8349 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
8350 MBX_SUCCESS) {
8351 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8352 "Unable to POST_SGL. Mailbox cmd=%x "
8353 "status=%x XRI cnt:%d start:%d",
8354 mb->mbxCommand, mb->mbxStatus,
8355 post_sgl->params.request.xri_count,
8356 post_sgl->params.request.xri_start);
8357 emlxs_mem_buf_free(hba, mp);
8358 mbq->nonembed = NULL;
8359 return (EIO);
8360 }
8361 }
8362 }
8363
8741 } else {
8742 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
8743 }
8744
8745 hba->flag |= FC_MBOX_TIMEOUT;
8746 EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);
8747
8748 mutex_exit(&EMLXS_PORT_LOCK);
8749
8750 /* Perform mailbox cleanup */
8751 /* This will wake any sleeping or polling threads */
8752 emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
8753
8754 /* Trigger adapter shutdown */
8755 emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);
8756
8757 return;
8758
8759 } /* emlxs_sli4_timer_check_mbox() */
8760
8761 static void
8762 emlxs_sli4_gpio_timer_start(emlxs_hba_t *hba)
8763 {
8764 mutex_enter(&hba->gpio_lock);
8765
8766 if (!hba->gpio_timer) {
8767 hba->gpio_timer = timeout(emlxs_sli4_gpio_timer, (void *)hba,
8768 drv_usectohz(100000));
8769 }
8770
8771 mutex_exit(&hba->gpio_lock);
8772
8773 } /* emlxs_sli4_gpio_timer_start() */
8774
8775 static void
8776 emlxs_sli4_gpio_timer_stop(emlxs_hba_t *hba)
8777 {
8778 mutex_enter(&hba->gpio_lock);
8779
8780 if (hba->gpio_timer) {
8781 (void) untimeout(hba->gpio_timer);
8782 hba->gpio_timer = 0;
8783 }
8784
8785 mutex_exit(&hba->gpio_lock);
8786
8787 delay(drv_usectohz(300000));
8788 } /* emlxs_sli4_gpio_timer_stop() */
8789
8790 static void
8791 emlxs_sli4_gpio_timer(void *arg)
8792 {
8793 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
8794
8795 mutex_enter(&hba->gpio_lock);
8796
8797 if (hba->gpio_timer) {
8798 emlxs_sli4_check_gpio(hba);
8799 hba->gpio_timer = timeout(emlxs_sli4_gpio_timer, (void *)hba,
8800 drv_usectohz(100000));
8801 }
8802
8803 mutex_exit(&hba->gpio_lock);
8804 } /* emlxs_sli4_gpio_timer() */
8805
8806 static void
8807 emlxs_sli4_check_gpio(emlxs_hba_t *hba)
8808 {
8809 hba->gpio_desired = 0;
8810
8811 if (hba->flag & FC_GPIO_LINK_UP) {
8812 if (hba->io_active)
8813 hba->gpio_desired |= EMLXS_GPIO_ACT;
8814
8815 		/* This is model-specific to ATTO Gen 5 Lancer cards */
8816
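		/*
		 * Speed LEDs: LO = 4G, HI = 8G, LO+HI = 16G; the ACT
		 * LED is driven separately by I/O activity above.
		 */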
8817 switch (hba->linkspeed) {
8818 case LA_4GHZ_LINK:
8819 hba->gpio_desired |= EMLXS_GPIO_LO;
8820 break;
8821
8822 case LA_8GHZ_LINK:
8823 hba->gpio_desired |= EMLXS_GPIO_HI;
8824 break;
8825
8826 case LA_16GHZ_LINK:
8827 hba->gpio_desired |=
8828 EMLXS_GPIO_LO | EMLXS_GPIO_HI;
8829 break;
8830 }
8831 }
8832
8833 if (hba->gpio_current != hba->gpio_desired) {
8834 emlxs_port_t *port = &PPORT;
8835 uint8_t pin;
8836 uint8_t pinval;
8837 MAILBOXQ *mbq;
8838 uint32_t rval;
8839
8840 if (!emlxs_sli4_fix_gpio(hba, &pin, &pinval))
8841 return;
8842
8843 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
8844 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
8845 "Unable to allocate GPIO mailbox.");
8846
8847 hba->gpio_bit = 0;
8848 return;
8849 }
8850
8851 emlxs_mb_gpio_write(hba, mbq, pin, pinval);
8852 mbq->mbox_cmpl = emlxs_sli4_fix_gpio_mbcmpl;
8853
8854 rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
8855
8856 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
8857 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
8858 "Unable to start GPIO mailbox.");
8859
8860 hba->gpio_bit = 0;
8861 emlxs_mem_put(hba, MEM_MBOX, mbq);
8862 return;
8863 }
8864 }
8865 } /* emlxs_sli4_check_gpio() */
8866
8867 static uint32_t
8868 emlxs_sli4_fix_gpio(emlxs_hba_t *hba, uint8_t *pin, uint8_t *pinval)
8869 {
8870 uint8_t dif = hba->gpio_desired ^ hba->gpio_current;
8871 uint8_t bit;
8872 uint8_t i;
8873
8874 	/* Get out if there are no pins to set or a GPIO request is pending */
8875
8876 if (dif == 0 || hba->gpio_bit)
8877 return (0);
8878
8879 /* Fix one pin at a time */
8880
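	/*
	 * (dif & -dif) isolates the lowest set bit of the difference,
	 * e.g. dif = 0b0110 yields bit = 0b0010; the loop below
	 * converts that bit into an index into hba->gpio_pin[].
	 */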
8881 bit = dif & -dif;
8882 hba->gpio_bit = bit;
8883 dif = hba->gpio_current ^ bit;
8884
8885 for (i = EMLXS_GPIO_PIN_LO; bit > 1; ++i) {
8886 dif >>= 1;
8887 bit >>= 1;
8888 }
8889
8890 /* Pins are active low so invert the bit value */
8891
8892 *pin = hba->gpio_pin[i];
8893 *pinval = ~dif & bit;
8894
8895 return (1);
8896 } /* emlxs_sli4_fix_gpio() */
8897
8898 static uint32_t
8899 emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
8900 {
8901 MAILBOX *mb;
8902 uint8_t pin;
8903 uint8_t pinval;
8904
8905 mb = (MAILBOX *)mbq;
8906
8907 mutex_enter(&hba->gpio_lock);
8908
8909 if (mb->mbxStatus == 0)
8910 hba->gpio_current ^= hba->gpio_bit;
8911
8912 hba->gpio_bit = 0;
8913
8914 if (emlxs_sli4_fix_gpio(hba, &pin, &pinval)) {
8915 emlxs_port_t *port = &PPORT;
8916 MAILBOXQ *mbq;
8917 uint32_t rval;
8918
8919 /*
8920 * We're not using the mb_retry routine here because for some
8921 * reason it doesn't preserve the completion routine. Just let
8922 * this mbox cmd fail to start here and run when the mailbox
8923 * is no longer busy.
8924 */
8925
8926 if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
8927 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
8928 "Unable to allocate GPIO mailbox.");
8929
8930 hba->gpio_bit = 0;
8931 goto done;
8932 }
8933
8934 emlxs_mb_gpio_write(hba, mbq, pin, pinval);
8935 mbq->mbox_cmpl = emlxs_sli4_fix_gpio_mbcmpl;
8936
8937 rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
8938
8939 if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
8940 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
8941 "Unable to start GPIO mailbox.");
8942
8943 hba->gpio_bit = 0;
8944 emlxs_mem_put(hba, MEM_MBOX, mbq);
8945 goto done;
8946 }
8947 }
8948
8949 done:
8950 mutex_exit(&hba->gpio_lock);
8951
8952 return (0);
8953 } /* emlxs_sli4_fix_gpio_mbcmpl() */
8954
8955 extern void
8956 emlxs_data_dump(emlxs_port_t *port, char *str, uint32_t *iptr, int cnt, int err)
8957 {
8958 void *msg;
8959
8960 if (!port || !str || !iptr || !cnt) {
8961 return;
8962 }
8963
8964 if (err) {
8965 msg = &emlxs_sli_err_msg;
8966 } else {
8967 msg = &emlxs_sli_detail_msg;
8968 }
8969
8970 if (cnt) {
8971 EMLXS_MSGF(EMLXS_CONTEXT, msg,
8972 "%s00: %08x %08x %08x %08x %08x %08x", str, *iptr,
8973 *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
8974 }
9375 /* Set linkspeed */
9376 switch (cqe->un.fc.port_speed) {
9377 case 1:
9378 hba->linkspeed = LA_1GHZ_LINK;
9379 break;
9380 case 2:
9381 hba->linkspeed = LA_2GHZ_LINK;
9382 break;
9383 case 4:
9384 hba->linkspeed = LA_4GHZ_LINK;
9385 break;
9386 case 8:
9387 hba->linkspeed = LA_8GHZ_LINK;
9388 break;
9389 case 10:
9390 hba->linkspeed = LA_10GHZ_LINK;
9391 break;
9392 case 16:
9393 hba->linkspeed = LA_16GHZ_LINK;
9394 break;
9395 case 32:
9396 hba->linkspeed = LA_32GHZ_LINK;
9397 break;
9398 default:
9399 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
9400 "sli4_handle_fc_link_att: Unknown link speed=%x.",
9401 cqe->un.fc.port_speed);
9402 hba->linkspeed = 0;
9403 break;
9404 }
9405
9406 /* Set qos_linkspeed */
9407 hba->qos_linkspeed = cqe->un.fc.link_speed;
9408
9409 /* Set topology */
9410 hba->topology = cqe->un.fc.topology;
9411
9412 mutex_enter(&EMLXS_PORT_LOCK);
9413 hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
9414 mutex_exit(&EMLXS_PORT_LOCK);
9415
9416 (void) emlxs_fcf_linkup_notify(port);
9417