Print this page
MFV: illumos-omnios@aea0472ecb9ee91fa70556d6f6a941c10c989f1d
Add support for Emulex Corporation Lancer Gen6: LPe32000 FC Host Adapter
Author: Andy Fiddaman <omnios@citrus-it.co.uk>
NEX-8705 Drivers for ATTO Celerity FC-162E Gen 5 and Celerity FC-162P Gen 6 16GB FC cards support
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
NEX-1878 update emlxs from source provided by Emulex
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli4.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_sli4.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at
9 9 * http://www.opensource.org/licenses/cddl1.txt.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2004-2012 Emulex. All rights reserved.
24 24 * Use is subject to license terms.
25 + * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
25 26 */
26 27
27 28 #include <emlxs.h>
28 29
29 30
30 31 /* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
31 32 EMLXS_MSG_DEF(EMLXS_SLI4_C);
32 33
33 34 static int emlxs_sli4_init_extents(emlxs_hba_t *hba,
34 35 MAILBOXQ *mbq);
35 36 static uint32_t emlxs_sli4_read_status(emlxs_hba_t *hba);
36 37
37 38 static int emlxs_init_bootstrap_mb(emlxs_hba_t *hba);
38 39
39 40 static uint32_t emlxs_sli4_read_sema(emlxs_hba_t *hba);
40 41
41 42 static uint32_t emlxs_sli4_read_mbdb(emlxs_hba_t *hba);
42 43
43 44 static void emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint32_t value);
44 45
45 46 static void emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint32_t value);
46 47
47 48 static void emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint32_t value);
48 49
49 50 static void emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint32_t value);
50 51
51 52 static void emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint32_t value);
52 53
53 54 static int emlxs_sli4_create_queues(emlxs_hba_t *hba,
54 55 MAILBOXQ *mbq);
55 56 static int emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba,
56 57 MAILBOXQ *mbq);
57 58 static int emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba,
58 59 MAILBOXQ *mbq);
59 60
60 61 static int emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq);
61 62
62 63 static int emlxs_sli4_map_hdw(emlxs_hba_t *hba);
63 64
64 65 static void emlxs_sli4_unmap_hdw(emlxs_hba_t *hba);
65 66
66 67 static int32_t emlxs_sli4_online(emlxs_hba_t *hba);
67 68
68 69 static void emlxs_sli4_offline(emlxs_hba_t *hba,
69 70 uint32_t reset_requested);
70 71
71 72 static uint32_t emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart,
72 73 uint32_t skip_post, uint32_t quiesce);
73 74 static void emlxs_sli4_hba_kill(emlxs_hba_t *hba);
74 75
75 76 static uint32_t emlxs_sli4_hba_init(emlxs_hba_t *hba);
76 77
77 78 static uint32_t emlxs_sli4_bde_setup(emlxs_port_t *port,
78 79 emlxs_buf_t *sbp);
79 80
80 81 static void emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba,
81 82 CHANNEL *cp, IOCBQ *iocb_cmd);
82 83 static uint32_t emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba,
83 84 MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
84 85 static uint32_t emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba,
85 86 MAILBOXQ *mbq, int32_t flg, uint32_t tmo);
86 87 #ifdef SFCT_SUPPORT
87 88 static uint32_t emlxs_sli4_prep_fct_iocb(emlxs_port_t *port,
88 89 emlxs_buf_t *cmd_sbp, int channel);
89 90 static uint32_t emlxs_sli4_fct_bde_setup(emlxs_port_t *port,
90 91 emlxs_buf_t *sbp);
91 92 #endif /* SFCT_SUPPORT */
92 93
93 94 static uint32_t emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port,
94 95 emlxs_buf_t *sbp, int ring);
95 96 static uint32_t emlxs_sli4_prep_ip_iocb(emlxs_port_t *port,
96 97 emlxs_buf_t *sbp);
97 98 static uint32_t emlxs_sli4_prep_els_iocb(emlxs_port_t *port,
98 99 emlxs_buf_t *sbp);
99 100 static uint32_t emlxs_sli4_prep_ct_iocb(emlxs_port_t *port,
100 101 emlxs_buf_t *sbp);
101 102 static void emlxs_sli4_poll_intr(emlxs_hba_t *hba);
102 103 static int32_t emlxs_sli4_intx_intr(char *arg);
103 104
104 105 #ifdef MSI_SUPPORT
105 106 static uint32_t emlxs_sli4_msi_intr(char *arg1, char *arg2);
106 107 #endif /* MSI_SUPPORT */
107 108
108 109 static void emlxs_sli4_resource_free(emlxs_hba_t *hba);
109 110
110 111 static int emlxs_sli4_resource_alloc(emlxs_hba_t *hba);
111 112 extern void emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba);
112 113
113 114 static XRIobj_t *emlxs_sli4_alloc_xri(emlxs_port_t *port,
|
↓ open down ↓ |
79 lines elided |
↑ open up ↑ |
114 115 emlxs_buf_t *sbp, RPIobj_t *rpip,
115 116 uint32_t type);
116 117 static void emlxs_sli4_enable_intr(emlxs_hba_t *hba);
117 118
118 119 static void emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att);
119 120
120 121 static void emlxs_sli4_timer(emlxs_hba_t *hba);
121 122
122 123 static void emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba);
123 124
125 +static void emlxs_sli4_gpio_timer_start(emlxs_hba_t *hba);
126 +
127 +static void emlxs_sli4_gpio_timer_stop(emlxs_hba_t *hba);
128 +
129 +static void emlxs_sli4_gpio_timer(void *arg);
130 +
131 +static void emlxs_sli4_check_gpio(emlxs_hba_t *hba);
132 +
133 +static uint32_t emlxs_sli4_fix_gpio(emlxs_hba_t *hba,
134 + uint8_t *pin, uint8_t *pinval);
135 +
136 +static uint32_t emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq);
137 +
124 138 static void emlxs_sli4_poll_erratt(emlxs_hba_t *hba);
125 139
126 -extern XRIobj_t *emlxs_sli4_reserve_xri(emlxs_port_t *port,
140 +extern XRIobj_t *emlxs_sli4_reserve_xri(emlxs_port_t *port,
127 141 RPIobj_t *rpip, uint32_t type, uint16_t rx_id);
128 142 static int emlxs_check_hdw_ready(emlxs_hba_t *);
129 143
130 144 static uint32_t emlxs_sli4_reg_did(emlxs_port_t *port,
131 145 uint32_t did, SERV_PARM *param,
132 146 emlxs_buf_t *sbp, fc_unsol_buf_t *ubp,
133 147 IOCBQ *iocbq);
134 148
135 149 static uint32_t emlxs_sli4_unreg_node(emlxs_port_t *port,
136 150 emlxs_node_t *node, emlxs_buf_t *sbp,
137 151 fc_unsol_buf_t *ubp, IOCBQ *iocbq);
138 152
139 153 static void emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba,
140 154 CQE_ASYNC_t *cqe);
141 155 static void emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba,
142 156 CQE_ASYNC_t *cqe);
143 157
144 158
145 159 static uint16_t emlxs_sli4_rqid_to_index(emlxs_hba_t *hba,
146 160 uint16_t rqid);
147 161 static uint16_t emlxs_sli4_wqid_to_index(emlxs_hba_t *hba,
148 162 uint16_t wqid);
149 163 static uint16_t emlxs_sli4_cqid_to_index(emlxs_hba_t *hba,
150 164 uint16_t cqid);
151 165
152 166 /* Define SLI4 API functions: vtable wiring the SLI4 entry points declared above */
153 167 emlxs_sli_api_t emlxs_sli4_api = {
154 168 emlxs_sli4_map_hdw,
155 169 emlxs_sli4_unmap_hdw,
156 170 emlxs_sli4_online,
157 171 emlxs_sli4_offline,
158 172 emlxs_sli4_hba_reset,
159 173 emlxs_sli4_hba_kill,
160 174 emlxs_sli4_issue_iocb_cmd,
161 175 emlxs_sli4_issue_mbox_cmd,
162 176 #ifdef SFCT_SUPPORT
163 177 emlxs_sli4_prep_fct_iocb,
164 178 #else
165 179 NULL, /* FC-target prep slot unused when SFCT is compiled out */
166 180 #endif /* SFCT_SUPPORT */
167 181 emlxs_sli4_prep_fcp_iocb,
168 182 emlxs_sli4_prep_ip_iocb,
169 183 emlxs_sli4_prep_els_iocb,
170 184 emlxs_sli4_prep_ct_iocb,
171 185 emlxs_sli4_poll_intr,
172 186 emlxs_sli4_intx_intr,
173 187 emlxs_sli4_msi_intr,
174 188 emlxs_sli4_disable_intr,
175 189 emlxs_sli4_timer,
176 190 emlxs_sli4_poll_erratt,
177 191 emlxs_sli4_reg_did,
178 192 emlxs_sli4_unreg_node
179 193 };
180 194
181 195
182 196 /* ************************************************************************** */
183 197
/*
 * emlxs_sli4_set_default_params()
 *
 * Fill hba->sli.sli4.param with hard-coded, conservative SLI4 defaults
 * (queue element sizes/counts, page counts, feature bits, resource
 * maxima).  Called from emlxs_sli4_online() when the "get SLI4
 * parameters" mailbox command fails, so adapter initialization can
 * continue without firmware-reported values.
 */
184 198 static void
185 199 emlxs_sli4_set_default_params(emlxs_hba_t *hba)
186 200 {
187 201 	emlxs_port_t *port = &PPORT;
188 202 
189 203 	bzero((char *)&hba->sli.sli4.param, sizeof (sli_params_t));
190 204 
191 205 	hba->sli.sli4.param.ProtocolType = 0x3; /* FC/FCoE */
192 206 
193 207 	hba->sli.sli4.param.SliHint2 = 0;
194 208 	hba->sli.sli4.param.SliHint1 = 0;
195 209 	hba->sli.sli4.param.IfType = 0;
196 210 	hba->sli.sli4.param.SliFamily = 0;
197 211 	hba->sli.sli4.param.Revision = 0x4; /* SLI4 */
198 212 	hba->sli.sli4.param.FT = 0;
199 213 
200 214 	hba->sli.sli4.param.EqeCntMethod = 0x1; /* Bit pattern */
201 215 	hba->sli.sli4.param.EqPageSize = 0x1; /* 4096 */
202 216 	hba->sli.sli4.param.EqeSize = 0x1; /* 4 byte */
203 217 	hba->sli.sli4.param.EqPageCnt = 8;
204 218 	hba->sli.sli4.param.EqeCntMask = 0x1F; /* 256-4096 elements */
205 219 
206 220 	hba->sli.sli4.param.CqeCntMethod = 0x1; /* Bit pattern */
207 221 	hba->sli.sli4.param.CqPageSize = 0x1; /* 4096 */
208 222 	hba->sli.sli4.param.CQV = 0;
209 223 	hba->sli.sli4.param.CqeSize = 0x3; /* 16 byte */
210 224 	hba->sli.sli4.param.CqPageCnt = 4;
211 225 	hba->sli.sli4.param.CqeCntMask = 0x70; /* 256-1024 elements */
212 226 
213 227 	hba->sli.sli4.param.MqeCntMethod = 0x1; /* Bit pattern */
214 228 	hba->sli.sli4.param.MqPageSize = 0x1; /* 4096 */
215 229 	hba->sli.sli4.param.MQV = 0;
216 230 	hba->sli.sli4.param.MqPageCnt = 8;
217 231 	hba->sli.sli4.param.MqeCntMask = 0x0F; /* 16-128 elements */
218 232 
219 233 	hba->sli.sli4.param.WqeCntMethod = 0; /* Page Count */
220 234 	hba->sli.sli4.param.WqPageSize = 0x1; /* 4096 */
221 235 	hba->sli.sli4.param.WQV = 0;
222 236 	hba->sli.sli4.param.WqeSize = 0x5; /* 64 byte */
223 237 	hba->sli.sli4.param.WqPageCnt = 4;
224 238 	hba->sli.sli4.param.WqeCntMask = 0x10; /* 256 elements */
225 239 
226 240 	hba->sli.sli4.param.RqeCntMethod = 0; /* Page Count */
227 241 	hba->sli.sli4.param.RqPageSize = 0x1; /* 4096 */
228 242 	hba->sli.sli4.param.RQV = 0;
229 243 	hba->sli.sli4.param.RqeSize = 0x2; /* 8 byte */
230 244 	hba->sli.sli4.param.RqPageCnt = 8;
231 245 	hba->sli.sli4.param.RqDbWin = 1;
232 246 	hba->sli.sli4.param.RqeCntMask = 0x100; /* 4096 elements */
233 247 
234 248 	hba->sli.sli4.param.Loopback = 0xf; /* unsupported */
235 249 	hba->sli.sli4.param.PHWQ = 0;
236 250 	hba->sli.sli4.param.PHON = 0;
237 251 	hba->sli.sli4.param.TRIR = 0;
238 252 	hba->sli.sli4.param.TRTY = 0;
239 253 	hba->sli.sli4.param.TCCA = 0;
240 254 	hba->sli.sli4.param.MWQE = 0;
241 255 	hba->sli.sli4.param.ASSI = 0;
242 256 	hba->sli.sli4.param.TERP = 0;
243 257 	hba->sli.sli4.param.TGT = 0;
244 258 	hba->sli.sli4.param.AREG = 0;
245 259 	hba->sli.sli4.param.FBRR = 0;
246 260 	hba->sli.sli4.param.SGLR = 1;
247 261 	hba->sli.sli4.param.HDRR = 1;
248 262 	hba->sli.sli4.param.EXT = 0;
249 263 	hba->sli.sli4.param.FCOE = 1;
250 264 
251 265 	hba->sli.sli4.param.SgeLength = (64 * 1024);
252 266 	hba->sli.sli4.param.SglAlign = 0x7 /* 4096 */;
253 267 	hba->sli.sli4.param.SglPageSize = 0x1; /* 4096 */
254 268 	hba->sli.sli4.param.SglPageCnt = 2;
255 269 
256 270 	hba->sli.sli4.param.MinRqSize = 128;
257 271 	hba->sli.sli4.param.MaxRqSize = 2048;
258 272 
259 273 	hba->sli.sli4.param.RPIMax = 0x3ff;
260 274 	hba->sli.sli4.param.XRIMax = 0x3ff;
261 275 	hba->sli.sli4.param.VFIMax = 0xff;
262 276 	hba->sli.sli4.param.VPIMax = 0xff;
263 277 
264 278 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
265 279 	    "Default SLI4 parameters set.");
266 280 
267 281 } /* emlxs_sli4_set_default_params() */
268 282
269 283
270 284 /*
271 285 * emlxs_sli4_online()
272 286 *
273 287 * This routine will start initialization of the SLI4 HBA.
274 288 */
275 289 static int32_t
276 290 emlxs_sli4_online(emlxs_hba_t *hba)
277 291 {
278 292 emlxs_port_t *port = &PPORT;
279 293 emlxs_config_t *cfg;
280 294 emlxs_vpd_t *vpd;
281 295 MAILBOXQ *mbq = NULL;
282 296 MAILBOX4 *mb = NULL;
283 297 MATCHMAP *mp = NULL;
284 298 uint32_t i;
285 299 uint32_t j;
286 300 uint32_t rval = 0;
287 301 uint8_t *vpd_data;
288 302 uint32_t sli_mode;
289 303 uint8_t *outptr;
290 304 uint32_t status;
291 305 uint32_t fw_check;
292 306 uint32_t kern_update = 0;
293 307 emlxs_firmware_t hba_fw;
294 308 emlxs_firmware_t *fw;
295 309 uint16_t ssvid;
296 310 char buf[64];
297 311
298 312 cfg = &CFG;
299 313 vpd = &VPD;
300 314
301 315 sli_mode = EMLXS_HBA_SLI4_MODE;
302 316 hba->sli_mode = sli_mode;
303 317
304 318 /* Set the fw_check flag */
305 319 fw_check = cfg[CFG_FW_CHECK].current;
306 320
307 321 if ((fw_check & 0x04) ||
308 322 (hba->fw_flag & FW_UPDATE_KERNEL)) {
309 323 kern_update = 1;
310 324 }
311 325
312 326 hba->mbox_queue_flag = 0;
313 327 hba->fc_edtov = FF_DEF_EDTOV;
314 328 hba->fc_ratov = FF_DEF_RATOV;
315 329 hba->fc_altov = FF_DEF_ALTOV;
316 330 hba->fc_arbtov = FF_DEF_ARBTOV;
317 331
318 332 /* Networking not supported */
319 333 if (cfg[CFG_NETWORK_ON].current) {
320 334 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
321 335 "Networking is not supported in SLI4, turning it off");
322 336 cfg[CFG_NETWORK_ON].current = 0;
323 337 }
324 338
|
↓ open down ↓ |
188 lines elided |
↑ open up ↑ |
325 339 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
326 340 if (hba->chan_count > MAX_CHANNEL) {
327 341 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
328 342 "Max channels exceeded, dropping num-wq from %d to 1",
329 343 cfg[CFG_NUM_WQ].current);
330 344 cfg[CFG_NUM_WQ].current = 1;
331 345 hba->chan_count = hba->intr_count * cfg[CFG_NUM_WQ].current;
332 346 }
333 347 hba->channel_fcp = 0; /* First channel */
334 348
349 + /* Gen6 chips only support P2P topologies */
350 + if ((hba->model_info.flags & EMLXS_FC_GEN6) &&
351 + cfg[CFG_TOPOLOGY].current != 2) {
352 + EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
353 + "Loop topologies are not supported by this HBA. "
354 + "Forcing topology to P2P.");
355 + cfg[CFG_TOPOLOGY].current = 2;
356 + }
357 +
335 358 /* Default channel for everything else is the last channel */
336 359 hba->channel_ip = hba->chan_count - 1;
337 360 hba->channel_els = hba->chan_count - 1;
338 361 hba->channel_ct = hba->chan_count - 1;
339 362
340 363 hba->fc_iotag = 1;
341 364 hba->io_count = 0;
342 365 hba->channel_tx_count = 0;
343 366
367 + /* Specific to ATTO G5 boards */
368 + if (hba->model_info.flags & EMLXS_GPIO_LEDS) {
369 + /* Set hard-coded GPIO pins */
370 + if (hba->pci_function_number) {
371 + hba->gpio_pin[EMLXS_GPIO_PIN_LO] = 27;
372 + hba->gpio_pin[EMLXS_GPIO_PIN_HI] = 28;
373 + hba->gpio_pin[EMLXS_GPIO_PIN_ACT] = 29;
374 + hba->gpio_pin[EMLXS_GPIO_PIN_LASER] = 8;
375 + } else {
376 + hba->gpio_pin[EMLXS_GPIO_PIN_LO] = 13;
377 + hba->gpio_pin[EMLXS_GPIO_PIN_HI] = 25;
378 + hba->gpio_pin[EMLXS_GPIO_PIN_ACT] = 26;
379 + hba->gpio_pin[EMLXS_GPIO_PIN_LASER] = 12;
380 + }
381 + }
382 +
344 383 /* Initialize the local dump region buffer */
345 384 bzero(&hba->sli.sli4.dump_region, sizeof (MBUF_INFO));
346 385 hba->sli.sli4.dump_region.size = EMLXS_DUMP_REGION_SIZE;
347 386 hba->sli.sli4.dump_region.flags = FC_MBUF_DMA | FC_MBUF_SNGLSG
348 387 | FC_MBUF_DMA32;
349 388 hba->sli.sli4.dump_region.align = ddi_ptob(hba->dip, 1L);
350 389
351 390 (void) emlxs_mem_alloc(hba, &hba->sli.sli4.dump_region);
352 391
353 392 if (hba->sli.sli4.dump_region.virt == NULL) {
354 393 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
355 394 "Unable to allocate dump region buffer.");
356 395
357 396 return (ENOMEM);
358 397 }
359 398
360 399 /*
361 400 * Get a buffer which will be used repeatedly for mailbox commands
362 401 */
363 402 mbq = (MAILBOXQ *) kmem_zalloc((sizeof (MAILBOXQ)), KM_SLEEP);
364 403
365 404 mb = (MAILBOX4 *)mbq;
366 405
367 406 reset:
368 407 /* Reset & Initialize the adapter */
369 408 if (emlxs_sli4_hba_init(hba)) {
370 409 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
371 410 "Unable to init hba.");
372 411
373 412 rval = EIO;
374 413 goto failed1;
375 414 }
376 415
377 416 #ifdef FMA_SUPPORT
378 417 /* Access handle validation */
379 418 switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
380 419 case SLI_INTF_IF_TYPE_2:
381 420 if ((emlxs_fm_check_acc_handle(hba,
382 421 hba->pci_acc_handle) != DDI_FM_OK) ||
383 422 (emlxs_fm_check_acc_handle(hba,
384 423 hba->sli.sli4.bar0_acc_handle) != DDI_FM_OK)) {
385 424 EMLXS_MSGF(EMLXS_CONTEXT,
386 425 &emlxs_invalid_access_handle_msg, NULL);
387 426
388 427 rval = EIO;
389 428 goto failed1;
390 429 }
391 430 break;
392 431
393 432 default :
394 433 if ((emlxs_fm_check_acc_handle(hba,
395 434 hba->pci_acc_handle) != DDI_FM_OK) ||
396 435 (emlxs_fm_check_acc_handle(hba,
397 436 hba->sli.sli4.bar1_acc_handle) != DDI_FM_OK) ||
398 437 (emlxs_fm_check_acc_handle(hba,
399 438 hba->sli.sli4.bar2_acc_handle) != DDI_FM_OK)) {
400 439 EMLXS_MSGF(EMLXS_CONTEXT,
401 440 &emlxs_invalid_access_handle_msg, NULL);
402 441
403 442 rval = EIO;
404 443 goto failed1;
405 444 }
406 445 break;
407 446 }
408 447 #endif /* FMA_SUPPORT */
409 448
410 449 /*
411 450 * Setup and issue mailbox READ REV command
412 451 */
413 452 vpd->opFwRev = 0;
414 453 vpd->postKernRev = 0;
415 454 vpd->sli1FwRev = 0;
416 455 vpd->sli2FwRev = 0;
417 456 vpd->sli3FwRev = 0;
418 457 vpd->sli4FwRev = 0;
419 458
420 459 vpd->postKernName[0] = 0;
421 460 vpd->opFwName[0] = 0;
422 461 vpd->sli1FwName[0] = 0;
423 462 vpd->sli2FwName[0] = 0;
424 463 vpd->sli3FwName[0] = 0;
425 464 vpd->sli4FwName[0] = 0;
426 465
427 466 vpd->opFwLabel[0] = 0;
428 467 vpd->sli1FwLabel[0] = 0;
429 468 vpd->sli2FwLabel[0] = 0;
430 469 vpd->sli3FwLabel[0] = 0;
431 470 vpd->sli4FwLabel[0] = 0;
432 471
433 472 EMLXS_STATE_CHANGE(hba, FC_INIT_REV);
434 473
435 474 emlxs_mb_get_sli4_params(hba, mbq);
436 475 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
437 476 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
438 477 "Unable to read parameters. Mailbox cmd=%x status=%x",
439 478 mb->mbxCommand, mb->mbxStatus);
440 479
441 480 /* Set param defaults */
442 481 emlxs_sli4_set_default_params(hba);
443 482
444 483 } else {
445 484 /* Save parameters */
446 485 bcopy((char *)&mb->un.varSLIConfig.payload,
447 486 (char *)&hba->sli.sli4.param, sizeof (sli_params_t));
448 487
449 488 emlxs_data_dump(port, "SLI_PARMS",
450 489 (uint32_t *)&hba->sli.sli4.param,
451 490 sizeof (sli_params_t), 0);
452 491 }
453 492
454 493 /* Reuse mbq from previous mbox */
455 494 bzero(mbq, sizeof (MAILBOXQ));
456 495
457 496 emlxs_mb_get_port_name(hba, mbq);
458 497 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
459 498 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
460 499 "Unable to get port names. Mailbox cmd=%x status=%x",
461 500 mb->mbxCommand, mb->mbxStatus);
462 501
463 502 bzero(hba->sli.sli4.port_name,
464 503 sizeof (hba->sli.sli4.port_name));
465 504 } else {
466 505 /* Save port names */
467 506 bcopy((char *)&mb->un.varSLIConfig.payload,
468 507 (char *)&hba->sli.sli4.port_name,
469 508 sizeof (hba->sli.sli4.port_name));
470 509 }
471 510
472 511 /* Reuse mbq from previous mbox */
473 512 bzero(mbq, sizeof (MAILBOXQ));
474 513
475 514 emlxs_mb_read_rev(hba, mbq, 0);
476 515 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
477 516 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
478 517 "Unable to read rev. Mailbox cmd=%x status=%x",
479 518 mb->mbxCommand, mb->mbxStatus);
480 519
481 520 rval = EIO;
482 521 goto failed1;
483 522
484 523 }
485 524
486 525 emlxs_data_dump(port, "RD_REV", (uint32_t *)mb, 18, 0);
487 526 if (mb->un.varRdRev4.sliLevel != 4) {
488 527 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
489 528 "Invalid read rev Version for SLI4: 0x%x",
490 529 mb->un.varRdRev4.sliLevel);
491 530
492 531 rval = EIO;
493 532 goto failed1;
494 533 }
495 534
496 535 switch (mb->un.varRdRev4.dcbxMode) {
497 536 case EMLXS_DCBX_MODE_CIN: /* Mapped to nonFIP mode */
498 537 hba->flag &= ~FC_FIP_SUPPORTED;
499 538 break;
500 539
501 540 case EMLXS_DCBX_MODE_CEE: /* Mapped to FIP mode */
502 541 hba->flag |= FC_FIP_SUPPORTED;
503 542 break;
504 543
505 544 default:
506 545 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
507 546 "Invalid read rev dcbx mode for SLI4: 0x%x",
508 547 mb->un.varRdRev4.dcbxMode);
509 548
510 549 rval = EIO;
511 550 goto failed1;
512 551 }
513 552
514 553 /* Set FC/FCoE mode */
515 554 if (mb->un.varRdRev4.FCoE) {
516 555 hba->sli.sli4.flag |= EMLXS_SLI4_FCOE_MODE;
517 556 } else {
518 557 hba->sli.sli4.flag &= ~EMLXS_SLI4_FCOE_MODE;
519 558 }
520 559
521 560 /* Save information as VPD data */
522 561 vpd->rBit = 1;
523 562
524 563 vpd->sli4FwRev = (mb->un.varRdRev4.ULPFwId);
525 564 bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->sli4FwName, 16);
526 565
527 566 vpd->opFwRev = (mb->un.varRdRev4.ULPFwId);
528 567 bcopy((char *)mb->un.varRdRev4.ULPFwName, vpd->opFwName, 16);
529 568
530 569 vpd->postKernRev = (mb->un.varRdRev4.ARMFwId);
531 570 bcopy((char *)mb->un.varRdRev4.ARMFwName, vpd->postKernName, 16);
532 571
533 572 vpd->biuRev = mb->un.varRdRev4.HwRev1;
534 573 vpd->fcphHigh = mb->un.varRdRev4.fcphHigh;
535 574 vpd->fcphLow = mb->un.varRdRev4.fcphLow;
536 575 vpd->feaLevelHigh = mb->un.varRdRev4.feaLevelHigh;
537 576 vpd->feaLevelLow = mb->un.varRdRev4.feaLevelLow;
538 577
539 578 /* Decode FW labels */
540 579 if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
541 580 bcopy(vpd->postKernName, vpd->sli4FwName, 16);
542 581 }
543 582 emlxs_decode_label(vpd->sli4FwName, vpd->sli4FwName, 0,
544 583 sizeof (vpd->sli4FwName));
545 584 emlxs_decode_label(vpd->opFwName, vpd->opFwName, 0,
546 585 sizeof (vpd->opFwName));
547 586 emlxs_decode_label(vpd->postKernName, vpd->postKernName, 0,
548 587 sizeof (vpd->postKernName));
549 588
550 589 if (hba->model_info.chip == EMLXS_BE2_CHIP) {
551 590 (void) strlcpy(vpd->sli4FwLabel, "be2.ufi",
552 591 sizeof (vpd->sli4FwLabel));
553 592 } else if (hba->model_info.chip == EMLXS_BE3_CHIP) {
554 593 (void) strlcpy(vpd->sli4FwLabel, "be3.ufi",
555 594 sizeof (vpd->sli4FwLabel));
556 595 } else if (hba->model_info.chip == EMLXS_LANCER_CHIP) {
557 596 (void) strlcpy(vpd->sli4FwLabel, "xe201.grp",
558 597 sizeof (vpd->sli4FwLabel));
559 598 } else {
560 599 (void) strlcpy(vpd->sli4FwLabel, "sli4.fw",
561 600 sizeof (vpd->sli4FwLabel));
562 601 }
563 602
564 603 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
565 604 "VPD ULP:%08x %s ARM:%08x %s f:%d %d %d %d : dcbx %d",
566 605 vpd->opFwRev, vpd->opFwName, vpd->postKernRev, vpd->postKernName,
567 606 vpd->fcphHigh, vpd->fcphLow, vpd->feaLevelHigh, vpd->feaLevelLow,
568 607 mb->un.varRdRev4.dcbxMode);
569 608
570 609 /* No key information is needed for SLI4 products */
571 610
572 611 /* Get adapter VPD information */
573 612 vpd->port_index = (uint32_t)-1;
574 613
575 614 /* Reuse mbq from previous mbox */
576 615 bzero(mbq, sizeof (MAILBOXQ));
577 616
578 617 emlxs_mb_dump_vpd(hba, mbq, 0);
579 618 vpd_data = hba->sli.sli4.dump_region.virt;
580 619
581 620 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
582 621 MBX_SUCCESS) {
583 622 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
584 623 "No VPD found. status=%x", mb->mbxStatus);
585 624 } else {
586 625 EMLXS_MSGF(EMLXS_CONTEXT,
587 626 &emlxs_init_debug_msg,
588 627 "VPD dumped. rsp_cnt=%d status=%x",
589 628 mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
590 629
591 630 if (mb->un.varDmp4.rsp_cnt) {
592 631 EMLXS_MPDATA_SYNC(hba->sli.sli4.dump_region.dma_handle,
593 632 0, mb->un.varDmp4.rsp_cnt, DDI_DMA_SYNC_FORKERNEL);
594 633
595 634 #ifdef FMA_SUPPORT
596 635 if (hba->sli.sli4.dump_region.dma_handle) {
597 636 if (emlxs_fm_check_dma_handle(hba,
598 637 hba->sli.sli4.dump_region.dma_handle)
599 638 != DDI_FM_OK) {
600 639 EMLXS_MSGF(EMLXS_CONTEXT,
601 640 &emlxs_invalid_dma_handle_msg,
602 641 "sli4_online: hdl=%p",
603 642 hba->sli.sli4.dump_region.
604 643 dma_handle);
605 644 rval = EIO;
606 645 goto failed1;
607 646 }
608 647 }
609 648 #endif /* FMA_SUPPORT */
610 649
611 650 }
612 651 }
613 652
614 653 if (vpd_data[0]) {
615 654 (void) emlxs_parse_vpd(hba, (uint8_t *)vpd_data,
616 655 mb->un.varDmp4.rsp_cnt);
617 656
618 657 /*
619 658 * If there is a VPD part number, and it does not
620 659 * match the current default HBA model info,
621 660 * replace the default data with an entry that
622 661 * does match.
623 662 *
624 663 * After emlxs_parse_vpd model holds the VPD value
625 664 * for V2 and part_num hold the value for PN. These
626 665 * 2 values are NOT necessarily the same.
627 666 */
628 667
629 668 rval = 0;
630 669 if ((vpd->model[0] != 0) &&
631 670 (strcmp(&vpd->model[0], hba->model_info.model) != 0)) {
632 671
633 672 /* First scan for a V2 match */
634 673
635 674 for (i = 1; i < emlxs_pci_model_count; i++) {
636 675 if (strcmp(&vpd->model[0],
637 676 emlxs_pci_model[i].model) == 0) {
638 677 bcopy(&emlxs_pci_model[i],
639 678 &hba->model_info,
640 679 sizeof (emlxs_model_t));
641 680 rval = 1;
642 681 break;
643 682 }
644 683 }
645 684 }
646 685
647 686 if (!rval && (vpd->part_num[0] != 0) &&
648 687 (strcmp(&vpd->part_num[0], hba->model_info.model) != 0)) {
649 688
650 689 /* Next scan for a PN match */
651 690
652 691 for (i = 1; i < emlxs_pci_model_count; i++) {
653 692 if (strcmp(&vpd->part_num[0],
654 693 emlxs_pci_model[i].model) == 0) {
655 694 bcopy(&emlxs_pci_model[i],
656 695 &hba->model_info,
657 696 sizeof (emlxs_model_t));
658 697 break;
659 698 }
660 699 }
661 700 }
662 701
663 702 /* HP CNA port indices start at 1 instead of 0 */
664 703 if (hba->model_info.chip & EMLXS_BE_CHIPS) {
665 704 ssvid = ddi_get16(hba->pci_acc_handle,
666 705 (uint16_t *)(hba->pci_addr + PCI_SSVID_REGISTER));
667 706
668 707 if ((ssvid == PCI_SSVID_HP) && (vpd->port_index > 0)) {
669 708 vpd->port_index--;
670 709 }
671 710 }
672 711
673 712 /*
674 713 * Now lets update hba->model_info with the real
675 714 * VPD data, if any.
676 715 */
677 716
678 717 /*
679 718 * Replace the default model description with vpd data
680 719 */
681 720 if (vpd->model_desc[0] != 0) {
682 721 (void) strncpy(hba->model_info.model_desc,
683 722 vpd->model_desc,
684 723 (sizeof (hba->model_info.model_desc)-1));
685 724 }
686 725
687 726 /* Replace the default model with vpd data */
688 727 if (vpd->model[0] != 0) {
689 728 (void) strncpy(hba->model_info.model, vpd->model,
690 729 (sizeof (hba->model_info.model)-1));
691 730 }
692 731
693 732 /* Replace the default program types with vpd data */
694 733 if (vpd->prog_types[0] != 0) {
695 734 emlxs_parse_prog_types(hba, vpd->prog_types);
696 735 }
697 736 }
698 737
699 738 /*
700 739 * Since the adapter model may have changed with the vpd data
701 740 * lets double check if adapter is not supported
702 741 */
703 742 if (hba->model_info.flags & EMLXS_NOT_SUPPORTED) {
704 743 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
705 744 "Unsupported adapter found. "
706 745 "Id:%d Device id:0x%x SSDID:0x%x Model:%s",
707 746 hba->model_info.id, hba->model_info.device_id,
708 747 hba->model_info.ssdid, hba->model_info.model);
709 748
710 749 rval = EIO;
711 750 goto failed1;
712 751 }
713 752
714 753 (void) strncpy(vpd->boot_version, vpd->sli4FwName,
715 754 (sizeof (vpd->boot_version)-1));
716 755
717 756 /* Get fcode version property */
718 757 emlxs_get_fcode_version(hba);
719 758
720 759 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
721 760 "Firmware: kern=%08x stub=%08x sli1=%08x", vpd->postKernRev,
722 761 vpd->opFwRev, vpd->sli1FwRev);
723 762
724 763 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
725 764 "Firmware: sli2=%08x sli3=%08x sli4=%08x fl=%x", vpd->sli2FwRev,
726 765 vpd->sli3FwRev, vpd->sli4FwRev, vpd->feaLevelHigh);
727 766
728 767 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
729 768 "BIOS: boot=%s fcode=%s", vpd->boot_version, vpd->fcode_version);
730 769
731 770 /*
732 771 * If firmware checking is enabled and the adapter model indicates
733 772 * a firmware image, then perform firmware version check
734 773 */
735 774 hba->fw_flag = 0;
736 775 hba->fw_timer = 0;
737 776
738 777 if (((fw_check & 0x1) &&
739 778 (hba->model_info.flags & EMLXS_ORACLE_BRANDED) &&
740 779 hba->model_info.fwid) ||
741 780 ((fw_check & 0x2) && hba->model_info.fwid)) {
742 781
743 782 /* Find firmware image indicated by adapter model */
744 783 fw = NULL;
745 784 for (i = 0; i < emlxs_fw_count; i++) {
746 785 if (emlxs_fw_table[i].id == hba->model_info.fwid) {
747 786 fw = &emlxs_fw_table[i];
748 787 break;
749 788 }
750 789 }
751 790
752 791 /*
753 792 * If the image was found, then verify current firmware
754 793 * versions of adapter
755 794 */
756 795 if (fw) {
757 796 /* Obtain current firmware version info */
758 797 if (hba->model_info.chip & EMLXS_BE_CHIPS) {
759 798 (void) emlxs_be_read_fw_version(hba, &hba_fw);
760 799 } else {
761 800 hba_fw.kern = vpd->postKernRev;
762 801 hba_fw.stub = vpd->opFwRev;
763 802 hba_fw.sli1 = vpd->sli1FwRev;
764 803 hba_fw.sli2 = vpd->sli2FwRev;
765 804 hba_fw.sli3 = vpd->sli3FwRev;
766 805 hba_fw.sli4 = vpd->sli4FwRev;
767 806 }
768 807
769 808 if (!kern_update &&
770 809 ((fw->kern && (hba_fw.kern != fw->kern)) ||
771 810 (fw->stub && (hba_fw.stub != fw->stub)))) {
772 811
773 812 hba->fw_flag |= FW_UPDATE_NEEDED;
774 813
775 814 } else if ((fw->kern && (hba_fw.kern != fw->kern)) ||
776 815 (fw->stub && (hba_fw.stub != fw->stub)) ||
777 816 (fw->sli1 && (hba_fw.sli1 != fw->sli1)) ||
778 817 (fw->sli2 && (hba_fw.sli2 != fw->sli2)) ||
779 818 (fw->sli3 && (hba_fw.sli3 != fw->sli3)) ||
780 819 (fw->sli4 && (hba_fw.sli4 != fw->sli4))) {
781 820
782 821 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
783 822 "Firmware update needed. "
784 823 "Updating. id=%d fw=%d",
785 824 hba->model_info.id, hba->model_info.fwid);
786 825
787 826 #ifdef MODFW_SUPPORT
788 827 /*
789 828 * Load the firmware image now
790 829 * If MODFW_SUPPORT is not defined, the
791 830 * firmware image will already be defined
792 831 * in the emlxs_fw_table
793 832 */
794 833 emlxs_fw_load(hba, fw);
795 834 #endif /* MODFW_SUPPORT */
796 835
797 836 if (fw->image && fw->size) {
798 837 uint32_t rc;
799 838
800 839 rc = emlxs_fw_download(hba,
801 840 (char *)fw->image, fw->size, 0);
802 841 if ((rc != FC_SUCCESS) &&
803 842 (rc != EMLXS_REBOOT_REQUIRED)) {
804 843 EMLXS_MSGF(EMLXS_CONTEXT,
805 844 &emlxs_init_msg,
806 845 "Firmware update failed.");
807 846 hba->fw_flag |=
808 847 FW_UPDATE_NEEDED;
809 848 }
810 849 #ifdef MODFW_SUPPORT
811 850 /*
812 851 * Unload the firmware image from
813 852 * kernel memory
814 853 */
815 854 emlxs_fw_unload(hba, fw);
816 855 #endif /* MODFW_SUPPORT */
817 856
818 857 fw_check = 0;
819 858
820 859 goto reset;
821 860 }
822 861
823 862 hba->fw_flag |= FW_UPDATE_NEEDED;
824 863
825 864 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
826 865 "Firmware image unavailable.");
827 866 } else {
828 867 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
829 868 "Firmware update not needed.");
830 869 }
831 870 } else {
832 871 /*
833 872 * This means either the adapter database is not
834 873 * correct or a firmware image is missing from the
835 874 * compile
836 875 */
837 876 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_msg,
838 877 "Firmware image unavailable. id=%d fw=%d",
839 878 hba->model_info.id, hba->model_info.fwid);
840 879 }
841 880 }
842 881
843 882 /* Reuse mbq from previous mbox */
844 883 bzero(mbq, sizeof (MAILBOXQ));
845 884
846 885 emlxs_mb_dump_fcoe(hba, mbq, 0);
847 886
848 887 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
849 888 MBX_SUCCESS) {
850 889 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
851 890 "No FCOE info found. status=%x", mb->mbxStatus);
852 891 } else {
853 892 EMLXS_MSGF(EMLXS_CONTEXT,
854 893 &emlxs_init_debug_msg,
855 894 "FCOE info dumped. rsp_cnt=%d status=%x",
856 895 mb->un.varDmp4.rsp_cnt, mb->mbxStatus);
857 896 (void) emlxs_parse_fcoe(hba,
858 897 (uint8_t *)hba->sli.sli4.dump_region.virt,
859 898 mb->un.varDmp4.rsp_cnt);
860 899 }
861 900
862 901 /* Reuse mbq from previous mbox */
863 902 bzero(mbq, sizeof (MAILBOXQ));
864 903
865 904 status = 0;
866 905 if (port->flag & EMLXS_INI_ENABLED) {
867 906 status |= SLI4_FEATURE_FCP_INITIATOR;
868 907 }
869 908 if (port->flag & EMLXS_TGT_ENABLED) {
870 909 status |= SLI4_FEATURE_FCP_TARGET;
871 910 }
872 911 if (cfg[CFG_NPIV_ENABLE].current) {
873 912 status |= SLI4_FEATURE_NPIV;
874 913 }
875 914 if (cfg[CFG_RQD_MODE].current) {
876 915 status |= SLI4_FEATURE_RQD;
877 916 }
878 917 if (cfg[CFG_PERF_HINT].current) {
879 918 if (hba->sli.sli4.param.PHON) {
880 919 status |= SLI4_FEATURE_PERF_HINT;
881 920 }
882 921 }
883 922
884 923 emlxs_mb_request_features(hba, mbq, status);
885 924
886 925 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
887 926 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
888 927 "Unable to REQUEST_FEATURES. Mailbox cmd=%x status=%x",
889 928 mb->mbxCommand, mb->mbxStatus);
890 929
891 930 rval = EIO;
892 931 goto failed1;
893 932 }
894 933 emlxs_data_dump(port, "REQ_FEATURE", (uint32_t *)mb, 6, 0);
895 934
896 935 /* Check to see if we get the features we requested */
897 936 if (status != mb->un.varReqFeatures.featuresEnabled) {
898 937
899 938 /* Just report descrepencies, don't abort the attach */
900 939
901 940 outptr = (uint8_t *)emlxs_request_feature_xlate(
902 941 mb->un.varReqFeatures.featuresRequested);
903 942 (void) strlcpy(buf, (char *)outptr, sizeof (buf));
904 943
905 944 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
906 945 "REQUEST_FEATURES: wanted:%s got:%s",
907 946 &buf[0], emlxs_request_feature_xlate(
908 947 mb->un.varReqFeatures.featuresEnabled));
909 948
910 949 }
911 950
912 951 if ((port->flag & EMLXS_INI_ENABLED) &&
913 952 !(mb->un.varReqFeatures.featuresEnabled &
914 953 SLI4_FEATURE_FCP_INITIATOR)) {
915 954 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
916 955 "Initiator mode not supported by adapter.");
917 956
918 957 rval = EIO;
919 958
920 959 #ifdef SFCT_SUPPORT
921 960 /* Check if we can fall back to just target mode */
922 961 if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
923 962 (mb->un.varReqFeatures.featuresEnabled &
924 963 SLI4_FEATURE_FCP_TARGET) &&
925 964 (cfg[CFG_DTM_ENABLE].current == 1) &&
926 965 (cfg[CFG_TARGET_MODE].current == 1)) {
927 966
928 967 cfg[CFG_DTM_ENABLE].current = 0;
929 968
930 969 EMLXS_MSGF(EMLXS_CONTEXT,
931 970 &emlxs_init_failed_msg,
932 971 "Disabling dynamic target mode. "
933 972 "Enabling target mode only.");
934 973
935 974 /* This will trigger the driver to reattach */
936 975 rval = EAGAIN;
937 976 }
938 977 #endif /* SFCT_SUPPORT */
939 978 goto failed1;
940 979 }
941 980
942 981 if ((port->flag & EMLXS_TGT_ENABLED) &&
943 982 !(mb->un.varReqFeatures.featuresEnabled &
944 983 SLI4_FEATURE_FCP_TARGET)) {
945 984 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
946 985 "Target mode not supported by adapter.");
947 986
948 987 rval = EIO;
949 988
950 989 #ifdef SFCT_SUPPORT
951 990 /* Check if we can fall back to just initiator mode */
952 991 if ((hba->pm_state == EMLXS_PM_IN_ATTACH) &&
953 992 (mb->un.varReqFeatures.featuresEnabled &
954 993 SLI4_FEATURE_FCP_INITIATOR) &&
955 994 (cfg[CFG_DTM_ENABLE].current == 1) &&
956 995 (cfg[CFG_TARGET_MODE].current == 0)) {
957 996
958 997 cfg[CFG_DTM_ENABLE].current = 0;
959 998
960 999 EMLXS_MSGF(EMLXS_CONTEXT,
961 1000 &emlxs_init_failed_msg,
962 1001 "Disabling dynamic target mode. "
963 1002 "Enabling initiator mode only.");
964 1003
965 1004 /* This will trigger the driver to reattach */
966 1005 rval = EAGAIN;
967 1006 }
968 1007 #endif /* SFCT_SUPPORT */
969 1008 goto failed1;
970 1009 }
971 1010
972 1011 if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_NPIV) {
973 1012 hba->flag |= FC_NPIV_ENABLED;
974 1013 }
975 1014
976 1015 if (mb->un.varReqFeatures.featuresEnabled & SLI4_FEATURE_PERF_HINT) {
977 1016 hba->sli.sli4.flag |= EMLXS_SLI4_PHON;
978 1017 if (hba->sli.sli4.param.PHWQ) {
979 1018 hba->sli.sli4.flag |= EMLXS_SLI4_PHWQ;
980 1019 }
981 1020 }
982 1021
983 1022 /* Reuse mbq from previous mbox */
984 1023 bzero(mbq, sizeof (MAILBOXQ));
985 1024
986 1025 emlxs_mb_read_config(hba, mbq);
987 1026 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
988 1027 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
989 1028 "Unable to READ_CONFIG. Mailbox cmd=%x status=%x",
990 1029 mb->mbxCommand, mb->mbxStatus);
991 1030
992 1031 rval = EIO;
993 1032 goto failed1;
994 1033 }
995 1034 emlxs_data_dump(port, "READ_CONFIG4", (uint32_t *)mb, 18, 0);
996 1035
997 1036 /* Set default extents */
998 1037 hba->sli.sli4.XRICount = mb->un.varRdConfig4.XRICount;
999 1038 hba->sli.sli4.XRIExtCount = 1;
1000 1039 hba->sli.sli4.XRIExtSize = hba->sli.sli4.XRICount;
1001 1040 hba->sli.sli4.XRIBase[0] = mb->un.varRdConfig4.XRIBase;
1002 1041
1003 1042 hba->sli.sli4.RPICount = mb->un.varRdConfig4.RPICount;
1004 1043 hba->sli.sli4.RPIExtCount = 1;
1005 1044 hba->sli.sli4.RPIExtSize = hba->sli.sli4.RPICount;
1006 1045 hba->sli.sli4.RPIBase[0] = mb->un.varRdConfig4.RPIBase;
1007 1046
1008 1047 hba->sli.sli4.VPICount = mb->un.varRdConfig4.VPICount;
1009 1048 hba->sli.sli4.VPIExtCount = 1;
1010 1049 hba->sli.sli4.VPIExtSize = hba->sli.sli4.VPICount;
1011 1050 hba->sli.sli4.VPIBase[0] = mb->un.varRdConfig4.VPIBase;
1012 1051
1013 1052 hba->sli.sli4.VFICount = mb->un.varRdConfig4.VFICount;
1014 1053 hba->sli.sli4.VFIExtCount = 1;
1015 1054 hba->sli.sli4.VFIExtSize = hba->sli.sli4.VFICount;
1016 1055 hba->sli.sli4.VFIBase[0] = mb->un.varRdConfig4.VFIBase;
1017 1056
1018 1057 hba->sli.sli4.FCFICount = mb->un.varRdConfig4.FCFICount;
1019 1058
1020 1059 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1021 1060 "CONFIG: xri:%d rpi:%d vpi:%d vfi:%d fcfi:%d",
1022 1061 hba->sli.sli4.XRICount,
1023 1062 hba->sli.sli4.RPICount,
1024 1063 hba->sli.sli4.VPICount,
1025 1064 hba->sli.sli4.VFICount,
1026 1065 hba->sli.sli4.FCFICount);
1027 1066
1028 1067 if ((hba->sli.sli4.XRICount == 0) ||
1029 1068 (hba->sli.sli4.RPICount == 0) ||
1030 1069 (hba->sli.sli4.VPICount == 0) ||
1031 1070 (hba->sli.sli4.VFICount == 0) ||
1032 1071 (hba->sli.sli4.FCFICount == 0)) {
1033 1072 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1034 1073 "Invalid extent value(s) - xri:%d rpi:%d vpi:%d "
1035 1074 "vfi:%d fcfi:%d",
1036 1075 hba->sli.sli4.XRICount,
1037 1076 hba->sli.sli4.RPICount,
1038 1077 hba->sli.sli4.VPICount,
1039 1078 hba->sli.sli4.VFICount,
1040 1079 hba->sli.sli4.FCFICount);
1041 1080
1042 1081 rval = EIO;
1043 1082 goto failed1;
1044 1083 }
1045 1084
1046 1085 if (mb->un.varRdConfig4.extents) {
1047 1086 if (emlxs_sli4_init_extents(hba, mbq)) {
1048 1087 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1049 1088 "Unable to initialize extents.");
1050 1089
1051 1090 rval = EIO;
1052 1091 goto failed1;
1053 1092 }
1054 1093 }
1055 1094
1056 1095 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1057 1096 "CONFIG: port_name:%c %c %c %c",
1058 1097 hba->sli.sli4.port_name[0],
1059 1098 hba->sli.sli4.port_name[1],
1060 1099 hba->sli.sli4.port_name[2],
1061 1100 hba->sli.sli4.port_name[3]);
1062 1101
1063 1102 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1064 1103 "CONFIG: ldv:%d link_type:%d link_number:%d",
1065 1104 mb->un.varRdConfig4.ldv,
1066 1105 mb->un.varRdConfig4.link_type,
1067 1106 mb->un.varRdConfig4.link_number);
1068 1107
1069 1108 if (mb->un.varRdConfig4.ldv) {
1070 1109 hba->sli.sli4.link_number = mb->un.varRdConfig4.link_number;
1071 1110 } else {
1072 1111 hba->sli.sli4.link_number = (uint32_t)-1;
1073 1112 }
1074 1113
1075 1114 if (hba->sli.sli4.VPICount) {
1076 1115 hba->vpi_max = min(hba->sli.sli4.VPICount, MAX_VPORTS) - 1;
1077 1116 }
1078 1117
1079 1118 /* Set the max node count */
1080 1119 if (cfg[CFG_NUM_NODES].current > 0) {
1081 1120 hba->max_nodes =
1082 1121 min(cfg[CFG_NUM_NODES].current,
1083 1122 hba->sli.sli4.RPICount);
1084 1123 } else {
1085 1124 hba->max_nodes = hba->sli.sli4.RPICount;
1086 1125 }
1087 1126
1088 1127 /* Set the io throttle */
1089 1128 hba->io_throttle = hba->sli.sli4.XRICount - IO_THROTTLE_RESERVE;
1090 1129
1091 1130 /* Set max_iotag */
1092 1131 /* We add 1 in case all XRI's are non-zero */
1093 1132 hba->max_iotag = hba->sli.sli4.XRICount + 1;
1094 1133
1095 1134 if (cfg[CFG_NUM_IOTAGS].current) {
1096 1135 hba->max_iotag = min(hba->max_iotag,
1097 1136 (uint16_t)cfg[CFG_NUM_IOTAGS].current);
1098 1137 }
1099 1138
1100 1139 /* Set out-of-range iotag base */
1101 1140 hba->fc_oor_iotag = hba->max_iotag;
1102 1141
1103 1142 /* Save the link speed capabilities */
1104 1143 vpd->link_speed = (uint16_t)mb->un.varRdConfig4.lmt;
1105 1144 emlxs_process_link_speed(hba);
1106 1145
1107 1146 /*
1108 1147 * Allocate some memory for buffers
1109 1148 */
1110 1149 if (emlxs_mem_alloc_buffer(hba) == 0) {
1111 1150 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1112 1151 "Unable to allocate memory buffers.");
1113 1152
1114 1153 rval = ENOMEM;
1115 1154 goto failed1;
1116 1155 }
1117 1156
1118 1157 if (emlxs_sli4_resource_alloc(hba)) {
1119 1158 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1120 1159 "Unable to allocate resources.");
1121 1160
1122 1161 rval = ENOMEM;
1123 1162 goto failed2;
1124 1163 }
1125 1164 emlxs_data_dump(port, "XRIp", (uint32_t *)hba->sli.sli4.XRIp, 18, 0);
1126 1165 emlxs_sli4_zero_queue_stat(hba);
1127 1166
1128 1167 #if (EMLXS_MODREV >= EMLXS_MODREV5)
1129 1168 if ((cfg[CFG_NPIV_ENABLE].current) && (hba->flag & FC_NPIV_ENABLED)) {
1130 1169 hba->fca_tran->fca_num_npivports = hba->vpi_max;
1131 1170 }
1132 1171 #endif /* >= EMLXS_MODREV5 */
1133 1172
1134 1173 /* Reuse mbq from previous mbox */
1135 1174 bzero(mbq, sizeof (MAILBOXQ));
1136 1175
1137 1176 if (emlxs_sli4_post_sgl_pages(hba, mbq)) {
1138 1177 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1139 1178 "Unable to post sgl pages.");
1140 1179
1141 1180 rval = EIO;
1142 1181 goto failed3;
1143 1182 }
1144 1183
1145 1184 /* Reuse mbq from previous mbox */
1146 1185 bzero(mbq, sizeof (MAILBOXQ));
1147 1186
1148 1187 if (emlxs_sli4_post_hdr_tmplates(hba, mbq)) {
1149 1188 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1150 1189 "Unable to post header templates.");
1151 1190
1152 1191 rval = EIO;
1153 1192 goto failed3;
1154 1193 }
1155 1194
1156 1195 /*
1157 1196 * Add our interrupt routine to kernel's interrupt chain & enable it
1158 1197 * If MSI is enabled this will cause Solaris to program the MSI address
1159 1198 * and data registers in PCI config space
1160 1199 */
1161 1200 if (EMLXS_INTR_ADD(hba) != DDI_SUCCESS) {
1162 1201 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1163 1202 "Unable to add interrupt(s).");
1164 1203
1165 1204 rval = EIO;
1166 1205 goto failed3;
1167 1206 }
1168 1207
1169 1208 /* Reuse mbq from previous mbox */
1170 1209 bzero(mbq, sizeof (MAILBOXQ));
1171 1210
1172 1211 /* This MUST be done after EMLXS_INTR_ADD */
1173 1212 if (emlxs_sli4_create_queues(hba, mbq)) {
1174 1213 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1175 1214 "Unable to create queues.");
1176 1215
1177 1216 rval = EIO;
1178 1217 goto failed3;
1179 1218 }
1180 1219
1181 1220 EMLXS_STATE_CHANGE(hba, FC_INIT_CFGPORT);
1182 1221
1183 1222 /* Get and save the current firmware version (based on sli_mode) */
1184 1223 emlxs_decode_firmware_rev(hba, vpd);
1185 1224
1186 1225
1187 1226 EMLXS_STATE_CHANGE(hba, FC_INIT_INITLINK);
1188 1227
1189 1228 if (SLI4_FC_MODE) {
1190 1229 /* Reuse mbq from previous mbox */
1191 1230 bzero(mbq, sizeof (MAILBOXQ));
1192 1231
1193 1232 emlxs_mb_config_link(hba, mbq);
1194 1233 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
1195 1234 MBX_SUCCESS) {
1196 1235 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1197 1236 "Unable to configure link. Mailbox cmd=%x "
1198 1237 "status=%x",
1199 1238 mb->mbxCommand, mb->mbxStatus);
1200 1239
1201 1240 rval = EIO;
1202 1241 goto failed3;
1203 1242 }
1204 1243 }
1205 1244
1206 1245 /* Reuse mbq from previous mbox */
1207 1246 bzero(mbq, sizeof (MAILBOXQ));
1208 1247
1209 1248 /*
1210 1249 * We need to get login parameters for NID
1211 1250 */
1212 1251 (void) emlxs_mb_read_sparam(hba, mbq);
1213 1252 mp = (MATCHMAP *)mbq->bp;
1214 1253 if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
1215 1254 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1216 1255 "Unable to read parameters. Mailbox cmd=%x status=%x",
1217 1256 mb->mbxCommand, mb->mbxStatus);
1218 1257
1219 1258 rval = EIO;
1220 1259 goto failed3;
1221 1260 }
1222 1261
1223 1262 /* Free the buffer since we were polling */
1224 1263 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1225 1264 mp = NULL;
1226 1265
1227 1266 /* If no serial number in VPD data, then use the WWPN */
1228 1267 if (vpd->serial_num[0] == 0) {
1229 1268 outptr = (uint8_t *)&hba->wwpn.IEEE[0];
1230 1269 for (i = 0; i < 12; i++) {
1231 1270 status = *outptr++;
1232 1271 j = ((status & 0xf0) >> 4);
1233 1272 if (j <= 9) {
1234 1273 vpd->serial_num[i] =
1235 1274 (char)((uint8_t)'0' + (uint8_t)j);
1236 1275 } else {
1237 1276 vpd->serial_num[i] =
1238 1277 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1239 1278 }
1240 1279
1241 1280 i++;
1242 1281 j = (status & 0xf);
1243 1282 if (j <= 9) {
1244 1283 vpd->serial_num[i] =
1245 1284 (char)((uint8_t)'0' + (uint8_t)j);
1246 1285 } else {
1247 1286 vpd->serial_num[i] =
1248 1287 (char)((uint8_t)'A' + (uint8_t)(j - 10));
1249 1288 }
1250 1289 }
1251 1290
1252 1291 /*
1253 1292 * Set port number and port index to zero
1254 1293 * The WWN's are unique to each port and therefore port_num
1255 1294 * must equal zero. This effects the hba_fru_details structure
1256 1295 * in fca_bind_port()
1257 1296 */
1258 1297 vpd->port_num[0] = 0;
1259 1298 vpd->port_index = 0;
1260 1299
1261 1300 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
1262 1301 "CONFIG: WWPN: port_index=0");
1263 1302 }
1264 1303
1265 1304 /* Make final attempt to set a port index */
1266 1305 if (vpd->port_index == (uint32_t)-1) {
1267 1306 dev_info_t *p_dip;
1268 1307 dev_info_t *c_dip;
1269 1308
1270 1309 p_dip = ddi_get_parent(hba->dip);
1271 1310 c_dip = ddi_get_child(p_dip);
1272 1311
1273 1312 vpd->port_index = 0;
1274 1313 while (c_dip && (hba->dip != c_dip)) {
1275 1314 c_dip = ddi_get_next_sibling(c_dip);
1276 1315
1277 1316 if (strcmp(ddi_get_name(c_dip), "ethernet") == 0) {
1278 1317 continue;
1279 1318 }
1280 1319
1281 1320 vpd->port_index++;
1282 1321 }
1283 1322
1284 1323 EMLXS_MSGF(EMLXS_CONTEXT,
1285 1324 &emlxs_init_debug_msg,
1286 1325 "CONFIG: Device tree: port_index=%d",
1287 1326 vpd->port_index);
1288 1327 }
1289 1328
1290 1329 if (vpd->port_num[0] == 0) {
1291 1330 if (hba->model_info.channels == EMLXS_MULTI_CHANNEL) {
1292 1331 (void) snprintf(vpd->port_num,
1293 1332 (sizeof (vpd->port_num)-1),
1294 1333 "%d", vpd->port_index);
1295 1334 }
1296 1335 }
1297 1336
1298 1337 if (vpd->id[0] == 0) {
1299 1338 (void) snprintf(vpd->id, (sizeof (vpd->id)-1),
1300 1339 "%s %d",
1301 1340 hba->model_info.model_desc, vpd->port_index);
1302 1341
1303 1342 }
1304 1343
1305 1344 if (vpd->manufacturer[0] == 0) {
1306 1345 (void) strncpy(vpd->manufacturer, hba->model_info.manufacturer,
1307 1346 (sizeof (vpd->manufacturer)-1));
1308 1347 }
1309 1348
1310 1349 if (vpd->part_num[0] == 0) {
1311 1350 (void) strncpy(vpd->part_num, hba->model_info.model,
1312 1351 (sizeof (vpd->part_num)-1));
1313 1352 }
1314 1353
1315 1354 if (vpd->model_desc[0] == 0) {
1316 1355 (void) snprintf(vpd->model_desc, (sizeof (vpd->model_desc)-1),
1317 1356 "%s %d",
1318 1357 hba->model_info.model_desc, vpd->port_index);
1319 1358 }
1320 1359
1321 1360 if (vpd->model[0] == 0) {
|
↓ open down ↓ |
968 lines elided |
↑ open up ↑ |
1322 1361 (void) strncpy(vpd->model, hba->model_info.model,
1323 1362 (sizeof (vpd->model)-1));
1324 1363 }
1325 1364
1326 1365 if (vpd->prog_types[0] == 0) {
1327 1366 emlxs_build_prog_types(hba, vpd);
1328 1367 }
1329 1368
1330 1369 /* Create the symbolic names */
1331 1370 (void) snprintf(hba->snn, (sizeof (hba->snn)-1),
1332 - "Emulex %s FV%s DV%s %s",
1333 - hba->model_info.model, hba->vpd.fw_version, emlxs_version,
1371 + "%s %s FV%s DV%s %s",
1372 + hba->model_info.manufacturer, hba->model_info.model,
1373 + hba->vpd.fw_version, emlxs_version,
1334 1374 (char *)utsname.nodename);
1335 1375
1336 1376 (void) snprintf(hba->spn, (sizeof (hba->spn)-1),
1337 - "Emulex PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1377 + "%s PPN-%01x%01x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1378 + hba->model_info.manufacturer,
1338 1379 hba->wwpn.nameType, hba->wwpn.IEEEextMsn, hba->wwpn.IEEEextLsb,
1339 1380 hba->wwpn.IEEE[0], hba->wwpn.IEEE[1], hba->wwpn.IEEE[2],
1340 1381 hba->wwpn.IEEE[3], hba->wwpn.IEEE[4], hba->wwpn.IEEE[5]);
1341 1382
1342 1383
1343 1384 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN);
1344 1385 emlxs_sli4_enable_intr(hba);
1345 1386
1346 1387 /* Check persist-linkdown */
1347 1388 if (cfg[CFG_PERSIST_LINKDOWN].current) {
1348 1389 EMLXS_STATE_CHANGE(hba, FC_LINK_DOWN_PERSIST);
1349 1390 goto done;
1350 1391 }
1351 1392
1352 1393 #ifdef SFCT_SUPPORT
1353 1394 if ((port->mode == MODE_TARGET) &&
1354 1395 !(port->fct_flags & FCT_STATE_PORT_ONLINE)) {
1355 1396 goto done;
1356 1397 }
1357 1398 #endif /* SFCT_SUPPORT */
1358 1399
1359 1400 /* Reuse mbq from previous mbox */
1360 1401 bzero(mbq, sizeof (MAILBOXQ));
1361 1402
1362 1403 /*
1363 1404 * Setup and issue mailbox INITIALIZE LINK command
1364 1405 * At this point, the interrupt will be generated by the HW
1365 1406 */
1366 1407 emlxs_mb_init_link(hba, mbq,
1367 1408 cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);
1368 1409
1369 1410 rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
1370 1411 if ((rval != MBX_SUCCESS) && (rval != MBX_BUSY)) {
1371 1412 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
1372 1413 "Unable to initialize link. "
1373 1414 "Mailbox cmd=%x status=%x",
1374 1415 mb->mbxCommand, mb->mbxStatus);
1375 1416
1376 1417 rval = EIO;
1377 1418 goto failed3;
1378 1419 }
1379 1420
1380 1421 /* Wait for link to come up */
1381 1422 i = cfg[CFG_LINKUP_DELAY].current;
1382 1423 while (i && (hba->state < FC_LINK_UP)) {
1383 1424 /* Check for hardware error */
1384 1425 if (hba->state == FC_ERROR) {
1385 1426 EMLXS_MSGF(EMLXS_CONTEXT,
1386 1427 &emlxs_init_failed_msg,
1387 1428 "Adapter error.", mb->mbxCommand,
1388 1429 mb->mbxStatus);
1389 1430
1390 1431 rval = EIO;
1391 1432 goto failed3;
1392 1433 }
1393 1434
1394 1435 BUSYWAIT_MS(1000);
1395 1436 i--;
1396 1437 }
1397 1438
|
↓ open down ↓ |
50 lines elided |
↑ open up ↑ |
1398 1439 done:
1399 1440 /*
1400 1441 * The leadville driver will now handle the FLOGI at the driver level
1401 1442 */
1402 1443
1403 1444 if (mbq) {
1404 1445 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1405 1446 mbq = NULL;
1406 1447 mb = NULL;
1407 1448 }
1449 +
1450 + if (hba->model_info.flags & EMLXS_GPIO_LEDS)
1451 + emlxs_sli4_gpio_timer_start(hba);
1452 +
1408 1453 return (0);
1409 1454
1410 1455 failed3:
1411 1456 EMLXS_STATE_CHANGE(hba, FC_ERROR);
1412 1457
1413 1458 if (mp) {
1414 1459 emlxs_mem_put(hba, MEM_BUF, (void *)mp);
1415 1460 mp = NULL;
1416 1461 }
1417 1462
1418 1463
1419 1464 if (hba->intr_flags & EMLXS_MSI_ADDED) {
1420 1465 (void) EMLXS_INTR_REMOVE(hba);
1421 1466 }
1422 1467
1423 1468 emlxs_sli4_resource_free(hba);
1424 1469
1425 1470 failed2:
1426 1471 (void) emlxs_mem_free_buffer(hba);
1427 1472
1428 1473 failed1:
1429 1474 if (mbq) {
1430 1475 (void) kmem_free((uint8_t *)mbq, sizeof (MAILBOXQ));
1431 1476 mbq = NULL;
1432 1477 mb = NULL;
1433 1478 }
1434 1479
1435 1480 if (hba->sli.sli4.dump_region.virt) {
1436 1481 (void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);
1437 1482 }
1438 1483
1439 1484 if (rval == 0) {
1440 1485 rval = EIO;
1441 1486 }
1442 1487
|
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
1443 1488 return (rval);
1444 1489
1445 1490 } /* emlxs_sli4_online() */
1446 1491
1447 1492
static void
emlxs_sli4_offline(emlxs_hba_t *hba, uint32_t reset_requested)
{
	/* Reverse emlxs_sli4_online */

	/* Stop the GPIO LED timer before tearing the port down */
	if (hba->model_info.flags & EMLXS_GPIO_LEDS)
		emlxs_sli4_gpio_timer_stop(hba);

	/*
	 * If the adapter is already interlocked, skip the reset/kill
	 * sequence and go straight to releasing memory below.
	 */
	mutex_enter(&EMLXS_PORT_LOCK);
	if (hba->flag & FC_INTERLOCKED) {
		mutex_exit(&EMLXS_PORT_LOCK);
		goto killed;
	}
	mutex_exit(&EMLXS_PORT_LOCK);

	if (reset_requested) {
		(void) emlxs_sli4_hba_reset(hba, 0, 0, 0);
	}

	/* Shutdown the adapter interface */
	emlxs_sli4_hba_kill(hba);

killed:

	/* Free SLI shared memory */
	emlxs_sli4_resource_free(hba);

	/* Free driver shared memory */
	(void) emlxs_mem_free_buffer(hba);

	/* Free the host dump region buffer */
	(void) emlxs_mem_free(hba, &hba->sli.sli4.dump_region);

} /* emlxs_sli4_offline() */
1479 1527
1480 1528
/*
 * Map the PCI BAR register spaces used to talk to the SLI-4 port and
 * allocate the bootstrap mailbox DMA buffer.  The BAR layout depends on
 * the SLI interface type: if_type 0 uses BAR1 (CSR/semaphore) plus BAR2
 * (doorbells), while if_type 2 exposes everything through BAR0.
 *
 * Returns 0 on success, ENOMEM on any mapping or allocation failure
 * (after unwinding via emlxs_sli4_unmap_hdw()).
 */
/*ARGSUSED*/
static int
emlxs_sli4_map_hdw(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	dev_info_t *dip;
	ddi_device_acc_attr_t dev_attr;
	int status;

	dip = (dev_info_t *)hba->dip;
	dev_attr = emlxs_dev_acc_attr;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar1_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR1_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar1_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar1_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR1 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar1_addr, &dev_attr,
				    &hba->sli.sli4.bar1_acc_handle);
				goto failed;
			}
		}

		if (hba->sli.sli4.bar2_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR2_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar2_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar2_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "ddi_regs_map_setup BAR2 failed. status=%x",
				    status);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar1_addr +
		    CSR_MPU_EP_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar2_addr + PD_RQ_DB_OFFSET);

		/* if_type 0 has no SLIPORT status/control registers */
		hba->sli.sli4.STATUS_reg_addr = 0;
		hba->sli.sli4.CNTL_reg_addr = 0;

		/* Error registers live in PCI config space on if_type 0 */
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_LO_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_HI_OFFSET);

		hba->sli.sli4.PHYSDEV_reg_addr = 0;
		break;

	case SLI_INTF_IF_TYPE_2:

		/* Map in Hardware BAR pages that will be used for */
		/* communication with HBA. */
		if (hba->sli.sli4.bar0_acc_handle == 0) {
			status = ddi_regs_map_setup(dip, PCI_BAR0_RINDEX,
			    (caddr_t *)&hba->sli.sli4.bar0_addr,
			    0, 0, &dev_attr, &hba->sli.sli4.bar0_acc_handle);
			if (status != DDI_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_attach_failed_msg,
				    "(PCI) ddi_regs_map_setup BAR0 failed. "
				    "stat=%d mem=%p attr=%p hdl=%p",
				    status, &hba->sli.sli4.bar0_addr, &dev_attr,
				    &hba->sli.sli4.bar0_acc_handle);
				goto failed;
			}
		}

		/* offset from beginning of register space */
		hba->sli.sli4.MPUEPSemaphore_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_SEMAPHORE_OFFSET);
		hba->sli.sli4.MBDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MB_DB_OFFSET);
		hba->sli.sli4.CQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_CQ_DB_OFFSET);
		hba->sli.sli4.MQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_MQ_DB_OFFSET);
		hba->sli.sli4.WQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_WQ_DB_OFFSET);
		hba->sli.sli4.RQDB_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr + PD_RQ_DB_OFFSET);

		hba->sli.sli4.STATUS_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_STATUS_OFFSET);
		hba->sli.sli4.CNTL_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_CONTROL_OFFSET);
		hba->sli.sli4.ERR1_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR1_OFFSET);
		hba->sli.sli4.ERR2_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    SLIPORT_ERROR2_OFFSET);
		hba->sli.sli4.PHYSDEV_reg_addr =
		    (uint32_t *)(hba->sli.sli4.bar0_addr +
		    PHYSDEV_CONTROL_OFFSET);

		break;

	case SLI_INTF_IF_TYPE_1:
	case SLI_INTF_IF_TYPE_3:
	default:
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_attach_failed_msg,
		    "Map hdw: Unsupported if_type %08x",
		    (hba->sli_intf & SLI_INTF_IF_TYPE_MASK));

		goto failed;
	}

	/*
	 * Allocate the bootstrap mailbox buffer (DMA-able, 32-bit,
	 * single segment, page aligned) if it does not already exist.
	 */
	if (hba->sli.sli4.bootstrapmb.virt == 0) {
		MBUF_INFO *buf_info;
		MBUF_INFO bufinfo;

		buf_info = &bufinfo;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE;
		buf_info->flags =
		    FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
		buf_info->align = ddi_ptob(dip, 1L);

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			goto failed;
		}

		hba->sli.sli4.bootstrapmb.virt = buf_info->virt;
		hba->sli.sli4.bootstrapmb.phys = buf_info->phys;
		hba->sli.sli4.bootstrapmb.size = EMLXS_BOOTSTRAP_MB_SIZE +
		    MBOX_EXTENSION_SIZE;
		hba->sli.sli4.bootstrapmb.data_handle = buf_info->data_handle;
		hba->sli.sli4.bootstrapmb.dma_handle = buf_info->dma_handle;
		/* Only the mailbox proper is cleared, not the extension */
		bzero((char *)hba->sli.sli4.bootstrapmb.virt,
		    EMLXS_BOOTSTRAP_MB_SIZE);
	}

	hba->chan_count = MAX_CHANNEL;

	return (0);

failed:

	/* Unwind any partial mapping/allocation done above */
	emlxs_sli4_unmap_hdw(hba);
	return (ENOMEM);


} /* emlxs_sli4_map_hdw() */
1654 1702
1655 1703
1656 1704 /*ARGSUSED*/
1657 1705 static void
1658 1706 emlxs_sli4_unmap_hdw(emlxs_hba_t *hba)
1659 1707 {
1660 1708 MBUF_INFO bufinfo;
1661 1709 MBUF_INFO *buf_info = &bufinfo;
1662 1710
1663 1711
1664 1712 if (hba->sli.sli4.bar0_acc_handle) {
1665 1713 ddi_regs_map_free(&hba->sli.sli4.bar0_acc_handle);
1666 1714 hba->sli.sli4.bar0_acc_handle = 0;
1667 1715 }
1668 1716
1669 1717 if (hba->sli.sli4.bar1_acc_handle) {
1670 1718 ddi_regs_map_free(&hba->sli.sli4.bar1_acc_handle);
1671 1719 hba->sli.sli4.bar1_acc_handle = 0;
1672 1720 }
1673 1721
1674 1722 if (hba->sli.sli4.bar2_acc_handle) {
1675 1723 ddi_regs_map_free(&hba->sli.sli4.bar2_acc_handle);
1676 1724 hba->sli.sli4.bar2_acc_handle = 0;
1677 1725 }
1678 1726
1679 1727 if (hba->sli.sli4.bootstrapmb.virt) {
1680 1728 bzero(buf_info, sizeof (MBUF_INFO));
1681 1729
1682 1730 if (hba->sli.sli4.bootstrapmb.phys) {
1683 1731 buf_info->phys = hba->sli.sli4.bootstrapmb.phys;
1684 1732 buf_info->data_handle =
1685 1733 hba->sli.sli4.bootstrapmb.data_handle;
1686 1734 buf_info->dma_handle =
1687 1735 hba->sli.sli4.bootstrapmb.dma_handle;
1688 1736 buf_info->flags = FC_MBUF_DMA;
1689 1737 }
1690 1738
1691 1739 buf_info->virt = hba->sli.sli4.bootstrapmb.virt;
1692 1740 buf_info->size = hba->sli.sli4.bootstrapmb.size;
1693 1741 emlxs_mem_free(hba, buf_info);
1694 1742
1695 1743 hba->sli.sli4.bootstrapmb.virt = NULL;
1696 1744 }
1697 1745
1698 1746 return;
1699 1747
1700 1748 } /* emlxs_sli4_unmap_hdw() */
1701 1749
1702 1750
/*
 * Poll the adapter for post-reset readiness, up to 30 seconds
 * (30 iterations of a 1 second busy-wait).
 *
 * Returns:
 *   0 - port is ready
 *   1 - fatal/unrecoverable POST error, or port requests another reset
 *   2 - if_type 2 port reported an unrecoverable error
 *   3 - unsupported if_type, or timeout waiting for ready
 */
static int
emlxs_check_hdw_ready(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t i = 0;
	uint32_t err1;
	uint32_t err2;

	/* Wait for reset completion */
	while (i < 30) {

		switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
		case SLI_INTF_IF_TYPE_0:
			/* if_type 0 readiness comes from the semaphore reg */
			status = emlxs_sli4_read_sema(hba);

			/* Check to see if any errors occurred during init */
			if (status & ARM_POST_FATAL) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "SEMA Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_UNRECOVERABLE_ERROR) ==
			    ARM_UNRECOVERABLE_ERROR) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x", status);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (1);
			}

			if ((status & ARM_POST_MASK) == ARM_POST_READY) {
				/* ARM Ready !! */
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_sli_detail_msg,
				    "ARM Ready: status=%x", status);

				return (0);
			}
			break;

		case SLI_INTF_IF_TYPE_2:
			/* if_type 2 readiness comes from SLIPORT status */
			status = emlxs_sli4_read_status(hba);

			if (status & SLI_STATUS_READY) {
				if (!(status & SLI_STATUS_ERROR)) {
					/* ARM Ready !! */
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready: status=%x", status);

					return (0);
				}

				/* Ready but errored; gather error details */
				err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR1_reg_addr);
				err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
				    hba->sli.sli4.ERR2_reg_addr);

				if (status & SLI_STATUS_RESET_NEEDED) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_detail_msg,
					    "ARM Ready (Reset Needed): "
					    "status=%x err1=%x "
					    "err2=%x",
					    status, err1, err2);

					return (1);
				}

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_reset_failed_msg,
				    "Unrecoverable Error: status=%x err1=%x "
				    "err2=%x",
				    status, err1, err2);

				EMLXS_STATE_CHANGE(hba, FC_ERROR);

				return (2);
			}

			break;

		default:
			EMLXS_STATE_CHANGE(hba, FC_ERROR);

			return (3);
		}

		BUSYWAIT_MS(1000);
		i++;
	}

	/* Timeout occurred */
	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		/* if_type 0 error registers are in PCI config space */
		err1 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;

	default:
		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);
		break;
	}

	/*
	 * NOTE(review): on timeout, "status" holds the last value polled
	 * above; for if_type 0 that is the semaphore register, so testing
	 * it against SLI_STATUS_ERROR looks type-2 specific — confirm.
	 * This only affects which log message is emitted.
	 */
	if (status & SLI_STATUS_ERROR) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: Port Error: status=%x err1=%x err2=%x",
		    status, err1, err2);
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
		    "Ready Timeout: status=%x err1=%x err2=%x",
		    status, err1, err2);
	}

	EMLXS_STATE_CHANGE(hba, FC_ERROR);

	return (3);

} /* emlxs_check_hdw_ready() */
1835 1883
1836 1884
/*
 * emlxs_sli4_read_status()
 *
 * Read the SLI4 port STATUS register.  Only SLI_INTF_IF_TYPE_2 ports
 * expose this register (mapped through BAR0); all other interface
 * types return 0.  Under FMA the BAR0 access handle is validated
 * after the read.
 */
1837 1885 static uint32_t
1838 1886 emlxs_sli4_read_status(emlxs_hba_t *hba)
1839 1887 {
1840 1888 #ifdef FMA_SUPPORT
1841 1889 	emlxs_port_t *port = &PPORT;
1842 1890 #endif /* FMA_SUPPORT */
1843 1891 	uint32_t status;
1844 1892 
1845 1893 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1846 1894 	case SLI_INTF_IF_TYPE_2:
1847 1895 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1848 1896 		    hba->sli.sli4.STATUS_reg_addr);
1849 1897 #ifdef FMA_SUPPORT
1850 1898 		/* Access handle validation */
1851 1899 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1852 1900 #endif  /* FMA_SUPPORT */
1853 1901 		break;
1854 1902 	default:
1855 1903 		status = 0;
1856 1904 		break;
1857 1905 	}
1858 1906 
1859 1907 	return (status);
1860 1908 
1861 1909 } /* emlxs_sli4_read_status() */
1862 1910
1863 1911
/*
 * emlxs_sli4_read_sema()
 *
 * Read the MPU endpoint semaphore register.  Its BAR location varies
 * by interface type: BAR1 for SLI_INTF_IF_TYPE_0, BAR0 for
 * SLI_INTF_IF_TYPE_2; unknown types return 0.  Under FMA the access
 * handle that was used is validated after the read.
 */
1864 1912 static uint32_t
1865 1913 emlxs_sli4_read_sema(emlxs_hba_t *hba)
1866 1914 {
1867 1915 #ifdef FMA_SUPPORT
1868 1916 	emlxs_port_t *port = &PPORT;
1869 1917 #endif /* FMA_SUPPORT */
1870 1918 	uint32_t status;
1871 1919 
1872 1920 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1873 1921 	case SLI_INTF_IF_TYPE_0:
1874 1922 		status = ddi_get32(hba->sli.sli4.bar1_acc_handle,
1875 1923 		    hba->sli.sli4.MPUEPSemaphore_reg_addr);
1876 1924 #ifdef FMA_SUPPORT
1877 1925 		/* Access handle validation */
1878 1926 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar1_acc_handle);
1879 1927 #endif  /* FMA_SUPPORT */
1880 1928 		break;
1881 1929 
1882 1930 	case SLI_INTF_IF_TYPE_2:
1883 1931 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1884 1932 		    hba->sli.sli4.MPUEPSemaphore_reg_addr);
1885 1933 #ifdef FMA_SUPPORT
1886 1934 		/* Access handle validation */
1887 1935 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1888 1936 #endif  /* FMA_SUPPORT */
1889 1937 		break;
1890 1938 	default:
1891 1939 		status = 0;
1892 1940 		break;
1893 1941 	}
1894 1942 
1895 1943 	return (status);
1896 1944 
1897 1945 } /* emlxs_sli4_read_sema() */
1898 1946
1899 1947
/*
 * emlxs_sli4_read_mbdb()
 *
 * Read the bootstrap mailbox doorbell register.  BAR mapping varies
 * by interface type: BAR2 for SLI_INTF_IF_TYPE_0, BAR0 for
 * SLI_INTF_IF_TYPE_2; unknown types return 0.  Callers poll this for
 * the BMBX_READY bit (see emlxs_check_bootstrap_ready()).
 */
1900 1948 static uint32_t
1901 1949 emlxs_sli4_read_mbdb(emlxs_hba_t *hba)
1902 1950 {
1903 1951 #ifdef FMA_SUPPORT
1904 1952 	emlxs_port_t *port = &PPORT;
1905 1953 #endif /* FMA_SUPPORT */
1906 1954 	uint32_t status;
1907 1955 
1908 1956 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1909 1957 	case SLI_INTF_IF_TYPE_0:
1910 1958 		status = ddi_get32(hba->sli.sli4.bar2_acc_handle,
1911 1959 		    hba->sli.sli4.MBDB_reg_addr);
1912 1960 
1913 1961 #ifdef FMA_SUPPORT
1914 1962 		/* Access handle validation */
1915 1963 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar2_acc_handle);
1916 1964 #endif  /* FMA_SUPPORT */
1917 1965 		break;
1918 1966 
1919 1967 	case SLI_INTF_IF_TYPE_2:
1920 1968 		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
1921 1969 		    hba->sli.sli4.MBDB_reg_addr);
1922 1970 #ifdef FMA_SUPPORT
1923 1971 		/* Access handle validation */
1924 1972 		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli4.bar0_acc_handle);
1925 1973 #endif  /* FMA_SUPPORT */
1926 1974 		break;
1927 1975 	default:
1928 1976 		status = 0;
1929 1977 		break;
1930 1978 	}
1931 1979 
1932 1980 	return (status);
1933 1981 
1934 1982 } /* emlxs_sli4_read_mbdb() */
1935 1983
1936 1984
/*
 * emlxs_sli4_write_mbdb()
 *
 * Write 'value' to the bootstrap mailbox doorbell register (BAR2 on
 * SLI_INTF_IF_TYPE_0, BAR0 on SLI_INTF_IF_TYPE_2).  Writes on any
 * other interface type are silently dropped.
 */
1937 1985 static void
1938 1986 emlxs_sli4_write_mbdb(emlxs_hba_t *hba, uint32_t value)
1939 1987 {
1940 1988 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1941 1989 	case SLI_INTF_IF_TYPE_0:
1942 1990 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
1943 1991 		    hba->sli.sli4.MBDB_reg_addr, value);
1944 1992 		break;
1945 1993 
1946 1994 	case SLI_INTF_IF_TYPE_2:
1947 1995 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
1948 1996 		    hba->sli.sli4.MBDB_reg_addr, value);
1949 1997 		break;
1950 1998 	}
1951 1999 
1952 2000 } /* emlxs_sli4_write_mbdb() */
1953 2001
1954 2002
/*
 * emlxs_sli4_write_cqdb()
 *
 * Write 'value' to the completion queue doorbell register (BAR2 on
 * SLI_INTF_IF_TYPE_0, BAR0 on SLI_INTF_IF_TYPE_2).  Writes on any
 * other interface type are silently dropped.
 */
1955 2003 static void
1956 2004 emlxs_sli4_write_cqdb(emlxs_hba_t *hba, uint32_t value)
1957 2005 {
1958 2006 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1959 2007 	case SLI_INTF_IF_TYPE_0:
1960 2008 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
1961 2009 		    hba->sli.sli4.CQDB_reg_addr, value);
1962 2010 		break;
1963 2011 
1964 2012 	case SLI_INTF_IF_TYPE_2:
1965 2013 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
1966 2014 		    hba->sli.sli4.CQDB_reg_addr, value);
1967 2015 		break;
1968 2016 	}
1969 2017 
1970 2018 } /* emlxs_sli4_write_cqdb() */
1971 2019
1972 2020
/*
 * emlxs_sli4_write_rqdb()
 *
 * Write 'value' to the receive queue doorbell register (BAR2 on
 * SLI_INTF_IF_TYPE_0, BAR0 on SLI_INTF_IF_TYPE_2).  Writes on any
 * other interface type are silently dropped.
 */
1973 2021 static void
1974 2022 emlxs_sli4_write_rqdb(emlxs_hba_t *hba, uint32_t value)
1975 2023 {
1976 2024 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1977 2025 	case SLI_INTF_IF_TYPE_0:
1978 2026 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
1979 2027 		    hba->sli.sli4.RQDB_reg_addr, value);
1980 2028 		break;
1981 2029 
1982 2030 	case SLI_INTF_IF_TYPE_2:
1983 2031 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
1984 2032 		    hba->sli.sli4.RQDB_reg_addr, value);
1985 2033 		break;
1986 2034 	}
1987 2035 
1988 2036 } /* emlxs_sli4_write_rqdb() */
1989 2037
1990 2038
/*
 * emlxs_sli4_write_mqdb()
 *
 * Write 'value' to the mailbox queue doorbell register (BAR2 on
 * SLI_INTF_IF_TYPE_0, BAR0 on SLI_INTF_IF_TYPE_2).  Writes on any
 * other interface type are silently dropped.
 */
1991 2039 static void
1992 2040 emlxs_sli4_write_mqdb(emlxs_hba_t *hba, uint32_t value)
1993 2041 {
1994 2042 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
1995 2043 	case SLI_INTF_IF_TYPE_0:
1996 2044 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
1997 2045 		    hba->sli.sli4.MQDB_reg_addr, value);
1998 2046 		break;
1999 2047 
2000 2048 	case SLI_INTF_IF_TYPE_2:
2001 2049 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2002 2050 		    hba->sli.sli4.MQDB_reg_addr, value);
2003 2051 		break;
2004 2052 	}
2005 2053 
2006 2054 } /* emlxs_sli4_write_mqdb() */
2007 2055
2008 2056
/*
 * emlxs_sli4_write_wqdb()
 *
 * Write 'value' to the work queue doorbell register (BAR2 on
 * SLI_INTF_IF_TYPE_0, BAR0 on SLI_INTF_IF_TYPE_2).  Writes on any
 * other interface type are silently dropped.
 */
2009 2057 static void
2010 2058 emlxs_sli4_write_wqdb(emlxs_hba_t *hba, uint32_t value)
2011 2059 {
2012 2060 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2013 2061 	case SLI_INTF_IF_TYPE_0:
2014 2062 		ddi_put32(hba->sli.sli4.bar2_acc_handle,
2015 2063 		    hba->sli.sli4.WQDB_reg_addr, value);
2016 2064 		break;
2017 2065 
2018 2066 	case SLI_INTF_IF_TYPE_2:
2019 2067 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2020 2068 		    hba->sli.sli4.WQDB_reg_addr, value);
2021 2069 		break;
2022 2070 	}
2023 2071 
2024 2072 } /* emlxs_sli4_write_wqdb() */
2025 2073
2026 2074
/*
 * emlxs_check_bootstrap_ready()
 *
 * Poll the bootstrap mailbox doorbell until the BMBX_READY bit is
 * set.  'tmo' is a budget in 10ms ticks.  On success the remaining
 * (non-zero) tick count is returned so callers can carry the unused
 * budget forward.  On timeout the port error registers (ERR1/ERR2 -
 * PCI config space for IF_TYPE_0, BAR0 otherwise) are logged, the
 * HBA is moved to FC_ERROR, and 0 is returned.
 */
2027 2075 static uint32_t
2028 2076 emlxs_check_bootstrap_ready(emlxs_hba_t *hba, uint32_t tmo)
2029 2077 {
2030 2078 	emlxs_port_t *port = &PPORT;
2031 2079 	uint32_t status = 0;
2032 2080 	uint32_t err1;
2033 2081 	uint32_t err2;
2034 2082 
2035 2083 	/* Wait for reset completion, tmo is in 10ms ticks */
2036 2084 	while (tmo) {
2037 2085 		status = emlxs_sli4_read_mbdb(hba);
2038 2086 
2039 2087 		/* Check to see if any errors occurred during init */
2040 2088 		if (status & BMBX_READY) {
2041 2089 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2042 2090 			    "BMBX Ready: status=0x%x", status);
2043 2091 
2044 2092 			return (tmo);
2045 2093 		}
2046 2094 
2047 2095 		BUSYWAIT_MS(10);
2048 2096 		tmo--;
2049 2097 	}
2050 2098 
2051 2099 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2052 2100 	case SLI_INTF_IF_TYPE_0:
2053 2101 		err1 = ddi_get32(hba->pci_acc_handle,
2054 2102 		    hba->sli.sli4.ERR1_reg_addr);
2055 2103 		err2 = ddi_get32(hba->pci_acc_handle,
2056 2104 		    hba->sli.sli4.ERR2_reg_addr);
2057 2105 		break;
2058 2106 
2059 2107 	default:
2060 2108 		err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2061 2109 		    hba->sli.sli4.ERR1_reg_addr);
2062 2110 		err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2063 2111 		    hba->sli.sli4.ERR2_reg_addr);
2064 2112 		break;
2065 2113 	}
2066 2114 
2067 2115 	/* Timeout occurred */
2068 2116 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2069 2117 	    "Timeout waiting for BMailbox: status=%x err1=%x err2=%x",
2070 2118 	    status, err1, err2);
2071 2119 
2072 2120 	EMLXS_STATE_CHANGE(hba, FC_ERROR);
2073 2121 
2074 2122 	return (0);
2075 2123 
2076 2124 } /* emlxs_check_bootstrap_ready() */
2077 2125
2078 2126
/*
 * emlxs_issue_bootstrap_mb()
 *
 * Execute the mailbox command already staged in the bootstrap
 * mailbox DMA buffer.  Per the SLI4 bootstrap protocol the mailbox
 * physical address is delivered to the doorbell in two writes (high
 * portion first, flagged with BMBX_ADDR_HI, then the low portion),
 * waiting for BMBX_READY after each.  'tmo' is a budget in 10ms
 * ticks; the remaining ticks are returned, or 0 on timeout/failure.
 */
2079 2127 static uint32_t
2080 2128 emlxs_issue_bootstrap_mb(emlxs_hba_t *hba, uint32_t tmo)
2081 2129 {
2082 2130 	emlxs_port_t *port = &PPORT;
2083 2131 	uint32_t *iptr;
2084 2132 	uint32_t addr30;
2085 2133 
2086 2134 	/*
2087 2135 	 * This routine assumes the bootstrap mbox is loaded
2088 2136 	 * with the mailbox command to be executed.
2089 2137 	 *
2090 2138 	 * First, load the high 30 bits of bootstrap mailbox
2091 2139 	 */
2092 2140 	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>32) & 0xfffffffc);
2093 2141 	addr30 |= BMBX_ADDR_HI;
2094 2142 	emlxs_sli4_write_mbdb(hba, addr30);
2095 2143 
2096 2144 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
2097 2145 	if (tmo == 0) {
2098 2146 		return (0);
2099 2147 	}
2100 2148 
2101 2149 	/* Load the low 30 bits of bootstrap mailbox */
2102 2150 	addr30 = (uint32_t)((hba->sli.sli4.bootstrapmb.phys>>2) & 0xfffffffc);
2103 2151 	emlxs_sli4_write_mbdb(hba, addr30);
2104 2152 
2105 2153 	tmo = emlxs_check_bootstrap_ready(hba, tmo);
2106 2154 	if (tmo == 0) {
2107 2155 		return (0);
2108 2156 	}
2109 2157 
2110 2158 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2111 2159 
2112 2160 	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
2113 2161 	    "BootstrapMB: %p Completed %08x %08x %08x",
2114 2162 	    hba->sli.sli4.bootstrapmb.virt,
2115 2163 	    *iptr, *(iptr+1), *(iptr+2));
2116 2164 
2117 2165 	return (tmo);
2118 2166 
2119 2167 } /* emlxs_issue_bootstrap_mb() */
2120 2168
2121 2169
/*
 * emlxs_init_bootstrap_mb()
 *
 * One-time bootstrap mailbox initialization.  Waits for the hardware
 * to become ready, then (unless FC_BOOTSTRAPMB_INIT is already set)
 * waits up to 30s (3000 x 10ms ticks) for the doorbell, writes the
 * two FW_INITIALIZE magic words (which must be little endian), syncs
 * the DMA buffer to the device and issues the command.  Under FMA
 * the DMA handle is validated afterwards.  Sets FC_BOOTSTRAPMB_INIT
 * on success.  Returns 0 on success, 1 on failure.
 */
2122 2170 static int
2123 2171 emlxs_init_bootstrap_mb(emlxs_hba_t *hba)
2124 2172 {
2125 2173 #ifdef FMA_SUPPORT
2126 2174 	emlxs_port_t *port = &PPORT;
2127 2175 #endif /* FMA_SUPPORT */
2128 2176 	uint32_t *iptr;
2129 2177 	uint32_t tmo;
2130 2178 
2131 2179 	if (emlxs_check_hdw_ready(hba)) {
2132 2180 		return (1);
2133 2181 	}
2134 2182 
2135 2183 	if (hba->flag & FC_BOOTSTRAPMB_INIT) {
2136 2184 		return (0);  /* Already initialized */
2137 2185 	}
2138 2186 
2139 2187 	/* NOTE: tmo is in 10ms ticks */
2140 2188 	tmo = emlxs_check_bootstrap_ready(hba, 3000);
2141 2189 	if (tmo == 0) {
2142 2190 		return (1);
2143 2191 	}
2144 2192 
2145 2193 	/* Issue FW_INITIALIZE command */
2146 2194 
2147 2195 	/* Special words to initialize bootstrap mbox MUST be little endian */
2148 2196 	iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
2149 2197 	*iptr = LE_SWAP32(FW_INITIALIZE_WORD0);
2150 2198 	*(iptr+1) = LE_SWAP32(FW_INITIALIZE_WORD1);
2151 2199 
2152 2200 	EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
2153 2201 	    MAILBOX_CMD_BSIZE, DDI_DMA_SYNC_FORDEV);
2154 2202 
2155 2203 	emlxs_data_dump(port, "FW_INIT", (uint32_t *)iptr, 6, 0);
2156 2204 	if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
2157 2205 		return (1);
2158 2206 	}
2159 2207 
2160 2208 #ifdef FMA_SUPPORT
2161 2209 	if (emlxs_fm_check_dma_handle(hba, hba->sli.sli4.bootstrapmb.dma_handle)
2162 2210 	    != DDI_FM_OK) {
2163 2211 		EMLXS_MSGF(EMLXS_CONTEXT,
2164 2212 		    &emlxs_invalid_dma_handle_msg,
2165 2213 		    "init_bootstrap_mb: hdl=%p",
2166 2214 		    hba->sli.sli4.bootstrapmb.dma_handle);
2167 2215 		return (1);
2168 2216 	}
2169 2217 #endif
2170 2218 	hba->flag |= FC_BOOTSTRAPMB_INIT;
2171 2219 	return (0);
2172 2220 
2173 2221 } /* emlxs_init_bootstrap_mb() */
2174 2222
2175 2223
2176 2224
2177 2225
/*
 * emlxs_sli4_hba_init()
 *
 * Bring up an SLI4 HBA: reset the port, bind each channel to its WQ
 * iopath, initialize all MAX_VPORTS port/VPI objects (offline),
 * establish max_nodes (CFG_NUM_NODES or a 4096 default), initialize
 * the bootstrap mailbox, program the FCoE FC_Map, and for
 * SLI_INTF_IF_TYPE_0 cache the PCI config UE mask registers used
 * later for unrecoverable-error detection.  Returns 0 on success or
 * a non-zero error from the reset/bootstrap path.
 */
2178 2226 static uint32_t
2179 2227 emlxs_sli4_hba_init(emlxs_hba_t *hba)
2180 2228 {
2181 2229 	int rc;
2182 2230 	uint16_t i;
2183 2231 	emlxs_port_t *vport;
2184 2232 	emlxs_config_t *cfg = &CFG;
2185 2233 	CHANNEL *cp;
2186 2234 	VPIobj_t *vpip;
2187 2235 
2188 2236 	/* Restart the adapter */
2189 2237 	if (emlxs_sli4_hba_reset(hba, 1, 0, 0)) {
2190 2238 		return (1);
2191 2239 	}
2192 2240 
2193 2241 	for (i = 0; i < hba->chan_count; i++) {
2194 2242 		cp = &hba->chan[i];
2195 2243 		cp->iopath = (void *)&hba->sli.sli4.wq[i];
2196 2244 	}
2197 2245 
2198 2246 	/* Initialize all the port objects */
2199 2247 	hba->vpi_max = 0;
2200 2248 	for (i = 0; i < MAX_VPORTS; i++) {
2201 2249 		vport = &VPORT(i);
2202 2250 		vport->hba = hba;
2203 2251 		vport->vpi = i;
2204 2252 
2205 2253 		vpip = &vport->VPIobj;
2206 2254 		vpip->index = i;
2207 2255 		vpip->VPI = i;
2208 2256 		vpip->port = vport;
2209 2257 		vpip->state = VPI_STATE_OFFLINE;
2210 2258 		vport->vpip = vpip;
2211 2259 	}
2212 2260 
2213 2261 	/* Set the max node count */
2214 2262 	if (hba->max_nodes == 0) {
2215 2263 		if (cfg[CFG_NUM_NODES].current > 0) {
2216 2264 			hba->max_nodes = cfg[CFG_NUM_NODES].current;
2217 2265 		} else {
2218 2266 			hba->max_nodes = 4096;
2219 2267 		}
2220 2268 	}
2221 2269 
2222 2270 	rc =  emlxs_init_bootstrap_mb(hba);
2223 2271 	if (rc) {
2224 2272 		return (rc);
2225 2273 	}
2226 2274 
2227 2275 	hba->sli.sli4.cfgFCOE.FCMap[0] = FCOE_FCF_MAP0;
2228 2276 	hba->sli.sli4.cfgFCOE.FCMap[1] = FCOE_FCF_MAP1;
2229 2277 	hba->sli.sli4.cfgFCOE.FCMap[2] = FCOE_FCF_MAP2;
2230 2278 
2231 2279 	if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) == SLI_INTF_IF_TYPE_0) {
2232 2280 		/* Cache the UE MASK registers value for UE error detection */
2233 2281 		hba->sli.sli4.ue_mask_lo = ddi_get32(hba->pci_acc_handle,
2234 2282 		    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_LO_OFFSET));
2235 2283 		hba->sli.sli4.ue_mask_hi = ddi_get32(hba->pci_acc_handle,
2236 2284 		    (uint32_t *)(hba->pci_addr + PCICFG_UE_MASK_HI_OFFSET));
|
↓ open down ↓ |
774 lines elided |
↑ open up ↑ |
2237 2285 	}
2238 2286 
2239 2287 	return (0);
2240 2288 
2241 2289 } /* emlxs_sli4_hba_init() */
2242 2290
2243 2291
/*
 * emlxs_sli4_hba_reset()
 *
 * Reset the SLI4 port.  Honors the CFG_RESET_ENABLE config knob
 * (reset disabled -> FC_ERROR, return 1).  SLI_INTF_IF_TYPE_0 is
 * reset via a RESET mailbox command (the quiesce path uses the
 * quiesce mailbox variant and skips the hba_kill/bootstrap re-init);
 * SLI_INTF_IF_TYPE_2 writes SLI_CNTL_INIT_PORT to the control
 * register under EMLXS_PORT_LOCK, optionally scheduling a dump event
 * unless the forced-dump error signature (err1==2 && err2==2) is
 * seen.  Afterwards the hba/channel/vport soft state is reset
 * (including ATTO GPIO LED bookkeeping for EMLXS_GPIO_LEDS models)
 * and the routine waits for hardware ready.  Returns 0 on success,
 * non-zero on failure.  'restart' and 'skip_post' are unused here
 * (/\*ARGSUSED*\/).
 */
2244 2292 /*ARGSUSED*/
2245 2293 static uint32_t
2246 2294 emlxs_sli4_hba_reset(emlxs_hba_t *hba, uint32_t restart, uint32_t skip_post,
2247    	    uint32_t quiesce)
     2295 	    uint32_t quiesce)
2248 2296 {
2249 2297 	emlxs_port_t *port = &PPORT;
2250 2298 	emlxs_port_t *vport;
2251 2299 	CHANNEL *cp;
2252 2300 	emlxs_config_t *cfg = &CFG;
2253 2301 	MAILBOXQ mboxq;
2254 2302 	uint32_t value;
2255 2303 	uint32_t i;
2256 2304 	uint32_t rc;
2257 2305 	uint16_t channelno;
2258 2306 	uint32_t status;
2259 2307 	uint32_t err1;
2260 2308 	uint32_t err2;
2261 2309 	uint8_t generate_event = 0;
2262 2310 
2263 2311 	if (!cfg[CFG_RESET_ENABLE].current) {
2264 2312 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_reset_failed_msg,
2265 2313 		    "Adapter reset disabled.");
2266 2314 		EMLXS_STATE_CHANGE(hba, FC_ERROR);
2267 2315 
2268 2316 		return (1);
2269 2317 	}
2270 2318 
2271 2319 	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
2272 2320 	case SLI_INTF_IF_TYPE_0:
2273 2321 		if (quiesce == 0) {
2274 2322 			emlxs_sli4_hba_kill(hba);
2275 2323 
2276 2324 			/*
2277 2325 			 * Initalize Hardware that will be used to bring
2278 2326 			 * SLI4 online.
2279 2327 			 */
2280 2328 			rc = emlxs_init_bootstrap_mb(hba);
2281 2329 			if (rc) {
2282 2330 				return (rc);
2283 2331 			}
2284 2332 		}
2285 2333 
2286 2334 		bzero((void *)&mboxq, sizeof (MAILBOXQ));
2287 2335 		emlxs_mb_resetport(hba, &mboxq);
2288 2336 
2289 2337 		if (quiesce == 0) {
2290 2338 			if (emlxs_sli4_issue_mbox_cmd(hba, &mboxq,
2291 2339 			    MBX_POLL, 0) != MBX_SUCCESS) {
2292 2340 				/* Timeout occurred */
2293 2341 				EMLXS_MSGF(EMLXS_CONTEXT,
2294 2342 				    &emlxs_reset_failed_msg,
2295 2343 				    "Timeout: RESET");
2296 2344 				EMLXS_STATE_CHANGE(hba, FC_ERROR);
2297 2345 				/* Log a dump event - not supported */
2298 2346 				return (1);
2299 2347 			}
2300 2348 		} else {
2301 2349 			if (emlxs_sli4_issue_mbox_cmd4quiesce(hba, &mboxq,
2302 2350 			    MBX_POLL, 0) != MBX_SUCCESS) {
2303 2351 				EMLXS_STATE_CHANGE(hba, FC_ERROR);
2304 2352 				/* Log a dump event - not supported */
2305 2353 				return (1);
2306 2354 			}
2307 2355 		}
2308 2356 		emlxs_data_dump(port, "resetPort", (uint32_t *)&mboxq, 12, 0);
2309 2357 		break;
2310 2358 
2311 2359 	case SLI_INTF_IF_TYPE_2:
2312 2360 		if (quiesce == 0) {
2313 2361 			emlxs_sli4_hba_kill(hba);
2314 2362 		}
2315 2363 
2316 2364 		rc = emlxs_check_hdw_ready(hba);
2317 2365 		if (rc > 1) {
2318 2366 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
2319 2367 			    "Adapter not ready for reset.");
2320 2368 			return (1);
2321 2369 		}
2322 2370 
2323 2371 		if (rc == 1) {
2324 2372 			err1 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2325 2373 			    hba->sli.sli4.ERR1_reg_addr);
2326 2374 			err2 = ddi_get32(hba->sli.sli4.bar0_acc_handle,
2327 2375 			    hba->sli.sli4.ERR2_reg_addr);
2328 2376 
2329 2377 			/* Don't generate an event if dump was forced */
2330 2378 			if ((err1 != 0x2) || (err2 != 0x2)) {
2331 2379 				generate_event = 1;
2332 2380 			}
2333 2381 		}
2334 2382 
2335 2383 		/* Reset the port now */
2336 2384 
2337 2385 		mutex_enter(&EMLXS_PORT_LOCK);
2338 2386 		value = SLI_CNTL_INIT_PORT;
2339 2387 
2340 2388 		ddi_put32(hba->sli.sli4.bar0_acc_handle,
2341 2389 		    hba->sli.sli4.CNTL_reg_addr, value);
2342 2390 		mutex_exit(&EMLXS_PORT_LOCK);
2343 2391 
2344 2392 		break;
2345 2393 	}
2346 2394 
2347 2395 	/* Reset the hba structure */
2348 2396 	hba->flag &= FC_RESET_MASK;
2349 2397 
2350 2398 	for (channelno = 0; channelno < hba->chan_count; channelno++) {
2351 2399 		cp = &hba->chan[channelno];
2352 2400 		cp->hba = hba;
2353 2401 		cp->channelno = channelno;
2354 2402 	}
2355 2403 
|
↓ open down ↓ |
98 lines elided |
↑ open up ↑ |
2356 2404 	hba->channel_tx_count = 0;
2357 2405 	hba->io_count = 0;
2358 2406 	hba->iodone_count = 0;
2359 2407 	hba->topology = 0;
2360 2408 	hba->linkspeed = 0;
2361 2409 	hba->heartbeat_active = 0;
2362 2410 	hba->discovery_timer = 0;
2363 2411 	hba->linkup_timer = 0;
2364 2412 	hba->loopback_tics = 0;
2365 2413 
     2414 	/* Specific to ATTO G5 boards */
     2415 	if (hba->model_info.flags & EMLXS_GPIO_LEDS) {
     2416 		/* Assume the boot driver enabled all LEDs */
     2417 		hba->gpio_current =
     2418 		    EMLXS_GPIO_LO | EMLXS_GPIO_HI | EMLXS_GPIO_ACT;
     2419 		hba->gpio_desired = 0;
     2420 		hba->gpio_bit = 0;
     2421 	}
     2422 
2366 2423 	/* Reset the port objects */
2367 2424 	for (i = 0; i < MAX_VPORTS; i++) {
2368 2425 		vport = &VPORT(i);
2369 2426 
2370 2427 		vport->flag &= EMLXS_PORT_RESET_MASK;
2371 2428 		vport->did = 0;
2372 2429 		vport->prev_did = 0;
2373 2430 		vport->lip_type = 0;
2374 2431 		bzero(&vport->fabric_sparam, sizeof (SERV_PARM));
2375 2432 		bzero(&vport->prev_fabric_sparam, sizeof (SERV_PARM));
2376 2433 
2377 2434 		bzero((caddr_t)&vport->node_base, sizeof (NODELIST));
2378 2435 		vport->node_base.nlp_Rpi = 0;
2379 2436 		vport->node_base.nlp_DID = 0xffffff;
2380 2437 		vport->node_base.nlp_list_next = NULL;
2381 2438 		vport->node_base.nlp_list_prev = NULL;
2382 2439 		vport->node_base.nlp_active = 1;
2383 2440 		vport->node_count = 0;
2384 2441 
2385 2442 		if (vport->ub_count < EMLXS_UB_TOKEN_OFFSET) {
2386 2443 			vport->ub_count = EMLXS_UB_TOKEN_OFFSET;
2387 2444 		}
2388 2445 	}
2389 2446 
2390 2447 	if (emlxs_check_hdw_ready(hba)) {
2391 2448 		return (1);
2392 2449 	}
2393 2450 
2394 2451 	if (generate_event) {
2395 2452 		status = emlxs_sli4_read_status(hba);
2396 2453 		if (status & SLI_STATUS_DUMP_IMAGE_PRESENT) {
2397 2454 			emlxs_log_dump_event(port, NULL, 0);
2398 2455 		}
2399 2456 	}
2400 2457 
2401 2458 	return (0);
2402 2459 
2403 2460 } /* emlxs_sli4_hba_reset */
2404 2461
2405 2462
/*
 * SGL section selectors for emlxs_pkt_to_sgl().  SGL_LAST is OR'd in
 * to mark the section whose final SGE should carry the 'last' bit.
 */
2406 2463 #define	SGL_CMD		0
2407 2464 #define	SGL_RESP	1
2408 2465 #define	SGL_DATA	2
2409 2466 #define	SGL_LAST	0x80
2410 2467 
/*
 * emlxs_pkt_to_sgl()
 *
 * Convert one section (cmd, resp or data cookies, selected by
 * sgl_type) of a fc_packet_t into ULP_SGE64 scatter/gather entries,
 * starting at 'sge'.  Cookies are split into chunks of at most
 * EMLXS_MAX_SGE_SIZE and capped by the section's declared length.
 * Each SGE is staged locally and byte-swapped into the SGL; data
 * SGEs carry a running relative offset.  If SGL_LAST is set the
 * final SGE gets the 'last' bit.  Returns a pointer just past the
 * last SGE written (so sections can be chained), or NULL for an
 * unknown sgl_type.  If pcnt is non-NULL, *pcnt receives the total
 * byte count consumed.  Pre-MODREV3 packets have exactly one cookie
 * per section.
 */
2411 2468 /*ARGSUSED*/
2412 2469 static ULP_SGE64 *
2413 2470 emlxs_pkt_to_sgl(emlxs_port_t *port, fc_packet_t *pkt, ULP_SGE64 *sge,
2414 2471     uint32_t sgl_type, uint32_t *pcnt)
2415 2472 {
2416 2473 #ifdef DEBUG_SGE
2417 2474 	emlxs_hba_t *hba = HBA;
2418 2475 #endif /* DEBUG_SGE */
2419 2476 	ddi_dma_cookie_t *cp;
2420 2477 	uint_t i;
2421 2478 	uint_t last;
2422 2479 	int32_t	size;
2423 2480 	int32_t	sge_size;
2424 2481 	uint64_t sge_addr;
2425 2482 	int32_t	len;
2426 2483 	uint32_t cnt;
2427 2484 	uint_t cookie_cnt;
2428 2485 	ULP_SGE64 stage_sge;
2429 2486 
2430 2487 	last = sgl_type & SGL_LAST;
2431 2488 	sgl_type &= ~SGL_LAST;
2432 2489 
2433 2490 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2434 2491 	switch (sgl_type) {
2435 2492 	case SGL_CMD:
2436 2493 		cp = pkt->pkt_cmd_cookie;
2437 2494 		cookie_cnt = pkt->pkt_cmd_cookie_cnt;
2438 2495 		size = (int32_t)pkt->pkt_cmdlen;
2439 2496 		break;
2440 2497 
2441 2498 	case SGL_RESP:
2442 2499 		cp = pkt->pkt_resp_cookie;
2443 2500 		cookie_cnt = pkt->pkt_resp_cookie_cnt;
2444 2501 		size = (int32_t)pkt->pkt_rsplen;
2445 2502 		break;
2446 2503 
2447 2504 
2448 2505 	case SGL_DATA:
2449 2506 		cp = pkt->pkt_data_cookie;
2450 2507 		cookie_cnt = pkt->pkt_data_cookie_cnt;
2451 2508 		size = (int32_t)pkt->pkt_datalen;
2452 2509 		break;
2453 2510 
2454 2511 	default:
2455 2512 		return (NULL);
2456 2513 	}
2457 2514 
2458 2515 #else
2459 2516 	switch (sgl_type) {
2460 2517 	case SGL_CMD:
2461 2518 		cp = &pkt->pkt_cmd_cookie;
2462 2519 		cookie_cnt = 1;
2463 2520 		size = (int32_t)pkt->pkt_cmdlen;
2464 2521 		break;
2465 2522 
2466 2523 	case SGL_RESP:
2467 2524 		cp = &pkt->pkt_resp_cookie;
2468 2525 		cookie_cnt = 1;
2469 2526 		size = (int32_t)pkt->pkt_rsplen;
2470 2527 		break;
2471 2528 
2472 2529 
2473 2530 	case SGL_DATA:
2474 2531 		cp = &pkt->pkt_data_cookie;
2475 2532 		cookie_cnt = 1;
2476 2533 		size = (int32_t)pkt->pkt_datalen;
2477 2534 		break;
2478 2535 
2479 2536 	default:
2480 2537 		return (NULL);
2481 2538 	}
2482 2539 #endif	/* >= EMLXS_MODREV3 */
2483 2540 
2484 2541 	stage_sge.offset = 0;
2485 2542 	stage_sge.type = 0;
2486 2543 	stage_sge.last = 0;
2487 2544 	cnt = 0;
2488 2545 	for (i = 0; i < cookie_cnt && size > 0; i++, cp++) {
2489 2546 
2490 2547 		sge_size = cp->dmac_size;
2491 2548 		sge_addr = cp->dmac_laddress;
2492 2549 		while (sge_size && size) {
2493 2550 			if (cnt) {
2494 2551 				/* Copy staged SGE before we build next one */
2495 2552 				BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
2496 2553 				    (uint8_t *)sge, sizeof (ULP_SGE64));
2497 2554 				sge++;
2498 2555 			}
2499 2556 			len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
2500 2557 			len = MIN(size, len);
2501 2558 
2502 2559 			stage_sge.addrHigh =
2503 2560 			    PADDR_HI(sge_addr);
2504 2561 			stage_sge.addrLow =
2505 2562 			    PADDR_LO(sge_addr);
2506 2563 			stage_sge.length = len;
2507 2564 			if (sgl_type == SGL_DATA) {
2508 2565 				stage_sge.offset = cnt;
2509 2566 			}
2510 2567 #ifdef DEBUG_SGE
2511 2568 			emlxs_data_dump(port, "SGE", (uint32_t *)&stage_sge,
2512 2569 			    4, 0);
2513 2570 #endif /* DEBUG_SGE */
2514 2571 			sge_addr += len;
2515 2572 			sge_size -= len;
2516 2573 
2517 2574 			cnt += len;
2518 2575 			size -= len;
2519 2576 		}
2520 2577 	}
2521 2578 
2522 2579 	if (last) {
2523 2580 		stage_sge.last = 1;
2524 2581 	}
2525 2582 	BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
2526 2583 	    sizeof (ULP_SGE64));
2527 2584 
2528 2585 	sge++;
2529 2586 
2530 2587 	if (pcnt) {
2531 2588 		*pcnt = cnt;
2532 2589 	}
2533 2590 	return (sge);
2534 2591 
2535 2592 } /* emlxs_pkt_to_sgl */
2536 2593
2537 2594
/*
 * emlxs_sli4_bde_setup()
 *
 * Build the SGL (in the XRI's SGList buffer) and the WQE buffer
 * descriptors for an I/O.  FCP commands (IOCB_FCP_CMD) get CMD and
 * RSP SGEs, plus DATA SGEs when pkt_datalen != 0; when the adapter
 * has EMLXS_SLI4_PHON set, the WQE FirstData descriptor is primed
 * from the first data cookie.  Outbound-only FCP is rejected here
 * (return 1).  Non-FCP requests (GEN_REQUEST/ELS style) get a CMD
 * SGE, plus an RSP SGE for bidirectional exchanges.  Returns 0 on
 * success, 1 on any SGL build failure.
 */
2538 2595 /*ARGSUSED*/
2539 2596 uint32_t
2540 2597 emlxs_sli4_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2541 2598 {
2542 2599 	emlxs_hba_t *hba = HBA;
2543 2600 	fc_packet_t *pkt;
2544 2601 	XRIobj_t *xrip;
2545 2602 	ULP_SGE64 *sge;
2546 2603 	emlxs_wqe_t *wqe;
2547 2604 	IOCBQ *iocbq;
|
↓ open down ↓ |
172 lines elided |
↑ open up ↑ |
2548 2605 	ddi_dma_cookie_t *cp_cmd;
2549 2606 	ddi_dma_cookie_t *cp_data;
2550 2607 	uint64_t sge_addr;
2551 2608 	uint32_t cmd_cnt;
2552 2609 	uint32_t resp_cnt;
2553 2610 
2554 2611 	iocbq = (IOCBQ *) &sbp->iocbq;
2555 2612 	wqe = &iocbq->wqe;
2556 2613 	pkt = PRIV2PKT(sbp);
2557 2614 	xrip = sbp->xrip;
2558      	sge = xrip->SGList.virt;
     2615 	sge = xrip->SGList->virt;
2559 2616 
2560 2617 #if (EMLXS_MODREV >= EMLXS_MODREV3)
2561 2618 	cp_cmd = pkt->pkt_cmd_cookie;
2562 2619 	cp_data = pkt->pkt_data_cookie;
2563 2620 #else
2564 2621 	cp_cmd  = &pkt->pkt_cmd_cookie;
2565 2622 	cp_data = &pkt->pkt_data_cookie;
2566 2623 #endif	/* >= EMLXS_MODREV3 */
2567 2624 
2568 2625 	iocbq = &sbp->iocbq;
2569 2626 	if (iocbq->flag & IOCB_FCP_CMD) {
2570 2627 
2571 2628 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2572 2629 			return (1);
2573 2630 		}
2574 2631 
2575 2632 		/* CMD payload */
2576 2633 		sge = emlxs_pkt_to_sgl(port, pkt, sge, SGL_CMD, &cmd_cnt);
2577 2634 		if (! sge) {
2578 2635 			return (1);
2579 2636 		}
2580 2637 
2581 2638 		/* DATA payload */
2582 2639 		if (pkt->pkt_datalen != 0) {
2583 2640 			/* RSP payload */
2584 2641 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2585 2642 			    SGL_RESP, &resp_cnt);
2586 2643 			if (! sge) {
2587 2644 				return (1);
2588 2645 			}
2589 2646 
2590 2647 			/* Data payload */
2591 2648 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2592 2649 			    SGL_DATA | SGL_LAST, 0);
2593 2650 			if (! sge) {
2594 2651 				return (1);
2595 2652 			}
/*
 * NOTE(review): this label appears unreferenced in the visible code;
 * presumably a goto targets it under another build option - confirm.
 */
2596 2653 sgl_done:
2597 2654 			if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
2598 2655 				sge_addr = cp_data->dmac_laddress;
2599 2656 				wqe->FirstData.addrHigh = PADDR_HI(sge_addr);
2600 2657 				wqe->FirstData.addrLow = PADDR_LO(sge_addr);
2601 2658 				wqe->FirstData.tus.f.bdeSize =
2602 2659 				    cp_data->dmac_size;
2603 2660 			}
2604 2661 		} else {
2605 2662 			/* RSP payload */
2606 2663 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2607 2664 			    SGL_RESP | SGL_LAST, &resp_cnt);
2608 2665 			if (! sge) {
2609 2666 				return (1);
2610 2667 			}
2611 2668 		}
2612 2669 
2613 2670 		wqe->un.FcpCmd.Payload.addrHigh =
2614 2671 		    PADDR_HI(cp_cmd->dmac_laddress);
2615 2672 		wqe->un.FcpCmd.Payload.addrLow =
2616 2673 		    PADDR_LO(cp_cmd->dmac_laddress);
2617 2674 		wqe->un.FcpCmd.Payload.tus.f.bdeSize = cmd_cnt;
2618 2675 		wqe->un.FcpCmd.PayloadLength = cmd_cnt + resp_cnt;
2619 2676 
2620 2677 	} else {
2621 2678 
2622 2679 		if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
2623 2680 			/* CMD payload */
2624 2681 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2625 2682 			    SGL_CMD | SGL_LAST, &cmd_cnt);
2626 2683 			if (! sge) {
2627 2684 				return (1);
2628 2685 			}
2629 2686 		} else {
2630 2687 			/* CMD payload */
2631 2688 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2632 2689 			    SGL_CMD, &cmd_cnt);
2633 2690 			if (! sge) {
2634 2691 				return (1);
2635 2692 			}
2636 2693 
2637 2694 			/* RSP payload */
2638 2695 			sge = emlxs_pkt_to_sgl(port, pkt, sge,
2639 2696 			    SGL_RESP | SGL_LAST, &resp_cnt);
2640 2697 			if (! sge) {
2641 2698 				return (1);
2642 2699 			}
2643 2700 			wqe->un.GenReq.PayloadLength = cmd_cnt;
2644 2701 		}
2645 2702 
2646 2703 		wqe->un.GenReq.Payload.addrHigh =
2647 2704 		    PADDR_HI(cp_cmd->dmac_laddress);
2648 2705 		wqe->un.GenReq.Payload.addrLow =
2649 2706 		    PADDR_LO(cp_cmd->dmac_laddress);
2650 2707 		wqe->un.GenReq.Payload.tus.f.bdeSize = cmd_cnt;
2651 2708 	}
2652 2709 	return (0);
2653 2710 } /* emlxs_sli4_bde_setup */
2654 2711
2655 2712
2656 2713
2657 2714
2658 2715 #ifdef SFCT_SUPPORT
/*
 * emlxs_sli4_fct_bde_setup()
 *
 * Target-mode (SFCT) SGL/WQE setup.  Only a single db_sglist entry
 * is supported.  For CMD_FCP_TRECEIVE64_CX the first two SGEs are an
 * XFER_RDY payload (allocated here; the MATCHMAP is parked on
 * iocbq->bp so it can be freed on completion) and a SKIP; for
 * CMD_FCP_TSEND64_CX both are SKIP SGEs and PayloadLength is 0.
 * Data SGEs are then built in EMLXS_MAX_SGE_SIZE chunks from the
 * fct buffer, the last one flagged 'last'; FirstData is primed when
 * EMLXS_SLI4_PHON is set.  TotalTransferCount carries the buffer's
 * relative offset.  Returns 0 on success (or no fct_buf), 1 on
 * error.
 */
2659 2716 /*ARGSUSED*/
2660 2717 static uint32_t
2661 2718 emlxs_sli4_fct_bde_setup(emlxs_port_t *port, emlxs_buf_t *sbp)
2662 2719 {
2663 2720 	emlxs_hba_t *hba = HBA;
2664 2721 	emlxs_wqe_t *wqe;
2665 2722 	ULP_SGE64 stage_sge;
2666 2723 	ULP_SGE64 *sge;
2667 2724 	IOCB *iocb;
2668 2725 	IOCBQ *iocbq;
2669 2726 	MATCHMAP *mp;
2670 2727 	MATCHMAP *fct_mp;
2671 2728 	XRIobj_t *xrip;
2672 2729 	uint64_t sge_addr;
2673 2730 	uint32_t sge_size;
2674 2731 	uint32_t cnt;
2675 2732 	uint32_t len;
2676 2733 	uint32_t size;
2677 2734 	uint32_t *xrdy_vaddr;
2678 2735 	stmf_data_buf_t *dbuf;
2679 2736 
2680 2737 	iocbq = &sbp->iocbq;
2681 2738 	iocb = &iocbq->iocb;
2682 2739 	wqe = &iocbq->wqe;
2683 2740 	xrip = sbp->xrip;
2684 2741 
2685 2742 	if (!sbp->fct_buf) {
2686 2743 		return (0);
2687 2744 	}
2688 2745 
2689 2746 	size = sbp->fct_buf->db_data_size;
2690 2747 
2691 2748 	/*
2692 2749 	 * The hardware will automaticlly round up
2693 2750 	 * to multiple of 4.
2694 2751 	 *
2695 2752 	 * if (size & 3) {
2696 2753 	 *	size = (size + 3) & 0xfffffffc;
2697 2754 	 * }
2698 2755 	 */
2699 2756 	fct_mp = (MATCHMAP *)sbp->fct_buf->db_port_private;
2700 2757 
2701 2758 	if (sbp->fct_buf->db_sglist_length != 1) {
2702 2759 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
2703 2760 		    "fct_bde_setup: Only 1 sglist entry supported: %d",
2704 2761 		    sbp->fct_buf->db_sglist_length);
2705 2762 		return (1);
2706 2763 	}
2707 2764 
2708      	sge = xrip->SGList.virt;
     2765 	sge = xrip->SGList->virt;
2709 2766 
2710 2767 	if (iocb->ULPCOMMAND == CMD_FCP_TRECEIVE64_CX) {
2711 2768 
2712 2769 		mp = emlxs_mem_buf_alloc(hba, EMLXS_XFER_RDY_SIZE);
2713 2770 		if (!mp || !mp->virt || !mp->phys) {
2714 2771 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fct_error_msg,
2715 2772 			    "fct_bde_setup: Cannot allocate XRDY memory");
2716 2773 			return (1);
2717 2774 		}
2718 2775 		/* Save the MATCHMAP info to free this memory later */
2719 2776 		iocbq->bp = mp;
2720 2777 
2721 2778 		/* Point to XRDY payload */
2722 2779 		xrdy_vaddr = (uint32_t *)(mp->virt);
2723 2780 
2724 2781 		/* Fill in burstsize in payload */
2725 2782 		*xrdy_vaddr++ = 0;
2726 2783 		*xrdy_vaddr++ = LE_SWAP32(size);
2727 2784 		*xrdy_vaddr = 0;
2728 2785 
2729 2786 		/* First 2 SGEs are XRDY and SKIP */
2730 2787 		stage_sge.addrHigh = PADDR_HI(mp->phys);
2731 2788 		stage_sge.addrLow = PADDR_LO(mp->phys);
2732 2789 		stage_sge.length = EMLXS_XFER_RDY_SIZE;
2733 2790 		stage_sge.offset = 0;
2734 2791 		stage_sge.type = 0;
2735 2792 		stage_sge.last = 0;
2736 2793 
2737 2794 		/* Words 0-3 */
2738 2795 		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
2739 2796 		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
2740 2797 		wqe->un.FcpCmd.Payload.tus.f.bdeSize = EMLXS_XFER_RDY_SIZE;
2741 2798 		wqe->un.FcpCmd.PayloadLength = EMLXS_XFER_RDY_SIZE;
2742 2799 
2743 2800 	} else {	/* CMD_FCP_TSEND64_CX */
2744 2801 		/* First 2 SGEs are SKIP */
2745 2802 		stage_sge.addrHigh = 0;
2746 2803 		stage_sge.addrLow = 0;
2747 2804 		stage_sge.length = 0;
2748 2805 		stage_sge.offset = 0;
2749 2806 		stage_sge.type = EMLXS_SGE_TYPE_SKIP;
2750 2807 		stage_sge.last = 0;
2751 2808 
2752 2809 		/* Words 0-3 */
2753 2810 		wqe->un.FcpCmd.Payload.addrHigh = PADDR_HI(fct_mp->phys);
2754 2811 		wqe->un.FcpCmd.Payload.addrLow = PADDR_LO(fct_mp->phys);
2755 2812 
2756 2813 		/* The BDE should match the contents of the first SGE payload */
2757 2814 		len = MIN(EMLXS_MAX_SGE_SIZE, size);
2758 2815 		wqe->un.FcpCmd.Payload.tus.f.bdeSize = len;
2759 2816 
2760 2817 		/* The PayloadLength should be set to 0 for TSEND64. */
2761 2818 		wqe->un.FcpCmd.PayloadLength = 0;
2762 2819 	}
2763 2820 
2764 2821 	dbuf = sbp->fct_buf;
2765 2822 	/*
2766 2823 	 * TotalTransferCount equals to Relative Offset field (Word 4)
2767 2824 	 * in both TSEND64 and TRECEIVE64 WQE.
2768 2825 	 */
2769 2826 	wqe->un.FcpCmd.TotalTransferCount = dbuf->db_relative_offset;
2770 2827 
2771 2828 	/* Copy staged SGE into SGL */
2772 2829 	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
2773 2830 	    (uint8_t *)sge, sizeof (ULP_SGE64));
2774 2831 	sge++;
2775 2832 
2776 2833 	stage_sge.addrHigh = 0;
2777 2834 	stage_sge.addrLow = 0;
2778 2835 	stage_sge.length = 0;
2779 2836 	stage_sge.offset = 0;
2780 2837 	stage_sge.type = EMLXS_SGE_TYPE_SKIP;
2781 2838 	stage_sge.last = 0;
2782 2839 
2783 2840 	/* Copy staged SGE into SGL */
2784 2841 	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
2785 2842 	    (uint8_t *)sge, sizeof (ULP_SGE64));
2786 2843 	sge++;
2787 2844 
2788 2845 	sge_size = size;
2789 2846 	sge_addr = fct_mp->phys;
2790 2847 	cnt = 0;
2791 2848 
2792 2849 	/* Build SGEs */
2793 2850 	while (sge_size) {
2794 2851 		if (cnt) {
2795 2852 			/* Copy staged SGE before we build next one */
2796 2853 			BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
2797 2854 			    (uint8_t *)sge, sizeof (ULP_SGE64));
2798 2855 			sge++;
2799 2856 		}
2800 2857 
2801 2858 		len = MIN(EMLXS_MAX_SGE_SIZE, sge_size);
2802 2859 
2803 2860 		stage_sge.addrHigh = PADDR_HI(sge_addr);
2804 2861 		stage_sge.addrLow = PADDR_LO(sge_addr);
2805 2862 		stage_sge.length = len;
2806 2863 		stage_sge.offset = cnt;
2807 2864 		stage_sge.type = EMLXS_SGE_TYPE_DATA;
2808 2865 
2809 2866 		sge_addr += len;
2810 2867 		sge_size -= len;
2811 2868 		cnt += len;
|
↓ open down ↓ |
129 lines elided |
↑ open up ↑ |
2812 2869 	}
2813 2870 
2814 2871 	stage_sge.last = 1;
2815 2872 
2816 2873 	if (hba->sli.sli4.flag & EMLXS_SLI4_PHON) {
2817 2874 		wqe->FirstData.addrHigh = stage_sge.addrHigh;
2818 2875 		wqe->FirstData.addrLow = stage_sge.addrLow;
2819 2876 		wqe->FirstData.tus.f.bdeSize = stage_sge.length;
2820 2877 	}
2821 2878 	/* Copy staged SGE into SGL */
2822 2879 	BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
2823 2880 	    (uint8_t *)sge, sizeof (ULP_SGE64));
2824 2881 
2825 2882 	return (0);
2826 2883 
2827 2884 } /* emlxs_sli4_fct_bde_setup */
2828 2885 #endif /* SFCT_SUPPORT */
2829 2886
2830 2887
/*
 * Issue (or queue) work queue entries on the given channel's WQ.
 *
 *	hba	- adapter instance
 *	cp	- channel; cp->iopath identifies the target WQ
 *	iocbq	- new request to send, or NULL to drain the channel tx queue
 *
 * The request is deferred to the channel tx queue (emlxs_tx_put) whenever
 * the adapter is not ready for FCP, the WQ ring is full, the adapter (or
 * per-node) throttle limit is hit, or the ring lock cannot be taken
 * immediately.  Otherwise each WQE is byte-swapped into the next WQ slot,
 * DMA-synced to the device, and the WQ doorbell is rung.
 */
static void
emlxs_sli4_issue_iocb_cmd(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	uint32_t channelno;
	int32_t throttle;
	emlxs_wqe_t *wqe;
	emlxs_wqe_t *wqeslot;
	WQ_DESC_t *wq;
	uint32_t flag;
	uint32_t wqdb;
	uint16_t next_wqe;
	off_t offset;
#ifdef NODE_THROTTLE_SUPPORT
	int32_t node_throttle;
	NODELIST *marked_node = NULL;
#endif /* NODE_THROTTLE_SUPPORT */


	channelno = cp->channelno;
	wq = (WQ_DESC_t *)cp->iopath;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "ISSUE WQE channel: %x %p", channelno, wq);
#endif /* DEBUG_FASTPATH */

	throttle = 0;

	/* Check if FCP ring and adapter is not ready */
	/* We may use any ring for FCP_CMD */
	if (iocbq && (iocbq->flag & IOCB_FCP_CMD) && (hba->state != FC_READY)) {
		/*
		 * Only special target-mode requests may proceed before
		 * FC_READY; everything else is deferred to the tx queue.
		 */
		if (!(iocbq->flag & IOCB_SPECIAL) || !iocbq->port ||
		    (((emlxs_port_t *)iocbq->port)->mode == MODE_INITIATOR)) {
			emlxs_tx_put(iocbq, 1);
			return;
		}
	}

	/* Attempt to acquire CMD_RING lock */
	if (mutex_tryenter(&EMLXS_QUE_LOCK(channelno)) == 0) {
		/* Queue it for later */
		if (iocbq) {
			/*
			 * If plenty of I/O is already in flight, defer
			 * rather than contend for the lock; otherwise
			 * block on the lock so the request is not starved.
			 */
			if ((hba->io_count -
			    hba->channel_tx_count) > 10) {
				emlxs_tx_put(iocbq, 1);
				return;
			} else {

				mutex_enter(&EMLXS_QUE_LOCK(channelno));
			}
		} else {
			/* Nothing to send and lock is busy; nothing to do */
			return;
		}
	}
	/* EMLXS_QUE_LOCK acquired */

	/* Throttle check only applies to non special iocb */
	if (iocbq && (!(iocbq->flag & IOCB_SPECIAL))) {
		/* Check if HBA is full */
		throttle = hba->io_throttle - hba->io_active;
		if (throttle <= 0) {
			/* Hitting adapter throttle limit */
			/* Queue it for later */
			if (iocbq) {
				emlxs_tx_put(iocbq, 1);
			}

			goto busy;
		}
	}

	/* Check to see if we have room for this WQE */
	next_wqe = wq->host_index + 1;
	if (next_wqe >= wq->max_index) {
		next_wqe = 0;
	}

	if (next_wqe == wq->port_index) {
		/* Ring full: host would catch up to the port's read index */
		/* Queue it for later */
		if (iocbq) {
			emlxs_tx_put(iocbq, 1);
		}
		goto busy;
	}

	/*
	 * We have a command ring slot available
	 * Make sure we have an iocb to send
	 */
	if (iocbq) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

		/* Check if the ring already has iocb's waiting */
		if (cp->nodeq.q_first != NULL) {
			/* Put the current iocbq on the tx queue */
			emlxs_tx_put(iocbq, 0);

			/*
			 * Attempt to replace it with the next iocbq
			 * in the tx queue (preserves FIFO ordering)
			 */
			iocbq = emlxs_tx_get(cp, 0);
		}

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	} else {
		iocbq = emlxs_tx_get(cp, 1);
	}

	/*
	 * NOTE(review): no "goto sendit" is visible in this function;
	 * the label appears to be historical -- confirm before removing.
	 */
sendit:
	/* Process each iocbq */
	while (iocbq) {
		sbp = iocbq->sbp;

#ifdef NODE_THROTTLE_SUPPORT
		if (sbp && sbp->node && sbp->node->io_throttle) {
			node_throttle = sbp->node->io_throttle -
			    sbp->node->io_active;
			if (node_throttle <= 0) {
				/* Node is busy */
				/* Queue this iocb and get next iocb from */
				/* channel */

				/*
				 * Remember the first throttled node; if it
				 * comes around again every queued node is
				 * busy, so stop to avoid spinning forever.
				 */
				if (!marked_node) {
					marked_node = sbp->node;
				}

				mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
				emlxs_tx_put(iocbq, 0);

				if (cp->nodeq.q_first == marked_node) {
					mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
					goto busy;
				}

				iocbq = emlxs_tx_get(cp, 0);
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
				continue;
			}
		}
		marked_node = 0;
#endif /* NODE_THROTTLE_SUPPORT */

		wqe = &iocbq->wqe;
#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ISSUE QID %d WQE iotag:%x xri:%d", wq->qid,
		    wqe->RequestTag, wqe->XRITag);
#endif /* DEBUG_FASTPATH */

		if (sbp) {
			/* If exchange removed after wqe was prep'ed, drop it */
			if (!(sbp->xrip)) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "Xmit WQE iotag:%x xri:%d aborted",
				    wqe->RequestTag, wqe->XRITag);

				/* Get next iocb from the tx queue */
				iocbq = emlxs_tx_get(cp, 1);
				continue;
			}

			if (sbp->pkt_flags & PACKET_DELAY_REQUIRED) {

				/* Perform delay */
				if ((channelno == hba->channel_els) &&
				    !(iocbq->flag & IOCB_FCP_CMD)) {
					drv_usecwait(100000);
				} else {
					drv_usecwait(20000);
				}
			}

			/* Check for ULP pkt request */
			mutex_enter(&sbp->mtx);

			if (sbp->node == NULL) {
				/* Set node to base node by default */
				iocbq->node = (void *)&port->node_base;
				sbp->node = (void *)&port->node_base;
			}

			sbp->pkt_flags |= PACKET_IN_CHIPQ;
			mutex_exit(&sbp->mtx);

			atomic_inc_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_inc_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */

			sbp->xrip->flag |= EMLXS_XRI_PENDING_IO;
#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
			/*
			 * NOTE(review): 'icmd' is not declared in this
			 * function; this block only compiles when
			 * FCT_IO_TRACE is undefined -- confirm.
			 */
			if (sbp->fct_cmd) {
				emlxs_fct_io_trace(port, sbp->fct_cmd,
				    EMLXS_FCT_IOCB_ISSUED);
				emlxs_fct_io_trace(port, sbp->fct_cmd,
				    icmd->ULPCOMMAND);
			}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */
			cp->hbaSendCmd_sbp++;
			iocbq->channel = cp;
		} else {
			cp->hbaSendCmd++;
		}

		/* Save flag: iocbq must not be touched after doorbell */
		flag = iocbq->flag;

		/*
		 * At this point, we have a command ring slot available
		 * and an iocb to send
		 */
		/* Request a WQE-release completion every Nth entry */
		wq->release_depth--;
		if (wq->release_depth == 0) {
			wq->release_depth = WQE_RELEASE_DEPTH;
			wqe->WQEC = 1;
		}

		HBASTATS.IocbIssued[channelno]++;
		wq->num_proc++;

		/* Send the iocb */
		wqeslot = (emlxs_wqe_t *)wq->addr.virt;
		wqeslot += wq->host_index;

		wqe->CQId = wq->cqid;
		if (hba->sli.sli4.param.PHWQ) {
			WQE_PHWQ_WQID(wqe, wq->qid);
		}
		/* Byte-swap the WQE into the hardware queue slot */
		BE_SWAP32_BCOPY((uint8_t *)wqe, (uint8_t *)wqeslot,
		    sizeof (emlxs_wqe_t));
#ifdef DEBUG_WQE
		emlxs_data_dump(port, "WQE", (uint32_t *)wqe, 18, 0);
#endif /* DEBUG_WQE */
		/* WQ offset within the shared slim2 DMA area */
		offset = (off_t)((uint64_t)((unsigned long)
		    wq->addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(wq->addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		/* Ring the WQ Doorbell */
		/*
		 * qid | (1 entry posted << 24) | (host_index << 16);
		 * presumably the SLI-4 WQ doorbell layout -- see
		 * emlxs_sli4_write_wqdb().
		 */
		wqdb = wq->qid;
		wqdb |= ((1 << 24) | (wq->host_index << 16));

		/*
		 * After this, the sbp / iocb / wqe should not be
		 * accessed in the xmit path.
		 */

		emlxs_sli4_write_wqdb(hba, wqdb);
		wq->host_index = next_wqe;

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "WQ RING: %08x", wqdb);
#endif /* DEBUG_FASTPATH */

		/* Driver-internal iocbs (no sbp) are freed here */
		if (!sbp) {
			emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
		}

		if (iocbq && (!(flag & IOCB_SPECIAL))) {
			/* Check if HBA is full */
			throttle = hba->io_throttle - hba->io_active;
			if (throttle <= 0) {
				goto busy;
			}
		}

		/* Check to see if we have room for another WQE */
		next_wqe++;
		if (next_wqe >= wq->max_index) {
			next_wqe = 0;
		}

		if (next_wqe == wq->port_index) {
			/* Queue it for later */
			goto busy;
		}

		/* Get the next iocb from the tx queue if there is one */
		iocbq = emlxs_tx_get(cp, 1);
	}

	mutex_exit(&EMLXS_QUE_LOCK(channelno));

	return;

busy:
	/* Record why we stopped: throttled vs. ring full */
	wq->num_busy++;
	if (throttle <= 0) {
		HBASTATS.IocbThrottled++;
	} else {
		HBASTATS.IocbRingFull[channelno]++;
	}

	mutex_exit(&EMLXS_QUE_LOCK(channelno));

	return;

} /* emlxs_sli4_issue_iocb_cmd() */
3139 3196
3140 3197
/*
 * Post a mailbox command to the adapter's Mailbox Queue (MQ).
 *
 *	port	- issuing port (used for logging context)
 *	mqe	- the MQ slot to fill (selected by the caller)
 *	mb	- the mailbox command; also aliased as MAILBOXQ/MAILBOX4
 *	tmo	- unused in this function (ARGSUSED)
 *
 * Embedded commands are copied straight into the MQ slot.  Non-embedded
 * SLI_CONFIG commands are copied into the external buffer (mbq->nonembed)
 * and the MQ slot is given an SGE pointing at that buffer.  In both cases
 * the memory is byte-swapped, DMA-synced for the device, and the MQ
 * doorbell is rung.  Always returns MBX_SUCCESS; completion is reported
 * asynchronously.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mq(emlxs_port_t *port, MAILBOX4 *mqe, MAILBOX *mb,
    uint32_t tmo)
{
	emlxs_hba_t *hba = HBA;
	MAILBOXQ *mbq;
	MAILBOX4 *mb4;
	MATCHMAP *mp;
	uint32_t *iptr;
	uint32_t mqdb;
	off_t offset;

	/* mb is the leading member of both MAILBOXQ and MAILBOX4 views */
	mbq = (MAILBOXQ *)mb;
	mb4 = (MAILBOX4 *)mb;
	mp = (MATCHMAP *) mbq->nonembed;
	/* Remember where the completion should be read from */
	hba->mbox_mqe = (void *)mqe;

	if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
	    (mb4->un.varSLIConfig.be.embedded)) {
		/*
		 * If this is an embedded mbox, everything should fit
		 * into the mailbox area.
		 */
		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, 0,
		    4096, DDI_DMA_SYNC_FORDEV);

		/* Heartbeats are too frequent to be worth dumping */
		if (mb->mbxCommand != MBX_HEARTBEAT) {
			emlxs_data_dump(port, "MBOX CMD", (uint32_t *)mqe,
			    18, 0);
		}
	} else {
		/* SLI_CONFIG and non-embedded */

		/*
		 * If this is not embedded, the MQ area
		 * MUST contain a SGE pointer to a larger area for the
		 * non-embedded mailbox command.
		 * mp will point to the actual mailbox command which
		 * should be copied into the non-embedded area.
		 */
		mb4->un.varSLIConfig.be.sge_cnt = 1;
		mb4->un.varSLIConfig.be.payload_length = mp->size;
		/* Overlay the SGE (addr lo, addr hi, length) on hdr_req */
		iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
		*iptr++ = (uint32_t)PADDR_LO(mp->phys);
		*iptr++ = (uint32_t)PADDR_HI(mp->phys);
		*iptr = mp->size;

		BE_SWAP32_BUFFER(mp->virt, mp->size);

		EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
		    DDI_DMA_SYNC_FORDEV);

		BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)mqe,
		    MAILBOX_CMD_SLI4_BSIZE);

		/* MQ offset within the shared slim2 DMA area */
		offset = (off_t)((uint64_t)((unsigned long)
		    hba->sli.sli4.mq.addr.virt) -
		    (uint64_t)((unsigned long)
		    hba->sli.sli4.slim2.virt));

		EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
		    4096, DDI_DMA_SYNC_FORDEV);

		emlxs_data_dump(port, "MBOX EXT", (uint32_t *)mqe, 12, 0);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Extension Addr %p %p", mp->phys, (uint32_t *)(mp->virt));
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
	}

	/* Ring the MQ Doorbell */
	mqdb = hba->sli.sli4.mq.qid;
	mqdb |= ((1 << MQ_DB_POP_SHIFT) & MQ_DB_POP_MASK);

	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MQ RING: %08x", mqdb);
	}

	emlxs_sli4_write_mqdb(hba, mqdb);

	return (MBX_SUCCESS);

} /* emlxs_sli4_issue_mq() */
3228 3285
3229 3286
3230 3287 /*ARGSUSED*/
3231 3288 static uint32_t
3232 3289 emlxs_sli4_issue_bootstrap(emlxs_hba_t *hba, MAILBOX *mb, uint32_t tmo)
3233 3290 {
3234 3291 emlxs_port_t *port = &PPORT;
3235 3292 MAILBOXQ *mbq;
3236 3293 MAILBOX4 *mb4;
3237 3294 MATCHMAP *mp = NULL;
3238 3295 uint32_t *iptr;
3239 3296 int nonembed = 0;
3240 3297
3241 3298 mbq = (MAILBOXQ *)mb;
3242 3299 mb4 = (MAILBOX4 *)mb;
3243 3300 mp = (MATCHMAP *) mbq->nonembed;
3244 3301 hba->mbox_mqe = hba->sli.sli4.bootstrapmb.virt;
3245 3302
3246 3303 if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
3247 3304 (mb4->un.varSLIConfig.be.embedded)) {
3248 3305 /*
3249 3306 * If this is an embedded mbox, everything should fit
3250 3307 * into the bootstrap mailbox area.
3251 3308 */
3252 3309 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
3253 3310 BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
3254 3311 MAILBOX_CMD_SLI4_BSIZE);
3255 3312
3256 3313 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
3257 3314 MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORDEV);
3258 3315 emlxs_data_dump(port, "MBOX CMD", iptr, 18, 0);
3259 3316 } else {
3260 3317 /*
3261 3318 * If this is not embedded, the bootstrap mailbox area
3262 3319 * MUST contain a SGE pointer to a larger area for the
3263 3320 * non-embedded mailbox command.
3264 3321 * mp will point to the actual mailbox command which
3265 3322 * should be copied into the non-embedded area.
3266 3323 */
3267 3324 nonembed = 1;
3268 3325 mb4->un.varSLIConfig.be.sge_cnt = 1;
3269 3326 mb4->un.varSLIConfig.be.payload_length = mp->size;
3270 3327 iptr = (uint32_t *)&mb4->un.varSLIConfig.be.un_hdr.hdr_req;
3271 3328 *iptr++ = (uint32_t)PADDR_LO(mp->phys);
3272 3329 *iptr++ = (uint32_t)PADDR_HI(mp->phys);
3273 3330 *iptr = mp->size;
3274 3331
3275 3332 BE_SWAP32_BUFFER(mp->virt, mp->size);
3276 3333
3277 3334 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
3278 3335 DDI_DMA_SYNC_FORDEV);
3279 3336
3280 3337 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
3281 3338 BE_SWAP32_BCOPY((uint8_t *)mb, (uint8_t *)iptr,
3282 3339 MAILBOX_CMD_SLI4_BSIZE);
3283 3340
3284 3341 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
3285 3342 EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
3286 3343 DDI_DMA_SYNC_FORDEV);
3287 3344
3288 3345 emlxs_data_dump(port, "MBOX EXT", iptr, 12, 0);
3289 3346 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
3290 3347 "Extension Addr %p %p", mp->phys,
3291 3348 (uint32_t *)((uint8_t *)mp->virt));
3292 3349 iptr = (uint32_t *)((uint8_t *)mp->virt);
3293 3350 emlxs_data_dump(port, "EXT AREA", (uint32_t *)mp->virt, 24, 0);
3294 3351 }
3295 3352
3296 3353
3297 3354 /* NOTE: tmo is in 10ms ticks */
3298 3355 if (!emlxs_issue_bootstrap_mb(hba, tmo)) {
3299 3356 return (MBX_TIMEOUT);
3300 3357 }
3301 3358
3302 3359 if ((mb->mbxCommand != MBX_SLI_CONFIG) ||
3303 3360 (mb4->un.varSLIConfig.be.embedded)) {
3304 3361 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
3305 3362 MAILBOX_CMD_SLI4_BSIZE, DDI_DMA_SYNC_FORKERNEL);
3306 3363
3307 3364 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
3308 3365 BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
3309 3366 MAILBOX_CMD_SLI4_BSIZE);
3310 3367
3311 3368 emlxs_data_dump(port, "MBOX CMP", iptr, 18, 0);
3312 3369
3313 3370 } else {
3314 3371 EMLXS_MPDATA_SYNC(hba->sli.sli4.bootstrapmb.dma_handle, 0,
3315 3372 EMLXS_BOOTSTRAP_MB_SIZE + MBOX_EXTENSION_SIZE,
3316 3373 DDI_DMA_SYNC_FORKERNEL);
3317 3374
3318 3375 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
3319 3376 DDI_DMA_SYNC_FORKERNEL);
3320 3377
3321 3378 BE_SWAP32_BUFFER(mp->virt, mp->size);
3322 3379
3323 3380 iptr = (uint32_t *)hba->sli.sli4.bootstrapmb.virt;
3324 3381 BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)mb,
3325 3382 MAILBOX_CMD_SLI4_BSIZE);
3326 3383
3327 3384 emlxs_data_dump(port, "MBOX CMP", iptr, 12, 0);
3328 3385 iptr = (uint32_t *)((uint8_t *)mp->virt);
3329 3386 emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
3330 3387 }
3331 3388
3332 3389 #ifdef FMA_SUPPORT
3333 3390 if (nonembed && mp) {
3334 3391 if (emlxs_fm_check_dma_handle(hba, mp->dma_handle)
3335 3392 != DDI_FM_OK) {
3336 3393 EMLXS_MSGF(EMLXS_CONTEXT,
3337 3394 &emlxs_invalid_dma_handle_msg,
3338 3395 "sli4_issue_bootstrap: mp_hdl=%p",
3339 3396 mp->dma_handle);
3340 3397 return (MBXERR_DMA_ERROR);
3341 3398 }
3342 3399 }
3343 3400
3344 3401 if (emlxs_fm_check_dma_handle(hba,
3345 3402 hba->sli.sli4.bootstrapmb.dma_handle)
3346 3403 != DDI_FM_OK) {
3347 3404 EMLXS_MSGF(EMLXS_CONTEXT,
3348 3405 &emlxs_invalid_dma_handle_msg,
3349 3406 "sli4_issue_bootstrap: hdl=%p",
3350 3407 hba->sli.sli4.bootstrapmb.dma_handle);
3351 3408 return (MBXERR_DMA_ERROR);
3352 3409 }
3353 3410 #endif
3354 3411
3355 3412 return (MBX_SUCCESS);
3356 3413
3357 3414 } /* emlxs_sli4_issue_bootstrap() */
3358 3415
3359 3416
/*
 * Issue a mailbox command on an SLI-4 adapter.
 *
 *	hba	- adapter instance
 *	mbq	- mailbox request; also aliased as MAILBOX/MAILBOX4
 *	flag	- requested wait mode (MBX_NOWAIT or a waiting mode);
 *		  adjusted below to MBX_SLEEP or MBX_POLL depending on
 *		  whether interrupts are enabled
 *	tmo	- timeout in seconds; raised to a per-command minimum
 *
 * Flow: clamp the timeout, adjust the wait mode, reject on hardware
 * error (except a SLI_CONFIG COMMON_OPCODE_RESET), wait for or queue
 * behind any active mailbox, then dispatch via the MQ (NOWAIT/SLEEP)
 * or the bootstrap mailbox (POLL).
 *
 * Returns MBX_SUCCESS, MBX_BUSY (queued), MBX_TIMEOUT,
 * MBX_HARDWARE_ERROR, or the command's completion status.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_issue_mbox_cmd(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
    uint32_t tmo)
{
	emlxs_port_t *port;
	MAILBOX4 *mb4;
	MAILBOX *mb;
	mbox_rsp_hdr_t *hdr_rsp;
	MATCHMAP *mp;
	uint32_t *iptr;
	uint32_t rc;
	uint32_t i;
	uint32_t tmo_local;

	if (!mbq->port) {
		mbq->port = &PPORT;
	}

	port = (emlxs_port_t *)mbq->port;

	mb4 = (MAILBOX4 *)mbq;
	mb = (MAILBOX *)mbq;

	mb->mbxStatus = MBX_SUCCESS;
	rc = MBX_SUCCESS;

	/* Check for minimum timeouts */
	switch (mb->mbxCommand) {
	/* Mailbox commands that erase/write flash */
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_WRITE_NV:
	case MBX_FLASH_WR_ULA:
	case MBX_DEL_LD_ENTRY:
	case MBX_LOAD_SM:
	case MBX_DUMP_MEMORY:
	case MBX_WRITE_VPARMS:
	case MBX_ACCESS_VDATA:
		if (tmo < 300) {
			tmo = 300;
		}
		break;

	case MBX_SLI_CONFIG: {
		mbox_req_hdr_t *hdr_req;

		hdr_req = (mbox_req_hdr_t *)
		    &mb4->un.varSLIConfig.be.un_hdr.hdr_req;

		/* Flash/profile opcodes get the long (300s) minimum too */
		if (hdr_req->subsystem == IOCTL_SUBSYSTEM_COMMON) {
			switch (hdr_req->opcode) {
			case COMMON_OPCODE_WRITE_OBJ:
			case COMMON_OPCODE_READ_OBJ:
			case COMMON_OPCODE_READ_OBJ_LIST:
			case COMMON_OPCODE_DELETE_OBJ:
			case COMMON_OPCODE_SET_BOOT_CFG:
			case COMMON_OPCODE_GET_PROFILE_CFG:
			case COMMON_OPCODE_SET_PROFILE_CFG:
			case COMMON_OPCODE_GET_PROFILE_LIST:
			case COMMON_OPCODE_SET_ACTIVE_PROFILE:
			case COMMON_OPCODE_GET_PROFILE_CAPS:
			case COMMON_OPCODE_GET_MR_PROFILE_CAPS:
			case COMMON_OPCODE_SET_MR_PROFILE_CAPS:
			case COMMON_OPCODE_SET_FACTORY_PROFILE_CFG:
			case COMMON_OPCODE_SEND_ACTIVATION:
			case COMMON_OPCODE_RESET_LICENSES:
			case COMMON_OPCODE_SET_PHYSICAL_LINK_CFG_V1:
			case COMMON_OPCODE_GET_VPD_DATA:
				if (tmo < 300) {
					tmo = 300;
				}
				break;
			default:
				if (tmo < 30) {
					tmo = 30;
				}
			}
		} else if (hdr_req->subsystem == IOCTL_SUBSYSTEM_FCOE) {
			switch (hdr_req->opcode) {
			case FCOE_OPCODE_SET_FCLINK_SETTINGS:
				if (tmo < 300) {
					tmo = 300;
				}
				break;
			default:
				if (tmo < 30) {
					tmo = 30;
				}
			}
		} else {
			if (tmo < 30) {
				tmo = 30;
			}
		}

		/*
		 * Also: VENDOR_MANAGE_FFV (0x13, 0x02) (not currently used)
		 */

		break;
	}
	default:
		if (tmo < 30) {
			tmo = 30;
		}
		break;
	}

	/* Convert tmo seconds to 10 millisecond tics */
	tmo_local = tmo * 100;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Adjust wait flag */
	if (flag != MBX_NOWAIT) {
		/* Sleeping requires interrupt-driven completion */
		if (hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED) {
			flag = MBX_SLEEP;
		} else {
			flag = MBX_POLL;
		}
	} else {
		/* Must have interrupts enabled to perform MBX_NOWAIT */
		if (!(hba->sli.sli4.flag & EMLXS_SLI4_INTR_ENABLED)) {

			mb->mbxStatus = MBX_HARDWARE_ERROR;
			mutex_exit(&EMLXS_PORT_LOCK);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Interrupts disabled. %s failed.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));

			return (MBX_HARDWARE_ERROR);
		}
	}

	/* Check for hardware error ; special case SLI_CONFIG */
	/* A COMMON_OPCODE_RESET is still allowed through */
	if ((hba->flag & FC_HARDWARE_ERROR) &&
	    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
	    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
	    COMMON_OPCODE_RESET))) {
		mb->mbxStatus = MBX_HARDWARE_ERROR;

		mutex_exit(&EMLXS_PORT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "Hardware error reported. %s failed. status=%x mb=%p",
		    emlxs_mb_cmd_xlate(mb->mbxCommand), mb->mbxStatus, mb);

		return (MBX_HARDWARE_ERROR);
	}

	/* Another mailbox is already outstanding */
	if (hba->mbox_queue_flag) {
		/* If we are not polling, then queue it for later */
		if (flag == MBX_NOWAIT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Busy. %s: mb=%p NoWait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb);

			emlxs_mb_put(hba, mbq);

			HBASTATS.MboxBusy++;

			mutex_exit(&EMLXS_PORT_LOCK);

			return (MBX_BUSY);
		}

		/* Busy-wait (10ms per tick) for the active mailbox */
		while (hba->mbox_queue_flag) {
			mutex_exit(&EMLXS_PORT_LOCK);

			if (tmo_local-- == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_event_msg,
				    "Timeout. %s: mb=%p tmo=%d Waiting.",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    tmo);

				/* Non-lethalStatus mailbox timeout */
				/* Does not indicate a hardware error */
				mb->mbxStatus = MBX_TIMEOUT;
				return (MBX_TIMEOUT);
			}

			BUSYWAIT_MS(10);
			mutex_enter(&EMLXS_PORT_LOCK);

			/* Check for hardware error ; special case SLI_CONFIG */
			if ((hba->flag & FC_HARDWARE_ERROR) &&
			    ! ((mb4->mbxCommand == MBX_SLI_CONFIG) &&
			    (mb4->un.varSLIConfig.be.un_hdr.hdr_req.opcode ==
			    COMMON_OPCODE_RESET))) {
				mb->mbxStatus = MBX_HARDWARE_ERROR;

				mutex_exit(&EMLXS_PORT_LOCK);

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Hardware error reported. %s failed. "
				    "status=%x mb=%p",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    mb->mbxStatus, mb);

				return (MBX_HARDWARE_ERROR);
			}
		}
	}

	/* Initialize mailbox area */
	emlxs_mb_init(hba, mbq, flag, tmo);

	if (mb->mbxCommand == MBX_DOWN_LINK) {
		hba->sli.sli4.flag |= EMLXS_SLI4_DOWN_LINK;
	}

	mutex_exit(&EMLXS_PORT_LOCK);
	switch (flag) {

	case MBX_NOWAIT:
		/* Fire-and-forget via the MQ; completion via interrupt */
		if (mb->mbxCommand != MBX_HEARTBEAT) {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Sending. %s: mb=%p NoWait. embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}

		/* Claim the next MQ slot (wraps at max_index) */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		if (mbq->bp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "BDE virt %p phys %p size x%x",
			    ((MATCHMAP *)mbq->bp)->virt,
			    ((MATCHMAP *)mbq->bp)->phys,
			    ((MATCHMAP *)mbq->bp)->size);
			emlxs_data_dump(port, "DATA",
			    (uint32_t *)(((MATCHMAP *)mbq->bp)->virt), 30, 0);
		}
		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);
		break;

	case MBX_POLL:
		/* Synchronous: use the bootstrap mailbox and poll */
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending. %s: mb=%p Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);

		/* Clean up the mailbox area */
		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout. %s: mb=%p tmo=%x Poll. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));

			/* A polled mailbox timeout is fatal to the HBA */
			hba->flag |= FC_MBOX_TIMEOUT;
			EMLXS_STATE_CHANGE(hba, FC_ERROR);
			emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed. %s: mb=%p status=%x Poll. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}

			/* Process the result */
			if (!(mbq->flag & MBQ_PASSTHRU)) {
				if (mbq->mbox_cmpl) {
					(void) (mbq->mbox_cmpl)(hba, mbq);
				}
			}

			emlxs_mb_fini(hba, NULL, mb->mbxStatus);
		}

		/* Non-embedded responses carry their own status header */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		/* Attempt to send pending mailboxes */
		mbq = (MAILBOXQ *)emlxs_mb_get(hba);
		if (mbq) {
			/* Attempt to send pending mailboxes */
			i = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
			if ((i != MBX_BUSY) && (i != MBX_SUCCESS)) {
				emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
			}
		}
		break;

	case MBX_SLEEP:
		/* Post via MQ, then cv_wait for the completion interrupt */
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Sending. %s: mb=%p Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		}

		/* Claim the next MQ slot (wraps at max_index) */
		iptr = hba->sli.sli4.mq.addr.virt;
		iptr += (hba->sli.sli4.mq.host_index * MAILBOX_CMD_SLI4_WSIZE);
		hba->sli.sli4.mq.host_index++;
		if (hba->sli.sli4.mq.host_index >= hba->sli.sli4.mq.max_index) {
			hba->sli.sli4.mq.host_index = 0;
		}

		rc = emlxs_sli4_issue_mq(port, (MAILBOX4 *)iptr, mb, tmo_local);

		if (rc != MBX_SUCCESS) {
			break;
		}

		/* Wait for completion */
		/* The driver clock is timing the mailbox. */

		mutex_enter(&EMLXS_MBOX_LOCK);
		while (!(mbq->flag & MBQ_COMPLETED)) {
			cv_wait(&EMLXS_MBOX_CV, &EMLXS_MBOX_LOCK);
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		/* Non-embedded responses carry their own status header */
		mp = (MATCHMAP *)mbq->nonembed;
		if (mp) {
			hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
			if (hdr_rsp->status) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
				    emlxs_mb_cmd_xlate(mb->mbxCommand),
				    hdr_rsp->status, hdr_rsp->extra_status);

				mb->mbxStatus = MBX_NONEMBED_ERROR;
			}
		}
		rc = mb->mbxStatus;

		if (rc == MBX_TIMEOUT) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Timeout. %s: mb=%p tmo=%x Sleep. embedded %d",
			    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, tmo,
			    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
			    (mb4->un.varSLIConfig.be.embedded)));
		} else {
			if (mb->mbxCommand != MBX_DOWN_LOAD
			    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_mbox_detail_msg,
				    "Completed. %s: mb=%p status=%x Sleep. "
				    "embedded %d",
				    emlxs_mb_cmd_xlate(mb->mbxCommand), mb, rc,
				    ((mb->mbxCommand != MBX_SLI_CONFIG) ? 1 :
				    (mb4->un.varSLIConfig.be.embedded)));
			}
		}
		break;
	}

	return (rc);

} /* emlxs_sli4_issue_mbox_cmd() */
3755 3812
3756 3813
3757 3814
3758 3815 /*ARGSUSED*/
3759 3816 static uint32_t
3760 3817 emlxs_sli4_issue_mbox_cmd4quiesce(emlxs_hba_t *hba, MAILBOXQ *mbq, int32_t flag,
3761 3818 uint32_t tmo)
3762 3819 {
3763 3820 emlxs_port_t *port = &PPORT;
3764 3821 MAILBOX *mb;
3765 3822 mbox_rsp_hdr_t *hdr_rsp;
3766 3823 MATCHMAP *mp;
3767 3824 uint32_t rc;
3768 3825 uint32_t tmo_local;
3769 3826
3770 3827 mb = (MAILBOX *)mbq;
3771 3828
3772 3829 mb->mbxStatus = MBX_SUCCESS;
3773 3830 rc = MBX_SUCCESS;
3774 3831
3775 3832 if (tmo < 30) {
3776 3833 tmo = 30;
3777 3834 }
3778 3835
3779 3836 /* Convert tmo seconds to 10 millisecond tics */
3780 3837 tmo_local = tmo * 100;
3781 3838
3782 3839 flag = MBX_POLL;
3783 3840
3784 3841 /* Check for hardware error */
3785 3842 if (hba->flag & FC_HARDWARE_ERROR) {
3786 3843 mb->mbxStatus = MBX_HARDWARE_ERROR;
3787 3844 return (MBX_HARDWARE_ERROR);
3788 3845 }
3789 3846
3790 3847 /* Initialize mailbox area */
3791 3848 emlxs_mb_init(hba, mbq, flag, tmo);
3792 3849
3793 3850 switch (flag) {
3794 3851
3795 3852 case MBX_POLL:
3796 3853
3797 3854 rc = emlxs_sli4_issue_bootstrap(hba, mb, tmo_local);
3798 3855
3799 3856 /* Clean up the mailbox area */
3800 3857 if (rc == MBX_TIMEOUT) {
3801 3858 hba->flag |= FC_MBOX_TIMEOUT;
3802 3859 EMLXS_STATE_CHANGE(hba, FC_ERROR);
3803 3860 emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);
3804 3861
3805 3862 } else {
3806 3863 /* Process the result */
3807 3864 if (!(mbq->flag & MBQ_PASSTHRU)) {
3808 3865 if (mbq->mbox_cmpl) {
3809 3866 (void) (mbq->mbox_cmpl)(hba, mbq);
3810 3867 }
3811 3868 }
3812 3869
3813 3870 emlxs_mb_fini(hba, NULL, mb->mbxStatus);
3814 3871 }
3815 3872
3816 3873 mp = (MATCHMAP *)mbq->nonembed;
3817 3874 if (mp) {
3818 3875 hdr_rsp = (mbox_rsp_hdr_t *)mp->virt;
3819 3876 if (hdr_rsp->status) {
3820 3877 EMLXS_MSGF(EMLXS_CONTEXT,
3821 3878 &emlxs_mbox_detail_msg,
3822 3879 "%s: MBX_NONEMBED_ERROR: 0x%x, 0x%x",
3823 3880 emlxs_mb_cmd_xlate(mb->mbxCommand),
3824 3881 hdr_rsp->status, hdr_rsp->extra_status);
3825 3882
3826 3883 mb->mbxStatus = MBX_NONEMBED_ERROR;
3827 3884 }
3828 3885 }
3829 3886 rc = mb->mbxStatus;
3830 3887
3831 3888 break;
3832 3889 }
3833 3890
3834 3891 return (rc);
3835 3892
3836 3893 } /* emlxs_sli4_issue_mbox_cmd4quiesce() */
3837 3894
3838 3895
3839 3896
#ifdef SFCT_SUPPORT
/*
 * Prepare an SFCT (COMSTAR FC target mode) request for SLI-4 WQE
 * submission.  Three cases are dispatched on iocb->ULPCOMMAND:
 *
 *   CMD_ABORT_XRI_CX  - build an abort WQE for an outstanding exchange
 *   CMD_FCP_TRSP64_CX - build an FCP target response (status) WQE
 *   otherwise         - build a TSEND/TRECEIVE data-phase WQE for the
 *                       fct_cmd attached to cmd_sbp
 *
 * Returns IOERR_SUCCESS on success, FC_TRAN_BUSY when no exchange can
 * be allocated, or 0xff on RPI/XRI lookup failure (in which case the
 * pkt state is set to IOSTAT_LOCAL_REJECT first).
 */
/*ARGSUSED*/
extern uint32_t
emlxs_sli4_prep_fct_iocb(emlxs_port_t *port, emlxs_buf_t *cmd_sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg = &CFG;
	fct_cmd_t *fct_cmd;
	stmf_data_buf_t *dbuf;
	scsi_task_t *fct_task;
	fc_packet_t *pkt;
	CHANNEL *cp;
	XRIobj_t *xrip;
	emlxs_node_t *ndlp;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	ULP_SGE64 stage_sge;	/* SGE staged locally, then byte-swapped in */
	ULP_SGE64 *sge;
	RPIobj_t *rpip;
	int32_t sge_size;
	uint64_t sge_addr;
	uint32_t did;
	uint32_t timeout;

	ddi_dma_cookie_t *cp_cmd;

	pkt = PRIV2PKT(cmd_sbp);

	cp = (CHANNEL *)cmd_sbp->channel;

	iocbq = &cmd_sbp->iocbq;
	iocb = &iocbq->iocb;

	did = cmd_sbp->did;
	if (iocb->ULPCOMMAND == CMD_ABORT_XRI_CX) {

		/* Abort path: locate the node's RPI (or fall back). */
		ndlp = cmd_sbp->node;
		rpip = EMLXS_NODE_TO_RPI(port, ndlp);

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, cmd_sbp, rpip,
		    EMLXS_XRI_SOL_BLS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "FCT Abort Request: xri=%d iotag=%d sbp=%p rxid=%x",
		    xrip->XRI, xrip->iotag, cmd_sbp, pkt->pkt_cmd_fhdr.rx_id);

		cmd_sbp->xrip = xrip;

		cp->ulpSendCmd++;

		/* Initialize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		/*
		 * Don't give the abort priority, we want the IOCB
		 * we are aborting to be processed first.
		 */
		iocbq->flag |= IOCB_SPECIAL;

		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		/* NOTE(review): the re-assignment below is redundant */
		wqe = &iocbq->wqe;
		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
		wqe->RequestTag = xrip->iotag;
		wqe->AbortTag = pkt->pkt_cmd_fhdr.rx_id;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = 0xffff;
		wqe->CmdType = WQE_TYPE_ABORT;

		/* IA set when link is down — presumably suppresses the */
		/* on-the-wire ABTS; confirm against SLI-4 spec. */
		if (hba->state >= FC_LINK_UP) {
			wqe->un.Abort.IA = 0;
		} else {
			wqe->un.Abort.IA = 1;
		}

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + pkt->pkt_timeout +
		    ((pkt->pkt_timeout > 0xff) ? 0 : 10);

		return (IOERR_SUCCESS);

	} else if (iocb->ULPCOMMAND == CMD_FCP_TRSP64_CX) {

		/* Target response (FCP status) path. */
		timeout = pkt->pkt_timeout;
		ndlp = cmd_sbp->node;
		if (!ndlp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Unable to find rpi. did=0x%x", did);

			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		cp->ulpSendCmd++;

		/* Initialize iocbq */
		iocbq->port = (void *)port;
		iocbq->node = (void *)ndlp;
		iocbq->channel = (void *)cp;

		wqe = &iocbq->wqe;
		bzero((void *)wqe, sizeof (emlxs_wqe_t));

		/* Bind this response to the exchange of the original */
		/* unsolicited command (identified by its rx_id). */
		xrip = emlxs_sli4_register_xri(port, cmd_sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Unable to register xri %x. did=0x%x",
			    pkt->pkt_cmd_fhdr.rx_id, did);

			emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		cmd_sbp->iotag = xrip->iotag;
		cmd_sbp->channel = cp;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
		cp_cmd = pkt->pkt_cmd_cookie;
#else
		cp_cmd = &pkt->pkt_cmd_cookie;
#endif /* >= EMLXS_MODREV3 */

		sge_size = pkt->pkt_cmdlen;
		/* Make size a multiple of 4 */
		if (sge_size & 3) {
			sge_size = (sge_size + 3) & 0xfffffffc;
		}
		sge_addr = cp_cmd->dmac_laddress;
		sge = xrip->SGList->virt;

		stage_sge.addrHigh = PADDR_HI(sge_addr);
		stage_sge.addrLow = PADDR_LO(sge_addr);
		stage_sge.length = sge_size;
		stage_sge.offset = 0;
		stage_sge.type = 0;
		stage_sge.last = 1;

		/* Copy staged SGE into SGL */
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge,
		    (uint8_t *)sge, sizeof (ULP_SGE64));

		/* Words 0-3 */
		wqe->un.FcpCmd.Payload.addrHigh = stage_sge.addrHigh;
		wqe->un.FcpCmd.Payload.addrLow = stage_sge.addrLow;
		wqe->un.FcpCmd.Payload.tus.f.bdeSize = sge_size;
		wqe->un.FcpCmd.PayloadLength = sge_size;

		/* Word 6 */
		wqe->ContextTag = ndlp->nlp_Rpi;
		wqe->XRITag = xrip->XRI;

		/* Word 7 */
		wqe->Command = iocb->ULPCOMMAND;
		wqe->Class = cmd_sbp->class;
		wqe->ContextType = WQE_RPI_CONTEXT;
		wqe->Timer = ((timeout > 0xff) ? 0 : timeout);

		/* Word 8 */
		wqe->AbortTag = 0;

		/* Word 9 */
		wqe->RequestTag = xrip->iotag;
		wqe->OXId = (uint16_t)xrip->rx_id;

		/* Word 10 */
		if (xrip->flag & EMLXS_XRI_BUSY) {
			wqe->XC = 1;
		}

		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->QOSd = 1;
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		/* Word 11 */
		wqe->CmdType = WQE_TYPE_TRSP;
		wqe->CQId = (uint16_t)0xffff; /* default CQ for response */

		/* Set the pkt timer */
		cmd_sbp->ticks = hba->timer_tics + timeout +
		    ((timeout > 0xff) ? 0 : 10);

		/* Flush the response payload out to the device. */
		if (pkt->pkt_cmdlen) {
			EMLXS_MPDATA_SYNC(pkt->pkt_cmd_dma, 0, pkt->pkt_cmdlen,
			    DDI_DMA_SYNC_FORDEV);
		}

		return (IOERR_SUCCESS);
	}

	/* Data phase (TSEND / TRECEIVE) path. */
	fct_cmd = cmd_sbp->fct_cmd;
	did = fct_cmd->cmd_rportid;
	dbuf = cmd_sbp->fct_buf;
	fct_task = (scsi_task_t *)fct_cmd->cmd_specific;
	ndlp = *(emlxs_node_t **)fct_cmd->cmd_rp->rp_fca_private;
	if (!ndlp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}


	/* Initialize iocbq */
	iocbq->port = (void *) port;
	iocbq->node = (void *)ndlp;
	iocbq->channel = (void *) cp;

	wqe = &iocbq->wqe;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));

	/* The exchange must already exist (set up when the unsolicited */
	/* command arrived). */
	xrip = cmd_sbp->xrip;
	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find xri. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_NO_XRI, 0);
		return (0xff);
	}

	if (emlxs_sli4_register_xri(port, cmd_sbp,
	    xrip->XRI, ndlp->nlp_DID) == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to register xri. did=0x%x", did);

		emlxs_set_pkt_state(cmd_sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_NO_XRI, 0);
		return (0xff);
	}
	cmd_sbp->iotag = xrip->iotag;
	cmd_sbp->channel = cp;

	/* Timeout: at least 60s (2 * RATOV), or effectively disabled. */
	if (cfg[CFG_TIMEOUT_ENABLE].current) {
		timeout =
		    ((2 * hba->fc_ratov) < 60) ? 60 : (2 * hba->fc_ratov);
	} else {
		timeout = 0x80000000;
	}
	cmd_sbp->ticks =
	    hba->timer_tics + timeout + ((timeout > 0xff) ? 0 : 10);


	iocb->ULPCT = 0;
	if (fct_task->task_flags & TF_WRITE_DATA) {
		iocb->ULPCOMMAND = CMD_FCP_TRECEIVE64_CX;
		wqe->CmdType = WQE_TYPE_TRECEIVE;	/* Word 11 */

	} else { /* TF_READ_DATA */

		iocb->ULPCOMMAND = CMD_FCP_TSEND64_CX;
		wqe->CmdType = WQE_TYPE_TSEND;	/* Word 11 */

		if ((dbuf->db_data_size >=
		    fct_task->task_expected_xfer_length)) {
			/* enable auto-rsp AP feature */
			wqe->AR = 0x1;
			iocb->ULPCT = 0x1; /* for cmpl */
		}
	}

	(void) emlxs_sli4_fct_bde_setup(port, cmd_sbp);

	/* Word 6 */
	wqe->ContextTag = ndlp->nlp_Rpi;
	wqe->XRITag = xrip->XRI;

	/* Word 7 */
	wqe->Command = iocb->ULPCOMMAND;
	wqe->Class = cmd_sbp->class;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->Timer = ((timeout > 0xff) ? 0 : timeout);
	wqe->PU = 1;

	/* Word 8 */
	wqe->AbortTag = 0;

	/* Word 9 */
	wqe->RequestTag = xrip->iotag;
	wqe->OXId = (uint16_t)fct_cmd->cmd_oxid;

	/* Word 10 */
	if (xrip->flag & EMLXS_XRI_BUSY) {
		wqe->XC = 1;
	}

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->QOSd = 1;
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}

	/* Word 11 */
	wqe->CQId = (uint16_t)0xffff; /* default CQ for response */

	/* Word 12 */
	wqe->CmdSpecific = dbuf->db_data_size;

	return (IOERR_SUCCESS);

} /* emlxs_sli4_prep_fct_iocb() */
#endif /* SFCT_SUPPORT */
4166 4223
4167 4224
/*
 * Prepare a solicited FCP initiator command for SLI-4 WQE submission.
 *
 * Resolves the target node's RPI, allocates an exchange (XRI), builds
 * the SGL via emlxs_sli4_bde_setup(), then fills in the WQE as an
 * ICMND/IREAD/IWRITE depending on data length and transfer direction.
 *
 * Returns FC_SUCCESS, FC_TRAN_BUSY (no exchange / SGE setup failure),
 * or 0xff when no RPI exists (pkt state set to IOSTAT_LOCAL_REJECT).
 */
/*ARGSUSED*/
extern uint32_t
emlxs_sli4_prep_fcp_iocb(emlxs_port_t *port, emlxs_buf_t *sbp, int channel)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	CHANNEL *cp;
	RPIobj_t *rpip;
	XRIobj_t *xrip;
	emlxs_wqe_t *wqe;
	IOCBQ *iocbq;
	IOCB *iocb;
	NODELIST *node;
	uint16_t iotag;
	uint32_t did;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	cp = &hba->chan[channel];

	iocbq = &sbp->iocbq;
	iocbq->channel = (void *) cp;
	iocbq->port = (void *) port;

	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));

	/* Find target node object */
	node = (NODELIST *)iocbq->node;
	rpip = EMLXS_NODE_TO_RPI(port, node);

	if (!rpip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Unable to find rpi. did=0x%x", did);

		emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
		    IOERR_INVALID_RPI, 0);
		return (0xff);
	}

	sbp->channel = cp;
	/* Next allocate an Exchange for this command */
	xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
	    EMLXS_XRI_SOL_FCP_TYPE);

	if (!xrip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to allocate exchange. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}
	sbp->bmp = NULL;
	iotag = sbp->iotag;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: Prep xri=%d iotag=%d oxid=%x rpi=%d",
	    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
#endif /* DEBUG_FASTPATH */

	/* Indicate this is a FCP cmd */
	iocbq->flag |= IOCB_FCP_CMD;

	if (emlxs_sli4_bde_setup(port, sbp)) {
		/* SGE setup failed; release the exchange before bailing. */
		emlxs_sli4_free_xri(port, sbp, xrip, 1);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to setup SGE. did=0x%x", did);

		return (FC_TRAN_BUSY);
	}

	/* DEBUG */
#ifdef DEBUG_FCP
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: SGLaddr virt %p phys %p size %d", xrip->SGList->virt,
	    xrip->SGList->phys, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: SGL",
	    (uint32_t *)xrip->SGList->virt, 20, 0);
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "FCP: CMD virt %p len %d:%d:%d",
	    pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen, pkt->pkt_datalen);
	emlxs_data_dump(port, "FCP: CMD", (uint32_t *)pkt->pkt_cmd, 10, 0);
#endif /* DEBUG_FCP */

	/* Flush the SGL out to the device before ringing the doorbell. */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	/* if device is FCP-2 device, set the following bit */
	/* that says to run the FC-TAPE protocol. */
	if (node->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		wqe->ERP = 1;
	}

	/* Select command opcode by data length / direction. */
	if (pkt->pkt_datalen == 0) {
		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CR;
		wqe->Command = CMD_FCP_ICMND64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
	} else if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CR;
		wqe->Command = CMD_FCP_IREAD64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_IN;
		wqe->PU = PARM_XFER_CHECK;
	} else {
		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CR;
		wqe->Command = CMD_FCP_IWRITE64_CR;
		wqe->CmdType = WQE_TYPE_FCP_DATA_OUT;
	}
	wqe->un.FcpCmd.TotalTransferCount = pkt->pkt_datalen;

	if (!(hba->sli.sli4.param.PHWQ)) {
		wqe->DBDE = 1; /* Data type for BDE 0 */
	}
	wqe->ContextTag = rpip->RPI;
	wqe->ContextType = WQE_RPI_CONTEXT;
	wqe->XRITag = xrip->XRI;
	wqe->Timer =
	    ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->RequestTag = iotag;
	wqe->CQId = (uint16_t)0xffff; /* default CQ for response */

	return (FC_SUCCESS);
} /* emlxs_sli4_prep_fcp_iocb() */
4313 4365
4314 4366
/*
 * IP-over-FC is not implemented in this SLI-4 path; unconditionally
 * report the transport as busy so the caller fails the request.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_ip_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	return (FC_TRAN_BUSY);

} /* emlxs_sli4_prep_ip_iocb() */
4322 4374
4323 4375
/*
 * Prepare an ELS frame for SLI-4 WQE submission.
 *
 * FC_PKT_OUTBOUND packets are ELS responses bound to an existing
 * exchange (looked up by rx_id); all others are new ELS requests for
 * which an exchange is allocated.  For requests, the command payload
 * goes in SGE 0 and the response buffer in SGE 1.  The context
 * (FCFI/VPI/RPI) and ELSId are selected per ELS command code.
 *
 * Returns FC_SUCCESS, FC_TRAN_BUSY (no exchange available), or 0xff
 * on XRI/RPI lookup failure (pkt state set to IOSTAT_LOCAL_REJECT).
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_prep_els_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba = HBA;
	fc_packet_t *pkt;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	FCFIobj_t *fcfp;
	RPIobj_t *reserved_rpip = NULL;
	RPIobj_t *rpip = NULL;
	XRIobj_t *xrip;
	CHANNEL *cp;
	uint32_t did;
	uint32_t cmd;
	ULP_SGE64 stage_sge;	/* SGE staged locally, then byte-swapped in */
	ULP_SGE64 *sge;
	ddi_dma_cookie_t *cp_cmd;
	ddi_dma_cookie_t *cp_resp;
	emlxs_node_t *node;

	pkt = PRIV2PKT(sbp);
	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;
	iocb = &iocbq->iocb;
	bzero((void *)wqe, sizeof (emlxs_wqe_t));
	bzero((void *)iocb, sizeof (IOCB));
	cp = &hba->chan[hba->channel_els];

	/* Initialize iocbq */
	iocbq->port = (void *) port;
	iocbq->channel = (void *) cp;

	sbp->channel = cp;
	sbp->bmp = NULL;

#if (EMLXS_MODREV >= EMLXS_MODREV3)
	cp_cmd = pkt->pkt_cmd_cookie;
	cp_resp = pkt->pkt_resp_cookie;
#else
	cp_cmd = &pkt->pkt_cmd_cookie;
	cp_resp = &pkt->pkt_resp_cookie;
#endif /* >= EMLXS_MODREV3 */

	/* CMD payload */
	sge = &stage_sge;
	sge->addrHigh = PADDR_HI(cp_cmd->dmac_laddress);
	sge->addrLow = PADDR_LO(cp_cmd->dmac_laddress);
	sge->length = pkt->pkt_cmdlen;
	sge->offset = 0;
	sge->type = 0;

	/* ELS command code is the first word of the payload. */
	cmd = *((uint32_t *)pkt->pkt_cmd);
	cmd &= ELS_CMD_MASK;

	/* Initialize iocb */
	if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
		/* ELS Response */

		sbp->xrip = 0;
		xrip = emlxs_sli4_register_xri(port, sbp,
		    pkt->pkt_cmd_fhdr.rx_id, did);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find XRI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_NO_XRI, 0);
			return (0xff);
		}

		rpip = xrip->rpip;

		if (!rpip) {
			/* This means that we had a node registered */
			/* when the unsol request came in but the node */
			/* has since been unregistered. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to find RPI. rxid=%x",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: Prep xri=%d iotag=%d oxid=%x rpi=%d",
		    xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);

		iocb->ULPCOMMAND = CMD_XMIT_ELS_RSP64_CX;
		wqe->Command = CMD_XMIT_ELS_RSP64_CX;
		wqe->CmdType = WQE_TYPE_GEN;
		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		wqe->un.ElsRsp.Payload.addrHigh = sge->addrHigh;
		wqe->un.ElsRsp.Payload.addrLow = sge->addrLow;
		wqe->un.ElsRsp.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;
		wqe->un.ElsCmd.PayloadLength = pkt->pkt_cmdlen;

		wqe->un.ElsRsp.RemoteId = did;
		wqe->PU = 0x3;
		wqe->OXId = xrip->rx_id;

		sge->last = 1;
		/* Now sge is fully staged */

		sge = xrip->SGList->virt;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));

		if (rpip->RPI == FABRIC_RPI) {
			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;
		} else {
			wqe->ContextTag = rpip->RPI;
			wqe->ContextType = WQE_RPI_CONTEXT;
		}

		/* FLOGI ACC (pt-to-pt): respond with well-known fabric id. */
		if ((cmd == ELS_CMD_ACC) && (sbp->ucmd == ELS_CMD_FLOGI)) {
			wqe->un.ElsCmd.SP = 1;
			wqe->un.ElsCmd.LocalId = 0xFFFFFE;
		}

	} else {
		/* ELS Request */

		fcfp = port->vpip->vfip->fcfp;
		node = (emlxs_node_t *)iocbq->node;
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (!rpip) {
			/* Use the fabric rpi */
			rpip = port->vpip->fabric_rpip;
		}

		/* Next allocate an Exchange for this command */
		xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
		    EMLXS_XRI_SOL_ELS_TYPE);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "Adapter Busy. Unable to allocate exchange. "
			    "did=0x%x", did);

			return (FC_TRAN_BUSY);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: Prep xri=%d iotag=%d rpi=%d",
		    xrip->XRI, xrip->iotag, rpip->RPI);

		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CR;
		wqe->Command = CMD_ELS_REQUEST64_CR;
		wqe->CmdType = WQE_TYPE_ELS;
		if (!(hba->sli.sli4.param.PHWQ)) {
			wqe->DBDE = 1; /* Data type for BDE 0 */
		}

		wqe->un.ElsCmd.Payload.addrHigh = sge->addrHigh;
		wqe->un.ElsCmd.Payload.addrLow = sge->addrLow;
		wqe->un.ElsCmd.Payload.tus.f.bdeSize = pkt->pkt_cmdlen;

		wqe->un.ElsCmd.RemoteId = did;
		wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);

		/* setup for rsp */
		iocb->un.elsreq64.remoteID = (did == BCAST_DID) ? 0 : did;
		iocb->ULPPU = 1;	/* Wd4 is relative offset */

		/* SGE 0: command payload (not the last SGE). */
		sge->last = 0;

		sge = xrip->SGList->virt;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));

		wqe->un.ElsCmd.PayloadLength =
		    pkt->pkt_cmdlen; /* Byte offset of rsp data */

		/* RSP payload */
		sge = &stage_sge;
		sge->addrHigh = PADDR_HI(cp_resp->dmac_laddress);
		sge->addrLow = PADDR_LO(cp_resp->dmac_laddress);
		sge->length = pkt->pkt_rsplen;
		sge->offset = 0;
		sge->last = 1;
		/* Now sge is fully staged */

		/* SGE 1: response buffer. */
		sge = xrip->SGList->virt;
		sge++;
		BE_SWAP32_BCOPY((uint8_t *)&stage_sge, (uint8_t *)sge,
		    sizeof (ULP_SGE64));
#ifdef DEBUG_ELS
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: SGLaddr virt %p phys %p",
		    xrip->SGList->virt, xrip->SGList->phys);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "ELS: PAYLOAD virt %p phys %p",
		    pkt->pkt_cmd, cp_cmd->dmac_laddress);
		emlxs_data_dump(port, "ELS: SGL",
		    (uint32_t *)xrip->SGList->virt, 12, 0);
#endif /* DEBUG_ELS */

		/* Per-command context selection and FIP tagging. */
		switch (cmd) {
		case ELS_CMD_FLOGI:
			wqe->un.ElsCmd.SP = 1;

			if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) ==
			    SLI_INTF_IF_TYPE_0) {
				wqe->ContextTag = fcfp->FCFI;
				wqe->ContextType = WQE_FCFI_CONTEXT;
			} else {
				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			}

			if (hba->flag & FC_FIP_SUPPORTED) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			if (hba->topology == TOPOLOGY_LOOP) {
				wqe->un.ElsCmd.LocalId = port->did;
			}

			wqe->ELSId = WQE_ELSID_FLOGI;
			break;
		case ELS_CMD_FDISC:
			wqe->un.ElsCmd.SP = 1;
			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;

			if (hba->flag & FC_FIP_SUPPORTED) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			wqe->ELSId = WQE_ELSID_FDISC;
			break;
		case ELS_CMD_LOGO:
			if ((did == FABRIC_DID) &&
			    (hba->flag & FC_FIP_SUPPORTED)) {
				wqe->CmdType |= WQE_TYPE_MASK_FIP;
			}

			wqe->ContextTag = port->vpip->VPI;
			wqe->ContextType = WQE_VPI_CONTEXT;
			wqe->ELSId = WQE_ELSID_LOGO;
			break;
		case ELS_CMD_PLOGI:
			if (rpip->RPI == FABRIC_RPI) {
				if (hba->flag & FC_PT_TO_PT) {
					wqe->un.ElsCmd.SP = 1;
					wqe->un.ElsCmd.LocalId = port->did;
				}

				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			} else {
				wqe->ContextTag = rpip->RPI;
				wqe->ContextType = WQE_RPI_CONTEXT;
			}

			wqe->ELSId = WQE_ELSID_PLOGI;
			break;
		default:
			if (rpip->RPI == FABRIC_RPI) {
				wqe->ContextTag = port->vpip->VPI;
				wqe->ContextType = WQE_VPI_CONTEXT;
			} else {
				wqe->ContextTag = rpip->RPI;
				wqe->ContextType = WQE_RPI_CONTEXT;
			}

			wqe->ELSId = WQE_ELSID_CMD;
			break;
		}

#ifdef SFCT_SUPPORT
		/* This allows fct to abort the request */
		if (sbp->fct_cmd) {
			sbp->fct_cmd->cmd_oxid = xrip->XRI;
			sbp->fct_cmd->cmd_rxid = 0xFFFF;
		}
#endif /* SFCT_SUPPORT */
	}

	/* VPI-context ELS needs a reserved RPI for the anticipated login. */
	if (wqe->ContextType == WQE_VPI_CONTEXT) {
		reserved_rpip = emlxs_rpi_reserve_notify(port, did, xrip);

		if (!reserved_rpip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
			    "Unable to alloc reserved RPI. rxid=%x. Rejecting.",
			    pkt->pkt_cmd_fhdr.rx_id);

			emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
			    IOERR_INVALID_RPI, 0);
			return (0xff);
		}

		/* Store the reserved rpi */
		if (wqe->Command == CMD_ELS_REQUEST64_CR) {
			wqe->OXId = reserved_rpip->RPI;
		} else {
			wqe->CmdSpecific = reserved_rpip->RPI;
		}
	}

	/* Flush the SGL out to the device before ringing the doorbell. */
	EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
	    xrip->SGList->size, DDI_DMA_SYNC_FORDEV);

	if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
		wqe->CCPE = 1;
		wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
	}

	switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
	case FC_TRAN_CLASS2:
		wqe->Class = CLASS2;
		break;
	case FC_TRAN_CLASS3:
	default:
		wqe->Class = CLASS3;
		break;
	}
	sbp->class = wqe->Class;
	wqe->XRITag = xrip->XRI;
	wqe->RequestTag = xrip->iotag;
	wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
	return (FC_SUCCESS);

} /* emlxs_sli4_prep_els_iocb() */
4667 4713
4668 4714
4669 4715 /*ARGSUSED*/
4670 4716 static uint32_t
4671 4717 emlxs_sli4_prep_ct_iocb(emlxs_port_t *port, emlxs_buf_t *sbp)
4672 4718 {
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
4673 4719 emlxs_hba_t *hba = HBA;
4674 4720 fc_packet_t *pkt;
4675 4721 IOCBQ *iocbq;
4676 4722 IOCB *iocb;
4677 4723 emlxs_wqe_t *wqe;
4678 4724 NODELIST *node = NULL;
4679 4725 CHANNEL *cp;
4680 4726 RPIobj_t *rpip;
4681 4727 XRIobj_t *xrip;
4682 4728 uint32_t did;
4683 - off_t offset;
4684 4729
4685 4730 pkt = PRIV2PKT(sbp);
4686 4731 did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
4687 4732
4688 4733 iocbq = &sbp->iocbq;
4689 4734 wqe = &iocbq->wqe;
4690 4735 iocb = &iocbq->iocb;
4691 4736 bzero((void *)wqe, sizeof (emlxs_wqe_t));
4692 4737 bzero((void *)iocb, sizeof (IOCB));
4693 4738
4694 4739 cp = &hba->chan[hba->channel_ct];
4695 4740
4696 4741 iocbq->port = (void *) port;
4697 4742 iocbq->channel = (void *) cp;
4698 4743
4699 4744 sbp->bmp = NULL;
4700 4745 sbp->channel = cp;
4701 4746
4702 4747 /* Initalize wqe */
4703 4748 if (pkt->pkt_tran_type == FC_PKT_OUTBOUND) {
4704 4749 /* CT Response */
4705 4750
4706 4751 sbp->xrip = 0;
4707 4752 xrip = emlxs_sli4_register_xri(port, sbp,
4708 4753 pkt->pkt_cmd_fhdr.rx_id, did);
4709 4754
4710 4755 if (!xrip) {
4711 4756 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
4712 4757 "Unable to find XRI. rxid=%x",
4713 4758 pkt->pkt_cmd_fhdr.rx_id);
4714 4759
4715 4760 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
4716 4761 IOERR_NO_XRI, 0);
4717 4762 return (0xff);
4718 4763 }
4719 4764
4720 4765 rpip = xrip->rpip;
4721 4766
4722 4767 if (!rpip) {
4723 4768 /* This means that we had a node registered */
4724 4769 /* when the unsol request came in but the node */
4725 4770 /* has since been unregistered. */
4726 4771 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
4727 4772 "Unable to find RPI. rxid=%x",
4728 4773 pkt->pkt_cmd_fhdr.rx_id);
4729 4774
4730 4775 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
4731 4776 IOERR_INVALID_RPI, 0);
4732 4777 return (0xff);
4733 4778 }
4734 4779
4735 4780 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4736 4781 "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
4737 4782 xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
4738 4783
4739 4784 if (emlxs_sli4_bde_setup(port, sbp)) {
4740 4785 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4741 4786 "Adapter Busy. Unable to setup SGE. did=0x%x", did);
4742 4787
4743 4788 return (FC_TRAN_BUSY);
4744 4789 }
4745 4790
4746 4791 if (!(hba->model_info.chip & EMLXS_BE_CHIPS)) {
4747 4792 wqe->un.XmitSeq.Rsvd0 = 0; /* Word3 now reserved */
4748 4793 }
4749 4794
4750 4795 if (!(hba->sli.sli4.param.PHWQ)) {
4751 4796 wqe->DBDE = 1; /* Data type for BDE 0 */
4752 4797 }
4753 4798
4754 4799 iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CR;
4755 4800 wqe->CmdType = WQE_TYPE_GEN;
4756 4801 wqe->Command = CMD_XMIT_SEQUENCE64_CR;
4757 4802 wqe->LenLoc = 2;
4758 4803
4759 4804 if (((SLI_CT_REQUEST *) pkt->pkt_cmd)->CommandResponse.bits.
4760 4805 CmdRsp == (LE_SWAP16(SLI_CT_LOOPBACK))) {
4761 4806 wqe->un.XmitSeq.xo = 1;
4762 4807 } else {
4763 4808 wqe->un.XmitSeq.xo = 0;
4764 4809 }
4765 4810
4766 4811 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_LAST_SEQ) {
4767 4812 wqe->un.XmitSeq.ls = 1;
4768 4813 }
4769 4814
4770 4815 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_SEQ_INITIATIVE) {
4771 4816 wqe->un.XmitSeq.si = 1;
4772 4817 }
4773 4818
4774 4819 wqe->un.XmitSeq.DFctl = pkt->pkt_cmd_fhdr.df_ctl;
4775 4820 wqe->un.XmitSeq.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
4776 4821 wqe->un.XmitSeq.Type = pkt->pkt_cmd_fhdr.type;
4777 4822 wqe->OXId = xrip->rx_id;
4778 4823 wqe->XC = 0; /* xri_tag is a new exchange */
4779 4824 wqe->CmdSpecific = wqe->un.GenReq.Payload.tus.f.bdeSize;
4780 4825
4781 4826 } else {
4782 4827 /* CT Request */
4783 4828
4784 4829 node = (emlxs_node_t *)iocbq->node;
4785 4830 rpip = EMLXS_NODE_TO_RPI(port, node);
4786 4831
4787 4832 if (!rpip) {
4788 4833 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_failed_msg,
4789 4834 "Unable to find rpi. did=0x%x rpi=%d",
4790 4835 did, node->nlp_Rpi);
4791 4836
4792 4837 emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT,
4793 4838 IOERR_INVALID_RPI, 0);
4794 4839 return (0xff);
4795 4840 }
4796 4841
4797 4842 /* Next allocate an Exchange for this command */
4798 4843 xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
4799 4844 EMLXS_XRI_SOL_CT_TYPE);
4800 4845
4801 4846 if (!xrip) {
4802 4847 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4803 4848 "Adapter Busy. Unable to allocate exchange. "
4804 4849 "did=0x%x", did);
4805 4850
4806 4851 return (FC_TRAN_BUSY);
4807 4852 }
4808 4853
4809 4854 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4810 4855 "CT: Prep xri=%d iotag=%d oxid=%x rpi=%d",
4811 4856 xrip->XRI, xrip->iotag, xrip->rx_id, rpip->RPI);
4812 4857
4813 4858 if (emlxs_sli4_bde_setup(port, sbp)) {
4814 4859 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
4815 4860 "Adapter Busy. Unable to setup SGE. did=0x%x", did);
4816 4861
4817 4862 emlxs_sli4_free_xri(port, sbp, xrip, 1);
4818 4863 return (FC_TRAN_BUSY);
4819 4864 }
4820 4865
4821 4866 if (!(hba->sli.sli4.param.PHWQ)) {
4822 4867 wqe->DBDE = 1; /* Data type for BDE 0 */
4823 4868 }
4824 4869
|
↓ open down ↓ |
131 lines elided |
↑ open up ↑ |
4825 4870 iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CR;
4826 4871 wqe->CmdType = WQE_TYPE_GEN;
4827 4872 wqe->Command = CMD_GEN_REQUEST64_CR;
4828 4873 wqe->un.GenReq.la = 1;
4829 4874 wqe->un.GenReq.DFctl = pkt->pkt_cmd_fhdr.df_ctl;
4830 4875 wqe->un.GenReq.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
4831 4876 wqe->un.GenReq.Type = pkt->pkt_cmd_fhdr.type;
4832 4877
4833 4878 #ifdef DEBUG_CT
4834 4879 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4835 - "CT: SGLaddr virt %p phys %p", xrip->SGList.virt,
4836 - xrip->SGList.phys);
4837 - emlxs_data_dump(port, "CT: SGL", (uint32_t *)xrip->SGList.virt,
4880 + "CT: SGLaddr virt %p phys %p", xrip->SGList->virt,
4881 + xrip->SGList->phys);
4882 + emlxs_data_dump(port, "CT: SGL", (uint32_t *)xrip->SGList->virt,
4838 4883 12, 0);
4839 4884 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
4840 4885 "CT: CMD virt %p len %d:%d",
4841 4886 pkt->pkt_cmd, pkt->pkt_cmdlen, pkt->pkt_rsplen);
4842 4887 emlxs_data_dump(port, "CT: DATA", (uint32_t *)pkt->pkt_cmd,
4843 4888 20, 0);
4844 4889 #endif /* DEBUG_CT */
4845 4890
4846 4891 #ifdef SFCT_SUPPORT
4847 4892 /* This allows fct to abort the request */
4848 4893 if (sbp->fct_cmd) {
4849 4894 sbp->fct_cmd->cmd_oxid = xrip->XRI;
4850 4895 sbp->fct_cmd->cmd_rxid = 0xFFFF;
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
4851 4896 }
4852 4897 #endif /* SFCT_SUPPORT */
4853 4898 }
4854 4899
4855 4900 /* Setup for rsp */
4856 4901 iocb->un.genreq64.w5.hcsw.Rctl = pkt->pkt_cmd_fhdr.r_ctl;
4857 4902 iocb->un.genreq64.w5.hcsw.Type = pkt->pkt_cmd_fhdr.type;
4858 4903 iocb->un.genreq64.w5.hcsw.Dfctl = pkt->pkt_cmd_fhdr.df_ctl;
4859 4904 iocb->ULPPU = 1; /* Wd4 is relative offset */
4860 4905
4861 - offset = (off_t)((uint64_t)((unsigned long)
4862 - xrip->SGList.virt) -
4863 - (uint64_t)((unsigned long)
4864 - hba->sli.sli4.slim2.virt));
4906 + EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
4907 + xrip->SGList->size, DDI_DMA_SYNC_FORDEV);
4865 4908
4866 - EMLXS_MPDATA_SYNC(xrip->SGList.dma_handle, offset,
4867 - xrip->SGList.size, DDI_DMA_SYNC_FORDEV);
4868 -
4869 4909 wqe->ContextTag = rpip->RPI;
4870 4910 wqe->ContextType = WQE_RPI_CONTEXT;
4871 4911 wqe->XRITag = xrip->XRI;
4872 4912 wqe->Timer = ((pkt->pkt_timeout > 0xff) ? 0 : pkt->pkt_timeout);
4873 4913
4874 4914 if (pkt->pkt_cmd_fhdr.f_ctl & F_CTL_CHAINED_SEQ) {
4875 4915 wqe->CCPE = 1;
4876 4916 wqe->CCP = pkt->pkt_cmd_fhdr.rsvd;
4877 4917 }
4878 4918
4879 4919 switch (FC_TRAN_CLASS(pkt->pkt_tran_flags)) {
4880 4920 case FC_TRAN_CLASS2:
4881 4921 wqe->Class = CLASS2;
4882 4922 break;
4883 4923 case FC_TRAN_CLASS3:
4884 4924 default:
4885 4925 wqe->Class = CLASS3;
4886 4926 break;
4887 4927 }
4888 4928 sbp->class = wqe->Class;
4889 4929 wqe->RequestTag = xrip->iotag;
4890 4930 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4891 4931 return (FC_SUCCESS);
4892 4932
4893 4933 } /* emlxs_sli4_prep_ct_iocb() */
4894 4934
4895 4935
4896 4936 /*ARGSUSED*/
4897 4937 static int
4898 4938 emlxs_sli4_read_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
4899 4939 {
4900 4940 uint32_t *ptr;
4901 4941 EQE_u eqe;
4902 4942 int rc = 0;
4903 4943 off_t offset;
4904 4944
4905 4945 mutex_enter(&EMLXS_PORT_LOCK);
4906 4946
4907 4947 ptr = eq->addr.virt;
4908 4948 ptr += eq->host_index;
4909 4949
4910 4950 offset = (off_t)((uint64_t)((unsigned long)
4911 4951 eq->addr.virt) -
4912 4952 (uint64_t)((unsigned long)
4913 4953 hba->sli.sli4.slim2.virt));
4914 4954
4915 4955 EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
4916 4956 4096, DDI_DMA_SYNC_FORKERNEL);
4917 4957
4918 4958 eqe.word = *ptr;
4919 4959 eqe.word = BE_SWAP32(eqe.word);
4920 4960
4921 4961 if (eqe.word & EQE_VALID) {
4922 4962 rc = 1;
4923 4963 }
4924 4964
4925 4965 mutex_exit(&EMLXS_PORT_LOCK);
4926 4966
4927 4967 return (rc);
4928 4968
4929 4969 } /* emlxs_sli4_read_eq */
4930 4970
4931 4971
4932 4972 static void
4933 4973 emlxs_sli4_poll_intr(emlxs_hba_t *hba)
4934 4974 {
4935 4975 int rc = 0;
4936 4976 int i;
4937 4977 char arg[] = {0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7};
4938 4978
4939 4979 /* Check attention bits once and process if required */
4940 4980
4941 4981 for (i = 0; i < hba->intr_count; i++) {
4942 4982 rc = emlxs_sli4_read_eq(hba, &hba->sli.sli4.eq[i]);
4943 4983 if (rc == 1) {
4944 4984 break;
4945 4985 }
4946 4986 }
4947 4987
4948 4988 if (rc != 1) {
4949 4989 return;
4950 4990 }
4951 4991
4952 4992 (void) emlxs_sli4_msi_intr((char *)hba,
4953 4993 (char *)(unsigned long)arg[i]);
4954 4994
4955 4995 return;
4956 4996
4957 4997 } /* emlxs_sli4_poll_intr() */
4958 4998
4959 4999
/*
 * Decode and dispatch one asynchronous event CQE from the adapter.
 *
 * Updates the saved link event tag, then switches on the event code to
 * handle FCoE link state, FIP/FCF table, DCBX, Group 5 (QoS), native FC
 * link attention, SLI port (temperature / misconfigured optics), VF and
 * MR events.  Unknown codes are logged and ignored.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_async_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	uint8_t status;

	/* Save the event tag */
	/*
	 * NOTE(review): the tag is read via the link member of the union
	 * for every event code — presumably event_tag occupies the same
	 * words in all async CQE formats; confirm against the SLI-4 spec.
	 */
	if (hba->link_event_tag == cqe->un.link.event_tag) {
		HBASTATS.LinkMultiEvent++;
	} else if (hba->link_event_tag + 1 < cqe->un.link.event_tag) {
		/* A gap in the tag sequence means we missed event(s) */
		HBASTATS.LinkMultiEvent++;
	}
	hba->link_event_tag = cqe->un.link.event_tag;

	switch (cqe->event_code) {
	case ASYNC_EVENT_CODE_FCOE_LINK_STATE:
		HBASTATS.LinkEvent++;

		switch (cqe->un.link.link_status) {
		case ASYNC_EVENT_PHYS_LINK_UP:
			/* Physical link up alone is only logged; the */
			/* logical link event drives state changes. */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_UP. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_UP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_UP. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;

		case ASYNC_EVENT_PHYS_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: PHYS_LINK_DOWN. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;

		case ASYNC_EVENT_LOGICAL_LINK_DOWN:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: LOGICAL_LINK_DOWN. val=%d "
			    "type=%x event=%x",
			    cqe->valid, cqe->event_type, HBASTATS.LinkEvent);

			emlxs_sli4_handle_fcoe_link_event(hba, cqe);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Link Async Event: Unknown link status=%d event=%x",
			    cqe->un.link.link_status, HBASTATS.LinkEvent);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_FCOE_FIP:
		/* FCF (FCoE Forwarder) table events from the FIP engine */
		switch (cqe->un.fcoe.evt_type) {
		case ASYNC_EVENT_NEW_FCF_DISC:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_FOUND %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_found_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_FCF_TABLE_FULL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCFTAB_FULL %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_full_notify(port);
			break;
		case ASYNC_EVENT_FCF_DEAD:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_LOST %d:%d",
			    cqe->un.fcoe.ref_index, cqe->un.fcoe.fcf_count);

			(void) emlxs_fcf_lost_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		case ASYNC_EVENT_VIRT_LINK_CLEAR:
			/* Clear Virtual Link: ref_index is a VPI here */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: CVL %d",
			    cqe->un.fcoe.ref_index);

			(void) emlxs_fcf_cvl_notify(port,
			    emlxs_sli4_vpi_to_index(hba,
			    cqe->un.fcoe.ref_index));
			break;

		case ASYNC_EVENT_FCF_MODIFIED:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: FCF_CHANGED %d",
			    cqe->un.fcoe.ref_index);

			(void) emlxs_fcf_changed_notify(port,
			    cqe->un.fcoe.ref_index);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FIP Async Event: Unknown event type=%d",
			    cqe->un.fcoe.evt_type);
			break;
		}
		break;
	case ASYNC_EVENT_CODE_DCBX:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "DCBX Async Event: type=%d. Not supported.",
		    cqe->event_type);
		break;
	case ASYNC_EVENT_CODE_GRP_5:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Group 5 Async Event: type=%d.", cqe->event_type);
		/* Only the QoS link-speed sub-event carries usable data */
		if (cqe->event_type == ASYNC_EVENT_QOS_SPEED) {
			hba->qos_linkspeed = cqe->un.qos.qos_link_speed;
		}
		break;
	case ASYNC_EVENT_CODE_FC_EVENT:
		switch (cqe->event_type) {
		case ASYNC_EVENT_FC_LINK_ATT:
			HBASTATS.LinkEvent++;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Link Attention. event=%x",
			    HBASTATS.LinkEvent);

			emlxs_sli4_handle_fc_link_att(hba, cqe);
			break;
		case ASYNC_EVENT_FC_SHARED_LINK_ATT:
			HBASTATS.LinkEvent++;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Shared Link Attention. event=%x",
			    HBASTATS.LinkEvent);

			emlxs_sli4_handle_fc_link_att(hba, cqe);
			break;
		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "FC Async Event: Unknown event. type=%d event=%x",
			    cqe->event_type, HBASTATS.LinkEvent);
		}
		break;
	case ASYNC_EVENT_CODE_PORT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "SLI Port Async Event: type=%d", cqe->event_type);

		switch (cqe->event_type) {
		case ASYNC_EVENT_PORT_OTEMP:
			/* Over-temperature: log and warn the operator */
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "SLI Port Async Event: Temperature limit exceeded");
			cmn_err(CE_WARN,
			    "^%s%d: Temperature limit exceeded. Fibre channel "
			    "controller temperature %u degrees C",
			    DRIVER_NAME, hba->ddiinst,
			    BE_SWAP32(*(uint32_t *)cqe->un.port.link_status));
			break;

		case ASYNC_EVENT_PORT_NTEMP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "SLI Port Async Event: Temperature returned to "
			    "normal");
			cmn_err(CE_WARN,
			    "^%s%d: Temperature returned to normal",
			    DRIVER_NAME, hba->ddiinst);
			break;

		case ASYNC_EVENT_MISCONFIG_PORT:
			/*
			 * link_status carries one status byte per link;
			 * byte-swap in place, then pick out our link's byte.
			 */
			*((uint32_t *)cqe->un.port.link_status) =
			    BE_SWAP32(*((uint32_t *)cqe->un.port.link_status));
			status =
			    cqe->un.port.link_status[hba->sli.sli4.link_number];

			switch (status) {
			case 0 :
				/* 0 = no misconfiguration; nothing to do */
				break;

			case 1 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Physical media not "
				    "detected");
				cmn_err(CE_WARN,
				    "^%s%d: Optics faulted/incorrectly "
				    "installed/not installed - Reseat optics, "
				    "if issue not resolved, replace.",
				    DRIVER_NAME, hba->ddiinst);
				break;

			case 2 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Wrong physical "
				    "media detected");
				cmn_err(CE_WARN,
				    "^%s%d: Optics of two types installed - "
				    "Remove one optic or install matching"
				    "pair of optics.",
				    DRIVER_NAME, hba->ddiinst);
				break;

			case 3 :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Unsupported "
				    "physical media detected");
				cmn_err(CE_WARN,
				    "^%s%d: Incompatible optics - Replace "
				    "with compatible optics for card to "
				    "function.",
				    DRIVER_NAME, hba->ddiinst);
				break;

			default :
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "SLI Port Async Event: Physical media "
				    "error, status=%x", status);
				cmn_err(CE_WARN,
				    "^%s%d: Misconfigured port: status=0x%x - "
				    "Check optics on card.",
				    DRIVER_NAME, hba->ddiinst, status);
				break;
			}
			break;
		}

		break;
	case ASYNC_EVENT_CODE_VF:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "VF Async Event: type=%d",
		    cqe->event_type);
		break;
	case ASYNC_EVENT_CODE_MR:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "MR Async Event: type=%d",
		    cqe->event_type);
		break;
	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unknown Async Event: code=%d type=%d.",
		    cqe->event_code, cqe->event_type);
		break;
	}

} /* emlxs_sli4_process_async_event() */
5184 5247
5185 5248
/*
 * Complete the mailbox command signalled by a mailbox completion CQE.
 *
 * Validates the driver's mailbox state, copies the completed MQE out of
 * the mailbox queue DMA area, syncs any embedded/non-embedded payload
 * buffers, runs the command's completion callback, and finally attempts
 * to launch the next queued mailbox command.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_mbox_event(emlxs_hba_t *hba, CQE_MBOX_t *cqe)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX4 *mb;
	MATCHMAP *mbox_bp;
	MATCHMAP *mbox_nonembed;
	MAILBOXQ *mbq = NULL;
	uint32_t size;
	uint32_t *iptr;
	int rc;
	off_t offset;

	/* Entry consumed without completion carries no status to process */
	if (cqe->consumed && !cqe->completed) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Mbox event. Entry consumed but not completed");
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);
	switch (hba->mbox_queue_flag) {
	case 0:
		/* No mailbox outstanding: stray interrupt, ignore */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_mbox_intr_msg,
		    "CQ ENTRY: Mbox event. No mailbox active.");

		mutex_exit(&EMLXS_PORT_LOCK);
		return;

	case MBX_POLL:

		/* Mark mailbox complete, this should wake up any polling */
		/* threads. This can happen if interrupts are enabled while */
		/* a polled mailbox command is outstanding. If we don't set */
		/* MBQ_COMPLETED here, the polling thread may wait until */
		/* timeout error occurs */

		mutex_enter(&EMLXS_MBOX_LOCK);
		mbq = (MAILBOXQ *)hba->mbox_mbq;
		if (mbq) {
			port = (emlxs_port_t *)mbq->port;
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "CQ ENTRY: Mbox event. Completing Polled command.");
			mbq->flag |= MBQ_COMPLETED;
		}
		mutex_exit(&EMLXS_MBOX_LOCK);

		mutex_exit(&EMLXS_PORT_LOCK);
		return;

	case MBX_SLEEP:
	case MBX_NOWAIT:
		/* Check mbox_timer, it acts as a service flag too */
		/* The first to service the mbox queue will clear the timer */
		if (hba->mbox_timer) {
			hba->mbox_timer = 0;

			mutex_enter(&EMLXS_MBOX_LOCK);
			mbq = (MAILBOXQ *)hba->mbox_mbq;
			mutex_exit(&EMLXS_MBOX_LOCK);
		}

		/* mbq stays NULL if another thread already serviced it */
		if (!mbq) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Mailbox event. No service required.");
			mutex_exit(&EMLXS_PORT_LOCK);
			return;
		}

		mb = (MAILBOX4 *)mbq;
		mutex_exit(&EMLXS_PORT_LOCK);
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_completion_error_msg,
		    "CQ ENTRY: Mbox event. Invalid Mailbox flag (%x).",
		    hba->mbox_queue_flag);

		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Set port context */
	port = (emlxs_port_t *)mbq->port;

	/* Offset of the MQ ring within the slim2 DMA region */
	offset = (off_t)((uint64_t)((unsigned long)
	    hba->sli.sli4.mq.addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	/* Now that we are the owner, DMA Sync entire MQ if needed */
	/*
	 * NOTE(review): syncs FORDEV before the MQE is copied out below;
	 * FORKERNEL would normally be expected when reading — confirm.
	 */
	EMLXS_MPDATA_SYNC(hba->sli.sli4.mq.addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORDEV);

	/* Copy the completed MQE into the caller's MAILBOX4, byte-swapped */
	BE_SWAP32_BCOPY((uint8_t *)hba->mbox_mqe, (uint8_t *)mb,
	    MAILBOX_CMD_SLI4_BSIZE);

	/* Heartbeats are too frequent to be worth logging */
	if (mb->mbxCommand != MBX_HEARTBEAT) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "CQ ENTRY: Mbox event. Mbox complete. status=%x cmd=%x",
		    mb->mbxStatus, mb->mbxCommand);

		emlxs_data_dump(port, "MBOX CMP", (uint32_t *)hba->mbox_mqe,
		    12, 0);
	}

	if (mb->mbxCommand == MBX_SLI_CONFIG) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Mbox sge_cnt: %d length: %d embed: %d",
		    mb->un.varSLIConfig.be.sge_cnt,
		    mb->un.varSLIConfig.be.payload_length,
		    mb->un.varSLIConfig.be.embedded);
	}

	/* Now sync the memory buffer if one was used */
	if (mbq->bp) {
		mbox_bp = (MATCHMAP *)mbq->bp;
		EMLXS_MPDATA_SYNC(mbox_bp->dma_handle, 0, mbox_bp->size,
		    DDI_DMA_SYNC_FORKERNEL);
#ifdef FMA_SUPPORT
		if (emlxs_fm_check_dma_handle(hba, mbox_bp->dma_handle)
		    != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_process_mbox_event: hdl=%p",
			    mbox_bp->dma_handle);

			/* Report the DMA fault through the mbox status */
			mb->mbxStatus = MBXERR_DMA_ERROR;
		}
#endif
	}

	/* Now sync the memory buffer if one was used */
	if (mbq->nonembed) {
		mbox_nonembed = (MATCHMAP *)mbq->nonembed;
		size = mbox_nonembed->size;
		EMLXS_MPDATA_SYNC(mbox_nonembed->dma_handle, 0, size,
		    DDI_DMA_SYNC_FORKERNEL);
		/* Non-embedded payloads are byte-swapped in place */
		iptr = (uint32_t *)((uint8_t *)mbox_nonembed->virt);
		BE_SWAP32_BCOPY((uint8_t *)iptr, (uint8_t *)iptr, size);

#ifdef FMA_SUPPORT
		if (emlxs_fm_check_dma_handle(hba,
		    mbox_nonembed->dma_handle) != DDI_FM_OK) {
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_invalid_dma_handle_msg,
			    "sli4_process_mbox_event: hdl=%p",
			    mbox_nonembed->dma_handle);

			mb->mbxStatus = MBXERR_DMA_ERROR;
		}
#endif
		emlxs_data_dump(port, "EXT AREA", (uint32_t *)iptr, 24, 0);
	}

	/* Mailbox has been completely received at this point */

	if (mb->mbxCommand == MBX_HEARTBEAT) {
		hba->heartbeat_active = 0;
		goto done;
	}

	if (hba->mbox_queue_flag == MBX_SLEEP) {
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Received. %s: status=%x Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb->mbxStatus);
		}
	} else {
		if (mb->mbxCommand != MBX_DOWN_LOAD
		    /* && mb->mbxCommand != MBX_DUMP_MEMORY */) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
			    "Completed. %s: status=%x",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb->mbxStatus);
		}
	}

	/* Filter out passthru mailbox */
	if (mbq->flag & MBQ_PASSTHRU) {
		goto done;
	}

	if (mb->mbxStatus) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_detail_msg,
		    "%s: status=0x%x", emlxs_mb_cmd_xlate(mb->mbxCommand),
		    (uint32_t)mb->mbxStatus);
	}

	if (mbq->mbox_cmpl) {
		rc = (mbq->mbox_cmpl)(hba, mbq);

		/* If mbox was retried, return immediately */
		/* (skip cleanup; the retried command still owns the mbox) */
		if (rc) {
			return;
		}
	}

done:

	/* Clean up the mailbox area */
	emlxs_mb_fini(hba, (MAILBOX *)mb, mb->mbxStatus);

	/* Attempt to send pending mailboxes */
	mbq = (MAILBOXQ *)emlxs_mb_get(hba);
	if (mbq) {
		/* Attempt to send pending mailboxes */
		rc = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);
		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
			emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
		}
	}
	return;

} /* emlxs_sli4_process_mbox_event() */
5403 5466
5404 5467
/*
 * Translate an SLI-4 WQE completion CQE back into the legacy IOCB
 * completion format stored in sbp->iocbq.iocb.
 *
 * The rest of the driver's completion path (emlxs_proc_channel_event and
 * friends) consumes IOCBs, so each WQE command code is mapped to its
 * corresponding IOCB command and any command-specific fields (residuals,
 * response lengths, LS_RJT payload) are reconstructed from the CQE.
 */
/*ARGSUSED*/
static void
emlxs_CQE_to_IOCB(emlxs_hba_t *hba, CQE_CmplWQ_t *cqe, emlxs_buf_t *sbp)
{
#ifdef DEBUG_FASTPATH
	emlxs_port_t *port = &PPORT;
#endif /* DEBUG_FASTPATH */
	IOCBQ *iocbq;
	IOCB *iocb;
	uint32_t *iptr;
	fc_packet_t *pkt;
	emlxs_wqe_t *wqe;

	iocbq = &sbp->iocbq;
	wqe = &iocbq->wqe;	/* original request, used to recover context */
	iocb = &iocbq->iocb;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQE to IOCB: cmd:%x tag:%x xri:%d", wqe->Command,
	    wqe->RequestTag, wqe->XRITag);
#endif /* DEBUG_FASTPATH */

	/* Common completion fields */
	iocb->ULPSTATUS = cqe->Status;
	iocb->un.ulpWord[4] = cqe->Parameter;
	iocb->ULPIOTAG = cqe->RequestTag;
	iocb->ULPCONTEXT = wqe->XRITag;

	switch (wqe->Command) {

	case CMD_FCP_ICMND64_CR:
		iocb->ULPCOMMAND = CMD_FCP_ICMND64_CX;
		break;

	case CMD_FCP_IREAD64_CR:
		iocb->ULPCOMMAND = CMD_FCP_IREAD64_CX;
		iocb->ULPPU = PARM_XFER_CHECK;
		/* On an FCP response error, report the transfer residual */
		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
			iocb->un.fcpi64.fcpi_parm =
			    wqe->un.FcpCmd.TotalTransferCount -
			    cqe->CmdSpecific;
		}
		break;

	case CMD_FCP_IWRITE64_CR:
		iocb->ULPCOMMAND = CMD_FCP_IWRITE64_CX;
		if (iocb->ULPSTATUS ==  IOSTAT_FCP_RSP_ERROR) {
			/* Clamp the residual at zero for over-reports */
			if (wqe->un.FcpCmd.TotalTransferCount >
			    cqe->CmdSpecific) {
				iocb->un.fcpi64.fcpi_parm =
				    wqe->un.FcpCmd.TotalTransferCount -
				    cqe->CmdSpecific;
			} else {
				iocb->un.fcpi64.fcpi_parm = 0;
			}
		}
		break;

	case CMD_ELS_REQUEST64_CR:
		iocb->ULPCOMMAND = CMD_ELS_REQUEST64_CX;
		iocb->un.elsreq64.bdl.bdeSize = cqe->CmdSpecific;
		if (iocb->ULPSTATUS == 0) {
			iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
		}
		if (iocb->ULPSTATUS == IOSTAT_LS_RJT) {
			/* For LS_RJT, the driver populates the rsp buffer */
			pkt = PRIV2PKT(sbp);
			iptr = (uint32_t *)pkt->pkt_resp;
			*iptr++ = ELS_CMD_LS_RJT;
			*iptr = cqe->Parameter;
		}
		break;

	case CMD_GEN_REQUEST64_CR:
		iocb->ULPCOMMAND = CMD_GEN_REQUEST64_CX;
		iocb->unsli3.ext_iocb.rsplen = cqe->CmdSpecific;
		break;

	case CMD_XMIT_SEQUENCE64_CR:
		iocb->ULPCOMMAND = CMD_XMIT_SEQUENCE64_CX;
		break;

	case CMD_ABORT_XRI_CX:
		/* Aborts carry the aborted tag as their context */
		iocb->ULPCONTEXT = wqe->AbortTag;
		break;

	case CMD_FCP_TRECEIVE64_CX:
		/* free memory for XRDY */
		if (iocbq->bp) {
			emlxs_mem_buf_free(hba, iocbq->bp);
			iocbq->bp = 0;
		}

		/*FALLTHROUGH*/

	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	default:
		/* No translation required; pass the command through */
		iocb->ULPCOMMAND = wqe->Command;

	}
} /* emlxs_CQE_to_IOCB() */
5507 5570
5508 5571
/*
 * Fail every command still outstanding on the chip.
 *
 * Called when the adapter can no longer complete I/O (e.g. reset/error
 * recovery).  Each active fc_table entry is completed locally with
 * IOSTAT_LOCAL_REJECT/IOERR_SEQUENCE_TIMEOUT, its XRI is freed, and the
 * completion is either queued to the channel's response list (deferred
 * thread completion) or processed inline for polled/driver packets.
 */
/*ARGSUSED*/
static void
emlxs_sli4_hba_flush_chipq(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	CHANNEL *cp;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	uint16_t i;
	uint32_t trigger = 0;	/* set if any channel list was appended to */
	CQE_CmplWQ_t cqe;

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (i = 0; i < hba->max_iotag; i++) {
		sbp = hba->fc_table[i];
		if (sbp == NULL || sbp == STALE_PACKET) {
			continue;
		}
		/* Claim the slot before dropping the lock below */
		hba->fc_table[i] = STALE_PACKET;
		hba->io_count--;
		sbp->iotag = 0;
		mutex_exit(&EMLXS_FCTAB_LOCK);

		cp = sbp->channel;
		/* Fabricate a local-reject completion for this command */
		bzero(&cqe, sizeof (CQE_CmplWQ_t));
		cqe.RequestTag = i;
		cqe.Status = IOSTAT_LOCAL_REJECT;
		cqe.Parameter = IOERR_SEQUENCE_TIMEOUT;

		cp->hbaCmplCmd_sbp++;

#ifdef SFCT_SUPPORT
#ifdef FCT_IO_TRACE
		if (sbp->fct_cmd) {
			emlxs_fct_io_trace(port, sbp->fct_cmd,
			    EMLXS_FCT_IOCB_COMPLETE);
		}
#endif /* FCT_IO_TRACE */
#endif /* SFCT_SUPPORT */

		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			atomic_dec_32(&hba->io_active);
#ifdef NODE_THROTTLE_SUPPORT
			if (sbp->node) {
				atomic_dec_32(&sbp->node->io_active);
			}
#endif /* NODE_THROTTLE_SUPPORT */
		}

		/* Copy entry to sbp's iocbq */
		iocbq = &sbp->iocbq;
		emlxs_CQE_to_IOCB(hba, &cqe, sbp);

		iocbq->next = NULL;

		/* Exchange is no longer busy on-chip, free it */
		emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);

		if (!(sbp->pkt_flags &
		    (PACKET_POLLED | PACKET_ALLOCATED))) {
			/* Add the IOCB to the channel list */
			mutex_enter(&cp->rsp_lock);
			if (cp->rsp_head == NULL) {
				cp->rsp_head = iocbq;
				cp->rsp_tail = iocbq;
			} else {
				cp->rsp_tail->next = iocbq;
				cp->rsp_tail = iocbq;
			}
			mutex_exit(&cp->rsp_lock);
			trigger = 1;
		} else {
			/* Polled/driver packets complete inline */
			emlxs_proc_channel_event(hba, cp, iocbq);
		}
		/* Reacquire before the next fc_table iteration */
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Kick the interrupt threads for channels with queued completions */
	if (trigger) {
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->rsp_head != NULL) {
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

} /* emlxs_sli4_hba_flush_chipq() */
5598 5661
5599 5662
5600 5663 /*ARGSUSED*/
5601 5664 static void
5602 5665 emlxs_sli4_process_oor_wqe_cmpl(emlxs_hba_t *hba,
5603 5666 CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5604 5667 {
5605 5668 emlxs_port_t *port = &PPORT;
5606 5669 CHANNEL *cp;
5607 5670 uint16_t request_tag;
5608 5671
5609 5672 request_tag = cqe->RequestTag;
5610 5673
5611 5674 /* 1 to 1 mapping between CQ and channel */
5612 5675 cp = cq->channelp;
5613 5676
5614 5677 cp->hbaCmplCmd++;
5615 5678
5616 5679 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5617 5680 "CQ ENTRY: OOR Cmpl: iotag=%d", request_tag);
5618 5681
5619 5682 emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 4, 0);
5620 5683
5621 5684 } /* emlxs_sli4_process_oor_wqe_cmpl() */
5622 5685
5623 5686
5624 5687 /*ARGSUSED*/
5625 5688 static void
5626 5689 emlxs_sli4_process_wqe_cmpl(emlxs_hba_t *hba, CQ_DESC_t *cq, CQE_CmplWQ_t *cqe)
5627 5690 {
5628 5691 emlxs_port_t *port = &PPORT;
5629 5692 CHANNEL *cp;
5630 5693 emlxs_buf_t *sbp;
5631 5694 IOCBQ *iocbq;
5632 5695 uint16_t request_tag;
5633 5696 #ifdef SFCT_SUPPORT
5634 5697 #ifdef FCT_IO_TRACE
5635 5698 fct_cmd_t *fct_cmd;
5636 5699 emlxs_buf_t *cmd_sbp;
5637 5700 #endif /* FCT_IO_TRACE */
5638 5701 #endif /* SFCT_SUPPORT */
5639 5702
5640 5703 request_tag = cqe->RequestTag;
5641 5704
5642 5705 /* 1 to 1 mapping between CQ and channel */
5643 5706 cp = cq->channelp;
5644 5707
5645 5708 mutex_enter(&EMLXS_FCTAB_LOCK);
5646 5709 sbp = hba->fc_table[request_tag];
5647 5710
5648 5711 if (!sbp) {
5649 5712 cp->hbaCmplCmd++;
5650 5713 mutex_exit(&EMLXS_FCTAB_LOCK);
5651 5714 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5652 5715 "CQ ENTRY: NULL sbp. iotag=%d. Dropping...",
5653 5716 request_tag);
5654 5717 return;
5655 5718 }
5656 5719
5657 5720 if (sbp == STALE_PACKET) {
5658 5721 cp->hbaCmplCmd_sbp++;
5659 5722 mutex_exit(&EMLXS_FCTAB_LOCK);
5660 5723 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5661 5724 "CQ ENTRY: Stale sbp. iotag=%d. Dropping...", request_tag);
5662 5725 return;
5663 5726 }
5664 5727
5665 5728 if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
5666 5729 atomic_add_32(&hba->io_active, -1);
5667 5730 #ifdef NODE_THROTTLE_SUPPORT
5668 5731 if (sbp->node) {
5669 5732 atomic_add_32(&sbp->node->io_active, -1);
5670 5733 }
5671 5734 #endif /* NODE_THROTTLE_SUPPORT */
5672 5735 }
5673 5736
5674 5737 if (!(sbp->xrip)) {
5675 5738 cp->hbaCmplCmd++;
5676 5739 mutex_exit(&EMLXS_FCTAB_LOCK);
5677 5740 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5678 5741 "CQ ENTRY: NULL sbp xrip %p. iotag=%d. Dropping...",
5679 5742 sbp, request_tag);
5680 5743 return;
5681 5744 }
5682 5745
5683 5746 #ifdef DEBUG_FASTPATH
5684 5747 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5685 5748 "CQ ENTRY: process wqe compl");
5686 5749 #endif /* DEBUG_FASTPATH */
5687 5750 cp->hbaCmplCmd_sbp++;
5688 5751
5689 5752 /* Copy entry to sbp's iocbq */
5690 5753 iocbq = &sbp->iocbq;
5691 5754 emlxs_CQE_to_IOCB(hba, cqe, sbp);
5692 5755
5693 5756 iocbq->next = NULL;
5694 5757
5695 5758 if (cqe->XB) {
5696 5759 /* Mark exchange as ABORT in progress */
5697 5760 sbp->xrip->flag &= ~EMLXS_XRI_PENDING_IO;
5698 5761 sbp->xrip->flag |= EMLXS_XRI_BUSY;
5699 5762
5700 5763 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5701 5764 "CQ ENTRY: XRI BUSY: iotag=%d xri=%d", request_tag,
5702 5765 sbp->xrip->XRI);
5703 5766
5704 5767 emlxs_sli4_free_xri(port, sbp, 0, 0);
5705 5768 } else {
5706 5769 /* Exchange is no longer busy on-chip, free it */
5707 5770 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
5708 5771 }
5709 5772
5710 5773 mutex_exit(&EMLXS_FCTAB_LOCK);
5711 5774
5712 5775 #ifdef SFCT_SUPPORT
5713 5776 #ifdef FCT_IO_TRACE
5714 5777 fct_cmd = sbp->fct_cmd;
5715 5778 if (fct_cmd) {
5716 5779 cmd_sbp = (emlxs_buf_t *)fct_cmd->cmd_fca_private;
5717 5780 mutex_enter(&cmd_sbp->fct_mtx);
5718 5781 EMLXS_FCT_STATE_CHG(fct_cmd, cmd_sbp, EMLXS_FCT_IOCB_COMPLETE);
5719 5782 mutex_exit(&cmd_sbp->fct_mtx);
5720 5783 }
5721 5784 #endif /* FCT_IO_TRACE */
5722 5785 #endif /* SFCT_SUPPORT */
5723 5786
5724 5787 /*
5725 5788 * If this is NOT a polled command completion
5726 5789 * or a driver allocated pkt, then defer pkt
5727 5790 * completion.
5728 5791 */
5729 5792 if (!(sbp->pkt_flags &
5730 5793 (PACKET_POLLED | PACKET_ALLOCATED))) {
5731 5794 /* Add the IOCB to the channel list */
5732 5795 mutex_enter(&cp->rsp_lock);
5733 5796 if (cp->rsp_head == NULL) {
5734 5797 cp->rsp_head = iocbq;
5735 5798 cp->rsp_tail = iocbq;
5736 5799 } else {
5737 5800 cp->rsp_tail->next = iocbq;
5738 5801 cp->rsp_tail = iocbq;
5739 5802 }
5740 5803 mutex_exit(&cp->rsp_lock);
5741 5804
5742 5805 /* Delay triggering thread till end of ISR */
5743 5806 cp->chan_flag |= EMLXS_NEEDS_TRIGGER;
5744 5807 } else {
5745 5808 emlxs_proc_channel_event(hba, cp, iocbq);
5746 5809 }
5747 5810
5748 5811 } /* emlxs_sli4_process_wqe_cmpl() */
5749 5812
5750 5813
5751 5814 /*ARGSUSED*/
5752 5815 static void
5753 5816 emlxs_sli4_process_release_wqe(emlxs_hba_t *hba, CQ_DESC_t *cq,
5754 5817 CQE_RelWQ_t *cqe)
5755 5818 {
5756 5819 emlxs_port_t *port = &PPORT;
5757 5820 WQ_DESC_t *wq;
5758 5821 CHANNEL *cp;
5759 5822 uint32_t i;
5760 5823 uint16_t wqi;
5761 5824
5762 5825 wqi = emlxs_sli4_wqid_to_index(hba, (uint16_t)cqe->WQid);
5763 5826
5764 5827 /* Verify WQ index */
5765 5828 if (wqi == 0xffff) {
5766 5829 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5767 5830 "CQ ENTRY: Invalid WQid:%d. Dropping...",
5768 5831 cqe->WQid);
5769 5832 return;
5770 5833 }
5771 5834
5772 5835 wq = &hba->sli.sli4.wq[wqi];
5773 5836
5774 5837 #ifdef DEBUG_FASTPATH
5775 5838 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5776 5839 "CQ ENTRY: process release wqe: old %d new %d", wq->port_index,
5777 5840 cqe->WQindex);
5778 5841 #endif /* DEBUG_FASTPATH */
5779 5842
5780 5843 wq->port_index = cqe->WQindex;
5781 5844
5782 5845 /* Cmd ring may be available. Try sending more iocbs */
5783 5846 for (i = 0; i < hba->chan_count; i++) {
5784 5847 cp = &hba->chan[i];
5785 5848 if (wq == (WQ_DESC_t *)cp->iopath) {
5786 5849 emlxs_sli4_issue_iocb_cmd(hba, cp, 0);
5787 5850 }
5788 5851 }
5789 5852
5790 5853 } /* emlxs_sli4_process_release_wqe() */
5791 5854
5792 5855
5793 5856 /*ARGSUSED*/
5794 5857 emlxs_iocbq_t *
5795 5858 emlxs_sli4_rxq_get(emlxs_hba_t *hba, fc_frame_hdr_t *fchdr)
5796 5859 {
5797 5860 emlxs_queue_t *q;
5798 5861 emlxs_iocbq_t *iocbq;
5799 5862 emlxs_iocbq_t *prev;
5800 5863 fc_frame_hdr_t *fchdr2;
5801 5864 RXQ_DESC_t *rxq;
5802 5865
5803 5866 switch (fchdr->type) {
5804 5867 case 1: /* ELS */
5805 5868 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
5806 5869 break;
5807 5870 case 0x20: /* CT */
5808 5871 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
5809 5872 break;
5810 5873 default:
5811 5874 return (NULL);
5812 5875 }
5813 5876
5814 5877 mutex_enter(&rxq->lock);
5815 5878
5816 5879 q = &rxq->active;
5817 5880 iocbq = (emlxs_iocbq_t *)q->q_first;
5818 5881 prev = NULL;
5819 5882
5820 5883 while (iocbq) {
5821 5884
5822 5885 fchdr2 = (fc_frame_hdr_t *)iocbq->iocb.un.ulpWord;
5823 5886
5824 5887 if ((fchdr2->s_id == fchdr->s_id) &&
5825 5888 (fchdr2->ox_id == fchdr->ox_id) &&
5826 5889 (fchdr2->seq_id == fchdr->seq_id)) {
5827 5890 /* Remove iocbq */
5828 5891 if (prev) {
5829 5892 prev->next = iocbq->next;
5830 5893 }
5831 5894 if (q->q_first == (uint8_t *)iocbq) {
5832 5895 q->q_first = (uint8_t *)iocbq->next;
5833 5896 }
5834 5897 if (q->q_last == (uint8_t *)iocbq) {
5835 5898 q->q_last = (uint8_t *)prev;
5836 5899 }
5837 5900 q->q_cnt--;
5838 5901
5839 5902 break;
5840 5903 }
5841 5904
5842 5905 prev = iocbq;
5843 5906 iocbq = iocbq->next;
5844 5907 }
5845 5908
5846 5909 mutex_exit(&rxq->lock);
5847 5910
5848 5911 return (iocbq);
5849 5912
5850 5913 } /* emlxs_sli4_rxq_get() */
5851 5914
5852 5915
5853 5916 /*ARGSUSED*/
5854 5917 void
5855 5918 emlxs_sli4_rxq_put(emlxs_hba_t *hba, emlxs_iocbq_t *iocbq)
5856 5919 {
5857 5920 emlxs_queue_t *q;
5858 5921 fc_frame_hdr_t *fchdr;
5859 5922 RXQ_DESC_t *rxq;
5860 5923
5861 5924 fchdr = (fc_frame_hdr_t *)iocbq->iocb.RXFCHDR;
5862 5925
5863 5926 switch (fchdr->type) {
5864 5927 case 1: /* ELS */
5865 5928 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_ELS];
5866 5929 break;
5867 5930 case 0x20: /* CT */
5868 5931 rxq = &hba->sli.sli4.rxq[EMLXS_RXQ_CT];
5869 5932 break;
5870 5933 default:
5871 5934 return;
5872 5935 }
5873 5936
5874 5937 mutex_enter(&rxq->lock);
5875 5938
5876 5939 q = &rxq->active;
5877 5940
5878 5941 if (q->q_last) {
5879 5942 ((emlxs_iocbq_t *)q->q_last)->next = iocbq;
5880 5943 q->q_cnt++;
5881 5944 } else {
5882 5945 q->q_first = (uint8_t *)iocbq;
5883 5946 q->q_cnt = 1;
5884 5947 }
5885 5948
5886 5949 q->q_last = (uint8_t *)iocbq;
5887 5950 iocbq->next = NULL;
5888 5951
5889 5952 mutex_exit(&rxq->lock);
5890 5953
5891 5954 return;
5892 5955
5893 5956 } /* emlxs_sli4_rxq_put() */
5894 5957
5895 5958
5896 5959 static void
5897 5960 emlxs_sli4_rq_post(emlxs_port_t *port, uint16_t rqid)
5898 5961 {
5899 5962 emlxs_hba_t *hba = HBA;
5900 5963 emlxs_rqdbu_t rqdb;
5901 5964
5902 5965 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5903 5966 "RQ POST: rqid=%d count=1", rqid);
5904 5967
5905 5968 /* Ring the RQ doorbell once to repost the RQ buffer */
5906 5969 rqdb.word = 0;
5907 5970 rqdb.db.Qid = rqid;
5908 5971 rqdb.db.NumPosted = 1;
5909 5972
5910 5973 emlxs_sli4_write_rqdb(hba, rqdb.word);
5911 5974
5912 5975 } /* emlxs_sli4_rq_post() */
5913 5976
5914 5977
5915 5978 /*ARGSUSED*/
5916 5979 static void
5917 5980 emlxs_sli4_process_unsol_rcv(emlxs_hba_t *hba, CQ_DESC_t *cq,
5918 5981 CQE_UnsolRcv_t *cqe)
5919 5982 {
5920 5983 emlxs_port_t *port = &PPORT;
5921 5984 emlxs_port_t *vport;
5922 5985 RQ_DESC_t *hdr_rq;
5923 5986 RQ_DESC_t *data_rq;
5924 5987 MBUF_INFO *hdr_mp;
5925 5988 MBUF_INFO *data_mp;
5926 5989 MATCHMAP *seq_mp;
5927 5990 uint32_t *data;
5928 5991 fc_frame_hdr_t fchdr;
5929 5992 uint16_t hdr_rqi;
5930 5993 uint32_t host_index;
5931 5994 emlxs_iocbq_t *iocbq = NULL;
5932 5995 emlxs_iocb_t *iocb;
5933 5996 emlxs_node_t *node = NULL;
5934 5997 uint32_t i;
5935 5998 uint32_t seq_len;
5936 5999 uint32_t seq_cnt;
5937 6000 uint32_t buf_type;
5938 6001 char label[32];
5939 6002 emlxs_wqe_t *wqe;
5940 6003 CHANNEL *cp;
5941 6004 XRIobj_t *xrip;
5942 6005 RPIobj_t *rpip = NULL;
5943 6006 uint32_t cmd;
5944 6007 uint32_t posted = 0;
5945 6008 uint32_t abort = 1;
5946 6009 off_t offset;
5947 6010 uint32_t status;
5948 6011 uint32_t data_size;
5949 6012 uint16_t rqid;
5950 6013 uint32_t hdr_size;
5951 6014 fc_packet_t *pkt;
5952 6015 emlxs_buf_t *sbp;
5953 6016
5954 6017 if (cqe->Code == CQE_TYPE_UNSOL_RCV_V1) {
5955 6018 CQE_UnsolRcvV1_t *cqeV1 = (CQE_UnsolRcvV1_t *)cqe;
5956 6019
5957 6020 status = cqeV1->Status;
5958 6021 data_size = cqeV1->data_size;
5959 6022 rqid = cqeV1->RQid;
5960 6023 hdr_size = cqeV1->hdr_size;
5961 6024 } else {
5962 6025 status = cqe->Status;
5963 6026 data_size = cqe->data_size;
5964 6027 rqid = cqe->RQid;
5965 6028 hdr_size = cqe->hdr_size;
5966 6029 }
5967 6030
5968 6031 /* Validate the CQE */
5969 6032
5970 6033 /* Check status */
5971 6034 switch (status) {
5972 6035 case RQ_STATUS_SUCCESS: /* 0x10 */
5973 6036 break;
5974 6037
5975 6038 case RQ_STATUS_BUFLEN_EXCEEDED: /* 0x11 */
5976 6039 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5977 6040 "CQ ENTRY: Unsol Rcv: Payload truncated.");
5978 6041 break;
5979 6042
5980 6043 case RQ_STATUS_NEED_BUFFER: /* 0x12 */
5981 6044 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5982 6045 "CQ ENTRY: Unsol Rcv: Payload buffer needed.");
5983 6046 return;
5984 6047
5985 6048 case RQ_STATUS_FRAME_DISCARDED: /* 0x13 */
5986 6049 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
5987 6050 "CQ ENTRY: Unsol Rcv: Payload buffer discarded.");
5988 6051 return;
5989 6052
5990 6053 default:
5991 6054 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
5992 6055 "CQ ENTRY: Unsol Rcv: Unknown status=%x.",
5993 6056 status);
5994 6057 break;
5995 6058 }
5996 6059
5997 6060 /* Make sure there is a frame header */
5998 6061 if (hdr_size < sizeof (fc_frame_hdr_t)) {
5999 6062 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6000 6063 "CQ ENTRY: Unsol Rcv: FC header too small. Dropping...");
6001 6064 return;
6002 6065 }
6003 6066
6004 6067 hdr_rqi = emlxs_sli4_rqid_to_index(hba, rqid);
6005 6068
6006 6069 /* Verify RQ index */
6007 6070 if (hdr_rqi == 0xffff) {
6008 6071 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6009 6072 "CQ ENTRY: Unsol Rcv: Invalid RQID:%d. Dropping...",
6010 6073 rqid);
6011 6074 return;
6012 6075 }
6013 6076
6014 6077 hdr_rq = &hba->sli.sli4.rq[hdr_rqi];
6015 6078 data_rq = &hba->sli.sli4.rq[hdr_rqi + 1];
6016 6079
6017 6080 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6018 6081 "CQ ENTRY: Unsol Rcv:%x rqid=%d,%d index=%d status=%x "
6019 6082 "hdr_size=%d data_size=%d",
6020 6083 cqe->Code, rqid, hdr_rqi, hdr_rq->host_index, status, hdr_size,
6021 6084 data_size);
6022 6085
6023 6086 hdr_rq->num_proc++;
6024 6087
6025 6088 /* Update host index */
6026 6089 mutex_enter(&hba->sli.sli4.rq[hdr_rqi].lock);
6027 6090 host_index = hdr_rq->host_index;
6028 6091 hdr_rq->host_index++;
6029 6092
6030 6093 if (hdr_rq->host_index >= hdr_rq->max_index) {
6031 6094 hdr_rq->host_index = 0;
6032 6095 }
6033 6096 data_rq->host_index = hdr_rq->host_index;
6034 6097 mutex_exit(&hba->sli.sli4.rq[hdr_rqi].lock);
6035 6098
6036 6099 /* Get the next header rqb */
6037 6100 hdr_mp = &hdr_rq->rqb[host_index];
6038 6101
6039 6102 offset = (off_t)((uint64_t)((unsigned long)hdr_mp->virt) -
6040 6103 (uint64_t)((unsigned long)hba->sli.sli4.slim2.virt));
6041 6104
6042 6105 EMLXS_MPDATA_SYNC(hdr_mp->dma_handle, offset,
6043 6106 sizeof (fc_frame_hdr_t), DDI_DMA_SYNC_FORKERNEL);
6044 6107
6045 6108 LE_SWAP32_BCOPY(hdr_mp->virt, (uint8_t *)&fchdr,
6046 6109 sizeof (fc_frame_hdr_t));
6047 6110
6048 6111 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6049 6112 "RQ HDR[%d]: rctl:%x type:%x "
6050 6113 "sid:%x did:%x oxid:%x rxid:%x",
6051 6114 host_index, fchdr.r_ctl, fchdr.type,
6052 6115 fchdr.s_id, fchdr.d_id, fchdr.ox_id, fchdr.rx_id);
6053 6116
6054 6117 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6055 6118 "RQ HDR[%d]: fctl:%x seq_id:%x seq_cnt:%x df_ctl:%x ro:%x",
6056 6119 host_index, fchdr.f_ctl, fchdr.seq_id, fchdr.seq_cnt,
6057 6120 fchdr.df_ctl, fchdr.ro);
6058 6121
6059 6122 /* Verify fc header type */
6060 6123 switch (fchdr.type) {
6061 6124 case 0: /* BLS */
6062 6125 if (fchdr.r_ctl != 0x81) {
6063 6126 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6064 6127 "RQ ENTRY: Unexpected FC rctl (0x%x) "
6065 6128 "received. Dropping...",
6066 6129 fchdr.r_ctl);
6067 6130
6068 6131 goto done;
6069 6132 }
6070 6133
6071 6134 /* Make sure there is no payload */
6072 6135 if (data_size != 0) {
6073 6136 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6074 6137 "RQ ENTRY: ABTS payload provided. Dropping...");
6075 6138
6076 6139 goto done;
6077 6140 }
6078 6141
6079 6142 buf_type = 0xFFFFFFFF;
6080 6143 (void) strlcpy(label, "ABTS", sizeof (label));
6081 6144 cp = &hba->chan[hba->channel_els];
6082 6145 break;
6083 6146
6084 6147 case 0x01: /* ELS */
6085 6148 /* Make sure there is a payload */
6086 6149 if (data_size == 0) {
6087 6150 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6088 6151 "RQ ENTRY: Unsol Rcv: No ELS payload provided. "
6089 6152 "Dropping...");
6090 6153
6091 6154 goto done;
6092 6155 }
6093 6156
6094 6157 buf_type = MEM_ELSBUF;
6095 6158 (void) strlcpy(label, "Unsol ELS", sizeof (label));
6096 6159 cp = &hba->chan[hba->channel_els];
6097 6160 break;
6098 6161
6099 6162 case 0x20: /* CT */
6100 6163 /* Make sure there is a payload */
6101 6164 if (data_size == 0) {
6102 6165 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6103 6166 "RQ ENTRY: Unsol Rcv: No CT payload provided. "
6104 6167 "Dropping...");
6105 6168
6106 6169 goto done;
6107 6170 }
6108 6171
6109 6172 buf_type = MEM_CTBUF;
6110 6173 (void) strlcpy(label, "Unsol CT", sizeof (label));
6111 6174 cp = &hba->chan[hba->channel_ct];
6112 6175 break;
6113 6176
6114 6177 case 0x08: /* FCT */
6115 6178 /* Make sure there is a payload */
6116 6179 if (data_size == 0) {
6117 6180 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6118 6181 "RQ ENTRY: Unsol Rcv: No FCP payload provided. "
6119 6182 "Dropping...");
6120 6183
6121 6184 goto done;
6122 6185 }
6123 6186
6124 6187 buf_type = MEM_FCTBUF;
6125 6188 (void) strlcpy(label, "Unsol FCT", sizeof (label));
6126 6189 cp = &hba->chan[hba->CHANNEL_FCT];
6127 6190 break;
6128 6191
6129 6192 default:
6130 6193 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6131 6194 "RQ ENTRY: Unexpected FC type (0x%x) received. Dropping...",
6132 6195 fchdr.type);
6133 6196
6134 6197 goto done;
6135 6198 }
6136 6199 /* Fc Header is valid */
6137 6200
6138 6201 /* Check if this is an active sequence */
6139 6202 iocbq = emlxs_sli4_rxq_get(hba, &fchdr);
6140 6203
6141 6204 if (!iocbq) {
6142 6205 if (fchdr.type != 0) {
6143 6206 if (!(fchdr.f_ctl & F_CTL_FIRST_SEQ)) {
6144 6207 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6145 6208 "RQ ENTRY: %s: First of sequence not"
6146 6209 " set. Dropping...",
6147 6210 label);
6148 6211
6149 6212 goto done;
6150 6213 }
6151 6214 }
6152 6215
6153 6216 if ((fchdr.type != 0) && (fchdr.seq_cnt != 0)) {
6154 6217 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6155 6218 "RQ ENTRY: %s: Sequence count not zero (%d). "
6156 6219 "Dropping...",
6157 6220 label, fchdr.seq_cnt);
6158 6221
6159 6222 goto done;
6160 6223 }
6161 6224
6162 6225 /* Find vport */
6163 6226 for (i = 0; i < MAX_VPORTS; i++) {
6164 6227 vport = &VPORT(i);
6165 6228
6166 6229 if (vport->did == fchdr.d_id) {
6167 6230 port = vport;
6168 6231 break;
6169 6232 }
6170 6233 }
6171 6234
6172 6235 if (i == MAX_VPORTS) {
6173 6236 /* Allow unsol FLOGI & PLOGI for P2P */
6174 6237 if ((fchdr.type != 1 /* ELS*/) ||
6175 6238 ((fchdr.d_id != FABRIC_DID) &&
6176 6239 !(hba->flag & FC_PT_TO_PT))) {
6177 6240 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6178 6241 "RQ ENTRY: %s: Invalid did=%x. Dropping...",
6179 6242 label, fchdr.d_id);
6180 6243
6181 6244 goto done;
6182 6245 }
6183 6246 }
6184 6247
6185 6248 /* Allocate an IOCBQ */
6186 6249 iocbq = (emlxs_iocbq_t *)emlxs_mem_get(hba, MEM_IOCB);
6187 6250
6188 6251 if (!iocbq) {
6189 6252 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6190 6253 "RQ ENTRY: %s: Out of IOCB "
6191 6254 "resources. Dropping...",
6192 6255 label);
6193 6256
6194 6257 goto done;
6195 6258 }
6196 6259
6197 6260 seq_mp = NULL;
6198 6261 if (fchdr.type != 0) {
6199 6262 /* Allocate a buffer */
6200 6263 seq_mp = (MATCHMAP *)emlxs_mem_get(hba, buf_type);
6201 6264
6202 6265 if (!seq_mp) {
6203 6266 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6204 6267 "RQ ENTRY: %s: Out of buffer "
6205 6268 "resources. Dropping...",
6206 6269 label);
6207 6270
6208 6271 goto done;
6209 6272 }
6210 6273
6211 6274 iocbq->bp = (uint8_t *)seq_mp;
6212 6275 }
6213 6276
6214 6277 node = (void *)emlxs_node_find_did(port, fchdr.s_id, 1);
6215 6278 if (node == NULL) {
6216 6279 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6217 6280 "RQ ENTRY: %s: Node not found. sid=%x",
6218 6281 label, fchdr.s_id);
6219 6282 }
6220 6283
6221 6284 /* Initialize the iocbq */
6222 6285 iocbq->port = port;
6223 6286 iocbq->channel = cp;
6224 6287 iocbq->node = node;
6225 6288
6226 6289 iocb = &iocbq->iocb;
6227 6290 iocb->RXSEQCNT = 0;
6228 6291 iocb->RXSEQLEN = 0;
6229 6292
6230 6293 seq_len = 0;
6231 6294 seq_cnt = 0;
6232 6295
6233 6296 } else {
6234 6297
6235 6298 iocb = &iocbq->iocb;
6236 6299 port = iocbq->port;
6237 6300 node = (emlxs_node_t *)iocbq->node;
6238 6301
6239 6302 seq_mp = (MATCHMAP *)iocbq->bp;
6240 6303 seq_len = iocb->RXSEQLEN;
6241 6304 seq_cnt = iocb->RXSEQCNT;
6242 6305
6243 6306 /* Check sequence order */
6244 6307 if (fchdr.seq_cnt != seq_cnt) {
6245 6308 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6246 6309 "RQ ENTRY: %s: Out of order frame received "
6247 6310 "(%d != %d). Dropping...",
6248 6311 label, fchdr.seq_cnt, seq_cnt);
6249 6312
6250 6313 goto done;
6251 6314 }
6252 6315 }
6253 6316
6254 6317 /* We now have an iocbq */
6255 6318
6256 6319 if (!port->vpip->vfip) {
6257 6320 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6258 6321 "RQ ENTRY: %s: No fabric connection. "
6259 6322 "Dropping...",
6260 6323 label);
6261 6324
6262 6325 goto done;
6263 6326 }
6264 6327
6265 6328 /* Save the frame data to our seq buffer */
6266 6329 if (data_size && seq_mp) {
6267 6330 /* Get the next data rqb */
6268 6331 data_mp = &data_rq->rqb[host_index];
6269 6332
6270 6333 offset = (off_t)((uint64_t)((unsigned long)
6271 6334 data_mp->virt) -
6272 6335 (uint64_t)((unsigned long)
6273 6336 hba->sli.sli4.slim2.virt));
6274 6337
6275 6338 EMLXS_MPDATA_SYNC(data_mp->dma_handle, offset,
6276 6339 data_size, DDI_DMA_SYNC_FORKERNEL);
6277 6340
6278 6341 data = (uint32_t *)data_mp->virt;
6279 6342
6280 6343 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6281 6344 "RQ DAT[%d]: %08x %08x %08x %08x %08x %08x ...",
6282 6345 host_index, data[0], data[1], data[2], data[3],
6283 6346 data[4], data[5]);
6284 6347
6285 6348 /* Check sequence length */
6286 6349 if ((seq_len + data_size) > seq_mp->size) {
6287 6350 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6288 6351 "RQ ENTRY: %s: Sequence buffer overflow. "
6289 6352 "(%d > %d). Dropping...",
6290 6353 label, (seq_len + data_size), seq_mp->size);
6291 6354
6292 6355 goto done;
6293 6356 }
6294 6357
6295 6358 /* Copy data to local receive buffer */
6296 6359 bcopy((uint8_t *)data, ((uint8_t *)seq_mp->virt +
6297 6360 seq_len), data_size);
6298 6361
6299 6362 seq_len += data_size;
6300 6363 }
6301 6364
6302 6365 /* If this is not the last frame of sequence, queue it. */
6303 6366 if (!(fchdr.f_ctl & F_CTL_END_SEQ)) {
6304 6367 /* Save sequence header */
6305 6368 if (seq_cnt == 0) {
6306 6369 bcopy((uint8_t *)&fchdr, (uint8_t *)iocb->RXFCHDR,
6307 6370 sizeof (fc_frame_hdr_t));
6308 6371 }
6309 6372
6310 6373 /* Update sequence info in iocb */
6311 6374 iocb->RXSEQCNT = seq_cnt + 1;
6312 6375 iocb->RXSEQLEN = seq_len;
6313 6376
6314 6377 /* Queue iocbq for next frame */
6315 6378 emlxs_sli4_rxq_put(hba, iocbq);
6316 6379
6317 6380 /* Don't free resources */
6318 6381 iocbq = NULL;
6319 6382
6320 6383 /* No need to abort */
6321 6384 abort = 0;
6322 6385
6323 6386 goto done;
6324 6387 }
6325 6388
6326 6389 emlxs_sli4_rq_post(port, hdr_rq->qid);
6327 6390 posted = 1;
6328 6391
6329 6392 /* End of sequence found. Process request now. */
6330 6393
6331 6394 if (seq_cnt > 0) {
6332 6395 /* Retrieve first frame of sequence */
6333 6396 bcopy((uint8_t *)iocb->RXFCHDR, (uint8_t *)&fchdr,
6334 6397 sizeof (fc_frame_hdr_t));
6335 6398
6336 6399 bzero((uint8_t *)iocb, sizeof (emlxs_iocb_t));
6337 6400 }
6338 6401
6339 6402 /* Build rcv iocb and process it */
6340 6403 switch (fchdr.type) {
6341 6404 case 0: /* BLS */
6342 6405
6343 6406 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6344 6407 "RQ ENTRY: %s: oxid:%x rxid %x sid:%x. Sending BLS ACC...",
6345 6408 label, fchdr.ox_id, fchdr.rx_id, fchdr.s_id);
6346 6409
6347 6410 /* Try to send abort response */
6348 6411 if (!(pkt = emlxs_pkt_alloc(port, 0, 0, 0, KM_NOSLEEP))) {
6349 6412 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6350 6413 "RQ ENTRY: %s: Unable to alloc pkt. Dropping...",
6351 6414 label);
6352 6415 goto done;
6353 6416 }
6354 6417
6355 6418 /* Setup sbp / iocb for driver initiated cmd */
6356 6419 sbp = PKT2PRIV(pkt);
6357 6420
6358 6421 /* Free the temporary iocbq */
6359 6422 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
6360 6423
6361 6424 iocbq = (emlxs_iocbq_t *)&sbp->iocbq;
6362 6425 iocbq->port = port;
6363 6426 iocbq->channel = cp;
6364 6427 iocbq->node = node;
6365 6428
6366 6429 sbp->pkt_flags &= ~PACKET_ULP_OWNED;
6367 6430
6368 6431 if (node) {
6369 6432 sbp->node = node;
6370 6433 sbp->did = node->nlp_DID;
6371 6434 }
6372 6435
6373 6436 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
6374 6437
6375 6438 /* BLS ACC Response */
6376 6439 wqe = &iocbq->wqe;
6377 6440 bzero((void *)wqe, sizeof (emlxs_wqe_t));
6378 6441
6379 6442 iocbq->iocb.ULPCOMMAND = CMD_XMIT_BLS_RSP64_CX;
6380 6443 wqe->Command = CMD_XMIT_BLS_RSP64_CX;
6381 6444 wqe->CmdType = WQE_TYPE_GEN;
6382 6445
6383 6446 wqe->un.BlsRsp.Payload0 = 0x80;
6384 6447 wqe->un.BlsRsp.Payload1 = fchdr.seq_id;
6385 6448
6386 6449 wqe->un.BlsRsp.OXId = fchdr.ox_id;
6387 6450 wqe->un.BlsRsp.RXId = fchdr.rx_id;
6388 6451
6389 6452 wqe->un.BlsRsp.SeqCntLow = 0;
6390 6453 wqe->un.BlsRsp.SeqCntHigh = 0xFFFF;
6391 6454
6392 6455 wqe->un.BlsRsp.XO = ((fchdr.f_ctl & F_CTL_XCHG_CONTEXT)? 1:0);
6393 6456 wqe->un.BlsRsp.AR = 0;
6394 6457
6395 6458 rpip = EMLXS_NODE_TO_RPI(port, node);
6396 6459
6397 6460 if (rpip) {
6398 6461 wqe->ContextType = WQE_RPI_CONTEXT;
6399 6462 wqe->ContextTag = rpip->RPI;
6400 6463 } else {
6401 6464 wqe->ContextType = WQE_VPI_CONTEXT;
6402 6465 wqe->ContextTag = port->vpip->VPI;
6403 6466
6404 6467 rpip = emlxs_rpi_reserve_notify(port, fchdr.s_id, 0);
6405 6468
6406 6469 if (!rpip) {
6407 6470 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6408 6471 "RQ ENTRY: %s: Unable to alloc "
6409 6472 "reserved RPI. Dropping...",
6410 6473 label);
6411 6474
6412 6475 goto done;
6413 6476 }
6414 6477
6415 6478 /* Store the reserved rpi */
6416 6479 wqe->CmdSpecific = rpip->RPI;
6417 6480
6418 6481 wqe->un.BlsRsp.RemoteId = fchdr.s_id;
6419 6482 wqe->un.BlsRsp.LocalId = fchdr.d_id;
6420 6483 }
6421 6484
6422 6485 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6423 6486 wqe->CCPE = 1;
6424 6487 wqe->CCP = fchdr.rsvd;
6425 6488 }
6426 6489
6427 6490 /* Allocate an exchange for this command */
6428 6491 xrip = emlxs_sli4_alloc_xri(port, sbp, rpip,
6429 6492 EMLXS_XRI_SOL_BLS_TYPE);
6430 6493
6431 6494 if (!xrip) {
6432 6495 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6433 6496 "RQ ENTRY: %s: Unable to alloc XRI. Dropping...",
6434 6497 label);
6435 6498 goto done;
6436 6499 }
6437 6500
6438 6501 wqe->XRITag = xrip->XRI;
6439 6502 wqe->Class = CLASS3;
6440 6503 wqe->RequestTag = xrip->iotag;
6441 6504 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
6442 6505
6443 6506 sbp->ticks = hba->timer_tics + 30;
6444 6507
6445 6508 emlxs_sli4_issue_iocb_cmd(hba, iocbq->channel, iocbq);
6446 6509
6447 6510 /* The temporary iocbq has been freed already */
6448 6511 iocbq = NULL;
6449 6512
6450 6513 break;
6451 6514
6452 6515 case 1: /* ELS */
6453 6516 cmd = *((uint32_t *)seq_mp->virt);
6454 6517 cmd &= ELS_CMD_MASK;
6455 6518
6456 6519 if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED)) {
6457 6520 uint32_t dropit = 1;
6458 6521
6459 6522 /* Allow for P2P handshaking */
6460 6523 switch (cmd) {
6461 6524 case ELS_CMD_FLOGI:
6462 6525 dropit = 0;
6463 6526 break;
6464 6527
6465 6528 case ELS_CMD_PLOGI:
6466 6529 case ELS_CMD_PRLI:
6467 6530 if (hba->flag & FC_PT_TO_PT) {
6468 6531 dropit = 0;
6469 6532 }
6470 6533 break;
6471 6534 }
6472 6535
6473 6536 if (dropit) {
6474 6537 EMLXS_MSGF(EMLXS_CONTEXT,
6475 6538 &emlxs_sli_detail_msg,
6476 6539 "RQ ENTRY: %s: Port not yet enabled. "
6477 6540 "Dropping...",
6478 6541 label);
6479 6542 goto done;
6480 6543 }
6481 6544 }
6482 6545
6483 6546 rpip = NULL;
6484 6547
6485 6548 if (cmd != ELS_CMD_LOGO) {
6486 6549 rpip = EMLXS_NODE_TO_RPI(port, node);
6487 6550 }
6488 6551
6489 6552 if (!rpip) {
6490 6553 /* Use the fabric rpi */
6491 6554 rpip = port->vpip->fabric_rpip;
6492 6555 }
6493 6556
6494 6557 xrip = emlxs_sli4_reserve_xri(port, rpip,
6495 6558 EMLXS_XRI_UNSOL_ELS_TYPE, fchdr.ox_id);
6496 6559
6497 6560 if (!xrip) {
6498 6561 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6499 6562 "RQ ENTRY: %s: Out of exchange "
6500 6563 "resources. Dropping...",
6501 6564 label);
6502 6565
6503 6566 goto done;
6504 6567 }
6505 6568
6506 6569 /* Build CMD_RCV_ELS64_CX */
6507 6570 iocb->un.rcvels64.elsReq.tus.f.bdeFlags = 0;
6508 6571 iocb->un.rcvels64.elsReq.tus.f.bdeSize = seq_len;
6509 6572 iocb->un.rcvels64.elsReq.addrLow = PADDR_LO(seq_mp->phys);
6510 6573 iocb->un.rcvels64.elsReq.addrHigh = PADDR_HI(seq_mp->phys);
6511 6574 iocb->ULPBDECOUNT = 1;
6512 6575
6513 6576 iocb->un.rcvels64.remoteID = fchdr.s_id;
6514 6577 iocb->un.rcvels64.parmRo = fchdr.d_id;
6515 6578
6516 6579 iocb->ULPPU = 0x3;
6517 6580 iocb->ULPCONTEXT = xrip->XRI;
6518 6581 iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
6519 6582 iocb->ULPCLASS = CLASS3;
6520 6583 iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
6521 6584
6522 6585 iocb->unsli3.ext_rcv.seq_len = seq_len;
6523 6586 iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;
6524 6587 iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;
6525 6588
6526 6589 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6527 6590 iocb->unsli3.ext_rcv.ccpe = 1;
6528 6591 iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6529 6592 }
6530 6593
6531 6594 if (port->mode == MODE_INITIATOR) {
6532 6595 (void) emlxs_els_handle_unsol_req(port, iocbq->channel,
6533 6596 iocbq, seq_mp, seq_len);
6534 6597 }
6535 6598 #ifdef SFCT_SUPPORT
6536 6599 else if (port->mode == MODE_TARGET) {
6537 6600 (void) emlxs_fct_handle_unsol_els(port, iocbq->channel,
6538 6601 iocbq, seq_mp, seq_len);
6539 6602 }
6540 6603 #endif /* SFCT_SUPPORT */
6541 6604 break;
6542 6605
6543 6606 #ifdef SFCT_SUPPORT
6544 6607 case 8: /* FCT */
6545 6608 if (!(port->VPIobj.flag & EMLXS_VPI_PORT_ENABLED)) {
6546 6609 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6547 6610 "RQ ENTRY: %s: Port not yet enabled. "
6548 6611 "Dropping...",
6549 6612 label);
6550 6613
6551 6614 goto done;
6552 6615 }
6553 6616
6554 6617 rpip = EMLXS_NODE_TO_RPI(port, node);
6555 6618
6556 6619 if (!rpip) {
6557 6620 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6558 6621 "RQ ENTRY: %s: Port not logged in. "
6559 6622 "Dropping...",
6560 6623 label);
6561 6624
6562 6625 goto done;
6563 6626 }
6564 6627
6565 6628 xrip = emlxs_sli4_reserve_xri(port, rpip,
6566 6629 EMLXS_XRI_UNSOL_FCP_TYPE, fchdr.ox_id);
6567 6630
6568 6631 if (!xrip) {
6569 6632 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6570 6633 "RQ ENTRY: %s: Out of exchange "
6571 6634 "resources. Dropping...",
6572 6635 label);
6573 6636
6574 6637 goto done;
6575 6638 }
6576 6639
6577 6640 /* Build CMD_RCV_SEQUENCE64_CX */
6578 6641 iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
6579 6642 iocb->un.rcvseq64.rcvBde.tus.f.bdeSize = seq_len;
6580 6643 iocb->un.rcvseq64.rcvBde.addrLow = PADDR_LO(seq_mp->phys);
6581 6644 iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
6582 6645 iocb->ULPBDECOUNT = 1;
6583 6646
6584 6647 iocb->ULPPU = 0x3;
6585 6648 iocb->ULPCONTEXT = xrip->XRI;
6586 6649 iocb->ULPIOTAG = ((node)? node->nlp_Rpi:0);
6587 6650 iocb->ULPCLASS = CLASS3;
6588 6651 iocb->ULPCOMMAND = CMD_RCV_ELS64_CX;
6589 6652
6590 6653 iocb->unsli3.ext_rcv.seq_len = seq_len;
6591 6654 iocb->unsli3.ext_rcv.vpi = port->VPIobj.VPI;
|
↓ open down ↓ |
1417 lines elided |
↑ open up ↑ |
6592 6655 iocb->unsli3.ext_rcv.oxid = fchdr.ox_id;
6593 6656
6594 6657 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6595 6658 iocb->unsli3.ext_rcv.ccpe = 1;
6596 6659 iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6597 6660 }
6598 6661
6599 6662 /* pass xrip to FCT in the iocbq */
6600 6663 iocbq->sbp = xrip;
6601 6664
6602 -#define EMLXS_FIX_CISCO_BUG1
6603 -#ifdef EMLXS_FIX_CISCO_BUG1
6604 -{
6605 -uint8_t *ptr;
6606 -ptr = ((uint8_t *)seq_mp->virt);
6607 -if (((*ptr+12) != 0xa0) && (*(ptr+20) == 0x8) && (*(ptr+21) == 0x8)) {
6608 - EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6609 - "RQ ENTRY: Bad CDB fixed");
6610 - *ptr++ = 0;
6611 - *ptr = 0;
6612 -}
6613 -}
6614 -#endif
6615 6665 (void) emlxs_fct_handle_unsol_req(port, cp, iocbq,
6616 - seq_mp, seq_len);
6666 + seq_mp, seq_len);
6617 6667 break;
6618 6668 #endif /* SFCT_SUPPORT */
6619 6669
6620 6670 case 0x20: /* CT */
6621 6671 if (!(port->vpip->flag & EMLXS_VPI_PORT_ENABLED) &&
6622 6672 !(hba->flag & FC_LOOPBACK_MODE)) {
6623 6673 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6624 6674 "RQ ENTRY: %s: Port not yet enabled. "
6625 6675 "Dropping...",
6626 6676 label);
6627 6677
6628 6678 goto done;
6629 6679 }
6630 6680
6631 6681 if (!node) {
6632 6682 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6633 6683 "RQ ENTRY: %s: Node not found (did=%x). "
6634 6684 "Dropping...",
6635 6685 label, fchdr.d_id);
6636 6686
6637 6687 goto done;
6638 6688 }
6639 6689
6640 6690 rpip = EMLXS_NODE_TO_RPI(port, node);
6641 6691
6642 6692 if (!rpip) {
6643 6693 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6644 6694 "RQ ENTRY: %s: RPI not found (did=%x rpi=%d). "
6645 6695 "Dropping...",
6646 6696 label, fchdr.d_id, node->nlp_Rpi);
6647 6697
6648 6698 goto done;
6649 6699 }
6650 6700
6651 6701 xrip = emlxs_sli4_reserve_xri(port, rpip,
6652 6702 EMLXS_XRI_UNSOL_CT_TYPE, fchdr.ox_id);
6653 6703
6654 6704 if (!xrip) {
6655 6705 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6656 6706 "RQ ENTRY: %s: Out of exchange "
6657 6707 "resources. Dropping...",
6658 6708 label);
6659 6709
6660 6710 goto done;
6661 6711 }
6662 6712
6663 6713 /* Build CMD_RCV_SEQ64_CX */
6664 6714 iocb->un.rcvseq64.rcvBde.tus.f.bdeFlags = 0;
6665 6715 iocb->un.rcvseq64.rcvBde.tus.f.bdeSize = seq_len;
6666 6716 iocb->un.rcvseq64.rcvBde.addrLow = PADDR_LO(seq_mp->phys);
6667 6717 iocb->un.rcvseq64.rcvBde.addrHigh = PADDR_HI(seq_mp->phys);
6668 6718 iocb->ULPBDECOUNT = 1;
6669 6719
6670 6720 iocb->un.rcvseq64.xrsqRo = 0;
6671 6721 iocb->un.rcvseq64.w5.hcsw.Rctl = fchdr.r_ctl;
6672 6722 iocb->un.rcvseq64.w5.hcsw.Type = fchdr.type;
6673 6723 iocb->un.rcvseq64.w5.hcsw.Dfctl = fchdr.df_ctl;
6674 6724 iocb->un.rcvseq64.w5.hcsw.Fctl = fchdr.f_ctl;
6675 6725
6676 6726 iocb->ULPPU = 0x3;
6677 6727 iocb->ULPCONTEXT = xrip->XRI;
6678 6728 iocb->ULPIOTAG = rpip->RPI;
6679 6729 iocb->ULPCLASS = CLASS3;
6680 6730 iocb->ULPCOMMAND = CMD_RCV_SEQ64_CX;
6681 6731
6682 6732 iocb->unsli3.ext_rcv.seq_len = seq_len;
6683 6733 iocb->unsli3.ext_rcv.vpi = port->vpip->VPI;
6684 6734
6685 6735 if (fchdr.f_ctl & F_CTL_CHAINED_SEQ) {
6686 6736 iocb->unsli3.ext_rcv.ccpe = 1;
6687 6737 iocb->unsli3.ext_rcv.ccp = fchdr.rsvd;
6688 6738 }
6689 6739
6690 6740 (void) emlxs_ct_handle_unsol_req(port, iocbq->channel,
6691 6741 iocbq, seq_mp, seq_len);
6692 6742
6693 6743 break;
6694 6744 }
6695 6745
6696 6746 /* Sequence handled, no need to abort */
6697 6747 abort = 0;
6698 6748
6699 6749 done:
6700 6750
6701 6751 if (!posted) {
6702 6752 emlxs_sli4_rq_post(port, hdr_rq->qid);
6703 6753 }
6704 6754
6705 6755 if (abort) {
6706 6756 /* Send ABTS for this exchange */
6707 6757 /* !!! Currently, we have no implementation for this !!! */
6708 6758 abort = 0;
6709 6759 }
6710 6760
6711 6761 /* Return memory resources to pools */
6712 6762 if (iocbq) {
6713 6763 if (iocbq->bp) {
6714 6764 emlxs_mem_put(hba, buf_type, (void *)iocbq->bp);
6715 6765 iocbq->bp = 0;
6716 6766 }
6717 6767
6718 6768 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
6719 6769 }
6720 6770
6721 6771 #ifdef FMA_SUPPORT
6722 6772 if (emlxs_fm_check_dma_handle(hba,
6723 6773 hba->sli.sli4.slim2.dma_handle)
6724 6774 != DDI_FM_OK) {
6725 6775 EMLXS_MSGF(EMLXS_CONTEXT,
6726 6776 &emlxs_invalid_dma_handle_msg,
6727 6777 "sli4_process_unsol_rcv: hdl=%p",
6728 6778 hba->sli.sli4.slim2.dma_handle);
6729 6779
6730 6780 emlxs_thread_spawn(hba, emlxs_restart_thread,
6731 6781 0, 0);
6732 6782 }
6733 6783 #endif
6734 6784 return;
6735 6785
6736 6786 } /* emlxs_sli4_process_unsol_rcv() */
6737 6787
6738 6788
6739 6789 /*ARGSUSED*/
6740 6790 static void
6741 6791 emlxs_sli4_process_xri_aborted(emlxs_hba_t *hba, CQ_DESC_t *cq,
6742 6792 CQE_XRI_Abort_t *cqe)
6743 6793 {
6744 6794 emlxs_port_t *port = &PPORT;
6745 6795 XRIobj_t *xrip;
6746 6796
6747 6797 mutex_enter(&EMLXS_FCTAB_LOCK);
6748 6798
6749 6799 xrip = emlxs_sli4_find_xri(port, cqe->XRI);
6750 6800 if (xrip == NULL) {
6751 6801 /* EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg, */
6752 6802 /* "CQ ENTRY: process xri aborted ignored"); */
6753 6803
6754 6804 mutex_exit(&EMLXS_FCTAB_LOCK);
6755 6805 return;
6756 6806 }
6757 6807
6758 6808 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
6759 6809 "CQ ENTRY: XRI Aborted: xri=%d IA=%d EO=%d BR=%d",
6760 6810 cqe->XRI, cqe->IA, cqe->EO, cqe->BR);
6761 6811
6762 6812 if (!(xrip->flag & EMLXS_XRI_BUSY)) {
6763 6813 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
6764 6814 "CQ ENTRY: XRI Aborted: xri=%d flag=%x. Bad state.",
6765 6815 xrip->XRI, xrip->flag);
6766 6816
6767 6817 mutex_exit(&EMLXS_FCTAB_LOCK);
6768 6818 return;
6769 6819 }
6770 6820
6771 6821 /* Exchange is no longer busy on-chip, free it */
6772 6822 emlxs_sli4_free_xri(port, 0, xrip, 0);
6773 6823
6774 6824 mutex_exit(&EMLXS_FCTAB_LOCK);
6775 6825
6776 6826 return;
6777 6827
6778 6828 } /* emlxs_sli4_process_xri_aborted () */
6779 6829
6780 6830
/*
 * emlxs_sli4_process_cq
 *
 * Drain all currently-valid entries from one completion queue and
 * dispatch each to its type-specific handler, then ring the CQ doorbell
 * once to pop the consumed entries and re-arm the queue.
 *
 * Locking: caller holds EMLXS_PORT_LOCK on entry and expects it held on
 * return; the lock is dropped around each entry's handler call (handlers
 * may sleep or take other locks) and re-acquired before examining the
 * next entry.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_cq(emlxs_hba_t *hba, CQ_DESC_t *cq)
{
	emlxs_port_t *port = &PPORT;
	CQE_u *cqe;		/* cursor into the CQ ring */
	CQE_u cq_entry;		/* host-endian copy of current entry */
	uint32_t cqdb;		/* doorbell value to write back */
	int num_entries = 0;	/* entries consumed this pass */
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	cqe = (CQE_u *)cq->addr.virt;
	cqe += cq->host_index;

	/* CQ memory is carved out of the single slim2 DMA area; compute */
	/* this queue's offset within it for the sync call below */
	offset = (off_t)((uint64_t)((unsigned long)
	    cq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(cq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		/* word[3] carries the valid bit; read it first */
		cq_entry.word[3] = BE_SWAP32(cqe->word[3]);
		if (!(cq_entry.word[3] & CQE_VALID)) {
			break;
		}

		/* Copy the rest of the entry before handing it off, */
		/* since the hardware may reuse the slot once popped */
		cq_entry.word[2] = BE_SWAP32(cqe->word[2]);
		cq_entry.word[1] = BE_SWAP32(cqe->word[1]);
		cq_entry.word[0] = BE_SWAP32(cqe->word[0]);

#ifdef DEBUG_CQE
		emlxs_data_dump(port, "CQE", (uint32_t *)cqe, 6, 0);
#endif /* DEBUG_CQE */
		num_entries++;
		/* Clear the valid word so a wrapped ring isn't re-read */
		cqe->word[3] = 0;

		/* Advance the cursor, wrapping at the end of the ring */
		cq->host_index++;
		if (cq->host_index >= cq->max_index) {
			cq->host_index = 0;
			cqe = (CQE_u *)cq->addr.virt;
		} else {
			cqe++;
		}
		mutex_exit(&EMLXS_PORT_LOCK);

		/* Now handle specific cq type */
		if (cq->type == EMLXS_CQ_TYPE_GROUP1) {
			/* GROUP1 carries mailbox and async events */
			if (cq_entry.cqAsyncEntry.async_evt) {
				emlxs_sli4_process_async_event(hba,
				    (CQE_ASYNC_t *)&cq_entry);
			} else {
				emlxs_sli4_process_mbox_event(hba,
				    (CQE_MBOX_t *)&cq_entry);
			}
		} else { /* EMLXS_CQ_TYPE_GROUP2 */
			switch (cq_entry.cqCmplEntry.Code) {
			case CQE_TYPE_WQ_COMPLETION:
				/* Out-of-range tags get the OOR handler */
				if (cq_entry.cqCmplEntry.RequestTag <
				    hba->max_iotag) {
					emlxs_sli4_process_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				} else {
					emlxs_sli4_process_oor_wqe_cmpl(hba, cq,
					    (CQE_CmplWQ_t *)&cq_entry);
				}
				break;
			case CQE_TYPE_RELEASE_WQE:
				emlxs_sli4_process_release_wqe(hba, cq,
				    (CQE_RelWQ_t *)&cq_entry);
				break;
			case CQE_TYPE_UNSOL_RCV:
			case CQE_TYPE_UNSOL_RCV_V1:
				emlxs_sli4_process_unsol_rcv(hba, cq,
				    (CQE_UnsolRcv_t *)&cq_entry);
				break;
			case CQE_TYPE_XRI_ABORTED:
				emlxs_sli4_process_xri_aborted(hba, cq,
				    (CQE_XRI_Abort_t *)&cq_entry);
				break;
			default:
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
				    "Invalid CQ entry %d: %08x %08x %08x %08x",
				    cq_entry.cqCmplEntry.Code, cq_entry.word[0],
				    cq_entry.word[1], cq_entry.word[2],
				    cq_entry.word[3]);
				break;
			}
		}

		mutex_enter(&EMLXS_PORT_LOCK);
	}

	/* Number of times this routine gets called for this CQ */
	cq->isr_count++;

	/* num_entries is the number of CQEs we process in this specific CQ */
	cq->num_proc += num_entries;
	if (cq->max_proc < num_entries)
		cq->max_proc = num_entries;

	/* Doorbell: pop the consumed entries and re-arm the CQ */
	cqdb = cq->qid;
	cqdb |= CQ_DB_REARM;
	if (num_entries != 0) {
		cqdb |= ((num_entries << CQ_DB_POP_SHIFT) & CQ_DB_POP_MASK);
	}

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "CQE: CLEAR cqdb=%08x: pops=%d", cqdb, num_entries);
#endif /* DEBUG_FASTPATH */

	emlxs_sli4_write_cqdb(hba, cqdb);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

} /* emlxs_sli4_process_cq() */
6901 6951
6902 6952
/*
 * emlxs_sli4_process_eq
 *
 * Drain all currently-valid entries from one event queue.  Each EQE
 * names a completion queue with work pending; that CQ is drained in
 * turn via emlxs_sli4_process_cq().  Afterwards the EQ doorbell is
 * rung to clear/pop the consumed events and re-arm the EQ, and any
 * channels flagged for deferred processing get their worker threads
 * triggered.
 *
 * Locking: caller holds EMLXS_PORT_LOCK on entry and expects it held
 * on return.  intr_busy_cnt brackets the whole routine so the kill
 * path can wait for in-flight interrupt work to drain.
 */
/*ARGSUSED*/
static void
emlxs_sli4_process_eq(emlxs_hba_t *hba, EQ_DESC_t *eq)
{
	emlxs_port_t *port = &PPORT;
	uint32_t eqdb;		/* doorbell value to write back */
	uint32_t *ptr;		/* cursor into the EQ ring */
	CHANNEL *cp;
	EQE_u eqe;
	uint32_t i;
	uint16_t cqi;		/* cq[] index resolved from the CQId */
	int num_entries = 0;	/* events consumed this pass */
	off_t offset;

	/* EMLXS_PORT_LOCK must be held when entering this routine */

	hba->intr_busy_cnt ++;

	ptr = eq->addr.virt;
	ptr += eq->host_index;

	/* EQ memory is carved out of the single slim2 DMA area; compute */
	/* this queue's offset within it for the sync call below */
	offset = (off_t)((uint64_t)((unsigned long)
	    eq->addr.virt) -
	    (uint64_t)((unsigned long)
	    hba->sli.sli4.slim2.virt));

	EMLXS_MPDATA_SYNC(eq->addr.dma_handle, offset,
	    4096, DDI_DMA_SYNC_FORKERNEL);

	for (;;) {
		eqe.word = *ptr;
		eqe.word = BE_SWAP32(eqe.word);

		if (!(eqe.word & EQE_VALID)) {
			break;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE00: %08x", eqe.word);
#endif /* DEBUG_FASTPATH */

		/* Clear the slot so a wrapped ring isn't re-read */
		*ptr = 0;
		num_entries++;
		eq->host_index++;
		if (eq->host_index >= eq->max_index) {
			eq->host_index = 0;
			ptr = eq->addr.virt;
		} else {
			ptr++;
		}

		cqi = emlxs_sli4_cqid_to_index(hba, eqe.entry.CQId);

		/* Verify CQ index */
		if (cqi == 0xffff) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
			    "EQE: Invalid CQid: %d. Dropping...",
			    eqe.entry.CQId);
			continue;
		}

#ifdef DEBUG_FASTPATH
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "EQE: CQIndex:%x cqid:%x", cqi, eqe.entry.CQId);
#endif /* DEBUG_FASTPATH */

		/* Drain the CQ this event points at */
		emlxs_sli4_process_cq(hba, &hba->sli.sli4.cq[cqi]);
	}

	/* Number of times the ISR for this EQ gets called */
	eq->isr_count++;

	/* num_entries is the number of EQEs we process in this specific ISR */
	eq->num_proc += num_entries;
	if (eq->max_proc < num_entries) {
		eq->max_proc = num_entries;
	}

	eqdb = eq->qid;
	eqdb |= (EQ_DB_CLEAR | EQ_DB_EVENT | EQ_DB_REARM);

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "EQE: CLEAR eqdb=%08x pops=%d", eqdb, num_entries);
#endif /* DEBUG_FASTPATH */

	if (num_entries != 0) {
		eqdb |= ((num_entries << EQ_DB_POP_SHIFT) & EQ_DB_POP_MASK);
		/* Kick any channel worker threads flagged during CQ */
		/* processing */
		for (i = 0; i < hba->chan_count; i++) {
			cp = &hba->chan[i];
			if (cp->chan_flag & EMLXS_NEEDS_TRIGGER) {
				cp->chan_flag &= ~EMLXS_NEEDS_TRIGGER;
				emlxs_thread_trigger2(&cp->intr_thread,
				    emlxs_proc_channel, cp);
			}
		}
	}

	emlxs_sli4_write_cqdb(hba, eqdb);

	/* EMLXS_PORT_LOCK must be held when exiting this routine */

	hba->intr_busy_cnt --;

} /* emlxs_sli4_process_eq() */
7009 7059
7010 7060
#ifdef MSI_SUPPORT
/*
 * MSI/MSI-X interrupt handler.
 *
 * arg2 carries the MSI message id, which maps one-to-one onto the
 * eq[] array.  Fixed (legacy) interrupts are forwarded to the INTx
 * handler.  Returns DDI_INTR_CLAIMED/UNCLAIMED.
 */
/*ARGSUSED*/
static uint32_t
emlxs_sli4_msi_intr(char *arg1, char *arg2)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg1;
#ifdef DEBUG_FASTPATH
	emlxs_port_t *port = &PPORT;
#endif /* DEBUG_FASTPATH */
	uint16_t vector;
	int status;

#ifdef DEBUG_FASTPATH
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "msiINTR arg1:%p arg2:%p", arg1, arg2);
#endif /* DEBUG_FASTPATH */

	/* Check for legacy interrupt handling */
	if (hba->intr_type == DDI_INTR_TYPE_FIXED) {
		status = emlxs_sli4_intx_intr(arg1);
		return (status);
	}

	/* Get the MSI message id; clamp out-of-range ids to vector 0 */
	vector = (uint16_t)((unsigned long)arg2);
	if (vector >= hba->intr_count) {
		vector = 0;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return (DDI_INTR_UNCLAIMED);
	}

	/* The eq[] index == the MSI vector number */
	emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[vector]);

	mutex_exit(&EMLXS_PORT_LOCK);

	return (DDI_INTR_CLAIMED);

} /* emlxs_sli4_msi_intr() */
#endif /* MSI_SUPPORT */
7056 7106
7057 7107
7058 7108 /*ARGSUSED*/
7059 7109 static int
7060 7110 emlxs_sli4_intx_intr(char *arg)
7061 7111 {
7062 7112 emlxs_hba_t *hba = (emlxs_hba_t *)arg;
7063 7113 #ifdef DEBUG_FASTPATH
7064 7114 emlxs_port_t *port = &PPORT;
7065 7115 #endif /* DEBUG_FASTPATH */
7066 7116
7067 7117 #ifdef DEBUG_FASTPATH
7068 7118 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7069 7119 "intxINTR arg:%p", arg);
7070 7120 #endif /* DEBUG_FASTPATH */
7071 7121
7072 7122 mutex_enter(&EMLXS_PORT_LOCK);
7073 7123
7074 7124 if ((hba->state == FC_KILLED) || (hba->flag & FC_OFFLINE_MODE)) {
7075 7125 mutex_exit(&EMLXS_PORT_LOCK);
7076 7126 return (DDI_INTR_UNCLAIMED);
7077 7127 }
7078 7128
7079 7129 emlxs_sli4_process_eq(hba, &hba->sli.sli4.eq[0]);
7080 7130
7081 7131 mutex_exit(&EMLXS_PORT_LOCK);
7082 7132 return (DDI_INTR_CLAIMED);
7083 7133 } /* emlxs_sli4_intx_intr() */
7084 7134
7085 7135
7086 7136 static void
7087 7137 emlxs_sli4_hba_kill(emlxs_hba_t *hba)
7088 7138 {
7089 7139 emlxs_port_t *port = &PPORT;
7090 7140 uint32_t j;
7091 7141
7092 7142 mutex_enter(&EMLXS_PORT_LOCK);
7093 7143 if (hba->flag & FC_INTERLOCKED) {
7094 7144 EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
7095 7145
7096 7146 mutex_exit(&EMLXS_PORT_LOCK);
7097 7147
7098 7148 return;
7099 7149 }
7100 7150
7101 7151 j = 0;
7102 7152 while (j++ < 10000) {
7103 7153 if ((hba->mbox_queue_flag == 0) &&
7104 7154 (hba->intr_busy_cnt == 0)) {
7105 7155 break;
7106 7156 }
7107 7157
7108 7158 mutex_exit(&EMLXS_PORT_LOCK);
7109 7159 BUSYWAIT_US(100);
7110 7160 mutex_enter(&EMLXS_PORT_LOCK);
7111 7161 }
7112 7162
7113 7163 if ((hba->mbox_queue_flag != 0) || (hba->intr_busy_cnt > 0)) {
7114 7164 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7115 7165 "Board kill failed. Adapter busy, %d, %d.",
7116 7166 hba->mbox_queue_flag, hba->intr_busy_cnt);
7117 7167 mutex_exit(&EMLXS_PORT_LOCK);
7118 7168 return;
7119 7169 }
7120 7170
7121 7171 hba->flag |= FC_INTERLOCKED;
7122 7172
7123 7173 EMLXS_STATE_CHANGE_LOCKED(hba, FC_KILLED);
7124 7174
7125 7175 mutex_exit(&EMLXS_PORT_LOCK);
7126 7176
7127 7177 } /* emlxs_sli4_hba_kill() */
7128 7178
7129 7179
7130 7180 extern void
7131 7181 emlxs_sli4_hba_reset_all(emlxs_hba_t *hba, uint32_t flag)
7132 7182 {
7133 7183 emlxs_port_t *port = &PPORT;
7134 7184 uint32_t value;
7135 7185
7136 7186 mutex_enter(&EMLXS_PORT_LOCK);
7137 7187
7138 7188 if ((hba->sli_intf & SLI_INTF_IF_TYPE_MASK) != SLI_INTF_IF_TYPE_2) {
7139 7189 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
7140 7190 "Reset All failed. Invalid Operation.");
7141 7191 mutex_exit(&EMLXS_PORT_LOCK);
7142 7192 return;
7143 7193 }
7144 7194
7145 7195 /* Issue a Firmware Reset All Request */
7146 7196 if (flag) {
7147 7197 value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL | SLI_PHYDEV_DD;
7148 7198 } else {
7149 7199 value = SLI_PHYDEV_FRST | SLI_PHYDEV_FRL_ALL;
7150 7200 }
7151 7201
7152 7202 ddi_put32(hba->sli.sli4.bar0_acc_handle,
7153 7203 hba->sli.sli4.PHYSDEV_reg_addr, value);
7154 7204
7155 7205 mutex_exit(&EMLXS_PORT_LOCK);
7156 7206
7157 7207 } /* emlxs_sli4_hba_reset_all() */
7158 7208
7159 7209
7160 7210 static void
7161 7211 emlxs_sli4_enable_intr(emlxs_hba_t *hba)
7162 7212 {
7163 7213 emlxs_config_t *cfg = &CFG;
7164 7214 int i;
7165 7215 int num_cq;
7166 7216 uint32_t data;
7167 7217
7168 7218 hba->sli.sli4.flag |= EMLXS_SLI4_INTR_ENABLED;
7169 7219
7170 7220 num_cq = (hba->intr_count * cfg[CFG_NUM_WQ].current) +
7171 7221 EMLXS_CQ_OFFSET_WQ;
7172 7222
7173 7223 /* ARM EQ / CQs */
7174 7224 for (i = 0; i < num_cq; i++) {
7175 7225 data = hba->sli.sli4.cq[i].qid;
7176 7226 data |= CQ_DB_REARM;
7177 7227 emlxs_sli4_write_cqdb(hba, data);
7178 7228 }
7179 7229 for (i = 0; i < hba->intr_count; i++) {
7180 7230 data = hba->sli.sli4.eq[i].qid;
7181 7231 data |= (EQ_DB_REARM | EQ_DB_EVENT);
7182 7232 emlxs_sli4_write_cqdb(hba, data);
7183 7233 }
7184 7234 } /* emlxs_sli4_enable_intr() */
7185 7235
7186 7236
7187 7237 static void
7188 7238 emlxs_sli4_disable_intr(emlxs_hba_t *hba, uint32_t att)
|
↓ open down ↓ |
562 lines elided |
↑ open up ↑ |
7189 7239 {
7190 7240 if (att) {
7191 7241 return;
7192 7242 }
7193 7243
7194 7244 hba->sli.sli4.flag &= ~EMLXS_SLI4_INTR_ENABLED;
7195 7245
7196 7246 /* Short of reset, we cannot disable interrupts */
7197 7247 } /* emlxs_sli4_disable_intr() */
7198 7248
7199 -
7200 7249 static void
7201 7250 emlxs_sli4_resource_free(emlxs_hba_t *hba)
7202 7251 {
7203 7252 emlxs_port_t *port = &PPORT;
7204 7253 MBUF_INFO *buf_info;
7205 7254 uint32_t i;
7206 7255
7207 7256 buf_info = &hba->sli.sli4.slim2;
7208 7257 if (buf_info->virt == 0) {
7209 7258 /* Already free */
7210 7259 return;
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
7211 7260 }
7212 7261
7213 7262 emlxs_fcf_fini(hba);
7214 7263
7215 7264 buf_info = &hba->sli.sli4.HeaderTmplate;
7216 7265 if (buf_info->virt) {
7217 7266 bzero(buf_info, sizeof (MBUF_INFO));
7218 7267 }
7219 7268
7220 7269 if (hba->sli.sli4.XRIp) {
7270 + XRIobj_t *xrip;
7271 +
7221 7272 if ((hba->sli.sli4.XRIinuse_f !=
7222 7273 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) ||
7223 7274 (hba->sli.sli4.XRIinuse_b !=
7224 7275 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f)) {
7225 7276 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
7226 7277 "XRIs in use during free!: %p %p != %p\n",
7227 7278 hba->sli.sli4.XRIinuse_f,
7228 7279 hba->sli.sli4.XRIinuse_b,
7229 7280 &hba->sli.sli4.XRIinuse_f);
7230 7281 }
7282 +
7283 + xrip = hba->sli.sli4.XRIp;
7284 + for (i = 0; i < hba->sli.sli4.XRICount; i++) {
7285 + xrip->XRI = emlxs_sli4_index_to_xri(hba, i);
7286 +
7287 + if (xrip->XRI != 0)
7288 + emlxs_mem_put(hba, xrip->SGSeg, xrip->SGList);
7289 +
7290 + xrip++;
7291 + }
7292 +
7231 7293 kmem_free(hba->sli.sli4.XRIp,
7232 7294 (sizeof (XRIobj_t) * hba->sli.sli4.XRICount));
7233 7295 hba->sli.sli4.XRIp = NULL;
7234 7296
7235 7297 hba->sli.sli4.XRIfree_f =
7236 7298 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7237 7299 hba->sli.sli4.XRIfree_b =
7238 7300 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7239 7301 hba->sli.sli4.xrif_count = 0;
7240 7302 }
7241 7303
7242 7304 for (i = 0; i < hba->intr_count; i++) {
7243 7305 mutex_destroy(&hba->sli.sli4.eq[i].lastwq_lock);
7244 7306 bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
7245 7307 hba->sli.sli4.eq[i].qid = 0xffff;
7246 7308 }
7247 7309 for (i = 0; i < EMLXS_MAX_CQS; i++) {
7248 7310 bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
7249 7311 hba->sli.sli4.cq[i].qid = 0xffff;
7250 7312 }
7251 7313 for (i = 0; i < EMLXS_MAX_WQS; i++) {
7252 7314 bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
7253 7315 hba->sli.sli4.wq[i].qid = 0xffff;
7254 7316 }
7255 7317 for (i = 0; i < EMLXS_MAX_RXQS; i++) {
7256 7318 mutex_destroy(&hba->sli.sli4.rxq[i].lock);
7257 7319 bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
7258 7320 }
7259 7321 for (i = 0; i < EMLXS_MAX_RQS; i++) {
7260 7322 mutex_destroy(&hba->sli.sli4.rq[i].lock);
7261 7323 bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
7262 7324 hba->sli.sli4.rq[i].qid = 0xffff;
7263 7325 }
7264 7326
|
↓ open down ↓ |
24 lines elided |
↑ open up ↑ |
7265 7327 /* Free the MQ */
7266 7328 bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
7267 7329
7268 7330 buf_info = &hba->sli.sli4.slim2;
7269 7331 if (buf_info->virt) {
7270 7332 buf_info->flags = FC_MBUF_DMA;
7271 7333 emlxs_mem_free(hba, buf_info);
7272 7334 bzero(buf_info, sizeof (MBUF_INFO));
7273 7335 }
7274 7336
7337 + /* GPIO lock */
7338 + if (hba->model_info.flags & EMLXS_GPIO_LEDS)
7339 + mutex_destroy(&hba->gpio_lock);
7340 +
7275 7341 } /* emlxs_sli4_resource_free() */
7276 7342
7277 -
7278 7343 static int
7279 7344 emlxs_sli4_resource_alloc(emlxs_hba_t *hba)
7280 7345 {
7281 7346 emlxs_port_t *port = &PPORT;
7282 7347 emlxs_config_t *cfg = &CFG;
7283 7348 MBUF_INFO *buf_info;
7284 7349 int num_eq;
7285 7350 int num_wq;
7286 7351 uint16_t i;
7287 7352 uint32_t j;
7288 7353 uint32_t k;
7289 7354 uint16_t cq_depth;
7290 7355 uint32_t cq_size;
7291 7356 uint32_t word;
7292 7357 XRIobj_t *xrip;
7293 7358 RQE_t *rqe;
7294 7359 MBUF_INFO *rqb;
7295 7360 uint64_t phys;
7296 7361 uint64_t tmp_phys;
7297 7362 char *virt;
7298 7363 char *tmp_virt;
7299 7364 void *data_handle;
7300 7365 void *dma_handle;
7301 7366 int32_t size;
7302 7367 off_t offset;
7303 7368 uint32_t count = 0;
7304 7369 uint32_t hddr_size = 0;
7305 7370 uint32_t align;
7306 7371 uint32_t iotag;
7307 7372
7308 7373 buf_info = &hba->sli.sli4.slim2;
7309 7374 if (buf_info->virt) {
7310 7375 /* Already allocated */
7311 7376 return (0);
7312 7377 }
7313 7378
7314 7379 emlxs_fcf_init(hba);
7315 7380
7316 7381 switch (hba->sli.sli4.param.CQV) {
7317 7382 case 0:
7318 7383 cq_depth = CQ_DEPTH;
7319 7384 break;
7320 7385 case 2:
7321 7386 default:
7322 7387 cq_depth = CQ_DEPTH_V2;
7323 7388 break;
7324 7389 }
7325 7390 cq_size = (cq_depth * CQE_SIZE);
7326 7391
7327 7392 /* EQs - 1 per Interrupt vector */
7328 7393 num_eq = hba->intr_count;
7329 7394
7330 7395 /* CQs - number of WQs + 1 for RQs + 1 for mbox/async events */
7331 7396 num_wq = cfg[CFG_NUM_WQ].current * num_eq;
7332 7397
7333 7398 /* Calculate total dmable memory we need */
7334 7399 /* WARNING: make sure each section is aligned on 4K boundary */
7335 7400
7336 7401 /* EQ */
7337 7402 count += num_eq * 4096;
7338 7403
7339 7404 /* CQ */
7340 7405 count += (num_wq + EMLXS_CQ_OFFSET_WQ) * cq_size;
7341 7406
7342 7407 /* WQ */
7343 7408 count += num_wq * (4096 * EMLXS_NUM_WQ_PAGES);
7344 7409
|
↓ open down ↓ |
57 lines elided |
↑ open up ↑ |
7345 7410 /* MQ */
7346 7411 count += EMLXS_MAX_MQS * 4096;
7347 7412
7348 7413 /* RQ */
7349 7414 count += EMLXS_MAX_RQS * 4096;
7350 7415
7351 7416 /* RQB/E */
7352 7417 count += RQB_COUNT * (RQB_DATA_SIZE + RQB_HEADER_SIZE);
7353 7418 count += (4096 - (count%4096)); /* Ensure 4K alignment */
7354 7419
7355 - /* SGL */
7356 - count += hba->sli.sli4.XRIExtSize * hba->sli.sli4.mem_sgl_size;
7357 - count += (4096 - (count%4096)); /* Ensure 4K alignment */
7358 -
7359 7420 /* RPI Header Templates */
7360 7421 if (hba->sli.sli4.param.HDRR) {
7361 7422 /* Bytes per extent */
7362 7423 j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);
7363 7424
7364 7425 /* Pages required per extent (page == 4096 bytes) */
7365 7426 k = (j/4096) + ((j%4096)? 1:0);
7366 7427
7367 7428 /* Total size */
7368 7429 hddr_size = (k * hba->sli.sli4.RPIExtCount * 4096);
7369 7430
7370 7431 count += hddr_size;
7371 7432 }
7372 7433
7373 7434 /* Allocate slim2 for SLI4 */
7374 7435 buf_info = &hba->sli.sli4.slim2;
7375 7436 buf_info->size = count;
7376 7437 buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7377 7438 buf_info->align = ddi_ptob(hba->dip, 1L);
7378 7439
7440 + EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7441 + "Allocating memory for slim2: %d", count);
7442 +
7379 7443 (void) emlxs_mem_alloc(hba, buf_info);
7380 7444
7381 7445 if (buf_info->virt == NULL) {
7382 7446 EMLXS_MSGF(EMLXS_CONTEXT,
7383 7447 &emlxs_init_failed_msg,
7384 7448 "Unable to allocate internal memory for SLI4: %d",
7385 7449 count);
7386 7450 goto failed;
7387 7451 }
7388 7452 bzero(buf_info->virt, buf_info->size);
7389 7453 EMLXS_MPDATA_SYNC(buf_info->dma_handle, 0,
7390 7454 buf_info->size, DDI_DMA_SYNC_FORDEV);
7391 7455
7392 - /* Assign memory to SGL, Head Template, EQ, CQ, WQ, RQ and MQ */
7456 + /* Assign memory to Head Template, EQ, CQ, WQ, RQ and MQ */
7393 7457 data_handle = buf_info->data_handle;
7394 7458 dma_handle = buf_info->dma_handle;
7395 7459 phys = buf_info->phys;
7396 7460 virt = (char *)buf_info->virt;
7397 7461
7398 7462 /* Allocate space for queues */
7399 7463
7400 7464 /* EQ */
7401 7465 size = 4096;
7402 7466 for (i = 0; i < num_eq; i++) {
7403 7467 bzero(&hba->sli.sli4.eq[i], sizeof (EQ_DESC_t));
7404 7468
7405 7469 buf_info = &hba->sli.sli4.eq[i].addr;
7406 7470 buf_info->size = size;
7407 7471 buf_info->flags =
7408 7472 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7409 7473 buf_info->align = ddi_ptob(hba->dip, 1L);
7410 7474 buf_info->phys = phys;
7411 7475 buf_info->virt = (void *)virt;
7412 7476 buf_info->data_handle = data_handle;
7413 7477 buf_info->dma_handle = dma_handle;
7414 7478
7415 7479 phys += size;
7416 7480 virt += size;
7417 7481
7418 7482 hba->sli.sli4.eq[i].max_index = EQ_DEPTH;
7419 7483 hba->sli.sli4.eq[i].qid = 0xffff;
7420 7484
7421 7485 mutex_init(&hba->sli.sli4.eq[i].lastwq_lock, NULL,
7422 7486 MUTEX_DRIVER, NULL);
7423 7487 }
7424 7488
7425 7489
7426 7490 /* CQ */
7427 7491 for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
7428 7492 bzero(&hba->sli.sli4.cq[i], sizeof (CQ_DESC_t));
7429 7493
7430 7494 buf_info = &hba->sli.sli4.cq[i].addr;
7431 7495 buf_info->size = cq_size;
7432 7496 buf_info->flags =
7433 7497 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7434 7498 buf_info->align = ddi_ptob(hba->dip, 1L);
7435 7499 buf_info->phys = phys;
7436 7500 buf_info->virt = (void *)virt;
7437 7501 buf_info->data_handle = data_handle;
7438 7502 buf_info->dma_handle = dma_handle;
7439 7503
7440 7504 phys += cq_size;
7441 7505 virt += cq_size;
7442 7506
7443 7507 hba->sli.sli4.cq[i].max_index = cq_depth;
7444 7508 hba->sli.sli4.cq[i].qid = 0xffff;
7445 7509 }
7446 7510
7447 7511
7448 7512 /* WQ */
7449 7513 size = 4096 * EMLXS_NUM_WQ_PAGES;
7450 7514 for (i = 0; i < num_wq; i++) {
7451 7515 bzero(&hba->sli.sli4.wq[i], sizeof (WQ_DESC_t));
7452 7516
7453 7517 buf_info = &hba->sli.sli4.wq[i].addr;
7454 7518 buf_info->size = size;
7455 7519 buf_info->flags =
7456 7520 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7457 7521 buf_info->align = ddi_ptob(hba->dip, 1L);
7458 7522 buf_info->phys = phys;
7459 7523 buf_info->virt = (void *)virt;
7460 7524 buf_info->data_handle = data_handle;
7461 7525 buf_info->dma_handle = dma_handle;
7462 7526
7463 7527 phys += size;
7464 7528 virt += size;
7465 7529
7466 7530 hba->sli.sli4.wq[i].max_index = WQ_DEPTH;
7467 7531 hba->sli.sli4.wq[i].release_depth = WQE_RELEASE_DEPTH;
7468 7532 hba->sli.sli4.wq[i].qid = 0xFFFF;
7469 7533 }
7470 7534
7471 7535
7472 7536 /* MQ */
7473 7537 size = 4096;
7474 7538 bzero(&hba->sli.sli4.mq, sizeof (MQ_DESC_t));
7475 7539
7476 7540 buf_info = &hba->sli.sli4.mq.addr;
7477 7541 buf_info->size = size;
7478 7542 buf_info->flags =
7479 7543 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7480 7544 buf_info->align = ddi_ptob(hba->dip, 1L);
7481 7545 buf_info->phys = phys;
7482 7546 buf_info->virt = (void *)virt;
7483 7547 buf_info->data_handle = data_handle;
7484 7548 buf_info->dma_handle = dma_handle;
7485 7549
7486 7550 phys += size;
7487 7551 virt += size;
7488 7552
7489 7553 hba->sli.sli4.mq.max_index = MQ_DEPTH;
7490 7554
7491 7555
7492 7556 /* RXQ */
7493 7557 for (i = 0; i < EMLXS_MAX_RXQS; i++) {
7494 7558 bzero(&hba->sli.sli4.rxq[i], sizeof (RXQ_DESC_t));
7495 7559
7496 7560 mutex_init(&hba->sli.sli4.rxq[i].lock, NULL, MUTEX_DRIVER,
7497 7561 NULL);
7498 7562 }
7499 7563
7500 7564
7501 7565 /* RQ */
7502 7566 size = 4096;
7503 7567 for (i = 0; i < EMLXS_MAX_RQS; i++) {
7504 7568 bzero(&hba->sli.sli4.rq[i], sizeof (RQ_DESC_t));
7505 7569
7506 7570 buf_info = &hba->sli.sli4.rq[i].addr;
7507 7571 buf_info->size = size;
7508 7572 buf_info->flags =
7509 7573 FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7510 7574 buf_info->align = ddi_ptob(hba->dip, 1L);
7511 7575 buf_info->phys = phys;
7512 7576 buf_info->virt = (void *)virt;
7513 7577 buf_info->data_handle = data_handle;
7514 7578 buf_info->dma_handle = dma_handle;
7515 7579
7516 7580 phys += size;
7517 7581 virt += size;
7518 7582
7519 7583 hba->sli.sli4.rq[i].max_index = RQ_DEPTH;
7520 7584 hba->sli.sli4.rq[i].qid = 0xFFFF;
7521 7585
7522 7586 mutex_init(&hba->sli.sli4.rq[i].lock, NULL, MUTEX_DRIVER, NULL);
7523 7587 }
7524 7588
7525 7589
7526 7590 /* RQB/E */
7527 7591 for (i = 0; i < EMLXS_MAX_RQS; i++) {
7528 7592 size = (i & 0x1) ? RQB_DATA_SIZE : RQB_HEADER_SIZE;
7529 7593 tmp_phys = phys;
7530 7594 tmp_virt = virt;
7531 7595
7532 7596 /* Initialize the RQEs */
7533 7597 rqe = (RQE_t *)hba->sli.sli4.rq[i].addr.virt;
7534 7598 for (j = 0; j < (RQ_DEPTH/RQB_COUNT); j++) {
7535 7599 phys = tmp_phys;
7536 7600 virt = tmp_virt;
7537 7601 for (k = 0; k < RQB_COUNT; k++) {
7538 7602 word = PADDR_HI(phys);
7539 7603 rqe->AddrHi = BE_SWAP32(word);
7540 7604
7541 7605 word = PADDR_LO(phys);
7542 7606 rqe->AddrLo = BE_SWAP32(word);
7543 7607
7544 7608 rqb = &hba->sli.sli4.rq[i].
7545 7609 rqb[k + (j * RQB_COUNT)];
7546 7610 rqb->size = size;
7547 7611 rqb->flags = FC_MBUF_DMA |
7548 7612 FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7549 7613 rqb->align = ddi_ptob(hba->dip, 1L);
7550 7614 rqb->phys = phys;
7551 7615 rqb->virt = (void *)virt;
7552 7616 rqb->data_handle = data_handle;
7553 7617 rqb->dma_handle = dma_handle;
7554 7618
7555 7619 phys += size;
7556 7620 virt += size;
7557 7621 #ifdef DEBUG_RQE
7558 7622 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
7559 7623 "RQ_ALLOC: rq[%d] rqb[%d,%d]=%p iotag=%d",
7560 7624 i, j, k, mp, mp->tag);
7561 7625 #endif /* DEBUG_RQE */
7562 7626
7563 7627 rqe++;
7564 7628 }
7565 7629 }
7566 7630
7567 7631 offset = (off_t)((uint64_t)((unsigned long)
7568 7632 hba->sli.sli4.rq[i].addr.virt) -
7569 7633 (uint64_t)((unsigned long)
7570 7634 hba->sli.sli4.slim2.virt));
7571 7635
|
↓ open down ↓ |
169 lines elided |
↑ open up ↑ |
7572 7636 /* Sync the RQ buffer list */
7573 7637 EMLXS_MPDATA_SYNC(hba->sli.sli4.rq[i].addr.dma_handle, offset,
7574 7638 hba->sli.sli4.rq[i].addr.size, DDI_DMA_SYNC_FORDEV);
7575 7639 }
7576 7640
7577 7641 /* 4K Alignment */
7578 7642 align = (4096 - (phys%4096));
7579 7643 phys += align;
7580 7644 virt += align;
7581 7645
7646 + /* RPI Header Templates */
7647 + if (hba->sli.sli4.param.HDRR) {
7648 + buf_info = &hba->sli.sli4.HeaderTmplate;
7649 + bzero(buf_info, sizeof (MBUF_INFO));
7650 + buf_info->size = hddr_size;
7651 + buf_info->flags = FC_MBUF_DMA | FC_MBUF_DMA32;
7652 + buf_info->align = ddi_ptob(hba->dip, 1L);
7653 + buf_info->phys = phys;
7654 + buf_info->virt = (void *)virt;
7655 + buf_info->data_handle = data_handle;
7656 + buf_info->dma_handle = dma_handle;
7657 + }
7658 +
7582 7659 /* SGL */
7660 +
7661 + EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
7662 + "Allocating memory for %d SGLs: %d/%d",
7663 + hba->sli.sli4.XRICount, sizeof (XRIobj_t), size);
7664 +
7583 7665 /* Initialize double linked lists */
7584 7666 hba->sli.sli4.XRIinuse_f =
7585 7667 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7586 7668 hba->sli.sli4.XRIinuse_b =
7587 7669 (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
7588 7670 hba->sli.sli4.xria_count = 0;
7589 7671
7590 7672 hba->sli.sli4.XRIfree_f =
7591 7673 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7592 7674 hba->sli.sli4.XRIfree_b =
7593 7675 (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7594 - hba->sli.sli4.xria_count = 0;
7676 + hba->sli.sli4.xrif_count = 0;
7595 7677
7678 + uint32_t mseg;
7679 +
7680 + switch (hba->sli.sli4.mem_sgl_size) {
7681 + case 1024:
7682 + mseg = MEM_SGL1K;
7683 + break;
7684 + case 2048:
7685 + mseg = MEM_SGL2K;
7686 + break;
7687 + case 4096:
7688 + mseg = MEM_SGL4K;
7689 + break;
7690 + default:
7691 + EMLXS_MSGF(EMLXS_CONTEXT,
7692 + &emlxs_init_failed_msg,
7693 + "Unsupported SGL Size: %d", hba->sli.sli4.mem_sgl_size);
7694 + goto failed;
7695 + }
7696 +
7596 7697 hba->sli.sli4.XRIp = (XRIobj_t *)kmem_zalloc(
7597 7698 (sizeof (XRIobj_t) * hba->sli.sli4.XRICount), KM_SLEEP);
7598 7699
7599 7700 xrip = hba->sli.sli4.XRIp;
7600 - size = hba->sli.sli4.mem_sgl_size;
7601 7701 iotag = 1;
7702 +
7602 7703 for (i = 0; i < hba->sli.sli4.XRICount; i++) {
7603 7704 xrip->XRI = emlxs_sli4_index_to_xri(hba, i);
7604 7705
7605 7706 /* We don't use XRI==0, since it also represents an */
7606 7707 /* uninitialized exchange */
7607 7708 if (xrip->XRI == 0) {
7608 7709 xrip++;
7609 7710 continue;
7610 7711 }
7611 7712
7612 7713 xrip->iotag = iotag++;
7613 7714 xrip->sge_count =
|
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
7614 7715 (hba->sli.sli4.mem_sgl_size / sizeof (ULP_SGE64));
7615 7716
7616 7717 /* Add xrip to end of free list */
7617 7718 xrip->_b = hba->sli.sli4.XRIfree_b;
7618 7719 hba->sli.sli4.XRIfree_b->_f = xrip;
7619 7720 xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
7620 7721 hba->sli.sli4.XRIfree_b = xrip;
7621 7722 hba->sli.sli4.xrif_count++;
7622 7723
7623 7724 /* Allocate SGL for this xrip */
7624 - buf_info = &xrip->SGList;
7625 - buf_info->size = size;
7626 - buf_info->flags =
7627 - FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
7628 - buf_info->align = size;
7629 - buf_info->phys = phys;
7630 - buf_info->virt = (void *)virt;
7631 - buf_info->data_handle = data_handle;
7632 - buf_info->dma_handle = dma_handle;
7725 + xrip->SGSeg = mseg;
7726 + xrip->SGList = emlxs_mem_get(hba, xrip->SGSeg);
7633 7727
7634 - phys += size;
7635 - virt += size;
7728 + if (xrip->SGList == NULL) {
7729 + EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
7730 + "Unable to allocate memory for SGL %d", i);
7731 + goto failed;
7732 + }
7636 7733
7734 + EMLXS_MPDATA_SYNC(xrip->SGList->dma_handle, 0,
7735 + xrip->SGList->size, DDI_DMA_SYNC_FORDEV);
7736 +
7637 7737 xrip++;
7638 7738 }
7639 7739
7640 - /* 4K Alignment */
7641 - align = (4096 - (phys%4096));
7642 - phys += align;
7643 - virt += align;
7740 + /* GPIO lock */
7741 + if (hba->model_info.flags & EMLXS_GPIO_LEDS)
7742 + mutex_init(&hba->gpio_lock, NULL, MUTEX_DRIVER, NULL);
7644 7743
7645 - /* RPI Header Templates */
7646 - if (hba->sli.sli4.param.HDRR) {
7647 - buf_info = &hba->sli.sli4.HeaderTmplate;
7648 - bzero(buf_info, sizeof (MBUF_INFO));
7649 - buf_info->size = hddr_size;
7650 - buf_info->flags = FC_MBUF_DMA | FC_MBUF_DMA32;
7651 - buf_info->align = ddi_ptob(hba->dip, 1L);
7652 - buf_info->phys = phys;
7653 - buf_info->virt = (void *)virt;
7654 - buf_info->data_handle = data_handle;
7655 - buf_info->dma_handle = dma_handle;
7656 - }
7657 -
7658 7744 #ifdef FMA_SUPPORT
7659 7745 if (hba->sli.sli4.slim2.dma_handle) {
7660 7746 if (emlxs_fm_check_dma_handle(hba,
7661 7747 hba->sli.sli4.slim2.dma_handle)
7662 7748 != DDI_FM_OK) {
7663 7749 EMLXS_MSGF(EMLXS_CONTEXT,
7664 7750 &emlxs_invalid_dma_handle_msg,
7665 7751 "sli4_resource_alloc: hdl=%p",
7666 7752 hba->sli.sli4.slim2.dma_handle);
7667 7753 goto failed;
7668 7754 }
7669 7755 }
7670 7756 #endif /* FMA_SUPPORT */
7671 7757
7672 7758 return (0);
7673 7759
7674 7760 failed:
7675 7761
7676 7762 (void) emlxs_sli4_resource_free(hba);
7677 7763 return (ENOMEM);
7678 7764
7679 7765 } /* emlxs_sli4_resource_alloc */
7680 7766
7681 7767
7682 7768 extern void
7683 7769 emlxs_sli4_zero_queue_stat(emlxs_hba_t *hba)
7684 7770 {
7685 7771 uint32_t i;
7686 7772 uint32_t num_wq;
7687 7773 emlxs_config_t *cfg = &CFG;
7688 7774 clock_t time;
7689 7775
7690 7776 /* EQ */
7691 7777 for (i = 0; i < hba->intr_count; i++) {
7692 7778 hba->sli.sli4.eq[i].num_proc = 0;
7693 7779 hba->sli.sli4.eq[i].max_proc = 0;
7694 7780 hba->sli.sli4.eq[i].isr_count = 0;
7695 7781 }
7696 7782 num_wq = cfg[CFG_NUM_WQ].current * hba->intr_count;
7697 7783 /* CQ */
7698 7784 for (i = 0; i < (num_wq + EMLXS_CQ_OFFSET_WQ); i++) {
7699 7785 hba->sli.sli4.cq[i].num_proc = 0;
7700 7786 hba->sli.sli4.cq[i].max_proc = 0;
7701 7787 hba->sli.sli4.cq[i].isr_count = 0;
7702 7788 }
7703 7789 /* WQ */
7704 7790 for (i = 0; i < num_wq; i++) {
7705 7791 hba->sli.sli4.wq[i].num_proc = 0;
7706 7792 hba->sli.sli4.wq[i].num_busy = 0;
7707 7793 }
7708 7794 /* RQ */
7709 7795 for (i = 0; i < EMLXS_MAX_RQS; i++) {
7710 7796 hba->sli.sli4.rq[i].num_proc = 0;
7711 7797 }
7712 7798 (void) drv_getparm(LBOLT, &time);
7713 7799 hba->sli.sli4.que_stat_timer = (uint32_t)time;
7714 7800
7715 7801 } /* emlxs_sli4_zero_queue_stat */
7716 7802
7717 7803
/*
 * emlxs_sli4_reserve_xri
 *
 * Reserve an exchange (XRI) from the free list without binding a packet
 * to it.  The XRI is marked EMLXS_XRI_RESERVED, associated with the given
 * RPI and rx_id, and moved from the free list to the in-use list.
 *
 * Returns the reserved XRIobj_t, or NULL if the free list is empty or the
 * XRI's iotag slot in fc_table is still occupied by a live packet.
 */
extern XRIobj_t *
emlxs_sli4_reserve_xri(emlxs_port_t *port, RPIobj_t *rpip, uint32_t type,
    uint16_t rx_id)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;
	uint16_t iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Head of the free list; the list head itself marks "empty" */
	xrip = hba->sli.sli4.XRIfree_f;

	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "Unable to reserve XRI. type=%d",
		    type);

		return (NULL);
	}

	iotag = xrip->iotag;

	/* The iotag must be valid and its fc_table slot must be vacant */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to reserve iotag. type=%d",
		    type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Reserved: allocated but not yet bound to a packet (sbp == NULL) */
	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = EMLXS_XRI_RESERVED;
	xrip->sbp = NULL;

	xrip->rpip = rpip;
	xrip->rx_id = rx_id;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);
	return (xrip);

} /* emlxs_sli4_reserve_xri() */
7783 7869
7784 7870
/*
 * emlxs_sli4_unreserve_xri
 *
 * Return a previously reserved (but unregistered) XRI to the free list.
 * If 'lock' is nonzero the routine acquires/releases EMLXS_FCTAB_LOCK
 * itself; otherwise the caller must already hold it.
 *
 * Returns 0 on success (or if the XRI was already free), 1 if the XRI is
 * registered/in use and therefore cannot be unreserved.
 */
extern uint32_t
emlxs_sli4_unreserve_xri(emlxs_port_t *port, uint16_t xri, uint32_t lock)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	xrip = emlxs_sli4_find_xri(port, xri);

	/* Nothing to do if the XRI is unknown or already free */
	if (!xrip || xrip->state == XRI_STATE_FREE) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d already freed.", xri);
		return (0);
	}

	/* Flush this unsolicited ct command */
	if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
		(void) emlxs_flush_ct_event(port, xrip->rx_id);
	}

	/* A registered (non-reserved) XRI must not be torn down here */
	if (!(xrip->flag & EMLXS_XRI_RESERVED)) {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_unreserve_xri:%d in use. type=%d",
		    xrip->XRI, xrip->type);
		return (1);
	}

	/* Drop any live packet still occupying this iotag's fc_table slot */
	if (xrip->iotag &&
	    (hba->fc_table[xrip->iotag] != NULL) &&
	    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_err_msg,
		    "sli4_unreserve_xri:%d sbp dropped:%p type=%d",
		    xrip->XRI, hba->fc_table[xrip->iotag], xrip->type);

		hba->fc_table[xrip->iotag] = NULL;
		hba->io_count--;
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "sli4_unreserve_xri:%d unreserved. type=%d",
	    xrip->XRI, xrip->type);

	xrip->state = XRI_STATE_FREE;
	xrip->type = 0;

	/* Release the XRI's hold on its RPI(s) */
	if (xrip->rpip) {
		xrip->rpip->xri_count--;
		xrip->rpip = NULL;
	}

	if (xrip->reserved_rpip) {
		xrip->reserved_rpip->xri_count--;
		xrip->reserved_rpip = NULL;
	}

	/* Take it off inuse list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xria_count--;

	/* Add it to end of free list */
	xrip->_b = hba->sli.sli4.XRIfree_b;
	hba->sli.sli4.XRIfree_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
	hba->sli.sli4.XRIfree_b = xrip;
	hba->sli.sli4.xrif_count++;

	if (lock) {
		mutex_exit(&EMLXS_FCTAB_LOCK);
	}

	return (0);

} /* emlxs_sli4_unreserve_xri() */
7872 7958
7873 7959
/*
 * emlxs_sli4_register_xri
 *
 * Bind a packet (sbp) to a previously reserved XRI: install the packet in
 * fc_table under the XRI's iotag, clear the RESERVED flag, and if the XRI
 * was reserved against the fabric placeholder RPI, migrate it to the RPI
 * now registered for 'did'.
 *
 * Returns the registered XRIobj_t, or NULL if the XRI cannot be found, is
 * not in the reserved state, or its iotag slot is already occupied.
 */
XRIobj_t *
emlxs_sli4_register_xri(emlxs_port_t *port, emlxs_buf_t *sbp, uint16_t xri,
    uint32_t did)
{
	emlxs_hba_t *hba = HBA;
	uint16_t iotag;
	XRIobj_t *xrip;
	emlxs_node_t *node;
	RPIobj_t *rpip;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Prefer the packet's cached XRI; otherwise look it up by id */
	xrip = sbp->xrip;
	if (!xrip) {
		xrip = emlxs_sli4_find_xri(port, xri);

		if (!xrip) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
			    "sli4_register_xri:%d XRI not found.", xri);

			mutex_exit(&EMLXS_FCTAB_LOCK);
			return (NULL);
		}
	}

	/* Only a reserved (not yet registered) XRI may be registered */
	if ((xrip->state == XRI_STATE_FREE) ||
	    !(xrip->flag & EMLXS_XRI_RESERVED)) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid XRI. xrip=%p "
		    "state=%x flag=%x",
		    xrip->XRI, xrip, xrip->state, xrip->flag);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	iotag = xrip->iotag;

	/* The iotag must be valid and its fc_table slot vacant */
	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "sli4_register_xri:%d Invalid fc_table entry. "
		    "iotag=%d entry=%p",
		    xrip->XRI, iotag, hba->fc_table[iotag]);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Bind the packet to the iotag and the XRI to the packet */
	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	xrip->flag &= ~EMLXS_XRI_RESERVED;
	xrip->sbp = sbp;

	/* If we did not have a registered RPI when we reserved */
	/* this exchange, check again now. */
	if (xrip->rpip && (xrip->rpip->RPI == FABRIC_RPI)) {
		node = emlxs_node_find_did(port, did, 1);
		rpip = EMLXS_NODE_TO_RPI(port, node);

		if (rpip && (rpip->RPI != FABRIC_RPI)) {
			/* Move the XRI to the new RPI */
			xrip->rpip->xri_count--;
			xrip->rpip = rpip;
			rpip->xri_count++;
		}
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_register_xri() */
7954 8040
7955 8041
/* Performs both reserve and register functions for XRI */
/*
 * emlxs_sli4_alloc_xri
 *
 * Take an XRI from the free list and immediately bind it to the given
 * packet and RPI (reserve + register in one step, no RESERVED phase).
 * The XRI is moved from the free list to the in-use list.
 *
 * Returns the allocated XRIobj_t, or NULL if the free list is empty or
 * the XRI's iotag slot in fc_table is still occupied by a live packet.
 */
static XRIobj_t *
emlxs_sli4_alloc_xri(emlxs_port_t *port, emlxs_buf_t *sbp, RPIobj_t *rpip,
    uint32_t type)
{
	emlxs_hba_t *hba = HBA;
	XRIobj_t *xrip;
	uint16_t iotag;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Head of the free list; the list head itself marks "empty" */
	xrip = hba->sli.sli4.XRIfree_f;

	if (xrip == (XRIobj_t *)&hba->sli.sli4.XRIfree_f) {
		mutex_exit(&EMLXS_FCTAB_LOCK);

		return (NULL);
	}

	/* Get the iotag by registering the packet */
	iotag = xrip->iotag;

	if ((!iotag) ||
	    ((hba->fc_table[iotag] != NULL) &&
	    (hba->fc_table[iotag] != STALE_PACKET))) {
		/*
		 * No more command slots available, retry later
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
		    "Adapter Busy. Unable to alloc iotag:(0x%x)(%p) type=%d",
		    iotag, hba->fc_table[iotag], type);

		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (NULL);
	}

	/* Bind the packet to the iotag */
	hba->fc_table[iotag] = sbp;
	hba->io_count++;

	sbp->iotag = iotag;
	sbp->xrip = xrip;

	/* Fully registered: flag is clear (not EMLXS_XRI_RESERVED) */
	xrip->state = XRI_STATE_ALLOCATED;
	xrip->type = type;
	xrip->flag = 0;
	xrip->sbp = sbp;

	xrip->rpip = rpip;
	rpip->xri_count++;

	/* Take it off free list */
	(xrip->_b)->_f = xrip->_f;
	(xrip->_f)->_b = xrip->_b;
	xrip->_f = NULL;
	xrip->_b = NULL;
	hba->sli.sli4.xrif_count--;

	/* Add it to end of inuse list */
	xrip->_b = hba->sli.sli4.XRIinuse_b;
	hba->sli.sli4.XRIinuse_b->_f = xrip;
	xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIinuse_f;
	hba->sli.sli4.XRIinuse_b = xrip;
	hba->sli.sli4.xria_count++;

	mutex_exit(&EMLXS_FCTAB_LOCK);

	return (xrip);

} /* emlxs_sli4_alloc_xri() */
8025 8111
8026 8112
8027 8113 /* EMLXS_FCTAB_LOCK must be held to enter */
8028 8114 extern XRIobj_t *
8029 8115 emlxs_sli4_find_xri(emlxs_port_t *port, uint16_t xri)
8030 8116 {
8031 8117 emlxs_hba_t *hba = HBA;
8032 8118 XRIobj_t *xrip;
8033 8119
8034 8120 xrip = (XRIobj_t *)hba->sli.sli4.XRIinuse_f;
8035 8121 while (xrip != (XRIobj_t *)&hba->sli.sli4.XRIinuse_f) {
8036 8122 if ((xrip->state >= XRI_STATE_ALLOCATED) &&
8037 8123 (xrip->XRI == xri)) {
8038 8124 return (xrip);
8039 8125 }
8040 8126 xrip = xrip->_f;
8041 8127 }
8042 8128
8043 8129 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
8044 8130 "Unable to find XRI x%x", xri);
8045 8131
8046 8132 return (NULL);
8047 8133
8048 8134 } /* emlxs_sli4_find_xri() */
8049 8135
8050 8136
8051 8137
8052 8138
/*
 * emlxs_sli4_free_xri
 *
 * Release an exchange and/or its associated packet.  Either argument may
 * be NULL: if 'xrip' is set, the XRI is torn down and moved back to the
 * free list; if 'sbp' is set, the packet's iotag/fc_table binding and its
 * TXQ/CHIPQ flags are cleared.  If 'lock' is nonzero the routine handles
 * EMLXS_FCTAB_LOCK itself; otherwise the caller must already hold it.
 */
extern void
emlxs_sli4_free_xri(emlxs_port_t *port, emlxs_buf_t *sbp, XRIobj_t *xrip,
    uint8_t lock)
{
	emlxs_hba_t *hba = HBA;

	if (lock) {
		mutex_enter(&EMLXS_FCTAB_LOCK);
	}

	if (xrip) {
		/* Double-free guard */
		if (xrip->state == XRI_STATE_FREE) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI:%x, Already freed. type=%d",
			    xrip->XRI, xrip->type);
			return;
		}

		/* Flush any pending unsolicited CT event for this exchange */
		if (xrip->type == EMLXS_XRI_UNSOL_CT_TYPE) {
			(void) emlxs_flush_ct_event(port, xrip->rx_id);
		}

		/* Clear the iotag's fc_table slot if a live packet holds it */
		if (xrip->iotag &&
		    (hba->fc_table[xrip->iotag] != NULL) &&
		    (hba->fc_table[xrip->iotag] != STALE_PACKET)) {
			hba->fc_table[xrip->iotag] = NULL;
			hba->io_count--;
		}

		xrip->state = XRI_STATE_FREE;
		xrip->type = 0;
		xrip->flag = 0;

		/* Release the XRI's hold on its RPI(s) */
		if (xrip->rpip) {
			xrip->rpip->xri_count--;
			xrip->rpip = NULL;
		}

		if (xrip->reserved_rpip) {
			xrip->reserved_rpip->xri_count--;
			xrip->reserved_rpip = NULL;
		}

		/* Take it off inuse list */
		(xrip->_b)->_f = xrip->_f;
		(xrip->_f)->_b = xrip->_b;
		xrip->_f = NULL;
		xrip->_b = NULL;
		hba->sli.sli4.xria_count--;

		/* Add it to end of free list */
		xrip->_b = hba->sli.sli4.XRIfree_b;
		hba->sli.sli4.XRIfree_b->_f = xrip;
		xrip->_f = (XRIobj_t *)&hba->sli.sli4.XRIfree_f;
		hba->sli.sli4.XRIfree_b = xrip;
		hba->sli.sli4.xrif_count++;
	}

	if (sbp) {
		/* Skip packets already completed or returned to the ULP */
		if (!(sbp->pkt_flags & PACKET_VALID) ||
		    (sbp->pkt_flags &
		    (PACKET_ULP_OWNED|PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
			if (lock) {
				mutex_exit(&EMLXS_FCTAB_LOCK);
			}
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Free XRI: sbp invalid. sbp=%p flags=%x xri=%d",
			    sbp, sbp->pkt_flags, ((xrip)? xrip->XRI:0));
			return;
		}

		/* Diagnostic only; the free continues on a mismatch */
		if (xrip && (xrip->iotag != sbp->iotag)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "sbp/iotag mismatch %p iotag:%d %d", sbp,
			    sbp->iotag, xrip->iotag);
		}

		if (sbp->iotag) {
			if (sbp == hba->fc_table[sbp->iotag]) {
				hba->fc_table[sbp->iotag] = NULL;
				hba->io_count--;

				if (sbp->xrip) {
					/* Exchange is still reserved */
					sbp->xrip->flag |= EMLXS_XRI_RESERVED;
				}
			}
			sbp->iotag = 0;
		}

		if (xrip) {
			sbp->xrip = 0;
		}

		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}

		/* Clean up the sbp */
		mutex_enter(&sbp->mtx);

		if (sbp->pkt_flags & PACKET_IN_TXQ) {
			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->channel_tx_count--;
		}

		if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
			sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
		}

		mutex_exit(&sbp->mtx);
	} else {
		if (lock) {
			mutex_exit(&EMLXS_FCTAB_LOCK);
		}
	}

} /* emlxs_sli4_free_xri() */
8174 8260
8175 8261
/*
 * emlxs_sli4_post_sgl_pages
 *
 * Post the physical addresses of every XRI's SGL page to the adapter
 * using non-embedded FCOE_OPCODE_CFG_POST_SGL_PAGES mailbox commands,
 * batching as many XRIs per command as the non-embedded payload holds.
 *
 * Returns 0 on success, EIO on allocation or mailbox failure.
 */
static int
emlxs_sli4_post_sgl_pages(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4 *mb = (MAILBOX4 *)mbq;
	emlxs_port_t *port = &PPORT;
	XRIobj_t *xrip;
	MATCHMAP *mp;
	mbox_req_hdr_t	*hdr_req;
	uint32_t i;
	uint32_t cnt;
	uint32_t xri_cnt;
	uint32_t j;
	uint32_t size;
	IOCTL_FCOE_CFG_POST_SGL_PAGES *post_sgl;

	bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
	mbq->bp = NULL;
	mbq->mbox_cmpl = NULL;

	/* DMA buffer for the non-embedded request payload */
	if ((mp = emlxs_mem_buf_alloc(hba, EMLXS_MAX_NONEMBED_SIZE)) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Unable to POST_SGL. Mailbox cmd=%x ",
		    mb->mbxCommand);
		return (EIO);
	}
	mbq->nonembed = (void *)mp;

	/*
	 * Signifies a non embedded command
	 */
	mb->un.varSLIConfig.be.embedded = 0;
	mb->mbxCommand = MBX_SLI_CONFIG;
	mb->mbxOwner = OWN_HOST;

	hdr_req = (mbox_req_hdr_t *)mp->virt;
	post_sgl =
	    (IOCTL_FCOE_CFG_POST_SGL_PAGES *)(hdr_req + 1);

	xrip = hba->sli.sli4.XRIp;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.XRIExtCount; j++) {
		cnt = hba->sli.sli4.XRIExtSize;
		while (cnt) {
			/* Skip unassigned XRI slots */
			if (xrip->XRI == 0) {
				cnt--;
				xrip++;
				continue;
			}

			/* Rebuild the request for the next batch of XRIs */
			bzero((void *) hdr_req, mp->size);
			size = mp->size - IOCTL_HEADER_SZ;

			mb->un.varSLIConfig.be.payload_length =
			    mp->size;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
			    IOCTL_SUBSYSTEM_FCOE;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
			    FCOE_OPCODE_CFG_POST_SGL_PAGES;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
			mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length = size;

			hdr_req->subsystem = IOCTL_SUBSYSTEM_FCOE;
			hdr_req->opcode = FCOE_OPCODE_CFG_POST_SGL_PAGES;
			hdr_req->timeout = 0;
			hdr_req->req_length = size;

			post_sgl->params.request.xri_count = 0;
			post_sgl->params.request.xri_start = xrip->XRI;

			/* How many XRI page entries fit in this payload */
			xri_cnt = (size -
			    sizeof (IOCTL_FCOE_CFG_POST_SGL_PAGES)) /
			    sizeof (FCOE_SGL_PAGES);

			for (i = 0; (i < xri_cnt) && cnt; i++) {
				post_sgl->params.request.xri_count++;
				post_sgl->params.request.pages[i].\
				    sgl_page0.addrLow =
				    PADDR_LO(xrip->SGList->phys);
				post_sgl->params.request.pages[i].\
				    sgl_page0.addrHigh =
				    PADDR_HI(xrip->SGList->phys);

				cnt--;
				xrip++;
			}

			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
				    "Unable to POST_SGL. Mailbox cmd=%x "
				    "status=%x XRI cnt:%d start:%d",
				    mb->mbxCommand, mb->mbxStatus,
				    post_sgl->params.request.xri_count,
				    post_sgl->params.request.xri_start);
				emlxs_mem_buf_free(hba, mp);
				mbq->nonembed = NULL;
				return (EIO);
			}
		}
	}

	emlxs_mem_buf_free(hba, mp);
	mbq->nonembed = NULL;
	return (0);

} /* emlxs_sli4_post_sgl_pages() */
8283 8369
8284 8370
/*
 * emlxs_sli4_post_hdr_tmplates
 *
 * Post the RPI header template pages to the adapter, one embedded
 * FCOE_OPCODE_POST_HDR_TEMPLATES mailbox command per RPI extent.
 * No-op (returns 0) when the adapter does not require header templates
 * (param.HDRR clear).
 *
 * Returns 0 on success, EIO on mailbox failure.
 */
static int
emlxs_sli4_post_hdr_tmplates(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4 *mb = (MAILBOX4 *)mbq;
	emlxs_port_t *port = &PPORT;
	uint32_t	j;
	uint32_t	k;
	uint64_t addr;
	IOCTL_FCOE_POST_HDR_TEMPLATES *post_hdr;
	uint16_t num_pages;

	if (!(hba->sli.sli4.param.HDRR)) {
		return (0);
	}

	/* Bytes per extent */
	j = hba->sli.sli4.RPIExtSize * sizeof (RPIHdrTmplate_t);

	/* Pages required per extent (page == 4096 bytes) */
	num_pages = (j/4096) + ((j%4096)? 1:0);

	/* Physical base of the header template area; advances per page */
	addr = hba->sli.sli4.HeaderTmplate.phys;

	/* For each extent */
	for (j = 0; j < hba->sli.sli4.RPIExtCount; j++) {
		bzero((void *) mb, MAILBOX_CMD_SLI4_BSIZE);
		mbq->bp = NULL;
		mbq->mbox_cmpl = NULL;

		/*
		 * Signifies an embedded command
		 */
		mb->un.varSLIConfig.be.embedded = 1;

		mb->mbxCommand = MBX_SLI_CONFIG;
		mb->mbxOwner = OWN_HOST;
		mb->un.varSLIConfig.be.payload_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES) + IOCTL_HEADER_SZ;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.subsystem =
		    IOCTL_SUBSYSTEM_FCOE;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.opcode =
		    FCOE_OPCODE_POST_HDR_TEMPLATES;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.timeout = 0;
		mb->un.varSLIConfig.be.un_hdr.hdr_req.req_length =
		    sizeof (IOCTL_FCOE_POST_HDR_TEMPLATES);

		post_hdr =
		    (IOCTL_FCOE_POST_HDR_TEMPLATES *)
		    &mb->un.varSLIConfig.payload;
		post_hdr->params.request.num_pages = num_pages;
		post_hdr->params.request.rpi_offset = hba->sli.sli4.RPIBase[j];

		for (k = 0; k < num_pages; k++) {
			post_hdr->params.request.pages[k].addrLow =
			    PADDR_LO(addr);
			post_hdr->params.request.pages[k].addrHigh =
			    PADDR_HI(addr);
			addr += 4096;
		}

		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "Unable to POST_HDR_TEMPLATES. Mailbox cmd=%x "
			    "status=%x ",
			    mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		emlxs_data_dump(port, "POST_HDR", (uint32_t *)mb, 18, 0);
	}

	return (0);

} /* emlxs_sli4_post_hdr_tmplates() */
8359 8445
8360 8446
/*
 * emlxs_sli4_create_queues
 *
 * Create the full SLI4 queue topology via mailbox commands: per-interrupt
 * EQs, their CQs and WQs, the RQ pairs for unsolicited receive, and the
 * mailbox MQ (extended form first, falling back to the basic form).
 *
 * Returns 0 on success, EIO when any queue-create mailbox command fails.
 */
static int
emlxs_sli4_create_queues(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX4 *mb = (MAILBOX4 *)mbq;
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	IOCTL_COMMON_EQ_CREATE *eq;
	IOCTL_COMMON_CQ_CREATE *cq;
	IOCTL_FCOE_WQ_CREATE *wq;
	IOCTL_FCOE_RQ_CREATE *rq;
	IOCTL_COMMON_MQ_CREATE *mq;
	IOCTL_COMMON_MQ_CREATE_EXT *mq_ext;
	emlxs_rqdbu_t rqdb;
	uint16_t i, j;
	uint16_t num_cq, total_cq;
	uint16_t num_wq, total_wq;

	/*
	 * The first CQ is reserved for ASYNC events,
	 * the second is reserved for unsol rcv, the rest
	 * correspond to WQs. (WQ0 -> CQ2, WQ1 -> CQ3, ...)
	 */

	total_cq = 0;
	total_wq = 0;

	/* Create EQ's */
	for (i = 0; i < hba->intr_count; i++) {
		emlxs_mb_eq_create(hba, mbq, i);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create EQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}
		eq = (IOCTL_COMMON_EQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.eq[i].qid = eq->params.response.EQId;
		hba->sli.sli4.eq[i].lastwq = total_wq;
		hba->sli.sli4.eq[i].msix_vector = i;

		emlxs_data_dump(port, "EQ0_CREATE", (uint32_t *)mb, 18, 0);
		num_wq = cfg[CFG_NUM_WQ].current;
		num_cq = num_wq;
		if (i == 0) {
			/* One for RQ handling, one for mbox/event handling */
			num_cq += EMLXS_CQ_OFFSET_WQ;
		}

		/* Create CQ's */
		for (j = 0; j < num_cq; j++) {
			/* Reuse mbq from previous mbox */
			bzero(mbq, sizeof (MAILBOXQ));

			hba->sli.sli4.cq[total_cq].eqid =
			    hba->sli.sli4.eq[i].qid;

			emlxs_mb_cq_create(hba, mbq, total_cq);
			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg, "Unable to Create "
				    "CQ %d: Mailbox cmd=%x status=%x ",
				    total_cq, mb->mbxCommand, mb->mbxStatus);
				return (EIO);
			}
			cq = (IOCTL_COMMON_CQ_CREATE *)
			    &mb->un.varSLIConfig.payload;
			hba->sli.sli4.cq[total_cq].qid =
			    cq->params.response.CQId;

			switch (total_cq) {
			case EMLXS_CQ_MBOX:
				/* First CQ is for async event handling */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP1;
				break;

			case EMLXS_CQ_RCV:
				/* Second CQ is for unsol receive handling */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP2;
				break;

			default:
				/* Setup CQ to channel mapping */
				hba->sli.sli4.cq[total_cq].type =
				    EMLXS_CQ_TYPE_GROUP2;
				hba->sli.sli4.cq[total_cq].channelp =
				    &hba->chan[total_cq - EMLXS_CQ_OFFSET_WQ];
				break;
			}
			emlxs_data_dump(port, "CQX_CREATE", (uint32_t *)mb,
			    18, 0);
			total_cq++;
		}

		/* Create WQ's */
		for (j = 0; j < num_wq; j++) {
			/* Reuse mbq from previous mbox */
			bzero(mbq, sizeof (MAILBOXQ));

			hba->sli.sli4.wq[total_wq].cqid =
			    hba->sli.sli4.cq[total_wq + EMLXS_CQ_OFFSET_WQ].qid;

			emlxs_mb_wq_create(hba, mbq, total_wq);
			if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
			    MBX_SUCCESS) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_init_failed_msg, "Unable to Create "
				    "WQ %d: Mailbox cmd=%x status=%x ",
				    total_wq, mb->mbxCommand, mb->mbxStatus);
				return (EIO);
			}
			wq = (IOCTL_FCOE_WQ_CREATE *)
			    &mb->un.varSLIConfig.payload;
			hba->sli.sli4.wq[total_wq].qid =
			    wq->params.response.WQId;

			hba->sli.sli4.wq[total_wq].cqid =
			    hba->sli.sli4.cq[total_wq+EMLXS_CQ_OFFSET_WQ].qid;
			emlxs_data_dump(port, "WQ_CREATE", (uint32_t *)mb,
			    18, 0);
			total_wq++;
		}
		hba->last_msiid = i;
	}

	/* We assume 1 RQ pair will handle ALL incoming data */
	/* Create RQs */
	for (i = 0; i < EMLXS_MAX_RQS; i++) {
		/* Personalize the RQ */
		switch (i) {
		case 0:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		case 1:
			hba->sli.sli4.rq[i].cqid =
			    hba->sli.sli4.cq[EMLXS_CQ_RCV].qid;
			break;
		default:
			hba->sli.sli4.rq[i].cqid = 0xffff;
		}

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		emlxs_mb_rq_create(hba, mbq, i);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create RQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}

		rq = (IOCTL_FCOE_RQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.rq[i].qid = rq->params.response.RQId;
		emlxs_data_dump(port, "RQ CREATE", (uint32_t *)mb, 18, 0);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "RQ CREATE: rq[%d].qid=%d cqid=%d",
		    i, hba->sli.sli4.rq[i].qid, hba->sli.sli4.rq[i].cqid);

		/* Initialize the host_index */
		hba->sli.sli4.rq[i].host_index = 0;

		/* If Data queue was just created, */
		/* then post buffers using the header qid */
		if ((i & 0x1)) {
			/* Ring the RQ doorbell to post buffers */
			rqdb.word = 0;
			rqdb.db.Qid = hba->sli.sli4.rq[i-1].qid;
			rqdb.db.NumPosted = RQB_COUNT;

			emlxs_sli4_write_rqdb(hba, rqdb.word);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			    "RQ CREATE: Doorbell rang: qid=%d count=%d",
			    hba->sli.sli4.rq[i-1].qid, RQB_COUNT);
		}
	}

	/* Create MQ */

	/* Personalize the MQ */
	hba->sli.sli4.mq.cqid = hba->sli.sli4.cq[EMLXS_CQ_MBOX].qid;

	/* Reuse mbq from previous mbox */
	bzero(mbq, sizeof (MAILBOXQ));

	emlxs_mb_mq_create_ext(hba, mbq);
	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
	    MBX_SUCCESS) {
		/*
		 * NOTE(review): 'i' is stale here (left at EMLXS_MAX_RQS by
		 * the RQ loop), so the %d in these MQ messages is not a
		 * meaningful queue index — confirm before relying on it.
		 */
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to Create MQ_EXT %d: Mailbox cmd=%x status=%x ",
		    i, mb->mbxCommand, mb->mbxStatus);

		/* Reuse mbq from previous mbox */
		bzero(mbq, sizeof (MAILBOXQ));

		/* Fall back to the basic (non-extended) MQ create */
		emlxs_mb_mq_create(hba, mbq);
		if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) !=
		    MBX_SUCCESS) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
			    "Unable to Create MQ %d: Mailbox cmd=%x status=%x ",
			    i, mb->mbxCommand, mb->mbxStatus);
			return (EIO);
		}

		mq = (IOCTL_COMMON_MQ_CREATE *)&mb->un.varSLIConfig.payload;
		hba->sli.sli4.mq.qid = mq->params.response.MQId;
		return (0);
	}

	mq_ext = (IOCTL_COMMON_MQ_CREATE_EXT *)&mb->un.varSLIConfig.payload;
	hba->sli.sli4.mq.qid = mq_ext->params.response.MQId;
	return (0);

} /* emlxs_sli4_create_queues() */
8582 8668
8583 8669
8584 8670 extern void
8585 8671 emlxs_sli4_timer(emlxs_hba_t *hba)
8586 8672 {
8587 8673 /* Perform SLI4 level timer checks */
8588 8674
8589 8675 emlxs_fcf_timer_notify(hba);
8590 8676
8591 8677 emlxs_sli4_timer_check_mbox(hba);
8592 8678
8593 8679 return;
8594 8680
8595 8681 } /* emlxs_sli4_timer() */
8596 8682
8597 8683
/*
 * emlxs_sli4_timer_check_mbox
 *
 * Check whether the outstanding mailbox command has exceeded its timeout.
 * On expiry: log the command (per queue mode), flag FC_MBOX_TIMEOUT, move
 * the adapter to FC_ERROR, wake any waiters via emlxs_mb_fini(), and spawn
 * the shutdown thread.  No-op if timeout checking is disabled by config.
 */
static void
emlxs_sli4_timer_check_mbox(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	MAILBOX *mb = NULL;

	if (!cfg[CFG_TIMEOUT_ENABLE].current) {
		return;
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Return if timer hasn't expired */
	if (!hba->mbox_timer || (hba->timer_tics < hba->mbox_timer)) {
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* The first to service the mbox queue will clear the timer */
	hba->mbox_timer = 0;

	/* Snapshot the active mailbox, if any, for logging below */
	if (hba->mbox_queue_flag) {
		if (hba->mbox_mbq) {
			mb = (MAILBOX *)hba->mbox_mbq;
		}
	}

	if (mb) {
		/* Log the timeout, tagged with how the command was issued */
		switch (hba->mbox_queue_flag) {
		case MBX_NOWAIT:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: Nowait.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand));
			break;

		case MBX_SLEEP:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Sleep.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		case MBX_POLL:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p Polled.",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg,
			    "%s: mb=%p (%d).",
			    emlxs_mb_cmd_xlate(mb->mbxCommand),
			    mb, hba->mbox_queue_flag);
			break;
		}
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mbox_timeout_msg, NULL);
	}

	hba->flag |= FC_MBOX_TIMEOUT;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_ERROR);

	mutex_exit(&EMLXS_PORT_LOCK);

	/* Perform mailbox cleanup */
	/* This will wake any sleeping or polling threads */
	emlxs_mb_fini(hba, NULL, MBX_TIMEOUT);

	/* Trigger adapter shutdown */
	emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	return;

} /* emlxs_sli4_timer_check_mbox() */
8674 8760
8761 +static void
8762 +emlxs_sli4_gpio_timer_start(emlxs_hba_t *hba)
8763 +{
8764 + mutex_enter(&hba->gpio_lock);
8675 8765
8766 + if (!hba->gpio_timer) {
8767 + hba->gpio_timer = timeout(emlxs_sli4_gpio_timer, (void *)hba,
8768 + drv_usectohz(100000));
8769 + }
8770 +
8771 + mutex_exit(&hba->gpio_lock);
8772 +
8773 +} /* emlxs_sli4_gpio_timer_start() */
8774 +
/*
 * Cancel the periodic GPIO polling timer.
 *
 * Clearing gpio_timer under gpio_lock prevents emlxs_sli4_gpio_timer()
 * from rearming itself if it is already running.  The trailing delay
 * presumably allows an in-flight timer callback and any outstanding
 * GPIO mailbox completion to drain before teardown continues --
 * TODO(review): confirm against callers.
 */
static void
emlxs_sli4_gpio_timer_stop(emlxs_hba_t *hba)
{
	mutex_enter(&hba->gpio_lock);

	if (hba->gpio_timer) {
		(void) untimeout(hba->gpio_timer);
		hba->gpio_timer = 0;
	}

	mutex_exit(&hba->gpio_lock);

	delay(drv_usectohz(300000));
} /* emlxs_sli4_gpio_timer_stop() */
8789 +
/*
 * Periodic timeout(9F) callback: refresh the GPIO (LED) state and
 * rearm for another 100ms tick.
 *
 * The gpio_timer check under gpio_lock makes the callback a no-op if
 * emlxs_sli4_gpio_timer_stop() cancelled the timer concurrently.
 */
static void
emlxs_sli4_gpio_timer(void *arg)
{
	emlxs_hba_t *hba = (emlxs_hba_t *)arg;

	mutex_enter(&hba->gpio_lock);

	if (hba->gpio_timer) {
		emlxs_sli4_check_gpio(hba);
		hba->gpio_timer = timeout(emlxs_sli4_gpio_timer, (void *)hba,
		    drv_usectohz(100000));
	}

	mutex_exit(&hba->gpio_lock);
} /* emlxs_sli4_gpio_timer() */
8805 +
/*
 * Compute the desired GPIO (LED) state from the current link state and
 * I/O activity, and issue a no-wait GPIO write mailbox command if the
 * hardware pins need to change.
 *
 * Called from emlxs_sli4_gpio_timer() with gpio_lock held.
 * On any failure to start the mailbox, gpio_bit (set by
 * emlxs_sli4_fix_gpio()) is cleared so a later pass can retry.
 */
static void
emlxs_sli4_check_gpio(emlxs_hba_t *hba)
{
	hba->gpio_desired = 0;

	if (hba->flag & FC_GPIO_LINK_UP) {
		/* Activity LED follows outstanding I/O */
		if (hba->io_active)
			hba->gpio_desired |= EMLXS_GPIO_ACT;

		/* This is model specific to ATTO gen5 lancer cards */

		switch (hba->linkspeed) {
		case LA_4GHZ_LINK:
			hba->gpio_desired |= EMLXS_GPIO_LO;
			break;

		case LA_8GHZ_LINK:
			hba->gpio_desired |= EMLXS_GPIO_HI;
			break;

		case LA_16GHZ_LINK:
			hba->gpio_desired |=
			    EMLXS_GPIO_LO | EMLXS_GPIO_HI;
			break;
		}
	}

	if (hba->gpio_current != hba->gpio_desired) {
		emlxs_port_t *port = &PPORT;
		uint8_t pin;
		uint8_t pinval;
		MAILBOXQ *mbq;
		uint32_t rval;

		/* Nothing to do if no pin change or a request is pending */
		if (!emlxs_sli4_fix_gpio(hba, &pin, &pinval))
			return;

		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Unable to allocate GPIO mailbox.");

			hba->gpio_bit = 0;
			return;
		}

		emlxs_mb_gpio_write(hba, mbq, pin, pinval);
		mbq->mbox_cmpl = emlxs_sli4_fix_gpio_mbcmpl;

		rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);

		if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Unable to start GPIO mailbox.");

			hba->gpio_bit = 0;
			emlxs_mem_put(hba, MEM_MBOX, mbq);
			return;
		}
	}
} /* emlxs_sli4_check_gpio */
8866 +
/*
 * Select the next single GPIO pin that must change to move gpio_current
 * toward gpio_desired.
 *
 * Returns 1 with *pin/*pinval filled in (and gpio_bit recording the
 * in-flight bit) when a write is needed; returns 0 when the pins
 * already match or a previous GPIO request is still outstanding.
 */
static uint32_t
emlxs_sli4_fix_gpio(emlxs_hba_t *hba, uint8_t *pin, uint8_t *pinval)
{
	uint8_t dif = hba->gpio_desired ^ hba->gpio_current;
	uint8_t bit;
	uint8_t i;

	/* Get out if there are no pins to set or a GPIO request is pending */

	if (dif == 0 || hba->gpio_bit)
		return (0);

	/* Fix one pin at a time */

	bit = dif & -dif;	/* isolate the lowest differing bit */
	hba->gpio_bit = bit;
	dif = hba->gpio_current ^ bit;	/* pin state after this write */

	/* Shift down until 'bit' is at position 0, tracking the pin index */
	for (i = EMLXS_GPIO_PIN_LO; bit > 1; ++i) {
		dif >>= 1;
		bit >>= 1;
	}

	/* Pins are active low so invert the bit value */

	*pin = hba->gpio_pin[i];
	*pinval = ~dif & bit;

	return (1);
} /* emlxs_sli4_fix_gpio */
8897 +
/*
 * Completion handler for the GPIO write mailbox command.
 *
 * On success, commits the in-flight bit (gpio_bit) into gpio_current.
 * Then, if more pins still differ from the desired state, immediately
 * issues the next GPIO write.  Always returns 0.
 */
static uint32_t
emlxs_sli4_fix_gpio_mbcmpl(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	MAILBOX *mb;
	uint8_t pin;
	uint8_t pinval;

	mb = (MAILBOX *)mbq;

	mutex_enter(&hba->gpio_lock);

	/* A clean status means the pin write took effect */
	if (mb->mbxStatus == 0)
		hba->gpio_current ^= hba->gpio_bit;

	hba->gpio_bit = 0;

	if (emlxs_sli4_fix_gpio(hba, &pin, &pinval)) {
		emlxs_port_t *port = &PPORT;
		MAILBOXQ *mbq;	/* deliberately shadows the parameter */
		uint32_t rval;

		/*
		 * We're not using the mb_retry routine here because for some
		 * reason it doesn't preserve the completion routine. Just let
		 * this mbox cmd fail to start here and run when the mailbox
		 * is no longer busy.
		 */

		if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Unable to allocate GPIO mailbox.");

			hba->gpio_bit = 0;
			goto done;
		}

		emlxs_mb_gpio_write(hba, mbq, pin, pinval);
		mbq->mbox_cmpl = emlxs_sli4_fix_gpio_mbcmpl;

		rval = emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_NOWAIT, 0);

		if ((rval != MBX_BUSY) && (rval != MBX_SUCCESS)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
			    "Unable to start GPIO mailbox.");

			hba->gpio_bit = 0;
			emlxs_mem_put(hba, MEM_MBOX, mbq);
			goto done;
		}
	}

done:
	mutex_exit(&hba->gpio_lock);

	return (0);
} /* emlxs_sli4_fix_gpio_mbcmpl() */
8954 +
/*
 * Log up to 42 words from 'iptr' in rows of six, each row prefixed with
 * 'str' and the starting word offset.  'err' selects the error versus
 * detail message channel.  Rows past 'cnt' words are suppressed.
 */
extern void
emlxs_data_dump(emlxs_port_t *port, char *str, uint32_t *iptr, int cnt, int err)
{
	void *msg;

	if (!port || !str || !iptr || !cnt) {
		return;
	}

	if (err) {
		msg = &emlxs_sli_err_msg;
	} else {
		msg = &emlxs_sli_detail_msg;
	}

	/* cnt is known non-zero here; check retained from original code */
	if (cnt) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s00: %08x %08x %08x %08x %08x %08x", str, *iptr,
		    *(iptr+1), *(iptr+2), *(iptr+3), *(iptr+4), *(iptr+5));
	}
	if (cnt > 6) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s06: %08x %08x %08x %08x %08x %08x", str, *(iptr+6),
		    *(iptr+7), *(iptr+8), *(iptr+9), *(iptr+10), *(iptr+11));
	}
	if (cnt > 12) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s12: %08x %08x %08x %08x %08x %08x", str, *(iptr+12),
		    *(iptr+13), *(iptr+14), *(iptr+15), *(iptr+16), *(iptr+17));
	}
	if (cnt > 18) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s18: %08x %08x %08x %08x %08x %08x", str, *(iptr+18),
		    *(iptr+19), *(iptr+20), *(iptr+21), *(iptr+22), *(iptr+23));
	}
	if (cnt > 24) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s24: %08x %08x %08x %08x %08x %08x", str, *(iptr+24),
		    *(iptr+25), *(iptr+26), *(iptr+27), *(iptr+28), *(iptr+29));
	}
	if (cnt > 30) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s30: %08x %08x %08x %08x %08x %08x", str, *(iptr+30),
		    *(iptr+31), *(iptr+32), *(iptr+33), *(iptr+34), *(iptr+35));
	}
	if (cnt > 36) {
		EMLXS_MSGF(EMLXS_CONTEXT, msg,
		    "%s36: %08x %08x %08x %08x %08x %08x", str, *(iptr+36),
		    *(iptr+37), *(iptr+38), *(iptr+39), *(iptr+40), *(iptr+41));
	}

} /* emlxs_data_dump() */
8728 9007
8729 9008
/*
 * Dump the SLI4 unrecoverable-error registers for diagnostics, using the
 * register layout appropriate to the SLI interface type (type 0 reads
 * via PCI config/BAR; type 2 reads the port STATUS/ERR1/ERR2 registers
 * through BAR0).
 */
extern void
emlxs_ue_dump(emlxs_hba_t *hba, char *str)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t on1;
	uint32_t on2;

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		on1 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE1));
		on2 = ddi_get32(hba->pci_acc_handle,
		    (uint32_t *)(hba->pci_addr + PCICFG_UE_STATUS_ONLINE2));

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: ueLow:%08x ueHigh:%08x on1:%08x on2:%08x", str,
		    ue_l, ue_h, on1, on2);
		break;

	case SLI_INTF_IF_TYPE_2:
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "%s: status:%08x err1:%08x err2:%08x", str,
		    status, ue_l, ue_h);

		break;
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_ue_dump() */
8779 9058
8780 9059
/*
 * Poll the adapter error registers for an unrecoverable-error condition.
 *
 * error == 1 means the port asked for a reset (restart thread spawned);
 * error == 2 means a fatal condition (shutdown thread spawned).  In both
 * cases the state machine is moved to FC_ERROR and the chip queues are
 * flushed.  No-op if a hardware error has already been latched.
 */
static void
emlxs_sli4_poll_erratt(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t status;
	uint32_t ue_h;
	uint32_t ue_l;
	uint32_t error = 0;

	if (hba->flag & FC_HARDWARE_ERROR) {
		return;
	}

	switch (hba->sli_intf & SLI_INTF_IF_TYPE_MASK) {
	case SLI_INTF_IF_TYPE_0:
		ue_l = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR1_reg_addr);
		ue_h = ddi_get32(hba->pci_acc_handle,
		    hba->sli.sli4.ERR2_reg_addr);

		/* Any unmasked error bit (or a latched SW flag) is fatal */
		if ((~hba->sli.sli4.ue_mask_lo & ue_l) ||
		    (~hba->sli.sli4.ue_mask_hi & ue_h) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_hardware_error_msg,
			    "Host Error: ueLow:%08x ueHigh:%08x maskLow:%08x "
			    "maskHigh:%08x flag:%08x",
			    ue_l, ue_h, hba->sli.sli4.ue_mask_lo,
			    hba->sli.sli4.ue_mask_hi, hba->sli.sli4.flag);

			error = 2;
		}
		break;

	case SLI_INTF_IF_TYPE_2:
		status = ddi_get32(hba->sli.sli4.bar0_acc_handle,
		    hba->sli.sli4.STATUS_reg_addr);

		if ((status & SLI_STATUS_ERROR) ||
		    (hba->sli.sli4.flag & EMLXS_SLI4_HW_ERROR)) {
			ue_l = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR1_reg_addr);
			ue_h = ddi_get32(hba->sli.sli4.bar0_acc_handle,
			    hba->sli.sli4.ERR2_reg_addr);

			/* RESET_NEEDED downgrades fatal (2) to restart (1) */
			error = (status & SLI_STATUS_RESET_NEEDED)? 1:2;

			if (error == 1) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			} else {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_hardware_error_msg,
				    "Host Error: status:%08x err1:%08x "
				    "err2:%08x flag:%08x",
				    status, ue_l, ue_h, hba->sli.sli4.flag);
			}
		}
		break;
	}

	if (error == 2) {
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_shutdown_thread, 0, 0);

	} else if (error == 1) {
		EMLXS_STATE_CHANGE(hba, FC_ERROR);

		emlxs_sli4_hba_flush_chipq(hba);

		emlxs_thread_spawn(hba, emlxs_restart_thread, 0, 0);
	}

#ifdef FMA_SUPPORT
	/* Access handle validation */
	EMLXS_CHK_ACC_HANDLE(hba, hba->pci_acc_handle);
#endif  /* FMA_SUPPORT */

} /* emlxs_sli4_poll_erratt() */
8864 9143
8865 9144
/*
 * Validate and begin registration of a remote node DID with the port.
 *
 * Returns 1 on immediate rejection (invalid/own DID, bad service
 * parameters, node limit reached); otherwise the result of
 * emlxs_rpi_online_notify(), which carries the deferred sbp/ubp/iocbq
 * through the RPI state machine.
 */
static uint32_t
emlxs_sli4_reg_did(emlxs_port_t *port, uint32_t did, SERV_PARM *param,
    emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	emlxs_hba_t *hba = HBA;
	NODELIST *node;
	RPIobj_t *rpip;
	uint32_t rval;

	/* Check for invalid node ids to register */
	if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
		return (1);
	}

	/* DIDs are 24-bit; upper byte must be clear */
	if (did & 0xff000000) {
		return (1);
	}

	/* We don't register our own did */
	if ((did == port->did) && (!(hba->flag & FC_LOOPBACK_MODE))) {
		return (1);
	}

	if (did != FABRIC_DID) {
		if ((rval = emlxs_mb_check_sparm(hba, param))) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
			    "Invalid service parameters. did=%06x rval=%d", did,
			    rval);

			return (1);
		}
	}

	/* Check if the node limit has been reached */
	if (port->node_count >= hba->max_nodes) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_failed_msg,
		    "Limit reached. did=%06x count=%d", did,
		    port->node_count);

		return (1);
	}

	node = emlxs_node_find_did(port, did, 1);
	rpip = EMLXS_NODE_TO_RPI(port, node);

	rval = emlxs_rpi_online_notify(port, rpip, did, param, (void *)sbp,
	    (void *)ubp, (void *)iocbq);

	return (rval);

} /* emlxs_sli4_reg_did() */
8917 9196
8918 9197
/*
 * Unregister a remote node from the port.
 *
 * node == NULL unregisters every node; the base node is only flushed
 * (never RPI-unregistered).  Otherwise the node's RPI is taken offline
 * via emlxs_rpi_offline_notify(), carrying sbp/ubp/iocbq for deferred
 * completion.  Returns 1 when handled synchronously, else the notify
 * routine's result.
 */
static uint32_t
emlxs_sli4_unreg_node(emlxs_port_t *port, emlxs_node_t *node,
    emlxs_buf_t *sbp, fc_unsol_buf_t *ubp, IOCBQ *iocbq)
{
	RPIobj_t *rpip;
	uint32_t rval;

	if (!node) {
		/* Unreg all nodes */
		(void) emlxs_sli4_unreg_all_nodes(port);
		return (1);
	}

	/* Check for base node */
	if (node == &port->node_base) {
		/* Just flush base node */
		(void) emlxs_tx_node_flush(port, &port->node_base,
		    0, 0, 0);

		(void) emlxs_chipq_node_flush(port, 0,
		    &port->node_base, 0);

		port->did = 0;

		/* Return now */
		return (1);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	    "unreg_node:%p did=%x rpi=%d",
	    node, node->nlp_DID, node->nlp_Rpi);

	rpip = EMLXS_NODE_TO_RPI(port, node);

	if (!rpip) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "unreg_node:%p did=%x rpi=%d. RPI not found.",
		    node, node->nlp_DID, node->nlp_Rpi);

		/* No RPI to offline; just remove the node entry */
		emlxs_node_rm(port, node);
		return (1);
	}

	rval = emlxs_rpi_offline_notify(port, rpip, (void *)sbp, (void *)ubp,
	    (void *)iocbq);

	return (rval);

} /* emlxs_sli4_unreg_node() */
8968 9247
8969 9248
/*
 * Unregister every node on the port.
 *
 * Uses a tag-and-rescan protocol: first all nodes are tagged under the
 * node_rwlock, then the table is rescanned repeatedly, unregistering
 * one tagged node per pass.  The per-node unregister is done outside
 * the rwlock because it may remove entries and relink the hash chains.
 * Always returns 0.
 */
extern uint32_t
emlxs_sli4_unreg_all_nodes(emlxs_port_t *port)
{
	NODELIST *nlp;
	int i;
	uint32_t found;

	/* Set the node tags */
	/* We will process all nodes with this tag */
	rw_enter(&port->node_rwlock, RW_READER);
	found = 0;
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			found = 1;
			nlp->nlp_tag = 1;
			nlp = nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);

	if (!found) {
		return (0);
	}

	for (;;) {
		/* Find the next tagged node, one per pass */
		rw_enter(&port->node_rwlock, RW_READER);
		found = 0;
		for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
			nlp = port->node_table[i];
			while (nlp != NULL) {
				if (!nlp->nlp_tag) {
					nlp = nlp->nlp_list_next;
					continue;
				}
				nlp->nlp_tag = 0;
				found = 1;
				break;
			}

			if (found) {
				break;
			}
		}
		rw_exit(&port->node_rwlock);

		if (!found) {
			break;
		}

		/* Unregister outside the lock; this may modify the table */
		(void) emlxs_sli4_unreg_node(port, nlp, 0, 0, 0);
	}

	return (0);

} /* emlxs_sli4_unreg_all_nodes() */
9026 9305
9027 9306
/*
 * Handle an FCoE async link-state CQE: propagate link down, or record
 * link speed/QoS/topology and notify link up.  The DOWN_LINK flag is
 * cleared in either direction.
 */
static void
emlxs_sli4_handle_fcoe_link_event(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
{
	emlxs_port_t *port = &PPORT;

	/* Handle link down */
	if ((cqe->un.link.link_status == ASYNC_EVENT_LOGICAL_LINK_DOWN) ||
	    (cqe->un.link.link_status == ASYNC_EVENT_PHYS_LINK_DOWN)) {
		(void) emlxs_fcf_linkdown_notify(port);

		mutex_enter(&EMLXS_PORT_LOCK);
		hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Link is up */

	/* Set linkspeed */
	switch (cqe->un.link.port_speed) {
	case PHY_1GHZ_LINK:
		hba->linkspeed = LA_1GHZ_LINK;
		break;
	case PHY_10GHZ_LINK:
		hba->linkspeed = LA_10GHZ_LINK;
		break;
	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_handle_fcoe_link_event: Unknown link speed=%x.",
		    cqe->un.link.port_speed);
		hba->linkspeed = 0;
		break;
	}

	/* Set qos_linkspeed */
	hba->qos_linkspeed = cqe->un.link.qos_link_speed;

	/* Set topology; FCoE is always treated as point-to-point */
	hba->topology = TOPOLOGY_PT_PT;

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
	mutex_exit(&EMLXS_PORT_LOCK);

	(void) emlxs_fcf_linkup_notify(port);

	return;

} /* emlxs_sli4_handle_fcoe_link_event() */
9077 9356
9078 9357
/*
 * Handle a native FC async link-attention CQE: propagate link down, or
 * record link speed (port_speed is in Gb/s units here, 1..32), QoS
 * speed and topology, then notify link up.
 */
static void
emlxs_sli4_handle_fc_link_att(emlxs_hba_t *hba, CQE_ASYNC_t *cqe)
{
	emlxs_port_t *port = &PPORT;

	/* Handle link down */
	if (cqe->un.fc.att_type == ATT_TYPE_LINK_DOWN) {
		(void) emlxs_fcf_linkdown_notify(port);

		mutex_enter(&EMLXS_PORT_LOCK);
		hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
		mutex_exit(&EMLXS_PORT_LOCK);
		return;
	}

	/* Link is up */

	/* Set linkspeed */
	switch (cqe->un.fc.port_speed) {
	case 1:
		hba->linkspeed = LA_1GHZ_LINK;
		break;
	case 2:
		hba->linkspeed = LA_2GHZ_LINK;
		break;
	case 4:
		hba->linkspeed = LA_4GHZ_LINK;
		break;
	case 8:
		hba->linkspeed = LA_8GHZ_LINK;
		break;
	case 10:
		hba->linkspeed = LA_10GHZ_LINK;
		break;
	case 16:
		hba->linkspeed = LA_16GHZ_LINK;
		break;
	case 32:
		hba->linkspeed = LA_32GHZ_LINK;
		break;
	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "sli4_handle_fc_link_att: Unknown link speed=%x.",
		    cqe->un.fc.port_speed);
		hba->linkspeed = 0;
		break;
	}

	/* Set qos_linkspeed */
	hba->qos_linkspeed = cqe->un.fc.link_speed;

	/* Set topology */
	hba->topology = cqe->un.fc.topology;

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->sli.sli4.flag &= ~EMLXS_SLI4_DOWN_LINK;
	mutex_exit(&EMLXS_PORT_LOCK);

	(void) emlxs_fcf_linkup_notify(port);

	return;

} /* emlxs_sli4_handle_fc_link_att() */
9139 9421
9140 9422
/*
 * Discover and allocate the SLI4 resource extents (XRI, RPI, VPI, VFI)
 * via mailbox commands, when the adapter reports extent support
 * (param.EXT).  For each resource type the same two-step sequence is
 * used: GET_EXTENTS_INFO to learn the extent size/count, then
 * ALLOC_EXTENTS to obtain the base IDs, which are copied into the
 * corresponding *Base table and used to derive the total *Count.
 *
 * 'mbq' is caller-supplied scratch, re-zeroed before every command.
 * Returns 0 on success (or when extents are unsupported), EIO on any
 * mailbox failure.
 */
static int
emlxs_sli4_init_extents(emlxs_hba_t *hba, MAILBOXQ *mbq)
{
	emlxs_port_t *port = &PPORT;
	MAILBOX4 *mb4;
	IOCTL_COMMON_EXTENTS *ep;
	uint32_t i;
	uint32_t ExtentCnt;

	/* Nothing to do if the adapter does not use resource extents */
	if (!(hba->sli.sli4.param.EXT)) {
		return (0);
	}

	mb4 = (MAILBOX4 *) mbq;

	/* Discover XRI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_XRI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover XRI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.XRIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate XRI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_XRI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate XRI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	/* Save the returned base IDs (16-bit each) */
	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.XRIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.XRIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.XRICount = hba->sli.sli4.XRIExtCount *
	    hba->sli.sli4.XRIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "XRI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.XRIExtSize,
	    hba->sli.sli4.XRIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "XRI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.XRIBase[i],
		    hba->sli.sli4.XRIBase[i+1],
		    hba->sli.sli4.XRIBase[i+2],
		    hba->sli.sli4.XRIBase[i+3]);
	}


	/* Discover RPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_RPI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover RPI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.RPIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate RPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_RPI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate RPI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.RPIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.RPIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.RPICount = hba->sli.sli4.RPIExtCount *
	    hba->sli.sli4.RPIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "RPI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.RPIExtSize,
	    hba->sli.sli4.RPIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "RPI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.RPIBase[i],
		    hba->sli.sli4.RPIBase[i+1],
		    hba->sli.sli4.RPIBase[i+2],
		    hba->sli.sli4.RPIBase[i+3]);
	}


	/* Discover VPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VPI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover VPI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.VPIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate VPI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VPI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate VPI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.VPIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.VPIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.VPICount = hba->sli.sli4.VPIExtCount *
	    hba->sli.sli4.VPIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "VPI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.VPIExtSize,
	    hba->sli.sli4.VPIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "VPI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.VPIBase[i],
		    hba->sli.sli4.VPIBase[i+1],
		    hba->sli.sli4.VPIBase[i+2],
		    hba->sli.sli4.VPIBase[i+3]);
	}

	/* Discover VFI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_get_extents_info(hba, mbq, RSC_TYPE_FCOE_VFI);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to discover VFI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}

	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;
	hba->sli.sli4.VFIExtSize = ep->params.response.ExtentSize;
	ExtentCnt = ep->params.response.ExtentCnt;

	/* Allocate VFI Extents */
	bzero(mbq, sizeof (MAILBOXQ));
	emlxs_mb_alloc_extents(hba, mbq, RSC_TYPE_FCOE_VFI, ExtentCnt);

	if (emlxs_sli4_issue_mbox_cmd(hba, mbq, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg,
		    "Unable to allocate VFI extents. Mailbox cmd=%x status=%x",
		    mb4->mbxCommand, mb4->mbxStatus);

		return (EIO);
	}
	ep = (IOCTL_COMMON_EXTENTS *)&mb4->un.varSLIConfig.payload;

	bcopy((uint8_t *)ep->params.response.RscId,
	    (uint8_t *)hba->sli.sli4.VFIBase,
	    (ep->params.response.ExtentCnt * sizeof (uint16_t)));

	hba->sli.sli4.VFIExtCount = ep->params.response.ExtentCnt;
	hba->sli.sli4.VFICount = hba->sli.sli4.VFIExtCount *
	    hba->sli.sli4.VFIExtSize;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
	    "VFI Ext: size=%d cnt=%d/%d",
	    hba->sli.sli4.VFIExtSize,
	    hba->sli.sli4.VFIExtCount, ExtentCnt);

	for (i = 0; i < ep->params.response.ExtentCnt; i += 4) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "VFI Ext%d: %d, %d, %d, %d", i,
		    hba->sli.sli4.VFIBase[i],
		    hba->sli.sli4.VFIBase[i+1],
		    hba->sli.sli4.VFIBase[i+2],
		    hba->sli.sli4.VFIBase[i+3]);
	}

	return (0);

} /* emlxs_sli4_init_extents() */
9365 9647
9366 9648
9367 9649 extern uint32_t
9368 9650 emlxs_sli4_index_to_rpi(emlxs_hba_t *hba, uint32_t index)
9369 9651 {
9370 9652 uint32_t i;
9371 9653 uint32_t j;
9372 9654 uint32_t rpi;
9373 9655
9374 9656 i = index / hba->sli.sli4.RPIExtSize;
9375 9657 j = index % hba->sli.sli4.RPIExtSize;
9376 9658 rpi = hba->sli.sli4.RPIBase[i] + j;
9377 9659
9378 9660 return (rpi);
9379 9661
9380 9662 } /* emlxs_sli4_index_to_rpi */
9381 9663
9382 9664
9383 9665 extern uint32_t
9384 9666 emlxs_sli4_rpi_to_index(emlxs_hba_t *hba, uint32_t rpi)
9385 9667 {
9386 9668 uint32_t i;
9387 9669 uint32_t lo;
9388 9670 uint32_t hi;
9389 9671 uint32_t index = hba->sli.sli4.RPICount;
9390 9672
9391 9673 for (i = 0; i < hba->sli.sli4.RPIExtCount; i++) {
9392 9674 lo = hba->sli.sli4.RPIBase[i];
9393 9675 hi = lo + hba->sli.sli4.RPIExtSize;
9394 9676
9395 9677 if ((rpi < hi) && (rpi >= lo)) {
9396 9678 index = (i * hba->sli.sli4.RPIExtSize) + (rpi - lo);
9397 9679 break;
9398 9680 }
9399 9681 }
9400 9682
9401 9683 return (index);
9402 9684
9403 9685 } /* emlxs_sli4_rpi_to_index */
9404 9686
9405 9687
9406 9688 extern uint32_t
9407 9689 emlxs_sli4_index_to_xri(emlxs_hba_t *hba, uint32_t index)
9408 9690 {
9409 9691 uint32_t i;
9410 9692 uint32_t j;
9411 9693 uint32_t xri;
9412 9694
9413 9695 i = index / hba->sli.sli4.XRIExtSize;
9414 9696 j = index % hba->sli.sli4.XRIExtSize;
9415 9697 xri = hba->sli.sli4.XRIBase[i] + j;
9416 9698
9417 9699 return (xri);
9418 9700
9419 9701 } /* emlxs_sli4_index_to_xri */
9420 9702
9421 9703
9422 9704
9423 9705
9424 9706 extern uint32_t
9425 9707 emlxs_sli4_index_to_vpi(emlxs_hba_t *hba, uint32_t index)
9426 9708 {
9427 9709 uint32_t i;
9428 9710 uint32_t j;
9429 9711 uint32_t vpi;
9430 9712
9431 9713 i = index / hba->sli.sli4.VPIExtSize;
9432 9714 j = index % hba->sli.sli4.VPIExtSize;
9433 9715 vpi = hba->sli.sli4.VPIBase[i] + j;
9434 9716
9435 9717 return (vpi);
9436 9718
9437 9719 } /* emlxs_sli4_index_to_vpi */
9438 9720
9439 9721
9440 9722 extern uint32_t
9441 9723 emlxs_sli4_vpi_to_index(emlxs_hba_t *hba, uint32_t vpi)
9442 9724 {
9443 9725 uint32_t i;
9444 9726 uint32_t lo;
9445 9727 uint32_t hi;
9446 9728 uint32_t index = hba->sli.sli4.VPICount;
9447 9729
9448 9730 for (i = 0; i < hba->sli.sli4.VPIExtCount; i++) {
9449 9731 lo = hba->sli.sli4.VPIBase[i];
9450 9732 hi = lo + hba->sli.sli4.VPIExtSize;
9451 9733
9452 9734 if ((vpi < hi) && (vpi >= lo)) {
9453 9735 index = (i * hba->sli.sli4.VPIExtSize) + (vpi - lo);
9454 9736 break;
9455 9737 }
9456 9738 }
9457 9739
9458 9740 return (index);
9459 9741
9460 9742 } /* emlxs_sli4_vpi_to_index */
9461 9743
9462 9744
9463 9745
9464 9746
9465 9747 extern uint32_t
9466 9748 emlxs_sli4_index_to_vfi(emlxs_hba_t *hba, uint32_t index)
9467 9749 {
9468 9750 uint32_t i;
9469 9751 uint32_t j;
9470 9752 uint32_t vfi;
9471 9753
9472 9754 i = index / hba->sli.sli4.VFIExtSize;
9473 9755 j = index % hba->sli.sli4.VFIExtSize;
9474 9756 vfi = hba->sli.sli4.VFIBase[i] + j;
9475 9757
9476 9758 return (vfi);
9477 9759
9478 9760 } /* emlxs_sli4_index_to_vfi */
9479 9761
9480 9762
9481 9763 static uint16_t
9482 9764 emlxs_sli4_rqid_to_index(emlxs_hba_t *hba, uint16_t rqid)
9483 9765 {
9484 9766 uint16_t i;
9485 9767
9486 9768 if (rqid < 0xffff) {
9487 9769 for (i = 0; i < EMLXS_MAX_RQS; i++) {
9488 9770 if (hba->sli.sli4.rq[i].qid == rqid) {
9489 9771 return (i);
9490 9772 }
9491 9773 }
9492 9774 }
9493 9775
9494 9776 return (0xffff);
9495 9777
9496 9778 } /* emlxs_sli4_rqid_to_index */
9497 9779
9498 9780
9499 9781 static uint16_t
9500 9782 emlxs_sli4_wqid_to_index(emlxs_hba_t *hba, uint16_t wqid)
9501 9783 {
9502 9784 uint16_t i;
9503 9785
9504 9786 if (wqid < 0xffff) {
9505 9787 for (i = 0; i < EMLXS_MAX_WQS; i++) {
9506 9788 if (hba->sli.sli4.wq[i].qid == wqid) {
9507 9789 return (i);
9508 9790 }
9509 9791 }
9510 9792 }
9511 9793
9512 9794 return (0xffff);
9513 9795
9514 9796 } /* emlxs_sli4_wqid_to_index */
9515 9797
9516 9798
9517 9799 static uint16_t
9518 9800 emlxs_sli4_cqid_to_index(emlxs_hba_t *hba, uint16_t cqid)
9519 9801 {
9520 9802 uint16_t i;
9521 9803
9522 9804 if (cqid < 0xffff) {
9523 9805 for (i = 0; i < EMLXS_MAX_CQS; i++) {
9524 9806 if (hba->sli.sli4.cq[i].qid == cqid) {
9525 9807 return (i);
9526 9808 }
9527 9809 }
9528 9810 }
9529 9811
9530 9812 return (0xffff);
9531 9813
9532 9814 } /* emlxs_sli4_cqid_to_index */
|
↓ open down ↓ |
407 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX