1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27
28
29 /*
30 * Source file containing Queue handling functions
31 *
32 */
33
34 #include <oce_impl.h>
35
36 int oce_destroy_q(struct oce_dev *oce, struct oce_mbx *mbx, size_t req_size,
37 enum qtype qtype, uint32_t mode);
/* Mailbox queue (MQ) functions */
39 struct oce_mq *
40 oce_mq_create_ext_v0(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);
41 struct oce_mq *
42 oce_mq_create_ext_v1(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);
43 struct oce_mq *
44 oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);
45
46 /* event queue handling */
47 int
48 oce_eq_create(struct oce_dev *dev, struct oce_eq *, uint32_t q_len,
49 uint32_t item_size, uint32_t eq_delay, uint32_t mode);
50
51 /* completion queue handling */
52 struct oce_cq *
53 oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
54 uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
55 boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode);
56
57 struct oce_cq *
58 oce_cq_create_v0(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
59 uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
60 boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode);
61
62 struct oce_cq *
63 oce_cq_create_v2(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
64 uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
65 boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode);
66
67 /* Tx WQ functions */
68 int oce_wq_init(struct oce_dev *dev, struct oce_wq *, uint32_t q_len,
69 int wq_type);
70 static void oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq);
71 static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq, uint32_t mode);
72 static void oce_wq_del(struct oce_dev *dev, struct oce_wq *wq, uint32_t mode);
73 /* Rx Queue functions */
74 static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq,
75 uint32_t mode);
76 static void oce_rq_del(struct oce_dev *dev, struct oce_rq *rq, uint32_t mode);
77
/*
 * function to create an event queue
 *
 * dev - software handle to the device
 * eq - EQ to create
 * q_len - number of entries in the EQ
 * item_size - size of each EQ entry
 * eq_delay - interrupt coalescing delay multiplier
 * mode - mailbox submission mode (e.g. MBX_BOOTSTRAP)
 *
 * return DDI_SUCCESS on success, DDI_FAILURE otherwise
 */
86 int oce_eq_create(struct oce_dev *dev, struct oce_eq *eq,
87 uint32_t q_len, uint32_t item_size, uint32_t eq_delay, uint32_t mode)
88 {
89 struct oce_mbx mbx;
90 struct mbx_create_common_eq *fwcmd;
91 int ret = 0;
92
93 if (eq == NULL) {
94 return (DDI_FAILURE);
95 }
96 mutex_enter(&eq->lock);
97 bzero(&mbx, sizeof (struct oce_mbx));
98 /* allocate mbx */
99 fwcmd = (struct mbx_create_common_eq *)&mbx.payload;
100
101 eq->ring = oce_create_ring_buffer(dev, q_len,
102 item_size, DDI_DMA_CONSISTENT|DDI_DMA_RDWR);
103
	if (eq->ring == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "EQ ring alloc failed:0x%p", (void *)eq->ring);
		mutex_exit(&eq->lock);
		return (DDI_FAILURE);
	}
109
110 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
111 MBX_SUBSYSTEM_COMMON,
112 OPCODE_CREATE_COMMON_EQ, MBX_TIMEOUT_SEC,
113 sizeof (struct mbx_create_common_eq), 0);
114
115 fwcmd->params.req.num_pages = eq->ring->dbuf.num_pages;
116 oce_page_list(&eq->ring->dbuf, &fwcmd->params.req.pages[0],
117 eq->ring->dbuf.num_pages);
118
119 /* dw 0 */
120 fwcmd->params.req.eq_ctx.size = (item_size == 4) ? 0 : 1;
121 fwcmd->params.req.eq_ctx.valid = 1;
122 /* dw 1 */
123 fwcmd->params.req.eq_ctx.armed = 0;
124 fwcmd->params.req.eq_ctx.pd = 0;
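	/* the ring size is encoded as log2(number of entries / 256) */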
125 fwcmd->params.req.eq_ctx.count = OCE_LOG2(q_len/256);
126
127 /* dw 2 */
128 fwcmd->params.req.eq_ctx.nodelay = 0;
129 fwcmd->params.req.eq_ctx.phase = 0;
130 /* todo: calculate multiplier from max min and cur */
131 fwcmd->params.req.eq_ctx.delay_mult = eq_delay;
132
133 /* fill rest of mbx */
134 mbx.u0.s.embedded = 1;
135 mbx.payload_length = sizeof (struct mbx_create_common_eq);
136 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
137
138 /* now post the command */
139 ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
140
	if (ret != 0) {
		oce_log(dev, CE_WARN, MOD_CONFIG,
		    "EQ create failed: 0x%x", ret);
		destroy_ring_buffer(dev, eq->ring);
		eq->ring = NULL;
		mutex_exit(&eq->lock);
		return (DDI_FAILURE);
	}
147
148 /* interpret the response */
149 eq->eq_id = LE_16(fwcmd->params.rsp.eq_id);
150 eq->parent = (void *)dev;
151 atomic_inc_32(&dev->neqs);
152 eq->qstate = QCREATED;
153 mutex_exit(&eq->lock);
154 oce_log(dev, CE_NOTE, MOD_CONFIG,
155 "EQ created, eq=0x%p eq_id=0x%x", (void *)eq, eq->eq_id);
156 /* Save the eq pointer */
157 return (DDI_SUCCESS);
158 } /* oce_eq_create */
159
/*
 * function to delete an event queue
 *
 * dev - software handle to the device
 * eq - handle to the eq to be deleted
 * mode - mailbox submission mode (e.g. MBX_BOOTSTRAP)
 *
 * return none
 */
168 void
169 oce_eq_del(struct oce_dev *dev, struct oce_eq *eq, uint32_t mode)
170 {
171 struct oce_mbx mbx;
172 struct mbx_destroy_common_eq *fwcmd;
173
174 mutex_enter(&eq->lock);
175 eq->qstate = QDELETED;
176 bzero(&mbx, sizeof (struct oce_mbx));
177
178 /* drain the residual events */
179 oce_drain_eq(eq);
180
181 /* destroy the ring */
182 destroy_ring_buffer(dev, eq->ring);
183 eq->ring = NULL;
184
185 /* send a command to delete the EQ */
186 fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
187 fwcmd->params.req.id = eq->eq_id;
188 (void) oce_destroy_q(dev, &mbx,
189 sizeof (struct mbx_destroy_common_eq),
190 QTYPE_EQ, mode);
191 atomic_dec_32(&dev->neqs);
192 mutex_exit(&eq->lock);
193 }
194
195 /*
196 * function to create a V0 completion queue
197 *
198 * dev - software handle to the device
 * eq - eq to associate with the cq
 * remaining arguments - CQ configuration parameters
201 *
202 * return pointer to the cq created. NULL on failure
203 */
204 struct oce_cq *
205 oce_cq_create_v0(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
206 uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
207 boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode)
208 {
209 struct oce_cq *cq = NULL;
210 struct oce_mbx mbx;
211 struct mbx_create_common_cq_v0 *fwcmd;
212 int ret = 0;
213
214 /* create cq */
215 cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);
216 if (cq == NULL) {
217 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
218 "CQ allocation failed");
219 return (NULL);
220 }
221
222 /* create the ring buffer for this queue */
223 cq->ring = oce_create_ring_buffer(dev, q_len,
224 item_size, DDI_DMA_CONSISTENT|DDI_DMA_RDWR);
225 if (cq->ring == NULL) {
226 oce_log(dev, CE_WARN, MOD_CONFIG,
227 "CQ ring alloc failed:0x%p",
228 (void *)cq->ring);
229 kmem_free(cq, sizeof (struct oce_cq));
230 return (NULL);
231 }
232 /* initialize mailbox */
233 bzero(&mbx, sizeof (struct oce_mbx));
234 fwcmd = (struct mbx_create_common_cq_v0 *)&mbx.payload;
235
236 /* fill the command header */
237 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
238 MBX_SUBSYSTEM_COMMON,
239 OPCODE_CREATE_COMMON_CQ, MBX_TIMEOUT_SEC,
240 sizeof (struct mbx_create_common_cq_v0), 0);
241
242 /* fill command context */
243 /* dw0 */
244 fwcmd->params.req.cq_ctx.eventable = is_eventable;
245 fwcmd->params.req.cq_ctx.sol_event = sol_event;
246 fwcmd->params.req.cq_ctx.valid = 1;
247 fwcmd->params.req.cq_ctx.count = OCE_LOG2(q_len/256);
248 fwcmd->params.req.cq_ctx.nodelay = nodelay;
249 fwcmd->params.req.cq_ctx.coalesce_wm = ncoalesce;
250
251 /* dw1 */
252 fwcmd->params.req.cq_ctx.armed = armed;
253 fwcmd->params.req.cq_ctx.eq_id = eq->eq_id;
254 fwcmd->params.req.cq_ctx.pd = 0;
255
256 /* fill the rest of the command */
257 fwcmd->params.req.num_pages = cq->ring->dbuf.num_pages;
258 oce_page_list(&cq->ring->dbuf, &fwcmd->params.req.pages[0],
259 cq->ring->dbuf.num_pages);
260
261 /* fill rest of mbx */
262 mbx.u0.s.embedded = 1;
263 mbx.payload_length = sizeof (struct mbx_create_common_cq_v0);
264 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
265
266 /* now send the mail box */
267 ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
268
269 if (ret != 0) {
270 oce_log(dev, CE_WARN, MOD_CONFIG,
271 "CQ create failed: 0x%x", ret);
272 destroy_ring_buffer(dev, cq->ring);
273 kmem_free(cq, sizeof (struct oce_cq));
274 return (NULL);
275 }
276
277 cq->parent = dev;
	cq->eq = eq; /* associated eq */
279 cq->cq_cfg.q_len = q_len;
280
281 /* interpret the response */
282 cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
283 dev->cq[cq->cq_id % OCE_MAX_CQ] = cq;
284 cq->qstate = QCREATED;
285 atomic_inc_32(&eq->ref_count);
286 return (cq);
287 } /* oce_cq_create_v0 */
288
289 /*
290 * function to create a V2 completion queue
291 *
292 * dev - software handle to the device
 * eq - eq to associate with the cq
 * remaining arguments - CQ configuration parameters
295 *
296 * return pointer to the cq created. NULL on failure
297 */
298 struct oce_cq *
299 oce_cq_create_v2(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
300 uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
301 boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode)
302 {
303 struct oce_cq *cq = NULL;
304 struct oce_mbx mbx;
305 struct mbx_create_common_cq_v2 *fwcmd;
306 int ret = 0;
307
308 _NOTE(ARGUNUSED(sol_event));
309 _NOTE(ARGUNUSED(ncoalesce));
310 /* create cq */
311 cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);
312 if (cq == NULL) {
		oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
		    "CQ allocation failed");
315 return (NULL);
316 }
317
318 /* create the ring buffer for this queue */
319 cq->ring = oce_create_ring_buffer(dev, q_len,
320 item_size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
321 if (cq->ring == NULL) {
322 oce_log(dev, CE_WARN, MOD_CONFIG,
323 "CQ ring alloc failed:0x%p",
324 (void *)cq->ring);
325 kmem_free(cq, sizeof (struct oce_cq));
326 return (NULL);
327 }
328 /* initialize mailbox */
329 bzero(&mbx, sizeof (struct oce_mbx));
330 fwcmd = (struct mbx_create_common_cq_v2 *)&mbx.payload;
331
332 /* fill the command header */
333 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
334 MBX_SUBSYSTEM_COMMON,
335 OPCODE_CREATE_COMMON_CQ, MBX_TIMEOUT_SEC,
336 sizeof (struct mbx_create_common_cq_v2), 2);
337
338 /* fill command context */
339 /* dw0 */
340 fwcmd->params.req.cq_ctx.eventable = is_eventable;
341 fwcmd->params.req.cq_ctx.valid = 1;
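	/*
	 * unlike the V0 command, the V2 context carries the ring size
	 * directly in cqe_count (dw1); the encoded count field is fixed
	 */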
342 fwcmd->params.req.cq_ctx.count = 3;
343 fwcmd->params.req.cq_ctx.nodelay = nodelay;
344 fwcmd->params.req.cq_ctx.coalesce_wm = 0;
345
346 /* dw1 */
347 fwcmd->params.req.cq_ctx.armed = armed;
348 fwcmd->params.req.cq_ctx.eq_id = eq->eq_id;
349 fwcmd->params.req.cq_ctx.cqe_count = q_len;
350
351 fwcmd->params.req.page_size = 1;
352 /* fill the rest of the command */
353 fwcmd->params.req.num_pages = cq->ring->dbuf.num_pages;
354 oce_page_list(&cq->ring->dbuf, &fwcmd->params.req.pages[0],
355 cq->ring->dbuf.num_pages);
356
357 /* fill rest of mbx */
358 mbx.u0.s.embedded = 1;
359 mbx.payload_length = sizeof (struct mbx_create_common_cq_v2);
360 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
361
362 /* now send the mail box */
363 ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
364
365 if (ret != 0) {
366 oce_log(dev, CE_WARN, MOD_CONFIG,
367 "CQ create failed: 0x%x", ret);
368 destroy_ring_buffer(dev, cq->ring);
369 kmem_free(cq, sizeof (struct oce_cq));
370 return (NULL);
371 }
372
373 cq->parent = dev;
	cq->eq = eq; /* associated eq */
375 cq->cq_cfg.q_len = q_len;
376
377 /* interpret the response */
378 cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
379 dev->cq[cq->cq_id % OCE_MAX_CQ] = cq;
380 cq->qstate = QCREATED;
381 atomic_inc_32(&eq->ref_count);
382 return (cq);
383 } /* oce_cq_create_v2 */
384
385 /*
386 * function to create a completion queue
387 *
388 * dev - software handle to the device
 * eq - eq to associate with the cq
 * remaining arguments - CQ configuration parameters
391 *
392 * return pointer to the cq created. NULL on failure
393 */
394 struct oce_cq *
395 oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
396 uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
397 boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode)
398 {
399 struct oce_cq *cq = NULL;
400 if (LANCER_CHIP(dev))
401 cq = oce_cq_create_v2(dev, eq, q_len, item_size, sol_event,
402 is_eventable, nodelay, ncoalesce, armed, mode);
403 else
404 cq = oce_cq_create_v0(dev, eq, q_len, item_size, sol_event,
405 is_eventable, nodelay, ncoalesce, armed, mode);
406 return (cq);
407 }
408
409 /*
410 * function to delete a completion queue
411 *
412 * dev - software handle to the device
413 * cq - handle to the CQ to delete
414 *
415 * return none
416 */
417 static void
418 oce_cq_del(struct oce_dev *dev, struct oce_cq *cq, uint32_t mode)
419 {
420 struct oce_mbx mbx;
421 struct mbx_destroy_common_cq *fwcmd;
422
423 /* destroy the ring */
424 destroy_ring_buffer(dev, cq->ring);
425 cq->ring = NULL;
426
427 bzero(&mbx, sizeof (struct oce_mbx));
428 /* send a command to delete the CQ */
429 fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
430 fwcmd->params.req.id = cq->cq_id;
431 (void) oce_destroy_q(dev, &mbx,
432 sizeof (struct mbx_destroy_common_cq),
433 QTYPE_CQ, mode);
434
435 /* Reset the handler */
436 cq->cq_handler = NULL;
437 cq->qstate = QDELETED;
438 dev->cq[cq->cq_id % OCE_MAX_CQ] = NULL;
439 atomic_dec_32(&cq->eq->ref_count);
440
	/* free the cq */
442 kmem_free(cq, sizeof (struct oce_cq));
443 } /* oce_cq_del */
444
445 /*
446 * function to create an MQ
447 *
448 * dev - software handle to the device
449 * eq - the EQ to associate with the MQ for event notification
450 * q_len - the number of entries to create in the MQ
451 *
 * return pointer to the created MQ; NULL on failure
453 */
454 struct oce_mq *
455 oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
456 {
457 struct oce_mbx mbx;
458 struct mbx_create_common_mq *fwcmd;
459 struct oce_mq *mq = NULL;
460 int ret = 0;
461 struct oce_cq *cq;
462
463 /* Create the Completion Q */
464 cq = oce_cq_create(dev, eq, MCC_CQ_LEN,
465 sizeof (struct oce_mq_cqe),
466 B_FALSE, B_TRUE, B_TRUE, 0, B_FALSE, MBX_BOOTSTRAP);
467 if (cq == NULL) {
468 return (NULL);
469 }
470
471
472 /* allocate the mq */
473 mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);
474
475 if (mq == NULL) {
476 goto mq_alloc_fail;
477 }
478
479 bzero(&mbx, sizeof (struct oce_mbx));
480 /* allocate mbx */
481 fwcmd = (struct mbx_create_common_mq *)&mbx.payload;
482
483 /* create the ring buffer for this queue */
484 mq->ring = oce_create_ring_buffer(dev, q_len,
485 sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
486 if (mq->ring == NULL) {
487 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
488 "Legacy MQ ring alloc failed");
489 goto mq_ring_alloc;
490 }
491
492 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
493 MBX_SUBSYSTEM_COMMON,
494 OPCODE_CREATE_COMMON_MQ, MBX_TIMEOUT_SEC,
495 sizeof (struct mbx_create_common_mq), 0);
496
497 fwcmd->params.req.num_pages = (uint16_t)mq->ring->dbuf.num_pages;
498 oce_page_list(&mq->ring->dbuf, fwcmd->params.req.pages,
499 mq->ring->dbuf.num_pages);
500 fwcmd->params.req.context.u0.s.cq_id = cq->cq_id;
501 fwcmd->params.req.context.u0.s.ring_size =
502 OCE_LOG2(q_len) + 1;
503 fwcmd->params.req.context.u0.s.valid = 1;
504 fwcmd->params.req.context.u0.s.fid = dev->fn;
505
506 /* fill rest of mbx */
507 mbx.u0.s.embedded = 1;
508 mbx.payload_length = sizeof (struct mbx_create_common_mq);
509 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
510
511 /* now send the mail box */
512 ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, MBX_BOOTSTRAP);
513 if (ret != DDI_SUCCESS) {
514 oce_log(dev, CE_WARN, MOD_CONFIG,
515 "Legacy MQ create failed: 0x%x", ret);
516 goto mq_fail;
517 }
518
519 /* interpret the response */
520 mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
521 mq->cq = cq;
522 mq->cfg.q_len = (uint8_t)q_len;
523 mq->cfg.eqd = 0;
524
525 /* fill rest of the mq */
526 mq->parent = dev;
527
528 /* set the MQCQ handlers */
529 cq->cq_handler = oce_drain_mq_cq;
530 cq->cb_arg = (void *)mq;
531 mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
532 DDI_INTR_PRI(dev->intr_pri));
533 oce_log(dev, CE_NOTE, MOD_CONFIG,
534 "Legacy MQ CREATED SUCCESSFULLY MQID:%d\n",
535 mq->mq_id);
536 return (mq);
537
538 mq_fail:
539 destroy_ring_buffer(dev, mq->ring);
540 mq_ring_alloc:
541 kmem_free(mq, sizeof (struct oce_mq));
542 mq_alloc_fail:
543 oce_cq_del(dev, cq, MBX_BOOTSTRAP);
544 return (NULL);
545 } /* oce_mq_create */
546
547 /*
548 * function to create an extended V0 MQ
549 *
550 * dev - software handle to the device
551 * eq - the EQ to associate with the MQ for event notification
552 * q_len - the number of entries to create in the MQ
553 *
 * return pointer to the created MQ; NULL on failure
555 */
556 struct oce_mq *
557 oce_mq_create_ext_v0(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
558 {
559 struct oce_mbx mbx;
560 struct mbx_create_common_mq_ext_v0 *fwcmd;
561 struct oce_mq *mq = NULL;
562 int ret = 0;
563 struct oce_cq *cq;
564
565 /* Create the Completion Q */
566 cq = oce_cq_create(dev, eq, MCC_CQ_LEN,
567 sizeof (struct oce_mq_cqe),
568 B_FALSE, B_TRUE, B_TRUE, 0, B_FALSE, MBX_BOOTSTRAP);
569 if (cq == NULL) {
570 return (NULL);
571 }
572
573
574 /* allocate the mq */
575 mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);
576
577 if (mq == NULL) {
578 goto mq_alloc_fail;
579 }
580
581 bzero(&mbx, sizeof (struct oce_mbx));
582 /* allocate mbx */
583 fwcmd = (struct mbx_create_common_mq_ext_v0 *)&mbx.payload;
584
585 /* create the ring buffer for this queue */
586 mq->ring = oce_create_ring_buffer(dev, q_len,
587 sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
588 if (mq->ring == NULL) {
589 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
590 "MQ EXT ring alloc failed");
591 goto mq_ring_alloc;
592 }
593
594 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
595 MBX_SUBSYSTEM_COMMON,
596 OPCODE_CREATE_COMMON_MQ_EXT, MBX_TIMEOUT_SEC,
597 sizeof (struct mbx_create_common_mq_ext_v0), 0);
598
599 fwcmd->params.req.num_pages = mq->ring->dbuf.num_pages;
600 oce_page_list(&mq->ring->dbuf, fwcmd->params.req.pages,
601 mq->ring->dbuf.num_pages);
602 fwcmd->params.req.context.u0.s.cq_id = cq->cq_id;
603 fwcmd->params.req.context.u0.s.ring_size =
604 OCE_LOG2(q_len) + 1;
605 fwcmd->params.req.context.u0.s.valid = 1;
606 fwcmd->params.req.context.u0.s.fid = dev->fn;
607
	/* Register for Link State, Group 5 and Debug async events */
609 fwcmd->params.req.async_event_bitmap[0] =
610 (1 << ASYNC_EVENT_CODE_LINK_STATE) |
611 (1 << ASYNC_EVENT_CODE_GRP_5) |
612 (1 << ASYNC_EVENT_CODE_DEBUG);
613
614 /* fill rest of mbx */
615 mbx.u0.s.embedded = 1;
616 mbx.payload_length = sizeof (struct mbx_create_common_mq_ext_v0);
617 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
618
619 /* now send the mail box */
620 ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, MBX_BOOTSTRAP);
621 if (ret != DDI_SUCCESS) {
622 oce_log(dev, CE_WARN, MOD_CONFIG,
623 "Extended MQ create failed: 0x%x", ret);
624 goto mq_fail;
625 }
626
627 /* interpret the response */
628 mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
629 mq->cq = cq;
630 mq->cfg.q_len = (uint8_t)q_len;
631 mq->cfg.eqd = 0;
632
633 /* fill rest of the mq */
634 mq->parent = dev;
635 mq->qstate = QCREATED;
636 mq->mq_free = mq->cfg.q_len;
637
	/* reset indices */
639 mq->ring->cidx = 0;
640 mq->ring->pidx = 0;
641
642 /* set the MQCQ handlers */
643 cq->cq_handler = oce_drain_mq_cq;
644 cq->cb_arg = (void *)mq;
645 mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
646 DDI_INTR_PRI(dev->intr_pri));
647 oce_log(dev, CE_NOTE, MOD_CONFIG,
648 "Ext MQ CREATED SUCCESSFULLY MQID:%d\n", mq->mq_id);
649 return (mq);
650
651 mq_fail:
652 destroy_ring_buffer(dev, mq->ring);
653 mq_ring_alloc:
654 kmem_free(mq, sizeof (struct oce_mq));
655 mq_alloc_fail:
656 oce_cq_del(dev, cq, MBX_BOOTSTRAP);
657 return (NULL);
658 } /* oce_mq_create_ext_v0 */
659
660 /*
661 * function to create an extended V1 MQ
662 *
663 * dev - software handle to the device
664 * eq - the EQ to associate with the MQ for event notification
665 * q_len - the number of entries to create in the MQ
666 *
 * return pointer to the created MQ; NULL on failure
668 */
669 struct oce_mq *
670 oce_mq_create_ext_v1(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
671 {
672 struct oce_mbx mbx;
673 struct mbx_create_common_mq_ext_v1 *fwcmd;
674 struct oce_mq *mq = NULL;
675 int ret = 0;
676 struct oce_cq *cq;
677
678 /* Create the Completion Q */
679 cq = oce_cq_create(dev, eq, MCC_CQ_LEN,
680 sizeof (struct oce_mq_cqe),
681 B_FALSE, B_TRUE, B_TRUE, 0, B_FALSE, MBX_BOOTSTRAP);
682 if (cq == NULL) {
683 return (NULL);
684 }
685
686
687 /* allocate the mq */
688 mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);
689
690 if (mq == NULL) {
691 goto mq_alloc_fail;
692 }
693
694 bzero(&mbx, sizeof (struct oce_mbx));
695 /* allocate mbx */
696 fwcmd = (struct mbx_create_common_mq_ext_v1 *)&mbx.payload;
697
698 /* create the ring buffer for this queue */
699 mq->ring = oce_create_ring_buffer(dev, q_len,
700 sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
701 if (mq->ring == NULL) {
702 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
703 "MQ EXT ring alloc failed");
704 goto mq_ring_alloc;
705 }
706
707 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
708 MBX_SUBSYSTEM_COMMON,
709 OPCODE_CREATE_COMMON_MQ_EXT, MBX_TIMEOUT_SEC,
710 sizeof (struct mbx_create_common_mq_ext_v1), 1);
711
712 fwcmd->params.req.cq_id = cq->cq_id;
713
714 fwcmd->params.req.context.u0.s.ring_size =
715 OCE_LOG2(q_len) + 1;
716 fwcmd->params.req.context.u0.s.valid = 1;
717 fwcmd->params.req.context.u0.s.async_cq_id = cq->cq_id;
718 fwcmd->params.req.context.u0.s.async_cq_valid = 1;
719
720 fwcmd->params.req.num_pages = mq->ring->dbuf.num_pages;
721 oce_page_list(&mq->ring->dbuf, fwcmd->params.req.pages,
722 mq->ring->dbuf.num_pages);
723
	/* Register for Link State, Group 5 and Debug async events */
725 fwcmd->params.req.async_event_bitmap[0] =
726 (1 << ASYNC_EVENT_CODE_LINK_STATE) |
727 (1 << ASYNC_EVENT_CODE_GRP_5) |
728 (1 << ASYNC_EVENT_CODE_DEBUG);
729
730 /* fill rest of mbx */
731 mbx.u0.s.embedded = 1;
732 mbx.payload_length = sizeof (struct mbx_create_common_mq_ext_v1);
733 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
734
735 /* now send the mail box */
736 ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, MBX_BOOTSTRAP);
737 if (ret != DDI_SUCCESS) {
738 oce_log(dev, CE_WARN, MOD_CONFIG,
739 "Extended MQ create failed: 0x%x", ret);
740 goto mq_fail;
741 }
742
743 /* interpret the response */
744 mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
745 mq->cq = cq;
746 mq->cfg.q_len = (uint8_t)q_len;
747 mq->cfg.eqd = 0;
748
749 /* fill rest of the mq */
750 mq->parent = dev;
751 mq->qstate = QCREATED;
752 mq->mq_free = mq->cfg.q_len;
753
	/* reset indices */
755 mq->ring->cidx = 0;
756 mq->ring->pidx = 0;
757
758 /* set the MQCQ handlers */
759 cq->cq_handler = oce_drain_mq_cq;
760 cq->cb_arg = (void *)mq;
761 mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
762 DDI_INTR_PRI(dev->intr_pri));
763 oce_log(dev, CE_NOTE, MOD_CONFIG,
764 "Ext MQ CREATED SUCCESSFULLY MQID:%d\n", mq->mq_id);
765 return (mq);
766
767 mq_fail:
768 destroy_ring_buffer(dev, mq->ring);
769 mq_ring_alloc:
770 kmem_free(mq, sizeof (struct oce_mq));
771 mq_alloc_fail:
772 oce_cq_del(dev, cq, MBX_BOOTSTRAP);
773 return (NULL);
} /* oce_mq_create_ext_v1 */
775
776 /*
777 * function to delete an MQ
778 *
779 * dev - software handle to the device
780 * mq - pointer to the MQ to delete
781 *
782 * return none
783 */
784 static void
785 oce_mq_del(struct oce_dev *dev, struct oce_mq *mq)
786 {
787 struct oce_mbx mbx;
788 struct mbx_destroy_common_mq *fwcmd;
789
790 /* destroy the ring */
791 destroy_ring_buffer(dev, mq->ring);
792 mq->ring = NULL;
793 bzero(&mbx, sizeof (struct oce_mbx));
794 fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
795 fwcmd->params.req.id = mq->mq_id;
796 (void) oce_destroy_q(dev, &mbx,
797 sizeof (struct mbx_destroy_common_mq),
798 QTYPE_MQ, MBX_BOOTSTRAP);
799 oce_cq_del(dev, mq->cq, MBX_BOOTSTRAP);
800 mq->cq = NULL;
801 mq->qstate = QDELETED;
802 mutex_destroy(&mq->lock);
803 kmem_free(mq, sizeof (struct oce_mq));
804 } /* oce_mq_del */
805
/*
 * function to initialize the resources of a WQ used for NIC Tx
 *
 * dev - software handle to the device
 * wq - WQ to initialize
 * q_len - number of entries in the WQ
 * wq_type - type of the WQ
 *
 * return DDI_SUCCESS on success, DDI_FAILURE otherwise
 */
814 int oce_wq_init(struct oce_dev *dev, struct oce_wq *wq, uint32_t q_len,
815 int wq_type)
816 {
817 char str[MAX_POOL_NAME];
818 int ret;
819 static int wq_id = 0;
820 int buf_size;
821
822 ASSERT(dev != NULL);
823 /* q_len must be min 256 and max 2k */
824 if (q_len < 256 || q_len > 2048) {
825 oce_log(dev, CE_WARN, MOD_CONFIG,
826 "Invalid q length. Must be "
827 "[256, 2000]: 0x%x", q_len);
828 return (DDI_FAILURE);
829 }
830
831 /* Set the wq config */
832 wq->cfg.q_len = q_len;
833 wq->cfg.wq_type = (uint8_t)wq_type;
834 wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
835 wq->cfg.nbufs = 2 * wq->cfg.q_len;
836 wq->cfg.nhdl = 2 * wq->cfg.q_len;
837
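	/*
	 * round the Tx bcopy limit up to the next 1KB multiple to get
	 * the copy buffer size
	 */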
838 buf_size = ((dev->tx_bcopy_limit >> 10) +
839 ((dev->tx_bcopy_limit & (((uint32_t)1 << 10) - 1)) > 0 ? 1 :
840 0)) << 10;
841 wq->cfg.buf_size = (uint16_t)buf_size;
842
843 /* initialize ring statistics */
844 wq->stat_bytes = wq->stat_pkts = 0;
845
846 /* Create the WQ Buffer pool */
847 ret = oce_wqb_cache_create(wq, wq->cfg.buf_size);
848 if (ret != DDI_SUCCESS) {
849 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
850 "WQ Buffer Pool create failed ");
851 return (DDI_FAILURE);
852 }
853
854 /* Create a pool of memory handles */
855 ret = oce_wqm_cache_create(wq);
856 if (ret != DDI_SUCCESS) {
857 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
858 "WQ MAP Handles Pool create failed ");
859 goto wqm_fail;
860 }
861
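	/* per-WQ kmem cache for Tx WQE descriptor bookkeeping structures */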
862 (void) snprintf(str, MAX_POOL_NAME, "%s%d%s%d", "oce_wqed_",
863 dev->dev_id, "_", wq_id++);
864 wq->wqed_cache = kmem_cache_create(str, sizeof (oce_wqe_desc_t),
865 0, NULL, NULL, NULL, NULL, NULL, 0);
866 if (wq->wqed_cache == NULL) {
867 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
868 "WQ Packet Desc Pool create failed ");
869 goto wqed_fail;
870 }
871
872 /* create the ring buffer */
873 wq->ring = oce_create_ring_buffer(dev, q_len,
874 NIC_WQE_SIZE, DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
875 if (wq->ring == NULL) {
876 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
877 "Failed to create WQ ring ");
878 goto wq_ringfail;
879 }
880
881 /* Initialize WQ lock */
882 mutex_init(&wq->tx_lock, NULL, MUTEX_DRIVER,
883 DDI_INTR_PRI(dev->intr_pri));
884 /* Initialize WQ lock */
885 mutex_init(&wq->txc_lock, NULL, MUTEX_DRIVER,
886 DDI_INTR_PRI(dev->intr_pri));
887 atomic_inc_32(&dev->nwqs);
888
889 mutex_init(&wq->wqed_list_lock, NULL, MUTEX_DRIVER,
890 DDI_INTR_PRI(dev->intr_pri));
891
892 list_create(&wq->wqe_desc_list, sizeof (oce_wqe_desc_t),
893 offsetof(oce_wqe_desc_t, link));
894 return (DDI_SUCCESS);
895
896 wqcq_fail:
897 destroy_ring_buffer(dev, wq->ring);
898 wq_ringfail:
899 kmem_cache_destroy(wq->wqed_cache);
900 wqed_fail:
901 oce_wqm_cache_destroy(wq);
902 wqm_fail:
903 oce_wqb_cache_destroy(wq);
904 return (DDI_FAILURE);
} /* oce_wq_init */
906
/*
 * function to free the resources of a WQ
 *
 * dev - software handle to the device
 * wq - WQ to free
 *
 * return none
 */
915 static void
916 oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq)
917 {
	/* destroy the buffer, map handle and descriptor caches */
919 oce_wqb_cache_destroy(wq);
920 oce_wqm_cache_destroy(wq);
921 kmem_cache_destroy(wq->wqed_cache);
922
923 /* Free the packet descriptor list */
924 list_destroy(&wq->wqe_desc_list);
925 destroy_ring_buffer(dev, wq->ring);
926 wq->ring = NULL;
927 /* Destroy the Mutex */
928 mutex_destroy(&wq->wqed_list_lock);
929 mutex_destroy(&wq->tx_lock);
930 mutex_destroy(&wq->txc_lock);
931 atomic_dec_32(&dev->nwqs);
} /* oce_wq_fini */
933
934
935 static int
936 oce_wq_create(struct oce_wq *wq, struct oce_eq *eq, uint32_t mode)
937 {
938
939 struct oce_mbx mbx;
940 struct mbx_create_nic_wq *fwcmd;
941 struct oce_dev *dev = wq->parent;
942 struct oce_cq *cq;
943 int ret;
944
945 /* create the CQ */
946 cq = oce_cq_create(dev, eq, CQ_LEN_1024,
947 sizeof (struct oce_nic_tx_cqe),
948 B_FALSE, B_TRUE, B_FALSE, 3, B_FALSE, mode);
949 if (cq == NULL) {
950 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
951 "WQCQ create failed ");
952 return (DDI_FAILURE);
953 }
954 /* now fill the command */
955 bzero(&mbx, sizeof (struct oce_mbx));
956 fwcmd = (struct mbx_create_nic_wq *)&mbx.payload;
957 if (LANCER_CHIP(dev)) {
958 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
959 MBX_SUBSYSTEM_NIC,
960 OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
961 sizeof (struct mbx_create_nic_wq), 1);
962 fwcmd->params.req.ctx.if_id = dev->if_id;
963 } else {
964 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
965 MBX_SUBSYSTEM_NIC,
966 OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
967 sizeof (struct mbx_create_nic_wq), 0);
968 }
969
970 fwcmd->params.req.type = (uint8_t)wq->cfg.wq_type;
971 fwcmd->params.req.num_pages = wq->ring->dbuf.num_pages;
972 fwcmd->params.req.ulp_num = BE_ULP1_NUM;
973 oce_log(dev, CE_NOTE, MOD_CONFIG, "NUM_PAGES = 0x%d size = %lu",
974 (uint32_t)wq->ring->dbuf.num_pages,
975 wq->ring->dbuf.size);
976
977 /* Context info */
978 fwcmd->params.req.ctx.wq_size = OCE_LOG2(wq->cfg.q_len) + 1;
979 fwcmd->params.req.ctx.valid = 1;
980 fwcmd->params.req.ctx.cofe = 1;
981 fwcmd->params.req.ctx.no_rem_allowed = 1;
982 fwcmd->params.req.ctx.cq_id = cq->cq_id;
983
984 oce_page_list(&wq->ring->dbuf, fwcmd->params.req.pages,
985 wq->ring->dbuf.num_pages);
986
987 /* fill rest of mbx */
988 mbx.u0.s.embedded = 1;
989 mbx.payload_length = sizeof (struct mbx_create_nic_wq);
990 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
991
992 /* now post the command */
993 ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
994 if (ret != DDI_SUCCESS) {
995 oce_log(dev, CE_WARN, MOD_CONFIG,
996 "WQ create failed: 0x%x", ret);
997 oce_cq_del(dev, cq, mode);
998 return (ret);
999 }
1000
1001 /* interpret the response */
1002 wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);
1003 wq->qstate = QCREATED;
1004 wq->cq = cq;
1005 /* set the WQCQ handlers */
1006 wq->cq->cq_handler = oce_drain_wq_cq;
1007 wq->cq->cb_arg = (void *)wq;
1008
1009 /* All are free to start with */
1010 wq->wq_free = wq->cfg.q_len;
	/* reset indices */
1012 wq->ring->cidx = 0;
1013 wq->ring->pidx = 0;
1014 oce_log(dev, CE_NOTE, MOD_CONFIG, "WQ CREATED WQID = %d",
1015 wq->wq_id);
1016
1017 return (0);
1018 }
1019
1020 /*
1021 * function to delete a WQ
1022 *
1023 * dev - software handle to the device
1024 * wq - WQ to delete
1025 *
1026 * return none
1027 */
1028 static void
1029 oce_wq_del(struct oce_dev *dev, struct oce_wq *wq, uint32_t mode)
1030 {
1031 struct oce_mbx mbx;
1032 struct mbx_delete_nic_wq *fwcmd;
1033
1034 ASSERT(dev != NULL);
1035 ASSERT(wq != NULL);
1036 if (wq->qstate == QCREATED) {
1037 bzero(&mbx, sizeof (struct oce_mbx));
1038 /* now fill the command */
1039 fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
1040 fwcmd->params.req.wq_id = wq->wq_id;
1041 (void) oce_destroy_q(dev, &mbx,
1042 sizeof (struct mbx_delete_nic_wq),
1043 QTYPE_WQ, mode);
1044 wq->qstate = QDELETED;
1045 oce_cq_del(dev, wq->cq, mode);
1046 wq->cq = NULL;
1047 }
1048 } /* oce_wq_del */
1049
/*
 * function to allocate RQ resources
 *
 * dev - software handle to the device
 * rq - RQ to initialize
 * q_len - number of entries in the RQ
 * frag_size - size of each receive buffer fragment
 * mtu - maximum frame size for this RQ
 *
 * return DDI_SUCCESS on success, DDI_FAILURE otherwise
 */
1058 int oce_rq_init(struct oce_dev *dev, struct oce_rq *rq, uint32_t q_len,
1059 uint32_t frag_size, uint32_t mtu)
1060 {
1061 int ret;
1062
	/* validate q creation parameters */
	if (!OCE_LOG2(frag_size))
		return (DDI_FAILURE);
	if ((q_len == 0) || (q_len > 1024))
		return (DDI_FAILURE);
1068
1069 rq->cfg.q_len = q_len;
1070 rq->cfg.frag_size = frag_size;
1071 rq->cfg.mtu = mtu;
1072 rq->cfg.eqd = 0;
1073 rq->cfg.nbufs = dev->rq_max_bufs;
1074
1075 /* initialize ring statistics */
1076 rq->stat_bytes = rq->stat_pkts = 0;
1077
1078 rq->rq_bdesc_array =
1079 kmem_zalloc((sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs), KM_NOSLEEP);
1080 if (rq->rq_bdesc_array == NULL) {
1081 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
1082 "RQ bdesc alloc failed");
1083 return (DDI_FAILURE);
1084 }
1085 /* create the rq buffer descriptor ring */
1086 rq->shadow_ring =
1087 kmem_zalloc((rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)),
1088 KM_NOSLEEP);
1089 if (rq->shadow_ring == NULL) {
1090 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
1091 "RQ shadow ring alloc failed ");
1092 goto rq_shdw_fail;
1093 }
1094
1095 /* allocate the free list array */
1096 rq->rqb_freelist =
1097 kmem_zalloc(rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *), KM_NOSLEEP);
1098 if (rq->rqb_freelist == NULL) {
1099 goto rqb_free_list_fail;
1100 }
1101 /* create the buffer pool */
1102 ret = oce_rqb_cache_create(rq, rq->cfg.frag_size);
1103 if (ret != DDI_SUCCESS) {
1104 goto rqb_fail;
1105 }
1106
1107 /* create the ring buffer */
1108 rq->ring = oce_create_ring_buffer(dev, q_len,
1109 sizeof (struct oce_nic_rqe), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
1110 if (rq->ring == NULL) {
1111 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
1112 "RQ ring create failed ");
1113 goto rq_ringfail;
1114 }
1115
1116 /* Initialize the RQ lock */
1117 mutex_init(&rq->rx_lock, NULL, MUTEX_DRIVER,
1118 DDI_INTR_PRI(dev->intr_pri));
1119 /* Initialize the recharge lock */
1120 mutex_init(&rq->rc_lock, NULL, MUTEX_DRIVER,
1121 DDI_INTR_PRI(dev->intr_pri));
1122 atomic_inc_32(&dev->nrqs);
1123 return (DDI_SUCCESS);
1124
1125 rq_ringfail:
1126 oce_rqb_cache_destroy(rq);
1127 rqb_fail:
1128 kmem_free(rq->rqb_freelist,
1129 (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
1130 rqb_free_list_fail:
1131
1132 kmem_free(rq->shadow_ring,
1133 (rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)));
1134 rq_shdw_fail:
1135 kmem_free(rq->rq_bdesc_array,
1136 (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
1137 return (DDI_FAILURE);
} /* oce_rq_init */
1139
1140
/*
 * function to free RQ resources
 *
 * dev - software handle to the device
 * rq - RQ to free
 *
 * return none
 */
1149 void
1150 oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq)
1151 {
1152 /* Destroy buffer cache */
1153 rq->qstate = QFINI;
1154 oce_rqb_cache_destroy(rq);
1155 destroy_ring_buffer(dev, rq->ring);
1156 rq->ring = NULL;
1157 kmem_free(rq->shadow_ring,
1158 sizeof (oce_rq_bdesc_t *) * rq->cfg.q_len);
1159 rq->shadow_ring = NULL;
1160 kmem_free(rq->rq_bdesc_array,
1161 (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
1162 rq->rq_bdesc_array = NULL;
1163 kmem_free(rq->rqb_freelist,
1164 (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
1165 rq->rqb_freelist = NULL;
1166 mutex_destroy(&rq->rx_lock);
1167 mutex_destroy(&rq->rc_lock);
1168 atomic_dec_32(&dev->nrqs);
} /* oce_rq_fini */
1170
1171
1172 static int
1173 oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq,
1174 uint32_t mode)
1175 {
1176 struct oce_mbx mbx;
1177 struct mbx_create_nic_rq *fwcmd;
1178 struct oce_dev *dev = rq->parent;
1179 struct oce_cq *cq;
1180 int cq_len;
1181 int ret;
1182
1183 if (LANCER_CHIP(dev))
1184 cq_len = CQ_LEN_2048;
1185 else
1186 cq_len = CQ_LEN_1024;
1187
1188 cq = oce_cq_create(dev, eq, cq_len, sizeof (struct oce_nic_rx_cqe),
1189 B_FALSE, B_TRUE, B_FALSE, 3, B_FALSE, mode);
1190
1191 if (cq == NULL) {
1192 return (DDI_FAILURE);
1193 }
1194
1195 /* now fill the command */
1196 bzero(&mbx, sizeof (struct oce_mbx));
1197 fwcmd = (struct mbx_create_nic_rq *)&mbx.payload;
1198 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
1199 MBX_SUBSYSTEM_NIC,
1200 OPCODE_CREATE_NIC_RQ, MBX_TIMEOUT_SEC,
1201 sizeof (struct mbx_create_nic_rq), 0);
1202
1203 fwcmd->params.req.num_pages = rq->ring->dbuf.num_pages;
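	/* frag_size is passed to the firmware as a log2 value */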
1204 fwcmd->params.req.frag_size = OCE_LOG2(rq->cfg.frag_size);
1205 fwcmd->params.req.cq_id = cq->cq_id;
1206 oce_page_list(&rq->ring->dbuf, fwcmd->params.req.pages,
1207 rq->ring->dbuf.num_pages);
1208
1209 fwcmd->params.req.if_id = if_id;
1210 fwcmd->params.req.max_frame_size = (uint16_t)rq->cfg.mtu;
1211 fwcmd->params.req.is_rss_queue = rq->cfg.is_rss_queue;
1212
1213 /* fill rest of mbx */
1214 mbx.u0.s.embedded = 1;
1215 mbx.payload_length = sizeof (struct mbx_create_nic_rq);
1216 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
1217
1218 /* now post the command */
1219 ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
1220 if (ret != 0) {
1221 oce_log(dev, CE_WARN, MOD_CONFIG,
1222 "RQ create failed: 0x%x", ret);
1223 oce_cq_del(dev, cq, MBX_BOOTSTRAP);
1224 return (ret);
1225 }
1226
1227 /* interpret the response */
1228 rq->rq_id = LE_16(fwcmd->params.rsp.u0.s.rq_id);
1229 rq->rss_cpuid = fwcmd->params.rsp.u0.s.rss_cpuid;
1230 rq->cfg.if_id = if_id;
1231 rq->qstate = QCREATED;
1232 rq->cq = cq;
1233
1234 /* set the Completion Handler */
1235 rq->cq->cq_handler = oce_drain_rq_cq;
1236 rq->cq->cb_arg = (void *)rq;
1237
	/* reset the indices */
1239 rq->ring->cidx = 0;
1240 rq->ring->pidx = 0;
1241 rq->buf_avail = 0;
1242 oce_log(dev, CE_NOTE, MOD_CONFIG, "RQ created, RQID : %d, cpu-id = %d",
1243 rq->rq_id, rq->rss_cpuid);
1244 return (0);
1245
1246 }
1247
1248 /*
1249 * function to delete an RQ
1250 *
1251 * dev - software handle to the device
1252 * rq - RQ to delete
1253 *
1254 * return none
1255 */
1256 static void
1257 oce_rq_del(struct oce_dev *dev, struct oce_rq *rq, uint32_t mode)
1258 {
1259 struct oce_mbx mbx;
1260 struct mbx_delete_nic_rq *fwcmd;
1261
1262 ASSERT(dev != NULL);
1263 ASSERT(rq != NULL);
1264
1265 bzero(&mbx, sizeof (struct oce_mbx));
1266
1267 mutex_enter(&rq->rx_lock);
1268 /* delete the Queue */
1269 if (rq->qstate == QCREATED) {
1270 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
1271 fwcmd->params.req.rq_id = rq->rq_id;
1272 (void) oce_destroy_q(dev, &mbx,
1273 sizeof (struct mbx_delete_nic_rq), QTYPE_RQ, mode);
1274 oce_clean_rq(rq);
1275 /* Delete the associated CQ */
1276 oce_cq_del(dev, rq->cq, mode);
1277 rq->cq = NULL;
1278 /* free up the posted buffers */
1279 oce_rq_discharge(rq);
1280 (void) atomic_swap_32(&rq->qstate, QDELETED);
1281 }
1282 mutex_exit(&rq->rx_lock);
1283 } /* oce_rq_del */
1284
1285 /*
1286 * function to arm an EQ so that it can generate events
1287 *
1288 * dev - software handle to the device
1289 * qid - id of the EQ returned by the fw at the time of creation
1290 * npopped - number of EQEs to arm with
1291 * rearm - rearm bit
 * clearint - set to clear the interrupt condition that generated
 * the EQEs
1294 *
1295 * return none
1296 */
1297 void
1298 oce_arm_eq(struct oce_dev *dev, int16_t qid, int npopped,
1299 boolean_t rearm, boolean_t clearint)
1300 {
1301 eq_db_t eq_db = {0};
1302
1303 eq_db.bits.rearm = rearm;
1304 eq_db.bits.event = B_TRUE;
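	/*
	 * the bits of the queue id selected by DB_EQ_RING_ID_EXT_MASK are
	 * written to the extended-id field of the doorbell
	 */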
1305 eq_db.bits.eq_cq_extid =
1306 (((uint64_t)qid & (uint64_t)DB_EQ_RING_ID_EXT_MASK) <<
1307 (uint64_t)DB_EQ_RING_ID_EXT_MASK_SHIFT);
1308 eq_db.bits.num_popped = npopped;
1309 eq_db.bits.clrint = clearint;
1310 eq_db.bits.qid = qid;
1311 OCE_DB_WRITE32(dev, PD_EQ_DB, eq_db.dw0);
1312 }
1313
1314 /*
1315 * function to arm a CQ with CQEs
1316 *
1317 * dev - software handle to the device
1318 * qid - the id of the CQ returned by the fw at the time of creation
1319 * npopped - number of CQEs to arm with
1320 * rearm - rearm bit enable/disable
1321 *
1322 * return none
1323 */
1324 void
1325 oce_arm_cq(struct oce_dev *dev, int16_t qid, int npopped,
1326 boolean_t rearm)
1327 {
1328 cq_db_t cq_db = {0};
1329 cq_db.bits.rearm = rearm;
1330 cq_db.bits.eq_cq_extid =
1331 (((uint64_t)qid & (uint64_t)DB_CQ_RING_ID_EXT_MASK) <<
1332 (uint64_t)DB_CQ_RING_ID_EXT_MASK_SHIFT);
1333 cq_db.bits.num_popped = npopped;
1334 cq_db.bits.event = 0;
1335 cq_db.bits.qid = qid;
1336 OCE_DB_WRITE32(dev, PD_CQ_DB, cq_db.dw0);
1337 }
1338
1339
1340 /*
1341 * function to delete a EQ, CQ, MQ, WQ or RQ
1342 *
 * dev - software handle to the device
1344 * mbx - mbox command to send to the fw to delete the queue
1345 * mbx contains the queue information to delete
1346 * req_size - the size of the mbx payload dependent on the qtype
1347 * qtype - the type of queue i.e. EQ, CQ, MQ, WQ or RQ
1348 *
1349 * return DDI_SUCCESS => success, failure otherwise
1350 */
1351 int
1352 oce_destroy_q(struct oce_dev *dev, struct oce_mbx *mbx, size_t req_size,
1353 enum qtype qtype, uint32_t mode)
1354 {
1355 struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
1356 int opcode;
1357 int subsys;
1358 int ret;
1359
1360 switch (qtype) {
1361 case QTYPE_EQ: {
1362 opcode = OPCODE_DESTROY_COMMON_EQ;
1363 subsys = MBX_SUBSYSTEM_COMMON;
1364 break;
1365 }
1366 case QTYPE_CQ: {
1367 opcode = OPCODE_DESTROY_COMMON_CQ;
1368 subsys = MBX_SUBSYSTEM_COMMON;
1369 break;
1370 }
1371 case QTYPE_MQ: {
1372 opcode = OPCODE_DESTROY_COMMON_MQ;
1373 subsys = MBX_SUBSYSTEM_COMMON;
1374 break;
1375 }
1376 case QTYPE_WQ: {
1377 opcode = OPCODE_DELETE_NIC_WQ;
1378 subsys = MBX_SUBSYSTEM_NIC;
1379 break;
1380 }
1381 case QTYPE_RQ: {
1382 opcode = OPCODE_DELETE_NIC_RQ;
1383 subsys = MBX_SUBSYSTEM_NIC;
1384 break;
1385 }
	default: {
		ASSERT(0);
		return (DDI_FAILURE);
	}
1390 }
1391
1392 mbx_common_req_hdr_init(hdr, 0, 0, subsys,
1393 opcode, MBX_TIMEOUT_SEC, req_size, 0);
1394
1395 /* fill rest of mbx */
1396 mbx->u0.s.embedded = 1;
1397 mbx->payload_length = (uint32_t)req_size;
1398 DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
1399
1400 /* send command */
1401 ret = oce_issue_mbox_cmd(dev, mbx, MBX_TIMEOUT_SEC, mode);
1402 if (ret != 0) {
1403 oce_log(dev, CE_WARN, MOD_CONFIG,
1404 "Failed to del q: 0x%x", ret);
1405 }
1406
1407 return (ret);
1408 }
1409
1410 /*
1411 * function to set the delay parameter in the EQ for interrupt coalescing
1412 *
1413 * dev - software handle to the device
 * eq_arr - array of EQ ids to modify
1415 * eq_cnt - number of elements in eq_arr
1416 * eq_delay - delay parameter
1417 *
1418 * return DDI_SUCCESS => success, failure otherwise
1419 */
1420 int
1421 oce_set_eq_delay(struct oce_dev *dev, uint32_t *eq_arr,
1422 uint32_t eq_cnt, uint32_t eq_delay, uint32_t mode)
1423 {
1424 struct oce_mbx mbx;
1425 struct mbx_modify_common_eq_delay *fwcmd;
1426 int ret;
1427 int neq;
1428
1429 bzero(&mbx, sizeof (struct oce_mbx));
1430 fwcmd = (struct mbx_modify_common_eq_delay *)&mbx.payload;
1431
1432 /* fill the command */
1433 fwcmd->params.req.num_eq = eq_cnt;
1434 for (neq = 0; neq < eq_cnt; neq++) {
1435 fwcmd->params.req.delay[neq].eq_id = eq_arr[neq];
1436 fwcmd->params.req.delay[neq].phase = 0;
		fwcmd->params.req.delay[neq].dm = eq_delay;
	}
1440
1441 /* initialize the ioctl header */
1442 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
1443 MBX_SUBSYSTEM_COMMON,
1444 OPCODE_MODIFY_COMMON_EQ_DELAY,
1445 MBX_TIMEOUT_SEC,
1446 sizeof (struct mbx_modify_common_eq_delay), 0);
1447
1448 /* fill rest of mbx */
1449 mbx.u0.s.embedded = 1;
1450 mbx.payload_length = sizeof (struct mbx_modify_common_eq_delay);
1451 DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
1452
1453 /* post the command */
1454 ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
1455 if (ret != 0) {
1456 oce_log(dev, CE_WARN, MOD_CONFIG,
1457 "Failed to set EQ delay 0x%x", ret);
1458 }
1459
1460 return (ret);
1461 } /* oce_set_eq_delay */
1462
1463 /*
1464 * function to cleanup the eqs used during stop
1465 *
1466 * eq - pointer to event queue structure
1467 *
1468 * return none
1469 */
1470 void
1471 oce_drain_eq(struct oce_eq *eq)
1472 {
1473 struct oce_eqe *eqe;
1474 uint16_t num_eqe = 0;
1475 struct oce_dev *dev;
1476
1477 dev = eq->parent;
1478 /* get the first item in eq to process */
1479 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
1480
1481 while (eqe->u0.dw0) {
1482 eqe->u0.dw0 = LE_32(eqe->u0.dw0);
1483
1484 /* clear valid bit */
1485 eqe->u0.dw0 = 0;
1486
1487 /* process next eqe */
1488 RING_GET(eq->ring, 1);
1489
1490 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
1491 num_eqe++;
1492 } /* for all EQEs */
1493 if (num_eqe) {
1494 oce_arm_eq(dev, eq->eq_id, num_eqe, B_FALSE, B_TRUE);
1495 }
1496 } /* oce_drain_eq */
1497
1498
1499 int
1500 oce_init_tx(struct oce_dev *dev)
1501 {
1502 int qid = 0;
1503
1504 for (qid = 0; qid < dev->tx_rings; qid++) {
1505 if (oce_wq_init(dev, &dev->wq[qid], dev->tx_ring_size,
1506 NIC_WQ_TYPE_STANDARD) != DDI_SUCCESS) {
1507 goto queue_fail;
1508 }
1509 }
1510
1511 return (DDI_SUCCESS);
1512 queue_fail:
1513 oce_fini_tx(dev);
1514 return (DDI_FAILURE);
1515 }
1516
1517
1518 void
1519 oce_fini_tx(struct oce_dev *dev)
1520 {
1521 int qid;
1522 int nqs;
1523
1524 /* free all the tx rings */
1525 /* nwqs is decremented in fini so copy count first */
1526 nqs = dev->nwqs;
1527 for (qid = 0; qid < nqs; qid++) {
1528 oce_wq_fini(dev, &dev->wq[qid]);
1529 }
1530 }
1531
1532
1533 int
1534 oce_create_queues(struct oce_dev *dev)
1535 {
1536 int i, num_if;
1537
1538 for (num_if = 0; num_if < dev->num_rx_groups; num_if++) {
1539 if (oce_create_nw_interface(dev, &dev->rx_group[num_if],
1540 MBX_BOOTSTRAP) != DDI_SUCCESS) {
1541 goto if_fail;
1542 }
1543 }
1544
1545 /* create resources that are common to an oce instance */
1546 for (i = 0; i < dev->num_vectors; i++) {
1547 if (oce_eq_create(dev, &dev->eq[i], EQ_LEN_1024, EQE_SIZE_4, 0,
1548 MBX_BOOTSTRAP) != DDI_SUCCESS) {
1549 goto rings_fail;
1550 }
1551 }
1552
1553 /* create tx rings */
1554 if (dev->num_tx_groups == 1) {
1555 for (i = 0; i < dev->tx_rings; i++) {
1556 if (oce_wq_create(&dev->wq[i], &dev->eq[i],
1557 MBX_BOOTSTRAP) != 0) {
1558 dev->tx_rings = i;
1559 goto rings_fail;
1560 }
1561 oce_log(dev, CE_NOTE, MOD_CONFIG,
1562 "wq[%d] created on eq[%d]=%p wq=%p",
1563 i, i, (void *)&dev->eq[i], (void *)&dev->wq[i]);
1564 }
1565 } else {
1566 /* Tx groups not supported */
1567 oce_log(dev, CE_WARN, MOD_CONFIG,
1568 "unsupported number of tx groups %d", dev->num_tx_groups);
1569 goto rings_fail;
1570 }
1571
1572 return (DDI_SUCCESS);
1573 rings_fail:
1574 oce_delete_queues(dev);
1575 if_fail:
1576 for (i = 0; i < num_if; i++) {
1577 oce_delete_nw_interface(dev, &dev->rx_group[i], MBX_BOOTSTRAP);
1578 }
1579 return (DDI_FAILURE);
1580 }
1581
1582 int
1583 oce_create_mcc_queue(struct oce_dev *dev)
1584 {
1585 if (LANCER_CHIP(dev)) {
1586 dev->mq = oce_mq_create_ext_v1(dev, &dev->eq[0], MCC_Q_LEN);
1587 } else {
1588 dev->mq = oce_mq_create_ext_v0(dev, &dev->eq[0], MCC_Q_LEN);
1589 }
1590
1591 if (dev->mq == NULL) {
1592 oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
1593 "Extended MQ is not supported reverting to Legacy MQ mode");
1594 dev->mq = oce_mq_create(dev, &dev->eq[0], MCC_Q_LEN);
1595 if (dev->mq == NULL)
1596 return (DDI_FAILURE);
1597 }
1598
1599 return (DDI_SUCCESS);
1600 }
1601
1602 int
1603 oce_create_group(struct oce_dev *dev, oce_group_t *grp, uint32_t mode)
1604 {
1605 int eqidx, ret, i;
1606 char itbl[OCE_ITBL_SIZE];
1607 char hkey[OCE_HKEY_SIZE];
1608
1609 for (i = 0; i < grp->num_rings; i++) {
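		/*
		 * ring 0 is always the default (non-RSS) ring; the remaining
		 * rings inherit the group's RSS setting and are spread across
		 * the available EQ vectors
		 */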
1610 if (i != 0) {
1611 grp->ring[i].rx->cfg.is_rss_queue =
1612 grp->rss_enable;
1613 eqidx = (grp->eq_idx + i - grp->rss_enable) %
1614 dev->num_vectors;
1615 } else {
1616 grp->ring[i].rx->cfg.is_rss_queue = B_FALSE;
1617 eqidx = grp->eq_idx % dev->num_vectors;
1618 }
1619
1620 ret = oce_rq_create(grp->ring[i].rx,
1621 grp->if_id, &dev->eq[eqidx], mode);
1622
1623 if (ret != 0) {
1624 goto cleanup_group;
1625 }
1626 oce_log(dev, CE_NOTE, MOD_CONFIG,
1627 "rq[%d][%d] created on eq[%d]=%p rq=%p, rss=%d",
1628 grp->grp_num, i, eqidx,
1629 (void *)&dev->eq[eqidx],
1630 (void *)grp->ring[i].rx,
1631 grp->ring[i].rx->cfg.is_rss_queue);
1632 }
1633
1634 if (grp->rss_enable) {
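		/*
		 * build the RSS indirection table for this group and program
		 * it together with a freshly generated hash key
		 */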
1635 (void) oce_group_create_itbl(grp, itbl);
1636
1637 (void) oce_gen_hkey(hkey, OCE_HKEY_SIZE);
1638 ret = oce_config_rss(dev, grp->if_id, hkey,
1639 itbl, OCE_ITBL_SIZE, OCE_DEFAULT_RSS_TYPE, B_FALSE,
1640 mode);
1641 if (ret != DDI_SUCCESS) {
1642 oce_log(dev, CE_WARN, MOD_CONFIG,
1643 "Failed to Configure RSS 0x%x", ret);
1644 goto cleanup_group;
1645 }
1646 }
1647
1648 return (DDI_SUCCESS);
1649 cleanup_group:
1650 oce_delete_group(dev, grp);
1651 return (DDI_FAILURE);
1652 }
1653
1654 void
1655 oce_delete_mcc_queue(struct oce_dev *dev)
1656 {
1657 if (dev->mq != NULL) {
1658 oce_mq_del(dev, dev->mq);
1659 dev->mq = NULL;
1660 }
1661 }
1662
1663 void
1664 oce_delete_queues(struct oce_dev *dev)
1665 {
1666 int i;
1667 int neqs = dev->neqs;
1668
1669 for (i = 0; i < dev->nwqs; i++) {
1670 oce_wq_del(dev, &dev->wq[i], MBX_BOOTSTRAP);
1671 }
1672
1673 /* delete as many eqs as the number of vectors */
1674 for (i = 0; i < neqs; i++) {
1675 oce_eq_del(dev, &dev->eq[i], MBX_BOOTSTRAP);
1676 }
1677
1678 for (i = dev->num_rx_groups - 1; i >= 0; i--) {
1679 oce_delete_nw_interface(dev, &dev->rx_group[i], MBX_BOOTSTRAP);
1680 }
1681 }
1682
1683 void
1684 oce_delete_group(struct oce_dev *dev, oce_group_t *grp)
1685 {
1686 int i;
1687
1688 for (i = 0; i < grp->num_rings; i++) {
1689 oce_rq_del(dev, grp->ring[i].rx, MBX_BOOTSTRAP);
1690 }
1691 }
1692
1693
1694 void
1695 oce_free_queues(struct oce_dev *dev)
1696 {
1697 int i = 0;
1698
	if (dev->rq != NULL) {
		for (i = 0; i < dev->rx_rings; i++) {
			mutex_destroy(&dev->rq[i].rq_fini_lock);
		}
		kmem_free(dev->rq,
		    sizeof (struct oce_rq) * dev->rx_rings);
		dev->rq = NULL;
	}
1707 if (dev->wq != NULL) {
1708 kmem_free(dev->wq,
1709 sizeof (struct oce_wq) * dev->tx_rings);
1710 dev->wq = NULL;
1711 }
1712 if (dev->cq != NULL) {
1713 kmem_free(dev->cq,
1714 sizeof (struct oce_cq *) * OCE_MAX_CQ);
1715 dev->cq = NULL;
1716 }
1717 if (dev->eq != NULL) {
1718 for (i = 0; i < OCE_MAX_EQ; i++) {
1719 mutex_destroy(&dev->eq[i].lock);
1720 }
1721
1722 kmem_free(dev->eq,
1723 sizeof (struct oce_eq) * OCE_MAX_EQ);
1724 dev->eq = NULL;
1725 }
1726 }
1727
1728 int
1729 oce_alloc_queues(struct oce_dev *dev)
1730 {
1731 int i, j, nrings = 0;
1732
1733 /* Allocate space for RQ array */
1734 dev->rq = kmem_zalloc(sizeof (struct oce_rq) * dev->rx_rings,
1735 KM_NOSLEEP);
1736
1737 if (dev->rq == NULL) {
1738 return (DDI_FAILURE);
1739 }
1740 for (i = 0; i < dev->rx_rings; i++) {
1741 mutex_init(&dev->rq[i].rq_fini_lock, NULL, MUTEX_DRIVER,
1742 DDI_INTR_PRI(dev->intr_pri));
1743 }
1744
1745 /* Allocate space for WQ array */
1746 dev->wq = kmem_zalloc(sizeof (struct oce_wq) * dev->tx_rings,
1747 KM_NOSLEEP);
1748
1749 if (dev->wq == NULL) {
1750 goto alloc_fail;
1751 }
1752
1753 dev->cq = kmem_zalloc(sizeof (struct oce_cq *) * OCE_MAX_CQ,
1754 KM_NOSLEEP);
1755
1756 if (dev->cq == NULL) {
1757 goto alloc_fail;
1758 }
1759
1760 dev->eq = kmem_zalloc(sizeof (struct oce_eq) * OCE_MAX_EQ,
1761 KM_NOSLEEP);
1762 if (dev->eq == NULL) {
1763 goto alloc_fail;
1764 }
1765
1766 for (i = 0; i < OCE_MAX_EQ; i++) {
1767 dev->eq[i].idx = i;
1768 mutex_init(&dev->eq[i].lock, NULL, MUTEX_DRIVER,
1769 DDI_INTR_PRI(dev->intr_pri));
1770 }
1771
1772 for (i = 0; i < dev->tx_rings; i++) {
1773 dev->wq[i].parent = (void *)dev;
1774 dev->default_tx_rings[i].tx = &dev->wq[i];
1775 }
1776
1777 for (i = 0; i < dev->num_rx_groups; i++) {
1778 for (j = 0; j < dev->rx_group[i].num_rings; j++) {
1779 dev->rq[nrings].parent = (void *)dev;
1780 dev->rx_group[i].ring[j].rx = &dev->rq[nrings];
1781 dev->rx_group[i].ring[j].rx->grp = &dev->rx_group[i];
1782 nrings++;
1783 }
1784 }
1785
1786 return (DDI_SUCCESS);
1787 alloc_fail:
1788 oce_free_queues(dev);
1789 return (DDI_FAILURE);
1790 }