NEX-1890 update oce from source provided by Emulex


   1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /* Copyright © 2003-2011 Emulex. All rights reserved.  */
  23 
  24 /*
  25  * Source file containing Queue handling functions
  26  *
  27  */
  28 
  29 #include <oce_impl.h>
  30 extern struct oce_dev *oce_dev_list[];
  31 
  32 int oce_destroy_q(struct oce_dev  *oce, struct oce_mbx  *mbx, size_t req_size,
  33     enum qtype  qtype);
  34 /* Mailbox Queue functions */
  35 struct oce_mq *
  36 oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);
  37 
  38 /* event queue handling */
  39 struct oce_eq *
  40 oce_eq_create(struct oce_dev *dev, uint32_t q_len, uint32_t item_size,
  41     uint32_t eq_delay);
  42 
  43 /* completion queue handling */
  44 struct oce_cq *
  45 oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
  46     uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
  47     boolean_t nodelay, uint32_t ncoalesce);
  48 
  49 
  50 /* Tx  WQ functions */
  51 static struct oce_wq *oce_wq_init(struct oce_dev *dev,  uint32_t q_len,
  52     int wq_type);
  53 static void oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq);
  54 static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
  55 static void oce_wq_del(struct oce_dev *dev, struct oce_wq *wq);
  56 /* Rx Queue functions */
  57 static struct oce_rq *oce_rq_init(struct oce_dev *dev, uint32_t q_len,
  58     uint32_t frag_size, uint32_t mtu,
  59     boolean_t rss);
  60 static void oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq);
  61 static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
  62 static void oce_rq_del(struct oce_dev *dev, struct oce_rq *rq);
  63 
  64 /*
  65  * function to create an event queue
  66  *
  67  * dev - software handle to the device
  68  * q_len/item_size/eq_delay - queue length, eqe size and delay multiplier
  69  *
  70  * return pointer to EQ; NULL on failure
  71  */
  72 struct oce_eq *
  73 oce_eq_create(struct oce_dev *dev, uint32_t q_len, uint32_t item_size,
  74     uint32_t eq_delay)
  75 {
  76         struct oce_eq *eq;
  77         struct oce_mbx mbx;
  78         struct mbx_create_common_eq *fwcmd;
  79         int ret = 0;
  80 
  81         /* allocate an eq */
  82         eq = kmem_zalloc(sizeof (struct oce_eq), KM_NOSLEEP);
  83 
  84         if (eq == NULL) {
  85                 return (NULL);
  86         }
  87 
  88         bzero(&mbx, sizeof (struct oce_mbx));
  89         /* allocate mbx */
  90         fwcmd = (struct mbx_create_common_eq *)&mbx.payload;
  91 
  92         eq->ring = create_ring_buffer(dev, q_len,
  93             item_size, DDI_DMA_CONSISTENT);
  94 
  95         if (eq->ring == NULL) {
  96                 oce_log(dev, CE_WARN, MOD_CONFIG,
  97                     "EQ ring alloc failed:0x%p", (void *)eq->ring);
  98                 kmem_free(eq, sizeof (struct oce_eq));
  99                 return (NULL);
 100         }
 101 
 102         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 103             MBX_SUBSYSTEM_COMMON,
 104             OPCODE_CREATE_COMMON_EQ, MBX_TIMEOUT_SEC,
 105             sizeof (struct mbx_create_common_eq));
 106 
 107         fwcmd->params.req.num_pages = eq->ring->dbuf->num_pages;
 108         oce_page_list(eq->ring->dbuf, &fwcmd->params.req.pages[0],
 109             eq->ring->dbuf->num_pages);
 110 
 111         /* dw 0 */
 112         fwcmd->params.req.eq_ctx.size = (item_size == 4) ? 0 : 1;
 113         fwcmd->params.req.eq_ctx.valid = 1;
 114         /* dw 1 */
 115         fwcmd->params.req.eq_ctx.armed = 0;
 116         fwcmd->params.req.eq_ctx.pd = 0;
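              /* count encodes the ring length as log2(q_len / 256) in the eq context */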
 117         fwcmd->params.req.eq_ctx.count = OCE_LOG2(q_len/256);
 118 
 119         /* dw 2 */
 120         fwcmd->params.req.eq_ctx.function = dev->fn;
 121         fwcmd->params.req.eq_ctx.nodelay  = 0;
 122         fwcmd->params.req.eq_ctx.phase = 0;
  123         /* todo: calculate multiplier from max, min and cur */
 124         fwcmd->params.req.eq_ctx.delay_mult = eq_delay;
 125 
 126         /* fill rest of mbx */
 127         mbx.u0.s.embedded = 1;
 128         mbx.payload_length = sizeof (struct mbx_create_common_eq);
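              /* DW_SWAP byte-swaps the embedded command on big-endian hosts (a no-op on little-endian hosts) */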
 129         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 130 
 131         /* now post the command */
 132         ret = oce_mbox_post(dev, &mbx, NULL);
 133 
 134         if (ret != 0) {
 135                 oce_log(dev, CE_WARN, MOD_CONFIG, "EQ create failed: %d", ret);
 136                 destroy_ring_buffer(dev, eq->ring);
 137                 kmem_free(eq, sizeof (struct oce_eq));
 138                 return (NULL);
 139         }
 140 
 141         /* interpret the response */
 142         eq->eq_id = LE_16(fwcmd->params.rsp.eq_id);
 143         eq->eq_cfg.q_len = q_len;
 144         eq->eq_cfg.item_size = item_size;
 145         eq->eq_cfg.cur_eqd = (uint8_t)eq_delay;
 146         eq->parent = (void *)dev;
 147         atomic_inc_32(&dev->neqs);
 148         oce_log(dev, CE_NOTE, MOD_CONFIG,
 149             "EQ created, eq=0x%p eq_id=0x%x", (void *)eq, eq->eq_id);
 150         /* Save the eq pointer */
 151         return (eq);
 152 } /* oce_eq_create */
 153 
 154 /*
 155  * function to delete an event queue
 156  *
 157  * dev - software handle to the device
 158  * eq - handle to the eq to be deleted
 159  *
  160  * return none
 161  */
 162 void
 163 oce_eq_del(struct oce_dev *dev, struct oce_eq *eq)
 164 {
 165         struct oce_mbx mbx;
 166         struct mbx_destroy_common_eq *fwcmd;
 167 
 168         /* drain the residual events */
 169         oce_drain_eq(eq);
 170 
 171         /* destroy the ring */
 172         destroy_ring_buffer(dev, eq->ring);
 173         eq->ring = NULL;
 174 
 175         /* send a command to delete the EQ */
 176         fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
 177         fwcmd->params.req.id = eq->eq_id;
 178         (void) oce_destroy_q(dev, &mbx,
 179             sizeof (struct mbx_destroy_common_eq),
 180             QTYPE_EQ);
 181         kmem_free(eq, sizeof (struct oce_eq));
 182         atomic_dec_32(&dev->neqs);
 183 }
 184 
 185 /*
 186  * function to create a completion queue
 187  *
 188  * dev - software handle to the device
 189  * eq - optional eq to be associated with to the cq
 190  * cqcfg - configuration for this queue
 191  *
 192  * return pointer to the cq created. NULL on failure
 193  */
 194 struct oce_cq *
 195 oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
 196     uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
 197     boolean_t nodelay, uint32_t ncoalesce)
 198 {
 199         struct oce_cq *cq = NULL;
 200         struct oce_mbx mbx;
 201         struct mbx_create_common_cq *fwcmd;
 202         int ret = 0;
 203 
 204         /* create cq */
 205         cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);
 206         if (cq == NULL) {
 207                 oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
 208                     "CQ allocation failed");
 209                 return (NULL);
 210         }
 211 
 212         /* create the ring buffer for this queue */
 213         cq->ring = create_ring_buffer(dev, q_len,
 214             item_size, DDI_DMA_CONSISTENT);
 215         if (cq->ring == NULL) {
 216                 oce_log(dev, CE_WARN, MOD_CONFIG,
 217                     "CQ ring alloc failed:0x%p",
 218                     (void *)cq->ring);
 219                 kmem_free(cq, sizeof (struct oce_cq));
 220                 return (NULL);
 221         }
 222         /* initialize mailbox */
 223         bzero(&mbx, sizeof (struct oce_mbx));
 224         fwcmd = (struct mbx_create_common_cq *)&mbx.payload;
 225 
 226         /* fill the command header */
 227         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 228             MBX_SUBSYSTEM_COMMON,
 229             OPCODE_CREATE_COMMON_CQ, MBX_TIMEOUT_SEC,
 230             sizeof (struct mbx_create_common_cq));
 231 
 232         /* fill command context */
 233         /* dw0 */
 234         fwcmd->params.req.cq_ctx.eventable = is_eventable;
 235         fwcmd->params.req.cq_ctx.sol_event = sol_event;
 236         fwcmd->params.req.cq_ctx.valid = 1;
 237         fwcmd->params.req.cq_ctx.count = OCE_LOG2(q_len/256);
 238         fwcmd->params.req.cq_ctx.nodelay = nodelay;
 239         fwcmd->params.req.cq_ctx.coalesce_wm = ncoalesce;
 240 
 241         /* dw1 */
 242         fwcmd->params.req.cq_ctx.armed = B_FALSE;
 243         fwcmd->params.req.cq_ctx.eq_id = eq->eq_id;
 244         fwcmd->params.req.cq_ctx.pd = 0;
 245         /* dw2 */
 246         fwcmd->params.req.cq_ctx.function = dev->fn;
 247 
 248         /* fill the rest of the command */
 249         fwcmd->params.req.num_pages = cq->ring->dbuf->num_pages;
 250         oce_page_list(cq->ring->dbuf, &fwcmd->params.req.pages[0],
 251             cq->ring->dbuf->num_pages);
 252 
 253         /* fill rest of mbx */
 254         mbx.u0.s.embedded = 1;
 255         mbx.payload_length = sizeof (struct mbx_create_common_cq);
 256         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 257 
 258         /* now send the mail box */
 259         ret = oce_mbox_post(dev, &mbx, NULL);
 260 
 261         if (ret != 0) {
 262                 oce_log(dev, CE_WARN, MOD_CONFIG,
 263                     "CQ create failed: 0x%x", ret);
 264                 destroy_ring_buffer(dev, cq->ring);
 265                 kmem_free(cq, sizeof (struct oce_cq));
 266                 return (NULL);
 267         }
 268 
 269         cq->parent = dev;
  270         cq->eq = eq; /* associated eq */
 271         cq->cq_cfg.q_len = q_len;
 272         cq->cq_cfg.item_size = item_size;
 273         cq->cq_cfg.sol_eventable = (uint8_t)sol_event;
 274         cq->cq_cfg.nodelay = (uint8_t)nodelay;
 275         /* interpret the response */
 276         cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
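              /* index the new cq by id so later completions can be mapped back to it */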
 277         dev->cq[cq->cq_id % OCE_MAX_CQ] = cq;
 278         atomic_inc_32(&eq->ref_count);
 279         return (cq);
 280 } /* oce_cq_create */
 281 
 282 /*
 283  * function to delete a completion queue
 284  *
 285  * dev - software handle to the device
 286  * cq - handle to the CQ to delete
 287  *
 288  * return none
 289  */
 290 static void
 291 oce_cq_del(struct oce_dev *dev, struct oce_cq *cq)
 292 {
 293         struct oce_mbx mbx;
 294         struct mbx_destroy_common_cq *fwcmd;
 295 
 296         /* destroy the ring */
 297         destroy_ring_buffer(dev, cq->ring);
 298         cq->ring = NULL;
 299 
 300         bzero(&mbx, sizeof (struct oce_mbx));
 301         /* send a command to delete the CQ */
 302         fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
 303         fwcmd->params.req.id = cq->cq_id;
 304         (void) oce_destroy_q(dev, &mbx,
 305             sizeof (struct mbx_destroy_common_cq),
 306             QTYPE_CQ);
 307 
 308         /* Reset the handler */
 309         cq->cq_handler = NULL;
 310         dev->cq[cq->cq_id % OCE_MAX_CQ] = NULL;
 311         atomic_dec_32(&cq->eq->ref_count);
 312         mutex_destroy(&cq->lock);
 313 
 314         /* release the eq */
 315         kmem_free(cq, sizeof (struct oce_cq));
 316 } /* oce_cq_del */
 317 
 318 /*
 319  * function to create an MQ
 320  *
 321  * dev - software handle to the device
 322  * eq - the EQ to associate with the MQ for event notification
 323  * q_len - the number of entries to create in the MQ
 324  *
 325  * return pointer to the created MQ, failure otherwise
 326  */
 327 struct oce_mq *
 328 oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
 329 {
 330         struct oce_mbx mbx;
 331         struct mbx_create_common_mq *fwcmd;
 332         struct oce_mq *mq = NULL;
 333         int ret = 0;
 334         struct oce_cq  *cq;
 335 
 336         /* Create the Completion Q */
 337         cq = oce_cq_create(dev, eq, CQ_LEN_256,
 338             sizeof (struct oce_mq_cqe),
 339             B_FALSE, B_TRUE, B_TRUE, 0);
 340         if (cq == NULL) {
 341                 return (NULL);
 342         }
 343 
 344 
 345         /* allocate the mq */
 346         mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);
 347 
 348         if (mq == NULL) {
 349                 goto mq_alloc_fail;
 350         }
 351 
 352         bzero(&mbx, sizeof (struct oce_mbx));
 353         /* allocate mbx */
 354         fwcmd = (struct mbx_create_common_mq *)&mbx.payload;
 355 
 356         /* create the ring buffer for this queue */
 357         mq->ring = create_ring_buffer(dev, q_len,
 358             sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
 359         if (mq->ring == NULL) {
 360                 oce_log(dev, CE_WARN, MOD_CONFIG,
 361                     "MQ ring alloc failed:0x%p",
 362                     (void *)mq->ring);
 363                 goto mq_ring_alloc;
 364         }
 365 
 366         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 367             MBX_SUBSYSTEM_COMMON,
 368             OPCODE_CREATE_COMMON_MQ, MBX_TIMEOUT_SEC,
 369             sizeof (struct mbx_create_common_mq));
 370 
 371         fwcmd->params.req.num_pages = mq->ring->dbuf->num_pages;
 372         oce_page_list(mq->ring->dbuf, fwcmd->params.req.pages,
 373             mq->ring->dbuf->num_pages);
 374         fwcmd->params.req.context.u0.s.cq_id = cq->cq_id;
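              /* ring_size is encoded as log2(q_len) + 1 in the mq context */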
 375         fwcmd->params.req.context.u0.s.ring_size =
 376             OCE_LOG2(q_len) + 1;
 377         fwcmd->params.req.context.u0.s.valid = 1;
 378         fwcmd->params.req.context.u0.s.fid = dev->fn;
 379 
 380         /* fill rest of mbx */
 381         mbx.u0.s.embedded = 1;
 382         mbx.payload_length = sizeof (struct mbx_create_common_mq);
 383         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 384 
 385         /* now send the mail box */
 386         ret = oce_mbox_post(dev, &mbx, NULL);
 387         if (ret != DDI_SUCCESS) {
 388                 oce_log(dev, CE_WARN, MOD_CONFIG,
 389                     "MQ create failed: 0x%x", ret);
 390                 goto mq_fail;
 391         }
 392 
 393         /* interpret the response */
 394         mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
 395         mq->cq = cq;
 396         mq->cfg.q_len = (uint8_t)q_len;
 397         mq->cfg.eqd = 0;
 398 
 399         /* fill rest of the mq */
 400         mq->parent = dev;
 401 
 402         /* set the MQCQ handlers */
 403         cq->cq_handler = oce_drain_mq_cq;
 404         cq->cb_arg = (void *)mq;
 405         mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
 406             DDI_INTR_PRI(dev->intr_pri));
 407         return (mq);
 408 
 409 mq_fail:
 410         destroy_ring_buffer(dev, mq->ring);
 411 mq_ring_alloc:
 412         kmem_free(mq, sizeof (struct oce_mq));
 413 mq_alloc_fail:
 414         oce_cq_del(dev, cq);
 415         return (NULL);
 416 } /* oce_mq_create */
 417 
 418 /*
 419  * function to delete an MQ
 420  *
 421  * dev - software handle to the device
 422  * mq - pointer to the MQ to delete
 423  *
 424  * return none
 425  */
 426 static void
 427 oce_mq_del(struct oce_dev *dev, struct oce_mq *mq)
 428 {
 429         struct oce_mbx mbx;
 430         struct mbx_destroy_common_mq *fwcmd;
 431 
 432         /* destroy the ring */
 433         destroy_ring_buffer(dev, mq->ring);
 434         mq->ring = NULL;
 435         bzero(&mbx, sizeof (struct oce_mbx));
 436         fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
 437         fwcmd->params.req.id = mq->mq_id;
 438         (void) oce_destroy_q(dev, &mbx,
 439             sizeof (struct mbx_destroy_common_mq),
 440             QTYPE_MQ);
 441         oce_cq_del(dev, mq->cq);
 442         mq->cq = NULL;
 443         mutex_destroy(&mq->lock);
 444         kmem_free(mq, sizeof (struct oce_mq));
 445 } /* oce_mq_del */
 446 
 447 /*
 448  * function to create a WQ for NIC Tx
 449  *
 450  * dev - software handle to the device
 451  * wqcfg - configuration structure providing WQ config parameters
 452  *
 453  * return pointer to the WQ created. NULL on failure
 454  */
 455 static struct oce_wq *
 456 oce_wq_init(struct oce_dev *dev,  uint32_t q_len, int wq_type)
 457 {
 458         struct oce_wq *wq;
 459         char str[MAX_POOL_NAME];
 460         int ret;
 461         static int wq_id = 0;
 462 
 463         ASSERT(dev != NULL);
 464         /* q_len must be min 256 and max 2k */
 465         if (q_len < 256 || q_len > 2048) {
 466                 oce_log(dev, CE_WARN, MOD_CONFIG,
 467                     "Invalid q length. Must be "
  468                     "[256, 2048]: 0x%x", q_len);
 469                 return (NULL);
 470         }
 471 
 472         /* allocate wq */
 473         wq = kmem_zalloc(sizeof (struct oce_wq), KM_NOSLEEP);
 474         if (wq == NULL) {
 475                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 476                     "WQ allocation failed");
 477                 return (NULL);
 478         }
 479 
 480         /* Set the wq config */
 481         wq->cfg.q_len = q_len;
 482         wq->cfg.wq_type = (uint8_t)wq_type;
 483         wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
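              /* assumption: 2x the ring depth of buffers/handles covers wqes still in flight */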
 484         wq->cfg.nbufs = 2 * wq->cfg.q_len;
 485         wq->cfg.nhdl = 2 * wq->cfg.q_len;
 486         wq->cfg.buf_size = dev->tx_bcopy_limit;
 487 
 488         /* assign parent */
 489         wq->parent = (void *)dev;
 490 
 491         /* Create the WQ Buffer pool */
 492         ret  = oce_wqb_cache_create(wq, wq->cfg.buf_size);
 493         if (ret != DDI_SUCCESS) {
 494                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 495                     "WQ Buffer Pool create failed ");
 496                 goto wqb_fail;
 497         }
 498 
 499         /* Create a pool of memory handles */
 500         ret = oce_wqm_cache_create(wq);
 501         if (ret != DDI_SUCCESS) {
 502                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 503                     "WQ MAP Handles Pool create failed ");
 504                 goto wqm_fail;
 505         }
 506 
 507         (void) snprintf(str, MAX_POOL_NAME, "%s%d%s%d", "oce_wqed_",
 508             dev->dev_id, "_", wq_id++);
 509         wq->wqed_cache = kmem_cache_create(str, sizeof (oce_wqe_desc_t),
 510             0, NULL, NULL, NULL, NULL, NULL, 0);
 511         if (wq->wqed_cache == NULL) {
 512                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 513                     "WQ Packet Desc Pool create failed ");
 514                 goto wqed_fail;
 515         }
 516 
 517         /* create the ring buffer */
 518         wq->ring = create_ring_buffer(dev, q_len,
 519             NIC_WQE_SIZE, DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
 520         if (wq->ring == NULL) {
 521                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 522                     "Failed to create WQ ring ");
 523                 goto wq_ringfail;
 524         }
 525 
 526         /* Initialize WQ lock */
 527         mutex_init(&wq->tx_lock, NULL, MUTEX_DRIVER,
 528             DDI_INTR_PRI(dev->intr_pri));
 529         /* Initialize WQ lock */
 530         mutex_init(&wq->txc_lock, NULL, MUTEX_DRIVER,
 531             DDI_INTR_PRI(dev->intr_pri));
 532         atomic_inc_32(&dev->nwqs);
 533 
 534         OCE_LIST_CREATE(&wq->wqe_desc_list, DDI_INTR_PRI(dev->intr_pri));
 535         return (wq);
 536 
 537 wqcq_fail:
 538         destroy_ring_buffer(dev, wq->ring);
 539 wq_ringfail:
 540         kmem_cache_destroy(wq->wqed_cache);
 541 wqed_fail:
 542         oce_wqm_cache_destroy(wq);
 543 wqm_fail:
 544         oce_wqb_cache_destroy(wq);
 545 wqb_fail:
 546         kmem_free(wq, sizeof (struct oce_wq));
 547         return (NULL);
  548 } /* oce_wq_init */
 549 
 550 /*
  551  * function to free a WQ and its resources
 552  *
 553  * dev - software handle to the device
 554  * wq - WQ to delete
 555  *
  556  * return none
 557  */
 558 static void
 559 oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq)
 560 {
 561         /* destroy cq */
 562         oce_wqb_cache_destroy(wq);
 563         oce_wqm_cache_destroy(wq);
 564         kmem_cache_destroy(wq->wqed_cache);
 565 
 566         /* Free the packet descriptor list */
 567         OCE_LIST_DESTROY(&wq->wqe_desc_list);
 568         destroy_ring_buffer(dev, wq->ring);
 569         wq->ring = NULL;
 570         /* Destroy the Mutex */
 571         mutex_destroy(&wq->tx_lock);
 572         mutex_destroy(&wq->txc_lock);
 573         kmem_free(wq, sizeof (struct oce_wq));
 574         atomic_dec_32(&dev->nwqs);
  575 } /* oce_wq_fini */
 576 
 577 
 578 static int
 579 oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
 580 {
 581 
 582         struct oce_mbx mbx;
 583         struct mbx_create_nic_wq *fwcmd;
 584         struct oce_dev *dev = wq->parent;
 585         struct oce_cq *cq;
 586         int ret;
 587 
 588         /* create the CQ */
 589         cq = oce_cq_create(dev, eq, CQ_LEN_1024,
 590             sizeof (struct oce_nic_tx_cqe),
 591             B_FALSE, B_TRUE, B_FALSE, 3);
 592         if (cq == NULL) {
 593                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 594                     "WCCQ create failed ");
 595                 return (DDI_FAILURE);
 596         }
 597         /* now fill the command */
 598         bzero(&mbx, sizeof (struct oce_mbx));
 599         fwcmd = (struct mbx_create_nic_wq *)&mbx.payload;
 600         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 601             MBX_SUBSYSTEM_NIC,
 602             OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
 603             sizeof (struct mbx_create_nic_wq));
 604 
 605         fwcmd->params.req.nic_wq_type = (uint8_t)wq->cfg.wq_type;
 606         fwcmd->params.req.num_pages = wq->ring->dbuf->num_pages;
  607         oce_log(dev, CE_NOTE, MOD_CONFIG, "NUM_PAGES = %d size = %lu",
 608             (uint32_t)wq->ring->dbuf->num_pages,
 609             wq->ring->dbuf->size);
 610 
 611         /* workaround: fill 0x01 for ulp_mask in rsvd0 */
 612         fwcmd->params.req.rsvd0 = 0x01;
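              /* wq_size uses the same log2(q_len) + 1 encoding as the mq ring size */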
 613         fwcmd->params.req.wq_size = OCE_LOG2(wq->cfg.q_len) + 1;
 614         fwcmd->params.req.valid = 1;
 615         fwcmd->params.req.pd_id = 0;
 616         fwcmd->params.req.pci_function_id = dev->fn;
 617         fwcmd->params.req.cq_id = cq->cq_id;
 618 
 619         oce_page_list(wq->ring->dbuf, fwcmd->params.req.pages,
 620             wq->ring->dbuf->num_pages);
 621 
 622         /* fill rest of mbx */
 623         mbx.u0.s.embedded = 1;
 624         mbx.payload_length = sizeof (struct mbx_create_nic_wq);
 625         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 626 
 627         /* now post the command */
 628         ret = oce_mbox_post(dev, &mbx, NULL);
 629         if (ret != DDI_SUCCESS) {
 630                 oce_log(dev, CE_WARN, MOD_CONFIG,
 631                     "WQ create failed: %d", ret);
 632                 oce_cq_del(dev, cq);
 633                 return (ret);
 634         }
 635 
 636         /* interpret the response */
 637         wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);
 638         wq->qstate = QCREATED;
 639         wq->cq = cq;
 640         /* set the WQCQ handlers */
 641         wq->cq->cq_handler = oce_drain_wq_cq;
 642         wq->cq->cb_arg = (void *)wq;
 643         /* All are free to start with */
 644         wq->wq_free = wq->cfg.q_len;
  645         /* reset indices */
 646         wq->ring->cidx = 0;
 647         wq->ring->pidx = 0;
 648         oce_log(dev, CE_NOTE, MOD_CONFIG, "WQ CREATED WQID = %d",
 649             wq->wq_id);
 650 
 651         return (0);
 652 }
 653 
 654 /*
 655  * function to delete a WQ
 656  *
 657  * dev - software handle to the device
 658  * wq - WQ to delete
 659  *
 660  * return none
 661  */
 662 static void
 663 oce_wq_del(struct oce_dev *dev, struct oce_wq *wq)
 664 {
 665         struct oce_mbx mbx;
 666         struct mbx_delete_nic_wq *fwcmd;
 667 
 668 
 669         ASSERT(dev != NULL);
 670         ASSERT(wq != NULL);
 671         if (wq->qstate == QCREATED) {
 672                 bzero(&mbx, sizeof (struct oce_mbx));
 673                 /* now fill the command */
 674                 fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
 675                 fwcmd->params.req.wq_id = wq->wq_id;
 676                 (void) oce_destroy_q(dev, &mbx,
 677                     sizeof (struct mbx_delete_nic_wq),
 678                     QTYPE_WQ);
 679                 wq->qstate = QDELETED;
 680                 oce_cq_del(dev, wq->cq);
 681                 wq->cq = NULL;
 682         }
 683 } /* oce_wq_del */
 684 
 685 /*
 686  * function to allocate RQ resources
 687  *
 688  * dev - software handle to the device
 689  * rqcfg - configuration structure providing RQ config parameters
 690  *
 691  * return pointer to the RQ created. NULL on failure
 692  */
 693 static struct oce_rq *
 694 oce_rq_init(struct oce_dev *dev, uint32_t q_len,
 695     uint32_t frag_size, uint32_t mtu,
 696     boolean_t rss)
 697 {
 698 
 699         struct oce_rq *rq;
 700         int ret;
 701 
 702         /* validate q creation parameters */
 703         if (!OCE_LOG2(frag_size))
 704                 return (NULL);
 705         if ((q_len == 0) || (q_len > 1024))
 706                 return (NULL);
 707 
 708         /* allocate the rq */
 709         rq = kmem_zalloc(sizeof (struct oce_rq), KM_NOSLEEP);
 710         if (rq == NULL) {
 711                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 712                     "RQ allocation failed");
 713                 return (NULL);
 714         }
 715 
 716         rq->cfg.q_len = q_len;
 717         rq->cfg.frag_size = frag_size;
 718         rq->cfg.mtu = mtu;
 719         rq->cfg.eqd = 0;
 720         rq->cfg.nbufs = dev->rq_max_bufs;
 721         rq->cfg.is_rss_queue = rss;
 722 
 723         /* assign parent */
 724         rq->parent = (void *)dev;
 725 
 726         rq->rq_bdesc_array =
 727             kmem_zalloc((sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs), KM_NOSLEEP);
 728         if (rq->rq_bdesc_array == NULL) {
 729                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 730                     "RQ bdesc alloc failed");
 731                 goto rqbd_alloc_fail;
 732         }
 733         /* create the rq buffer descriptor ring */
 734         rq->shadow_ring =
 735             kmem_zalloc((rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)),
 736             KM_NOSLEEP);
 737         if (rq->shadow_ring == NULL) {
 738                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 739                     "RQ shadow ring alloc failed ");
 740                 goto rq_shdw_fail;
 741         }
 742 
 743         /* allocate the free list array */
 744         rq->rqb_freelist =
 745             kmem_zalloc(rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *), KM_NOSLEEP);
 746         if (rq->rqb_freelist == NULL) {
 747                 goto rqb_free_list_fail;
 748         }
 749         /* create the buffer pool */
 750         ret  =  oce_rqb_cache_create(rq, dev->rq_frag_size +
 751             OCE_RQE_BUF_HEADROOM);
 752         if (ret != DDI_SUCCESS) {
 753                 goto rqb_fail;
 754         }
 755 
 756         /* create the ring buffer */
 757         rq->ring = create_ring_buffer(dev, q_len,
 758             sizeof (struct oce_nic_rqe), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
 759         if (rq->ring == NULL) {
 760                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 761                     "RQ ring create failed ");
 762                 goto rq_ringfail;
 763         }
 764 
 765         /* Initialize the RQ lock */
 766         mutex_init(&rq->rx_lock, NULL, MUTEX_DRIVER,
 767             DDI_INTR_PRI(dev->intr_pri));
 768         /* Initialize the recharge  lock */
 769         mutex_init(&rq->rc_lock, NULL, MUTEX_DRIVER,
 770             DDI_INTR_PRI(dev->intr_pri));
 771         atomic_inc_32(&dev->nrqs);
 772         return (rq);
 773 
 774 rq_ringfail:
 775         oce_rqb_cache_destroy(rq);
 776 rqb_fail:
 777         kmem_free(rq->rqb_freelist,
 778             (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
 779 rqb_free_list_fail:
 780 
 781         kmem_free(rq->shadow_ring,
 782             (rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)));
 783 rq_shdw_fail:
 784         kmem_free(rq->rq_bdesc_array,
 785             (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
 786 rqbd_alloc_fail:
 787         kmem_free(rq, sizeof (struct oce_rq));
 788         return (NULL);
  789 } /* oce_rq_init */
 790 
 791 /*
  792  * function to free an RQ and its resources
 793  *
 794  * dev - software handle to the device
 795  * rq - RQ to delete
 796  *
 797  * return none
 798  */
 799 static void
 800 oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq)
 801 {
 802         /* Destroy buffer cache */
 803         oce_rqb_cache_destroy(rq);
 804         destroy_ring_buffer(dev, rq->ring);
 805         rq->ring = NULL;
 806         kmem_free(rq->shadow_ring,
 807             sizeof (oce_rq_bdesc_t *) * rq->cfg.q_len);
 808         rq->shadow_ring = NULL;
 809         kmem_free(rq->rq_bdesc_array,
 810             (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
 811         rq->rq_bdesc_array = NULL;
 812         kmem_free(rq->rqb_freelist,
 813             (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
 814         rq->rqb_freelist = NULL;
 815         mutex_destroy(&rq->rx_lock);
 816         mutex_destroy(&rq->rc_lock);
 817         kmem_free(rq, sizeof (struct oce_rq));
 818         atomic_dec_32(&dev->nrqs);
  819 } /* oce_rq_fini */
 820 
 821 
 822 static int
 823 oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
 824 {
 825         struct oce_mbx mbx;
 826         struct mbx_create_nic_rq *fwcmd;
 827         struct oce_dev *dev = rq->parent;
 828         struct oce_cq *cq;
 829         int ret;
 830 
 831         cq = oce_cq_create(dev, eq, CQ_LEN_1024, sizeof (struct oce_nic_rx_cqe),
 832             B_FALSE, B_TRUE, B_FALSE, 3);
 833 
 834         if (cq == NULL) {
 835                 return (DDI_FAILURE);
 836         }
 837 
 838         /* now fill the command */
 839         bzero(&mbx, sizeof (struct oce_mbx));
 840         fwcmd = (struct mbx_create_nic_rq *)&mbx.payload;
 841         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 842             MBX_SUBSYSTEM_NIC,
 843             OPCODE_CREATE_NIC_RQ, MBX_TIMEOUT_SEC,
 844             sizeof (struct mbx_create_nic_rq));
 845 
 846         fwcmd->params.req.num_pages = rq->ring->dbuf->num_pages;
 847         fwcmd->params.req.frag_size = OCE_LOG2(rq->cfg.frag_size);
 848         fwcmd->params.req.cq_id = cq->cq_id;
 849         oce_page_list(rq->ring->dbuf, fwcmd->params.req.pages,
 850             rq->ring->dbuf->num_pages);
 851 
 852         fwcmd->params.req.if_id = if_id;
 853         fwcmd->params.req.max_frame_size = (uint16_t)rq->cfg.mtu;
 854         fwcmd->params.req.is_rss_queue = rq->cfg.is_rss_queue;
 855 
 856         /* fill rest of mbx */
 857         mbx.u0.s.embedded = 1;
 858         mbx.payload_length = sizeof (struct mbx_create_nic_rq);
 859         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 860 
 861         /* now post the command */
 862         ret = oce_mbox_post(dev, &mbx, NULL);
 863         if (ret != 0) {
 864                 oce_log(dev, CE_WARN, MOD_CONFIG,
 865                     "RQ create failed: %d", ret);
 866                 oce_cq_del(dev, cq);
 867                 return (ret);
 868         }
 869 
 870         /* interpret the response */
 871         rq->rq_id = LE_16(fwcmd->params.rsp.u0.s.rq_id);
 872         rq->rss_cpuid = fwcmd->params.rsp.u0.s.rss_cpuid;
 873         rq->cfg.if_id = if_id;
 874         rq->qstate = QCREATED;
 875         rq->cq = cq;
 876 
 877         /* set the Completion Handler */
 878         rq->cq->cq_handler = oce_drain_rq_cq;
 879         rq->cq->cb_arg  = (void *)rq;
  880         /* reset the indices */
 881         rq->ring->cidx = 0;
 882         rq->ring->pidx = 0;
 883         rq->buf_avail = 0;
 884         oce_log(dev, CE_NOTE, MOD_CONFIG, "RQ created, RQID : %d", rq->rq_id);
 885         return (0);
 886 
 887 }
 888 
 889 /*
 890  * function to delete an RQ
 891  *
 892  * dev - software handle to the device
 893  * rq - RQ to delete
 894  *
 895  * return none
 896  */
 897 static void
 898 oce_rq_del(struct oce_dev *dev, struct oce_rq *rq)
 899 {
 900         struct oce_mbx mbx;
 901         struct mbx_delete_nic_rq *fwcmd;
 902 
 903         ASSERT(dev != NULL);
 904         ASSERT(rq != NULL);
 905 
 906         bzero(&mbx, sizeof (struct oce_mbx));
 907 
 908         /* delete the Queue  */
 909         if (rq->qstate == QCREATED) {
 910                 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
 911                 fwcmd->params.req.rq_id = rq->rq_id;
 912                 (void) oce_destroy_q(dev, &mbx,
 913                     sizeof (struct mbx_delete_nic_rq), QTYPE_RQ);
 914                 rq->qstate = QDELETED;
 915                 oce_clean_rq(rq);
 916                 /* Delete the associated CQ */
 917                 oce_cq_del(dev, rq->cq);
 918                 rq->cq = NULL;
 919                 /* free up the posted buffers */
 920                 oce_rq_discharge(rq);
 921         }
 922 } /* oce_rq_del */
 923 
 924 /*
 925  * function to arm an EQ so that it can generate events
 926  *
 927  * dev - software handle to the device
 928  * qid - id of the EQ returned by the fw at the time of creation
 929  * npopped - number of EQEs to arm with
 930  * rearm - rearm bit
 931  * clearint - bit to clear the interrupt condition because of which
 932  *      EQEs are generated
 933  *
 934  * return none
 935  */
 936 void
 937 oce_arm_eq(struct oce_dev *dev, int16_t qid, int npopped,
 938     boolean_t rearm, boolean_t clearint)
 939 {
 940         eq_db_t eq_db = {0};
 941 
 942         eq_db.bits.rearm = rearm;
 943         eq_db.bits.event  = B_TRUE;
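              /* the event bit marks this as an eq doorbell; oce_arm_cq clears it for cqs */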
 944         eq_db.bits.num_popped = npopped;
 945         eq_db.bits.clrint = clearint;
 946         eq_db.bits.qid = qid;
 947         OCE_DB_WRITE32(dev, PD_EQ_DB, eq_db.dw0);
 948 }
 949 
 950 /*
 951  * function to arm a CQ with CQEs
 952  *
 953  * dev - software handle to the device
 954  * qid - the id of the CQ returned by the fw at the time of creation
 955  * npopped - number of CQEs to arm with
 956  * rearm - rearm bit enable/disable
 957  *
 958  * return none
 959  */
 960 void
 961 oce_arm_cq(struct oce_dev *dev, int16_t qid, int npopped,
 962     boolean_t rearm)
 963 {
 964         cq_db_t cq_db = {0};
 965         cq_db.bits.rearm = rearm;
 966         cq_db.bits.num_popped = npopped;
 967         cq_db.bits.event = 0;
 968         cq_db.bits.qid = qid;
 969         OCE_DB_WRITE32(dev, PD_CQ_DB, cq_db.dw0);
 970 }
 971 
 972 
 973 /*
 974  * function to delete a EQ, CQ, MQ, WQ or RQ
 975  *
  976  * dev - software handle to the device
 977  * mbx - mbox command to send to the fw to delete the queue
 978  *      mbx contains the queue information to delete
 979  * req_size - the size of the mbx payload dependent on the qtype
 980  * qtype - the type of queue i.e. EQ, CQ, MQ, WQ or RQ
 981  *
 982  * return DDI_SUCCESS => success, failure otherwise
 983  */
 984 int
 985 oce_destroy_q(struct oce_dev *dev, struct oce_mbx  *mbx, size_t req_size,
 986     enum qtype qtype)
 987 {
 988         struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
 989         int opcode;
 990         int subsys;
 991         int ret;
 992 
 993         switch (qtype) {
 994         case QTYPE_EQ: {
 995                 opcode = OPCODE_DESTROY_COMMON_EQ;
 996                 subsys = MBX_SUBSYSTEM_COMMON;
 997                 break;
 998         }
 999         case QTYPE_CQ: {
1000                 opcode = OPCODE_DESTROY_COMMON_CQ;
1001                 subsys = MBX_SUBSYSTEM_COMMON;
1002                 break;
1003         }
1004         case QTYPE_MQ: {
1005                 opcode = OPCODE_DESTROY_COMMON_MQ;
1006                 subsys = MBX_SUBSYSTEM_COMMON;
1007                 break;
1008         }
1009         case QTYPE_WQ: {
1010                 opcode = OPCODE_DELETE_NIC_WQ;
1011                 subsys = MBX_SUBSYSTEM_NIC;
1012                 break;
1013         }
1014         case QTYPE_RQ: {
1015                 opcode = OPCODE_DELETE_NIC_RQ;
1016                 subsys = MBX_SUBSYSTEM_NIC;
1017                 break;
1018         }
1019         default: {
1020                 ASSERT(0);
1021                 break;
1022         }
1023         }
1024 
1025         mbx_common_req_hdr_init(hdr, 0, 0, subsys,
1026             opcode, MBX_TIMEOUT_SEC, req_size);
1027 
1028         /* fill rest of mbx */
1029         mbx->u0.s.embedded = 1;
1030         mbx->payload_length = (uint32_t)req_size;
1031         DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
1032 
1033         /* send command */
1034         ret = oce_mbox_post(dev, mbx, NULL);
1035 
1036         if (ret != 0) {
1037                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
1038                     "Failed to del q ");
1039         }
1040         return (ret);
1041 }
1042 
1043 /*
1044  * function to set the delay parameter in the EQ for interrupt coalescing
1045  *
1046  * dev - software handle to the device
 1047  * eq_arr - array of EQ ids whose delay is to be updated
1048  * eq_cnt - number of elements in eq_arr
1049  * eq_delay - delay parameter
1050  *
1051  * return DDI_SUCCESS => success, failure otherwise
1052  */
1053 int
1054 oce_set_eq_delay(struct oce_dev *dev, uint32_t *eq_arr,
1055     uint32_t eq_cnt, uint32_t eq_delay)
1056 {
1057         struct oce_mbx mbx;
1058         struct mbx_modify_common_eq_delay *fwcmd;
1059         int ret;
1060         int neq;
1061 
1062         bzero(&mbx, sizeof (struct oce_mbx));
1063         fwcmd = (struct mbx_modify_common_eq_delay *)&mbx.payload;
1064 
1065         /* fill the command */
1066         fwcmd->params.req.num_eq = eq_cnt;
1067         for (neq = 0; neq < eq_cnt; neq++) {
1068                 fwcmd->params.req.delay[neq].eq_id = eq_arr[neq];
1069                 fwcmd->params.req.delay[neq].phase = 0;
1070                 fwcmd->params.req.delay[neq].dm = eq_delay;
1071 
1072         }
1073 
1074         /* initialize the ioctl header */
1075         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
1076             MBX_SUBSYSTEM_COMMON,
1077             OPCODE_MODIFY_COMMON_EQ_DELAY,
1078             MBX_TIMEOUT_SEC,
1079             sizeof (struct mbx_modify_common_eq_delay));
1080 
1081         /* fill rest of mbx */
1082         mbx.u0.s.embedded = 1;
1083         mbx.payload_length = sizeof (struct mbx_modify_common_eq_delay);
1084         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
1085 
1086         /* post the command */
1087         ret = oce_mbox_post(dev, &mbx, NULL);
1088         if (ret != 0) {
1089                 oce_log(dev, CE_WARN, MOD_CONFIG,
1090                     "Failed to set EQ delay %d", ret);
1091         }
1092 
1093         return (ret);
1094 } /* oce_set_eq_delay */
1095 
1096 /*
1097  * function to cleanup the eqs used during stop
1098  *
1099  * eq - pointer to event queue structure
1100  *
1101  * return none
1102  */
1103 void
1104 oce_drain_eq(struct oce_eq *eq)
1105 {
1106         struct oce_eqe *eqe;
1107         uint16_t num_eqe = 0;
1108         struct oce_dev *dev;
1109 
1110         dev = eq->parent;
 1111         /* get the first item in eq to process */
 1112         eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
1113 
1114         while (eqe->u0.dw0) {
1115                 eqe->u0.dw0 = LE_32(eqe->u0.dw0);
1116 
1117                 /* clear valid bit */
1118                 eqe->u0.dw0 = 0;
1119 
1120                 /* process next eqe */
1121                 RING_GET(eq->ring, 1);
1122 
1123                 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
1124                 num_eqe++;
1125         } /* for all EQEs */
1126         if (num_eqe) {
1127                 oce_arm_eq(dev, eq->eq_id, num_eqe, B_FALSE, B_TRUE);
1128         }
1129 } /* oce_drain_eq */
1130 
1131 
1132 int
1133 oce_init_txrx(struct oce_dev  *dev)
1134 {
1135         int qid = 0;
1136 
1137         /* enable RSS if rx queues > 1 */
1138         dev->rss_enable = (dev->rx_rings > 1) ? B_TRUE : B_FALSE;
1139 
1140         for (qid = 0; qid < dev->tx_rings; qid++) {
1141                 dev->wq[qid] = oce_wq_init(dev, dev->tx_ring_size,
1142                     NIC_WQ_TYPE_STANDARD);
1143                 if (dev->wq[qid] == NULL) {
1144                         goto queue_fail;
1145                 }
1146         }
1147 
1148         /* Now create the Rx Queues */
 1149         /* qid 0 is always the default non-RSS queue */
1150         dev->rq[0] = oce_rq_init(dev, dev->rx_ring_size, dev->rq_frag_size,
1151             OCE_MAX_JUMBO_FRAME_SIZE, B_FALSE);
1152         if (dev->rq[0] == NULL) {
1153                 goto queue_fail;
1154         }
1155 
1156         for (qid = 1; qid < dev->rx_rings; qid++) {
1157                 dev->rq[qid] = oce_rq_init(dev, dev->rx_ring_size,
1158                     dev->rq_frag_size, OCE_MAX_JUMBO_FRAME_SIZE,
1159                     dev->rss_enable);
1160                 if (dev->rq[qid] == NULL) {
1161                         goto queue_fail;
1162                 }
1163         }
1164 
1165         return (DDI_SUCCESS);
1166 queue_fail:
1167         oce_fini_txrx(dev);
1168         return (DDI_FAILURE);
1169 }
1170 void
1171 oce_fini_txrx(struct oce_dev *dev)
1172 {
1173         int qid;
1174         int nqs;
1175 
1176         /* free all the tx rings */
1177         /* nwqs is decremented in fini so copy count first */
1178         nqs = dev->nwqs;
1179         for (qid = 0; qid < nqs; qid++) {
1180                 if (dev->wq[qid] != NULL) {
1181                         oce_wq_fini(dev, dev->wq[qid]);
1182                         dev->wq[qid] = NULL;
1183                 }
1184         }
1185         /* free all the rx rings */
1186         nqs = dev->nrqs;
1187         for (qid = 0; qid < nqs; qid++) {
1188                 if (dev->rq[qid] != NULL) {
1189                         oce_rq_fini(dev, dev->rq[qid]);
1190                         dev->rq[qid] = NULL;
1191                 }
1192         }
1193 }
1194 
1195 int
1196 oce_create_queues(struct oce_dev *dev)
1197 {
1198 
1199         int i;
1200         struct oce_eq *eq;
1201         struct oce_mq *mq;
1202 
1203         for (i = 0; i < dev->num_vectors; i++) {
1204                 eq = oce_eq_create(dev, EQ_LEN_1024, EQE_SIZE_4, 0);
1205                 if (eq == NULL) {
1206                         goto rings_fail;
1207                 }
1208                 dev->eq[i] = eq;
1209         }
1210         for (i = 0; i < dev->nwqs; i++) {
1211                 if (oce_wq_create(dev->wq[i], dev->eq[0]) != 0)
1212                         goto rings_fail;
1213         }
1214 
1215         for (i = 0; i < dev->nrqs; i++) {
1216                 if (oce_rq_create(dev->rq[i], dev->if_id,
1217                     dev->neqs > 1 ? dev->eq[1 + i] : dev->eq[0]) != 0)
1218                         goto rings_fail;
1219         }
1220         mq = oce_mq_create(dev, dev->eq[0], 64);
1221         if (mq == NULL)
1222                 goto rings_fail;
1223         dev->mq = mq;
1224         return (DDI_SUCCESS);
1225 rings_fail:
1226         oce_delete_queues(dev);
1227         return (DDI_FAILURE);
1228 
1229 }
1230 
1231 void
1232 oce_delete_queues(struct oce_dev *dev)
1233 {
1234         int i;
1235         int neqs = dev->neqs;
1236         if (dev->mq != NULL) {
1237                 oce_mq_del(dev, dev->mq);
1238                 dev->mq = NULL;
1239         }
1240 
1241         for (i = 0; i < dev->nrqs; i++) {
1242                 oce_rq_del(dev, dev->rq[i]);
1243         }
1244         for (i = 0; i < dev->nwqs; i++) {
1245                 oce_wq_del(dev, dev->wq[i]);
1246         }
1247         /* delete as many eqs as the number of vectors */
1248         for (i = 0; i < neqs; i++) {
1249                 oce_eq_del(dev, dev->eq[i]);
1250                 dev->eq[i] = NULL;
1251         }
1252 }
1253 
1254 void
1255 oce_dev_rss_ready(struct oce_dev *dev)
1256 {
1257         uint8_t dev_index = 0;
1258         uint8_t adapter_rss = 0;
1259 
1260         /* Return if rx_rings <= 1 (No RSS) */
1261         if (dev->rx_rings <= 1) {
1262                 oce_log(dev, CE_NOTE, MOD_CONFIG,
1263                     "Rx rings = %d, Not enabling RSS", dev->rx_rings);
1264                 return;
1265         }
1266 
1267         /*
1268          * Count the number of PCI functions enabling RSS on this
1269          * adapter
1270          */
1271         while (dev_index < MAX_DEVS) {
1272                 if ((oce_dev_list[dev_index] != NULL) &&
1273                     (dev->pci_bus == oce_dev_list[dev_index]->pci_bus) &&
1274                     (dev->pci_device == oce_dev_list[dev_index]->pci_device) &&
1275                     (oce_dev_list[dev_index]->rss_enable)) {
1276                         adapter_rss++;
1277                 }
1278                 dev_index++;
1279         }
1280 
1281         /*
1282          * If there are already MAX_RSS_PER_ADAPTER PCI functions using
1283          * RSS on this adapter, reduce the number of rx rings to 1
1284          * (No RSS)
1285          */
1286         if (adapter_rss >= MAX_RSS_PER_ADAPTER) {
1287                 dev->rx_rings = 1;
1288         }
1289 }


   1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2009-2012 Emulex. All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27 
  28 
  29 /*
  30  * Source file containing Queue handling functions
  31  *
  32  */
  33 
  34 #include <oce_impl.h>
  35 
  36 int oce_destroy_q(struct oce_dev  *oce, struct oce_mbx  *mbx, size_t req_size,
  37     enum qtype  qtype, uint32_t mode);
  38 /* Mailbox Queue functions */
  39 struct oce_mq *
  40 oce_mq_create_ext_v0(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);
  41 struct oce_mq *
  42 oce_mq_create_ext_v1(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);
  43 struct oce_mq *
  44 oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);
  45 
  46 /* event queue handling */
  47 int
  48 oce_eq_create(struct oce_dev *dev, struct oce_eq *, uint32_t q_len,
  49         uint32_t item_size, uint32_t eq_delay, uint32_t mode);
  50 
  51 /* completion queue handling */
  52 struct oce_cq *
  53 oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
  54     uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
  55     boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode);
  56 
  57 struct oce_cq *
  58 oce_cq_create_v0(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
  59         uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
  60         boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode);
  61 
  62 struct oce_cq *
  63 oce_cq_create_v2(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
  64         uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
  65         boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode);
  66 
  67 /* Tx  WQ functions */
  68 int oce_wq_init(struct oce_dev *dev, struct oce_wq *, uint32_t q_len,
  69     int wq_type);
  70 static void oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq);
  71 static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq, uint32_t mode);
  72 static void oce_wq_del(struct oce_dev *dev, struct oce_wq *wq, uint32_t mode);
  73 /* Rx Queue functions */
  74 static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq,
  75     uint32_t mode);
  76 static void oce_rq_del(struct oce_dev *dev, struct oce_rq *rq, uint32_t mode);
  77 
  78 /*
  79  * function to create an event queue
  80  *
  81  * dev - software handle to the device
  82  * q_len/item_size/eq_delay/mode - queue length, eqe size, delay and mbox mode
  83  *
  84  * return DDI_SUCCESS => success, DDI_FAILURE otherwise
  85  */
  86 int oce_eq_create(struct oce_dev *dev, struct oce_eq *eq,
  87         uint32_t q_len, uint32_t item_size, uint32_t eq_delay, uint32_t mode)
  88 {
  89         struct oce_mbx mbx;
  90         struct mbx_create_common_eq *fwcmd;
  91         int ret = 0;
  92 
  93         if (eq == NULL) {
  94                 return (DDI_FAILURE);
  95         }
  96         mutex_enter(&eq->lock);
  97         bzero(&mbx, sizeof (struct oce_mbx));
  98         /* allocate mbx */
  99         fwcmd = (struct mbx_create_common_eq *)&mbx.payload;
 100 
 101         eq->ring = oce_create_ring_buffer(dev, q_len,
 102             item_size, DDI_DMA_CONSISTENT|DDI_DMA_RDWR);
 103 
 104         if (eq->ring == NULL) {
 105                 oce_log(dev, CE_WARN, MOD_CONFIG,
 106                     "EQ ring alloc failed:0x%p", (void *)eq->ring);
                      mutex_exit(&eq->lock); /* don't leak the lock on failure */
 107                 return (DDI_FAILURE);
 108         }
 109 
 110         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 111             MBX_SUBSYSTEM_COMMON,
 112             OPCODE_CREATE_COMMON_EQ, MBX_TIMEOUT_SEC,
 113             sizeof (struct mbx_create_common_eq), 0);
 114 
 115         fwcmd->params.req.num_pages = eq->ring->dbuf.num_pages;
 116         oce_page_list(&eq->ring->dbuf, &fwcmd->params.req.pages[0],
 117             eq->ring->dbuf.num_pages);
 118 
 119         /* dw 0 */
 120         fwcmd->params.req.eq_ctx.size = (item_size == 4) ? 0 : 1;
 121         fwcmd->params.req.eq_ctx.valid = 1;
 122         /* dw 1 */
 123         fwcmd->params.req.eq_ctx.armed = 0;
 124         fwcmd->params.req.eq_ctx.pd = 0;
 125         fwcmd->params.req.eq_ctx.count = OCE_LOG2(q_len/256);
 126 
 127         /* dw 2 */
 128         fwcmd->params.req.eq_ctx.nodelay  = 0;
 129         fwcmd->params.req.eq_ctx.phase = 0;
 130         /* todo: calculate multiplier from max min and cur */
 131         fwcmd->params.req.eq_ctx.delay_mult = eq_delay;
 132 
 133         /* fill rest of mbx */
 134         mbx.u0.s.embedded = 1;
 135         mbx.payload_length = sizeof (struct mbx_create_common_eq);
 136         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 137 
 138         /* now post the command */
 139         ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
 140 
 141         if (ret != 0) {
 142                 oce_log(dev, CE_WARN, MOD_CONFIG,
 143                     "EQ create failed: 0x%x", ret);
 144                 destroy_ring_buffer(dev, eq->ring);
                      mutex_exit(&eq->lock); /* don't leak the lock on failure */
 145                 return (DDI_FAILURE);
 146         }
 147 
 148         /* interpret the response */
 149         eq->eq_id = LE_16(fwcmd->params.rsp.eq_id);
 150         eq->parent = (void *)dev;
 151         atomic_inc_32(&dev->neqs);
 152         eq->qstate = QCREATED;
 153         mutex_exit(&eq->lock);
 154         oce_log(dev, CE_NOTE, MOD_CONFIG,
 155             "EQ created, eq=0x%p eq_id=0x%x", (void *)eq, eq->eq_id);
 156         /* eq has been initialized in place */
 157         return (DDI_SUCCESS);
 158 } /* oce_eq_create */
 159 
 160 /*
 161  * function to delete an event queue
 162  *
 163  * dev - software handle to the device
 164  * eq - handle to the eq to be deleted
 165  *
 166  * return none
 167  */
 168 void
 169 oce_eq_del(struct oce_dev *dev, struct oce_eq *eq, uint32_t mode)
 170 {
 171         struct oce_mbx mbx;
 172         struct mbx_destroy_common_eq *fwcmd;
 173 
 174         mutex_enter(&eq->lock);
 175         eq->qstate = QDELETED;
 176         bzero(&mbx, sizeof (struct oce_mbx));
 177 
 178         /* drain the residual events */
 179         oce_drain_eq(eq);
 180 
 181         /* destroy the ring */
 182         destroy_ring_buffer(dev, eq->ring);
 183         eq->ring = NULL;
 184 
 185         /* send a command to delete the EQ */
 186         fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
 187         fwcmd->params.req.id = eq->eq_id;
 188         (void) oce_destroy_q(dev, &mbx,
 189             sizeof (struct mbx_destroy_common_eq),
 190             QTYPE_EQ, mode);
 191         atomic_dec_32(&dev->neqs);
 192         mutex_exit(&eq->lock);
 193 }
 194 
 195 /*
 196  * function to create a V0 completion queue
 197  *
 198  * dev - software handle to the device
 199  * eq - optional eq to be associated with to the cq
 200  * cqcfg - configuration for this queue
 201  *
 202  * return pointer to the cq created. NULL on failure
 203  */
 204 struct oce_cq *
 205 oce_cq_create_v0(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
 206     uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
 207     boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode)
 208 {
 209         struct oce_cq *cq = NULL;
 210         struct oce_mbx mbx;
 211         struct mbx_create_common_cq_v0 *fwcmd;
 212         int ret = 0;
 213 
 214         /* create cq */
 215         cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);
 216         if (cq == NULL) {
 217                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 218                     "CQ allocation failed");
 219                 return (NULL);
 220         }
 221 
 222         /* create the ring buffer for this queue */
 223         cq->ring = oce_create_ring_buffer(dev, q_len,
 224             item_size, DDI_DMA_CONSISTENT|DDI_DMA_RDWR);
 225         if (cq->ring == NULL) {
 226                 oce_log(dev, CE_WARN, MOD_CONFIG,
 227                     "CQ ring alloc failed:0x%p",
 228                     (void *)cq->ring);
 229                 kmem_free(cq, sizeof (struct oce_cq));
 230                 return (NULL);
 231         }
 232         /* initialize mailbox */
 233         bzero(&mbx, sizeof (struct oce_mbx));
 234         fwcmd = (struct mbx_create_common_cq_v0 *)&mbx.payload;
 235 
 236         /* fill the command header */
 237         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 238             MBX_SUBSYSTEM_COMMON,
 239             OPCODE_CREATE_COMMON_CQ, MBX_TIMEOUT_SEC,
 240             sizeof (struct mbx_create_common_cq_v0), 0);
 241 
 242         /* fill command context */
 243         /* dw0 */
 244         fwcmd->params.req.cq_ctx.eventable = is_eventable;
 245         fwcmd->params.req.cq_ctx.sol_event = sol_event;
 246         fwcmd->params.req.cq_ctx.valid = 1;
 247         fwcmd->params.req.cq_ctx.count = OCE_LOG2(q_len/256);
 248         fwcmd->params.req.cq_ctx.nodelay = nodelay;
 249         fwcmd->params.req.cq_ctx.coalesce_wm = ncoalesce;
 250 
 251         /* dw1 */
 252         fwcmd->params.req.cq_ctx.armed = armed;
 253         fwcmd->params.req.cq_ctx.eq_id = eq->eq_id;
 254         fwcmd->params.req.cq_ctx.pd = 0;
 255 
 256         /* fill the rest of the command */
 257         fwcmd->params.req.num_pages = cq->ring->dbuf.num_pages;
 258         oce_page_list(&cq->ring->dbuf, &fwcmd->params.req.pages[0],
 259             cq->ring->dbuf.num_pages);
 260 
 261         /* fill rest of mbx */
 262         mbx.u0.s.embedded = 1;
 263         mbx.payload_length = sizeof (struct mbx_create_common_cq_v0);
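             /* DW_SWAP adjusts the command to the hw byte order where host endianness differs */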
 264         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 265 
 266         /* now send the mail box */
 267         ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
 268 
 269         if (ret != 0) {
 270                 oce_log(dev, CE_WARN, MOD_CONFIG,
 271                     "CQ create failed: 0x%x", ret);
 272                 destroy_ring_buffer(dev, cq->ring);
 273                 kmem_free(cq, sizeof (struct oce_cq));
 274                 return (NULL);
 275         }
 276 
 277         cq->parent = dev;
  278         cq->eq = eq; /* associated event queue */
 279         cq->cq_cfg.q_len = q_len;
 280 
 281         /* interpret the response */
 282         cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
 283         dev->cq[cq->cq_id % OCE_MAX_CQ] = cq;
 284         cq->qstate = QCREATED;
 285         atomic_inc_32(&eq->ref_count);
 286         return (cq);
 287 } /* oce_cq_create_v0 */
 288 
 289 /*
 290  * function to create a V2 completion queue
 291  *
 292  * dev - software handle to the device
  293  * eq - the eq to be associated with the cq
  294  * remaining args - configuration for this queue
 295  *
 296  * return pointer to the cq created. NULL on failure
 297  */
 298 struct oce_cq *
 299 oce_cq_create_v2(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
 300     uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
 301     boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode)
 302 {
 303         struct oce_cq *cq = NULL;
 304         struct oce_mbx mbx;
 305         struct mbx_create_common_cq_v2 *fwcmd;
 306         int ret = 0;
 307 
 308         _NOTE(ARGUNUSED(sol_event));
 309         _NOTE(ARGUNUSED(ncoalesce));
 310         /* create cq */
 311         cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);
 312         if (cq == NULL) {
  313                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 314                     "CQ allocation failed");
 315                 return (NULL);
 316         }
 317 
 318         /* create the ring buffer for this queue */
 319         cq->ring = oce_create_ring_buffer(dev, q_len,
 320             item_size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
 321         if (cq->ring == NULL) {
 322                 oce_log(dev, CE_WARN, MOD_CONFIG,
 323                     "CQ ring alloc failed:0x%p",
 324                     (void *)cq->ring);
 325                 kmem_free(cq, sizeof (struct oce_cq));
 326                 return (NULL);
 327         }
 328         /* initialize mailbox */
 329         bzero(&mbx, sizeof (struct oce_mbx));
 330         fwcmd = (struct mbx_create_common_cq_v2 *)&mbx.payload;
 331 
 332         /* fill the command header */
 333         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 334             MBX_SUBSYSTEM_COMMON,
 335             OPCODE_CREATE_COMMON_CQ, MBX_TIMEOUT_SEC,
 336             sizeof (struct mbx_create_common_cq_v2), 2);
 337 
 338         /* fill command context */
 339         /* dw0 */
 340         fwcmd->params.req.cq_ctx.eventable = is_eventable;
 341         fwcmd->params.req.cq_ctx.valid = 1;
 342         fwcmd->params.req.cq_ctx.count = 3;
 343         fwcmd->params.req.cq_ctx.nodelay = nodelay;
 344         fwcmd->params.req.cq_ctx.coalesce_wm = 0;
 345 
 346         /* dw1 */
 347         fwcmd->params.req.cq_ctx.armed = armed;
 348         fwcmd->params.req.cq_ctx.eq_id = eq->eq_id;
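             /* v2 passes the exact depth in cqe_count, unlike v0's encoded count field */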
 349         fwcmd->params.req.cq_ctx.cqe_count = q_len;
 350 
 351         fwcmd->params.req.page_size = 1;
 352         /* fill the rest of the command */
 353         fwcmd->params.req.num_pages = cq->ring->dbuf.num_pages;
 354         oce_page_list(&cq->ring->dbuf, &fwcmd->params.req.pages[0],
 355             cq->ring->dbuf.num_pages);
 356 
 357         /* fill rest of mbx */
 358         mbx.u0.s.embedded = 1;
 359         mbx.payload_length = sizeof (struct mbx_create_common_cq_v2);
 360         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 361 
 362         /* now send the mail box */
 363         ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
 364 
 365         if (ret != 0) {
 366                 oce_log(dev, CE_WARN, MOD_CONFIG,
 367                     "CQ create failed: 0x%x", ret);
 368                 destroy_ring_buffer(dev, cq->ring);
 369                 kmem_free(cq, sizeof (struct oce_cq));
 370                 return (NULL);
 371         }
 372 
 373         cq->parent = dev;
  374         cq->eq = eq; /* associated event queue */
 375         cq->cq_cfg.q_len = q_len;
 376 
 377         /* interpret the response */
 378         cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
 379         dev->cq[cq->cq_id % OCE_MAX_CQ] = cq;
 380         cq->qstate = QCREATED;
 381         atomic_inc_32(&eq->ref_count);
 382         return (cq);
 383 } /* oce_cq_create_v2 */
 384 
 385 /*
 386  * function to create a completion queue
 387  *
 388  * dev - software handle to the device
  389  * eq - the eq to be associated with the cq
  390  * remaining args - configuration for this queue
 391  *
 392  * return pointer to the cq created. NULL on failure
 393  */
 394 struct oce_cq *
 395 oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
 396     uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
 397     boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode)
 398 {
 399         struct oce_cq *cq = NULL;
 400         if (LANCER_CHIP(dev))
 401                 cq = oce_cq_create_v2(dev, eq, q_len, item_size, sol_event,
 402                     is_eventable, nodelay, ncoalesce, armed, mode);
 403         else
 404                 cq = oce_cq_create_v0(dev, eq, q_len, item_size, sol_event,
 405                     is_eventable, nodelay, ncoalesce, armed, mode);
 406         return (cq);
 407 }
 408 
 409 /*
 410  * function to delete a completion queue
 411  *
 412  * dev - software handle to the device
 413  * cq - handle to the CQ to delete
 414  *
 415  * return none
 416  */
 417 static void
 418 oce_cq_del(struct oce_dev *dev, struct oce_cq *cq, uint32_t mode)
 419 {
 420         struct oce_mbx mbx;
 421         struct mbx_destroy_common_cq *fwcmd;
 422 
 423         /* destroy the ring */
 424         destroy_ring_buffer(dev, cq->ring);
 425         cq->ring = NULL;
 426 
 427         bzero(&mbx, sizeof (struct oce_mbx));
 428         /* send a command to delete the CQ */
 429         fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
 430         fwcmd->params.req.id = cq->cq_id;
 431         (void) oce_destroy_q(dev, &mbx,
 432             sizeof (struct mbx_destroy_common_cq),
 433             QTYPE_CQ, mode);
 434 
 435         /* Reset the handler */
 436         cq->cq_handler = NULL;
 437         cq->qstate = QDELETED;
 438         dev->cq[cq->cq_id % OCE_MAX_CQ] = NULL;
 439         atomic_dec_32(&cq->eq->ref_count);
 440 
 441         /* release the eq */
 442         kmem_free(cq, sizeof (struct oce_cq));
 443 } /* oce_cq_del */
 444 
 445 /*
 446  * function to create an MQ
 447  *
 448  * dev - software handle to the device
 449  * eq - the EQ to associate with the MQ for event notification
 450  * q_len - the number of entries to create in the MQ
 451  *
 452  * return pointer to the created MQ, failure otherwise
 453  */
 454 struct oce_mq *
 455 oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
 456 {
 457         struct oce_mbx mbx;
 458         struct mbx_create_common_mq *fwcmd;
 459         struct oce_mq *mq = NULL;
 460         int ret = 0;
 461         struct oce_cq  *cq;
 462 
 463         /* Create the Completion Q */
 464         cq = oce_cq_create(dev, eq, MCC_CQ_LEN,
 465             sizeof (struct oce_mq_cqe),
 466             B_FALSE, B_TRUE, B_TRUE, 0, B_FALSE, MBX_BOOTSTRAP);
 467         if (cq == NULL) {
 468                 return (NULL);
 469         }
 470 
 471 
 472         /* allocate the mq */
 473         mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);
 474 
 475         if (mq == NULL) {
 476                 goto mq_alloc_fail;
 477         }
 478 
 479         bzero(&mbx, sizeof (struct oce_mbx));
 480         /* allocate mbx */
 481         fwcmd = (struct mbx_create_common_mq *)&mbx.payload;
 482 
 483         /* create the ring buffer for this queue */
 484         mq->ring = oce_create_ring_buffer(dev, q_len,
 485             sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
 486         if (mq->ring == NULL) {
 487                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 488                     "Legacy MQ ring alloc failed");
 489                 goto mq_ring_alloc;
 490         }
 491 
 492         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 493             MBX_SUBSYSTEM_COMMON,
 494             OPCODE_CREATE_COMMON_MQ, MBX_TIMEOUT_SEC,
 495             sizeof (struct mbx_create_common_mq), 0);
 496 
 497         fwcmd->params.req.num_pages = (uint16_t)mq->ring->dbuf.num_pages;
 498         oce_page_list(&mq->ring->dbuf, fwcmd->params.req.pages,
 499             mq->ring->dbuf.num_pages);
 500         fwcmd->params.req.context.u0.s.cq_id = cq->cq_id;
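             /* fw expects the MQ ring size encoded as log2(q_len) + 1 */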
 501         fwcmd->params.req.context.u0.s.ring_size =
 502             OCE_LOG2(q_len) + 1;
 503         fwcmd->params.req.context.u0.s.valid = 1;
 504         fwcmd->params.req.context.u0.s.fid = dev->fn;
 505 
 506         /* fill rest of mbx */
 507         mbx.u0.s.embedded = 1;
 508         mbx.payload_length = sizeof (struct mbx_create_common_mq);
 509         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 510 
 511         /* now send the mail box */
 512         ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, MBX_BOOTSTRAP);
 513         if (ret != DDI_SUCCESS) {
 514                 oce_log(dev, CE_WARN, MOD_CONFIG,
 515                     "Legacy MQ create failed: 0x%x", ret);
 516                 goto mq_fail;
 517         }
 518 
 519         /* interpret the response */
 520         mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
 521         mq->cq = cq;
 522         mq->cfg.q_len = (uint8_t)q_len;
 523         mq->cfg.eqd = 0;
 524 
 525         /* fill rest of the mq */
 526         mq->parent = dev;
 527 
 528         /* set the MQCQ handlers */
 529         cq->cq_handler = oce_drain_mq_cq;
 530         cq->cb_arg = (void *)mq;
 531         mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
 532             DDI_INTR_PRI(dev->intr_pri));
 533         oce_log(dev, CE_NOTE, MOD_CONFIG,
 534             "Legacy MQ CREATED SUCCESSFULLY MQID:%d\n",
 535             mq->mq_id);
 536         return (mq);
 537 
 538 mq_fail:
 539         destroy_ring_buffer(dev, mq->ring);
 540 mq_ring_alloc:
 541         kmem_free(mq, sizeof (struct oce_mq));
 542 mq_alloc_fail:
 543         oce_cq_del(dev, cq, MBX_BOOTSTRAP);
 544         return (NULL);
 545 } /* oce_mq_create */
 546 
 547 /*
 548  * function to create an extended V0 MQ
 549  *
 550  * dev - software handle to the device
 551  * eq - the EQ to associate with the MQ for event notification
 552  * q_len - the number of entries to create in the MQ
 553  *
 554  * return pointer to the created MQ, failure otherwise
 555  */
 556 struct oce_mq *
 557 oce_mq_create_ext_v0(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
 558 {
 559         struct oce_mbx mbx;
 560         struct mbx_create_common_mq_ext_v0 *fwcmd;
 561         struct oce_mq *mq = NULL;
 562         int ret = 0;
 563         struct oce_cq  *cq;
 564 
 565         /* Create the Completion Q */
 566         cq = oce_cq_create(dev, eq, MCC_CQ_LEN,
 567             sizeof (struct oce_mq_cqe),
 568             B_FALSE, B_TRUE, B_TRUE, 0, B_FALSE, MBX_BOOTSTRAP);
 569         if (cq == NULL) {
 570                 return (NULL);
 571         }
 572 
 573 
 574         /* allocate the mq */
 575         mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);
 576 
 577         if (mq == NULL) {
 578                 goto mq_alloc_fail;
 579         }
 580 
 581         bzero(&mbx, sizeof (struct oce_mbx));
 582         /* allocate mbx */
 583         fwcmd = (struct mbx_create_common_mq_ext_v0 *)&mbx.payload;
 584 
 585         /* create the ring buffer for this queue */
 586         mq->ring = oce_create_ring_buffer(dev, q_len,
 587             sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
 588         if (mq->ring == NULL) {
 589                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 590                     "MQ EXT ring alloc failed");
 591                 goto mq_ring_alloc;
 592         }
 593 
 594         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 595             MBX_SUBSYSTEM_COMMON,
 596             OPCODE_CREATE_COMMON_MQ_EXT, MBX_TIMEOUT_SEC,
 597             sizeof (struct mbx_create_common_mq_ext_v0), 0);
 598 
 599         fwcmd->params.req.num_pages = mq->ring->dbuf.num_pages;
 600         oce_page_list(&mq->ring->dbuf, fwcmd->params.req.pages,
 601             mq->ring->dbuf.num_pages);
 602         fwcmd->params.req.context.u0.s.cq_id = cq->cq_id;
 603         fwcmd->params.req.context.u0.s.ring_size =
 604             OCE_LOG2(q_len) + 1;
 605         fwcmd->params.req.context.u0.s.valid = 1;
 606         fwcmd->params.req.context.u0.s.fid = dev->fn;
 607 
  608         /* Register for Link State (bit 1), Group 5 (bit 5) and Debug events */
 609         fwcmd->params.req.async_event_bitmap[0] =
 610             (1 << ASYNC_EVENT_CODE_LINK_STATE) |
 611             (1 << ASYNC_EVENT_CODE_GRP_5) |
 612             (1 << ASYNC_EVENT_CODE_DEBUG);
 613 
 614         /* fill rest of mbx */
 615         mbx.u0.s.embedded = 1;
 616         mbx.payload_length = sizeof (struct mbx_create_common_mq_ext_v0);
 617         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 618 
 619         /* now send the mail box */
 620         ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, MBX_BOOTSTRAP);
 621         if (ret != DDI_SUCCESS) {
 622                 oce_log(dev, CE_WARN, MOD_CONFIG,
 623                     "Extended MQ create failed: 0x%x", ret);
 624                 goto mq_fail;
 625         }
 626 
 627         /* interpret the response */
 628         mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
 629         mq->cq = cq;
 630         mq->cfg.q_len = (uint8_t)q_len;
 631         mq->cfg.eqd = 0;
 632 
 633         /* fill rest of the mq */
 634         mq->parent = dev;
 635         mq->qstate = QCREATED;
 636         mq->mq_free = mq->cfg.q_len;
 637 
  638         /* reset indices */
 639         mq->ring->cidx = 0;
 640         mq->ring->pidx = 0;
 641 
 642         /* set the MQCQ handlers */
 643         cq->cq_handler = oce_drain_mq_cq;
 644         cq->cb_arg = (void *)mq;
 645         mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
 646             DDI_INTR_PRI(dev->intr_pri));
 647         oce_log(dev, CE_NOTE, MOD_CONFIG,
 648             "Ext MQ CREATED SUCCESSFULLY MQID:%d\n", mq->mq_id);
 649         return (mq);
 650 
 651 mq_fail:
 652         destroy_ring_buffer(dev, mq->ring);
 653 mq_ring_alloc:
 654         kmem_free(mq, sizeof (struct oce_mq));
 655 mq_alloc_fail:
 656         oce_cq_del(dev, cq, MBX_BOOTSTRAP);
 657         return (NULL);
 658 } /* oce_mq_create_ext_v0 */
 659 
 660 /*
 661  * function to create an extended V1 MQ
 662  *
 663  * dev - software handle to the device
 664  * eq - the EQ to associate with the MQ for event notification
 665  * q_len - the number of entries to create in the MQ
 666  *
 667  * return pointer to the created MQ, failure otherwise
 668  */
 669 struct oce_mq *
 670 oce_mq_create_ext_v1(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
 671 {
 672         struct oce_mbx mbx;
 673         struct mbx_create_common_mq_ext_v1 *fwcmd;
 674         struct oce_mq *mq = NULL;
 675         int ret = 0;
 676         struct oce_cq  *cq;
 677 
 678         /* Create the Completion Q */
 679         cq = oce_cq_create(dev, eq, MCC_CQ_LEN,
 680             sizeof (struct oce_mq_cqe),
 681             B_FALSE, B_TRUE, B_TRUE, 0, B_FALSE, MBX_BOOTSTRAP);
 682         if (cq == NULL) {
 683                 return (NULL);
 684         }
 685 
 686 
 687         /* allocate the mq */
 688         mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);
 689 
 690         if (mq == NULL) {
 691                 goto mq_alloc_fail;
 692         }
 693 
 694         bzero(&mbx, sizeof (struct oce_mbx));
 695         /* allocate mbx */
 696         fwcmd = (struct mbx_create_common_mq_ext_v1 *)&mbx.payload;
 697 
 698         /* create the ring buffer for this queue */
 699         mq->ring = oce_create_ring_buffer(dev, q_len,
 700             sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
 701         if (mq->ring == NULL) {
 702                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 703                     "MQ EXT ring alloc failed");
 704                 goto mq_ring_alloc;
 705         }
 706 
 707         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 708             MBX_SUBSYSTEM_COMMON,
 709             OPCODE_CREATE_COMMON_MQ_EXT, MBX_TIMEOUT_SEC,
 710             sizeof (struct mbx_create_common_mq_ext_v1), 1);
 711 
 712         fwcmd->params.req.cq_id = cq->cq_id;
 713 
 714         fwcmd->params.req.context.u0.s.ring_size =
 715             OCE_LOG2(q_len) + 1;
 716         fwcmd->params.req.context.u0.s.valid = 1;
 717         fwcmd->params.req.context.u0.s.async_cq_id = cq->cq_id;
 718         fwcmd->params.req.context.u0.s.async_cq_valid = 1;
 719 
 720         fwcmd->params.req.num_pages = mq->ring->dbuf.num_pages;
 721         oce_page_list(&mq->ring->dbuf, fwcmd->params.req.pages,
 722             mq->ring->dbuf.num_pages);
 723 
  724         /* Register for Link State (bit 1), Group 5 (bit 5) and Debug events */
 725         fwcmd->params.req.async_event_bitmap[0] =
 726             (1 << ASYNC_EVENT_CODE_LINK_STATE) |
 727             (1 << ASYNC_EVENT_CODE_GRP_5) |
 728             (1 << ASYNC_EVENT_CODE_DEBUG);
 729 
 730         /* fill rest of mbx */
 731         mbx.u0.s.embedded = 1;
 732         mbx.payload_length = sizeof (struct mbx_create_common_mq_ext_v1);
 733         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 734 
 735         /* now send the mail box */
 736         ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, MBX_BOOTSTRAP);
 737         if (ret != DDI_SUCCESS) {
 738                 oce_log(dev, CE_WARN, MOD_CONFIG,
 739                     "Extended MQ create failed: 0x%x", ret);
 740                 goto mq_fail;
 741         }
 742 
 743         /* interpret the response */
 744         mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
 745         mq->cq = cq;
 746         mq->cfg.q_len = (uint8_t)q_len;
 747         mq->cfg.eqd = 0;
 748 
 749         /* fill rest of the mq */
 750         mq->parent = dev;
 751         mq->qstate = QCREATED;
 752         mq->mq_free = mq->cfg.q_len;
 753 
  754         /* reset indices */
 755         mq->ring->cidx = 0;
 756         mq->ring->pidx = 0;
 757 
 758         /* set the MQCQ handlers */
 759         cq->cq_handler = oce_drain_mq_cq;
 760         cq->cb_arg = (void *)mq;
 761         mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
 762             DDI_INTR_PRI(dev->intr_pri));
 763         oce_log(dev, CE_NOTE, MOD_CONFIG,
 764             "Ext MQ CREATED SUCCESSFULLY MQID:%d\n", mq->mq_id);
 765         return (mq);
 766 
 767 mq_fail:
 768         destroy_ring_buffer(dev, mq->ring);
 769 mq_ring_alloc:
 770         kmem_free(mq, sizeof (struct oce_mq));
 771 mq_alloc_fail:
 772         oce_cq_del(dev, cq, MBX_BOOTSTRAP);
 773         return (NULL);
  774 } /* oce_mq_create_ext_v1 */
 775 
 776 /*
 777  * function to delete an MQ
 778  *
 779  * dev - software handle to the device
 780  * mq - pointer to the MQ to delete
 781  *
 782  * return none
 783  */
 784 static void
 785 oce_mq_del(struct oce_dev *dev, struct oce_mq *mq)
 786 {
 787         struct oce_mbx mbx;
 788         struct mbx_destroy_common_mq *fwcmd;
 789 
 790         /* destroy the ring */
 791         destroy_ring_buffer(dev, mq->ring);
 792         mq->ring = NULL;
 793         bzero(&mbx, sizeof (struct oce_mbx));
 794         fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
 795         fwcmd->params.req.id = mq->mq_id;
 796         (void) oce_destroy_q(dev, &mbx,
 797             sizeof (struct mbx_destroy_common_mq),
 798             QTYPE_MQ, MBX_BOOTSTRAP);
 799         oce_cq_del(dev, mq->cq, MBX_BOOTSTRAP);
 800         mq->cq = NULL;
 801         mq->qstate = QDELETED;
 802         mutex_destroy(&mq->lock);
 803         kmem_free(mq, sizeof (struct oce_mq));
 804 } /* oce_mq_del */
 805 
 806 /*
  807  * function to initialize a WQ for NIC Tx
  808  *
  809  * dev - software handle to the device
  810  * wq - WQ to initialize; q_len, wq_type - ring length and WQ type
  811  *
  812  * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 813  */
 814 int oce_wq_init(struct oce_dev *dev, struct oce_wq *wq, uint32_t q_len,
 815     int wq_type)
 816 {
 817         char str[MAX_POOL_NAME];
 818         int ret;
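             /* monotonic counter used only to build unique kmem cache names below */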
 819         static int wq_id = 0;
 820         int buf_size;
 821 
 822         ASSERT(dev != NULL);
 823         /* q_len must be min 256 and max 2k */
 824         if (q_len < 256 || q_len > 2048) {
 825                 oce_log(dev, CE_WARN, MOD_CONFIG,
 826                     "Invalid q length. Must be "
  827                     "[256, 2048]: 0x%x", q_len);
 828                 return (DDI_FAILURE);
 829         }
 830 
 831         /* Set the wq config */
 832         wq->cfg.q_len = q_len;
 833         wq->cfg.wq_type = (uint8_t)wq_type;
 834         wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
 835         wq->cfg.nbufs = 2 * wq->cfg.q_len;
 836         wq->cfg.nhdl = 2 * wq->cfg.q_len;
 837 
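             /* round tx_bcopy_limit up to the next 1KB multiple for the Tx copy buffers */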
 838         buf_size = ((dev->tx_bcopy_limit >> 10) +
 839             ((dev->tx_bcopy_limit & (((uint32_t)1 << 10) - 1)) > 0 ? 1 :
 840             0)) << 10;
 841         wq->cfg.buf_size = (uint16_t)buf_size;
 842 
 843         /* initialize ring statistics */
 844         wq->stat_bytes = wq->stat_pkts = 0;
 845 
 846         /* Create the WQ Buffer pool */
 847         ret  = oce_wqb_cache_create(wq, wq->cfg.buf_size);
 848         if (ret != DDI_SUCCESS) {
 849                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 850                     "WQ Buffer Pool create failed ");
 851                 return (DDI_FAILURE);
 852         }
 853 
 854         /* Create a pool of memory handles */
 855         ret = oce_wqm_cache_create(wq);
 856         if (ret != DDI_SUCCESS) {
 857                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 858                     "WQ MAP Handles Pool create failed ");
 859                 goto wqm_fail;
 860         }
 861 
 862         (void) snprintf(str, MAX_POOL_NAME, "%s%d%s%d", "oce_wqed_",
 863             dev->dev_id, "_", wq_id++);
 864         wq->wqed_cache = kmem_cache_create(str, sizeof (oce_wqe_desc_t),
 865             0, NULL, NULL, NULL, NULL, NULL, 0);
 866         if (wq->wqed_cache == NULL) {
 867                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 868                     "WQ Packet Desc Pool create failed ");
 869                 goto wqed_fail;
 870         }
 871 
 872         /* create the ring buffer */
 873         wq->ring = oce_create_ring_buffer(dev, q_len,
 874             NIC_WQE_SIZE, DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
 875         if (wq->ring == NULL) {
 876                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 877                     "Failed to create WQ ring ");
 878                 goto wq_ringfail;
 879         }
 880 
 881         /* Initialize WQ lock */
 882         mutex_init(&wq->tx_lock, NULL, MUTEX_DRIVER,
 883             DDI_INTR_PRI(dev->intr_pri));
 884         /* Initialize WQ lock */
 885         mutex_init(&wq->txc_lock, NULL, MUTEX_DRIVER,
 886             DDI_INTR_PRI(dev->intr_pri));
 887         atomic_inc_32(&dev->nwqs);
 888 
 889         mutex_init(&wq->wqed_list_lock, NULL, MUTEX_DRIVER,
 890             DDI_INTR_PRI(dev->intr_pri));
 891 
 892         list_create(&wq->wqe_desc_list, sizeof (oce_wqe_desc_t),
 893             offsetof(oce_wqe_desc_t, link));
 894         return (DDI_SUCCESS);
 895 
 898 wq_ringfail:
 899         kmem_cache_destroy(wq->wqed_cache);
 900 wqed_fail:
 901         oce_wqm_cache_destroy(wq);
 902 wqm_fail:
 903         oce_wqb_cache_destroy(wq);
 904         return (DDI_FAILURE);
  905 } /* oce_wq_init */
 906 
 907 /*
  908  * function to free WQ resources
  909  *
  910  * dev - software handle to the device
  911  * wq - WQ to free
  912  *
  913  * return none
 914  */
 915 static void
 916 oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq)
 917 {
  918         /* destroy the buffer and map-handle caches */
 919         oce_wqb_cache_destroy(wq);
 920         oce_wqm_cache_destroy(wq);
 921         kmem_cache_destroy(wq->wqed_cache);
 922 
 923         /* Free the packet descriptor list */
 924         list_destroy(&wq->wqe_desc_list);
 925         destroy_ring_buffer(dev, wq->ring);
 926         wq->ring = NULL;
 927         /* Destroy the Mutex */
 928         mutex_destroy(&wq->wqed_list_lock);
 929         mutex_destroy(&wq->tx_lock);
 930         mutex_destroy(&wq->txc_lock);
 931         atomic_dec_32(&dev->nwqs);
  932 } /* oce_wq_fini */
 933 
 934 
 935 static int
 936 oce_wq_create(struct oce_wq *wq, struct oce_eq *eq, uint32_t mode)
 937 {
 938 
 939         struct oce_mbx mbx;
 940         struct mbx_create_nic_wq *fwcmd;
 941         struct oce_dev *dev = wq->parent;
 942         struct oce_cq *cq;
 943         int ret;
 944 
 945         /* create the CQ */
 946         cq = oce_cq_create(dev, eq, CQ_LEN_1024,
 947             sizeof (struct oce_nic_tx_cqe),
 948             B_FALSE, B_TRUE, B_FALSE, 3, B_FALSE, mode);
 949         if (cq == NULL) {
 950                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 951                     "WQCQ create failed ");
 952                 return (DDI_FAILURE);
 953         }
 954         /* now fill the command */
 955         bzero(&mbx, sizeof (struct oce_mbx));
 956         fwcmd = (struct mbx_create_nic_wq *)&mbx.payload;
 957         if (LANCER_CHIP(dev)) {
 958                 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 959                     MBX_SUBSYSTEM_NIC,
 960                     OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
 961                     sizeof (struct mbx_create_nic_wq), 1);
 962                 fwcmd->params.req.ctx.if_id = dev->if_id;
 963         } else {
 964                 mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 965                     MBX_SUBSYSTEM_NIC,
 966                     OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
 967                     sizeof (struct mbx_create_nic_wq), 0);
 968         }
 969 
 970         fwcmd->params.req.type = (uint8_t)wq->cfg.wq_type;
 971         fwcmd->params.req.num_pages = wq->ring->dbuf.num_pages;
 972         fwcmd->params.req.ulp_num = BE_ULP1_NUM;
  973         oce_log(dev, CE_NOTE, MOD_CONFIG, "NUM_PAGES = %d size = %lu",
 974             (uint32_t)wq->ring->dbuf.num_pages,
 975             wq->ring->dbuf.size);
 976 
 977         /* Context info */
 978         fwcmd->params.req.ctx.wq_size = OCE_LOG2(wq->cfg.q_len) + 1;
 979         fwcmd->params.req.ctx.valid = 1;
 980         fwcmd->params.req.ctx.cofe = 1;
 981         fwcmd->params.req.ctx.no_rem_allowed = 1;
 982         fwcmd->params.req.ctx.cq_id = cq->cq_id;
 983 
 984         oce_page_list(&wq->ring->dbuf, fwcmd->params.req.pages,
 985             wq->ring->dbuf.num_pages);
 986 
 987         /* fill rest of mbx */
 988         mbx.u0.s.embedded = 1;
 989         mbx.payload_length = sizeof (struct mbx_create_nic_wq);
 990         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 991 
 992         /* now post the command */
 993         ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
 994         if (ret != DDI_SUCCESS) {
 995                 oce_log(dev, CE_WARN, MOD_CONFIG,
 996                     "WQ create failed: 0x%x", ret);
 997                 oce_cq_del(dev, cq, mode);
 998                 return (ret);
 999         }
1000 
1001         /* interpret the response */
1002         wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);
1003         wq->qstate = QCREATED;
1004         wq->cq = cq;
1005         /* set the WQCQ handlers */
1006         wq->cq->cq_handler = oce_drain_wq_cq;
1007         wq->cq->cb_arg = (void *)wq;
1008 
1009         /* All are free to start with */
1010         wq->wq_free = wq->cfg.q_len;
 1011         /* reset indices */
1012         wq->ring->cidx = 0;
1013         wq->ring->pidx = 0;
1014         oce_log(dev, CE_NOTE, MOD_CONFIG, "WQ CREATED WQID = %d",
1015             wq->wq_id);
1016 
1017         return (0);
1018 }
1019 
1020 /*
1021  * function to delete a WQ
1022  *
1023  * dev - software handle to the device
1024  * wq - WQ to delete
1025  *
1026  * return none
1027  */
1028 static void
1029 oce_wq_del(struct oce_dev *dev, struct oce_wq *wq, uint32_t mode)
1030 {
1031         struct oce_mbx mbx;
1032         struct mbx_delete_nic_wq *fwcmd;
1033 
1034         ASSERT(dev != NULL);
1035         ASSERT(wq != NULL);
1036         if (wq->qstate == QCREATED) {
1037                 bzero(&mbx, sizeof (struct oce_mbx));
1038                 /* now fill the command */
1039                 fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
1040                 fwcmd->params.req.wq_id = wq->wq_id;
1041                 (void) oce_destroy_q(dev, &mbx,
1042                     sizeof (struct mbx_delete_nic_wq),
1043                     QTYPE_WQ, mode);
1044                 wq->qstate = QDELETED;
1045                 oce_cq_del(dev, wq->cq, mode);
1046                 wq->cq = NULL;
1047         }
1048 } /* oce_wq_del */
1049 
1050 /*
1051  * function to allocate RQ resources
1052  *
1053  * dev - software handle to the device
 1054  * rq - RQ to initialize; q_len, frag_size, mtu - ring parameters
 1055  *
 1056  * return DDI_SUCCESS => success, DDI_FAILURE otherwise
1057  */
1058 int oce_rq_init(struct oce_dev *dev, struct oce_rq *rq, uint32_t q_len,
1059     uint32_t frag_size, uint32_t mtu)
1060 {
1061         int ret;
1062 
1063         /* validate q creation parameters */
 1064         if (!OCE_LOG2(frag_size))
 1065                 return (DDI_FAILURE);
 1066         if ((q_len == 0) || (q_len > 1024))
 1067                 return (DDI_FAILURE);
1068 
1069         rq->cfg.q_len = q_len;
1070         rq->cfg.frag_size = frag_size;
1071         rq->cfg.mtu = mtu;
1072         rq->cfg.eqd = 0;
1073         rq->cfg.nbufs = dev->rq_max_bufs;
1074 
1075         /* initialize ring statistics */
1076         rq->stat_bytes = rq->stat_pkts = 0;
1077 
1078         rq->rq_bdesc_array =
1079             kmem_zalloc((sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs), KM_NOSLEEP);
1080         if (rq->rq_bdesc_array == NULL) {
1081                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
1082                     "RQ bdesc alloc failed");
1083                 return (DDI_FAILURE);
1084         }
1085         /* create the rq buffer descriptor ring */
1086         rq->shadow_ring =
1087             kmem_zalloc((rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)),
1088             KM_NOSLEEP);
1089         if (rq->shadow_ring == NULL) {
1090                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
1091                     "RQ shadow ring alloc failed ");
1092                 goto rq_shdw_fail;
1093         }
1094 
1095         /* allocate the free list array */
1096         rq->rqb_freelist =
1097             kmem_zalloc(rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *), KM_NOSLEEP);
1098         if (rq->rqb_freelist == NULL) {
1099                 goto rqb_free_list_fail;
1100         }
1101         /* create the buffer pool */
1102         ret  =  oce_rqb_cache_create(rq, rq->cfg.frag_size);
1103         if (ret != DDI_SUCCESS) {
1104                 goto rqb_fail;
1105         }
1106 
1107         /* create the ring buffer */
1108         rq->ring = oce_create_ring_buffer(dev, q_len,
1109             sizeof (struct oce_nic_rqe), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
1110         if (rq->ring == NULL) {
1111                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
1112                     "RQ ring create failed ");
1113                 goto rq_ringfail;
1114         }
1115 
1116         /* Initialize the RQ lock */
1117         mutex_init(&rq->rx_lock, NULL, MUTEX_DRIVER,
1118             DDI_INTR_PRI(dev->intr_pri));
1119         /* Initialize the recharge  lock */
1120         mutex_init(&rq->rc_lock, NULL, MUTEX_DRIVER,
1121             DDI_INTR_PRI(dev->intr_pri));
1122         atomic_inc_32(&dev->nrqs);
1123         return (DDI_SUCCESS);
1124 
1125 rq_ringfail:
1126         oce_rqb_cache_destroy(rq);
1127 rqb_fail:
1128         kmem_free(rq->rqb_freelist,
1129             (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
1130 rqb_free_list_fail:
1131 
1132         kmem_free(rq->shadow_ring,
1133             (rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)));
1134 rq_shdw_fail:
1135         kmem_free(rq->rq_bdesc_array,
1136             (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
1137         return (DDI_FAILURE);
 1138 } /* oce_rq_init */
1139 
1140 
1141 /*
1142  * function to delete an RQ
1143  *
1144  * dev - software handle to the device
1145  * rq - RQ to delete
1146  *
1147  * return none
1148  */
1149 void
1150 oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq)
1151 {
1152         /* Destroy buffer cache */
1153         rq->qstate = QFINI;
1154         oce_rqb_cache_destroy(rq);
1155         destroy_ring_buffer(dev, rq->ring);
1156         rq->ring = NULL;
1157         kmem_free(rq->shadow_ring,
1158             sizeof (oce_rq_bdesc_t *) * rq->cfg.q_len);
1159         rq->shadow_ring = NULL;
1160         kmem_free(rq->rq_bdesc_array,
1161             (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
1162         rq->rq_bdesc_array = NULL;
1163         kmem_free(rq->rqb_freelist,
1164             (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
1165         rq->rqb_freelist = NULL;
1166         mutex_destroy(&rq->rx_lock);
1167         mutex_destroy(&rq->rc_lock);
1168         atomic_dec_32(&dev->nrqs);
 1169 } /* oce_rq_fini */
1170 
1171 
1172 static int
1173 oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq,
1174     uint32_t mode)
1175 {
1176         struct oce_mbx mbx;
1177         struct mbx_create_nic_rq *fwcmd;
1178         struct oce_dev *dev = rq->parent;
1179         struct oce_cq *cq;
1180         int cq_len;
1181         int ret;
1182 
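              /* Lancer-family chips use a deeper rx completion queue */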
1183         if (LANCER_CHIP(dev))
1184                 cq_len = CQ_LEN_2048;
1185         else
1186                 cq_len = CQ_LEN_1024;
1187 
1188         cq = oce_cq_create(dev, eq, cq_len, sizeof (struct oce_nic_rx_cqe),
1189             B_FALSE, B_TRUE, B_FALSE, 3, B_FALSE, mode);
1190 
1191         if (cq == NULL) {
1192                 return (DDI_FAILURE);
1193         }
1194 
1195         /* now fill the command */
1196         bzero(&mbx, sizeof (struct oce_mbx));
1197         fwcmd = (struct mbx_create_nic_rq *)&mbx.payload;
1198         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
1199             MBX_SUBSYSTEM_NIC,
1200             OPCODE_CREATE_NIC_RQ, MBX_TIMEOUT_SEC,
1201             sizeof (struct mbx_create_nic_rq), 0);
1202 
1203         fwcmd->params.req.num_pages = rq->ring->dbuf.num_pages;
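              /* fw takes the rx fragment size as a power-of-two exponent */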
1204         fwcmd->params.req.frag_size = OCE_LOG2(rq->cfg.frag_size);
1205         fwcmd->params.req.cq_id = cq->cq_id;
1206         oce_page_list(&rq->ring->dbuf, fwcmd->params.req.pages,
1207             rq->ring->dbuf.num_pages);
1208 
1209         fwcmd->params.req.if_id = if_id;
1210         fwcmd->params.req.max_frame_size = (uint16_t)rq->cfg.mtu;
1211         fwcmd->params.req.is_rss_queue = rq->cfg.is_rss_queue;
1212 
1213         /* fill rest of mbx */
1214         mbx.u0.s.embedded = 1;
1215         mbx.payload_length = sizeof (struct mbx_create_nic_rq);
1216         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
1217 
1218         /* now post the command */
1219         ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
1220         if (ret != 0) {
1221                 oce_log(dev, CE_WARN, MOD_CONFIG,
1222                     "RQ create failed: 0x%x", ret);
 1223                 oce_cq_del(dev, cq, mode);
1224                 return (ret);
1225         }
1226 
1227         /* interpret the response */
1228         rq->rq_id = LE_16(fwcmd->params.rsp.u0.s.rq_id);
1229         rq->rss_cpuid = fwcmd->params.rsp.u0.s.rss_cpuid;
1230         rq->cfg.if_id = if_id;
1231         rq->qstate = QCREATED;
1232         rq->cq = cq;
1233 
1234         /* set the Completion Handler */
1235         rq->cq->cq_handler = oce_drain_rq_cq;
1236         rq->cq->cb_arg  = (void *)rq;
1237 
 1238         /* reset the indices */
1239         rq->ring->cidx = 0;
1240         rq->ring->pidx = 0;
1241         rq->buf_avail = 0;
1242         oce_log(dev, CE_NOTE, MOD_CONFIG, "RQ created, RQID : %d, cpu-id = %d",
1243             rq->rq_id, rq->rss_cpuid);
1244         return (0);
1245 
1246 }
1247 
1248 /*
1249  * function to delete an RQ
1250  *
1251  * dev - software handle to the device
1252  * rq - RQ to delete
1253  *
1254  * return none
1255  */
1256 static void
1257 oce_rq_del(struct oce_dev *dev, struct oce_rq *rq, uint32_t mode)
1258 {
1259         struct oce_mbx mbx;
1260         struct mbx_delete_nic_rq *fwcmd;
1261 
1262         ASSERT(dev != NULL);
1263         ASSERT(rq != NULL);
1264 
1265         bzero(&mbx, sizeof (struct oce_mbx));
1266 
1267         mutex_enter(&rq->rx_lock);
1268         /* delete the Queue  */
1269         if (rq->qstate == QCREATED) {
1270                 fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
1271                 fwcmd->params.req.rq_id = rq->rq_id;
1272                 (void) oce_destroy_q(dev, &mbx,
1273                     sizeof (struct mbx_delete_nic_rq), QTYPE_RQ, mode);
1274                 oce_clean_rq(rq);
1275                 /* Delete the associated CQ */
1276                 oce_cq_del(dev, rq->cq, mode);
1277                 rq->cq = NULL;
1278                 /* free up the posted buffers */
1279                 oce_rq_discharge(rq);
1280                 (void) atomic_swap_32(&rq->qstate, QDELETED);
1281         }
1282         mutex_exit(&rq->rx_lock);
1283 } /* oce_rq_del */
1284 
1285 /*
1286  * function to arm an EQ so that it can generate events
1287  *
1288  * dev - software handle to the device
1289  * qid - id of the EQ returned by the fw at the time of creation
1290  * npopped - number of EQEs to arm with
1291  * rearm - rearm bit
1292  * clearint - bit to clear the interrupt condition because of which
1293  *      EQEs are generated
1294  *
1295  * return none
1296  */
1297 void
1298 oce_arm_eq(struct oce_dev *dev, int16_t qid, int npopped,
1299     boolean_t rearm, boolean_t clearint)
1300 {
1301         eq_db_t eq_db = {0};
1302 
1303         eq_db.bits.rearm = rearm;
1304         eq_db.bits.event  = B_TRUE;
1305         eq_db.bits.eq_cq_extid =
1306             (((uint64_t)qid & (uint64_t)DB_EQ_RING_ID_EXT_MASK) <<
1307             (uint64_t)DB_EQ_RING_ID_EXT_MASK_SHIFT);
1308         eq_db.bits.num_popped = npopped;
1309         eq_db.bits.clrint = clearint;
1310         eq_db.bits.qid = qid;
1311         OCE_DB_WRITE32(dev, PD_EQ_DB, eq_db.dw0);
1312 }
1313 
1314 /*
1315  * function to arm a CQ with CQEs
1316  *
1317  * dev - software handle to the device
1318  * qid - the id of the CQ returned by the fw at the time of creation
1319  * npopped - number of CQEs to arm with
1320  * rearm - rearm bit enable/disable
1321  *
1322  * return none
1323  */
1324 void
1325 oce_arm_cq(struct oce_dev *dev, int16_t qid, int npopped,
1326     boolean_t rearm)
1327 {
1328         cq_db_t cq_db = {0};
1329         cq_db.bits.rearm = rearm;
1330         cq_db.bits.eq_cq_extid =
1331             (((uint64_t)qid & (uint64_t)DB_CQ_RING_ID_EXT_MASK) <<
1332             (uint64_t)DB_CQ_RING_ID_EXT_MASK_SHIFT);
1333         cq_db.bits.num_popped = npopped;
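              /* unlike the EQ doorbell, the event bit stays clear when arming a CQ */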
1334         cq_db.bits.event = 0;
1335         cq_db.bits.qid = qid;
1336         OCE_DB_WRITE32(dev, PD_CQ_DB, cq_db.dw0);
1337 }
1338 
1339 
1340 /*
1341  * function to delete a EQ, CQ, MQ, WQ or RQ
1342  *
 1343  * dev - software handle to the device
1344  * mbx - mbox command to send to the fw to delete the queue
1345  *      mbx contains the queue information to delete
1346  * req_size - the size of the mbx payload dependent on the qtype
1347  * qtype - the type of queue i.e. EQ, CQ, MQ, WQ or RQ
1348  *
1349  * return DDI_SUCCESS => success, failure otherwise
1350  */
1351 int
1352 oce_destroy_q(struct oce_dev *dev, struct oce_mbx  *mbx, size_t req_size,
1353     enum qtype qtype, uint32_t mode)
1354 {
1355         struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
1356         int opcode;
1357         int subsys;
1358         int ret;
1359 
1360         switch (qtype) {
1361         case QTYPE_EQ: {
1362                 opcode = OPCODE_DESTROY_COMMON_EQ;
1363                 subsys = MBX_SUBSYSTEM_COMMON;
1364                 break;
1365         }
1366         case QTYPE_CQ: {
1367                 opcode = OPCODE_DESTROY_COMMON_CQ;
1368                 subsys = MBX_SUBSYSTEM_COMMON;
1369                 break;
1370         }
1371         case QTYPE_MQ: {
1372                 opcode = OPCODE_DESTROY_COMMON_MQ;
1373                 subsys = MBX_SUBSYSTEM_COMMON;
1374                 break;
1375         }
1376         case QTYPE_WQ: {
1377                 opcode = OPCODE_DELETE_NIC_WQ;
1378                 subsys = MBX_SUBSYSTEM_NIC;
1379                 break;
1380         }
1381         case QTYPE_RQ: {
1382                 opcode = OPCODE_DELETE_NIC_RQ;
1383                 subsys = MBX_SUBSYSTEM_NIC;
1384                 break;
1385         }
1386         default: {
1387                 ASSERT(0);
1388                 break;
1389         }
1390         }
1391 
1392         mbx_common_req_hdr_init(hdr, 0, 0, subsys,
1393             opcode, MBX_TIMEOUT_SEC, req_size, 0);
1394 
1395         /* fill rest of mbx */
1396         mbx->u0.s.embedded = 1;
1397         mbx->payload_length = (uint32_t)req_size;
1398         DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
1399 
1400         /* send command */
1401         ret = oce_issue_mbox_cmd(dev, mbx, MBX_TIMEOUT_SEC, mode);
1402         if (ret != 0) {
1403                 oce_log(dev, CE_WARN, MOD_CONFIG,
1404                     "Failed to del q: 0x%x", ret);
1405         }
1406 
1407         return (ret);
1408 }
1409 
1410 /*
1411  * function to set the delay parameter in the EQ for interrupt coalescing
1412  *
1413  * dev - software handle to the device
 1414  * eq_arr - array of EQ ids whose delay is to be modified
1415  * eq_cnt - number of elements in eq_arr
1416  * eq_delay - delay parameter
1417  *
1418  * return DDI_SUCCESS => success, failure otherwise
1419  */
1420 int
1421 oce_set_eq_delay(struct oce_dev *dev, uint32_t *eq_arr,
1422     uint32_t eq_cnt, uint32_t eq_delay, uint32_t mode)
1423 {
1424         struct oce_mbx mbx;
1425         struct mbx_modify_common_eq_delay *fwcmd;
1426         int ret;
1427         int neq;
1428 
1429         bzero(&mbx, sizeof (struct oce_mbx));
1430         fwcmd = (struct mbx_modify_common_eq_delay *)&mbx.payload;
1431 
1432         /* fill the command */
1433         fwcmd->params.req.num_eq = eq_cnt;
1434         for (neq = 0; neq < eq_cnt; neq++) {
1435                 fwcmd->params.req.delay[neq].eq_id = eq_arr[neq];
1436                 fwcmd->params.req.delay[neq].phase = 0;
1437                 fwcmd->params.req.delay[neq].dm = eq_delay;
1438 
1439         }
1440 
1441         /* initialize the ioctl header */
1442         mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
1443             MBX_SUBSYSTEM_COMMON,
1444             OPCODE_MODIFY_COMMON_EQ_DELAY,
1445             MBX_TIMEOUT_SEC,
1446             sizeof (struct mbx_modify_common_eq_delay), 0);
1447 
1448         /* fill rest of mbx */
1449         mbx.u0.s.embedded = 1;
1450         mbx.payload_length = sizeof (struct mbx_modify_common_eq_delay);
1451         DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
1452 
1453         /* post the command */
1454         ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
1455         if (ret != 0) {
1456                 oce_log(dev, CE_WARN, MOD_CONFIG,
1457                     "Failed to set EQ delay 0x%x", ret);
1458         }
1459 
1460         return (ret);
1461 } /* oce_set_eq_delay */
1462 
1463 /*
 1464  * function to drain residual events from an EQ; used during stop
1465  *
1466  * eq - pointer to event queue structure
1467  *
1468  * return none
1469  */
1470 void
1471 oce_drain_eq(struct oce_eq *eq)
1472 {
1473         struct oce_eqe *eqe;
1474         uint16_t num_eqe = 0;
1475         struct oce_dev *dev;
1476 
1477         dev = eq->parent;
 1478         eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
 1479 
1480 
1481         while (eqe->u0.dw0) {
1482                 eqe->u0.dw0 = LE_32(eqe->u0.dw0);
1483 
1484                 /* clear valid bit */
1485                 eqe->u0.dw0 = 0;
1486 
1487                 /* process next eqe */
1488                 RING_GET(eq->ring, 1);
1489 
1490                 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
1491                 num_eqe++;
1492         } /* for all EQEs */
1493         if (num_eqe) {
1494                 oce_arm_eq(dev, eq->eq_id, num_eqe, B_FALSE, B_TRUE);
1495         }
1496 } /* oce_drain_eq */
1497 
1498 
1499 int
1500 oce_init_tx(struct oce_dev  *dev)
1501 {
1502         int qid = 0;
1503 
1504         for (qid = 0; qid < dev->tx_rings; qid++) {
1505                 if (oce_wq_init(dev, &dev->wq[qid], dev->tx_ring_size,
1506                     NIC_WQ_TYPE_STANDARD) != DDI_SUCCESS) {
1507                         goto queue_fail;
1508                 }
1509         }
1510 
1511         return (DDI_SUCCESS);
1512 queue_fail:
1513         oce_fini_tx(dev);
1514         return (DDI_FAILURE);
1515 }
1516 
1517 
1518 void
1519 oce_fini_tx(struct oce_dev *dev)
1520 {
1521         int qid;
1522         int nqs;
1523 
1524         /* free all the tx rings */
1525         /* nwqs is decremented in fini so copy count first */
1526         nqs = dev->nwqs;
1527         for (qid = 0; qid < nqs; qid++) {
1528                 oce_wq_fini(dev, &dev->wq[qid]);
1529         }
1530 }
1531 
1532 
1533 int
1534 oce_create_queues(struct oce_dev *dev)
1535 {
1536         int i, num_if;
1537 
1538         for (num_if = 0; num_if < dev->num_rx_groups; num_if++) {
1539                 if (oce_create_nw_interface(dev, &dev->rx_group[num_if],
1540                     MBX_BOOTSTRAP) != DDI_SUCCESS) {
1541                         goto if_fail;
1542                 }
1543         }
1544 
1545         /* create resources that are common to an oce instance */
1546         for (i = 0; i < dev->num_vectors; i++) {
1547                 if (oce_eq_create(dev, &dev->eq[i], EQ_LEN_1024, EQE_SIZE_4, 0,
1548                     MBX_BOOTSTRAP) != DDI_SUCCESS) {
1549                         goto rings_fail;
1550                 }
1551         }
1552 
1553         /* create tx rings */
1554         if (dev->num_tx_groups == 1) {
1555                 for (i = 0; i < dev->tx_rings; i++) {
1556                         if (oce_wq_create(&dev->wq[i], &dev->eq[i],
1557                             MBX_BOOTSTRAP) != 0) {
1558                                 dev->tx_rings = i;
1559                                 goto rings_fail;
1560                         }
1561                         oce_log(dev, CE_NOTE, MOD_CONFIG,
1562                             "wq[%d] created on eq[%d]=%p wq=%p",
1563                             i, i, (void *)&dev->eq[i], (void *)&dev->wq[i]);
1564                 }
1565         } else {
1566                 /* Tx groups not supported */
1567                 oce_log(dev, CE_WARN, MOD_CONFIG,
1568                     "unsupported number of tx groups %d", dev->num_tx_groups);
1569                 goto rings_fail;
1570         }
1571 
1572         return (DDI_SUCCESS);
1573 rings_fail:
1574         oce_delete_queues(dev);
1575 if_fail:
1576         for (i = 0; i < num_if; i++) {
1577                 oce_delete_nw_interface(dev, &dev->rx_group[i], MBX_BOOTSTRAP);
1578         }
1579         return (DDI_FAILURE);
1580 }
1581 
1582 int
1583 oce_create_mcc_queue(struct oce_dev *dev)
1584 {
1585         if (LANCER_CHIP(dev)) {
1586                 dev->mq = oce_mq_create_ext_v1(dev, &dev->eq[0], MCC_Q_LEN);
1587         } else {
1588                 dev->mq = oce_mq_create_ext_v0(dev, &dev->eq[0], MCC_Q_LEN);
1589         }
1590 
1591         if (dev->mq == NULL) {
1592                 oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
 1593                     "Extended MQ is not supported, reverting to Legacy MQ mode");
1594                 dev->mq = oce_mq_create(dev, &dev->eq[0], MCC_Q_LEN);
1595                 if (dev->mq == NULL)
1596                         return (DDI_FAILURE);
1597         }
1598 
1599         return (DDI_SUCCESS);
1600 }
1601 
1602 int
1603 oce_create_group(struct oce_dev *dev, oce_group_t *grp, uint32_t mode)
1604 {
1605         int eqidx, ret, i;
1606         char itbl[OCE_ITBL_SIZE];
1607         char hkey[OCE_HKEY_SIZE];
1608 
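              /*
               * ring 0 is the group's default (non-RSS) ring and stays on the
               * group's base eq; the remaining rings are spread across the
               * available interrupt vectors
               */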
1609         for (i = 0; i < grp->num_rings; i++) {
1610                 if (i != 0) {
1611                         grp->ring[i].rx->cfg.is_rss_queue =
1612                             grp->rss_enable;
1613                         eqidx = (grp->eq_idx + i - grp->rss_enable) %
1614                             dev->num_vectors;
1615                 } else {
1616                         grp->ring[i].rx->cfg.is_rss_queue = B_FALSE;
1617                         eqidx = grp->eq_idx % dev->num_vectors;
1618                 }
1619 
1620                 ret = oce_rq_create(grp->ring[i].rx,
1621                     grp->if_id, &dev->eq[eqidx], mode);
1622 
1623                 if (ret != 0) {
1624                         goto cleanup_group;
1625                 }
1626                 oce_log(dev, CE_NOTE, MOD_CONFIG,
1627                     "rq[%d][%d] created on eq[%d]=%p rq=%p, rss=%d",
1628                     grp->grp_num, i, eqidx,
1629                     (void *)&dev->eq[eqidx],
1630                     (void *)grp->ring[i].rx,
1631                     grp->ring[i].rx->cfg.is_rss_queue);
1632         }
1633 
1634         if (grp->rss_enable) {
1635                 (void) oce_group_create_itbl(grp, itbl);
1636 
1637                 (void) oce_gen_hkey(hkey, OCE_HKEY_SIZE);
1638                 ret = oce_config_rss(dev, grp->if_id, hkey,
1639                     itbl, OCE_ITBL_SIZE, OCE_DEFAULT_RSS_TYPE, B_FALSE,
1640                     mode);
1641                 if (ret != DDI_SUCCESS) {
1642                         oce_log(dev, CE_WARN, MOD_CONFIG,
1643                             "Failed to Configure RSS 0x%x", ret);
1644                         goto cleanup_group;
1645                 }
1646         }
1647 
1648         return (DDI_SUCCESS);
1649 cleanup_group:
1650         oce_delete_group(dev, grp);
1651         return (DDI_FAILURE);
1652 }
1653 
1654 void
1655 oce_delete_mcc_queue(struct oce_dev *dev)
1656 {
1657         if (dev->mq != NULL) {
1658                 oce_mq_del(dev, dev->mq);
1659                 dev->mq = NULL;
1660         }
1661 }
1662 
1663 void
1664 oce_delete_queues(struct oce_dev *dev)
1665 {
1666         int i;
1667         int neqs = dev->neqs;
1668 
1669         for (i = 0; i < dev->nwqs; i++) {
1670                 oce_wq_del(dev, &dev->wq[i], MBX_BOOTSTRAP);
1671         }
1672 
1673         /* delete as many eqs as the number of vectors */
1674         for (i = 0; i < neqs; i++) {
1675                 oce_eq_del(dev, &dev->eq[i], MBX_BOOTSTRAP);
1676         }
1677 
1678         for (i = dev->num_rx_groups - 1; i >= 0; i--) {
1679                 oce_delete_nw_interface(dev, &dev->rx_group[i], MBX_BOOTSTRAP);
1680         }
1681 }
1682 
1683 void
1684 oce_delete_group(struct oce_dev *dev, oce_group_t *grp)
1685 {
1686         int i;
1687 
1688         for (i = 0; i < grp->num_rings; i++) {
1689                 oce_rq_del(dev, grp->ring[i].rx, MBX_BOOTSTRAP);
1690         }
1691 }
1692 
1693 
1694 void
1695 oce_free_queues(struct oce_dev *dev)
1696 {
1697         int i = 0;
1698 
1699         for (i = 0; i < dev->rx_rings; i++) {
1700                 mutex_destroy(&dev->rq[i].rq_fini_lock);
1701         }
1702         if (dev->rq != NULL) {
1703                 kmem_free(dev->rq,
1704                     sizeof (struct oce_rq) * dev->rx_rings);
1705                 dev->rq = NULL;
1706         }
1707         if (dev->wq != NULL) {
1708                 kmem_free(dev->wq,
1709                     sizeof (struct oce_wq) * dev->tx_rings);
1710                 dev->wq = NULL;
1711         }
1712         if (dev->cq != NULL) {
1713                 kmem_free(dev->cq,
1714                     sizeof (struct oce_cq *) * OCE_MAX_CQ);
1715                 dev->cq = NULL;
1716         }
1717         if (dev->eq != NULL) {
1718                 for (i = 0; i < OCE_MAX_EQ; i++) {
1719                         mutex_destroy(&dev->eq[i].lock);
1720                 }
1721 
1722                 kmem_free(dev->eq,
1723                     sizeof (struct oce_eq) * OCE_MAX_EQ);
1724                 dev->eq = NULL;
1725         }
1726 }
1727 
1728 int
1729 oce_alloc_queues(struct oce_dev *dev)
1730 {
1731         int i, j, nrings = 0;
1732 
1733         /* Allocate space for RQ array */
1734         dev->rq = kmem_zalloc(sizeof (struct oce_rq) * dev->rx_rings,
1735             KM_NOSLEEP);
1736 
1737         if (dev->rq == NULL) {
1738                 return (DDI_FAILURE);
1739         }
1740         for (i = 0; i < dev->rx_rings; i++) {
1741                 mutex_init(&dev->rq[i].rq_fini_lock, NULL, MUTEX_DRIVER,
1742                     DDI_INTR_PRI(dev->intr_pri));
1743         }
1744 
1745         /* Allocate space for WQ array */
1746         dev->wq = kmem_zalloc(sizeof (struct oce_wq) * dev->tx_rings,
1747             KM_NOSLEEP);
1748 
1749         if (dev->wq == NULL) {
1750                 goto alloc_fail;
1751         }
1752 
1753         dev->cq = kmem_zalloc(sizeof (struct oce_cq *) * OCE_MAX_CQ,
1754             KM_NOSLEEP);
1755 
1756         if (dev->cq == NULL) {
1757                 goto alloc_fail;
1758         }
1759 
1760         dev->eq = kmem_zalloc(sizeof (struct oce_eq) * OCE_MAX_EQ,
1761             KM_NOSLEEP);
1762         if (dev->eq == NULL) {
1763                 goto alloc_fail;
1764         }
1765 
1766         for (i = 0; i < OCE_MAX_EQ; i++) {
1767                 dev->eq[i].idx = i;
1768                 mutex_init(&dev->eq[i].lock, NULL, MUTEX_DRIVER,
1769                     DDI_INTR_PRI(dev->intr_pri));
1770         }
1771 
1772         for (i = 0; i < dev->tx_rings; i++) {
1773                 dev->wq[i].parent = (void *)dev;
1774                 dev->default_tx_rings[i].tx = &dev->wq[i];
1775         }
1776 
1777         for (i = 0; i < dev->num_rx_groups; i++) {
1778                 for (j = 0; j < dev->rx_group[i].num_rings; j++) {
1779                         dev->rq[nrings].parent = (void *)dev;
1780                         dev->rx_group[i].ring[j].rx = &dev->rq[nrings];
1781                         dev->rx_group[i].ring[j].rx->grp = &dev->rx_group[i];
1782                         nrings++;
1783                 }
1784         }
1785 
1786         return (DDI_SUCCESS);
1787 alloc_fail:
1788         oce_free_queues(dev);
1789         return (DDI_FAILURE);
1790 }