NEX-1890 update oce from source provided by Emulex

          --- old/usr/src/uts/common/io/fibre-channel/fca/oce/oce_queue.c
          +++ new/usr/src/uts/common/io/fibre-channel/fca/oce/oce_queue.c
[11 lines elided]
  12   12   *
  13   13   * When distributing Covered Code, include this CDDL HEADER in each
  14   14   * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15   15   * If applicable, add the following below this CDDL HEADER, with the
  16   16   * fields enclosed by brackets "[]" replaced with your own identifying
  17   17   * information: Portions Copyright [yyyy] [name of copyright owner]
  18   18   *
  19   19   * CDDL HEADER END
  20   20   */
  21   21  
  22      -/* Copyright © 2003-2011 Emulex. All rights reserved.  */
       22 +/*
       23 + * Copyright (c) 2009-2012 Emulex. All rights reserved.
       24 + * Use is subject to license terms.
       25 + */
  23   26  
       27 +
       28 +
  24   29  /*
  25   30   * Source file containing Queue handling functions
  26   31   *
  27   32   */
  28   33  
  29   34  #include <oce_impl.h>
  30      -extern struct oce_dev *oce_dev_list[];
  31   35  
  32   36  int oce_destroy_q(struct oce_dev  *oce, struct oce_mbx  *mbx, size_t req_size,
  33      -    enum qtype  qtype);
       37 +    enum qtype  qtype, uint32_t mode);
   34   38  /* Mail box Queue functions */
  35   39  struct oce_mq *
       40 +oce_mq_create_ext_v0(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);
       41 +struct oce_mq *
       42 +oce_mq_create_ext_v1(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);
       43 +struct oce_mq *
  36   44  oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len);
  37   45  
  38   46  /* event queue handling */
  39      -struct oce_eq *
  40      -oce_eq_create(struct oce_dev *dev, uint32_t q_len, uint32_t item_size,
  41      -    uint32_t eq_delay);
       47 +int
       48 +oce_eq_create(struct oce_dev *dev, struct oce_eq *, uint32_t q_len,
       49 +        uint32_t item_size, uint32_t eq_delay, uint32_t mode);
  42   50  
  43   51  /* completion queue handling */
  44   52  struct oce_cq *
  45   53  oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
  46   54      uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
  47      -    boolean_t nodelay, uint32_t ncoalesce);
       55 +    boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode);
  48   56  
       57 +struct oce_cq *
       58 +oce_cq_create_v0(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
       59 +        uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
       60 +        boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode);
  49   61  
       62 +struct oce_cq *
       63 +oce_cq_create_v2(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
       64 +        uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
       65 +        boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode);
       66 +
  50   67  /* Tx  WQ functions */
  51      -static struct oce_wq *oce_wq_init(struct oce_dev *dev,  uint32_t q_len,
       68 +int oce_wq_init(struct oce_dev *dev, struct oce_wq *, uint32_t q_len,
  52   69      int wq_type);
  53   70  static void oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq);
  54      -static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq);
  55      -static void oce_wq_del(struct oce_dev *dev, struct oce_wq *wq);
       71 +static int oce_wq_create(struct oce_wq *wq, struct oce_eq *eq, uint32_t mode);
       72 +static void oce_wq_del(struct oce_dev *dev, struct oce_wq *wq, uint32_t mode);
  56   73  /* Rx Queue functions */
  57      -static struct oce_rq *oce_rq_init(struct oce_dev *dev, uint32_t q_len,
  58      -    uint32_t frag_size, uint32_t mtu,
  59      -    boolean_t rss);
  60      -static void oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq);
  61      -static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq);
  62      -static void oce_rq_del(struct oce_dev *dev, struct oce_rq *rq);
       74 +static int oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq,
       75 +    uint32_t mode);
       76 +static void oce_rq_del(struct oce_dev *dev, struct oce_rq *rq, uint32_t mode);
  63   77  
  64   78  /*
  65   79   * function to create an event queue
  66   80   *
  67   81   * dev - software handle to the device
   68   82   * eqcfg - pointer to a config structure containing the eq parameters
  69   83   *
   70   84   * return DDI_SUCCESS => success, DDI_FAILURE otherwise
  71   85   */
  72      -struct oce_eq *
  73      -oce_eq_create(struct oce_dev *dev, uint32_t q_len, uint32_t item_size,
  74      -    uint32_t eq_delay)
       86 +int oce_eq_create(struct oce_dev *dev, struct oce_eq *eq,
       87 +        uint32_t q_len, uint32_t item_size, uint32_t eq_delay, uint32_t mode)
  75   88  {
  76      -        struct oce_eq *eq;
  77   89          struct oce_mbx mbx;
  78   90          struct mbx_create_common_eq *fwcmd;
  79   91          int ret = 0;
  80   92  
  81      -        /* allocate an eq */
  82      -        eq = kmem_zalloc(sizeof (struct oce_eq), KM_NOSLEEP);
  83      -
  84   93          if (eq == NULL) {
  85      -                return (NULL);
       94 +                return (DDI_FAILURE);
  86   95          }
  87      -
       96 +        mutex_enter(&eq->lock);
  88   97          bzero(&mbx, sizeof (struct oce_mbx));
  89   98          /* allocate mbx */
  90   99          fwcmd = (struct mbx_create_common_eq *)&mbx.payload;
  91  100  
  92      -        eq->ring = create_ring_buffer(dev, q_len,
  93      -            item_size, DDI_DMA_CONSISTENT);
      101 +        eq->ring = oce_create_ring_buffer(dev, q_len,
      102 +            item_size, DDI_DMA_CONSISTENT|DDI_DMA_RDWR);
  94  103  
  95  104          if (eq->ring == NULL) {
  96  105                  oce_log(dev, CE_WARN, MOD_CONFIG,
  97  106                      "EQ ring alloc failed:0x%p", (void *)eq->ring);
  98      -                kmem_free(eq, sizeof (struct oce_eq));
  99      -                return (NULL);
      107 +                return (DDI_FAILURE);
 100  108          }
 101  109  
 102  110          mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 103  111              MBX_SUBSYSTEM_COMMON,
 104  112              OPCODE_CREATE_COMMON_EQ, MBX_TIMEOUT_SEC,
 105      -            sizeof (struct mbx_create_common_eq));
      113 +            sizeof (struct mbx_create_common_eq), 0);
 106  114  
 107      -        fwcmd->params.req.num_pages = eq->ring->dbuf->num_pages;
 108      -        oce_page_list(eq->ring->dbuf, &fwcmd->params.req.pages[0],
 109      -            eq->ring->dbuf->num_pages);
      115 +        fwcmd->params.req.num_pages = eq->ring->dbuf.num_pages;
      116 +        oce_page_list(&eq->ring->dbuf, &fwcmd->params.req.pages[0],
      117 +            eq->ring->dbuf.num_pages);
 110  118  
 111  119          /* dw 0 */
 112  120          fwcmd->params.req.eq_ctx.size = (item_size == 4) ? 0 : 1;
 113  121          fwcmd->params.req.eq_ctx.valid = 1;
 114  122          /* dw 1 */
 115  123          fwcmd->params.req.eq_ctx.armed = 0;
 116  124          fwcmd->params.req.eq_ctx.pd = 0;
 117  125          fwcmd->params.req.eq_ctx.count = OCE_LOG2(q_len/256);
 118  126  
 119  127          /* dw 2 */
 120      -        fwcmd->params.req.eq_ctx.function = dev->fn;
 121  128          fwcmd->params.req.eq_ctx.nodelay  = 0;
 122  129          fwcmd->params.req.eq_ctx.phase = 0;
  123  130          /* TODO: calculate multiplier from max, min and cur */
 124  131          fwcmd->params.req.eq_ctx.delay_mult = eq_delay;
 125  132  
 126  133          /* fill rest of mbx */
 127  134          mbx.u0.s.embedded = 1;
 128  135          mbx.payload_length = sizeof (struct mbx_create_common_eq);
 129  136          DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 130  137  
 131  138          /* now post the command */
 132      -        ret = oce_mbox_post(dev, &mbx, NULL);
      139 +        ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
 133  140  
 134  141          if (ret != 0) {
 135      -                oce_log(dev, CE_WARN, MOD_CONFIG, "EQ create failed: %d", ret);
      142 +                oce_log(dev, CE_WARN, MOD_CONFIG,
      143 +                    "EQ create failed: 0x%x", ret);
 136  144                  destroy_ring_buffer(dev, eq->ring);
 137      -                kmem_free(eq, sizeof (struct oce_eq));
 138      -                return (NULL);
      145 +                return (DDI_FAILURE);
 139  146          }
 140  147  
 141  148          /* interpret the response */
 142  149          eq->eq_id = LE_16(fwcmd->params.rsp.eq_id);
 143      -        eq->eq_cfg.q_len = q_len;
 144      -        eq->eq_cfg.item_size = item_size;
 145      -        eq->eq_cfg.cur_eqd = (uint8_t)eq_delay;
 146  150          eq->parent = (void *)dev;
 147  151          atomic_inc_32(&dev->neqs);
      152 +        eq->qstate = QCREATED;
      153 +        mutex_exit(&eq->lock);
 148  154          oce_log(dev, CE_NOTE, MOD_CONFIG,
 149  155              "EQ created, eq=0x%p eq_id=0x%x", (void *)eq, eq->eq_id);
 150  156          /* Save the eq pointer */
 151      -        return (eq);
      157 +        return (DDI_SUCCESS);
 152  158  } /* oce_eq_create */
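
The eq_ctx.count field above encodes the ring length as OCE_LOG2(q_len/256), so
only power-of-two lengths of 256 or more map onto the encoding. A minimal
user-space sketch of that arithmetic, assuming OCE_LOG2() is a floor-log2
macro (oce_log2() below is a stand-in, not the driver's definition):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the driver's OCE_LOG2(): floor(log2(x)); 0 for x == 0. */
static uint32_t
oce_log2(uint32_t x)
{
        uint32_t r = 0;

        while (x >>= 1)
                r++;
        return (r);
}

int
main(void)
{
        uint32_t lens[] = { 256, 512, 1024, 2048, 4096 };
        size_t i;

        for (i = 0; i < sizeof (lens) / sizeof (lens[0]); i++)
                printf("q_len %4u -> eq_ctx.count %u\n",
                    lens[i], oce_log2(lens[i] / 256));
        return (0);
}
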
 153  159  
 154  160  /*
 155  161   * function to delete an event queue
 156  162   *
 157  163   * dev - software handle to the device
 158  164   * eq - handle to the eq to be deleted
 159  165   *
  160  166   * return none
 161  167   */
 162  168  void
 163      -oce_eq_del(struct oce_dev *dev, struct oce_eq *eq)
      169 +oce_eq_del(struct oce_dev *dev, struct oce_eq *eq, uint32_t mode)
 164  170  {
 165  171          struct oce_mbx mbx;
 166  172          struct mbx_destroy_common_eq *fwcmd;
 167  173  
      174 +        mutex_enter(&eq->lock);
      175 +        eq->qstate = QDELETED;
      176 +        bzero(&mbx, sizeof (struct oce_mbx));
      177 +
 168  178          /* drain the residual events */
 169  179          oce_drain_eq(eq);
 170  180  
 171  181          /* destroy the ring */
 172  182          destroy_ring_buffer(dev, eq->ring);
 173  183          eq->ring = NULL;
 174  184  
 175  185          /* send a command to delete the EQ */
 176  186          fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
 177  187          fwcmd->params.req.id = eq->eq_id;
 178  188          (void) oce_destroy_q(dev, &mbx,
 179  189              sizeof (struct mbx_destroy_common_eq),
 180      -            QTYPE_EQ);
 181      -        kmem_free(eq, sizeof (struct oce_eq));
      190 +            QTYPE_EQ, mode);
 182  191          atomic_dec_32(&dev->neqs);
      192 +        mutex_exit(&eq->lock);
 183  193  }
 184  194  
 185  195  /*
 186      - * function to create a completion queue
      196 + * function to create a V0 completion queue
 187  197   *
 188  198   * dev - software handle to the device
  189  199   * eq - optional eq to be associated with the cq
 190  200   * cqcfg - configuration for this queue
 191  201   *
 192  202   * return pointer to the cq created. NULL on failure
 193  203   */
 194  204  struct oce_cq *
 195      -oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
      205 +oce_cq_create_v0(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
 196  206      uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
 197      -    boolean_t nodelay, uint32_t ncoalesce)
      207 +    boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode)
 198  208  {
 199  209          struct oce_cq *cq = NULL;
 200  210          struct oce_mbx mbx;
 201      -        struct mbx_create_common_cq *fwcmd;
      211 +        struct mbx_create_common_cq_v0 *fwcmd;
 202  212          int ret = 0;
 203  213  
 204  214          /* create cq */
 205  215          cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);
 206  216          if (cq == NULL) {
 207      -                oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
      217 +                oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 208  218                      "CQ allocation failed");
 209  219                  return (NULL);
 210  220          }
 211  221  
 212  222          /* create the ring buffer for this queue */
 213      -        cq->ring = create_ring_buffer(dev, q_len,
 214      -            item_size, DDI_DMA_CONSISTENT);
      223 +        cq->ring = oce_create_ring_buffer(dev, q_len,
      224 +            item_size, DDI_DMA_CONSISTENT|DDI_DMA_RDWR);
 215  225          if (cq->ring == NULL) {
 216  226                  oce_log(dev, CE_WARN, MOD_CONFIG,
 217  227                      "CQ ring alloc failed:0x%p",
 218  228                      (void *)cq->ring);
 219  229                  kmem_free(cq, sizeof (struct oce_cq));
 220  230                  return (NULL);
 221  231          }
 222  232          /* initialize mailbox */
 223  233          bzero(&mbx, sizeof (struct oce_mbx));
 224      -        fwcmd = (struct mbx_create_common_cq *)&mbx.payload;
      234 +        fwcmd = (struct mbx_create_common_cq_v0 *)&mbx.payload;
 225  235  
 226  236          /* fill the command header */
 227  237          mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 228  238              MBX_SUBSYSTEM_COMMON,
 229  239              OPCODE_CREATE_COMMON_CQ, MBX_TIMEOUT_SEC,
 230      -            sizeof (struct mbx_create_common_cq));
      240 +            sizeof (struct mbx_create_common_cq_v0), 0);
 231  241  
 232  242          /* fill command context */
 233  243          /* dw0 */
 234  244          fwcmd->params.req.cq_ctx.eventable = is_eventable;
 235  245          fwcmd->params.req.cq_ctx.sol_event = sol_event;
 236  246          fwcmd->params.req.cq_ctx.valid = 1;
 237  247          fwcmd->params.req.cq_ctx.count = OCE_LOG2(q_len/256);
 238  248          fwcmd->params.req.cq_ctx.nodelay = nodelay;
 239  249          fwcmd->params.req.cq_ctx.coalesce_wm = ncoalesce;
 240  250  
 241  251          /* dw1 */
 242      -        fwcmd->params.req.cq_ctx.armed = B_FALSE;
      252 +        fwcmd->params.req.cq_ctx.armed = armed;
 243  253          fwcmd->params.req.cq_ctx.eq_id = eq->eq_id;
 244  254          fwcmd->params.req.cq_ctx.pd = 0;
 245      -        /* dw2 */
 246      -        fwcmd->params.req.cq_ctx.function = dev->fn;
 247  255  
 248  256          /* fill the rest of the command */
 249      -        fwcmd->params.req.num_pages = cq->ring->dbuf->num_pages;
 250      -        oce_page_list(cq->ring->dbuf, &fwcmd->params.req.pages[0],
 251      -            cq->ring->dbuf->num_pages);
      257 +        fwcmd->params.req.num_pages = cq->ring->dbuf.num_pages;
      258 +        oce_page_list(&cq->ring->dbuf, &fwcmd->params.req.pages[0],
      259 +            cq->ring->dbuf.num_pages);
 252  260  
 253  261          /* fill rest of mbx */
 254  262          mbx.u0.s.embedded = 1;
 255      -        mbx.payload_length = sizeof (struct mbx_create_common_cq);
      263 +        mbx.payload_length = sizeof (struct mbx_create_common_cq_v0);
 256  264          DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 257  265  
 258  266          /* now send the mail box */
 259      -        ret = oce_mbox_post(dev, &mbx, NULL);
      267 +        ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
 260  268  
 261  269          if (ret != 0) {
 262  270                  oce_log(dev, CE_WARN, MOD_CONFIG,
 263  271                      "CQ create failed: 0x%x", ret);
 264  272                  destroy_ring_buffer(dev, cq->ring);
 265  273                  kmem_free(cq, sizeof (struct oce_cq));
 266  274                  return (NULL);
 267  275          }
 268  276  
 269  277          cq->parent = dev;
 270  278          cq->eq = eq; /* eq array index */
 271  279          cq->cq_cfg.q_len = q_len;
 272      -        cq->cq_cfg.item_size = item_size;
 273      -        cq->cq_cfg.sol_eventable = (uint8_t)sol_event;
 274      -        cq->cq_cfg.nodelay = (uint8_t)nodelay;
      280 +
 275  281          /* interpret the response */
 276  282          cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
 277  283          dev->cq[cq->cq_id % OCE_MAX_CQ] = cq;
      284 +        cq->qstate = QCREATED;
 278  285          atomic_inc_32(&eq->ref_count);
 279  286          return (cq);
 280      -} /* oce_cq_create */
      287 +} /* oce_cq_create_v0 */
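
Each create path hands the firmware a page list built from the ring's DMA
buffer via oce_page_list(), whose body is not in this hunk. The sketch below
only illustrates the arithmetic implied here: num_pages as a round-up of the
buffer size, one address entry per page. The 4 KiB page size and the
pa_lo/pa_hi entry layout are assumptions for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PAGE_SZ 4096u  /* assumed firmware page size */

struct phys_addr {     /* hypothetical page-list entry layout */
        uint32_t pa_lo;
        uint32_t pa_hi;
};

/*
 * Fill one page-list entry per PAGE_SZ chunk of a physically
 * contiguous buffer starting at 'pa' and spanning 'size' bytes.
 */
static uint32_t
fill_page_list(uint64_t pa, size_t size, struct phys_addr *pages)
{
        uint32_t num_pages = (uint32_t)((size + PAGE_SZ - 1) / PAGE_SZ);
        uint32_t i;

        for (i = 0; i < num_pages; i++) {
                uint64_t p = pa + (uint64_t)i * PAGE_SZ;

                pages[i].pa_lo = (uint32_t)p;
                pages[i].pa_hi = (uint32_t)(p >> 32);
        }
        return (num_pages);
}

int
main(void)
{
        struct phys_addr pages[8];
        /* A 1024-entry CQ of 16-byte CQEs -> 16 KiB -> 4 pages. */
        uint32_t n = fill_page_list(0x12345000ull, 1024 * 16, pages);

        printf("num_pages = %u, first pa_lo = 0x%x\n", n, pages[0].pa_lo);
        return (0);
}
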
 281  288  
 282  289  /*
      290 + * function to create a V2 completion queue
      291 + *
      292 + * dev - software handle to the device
       293 + * eq - optional eq to be associated with the cq
      294 + * cqcfg - configuration for this queue
      295 + *
      296 + * return pointer to the cq created. NULL on failure
      297 + */
      298 +struct oce_cq *
      299 +oce_cq_create_v2(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
      300 +    uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
      301 +    boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode)
      302 +{
      303 +        struct oce_cq *cq = NULL;
      304 +        struct oce_mbx mbx;
      305 +        struct mbx_create_common_cq_v2 *fwcmd;
      306 +        int ret = 0;
      307 +
      308 +        _NOTE(ARGUNUSED(sol_event));
      309 +        _NOTE(ARGUNUSED(ncoalesce));
      310 +        /* create cq */
      311 +        cq = kmem_zalloc(sizeof (struct oce_cq), KM_NOSLEEP);
      312 +        if (cq == NULL) {
       313 +                oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
      314 +                    "CQ allocation failed");
      315 +                return (NULL);
      316 +        }
      317 +
      318 +        /* create the ring buffer for this queue */
      319 +        cq->ring = oce_create_ring_buffer(dev, q_len,
      320 +            item_size, DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
      321 +        if (cq->ring == NULL) {
      322 +                oce_log(dev, CE_WARN, MOD_CONFIG,
      323 +                    "CQ ring alloc failed:0x%p",
      324 +                    (void *)cq->ring);
      325 +                kmem_free(cq, sizeof (struct oce_cq));
      326 +                return (NULL);
      327 +        }
      328 +        /* initialize mailbox */
      329 +        bzero(&mbx, sizeof (struct oce_mbx));
      330 +        fwcmd = (struct mbx_create_common_cq_v2 *)&mbx.payload;
      331 +
      332 +        /* fill the command header */
      333 +        mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
      334 +            MBX_SUBSYSTEM_COMMON,
      335 +            OPCODE_CREATE_COMMON_CQ, MBX_TIMEOUT_SEC,
      336 +            sizeof (struct mbx_create_common_cq_v2), 2);
      337 +
      338 +        /* fill command context */
      339 +        /* dw0 */
      340 +        fwcmd->params.req.cq_ctx.eventable = is_eventable;
      341 +        fwcmd->params.req.cq_ctx.valid = 1;
      342 +        fwcmd->params.req.cq_ctx.count = 3;
      343 +        fwcmd->params.req.cq_ctx.nodelay = nodelay;
      344 +        fwcmd->params.req.cq_ctx.coalesce_wm = 0;
      345 +
      346 +        /* dw1 */
      347 +        fwcmd->params.req.cq_ctx.armed = armed;
      348 +        fwcmd->params.req.cq_ctx.eq_id = eq->eq_id;
      349 +        fwcmd->params.req.cq_ctx.cqe_count = q_len;
      350 +
      351 +        fwcmd->params.req.page_size = 1;
      352 +        /* fill the rest of the command */
      353 +        fwcmd->params.req.num_pages = cq->ring->dbuf.num_pages;
      354 +        oce_page_list(&cq->ring->dbuf, &fwcmd->params.req.pages[0],
      355 +            cq->ring->dbuf.num_pages);
      356 +
      357 +        /* fill rest of mbx */
      358 +        mbx.u0.s.embedded = 1;
      359 +        mbx.payload_length = sizeof (struct mbx_create_common_cq_v2);
      360 +        DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
      361 +
      362 +        /* now send the mail box */
      363 +        ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
      364 +
      365 +        if (ret != 0) {
      366 +                oce_log(dev, CE_WARN, MOD_CONFIG,
      367 +                    "CQ create failed: 0x%x", ret);
      368 +                destroy_ring_buffer(dev, cq->ring);
      369 +                kmem_free(cq, sizeof (struct oce_cq));
      370 +                return (NULL);
      371 +        }
      372 +
      373 +        cq->parent = dev;
      374 +        cq->eq = eq; /* eq array index */
      375 +        cq->cq_cfg.q_len = q_len;
      376 +
      377 +        /* interpret the response */
      378 +        cq->cq_id = LE_16(fwcmd->params.rsp.cq_id);
      379 +        dev->cq[cq->cq_id % OCE_MAX_CQ] = cq;
      380 +        cq->qstate = QCREATED;
      381 +        atomic_inc_32(&eq->ref_count);
      382 +        return (cq);
      383 +} /* oce_cq_create_v2 */
      384 +
      385 +/*
      386 + * function to create a completion queue
      387 + *
      388 + * dev - software handle to the device
       389 + * eq - optional eq to be associated with the cq
      390 + * cqcfg - configuration for this queue
      391 + *
      392 + * return pointer to the cq created. NULL on failure
      393 + */
      394 +struct oce_cq *
      395 +oce_cq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len,
      396 +    uint32_t item_size, boolean_t sol_event, boolean_t is_eventable,
      397 +    boolean_t nodelay, uint32_t ncoalesce, boolean_t armed, uint32_t mode)
      398 +{
      399 +        struct oce_cq *cq = NULL;
      400 +        if (LANCER_CHIP(dev))
      401 +                cq = oce_cq_create_v2(dev, eq, q_len, item_size, sol_event,
      402 +                    is_eventable, nodelay, ncoalesce, armed, mode);
      403 +        else
      404 +                cq = oce_cq_create_v0(dev, eq, q_len, item_size, sol_event,
      405 +                    is_eventable, nodelay, ncoalesce, armed, mode);
      406 +        return (cq);
      407 +}
      408 +
      409 +/*
 283  410   * function to delete a completion queue
 284  411   *
 285  412   * dev - software handle to the device
 286  413   * cq - handle to the CQ to delete
 287  414   *
 288  415   * return none
 289  416   */
 290  417  static void
 291      -oce_cq_del(struct oce_dev *dev, struct oce_cq *cq)
      418 +oce_cq_del(struct oce_dev *dev, struct oce_cq *cq, uint32_t mode)
 292  419  {
 293  420          struct oce_mbx mbx;
 294  421          struct mbx_destroy_common_cq *fwcmd;
 295  422  
 296  423          /* destroy the ring */
 297  424          destroy_ring_buffer(dev, cq->ring);
 298  425          cq->ring = NULL;
 299  426  
 300  427          bzero(&mbx, sizeof (struct oce_mbx));
 301  428          /* send a command to delete the CQ */
 302  429          fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
 303  430          fwcmd->params.req.id = cq->cq_id;
 304  431          (void) oce_destroy_q(dev, &mbx,
 305  432              sizeof (struct mbx_destroy_common_cq),
 306      -            QTYPE_CQ);
      433 +            QTYPE_CQ, mode);
 307  434  
 308  435          /* Reset the handler */
 309  436          cq->cq_handler = NULL;
      437 +        cq->qstate = QDELETED;
 310  438          dev->cq[cq->cq_id % OCE_MAX_CQ] = NULL;
 311  439          atomic_dec_32(&cq->eq->ref_count);
 312      -        mutex_destroy(&cq->lock);
 313  440  
 314  441          /* release the eq */
 315  442          kmem_free(cq, sizeof (struct oce_cq));
 316  443  } /* oce_cq_del */
 317  444  
 318  445  /*
 319  446   * function to create an MQ
 320  447   *
 321  448   * dev - software handle to the device
 322  449   * eq - the EQ to associate with the MQ for event notification
[4 lines elided]
 327  454  struct oce_mq *
 328  455  oce_mq_create(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
 329  456  {
 330  457          struct oce_mbx mbx;
 331  458          struct mbx_create_common_mq *fwcmd;
 332  459          struct oce_mq *mq = NULL;
 333  460          int ret = 0;
 334  461          struct oce_cq  *cq;
 335  462  
 336  463          /* Create the Completion Q */
 337      -        cq = oce_cq_create(dev, eq, CQ_LEN_256,
      464 +        cq = oce_cq_create(dev, eq, MCC_CQ_LEN,
 338  465              sizeof (struct oce_mq_cqe),
 339      -            B_FALSE, B_TRUE, B_TRUE, 0);
      466 +            B_FALSE, B_TRUE, B_TRUE, 0, B_FALSE, MBX_BOOTSTRAP);
 340  467          if (cq == NULL) {
 341  468                  return (NULL);
 342  469          }
 343  470  
 344  471  
 345  472          /* allocate the mq */
 346  473          mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);
 347  474  
 348  475          if (mq == NULL) {
 349  476                  goto mq_alloc_fail;
 350  477          }
 351  478  
 352  479          bzero(&mbx, sizeof (struct oce_mbx));
 353  480          /* allocate mbx */
 354  481          fwcmd = (struct mbx_create_common_mq *)&mbx.payload;
 355  482  
 356  483          /* create the ring buffer for this queue */
 357      -        mq->ring = create_ring_buffer(dev, q_len,
      484 +        mq->ring = oce_create_ring_buffer(dev, q_len,
 358  485              sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
 359  486          if (mq->ring == NULL) {
 360      -                oce_log(dev, CE_WARN, MOD_CONFIG,
 361      -                    "MQ ring alloc failed:0x%p",
 362      -                    (void *)mq->ring);
      487 +                oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
      488 +                    "Legacy MQ ring alloc failed");
 363  489                  goto mq_ring_alloc;
 364  490          }
 365  491  
 366  492          mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 367  493              MBX_SUBSYSTEM_COMMON,
 368  494              OPCODE_CREATE_COMMON_MQ, MBX_TIMEOUT_SEC,
 369      -            sizeof (struct mbx_create_common_mq));
      495 +            sizeof (struct mbx_create_common_mq), 0);
 370  496  
 371      -        fwcmd->params.req.num_pages = mq->ring->dbuf->num_pages;
 372      -        oce_page_list(mq->ring->dbuf, fwcmd->params.req.pages,
 373      -            mq->ring->dbuf->num_pages);
      497 +        fwcmd->params.req.num_pages = (uint16_t)mq->ring->dbuf.num_pages;
      498 +        oce_page_list(&mq->ring->dbuf, fwcmd->params.req.pages,
      499 +            mq->ring->dbuf.num_pages);
 374  500          fwcmd->params.req.context.u0.s.cq_id = cq->cq_id;
 375  501          fwcmd->params.req.context.u0.s.ring_size =
 376  502              OCE_LOG2(q_len) + 1;
 377  503          fwcmd->params.req.context.u0.s.valid = 1;
 378  504          fwcmd->params.req.context.u0.s.fid = dev->fn;
 379  505  
 380  506          /* fill rest of mbx */
 381  507          mbx.u0.s.embedded = 1;
 382  508          mbx.payload_length = sizeof (struct mbx_create_common_mq);
 383  509          DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 384  510  
 385  511          /* now send the mail box */
 386      -        ret = oce_mbox_post(dev, &mbx, NULL);
      512 +        ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, MBX_BOOTSTRAP);
 387  513          if (ret != DDI_SUCCESS) {
 388  514                  oce_log(dev, CE_WARN, MOD_CONFIG,
 389      -                    "MQ create failed: 0x%x", ret);
      515 +                    "Legacy MQ create failed: 0x%x", ret);
 390  516                  goto mq_fail;
 391  517          }
 392  518  
 393  519          /* interpret the response */
 394  520          mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
 395  521          mq->cq = cq;
 396  522          mq->cfg.q_len = (uint8_t)q_len;
 397  523          mq->cfg.eqd = 0;
 398  524  
 399  525          /* fill rest of the mq */
 400  526          mq->parent = dev;
 401  527  
 402  528          /* set the MQCQ handlers */
 403  529          cq->cq_handler = oce_drain_mq_cq;
 404  530          cq->cb_arg = (void *)mq;
 405  531          mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
 406  532              DDI_INTR_PRI(dev->intr_pri));
      533 +        oce_log(dev, CE_NOTE, MOD_CONFIG,
      534 +            "Legacy MQ CREATED SUCCESSFULLY MQID:%d\n",
      535 +            mq->mq_id);
 407  536          return (mq);
 408  537  
 409  538  mq_fail:
 410  539          destroy_ring_buffer(dev, mq->ring);
 411  540  mq_ring_alloc:
 412  541          kmem_free(mq, sizeof (struct oce_mq));
 413  542  mq_alloc_fail:
 414      -        oce_cq_del(dev, cq);
      543 +        oce_cq_del(dev, cq, MBX_BOOTSTRAP);
 415  544          return (NULL);
 416  545  } /* oce_mq_create */
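
The MQ context's ring_size, like the WQ's wq_size later in this file, is
programmed as OCE_LOG2(q_len) + 1 rather than the raw length. A standalone
check of that encoding, again with a stand-in floor-log2:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the driver's OCE_LOG2(): floor(log2(x)). */
static uint32_t
oce_log2(uint32_t x)
{
        uint32_t r = 0;

        while (x >>= 1)
                r++;
        return (r);
}

int
main(void)
{
        uint32_t q_len;

        /* ring_size as programmed into the MQ/WQ context. */
        for (q_len = 16; q_len <= 2048; q_len <<= 1)
                printf("q_len %4u -> ring_size %u\n", q_len,
                    oce_log2(q_len) + 1);
        return (0);
}
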
 417  546  
 418  547  /*
      548 + * function to create an extended V0 MQ
      549 + *
      550 + * dev - software handle to the device
      551 + * eq - the EQ to associate with the MQ for event notification
      552 + * q_len - the number of entries to create in the MQ
      553 + *
       554 + * return pointer to the created MQ; NULL on failure
      555 + */
      556 +struct oce_mq *
      557 +oce_mq_create_ext_v0(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
      558 +{
      559 +        struct oce_mbx mbx;
      560 +        struct mbx_create_common_mq_ext_v0 *fwcmd;
      561 +        struct oce_mq *mq = NULL;
      562 +        int ret = 0;
      563 +        struct oce_cq  *cq;
      564 +
      565 +        /* Create the Completion Q */
      566 +        cq = oce_cq_create(dev, eq, MCC_CQ_LEN,
      567 +            sizeof (struct oce_mq_cqe),
      568 +            B_FALSE, B_TRUE, B_TRUE, 0, B_FALSE, MBX_BOOTSTRAP);
      569 +        if (cq == NULL) {
      570 +                return (NULL);
      571 +        }
      572 +
      573 +
      574 +        /* allocate the mq */
      575 +        mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);
      576 +
      577 +        if (mq == NULL) {
      578 +                goto mq_alloc_fail;
      579 +        }
      580 +
      581 +        bzero(&mbx, sizeof (struct oce_mbx));
      582 +        /* allocate mbx */
      583 +        fwcmd = (struct mbx_create_common_mq_ext_v0 *)&mbx.payload;
      584 +
      585 +        /* create the ring buffer for this queue */
      586 +        mq->ring = oce_create_ring_buffer(dev, q_len,
      587 +            sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
      588 +        if (mq->ring == NULL) {
      589 +                oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
      590 +                    "MQ EXT ring alloc failed");
      591 +                goto mq_ring_alloc;
      592 +        }
      593 +
      594 +        mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
      595 +            MBX_SUBSYSTEM_COMMON,
      596 +            OPCODE_CREATE_COMMON_MQ_EXT, MBX_TIMEOUT_SEC,
      597 +            sizeof (struct mbx_create_common_mq_ext_v0), 0);
      598 +
      599 +        fwcmd->params.req.num_pages = mq->ring->dbuf.num_pages;
      600 +        oce_page_list(&mq->ring->dbuf, fwcmd->params.req.pages,
      601 +            mq->ring->dbuf.num_pages);
      602 +        fwcmd->params.req.context.u0.s.cq_id = cq->cq_id;
      603 +        fwcmd->params.req.context.u0.s.ring_size =
      604 +            OCE_LOG2(q_len) + 1;
      605 +        fwcmd->params.req.context.u0.s.valid = 1;
      606 +        fwcmd->params.req.context.u0.s.fid = dev->fn;
      607 +
       608 +        /* Register for Link State (bit 1), Group 5 (bit 5) and Debug events */
      609 +        fwcmd->params.req.async_event_bitmap[0] =
      610 +            (1 << ASYNC_EVENT_CODE_LINK_STATE) |
      611 +            (1 << ASYNC_EVENT_CODE_GRP_5) |
      612 +            (1 << ASYNC_EVENT_CODE_DEBUG);
      613 +
      614 +        /* fill rest of mbx */
      615 +        mbx.u0.s.embedded = 1;
      616 +        mbx.payload_length = sizeof (struct mbx_create_common_mq_ext_v0);
      617 +        DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
      618 +
      619 +        /* now send the mail box */
      620 +        ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, MBX_BOOTSTRAP);
      621 +        if (ret != DDI_SUCCESS) {
      622 +                oce_log(dev, CE_WARN, MOD_CONFIG,
      623 +                    "Extended MQ create failed: 0x%x", ret);
      624 +                goto mq_fail;
      625 +        }
      626 +
      627 +        /* interpret the response */
      628 +        mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
      629 +        mq->cq = cq;
      630 +        mq->cfg.q_len = (uint8_t)q_len;
      631 +        mq->cfg.eqd = 0;
      632 +
      633 +        /* fill rest of the mq */
      634 +        mq->parent = dev;
      635 +        mq->qstate = QCREATED;
      636 +        mq->mq_free = mq->cfg.q_len;
      637 +
       638 +        /* reset indices */
      639 +        mq->ring->cidx = 0;
      640 +        mq->ring->pidx = 0;
      641 +
      642 +        /* set the MQCQ handlers */
      643 +        cq->cq_handler = oce_drain_mq_cq;
      644 +        cq->cb_arg = (void *)mq;
      645 +        mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
      646 +            DDI_INTR_PRI(dev->intr_pri));
      647 +        oce_log(dev, CE_NOTE, MOD_CONFIG,
      648 +            "Ext MQ CREATED SUCCESSFULLY MQID:%d\n", mq->mq_id);
      649 +        return (mq);
      650 +
      651 +mq_fail:
      652 +        destroy_ring_buffer(dev, mq->ring);
      653 +mq_ring_alloc:
      654 +        kmem_free(mq, sizeof (struct oce_mq));
      655 +mq_alloc_fail:
      656 +        oce_cq_del(dev, cq, MBX_BOOTSTRAP);
      657 +        return (NULL);
      658 +} /* oce_mq_create_ext_v0 */
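
The async_event_bitmap is a plain bit-OR of (1 << event_code). The driver
comment pins LINK_STATE to bit 1 and GRP_5 to bit 5; the value of
ASYNC_EVENT_CODE_DEBUG is not visible in this hunk, so the sketch below uses
a placeholder for it:

#include <stdio.h>
#include <stdint.h>

/*
 * LINK_STATE and GRP_5 match the bit numbers named in the driver
 * comment; the DEBUG code is a placeholder, not the real value.
 */
enum {
        ASYNC_EVENT_CODE_LINK_STATE = 1,
        ASYNC_EVENT_CODE_GRP_5 = 5,
        ASYNC_EVENT_CODE_DEBUG = 9 /* placeholder */
};

int
main(void)
{
        uint32_t bitmap = (1u << ASYNC_EVENT_CODE_LINK_STATE) |
            (1u << ASYNC_EVENT_CODE_GRP_5) |
            (1u << ASYNC_EVENT_CODE_DEBUG);

        /* Bits 1, 5 and 9 -> 0x2 | 0x20 | 0x200 = 0x222. */
        printf("async_event_bitmap[0] = 0x%x\n", bitmap);
        return (0);
}
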
      659 +
      660 +/*
      661 + * function to create an extended V1 MQ
      662 + *
      663 + * dev - software handle to the device
      664 + * eq - the EQ to associate with the MQ for event notification
      665 + * q_len - the number of entries to create in the MQ
      666 + *
       667 + * return pointer to the created MQ; NULL on failure
      668 + */
      669 +struct oce_mq *
      670 +oce_mq_create_ext_v1(struct oce_dev *dev, struct oce_eq *eq, uint32_t q_len)
      671 +{
      672 +        struct oce_mbx mbx;
      673 +        struct mbx_create_common_mq_ext_v1 *fwcmd;
      674 +        struct oce_mq *mq = NULL;
      675 +        int ret = 0;
      676 +        struct oce_cq  *cq;
      677 +
      678 +        /* Create the Completion Q */
      679 +        cq = oce_cq_create(dev, eq, MCC_CQ_LEN,
      680 +            sizeof (struct oce_mq_cqe),
      681 +            B_FALSE, B_TRUE, B_TRUE, 0, B_FALSE, MBX_BOOTSTRAP);
      682 +        if (cq == NULL) {
      683 +                return (NULL);
      684 +        }
      685 +
      686 +
      687 +        /* allocate the mq */
      688 +        mq = kmem_zalloc(sizeof (struct oce_mq), KM_NOSLEEP);
      689 +
      690 +        if (mq == NULL) {
      691 +                goto mq_alloc_fail;
      692 +        }
      693 +
      694 +        bzero(&mbx, sizeof (struct oce_mbx));
      695 +        /* allocate mbx */
      696 +        fwcmd = (struct mbx_create_common_mq_ext_v1 *)&mbx.payload;
      697 +
      698 +        /* create the ring buffer for this queue */
      699 +        mq->ring = oce_create_ring_buffer(dev, q_len,
      700 +            sizeof (struct oce_mbx), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
      701 +        if (mq->ring == NULL) {
      702 +                oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
      703 +                    "MQ EXT ring alloc failed");
      704 +                goto mq_ring_alloc;
      705 +        }
      706 +
      707 +        mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
      708 +            MBX_SUBSYSTEM_COMMON,
      709 +            OPCODE_CREATE_COMMON_MQ_EXT, MBX_TIMEOUT_SEC,
      710 +            sizeof (struct mbx_create_common_mq_ext_v1), 1);
      711 +
      712 +        fwcmd->params.req.cq_id = cq->cq_id;
      713 +
      714 +        fwcmd->params.req.context.u0.s.ring_size =
      715 +            OCE_LOG2(q_len) + 1;
      716 +        fwcmd->params.req.context.u0.s.valid = 1;
      717 +        fwcmd->params.req.context.u0.s.async_cq_id = cq->cq_id;
      718 +        fwcmd->params.req.context.u0.s.async_cq_valid = 1;
      719 +
      720 +        fwcmd->params.req.num_pages = mq->ring->dbuf.num_pages;
      721 +        oce_page_list(&mq->ring->dbuf, fwcmd->params.req.pages,
      722 +            mq->ring->dbuf.num_pages);
      723 +
       724 +        /* Register for Link State (bit 1), Group 5 (bit 5) and Debug events */
      725 +        fwcmd->params.req.async_event_bitmap[0] =
      726 +            (1 << ASYNC_EVENT_CODE_LINK_STATE) |
      727 +            (1 << ASYNC_EVENT_CODE_GRP_5) |
      728 +            (1 << ASYNC_EVENT_CODE_DEBUG);
      729 +
      730 +        /* fill rest of mbx */
      731 +        mbx.u0.s.embedded = 1;
      732 +        mbx.payload_length = sizeof (struct mbx_create_common_mq_ext_v1);
      733 +        DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
      734 +
      735 +        /* now send the mail box */
      736 +        ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, MBX_BOOTSTRAP);
      737 +        if (ret != DDI_SUCCESS) {
      738 +                oce_log(dev, CE_WARN, MOD_CONFIG,
      739 +                    "Extended MQ create failed: 0x%x", ret);
      740 +                goto mq_fail;
      741 +        }
      742 +
      743 +        /* interpret the response */
      744 +        mq->mq_id = LE_16(fwcmd->params.rsp.mq_id);
      745 +        mq->cq = cq;
      746 +        mq->cfg.q_len = (uint8_t)q_len;
      747 +        mq->cfg.eqd = 0;
      748 +
      749 +        /* fill rest of the mq */
      750 +        mq->parent = dev;
      751 +        mq->qstate = QCREATED;
      752 +        mq->mq_free = mq->cfg.q_len;
      753 +
       754 +        /* reset indices */
      755 +        mq->ring->cidx = 0;
      756 +        mq->ring->pidx = 0;
      757 +
      758 +        /* set the MQCQ handlers */
      759 +        cq->cq_handler = oce_drain_mq_cq;
      760 +        cq->cb_arg = (void *)mq;
      761 +        mutex_init(&mq->lock, NULL, MUTEX_DRIVER,
      762 +            DDI_INTR_PRI(dev->intr_pri));
      763 +        oce_log(dev, CE_NOTE, MOD_CONFIG,
      764 +            "Ext MQ CREATED SUCCESSFULLY MQID:%d\n", mq->mq_id);
      765 +        return (mq);
      766 +
      767 +mq_fail:
      768 +        destroy_ring_buffer(dev, mq->ring);
      769 +mq_ring_alloc:
      770 +        kmem_free(mq, sizeof (struct oce_mq));
      771 +mq_alloc_fail:
      772 +        oce_cq_del(dev, cq, MBX_BOOTSTRAP);
      773 +        return (NULL);
       774 +} /* oce_mq_create_ext_v1 */
      775 +
      776 +/*
 419  777   * function to delete an MQ
 420  778   *
 421  779   * dev - software handle to the device
 422  780   * mq - pointer to the MQ to delete
 423  781   *
 424  782   * return none
 425  783   */
 426  784  static void
 427  785  oce_mq_del(struct oce_dev *dev, struct oce_mq *mq)
 428  786  {
[1 line elided]
 430  788          struct mbx_destroy_common_mq *fwcmd;
 431  789  
 432  790          /* destroy the ring */
 433  791          destroy_ring_buffer(dev, mq->ring);
 434  792          mq->ring = NULL;
 435  793          bzero(&mbx, sizeof (struct oce_mbx));
 436  794          fwcmd = (struct mbx_destroy_common_mq *)&mbx.payload;
 437  795          fwcmd->params.req.id = mq->mq_id;
 438  796          (void) oce_destroy_q(dev, &mbx,
 439  797              sizeof (struct mbx_destroy_common_mq),
 440      -            QTYPE_MQ);
 441      -        oce_cq_del(dev, mq->cq);
      798 +            QTYPE_MQ, MBX_BOOTSTRAP);
      799 +        oce_cq_del(dev, mq->cq, MBX_BOOTSTRAP);
 442  800          mq->cq = NULL;
      801 +        mq->qstate = QDELETED;
 443  802          mutex_destroy(&mq->lock);
 444  803          kmem_free(mq, sizeof (struct oce_mq));
 445  804  } /* oce_mq_del */
 446  805  
 447  806  /*
 448  807   * function to create a WQ for NIC Tx
 449  808   *
 450  809   * dev - software handle to the device
 451  810   * wqcfg - configuration structure providing WQ config parameters
 452  811   *
  454  813   * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 454  813   */
 455      -static struct oce_wq *
 456      -oce_wq_init(struct oce_dev *dev,  uint32_t q_len, int wq_type)
      814 +int oce_wq_init(struct oce_dev *dev, struct oce_wq *wq, uint32_t q_len,
      815 +    int wq_type)
 457  816  {
 458      -        struct oce_wq *wq;
 459  817          char str[MAX_POOL_NAME];
 460  818          int ret;
 461  819          static int wq_id = 0;
      820 +        int buf_size;
 462  821  
 463  822          ASSERT(dev != NULL);
 464  823          /* q_len must be min 256 and max 2k */
 465  824          if (q_len < 256 || q_len > 2048) {
 466  825                  oce_log(dev, CE_WARN, MOD_CONFIG,
 467  826                      "Invalid q length. Must be "
  468  827                      "[256, 2048]: 0x%x", q_len);
 469      -                return (NULL);
      828 +                return (DDI_FAILURE);
 470  829          }
 471  830  
 472      -        /* allocate wq */
 473      -        wq = kmem_zalloc(sizeof (struct oce_wq), KM_NOSLEEP);
 474      -        if (wq == NULL) {
 475      -                oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 476      -                    "WQ allocation failed");
 477      -                return (NULL);
 478      -        }
 479      -
 480  831          /* Set the wq config */
 481  832          wq->cfg.q_len = q_len;
 482  833          wq->cfg.wq_type = (uint8_t)wq_type;
 483  834          wq->cfg.eqd = OCE_DEFAULT_WQ_EQD;
 484  835          wq->cfg.nbufs = 2 * wq->cfg.q_len;
 485  836          wq->cfg.nhdl = 2 * wq->cfg.q_len;
 486      -        wq->cfg.buf_size = dev->tx_bcopy_limit;
 487  837  
 488      -        /* assign parent */
 489      -        wq->parent = (void *)dev;
      838 +        buf_size = ((dev->tx_bcopy_limit >> 10) +
      839 +            ((dev->tx_bcopy_limit & (((uint32_t)1 << 10) - 1)) > 0 ? 1 :
      840 +            0)) << 10;
      841 +        wq->cfg.buf_size = (uint16_t)buf_size;
 490  842  
      843 +        /* initialize ring statistics */
      844 +        wq->stat_bytes = wq->stat_pkts = 0;
      845 +
 491  846          /* Create the WQ Buffer pool */
 492  847          ret  = oce_wqb_cache_create(wq, wq->cfg.buf_size);
 493  848          if (ret != DDI_SUCCESS) {
 494  849                  oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 495  850                      "WQ Buffer Pool create failed ");
 496      -                goto wqb_fail;
      851 +                return (DDI_FAILURE);
 497  852          }
 498  853  
 499  854          /* Create a pool of memory handles */
 500  855          ret = oce_wqm_cache_create(wq);
 501  856          if (ret != DDI_SUCCESS) {
 502  857                  oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 503  858                      "WQ MAP Handles Pool create failed ");
 504  859                  goto wqm_fail;
 505  860          }
 506  861  
[1 line elided]
 508  863              dev->dev_id, "_", wq_id++);
 509  864          wq->wqed_cache = kmem_cache_create(str, sizeof (oce_wqe_desc_t),
 510  865              0, NULL, NULL, NULL, NULL, NULL, 0);
 511  866          if (wq->wqed_cache == NULL) {
 512  867                  oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 513  868                      "WQ Packet Desc Pool create failed ");
 514  869                  goto wqed_fail;
 515  870          }
 516  871  
 517  872          /* create the ring buffer */
 518      -        wq->ring = create_ring_buffer(dev, q_len,
      873 +        wq->ring = oce_create_ring_buffer(dev, q_len,
 519  874              NIC_WQE_SIZE, DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
 520  875          if (wq->ring == NULL) {
 521  876                  oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 522  877                      "Failed to create WQ ring ");
 523  878                  goto wq_ringfail;
 524  879          }
 525  880  
 526  881          /* Initialize WQ lock */
 527  882          mutex_init(&wq->tx_lock, NULL, MUTEX_DRIVER,
 528  883              DDI_INTR_PRI(dev->intr_pri));
  529  884          /* Initialize WQ tx completion lock */
 530  885          mutex_init(&wq->txc_lock, NULL, MUTEX_DRIVER,
 531  886              DDI_INTR_PRI(dev->intr_pri));
 532  887          atomic_inc_32(&dev->nwqs);
 533  888  
 534      -        OCE_LIST_CREATE(&wq->wqe_desc_list, DDI_INTR_PRI(dev->intr_pri));
 535      -        return (wq);
      889 +        mutex_init(&wq->wqed_list_lock, NULL, MUTEX_DRIVER,
      890 +            DDI_INTR_PRI(dev->intr_pri));
 536  891  
      892 +        list_create(&wq->wqe_desc_list, sizeof (oce_wqe_desc_t),
      893 +            offsetof(oce_wqe_desc_t, link));
      894 +        return (DDI_SUCCESS);
      895 +
 537  896  wqcq_fail:
 538  897          destroy_ring_buffer(dev, wq->ring);
 539  898  wq_ringfail:
 540  899          kmem_cache_destroy(wq->wqed_cache);
 541  900  wqed_fail:
 542  901          oce_wqm_cache_destroy(wq);
 543  902  wqm_fail:
 544  903          oce_wqb_cache_destroy(wq);
 545      -wqb_fail:
 546      -        kmem_free(wq, sizeof (struct oce_wq));
 547      -        return (NULL);
      904 +        return (DDI_FAILURE);
  548  905  } /* oce_wq_init */
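
The buf_size arithmetic in oce_wq_init() rounds dev->tx_bcopy_limit up to the
next 1 KiB boundary. A standalone check that the shift/mask form used above
agrees with the conventional (x + 1023) & ~1023 round-up:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Round-up exactly as written in oce_wq_init(). */
static uint32_t
roundup_1k_driver(uint32_t x)
{
        return (((x >> 10) +
            ((x & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10);
}

/* The conventional mask form of the same round-up. */
static uint32_t
roundup_1k_mask(uint32_t x)
{
        return ((x + 1023) & ~(uint32_t)1023);
}

int
main(void)
{
        uint32_t samples[] = { 0, 1, 1023, 1024, 1025, 2048, 4100 };
        size_t i;

        for (i = 0; i < sizeof (samples) / sizeof (samples[0]); i++) {
                uint32_t x = samples[i];

                assert(roundup_1k_driver(x) == roundup_1k_mask(x));
                printf("%5u -> %5u\n", x, roundup_1k_driver(x));
        }
        return (0);
}
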
 549  906  
 550  907  /*
  551  908   * function to release WQ resources
 552  909   *
 553  910   * dev - software handle to the device
 554  911   * wq - WQ to delete
 555  912   *
  556  913   * return none
 557  914   */
 558  915  static void
 559  916  oce_wq_fini(struct oce_dev *dev, struct oce_wq *wq)
 560  917  {
  561  918          /* destroy the buffer and handle caches */
 562  919          oce_wqb_cache_destroy(wq);
 563  920          oce_wqm_cache_destroy(wq);
 564  921          kmem_cache_destroy(wq->wqed_cache);
 565  922  
 566  923          /* Free the packet descriptor list */
 567      -        OCE_LIST_DESTROY(&wq->wqe_desc_list);
      924 +        list_destroy(&wq->wqe_desc_list);
 568  925          destroy_ring_buffer(dev, wq->ring);
 569  926          wq->ring = NULL;
 570  927          /* Destroy the Mutex */
      928 +        mutex_destroy(&wq->wqed_list_lock);
 571  929          mutex_destroy(&wq->tx_lock);
 572  930          mutex_destroy(&wq->txc_lock);
 573      -        kmem_free(wq, sizeof (struct oce_wq));
 574  931          atomic_dec_32(&dev->nwqs);
  575  932  } /* oce_wq_fini */
 576  933  
 577  934  
 578  935  static int
 579      -oce_wq_create(struct oce_wq *wq, struct oce_eq *eq)
      936 +oce_wq_create(struct oce_wq *wq, struct oce_eq *eq, uint32_t mode)
 580  937  {
 581  938  
 582  939          struct oce_mbx mbx;
 583  940          struct mbx_create_nic_wq *fwcmd;
 584  941          struct oce_dev *dev = wq->parent;
 585  942          struct oce_cq *cq;
 586  943          int ret;
 587  944  
 588  945          /* create the CQ */
 589  946          cq = oce_cq_create(dev, eq, CQ_LEN_1024,
 590  947              sizeof (struct oce_nic_tx_cqe),
 591      -            B_FALSE, B_TRUE, B_FALSE, 3);
      948 +            B_FALSE, B_TRUE, B_FALSE, 3, B_FALSE, mode);
 592  949          if (cq == NULL) {
 593  950                  oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 594      -                    "WCCQ create failed ");
      951 +                    "WQCQ create failed ");
 595  952                  return (DDI_FAILURE);
 596  953          }
 597  954          /* now fill the command */
 598  955          bzero(&mbx, sizeof (struct oce_mbx));
 599  956          fwcmd = (struct mbx_create_nic_wq *)&mbx.payload;
 600      -        mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 601      -            MBX_SUBSYSTEM_NIC,
 602      -            OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
 603      -            sizeof (struct mbx_create_nic_wq));
      957 +        if (LANCER_CHIP(dev)) {
      958 +                mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
      959 +                    MBX_SUBSYSTEM_NIC,
      960 +                    OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
      961 +                    sizeof (struct mbx_create_nic_wq), 1);
      962 +                fwcmd->params.req.ctx.if_id = dev->if_id;
      963 +        } else {
      964 +                mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
      965 +                    MBX_SUBSYSTEM_NIC,
      966 +                    OPCODE_CREATE_NIC_WQ, MBX_TIMEOUT_SEC,
      967 +                    sizeof (struct mbx_create_nic_wq), 0);
      968 +        }
 604  969  
 605      -        fwcmd->params.req.nic_wq_type = (uint8_t)wq->cfg.wq_type;
 606      -        fwcmd->params.req.num_pages = wq->ring->dbuf->num_pages;
      970 +        fwcmd->params.req.type = (uint8_t)wq->cfg.wq_type;
      971 +        fwcmd->params.req.num_pages = wq->ring->dbuf.num_pages;
      972 +        fwcmd->params.req.ulp_num = BE_ULP1_NUM;
  607  973          oce_log(dev, CE_NOTE, MOD_CONFIG, "NUM_PAGES = %d size = %lu",
 608      -            (uint32_t)wq->ring->dbuf->num_pages,
 609      -            wq->ring->dbuf->size);
      974 +            (uint32_t)wq->ring->dbuf.num_pages,
      975 +            wq->ring->dbuf.size);
 610  976  
 611      -        /* workaround: fill 0x01 for ulp_mask in rsvd0 */
 612      -        fwcmd->params.req.rsvd0 = 0x01;
 613      -        fwcmd->params.req.wq_size = OCE_LOG2(wq->cfg.q_len) + 1;
 614      -        fwcmd->params.req.valid = 1;
 615      -        fwcmd->params.req.pd_id = 0;
 616      -        fwcmd->params.req.pci_function_id = dev->fn;
 617      -        fwcmd->params.req.cq_id = cq->cq_id;
      977 +        /* Context info */
      978 +        fwcmd->params.req.ctx.wq_size = OCE_LOG2(wq->cfg.q_len) + 1;
      979 +        fwcmd->params.req.ctx.valid = 1;
      980 +        fwcmd->params.req.ctx.cofe = 1;
      981 +        fwcmd->params.req.ctx.no_rem_allowed = 1;
      982 +        fwcmd->params.req.ctx.cq_id = cq->cq_id;
 618  983  
 619      -        oce_page_list(wq->ring->dbuf, fwcmd->params.req.pages,
 620      -            wq->ring->dbuf->num_pages);
      984 +        oce_page_list(&wq->ring->dbuf, fwcmd->params.req.pages,
      985 +            wq->ring->dbuf.num_pages);
 621  986  
 622  987          /* fill rest of mbx */
 623  988          mbx.u0.s.embedded = 1;
 624  989          mbx.payload_length = sizeof (struct mbx_create_nic_wq);
 625  990          DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 626  991  
 627  992          /* now post the command */
 628      -        ret = oce_mbox_post(dev, &mbx, NULL);
      993 +        ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
 629  994          if (ret != DDI_SUCCESS) {
 630  995                  oce_log(dev, CE_WARN, MOD_CONFIG,
 631      -                    "WQ create failed: %d", ret);
 632      -                oce_cq_del(dev, cq);
      996 +                    "WQ create failed: 0x%x", ret);
      997 +                oce_cq_del(dev, cq, mode);
 633  998                  return (ret);
 634  999          }
 635 1000  
 636 1001          /* interpret the response */
 637 1002          wq->wq_id = LE_16(fwcmd->params.rsp.wq_id);
 638 1003          wq->qstate = QCREATED;
 639 1004          wq->cq = cq;
 640 1005          /* set the WQCQ handlers */
 641 1006          wq->cq->cq_handler = oce_drain_wq_cq;
 642 1007          wq->cq->cb_arg = (void *)wq;
     1008 +
 643 1009          /* All are free to start with */
 644 1010          wq->wq_free = wq->cfg.q_len;
  645 1011          /* reset indices */
 646 1012          wq->ring->cidx = 0;
 647 1013          wq->ring->pidx = 0;
 648 1014          oce_log(dev, CE_NOTE, MOD_CONFIG, "WQ CREATED WQID = %d",
 649 1015              wq->wq_id);
 650 1016  
 651 1017          return (0);
 652 1018  }
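
oce_wq_create() now selects the request header version by chip family:
version 1 (with an if_id in the context) for Lancer, version 0 otherwise,
mirroring the oce_cq_create_v2/_v0 split earlier in the file. A trimmed-down
sketch of that pattern; the struct layout and the subsystem/opcode values
here are illustrative placeholders, not the driver's:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/*
 * Trimmed-down request header for illustration only; the driver's
 * mbx_common_req_hdr_init() takes more fields than shown here.
 */
struct req_hdr {
        uint8_t opcode;
        uint8_t subsystem;
        uint8_t version;
        uint32_t request_length;
};

static void
req_hdr_init(struct req_hdr *hdr, uint8_t subsys, uint8_t opcode,
    uint32_t len, uint8_t version)
{
        (void) memset(hdr, 0, sizeof (*hdr));
        hdr->subsystem = subsys;
        hdr->opcode = opcode;
        hdr->request_length = len;
        hdr->version = version; /* 0 = legacy BE, 1 = Lancer NIC WQ */
}

int
main(void)
{
        struct req_hdr hdr;
        int lancer = 1; /* stand-in for LANCER_CHIP(dev) */

        /* Same opcode either way; only the context version differs. */
        req_hdr_init(&hdr, 0x01, 0x07, 64, lancer ? 1 : 0);
        printf("version = %u\n", hdr.version);
        return (0);
}
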
 653 1019  
 654 1020  /*
 655 1021   * function to delete a WQ
 656 1022   *
 657 1023   * dev - software handle to the device
 658 1024   * wq - WQ to delete
 659 1025   *
 660 1026   * return none
 661 1027   */
 662 1028  static void
 663      -oce_wq_del(struct oce_dev *dev, struct oce_wq *wq)
     1029 +oce_wq_del(struct oce_dev *dev, struct oce_wq *wq, uint32_t mode)
 664 1030  {
 665 1031          struct oce_mbx mbx;
 666 1032          struct mbx_delete_nic_wq *fwcmd;
 667 1033  
 668      -
 669 1034          ASSERT(dev != NULL);
 670 1035          ASSERT(wq != NULL);
 671 1036          if (wq->qstate == QCREATED) {
 672 1037                  bzero(&mbx, sizeof (struct oce_mbx));
 673 1038                  /* now fill the command */
 674 1039                  fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
 675 1040                  fwcmd->params.req.wq_id = wq->wq_id;
 676 1041                  (void) oce_destroy_q(dev, &mbx,
 677 1042                      sizeof (struct mbx_delete_nic_wq),
 678      -                    QTYPE_WQ);
     1043 +                    QTYPE_WQ, mode);
 679 1044                  wq->qstate = QDELETED;
 680      -                oce_cq_del(dev, wq->cq);
     1045 +                oce_cq_del(dev, wq->cq, mode);
 681 1046                  wq->cq = NULL;
 682 1047          }
 683 1048  } /* oce_wq_del */
 684 1049  
 685 1050  /*
 686 1051   * function to allocate RQ resources
 687 1052   *
 688 1053   * dev - software handle to the device
 689 1054   * rqcfg - configuration structure providing RQ config parameters
 690 1055   *
  691 1056   * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 692 1057   */
 693      -static struct oce_rq *
 694      -oce_rq_init(struct oce_dev *dev, uint32_t q_len,
 695      -    uint32_t frag_size, uint32_t mtu,
 696      -    boolean_t rss)
     1058 +int oce_rq_init(struct oce_dev *dev, struct oce_rq *rq, uint32_t q_len,
     1059 +    uint32_t frag_size, uint32_t mtu)
 697 1060  {
 698      -
 699      -        struct oce_rq *rq;
 700 1061          int ret;
 701 1062  
 702 1063          /* validate q creation parameters */
 703 1064          if (!OCE_LOG2(frag_size))
 704 1065                  return (NULL);
 705 1066          if ((q_len == 0) || (q_len > 1024))
 706 1067                  return (NULL);
 707 1068  
 708      -        /* allocate the rq */
 709      -        rq = kmem_zalloc(sizeof (struct oce_rq), KM_NOSLEEP);
 710      -        if (rq == NULL) {
 711      -                oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 712      -                    "RQ allocation failed");
 713      -                return (NULL);
 714      -        }
 715      -
 716 1069          rq->cfg.q_len = q_len;
 717 1070          rq->cfg.frag_size = frag_size;
 718 1071          rq->cfg.mtu = mtu;
 719 1072          rq->cfg.eqd = 0;
 720 1073          rq->cfg.nbufs = dev->rq_max_bufs;
 721      -        rq->cfg.is_rss_queue = rss;
 722 1074  
 723      -        /* assign parent */
 724      -        rq->parent = (void *)dev;
     1075 +        /* initialize ring statistics */
     1076 +        rq->stat_bytes = rq->stat_pkts = 0;
 725 1077  
 726 1078          rq->rq_bdesc_array =
 727 1079              kmem_zalloc((sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs), KM_NOSLEEP);
 728 1080          if (rq->rq_bdesc_array == NULL) {
 729 1081                  oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 730 1082                      "RQ bdesc alloc failed");
 731      -                goto rqbd_alloc_fail;
     1083 +                return (DDI_FAILURE);
 732 1084          }
 733 1085          /* create the rq buffer descriptor ring */
 734 1086          rq->shadow_ring =
 735 1087              kmem_zalloc((rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)),
 736 1088              KM_NOSLEEP);
 737 1089          if (rq->shadow_ring == NULL) {
 738 1090                  oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 739 1091                      "RQ shadow ring alloc failed ");
 740 1092                  goto rq_shdw_fail;
 741 1093          }
 742 1094  
 743 1095          /* allocate the free list array */
 744 1096          rq->rqb_freelist =
 745 1097              kmem_zalloc(rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *), KM_NOSLEEP);
 746 1098          if (rq->rqb_freelist == NULL) {
 747 1099                  goto rqb_free_list_fail;
 748 1100          }
 749 1101          /* create the buffer pool */
 750      -        ret  =  oce_rqb_cache_create(rq, dev->rq_frag_size +
 751      -            OCE_RQE_BUF_HEADROOM);
     1102 +        ret  =  oce_rqb_cache_create(rq, rq->cfg.frag_size);
 752 1103          if (ret != DDI_SUCCESS) {
 753 1104                  goto rqb_fail;
 754 1105          }
 755 1106  
 756 1107          /* create the ring buffer */
 757      -        rq->ring = create_ring_buffer(dev, q_len,
     1108 +        rq->ring = oce_create_ring_buffer(dev, q_len,
 758 1109              sizeof (struct oce_nic_rqe), DDI_DMA_CONSISTENT | DDI_DMA_RDWR);
 759 1110          if (rq->ring == NULL) {
 760 1111                  oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 761 1112                      "RQ ring create failed ");
 762 1113                  goto rq_ringfail;
 763 1114          }
 764 1115  
 765 1116          /* Initialize the RQ lock */
 766 1117          mutex_init(&rq->rx_lock, NULL, MUTEX_DRIVER,
 767 1118              DDI_INTR_PRI(dev->intr_pri));
 768 1119          /* Initialize the recharge  lock */
 769 1120          mutex_init(&rq->rc_lock, NULL, MUTEX_DRIVER,
 770 1121              DDI_INTR_PRI(dev->intr_pri));
 771 1122          atomic_inc_32(&dev->nrqs);
 772      -        return (rq);
     1123 +        return (DDI_SUCCESS);
 773 1124  
 774 1125  rq_ringfail:
 775 1126          oce_rqb_cache_destroy(rq);
 776 1127  rqb_fail:
 777 1128          kmem_free(rq->rqb_freelist,
 778 1129              (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
 779 1130  rqb_free_list_fail:
 780 1131  
 781 1132          kmem_free(rq->shadow_ring,
 782 1133              (rq->cfg.q_len * sizeof (oce_rq_bdesc_t *)));
 783 1134  rq_shdw_fail:
 784 1135          kmem_free(rq->rq_bdesc_array,
 785 1136              (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
 786      -rqbd_alloc_fail:
 787      -        kmem_free(rq, sizeof (struct oce_rq));
 788      -        return (NULL);
     1137 +        return (DDI_FAILURE);
 789 1138  } /* oce_rq_init */
 790 1139  
     1140 +
 791 1141  /*
 792 1142   * function to free the RQ resources allocated by oce_rq_init
 793 1143   *
 794 1144   * dev - software handle to the device
 795 1145   * rq - RQ whose resources are to be freed
 796 1146   *
 797 1147   * return none
 798 1148   */
 799      -static void
     1149 +void
 800 1150  oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq)
 801 1151  {
 802 1152          /* Destroy buffer cache */
     1153 +        rq->qstate = QFINI;
 803 1154          oce_rqb_cache_destroy(rq);
 804 1155          destroy_ring_buffer(dev, rq->ring);
 805 1156          rq->ring = NULL;
 806 1157          kmem_free(rq->shadow_ring,
 807 1158              sizeof (oce_rq_bdesc_t *) * rq->cfg.q_len);
 808 1159          rq->shadow_ring = NULL;
 809 1160          kmem_free(rq->rq_bdesc_array,
 810 1161              (sizeof (oce_rq_bdesc_t) * rq->cfg.nbufs));
 811 1162          rq->rq_bdesc_array = NULL;
 812 1163          kmem_free(rq->rqb_freelist,
 813 1164              (rq->cfg.nbufs * sizeof (oce_rq_bdesc_t *)));
 814 1165          rq->rqb_freelist = NULL;
 815 1166          mutex_destroy(&rq->rx_lock);
 816 1167          mutex_destroy(&rq->rc_lock);
 817      -        kmem_free(rq, sizeof (struct oce_rq));
 818 1168          atomic_dec_32(&dev->nrqs);
 819 1169  } /* oce_rq_fini */
 820 1170  
 821 1171  
 822 1172  static int
 823      -oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
     1173 +oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq,
     1174 +    uint32_t mode)
 824 1175  {
 825 1176          struct oce_mbx mbx;
 826 1177          struct mbx_create_nic_rq *fwcmd;
 827 1178          struct oce_dev *dev = rq->parent;
 828 1179          struct oce_cq *cq;
     1180 +        int cq_len;
 829 1181          int ret;
 830 1182  
 831      -        cq = oce_cq_create(dev, eq, CQ_LEN_1024, sizeof (struct oce_nic_rx_cqe),
 832      -            B_FALSE, B_TRUE, B_FALSE, 3);
     1183 +        if (LANCER_CHIP(dev))
     1184 +                cq_len = CQ_LEN_2048;
     1185 +        else
     1186 +                cq_len = CQ_LEN_1024;
 833 1187  
     1188 +        cq = oce_cq_create(dev, eq, cq_len, sizeof (struct oce_nic_rx_cqe),
     1189 +            B_FALSE, B_TRUE, B_FALSE, 3, B_FALSE, mode);
     1190 +
 834 1191          if (cq == NULL) {
 835 1192                  return (DDI_FAILURE);
 836 1193          }
 837 1194  
 838 1195          /* now fill the command */
 839 1196          bzero(&mbx, sizeof (struct oce_mbx));
 840 1197          fwcmd = (struct mbx_create_nic_rq *)&mbx.payload;
 841 1198          mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
 842 1199              MBX_SUBSYSTEM_NIC,
 843 1200              OPCODE_CREATE_NIC_RQ, MBX_TIMEOUT_SEC,
 844      -            sizeof (struct mbx_create_nic_rq));
     1201 +            sizeof (struct mbx_create_nic_rq), 0);
 845 1202  
 846      -        fwcmd->params.req.num_pages = rq->ring->dbuf->num_pages;
     1203 +        fwcmd->params.req.num_pages = rq->ring->dbuf.num_pages;
 847 1204          fwcmd->params.req.frag_size = OCE_LOG2(rq->cfg.frag_size);
 848 1205          fwcmd->params.req.cq_id = cq->cq_id;
 849      -        oce_page_list(rq->ring->dbuf, fwcmd->params.req.pages,
 850      -            rq->ring->dbuf->num_pages);
     1206 +        oce_page_list(&rq->ring->dbuf, fwcmd->params.req.pages,
     1207 +            rq->ring->dbuf.num_pages);
 851 1208  
 852 1209          fwcmd->params.req.if_id = if_id;
 853 1210          fwcmd->params.req.max_frame_size = (uint16_t)rq->cfg.mtu;
 854 1211          fwcmd->params.req.is_rss_queue = rq->cfg.is_rss_queue;
 855 1212  
 856 1213          /* fill rest of mbx */
 857 1214          mbx.u0.s.embedded = 1;
 858 1215          mbx.payload_length = sizeof (struct mbx_create_nic_rq);
 859 1216          DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
 860 1217  
 861 1218          /* now post the command */
 862      -        ret = oce_mbox_post(dev, &mbx, NULL);
     1219 +        ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
 863 1220          if (ret != 0) {
 864 1221                  oce_log(dev, CE_WARN, MOD_CONFIG,
 865      -                    "RQ create failed: %d", ret);
 866      -                oce_cq_del(dev, cq);
     1222 +                    "RQ create failed: 0x%x", ret);
      1223 +                oce_cq_del(dev, cq, mode);
 867 1224                  return (ret);
 868 1225          }
 869 1226  
 870 1227          /* interpret the response */
 871 1228          rq->rq_id = LE_16(fwcmd->params.rsp.u0.s.rq_id);
 872 1229          rq->rss_cpuid = fwcmd->params.rsp.u0.s.rss_cpuid;
 873 1230          rq->cfg.if_id = if_id;
 874 1231          rq->qstate = QCREATED;
 875 1232          rq->cq = cq;
 876 1233  
 877 1234          /* set the Completion Handler */
 878 1235          rq->cq->cq_handler = oce_drain_rq_cq;
 879 1236          rq->cq->cb_arg  = (void *)rq;
     1237 +
 880 1238          /* reset the indices */
 881 1239          rq->ring->cidx = 0;
 882 1240          rq->ring->pidx = 0;
 883 1241          rq->buf_avail = 0;
 884      -        oce_log(dev, CE_NOTE, MOD_CONFIG, "RQ created, RQID : %d", rq->rq_id);
     1242 +        oce_log(dev, CE_NOTE, MOD_CONFIG, "RQ created, RQID : %d, cpu-id = %d",
     1243 +            rq->rq_id, rq->rss_cpuid);
 885 1244          return (0);
 886 1245  
 887 1246  }
 888 1247  
 889 1248  /*
 890 1249   * function to delete an RQ
 891 1250   *
 892 1251   * dev - software handle to the device
 893 1252   * rq - RQ to delete
 894 1253   *
 895 1254   * return none
 896 1255   */
 897 1256  static void
 898      -oce_rq_del(struct oce_dev *dev, struct oce_rq *rq)
     1257 +oce_rq_del(struct oce_dev *dev, struct oce_rq *rq, uint32_t mode)
 899 1258  {
 900 1259          struct oce_mbx mbx;
 901 1260          struct mbx_delete_nic_rq *fwcmd;
 902 1261  
 903 1262          ASSERT(dev != NULL);
 904 1263          ASSERT(rq != NULL);
 905 1264  
 906 1265          bzero(&mbx, sizeof (struct oce_mbx));
 907 1266  
     1267 +        mutex_enter(&rq->rx_lock);
 908 1268          /* delete the Queue  */
 909 1269          if (rq->qstate == QCREATED) {
 910 1270                  fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
 911 1271                  fwcmd->params.req.rq_id = rq->rq_id;
 912 1272                  (void) oce_destroy_q(dev, &mbx,
 913      -                    sizeof (struct mbx_delete_nic_rq), QTYPE_RQ);
 914      -                rq->qstate = QDELETED;
     1273 +                    sizeof (struct mbx_delete_nic_rq), QTYPE_RQ, mode);
 915 1274                  oce_clean_rq(rq);
 916 1275                  /* Delete the associated CQ */
 917      -                oce_cq_del(dev, rq->cq);
     1276 +                oce_cq_del(dev, rq->cq, mode);
 918 1277                  rq->cq = NULL;
 919 1278                  /* free up the posted buffers */
 920 1279                  oce_rq_discharge(rq);
     1280 +                (void) atomic_swap_32(&rq->qstate, QDELETED);
 921 1281          }
     1282 +        mutex_exit(&rq->rx_lock);
 922 1283  } /* oce_rq_del */
 923 1284  
 924 1285  /*
 925 1286   * function to arm an EQ so that it can generate events
 926 1287   *
 927 1288   * dev - software handle to the device
 928 1289   * qid - id of the EQ returned by the fw at the time of creation
 929 1290   * npopped - number of EQEs to arm with
 930 1291   * rearm - rearm bit
 931 1292   * clearint - bit to clear the interrupt condition because of which
↓ open down ↓ 2 lines elided ↑ open up ↑
 934 1295   * return none
 935 1296   */
 936 1297  void
 937 1298  oce_arm_eq(struct oce_dev *dev, int16_t qid, int npopped,
 938 1299      boolean_t rearm, boolean_t clearint)
 939 1300  {
 940 1301          eq_db_t eq_db = {0};
 941 1302  
 942 1303          eq_db.bits.rearm = rearm;
 943 1304          eq_db.bits.event  = B_TRUE;
     1305 +        eq_db.bits.eq_cq_extid =
     1306 +            (((uint64_t)qid & (uint64_t)DB_EQ_RING_ID_EXT_MASK) <<
     1307 +            (uint64_t)DB_EQ_RING_ID_EXT_MASK_SHIFT);
 944 1308          eq_db.bits.num_popped = npopped;
 945 1309          eq_db.bits.clrint = clearint;
 946 1310          eq_db.bits.qid = qid;
 947 1311          OCE_DB_WRITE32(dev, PD_EQ_DB, eq_db.dw0);
 948 1312  }
 949 1313  
 950 1314  /*
 951 1315   * function to arm a CQ with CQEs
 952 1316   *
 953 1317   * dev - software handle to the device
↓ open down ↓ 2 lines elided ↑ open up ↑
 956 1320   * rearm - rearm bit enable/disable
 957 1321   *
 958 1322   * return none
 959 1323   */
 960 1324  void
 961 1325  oce_arm_cq(struct oce_dev *dev, int16_t qid, int npopped,
 962 1326      boolean_t rearm)
 963 1327  {
 964 1328          cq_db_t cq_db = {0};
 965 1329          cq_db.bits.rearm = rearm;
     1330 +        cq_db.bits.eq_cq_extid =
     1331 +            (((uint64_t)qid & (uint64_t)DB_CQ_RING_ID_EXT_MASK) <<
     1332 +            (uint64_t)DB_CQ_RING_ID_EXT_MASK_SHIFT);
 966 1333          cq_db.bits.num_popped = npopped;
 967 1334          cq_db.bits.event = 0;
 968 1335          cq_db.bits.qid = qid;
 969 1336          OCE_DB_WRITE32(dev, PD_CQ_DB, cq_db.dw0);
 970 1337  }
 971 1338  
 972 1339  
 973 1340  /*
 974 1341   * function to delete a EQ, CQ, MQ, WQ or RQ
 975 1342   *
 973 1340   * dev - software handle to the device
 977 1344   * mbx - mbox command to send to the fw to delete the queue
 978 1345   *      mbx contains the queue information to delete
 979 1346   * req_size - the size of the mbx payload dependent on the qtype
 980 1347   * qtype - the type of queue i.e. EQ, CQ, MQ, WQ or RQ
 981 1348   *
 982 1349   * return DDI_SUCCESS => success, failure otherwise
 983 1350   */
 984 1351  int
 985 1352  oce_destroy_q(struct oce_dev *dev, struct oce_mbx  *mbx, size_t req_size,
 986      -    enum qtype qtype)
     1353 +    enum qtype qtype, uint32_t mode)
 987 1354  {
 988 1355          struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
 989 1356          int opcode;
 990 1357          int subsys;
 991 1358          int ret;
 992 1359  
 993 1360          switch (qtype) {
 994 1361          case QTYPE_EQ: {
 995 1362                  opcode = OPCODE_DESTROY_COMMON_EQ;
 996 1363                  subsys = MBX_SUBSYSTEM_COMMON;
↓ open down ↓ 19 lines elided ↑ open up ↑
1016 1383                  subsys = MBX_SUBSYSTEM_NIC;
1017 1384                  break;
1018 1385          }
1019 1386          default: {
1020 1387                  ASSERT(0);
1021 1388                  break;
1022 1389          }
1023 1390          }
1024 1391  
1025 1392          mbx_common_req_hdr_init(hdr, 0, 0, subsys,
1026      -            opcode, MBX_TIMEOUT_SEC, req_size);
     1393 +            opcode, MBX_TIMEOUT_SEC, req_size, 0);
1027 1394  
1028 1395          /* fill rest of mbx */
1029 1396          mbx->u0.s.embedded = 1;
1030 1397          mbx->payload_length = (uint32_t)req_size;
1031 1398          DW_SWAP(u32ptr(mbx), mbx->payload_length + OCE_BMBX_RHDR_SZ);
1032 1399  
1033 1400          /* send command */
1034      -        ret = oce_mbox_post(dev, mbx, NULL);
1035      -
     1401 +        ret = oce_issue_mbox_cmd(dev, mbx, MBX_TIMEOUT_SEC, mode);
1036 1402          if (ret != 0) {
1037      -                oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
1038      -                    "Failed to del q ");
     1403 +                oce_log(dev, CE_WARN, MOD_CONFIG,
     1404 +                    "Failed to del q: 0x%x", ret);
1039 1405          }
     1406 +
1040 1407          return (ret);
1041 1408  }
1042 1409  
1043 1410  /*
1044 1411   * function to set the delay parameter in the EQ for interrupt coalescing
1045 1412   *
1046 1413   * dev - software handle to the device
1047 1414   * eq_arr - array of EQ ids whose delay is to be modified
1048 1415   * eq_cnt - number of elements in eq_arr
1049 1416   * eq_delay - delay parameter
1050 1417   *
1051 1418   * return DDI_SUCCESS => success, failure otherwise
1052 1419   */
1053 1420  int
1054 1421  oce_set_eq_delay(struct oce_dev *dev, uint32_t *eq_arr,
1055      -    uint32_t eq_cnt, uint32_t eq_delay)
     1422 +    uint32_t eq_cnt, uint32_t eq_delay, uint32_t mode)
1056 1423  {
1057 1424          struct oce_mbx mbx;
1058 1425          struct mbx_modify_common_eq_delay *fwcmd;
1059 1426          int ret;
1060 1427          int neq;
1061 1428  
1062 1429          bzero(&mbx, sizeof (struct oce_mbx));
1063 1430          fwcmd = (struct mbx_modify_common_eq_delay *)&mbx.payload;
1064 1431  
1065 1432          /* fill the command */
↓ open down ↓ 3 lines elided ↑ open up ↑
1069 1436                  fwcmd->params.req.delay[neq].phase = 0;
1070 1437                  fwcmd->params.req.delay[neq].dm = eq_delay;
1071 1438  
1072 1439          }
1073 1440  
1074 1441          /* initialize the ioctl header */
1075 1442          mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
1076 1443              MBX_SUBSYSTEM_COMMON,
1077 1444              OPCODE_MODIFY_COMMON_EQ_DELAY,
1078 1445              MBX_TIMEOUT_SEC,
1079      -            sizeof (struct mbx_modify_common_eq_delay));
     1446 +            sizeof (struct mbx_modify_common_eq_delay), 0);
1080 1447  
1081 1448          /* fill rest of mbx */
1082 1449          mbx.u0.s.embedded = 1;
1083 1450          mbx.payload_length = sizeof (struct mbx_modify_common_eq_delay);
1084 1451          DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
1085 1452  
1086 1453          /* post the command */
1087      -        ret = oce_mbox_post(dev, &mbx, NULL);
     1454 +        ret = oce_issue_mbox_cmd(dev, &mbx, MBX_TIMEOUT_SEC, mode);
1088 1455          if (ret != 0) {
1089 1456                  oce_log(dev, CE_WARN, MOD_CONFIG,
1090      -                    "Failed to set EQ delay %d", ret);
     1457 +                    "Failed to set EQ delay 0x%x", ret);
1091 1458          }
1092 1459  
1093 1460          return (ret);
1094 1461  } /* oce_set_eq_delay */
1095 1462  
1096 1463  /*
1097 1464   * function to cleanup the eqs used during stop
1098 1465   *
1099 1466   * eq - pointer to event queue structure
1100 1467   *
↓ open down ↓ 22 lines elided ↑ open up ↑
1123 1490                  eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
1124 1491                  num_eqe++;
1125 1492          } /* for all EQEs */
1126 1493          if (num_eqe) {
1127 1494                  oce_arm_eq(dev, eq->eq_id, num_eqe, B_FALSE, B_TRUE);
1128 1495          }
1129 1496  } /* oce_drain_eq */
1130 1497  
1131 1498  
1132 1499  int
1133      -oce_init_txrx(struct oce_dev  *dev)
     1500 +oce_init_tx(struct oce_dev  *dev)
1134 1501  {
1135 1502          int qid = 0;
1136 1503  
1137      -        /* enable RSS if rx queues > 1 */
1138      -        dev->rss_enable = (dev->rx_rings > 1) ? B_TRUE : B_FALSE;
1139      -
1140 1504          for (qid = 0; qid < dev->tx_rings; qid++) {
1141      -                dev->wq[qid] = oce_wq_init(dev, dev->tx_ring_size,
1142      -                    NIC_WQ_TYPE_STANDARD);
1143      -                if (dev->wq[qid] == NULL) {
     1505 +                if (oce_wq_init(dev, &dev->wq[qid], dev->tx_ring_size,
     1506 +                    NIC_WQ_TYPE_STANDARD) != DDI_SUCCESS) {
1144 1507                          goto queue_fail;
1145 1508                  }
1146 1509          }
1147 1510  
1148      -        /* Now create the Rx Queues */
1149      -        /* qid 0 is always default non rss queue for rss */
1150      -        dev->rq[0] = oce_rq_init(dev, dev->rx_ring_size, dev->rq_frag_size,
1151      -            OCE_MAX_JUMBO_FRAME_SIZE, B_FALSE);
1152      -        if (dev->rq[0] == NULL) {
1153      -                goto queue_fail;
1154      -        }
1155      -
1156      -        for (qid = 1; qid < dev->rx_rings; qid++) {
1157      -                dev->rq[qid] = oce_rq_init(dev, dev->rx_ring_size,
1158      -                    dev->rq_frag_size, OCE_MAX_JUMBO_FRAME_SIZE,
1159      -                    dev->rss_enable);
1160      -                if (dev->rq[qid] == NULL) {
1161      -                        goto queue_fail;
1162      -                }
1163      -        }
1164      -
1165 1511          return (DDI_SUCCESS);
1166 1512  queue_fail:
1167      -        oce_fini_txrx(dev);
     1513 +        oce_fini_tx(dev);
1168 1514          return (DDI_FAILURE);
1169 1515  }
     1516 +
     1517 +
1170 1518  void
1171      -oce_fini_txrx(struct oce_dev *dev)
     1519 +oce_fini_tx(struct oce_dev *dev)
1172 1520  {
1173 1521          int qid;
1174 1522          int nqs;
1175 1523  
1176 1524          /* free all the tx rings */
1177 1525          /* nwqs is decremented in fini so copy count first */
1178 1526          nqs = dev->nwqs;
1179 1527          for (qid = 0; qid < nqs; qid++) {
1180      -                if (dev->wq[qid] != NULL) {
1181      -                        oce_wq_fini(dev, dev->wq[qid]);
1182      -                        dev->wq[qid] = NULL;
1183      -                }
     1528 +                oce_wq_fini(dev, &dev->wq[qid]);
1184 1529          }
1185      -        /* free all the rx rings */
1186      -        nqs = dev->nrqs;
1187      -        for (qid = 0; qid < nqs; qid++) {
1188      -                if (dev->rq[qid] != NULL) {
1189      -                        oce_rq_fini(dev, dev->rq[qid]);
1190      -                        dev->rq[qid] = NULL;
1191      -                }
1192      -        }
1193 1530  }
1194 1531  
     1532 +
1195 1533  int
1196 1534  oce_create_queues(struct oce_dev *dev)
1197 1535  {
     1536 +        int i, num_if;
1198 1537  
1199      -        int i;
1200      -        struct oce_eq *eq;
1201      -        struct oce_mq *mq;
     1538 +        for (num_if = 0; num_if < dev->num_rx_groups; num_if++) {
     1539 +                if (oce_create_nw_interface(dev, &dev->rx_group[num_if],
     1540 +                    MBX_BOOTSTRAP) != DDI_SUCCESS) {
     1541 +                        goto if_fail;
     1542 +                }
     1543 +        }
1202 1544  
     1545 +        /* create resources that are common to an oce instance */
1203 1546          for (i = 0; i < dev->num_vectors; i++) {
1204      -                eq = oce_eq_create(dev, EQ_LEN_1024, EQE_SIZE_4, 0);
1205      -                if (eq == NULL) {
     1547 +                if (oce_eq_create(dev, &dev->eq[i], EQ_LEN_1024, EQE_SIZE_4, 0,
     1548 +                    MBX_BOOTSTRAP) != DDI_SUCCESS) {
1206 1549                          goto rings_fail;
1207 1550                  }
1208      -                dev->eq[i] = eq;
1209 1551          }
1210      -        for (i = 0; i < dev->nwqs; i++) {
1211      -                if (oce_wq_create(dev->wq[i], dev->eq[0]) != 0)
1212      -                        goto rings_fail;
1213      -        }
1214 1552  
1215      -        for (i = 0; i < dev->nrqs; i++) {
1216      -                if (oce_rq_create(dev->rq[i], dev->if_id,
1217      -                    dev->neqs > 1 ? dev->eq[1 + i] : dev->eq[0]) != 0)
1218      -                        goto rings_fail;
1219      -        }
1220      -        mq = oce_mq_create(dev, dev->eq[0], 64);
1221      -        if (mq == NULL)
     1553 +        /* create tx rings */
     1554 +        if (dev->num_tx_groups == 1) {
     1555 +                for (i = 0; i < dev->tx_rings; i++) {
     1556 +                        if (oce_wq_create(&dev->wq[i], &dev->eq[i],
     1557 +                            MBX_BOOTSTRAP) != 0) {
     1558 +                                dev->tx_rings = i;
     1559 +                                goto rings_fail;
     1560 +                        }
     1561 +                        oce_log(dev, CE_NOTE, MOD_CONFIG,
     1562 +                            "wq[%d] created on eq[%d]=%p wq=%p",
     1563 +                            i, i, (void *)&dev->eq[i], (void *)&dev->wq[i]);
     1564 +                }
     1565 +        } else {
     1566 +                /* Tx groups not supported */
     1567 +                oce_log(dev, CE_WARN, MOD_CONFIG,
     1568 +                    "unsupported number of tx groups %d", dev->num_tx_groups);
1222 1569                  goto rings_fail;
1223      -        dev->mq = mq;
     1570 +        }
     1571 +
1224 1572          return (DDI_SUCCESS);
1225 1573  rings_fail:
1226 1574          oce_delete_queues(dev);
     1575 +if_fail:
     1576 +        for (i = 0; i < num_if; i++) {
     1577 +                oce_delete_nw_interface(dev, &dev->rx_group[i], MBX_BOOTSTRAP);
     1578 +        }
1227 1579          return (DDI_FAILURE);
     1580 +}
1228 1581  
     1582 +int
     1583 +oce_create_mcc_queue(struct oce_dev *dev)
     1584 +{
     1585 +        if (LANCER_CHIP(dev)) {
     1586 +                dev->mq = oce_mq_create_ext_v1(dev, &dev->eq[0], MCC_Q_LEN);
     1587 +        } else {
     1588 +                dev->mq = oce_mq_create_ext_v0(dev, &dev->eq[0], MCC_Q_LEN);
     1589 +        }
     1590 +
     1591 +        if (dev->mq == NULL) {
     1592 +                oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
     1593 +                    "Extended MQ is not supported reverting to Legacy MQ mode");
     1594 +                dev->mq = oce_mq_create(dev, &dev->eq[0], MCC_Q_LEN);
     1595 +                if (dev->mq == NULL)
     1596 +                        return (DDI_FAILURE);
     1597 +        }
     1598 +
     1599 +        return (DDI_SUCCESS);
1229 1600  }
1230 1601  
     1602 +int
     1603 +oce_create_group(struct oce_dev *dev, oce_group_t *grp, uint32_t mode)
     1604 +{
     1605 +        int eqidx, ret, i;
     1606 +        char itbl[OCE_ITBL_SIZE];
     1607 +        char hkey[OCE_HKEY_SIZE];
     1608 +
     1609 +        for (i = 0; i < grp->num_rings; i++) {
     1610 +                if (i != 0) {
     1611 +                        grp->ring[i].rx->cfg.is_rss_queue =
     1612 +                            grp->rss_enable;
     1613 +                        eqidx = (grp->eq_idx + i - grp->rss_enable) %
     1614 +                            dev->num_vectors;
     1615 +                } else {
     1616 +                        grp->ring[i].rx->cfg.is_rss_queue = B_FALSE;
     1617 +                        eqidx = grp->eq_idx % dev->num_vectors;
     1618 +                }
     1619 +
     1620 +                ret = oce_rq_create(grp->ring[i].rx,
     1621 +                    grp->if_id, &dev->eq[eqidx], mode);
     1622 +
     1623 +                if (ret != 0) {
     1624 +                        goto cleanup_group;
     1625 +                }
     1626 +                oce_log(dev, CE_NOTE, MOD_CONFIG,
     1627 +                    "rq[%d][%d] created on eq[%d]=%p rq=%p, rss=%d",
     1628 +                    grp->grp_num, i, eqidx,
     1629 +                    (void *)&dev->eq[eqidx],
     1630 +                    (void *)grp->ring[i].rx,
     1631 +                    grp->ring[i].rx->cfg.is_rss_queue);
     1632 +        }
     1633 +
     1634 +        if (grp->rss_enable) {
     1635 +                (void) oce_group_create_itbl(grp, itbl);
     1636 +
     1637 +                (void) oce_gen_hkey(hkey, OCE_HKEY_SIZE);
     1638 +                ret = oce_config_rss(dev, grp->if_id, hkey,
     1639 +                    itbl, OCE_ITBL_SIZE, OCE_DEFAULT_RSS_TYPE, B_FALSE,
     1640 +                    mode);
     1641 +                if (ret != DDI_SUCCESS) {
     1642 +                        oce_log(dev, CE_WARN, MOD_CONFIG,
     1643 +                            "Failed to Configure RSS 0x%x", ret);
     1644 +                        goto cleanup_group;
     1645 +                }
     1646 +        }
     1647 +
     1648 +        return (DDI_SUCCESS);
     1649 +cleanup_group:
     1650 +        oce_delete_group(dev, grp);
     1651 +        return (DDI_FAILURE);
     1652 +}
     1653 +
1231 1654  void
1232      -oce_delete_queues(struct oce_dev *dev)
     1655 +oce_delete_mcc_queue(struct oce_dev *dev)
1233 1656  {
1234      -        int i;
1235      -        int neqs = dev->neqs;
1236 1657          if (dev->mq != NULL) {
1237 1658                  oce_mq_del(dev, dev->mq);
1238 1659                  dev->mq = NULL;
1239 1660          }
     1661 +}
1240 1662  
1241      -        for (i = 0; i < dev->nrqs; i++) {
1242      -                oce_rq_del(dev, dev->rq[i]);
1243      -        }
     1663 +void
     1664 +oce_delete_queues(struct oce_dev *dev)
     1665 +{
     1666 +        int i;
     1667 +        int neqs = dev->neqs;
     1668 +
1244 1669          for (i = 0; i < dev->nwqs; i++) {
1245      -                oce_wq_del(dev, dev->wq[i]);
     1670 +                oce_wq_del(dev, &dev->wq[i], MBX_BOOTSTRAP);
1246 1671          }
     1672 +
1247 1673          /* delete as many eqs as the number of vectors */
1248 1674          for (i = 0; i < neqs; i++) {
1249      -                oce_eq_del(dev, dev->eq[i]);
1250      -                dev->eq[i] = NULL;
     1675 +                oce_eq_del(dev, &dev->eq[i], MBX_BOOTSTRAP);
1251 1676          }
     1677 +
     1678 +        for (i = dev->num_rx_groups - 1; i >= 0; i--) {
     1679 +                oce_delete_nw_interface(dev, &dev->rx_group[i], MBX_BOOTSTRAP);
     1680 +        }
1252 1681  }
1253 1682  
1254 1683  void
1255      -oce_dev_rss_ready(struct oce_dev *dev)
     1684 +oce_delete_group(struct oce_dev *dev, oce_group_t *grp)
1256 1685  {
1257      -        uint8_t dev_index = 0;
1258      -        uint8_t adapter_rss = 0;
     1686 +        int i;
1259 1687  
1260      -        /* Return if rx_rings <= 1 (No RSS) */
1261      -        if (dev->rx_rings <= 1) {
1262      -                oce_log(dev, CE_NOTE, MOD_CONFIG,
1263      -                    "Rx rings = %d, Not enabling RSS", dev->rx_rings);
1264      -                return;
     1688 +        for (i = 0; i < grp->num_rings; i++) {
     1689 +                oce_rq_del(dev, grp->ring[i].rx, MBX_BOOTSTRAP);
1265 1690          }
     1691 +}
1266 1692  
1267      -        /*
1268      -         * Count the number of PCI functions enabling RSS on this
1269      -         * adapter
1270      -         */
1271      -        while (dev_index < MAX_DEVS) {
1272      -                if ((oce_dev_list[dev_index] != NULL) &&
1273      -                    (dev->pci_bus == oce_dev_list[dev_index]->pci_bus) &&
1274      -                    (dev->pci_device == oce_dev_list[dev_index]->pci_device) &&
1275      -                    (oce_dev_list[dev_index]->rss_enable)) {
1276      -                        adapter_rss++;
     1693 +
     1694 +void
     1695 +oce_free_queues(struct oce_dev *dev)
     1696 +{
     1697 +        int i = 0;
     1698 +
     1699 +        for (i = 0; i < dev->rx_rings; i++) {
     1700 +                mutex_destroy(&dev->rq[i].rq_fini_lock);
     1701 +        }
     1702 +        if (dev->rq != NULL) {
     1703 +                kmem_free(dev->rq,
     1704 +                    sizeof (struct oce_rq) * dev->rx_rings);
     1705 +                dev->rq = NULL;
     1706 +        }
     1707 +        if (dev->wq != NULL) {
     1708 +                kmem_free(dev->wq,
     1709 +                    sizeof (struct oce_wq) * dev->tx_rings);
     1710 +                dev->wq = NULL;
     1711 +        }
     1712 +        if (dev->cq != NULL) {
     1713 +                kmem_free(dev->cq,
     1714 +                    sizeof (struct oce_cq *) * OCE_MAX_CQ);
     1715 +                dev->cq = NULL;
     1716 +        }
     1717 +        if (dev->eq != NULL) {
     1718 +                for (i = 0; i < OCE_MAX_EQ; i++) {
     1719 +                        mutex_destroy(&dev->eq[i].lock);
1277 1720                  }
1278      -                dev_index++;
     1721 +
     1722 +                kmem_free(dev->eq,
     1723 +                    sizeof (struct oce_eq) * OCE_MAX_EQ);
     1724 +                dev->eq = NULL;
1279 1725          }
     1726 +}
1280 1727  
1281      -        /*
1282      -         * If there are already MAX_RSS_PER_ADAPTER PCI functions using
1283      -         * RSS on this adapter, reduce the number of rx rings to 1
1284      -         * (No RSS)
1285      -         */
1286      -        if (adapter_rss >= MAX_RSS_PER_ADAPTER) {
1287      -                dev->rx_rings = 1;
     1728 +int
     1729 +oce_alloc_queues(struct oce_dev *dev)
     1730 +{
     1731 +        int i, j, nrings = 0;
     1732 +
     1733 +        /* Allocate space for RQ array */
     1734 +        dev->rq = kmem_zalloc(sizeof (struct oce_rq) * dev->rx_rings,
     1735 +            KM_NOSLEEP);
     1736 +
     1737 +        if (dev->rq == NULL) {
     1738 +                return (DDI_FAILURE);
1288 1739          }
     1740 +        for (i = 0; i < dev->rx_rings; i++) {
     1741 +                mutex_init(&dev->rq[i].rq_fini_lock, NULL, MUTEX_DRIVER,
     1742 +                    DDI_INTR_PRI(dev->intr_pri));
     1743 +        }
     1744 +
     1745 +        /* Allocate space for WQ array */
     1746 +        dev->wq = kmem_zalloc(sizeof (struct oce_wq) * dev->tx_rings,
     1747 +            KM_NOSLEEP);
     1748 +
     1749 +        if (dev->wq == NULL) {
     1750 +                goto alloc_fail;
     1751 +        }
     1752 +
     1753 +        dev->cq = kmem_zalloc(sizeof (struct oce_cq *) * OCE_MAX_CQ,
     1754 +            KM_NOSLEEP);
     1755 +
     1756 +        if (dev->cq == NULL) {
     1757 +                goto alloc_fail;
     1758 +        }
     1759 +
     1760 +        dev->eq = kmem_zalloc(sizeof (struct oce_eq) * OCE_MAX_EQ,
     1761 +            KM_NOSLEEP);
     1762 +        if (dev->eq == NULL) {
     1763 +                goto alloc_fail;
     1764 +        }
     1765 +
     1766 +        for (i = 0; i < OCE_MAX_EQ; i++) {
     1767 +                dev->eq[i].idx = i;
     1768 +                mutex_init(&dev->eq[i].lock, NULL, MUTEX_DRIVER,
     1769 +                    DDI_INTR_PRI(dev->intr_pri));
     1770 +        }
     1771 +
     1772 +        for (i = 0; i < dev->tx_rings; i++) {
     1773 +                dev->wq[i].parent = (void *)dev;
     1774 +                dev->default_tx_rings[i].tx = &dev->wq[i];
     1775 +        }
     1776 +
     1777 +        for (i = 0; i < dev->num_rx_groups; i++) {
     1778 +                for (j = 0; j < dev->rx_group[i].num_rings; j++) {
     1779 +                        dev->rq[nrings].parent = (void *)dev;
     1780 +                        dev->rx_group[i].ring[j].rx = &dev->rq[nrings];
     1781 +                        dev->rx_group[i].ring[j].rx->grp = &dev->rx_group[i];
     1782 +                        nrings++;
     1783 +                }
     1784 +        }
     1785 +
     1786 +        return (DDI_SUCCESS);
     1787 +alloc_fail:
     1788 +        oce_free_queues(dev);
     1789 +        return (DDI_FAILURE);
1289 1790  }
    