1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2009-2012 Emulex. All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27 
  28 
  29 /*
  30  * Source file containing Ring handling functions
  31  *
  32  */
  33 
  34 #include <oce_impl.h>
  35 
/*
 * Partition the device's RX rings into MAC ring groups and count how
 * many groups end up RSS-capable (dev->rss_cnt).  The split is driven
 * by dev->rx_rings, dev->rx_rings_per_group and the chip generation:
 *
 *  - BE2/BE3 (OC_CNA_GEN2/GEN3): at most one RSS group (group 0);
 *    any remaining rings become single-ring non-RSS groups.
 *  - Lancer: every group gets rx_rings_per_group rings; RSS is
 *    enabled for any group with more than one ring.
 *
 * Side effects: may shrink dev->rx_rings/dev->tx_rings to match what
 * the hardware supports, and always sets num_tx_groups to 1.
 */
void
oce_group_rings(struct oce_dev *dev)
{
        uint32_t i;
        /*
         * decide the ring groups based on dev->rx_rings &
         * dev->rx_rings_per_group
         */

        dev->rss_cnt = 0;
        if ((dev->chip_rev == OC_CNA_GEN2) || (dev->chip_rev == OC_CNA_GEN3)) {
                /*
                 * BE2 supports only one RSS group per function.
                 * In a multi-group configuration Group-0 will have RSS
                 * The remaining groups (group 1 to N-1) will have single
                 * RX ring
                 */

                /* without the RSS capability, fall back to 1 ring each way */
                if (!(dev->function_caps & BE_FUNCTION_CAPS_RSS)) {
                        dev->rx_rings = dev->tx_rings = 1;
                }

                /* RSS grouping */
                dev->rx_group[0].parent = dev;
                dev->rx_group[0].num_rings =
                    min(dev->rx_rings, dev->rx_rings_per_group);
                if (dev->rx_group[0].num_rings > 1) {
                        dev->rx_group[0].rss_enable = B_TRUE;
                        dev->rss_cnt++;
                } else {
                        dev->rx_group[0].rss_enable = B_FALSE;
                }

                /*
                 * non-RSS groups: one group for each leftover ring
                 * (plus group 0 itself), capped at OCE_MAX_RING_GROUPS.
                 * rx_rings is then recomputed to the rings actually used.
                 */
                dev->num_rx_groups =
                    min(dev->rx_rings - dev->rx_group[0].num_rings + 1,
                    OCE_MAX_RING_GROUPS);
                dev->rx_rings =
                    dev->rx_group[0].num_rings + dev->num_rx_groups - 1;

                for (i = 1; i < dev->num_rx_groups; i++) {
                        dev->rx_group[i].parent = dev;
                        dev->rx_group[i].num_rings = 1;
                        dev->rx_group[i].rss_enable = B_FALSE;
                        /*
                         * eq_idx arithmetic relies on boolean_t B_TRUE
                         * being 1: an RSS-enabled predecessor contributes
                         * one EQ fewer than its ring count — presumably
                         * its rings share an EQ; confirm against the EQ
                         * setup code.
                         */
                        dev->rx_group[i].eq_idx = dev->rx_group[i-1].eq_idx +
                            dev->rx_group[i-1].num_rings -
                            dev->rx_group[i-1].rss_enable;
                }
                /* default single tx group */
                dev->num_tx_groups   = 1;
        } else if (LANCER_CHIP(dev)) {
                /* clamp group size, then use only whole groups of rings */
                if (dev->rx_rings_per_group > dev->rx_rings)
                        dev->rx_rings_per_group = dev->rx_rings;
                dev->num_rx_groups = dev->rx_rings / dev->rx_rings_per_group;
                dev->rx_rings = dev->num_rx_groups *  dev->rx_rings_per_group;
                for (i = 0; i < dev->num_rx_groups; i++) {
                        dev->rx_group[i].parent = dev;
                        dev->rx_group[i].num_rings = dev->rx_rings_per_group;
                        if (dev->rx_rings_per_group > 1) {
                                dev->rx_group[i].rss_enable = B_TRUE;
                                dev->rss_cnt++;
                        } else {
                                dev->rx_group[i].rss_enable = B_FALSE;
                        }

                        /* same B_TRUE == 1 dependence as the BEx path above */
                        if (i != 0)
                                dev->rx_group[i].eq_idx =
                                    dev->rx_group[i-1].eq_idx +
                                    dev->rx_group[i-1].num_rings -
                                    dev->rx_group[i-1].rss_enable;
                }
                /* default single tx group */
                dev->num_tx_groups   = 1;
        }
}
 111 
 112 
 113 /*
 114  * Decide Ring Groups information (no. of groups, no. of rings
 115  * in each group, rss or no_rss), based on profile, num_rx_rings,
 116  * num_tx_rings input by the user in the config settings.
 117  */
 118 boolean_t
 119 oce_fill_rings_capab(struct oce_dev *dev, mac_capab_rings_t *capab)
 120 {
 121 
 122         switch (capab->mr_type) {
 123         case MAC_RING_TYPE_RX:
 124                 capab->mr_group_type = MAC_GROUP_TYPE_STATIC;
 125                 capab->mr_gnum = dev->num_rx_groups;
 126                 capab->mr_rnum = dev->rx_rings;
 127                 capab->mr_rget = oce_get_ring;
 128                 capab->mr_gget = oce_get_group;
 129                 capab->mr_gaddring = NULL;
 130                 capab->mr_gremring = NULL;
 131                 break;
 132 
 133         case MAC_RING_TYPE_TX:
 134                 capab->mr_group_type = MAC_GROUP_TYPE_STATIC;
 135                 /*
 136                  * XXX: num_tx_groups is always 1, and for some reason
 137                  * mr_gnum has to be 0 or else we trigger an assertion in
 138                  * mac_init_rings() at mac.c:4022. This could be a bug in
 139                  * our GLDv3, I don't know. No other driver seems to use
 140                  * mr_gnum != 0 for TX.  -- Hans
 141                  */
 142                 ASSERT(dev->num_tx_groups == 1);
 143                 capab->mr_gnum = dev->num_tx_groups - 1;
 144                 capab->mr_rnum = dev->tx_rings;
 145                 capab->mr_rget = oce_get_ring;
 146                 capab->mr_gget = oce_get_group;
 147                 break;
 148         default:
 149                 return (B_FALSE);
 150         }
 151         return (B_TRUE);
 152 
 153 }
 154 
 155 /*
 156  * Driver entry points for groups/rings registration
 157  */
 158 
 159 void
 160 oce_get_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
 161     const int ring_index, mac_ring_info_t *ring_info,
 162     mac_ring_handle_t ring_handle)
 163 {
 164         struct oce_dev *dev = arg;
 165         oce_ring_t *ring;
 166 
 167         switch (rtype) {
 168         case MAC_RING_TYPE_RX: {
 169 
 170                 ring = &dev->rx_group[grp_index].ring[ring_index];
 171                 ring->rx->handle = ring_handle;
 172                 ring_info->mri_driver = (mac_ring_driver_t)ring->rx;
 173                 ring_info->mri_start = oce_ring_start;
 174                 ring_info->mri_stop = oce_ring_stop;
 175                 ring_info->mri_poll = oce_ring_rx_poll;
 176                 ring_info->mri_stat = oce_ring_rx_stat;
 177                 ring_info->mri_intr.mi_enable = oce_ring_intr_enable;
 178                 ring_info->mri_intr.mi_disable = oce_ring_intr_disable;
 179                 ring_info->mri_intr.mi_handle = (mac_intr_handle_t)ring->rx;
 180 
 181                 break;
 182         }
 183         case MAC_RING_TYPE_TX: {
 184 
 185                 ring = &dev->default_tx_rings[ring_index];
 186                 /* mac_intr_t not applicable for TX */
 187                 ring->tx->handle = ring_handle;
 188                 ring_info->mri_driver = (mac_ring_driver_t)ring->tx;
 189                 ring_info->mri_start = NULL;
 190                 ring_info->mri_stop = NULL;
 191                 ring_info->mri_tx = oce_ring_tx;
 192                 ring_info->mri_stat = oce_ring_tx_stat;
 193 
 194                 break;
 195         }
 196         default:
 197                 break;
 198         }
 199 }
 200 
 201 
 202 void
 203 oce_get_group(void *arg, mac_ring_type_t rtype, const int grp_index,
 204     mac_group_info_t *grp_info, mac_group_handle_t grp_handle)
 205 {
 206         struct oce_dev *dev = arg;
 207         oce_group_t *grp = NULL;
 208         uint32_t i = 0;
 209 
 210         switch (rtype) {
 211 
 212         case MAC_RING_TYPE_RX: {
 213                 grp = &dev->rx_group[grp_index];
 214                 grp->handle = grp_handle;
 215                 grp->grp_type = rtype;
 216                 grp->grp_num = grp_index;
 217                 /* Initialize the pmac-ids to invalid values */
 218                 while (i < OCE_MAX_PMAC_PER_GRP) {
 219                         grp->pmac_ids[i] = INVALID_PMAC_ID;
 220                         i++;
 221                 }
 222 
 223                 grp_info->mgi_driver = (mac_group_driver_t)grp;
 224                 grp_info->mgi_start = oce_m_start_group;
 225                 grp_info->mgi_stop = oce_m_stop_group;
 226                 grp_info->mgi_addmac = oce_group_addmac;
 227                 grp_info->mgi_remmac = oce_group_remmac;
 228                 grp_info->mgi_count = grp->num_rings;
 229                 break;
 230         }
 231 
 232         case MAC_RING_TYPE_TX:
 233                 /* default TX group of 1 */
 234                 grp_info->mgi_driver = NULL;
 235                 grp_info->mgi_start = NULL;
 236                 grp_info->mgi_stop = NULL;
 237                 grp_info->mgi_count = dev->tx_rings;
 238                 break;
 239 
 240         default:
 241                 break;
 242         }
 243 }
 244 
 245 /*
 246  * Ring level operations
 247  */
/*
 * mri_start entry point for an RX ring: record the MAC generation
 * number, bind the ring's interrupt handle for the MAC layer, and
 * post receive buffers.  Always returns 0.
 */
int
oce_ring_start(mac_ring_driver_t ring_handle, uint64_t gen_number)
{
        struct oce_rq *rx_ring = (struct oce_rq *)ring_handle;
        struct oce_dev *dev = rx_ring->parent;

        mutex_enter(&rx_ring->rx_lock);
        rx_ring->gen_number = gen_number;
        /* associate the ring with the interrupt of its CQ's event queue */
        mac_ring_intr_set(rx_ring->handle,
            dev->htable[rx_ring->cq->eq->idx]);
        (void) oce_start_rq(rx_ring);
        mutex_exit(&rx_ring->rx_lock);

        return (0);
}
 263 
/*
 * mri_stop entry point for an RX ring: disable the ring's interrupt
 * and detach the interrupt handle from the MAC layer.
 */
void
oce_ring_stop(mac_ring_driver_t ring_handle)
{
        struct oce_rq *rx_ring = (struct oce_rq *)ring_handle;

        (void) oce_ring_intr_disable((mac_intr_handle_t)ring_handle);
        mac_ring_intr_set(rx_ring->handle, NULL);
}
 272 
 273 mblk_t *
 274 oce_ring_tx(void *ring_handle, mblk_t *mp)
 275 {
 276         struct oce_wq *wq = ring_handle;
 277         mblk_t *nxt_pkt;
 278         mblk_t *rmp = NULL;
 279         struct oce_dev *dev = wq->parent;
 280 
 281         if (dev->suspended) {
 282                 freemsg(mp);
 283                 return (NULL);
 284         }
 285         while (mp != NULL) {
 286                 /* Save the Pointer since mp will be freed in case of copy */
 287                 nxt_pkt = mp->b_next;
 288                 mp->b_next = NULL;
 289                 /* Hardcode wq since we have only one */
 290                 rmp = oce_send_packet(wq, mp);
 291                 if (rmp != NULL) {
 292                         /* restore the chain */
 293                         rmp->b_next = nxt_pkt;
 294                         break;
 295                 }
 296                 mp  = nxt_pkt;
 297         }
 298 
 299         if (wq->resched) {
 300                 if (atomic_cas_uint(&wq->qmode, OCE_MODE_POLL, OCE_MODE_INTR)
 301                     == OCE_MODE_POLL) {
 302                         oce_arm_cq(wq->parent, wq->cq->cq_id, 0, B_TRUE);
 303                         wq->last_armed = ddi_get_lbolt();
 304                 }
 305         }
 306 
 307         return (rmp);
 308 }
 309 
 310 mblk_t  *
 311 oce_ring_rx_poll(void *ring_handle, int nbytes)
 312 {
 313         struct oce_rq *rx_ring = ring_handle;
 314         mblk_t *mp = NULL;
 315         struct oce_dev *dev = rx_ring->parent;
 316 
 317         if (dev->suspended || rx_ring == NULL || nbytes == 0)
 318                 return (NULL);
 319 
 320         mp = oce_drain_rq_cq(rx_ring, nbytes, 0);
 321         return (mp);
 322 }
 323 
 324 int
 325 oce_group_addmac(void *group_handle, const uint8_t *mac)
 326 {
 327         oce_group_t *grp = group_handle;
 328         struct oce_dev *dev;
 329         int pmac_index = 0;
 330         int ret;
 331 
 332         dev = grp->parent;
 333 
 334         oce_log(dev, CE_NOTE, MOD_CONFIG,
 335             "oce_group_addmac , grp_type = %d, grp_num = %d, "
 336             "mac = %x:%x:%x:%x:%x:%x",
 337             grp->grp_type, grp->grp_num, mac[0], mac[1], mac[2],
 338             mac[3], mac[4], mac[5]);
 339 
 340         while ((pmac_index < OCE_MAX_PMAC_PER_GRP) &&
 341             (grp->pmac_ids[pmac_index] != INVALID_PMAC_ID)) {
 342                 pmac_index++;
 343         }
 344         if ((pmac_index >= OCE_MAX_PMAC_PER_GRP) ||
 345             (grp->num_pmac >= OCE_MAX_PMAC_PER_GRP) ||
 346             (dev->num_pmac >= OCE_MAX_SMAC_PER_DEV)) {
 347                 oce_log(dev, CE_NOTE, MOD_CONFIG,
 348                     "PMAC exceeding limits, num_pmac=%d, num_pmac=%d, index=%d",
 349                     grp->num_pmac, dev->num_pmac, pmac_index);
 350                 return (ENOSPC);
 351         }
 352 
 353         /* Add the New MAC */
 354         ret = oce_add_mac(dev, grp->if_id, mac, &grp->pmac_ids[pmac_index],
 355             MBX_BOOTSTRAP);
 356         if (ret != DDI_SUCCESS) {
 357                 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
 358                     "MAC addition failed ");
 359                 return (EIO);
 360         }
 361 
 362         grp->num_pmac++;
 363         dev->num_pmac++;
 364         bcopy(mac, &grp->mac_addr[pmac_index], ETHERADDRL);
 365         return (0);
 366 }
 367 
 368 
 369 int
 370 oce_group_remmac(void *group_handle, const uint8_t *mac)
 371 {
 372         oce_group_t *grp = group_handle;
 373         struct oce_dev *dev;
 374         int ret;
 375         int pmac_index = 0;
 376 
 377         dev = grp->parent;
 378 
 379         while ((pmac_index < OCE_MAX_PMAC_PER_GRP)) {
 380                 if (bcmp(mac, &grp->mac_addr[pmac_index], ETHERADDRL) == 0) {
 381                         break;
 382                 }
 383                 pmac_index++;
 384         }
 385 
 386         if (pmac_index >= OCE_MAX_PMAC_PER_GRP) {
 387                 oce_log(dev, CE_WARN, MOD_CONFIG,
 388                     "Could not find the MAC: %x:%x:%x:%x:%x:%x",
 389                     mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
 390                 return (EINVAL);
 391         }
 392 
 393         /* Delete previous one */
 394         ret = oce_del_mac(dev, grp->if_id, &grp->pmac_ids[pmac_index],
 395             MBX_BOOTSTRAP);
 396         if (ret != DDI_SUCCESS) {
 397                 oce_log(dev, CE_WARN, MOD_CONFIG,
 398                     "Failed to delete MAC: %x:%x:%x:%x:%x:%x, ret=0x%x",
 399                     mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], ret);
 400                 return (EIO);
 401         }
 402 
 403         grp->num_pmac--;
 404         dev->num_pmac--;
 405         grp->pmac_ids[pmac_index] = INVALID_PMAC_ID;
 406         bzero(&grp->mac_addr[pmac_index], ETHERADDRL);
 407         return (0);
 408 }
 409 
 410 
/*
 * mgi_start entry point: mark the group as MAC-started and bring it
 * up.  GROUP_MAC_STARTED is set *before* calling oce_start_group()
 * because oce_start_group() only creates the hardware group when that
 * flag is set; the flag is cleared again if the start fails.
 */
int
oce_m_start_group(mac_group_driver_t group_h)
{
        oce_group_t *grp = (oce_group_t *)group_h;
        int ret;
        mutex_enter(&grp->grp_lock);
        grp->state |= GROUP_MAC_STARTED;
        ret = oce_start_group(grp, B_TRUE);
        if (ret != DDI_SUCCESS) {
                grp->state &= ~GROUP_MAC_STARTED;
        }
        mutex_exit(&grp->grp_lock);
        return (ret);
}
 425 
 426 
/*
 * mgi_stop entry point: tear the group down and clear the MAC-started
 * flag.  oce_stop_group() is called while GROUP_MAC_STARTED is still
 * set — it checks that flag to decide whether hardware teardown is
 * needed — so the flag is cleared only afterwards.
 */
void
oce_m_stop_group(mac_group_driver_t group_h)
{
        oce_group_t *grp = (oce_group_t *)group_h;
        mutex_enter(&grp->grp_lock);
        oce_stop_group(grp, B_TRUE);
        grp->state &= ~GROUP_MAC_STARTED;
        mutex_exit(&grp->grp_lock);
}
 436 
 437 
/*
 * Bring an RX group into service.  When alloc_buffer is set and the
 * group has not been initialized yet, allocate receive buffers for
 * every ring in the group (GROUP_INIT records that this was done).
 * If the group is flagged GROUP_MAC_STARTED (set by the caller,
 * oce_m_start_group()), also create the hardware group.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
oce_start_group(oce_group_t *grp, boolean_t alloc_buffer)
{
        struct oce_dev *dev = grp->parent;
        int qidx;
        int max_frame_sz;

        /* largest frame a ring must hold: MTU + VLAN ethernet header + tag */
        max_frame_sz = dev->mtu + sizeof (struct ether_vlan_header) + VTAG_SIZE;
        /* allocate Rx buffers */
        if (alloc_buffer && !(grp->state & GROUP_INIT)) {
                for (qidx = 0; qidx < grp->num_rings; qidx++) {
                        if (oce_rq_init(dev, grp->ring[qidx].rx,
                            dev->rx_ring_size, dev->rq_frag_size,
                            max_frame_sz) != DDI_SUCCESS) {
                                goto group_fail;
                        }
                }
                grp->state |= GROUP_INIT;
        }

        if (grp->state & GROUP_MAC_STARTED) {

                if (oce_create_group(dev, grp, MBX_ASYNC_MQ) != DDI_SUCCESS) {
                        goto group_fail;
                }
                oce_log(dev, CE_NOTE, MOD_CONFIG,
                    "group %d started", grp->grp_num);
        }
        return (DDI_SUCCESS);

group_fail:
        /*
         * NOTE(review): rings initialized before a failure are not torn
         * down here — presumably oce_stop_group() handles that via
         * GROUP_INIT; confirm against callers.
         */
        oce_log(dev, CE_WARN, MOD_CONFIG,
            "Failed to setup group %x", grp->grp_num);
        return (DDI_FAILURE);
}
 473 
 474 static int
 475 oce_check_pending(oce_group_t *grp)
 476 {
 477 
 478         struct oce_dev *dev = grp->parent;
 479         int qidx;
 480         int pending = 0;
 481         for (qidx = 0; qidx < grp->num_rings; qidx++) {
 482                 pending = oce_rx_pending(dev, grp->ring[qidx].rx,
 483                     DEFAULT_DRAIN_TIME);
 484                 if (pending) {
 485                         break;
 486                 }
 487         }
 488         return (pending);
 489 }
 490 
/*
 * Take an RX group out of service.  If the group was MAC-started,
 * delete the hardware group first and then wait (bounded: at most
 * three oce_check_pending() passes) for the stack to return loaned
 * receive buffers.  When free_buffer is set and the group was
 * initialized, free each ring's buffers — or defer the free to the
 * buffer-return path (QFINI_PENDING) if some are still outstanding.
 */
void
oce_stop_group(oce_group_t *grp, boolean_t free_buffer)
{
        struct oce_dev *dev = grp->parent;
        struct oce_rq *rq;
        int qidx;
        int pending = 0;

        if (grp->state & GROUP_MAC_STARTED) {
                oce_delete_group(dev, grp);
                /* wait for receive buffers to be freed by stack */
                while (oce_check_pending(grp) != 0) {
                        oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
                            "Wait if buffers are pending with stack\n");
                        /* give up after three attempts; fini is deferred below */
                        if (pending++ >= 2) {
                                break;
                        }
                }
        }

        /* free Rx buffers */
        if (free_buffer && (grp->state & GROUP_INIT)) {
                for (qidx = 0; qidx < grp->num_rings; qidx++) {
                        rq = grp->ring[qidx].rx;
                        mutex_enter(&rq->rq_fini_lock);
                        if (rq->pending == 0) {
                                if (rq->qstate == QDELETED) {
                                        oce_rq_fini(dev, rq);
                                }
                        } else {
                                /* buffers still with the stack: free later */
                                rq->qstate = QFINI_PENDING;
                        }
                        mutex_exit(&rq->rq_fini_lock);
                }
                grp->state &= ~GROUP_INIT;
        }
        oce_log(dev, CE_NOTE, MOD_CONFIG, "group %d stopped", grp->grp_num);
}
 529 
 530 
 531 /* Internally halt the rings on group basis (eg. IRM) */
 532 void
 533 oce_suspend_group_rings(oce_group_t *grp)
 534 {
 535         int qidx;
 536 
 537         if (grp->state & GROUP_MAC_STARTED) {
 538                 grp->state |= GROUP_SUSPEND;
 539                 for (qidx = 0; qidx < grp->num_rings; qidx++) {
 540                         (void) oce_ring_intr_disable((mac_intr_handle_t)
 541                             grp->ring[qidx].rx);
 542                         mac_ring_intr_set(grp->ring[qidx].rx->handle, NULL);
 543                 }
 544         }
 545 }
 546 
 547 
 548 /* Internally resume the rings on group basis (Eg IRM) */
/*
 * Internally resume a started group's rings (e.g. after interrupt
 * re-allocation / IRM): for group 0, reprogram the multicast table;
 * re-add each recorded per-group MAC; then rebind each ring's
 * interrupt handle and repost receive buffers.  Returns DDI_SUCCESS
 * or the first failing mailbox return code.
 */
int
oce_resume_group_rings(oce_group_t *grp)
{
        struct oce_dev *dev = grp->parent;
        int qidx, pmac_idx, ret = DDI_SUCCESS;

        if (grp->state & GROUP_MAC_STARTED) {

                /* only group 0 owns the device-wide multicast filter */
                if (grp->grp_num == 0) {
                        if (dev->num_mca > OCE_MAX_MCA) {
                                /*
                                 * Too many entries for the table: program a
                                 * truncated list with the B_TRUE flag —
                                 * presumably "promiscuous multicast"; confirm
                                 * against oce_set_multicast_table().
                                 */
                                ret = oce_set_multicast_table(dev, dev->if_id,
                                    &dev->multi_cast[0], OCE_MAX_MCA, B_TRUE,
                                    MBX_BOOTSTRAP);
                        } else {
                                ret = oce_set_multicast_table(dev, dev->if_id,
                                    &dev->multi_cast[0], dev->num_mca, B_FALSE,
                                    MBX_BOOTSTRAP);
                        }
                        if (ret != 0) {
                                oce_log(dev, CE_WARN, MOD_CONFIG,
                                    "set mcast failed 0x%x", ret);
                                return (ret);
                        }
                }

                /* Add the group based MACs */
                for (pmac_idx = 0; pmac_idx < grp->num_pmac; pmac_idx++) {
                        if (grp->pmac_ids[pmac_idx] != INVALID_PMAC_ID) {
                                ret = oce_add_mac(dev, grp->if_id,
                                    (uint8_t *)&grp->mac_addr[pmac_idx],
                                    &grp->pmac_ids[pmac_idx], MBX_BOOTSTRAP);
                                if (ret != DDI_SUCCESS) {
                                        oce_log(dev, CE_WARN, MOD_CONFIG,
                                            "MAC addition failed grp = %p, "
                                            "idx = %d, ret = %x",
                                            (void *)grp, pmac_idx, ret);
                                        return (ret);
                                }
                        }
                }

                /* rebind ring interrupts and repost receive buffers */
                for (qidx = 0; qidx < grp->num_rings; qidx++) {
                        mac_ring_intr_set(grp->ring[qidx].rx->handle,
                            dev->htable[grp->ring[qidx].rx->cq->eq->idx]);
                        (void) oce_start_rq(grp->ring[qidx].rx);
                }
                grp->state &= ~GROUP_SUSPEND;
        }
        return (ret);
}
 599 
 600 
 601 int
 602 oce_ring_rx_stat(mac_ring_driver_t ring_handle, uint_t type, uint64_t *stat)
 603 {
 604         struct oce_rq *rx_ring = (struct oce_rq *)ring_handle;
 605         struct oce_dev *dev = rx_ring->parent;
 606 
 607         if (dev->suspended || !(dev->state & STATE_MAC_STARTED)) {
 608                 return (ECANCELED);
 609         }
 610 
 611         switch (type) {
 612         case MAC_STAT_RBYTES:
 613                 *stat = rx_ring->stat_bytes;
 614                 break;
 615 
 616         case MAC_STAT_IPACKETS:
 617                 *stat = rx_ring->stat_pkts;
 618                 break;
 619 
 620         default:
 621                 *stat = 0;
 622                 return (ENOTSUP);
 623         }
 624 
 625         return (DDI_SUCCESS);
 626 }
 627 
 628 int
 629 oce_ring_tx_stat(mac_ring_driver_t ring_handle, uint_t type, uint64_t *stat)
 630 {
 631         struct oce_wq *tx_ring = (struct oce_wq *)ring_handle;
 632         struct oce_dev *dev = tx_ring->parent;
 633 
 634         if (dev->suspended || !(dev->state & STATE_MAC_STARTED)) {
 635                 return (ECANCELED);
 636         }
 637 
 638         switch (type) {
 639         case MAC_STAT_OBYTES:
 640                 *stat = tx_ring->stat_bytes;
 641         break;
 642 
 643         case MAC_STAT_OPACKETS:
 644                 *stat = tx_ring->stat_pkts;
 645                 break;
 646 
 647         default:
 648                 *stat = 0;
 649                 return (ENOTSUP);
 650         }
 651 
 652         return (DDI_SUCCESS);
 653 }