2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /* Copyright © 2003-2011 Emulex. All rights reserved.  */
  23 
  24 /*
  25  * Source file containing the Receive Path handling
  26  * functions
  27  */
  28 #include <oce_impl.h>
  29 
  30 
  31 void oce_rx_pool_free(char *arg);
  32 static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
  33 static int oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq,
  34     size_t size, int flags);
  35 
  36 static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
  37     struct oce_nic_rx_cqe *cqe);
  38 static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
  39         struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
  40 static int oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost);
  41 static void oce_rx_insert_tag(mblk_t *mp, uint16_t vtag);
  42 static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
  43 static inline void oce_rx_drop_pkt(struct oce_rq *rq,
  44     struct oce_nic_rx_cqe *cqe);
  45 static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
  46 static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
  47 static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs);
  48 
  49 #pragma inline(oce_rx)
  50 #pragma inline(oce_rx_bcopy)
  51 #pragma inline(oce_rq_charge)
  52 #pragma inline(oce_rx_insert_tag)
  53 #pragma inline(oce_set_rx_oflags)
  54 #pragma inline(oce_rx_drop_pkt)
  55 #pragma inline(oce_rqb_alloc)
  56 #pragma inline(oce_rqb_free)
  57 #pragma inline(oce_rq_post_buffer)
  58 
  59 static ddi_dma_attr_t oce_rx_buf_attr = {
  60         DMA_ATTR_V0,            /* version number */
  61         0x0000000000000000ull,  /* low address */
  62         0xFFFFFFFFFFFFFFFFull,  /* high address */
  63         0x00000000FFFFFFFFull,  /* dma counter max */
  64         OCE_DMA_ALIGNMENT,      /* alignment */
  65         0x000007FF,             /* burst sizes */
  66         0x00000001,             /* minimum transfer size */
  67         0x00000000FFFFFFFFull,  /* maximum transfer size */
  68         0xFFFFFFFFFFFFFFFFull,  /* maximum segment size */
  69         1,                      /* scatter/gather list length */
  70         0x00000001,             /* granularity */
  71         DDI_DMA_FLAGERR|DDI_DMA_RELAXED_ORDERING                /* DMA flags */
  72 };
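/*
 * Note: dma_attr_sgllen is 1, so each RX buffer must fit in a single
 * physically contiguous DMA cookie.  DDI_DMA_FLAGERR requests FMA-style
 * DMA error reporting and DDI_DMA_RELAXED_ORDERING allows relaxed write
 * ordering for these buffers.
 */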
  73 
  74 /*
  75  * function to create a DMA buffer pool for RQ
  76  *
  77  * rq - pointer to the RQ structure
  78  * buf_size - size of each buffer (unused here; the RQ frag_size
  79  *            plus OCE_RQE_BUF_HEADROOM is used instead)
  80  *
  81  * return DDI_SUCCESS => success, DDI_FAILURE otherwise
  82  */
  83 int
  84 oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
  85 {
  86         int size;
  87         int cnt;
  88         int ret;
  89         oce_rq_bdesc_t *rqbd;
  90 
  91         _NOTE(ARGUNUSED(buf_size));
  92         rqbd = rq->rq_bdesc_array;
  93         size = rq->cfg.frag_size + OCE_RQE_BUF_HEADROOM;
  94         for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
  95                 rq->rqb_freelist[cnt] = rqbd;
  96                 ret = oce_rqb_ctor(rqbd, rq,
  97                     size, (DDI_DMA_RDWR|DDI_DMA_STREAMING));
  98                 if (ret != DDI_SUCCESS) {
  99                         goto rqb_fail;
 100                 }
 101         }
 102         rq->rqb_free = rq->cfg.nbufs;
 103         rq->rqb_rc_head = 0;
 104         rq->rqb_next_free = 0;
 105         return (DDI_SUCCESS);
 106 
 107 rqb_fail:
 108         oce_rqb_cache_destroy(rq);
 109         return (DDI_FAILURE);
 110 } /* oce_rqb_cache_create */
 111 
 112 /*
 113  * function to Destroy RQ DMA buffer cache
 114  *
 115  * rq - pointer to rq structure
 116  *
 117  * return none
 118  */
 119 void
 120 oce_rqb_cache_destroy(struct oce_rq *rq)
 121 {
 122         oce_rq_bdesc_t *rqbd = NULL;
 123         int cnt;
 124 
 125         rqbd = rq->rq_bdesc_array;
 126         for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
 127                 oce_rqb_dtor(rqbd);
 128         }
 129 } /* oce_rqb_cache_destroy */
 130 
 131 /*
 132  * RQ buffer destructor function
 133  *
 134  * rqbd - pointer to rq buffer descriptor
 135  *
 136  * return none
 137  */
 138 static  void
 139 oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
 140 {
 141         if ((rqbd == NULL) || (rqbd->rq == NULL)) {
 142                 return;
 143         }
 144         if (rqbd->mp != NULL) {
 145                 rqbd->fr_rtn.free_arg = NULL;
 146                 freemsg(rqbd->mp);
 147                 rqbd->mp = NULL;
 148         }
 149         oce_free_dma_buffer(rqbd->rq->parent, rqbd->rqb);
 150 } /* oce_rqb_dtor */
 151 
 152 /*
 153  * RQ buffer constructor function
 154  *
 155  * rqbd - pointer to rq buffer descriptor
 156  * rq - pointer to RQ structure
 157  * size - size of the buffer
 158  * flags - DMA binding flags (DDI_DMA_RDWR | DDI_DMA_STREAMING)
 159  *
 160  * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 161  */
 162 static int
 163 oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq, size_t size, int flags)
 164 {
 165         struct oce_dev *dev;
 166         oce_dma_buf_t *dbuf;
 167 
 168         dev = rq->parent;
 169 
 170         dbuf  = oce_alloc_dma_buffer(dev, size, &oce_rx_buf_attr, flags);
 171         if (dbuf == NULL) {
 172                 return (DDI_FAILURE);
 173         }
 174 
 175         /* Set the call back function parameters */
 176         rqbd->fr_rtn.free_func = (void (*)())oce_rx_pool_free;
 177         rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
 178         rqbd->mp = desballoc((uchar_t *)(dbuf->base),
 179             dbuf->size, 0, &rqbd->fr_rtn);
 180         if (rqbd->mp == NULL) {
 181                 oce_free_dma_buffer(dev, dbuf);
 182                 return (DDI_FAILURE);
 183         }
 184         rqbd->rqb = dbuf;
 185         rqbd->rq = rq;
 186         rqbd->frag_addr.dw.addr_lo = ADDR_LO(dbuf->addr + OCE_RQE_BUF_HEADROOM);
 187         rqbd->frag_addr.dw.addr_hi = ADDR_HI(dbuf->addr + OCE_RQE_BUF_HEADROOM);
 188         rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
 189 
 190         return (DDI_SUCCESS);
 191 } /* oce_rqb_ctor */
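/*
 * Buffer layout produced by the constructor (sketch):
 *
 *   dbuf->base                      dbuf->base + OCE_RQE_BUF_HEADROOM
 *   |<--- OCE_RQE_BUF_HEADROOM ---->|<------ received frame data ------>|
 *                                   ^ frag_addr given to hw, mp->b_rptr
 *
 * The headroom in front of b_rptr is what oce_rx_insert_tag() later uses
 * to prepend a VLAN tag without reallocating the mblk.
 */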
 192 
 193 /*
 194  * RQ buffer allocator function
 195  *
 196  * rq - pointer to RQ structure
 197  *
 198  * return pointer to RQ buffer descriptor
 199  */
 200 static inline oce_rq_bdesc_t *
 201 oce_rqb_alloc(struct oce_rq *rq)
 202 {
 203         oce_rq_bdesc_t *rqbd;
 204         uint32_t free_index;
 205         free_index = rq->rqb_next_free;
 206         rqbd = rq->rqb_freelist[free_index];
 207         rq->rqb_freelist[free_index] = NULL;
 208         rq->rqb_next_free = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
 209         return (rqbd);
 210 } /* oce_rqb_alloc */
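/*
 * oce_rqb_alloc() takes no locks and does not check for an empty
 * freelist; callers are expected to reserve descriptors up front, as
 * oce_drain_rq_cq() does before taking the zero-copy receive path.
 * A minimal sketch of that pattern:
 *
 *	if (oce_atomic_reserve(&rq->rqb_free, frag_cnt) >= 0) {
 *		rqbd = oce_rqb_alloc(rq);	(one per fragment)
 *	} else {
 *		(fall back to the bcopy path)
 *	}
 */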
 211 
 212 /*
 
 325  *
 326  * dev - software handle to the device
 327  * rq - pointer to the RQ to charge
 328  * cqe - Pointer to Completion Q entry
 329  *
 330  * return mblk pointer =>  success, NULL  => error
 331  */
 332 static inline mblk_t *
 333 oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
 334 {
 335         mblk_t *mp;
 336         int pkt_len;
 337         int32_t frag_cnt = 0;
 338         mblk_t **mblk_tail;
 339         mblk_t  *mblk_head;
 340         int frag_size;
 341         oce_rq_bdesc_t *rqbd;
 342         uint16_t cur_index;
 343         oce_ring_buffer_t *ring;
 344         int i;
 345 
 346         frag_cnt  = cqe->u0.s.num_fragments & 0x7;
 347         mblk_head = NULL;
 348         mblk_tail = &mblk_head;
 349 
 350         ring = rq->ring;
 351         cur_index = ring->cidx;
 352 
 353         /* Get the relevant Queue pointers */
 354         pkt_len = cqe->u0.s.pkt_size;
 355         for (i = 0; i < frag_cnt; i++) {
 356                 rqbd = rq->shadow_ring[cur_index];
 357                 if (rqbd->mp == NULL) {
 358                         rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
 359                             rqbd->rqb->size, 0, &rqbd->fr_rtn);
 360                         if (rqbd->mp == NULL) {
 361                                 return (NULL);
 362                         }
 363 
 364                         rqbd->mp->b_rptr =
 365                             (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
 366                 }
 367 
 368                 mp = rqbd->mp;
 369                 frag_size  = (pkt_len > rq->cfg.frag_size) ?
 370                     rq->cfg.frag_size : pkt_len;
 371                 mp->b_wptr = mp->b_rptr + frag_size;
 372                 pkt_len   -= frag_size;
 373                 mp->b_next = mp->b_cont = NULL;
 374                 /* Chain the message mblks */
 375                 *mblk_tail = mp;
 376                 mblk_tail = &mp->b_cont;
 377                 (void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
 378                 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
 379         }
 380 
 381         if (mblk_head == NULL) {
 382                 oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
 383                 return (NULL);
 384         }
 385 
 386         /* replace the consumed buffers with new ones */
 387         (void) oce_rq_charge(rq, frag_cnt, B_FALSE);
 388         atomic_add_32(&rq->pending, frag_cnt);
 389         return (mblk_head);
 390 } /* oce_rx */
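/*
 * oce_rx() loans the DMA buffers up the stack (zero copy) and relies on
 * oce_rx_pool_free() to reclaim them later, while oce_rx_bcopy() below
 * copies the frame into a freshly allocated mblk so the RQ buffers can be
 * recycled immediately.  oce_drain_rq_cq() picks the copy path for small
 * frames and whenever the free-buffer reserve runs low.
 */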
 391 
 392 static inline mblk_t *
 393 oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
 394 {
 395         mblk_t *mp;
 396         int pkt_len;
 397         int alloc_len;
 398         int32_t frag_cnt = 0;
 399         int frag_size;
 400         oce_rq_bdesc_t *rqbd;
 401         unsigned char  *rptr;
 402         uint32_t cur_index;
 403         oce_ring_buffer_t *ring;
 404         oce_rq_bdesc_t **shadow_rq;
 405         int cnt = 0;
 406 
 407         _NOTE(ARGUNUSED(dev));
 408 
 409         shadow_rq = rq->shadow_ring;
 410         pkt_len = cqe->u0.s.pkt_size;
 411         alloc_len = pkt_len + OCE_RQE_BUF_HEADROOM;
 412         frag_cnt = cqe->u0.s.num_fragments & 0x7;
 413 
 414         mp = allocb(alloc_len, BPRI_HI);
 415         if (mp == NULL) {
 416                 return (NULL);
 417         }
 418 
 419         mp->b_rptr += OCE_RQE_BUF_HEADROOM;
 420         rptr = mp->b_rptr;
 421         mp->b_wptr = mp->b_rptr + pkt_len;
 422         ring = rq->ring;
 423 
 424         cur_index = ring->cidx;
 425         for (cnt = 0; cnt < frag_cnt; cnt++) {
 426                 rqbd = shadow_rq[cur_index];
 427                 frag_size  = (pkt_len > rq->cfg.frag_size) ?
 428                     rq->cfg.frag_size : pkt_len;
 429                 (void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
 430                 bcopy(rqbd->rqb->base + OCE_RQE_BUF_HEADROOM, rptr, frag_size);
 431                 rptr += frag_size;
 432                 pkt_len   -= frag_size;
 433                 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
 434         }
 435         (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
 436         return (mp);
 437 }
 438 
 439 static inline void
 440 oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
 441 {
 442         int csum_flags = 0;
 443 
 444         /* set flags */
 445         if (cqe->u0.s.ip_cksum_pass) {
 446                 csum_flags |= HCK_IPV4_HDRCKSUM_OK;
 447         }
 448 
 449         if (cqe->u0.s.l4_cksum_pass) {
 450                 csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
 451         }
 452 
 453         if (csum_flags) {
 454                 (void) mac_hcksum_set(mp, 0, 0, 0, 0, csum_flags);
 455         }
 456 }
 457 
 458 static inline void
 459 oce_rx_insert_tag(mblk_t *mp, uint16_t vtag)
 460 {
 461         struct ether_vlan_header *ehp;
 462 
 463         (void) memmove(mp->b_rptr - VTAG_SIZE,
 464             mp->b_rptr, 2 * ETHERADDRL);
 465         mp->b_rptr -= VTAG_SIZE;
 466         ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
 467         ehp->ether_tpid = htons(ETHERTYPE_VLAN);
 468         ehp->ether_tci = LE_16(vtag);
 469 }
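/*
 * The tag is inserted in place: the 12 bytes of MAC addresses are slid
 * back VTAG_SIZE bytes into the mblk headroom and the TPID/TCI pair is
 * written into the gap this opens in front of the original EtherType.
 */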
 470 
 471 static inline void
 472 oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
 473 {
 474         int frag_cnt;
 475         oce_rq_bdesc_t *rqbd;
 476         oce_rq_bdesc_t  **shadow_rq;
 477         shadow_rq = rq->shadow_ring;
 478         for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
 479                 rqbd = shadow_rq[rq->ring->cidx];
 480                 oce_rqb_free(rq, rqbd);
 481                 RING_GET(rq->ring, 1);
 482         }
 483 }
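/*
 * Dropping a completion returns each fragment's buffer descriptor to the
 * freelist and advances the RQ consumer index, so no buffers are leaked.
 */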
 484 
 485 
 486 /*
 487  * function to process a Receive queue
 488  *
 489  * arg - pointer to the RQ to drain
 490  *
 491  * return number of cqes processed
 492  */
 493 uint16_t
 494 oce_drain_rq_cq(void *arg)
 495 {
 496         struct oce_nic_rx_cqe *cqe;
 497         struct oce_rq *rq;
 498         mblk_t *mp = NULL;
 499         mblk_t *mblk_head;
 500         mblk_t **mblk_tail;
 501         uint16_t num_cqe = 0;
 502         struct oce_cq  *cq;
 503         struct oce_dev *dev;
 504         int32_t frag_cnt;
 505         uint32_t nbufs = 0;
 506 
 507         rq = (struct oce_rq *)arg;
 508         dev = rq->parent;
 509         cq = rq->cq;
 510         mblk_head = NULL;
 511         mblk_tail = &mblk_head;
 512 
 513         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
 514 
 515         (void) DBUF_SYNC(cq->ring->dbuf, DDI_DMA_SYNC_FORKERNEL);
 516         /* dequeue till you reach an invalid cqe */
 517         while (RQ_CQE_VALID(cqe)) {
 518                 DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
 519                 frag_cnt = cqe->u0.s.num_fragments & 0x7;
 520                 /* if insufficient buffers to charge then do copy */
 521                 if ((cqe->u0.s.pkt_size < dev->rx_bcopy_limit) ||
 522                     (oce_atomic_reserve(&rq->rqb_free, frag_cnt) < 0)) {
 523                         mp = oce_rx_bcopy(dev, rq, cqe);
 524                 } else {
 525                         mp = oce_rx(dev, rq, cqe);
 526                         if (mp == NULL) {
 527                                 atomic_add_32(&rq->rqb_free, frag_cnt);
 528                                 mp = oce_rx_bcopy(dev, rq, cqe);
 529                         }
 530                 }
 531                 if (mp != NULL) {
 532                         if (dev->function_mode & FLEX10_MODE) {
 533                                 if (cqe->u0.s.vlan_tag_present &&
 534                                     cqe->u0.s.qnq) {
 535                                         oce_rx_insert_tag(mp,
 536                                             cqe->u0.s.vlan_tag);
 537                                 }
 538                         } else if (cqe->u0.s.vlan_tag_present) {
 539                                 oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
 540                         }
 541                         oce_set_rx_oflags(mp, cqe);
 542 
 543                         *mblk_tail = mp;
 544                         mblk_tail = &mp->b_next;
 545                 } else {
 546                         (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
 547                 }
 548                 RING_GET(rq->ring, frag_cnt);
 549                 rq->buf_avail -= frag_cnt;
 550                 nbufs += frag_cnt;
 551 
 552                 oce_rq_post_buffer(rq, frag_cnt);
 553                 RQ_CQE_INVALIDATE(cqe);
 554                 RING_GET(cq->ring, 1);
 555                 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
 556                     struct oce_nic_rx_cqe);
 557                 num_cqe++;
 558                 /* limit the CQEs processed per interrupt */
 559                 if (num_cqe > dev->rx_pkt_per_intr) {
 560                         break;
 561                 }
 562         } /* for all valid CQEs */
 563 
 564         if (mblk_head) {
 565                 mac_rx(dev->mac_handle, NULL, mblk_head);
 566         }
 567         oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
 568         return (num_cqe);
 569 } /* oce_drain_rq_cq */
 570 
 571 /*
 572  * function to free mblk databuffer to the RQ pool
 573  *
 574  * arg - pointer to the receive buffer descriptor
 575  *
 576  * return none
 577  */
 578 void
 579 oce_rx_pool_free(char *arg)
 580 {
 581         oce_rq_bdesc_t *rqbd;
 582         struct oce_rq  *rq;
 583 
 584         /* During destroy, arg will be NULL */
 585         if (arg == NULL) {
 586                 return;
 587         }
 588 
 589         /* retrieve the pointers from arg */
 590         rqbd = (oce_rq_bdesc_t *)(void *)arg;
 591         rq = rqbd->rq;
 592         rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
 593             rqbd->rqb->size, 0, &rqbd->fr_rtn);
 594 
 595         if (rqbd->mp) {
 596                 rqbd->mp->b_rptr =
 597                     (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
 598         }
 599 
 600         oce_rqb_free(rq, rqbd);
 601         (void) atomic_dec_32(&rq->pending);
 602 } /* rx_pool_free */
 603 
 604 /*
 605  * function to stop the RX
 606  *
 607  * rq - pointer to RQ structure
 608  *
 609  * return none
 610  */
 611 void
 612 oce_clean_rq(struct oce_rq *rq)
 613 {
 614         uint16_t num_cqe = 0;
 615         struct oce_cq  *cq;
 616         struct oce_dev *dev;
 617         struct oce_nic_rx_cqe *cqe;
 618         int32_t ti = 0;
 619 
 620         dev = rq->parent;
 621         cq = rq->cq;
 622         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
 623         /* dequeue till you reach an invalid cqe */
 624         for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
 625 
 626                 while (RQ_CQE_VALID(cqe)) {
 627                         DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
 628                         oce_rx_drop_pkt(rq, cqe);
 629                         atomic_add_32(&rq->buf_avail,
 630                             -(cqe->u0.s.num_fragments & 0x7));
 631                         oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
 632                         RQ_CQE_INVALIDATE(cqe);
 633                         RING_GET(cq->ring, 1);
 634                         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
 635                             struct oce_nic_rx_cqe);
 636                         num_cqe++;
 637                 }
 638                 OCE_MSDELAY(1);
 639         }
 640 } /* oce_clean_rq */
 641 
 642 /*
 643  * function to start the RX
 644  *
 645  * rq - pointer to RQ structure
 646  *
 647  * return number of RQEs charged.
 648  */
 649 int
 650 oce_start_rq(struct oce_rq *rq)
 651 {
 652         int ret = 0;
 653         int to_charge = 0;
 654         struct oce_dev *dev = rq->parent;
 655         to_charge = rq->cfg.q_len - rq->buf_avail;
 656         to_charge = min(to_charge, rq->rqb_free);
 657         atomic_add_32(&rq->rqb_free, -to_charge);
 658         (void) oce_rq_charge(rq, to_charge, B_FALSE);
 659         /* ok to do it here since Rx has not even started */
 660         oce_rq_post_buffer(rq, to_charge);
 661         oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
 662         return (ret);
 663 } /* oce_start_rq */
 664 
 665 /* Check for rx buffers still held by the stack */
 666 int
 667 oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout)
 668 {
 669         int ti;
 670         _NOTE(ARGUNUSED(dev));
 671 
 672         for (ti = 0; ti < timeout; ti++) {
 673                 if (rq->pending > 0) {
 674                         OCE_MSDELAY(10);
 675                         continue;
 676                 } else {
 677                         rq->pending = 0;
 678                         break;
 679                 }
 680         }
 681         return (rq->pending);
 682 }
 
 
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2009-2012 Emulex. All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27 
  28 
  29 /*
  30  * Source file containing the Receive Path handling
  31  * functions
  32  */
  33 #include <oce_impl.h>
  34 
  35 
  36 void oce_rx_pool_free(char *arg);
  37 static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
  38 
  39 static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
  40     struct oce_nic_rx_cqe *cqe);
  41 static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
  42         struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
  43 static int oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost);
  44 static inline void oce_rx_insert_tag(struct oce_dev *dev, mblk_t *mp,
  45     uint16_t vtag);
  46 static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
  47 static inline void oce_rx_drop_pkt(struct oce_rq *rq,
  48     struct oce_nic_rx_cqe *cqe);
  49 static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
  50 static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
  51 static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs);
  52 static boolean_t oce_check_tagged(struct oce_dev *dev,
  53     struct oce_nic_rx_cqe *cqe);
  54 
  55 #pragma inline(oce_rx)
  56 #pragma inline(oce_rx_bcopy)
  57 #pragma inline(oce_rq_charge)
  58 #pragma inline(oce_rx_insert_tag)
  59 #pragma inline(oce_set_rx_oflags)
  60 #pragma inline(oce_rx_drop_pkt)
  61 #pragma inline(oce_rqb_alloc)
  62 #pragma inline(oce_rqb_free)
  63 #pragma inline(oce_rq_post_buffer)
  64 
  65 static ddi_dma_attr_t oce_rx_buf_attr = {
  66         DMA_ATTR_V0,            /* version number */
  67         0x0000000000000000ull,  /* low address */
  68         0xFFFFFFFFFFFFFFFFull,  /* high address */
  69         0x00000000FFFFFFFFull,  /* dma counter max */
  70         OCE_DMA_ALIGNMENT,      /* alignment */
  71         0x000007FF,             /* burst sizes */
  72         0x00000001,             /* minimum transfer size */
  73         0x00000000FFFFFFFFull,  /* maximum transfer size */
  74         0xFFFFFFFFFFFFFFFFull,  /* maximum segment size */
  75         1,                      /* scatter/gather list length */
  76         0x00000001,             /* granularity */
  77         DDI_DMA_RELAXED_ORDERING                /* DMA flags */
  78 };
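/*
 * dma_attr_granular, dma_attr_sgllen and DDI_DMA_FLAGERR are adjusted at
 * runtime in oce_rqb_cache_create() based on the buffer size, the
 * device's FMA capabilities and whether the single-cookie allocation
 * succeeds.
 */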
  79 
  80 /*
  81  * function to create a DMA buffer pool for RQ
  82  *
  83  * rq - pointer to the RQ structure
  84  * buf_size - size of each buffer; the pool is carved from a single
  85  *            DMA allocation of buf_size * nbufs bytes
  86  *
  87  * return DDI_SUCCESS => success, DDI_FAILURE otherwise
  88  */
  89 int
  90 oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
  91 {
  92         oce_rq_bdesc_t *rqbd;
  93         struct oce_dev *dev;
  94         uint32_t size;
  95         uint64_t paddr;
  96         caddr_t vaddr;
  97         int ncookies = 0;
  98         int bufs_per_cookie = 0;
  99         int ridx = 0;
 100         int i = 0;
 101         ddi_dma_cookie_t cookie;
 102         int ret;
 103 
 104         rqbd = rq->rq_bdesc_array;
 105         size = buf_size * rq->cfg.nbufs;
 106         dev = rq->parent;
 107 
 108         oce_rx_buf_attr.dma_attr_granular = (uint32_t)buf_size;
 109         if (DDI_FM_DMA_ERR_CAP(dev->fm_caps)) {
 110                 oce_rx_buf_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
 111         }
 112 
 113         /* Try to get one big chunk; with an IOMMU the cookie count is normally 1 */
 114         oce_rx_buf_attr.dma_attr_sgllen = 1;
 115         ret = oce_alloc_dma_buffer(dev, &rq->rqb, size, &oce_rx_buf_attr,
 116             (DDI_DMA_RDWR|DDI_DMA_STREAMING));
 117         /* retry allowing one cookie per memory page */
 118         if (ret != DDI_SUCCESS) {
 119                 oce_rx_buf_attr.dma_attr_sgllen =
 120                     size/ddi_ptob(dev->dip, (ulong_t)1) + 2;
 121                 ret = oce_alloc_dma_buffer(dev, &rq->rqb, size,
 122                     &oce_rx_buf_attr, (DDI_DMA_RDWR | DDI_DMA_STREAMING));
 123                 if (ret != DDI_SUCCESS) {
 124                         return (DDI_FAILURE);
 125                 }
 126         }
 127 
 128         ncookies = rq->rqb.ncookies;
 129         /* Set the starting phys and vaddr */
 130         /* paddr = rq->rqb.addr; */
 131         vaddr = rq->rqb.base;
 132         cookie = rq->rqb.cookie;
 133 
 134         do {
 135                 paddr = cookie.dmac_laddress;
 136                 bufs_per_cookie = cookie.dmac_size/buf_size;
 137                 for (i = 0; i < bufs_per_cookie; i++, rqbd++) {
 138                         rqbd->mp = desballoc((uchar_t *)vaddr, buf_size, 0,
 139                             &rqbd->fr_rtn);
 140                         if (rqbd->mp == NULL) {
 141                                 goto desb_fail;
 142                         }
 143                         /* Set the call back function parameters */
 144                         rqbd->fr_rtn.free_func = (void (*)())oce_rx_pool_free;
 145                         rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
 146                         /* Populate the DMA object for each buffer */
 147                         rqbd->rqb.acc_handle = rq->rqb.acc_handle;
 148                         rqbd->rqb.dma_handle = rq->rqb.dma_handle;
 149                         rqbd->rqb.base = vaddr;
 150                         rqbd->rqb.addr = paddr;
 151                         rqbd->rqb.len  = buf_size;
 152                         rqbd->rqb.size = buf_size;
 153                         rqbd->rqb.off  = ridx * buf_size;
 154                         rqbd->rq = rq;
 155                         rqbd->frag_addr.dw.addr_lo = ADDR_LO(paddr);
 156                         rqbd->frag_addr.dw.addr_hi = ADDR_HI(paddr);
 157                         rq->rqb_freelist[ridx] = rqbd;
 158                         /* increment the addresses */
 159                         paddr += buf_size;
 160                         vaddr += buf_size;
 161                         ridx++;
 162                         if (ridx >= rq->cfg.nbufs) {
 163                                 break;
 164                         }
 165                 }
 166                 if (--ncookies > 0) {
 167                         (void) ddi_dma_nextcookie(rq->rqb.dma_handle, &cookie);
 168                 }
 169         } while (ncookies > 0);
 170 
 171         rq->rqb_free = rq->cfg.nbufs;
 172         rq->rqb_rc_head = 0;
 173         rq->rqb_next_free = 0;
 174         return (DDI_SUCCESS);
 175 
 176 desb_fail:
 177         oce_rqb_cache_destroy(rq);
 178         return (DDI_FAILURE);
 179 } /* oce_rqb_cache_create */
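/*
 * Unlike the per-buffer constructor used in older versions of this file,
 * the cache above is carved out of one large DMA allocation: each cookie
 * is split into buf_size chunks, every chunk gets an rq_bdesc sharing the
 * common dma/acc handles with a per-buffer offset, and teardown only has
 * to free rq->rqb once.
 */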
 180 
 181 /*
 182  * function to Destroy RQ DMA buffer cache
 183  *
 184  * rq - pointer to rq structure
 185  *
 186  * return none
 187  */
 188 void
 189 oce_rqb_cache_destroy(struct oce_rq *rq)
 190 {
 191         oce_rq_bdesc_t *rqbd = NULL;
 192         int cnt;
 193 
 194         rqbd = rq->rq_bdesc_array;
 195         for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
 196                 oce_rqb_dtor(rqbd);
 197         }
 198 
 199         oce_free_dma_buffer(rq->parent, &rq->rqb);
 200 } /* oce_rqb_cache_destroy */
 201 
 202 /*
 203  * RQ buffer destructor function
 204  *
 205  * rqbd - pointer to rq buffer descriptor
 206  *
 207  * return none
 208  */
 209 static  void
 210 oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
 211 {
 212         if ((rqbd == NULL) || (rqbd->rq == NULL)) {
 213                 return;
 214         }
 215         if (rqbd->mp != NULL) {
 216                 rqbd->fr_rtn.free_arg = NULL;
 217                 freemsg(rqbd->mp);
 218                 rqbd->mp = NULL;
 219         }
 220 } /* oce_rqb_dtor */
 221 
 222 
 223 /*
 224  * RQ buffer allocator function
 225  *
 226  * rq - pointer to RQ structure
 227  *
 228  * return pointer to RQ buffer descriptor
 229  */
 230 static inline oce_rq_bdesc_t *
 231 oce_rqb_alloc(struct oce_rq *rq)
 232 {
 233         oce_rq_bdesc_t *rqbd;
 234         uint32_t free_index;
 235         free_index = rq->rqb_next_free;
 236         rqbd = rq->rqb_freelist[free_index];
 237         rq->rqb_freelist[free_index] = NULL;
 238         rq->rqb_next_free = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
 239         return (rqbd);
 240 } /* oce_rqb_alloc */
 241 
 242 /*
 
 355  *
 356  * dev - software handle to the device
 357  * rq - pointer to the RQ to charge
 358  * cqe - Pointer to Completion Q entry
 359  *
 360  * return mblk pointer =>  success, NULL  => error
 361  */
 362 static inline mblk_t *
 363 oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
 364 {
 365         mblk_t *mp;
 366         int pkt_len;
 367         int32_t frag_cnt = 0;
 368         mblk_t **mblk_tail;
 369         mblk_t  *mblk_head;
 370         int frag_size;
 371         oce_rq_bdesc_t *rqbd;
 372         uint16_t cur_index;
 373         oce_ring_buffer_t *ring;
 374         int i;
 375         uint32_t hdr_len;
 376 
 377         frag_cnt  = cqe->u0.s.num_fragments & 0x7;
 378         mblk_head = NULL;
 379         mblk_tail = &mblk_head;
 380 
 381         ring = rq->ring;
 382         cur_index = ring->cidx;
 383 
 384         /* Get the relevant Queue pointers */
 385         pkt_len = cqe->u0.s.pkt_size;
 386 
 387         if (pkt_len == 0) {
 388                 return (NULL);
 389         }
 390 
 391         for (i = 0; i < frag_cnt; i++) {
 392                 rqbd = rq->shadow_ring[cur_index];
 393                 if (rqbd->mp == NULL) {
 394                         rqbd->mp = desballoc((uchar_t *)rqbd->rqb.base,
 395                             rqbd->rqb.size, 0, &rqbd->fr_rtn);
 396                         if (rqbd->mp == NULL) {
 397                                 return (NULL);
 398                         }
 399                 }
 400 
 401                 mp = rqbd->mp;
 402                 frag_size  = (pkt_len > rq->cfg.frag_size) ?
 403                     rq->cfg.frag_size : pkt_len;
 404                 mp->b_wptr = mp->b_rptr + frag_size;
 405                 pkt_len   -= frag_size;
 406                 mp->b_next = mp->b_cont = NULL;
 407                 /* Chain the message mblks */
 408                 *mblk_tail = mp;
 409                 mblk_tail = &mp->b_cont;
 410                 DBUF_SYNC(rqbd->rqb, rqbd->rqb.off, rqbd->rqb.len,
 411                     DDI_DMA_SYNC_FORCPU);
 412                 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
 413         }
 414 
 415         if (mblk_head == NULL) {
 416                 oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
 417                 return (NULL);
 418         }
 419         /* coalesce headers + VLAN tag into the first mblk */
 420         mp = allocb(OCE_HDR_LEN, BPRI_HI);
 421         if (mp == NULL) {
 422                 return (NULL);
 423         }
 424         /* Align the IP header */
 425         mp->b_rptr += OCE_IP_ALIGN;
 426 
 427         if (oce_check_tagged(dev, cqe)) {
 428                 hdr_len = min(MBLKL(mblk_head), OCE_HDR_LEN) -
 429                     VTAG_SIZE - OCE_IP_ALIGN;
 430                 (void) memcpy(mp->b_rptr, mblk_head->b_rptr, 2 * ETHERADDRL);
 431                 oce_rx_insert_tag(dev, mp, cqe->u0.s.vlan_tag);
 432                 (void) memcpy(mp->b_rptr + 16, mblk_head->b_rptr + 12,
 433                     hdr_len - 12);
 434                 mp->b_wptr = mp->b_rptr + VTAG_SIZE + hdr_len;
 435         } else {
 436 
 437                 hdr_len = min(MBLKL(mblk_head), OCE_HDR_LEN) - OCE_IP_ALIGN;
 438                 (void) memcpy(mp->b_rptr, mblk_head->b_rptr, hdr_len);
 439                 mp->b_wptr = mp->b_rptr + hdr_len;
 440         }
 441         mblk_head->b_rptr += hdr_len;
 442         if (MBLKL(mblk_head) > 0) {
 443                 mp->b_cont = mblk_head;
 444         } else {
 445                 mp->b_cont = mblk_head->b_cont;
 446                 freeb(mblk_head);
 447         }
 448         /* replace the consumed buffers with new ones */
 449         (void) oce_rq_charge(rq, frag_cnt, B_FALSE);
 450         atomic_add_32(&rq->pending, frag_cnt);
 451         return (mp);
 452 } /* oce_rx */
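/*
 * This version of oce_rx() copies the frame headers (and, when needed, a
 * reconstructed VLAN tag) into a small mblk allocated with allocb() and
 * aligned via OCE_IP_ALIGN, then chains the loaned DMA buffers behind it,
 * so the stack sees aligned headers without copying the payload.
 */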
 453 
 454 static inline mblk_t *
 455 oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
 456 {
 457         mblk_t *mp;
 458         int pkt_len;
 459         int32_t frag_cnt = 0;
 460         int frag_size;
 461         oce_rq_bdesc_t *rqbd;
 462         uint32_t cur_index;
 463         oce_ring_buffer_t *ring;
 464         oce_rq_bdesc_t **shadow_rq;
 465         int cnt = 0;
 466         pkt_len = cqe->u0.s.pkt_size;
 467 
 468         if (pkt_len == 0) {
 469                 return (NULL);
 470         }
 471 
 472         mp = allocb(pkt_len + OCE_RQE_BUF_HEADROOM, BPRI_HI);
 473         if (mp == NULL) {
 474                 return (NULL);
 475         }
 476 
 477         ring = rq->ring;
 478         shadow_rq = rq->shadow_ring;
 479         frag_cnt = cqe->u0.s.num_fragments & 0x7;
 480         cur_index = ring->cidx;
 481         rqbd = shadow_rq[cur_index];
 482         frag_size  = min(pkt_len, rq->cfg.frag_size);
 483         /* Align IP header */
 484         mp->b_rptr += OCE_IP_ALIGN;
 485 
 486         /* Sync the first buffer */
 487         DBUF_SYNC(rqbd->rqb, rqbd->rqb.off, rqbd->rqb.len,
 488             DDI_DMA_SYNC_FORCPU);
 489 
 490 
 491         if (oce_check_tagged(dev, cqe)) {
 492                 (void) memcpy(mp->b_rptr, rqbd->rqb.base, 2  * ETHERADDRL);
 493                 oce_rx_insert_tag(dev, mp, cqe->u0.s.vlan_tag);
 494                 (void) memcpy(mp->b_rptr + 16, rqbd->rqb.base + 12,
 495                     frag_size - 12);
 496                 mp->b_wptr = mp->b_rptr + frag_size + VTAG_SIZE;
 497         } else {
 498                 (void) memcpy(mp->b_rptr, rqbd->rqb.base, frag_size);
 499                 mp->b_wptr = mp->b_rptr + frag_size;
 500         }
 501 
 502         for (cnt = 1; cnt < frag_cnt; cnt++) {
 503                 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
 504                 pkt_len   -= frag_size;
 505                 rqbd = shadow_rq[cur_index];
 506                 frag_size  = min(rq->cfg.frag_size, pkt_len);
 507                 DBUF_SYNC(rqbd->rqb, rqbd->rqb.off, rqbd->rqb.len,
 508                     DDI_DMA_SYNC_FORCPU);
 509 
 510                 (void) memcpy(mp->b_wptr, rqbd->rqb.base, frag_size);
 511                 mp->b_wptr += frag_size;
 512         }
 513         (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
 514         return (mp);
 515 }
 516 
 517 static inline void
 518 oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
 519 {
 520         int csum_flags = 0;
 521 
 522         /* set flags */
 523         if (cqe->u0.s.ip_cksum_pass) {
 524                 csum_flags |= HCK_IPV4_HDRCKSUM_OK;
 525         }
 526 
 527         if (cqe->u0.s.l4_cksum_pass) {
 528                 csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
 529         }
 530 
 531         if (csum_flags) {
 532                 (void) mac_hcksum_set(mp, 0, 0, 0, 0, csum_flags);
 533         }
 534 }
 535 
 536 static inline void
 537 oce_rx_insert_tag(struct oce_dev *dev, mblk_t *mp, uint16_t vtag)
 538 {
 539         struct ether_vlan_header *ehp;
 540 
 541         ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
 542         ehp->ether_tpid = htons(ETHERTYPE_VLAN);
 543         if (LANCER_CHIP(dev))
 544                 ehp->ether_tci = htons(vtag);
 545         else
 546                 ehp->ether_tci = LE_16(vtag);
 547 
 548 }
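/*
 * Callers copy the two MAC addresses into the destination mblk before
 * invoking this helper, which only fills in the TPID/TCI words; Lancer
 * adapters convert the tag with htons() while older ASICs use LE_16().
 */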
 549 
 550 static inline void
 551 oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
 552 {
 553         int frag_cnt;
 554         oce_rq_bdesc_t *rqbd;
 555         oce_rq_bdesc_t  **shadow_rq;
 556         shadow_rq = rq->shadow_ring;
 557         for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
 558                 rqbd = shadow_rq[rq->ring->cidx];
 559                 oce_rqb_free(rq, rqbd);
 560                 RING_GET(rq->ring, 1);
 561         }
 562 }
 563 
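/*
 * function to process the RQ completion queue
 *
 * arg - pointer to the RQ to drain
 * nbytes - byte budget; non-zero when called on the poll path
 * npkts - packet budget for the poll path (the interrupt path uses
 *         dev->rx_pkt_per_intr instead)
 *
 * return head of the chain of received mblks; on the interrupt path the
 * chain is also delivered through mac_rx_ring()
 */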
 564 void *
 565 oce_drain_rq_cq(void *arg, int nbytes, int npkts)
 566 {
 567         struct oce_rq *rq;
 568         struct oce_dev *dev;
 569         struct oce_nic_rx_cqe *cqe;
 570         mblk_t *mp = NULL;
 571         struct oce_cq  *cq;
 572         int32_t frag_cnt;
 573         uint16_t num_cqe = 0;
 574         uint16_t cqe_consumed = 0;
 575         uint32_t nbufs = 0;
 576         int pkt_len;
 577         uint32_t poll = (nbytes || 0);  /* non-zero byte budget => poll path */
 578         mblk_t *mp_head = NULL;
 579         mblk_t **mp_tail = &mp_head;
 580 
 581         rq = (struct oce_rq *)arg;
 582         cq = rq->cq;
 583         dev = rq->parent;
 584 
 585         if (!poll) {
 586                 npkts = dev->rx_pkt_per_intr;
 587         }
 588 
 589         mutex_enter(&rq->rx_lock);
 590         if ((!poll) && (rq->qmode == OCE_MODE_POLL)) {
 591                 /* reject any interrupt call in poll mode */
 592                 mutex_exit(&rq->rx_lock);
 593                 return (NULL);
 594         }
 595 
 596         if (rq->qstate == QDELETED) {
 597                 mutex_exit(&rq->rx_lock);
 598                 return (NULL);
 599         }
 600 
 601         DBUF_SYNC(cq->ring->dbuf, 0, 0, DDI_DMA_SYNC_FORKERNEL);
 602         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
 603 
 604         /* dequeue till you reach an invalid cqe */
 605         while (RQ_CQE_VALID(cqe)) {
 606                 DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
 607 
 608                 pkt_len = cqe->u0.s.pkt_size;
 609 
 610 
 611                 if (poll) {
 612                         if (nbytes < pkt_len) {
 613                                 DW_SWAP(u32ptr(cqe),
 614                                     sizeof (struct oce_nic_rx_cqe));
 615                                 break;
 616                         }
 617                         /* reduce the available budget */
 618                         nbytes -= pkt_len;
 619                 }
 620 
 621                 frag_cnt = cqe->u0.s.num_fragments & 0x7;
 622 
 623                 /* if insufficient buffers to charge then do copy */
 624                 if ((pkt_len < dev->rx_bcopy_limit) ||
 625                     (oce_atomic_reserve(&rq->rqb_free, frag_cnt) < 0)) {
 626                         mp = oce_rx_bcopy(dev, rq, cqe);
 627                 } else {
 628                         mp = oce_rx(dev, rq, cqe);
 629                         if (mp == NULL) {
 630                                 atomic_add_32(&rq->rqb_free, frag_cnt);
 631                                 mp = oce_rx_bcopy(dev, rq, cqe);
 632                         }
 633                 }
 634 
 635                 if (mp != NULL) {
 636                         oce_set_rx_oflags(mp, cqe);
 637 
 638                         *mp_tail = mp;
 639                         mp_tail = &mp->b_next;
 640 
 641                 } else {
 642                         (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
 643                 }
 644                 RING_GET(rq->ring, frag_cnt);
 645                 rq->buf_avail -= frag_cnt;
 646                 nbufs += frag_cnt;
 647 
 648                 /* update the ring stats */
 649                 rq->stat_bytes += pkt_len;
 650                 rq->stat_pkts++;
 651 
 652                 RQ_CQE_INVALIDATE(cqe);
 653                 RING_GET(cq->ring, 1);
 654                 num_cqe++;
 655 
 656                 cqe_consumed++;
 657                 if (nbufs >= OCE_DEFAULT_RECHARGE_THRESHOLD) {
 658                         oce_arm_cq(dev, cq->cq_id, cqe_consumed, B_FALSE);
 659                         oce_rq_post_buffer(rq, nbufs);
 660                         nbufs = 0;
 661                         cqe_consumed = 0;
 662                 }
 663 
 664                 if (!poll && (--npkts <= 0)) {
 665                         break;
 666                 }
 667                 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
 668                     struct oce_nic_rx_cqe);
 669 
 670         } /* for all valid CQEs */
 671 
 672         if (cqe_consumed) {
 673                 oce_arm_cq(dev, cq->cq_id, cqe_consumed, rq->qmode);
 674                 oce_rq_post_buffer(rq, nbufs);
 675         } else {
 676                 oce_arm_cq(dev, cq->cq_id, 0, rq->qmode);
 677         }
 678 
 679         mutex_exit(&rq->rx_lock);
 680 
 681         if (!poll && mp_head) {
 682                 mac_rx_ring(dev->mac_handle, rq->handle, mp_head,
 683                     rq->gen_number);
 684         }
 685 
 686         return (mp_head);
 687 
 688 } /* oce_drain_rq_cq */
 689 
 690 /*
 691  * function to free mblk databuffer to the RQ pool
 692  *
 693  * arg - pointer to the receive buffer descriptor
 694  *
 695  * return none
 696  */
 697 void
 698 oce_rx_pool_free(char *arg)
 699 {
 700         oce_rq_bdesc_t *rqbd;
 701         struct oce_rq  *rq;
 702         struct oce_dev  *dev;
 703 
 704         /* During destroy, arg will be NULL */
 705         if (arg == NULL) {
 706                 return;
 707         }
 708 
 709         /* retrieve the pointers from arg */
 710         rqbd = (oce_rq_bdesc_t *)(void *)arg;
 711         rq = rqbd->rq;
 712         dev = rq->parent;
 713         rqbd->mp = desballoc((uchar_t *)rqbd->rqb.base,
 714             rqbd->rqb.size, 0, &rqbd->fr_rtn);
 715 
 716         oce_rqb_free(rq, rqbd);
 717         (void) atomic_dec_32(&rq->pending);
 718 
 719         if (rq->pending == 0) {
 720                 mutex_enter(&rq->rq_fini_lock);
 721                 if (rq->qstate == QFINI_PENDING) {
 722                         oce_rq_fini(dev, rq);
 723                 }
 724                 mutex_exit(&rq->rq_fini_lock);
 725         }
 726 } /* rx_pool_free */
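/*
 * Returning the last outstanding buffer may complete a deferred queue
 * teardown: once rq->pending drops to zero and the queue is in
 * QFINI_PENDING state, oce_rq_fini() is called from here.
 */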
 727 
 728 /*
 729  * function to stop the RX
 730  *
 731  * rq - pointer to RQ structure
 732  *
 733  * return none
 734  */
 735 void
 736 oce_clean_rq(struct oce_rq *rq)
 737 {
 738         uint16_t num_cqe = 0;
 739         struct oce_cq  *cq;
 740         struct oce_dev *dev;
 741         struct oce_nic_rx_cqe *cqe;
 742         int32_t ti = 0;
 743         int frag_cnt;
 744 
 745         dev = rq->parent;
 746         cq = rq->cq;
 747         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
 748         /* dequeue till you reach an invalid cqe */
 749         for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
 750 
 751                 while (RQ_CQE_VALID(cqe)) {
 752                         DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
 753                         frag_cnt = cqe->u0.s.num_fragments & 0x7;
 754                         if (frag_cnt == 0) {
 755                                 oce_log(dev, CE_NOTE, MOD_RX, "%s",
 756                                     "Got Rx Completion Marble Returning ...\n");
 757                                 RQ_CQE_INVALIDATE(cqe);
 758                                 return;
 759                         }
 760                         oce_rx_drop_pkt(rq, cqe);
 761                         atomic_add_32(&rq->buf_avail,
 762                             -(cqe->u0.s.num_fragments & 0x7));
 763                         oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
 764                         RQ_CQE_INVALIDATE(cqe);
 765                         RING_GET(cq->ring, 1);
 766                         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
 767                             struct oce_nic_rx_cqe);
 768                         num_cqe++;
 769                 }
 770                 if (num_cqe == 0) {
 771                         /* arm the queue again to get completion marble */
 772                         oce_arm_cq(dev, cq->cq_id, 0, 1);
 773                 } else {
 774                         /* reset counter to reap valid completions again */
 775                         num_cqe = 0;
 776                 }
 777                 OCE_MSDELAY(1);
 778         }
 779 } /* oce_clean_rq */
 780 
 781 /*
 782  * function to start the RX
 783  *
 784  * rq - pointer to RQ structure
 785  *
 786  * return number of RQEs charged.
 787  */
 788 int
 789 oce_start_rq(struct oce_rq *rq)
 790 {
 791         int ret = 0;
 792         int to_charge = 0;
 793         struct oce_dev *dev = rq->parent;
 794         to_charge = rq->cfg.q_len - rq->buf_avail;
 795         to_charge = min(to_charge, rq->rqb_free);
 796         atomic_add_32(&rq->rqb_free, -to_charge);
 797         (void) oce_rq_charge(rq, to_charge, B_FALSE);
 798         /* ok to do it here since Rx has not even started */
 799         oce_rq_post_buffer(rq, to_charge);
 800         rq->qmode = OCE_MODE_INTR;
 801         oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
 802         return (ret);
 803 } /* oce_start_rq */
 804 
 805 /* Check for rx buffers still held by the stack */
 806 int
 807 oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout)
 808 {
 809         int ti;
 810         _NOTE(ARGUNUSED(dev));
 811 
 812         for (ti = 0; ti < timeout; ti++) {
 813                 if (rq->pending > 0) {
 814                         OCE_MSDELAY(10);
 815                         continue;
 816                 } else {
 817                         break;
 818                 }
 819         }
 820 
 821         if (rq->pending != 0) {
 822                 oce_log(dev, CE_NOTE, MOD_CONFIG,
 823                     "%d pending RX buffers in rq=0x%p", rq->pending,
 824                     (void *)rq);
 825         }
 826         return (rq->pending);
 827 }
 828 
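/*
 * Helper to decide whether a frame should be passed up as VLAN tagged.
 * The tag-present bit is read from the CQE field matching the BE3-native
 * capability; in FLEX10 mode only qnq (double tagged) frames count, and a
 * tag equal to the configured pvid is not reinserted.
 */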
 829 static boolean_t
 830 oce_check_tagged(struct oce_dev *dev, struct oce_nic_rx_cqe *cqe)
 831 {
 832         boolean_t tagged = B_FALSE;
 833         if (((dev->drvfn_caps & DRVFN_CAPAB_BE3_NATIVE) &&
 834             cqe->u0.s.vlan_tag_present) ||
 835             (!(dev->drvfn_caps & DRVFN_CAPAB_BE3_NATIVE) &&
 836             cqe->u0.v0.vlan_tag_present)) {
 837                 if (dev->function_mode & FLEX10_MODE) {
 838                         if (cqe->u0.s.qnq)
 839                                 tagged = B_TRUE;
 840                 } else if (dev->pvid != 0) {
 841                         if (dev->pvid != cqe->u0.v0.vlan_tag)
 842                                 tagged = B_TRUE;
 843                 } else
 844                         tagged = B_TRUE;
 845         }
 846         return (tagged);
 847 }