NEX-1890 update oce from source provided by Emulex


   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /* Copyright © 2003-2011 Emulex. All rights reserved.  */
  23 
  24 /*
  25  * Source file containing the implementation of the Transmit
  26  * Path
  27  */
  28 
  29 #include <oce_impl.h>
  30 
  31 static void oce_free_wqed(struct oce_wq *wq,  oce_wqe_desc_t *wqed);
  32 static int oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed,
  33     mblk_t *mp, uint32_t pkt_len);
  34 static int oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
  35     uint32_t pkt_len);
  36 static void oce_wqb_dtor(struct oce_wq *wq, oce_wq_bdesc_t *wqbd);
  37 static int oce_wqb_ctor(oce_wq_bdesc_t *wqbd, struct oce_wq *wq,
  38     size_t size, int flags);
  39 static inline oce_wq_bdesc_t *oce_wqb_alloc(struct oce_wq *wq);
  40 static void oce_wqb_free(struct oce_wq *wq, oce_wq_bdesc_t *wqbd);
  41 
  42 static void oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
  43 static void oce_wqm_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
  44 static oce_wq_mdesc_t *oce_wqm_alloc(struct oce_wq *wq);
  45 static int oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq);
  46 static void oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
  47 static void oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed);
  48 static void oce_remove_vtag(mblk_t *mp);
  49 static void oce_insert_vtag(mblk_t  *mp, uint16_t vlan_tag);
  50 static inline int oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm);
  51 
  52 
  53 static ddi_dma_attr_t tx_map_dma_attr = {
  54         DMA_ATTR_V0,            /* version number */
  55         0x0000000000000000ull,  /* low address */
  56         0xFFFFFFFFFFFFFFFFull,  /* high address */
  57         0x0000000000010000ull,  /* dma counter max */
  58         OCE_TXMAP_ALIGN,        /* alignment */
  59         0x7FF,                  /* burst sizes */
  60         0x00000001,             /* minimum transfer size */
  61         0x00000000FFFFFFFFull,  /* maximum transfer size */
  62         0xFFFFFFFFFFFFFFFFull,  /* maximum segment size */
  63         OCE_MAX_TXDMA_COOKIES,  /* scatter/gather list length */
  64         0x00000001,             /* granularity */
  65         DDI_DMA_FLAGERR         /* dma_attr_flags */
  66 };
  67 
  68 
  69 ddi_dma_attr_t oce_tx_dma_buf_attr = {
  70         DMA_ATTR_V0,            /* version number */
  71         0x0000000000000000ull,  /* low address */
  72         0xFFFFFFFFFFFFFFFFull,  /* high address */
  73         0x00000000FFFFFFFFull,  /* dma counter max */
  74         OCE_DMA_ALIGNMENT,      /* alignment */
  75         0x000007FF,             /* burst sizes */
  76         0x00000001,             /* minimum transfer size */
  77         0x00000000FFFFFFFFull,  /* maximum transfer size */
  78         0xFFFFFFFFFFFFFFFFull,  /* maximum segment size */
  79         1,                      /* scatter/gather list length */
  80         0x00000001,             /* granularity */
  81         DDI_DMA_FLAGERR         /* dma_attr_flags */
  82 };
  83 
  84 /*
  85  * WQ map handle destructor
  86  *
  87  * wq - Pointer to WQ structure
  88  * wqmd - pointer to WQE mapping handle descriptor
  89  *
  90  * return none
  91  */
  92 
  93 static void
  94 oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
  95 {
  96         _NOTE(ARGUNUSED(wq));
  97         /* Free the DMA handle */
  98         if (wqmd->dma_handle != NULL)
  99                 (void) ddi_dma_free_handle(&(wqmd->dma_handle));
 100         wqmd->dma_handle = NULL;
 101 } /* oce_wqm_dtor */
 102 
 103 /*
  104  * WQ map handles constructor
 105  *
 106  * wqmd - pointer to WQE mapping handle descriptor
 107  * wq - Pointer to WQ structure
 108  *
 109  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 110  */
 111 static int
 112 oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq)
 113 {
 114         struct oce_dev *dev;
 115         int ret;

 116 
 117         dev = wq->parent;
 118         /* Allocate DMA handle */
 119         ret = ddi_dma_alloc_handle(dev->dip, &tx_map_dma_attr,
 120             DDI_DMA_DONTWAIT, NULL, &wqmd->dma_handle);
 121 
 122         return (ret);
 123 } /* oce_wqm_ctor */
 124 
 125 /*
 126  * function to create WQ mapping handles cache
 127  *
 128  * wq - pointer to WQ structure
 129  *
 130  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 131  */
 132 int
 133 oce_wqm_cache_create(struct oce_wq *wq)
 134 {
 135         struct oce_dev *dev = wq->parent;
 136         int size;
 137         int cnt;
 138         int ret;
 139 
 140         size = wq->cfg.nhdl * sizeof (oce_wq_mdesc_t);
 141         wq->wq_mdesc_array = kmem_zalloc(size, KM_NOSLEEP);
 142         if (wq->wq_mdesc_array == NULL) {
 143                 return (DDI_FAILURE);
 144         }
 145 
 146         /* Create the free buffer list */
 147         OCE_LIST_CREATE(&wq->wq_mdesc_list, DDI_INTR_PRI(dev->intr_pri));
 148 
 149         for (cnt = 0; cnt < wq->cfg.nhdl; cnt++) {
 150                 ret = oce_wqm_ctor(&wq->wq_mdesc_array[cnt], wq);
 151                 if (ret != DDI_SUCCESS) {
 152                         goto wqm_fail;
 153                 }
 154                 OCE_LIST_INSERT_TAIL(&wq->wq_mdesc_list,
 155                     &wq->wq_mdesc_array[cnt]);
 156         }
 157         return (DDI_SUCCESS);
 158 
 159 wqm_fail:
 160         oce_wqm_cache_destroy(wq);
 161         return (DDI_FAILURE);
 162 }
 163 
 164 /*
 165  * function to destroy WQ mapping handles cache
 166  *
 167  * wq - pointer to WQ structure
 168  *
 169  * return none
 170  */
 171 void
 172 oce_wqm_cache_destroy(struct oce_wq *wq)
 173 {
 174         oce_wq_mdesc_t *wqmd;
 175 
 176         while ((wqmd = OCE_LIST_REM_HEAD(&wq->wq_mdesc_list)) != NULL) {
 177                 oce_wqm_dtor(wq, wqmd);
 178         }
 179 
 180         kmem_free(wq->wq_mdesc_array,
 181             wq->cfg.nhdl * sizeof (oce_wq_mdesc_t));
 182 
 183         OCE_LIST_DESTROY(&wq->wq_mdesc_list);
 184 }
 185 
 186 /*
 187  * function to create  WQ buffer cache
 188  *
 189  * wq - pointer to WQ structure
 190  * buf_size - size of the buffer
 191  *
 192  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 193  */
 194 int
 195 oce_wqb_cache_create(struct oce_wq *wq, size_t buf_size)
 196 {
 197         struct oce_dev *dev = wq->parent;
 198         int size;
 199         int cnt;
 200         int ret;
 201 
 202         size = wq->cfg.nbufs * sizeof (oce_wq_bdesc_t);
 203         wq->wq_bdesc_array = kmem_zalloc(size, KM_NOSLEEP);
 204         if (wq->wq_bdesc_array == NULL) {
 205                 return (DDI_FAILURE);
 206         }
 207 
 208         /* Create the free buffer list */
 209         OCE_LIST_CREATE(&wq->wq_buf_list, DDI_INTR_PRI(dev->intr_pri));
 210 
 211         for (cnt = 0; cnt <  wq->cfg.nbufs; cnt++) {
 212                 ret = oce_wqb_ctor(&wq->wq_bdesc_array[cnt],
 213                     wq, buf_size, DDI_DMA_STREAMING);
 214                 if (ret != DDI_SUCCESS) {
 215                         goto wqb_fail;
 216                 }
 217                 OCE_LIST_INSERT_TAIL(&wq->wq_buf_list,
 218                     &wq->wq_bdesc_array[cnt]);
 219         }
 220         return (DDI_SUCCESS);
 221 
 222 wqb_fail:
 223         oce_wqb_cache_destroy(wq);
 224         return (DDI_FAILURE);
 225 }
 226 
 227 /*
 228  * function to destroy WQ buffer cache
 229  *
 230  * wq - pointer to WQ structure
 231  *
 232  * return none
 233  */
 234 void
 235 oce_wqb_cache_destroy(struct oce_wq *wq)
 236 {
 237         oce_wq_bdesc_t *wqbd;
 238         while ((wqbd = OCE_LIST_REM_HEAD(&wq->wq_buf_list)) != NULL) {
 239                 oce_wqb_dtor(wq, wqbd);
 240         }
 241         kmem_free(wq->wq_bdesc_array,
 242             wq->cfg.nbufs * sizeof (oce_wq_bdesc_t));
 243         OCE_LIST_DESTROY(&wq->wq_buf_list);
 244 }
 245 
 246 /*
 247  * WQ buffer constructor
 248  *
 249  * wqbd - pointer to WQ buffer descriptor
 250  * wq - pointer to WQ structure
 251  * size - size of the buffer
 252  * flags - KM_SLEEP or KM_NOSLEEP
 253  *
 254  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 255  */
 256 static int
 257 oce_wqb_ctor(oce_wq_bdesc_t *wqbd, struct oce_wq *wq, size_t size, int flags)
 258 {
 259         struct oce_dev *dev;
 260         dev = wq->parent;
 261 
 262         wqbd->wqb = oce_alloc_dma_buffer(dev, size, &oce_tx_dma_buf_attr,
 263             flags);
 264         if (wqbd->wqb == NULL) {
 265                 return (DDI_FAILURE);
 266         }
 267         wqbd->frag_addr.dw.addr_lo = ADDR_LO(wqbd->wqb->addr);
 268         wqbd->frag_addr.dw.addr_hi = ADDR_HI(wqbd->wqb->addr);
 269         return (DDI_SUCCESS);
 270 }
 271 
 272 /*
 273  * WQ buffer destructor
 274  *
 275  * wq - pointer to WQ structure
 276  * wqbd - pointer to WQ buffer descriptor
 277  *
 278  * return none
 279  */
 280 static void
 281 oce_wqb_dtor(struct oce_wq *wq, oce_wq_bdesc_t *wqbd)
 282 {
 283         oce_free_dma_buffer(wq->parent, wqbd->wqb);
 284 }
 285 
 286 /*
 287  * function to alloc   WQE buffer descriptor
 288  *
 289  * wq - pointer to WQ structure
 290  *
 291  * return pointer to WQE buffer descriptor
 292  */
 293 static inline oce_wq_bdesc_t *
 294 oce_wqb_alloc(struct oce_wq *wq)
 295 {
 296         return (OCE_LIST_REM_HEAD(&wq->wq_buf_list));
 297 }
 298 
 299 /*
 300  * function to free   WQE buffer descriptor
 301  *
 302  * wq - pointer to WQ structure
 303  * wqbd - pointer to WQ buffer descriptor
 304  *
 305  * return none
 306  */
 307 static inline void
 308 oce_wqb_free(struct oce_wq *wq, oce_wq_bdesc_t *wqbd)
 309 {
 310         OCE_LIST_INSERT_TAIL(&wq->wq_buf_list, wqbd);
 311 } /* oce_wqb_free */
 312 
 313 /*
 314  * function to allocate   WQE mapping descriptor
 315  *
 316  * wq - pointer to WQ structure
 317  *
 318  * return pointer to WQE mapping descriptor
 319  */
 320 static inline oce_wq_mdesc_t *
 321 oce_wqm_alloc(struct oce_wq *wq)
 322 {
 323         return (OCE_LIST_REM_HEAD(&wq->wq_mdesc_list));
 324 } /* oce_wqm_alloc */
 325 
 326 /*
 327  * function to insert   WQE mapping descriptor to the list
 328  *
 329  * wq - pointer to WQ structure
 330  * wqmd - Pointer to WQ mapping descriptor
 331  *
 332  * return none
 333  */
 334 static inline void
 335 oce_wqm_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
 336 {
 337         OCE_LIST_INSERT_TAIL(&wq->wq_mdesc_list, wqmd);
 338 }
 339 
 340 /*
 341  * function to free  WQE mapping descriptor
 342  *
 343  * wq - pointer to WQ structure
 344  * wqmd - Pointer to WQ mapping descriptor
 345  *
 346  * return none
 347  */
 348 static void
 349 oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
 350 {
 351         if (wqmd == NULL) {
 352                 return;
 353         }
 354         (void) ddi_dma_unbind_handle(wqmd->dma_handle);
 355         oce_wqm_free(wq, wqmd);
 356 }
 357 


 370         _NOTE(ARGUNUSED(kmflags));
 371 
 372         return (DDI_SUCCESS);
 373 }
 374 
 375 /*
 376  * WQED kmem_cache destructor
 377  *
 378  * buf - pointer to WQE descriptor
 379  *
 380  * return none
 381  */
 382 void
 383 oce_wqe_desc_dtor(void *buf, void *arg)
 384 {
 385         _NOTE(ARGUNUSED(buf));
 386         _NOTE(ARGUNUSED(arg));
 387 }
 388 
 389 /*
 390  * function to choose a WQ given a mblk depending on priority, flowID etc.
 391  *
 392  * dev - software handle to device
 393  * mp - the mblk to send
 394  *
 395  * return pointer to the WQ selected
 396  */
 397 static uint8_t oce_tx_hash_policy = 0x4;
 398 struct oce_wq *
 399 oce_get_wq(struct oce_dev *dev, mblk_t *mp)
 400 {
 401         struct oce_wq *wq;
 402         int qidx = 0;
 403         if (dev->nwqs > 1) {
 404                 qidx = mac_pkt_hash(DL_ETHER, mp, oce_tx_hash_policy, B_TRUE);
 405                 qidx = qidx % dev->nwqs;
 406 
 407         } else {
 408                 qidx = 0;
 409         }
 410         wq = dev->wq[qidx];
  411         /* for the time being, hardcoded */
 412         return (wq);
 413 } /* oce_get_wq */
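
A minimal standalone sketch of the queue-selection logic in oce_get_wq() above (hypothetical helper, not a driver function): the per-packet hash returned by mac_pkt_hash() is reduced modulo the number of work queues, spreading flows across rings while keeping all packets of one flow on the same ring so they stay ordered.

#include <stdint.h>

/* illustrative helper, not part of the oce driver */
static int
pick_tx_queue(uint64_t pkt_hash, int nwqs)
{
        /* with a single WQ there is nothing to choose */
        if (nwqs <= 1)
                return (0);
        return ((int)(pkt_hash % (uint64_t)nwqs));
}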
 414 
 415 /*
 416  * function to populate the single WQE
 417  *
 418  * wq - pointer to wq
 419  * wqed - pointer to WQ entry  descriptor
 420  *
 421  * return none
 422  */
 423 #pragma inline(oce_fill_ring_descs)
 424 static void
 425 oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed)
 426 {
 427 
 428         struct oce_nic_frag_wqe *wqe;
 429         int i;
  430         /* Copy the pre-created WQE descs to the ring descs */
 431         for (i = 0; i < wqed->wqe_cnt; i++) {
 432                 wqe = RING_GET_PRODUCER_ITEM_VA(wq->ring,
 433                     struct oce_nic_frag_wqe);
 434 
 435                 bcopy(&wqed->frag[i], wqe, NIC_WQE_SIZE);


 459         wqbd = oce_wqb_alloc(wq);
 460         if (wqbd == NULL) {
 461                 atomic_inc_32(&dev->tx_noxmtbuf);
 462                 oce_log(dev, CE_WARN, MOD_TX, "%s",
 463                     "wqb pool empty");
 464                 return (ENOMEM);
 465         }
 466 
 467         /* create a fragment wqe for the packet */
 468         wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi = wqbd->frag_addr.dw.addr_hi;
 469         wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo = wqbd->frag_addr.dw.addr_lo;
 470         buf_va = DBUF_VA(wqbd->wqb);
 471 
 472         /* copy pkt into buffer */
 473         for (len = 0; mp != NULL && len < pkt_len; mp = mp->b_cont) {
 474                 bcopy(mp->b_rptr, buf_va, MBLKL(mp));
 475                 buf_va += MBLKL(mp);
 476                 len += MBLKL(mp);
 477         }
 478 
 479         (void) ddi_dma_sync(DBUF_DHDL(wqbd->wqb), 0, pkt_len,
 480             DDI_DMA_SYNC_FORDEV);
 481 
 482         if (oce_fm_check_dma_handle(dev, DBUF_DHDL(wqbd->wqb))) {
 483                 ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
 484                 /* Free the buffer */
 485                 oce_wqb_free(wq, wqbd);
 486                 return (EIO);
 487         }
 488         wqed->frag[wqed->frag_idx].u0.s.frag_len   =  pkt_len;

 489         wqed->hdesc[wqed->nhdl].hdl = (void *)(wqbd);
 490         wqed->hdesc[wqed->nhdl].type = COPY_WQE;
 491         wqed->frag_cnt++;
 492         wqed->frag_idx++;
 493         wqed->nhdl++;
 494         return (0);
 495 } /* oce_bcopy_wqe */
 496 
 497 /*
 498  * function to copy the packet or dma map on the fly depending on size
 499  *
 500  * wq - pointer to WQ
 501  * wqed - Pointer to WQE descriptor
 502  * mp - Pointer to packet chain
 503  *
 504  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 505  */
 506 static  int
 507 oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
 508     uint32_t pkt_len)


 521         }
 522 
 523         ret = ddi_dma_addr_bind_handle(wqmd->dma_handle,
 524             (struct as *)0, (caddr_t)mp->b_rptr,
 525             pkt_len, DDI_DMA_WRITE | DDI_DMA_STREAMING,
 526             DDI_DMA_DONTWAIT, NULL, &cookie, &ncookies);
 527         if (ret != DDI_DMA_MAPPED) {
 528                 oce_log(dev, CE_WARN, MOD_TX, "MAP FAILED %d",
 529                     ret);
 530                 /* free the last one */
 531                 oce_wqm_free(wq, wqmd);
 532                 return (ENOMEM);
 533         }
 534         do {
 535                 wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi =
 536                     ADDR_HI(cookie.dmac_laddress);
 537                 wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo =
 538                     ADDR_LO(cookie.dmac_laddress);
 539                 wqed->frag[wqed->frag_idx].u0.s.frag_len =
 540                     (uint32_t)cookie.dmac_size;

 541                 wqed->frag_cnt++;
 542                 wqed->frag_idx++;
 543                 if (--ncookies > 0)
 544                         ddi_dma_nextcookie(wqmd->dma_handle,
 545                             &cookie);
 546                         else break;
 547         } while (ncookies > 0);
 548 
 549         wqed->hdesc[wqed->nhdl].hdl = (void *)wqmd;
 550         wqed->hdesc[wqed->nhdl].type = MAPPED_WQE;
 551         wqed->nhdl++;
 552         return (0);
 553 } /* oce_map_wqe */
 554 
 555 static inline int
 556 oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm)
 557 {
 558         struct oce_nic_tx_cqe *cqe;
 559         uint16_t num_cqe = 0;
 560         struct oce_cq *cq;
 561         oce_wqe_desc_t *wqed;
 562         int wqe_freed = 0;
 563         struct oce_dev *dev;

 564 
 565         cq  = wq->cq;
 566         dev = wq->parent;
 567         (void) ddi_dma_sync(cq->ring->dbuf->dma_handle, 0, 0,
 568             DDI_DMA_SYNC_FORKERNEL);
 569 
 570         mutex_enter(&wq->txc_lock);
 571         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
 572         while (WQ_CQE_VALID(cqe)) {
 573 
 574                 DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe));
 575 
 576                 /* update stats */
 577                 if (cqe->u0.s.status != 0) {
 578                         atomic_inc_32(&dev->tx_errors);
 579                 }
 580 
 581                 /* complete the WQEs */
 582                 wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list);
 583 
 584                 wqe_freed = wqed->wqe_cnt;
 585                 oce_free_wqed(wq, wqed);
 586                 RING_GET(wq->ring, wqe_freed);
 587                 atomic_add_32(&wq->wq_free, wqe_freed);
 588                 /* clear the valid bit and progress cqe */
 589                 WQ_CQE_INVALIDATE(cqe);
 590                 RING_GET(cq->ring, 1);

 591                 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
 592                     struct oce_nic_tx_cqe);
 593                 num_cqe++;
 594         } /* for all valid CQE */
 595         mutex_exit(&wq->txc_lock);
 596         if (num_cqe)
 597                 oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm);
 598         return (num_cqe);
 599 } /* oce_process_tx_completion */
 600 
 601 /*
 602  * function to drain a TxCQ and process its CQEs
 603  *
 604  * dev - software handle to the device
 605  * cq - pointer to the cq to drain
 606  *
 607  * return the number of CQEs processed
 608  */
 609 uint16_t
 610 oce_drain_wq_cq(void *arg)
 611 {
 612         uint16_t num_cqe = 0;
 613         struct oce_dev *dev;
 614         struct oce_wq *wq;
 615 
 616         wq = (struct oce_wq *)arg;
 617         dev = wq->parent;
 618 
  619         /* process CQEs until we reach one that is not valid */
 620         num_cqe = oce_process_tx_compl(wq, B_FALSE);
 621 
 622         /* check if we need to restart Tx */
 623         if (wq->resched && num_cqe) {
 624                 wq->resched = B_FALSE;
 625                 mac_tx_update(dev->mac_handle);
 626         }

 627 
 628         return (num_cqe);
 629 } /* oce_process_wq_cqe */
 630 
 631 /*
 632  * function to insert vtag to packet
 633  *
 634  * mp - mblk pointer
 635  * vlan_tag - tag to be inserted

 636  *
 637  * return none
 638  */
 639 static inline void
 640 oce_insert_vtag(mblk_t *mp, uint16_t vlan_tag)
 641 {
 642         struct ether_vlan_header  *evh;
 643         (void) memmove(mp->b_rptr - VTAG_SIZE,
 644             mp->b_rptr, 2 * ETHERADDRL);
 645         mp->b_rptr -= VTAG_SIZE;
 646         evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
 647         evh->ether_tpid = htons(VLAN_TPID);
 648         evh->ether_tci = htons(vlan_tag);
 649 }
 650 
 651 /*
 652  * function to strip  vtag from packet

 653  *
 654  * mp - mblk pointer

 655  *
 656  * return none
 657  */
 658 
 659 static inline void
 660 oce_remove_vtag(mblk_t *mp)
 661 {
 662         (void) memmove(mp->b_rptr + VTAG_SIZE, mp->b_rptr,
 663             ETHERADDRL * 2);
 664         mp->b_rptr += VTAG_SIZE;
 665 }
 666 
 667 /*
 668  * function to xmit  Single packet over the wire
 669  *
 670  * wq - pointer to WQ
 671  * mp - Pointer to packet chain
 672  *
 673  * return pointer to the packet
 674  */
 675 mblk_t *
 676 oce_send_packet(struct oce_wq *wq, mblk_t *mp)
 677 {
 678         struct oce_nic_hdr_wqe *wqeh;
 679         struct oce_dev *dev;
 680         struct ether_header *eh;
 681         struct ether_vlan_header *evh;
 682         int32_t num_wqes;
 683         uint16_t etype;
 684         uint32_t ip_offset;
 685         uint32_t csum_flags = 0;
 686         boolean_t use_copy = B_FALSE;
 687         boolean_t tagged   = B_FALSE;
 688         uint16_t  vlan_tag;
 689         uint32_t  reg_value = 0;
 690         oce_wqe_desc_t *wqed = NULL;
 691         mblk_t *nmp = NULL;
 692         mblk_t *tmp = NULL;
 693         uint32_t pkt_len = 0;
 694         int num_mblks = 0;
 695         int ret = 0;
 696         uint32_t mss = 0;
 697         uint32_t flags = 0;
 698         int len = 0;
 699 
 700         /* retrieve the adap priv struct ptr */
 701         dev = wq->parent;
 702 
 703         /* check if we have enough free slots */
 704         if (wq->wq_free < dev->tx_reclaim_threshold) {
 705                 (void) oce_process_tx_compl(wq, B_FALSE);
 706         }
 707         if (wq->wq_free < OCE_MAX_TX_HDL) {
 708                 return (mp);
 709         }
 710 
 711         /* check if we should copy */
 712         for (tmp = mp; tmp != NULL; tmp = tmp->b_cont) {
 713                 pkt_len += MBLKL(tmp);
 714                 num_mblks++;
 715         }
 716 
 717         if (pkt_len == 0 || num_mblks == 0) {
 718                 freemsg(mp);
 719                 return (NULL);
 720         }
 721 
 722         /* retrieve LSO information */
 723         mac_lso_get(mp, &mss, &flags);
 724 
 725         /* get the offload flags */
 726         mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &csum_flags);
 727 
  728         /* restrict the mapped segment to what we support */
 729         if (num_mblks  > OCE_MAX_TX_HDL) {
 730                 nmp = msgpullup(mp, -1);
 731                 if (nmp == NULL) {
 732                         atomic_inc_32(&wq->pkt_drops);
 733                         freemsg(mp);
 734                         return (NULL);
 735                 }
 736                 /* Reset it to new collapsed mp */
 737                 freemsg(mp);
 738                 mp = nmp;
 739         }
 740 
 741         /* Get the packet descriptor for Tx */
 742         wqed = kmem_cache_alloc(wq->wqed_cache, KM_NOSLEEP);
 743         if (wqed == NULL) {
 744                 atomic_inc_32(&wq->pkt_drops);
 745                 freemsg(mp);
 746                 return (NULL);
 747         }
 748         eh = (struct ether_header *)(void *)mp->b_rptr;
 749         if (ntohs(eh->ether_type) == VLAN_TPID) {
 750                 evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
 751                 tagged = B_TRUE;
 752                 etype = ntohs(evh->ether_type);
 753                 ip_offset = sizeof (struct ether_vlan_header);
 754                 pkt_len -= VTAG_SIZE;
 755                 vlan_tag = ntohs(evh->ether_tci);
 756                 oce_remove_vtag(mp);
 757         } else {
 758                 etype = ntohs(eh->ether_type);
 759                 ip_offset = sizeof (struct ether_header);
 760         }
 761 
 762         /* Save the WQ pointer */
 763         wqed->wq = wq;
 764         wqed->frag_idx = 1; /* index zero is always header */
 765         wqed->frag_cnt = 0;
 766         wqed->nhdl = 0;
 767         wqed->mp = NULL;
 768         OCE_LIST_LINK_INIT(&wqed->link);
 769 
  770         /* If the entire packet is smaller than the copy limit, just copy it */
 771         if (pkt_len < dev->tx_bcopy_limit) {
 772                 use_copy = B_TRUE;
 773                 ret = oce_bcopy_wqe(wq, wqed, mp, pkt_len);
 774         } else {
 775                 /* copy or dma map the individual fragments */
 776                 for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
 777                         len = MBLKL(nmp);
 778                         if (len == 0) {
 779                                 continue;
 780                         }
 781                         if (len < dev->tx_bcopy_limit) {
 782                                 ret = oce_bcopy_wqe(wq, wqed, nmp, len);
 783                         } else {
 784                                 ret = oce_map_wqe(wq, wqed, nmp, len);
 785                         }
 786                         if (ret != 0)
 787                                 break;
 788                 }
 789         }


 806         if (flags & HW_LSO) {
 807                 wqeh->u0.s.lso = B_TRUE;
 808                 wqeh->u0.s.lso_mss = mss;
 809         }
 810         if (csum_flags & HCK_FULLCKSUM) {
 811                 uint8_t *proto;
 812                 if (etype == ETHERTYPE_IP) {
 813                         proto = (uint8_t *)(void *)
 814                             (mp->b_rptr + ip_offset);
 815                         if (proto[9] == 6)
 816                                 /* IPPROTO_TCP */
 817                                 wqeh->u0.s.tcpcs = B_TRUE;
 818                         else if (proto[9] == 17)
 819                                 /* IPPROTO_UDP */
 820                                 wqeh->u0.s.udpcs = B_TRUE;
 821                 }
 822         }
 823 
 824         if (csum_flags & HCK_IPV4_HDRCKSUM)
 825                 wqeh->u0.s.ipcs = B_TRUE;
 826         if (tagged) {
 827                 wqeh->u0.s.vlan = B_TRUE;
 828                 wqeh->u0.s.vlan_tag = vlan_tag;
 829         }
 830 
 831         wqeh->u0.s.complete = B_TRUE;
 832         wqeh->u0.s.event = B_TRUE;

 833         wqeh->u0.s.crc = B_TRUE;
 834         wqeh->u0.s.total_length = pkt_len;
 835 
 836         num_wqes = wqed->frag_cnt + 1;
 837 
 838         /* h/w expects even no. of WQEs */
 839         if (num_wqes & 0x1) {
 840                 bzero(&wqed->frag[num_wqes], sizeof (struct oce_nic_frag_wqe));
 841                 num_wqes++;
 842         }
 843         wqed->wqe_cnt = (uint16_t)num_wqes;
 844         wqeh->u0.s.num_wqe = num_wqes;
 845         DW_SWAP(u32ptr(&wqed->frag[0]), (wqed->wqe_cnt * NIC_WQE_SIZE));
 846 
 847         mutex_enter(&wq->tx_lock);
 848         if (num_wqes > wq->wq_free) {
 849                 atomic_inc_32(&wq->tx_deferd);
 850                 mutex_exit(&wq->tx_lock);
 851                 goto wqe_fail;
 852         }
 853         atomic_add_32(&wq->wq_free, -num_wqes);
 854 
 855         /* fill the wq for adapter */
 856         oce_fill_ring_descs(wq, wqed);
 857 
 858         /* Set the mp pointer in the wqe descriptor */
 859         if (use_copy == B_FALSE) {
 860                 wqed->mp = mp;
 861         }
 862         /* Add the packet desc to list to be retrieved during cmpl */
 863         OCE_LIST_INSERT_TAIL(&wq->wqe_desc_list,  wqed);
 864         (void) ddi_dma_sync(wq->ring->dbuf->dma_handle, 0, 0,
 865             DDI_DMA_SYNC_FORDEV);
 866 
 867         /* ring tx doorbell */
 868         reg_value = (num_wqes << 16) | wq->wq_id;
 869         /* Ring the door bell  */
 870         OCE_DB_WRITE32(dev, PD_TXULP_DB, reg_value);
 871         mutex_exit(&wq->tx_lock);
 872         if (oce_fm_check_acc_handle(dev, dev->db_handle) != DDI_FM_OK) {
 873                 ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
 874         }
 875 
 876         /* free mp if copied or packet chain collapsed */
 877         if (use_copy == B_TRUE) {
 878                 freemsg(mp);
 879         }
 880         return (NULL);
 881 
 882 wqe_fail:
 883 
 884         if (tagged) {
 885                 oce_insert_vtag(mp, vlan_tag);
 886         }
 887         oce_free_wqed(wq, wqed);
 888         return (mp);
 889 } /* oce_send_packet */
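
A minimal standalone sketch of the doorbell arithmetic used at the end of oce_send_packet() (illustrative only, not a driver function): the WQE count is padded to an even value, as the "h/w expects even no. of WQEs" comment notes, and is then packed into the upper 16 bits of the doorbell word with the WQ id in the lower 16 bits, mirroring the reg_value computation above.

#include <stdint.h>

/* illustrative helper, not part of the oce driver */
static uint32_t
tx_doorbell_value(uint32_t num_wqes, uint16_t wq_id)
{
        /* pad to an even number of WQEs, as the hardware expects */
        if (num_wqes & 0x1)
                num_wqes++;
        /* upper 16 bits: WQEs posted; lower 16 bits: work queue id */
        return ((num_wqes << 16) | wq_id);
}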
 890 
 891 /*
 892  * function to free the WQE descriptor
 893  *
 894  * wq - pointer to WQ
 895  * wqed - Pointer to WQE descriptor
 896  *
 897  * return none
 898  */
 899 #pragma inline(oce_free_wqed)
 900 static void
 901 oce_free_wqed(struct oce_wq *wq, oce_wqe_desc_t *wqed)
 902 {
 903         int i = 0;
 904         if (wqed == NULL) {
 905                 return;
 906         }
 907 


 936  * function to stop  the WQ
 937  *
 938  * wq - pointer to WQ
 939  *
 940  * return none
 941  */
 942 void
 943 oce_clean_wq(struct oce_wq *wq)
 944 {
 945         oce_wqe_desc_t *wqed;
 946         int ti;
 947 
 948         /* Wait for already posted Tx to complete */
 949 
 950         for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
 951                 (void) oce_process_tx_compl(wq, B_FALSE);
 952                 OCE_MSDELAY(1);
 953         }
 954 
 955         /* Free the remaining descriptors */
 956         while ((wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list)) != NULL) {

 957                 atomic_add_32(&wq->wq_free, wqed->wqe_cnt);
 958                 oce_free_wqed(wq, wqed);
 959         }

 960         oce_drain_eq(wq->cq->eq);
 961 } /* oce_stop_wq */
 962 
 963 /*
 964  * function to set the tx mapping handle fma attr
 965  *
 966  * fm_caps - capability flags
 967  *
 968  * return none
 969  */
 970 
 971 void
 972 oce_set_tx_map_dma_fma_flags(int fm_caps)
 973 {
 974         if (fm_caps == DDI_FM_NOT_CAPABLE) {
 975                 return;
 976         }
 977 
 978         if (DDI_FM_DMA_ERR_CAP(fm_caps)) {
 979                 tx_map_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
 980         } else {
 981                 tx_map_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
 982         }
 983 } /* oce_set_tx_map_dma_fma_flags */


   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2009-2012 Emulex. All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27 
  28 
  29 /*
  30  * Source file containing the implementation of the Transmit
  31  * Path
  32  */
  33 
  34 #include <oce_impl.h>
  35 
  36 static void oce_free_wqed(struct oce_wq *wq,  oce_wqe_desc_t *wqed);
  37 static int oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed,
  38     mblk_t *mp, uint32_t pkt_len);
  39 static int oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
  40     uint32_t pkt_len);
  41 static inline oce_wq_bdesc_t *oce_wqb_alloc(struct oce_wq *wq);
  42 static void oce_wqb_free(struct oce_wq *wq, oce_wq_bdesc_t *wqbd);
  43 
  44 static void oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
  45 static void oce_wqm_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
  46 static inline oce_wq_mdesc_t *oce_wqm_alloc(struct oce_wq *wq);
  47 static int oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq);
  48 static void oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
  49 static void oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed);
  50 static inline int oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm);
  51 
  52 
  53 /*
  54  * WQ map handle destructor
  55  *
  56  * wq - Pointer to WQ structure
  57  * wqmd - pointer to WQE mapping handle descriptor
  58  *
  59  * return none
  60  */
  61 
  62 static void
  63 oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
  64 {
  65         _NOTE(ARGUNUSED(wq));
  66         /* Free the DMA handle */
  67         if (wqmd->dma_handle != NULL)
  68                 (void) ddi_dma_free_handle(&(wqmd->dma_handle));
  69         wqmd->dma_handle = NULL;
  70 } /* oce_wqm_dtor */
  71 
  72 /*
   73  * WQ map handles constructor
  74  *
  75  * wqmd - pointer to WQE mapping handle descriptor
  76  * wq - Pointer to WQ structure
  77  *
  78  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
  79  */
  80 static int
  81 oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq)
  82 {
  83         struct oce_dev *dev;
  84         int ret;
  85         ddi_dma_attr_t tx_map_attr = {0};
  86 
  87         dev = wq->parent;
  88         /* Populate the DMA attributes structure */
  89         tx_map_attr.dma_attr_version    = DMA_ATTR_V0;
  90         tx_map_attr.dma_attr_addr_lo    = 0x0000000000000000ull;
  91         tx_map_attr.dma_attr_addr_hi    = 0xFFFFFFFFFFFFFFFFull;
  92         tx_map_attr.dma_attr_count_max  = 0x00000000FFFFFFFFull;
  93         tx_map_attr.dma_attr_align              = OCE_TXMAP_ALIGN;
  94         tx_map_attr.dma_attr_burstsizes = 0x000007FF;
  95         tx_map_attr.dma_attr_minxfer    = 0x00000001;
  96         tx_map_attr.dma_attr_maxxfer    = 0x00000000FFFFFFFFull;
  97         tx_map_attr.dma_attr_seg                = 0xFFFFFFFFFFFFFFFFull;
  98         tx_map_attr.dma_attr_sgllen             = OCE_MAX_TXDMA_COOKIES;
  99         tx_map_attr.dma_attr_granular   = 0x00000001;
 100 
 101         if (DDI_FM_DMA_ERR_CAP(dev->fm_caps)) {
 102                 tx_map_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
 103         }
 104         /* Allocate DMA handle */
 105         ret = ddi_dma_alloc_handle(dev->dip, &tx_map_attr,
 106             DDI_DMA_DONTWAIT, NULL, &wqmd->dma_handle);
 107 
 108         return (ret);
 109 } /* oce_wqm_ctor */
 110 
 111 /*
 112  * function to create WQ mapping handles cache
 113  *
 114  * wq - pointer to WQ structure
 115  *
 116  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 117  */
 118 int
 119 oce_wqm_cache_create(struct oce_wq *wq)
 120 {
 121         struct oce_dev *dev = wq->parent;
 122         int size;
 123         int cnt;
 124         int ret;
 125 
 126         size = wq->cfg.nhdl * sizeof (oce_wq_mdesc_t);
 127         wq->wq_mdesc_array = kmem_zalloc(size, KM_NOSLEEP);
 128         if (wq->wq_mdesc_array == NULL) {
 129                 return (DDI_FAILURE);
 130         }
 131 
 132         wq->wqm_freelist =
 133             kmem_zalloc(wq->cfg.nhdl * sizeof (oce_wq_mdesc_t *), KM_NOSLEEP);
 134         if (wq->wqm_freelist == NULL) {
 135                 kmem_free(wq->wq_mdesc_array, size);
 136                 return (DDI_FAILURE);
 137         }
 138 
 139         for (cnt = 0; cnt < wq->cfg.nhdl; cnt++) {
 140                 ret = oce_wqm_ctor(&wq->wq_mdesc_array[cnt], wq);
 141                 if (ret != DDI_SUCCESS) {
 142                         goto wqm_fail;
 143                 }
 144                 wq->wqm_freelist[cnt] = &wq->wq_mdesc_array[cnt];
 145                 atomic_inc_32(&wq->wqm_free);
 146         }
 147 
 148         wq->wqmd_next_free = 0;
 149         wq->wqmd_rc_head = 0;
 150 
 151         mutex_init(&wq->wqm_alloc_lock, NULL, MUTEX_DRIVER,
 152             DDI_INTR_PRI(dev->intr_pri));
 153         mutex_init(&wq->wqm_free_lock, NULL, MUTEX_DRIVER,
 154             DDI_INTR_PRI(dev->intr_pri));
 155         return (DDI_SUCCESS);
 156 
 157 wqm_fail:
 158         oce_wqm_cache_destroy(wq);
 159         return (DDI_FAILURE);
 160 }
 161 
 162 /*
 163  * function to destroy WQ mapping handles cache
 164  *
 165  * wq - pointer to WQ structure
 166  *
 167  * return none
 168  */
 169 void
 170 oce_wqm_cache_destroy(struct oce_wq *wq)
 171 {
 172         oce_wq_mdesc_t *wqmd;
 173 
 174         while ((wqmd = oce_wqm_alloc(wq)) != NULL) {
 175                 oce_wqm_dtor(wq, wqmd);
 176         }
 177 
 178         mutex_destroy(&wq->wqm_alloc_lock);
 179         mutex_destroy(&wq->wqm_free_lock);
 180         kmem_free(wq->wqm_freelist,
 181             wq->cfg.nhdl * sizeof (oce_wq_mdesc_t *));
 182         kmem_free(wq->wq_mdesc_array,
 183             wq->cfg.nhdl * sizeof (oce_wq_mdesc_t));
 184 }
 185 
 186 /*
 187  * function to create  WQ buffer cache
 188  *
 189  * wq - pointer to WQ structure
 190  * buf_size - size of the buffer
 191  *
 192  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 193  */
 194 int
 195 oce_wqb_cache_create(struct oce_wq *wq, size_t buf_size)
 196 {
 197         struct oce_dev *dev = wq->parent;
 198         oce_wq_bdesc_t *wqbd;
 199         uint64_t paddr;
 200         caddr_t  vaddr;
 201         int size;
 202         int bufs_per_cookie = 0;
 203         int tidx = 0;
 204         int ncookies = 0;
 205         int i = 0;
 206         ddi_dma_cookie_t cookie;
 207         ddi_dma_attr_t tx_buf_attr = {0};
 208         int ret;
 209 
 210         size = wq->cfg.nbufs * sizeof (oce_wq_bdesc_t);
 211         wq->wq_bdesc_array = kmem_zalloc(size, KM_NOSLEEP);
 212         if (wq->wq_bdesc_array == NULL) {
 213                 return (DDI_FAILURE);
 214         }
 215 
 216         wq->wqb_freelist =
 217             kmem_zalloc(wq->cfg.nbufs * sizeof (oce_wq_bdesc_t *), KM_NOSLEEP);
 218         if (wq->wqb_freelist == NULL) {
 219                 kmem_free(wq->wq_bdesc_array,
 220                     wq->cfg.nbufs * sizeof (oce_wq_bdesc_t));
 221                 return (DDI_FAILURE);
 222         }
 223 
 224         size = wq->cfg.nbufs * wq->cfg.buf_size;
 225 
 226         /* Populate dma attributes */
 227         tx_buf_attr.dma_attr_version    = DMA_ATTR_V0;
 228         tx_buf_attr.dma_attr_addr_lo    = 0x0000000000000000ull;
 229         tx_buf_attr.dma_attr_addr_hi    = 0xFFFFFFFFFFFFFFFFull;
 230         tx_buf_attr.dma_attr_count_max  = 0x00000000FFFFFFFFull;
 231         tx_buf_attr.dma_attr_align      = OCE_DMA_ALIGNMENT;
 232         tx_buf_attr.dma_attr_burstsizes = 0x000007FF;
 233         tx_buf_attr.dma_attr_minxfer    = 0x00000001;
 234         tx_buf_attr.dma_attr_maxxfer    = 0x00000000FFFFFFFFull;
 235         tx_buf_attr.dma_attr_seg        = 0xFFFFFFFFFFFFFFFFull;
 236         tx_buf_attr.dma_attr_granular   = (uint32_t)buf_size;
 237 
 238         if (DDI_FM_DMA_ERR_CAP(dev->fm_caps)) {
 239                 tx_buf_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
 240         }
 241 
 242         tx_buf_attr.dma_attr_sgllen = 1;
 243 
 244         ret = oce_alloc_dma_buffer(dev, &wq->wqb, size, &tx_buf_attr,
 245             DDI_DMA_STREAMING|DDI_DMA_WRITE);
 246         if (ret != DDI_SUCCESS) {
 247                 tx_buf_attr.dma_attr_sgllen =
 248                     size/ddi_ptob(dev->dip, (ulong_t)1) + 2;
 249                 ret = oce_alloc_dma_buffer(dev, &wq->wqb, size, &tx_buf_attr,
 250                     DDI_DMA_STREAMING|DDI_DMA_WRITE);
 251                 if (ret != DDI_SUCCESS) {
 252                         kmem_free(wq->wq_bdesc_array,
 253                             wq->cfg.nbufs * sizeof (oce_wq_bdesc_t));
 254                         kmem_free(wq->wqb_freelist,
 255                             wq->cfg.nbufs * sizeof (oce_wq_bdesc_t *));
 256                         return (DDI_FAILURE);
 257                 }
 258         }

 259 
 260         wqbd = wq->wq_bdesc_array;
 261         vaddr = wq->wqb.base;
 262         cookie = wq->wqb.cookie;
 263         ncookies = wq->wqb.ncookies;
 264         do {
 265                 paddr = cookie.dmac_laddress;
 266                 bufs_per_cookie = cookie.dmac_size/buf_size;
 267                 for (i = 0; i <  bufs_per_cookie; i++, wqbd++) {
 268                         wqbd->wqb.acc_handle = wq->wqb.acc_handle;
 269                         wqbd->wqb.dma_handle = wq->wqb.dma_handle;
 270                         wqbd->wqb.base = vaddr;
 271                         wqbd->wqb.addr = paddr;
 272                         wqbd->wqb.len  = buf_size;
 273                         wqbd->wqb.size = buf_size;
 274                         wqbd->wqb.off  = tidx * buf_size;
 275                         wqbd->frag_addr.dw.addr_lo = ADDR_LO(paddr);
 276                         wqbd->frag_addr.dw.addr_hi = ADDR_HI(paddr);
 277                         wq->wqb_freelist[tidx] = wqbd;
 278                         /* increment the addresses */
 279                         paddr += buf_size;
 280                         vaddr += buf_size;
 281                         atomic_inc_32(&wq->wqb_free);
 282                         tidx++;
 283                         if (tidx >= wq->cfg.nbufs)
 284                                 break;
 285                 }
 286                 if (--ncookies > 0) {
 287                         (void) ddi_dma_nextcookie(wq->wqb.dma_handle, &cookie);
 288                 }
 289         } while (ncookies > 0);
 290 
 291         wq->wqbd_next_free = 0;
 292         wq->wqbd_rc_head = 0;
 293 
 294         mutex_init(&wq->wqb_alloc_lock, NULL, MUTEX_DRIVER,
 295             DDI_INTR_PRI(dev->intr_pri));
 296         mutex_init(&wq->wqb_free_lock, NULL, MUTEX_DRIVER,
 297             DDI_INTR_PRI(dev->intr_pri));
 298         return (DDI_SUCCESS);
 299 }
 300 
 301 /*
 302  * function to destroy WQ buffer cache
 303  *
 304  * wq - pointer to WQ structure
 305  *
 306  * return none
 307  */
 308 void
 309 oce_wqb_cache_destroy(struct oce_wq *wq)
 310 {
 311         /* Free Tx buffer dma memory */
 312         oce_free_dma_buffer(wq->parent, &wq->wqb);
 313 
 314         mutex_destroy(&wq->wqb_alloc_lock);
 315         mutex_destroy(&wq->wqb_free_lock);
 316         kmem_free(wq->wqb_freelist,
 317             wq->cfg.nbufs * sizeof (oce_wq_bdesc_t *));
 318         wq->wqb_freelist = NULL;
 319         kmem_free(wq->wq_bdesc_array,
 320             wq->cfg.nbufs * sizeof (oce_wq_bdesc_t));

 321 }
 322 
 323 /*
 324  * function to alloc   WQE buffer descriptor
 325  *
 326  * wq - pointer to WQ structure
 327  *
 328  * return pointer to WQE buffer descriptor
 329  */
 330 static inline oce_wq_bdesc_t *
 331 oce_wqb_alloc(struct oce_wq *wq)
 332 {
 333         oce_wq_bdesc_t *wqbd;
 334         if (oce_atomic_reserve(&wq->wqb_free, 1) < 0) {
 335                 return (NULL);
 336         }
 337 
 338         mutex_enter(&wq->wqb_alloc_lock);
 339         wqbd = wq->wqb_freelist[wq->wqbd_next_free];
 340         wq->wqb_freelist[wq->wqbd_next_free] = NULL;
 341         wq->wqbd_next_free = GET_Q_NEXT(wq->wqbd_next_free, 1, wq->cfg.nbufs);
 342         mutex_exit(&wq->wqb_alloc_lock);
 343 
 344         return (wqbd);
 345 }
 346 
 347 /*
 348  * function to free   WQE buffer descriptor
 349  *
 350  * wq - pointer to WQ structure
 351  * wqbd - pointer to WQ buffer descriptor
 352  *
 353  * return none
 354  */
 355 static inline void
 356 oce_wqb_free(struct oce_wq *wq, oce_wq_bdesc_t *wqbd)
 357 {
 358         mutex_enter(&wq->wqb_free_lock);
 359         wq->wqb_freelist[wq->wqbd_rc_head] = wqbd;
 360         wq->wqbd_rc_head = GET_Q_NEXT(wq->wqbd_rc_head, 1, wq->cfg.nbufs);
 361         atomic_inc_32(&wq->wqb_free);
 362         mutex_exit(&wq->wqb_free_lock);
 363 } /* oce_wqb_free */
 364 
 365 /*
 366  * function to allocate   WQE mapping descriptor
 367  *
 368  * wq - pointer to WQ structure
 369  *
 370  * return pointer to WQE mapping descriptor
 371  */
 372 static inline oce_wq_mdesc_t *
 373 oce_wqm_alloc(struct oce_wq *wq)
 374 {
 375         oce_wq_mdesc_t *wqmd;
 376 
 377         if (oce_atomic_reserve(&wq->wqm_free, 1) < 0) {
 378                 return (NULL);
 379         }
 380 
 381         mutex_enter(&wq->wqm_alloc_lock);
 382         wqmd = wq->wqm_freelist[wq->wqmd_next_free];
 383         wq->wqm_freelist[wq->wqmd_next_free] = NULL;
 384         wq->wqmd_next_free = GET_Q_NEXT(wq->wqmd_next_free, 1, wq->cfg.nhdl);
 385         mutex_exit(&wq->wqm_alloc_lock);
 386 
 387         return (wqmd);
 388 } /* oce_wqm_alloc */
 389 
 390 /*
 391  * function to insert   WQE mapping descriptor to the list
 392  *
 393  * wq - pointer to WQ structure
 394  * wqmd - Pointer to WQ mapping descriptor
 395  *
 396  * return none
 397  */
 398 static inline void
 399 oce_wqm_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
 400 {
 401         mutex_enter(&wq->wqm_free_lock);
 402         wq->wqm_freelist[wq->wqmd_rc_head] = wqmd;
 403         wq->wqmd_rc_head = GET_Q_NEXT(wq->wqmd_rc_head, 1, wq->cfg.nhdl);
 404         atomic_inc_32(&wq->wqm_free);
 405         mutex_exit(&wq->wqm_free_lock);
 406 }
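
A minimal, self-contained sketch of the array-based circular free list that replaces the OCE_LIST calls of the earlier version (written here with pthread mutexes and C11 atomics; the driver's GET_Q_NEXT and oce_atomic_reserve primitives are assumed equivalents and are not shown): an atomic free count reserves an entry before any index is touched, and separate alloc and free locks guard the consumer and producer indices so that allocation and release do not contend on a single lock.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

/* illustrative structure, not part of the oce driver */
typedef struct freelist {
        void            **slots;      /* ring of free descriptors */
        size_t          nslots;
        size_t          next_free;    /* consumer (alloc) index */
        size_t          rc_head;      /* producer (return) index */
        atomic_int      nfree;        /* number of free descriptors */
        pthread_mutex_t alloc_lock;
        pthread_mutex_t free_lock;
} freelist_t;

static void *
freelist_get(freelist_t *fl)
{
        void *item;

        /* reserve one entry; back out if the list is empty */
        if (atomic_fetch_sub(&fl->nfree, 1) <= 0) {
                atomic_fetch_add(&fl->nfree, 1);
                return (NULL);
        }
        pthread_mutex_lock(&fl->alloc_lock);
        item = fl->slots[fl->next_free];
        fl->slots[fl->next_free] = NULL;
        fl->next_free = (fl->next_free + 1) % fl->nslots;
        pthread_mutex_unlock(&fl->alloc_lock);
        return (item);
}

static void
freelist_put(freelist_t *fl, void *item)
{
        pthread_mutex_lock(&fl->free_lock);
        fl->slots[fl->rc_head] = item;
        fl->rc_head = (fl->rc_head + 1) % fl->nslots;
        atomic_fetch_add(&fl->nfree, 1);
        pthread_mutex_unlock(&fl->free_lock);
}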
 407 
 408 /*
 409  * function to free  WQE mapping descriptor
 410  *
 411  * wq - pointer to WQ structure
 412  * wqmd - Pointer to WQ mapping descriptor
 413  *
 414  * return none
 415  */
 416 static void
 417 oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
 418 {
 419         if (wqmd == NULL) {
 420                 return;
 421         }
 422         (void) ddi_dma_unbind_handle(wqmd->dma_handle);
 423         oce_wqm_free(wq, wqmd);
 424 }
 425 


 438         _NOTE(ARGUNUSED(kmflags));
 439 
 440         return (DDI_SUCCESS);
 441 }
 442 
 443 /*
 444  * WQED kmem_cache destructor
 445  *
 446  * buf - pointer to WQE descriptor
 447  *
 448  * return none
 449  */
 450 void
 451 oce_wqe_desc_dtor(void *buf, void *arg)
 452 {
 453         _NOTE(ARGUNUSED(buf));
 454         _NOTE(ARGUNUSED(arg));
 455 }
 456 
 457 /*
 458  * function to populate the single WQE
 459  *
 460  * wq - pointer to wq
 461  * wqed - pointer to WQ entry  descriptor
 462  *
 463  * return none
 464  */
 465 #pragma inline(oce_fill_ring_descs)
 466 static void
 467 oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed)
 468 {
 469 
 470         struct oce_nic_frag_wqe *wqe;
 471         int i;
  472         /* Copy the pre-created WQE descs to the ring descs */
 473         for (i = 0; i < wqed->wqe_cnt; i++) {
 474                 wqe = RING_GET_PRODUCER_ITEM_VA(wq->ring,
 475                     struct oce_nic_frag_wqe);
 476 
 477                 bcopy(&wqed->frag[i], wqe, NIC_WQE_SIZE);


 501         wqbd = oce_wqb_alloc(wq);
 502         if (wqbd == NULL) {
 503                 atomic_inc_32(&dev->tx_noxmtbuf);
 504                 oce_log(dev, CE_WARN, MOD_TX, "%s",
 505                     "wqb pool empty");
 506                 return (ENOMEM);
 507         }
 508 
 509         /* create a fragment wqe for the packet */
 510         wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi = wqbd->frag_addr.dw.addr_hi;
 511         wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo = wqbd->frag_addr.dw.addr_lo;
 512         buf_va = DBUF_VA(wqbd->wqb);
 513 
 514         /* copy pkt into buffer */
 515         for (len = 0; mp != NULL && len < pkt_len; mp = mp->b_cont) {
 516                 bcopy(mp->b_rptr, buf_va, MBLKL(mp));
 517                 buf_va += MBLKL(mp);
 518                 len += MBLKL(mp);
 519         }
 520 
 521         DBUF_SYNC(wqbd->wqb, wqbd->wqb.off, pkt_len, DDI_DMA_SYNC_FORDEV);

 522 
 523         if (oce_fm_check_dma_handle(dev, DBUF_DHDL(wqbd->wqb))) {
 524                 ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
 525                 /* Free the buffer */
 526                 oce_wqb_free(wq, wqbd);
 527                 return (EIO);
 528         }
 529         wqed->frag[wqed->frag_idx].u0.s.frag_len   =  pkt_len;
 530         wqed->frag[wqed->frag_idx].u0.s.rsvd0 = 0;
 531         wqed->hdesc[wqed->nhdl].hdl = (void *)(wqbd);
 532         wqed->hdesc[wqed->nhdl].type = COPY_WQE;
 533         wqed->frag_cnt++;
 534         wqed->frag_idx++;
 535         wqed->nhdl++;
 536         return (0);
 537 } /* oce_bcopy_wqe */
 538 
 539 /*
 540  * function to copy the packet or dma map on the fly depending on size
 541  *
 542  * wq - pointer to WQ
 543  * wqed - Pointer to WQE descriptor
 544  * mp - Pointer to packet chain
 545  *
 546  * return DDI_SUCCESS=>success, DDI_FAILURE=>error
 547  */
 548 static  int
 549 oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
 550     uint32_t pkt_len)


 563         }
 564 
 565         ret = ddi_dma_addr_bind_handle(wqmd->dma_handle,
 566             (struct as *)0, (caddr_t)mp->b_rptr,
 567             pkt_len, DDI_DMA_WRITE | DDI_DMA_STREAMING,
 568             DDI_DMA_DONTWAIT, NULL, &cookie, &ncookies);
 569         if (ret != DDI_DMA_MAPPED) {
 570                 oce_log(dev, CE_WARN, MOD_TX, "MAP FAILED %d",
 571                     ret);
 572                 /* free the last one */
 573                 oce_wqm_free(wq, wqmd);
 574                 return (ENOMEM);
 575         }
 576         do {
 577                 wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi =
 578                     ADDR_HI(cookie.dmac_laddress);
 579                 wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo =
 580                     ADDR_LO(cookie.dmac_laddress);
 581                 wqed->frag[wqed->frag_idx].u0.s.frag_len =
 582                     (uint32_t)cookie.dmac_size;
 583                 wqed->frag[wqed->frag_idx].u0.s.rsvd0 = 0;
 584                 wqed->frag_cnt++;
 585                 wqed->frag_idx++;
 586                 if (--ncookies > 0)
 587                         ddi_dma_nextcookie(wqmd->dma_handle, &cookie);
 588                 else
 589                         break;
 590         } while (ncookies > 0);
 591 
 592         wqed->hdesc[wqed->nhdl].hdl = (void *)wqmd;
 593         wqed->hdesc[wqed->nhdl].type = MAPPED_WQE;
 594         wqed->nhdl++;
 595         return (0);
 596 } /* oce_map_wqe */
 597 
 598 static inline int
 599 oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm)
 600 {
 601         struct oce_nic_tx_cqe *cqe;
 602         uint16_t num_cqe = 0;
 603         struct oce_cq *cq;
 604         oce_wqe_desc_t *wqed;
 605         int wqe_freed = 0;
 606         struct oce_dev *dev;
 607         list_t wqe_desc_list;
 608 
 609         cq  = wq->cq;
 610         dev = wq->parent;
 611 
 612         DBUF_SYNC(cq->ring->dbuf, 0, 0, DDI_DMA_SYNC_FORKERNEL);
 613 
 614         list_create(&wqe_desc_list, sizeof (oce_wqe_desc_t),
 615             offsetof(oce_wqe_desc_t, link));
 616 
 617         mutex_enter(&wq->txc_lock);
 618         cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
 619         while (WQ_CQE_VALID(cqe)) {
 620 
 621                 DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe));
 622 
 623                 /* update stats */
 624                 if (cqe->u0.s.status != 0) {
 625                         atomic_inc_32(&dev->tx_errors);
 626                 }
 627 
 628                 mutex_enter(&wq->wqed_list_lock);
 629                 wqed = list_remove_head(&wq->wqe_desc_list);
 630                 mutex_exit(&wq->wqed_list_lock);
 631                 if (wqed == NULL) {
 632                         oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
 633                             "oce_process_tx_compl: wqed list empty");
 634                         break;
 635                 }
 636                 atomic_dec_32(&wq->wqe_pending);
 637 
 638                 wqe_freed += wqed->wqe_cnt;
 639                 list_insert_tail(&wqe_desc_list, wqed);
 640                 /* clear the valid bit and progress cqe */
 641                 WQ_CQE_INVALIDATE(cqe);
 642                 RING_GET(cq->ring, 1);
 643 
 644                 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
 645                     struct oce_nic_tx_cqe);
 646                 num_cqe++;
 647         } /* for all valid CQE */
 648 
 649         if (num_cqe == 0 && wq->wqe_pending > 0) {
 650                 mutex_exit(&wq->txc_lock);
 651                 return (0);
 652         }
 653 
 654         DBUF_SYNC(cq->ring->dbuf, 0, 0, DDI_DMA_SYNC_FORDEV);
 655         oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm);
 656 
 657         RING_GET(wq->ring, wqe_freed);
 658         atomic_add_32(&wq->wq_free, wqe_freed);
 659         if (wq->resched && wq->wq_free >= OCE_MAX_TX_HDL) {
 660                 wq->resched = B_FALSE;
 661                 mac_tx_ring_update(dev->mac_handle, wq->handle);
 662         }
 663         wq->last_compl = ddi_get_lbolt();
 664         mutex_exit(&wq->txc_lock);
 665         while (wqed = list_remove_head(&wqe_desc_list)) {
 666                 oce_free_wqed(wq, wqed);
 667         }
 668         list_destroy(&wqe_desc_list);
 669         return (num_cqe);
 670 } /* oce_process_tx_completion */
 671 
 672 /*
 673  * function to drain a TxCQ and process its CQEs
 674  *
 675  * dev - software handle to the device
 676  * cq - pointer to the cq to drain
 677  *
 678  * return the number of CQEs processed
 679  */
 680 void *
 681 oce_drain_wq_cq(void *arg, int arg2, int arg3)
 682 {

 683         struct oce_dev *dev;
 684         struct oce_wq *wq;
 685 
 686         _NOTE(ARGUNUSED(arg2));
 687         _NOTE(ARGUNUSED(arg3));
 688 
 689         wq = (struct oce_wq *)arg;
 690         dev = wq->parent;
 691         wq->last_intr = ddi_get_lbolt();
  692         /* process CQEs until we reach one that is not valid */
 693         (void) oce_process_tx_compl(wq, B_FALSE);
 694         (void) atomic_cas_uint(&wq->qmode, OCE_MODE_INTR, OCE_MODE_POLL);
 695         if ((wq->wq_free > OCE_MAX_TX_HDL) && wq->resched) {

 696                 wq->resched = B_FALSE;
 697                 mac_tx_ring_update(dev->mac_handle, wq->handle);
 698         }
 699         return (NULL);
 700 

 701 } /* oce_process_wq_cqe */
 702 
 703 
 704 boolean_t
 705 oce_tx_stall_check(struct oce_dev *dev)
 706 {
 707         struct oce_wq *wq;
 708         int ring = 0;
 709         boolean_t is_stalled = B_FALSE;
 710 
 711         if (!(dev->state & STATE_MAC_STARTED) ||
 712             (dev->link_status != LINK_STATE_UP)) {
 713                 return (B_FALSE);
 714         }
 715 
 716         for (ring = 0; ring < dev->tx_rings; ring++) {
 717                 wq = &dev->wq[ring];
 718 
 719                 if (wq->resched) {
 720                         if (wq->wq_free > OCE_MAX_TX_HDL) {
 721                                 mac_tx_ring_update(dev->mac_handle, wq->handle);
 722                         } else {
 723                                 /* enable the interrupts only once */
 724                                 if (atomic_cas_uint(&wq->qmode, OCE_MODE_POLL,
 725                                     OCE_MODE_INTR) == OCE_MODE_POLL) {
 726                                         oce_arm_cq(dev, wq->cq->cq_id, 0,
 727                                             B_TRUE);
 728                                 }
 729                         }
 730                 }
 731         }
 732         return (is_stalled);
 733 }
 734 /*
 735  * Function to check whether TX stall
 736  * can occur for an IPV6 packet for
 737  * some versions of BE cards
 738  *
 739  * dev - software handle to the device
 740  * mp - Pointer to packet chain
 741  * ipoffset - ip header offset in mp chain
 742  *
 743  * return B_TRUE or B_FALSE
 744  */
 745 
 746 static inline int
 747 oce_check_ipv6_tx_stall(struct oce_dev *dev,
 748         mblk_t *mp, uint32_t ip_offset) {
 749 
 750         _NOTE(ARGUNUSED(dev));
 751         ip6_t *ipv6_hdr;
 752         struct ip6_opt *v6_op;
 753         ipv6_hdr =  (ip6_t *)(void *)
 754                 (mp->b_rptr + ip_offset);
 755         v6_op = (struct ip6_opt *)(ipv6_hdr+1);
 756         if (ipv6_hdr->ip6_nxt  != IPPROTO_TCP &&
 757             ipv6_hdr->ip6_nxt  != IPPROTO_UDP &&
 758             v6_op->ip6o_len == 0xFF) {
 759                 return (B_TRUE);
 760         } else {
 761                 return (B_FALSE);
 762         }
 763 }
 764 /*
 765  * Function to insert VLAN Tag to
  766  * mp chain
 767  *
 768  * dev - software handle to the device
  769  * mblk_head - Pointer holding packet chain
 770  *
 771  * return DDI_FAILURE or DDI_SUCCESS
 772  */
 773 
 774 static int
 775 oce_ipv6_tx_stall_workaround(struct oce_dev *dev,
 776     mblk_t **mblk_head)
 777 {
 778         mblk_t *mp;
 779         struct ether_vlan_header *evh;
 780         mp = allocb(OCE_HDR_LEN, BPRI_HI);
 781         if (mp == NULL) {
 782                 return (DDI_FAILURE);
 783         }
 784         /* copy the destination and source MAC addresses */
 785         (void) memcpy(mp->b_rptr, (*mblk_head)->b_rptr, 2 * ETHERADDRL);
 786         evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
 787         evh->ether_tpid = htons(VLAN_TPID);
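             /* use the configured port VLAN id when set, otherwise the QnQ tag */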
 788         evh->ether_tci = ((dev->pvid > 0) ? LE_16(dev->pvid) :
 789             htons(dev->QnQ_tag));
 790         mp->b_wptr = mp->b_rptr + (2 * ETHERADDRL) + VTAG_SIZE;
 791         (*mblk_head)->b_rptr += 2 * ETHERADDRL;
 792 
 793         if (MBLKL(*mblk_head) > 0) {
 794                 mp->b_cont = *mblk_head;
 795         } else {
 796                 mp->b_cont = (*mblk_head)->b_cont;
 797                 freeb(*mblk_head);
 798         }
 799         *mblk_head = mp;
 800         return (DDI_SUCCESS);
 801 }
 802 
 803 /*
 804  * function to transmit a single packet over the wire
 805  *
 806  * wq - pointer to WQ
 807  * mp - pointer to the packet chain
 808  *
 809  * return NULL if the chain was consumed, or the original chain
 810  * if the send must be retried
 811  */
 811 mblk_t *
 812 oce_send_packet(struct oce_wq *wq, mblk_t *mp)
 813 {
 814         struct oce_nic_hdr_wqe *wqeh;
 815         struct oce_dev *dev;
 816         struct ether_header *eh;
 817         struct ether_vlan_header *evh;
 818         int32_t num_wqes;
 819         uint16_t etype;
 820         uint32_t ip_offset;
 821         uint32_t csum_flags = 0;
 822         boolean_t use_copy = B_FALSE;
 823         boolean_t tagged   = B_FALSE;
 824         boolean_t ipv6_stall   = B_FALSE;
 825         uint32_t  reg_value = 0;
 826         oce_wqe_desc_t *wqed = NULL;
 827         mblk_t *nmp = NULL;
 828         mblk_t *tmp = NULL;
 829         uint32_t pkt_len = 0;
 830         int num_mblks = 0;
 831         int ret = 0;
 832         uint32_t mss = 0;
 833         uint32_t flags = 0;
 834         int len = 0;
 835 
 836         /* retrieve the adap priv struct ptr */
 837         dev = wq->parent;
 838 
 839         /* check if we have enough free slots */
 840         if (wq->wq_free < dev->tx_reclaim_threshold) {
 841                 (void) oce_process_tx_compl(wq, B_FALSE);
 842         }
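             /* still not enough slots for a maximally fragmented packet; defer and retry later */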
 843         if (wq->wq_free < OCE_MAX_TX_HDL) {
 844                 wq->resched = B_TRUE;
 845                 wq->last_defered = ddi_get_lbolt();
 846                 atomic_inc_32(&wq->tx_deferd);
 847                 return (mp);
 848         }
 849 
 850         /* walk the chain to compute the total length and fragment count */
 851         for (tmp = mp; tmp != NULL; tmp = tmp->b_cont) {
 852                 pkt_len += MBLKL(tmp);
 853                 num_mblks++;
 854         }
 855 
 856         if (pkt_len == 0 || num_mblks == 0) {
 857                 freemsg(mp);
 858                 return (NULL);
 859         }
 860 
 861         /* retrieve LSO information */
 862         mac_lso_get(mp, &mss, &flags);
 863 
 864         /* get the offload flags */
 865         mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &csum_flags);
 866 
 867         /* restrict the number of mapped segments to what we support */
 868         if (num_mblks  > OCE_MAX_TX_HDL) {
 869                 nmp = msgpullup(mp, -1);
 870                 if (nmp == NULL) {
 871                         atomic_inc_32(&wq->pkt_drops);
 872                         freemsg(mp);
 873                         return (NULL);
 874                 }
 875                 /* Reset it to new collapsed mp */
 876                 freemsg(mp);
 877                 mp = nmp;
 878                 /* restore the flags on new mp */
 879                 if (flags & HW_LSO) {
 880                         DB_CKSUMFLAGS(mp) |= HW_LSO;
 881                         DB_LSOMSS(mp) = (uint16_t)mss;
 882                 }
 883                 if (csum_flags != 0) {
 884                         DB_CKSUMFLAGS(mp) |= csum_flags;
 885                 }
 886         }
 887 
 888         /* Get the packet descriptor for Tx */
 889         wqed = kmem_cache_alloc(wq->wqed_cache, KM_NOSLEEP);
 890         if (wqed == NULL) {
 891                 atomic_inc_32(&wq->pkt_drops);
 892                 freemsg(mp);
 893                 return (NULL);
 894         }
 895 
 896         /* Save the WQ pointer */
 897         wqed->wq = wq;
 898         wqed->frag_idx = 1; /* index zero is always header */
 899         wqed->frag_cnt = 0;
 900         wqed->nhdl = 0;
 901         wqed->mp = NULL;
 902 
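             /* examine the Ethernet header to determine the ethertype and IP header offset */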
 903         eh = (struct ether_header *)(void *)mp->b_rptr;
 904         if (ntohs(eh->ether_type) == VLAN_TPID) {
 905                 evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
 906                 tagged = B_TRUE;
 907                 etype = ntohs(evh->ether_type);
 908                 ip_offset = sizeof (struct ether_vlan_header);
 909 
 910         } else {
 911                 etype = ntohs(eh->ether_type);
 912                 ip_offset = sizeof (struct ether_header);
 913         }
 914         /* Check whether the IPv6 TX stall workaround is required */
 915         if (BE3_A1(dev) && (etype == ETHERTYPE_IPV6) &&
 916             ((dev->QnQ_valid) || (!tagged && dev->pvid != 0))) {
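                     /* pull up enough of the chain to examine the IPv6 header and first option */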
 917                 len = ip_offset + sizeof (ip6_t) + sizeof (struct ip6_opt);
 918                 if (MBLKL(mp) < len) {
 919                         nmp = msgpullup(mp, len);
 920                         if (nmp == NULL) {
 921                                 oce_free_wqed(wq, wqed);
 922                                 atomic_inc_32(&wq->pkt_drops);
 923                                 freemsg(mp);
 924                                 return (NULL);
 925                         }
 926                         freemsg(mp);
 927                         mp = nmp;
 928                 }
 929                 ipv6_stall = oce_check_ipv6_tx_stall(dev, mp, ip_offset);
 930                 if (ipv6_stall) {
 931                         if (dev->QnQ_queried)
 932                                 ret = oce_ipv6_tx_stall_workaround(dev, &mp);
 933                         else {
 934                                 /* FW Workaround not available */
 935                                 ret = DDI_FAILURE;
 936                         }
 937                         if (ret) {
 938                                 oce_free_wqed(wq, wqed);
 939                                 atomic_inc_32(&wq->pkt_drops);
 940                                 freemsg(mp);
 941                                 return (NULL);
 942                         }
 943                         pkt_len += VTAG_SIZE;
 944                 }
 945         }
 946 
 947         /* If the entire packet is smaller than the copy limit, just copy it */
 948         if (pkt_len < dev->tx_bcopy_limit) {
 949                 use_copy = B_TRUE;
 950                 ret = oce_bcopy_wqe(wq, wqed, mp, pkt_len);
 951         } else {
 952                 /* copy or dma map the individual fragments */
 953                 for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
 954                         len = MBLKL(nmp);
 955                         if (len == 0) {
 956                                 continue;
 957                         }
 958                         if (len < dev->tx_bcopy_limit) {
 959                                 ret = oce_bcopy_wqe(wq, wqed, nmp, len);
 960                         } else {
 961                                 ret = oce_map_wqe(wq, wqed, nmp, len);
 962                         }
 963                         if (ret != 0)
 964                                 break;
 965                 }
 966         }


 983         if (flags & HW_LSO) {
 984                 wqeh->u0.s.lso = B_TRUE;
 985                 wqeh->u0.s.lso_mss = mss;
 986         }
 987         if (csum_flags & HCK_FULLCKSUM) {
 988                 uint8_t *proto;
 989                 if (etype == ETHERTYPE_IP) {
 990                         proto = (uint8_t *)(void *)
 991                             (mp->b_rptr + ip_offset);
 992                         if (proto[9] == 6)
 993                                 /* IPPROTO_TCP */
 994                                 wqeh->u0.s.tcpcs = B_TRUE;
 995                         else if (proto[9] == 17)
 996                                 /* IPPROTO_UDP */
 997                                 wqeh->u0.s.udpcs = B_TRUE;
 998                 }
 999         }
1000 
1001         if (csum_flags & HCK_IPV4_HDRCKSUM)
1002                 wqeh->u0.s.ipcs = B_TRUE;
1003         if (ipv6_stall) {
1004                 wqeh->u0.s.complete = B_FALSE;
1005                 wqeh->u0.s.event = B_TRUE;
1006         } else {
1007 
1008                 wqeh->u0.s.complete = B_TRUE;
1009                 wqeh->u0.s.event = B_TRUE;
1010         }
1011         wqeh->u0.s.crc = B_TRUE;
1012         wqeh->u0.s.total_length = pkt_len;
1013 
1014         num_wqes = wqed->frag_cnt + 1;
1015 
1016         /* h/w (other than Lancer) expects an even number of WQEs; pad with a zeroed WQE */
1017         if ((num_wqes & 0x1) && !(LANCER_CHIP(dev))) {
1018                 bzero(&wqed->frag[num_wqes], sizeof (struct oce_nic_frag_wqe));
1019                 num_wqes++;
1020         }
1021         wqed->wqe_cnt = (uint16_t)num_wqes;
1022         wqeh->u0.s.num_wqe = num_wqes;
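             /* byte-swap the WQEs into the adapter's expected order, as needed for the host byte order */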
1023         DW_SWAP(u32ptr(&wqed->frag[0]), (wqed->wqe_cnt * NIC_WQE_SIZE));
1024 
1025         mutex_enter(&wq->tx_lock);
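             /* defer if the ring cannot hold these WQEs (two slots are kept in reserve) */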
1026         if (num_wqes > wq->wq_free - 2) {
1027                 atomic_inc_32(&wq->tx_deferd);
1028                 mutex_exit(&wq->tx_lock);
1029                 goto wqe_fail;
1030         }
1031         atomic_add_32(&wq->wq_free, -num_wqes);
1032 
1033         /* fill the wq for adapter */
1034         oce_fill_ring_descs(wq, wqed);
1035 
1036         /* Set the mp pointer in the wqe descriptor */
1037         if (use_copy == B_FALSE) {
1038                 wqed->mp = mp;
1039         }
1040         /* Add the packet desc to list to be retrieved during cmpl */
1041         mutex_enter(&wq->wqed_list_lock);
1042         list_insert_tail(&wq->wqe_desc_list,  wqed);
1043         mutex_exit(&wq->wqed_list_lock);
1044         atomic_inc_32(&wq->wqe_pending);
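             /* sync the ring descriptors for the device before ringing the doorbell */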
1045         DBUF_SYNC(wq->ring->dbuf, 0, 0, DDI_DMA_SYNC_FORDEV);
1046 
1047         /* ring tx doorbell */
1048         reg_value = (num_wqes << 16) | wq->wq_id;
1049         /* bits 31:16: number of WQEs posted, bits 15:0: WQ id */
1050         OCE_DB_WRITE32(dev, PD_TXULP_DB, reg_value);
1051 
1052         /* update the ring stats */
1053         wq->stat_bytes += pkt_len;
1054         wq->stat_pkts++;
1055 
1056         mutex_exit(&wq->tx_lock);
1057         if (oce_fm_check_acc_handle(dev, dev->db_handle) != DDI_FM_OK) {
1058                 ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
1059         }
1060 
1061         /* the data was copied into WQ buffers, so the chain can be freed now */
1062         if (use_copy == B_TRUE) {
1063                 freemsg(mp);
1064         }
1065         return (NULL);
1066 
1067 wqe_fail:
1068 
1069         oce_free_wqed(wq, wqed);
1070         wq->resched = B_TRUE;
1071         wq->last_defered = ddi_get_lbolt();
1072         return (mp);
1073 } /* oce_send_packet */
1074 
1075 /*
1076  * function to free the WQE descriptor
1077  *
1078  * wq - pointer to WQ
1079  * wqed - Pointer to WQE descriptor
1080  *
1081  * return none
1082  */
1083 #pragma inline(oce_free_wqed)
1084 static void
1085 oce_free_wqed(struct oce_wq *wq, oce_wqe_desc_t *wqed)
1086 {
1087         int i = 0;
1088         if (wqed == NULL) {
1089                 return;
1090         }
1091 


1120  * function to stop the WQ and reclaim outstanding Tx descriptors
1121  *
1122  * wq - pointer to WQ
1123  *
1124  * return none
1125  */
1126 void
1127 oce_clean_wq(struct oce_wq *wq)
1128 {
1129         oce_wqe_desc_t *wqed;
1130         int ti;
1131 
1132         /* Wait for already posted Tx to complete */
1133 
1134         for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
1135                 (void) oce_process_tx_compl(wq, B_FALSE);
1136                 OCE_MSDELAY(1);
1137         }
1138 
1139         /* Free the remaining descriptors */
1140         mutex_enter(&wq->wqed_list_lock);
1141         while ((wqed = list_remove_head(&wq->wqe_desc_list)) != NULL) {
1142                 atomic_add_32(&wq->wq_free, wqed->wqe_cnt);
1143                 oce_free_wqed(wq, wqed);
1144         }
1145         mutex_exit(&wq->wqed_list_lock);
1146         oce_drain_eq(wq->cq->eq);
1147 } /* oce_clean_wq */