1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27
28
29 /*
30 * Source file containing the Receive Path handling
31 * functions
32 */
33 #include <oce_impl.h>
34
35
36 void oce_rx_pool_free(char *arg);
37 static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
38
39 static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
40 struct oce_nic_rx_cqe *cqe);
41 static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
42 struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
43 static int oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost);
44 static inline void oce_rx_insert_tag(struct oce_dev *dev, mblk_t *mp,
45 uint16_t vtag);
46 static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
47 static inline void oce_rx_drop_pkt(struct oce_rq *rq,
48 struct oce_nic_rx_cqe *cqe);
49 static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
50 static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
51 static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs);
52 static boolean_t oce_check_tagged(struct oce_dev *dev,
53 struct oce_nic_rx_cqe *cqe);
54
55 #pragma inline(oce_rx)
56 #pragma inline(oce_rx_bcopy)
57 #pragma inline(oce_rq_charge)
58 #pragma inline(oce_rx_insert_tag)
59 #pragma inline(oce_set_rx_oflags)
60 #pragma inline(oce_rx_drop_pkt)
61 #pragma inline(oce_rqb_alloc)
62 #pragma inline(oce_rqb_free)
63 #pragma inline(oce_rq_post_buffer)
64
65 static ddi_dma_attr_t oce_rx_buf_attr = {
66 DMA_ATTR_V0, /* version number */
67 0x0000000000000000ull, /* low address */
68 0xFFFFFFFFFFFFFFFFull, /* high address */
69 0x00000000FFFFFFFFull, /* dma counter max */
70 OCE_DMA_ALIGNMENT, /* alignment */
71 0x000007FF, /* burst sizes */
72 0x00000001, /* minimum transfer size */
73 0x00000000FFFFFFFFull, /* maximum transfer size */
74 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
75 1, /* scatter/gather list length */
76 0x00000001, /* granularity */
77 DDI_DMA_RELAXED_ORDERING /* DMA flags */
78 };
79
/*
 * function to create a DMA buffer pool for an RQ
 *
 * rq - pointer to the RQ for which the buffer pool is created
 * buf_size - size of each buffer in the pool
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
89 int
90 oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
91 {
92 oce_rq_bdesc_t *rqbd;
93 struct oce_dev *dev;
94 uint32_t size;
95 uint64_t paddr;
96 caddr_t vaddr;
97 int ncookies = 0;
98 int bufs_per_cookie = 0;
99 int ridx = 0;
100 int i = 0;
101 ddi_dma_cookie_t cookie;
102 int ret;
103
104 rqbd = rq->rq_bdesc_array;
105 size = buf_size * rq->cfg.nbufs;
106 dev = rq->parent;
107
108 oce_rx_buf_attr.dma_attr_granular = (uint32_t)buf_size;
109 if (DDI_FM_DMA_ERR_CAP(dev->fm_caps)) {
110 oce_rx_buf_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
111 }
112
        /*
         * Try to get one big chunk; with an IOMMU the cookie count
         * is normally 1.
         */
114 oce_rx_buf_attr.dma_attr_sgllen = 1;
115 ret = oce_alloc_dma_buffer(dev, &rq->rqb, size, &oce_rx_buf_attr,
116 (DDI_DMA_RDWR|DDI_DMA_STREAMING));
        /* retry, allowing the allocation to span page-sized cookies */
118 if (ret != DDI_SUCCESS) {
119 oce_rx_buf_attr.dma_attr_sgllen =
120 size/ddi_ptob(dev->dip, (ulong_t)1) + 2;
121 ret = oce_alloc_dma_buffer(dev, &rq->rqb, size,
122 &oce_rx_buf_attr, (DDI_DMA_RDWR | DDI_DMA_STREAMING));
123 if (ret != DDI_SUCCESS) {
124 return (DDI_FAILURE);
125 }
126 }
127
128 ncookies = rq->rqb.ncookies;
        /* Set the starting virtual address and the first DMA cookie */
131 vaddr = rq->rqb.base;
132 cookie = rq->rqb.cookie;
133
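        /*
         * Carve each DMA cookie into buf_size chunks and set up one
         * buffer descriptor, with a loanable mblk, for each chunk.
         */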
134 do {
135 paddr = cookie.dmac_laddress;
136 bufs_per_cookie = cookie.dmac_size/buf_size;
137 for (i = 0; i < bufs_per_cookie; i++, rqbd++) {
138 rqbd->mp = desballoc((uchar_t *)vaddr, buf_size, 0,
139 &rqbd->fr_rtn);
140 if (rqbd->mp == NULL) {
141 goto desb_fail;
142 }
                        /* Set the free callback and its argument */
144 rqbd->fr_rtn.free_func = (void (*)())oce_rx_pool_free;
145 rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
146 /* Populate the DMA object for each buffer */
147 rqbd->rqb.acc_handle = rq->rqb.acc_handle;
148 rqbd->rqb.dma_handle = rq->rqb.dma_handle;
149 rqbd->rqb.base = vaddr;
150 rqbd->rqb.addr = paddr;
151 rqbd->rqb.len = buf_size;
152 rqbd->rqb.size = buf_size;
153 rqbd->rqb.off = ridx * buf_size;
154 rqbd->rq = rq;
155 rqbd->frag_addr.dw.addr_lo = ADDR_LO(paddr);
156 rqbd->frag_addr.dw.addr_hi = ADDR_HI(paddr);
157 rq->rqb_freelist[ridx] = rqbd;
158 /* increment the addresses */
159 paddr += buf_size;
160 vaddr += buf_size;
161 ridx++;
162 if (ridx >= rq->cfg.nbufs) {
163 break;
164 }
165 }
166 if (--ncookies > 0) {
167 (void) ddi_dma_nextcookie(rq->rqb.dma_handle, &cookie);
168 }
169 } while (ncookies > 0);
170
171 rq->rqb_free = rq->cfg.nbufs;
172 rq->rqb_rc_head = 0;
173 rq->rqb_next_free = 0;
174 return (DDI_SUCCESS);
175
176 desb_fail:
177 oce_rqb_cache_destroy(rq);
178 return (DDI_FAILURE);
179 } /* oce_rqb_cache_create */
180
/*
 * function to destroy the RQ DMA buffer cache
 *
 * rq - pointer to the RQ whose buffer cache is to be destroyed
 *
 * return none
 */
188 void
189 oce_rqb_cache_destroy(struct oce_rq *rq)
190 {
191 oce_rq_bdesc_t *rqbd = NULL;
192 int cnt;
193
194 rqbd = rq->rq_bdesc_array;
195 for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
196 oce_rqb_dtor(rqbd);
197 }
198
199 oce_free_dma_buffer(rq->parent, &rq->rqb);
200 } /* oce_rqb_cache_destroy */
201
202 /*
203 * RQ buffer destructor function
204 *
205 * rqbd - pointer to rq buffer descriptor
206 *
207 * return none
208 */
209 static void
210 oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
211 {
212 if ((rqbd == NULL) || (rqbd->rq == NULL)) {
213 return;
214 }
215 if (rqbd->mp != NULL) {
216 rqbd->fr_rtn.free_arg = NULL;
217 freemsg(rqbd->mp);
218 rqbd->mp = NULL;
219 }
220 } /* oce_rqb_dtor */
221
222
223 /*
224 * RQ buffer allocator function
225 *
226 * rq - pointer to RQ structure
227 *
228 * return pointer to RQ buffer descriptor
229 */
230 static inline oce_rq_bdesc_t *
231 oce_rqb_alloc(struct oce_rq *rq)
232 {
233 oce_rq_bdesc_t *rqbd;
234 uint32_t free_index;
235 free_index = rq->rqb_next_free;
236 rqbd = rq->rqb_freelist[free_index];
237 rq->rqb_freelist[free_index] = NULL;
238 rq->rqb_next_free = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
239 return (rqbd);
240 } /* oce_rqb_alloc */
241
242 /*
243 * function to free the RQ buffer
244 *
245 * rq - pointer to RQ structure
 * rqbd - pointer to the receive buffer descriptor
247 *
248 * return none
249 */
250 static inline void
251 oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
252 {
253 uint32_t free_index;
254 mutex_enter(&rq->rc_lock);
255 free_index = rq->rqb_rc_head;
256 rq->rqb_freelist[free_index] = rqbd;
257 rq->rqb_rc_head = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
258 mutex_exit(&rq->rc_lock);
259 atomic_inc_32(&rq->rqb_free);
260 } /* oce_rqb_free */
261
262
263
264
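/*
 * function to post receive buffers to the device by ringing the RXULP
 * doorbell, at most OCE_MAX_RQ_POSTS buffers per doorbell write
 *
 * rq - pointer to the RQ that was charged with buffers
 * nbufs - number of buffers to post
 *
 * return none
 */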
static void
oce_rq_post_buffer(struct oce_rq *rq, int nbufs)
266 {
267 pd_rxulp_db_t rxdb_reg;
268 int count;
269 struct oce_dev *dev = rq->parent;
270
271
272 rxdb_reg.dw0 = 0;
273 rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
274
275 for (count = nbufs/OCE_MAX_RQ_POSTS; count > 0; count--) {
276 rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
277 OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
278 rq->buf_avail += OCE_MAX_RQ_POSTS;
279 nbufs -= OCE_MAX_RQ_POSTS;
280 }
281 if (nbufs > 0) {
282 rxdb_reg.bits.num_posted = nbufs;
283 OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
284 rq->buf_avail += nbufs;
285 }
286 }
/*
 * function to charge a given RQ with buffers from the pool's free list
 *
 * rq - pointer to the RQ to charge
 * nbufs - number of buffers to be charged
 * repost - B_TRUE to repost buffers already held on the shadow ring
 *
 * return number of RQEs charged
 */
296 static inline int
297 oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost)
298 {
299 struct oce_nic_rqe *rqe;
300 oce_rq_bdesc_t *rqbd;
301 oce_rq_bdesc_t **shadow_rq;
302 int cnt;
303 int cur_index;
304 oce_ring_buffer_t *ring;
305
306 shadow_rq = rq->shadow_ring;
307 ring = rq->ring;
308 cur_index = ring->cidx;
309
310 for (cnt = 0; cnt < nbufs; cnt++) {
311 if (!repost) {
312 rqbd = oce_rqb_alloc(rq);
313 } else {
314 /* just repost the buffers from shadow ring */
315 rqbd = shadow_rq[cur_index];
316 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
317 }
318 /* fill the rqes */
319 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring,
320 struct oce_nic_rqe);
321 rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
322 rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
323 shadow_rq[rq->ring->pidx] = rqbd;
324 DW_SWAP(u32ptr(rqe), sizeof (struct oce_nic_rqe));
325 RING_PUT(rq->ring, 1);
326 }
327
328 return (cnt);
329 } /* oce_rq_charge */
330
/*
 * function to release the posted buffers
 *
 * rq - pointer to the RQ to be discharged
 *
 * return none
 */
338 void
339 oce_rq_discharge(struct oce_rq *rq)
340 {
341 oce_rq_bdesc_t *rqbd;
342 oce_rq_bdesc_t **shadow_rq;
343
344 shadow_rq = rq->shadow_ring;
        /* Free the posted buffers since the RQ is already being destroyed */
346 while ((int32_t)rq->buf_avail > 0) {
347 rqbd = shadow_rq[rq->ring->cidx];
348 oce_rqb_free(rq, rqbd);
349 RING_GET(rq->ring, 1);
350 rq->buf_avail--;
351 }
352 }
/*
 * function to process a single packet received on an RQ
 *
 * dev - software handle to the device
 * rq - pointer to the RQ on which the packet was received
 * cqe - pointer to the RX completion queue entry
 *
 * return mblk pointer => success, NULL => error
 */
362 static inline mblk_t *
363 oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
364 {
365 mblk_t *mp;
366 int pkt_len;
367 int32_t frag_cnt = 0;
368 mblk_t **mblk_tail;
369 mblk_t *mblk_head;
370 int frag_size;
371 oce_rq_bdesc_t *rqbd;
372 uint16_t cur_index;
373 oce_ring_buffer_t *ring;
374 int i;
375 uint32_t hdr_len;
376
377 frag_cnt = cqe->u0.s.num_fragments & 0x7;
378 mblk_head = NULL;
379 mblk_tail = &mblk_head;
380
381 ring = rq->ring;
382 cur_index = ring->cidx;
383
        /* Get the packet length from the CQE */
385 pkt_len = cqe->u0.s.pkt_size;
386
387 if (pkt_len == 0) {
388 return (NULL);
389 }
390
391 for (i = 0; i < frag_cnt; i++) {
392 rqbd = rq->shadow_ring[cur_index];
393 if (rqbd->mp == NULL) {
394 rqbd->mp = desballoc((uchar_t *)rqbd->rqb.base,
395 rqbd->rqb.size, 0, &rqbd->fr_rtn);
396 if (rqbd->mp == NULL) {
397 return (NULL);
398 }
399 }
400
401 mp = rqbd->mp;
402 frag_size = (pkt_len > rq->cfg.frag_size) ?
403 rq->cfg.frag_size : pkt_len;
404 mp->b_wptr = mp->b_rptr + frag_size;
405 pkt_len -= frag_size;
406 mp->b_next = mp->b_cont = NULL;
407 /* Chain the message mblks */
408 *mblk_tail = mp;
409 mblk_tail = &mp->b_cont;
410 DBUF_SYNC(rqbd->rqb, rqbd->rqb.off, rqbd->rqb.len,
411 DDI_DMA_SYNC_FORCPU);
412 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
413 }
414
415 if (mblk_head == NULL) {
416 oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
417 return (NULL);
418 }
        /* coalesce headers (+ VLAN tag, if any) into the first mblk */
420 mp = allocb(OCE_HDR_LEN, BPRI_HI);
421 if (mp == NULL) {
422 return (NULL);
423 }
424 /* Align the IP header */
425 mp->b_rptr += OCE_IP_ALIGN;
426
427 if (oce_check_tagged(dev, cqe)) {
428 hdr_len = min(MBLKL(mblk_head), OCE_HDR_LEN) -
429 VTAG_SIZE - OCE_IP_ALIGN;
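                /*
                 * Copy DA + SA (2 * ETHERADDRL bytes), insert the
                 * 4-byte VLAN tag after them, then copy the rest of
                 * the header past the tag.
                 */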
430 (void) memcpy(mp->b_rptr, mblk_head->b_rptr, 2 * ETHERADDRL);
431 oce_rx_insert_tag(dev, mp, cqe->u0.s.vlan_tag);
432 (void) memcpy(mp->b_rptr + 16, mblk_head->b_rptr + 12,
433 hdr_len - 12);
434 mp->b_wptr = mp->b_rptr + VTAG_SIZE + hdr_len;
435 } else {
436
437 hdr_len = min(MBLKL(mblk_head), OCE_HDR_LEN) - OCE_IP_ALIGN;
438 (void) memcpy(mp->b_rptr, mblk_head->b_rptr, hdr_len);
439 mp->b_wptr = mp->b_rptr + hdr_len;
440 }
441 mblk_head->b_rptr += hdr_len;
442 if (MBLKL(mblk_head) > 0) {
443 mp->b_cont = mblk_head;
444 } else {
445 mp->b_cont = mblk_head->b_cont;
446 freeb(mblk_head);
447 }
        /* replace the loaned buffers with new ones */
449 (void) oce_rq_charge(rq, frag_cnt, B_FALSE);
450 atomic_add_32(&rq->pending, frag_cnt);
451 return (mp);
452 } /* oce_rx */
453
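/*
 * function to process a received packet by copying it out of the posted
 * DMA buffers into a freshly allocated mblk; used for small packets and
 * when no spare buffers are available to loan upstream
 *
 * dev - software handle to the device
 * rq - pointer to the RQ on which the packet was received
 * cqe - pointer to the RX completion queue entry
 *
 * return mblk pointer => success, NULL => error
 */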
454 static inline mblk_t *
455 oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
456 {
457 mblk_t *mp;
458 int pkt_len;
459 int32_t frag_cnt = 0;
460 int frag_size;
461 oce_rq_bdesc_t *rqbd;
462 uint32_t cur_index;
463 oce_ring_buffer_t *ring;
464 oce_rq_bdesc_t **shadow_rq;
465 int cnt = 0;
466 pkt_len = cqe->u0.s.pkt_size;
467
468 if (pkt_len == 0) {
469 return (NULL);
470 }
471
472 mp = allocb(pkt_len + OCE_RQE_BUF_HEADROOM, BPRI_HI);
473 if (mp == NULL) {
474 return (NULL);
475 }
476
477 ring = rq->ring;
478 shadow_rq = rq->shadow_ring;
479 frag_cnt = cqe->u0.s.num_fragments & 0x7;
480 cur_index = ring->cidx;
481 rqbd = shadow_rq[cur_index];
482 frag_size = min(pkt_len, rq->cfg.frag_size);
483 /* Align IP header */
484 mp->b_rptr += OCE_IP_ALIGN;
485
486 /* Sync the first buffer */
487 DBUF_SYNC(rqbd->rqb, rqbd->rqb.off, rqbd->rqb.len,
488 DDI_DMA_SYNC_FORCPU);
489
490
491 if (oce_check_tagged(dev, cqe)) {
492 (void) memcpy(mp->b_rptr, rqbd->rqb.base, 2 * ETHERADDRL);
493 oce_rx_insert_tag(dev, mp, cqe->u0.s.vlan_tag);
494 (void) memcpy(mp->b_rptr + 16, rqbd->rqb.base + 12,
495 frag_size - 12);
496 mp->b_wptr = mp->b_rptr + frag_size + VTAG_SIZE;
497 } else {
498 (void) memcpy(mp->b_rptr, rqbd->rqb.base, frag_size);
499 mp->b_wptr = mp->b_rptr + frag_size;
500 }
501
502 for (cnt = 1; cnt < frag_cnt; cnt++) {
503 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
504 pkt_len -= frag_size;
505 rqbd = shadow_rq[cur_index];
506 frag_size = min(rq->cfg.frag_size, pkt_len);
507 DBUF_SYNC(rqbd->rqb, rqbd->rqb.off, rqbd->rqb.len,
508 DDI_DMA_SYNC_FORCPU);
509
510 (void) memcpy(mp->b_wptr, rqbd->rqb.base, frag_size);
511 mp->b_wptr += frag_size;
512 }
513 (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
514 return (mp);
515 }
516
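/*
 * function to set the hardware checksum offload flags on a received
 * mblk from the checksum results reported in the CQE
 *
 * mp - mblk carrying the received packet
 * cqe - pointer to the RX completion queue entry
 *
 * return none
 */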
517 static inline void
518 oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
519 {
520 int csum_flags = 0;
521
522 /* set flags */
523 if (cqe->u0.s.ip_cksum_pass) {
524 csum_flags |= HCK_IPV4_HDRCKSUM_OK;
525 }
526
527 if (cqe->u0.s.l4_cksum_pass) {
528 csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
529 }
530
531 if (csum_flags) {
532 (void) mac_hcksum_set(mp, 0, 0, 0, 0, csum_flags);
533 }
534 }
535
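/*
 * function to fill in the 802.1Q TPID and VLAN tag fields of the
 * Ethernet VLAN header in the given mblk
 *
 * dev - software handle to the device
 * mp - mblk whose Ethernet header is being tagged
 * vtag - VLAN tag reported in the CQE
 *
 * return none
 */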
536 static inline void
537 oce_rx_insert_tag(struct oce_dev *dev, mblk_t *mp, uint16_t vtag)
538 {
539 struct ether_vlan_header *ehp;
540
541 ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
542 ehp->ether_tpid = htons(ETHERTYPE_VLAN);
543 if (LANCER_CHIP(dev))
544 ehp->ether_tci = htons(vtag);
545 else
546 ehp->ether_tci = LE_16(vtag);
547
548 }
549
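/*
 * function to drop a received packet by returning its fragment buffers
 * to the free list and advancing the RQ consumer index
 *
 * rq - pointer to the RQ on which the packet was received
 * cqe - pointer to the RX completion queue entry
 *
 * return none
 */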
550 static inline void
551 oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
552 {
553 int frag_cnt;
554 oce_rq_bdesc_t *rqbd;
555 oce_rq_bdesc_t **shadow_rq;
556 shadow_rq = rq->shadow_ring;
557 for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
558 rqbd = shadow_rq[rq->ring->cidx];
559 oce_rqb_free(rq, rqbd);
560 RING_GET(rq->ring, 1);
561 }
562 }
563
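/*
 * function to drain the RX completion queue from the interrupt and
 * poll paths
 *
 * arg - pointer to the RQ whose CQ is to be drained
 * nbytes - byte budget for a poll call, 0 for an interrupt call
 * npkts - packet budget, ignored for interrupt calls
 *
 * return chain of received mblks, or NULL if nothing was processed
 */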
564 void *
565 oce_drain_rq_cq(void *arg, int nbytes, int npkts)
566 {
567 struct oce_rq *rq;
568 struct oce_dev *dev;
569 struct oce_nic_rx_cqe *cqe;
570 mblk_t *mp = NULL;
571 struct oce_cq *cq;
572 int32_t frag_cnt;
573 uint16_t num_cqe = 0;
574 uint16_t cqe_consumed = 0;
575 uint32_t nbufs = 0;
576 int pkt_len;
        uint32_t poll = (nbytes != 0);
578 mblk_t *mp_head = NULL;
579 mblk_t **mp_tail = &mp_head;
580
581 rq = (struct oce_rq *)arg;
582 cq = rq->cq;
583 dev = rq->parent;
584
585 if (!poll) {
586 npkts = dev->rx_pkt_per_intr;
587 }
588
589 mutex_enter(&rq->rx_lock);
590 if ((!poll) && (rq->qmode == OCE_MODE_POLL)) {
591 /* reject any interrupt call in poll mode */
592 mutex_exit(&rq->rx_lock);
593 return (NULL);
594 }
595
596 if (rq->qstate == QDELETED) {
597 mutex_exit(&rq->rx_lock);
598 return (NULL);
599 }
600
601 DBUF_SYNC(cq->ring->dbuf, 0, 0, DDI_DMA_SYNC_FORKERNEL);
602 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
603
        /* dequeue until an invalid CQE is reached */
605 while (RQ_CQE_VALID(cqe)) {
606 DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
607
608 pkt_len = cqe->u0.s.pkt_size;
609
610
611 if (poll) {
612 if (nbytes < pkt_len) {
613 DW_SWAP(u32ptr(cqe),
614 sizeof (struct oce_nic_rx_cqe));
615 break;
616 }
617 /* reduce the available budget */
618 nbytes -= pkt_len;
619 }
620
621 frag_cnt = cqe->u0.s.num_fragments & 0x7;
622
                /*
                 * bcopy small packets, or any packet for which there
                 * are not enough free buffers to loan out.
                 */
624 if ((pkt_len < dev->rx_bcopy_limit) ||
625 (oce_atomic_reserve(&rq->rqb_free, frag_cnt) < 0)) {
626 mp = oce_rx_bcopy(dev, rq, cqe);
627 } else {
628 mp = oce_rx(dev, rq, cqe);
629 if (mp == NULL) {
630 atomic_add_32(&rq->rqb_free, frag_cnt);
631 mp = oce_rx_bcopy(dev, rq, cqe);
632 }
633 }
634
635 if (mp != NULL) {
636 oce_set_rx_oflags(mp, cqe);
637
638 *mp_tail = mp;
639 mp_tail = &mp->b_next;
640
641 } else {
642 (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
643 }
644 RING_GET(rq->ring, frag_cnt);
645 rq->buf_avail -= frag_cnt;
646 nbufs += frag_cnt;
647
648 /* update the ring stats */
649 rq->stat_bytes += pkt_len;
650 rq->stat_pkts++;
651
652 RQ_CQE_INVALIDATE(cqe);
653 RING_GET(cq->ring, 1);
654 num_cqe++;
655
656 cqe_consumed++;
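                /*
                 * Periodically return the consumed CQE credits and
                 * replenish the RQ so the hardware does not run out
                 * of receive buffers during a long drain.
                 */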
657 if (nbufs >= OCE_DEFAULT_RECHARGE_THRESHOLD) {
658 oce_arm_cq(dev, cq->cq_id, cqe_consumed, B_FALSE);
659 oce_rq_post_buffer(rq, nbufs);
660 nbufs = 0;
661 cqe_consumed = 0;
662 }
663
664 if (!poll && (--npkts <= 0)) {
665 break;
666 }
667 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
668 struct oce_nic_rx_cqe);
669
670 } /* for all valid CQEs */
671
672 if (cqe_consumed) {
673 oce_arm_cq(dev, cq->cq_id, cqe_consumed, rq->qmode);
674 oce_rq_post_buffer(rq, nbufs);
675 } else {
676 oce_arm_cq(dev, cq->cq_id, 0, rq->qmode);
677 }
678
679 mutex_exit(&rq->rx_lock);
680
681 if (!poll && mp_head) {
682 mac_rx_ring(dev->mac_handle, rq->handle, mp_head,
683 rq->gen_number);
684 }
685
686 return (mp_head);
687
688 } /* oce_drain_rq_cq */
689
/*
 * function to return an mblk data buffer to the RQ pool; installed as
 * the free routine of the loaned mblks
 *
 * arg - pointer to the receive buffer descriptor
 *
 * return none
 */
697 void
698 oce_rx_pool_free(char *arg)
699 {
700 oce_rq_bdesc_t *rqbd;
701 struct oce_rq *rq;
702 struct oce_dev *dev;
703
704 /* During destroy, arg will be NULL */
705 if (arg == NULL) {
706 return;
707 }
708
709 /* retrieve the pointers from arg */
710 rqbd = (oce_rq_bdesc_t *)(void *)arg;
711 rq = rqbd->rq;
712 dev = rq->parent;
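        /* Re-arm the descriptor with a new mblk so it can be loaned again */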
713 rqbd->mp = desballoc((uchar_t *)rqbd->rqb.base,
714 rqbd->rqb.size, 0, &rqbd->fr_rtn);
715
716 oce_rqb_free(rq, rqbd);
717 (void) atomic_dec_32(&rq->pending);
718
719 if (rq->pending == 0) {
720 mutex_enter(&rq->rq_fini_lock);
721 if (rq->qstate == QFINI_PENDING) {
722 oce_rq_fini(dev, rq);
723 }
724 mutex_exit(&rq->rq_fini_lock);
725 }
726 } /* rx_pool_free */
727
/*
 * function to drain the RX completion queue while stopping RX
 *
 * rq - pointer to the RQ structure
 *
 * return none
 */
735 void
736 oce_clean_rq(struct oce_rq *rq)
737 {
738 uint16_t num_cqe = 0;
739 struct oce_cq *cq;
740 struct oce_dev *dev;
741 struct oce_nic_rx_cqe *cqe;
742 int32_t ti = 0;
743 int frag_cnt;
744
745 dev = rq->parent;
746 cq = rq->cq;
747 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
        /* dequeue until an invalid CQE is reached */
749 for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
750
751 while (RQ_CQE_VALID(cqe)) {
752 DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
753 frag_cnt = cqe->u0.s.num_fragments & 0x7;
754 if (frag_cnt == 0) {
755 oce_log(dev, CE_NOTE, MOD_RX, "%s",
756 "Got Rx Completion Marble Returning ...\n");
757 RQ_CQE_INVALIDATE(cqe);
758 return;
759 }
760 oce_rx_drop_pkt(rq, cqe);
761 atomic_add_32(&rq->buf_avail,
762 -(cqe->u0.s.num_fragments & 0x7));
763 oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
764 RQ_CQE_INVALIDATE(cqe);
765 RING_GET(cq->ring, 1);
766 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
767 struct oce_nic_rx_cqe);
768 num_cqe++;
769 }
770 if (num_cqe == 0) {
771 /* arm the queue again to get completion marble */
772 oce_arm_cq(dev, cq->cq_id, 0, 1);
773 } else {
774 /* reset counter to reap valid completions again */
775 num_cqe = 0;
776 }
777 OCE_MSDELAY(1);
778 }
779 } /* oce_clean_rq */
780
/*
 * function to start the RX
 *
 * rq - pointer to the RQ structure
 *
 * return 0 => success
 */
788 int
789 oce_start_rq(struct oce_rq *rq)
790 {
791 int ret = 0;
792 int to_charge = 0;
793 struct oce_dev *dev = rq->parent;
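        /* charge up to the queue depth, capped by the free buffer count */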
794 to_charge = rq->cfg.q_len - rq->buf_avail;
795 to_charge = min(to_charge, rq->rqb_free);
796 atomic_add_32(&rq->rqb_free, -to_charge);
797 (void) oce_rq_charge(rq, to_charge, B_FALSE);
798 /* ok to do it here since Rx has not even started */
799 oce_rq_post_buffer(rq, to_charge);
800 rq->qmode = OCE_MODE_INTR;
801 oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
802 return (ret);
803 } /* oce_start_rq */
804
/* Check for RX buffers still held by the stack */
806 int
807 oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout)
808 {
809 int ti;
810 _NOTE(ARGUNUSED(dev));
811
812 for (ti = 0; ti < timeout; ti++) {
813 if (rq->pending > 0) {
814 OCE_MSDELAY(10);
815 continue;
816 } else {
817 break;
818 }
819 }
820
821 if (rq->pending != 0) {
822 oce_log(dev, CE_NOTE, MOD_CONFIG,
823 "%d pending RX buffers in rq=0x%p", rq->pending,
824 (void *)rq);
825 }
826 return (rq->pending);
827 }
828
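/*
 * function to decide whether the VLAN tag in the CQE should be passed
 * up with the packet, taking BE3 native mode, Flex10 (QnQ) and the
 * port PVID into account
 *
 * dev - software handle to the device
 * cqe - pointer to the RX completion queue entry
 *
 * return B_TRUE if the tag should be inserted, B_FALSE otherwise
 */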
829 static boolean_t
830 oce_check_tagged(struct oce_dev *dev, struct oce_nic_rx_cqe *cqe)
831 {
832 boolean_t tagged = B_FALSE;
833 if (((dev->drvfn_caps & DRVFN_CAPAB_BE3_NATIVE) &&
834 cqe->u0.s.vlan_tag_present) ||
835 (!(dev->drvfn_caps & DRVFN_CAPAB_BE3_NATIVE) &&
836 cqe->u0.v0.vlan_tag_present)) {
837 if (dev->function_mode & FLEX10_MODE) {
838 if (cqe->u0.s.qnq)
839 tagged = B_TRUE;
840 } else if (dev->pvid != 0) {
841 if (dev->pvid != cqe->u0.v0.vlan_tag)
842 tagged = B_TRUE;
843 } else
844 tagged = B_TRUE;
845 }
846 return (tagged);
847 }