NEX-1890 update oce from source provided by Emulex
--- old/usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/oce/oce_rx.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 -/* Copyright © 2003-2011 Emulex. All rights reserved. */
22 +/*
23 + * Copyright (c) 2009-2012 Emulex. All rights reserved.
24 + * Use is subject to license terms.
25 + */
23 26
27 +
28 +
24 29 /*
25 30 * Source file containing the Receive Path handling
26 31 * functions
27 32 */
28 33 #include <oce_impl.h>
29 34
30 35
31 36 void oce_rx_pool_free(char *arg);
32 37 static void oce_rqb_dtor(oce_rq_bdesc_t *rqbd);
33 -static int oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq,
34 - size_t size, int flags);
35 38
36 39 static inline mblk_t *oce_rx(struct oce_dev *dev, struct oce_rq *rq,
37 40 struct oce_nic_rx_cqe *cqe);
38 41 static inline mblk_t *oce_rx_bcopy(struct oce_dev *dev,
39 42 struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
40 43 static int oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost);
41 -static void oce_rx_insert_tag(mblk_t *mp, uint16_t vtag);
44 +static inline void oce_rx_insert_tag(struct oce_dev *dev, mblk_t *mp,
45 + uint16_t vtag);
42 46 static void oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe);
43 47 static inline void oce_rx_drop_pkt(struct oce_rq *rq,
44 48 struct oce_nic_rx_cqe *cqe);
45 49 static oce_rq_bdesc_t *oce_rqb_alloc(struct oce_rq *rq);
46 50 static void oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd);
47 51 static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs);
52 +static boolean_t oce_check_tagged(struct oce_dev *dev,
53 + struct oce_nic_rx_cqe *cqe);
48 54
49 55 #pragma inline(oce_rx)
50 56 #pragma inline(oce_rx_bcopy)
51 57 #pragma inline(oce_rq_charge)
52 58 #pragma inline(oce_rx_insert_tag)
53 59 #pragma inline(oce_set_rx_oflags)
54 60 #pragma inline(oce_rx_drop_pkt)
55 61 #pragma inline(oce_rqb_alloc)
56 62 #pragma inline(oce_rqb_free)
57 63 #pragma inline(oce_rq_post_buffer)
58 64
59 65 static ddi_dma_attr_t oce_rx_buf_attr = {
60 66 DMA_ATTR_V0, /* version number */
61 67 0x0000000000000000ull, /* low address */
62 68 0xFFFFFFFFFFFFFFFFull, /* high address */
63 69 0x00000000FFFFFFFFull, /* dma counter max */
64 70 OCE_DMA_ALIGNMENT, /* alignment */
65 71 0x000007FF, /* burst sizes */
66 72 0x00000001, /* minimum transfer size */
67 73 0x00000000FFFFFFFFull, /* maximum transfer size */
68 74 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
69 75 1, /* scatter/gather list length */
70 76 0x00000001, /* granularity */
71 - DDI_DMA_FLAGERR|DDI_DMA_RELAXED_ORDERING /* DMA flags */
77 + DDI_DMA_RELAXED_ORDERING /* DMA flags */
72 78 };
73 79
74 80 /*
75 81 * function to create a DMA buffer pool for RQ
76 82 *
77 83 * dev - software handle to the device
78 84 * num_items - number of buffers in the pool
79 85 * item_size - size of each buffer
80 86 *
81 87 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
82 88 */
83 89 int
84 90 oce_rqb_cache_create(struct oce_rq *rq, size_t buf_size)
85 91 {
86 - int size;
87 - int cnt;
88 - int ret;
89 92 oce_rq_bdesc_t *rqbd;
93 + struct oce_dev *dev;
94 + uint32_t size;
95 + uint64_t paddr;
96 + caddr_t vaddr;
97 + int ncookies = 0;
98 + int bufs_per_cookie = 0;
99 + int ridx = 0;
100 + int i = 0;
101 + ddi_dma_cookie_t cookie;
102 + int ret;
90 103
91 - _NOTE(ARGUNUSED(buf_size));
92 104 rqbd = rq->rq_bdesc_array;
93 - size = rq->cfg.frag_size + OCE_RQE_BUF_HEADROOM;
94 - for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
95 - rq->rqb_freelist[cnt] = rqbd;
96 - ret = oce_rqb_ctor(rqbd, rq,
97 - size, (DDI_DMA_RDWR|DDI_DMA_STREAMING));
105 + size = buf_size * rq->cfg.nbufs;
106 + dev = rq->parent;
107 +
108 + oce_rx_buf_attr.dma_attr_granular = (uint32_t)buf_size;
109 + if (DDI_FM_DMA_ERR_CAP(dev->fm_caps)) {
110 + oce_rx_buf_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
111 + }
112 +
 113 +	/* Try one big chunk; with an IOMMU the cookie count is normally 1 */
114 + oce_rx_buf_attr.dma_attr_sgllen = 1;
115 + ret = oce_alloc_dma_buffer(dev, &rq->rqb, size, &oce_rx_buf_attr,
116 + (DDI_DMA_RDWR|DDI_DMA_STREAMING));
 117 +	/* retry, allowing one DMA cookie per page */
118 + if (ret != DDI_SUCCESS) {
119 + oce_rx_buf_attr.dma_attr_sgllen =
120 + size/ddi_ptob(dev->dip, (ulong_t)1) + 2;
121 + ret = oce_alloc_dma_buffer(dev, &rq->rqb, size,
122 + &oce_rx_buf_attr, (DDI_DMA_RDWR | DDI_DMA_STREAMING));
98 123 if (ret != DDI_SUCCESS) {
99 - goto rqb_fail;
124 + return (DDI_FAILURE);
100 125 }
101 126 }
127 +
128 + ncookies = rq->rqb.ncookies;
129 + /* Set the starting phys and vaddr */
130 + /* paddr = rq->rqb.addr; */
131 + vaddr = rq->rqb.base;
132 + cookie = rq->rqb.cookie;
133 +
134 + do {
135 + paddr = cookie.dmac_laddress;
136 + bufs_per_cookie = cookie.dmac_size/buf_size;
137 + for (i = 0; i < bufs_per_cookie; i++, rqbd++) {
138 + rqbd->mp = desballoc((uchar_t *)vaddr, buf_size, 0,
139 + &rqbd->fr_rtn);
140 + if (rqbd->mp == NULL) {
141 + goto desb_fail;
142 + }
 143 +			/* Set the callback function parameters */
144 + rqbd->fr_rtn.free_func = (void (*)())oce_rx_pool_free;
145 + rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
146 + /* Populate the DMA object for each buffer */
147 + rqbd->rqb.acc_handle = rq->rqb.acc_handle;
148 + rqbd->rqb.dma_handle = rq->rqb.dma_handle;
149 + rqbd->rqb.base = vaddr;
150 + rqbd->rqb.addr = paddr;
151 + rqbd->rqb.len = buf_size;
152 + rqbd->rqb.size = buf_size;
153 + rqbd->rqb.off = ridx * buf_size;
154 + rqbd->rq = rq;
155 + rqbd->frag_addr.dw.addr_lo = ADDR_LO(paddr);
156 + rqbd->frag_addr.dw.addr_hi = ADDR_HI(paddr);
157 + rq->rqb_freelist[ridx] = rqbd;
158 + /* increment the addresses */
159 + paddr += buf_size;
160 + vaddr += buf_size;
161 + ridx++;
162 + if (ridx >= rq->cfg.nbufs) {
163 + break;
164 + }
165 + }
166 + if (--ncookies > 0) {
167 + (void) ddi_dma_nextcookie(rq->rqb.dma_handle, &cookie);
168 + }
169 + } while (ncookies > 0);
170 +
102 171 rq->rqb_free = rq->cfg.nbufs;
103 172 rq->rqb_rc_head = 0;
104 173 rq->rqb_next_free = 0;
105 174 return (DDI_SUCCESS);
106 175
107 -rqb_fail:
176 +desb_fail:
108 177 oce_rqb_cache_destroy(rq);
109 178 return (DDI_FAILURE);
110 179 } /* oce_rqb_cache_create */
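
Editor's note: the rewritten oce_rqb_cache_create() above replaces per-buffer DMA allocations with one large allocation that is sliced into fixed-size descriptors, walking the DMA cookies. The following is a minimal user-space sketch of that slicing, assuming a single contiguous chunk (the multi-cookie case just repeats the same loop per cookie). All names here (carve_pool, buf_desc, dev_base) are invented for illustration and are not part of the driver.

    /* Hypothetical sketch: carve one large allocation into fixed-size
     * receive buffer descriptors, one slice per buffer. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct buf_desc {
        void     *vaddr;   /* CPU-visible address of the slice */
        uint64_t  paddr;   /* "device" address of the slice (simulated) */
        size_t    len;     /* slice length == buf_size */
    };

    int
    carve_pool(char *base, uint64_t dev_base, size_t chunk_size,
        size_t buf_size, struct buf_desc *descs, int nbufs)
    {
        int ridx = 0;
        size_t off;

        /* one descriptor per buf_size bytes, stop at nbufs or end of chunk */
        for (off = 0; off + buf_size <= chunk_size && ridx < nbufs;
            off += buf_size, ridx++) {
            descs[ridx].vaddr = base + off;
            descs[ridx].paddr = dev_base + off;
            descs[ridx].len = buf_size;
        }
        return (ridx);  /* number of descriptors initialized */
    }

    int
    main(void)
    {
        enum { NBUFS = 8, BUF_SIZE = 2048 };
        struct buf_desc descs[NBUFS];
        char *chunk = malloc(NBUFS * BUF_SIZE);
        int n;

        if (chunk == NULL)
            return (1);
        n = carve_pool(chunk, 0x10000000ULL, NBUFS * BUF_SIZE,
            BUF_SIZE, descs, NBUFS);
        printf("carved %d buffers, first paddr 0x%llx\n", n,
            (unsigned long long)descs[0].paddr);
        free(chunk);
        return (0);
    }
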
111 180
112 181 /*
113 182 * function to Destroy RQ DMA buffer cache
114 183 *
115 184 * rq - pointer to rq structure
116 185 *
117 186 * return none
118 187 */
119 188 void
120 189 oce_rqb_cache_destroy(struct oce_rq *rq)
121 190 {
122 191 oce_rq_bdesc_t *rqbd = NULL;
123 192 int cnt;
124 193
125 194 rqbd = rq->rq_bdesc_array;
126 195 for (cnt = 0; cnt < rq->cfg.nbufs; cnt++, rqbd++) {
127 196 oce_rqb_dtor(rqbd);
128 197 }
198 +
199 + oce_free_dma_buffer(rq->parent, &rq->rqb);
129 200 } /* oce_rqb_cache_destroy */
130 201
131 202 /*
132 203 * RQ buffer destructor function
133 204 *
134 205 * rqbd - pointer to rq buffer descriptor
135 206 *
136 207 * return none
137 208 */
138 209 static void
139 210 oce_rqb_dtor(oce_rq_bdesc_t *rqbd)
140 211 {
141 212 if ((rqbd == NULL) || (rqbd->rq == NULL)) {
142 213 return;
143 214 }
144 215 if (rqbd->mp != NULL) {
145 216 rqbd->fr_rtn.free_arg = NULL;
146 217 freemsg(rqbd->mp);
147 218 rqbd->mp = NULL;
148 219 }
149 - oce_free_dma_buffer(rqbd->rq->parent, rqbd->rqb);
150 220 } /* oce_rqb_dtor */
151 221
152 -/*
153 - * RQ buffer constructor function
154 - *
155 - * rqbd - pointer to rq buffer descriptor
156 - * rq - pointer to RQ structure
157 - * size - size of the buffer
158 - * flags - KM_SLEEP OR KM_NOSLEEP
159 - *
160 - * return DDI_SUCCESS => success, DDI_FAILURE otherwise
161 - */
162 -static int
163 -oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq, size_t size, int flags)
164 -{
165 - struct oce_dev *dev;
166 - oce_dma_buf_t *dbuf;
167 222
168 - dev = rq->parent;
169 -
170 - dbuf = oce_alloc_dma_buffer(dev, size, &oce_rx_buf_attr, flags);
171 - if (dbuf == NULL) {
172 - return (DDI_FAILURE);
173 - }
174 -
175 - /* Set the call back function parameters */
176 - rqbd->fr_rtn.free_func = (void (*)())oce_rx_pool_free;
177 - rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;
178 - rqbd->mp = desballoc((uchar_t *)(dbuf->base),
179 - dbuf->size, 0, &rqbd->fr_rtn);
180 - if (rqbd->mp == NULL) {
181 - oce_free_dma_buffer(dev, dbuf);
182 - return (DDI_FAILURE);
183 - }
184 - rqbd->rqb = dbuf;
185 - rqbd->rq = rq;
186 - rqbd->frag_addr.dw.addr_lo = ADDR_LO(dbuf->addr + OCE_RQE_BUF_HEADROOM);
187 - rqbd->frag_addr.dw.addr_hi = ADDR_HI(dbuf->addr + OCE_RQE_BUF_HEADROOM);
188 - rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
189 -
190 - return (DDI_SUCCESS);
191 -} /* oce_rqb_ctor */
192 -
193 223 /*
194 224 * RQ buffer allocator function
195 225 *
196 226 * rq - pointer to RQ structure
197 227 *
198 228 * return pointer to RQ buffer descriptor
199 229 */
200 230 static inline oce_rq_bdesc_t *
201 231 oce_rqb_alloc(struct oce_rq *rq)
202 232 {
203 233 oce_rq_bdesc_t *rqbd;
204 234 uint32_t free_index;
205 235 free_index = rq->rqb_next_free;
206 236 rqbd = rq->rqb_freelist[free_index];
207 237 rq->rqb_freelist[free_index] = NULL;
208 238 rq->rqb_next_free = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
209 239 return (rqbd);
210 240 } /* oce_rqb_alloc */
211 241
212 242 /*
213 243 * function to free the RQ buffer
214 244 *
215 245 * rq - pointer to RQ structure
 216 246 * rqbd - pointer to receive buffer descriptor
217 247 *
218 248 * return none
219 249 */
220 250 static inline void
221 251 oce_rqb_free(struct oce_rq *rq, oce_rq_bdesc_t *rqbd)
222 252 {
223 253 uint32_t free_index;
224 254 mutex_enter(&rq->rc_lock);
225 255 free_index = rq->rqb_rc_head;
226 256 rq->rqb_freelist[free_index] = rqbd;
227 257 rq->rqb_rc_head = GET_Q_NEXT(free_index, 1, rq->cfg.nbufs);
228 258 mutex_exit(&rq->rc_lock);
229 259 atomic_inc_32(&rq->rqb_free);
230 260 } /* oce_rqb_free */
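
Editor's note: oce_rqb_alloc() and oce_rqb_free() above hand descriptors out at rqb_next_free and return them at rqb_rc_head, both indices wrapping around the pool. Below is a minimal sketch of that circular free-list indexing; Q_NEXT mirrors what the driver's GET_Q_NEXT macro is assumed to compute, and the sketch deliberately omits the rc_lock mutex and the rqb_free counter that gate allocation in the real code.

    /* Hypothetical sketch of the circular free list used by the RQ buffer
     * pool: alloc consumes at next_free, free produces at rc_head. */
    #include <stdio.h>

    #define Q_NEXT(idx, step, size)  (((idx) + (step)) % (size))

    enum { POOL_SIZE = 4 };

    static int freelist[POOL_SIZE] = { 10, 11, 12, 13 };  /* fake descriptors */
    static int next_free = 0;   /* consumer index (alloc side) */
    static int rc_head = 0;     /* producer index (free side)  */

    static int
    pool_alloc(void)
    {
        int d = freelist[next_free];

        freelist[next_free] = -1;
        next_free = Q_NEXT(next_free, 1, POOL_SIZE);
        return (d);
    }

    static void
    pool_free(int d)
    {
        freelist[rc_head] = d;
        rc_head = Q_NEXT(rc_head, 1, POOL_SIZE);
    }

    int
    main(void)
    {
        int a = pool_alloc();
        int b = pool_alloc();

        pool_free(a);
        pool_free(b);
        printf("next_free=%d rc_head=%d\n", next_free, rc_head);
        return (0);
    }
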
231 261
232 262
233 263
234 264
235 265 static void oce_rq_post_buffer(struct oce_rq *rq, int nbufs)
236 266 {
237 267 pd_rxulp_db_t rxdb_reg;
238 268 int count;
239 269 struct oce_dev *dev = rq->parent;
240 270
241 271
242 272 rxdb_reg.dw0 = 0;
243 273 rxdb_reg.bits.qid = rq->rq_id & DB_RQ_ID_MASK;
244 274
245 275 for (count = nbufs/OCE_MAX_RQ_POSTS; count > 0; count--) {
246 276 rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
247 277 OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
248 278 rq->buf_avail += OCE_MAX_RQ_POSTS;
249 279 nbufs -= OCE_MAX_RQ_POSTS;
250 280 }
251 281 if (nbufs > 0) {
252 282 rxdb_reg.bits.num_posted = nbufs;
253 283 OCE_DB_WRITE32(dev, PD_RXULP_DB, rxdb_reg.dw0);
254 284 rq->buf_avail += nbufs;
255 285 }
256 286 }
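
Editor's note: oce_rq_post_buffer() above reports newly charged buffers to hardware at most OCE_MAX_RQ_POSTS at a time. The sketch below shows the same chunking arithmetic in user space, with the doorbell register write replaced by a printf(); MAX_RQ_POSTS = 255 is an assumed value, not taken from the driver headers.

    /* Hypothetical sketch of chunked doorbell posting. */
    #include <stdio.h>

    enum { MAX_RQ_POSTS = 255 };

    static void
    post_buffers(int qid, int nbufs)
    {
        int count;

        for (count = nbufs / MAX_RQ_POSTS; count > 0; count--) {
            printf("doorbell: qid=%d num_posted=%d\n", qid, MAX_RQ_POSTS);
            nbufs -= MAX_RQ_POSTS;
        }
        if (nbufs > 0)
            printf("doorbell: qid=%d num_posted=%d\n", qid, nbufs);
    }

    int
    main(void)
    {
        post_buffers(0, 600);   /* posts 255, 255, then 90 */
        return (0);
    }
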
257 287 /*
258 288 * function to charge a given rq with buffers from a pool's free list
259 289 *
260 290 * dev - software handle to the device
261 291 * rq - pointer to the RQ to charge
262 292 * nbufs - numbers of buffers to be charged
263 293 *
 264 294 * return number of rqe's charged.
265 295 */
266 296 static inline int
267 297 oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost)
268 298 {
269 299 struct oce_nic_rqe *rqe;
270 300 oce_rq_bdesc_t *rqbd;
271 301 oce_rq_bdesc_t **shadow_rq;
272 302 int cnt;
273 303 int cur_index;
274 304 oce_ring_buffer_t *ring;
275 305
276 306 shadow_rq = rq->shadow_ring;
277 307 ring = rq->ring;
278 308 cur_index = ring->cidx;
279 309
280 310 for (cnt = 0; cnt < nbufs; cnt++) {
281 311 if (!repost) {
282 312 rqbd = oce_rqb_alloc(rq);
283 313 } else {
284 314 /* just repost the buffers from shadow ring */
285 315 rqbd = shadow_rq[cur_index];
286 316 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
287 317 }
288 318 /* fill the rqes */
289 319 rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring,
290 320 struct oce_nic_rqe);
291 321 rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
292 322 rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
293 323 shadow_rq[rq->ring->pidx] = rqbd;
294 324 DW_SWAP(u32ptr(rqe), sizeof (struct oce_nic_rqe));
295 325 RING_PUT(rq->ring, 1);
296 326 }
297 327
298 328 return (cnt);
299 329 } /* oce_rq_charge */
300 330
301 331 /*
302 332 * function to release the posted buffers
303 333 *
304 334 * rq - pointer to the RQ to charge
305 335 *
306 336 * return none
307 337 */
308 338 void
309 339 oce_rq_discharge(struct oce_rq *rq)
310 340 {
311 341 oce_rq_bdesc_t *rqbd;
312 342 oce_rq_bdesc_t **shadow_rq;
313 343
314 344 shadow_rq = rq->shadow_ring;
315 345 /* Free the posted buffer since RQ is destroyed already */
316 346 while ((int32_t)rq->buf_avail > 0) {
317 347 rqbd = shadow_rq[rq->ring->cidx];
318 348 oce_rqb_free(rq, rqbd);
319 349 RING_GET(rq->ring, 1);
320 350 rq->buf_avail--;
321 351 }
322 352 }
323 353 /*
324 354 * function to process a single packet
325 355 *
326 356 * dev - software handle to the device
327 357 * rq - pointer to the RQ to charge
328 358 * cqe - Pointer to Completion Q entry
329 359 *
330 360 * return mblk pointer => success, NULL => error
331 361 */
332 362 static inline mblk_t *
333 363 oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
334 364 {
335 365 mblk_t *mp;
336 366 int pkt_len;
337 367 int32_t frag_cnt = 0;
338 368 mblk_t **mblk_tail;
339 369 mblk_t *mblk_head;
340 370 int frag_size;
341 371 oce_rq_bdesc_t *rqbd;
342 372 uint16_t cur_index;
343 373 oce_ring_buffer_t *ring;
344 374 int i;
375 + uint32_t hdr_len;
345 376
346 377 frag_cnt = cqe->u0.s.num_fragments & 0x7;
347 378 mblk_head = NULL;
348 379 mblk_tail = &mblk_head;
349 380
350 381 ring = rq->ring;
351 382 cur_index = ring->cidx;
352 383
353 384 /* Get the relevant Queue pointers */
354 385 pkt_len = cqe->u0.s.pkt_size;
386 +
387 + if (pkt_len == 0) {
388 + return (NULL);
389 + }
390 +
355 391 for (i = 0; i < frag_cnt; i++) {
356 392 rqbd = rq->shadow_ring[cur_index];
357 393 if (rqbd->mp == NULL) {
358 - rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
359 - rqbd->rqb->size, 0, &rqbd->fr_rtn);
394 + rqbd->mp = desballoc((uchar_t *)rqbd->rqb.base,
395 + rqbd->rqb.size, 0, &rqbd->fr_rtn);
360 396 if (rqbd->mp == NULL) {
361 397 return (NULL);
362 398 }
363 -
364 - rqbd->mp->b_rptr =
365 - (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
366 399 }
367 400
368 401 mp = rqbd->mp;
369 402 frag_size = (pkt_len > rq->cfg.frag_size) ?
370 403 rq->cfg.frag_size : pkt_len;
371 404 mp->b_wptr = mp->b_rptr + frag_size;
372 405 pkt_len -= frag_size;
373 406 mp->b_next = mp->b_cont = NULL;
374 407 /* Chain the message mblks */
375 408 *mblk_tail = mp;
376 409 mblk_tail = &mp->b_cont;
377 - (void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
410 + DBUF_SYNC(rqbd->rqb, rqbd->rqb.off, rqbd->rqb.len,
411 + DDI_DMA_SYNC_FORCPU);
378 412 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
379 413 }
380 414
381 415 if (mblk_head == NULL) {
382 416 oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
383 417 return (NULL);
384 418 }
 419 +	/* coalesce headers + VLAN tag into the first mblk */
420 + mp = allocb(OCE_HDR_LEN, BPRI_HI);
421 + if (mp == NULL) {
422 + return (NULL);
423 + }
424 + /* Align the IP header */
425 + mp->b_rptr += OCE_IP_ALIGN;
385 426
427 + if (oce_check_tagged(dev, cqe)) {
428 + hdr_len = min(MBLKL(mblk_head), OCE_HDR_LEN) -
429 + VTAG_SIZE - OCE_IP_ALIGN;
430 + (void) memcpy(mp->b_rptr, mblk_head->b_rptr, 2 * ETHERADDRL);
431 + oce_rx_insert_tag(dev, mp, cqe->u0.s.vlan_tag);
432 + (void) memcpy(mp->b_rptr + 16, mblk_head->b_rptr + 12,
433 + hdr_len - 12);
434 + mp->b_wptr = mp->b_rptr + VTAG_SIZE + hdr_len;
435 + } else {
436 +
437 + hdr_len = min(MBLKL(mblk_head), OCE_HDR_LEN) - OCE_IP_ALIGN;
438 + (void) memcpy(mp->b_rptr, mblk_head->b_rptr, hdr_len);
439 + mp->b_wptr = mp->b_rptr + hdr_len;
440 + }
441 + mblk_head->b_rptr += hdr_len;
442 + if (MBLKL(mblk_head) > 0) {
443 + mp->b_cont = mblk_head;
444 + } else {
445 + mp->b_cont = mblk_head->b_cont;
446 + freeb(mblk_head);
447 + }
386 448 /* replace the buffer with new ones */
387 449 (void) oce_rq_charge(rq, frag_cnt, B_FALSE);
388 450 atomic_add_32(&rq->pending, frag_cnt);
389 - return (mblk_head);
451 + return (mp);
390 452 } /* oce_rx */
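
Editor's note: the tagged path in oce_rx() above copies the two MAC addresses (2 * ETHERADDRL = 12 bytes) into the new header mblk, inserts a 4-byte 802.1Q tag at offset 12, and continues the original header at offset 16. Below is a minimal user-space sketch of that offset arithmetic. The helper name and sample frame are invented; the TCI is written in network byte order here, whereas the driver's byte order depends on the chip (htons on Lancer, LE_16 otherwise).

    /* Hypothetical sketch: rebuild an untagged frame as a VLAN-tagged one. */
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>
    #include <arpa/inet.h>   /* htons() */

    #define ETHERADDRL 6
    #define VTAG_SIZE  4

    static size_t
    insert_vlan_tag(uint8_t *dst, const uint8_t *src, size_t src_len,
        uint16_t tci)
    {
        uint16_t tpid = htons(0x8100);
        uint16_t tci_be = htons(tci);

        /* destination MAC + source MAC stay in place */
        memcpy(dst, src, 2 * ETHERADDRL);
        /* 802.1Q tag goes where the EtherType used to be */
        memcpy(dst + 12, &tpid, sizeof (tpid));
        memcpy(dst + 14, &tci_be, sizeof (tci_be));
        /* rest of the frame shifts out by VTAG_SIZE */
        memcpy(dst + 16, src + 12, src_len - 12);
        return (src_len + VTAG_SIZE);
    }

    int
    main(void)
    {
        uint8_t frame[64] = { 0 };   /* untagged frame: MACs, type, payload */
        uint8_t tagged[64 + VTAG_SIZE];
        size_t n;

        frame[12] = 0x08;            /* EtherType 0x0800 (IPv4) */
        n = insert_vlan_tag(tagged, frame, sizeof (frame), 100);
        printf("tagged frame is %zu bytes, TPID %02x%02x\n",
            n, tagged[12], tagged[13]);
        return (0);
    }
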
391 453
392 454 static inline mblk_t *
393 455 oce_rx_bcopy(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
394 456 {
395 457 mblk_t *mp;
396 458 int pkt_len;
397 - int alloc_len;
398 459 int32_t frag_cnt = 0;
399 460 int frag_size;
400 461 oce_rq_bdesc_t *rqbd;
401 - unsigned char *rptr;
402 462 uint32_t cur_index;
403 463 oce_ring_buffer_t *ring;
404 464 oce_rq_bdesc_t **shadow_rq;
405 465 int cnt = 0;
406 -
407 - _NOTE(ARGUNUSED(dev));
408 -
409 - shadow_rq = rq->shadow_ring;
410 466 pkt_len = cqe->u0.s.pkt_size;
411 - alloc_len = pkt_len + OCE_RQE_BUF_HEADROOM;
412 - frag_cnt = cqe->u0.s.num_fragments & 0x7;
413 467
414 - mp = allocb(alloc_len, BPRI_HI);
468 + if (pkt_len == 0) {
469 + return (NULL);
470 + }
471 +
472 + mp = allocb(pkt_len + OCE_RQE_BUF_HEADROOM, BPRI_HI);
415 473 if (mp == NULL) {
416 474 return (NULL);
417 475 }
418 476
419 - mp->b_rptr += OCE_RQE_BUF_HEADROOM;
420 - rptr = mp->b_rptr;
421 - mp->b_wptr = mp->b_rptr + pkt_len;
422 477 ring = rq->ring;
423 -
478 + shadow_rq = rq->shadow_ring;
479 + frag_cnt = cqe->u0.s.num_fragments & 0x7;
424 480 cur_index = ring->cidx;
425 - for (cnt = 0; cnt < frag_cnt; cnt++) {
426 - rqbd = shadow_rq[cur_index];
427 - frag_size = (pkt_len > rq->cfg.frag_size) ?
428 - rq->cfg.frag_size : pkt_len;
429 - (void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
430 - bcopy(rqbd->rqb->base + OCE_RQE_BUF_HEADROOM, rptr, frag_size);
431 - rptr += frag_size;
432 - pkt_len -= frag_size;
481 + rqbd = shadow_rq[cur_index];
482 + frag_size = min(pkt_len, rq->cfg.frag_size);
483 + /* Align IP header */
484 + mp->b_rptr += OCE_IP_ALIGN;
485 +
486 + /* Sync the first buffer */
487 + DBUF_SYNC(rqbd->rqb, rqbd->rqb.off, rqbd->rqb.len,
488 + DDI_DMA_SYNC_FORCPU);
489 +
490 +
491 + if (oce_check_tagged(dev, cqe)) {
492 + (void) memcpy(mp->b_rptr, rqbd->rqb.base, 2 * ETHERADDRL);
493 + oce_rx_insert_tag(dev, mp, cqe->u0.s.vlan_tag);
494 + (void) memcpy(mp->b_rptr + 16, rqbd->rqb.base + 12,
495 + frag_size - 12);
496 + mp->b_wptr = mp->b_rptr + frag_size + VTAG_SIZE;
497 + } else {
498 + (void) memcpy(mp->b_rptr, rqbd->rqb.base, frag_size);
499 + mp->b_wptr = mp->b_rptr + frag_size;
500 + }
501 +
502 + for (cnt = 1; cnt < frag_cnt; cnt++) {
433 503 cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
504 + pkt_len -= frag_size;
505 + rqbd = shadow_rq[cur_index];
506 + frag_size = min(rq->cfg.frag_size, pkt_len);
507 + DBUF_SYNC(rqbd->rqb, rqbd->rqb.off, rqbd->rqb.len,
508 + DDI_DMA_SYNC_FORCPU);
509 +
510 + (void) memcpy(mp->b_wptr, rqbd->rqb.base, frag_size);
511 + mp->b_wptr += frag_size;
434 512 }
435 513 (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
436 514 return (mp);
437 515 }
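
Editor's note: oce_rx_bcopy() above reassembles a packet by copying min(frag_size, bytes remaining) from each posted buffer in turn. The sketch below shows only that remaining-length loop, with fragment contents simulated in plain arrays; FRAG_SIZE and copy_fragments are illustrative names, not driver symbols.

    /* Hypothetical sketch of the fragment copy loop. */
    #include <stdio.h>
    #include <string.h>

    #define FRAG_SIZE 8   /* assumed per-buffer size, kept small here */

    static size_t
    copy_fragments(char *dst, char frags[][FRAG_SIZE], int frag_cnt,
        size_t pkt_len)
    {
        size_t left = pkt_len;
        size_t copied = 0;
        int i;

        for (i = 0; i < frag_cnt && left > 0; i++) {
            size_t chunk = (left > FRAG_SIZE) ? FRAG_SIZE : left;

            memcpy(dst + copied, frags[i], chunk);
            copied += chunk;
            left -= chunk;
        }
        return (copied);
    }

    int
    main(void)
    {
        char frags[3][FRAG_SIZE];
        char pkt[32] = { 0 };
        size_t n;

        memset(frags, 'x', sizeof (frags));
        n = copy_fragments(pkt, frags, 3, 18);   /* 8 + 8 + 2 bytes */
        printf("reassembled %zu bytes\n", n);
        return (0);
    }
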
438 516
439 517 static inline void
440 518 oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
441 519 {
442 520 int csum_flags = 0;
443 521
444 522 /* set flags */
445 523 if (cqe->u0.s.ip_cksum_pass) {
446 524 csum_flags |= HCK_IPV4_HDRCKSUM_OK;
447 525 }
448 526
449 527 if (cqe->u0.s.l4_cksum_pass) {
450 528 csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
451 529 }
452 530
453 531 if (csum_flags) {
454 532 (void) mac_hcksum_set(mp, 0, 0, 0, 0, csum_flags);
455 533 }
456 534 }
457 535
458 536 static inline void
459 -oce_rx_insert_tag(mblk_t *mp, uint16_t vtag)
537 +oce_rx_insert_tag(struct oce_dev *dev, mblk_t *mp, uint16_t vtag)
460 538 {
461 539 struct ether_vlan_header *ehp;
462 540
463 - (void) memmove(mp->b_rptr - VTAG_SIZE,
464 - mp->b_rptr, 2 * ETHERADDRL);
465 - mp->b_rptr -= VTAG_SIZE;
466 541 ehp = (struct ether_vlan_header *)voidptr(mp->b_rptr);
467 542 ehp->ether_tpid = htons(ETHERTYPE_VLAN);
468 - ehp->ether_tci = LE_16(vtag);
543 + if (LANCER_CHIP(dev))
544 + ehp->ether_tci = htons(vtag);
545 + else
546 + ehp->ether_tci = LE_16(vtag);
547 +
469 548 }
470 549
471 550 static inline void
472 551 oce_rx_drop_pkt(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
473 552 {
474 553 int frag_cnt;
475 554 oce_rq_bdesc_t *rqbd;
476 555 oce_rq_bdesc_t **shadow_rq;
477 556 shadow_rq = rq->shadow_ring;
478 557 for (frag_cnt = 0; frag_cnt < cqe->u0.s.num_fragments; frag_cnt++) {
479 558 rqbd = shadow_rq[rq->ring->cidx];
480 559 oce_rqb_free(rq, rqbd);
481 560 RING_GET(rq->ring, 1);
482 561 }
483 562 }
484 563
485 -
486 -/*
487 - * function to process a Recieve queue
488 - *
489 - * arg - pointer to the RQ to charge
490 - *
491 - * return number of cqes processed
492 - */
493 -uint16_t
494 -oce_drain_rq_cq(void *arg)
564 +void *
565 +oce_drain_rq_cq(void *arg, int nbytes, int npkts)
495 566 {
496 - struct oce_nic_rx_cqe *cqe;
497 567 struct oce_rq *rq;
568 + struct oce_dev *dev;
569 + struct oce_nic_rx_cqe *cqe;
498 570 mblk_t *mp = NULL;
499 - mblk_t *mblk_head;
500 - mblk_t **mblk_tail;
501 - uint16_t num_cqe = 0;
502 571 struct oce_cq *cq;
503 - struct oce_dev *dev;
504 572 int32_t frag_cnt;
573 + uint16_t num_cqe = 0;
574 + uint16_t cqe_consumed = 0;
505 575 uint32_t nbufs = 0;
576 + int pkt_len;
577 + uint32_t poll = (nbytes || 0);
578 + mblk_t *mp_head = NULL;
579 + mblk_t **mp_tail = &mp_head;
506 580
507 581 rq = (struct oce_rq *)arg;
508 - dev = rq->parent;
509 582 cq = rq->cq;
510 - mblk_head = NULL;
511 - mblk_tail = &mblk_head;
583 + dev = rq->parent;
512 584
585 + if (!poll) {
586 + npkts = dev->rx_pkt_per_intr;
587 + }
588 +
589 + mutex_enter(&rq->rx_lock);
590 + if ((!poll) && (rq->qmode == OCE_MODE_POLL)) {
591 + /* reject any interrupt call in poll mode */
592 + mutex_exit(&rq->rx_lock);
593 + return (NULL);
594 + }
595 +
596 + if (rq->qstate == QDELETED) {
597 + mutex_exit(&rq->rx_lock);
598 + return (NULL);
599 + }
600 +
601 + DBUF_SYNC(cq->ring->dbuf, 0, 0, DDI_DMA_SYNC_FORKERNEL);
513 602 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
514 603
515 - (void) DBUF_SYNC(cq->ring->dbuf, DDI_DMA_SYNC_FORKERNEL);
516 604 /* dequeue till you reach an invalid cqe */
517 605 while (RQ_CQE_VALID(cqe)) {
518 606 DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
607 +
608 + pkt_len = cqe->u0.s.pkt_size;
609 +
610 +
611 + if (poll) {
612 + if (nbytes < pkt_len) {
613 + DW_SWAP(u32ptr(cqe),
614 + sizeof (struct oce_nic_rx_cqe));
615 + break;
616 + }
617 + /* reduce the available budget */
618 + nbytes -= pkt_len;
619 + }
620 +
519 621 frag_cnt = cqe->u0.s.num_fragments & 0x7;
622 +
520 623 /* if insufficient buffers to charge then do copy */
521 - if ((cqe->u0.s.pkt_size < dev->rx_bcopy_limit) ||
624 + if ((pkt_len < dev->rx_bcopy_limit) ||
522 625 (oce_atomic_reserve(&rq->rqb_free, frag_cnt) < 0)) {
523 626 mp = oce_rx_bcopy(dev, rq, cqe);
524 627 } else {
525 628 mp = oce_rx(dev, rq, cqe);
526 629 if (mp == NULL) {
527 630 atomic_add_32(&rq->rqb_free, frag_cnt);
528 631 mp = oce_rx_bcopy(dev, rq, cqe);
529 632 }
530 633 }
634 +
531 635 if (mp != NULL) {
532 - if (dev->function_mode & FLEX10_MODE) {
533 - if (cqe->u0.s.vlan_tag_present &&
534 - cqe->u0.s.qnq) {
535 - oce_rx_insert_tag(mp,
536 - cqe->u0.s.vlan_tag);
537 - }
538 - } else if (cqe->u0.s.vlan_tag_present) {
539 - oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
540 - }
541 636 oce_set_rx_oflags(mp, cqe);
542 637
543 - *mblk_tail = mp;
544 - mblk_tail = &mp->b_next;
638 + *mp_tail = mp;
639 + mp_tail = &mp->b_next;
640 +
545 641 } else {
546 642 (void) oce_rq_charge(rq, frag_cnt, B_TRUE);
547 643 }
548 644 RING_GET(rq->ring, frag_cnt);
549 645 rq->buf_avail -= frag_cnt;
550 646 nbufs += frag_cnt;
551 647
552 - oce_rq_post_buffer(rq, frag_cnt);
648 + /* update the ring stats */
649 + rq->stat_bytes += pkt_len;
650 + rq->stat_pkts++;
651 +
553 652 RQ_CQE_INVALIDATE(cqe);
554 653 RING_GET(cq->ring, 1);
555 - cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
556 - struct oce_nic_rx_cqe);
557 654 num_cqe++;
558 - /* process max ring size */
559 - if (num_cqe > dev->rx_pkt_per_intr) {
655 +
656 + cqe_consumed++;
657 + if (nbufs >= OCE_DEFAULT_RECHARGE_THRESHOLD) {
658 + oce_arm_cq(dev, cq->cq_id, cqe_consumed, B_FALSE);
659 + oce_rq_post_buffer(rq, nbufs);
660 + nbufs = 0;
661 + cqe_consumed = 0;
662 + }
663 +
664 + if (!poll && (--npkts <= 0)) {
560 665 break;
561 666 }
667 + cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
668 + struct oce_nic_rx_cqe);
669 +
562 670 } /* for all valid CQEs */
563 671
564 - if (mblk_head) {
565 - mac_rx(dev->mac_handle, NULL, mblk_head);
672 + if (cqe_consumed) {
673 + oce_arm_cq(dev, cq->cq_id, cqe_consumed, rq->qmode);
674 + oce_rq_post_buffer(rq, nbufs);
675 + } else {
676 + oce_arm_cq(dev, cq->cq_id, 0, rq->qmode);
566 677 }
567 - oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
568 - return (num_cqe);
678 +
679 + mutex_exit(&rq->rx_lock);
680 +
681 + if (!poll && mp_head) {
682 + mac_rx_ring(dev->mac_handle, rq->handle, mp_head,
683 + rq->gen_number);
684 + }
685 +
686 + return (mp_head);
687 +
569 688 } /* oce_drain_rq_cq */
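
Editor's note: the reworked oce_drain_rq_cq() above enforces two different budgets: in polled mode it stops once the byte budget cannot cover the next packet, and in interrupt mode it stops after a fixed packet count. The sketch below models just that control flow, with completion entries simulated as an array of packet lengths; everything else (doorbells, buffer recharge, handoff to MAC) is omitted.

    /* Hypothetical sketch of the byte/packet budgets in the CQ drain loop. */
    #include <stdio.h>

    static int
    drain(const int *pkt_lens, int ncqe, int poll, int nbytes, int npkts)
    {
        int i, handled = 0;

        for (i = 0; i < ncqe; i++) {
            int len = pkt_lens[i];

            if (poll) {
                if (nbytes < len)
                    break;          /* budget exhausted, leave CQE for later */
                nbytes -= len;
            }
            handled++;              /* packet would be passed up here */
            if (!poll && --npkts <= 0)
                break;
        }
        return (handled);
    }

    int
    main(void)
    {
        int lens[] = { 1500, 1500, 64, 9000 };

        printf("polled, 3000-byte budget: %d pkts\n",
            drain(lens, 4, 1, 3000, 0));
        printf("interrupt, 3-pkt budget:  %d pkts\n",
            drain(lens, 4, 0, 0, 3));
        return (0);
    }
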
570 689
571 690 /*
572 691 * function to free mblk databuffer to the RQ pool
573 692 *
574 693 * arg - pointer to the receive buffer descriptor
575 694 *
576 695 * return none
577 696 */
578 697 void
579 698 oce_rx_pool_free(char *arg)
580 699 {
581 700 oce_rq_bdesc_t *rqbd;
582 701 struct oce_rq *rq;
702 + struct oce_dev *dev;
583 703
584 704 /* During destroy, arg will be NULL */
585 705 if (arg == NULL) {
586 706 return;
587 707 }
588 708
589 709 /* retrieve the pointers from arg */
590 710 rqbd = (oce_rq_bdesc_t *)(void *)arg;
591 711 rq = rqbd->rq;
592 - rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
593 - rqbd->rqb->size, 0, &rqbd->fr_rtn);
712 + dev = rq->parent;
713 + rqbd->mp = desballoc((uchar_t *)rqbd->rqb.base,
714 + rqbd->rqb.size, 0, &rqbd->fr_rtn);
594 715
595 - if (rqbd->mp) {
596 - rqbd->mp->b_rptr =
597 - (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
598 - }
599 -
600 716 oce_rqb_free(rq, rqbd);
601 717 (void) atomic_dec_32(&rq->pending);
718 +
719 + if (rq->pending == 0) {
720 + mutex_enter(&rq->rq_fini_lock);
721 + if (rq->qstate == QFINI_PENDING) {
722 + oce_rq_fini(dev, rq);
723 + }
724 + mutex_exit(&rq->rq_fini_lock);
725 + }
602 726 } /* rx_pool_free */
603 727
604 728 /*
605 729 * function to stop the RX
606 730 *
607 731 * rq - pointer to RQ structure
608 732 *
609 733 * return none
610 734 */
611 735 void
612 736 oce_clean_rq(struct oce_rq *rq)
613 737 {
614 738 uint16_t num_cqe = 0;
615 739 struct oce_cq *cq;
616 740 struct oce_dev *dev;
617 741 struct oce_nic_rx_cqe *cqe;
618 742 int32_t ti = 0;
743 + int frag_cnt;
619 744
620 745 dev = rq->parent;
621 746 cq = rq->cq;
622 747 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
623 748 /* dequeue till you reach an invalid cqe */
624 749 for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
625 750
626 751 while (RQ_CQE_VALID(cqe)) {
627 752 DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
753 + frag_cnt = cqe->u0.s.num_fragments & 0x7;
754 + if (frag_cnt == 0) {
755 + oce_log(dev, CE_NOTE, MOD_RX, "%s",
756 + "Got Rx Completion Marble Returning ...\n");
757 + RQ_CQE_INVALIDATE(cqe);
758 + return;
759 + }
628 760 oce_rx_drop_pkt(rq, cqe);
629 761 atomic_add_32(&rq->buf_avail,
630 762 -(cqe->u0.s.num_fragments & 0x7));
631 763 oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
632 764 RQ_CQE_INVALIDATE(cqe);
633 765 RING_GET(cq->ring, 1);
634 766 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
635 767 struct oce_nic_rx_cqe);
636 768 num_cqe++;
637 769 }
770 + if (num_cqe == 0) {
771 + /* arm the queue again to get completion marble */
772 + oce_arm_cq(dev, cq->cq_id, 0, 1);
773 + } else {
774 + /* reset counter to reap valid completions again */
775 + num_cqe = 0;
776 + }
638 777 OCE_MSDELAY(1);
639 778 }
640 779 } /* oce_clean_rq */
641 780
642 781 /*
643 782 * function to start the RX
644 783 *
645 784 * rq - pointer to RQ structure
646 785 *
 648 787 * return number of rqe's charged.
648 787 */
649 788 int
650 789 oce_start_rq(struct oce_rq *rq)
651 790 {
652 791 int ret = 0;
653 792 int to_charge = 0;
654 793 struct oce_dev *dev = rq->parent;
655 794 to_charge = rq->cfg.q_len - rq->buf_avail;
656 795 to_charge = min(to_charge, rq->rqb_free);
657 796 atomic_add_32(&rq->rqb_free, -to_charge);
658 797 (void) oce_rq_charge(rq, to_charge, B_FALSE);
659 798 /* ok to do it here since Rx has not even started */
660 799 oce_rq_post_buffer(rq, to_charge);
800 + rq->qmode = OCE_MODE_INTR;
661 801 oce_arm_cq(dev, rq->cq->cq_id, 0, B_TRUE);
662 802 return (ret);
663 803 } /* oce_start_rq */
664 804
665 805 /* Checks for pending rx buffers with Stack */
666 806 int
667 807 oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout)
668 808 {
669 809 int ti;
670 810 _NOTE(ARGUNUSED(dev));
671 811
672 812 for (ti = 0; ti < timeout; ti++) {
673 813 if (rq->pending > 0) {
674 814 OCE_MSDELAY(10);
675 815 continue;
676 816 } else {
677 - rq->pending = 0;
678 817 break;
679 818 }
680 819 }
820 +
821 + if (rq->pending != 0) {
822 + oce_log(dev, CE_NOTE, MOD_CONFIG,
823 + "%d pending RX buffers in rq=0x%p", rq->pending,
824 + (void *)rq);
825 + }
681 826 return (rq->pending);
827 +}
828 +
829 +static boolean_t
830 +oce_check_tagged(struct oce_dev *dev, struct oce_nic_rx_cqe *cqe)
831 +{
832 + boolean_t tagged = B_FALSE;
833 + if (((dev->drvfn_caps & DRVFN_CAPAB_BE3_NATIVE) &&
834 + cqe->u0.s.vlan_tag_present) ||
835 + (!(dev->drvfn_caps & DRVFN_CAPAB_BE3_NATIVE) &&
836 + cqe->u0.v0.vlan_tag_present)) {
837 + if (dev->function_mode & FLEX10_MODE) {
838 + if (cqe->u0.s.qnq)
839 + tagged = B_TRUE;
840 + } else if (dev->pvid != 0) {
841 + if (dev->pvid != cqe->u0.v0.vlan_tag)
842 + tagged = B_TRUE;
843 + } else
844 + tagged = B_TRUE;
845 + }
846 + return (tagged);
682 847 }
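
Editor's note: oce_check_tagged() above decides whether a completion should be treated as VLAN-tagged: the CQE must flag a tag, and then either Flex-10 mode honours only the inner (qnq) tag, a configured port VLAN id (pvid) suppresses frames tagged with that pvid, or, with no pvid set, any tag counts. The sketch below restates that decision with invented flag names; it is illustration only, not the driver's API.

    /* Hypothetical sketch of the "is this frame really tagged?" decision. */
    #include <stdio.h>
    #include <stdbool.h>

    static bool
    frame_is_tagged(bool cqe_has_tag, bool flex10, bool qnq,
        unsigned pvid, unsigned vlan_tag)
    {
        if (!cqe_has_tag)
            return (false);
        if (flex10)
            return (qnq);           /* only the inner tag matters */
        if (pvid != 0)
            return (vlan_tag != pvid);
        return (true);
    }

    int
    main(void)
    {
        printf("%d\n", frame_is_tagged(true, false, false, 0, 100));   /* 1 */
        printf("%d\n", frame_is_tagged(true, false, false, 100, 100)); /* 0 */
        printf("%d\n", frame_is_tagged(true, true, false, 0, 100));    /* 0 */
        return (0);
    }
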