NEX-1890 update oce from source provided by Emulex
--- old/usr/src/uts/common/io/fibre-channel/fca/oce/oce_tx.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/oce/oce_tx.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 -/* Copyright © 2003-2011 Emulex. All rights reserved. */
22 +/*
23 + * Copyright (c) 2009-2012 Emulex. All rights reserved.
24 + * Use is subject to license terms.
25 + */
23 26
27 +
28 +
24 29 /*
25 30 * Source file containing the implementation of the Transmit
26 31 * Path
27 32 */
28 33
29 34 #include <oce_impl.h>
30 35
31 36 static void oce_free_wqed(struct oce_wq *wq, oce_wqe_desc_t *wqed);
32 37 static int oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed,
33 38 mblk_t *mp, uint32_t pkt_len);
34 39 static int oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
35 40 uint32_t pkt_len);
36 -static void oce_wqb_dtor(struct oce_wq *wq, oce_wq_bdesc_t *wqbd);
37 -static int oce_wqb_ctor(oce_wq_bdesc_t *wqbd, struct oce_wq *wq,
38 - size_t size, int flags);
39 41 static inline oce_wq_bdesc_t *oce_wqb_alloc(struct oce_wq *wq);
40 42 static void oce_wqb_free(struct oce_wq *wq, oce_wq_bdesc_t *wqbd);
41 43
42 44 static void oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
43 45 static void oce_wqm_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
44 -static oce_wq_mdesc_t *oce_wqm_alloc(struct oce_wq *wq);
46 +static inline oce_wq_mdesc_t *oce_wqm_alloc(struct oce_wq *wq);
45 47 static int oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq);
46 48 static void oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd);
47 49 static void oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed);
48 -static void oce_remove_vtag(mblk_t *mp);
49 -static void oce_insert_vtag(mblk_t *mp, uint16_t vlan_tag);
50 50 static inline int oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm);
51 51
52 52
53 -static ddi_dma_attr_t tx_map_dma_attr = {
54 - DMA_ATTR_V0, /* version number */
55 - 0x0000000000000000ull, /* low address */
56 - 0xFFFFFFFFFFFFFFFFull, /* high address */
57 - 0x0000000000010000ull, /* dma counter max */
58 - OCE_TXMAP_ALIGN, /* alignment */
59 - 0x7FF, /* burst sizes */
60 - 0x00000001, /* minimum transfer size */
61 - 0x00000000FFFFFFFFull, /* maximum transfer size */
62 - 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
63 - OCE_MAX_TXDMA_COOKIES, /* scatter/gather list length */
64 - 0x00000001, /* granularity */
65 - DDI_DMA_FLAGERR /* dma_attr_flags */
66 -};
67 -
68 -
69 -ddi_dma_attr_t oce_tx_dma_buf_attr = {
70 - DMA_ATTR_V0, /* version number */
71 - 0x0000000000000000ull, /* low address */
72 - 0xFFFFFFFFFFFFFFFFull, /* high address */
73 - 0x00000000FFFFFFFFull, /* dma counter max */
74 - OCE_DMA_ALIGNMENT, /* alignment */
75 - 0x000007FF, /* burst sizes */
76 - 0x00000001, /* minimum transfer size */
77 - 0x00000000FFFFFFFFull, /* maximum transfer size */
78 - 0xFFFFFFFFFFFFFFFFull, /* maximum segment size */
79 - 1, /* scatter/gather list length */
80 - 0x00000001, /* granularity */
81 - DDI_DMA_FLAGERR /* dma_attr_flags */
82 -};
83 -
84 53 /*
85 54 * WQ map handle destructor
86 55 *
87 56 * wq - Pointer to WQ structure
88 57 * wqmd - pointer to WQE mapping handle descriptor
89 58 *
90 59 * return none
91 60 */
92 61
93 62 static void
94 63 oce_wqm_dtor(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
95 64 {
96 65 _NOTE(ARGUNUSED(wq));
97 66 /* Free the DMA handle */
98 67 if (wqmd->dma_handle != NULL)
99 68 (void) ddi_dma_free_handle(&(wqmd->dma_handle));
100 69 wqmd->dma_handle = NULL;
101 70 } /* oce_wqm_dtor */
102 71
103 72 /*
104 73  * WQ map handles constructor
105 74 *
106 75 * wqmd - pointer to WQE mapping handle descriptor
107 76 * wq - Pointer to WQ structure
108 77 *
109 78 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
110 79 */
111 80 static int
112 81 oce_wqm_ctor(oce_wq_mdesc_t *wqmd, struct oce_wq *wq)
113 82 {
114 83 struct oce_dev *dev;
115 84 int ret;
85 + ddi_dma_attr_t tx_map_attr = {0};
116 86
117 87 dev = wq->parent;
88 + /* Populate the DMA attributes structure */
89 + tx_map_attr.dma_attr_version = DMA_ATTR_V0;
90 + tx_map_attr.dma_attr_addr_lo = 0x0000000000000000ull;
91 + tx_map_attr.dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull;
92 + tx_map_attr.dma_attr_count_max = 0x00000000FFFFFFFFull;
93 + tx_map_attr.dma_attr_align = OCE_TXMAP_ALIGN;
94 + tx_map_attr.dma_attr_burstsizes = 0x000007FF;
95 + tx_map_attr.dma_attr_minxfer = 0x00000001;
96 + tx_map_attr.dma_attr_maxxfer = 0x00000000FFFFFFFFull;
97 + tx_map_attr.dma_attr_seg = 0xFFFFFFFFFFFFFFFFull;
98 + tx_map_attr.dma_attr_sgllen = OCE_MAX_TXDMA_COOKIES;
99 + tx_map_attr.dma_attr_granular = 0x00000001;
100 +
101 + if (DDI_FM_DMA_ERR_CAP(dev->fm_caps)) {
102 + tx_map_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
103 + }
118 104 /* Allocate DMA handle */
119 - ret = ddi_dma_alloc_handle(dev->dip, &tx_map_dma_attr,
105 + ret = ddi_dma_alloc_handle(dev->dip, &tx_map_attr,
120 106 DDI_DMA_DONTWAIT, NULL, &wqmd->dma_handle);
121 107
122 108 return (ret);
123 109 } /* oce_wqm_ctor */
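
For reference, a minimal self-contained sketch of the pattern the rewritten constructor uses: the ddi_dma_attr_t is built on the stack for each handle, and DDI_DMA_FLAGERR is requested only when the driver's FMA capabilities include DMA error reporting. The function name, alignment and SGL-length values below are illustrative assumptions, not the driver's.

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/ddifm.h>

static int
example_alloc_tx_map_handle(dev_info_t *dip, int fm_caps, int sgllen,
    ddi_dma_handle_t *hdlp)
{
        ddi_dma_attr_t attr = {0};

        attr.dma_attr_version = DMA_ATTR_V0;
        attr.dma_attr_addr_lo = 0x0000000000000000ull;
        attr.dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull;
        attr.dma_attr_count_max = 0x00000000FFFFFFFFull;
        attr.dma_attr_align = 1;                /* assumed alignment */
        attr.dma_attr_burstsizes = 0x000007FF;
        attr.dma_attr_minxfer = 1;
        attr.dma_attr_maxxfer = 0x00000000FFFFFFFFull;
        attr.dma_attr_seg = 0xFFFFFFFFFFFFFFFFull;
        attr.dma_attr_sgllen = sgllen;
        attr.dma_attr_granular = 1;

        /* request handle-level DMA error reporting only if FMA allows it */
        if (DDI_FM_DMA_ERR_CAP(fm_caps))
                attr.dma_attr_flags |= DDI_DMA_FLAGERR;

        return (ddi_dma_alloc_handle(dip, &attr, DDI_DMA_DONTWAIT,
            NULL, hdlp));
}

Building the attributes per handle is what lets the old file-scope tx_map_dma_attr and its FMA fix-up routine be deleted further down in this change.
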
124 110
125 111 /*
126 112 * function to create WQ mapping handles cache
127 113 *
128 114 * wq - pointer to WQ structure
129 115 *
130 116 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
131 117 */
132 118 int
133 119 oce_wqm_cache_create(struct oce_wq *wq)
134 120 {
135 121 struct oce_dev *dev = wq->parent;
136 122 int size;
137 123 int cnt;
138 124 int ret;
139 125
140 126 size = wq->cfg.nhdl * sizeof (oce_wq_mdesc_t);
141 127 wq->wq_mdesc_array = kmem_zalloc(size, KM_NOSLEEP);
142 128 if (wq->wq_mdesc_array == NULL) {
143 129 return (DDI_FAILURE);
144 130 }
145 131
146 - /* Create the free buffer list */
147 - OCE_LIST_CREATE(&wq->wq_mdesc_list, DDI_INTR_PRI(dev->intr_pri));
132 + wq->wqm_freelist =
133 + kmem_zalloc(wq->cfg.nhdl * sizeof (oce_wq_mdesc_t *), KM_NOSLEEP);
134 + if (wq->wqm_freelist == NULL) {
135 + kmem_free(wq->wq_mdesc_array, size);
136 + return (DDI_FAILURE);
137 + }
148 138
149 139 for (cnt = 0; cnt < wq->cfg.nhdl; cnt++) {
150 140 ret = oce_wqm_ctor(&wq->wq_mdesc_array[cnt], wq);
151 141 if (ret != DDI_SUCCESS) {
152 142 goto wqm_fail;
153 143 }
154 - OCE_LIST_INSERT_TAIL(&wq->wq_mdesc_list,
155 - &wq->wq_mdesc_array[cnt]);
144 + wq->wqm_freelist[cnt] = &wq->wq_mdesc_array[cnt];
145 + atomic_inc_32(&wq->wqm_free);
156 146 }
147 +
148 + wq->wqmd_next_free = 0;
149 + wq->wqmd_rc_head = 0;
150 +
151 + mutex_init(&wq->wqm_alloc_lock, NULL, MUTEX_DRIVER,
152 + DDI_INTR_PRI(dev->intr_pri));
153 + mutex_init(&wq->wqm_free_lock, NULL, MUTEX_DRIVER,
154 + DDI_INTR_PRI(dev->intr_pri));
157 155 return (DDI_SUCCESS);
158 156
159 157 wqm_fail:
160 158 oce_wqm_cache_destroy(wq);
161 159 return (DDI_FAILURE);
162 160 }
163 161
164 162 /*
165 163 * function to destroy WQ mapping handles cache
166 164 *
167 165 * wq - pointer to WQ structure
168 166 *
169 167 * return none
170 168 */
171 169 void
172 170 oce_wqm_cache_destroy(struct oce_wq *wq)
173 171 {
174 172 oce_wq_mdesc_t *wqmd;
175 173
176 - while ((wqmd = OCE_LIST_REM_HEAD(&wq->wq_mdesc_list)) != NULL) {
174 + while ((wqmd = oce_wqm_alloc(wq)) != NULL) {
177 175 oce_wqm_dtor(wq, wqmd);
178 176 }
179 177
178 + mutex_destroy(&wq->wqm_alloc_lock);
179 + mutex_destroy(&wq->wqm_free_lock);
180 + kmem_free(wq->wqm_freelist,
181 + wq->cfg.nhdl * sizeof (oce_wq_mdesc_t *));
180 182 kmem_free(wq->wq_mdesc_array,
181 183 wq->cfg.nhdl * sizeof (oce_wq_mdesc_t));
182 -
183 - OCE_LIST_DESTROY(&wq->wq_mdesc_list);
184 184 }
185 185
186 186 /*
187 187 * function to create WQ buffer cache
188 188 *
189 189 * wq - pointer to WQ structure
190 190 * buf_size - size of the buffer
191 191 *
192 192 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
193 193 */
194 194 int
195 195 oce_wqb_cache_create(struct oce_wq *wq, size_t buf_size)
196 196 {
197 197 struct oce_dev *dev = wq->parent;
198 + oce_wq_bdesc_t *wqbd;
199 + uint64_t paddr;
200 + caddr_t vaddr;
198 201 int size;
199 - int cnt;
202 + int bufs_per_cookie = 0;
203 + int tidx = 0;
204 + int ncookies = 0;
205 + int i = 0;
206 + ddi_dma_cookie_t cookie;
207 + ddi_dma_attr_t tx_buf_attr = {0};
200 208 int ret;
201 209
202 210 size = wq->cfg.nbufs * sizeof (oce_wq_bdesc_t);
203 211 wq->wq_bdesc_array = kmem_zalloc(size, KM_NOSLEEP);
204 212 if (wq->wq_bdesc_array == NULL) {
205 213 return (DDI_FAILURE);
206 214 }
207 215
208 - /* Create the free buffer list */
209 - OCE_LIST_CREATE(&wq->wq_buf_list, DDI_INTR_PRI(dev->intr_pri));
216 + wq->wqb_freelist =
217 + kmem_zalloc(wq->cfg.nbufs * sizeof (oce_wq_bdesc_t *), KM_NOSLEEP);
218 + if (wq->wqb_freelist == NULL) {
219 + kmem_free(wq->wq_bdesc_array,
220 + wq->cfg.nbufs * sizeof (oce_wq_bdesc_t));
221 + return (DDI_FAILURE);
222 + }
210 223
211 - for (cnt = 0; cnt < wq->cfg.nbufs; cnt++) {
212 - ret = oce_wqb_ctor(&wq->wq_bdesc_array[cnt],
213 - wq, buf_size, DDI_DMA_STREAMING);
224 + size = wq->cfg.nbufs * wq->cfg.buf_size;
225 +
226 + /* Populate dma attributes */
227 + tx_buf_attr.dma_attr_version = DMA_ATTR_V0;
228 + tx_buf_attr.dma_attr_addr_lo = 0x0000000000000000ull;
229 + tx_buf_attr.dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull;
230 + tx_buf_attr.dma_attr_count_max = 0x00000000FFFFFFFFull;
231 + tx_buf_attr.dma_attr_align = OCE_DMA_ALIGNMENT;
232 + tx_buf_attr.dma_attr_burstsizes = 0x000007FF;
233 + tx_buf_attr.dma_attr_minxfer = 0x00000001;
234 + tx_buf_attr.dma_attr_maxxfer = 0x00000000FFFFFFFFull;
235 + tx_buf_attr.dma_attr_seg = 0xFFFFFFFFFFFFFFFFull;
236 + tx_buf_attr.dma_attr_granular = (uint32_t)buf_size;
237 +
238 + if (DDI_FM_DMA_ERR_CAP(dev->fm_caps)) {
239 + tx_buf_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
240 + }
241 +
242 + tx_buf_attr.dma_attr_sgllen = 1;
243 +
244 + ret = oce_alloc_dma_buffer(dev, &wq->wqb, size, &tx_buf_attr,
245 + DDI_DMA_STREAMING|DDI_DMA_WRITE);
246 + if (ret != DDI_SUCCESS) {
247 + tx_buf_attr.dma_attr_sgllen =
248 + size/ddi_ptob(dev->dip, (ulong_t)1) + 2;
249 + ret = oce_alloc_dma_buffer(dev, &wq->wqb, size, &tx_buf_attr,
250 + DDI_DMA_STREAMING|DDI_DMA_WRITE);
214 251 if (ret != DDI_SUCCESS) {
215 - goto wqb_fail;
252 + kmem_free(wq->wq_bdesc_array,
253 + wq->cfg.nbufs * sizeof (oce_wq_bdesc_t));
254 + kmem_free(wq->wqb_freelist,
255 + wq->cfg.nbufs * sizeof (oce_wq_bdesc_t *));
256 + return (DDI_FAILURE);
216 257 }
217 - OCE_LIST_INSERT_TAIL(&wq->wq_buf_list,
218 - &wq->wq_bdesc_array[cnt]);
219 258 }
220 - return (DDI_SUCCESS);
221 259
222 -wqb_fail:
223 - oce_wqb_cache_destroy(wq);
224 - return (DDI_FAILURE);
260 + wqbd = wq->wq_bdesc_array;
261 + vaddr = wq->wqb.base;
262 + cookie = wq->wqb.cookie;
263 + ncookies = wq->wqb.ncookies;
264 + do {
265 + paddr = cookie.dmac_laddress;
266 + bufs_per_cookie = cookie.dmac_size/buf_size;
267 + for (i = 0; i < bufs_per_cookie; i++, wqbd++) {
268 + wqbd->wqb.acc_handle = wq->wqb.acc_handle;
269 + wqbd->wqb.dma_handle = wq->wqb.dma_handle;
270 + wqbd->wqb.base = vaddr;
271 + wqbd->wqb.addr = paddr;
272 + wqbd->wqb.len = buf_size;
273 + wqbd->wqb.size = buf_size;
274 + wqbd->wqb.off = tidx * buf_size;
275 + wqbd->frag_addr.dw.addr_lo = ADDR_LO(paddr);
276 + wqbd->frag_addr.dw.addr_hi = ADDR_HI(paddr);
277 + wq->wqb_freelist[tidx] = wqbd;
278 + /* increment the addresses */
279 + paddr += buf_size;
280 + vaddr += buf_size;
281 + atomic_inc_32(&wq->wqb_free);
282 + tidx++;
283 + if (tidx >= wq->cfg.nbufs)
284 + break;
285 + }
286 + if (--ncookies > 0) {
287 + (void) ddi_dma_nextcookie(wq->wqb.dma_handle, &cookie);
288 + }
289 + } while (ncookies > 0);
290 +
291 + wq->wqbd_next_free = 0;
292 + wq->wqbd_rc_head = 0;
293 +
294 + mutex_init(&wq->wqb_alloc_lock, NULL, MUTEX_DRIVER,
295 + DDI_INTR_PRI(dev->intr_pri));
296 + mutex_init(&wq->wqb_free_lock, NULL, MUTEX_DRIVER,
297 + DDI_INTR_PRI(dev->intr_pri));
298 + return (DDI_SUCCESS);
225 299 }
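
The loop above carves one large, pre-bound DMA allocation into fixed-size Tx copy buffers, walking the handle's cookie list as it goes. A simplified, self-contained sketch of that carving step follows; the ex_buf_t type and function name are assumptions made for the example, and it presumes the backing memory came from a single contiguous kernel allocation so the virtual address can advance in lock-step with the cookies.

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

typedef struct ex_buf {
        caddr_t         va;     /* kernel virtual address of this buffer */
        uint64_t        pa;     /* device (DMA) address of this buffer */
        size_t          len;
} ex_buf_t;

static void
ex_carve_buffers(ddi_dma_handle_t dmah, ddi_dma_cookie_t cookie,
    uint_t ncookies, caddr_t base_va, size_t buf_size,
    ex_buf_t *bufs, uint_t nbufs)
{
        caddr_t va = base_va;
        uint_t idx = 0;

        while (ncookies > 0 && idx < nbufs) {
                uint64_t pa = cookie.dmac_laddress;
                size_t left = cookie.dmac_size;

                /* slice this cookie into as many whole buffers as fit */
                while (left >= buf_size && idx < nbufs) {
                        bufs[idx].va = va;
                        bufs[idx].pa = pa;
                        bufs[idx].len = buf_size;
                        va += buf_size;
                        pa += buf_size;
                        left -= buf_size;
                        idx++;
                }
                if (--ncookies > 0)
                        ddi_dma_nextcookie(dmah, &cookie);
        }
}

One bind for the whole cache replaces the per-buffer constructor/destructor pair that this change removes below.
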
226 300
227 301 /*
228 302 * function to destroy WQ buffer cache
229 303 *
230 304 * wq - pointer to WQ structure
231 305 *
232 306 * return none
233 307 */
234 308 void
235 309 oce_wqb_cache_destroy(struct oce_wq *wq)
236 310 {
237 - oce_wq_bdesc_t *wqbd;
238 - while ((wqbd = OCE_LIST_REM_HEAD(&wq->wq_buf_list)) != NULL) {
239 - oce_wqb_dtor(wq, wqbd);
240 - }
311 + /* Free Tx buffer dma memory */
312 + oce_free_dma_buffer(wq->parent, &wq->wqb);
313 +
314 + mutex_destroy(&wq->wqb_alloc_lock);
315 + mutex_destroy(&wq->wqb_free_lock);
316 + kmem_free(wq->wqb_freelist,
317 + wq->cfg.nbufs * sizeof (oce_wq_bdesc_t *));
318 + wq->wqb_freelist = NULL;
241 319 kmem_free(wq->wq_bdesc_array,
242 320 wq->cfg.nbufs * sizeof (oce_wq_bdesc_t));
243 - OCE_LIST_DESTROY(&wq->wq_buf_list);
244 321 }
245 322
246 323 /*
247 - * WQ buffer constructor
248 - *
249 - * wqbd - pointer to WQ buffer descriptor
250 - * wq - pointer to WQ structure
251 - * size - size of the buffer
252 - * flags - KM_SLEEP or KM_NOSLEEP
253 - *
254 - * return DDI_SUCCESS=>success, DDI_FAILURE=>error
255 - */
256 -static int
257 -oce_wqb_ctor(oce_wq_bdesc_t *wqbd, struct oce_wq *wq, size_t size, int flags)
258 -{
259 - struct oce_dev *dev;
260 - dev = wq->parent;
261 -
262 - wqbd->wqb = oce_alloc_dma_buffer(dev, size, &oce_tx_dma_buf_attr,
263 - flags);
264 - if (wqbd->wqb == NULL) {
265 - return (DDI_FAILURE);
266 - }
267 - wqbd->frag_addr.dw.addr_lo = ADDR_LO(wqbd->wqb->addr);
268 - wqbd->frag_addr.dw.addr_hi = ADDR_HI(wqbd->wqb->addr);
269 - return (DDI_SUCCESS);
270 -}
271 -
272 -/*
273 - * WQ buffer destructor
274 - *
275 - * wq - pointer to WQ structure
276 - * wqbd - pointer to WQ buffer descriptor
277 - *
278 - * return none
279 - */
280 -static void
281 -oce_wqb_dtor(struct oce_wq *wq, oce_wq_bdesc_t *wqbd)
282 -{
283 - oce_free_dma_buffer(wq->parent, wqbd->wqb);
284 -}
285 -
286 -/*
287 324 * function to alloc WQE buffer descriptor
288 325 *
289 326 * wq - pointer to WQ structure
290 327 *
291 328 * return pointer to WQE buffer descriptor
292 329 */
293 330 static inline oce_wq_bdesc_t *
294 331 oce_wqb_alloc(struct oce_wq *wq)
295 332 {
296 - return (OCE_LIST_REM_HEAD(&wq->wq_buf_list));
333 + oce_wq_bdesc_t *wqbd;
334 + if (oce_atomic_reserve(&wq->wqb_free, 1) < 0) {
335 + return (NULL);
336 + }
337 +
338 + mutex_enter(&wq->wqb_alloc_lock);
339 + wqbd = wq->wqb_freelist[wq->wqbd_next_free];
340 + wq->wqb_freelist[wq->wqbd_next_free] = NULL;
341 + wq->wqbd_next_free = GET_Q_NEXT(wq->wqbd_next_free, 1, wq->cfg.nbufs);
342 + mutex_exit(&wq->wqb_alloc_lock);
343 +
344 + return (wqbd);
297 345 }
298 346
299 347 /*
300 348 * function to free WQE buffer descriptor
301 349 *
302 350 * wq - pointer to WQ structure
303 351 * wqbd - pointer to WQ buffer descriptor
304 352 *
305 353 * return none
306 354 */
307 355 static inline void
308 356 oce_wqb_free(struct oce_wq *wq, oce_wq_bdesc_t *wqbd)
309 357 {
310 - OCE_LIST_INSERT_TAIL(&wq->wq_buf_list, wqbd);
358 + mutex_enter(&wq->wqb_free_lock);
359 + wq->wqb_freelist[wq->wqbd_rc_head] = wqbd;
360 + wq->wqbd_rc_head = GET_Q_NEXT(wq->wqbd_rc_head, 1, wq->cfg.nbufs);
361 + atomic_inc_32(&wq->wqb_free);
362 + mutex_exit(&wq->wqb_free_lock);
311 363 } /* oce_wqb_free */
312 364
313 365 /*
314 366 * function to allocate WQE mapping descriptor
315 367 *
316 368 * wq - pointer to WQ structure
317 369 *
318 370 * return pointer to WQE mapping descriptor
319 371 */
320 372 static inline oce_wq_mdesc_t *
321 373 oce_wqm_alloc(struct oce_wq *wq)
322 374 {
323 - return (OCE_LIST_REM_HEAD(&wq->wq_mdesc_list));
375 + oce_wq_mdesc_t *wqmd;
376 +
377 + if (oce_atomic_reserve(&wq->wqm_free, 1) < 0) {
378 + return (NULL);
379 + }
380 +
381 + mutex_enter(&wq->wqm_alloc_lock);
382 + wqmd = wq->wqm_freelist[wq->wqmd_next_free];
383 + wq->wqm_freelist[wq->wqmd_next_free] = NULL;
384 + wq->wqmd_next_free = GET_Q_NEXT(wq->wqmd_next_free, 1, wq->cfg.nhdl);
385 + mutex_exit(&wq->wqm_alloc_lock);
386 +
387 + return (wqmd);
324 388 } /* oce_wqm_alloc */
325 389
326 390 /*
327 391 * function to insert WQE mapping descriptor to the list
328 392 *
329 393 * wq - pointer to WQ structure
330 394 * wqmd - Pointer to WQ mapping descriptor
331 395 *
332 396 * return none
333 397 */
334 398 static inline void
335 399 oce_wqm_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
336 400 {
337 - OCE_LIST_INSERT_TAIL(&wq->wq_mdesc_list, wqmd);
401 + mutex_enter(&wq->wqm_free_lock);
402 + wq->wqm_freelist[wq->wqmd_rc_head] = wqmd;
403 + wq->wqmd_rc_head = GET_Q_NEXT(wq->wqmd_rc_head, 1, wq->cfg.nhdl);
404 + atomic_inc_32(&wq->wqm_free);
405 + mutex_exit(&wq->wqm_free_lock);
338 406 }
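
Both the buffer and mapping caches now share the same lock-light freelist scheme: a circular array of descriptor pointers, an atomically maintained free count, and separate alloc/free mutexes so the transmit and completion paths do not serialize on a single list lock. A simplified, self-contained sketch of that scheme is given here; the ex_* names are illustrative, and the back-out-on-underflow check stands in for the driver's oce_atomic_reserve() helper.

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/ksynch.h>

typedef struct ex_freelist {
        kmutex_t        alloc_lock;     /* serializes consumers */
        kmutex_t        free_lock;      /* serializes producers */
        void            **slots;        /* circular array of free entries */
        uint32_t        nslots;
        uint32_t        alloc_idx;      /* next slot to take, under alloc_lock */
        uint32_t        free_idx;       /* next slot to fill, under free_lock */
        volatile uint32_t nfree;        /* count of free entries */
} ex_freelist_t;

static void *
ex_freelist_get(ex_freelist_t *fl)
{
        void *item;

        /* reserve one entry; undo the decrement if the list was empty */
        if ((int32_t)atomic_dec_32_nv(&fl->nfree) < 0) {
                atomic_inc_32(&fl->nfree);
                return (NULL);
        }
        mutex_enter(&fl->alloc_lock);
        item = fl->slots[fl->alloc_idx];
        fl->slots[fl->alloc_idx] = NULL;
        fl->alloc_idx = (fl->alloc_idx + 1) % fl->nslots;
        mutex_exit(&fl->alloc_lock);
        return (item);
}

static void
ex_freelist_put(ex_freelist_t *fl, void *item)
{
        mutex_enter(&fl->free_lock);
        fl->slots[fl->free_idx] = item;
        fl->free_idx = (fl->free_idx + 1) % fl->nslots;
        atomic_inc_32(&fl->nfree);
        mutex_exit(&fl->free_lock);
}

Because the reservation is taken before either index is touched, a successful get always finds a populated slot, which is what allows the alloc and free sides to use independent locks.
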
339 407
340 408 /*
341 409 * function to free WQE mapping descriptor
342 410 *
343 411 * wq - pointer to WQ structure
344 412 * wqmd - Pointer to WQ mapping descriptor
345 413 *
346 414 * return none
347 415 */
348 416 static void
349 417 oce_wqmd_free(struct oce_wq *wq, oce_wq_mdesc_t *wqmd)
350 418 {
351 419 if (wqmd == NULL) {
352 420 return;
353 421 }
354 422 (void) ddi_dma_unbind_handle(wqmd->dma_handle);
355 423 oce_wqm_free(wq, wqmd);
356 424 }
357 425
358 426 /*
359 427 * WQED kmem_cache constructor
360 428 *
361 429 * buf - pointer to WQE descriptor
362 430 *
363 431 * return DDI_SUCCESS
364 432 */
365 433 int
366 434 oce_wqe_desc_ctor(void *buf, void *arg, int kmflags)
367 435 {
368 436 _NOTE(ARGUNUSED(buf));
369 437 _NOTE(ARGUNUSED(arg));
370 438 _NOTE(ARGUNUSED(kmflags));
371 439
372 440 return (DDI_SUCCESS);
373 441 }
374 442
375 443 /*
376 444 * WQED kmem_cache destructor
377 445 *
378 446 * buf - pointer to WQE descriptor
379 447 *
380 448 * return none
381 449 */
382 450 void
383 451 oce_wqe_desc_dtor(void *buf, void *arg)
384 452 {
385 453 _NOTE(ARGUNUSED(buf));
386 454 _NOTE(ARGUNUSED(arg));
387 455 }
388 456
389 457 /*
390 - * function to choose a WQ given a mblk depending on priority, flowID etc.
391 - *
392 - * dev - software handle to device
393 - * mp - the mblk to send
394 - *
395 - * return pointer to the WQ selected
396 - */
397 -static uint8_t oce_tx_hash_policy = 0x4;
398 -struct oce_wq *
399 -oce_get_wq(struct oce_dev *dev, mblk_t *mp)
400 -{
401 - struct oce_wq *wq;
402 - int qidx = 0;
403 - if (dev->nwqs > 1) {
404 - qidx = mac_pkt_hash(DL_ETHER, mp, oce_tx_hash_policy, B_TRUE);
405 - qidx = qidx % dev->nwqs;
406 -
407 - } else {
408 - qidx = 0;
409 - }
410 - wq = dev->wq[qidx];
411 - /* for the time being hardcode */
412 - return (wq);
413 -} /* oce_get_wq */
414 -
415 -/*
416 458 * function to populate the single WQE
417 459 *
418 460 * wq - pointer to wq
419 461 * wqed - pointer to WQ entry descriptor
420 462 *
421 463 * return none
422 464 */
423 465 #pragma inline(oce_fill_ring_descs)
424 466 static void
425 467 oce_fill_ring_descs(struct oce_wq *wq, oce_wqe_desc_t *wqed)
426 468 {
427 469
428 470 struct oce_nic_frag_wqe *wqe;
429 471 int i;
430 472         /* Copy the precreated WQE descs to the ring desc */
431 473 for (i = 0; i < wqed->wqe_cnt; i++) {
432 474 wqe = RING_GET_PRODUCER_ITEM_VA(wq->ring,
433 475 struct oce_nic_frag_wqe);
434 476
435 477 bcopy(&wqed->frag[i], wqe, NIC_WQE_SIZE);
436 478 RING_PUT(wq->ring, 1);
437 479 }
438 480 } /* oce_fill_ring_descs */
439 481
440 482 /*
441 483 * function to copy the packet to preallocated Tx buffer
442 484 *
443 485 * wq - pointer to WQ
444 486 * wqed - Pointer to WQE descriptor
445 487 * mp - Pointer to packet chain
446 488 * pktlen - Size of the packet
447 489 *
448 490 * return 0=>success, error code otherwise
449 491 */
450 492 static int
451 493 oce_bcopy_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
452 494 uint32_t pkt_len)
453 495 {
454 496 oce_wq_bdesc_t *wqbd;
455 497 caddr_t buf_va;
456 498 struct oce_dev *dev = wq->parent;
457 499 int len = 0;
458 500
459 501 wqbd = oce_wqb_alloc(wq);
460 502 if (wqbd == NULL) {
461 503 atomic_inc_32(&dev->tx_noxmtbuf);
462 504 oce_log(dev, CE_WARN, MOD_TX, "%s",
463 505 "wqb pool empty");
464 506 return (ENOMEM);
465 507 }
466 508
467 509 /* create a fragment wqe for the packet */
468 510 wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi = wqbd->frag_addr.dw.addr_hi;
469 511 wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo = wqbd->frag_addr.dw.addr_lo;
470 512 buf_va = DBUF_VA(wqbd->wqb);
471 513
472 514 /* copy pkt into buffer */
473 515 for (len = 0; mp != NULL && len < pkt_len; mp = mp->b_cont) {
474 516 bcopy(mp->b_rptr, buf_va, MBLKL(mp));
475 517 buf_va += MBLKL(mp);
476 518 len += MBLKL(mp);
477 519 }
478 520
479 - (void) ddi_dma_sync(DBUF_DHDL(wqbd->wqb), 0, pkt_len,
480 - DDI_DMA_SYNC_FORDEV);
521 + DBUF_SYNC(wqbd->wqb, wqbd->wqb.off, pkt_len, DDI_DMA_SYNC_FORDEV);
481 522
482 523 if (oce_fm_check_dma_handle(dev, DBUF_DHDL(wqbd->wqb))) {
483 524 ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
484 525 /* Free the buffer */
485 526 oce_wqb_free(wq, wqbd);
486 527 return (EIO);
487 528 }
488 529 wqed->frag[wqed->frag_idx].u0.s.frag_len = pkt_len;
530 + wqed->frag[wqed->frag_idx].u0.s.rsvd0 = 0;
489 531 wqed->hdesc[wqed->nhdl].hdl = (void *)(wqbd);
490 532 wqed->hdesc[wqed->nhdl].type = COPY_WQE;
491 533 wqed->frag_cnt++;
492 534 wqed->frag_idx++;
493 535 wqed->nhdl++;
494 536 return (0);
495 537 } /* oce_bcopy_wqe */
496 538
497 539 /*
498 540 * function to copy the packet or dma map on the fly depending on size
499 541 *
500 542 * wq - pointer to WQ
501 543 * wqed - Pointer to WQE descriptor
502 544 * mp - Pointer to packet chain
503 545 *
504 546 * return DDI_SUCCESS=>success, DDI_FAILURE=>error
505 547 */
506 548 static int
507 549 oce_map_wqe(struct oce_wq *wq, oce_wqe_desc_t *wqed, mblk_t *mp,
508 550 uint32_t pkt_len)
509 551 {
510 552 ddi_dma_cookie_t cookie;
511 553 oce_wq_mdesc_t *wqmd;
512 554 uint32_t ncookies;
513 555 int ret;
514 556 struct oce_dev *dev = wq->parent;
515 557
516 558 wqmd = oce_wqm_alloc(wq);
517 559 if (wqmd == NULL) {
518 560 oce_log(dev, CE_WARN, MOD_TX, "%s",
519 561 "wqm pool empty");
520 562 return (ENOMEM);
521 563 }
522 564
523 565 ret = ddi_dma_addr_bind_handle(wqmd->dma_handle,
524 566 (struct as *)0, (caddr_t)mp->b_rptr,
525 567 pkt_len, DDI_DMA_WRITE | DDI_DMA_STREAMING,
526 568 DDI_DMA_DONTWAIT, NULL, &cookie, &ncookies);
527 569 if (ret != DDI_DMA_MAPPED) {
528 570 oce_log(dev, CE_WARN, MOD_TX, "MAP FAILED %d",
529 571 ret);
530 572 /* free the last one */
531 573 oce_wqm_free(wq, wqmd);
532 574 return (ENOMEM);
533 575 }
534 576 do {
535 577 wqed->frag[wqed->frag_idx].u0.s.frag_pa_hi =
536 578 ADDR_HI(cookie.dmac_laddress);
537 579 wqed->frag[wqed->frag_idx].u0.s.frag_pa_lo =
538 580 ADDR_LO(cookie.dmac_laddress);
539 581 wqed->frag[wqed->frag_idx].u0.s.frag_len =
540 582 (uint32_t)cookie.dmac_size;
583 + wqed->frag[wqed->frag_idx].u0.s.rsvd0 = 0;
541 584 wqed->frag_cnt++;
542 585 wqed->frag_idx++;
543 586 if (--ncookies > 0)
544 - ddi_dma_nextcookie(wqmd->dma_handle,
545 - &cookie);
546 - else break;
587 + ddi_dma_nextcookie(wqmd->dma_handle, &cookie);
588 + else
589 + break;
547 590 } while (ncookies > 0);
548 591
549 592 wqed->hdesc[wqed->nhdl].hdl = (void *)wqmd;
550 593 wqed->hdesc[wqed->nhdl].type = MAPPED_WQE;
551 594 wqed->nhdl++;
552 595 return (0);
553 596 } /* oce_map_wqe */
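
oce_map_wqe() relies on the standard DDI sequence for zero-copy transmit: bind the fragment's kernel address to a preallocated DMA handle, emit one hardware fragment per returned cookie, and unbind once the completion for the WQE arrives. A generic sketch of that sequence is shown below; the emit() callback is an assumption standing in for the WQE fragment fill, not a driver interface.

#include <sys/ddi.h>
#include <sys/sunddi.h>

static int
ex_bind_and_walk(ddi_dma_handle_t dmah, caddr_t buf, size_t len,
    void (*emit)(uint64_t pa, size_t sz, void *arg), void *arg)
{
        ddi_dma_cookie_t cookie;
        uint_t ncookies;
        int ret;

        ret = ddi_dma_addr_bind_handle(dmah, NULL, buf, len,
            DDI_DMA_WRITE | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
            &cookie, &ncookies);
        if (ret != DDI_DMA_MAPPED)
                return (ret);

        /* one scatter/gather element per returned cookie */
        for (;;) {
                emit(cookie.dmac_laddress, cookie.dmac_size, arg);
                if (--ncookies == 0)
                        break;
                ddi_dma_nextcookie(dmah, &cookie);
        }
        /* caller unbinds with ddi_dma_unbind_handle() after Tx completion */
        return (DDI_DMA_MAPPED);
}
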
554 597
555 598 static inline int
556 599 oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm)
557 600 {
558 601 struct oce_nic_tx_cqe *cqe;
559 602 uint16_t num_cqe = 0;
560 603 struct oce_cq *cq;
561 604 oce_wqe_desc_t *wqed;
562 605 int wqe_freed = 0;
563 606 struct oce_dev *dev;
607 + list_t wqe_desc_list;
564 608
565 609 cq = wq->cq;
566 610 dev = wq->parent;
567 - (void) ddi_dma_sync(cq->ring->dbuf->dma_handle, 0, 0,
568 - DDI_DMA_SYNC_FORKERNEL);
569 611
612 + DBUF_SYNC(cq->ring->dbuf, 0, 0, DDI_DMA_SYNC_FORKERNEL);
613 +
614 + list_create(&wqe_desc_list, sizeof (oce_wqe_desc_t),
615 + offsetof(oce_wqe_desc_t, link));
616 +
570 617 mutex_enter(&wq->txc_lock);
571 618 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
572 619 while (WQ_CQE_VALID(cqe)) {
573 620
574 621 DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe));
575 622
576 623 /* update stats */
577 624 if (cqe->u0.s.status != 0) {
578 625 atomic_inc_32(&dev->tx_errors);
579 626 }
580 627
581 - /* complete the WQEs */
582 - wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list);
628 + mutex_enter(&wq->wqed_list_lock);
629 + wqed = list_remove_head(&wq->wqe_desc_list);
630 + mutex_exit(&wq->wqed_list_lock);
631 + if (wqed == NULL) {
632 + oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
633 + "oce_process_tx_compl: wqed list empty");
634 + break;
635 + }
636 + atomic_dec_32(&wq->wqe_pending);
583 637
584 - wqe_freed = wqed->wqe_cnt;
585 - oce_free_wqed(wq, wqed);
586 - RING_GET(wq->ring, wqe_freed);
587 - atomic_add_32(&wq->wq_free, wqe_freed);
638 + wqe_freed += wqed->wqe_cnt;
639 + list_insert_tail(&wqe_desc_list, wqed);
588 640 /* clear the valid bit and progress cqe */
589 641 WQ_CQE_INVALIDATE(cqe);
590 642 RING_GET(cq->ring, 1);
643 +
591 644 cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
592 645 struct oce_nic_tx_cqe);
593 646 num_cqe++;
594 647 } /* for all valid CQE */
648 +
649 + if (num_cqe == 0 && wq->wqe_pending > 0) {
650 + mutex_exit(&wq->txc_lock);
651 + return (0);
652 + }
653 +
654 + DBUF_SYNC(cq->ring->dbuf, 0, 0, DDI_DMA_SYNC_FORDEV);
655 + oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm);
656 +
657 + RING_GET(wq->ring, wqe_freed);
658 + atomic_add_32(&wq->wq_free, wqe_freed);
659 + if (wq->resched && wq->wq_free >= OCE_MAX_TX_HDL) {
660 + wq->resched = B_FALSE;
661 + mac_tx_ring_update(dev->mac_handle, wq->handle);
662 + }
663 + wq->last_compl = ddi_get_lbolt();
595 664 mutex_exit(&wq->txc_lock);
596 - if (num_cqe)
597 - oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm);
665 + while (wqed = list_remove_head(&wqe_desc_list)) {
666 + oce_free_wqed(wq, wqed);
667 + }
668 + list_destroy(&wqe_desc_list);
598 669 return (num_cqe);
599 670 } /* oce_process_tx_completion */
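
The rewritten completion handler collects finished descriptors onto a private list_t while txc_lock is held and only tears them down after the lock is dropped, which keeps DMA unbinds and freemsg() calls out of the locked region. A compact, self-contained sketch of that pattern follows, with illustrative ex_* names rather than driver symbols.

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/ksynch.h>
#include <sys/list.h>

typedef struct ex_desc {
        list_node_t     link;
        /* ...completion payload... */
} ex_desc_t;

static void
ex_reap(kmutex_t *lock, list_t *pending, void (*destroy)(ex_desc_t *))
{
        list_t done;
        ex_desc_t *d;

        list_create(&done, sizeof (ex_desc_t), offsetof(ex_desc_t, link));

        mutex_enter(lock);
        /* cheap while the lock is held: pointer moves only */
        while ((d = list_remove_head(pending)) != NULL)
                list_insert_tail(&done, d);
        mutex_exit(lock);

        /* expensive teardown happens with the lock dropped */
        while ((d = list_remove_head(&done)) != NULL)
                destroy(d);
        list_destroy(&done);
}
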
600 671
601 672 /*
602 673 * function to drain a TxCQ and process its CQEs
603 674 *
604 675 * dev - software handle to the device
605 676 * cq - pointer to the cq to drain
606 677 *
607 678 * return the number of CQEs processed
608 679 */
609 -uint16_t
610 -oce_drain_wq_cq(void *arg)
680 +void *
681 +oce_drain_wq_cq(void *arg, int arg2, int arg3)
611 682 {
612 - uint16_t num_cqe = 0;
613 683 struct oce_dev *dev;
614 684 struct oce_wq *wq;
615 685
686 + _NOTE(ARGUNUSED(arg2));
687 + _NOTE(ARGUNUSED(arg3));
688 +
616 689 wq = (struct oce_wq *)arg;
617 690 dev = wq->parent;
618 -
691 + wq->last_intr = ddi_get_lbolt();
619 692 /* do while we do not reach a cqe that is not valid */
620 - num_cqe = oce_process_tx_compl(wq, B_FALSE);
621 -
622 - /* check if we need to restart Tx */
623 - if (wq->resched && num_cqe) {
693 + (void) oce_process_tx_compl(wq, B_FALSE);
694 + (void) atomic_cas_uint(&wq->qmode, OCE_MODE_INTR, OCE_MODE_POLL);
695 + if ((wq->wq_free > OCE_MAX_TX_HDL) && wq->resched) {
624 696 wq->resched = B_FALSE;
625 - mac_tx_update(dev->mac_handle);
697 + mac_tx_ring_update(dev->mac_handle, wq->handle);
626 698 }
699 + return (NULL);
627 700
628 - return (num_cqe);
629 701 } /* oce_process_wq_cqe */
630 702
703 +
704 +boolean_t
705 +oce_tx_stall_check(struct oce_dev *dev)
706 +{
707 + struct oce_wq *wq;
708 + int ring = 0;
709 + boolean_t is_stalled = B_FALSE;
710 +
711 + if (!(dev->state & STATE_MAC_STARTED) ||
712 + (dev->link_status != LINK_STATE_UP)) {
713 + return (B_FALSE);
714 + }
715 +
716 + for (ring = 0; ring < dev->tx_rings; ring++) {
717 + wq = &dev->wq[ring];
718 +
719 + if (wq->resched) {
720 + if (wq->wq_free > OCE_MAX_TX_HDL) {
721 + mac_tx_ring_update(dev->mac_handle, wq->handle);
722 + } else {
723 + /* enable the interrupts only once */
724 + if (atomic_cas_uint(&wq->qmode, OCE_MODE_POLL,
725 + OCE_MODE_INTR) == OCE_MODE_POLL) {
726 + oce_arm_cq(dev, wq->cq->cq_id, 0,
727 + B_TRUE);
728 + }
729 + }
730 + }
731 + }
732 + return (is_stalled);
733 +}
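
The qmode handling above uses a compare-and-swap so that, when a stalled ring must fall back from polling to interrupt mode, only one thread performs the transition and re-arms the CQ. A minimal sketch of that idiom follows; the EX_* constants are assumptions for the example, not the driver's OCE_MODE_* values.

#include <sys/types.h>
#include <sys/atomic.h>

#define EX_MODE_POLL    0u
#define EX_MODE_INTR    1u

/* returns B_TRUE only for the single thread that wins the transition */
static boolean_t
ex_enable_intr_once(volatile uint_t *qmode)
{
        return (atomic_cas_uint(qmode, EX_MODE_POLL, EX_MODE_INTR) ==
            EX_MODE_POLL);
}

Because atomic_cas_uint() returns the previous value, a thread that loses the race observes the interrupt mode already set and skips the duplicate arm of the CQ.
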
631 734 /*
632 - * function to insert vtag to packet
735 + * Function to check whether TX stall
736 + * can occur for an IPV6 packet for
737 + * some versions of BE cards
633 738 *
634 - * mp - mblk pointer
635 - * vlan_tag - tag to be inserted
739 + * dev - software handle to the device
740 + * mp - Pointer to packet chain
741 + * ipoffset - ip header offset in mp chain
636 742 *
637 - * return none
743 + * return B_TRUE or B_FALSE
638 744 */
639 -static inline void
640 -oce_insert_vtag(mblk_t *mp, uint16_t vlan_tag)
641 -{
642 - struct ether_vlan_header *evh;
643 - (void) memmove(mp->b_rptr - VTAG_SIZE,
644 - mp->b_rptr, 2 * ETHERADDRL);
645 - mp->b_rptr -= VTAG_SIZE;
646 - evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
647 - evh->ether_tpid = htons(VLAN_TPID);
648 - evh->ether_tci = htons(vlan_tag);
649 -}
650 745
746 +static inline int
747 +oce_check_ipv6_tx_stall(struct oce_dev *dev,
748 + mblk_t *mp, uint32_t ip_offset) {
749 +
750 + _NOTE(ARGUNUSED(dev));
751 + ip6_t *ipv6_hdr;
752 + struct ip6_opt *v6_op;
753 + ipv6_hdr = (ip6_t *)(void *)
754 + (mp->b_rptr + ip_offset);
755 + v6_op = (struct ip6_opt *)(ipv6_hdr+1);
756 + if (ipv6_hdr->ip6_nxt != IPPROTO_TCP &&
757 + ipv6_hdr->ip6_nxt != IPPROTO_UDP &&
758 + v6_op->ip6o_len == 0xFF) {
759 + return (B_TRUE);
760 + } else {
761 + return (B_FALSE);
762 + }
763 +}
651 764 /*
652 - * function to strip vtag from packet
765 + * Function to insert VLAN Tag to
766 + * mp chain
653 767 *
654 - * mp - mblk pointer
768 + * dev - software handle to the device
769 + * mblk_head - Pointer holding packet chain
655 770 *
656 - * return none
771 + * return DDI_FAILURE or DDI_SUCCESS
657 772 */
658 773
659 -static inline void
660 -oce_remove_vtag(mblk_t *mp)
661 -{
662 - (void) memmove(mp->b_rptr + VTAG_SIZE, mp->b_rptr,
663 - ETHERADDRL * 2);
664 - mp->b_rptr += VTAG_SIZE;
774 +static int
775 +oce_ipv6_tx_stall_workaround(struct oce_dev *dev,
776 + mblk_t **mblk_head) {
777 +
778 + mblk_t *mp;
779 + struct ether_vlan_header *evh;
780 + mp = allocb(OCE_HDR_LEN, BPRI_HI);
781 + if (mp == NULL) {
782 + return (DDI_FAILURE);
783 + }
784 + /* copy ether header */
785 + (void) memcpy(mp->b_rptr, (*mblk_head)->b_rptr, 2 * ETHERADDRL);
786 + evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
787 + evh->ether_tpid = htons(VLAN_TPID);
788 + evh->ether_tci = ((dev->pvid > 0) ? LE_16(dev->pvid) :
789 + htons(dev->QnQ_tag));
790 + mp->b_wptr = mp->b_rptr + (2 * ETHERADDRL) + VTAG_SIZE;
791 + (*mblk_head)->b_rptr += 2 * ETHERADDRL;
792 +
793 + if (MBLKL(*mblk_head) > 0) {
794 + mp->b_cont = *mblk_head;
795 + } else {
796 + mp->b_cont = (*mblk_head)->b_cont;
797 + freeb(*mblk_head);
798 + }
799 + *mblk_head = mp;
800 + return (DDI_SUCCESS);
665 801 }
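
The workaround builds a fresh header mblk and links the original chain behind it instead of shifting data in place. A generic STREAMS sketch of that prepend step is below; the function name and parameters are assumptions made for illustration.

#include <sys/stream.h>
#include <sys/systm.h>

static mblk_t *
ex_prepend_header(mblk_t *chain, const void *hdr, size_t hdrlen)
{
        mblk_t *nmp = allocb(hdrlen, BPRI_HI);

        if (nmp == NULL)
                return (NULL);          /* caller still owns the original chain */
        bcopy(hdr, nmp->b_wptr, hdrlen);
        nmp->b_wptr += hdrlen;
        nmp->b_cont = chain;            /* original data follows the new header */
        return (nmp);
}
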
666 802
667 803 /*
668 804 * function to xmit Single packet over the wire
669 805 *
670 806 * wq - pointer to WQ
671 807 * mp - Pointer to packet chain
672 808 *
673 809 * return pointer to the packet
674 810 */
675 811 mblk_t *
676 812 oce_send_packet(struct oce_wq *wq, mblk_t *mp)
677 813 {
678 814 struct oce_nic_hdr_wqe *wqeh;
679 815 struct oce_dev *dev;
680 816 struct ether_header *eh;
681 817 struct ether_vlan_header *evh;
682 818 int32_t num_wqes;
683 819 uint16_t etype;
684 820 uint32_t ip_offset;
685 821 uint32_t csum_flags = 0;
686 822 boolean_t use_copy = B_FALSE;
687 823 boolean_t tagged = B_FALSE;
688 - uint16_t vlan_tag;
824 + boolean_t ipv6_stall = B_FALSE;
689 825 uint32_t reg_value = 0;
690 826 oce_wqe_desc_t *wqed = NULL;
691 827 mblk_t *nmp = NULL;
692 828 mblk_t *tmp = NULL;
693 829 uint32_t pkt_len = 0;
694 830 int num_mblks = 0;
695 831 int ret = 0;
696 832 uint32_t mss = 0;
697 833 uint32_t flags = 0;
698 834 int len = 0;
699 835
700 836 /* retrieve the adap priv struct ptr */
701 837 dev = wq->parent;
702 838
703 839 /* check if we have enough free slots */
704 840 if (wq->wq_free < dev->tx_reclaim_threshold) {
705 841 (void) oce_process_tx_compl(wq, B_FALSE);
706 842 }
707 843 if (wq->wq_free < OCE_MAX_TX_HDL) {
844 + wq->resched = B_TRUE;
845 + wq->last_defered = ddi_get_lbolt();
846 + atomic_inc_32(&wq->tx_deferd);
708 847 return (mp);
709 848 }
710 849
711 850 /* check if we should copy */
712 851 for (tmp = mp; tmp != NULL; tmp = tmp->b_cont) {
713 852 pkt_len += MBLKL(tmp);
714 853 num_mblks++;
715 854 }
716 855
717 856 if (pkt_len == 0 || num_mblks == 0) {
718 857 freemsg(mp);
719 858 return (NULL);
720 859 }
721 860
722 861 /* retrieve LSO information */
723 862 mac_lso_get(mp, &mss, &flags);
724 863
725 864 /* get the offload flags */
726 865 mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &csum_flags);
727 866
728 867         /* restrict the mapped segment to what we support */
729 868 if (num_mblks > OCE_MAX_TX_HDL) {
730 869 nmp = msgpullup(mp, -1);
731 870 if (nmp == NULL) {
732 871 atomic_inc_32(&wq->pkt_drops);
733 872 freemsg(mp);
734 873 return (NULL);
735 874 }
736 875 /* Reset it to new collapsed mp */
737 876 freemsg(mp);
738 877 mp = nmp;
878 + /* restore the flags on new mp */
879 + if (flags & HW_LSO) {
880 + DB_CKSUMFLAGS(mp) |= HW_LSO;
881 + DB_LSOMSS(mp) = (uint16_t)mss;
882 + }
883 + if (csum_flags != 0) {
884 + DB_CKSUMFLAGS(mp) |= csum_flags;
885 + }
739 886 }
740 887
741 888 /* Get the packet descriptor for Tx */
742 889 wqed = kmem_cache_alloc(wq->wqed_cache, KM_NOSLEEP);
743 890 if (wqed == NULL) {
744 891 atomic_inc_32(&wq->pkt_drops);
745 892 freemsg(mp);
746 893 return (NULL);
747 894 }
895 +
896 + /* Save the WQ pointer */
897 + wqed->wq = wq;
898 + wqed->frag_idx = 1; /* index zero is always header */
899 + wqed->frag_cnt = 0;
900 + wqed->nhdl = 0;
901 + wqed->mp = NULL;
902 +
748 903 eh = (struct ether_header *)(void *)mp->b_rptr;
749 904 if (ntohs(eh->ether_type) == VLAN_TPID) {
750 905 evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
751 906 tagged = B_TRUE;
752 907 etype = ntohs(evh->ether_type);
753 908 ip_offset = sizeof (struct ether_vlan_header);
754 - pkt_len -= VTAG_SIZE;
755 - vlan_tag = ntohs(evh->ether_tci);
756 - oce_remove_vtag(mp);
909 +
757 910 } else {
758 911 etype = ntohs(eh->ether_type);
759 912 ip_offset = sizeof (struct ether_header);
760 913 }
914 + /* Check Workaround required for IPV6 TX stall */
915 + if (BE3_A1(dev) && (etype == ETHERTYPE_IPV6) &&
916 + ((dev->QnQ_valid) || (!tagged && dev->pvid != 0))) {
917 + len = ip_offset + sizeof (ip6_t) + sizeof (struct ip6_opt);
918 + if (MBLKL(mp) < len) {
919 + nmp = msgpullup(mp, len);
920 + if (nmp == NULL) {
921 + oce_free_wqed(wq, wqed);
922 + atomic_inc_32(&wq->pkt_drops);
923 + freemsg(mp);
924 + return (NULL);
925 + }
926 + freemsg(mp);
927 + mp = nmp;
928 + }
929 + ipv6_stall = oce_check_ipv6_tx_stall(dev, mp, ip_offset);
930 + if (ipv6_stall) {
931 + if (dev->QnQ_queried)
932 + ret = oce_ipv6_tx_stall_workaround(dev, &mp);
933 + else {
934 + /* FW Workaround not available */
935 + ret = DDI_FAILURE;
936 + }
937 + if (ret) {
938 + oce_free_wqed(wq, wqed);
939 + atomic_inc_32(&wq->pkt_drops);
940 + freemsg(mp);
941 + return (NULL);
942 + }
943 + pkt_len += VTAG_SIZE;
944 + }
945 + }
761 946
762 - /* Save the WQ pointer */
763 - wqed->wq = wq;
764 - wqed->frag_idx = 1; /* index zero is always header */
765 - wqed->frag_cnt = 0;
766 - wqed->nhdl = 0;
767 - wqed->mp = NULL;
768 - OCE_LIST_LINK_INIT(&wqed->link);
769 -
770 947 /* If entire packet is less than the copy limit just do copy */
771 948 if (pkt_len < dev->tx_bcopy_limit) {
772 949 use_copy = B_TRUE;
773 950 ret = oce_bcopy_wqe(wq, wqed, mp, pkt_len);
774 951 } else {
775 952 /* copy or dma map the individual fragments */
776 953 for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
777 954 len = MBLKL(nmp);
778 955 if (len == 0) {
779 956 continue;
780 957 }
781 958 if (len < dev->tx_bcopy_limit) {
782 959 ret = oce_bcopy_wqe(wq, wqed, nmp, len);
783 960 } else {
784 961 ret = oce_map_wqe(wq, wqed, nmp, len);
785 962 }
786 963 if (ret != 0)
787 964 break;
788 965 }
789 966 }
790 967
791 968 /*
792 969 * Any failure other than insufficient Q entries
793 970 * drop the packet
794 971 */
795 972 if (ret != 0) {
796 973 oce_free_wqed(wq, wqed);
797 974 atomic_inc_32(&wq->pkt_drops);
798 975 freemsg(mp);
799 976 return (NULL);
800 977 }
801 978
802 979 wqeh = (struct oce_nic_hdr_wqe *)&wqed->frag[0];
803 980 bzero(wqeh, sizeof (struct oce_nic_hdr_wqe));
804 981
805 982 /* fill rest of wqe header fields based on packet */
806 983 if (flags & HW_LSO) {
807 984 wqeh->u0.s.lso = B_TRUE;
808 985 wqeh->u0.s.lso_mss = mss;
809 986 }
810 987 if (csum_flags & HCK_FULLCKSUM) {
811 988 uint8_t *proto;
812 989 if (etype == ETHERTYPE_IP) {
813 990 proto = (uint8_t *)(void *)
814 991 (mp->b_rptr + ip_offset);
815 992 if (proto[9] == 6)
816 993 /* IPPROTO_TCP */
817 994 wqeh->u0.s.tcpcs = B_TRUE;
818 995 else if (proto[9] == 17)
819 996 /* IPPROTO_UDP */
820 997 wqeh->u0.s.udpcs = B_TRUE;
821 998 }
822 999 }
823 1000
824 1001 if (csum_flags & HCK_IPV4_HDRCKSUM)
825 1002 wqeh->u0.s.ipcs = B_TRUE;
826 - if (tagged) {
827 - wqeh->u0.s.vlan = B_TRUE;
828 - wqeh->u0.s.vlan_tag = vlan_tag;
829 - }
1003 + if (ipv6_stall) {
1004 + wqeh->u0.s.complete = B_FALSE;
1005 + wqeh->u0.s.event = B_TRUE;
1006 + } else {
830 1007
831 - wqeh->u0.s.complete = B_TRUE;
832 - wqeh->u0.s.event = B_TRUE;
1008 + wqeh->u0.s.complete = B_TRUE;
1009 + wqeh->u0.s.event = B_TRUE;
1010 + }
833 1011 wqeh->u0.s.crc = B_TRUE;
834 1012 wqeh->u0.s.total_length = pkt_len;
835 1013
836 1014 num_wqes = wqed->frag_cnt + 1;
837 1015
838 1016 /* h/w expects even no. of WQEs */
839 - if (num_wqes & 0x1) {
1017 + if ((num_wqes & 0x1) && !(LANCER_CHIP(dev))) {
840 1018 bzero(&wqed->frag[num_wqes], sizeof (struct oce_nic_frag_wqe));
841 1019 num_wqes++;
842 1020 }
843 1021 wqed->wqe_cnt = (uint16_t)num_wqes;
844 1022 wqeh->u0.s.num_wqe = num_wqes;
845 1023 DW_SWAP(u32ptr(&wqed->frag[0]), (wqed->wqe_cnt * NIC_WQE_SIZE));
846 1024
847 1025 mutex_enter(&wq->tx_lock);
848 - if (num_wqes > wq->wq_free) {
1026 + if (num_wqes > wq->wq_free - 2) {
849 1027 atomic_inc_32(&wq->tx_deferd);
850 1028 mutex_exit(&wq->tx_lock);
851 1029 goto wqe_fail;
852 1030 }
853 1031 atomic_add_32(&wq->wq_free, -num_wqes);
854 1032
855 1033 /* fill the wq for adapter */
856 1034 oce_fill_ring_descs(wq, wqed);
857 1035
858 1036 /* Set the mp pointer in the wqe descriptor */
859 1037 if (use_copy == B_FALSE) {
860 1038 wqed->mp = mp;
861 1039 }
862 1040 /* Add the packet desc to list to be retrieved during cmpl */
863 - OCE_LIST_INSERT_TAIL(&wq->wqe_desc_list, wqed);
864 - (void) ddi_dma_sync(wq->ring->dbuf->dma_handle, 0, 0,
865 - DDI_DMA_SYNC_FORDEV);
1041 + mutex_enter(&wq->wqed_list_lock);
1042 + list_insert_tail(&wq->wqe_desc_list, wqed);
1043 + mutex_exit(&wq->wqed_list_lock);
1044 + atomic_inc_32(&wq->wqe_pending);
1045 + DBUF_SYNC(wq->ring->dbuf, 0, 0, DDI_DMA_SYNC_FORDEV);
866 1046
867 1047 /* ring tx doorbell */
868 1048 reg_value = (num_wqes << 16) | wq->wq_id;
869 1049 /* Ring the door bell */
870 1050 OCE_DB_WRITE32(dev, PD_TXULP_DB, reg_value);
1051 +
1052 + /* update the ring stats */
1053 + wq->stat_bytes += pkt_len;
1054 + wq->stat_pkts++;
1055 +
871 1056 mutex_exit(&wq->tx_lock);
872 1057 if (oce_fm_check_acc_handle(dev, dev->db_handle) != DDI_FM_OK) {
873 1058 ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
874 1059 }
875 1060
876 1061 /* free mp if copied or packet chain collapsed */
877 1062 if (use_copy == B_TRUE) {
878 1063 freemsg(mp);
879 1064 }
880 1065 return (NULL);
881 1066
882 1067 wqe_fail:
883 1068
884 - if (tagged) {
885 - oce_insert_vtag(mp, vlan_tag);
886 - }
887 1069 oce_free_wqed(wq, wqed);
1070 + wq->resched = B_TRUE;
1071 + wq->last_defered = ddi_get_lbolt();
888 1072 return (mp);
889 1073 } /* oce_send_packet */
890 1074
891 1075 /*
892 1076 * function to free the WQE descriptor
893 1077 *
894 1078 * wq - pointer to WQ
895 1079 * wqed - Pointer to WQE descriptor
896 1080 *
897 1081 * return none
898 1082 */
899 1083 #pragma inline(oce_free_wqed)
900 1084 static void
901 1085 oce_free_wqed(struct oce_wq *wq, oce_wqe_desc_t *wqed)
902 1086 {
903 1087 int i = 0;
904 1088 if (wqed == NULL) {
905 1089 return;
906 1090 }
907 1091
908 1092 for (i = 0; i < wqed->nhdl; i++) {
909 1093 if (wqed->hdesc[i].type == COPY_WQE) {
910 1094 oce_wqb_free(wq, wqed->hdesc[i].hdl);
911 1095 } else if (wqed->hdesc[i].type == MAPPED_WQE) {
912 1096 oce_wqmd_free(wq, wqed->hdesc[i].hdl);
913 1097 }
914 1098 }
915 1099 if (wqed->mp)
916 1100 freemsg(wqed->mp);
917 1101 kmem_cache_free(wq->wqed_cache, wqed);
918 1102 } /* oce_free_wqed */
919 1103
920 1104 /*
921 1105 * function to start the WQ
922 1106 *
923 1107 * wq - pointer to WQ
924 1108 *
925 1109 * return DDI_SUCCESS
926 1110 */
927 1111
928 1112 int
929 1113 oce_start_wq(struct oce_wq *wq)
930 1114 {
931 1115 _NOTE(ARGUNUSED(wq));
932 1116 return (DDI_SUCCESS);
933 1117 } /* oce_start_wq */
934 1118
935 1119 /*
936 1120 * function to stop the WQ
937 1121 *
938 1122 * wq - pointer to WQ
939 1123 *
940 1124 * return none
941 1125 */
942 1126 void
943 1127 oce_clean_wq(struct oce_wq *wq)
944 1128 {
945 1129 oce_wqe_desc_t *wqed;
946 1130 int ti;
947 1131
948 1132 /* Wait for already posted Tx to complete */
949 1133
950 1134 for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {
951 1135 (void) oce_process_tx_compl(wq, B_FALSE);
952 1136 OCE_MSDELAY(1);
953 1137 }
954 1138
955 1139 /* Free the remaining descriptors */
956 - while ((wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list)) != NULL) {
1140 + mutex_enter(&wq->wqed_list_lock);
1141 + while ((wqed = list_remove_head(&wq->wqe_desc_list)) != NULL) {
957 1142 atomic_add_32(&wq->wq_free, wqed->wqe_cnt);
958 1143 oce_free_wqed(wq, wqed);
959 1144 }
1145 + mutex_exit(&wq->wqed_list_lock);
960 1146 oce_drain_eq(wq->cq->eq);
961 1147 } /* oce_stop_wq */
962 -
963 -/*
964 - * function to set the tx mapping handle fma attr
965 - *
966 - * fm_caps - capability flags
967 - *
968 - * return none
969 - */
970 -
971 -void
972 -oce_set_tx_map_dma_fma_flags(int fm_caps)
973 -{
974 - if (fm_caps == DDI_FM_NOT_CAPABLE) {
975 - return;
976 - }
977 -
978 - if (DDI_FM_DMA_ERR_CAP(fm_caps)) {
979 - tx_map_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
980 - } else {
981 - tx_map_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
982 - }
983 -} /* oce_set_tx_map_dma_fma_flags */