Print this page
NEX-1890 update oce from source provided by Emulex
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/sys/fibre-channel/fca/oce/oce_io.h
+++ new/usr/src/uts/common/sys/fibre-channel/fca/oce/oce_io.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
|
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 -/* Copyright © 2003-2011 Emulex. All rights reserved. */
22 +/*
23 + * Copyright (c) 2009-2012 Emulex. All rights reserved.
24 + * Use is subject to license terms.
25 + */
23 26
27 +
28 +
24 29 /*
25 30 * Header file defining the HW IO elements
26 31 */
27 32
28 33 #ifndef _OCE_IO_H_
29 34 #define _OCE_IO_H_
30 35
31 36 #ifdef __cplusplus
32 37 extern "C" {
33 38 #endif
34 39
35 40 #include <sys/types.h>
36 41 #include <sys/dditypes.h>
37 42 #include <sys/mutex.h>
38 43 #include <sys/stream.h>
|
↓ open down ↓ |
5 lines elided |
↑ open up ↑ |
39 44 #include <sys/debug.h>
40 45 #include <sys/byteorder.h>
41 46 #include <oce_hw.h>
42 47 #include <oce_buf.h>
43 48
44 49 #define DEFAULT_MQ_MBOX_TIMEOUT (5 * 1000 * 1000) /* 5 sec (in usec) */
45 50 #define MBX_READY_TIMEOUT (1 * 1000 * 1000) /* 1 sec (in usec) */
46 51 #define DEFAULT_DRAIN_TIME 200 /* Default Drain Time */
47 52 #define MBX_TIMEOUT_SEC 5
48 53 #define STAT_TIMEOUT 2000000 /* update stats every 2 sec */
54 +#define OCE_HDR_LEN 64
49 55
50 56 struct oce_dev;
51 57
52 58 enum eq_len {
53 59 EQ_LEN_256 = 256,
54 60 EQ_LEN_512 = 512,
55 61 EQ_LEN_1024 = 1024,
56 62 EQ_LEN_2048 = 2048,
57 63 EQ_LEN_4096 = 4096
58 64 };
59 65
60 66 enum eqe_size {
61 67 EQE_SIZE_4 = 4,
62 68 EQE_SIZE_16 = 16
63 69 };
64 70
65 71 enum qtype {
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
66 72 QTYPE_EQ,
67 73 QTYPE_MQ,
68 74 QTYPE_WQ,
69 75 QTYPE_RQ,
70 76 QTYPE_CQ,
71 77 QTYPE_RSS
72 78 };
73 79
74 80 typedef enum qstate_e {
75 81 QDELETED = 0x0,
76 - QCREATED = 0x1
82 + QCREATED,
83 + QSTARTED,
84 + QSTOPPED,
85 + QFINI,
86 + QFINI_PENDING
77 87 }qstate_t;
78 88
79 -struct eq_config {
80 - /* number of entries in the eq */
81 - enum eq_len q_len;
82 - /* size of each entry */
83 - enum eqe_size item_size;
84 - /* vector associated with this eq */
85 - uint32_t q_vector_num;
86 - /* minimum possible eq delay i usec */
87 - uint8_t min_eqd;
88 - /* max eq delay in usec */
89 - uint8_t max_eqd;
90 - /* currently configured eq delay in usec */
91 - uint8_t cur_eqd;
92 - /* pad */
93 - uint8_t pad;
89 +enum mb_mode_e {
90 + MBX_BOOTSTRAP = 0x00, /* issue a command on the bootstrap mbox */
91 + MBX_ASYNC_MQ = 0x01 /* issue command thru MCC */
94 92 };
95 93
94 +/* Driver special codes */
95 +
96 +#define MBX_SUCCESS 0x00
97 +#define MBX_FAILURE 0x01
98 +#define MBX_COMPLETED 0x02
99 +#define MBX_QUEUE_FULL 0xF8
100 +#define MBX_BUSY 0xFD
101 +#define MBX_TIMEOUT 0xFE
102 +
96 103 struct oce_eq {
97 - /* Lock for this queue */
98 - kmutex_t lock;
99 104 /* id assigned by the hw to this eq */
100 105 uint32_t eq_id;
101 106 /* handle to the creating parent dev */
102 107 void *parent;
103 - /* callback context */
104 - void *cb_context;
105 108 /* ring buffer for this eq */
106 109 oce_ring_buffer_t *ring;
107 110 /* reference count of this structure */
108 111 uint32_t ref_count;
109 - /* Queue state */
110 - qstate_t qstate;
111 - /* configuration of this eq */
112 - struct eq_config eq_cfg;
112 + /* vector index */
113 + uint32_t idx;
114 + uint32_t qstate;
115 + kmutex_t lock;
113 116 };
114 117
115 118 enum cq_len {
116 119 CQ_LEN_256 = 256,
117 120 CQ_LEN_512 = 512,
118 - CQ_LEN_1024 = 1024
121 + CQ_LEN_1024 = 1024,
122 + CQ_LEN_2048 = 2048
119 123 };
120 124
121 125 struct cq_config {
122 126 /* length of queue */
123 127 enum cq_len q_len;
124 - /* size of each item */
125 - uint32_t item_size;
126 - /* is eventable */
127 - boolean_t is_eventable;
128 - /* solicited eventable? */
129 - boolean_t sol_eventable;
130 - /* no delay? */
131 - boolean_t nodelay;
132 - /* dma coalescing */
133 - uint16_t dma_coalescing;
134 128 };
135 129
136 -typedef uint16_t (*cq_handler_t)(void *arg1);
130 +typedef void * (*cq_handler_t)(void *arg1, int arg2, int arg3);
137 131
138 132 struct oce_cq {
139 - /* lock */
140 - kmutex_t lock;
141 133 /* id given by the hardware */
142 134 uint32_t cq_id;
143 135 /* parent device to which this cq belongs */
144 136 void *parent;
145 137 /* event queue associated with this cq */
146 138 struct oce_eq *eq;
147 139 cq_handler_t cq_handler;
148 140 /* placeholder for callback context */
149 141 void *cb_arg;
150 142 /* ring buffer for this cq */
151 143 oce_ring_buffer_t *ring;
152 144 /* Queue state */
153 145 qstate_t qstate;
154 146 /* configuration of this cq */
155 147 struct cq_config cq_cfg;
156 148 /* reference count of this structure */
157 149 uint32_t ref_count;
158 150 };
159 151
160 152 struct mq_config {
161 153 uint32_t eqd;
162 154 uint8_t q_len;
163 155 uint8_t pad[3];
164 156
165 157 };
166 158
167 159 struct oce_mq {
168 160 /* lock for the mq */
169 161 kmutex_t lock;
170 162 /* handle to the parent device */
171 163 void *parent;
172 164 /* send queue */
173 165 oce_ring_buffer_t *ring;
 175 167 	/* identifier for the mq */
175 167 uint32_t mq_id;
176 168 struct oce_cq *cq;
177 169 struct oce_cq *async_cq;
178 170 /* free entries in Queue */
179 171 uint32_t mq_free;
180 172 /* Queue state */
181 173 qstate_t qstate;
182 174
183 175 /* configuration of this mq */
|
↓ open down ↓ |
33 lines elided |
↑ open up ↑ |
184 176 struct mq_config cfg;
185 177 };
186 178
187 179
188 180 /*
189 181 * utility structure that handles context of mbx
190 182 */
191 183 struct oce_mbx_ctx {
192 184 /* pointer to mbx */
193 185 struct oce_mbx *mbx;
186 + kmutex_t cv_lock;
187 + kcondvar_t cond_var;
 194 188 	/* call back function [optional] */
195 189 void (*cb)(void *ctx);
196 190 /* call back context [optional] */
197 191 void *cb_ctx;
192 + uint32_t mbx_status;
193 + struct oce_mbx *mqe; /* pointer to mq entry */
194 + uint32_t compl_status; /* mgmt status | addl status */
198 195 };
199 196
197 +#define OCE_MODE_POLL B_FALSE
198 +#define OCE_MODE_INTR B_TRUE
199 +
200 200 struct wq_config {
201 201 /* qtype */
202 202 uint8_t wq_type;
203 203 uint16_t buf_size;
204 204 uint8_t pad[1];
205 205 uint32_t q_len; /* number of wqes */
206 206 uint16_t pd_id; /* protection domain id */
207 207 uint16_t pci_fn_num; /* pci function number */
208 208 uint32_t eqd; /* interrupt delay */
209 209 uint32_t nbufs; /* copy buffers */
210 210 uint32_t nhdl; /* preallocated memory handles */
211 211 };
212 212
213 213 struct oce_wq {
214 214 kmutex_t tx_lock; /* lock for the WQ */
215 215 kmutex_t txc_lock; /* tx compl lock */
216 216 void *parent; /* parent of this wq */
217 + mac_ring_handle_t handle; /* ring handle used by crossbow framework */
218 +
217 219 oce_ring_buffer_t *ring; /* ring buffer managing the wqes */
218 220 struct oce_cq *cq; /* cq associated with this wq */
219 221 kmem_cache_t *wqed_cache; /* packet desc cache */
220 222 oce_wq_bdesc_t *wq_bdesc_array; /* buffer desc array */
221 - OCE_LIST_T wq_buf_list; /* buffer list */
222 - OCE_LIST_T wqe_desc_list; /* packet descriptor list */
223 - OCE_LIST_T wq_mdesc_list; /* free list of memory handles */
223 + uint32_t wqb_free; /* Wqb free */
 224 +	uint32_t wqbd_next_free; /* Next available wqbd index */
225 + uint32_t wqbd_rc_head; /* wqbd recycle head */
226 + kmutex_t wqb_alloc_lock; /* wqbd lock in allocation path */
227 + kmutex_t wqb_free_lock; /* wqbd lock in recycle path */
228 + oce_wq_bdesc_t **wqb_freelist; /* Free wqbds */
229 + list_t wqe_desc_list; /* packet descriptor list */
230 + kmutex_t wqed_list_lock; /* wqed list lock */
231 + uint32_t wqe_pending; /* Wqe pending */
224 232 oce_wq_mdesc_t *wq_mdesc_array; /* preallocated memory handles */
233 + uint32_t wqm_free; /* Wqm free */
234 + uint32_t wqmd_next_free; /* Next free wqmd slot */
235 + uint32_t wqmd_rc_head; /* wqmd recycle head */
236 + kmutex_t wqm_alloc_lock; /* Lock for wqm alloc path */
237 + kmutex_t wqm_free_lock; /* Lock for wqm free path */
238 + oce_wq_mdesc_t **wqm_freelist; /* Free wqmds */
225 239 uint32_t wqm_used; /* memory handles uses */
240 + oce_dma_buf_t wqb; /* Tx Copy buffer dma memory */
226 241 boolean_t resched; /* used for mac_tx_update */
227 242 uint32_t wq_free; /* Wqe free */
243 +
 228 244 	uint32_t tx_deferd; /* deferred tx count (mac_tx_update pending) */
229 245 uint32_t pkt_drops; /* drops */
246 +
247 + int64_t last_compl;
248 + int64_t last_armed;
249 + int64_t last_intr;
250 + int64_t last_defered;
251 +
252 + uint64_t stat_bytes;
253 + uint64_t stat_pkts;
254 +
230 255 /* Queue state */
231 256 qstate_t qstate;
257 + uint_t qmode; /* poll or interrupt mode */
232 258 uint16_t wq_id; /* wq ID */
233 259 struct wq_config cfg; /* q config */
234 260 };
235 261
236 262 struct rq_config {
237 263 uint32_t q_len; /* q length */
 238 264 	uint32_t frag_size; /* fragment size. Send log2(size) in command */
239 265 uint32_t mtu; /* max frame size for this RQ */
240 266 uint32_t if_id; /* interface ID to associate this RQ with */
241 267 uint32_t is_rss_queue; /* is this RQ an RSS queue? */
242 268 uint32_t eqd; /* interrupt delay */
243 269 uint32_t nbufs; /* Total data buffers */
244 270 };
245 271
246 272 struct rq_shadow_entry {
247 273 oce_rq_bdesc_t *rqbd;
248 274 };
249 275
250 276 struct oce_rq {
277 +
278 + kmutex_t rx_lock;
279 + kmutex_t rq_fini_lock;
280 + mac_ring_handle_t handle; /* ring handle used by framework */
281 + uint64_t gen_number; /* used by framework */
282 + boolean_t qmode;
283 + uint64_t stat_bytes;
284 + uint64_t stat_pkts;
285 +
251 286 /* RQ config */
252 287 struct rq_config cfg;
253 - /* RQ id */
254 - uint32_t rq_id;
255 - /* parent of this rq */
256 - void *parent;
257 - /* CPU ID assigend to this RQ if it is an RSS queue */
258 - uint32_t rss_cpuid;
288 +
259 289 /* ring buffer managing the RQEs */
260 290 oce_ring_buffer_t *ring;
261 291 /* cq associated with this queue */
262 292 struct oce_cq *cq;
263 293 oce_rq_bdesc_t *rq_bdesc_array;
264 294 /* shadow list of mblk for rq ring */
265 295 oce_rq_bdesc_t **shadow_ring;
266 296 oce_rq_bdesc_t **rqb_freelist;
297 + oce_dma_buf_t rqb; /* data buffer for the rq's */
267 298 uint32_t rqb_free;
268 299 uint32_t rqb_next_free; /* next free slot */
269 300 uint32_t rqb_rc_head; /* recycling head */
 270 301 	uint32_t buf_avail; /* buffer available with hw */
271 302 uint32_t pending; /* Buffers sent up */
272 - /* Queue state */
273 - qstate_t qstate;
274 303 /* rq lock */
275 - kmutex_t rx_lock;
276 304 kmutex_t rc_lock;
305 +
306 + /* parent of this rq */
307 + void *parent;
308 + /* RQ id */
309 + uint32_t rq_id;
 310 +	/* CPU ID assigned to this RQ if it is an RSS queue */
311 + uint32_t rss_cpuid;
312 + /* Queue state */
313 + uint32_t qstate;
314 + void *grp; /* group it belongs to */
277 315 };
278 316
279 317 struct link_status {
280 318 /* dw 0 */
281 319 uint8_t physical_port;
282 320 uint8_t mac_duplex;
283 321 uint8_t mac_speed;
284 322 uint8_t mac_fault;
285 323 /* dw 1 */
286 324 uint8_t mgmt_mac_duplex;
287 325 uint8_t mgmt_mac_speed;
288 326 uint16_t qos_link_speed;
289 327 /* dw2 */
290 328 uint32_t logical_link_status;
291 329 };
292 330
293 -oce_dma_buf_t *oce_alloc_dma_buffer(struct oce_dev *dev,
331 +int oce_alloc_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf,
294 332 uint32_t size, ddi_dma_attr_t *dma_attr, uint32_t flags);
295 333 void oce_free_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf);
296 334
297 -oce_ring_buffer_t *create_ring_buffer(struct oce_dev *dev,
335 +oce_ring_buffer_t *oce_create_ring_buffer(struct oce_dev *dev,
298 336 uint32_t num_items, uint32_t item_size,
299 337 uint32_t flags);
300 338 void destroy_ring_buffer(struct oce_dev *dev, oce_ring_buffer_t *ring);
301 339
302 340 /* Queues */
303 341 int oce_set_eq_delay(struct oce_dev *dev, uint32_t *eq_arr,
304 - uint32_t eq_cnt, uint32_t eq_delay);
342 + uint32_t eq_cnt, uint32_t eq_delay, uint32_t mode);
305 343 void oce_arm_eq(struct oce_dev *dev, int16_t qid, int npopped,
306 344 boolean_t rearm, boolean_t clearint);
307 345 void oce_arm_cq(struct oce_dev *dev, int16_t qid, int npopped,
308 346 boolean_t rearm);
309 347 void oce_drain_eq(struct oce_eq *eq);
310 -void oce_dev_rss_ready(struct oce_dev *dev);
311 348
349 +
312 350 /* Bootstrap */
313 351 int oce_mbox_init(struct oce_dev *dev);
314 352 int oce_mbox_fini(struct oce_dev *dev);
315 353 int oce_mbox_dispatch(struct oce_dev *dev, uint32_t tmo_sec);
316 354 int oce_mbox_wait(struct oce_dev *dev, uint32_t tmo_sec);
317 -int oce_mbox_post(struct oce_dev *dev, struct oce_mbx *mbx,
318 - struct oce_mbx_ctx *mbxctx);
355 +int oce_issue_mbox_passthru(struct oce_dev *dev, queue_t *wq, mblk_t *mp,
356 + uint32_t *rsp_len);
357 +int oce_issue_mbox_cmd(struct oce_dev *dev, struct oce_mbx *mbx,
358 + uint32_t tmo_sec, uint32_t flag);
319 359
320 360 /* Hardware */
321 361 boolean_t oce_is_reset_pci(struct oce_dev *dev);
322 362 int oce_pci_soft_reset(struct oce_dev *dev);
323 363 int oce_POST(struct oce_dev *dev);
324 364 int oce_pci_init(struct oce_dev *dev);
325 365 void oce_pci_fini(struct oce_dev *dev);
326 -int oce_init_txrx(struct oce_dev *dev);
327 -void oce_fini_txrx(struct oce_dev *dev);
366 +int oce_init_tx(struct oce_dev *dev);
367 +void oce_fini_tx(struct oce_dev *dev);
328 368 int oce_create_queues(struct oce_dev *dev);
369 +int oce_create_mcc_queue(struct oce_dev *dev);
329 370 void oce_delete_queues(struct oce_dev *dev);
330 -void oce_delete_nw_interface(struct oce_dev *dev);
331 -int oce_create_nw_interface(struct oce_dev *dev);
371 +void oce_delete_mcc_queue(struct oce_dev *dev);
332 372 int oce_reset_fun(struct oce_dev *dev);
333 373
334 374 /* Transmit */
335 -struct oce_wq *oce_get_wq(struct oce_dev *dev, mblk_t *pkt);
336 -uint16_t oce_drain_wq_cq(void *arg);
375 +void *oce_drain_wq_cq(void *arg, int arg2, int arg3);
337 376 mblk_t *oce_send_packet(struct oce_wq *wq, mblk_t *mp);
338 377 int oce_start_wq(struct oce_wq *wq);
339 378 void oce_clean_wq(struct oce_wq *wq);
340 379
341 -
 342 380 /* Receive */
343 -uint16_t oce_drain_rq_cq(void *arg);
381 +void * oce_drain_rq_cq(void *arg, int arg2, int arg3);
344 382 int oce_start_rq(struct oce_rq *rq);
345 383 void oce_clean_rq(struct oce_rq *rq);
346 384 void oce_rq_discharge(struct oce_rq *rq);
347 385 int oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout);
386 +void oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq);
348 387
349 -/* event handling */
350 -uint16_t oce_drain_mq_cq(void *arg);
351 -int oce_mq_mbox_post(struct oce_dev *dev, struct oce_mbx *mbx,
352 - struct oce_mbx_ctx *mbxctx);
388 +/* Mailbox */
389 +void * oce_drain_mq_cq(void *arg, int arg2, int arg3);
390 +int oce_issue_mq_mbox(struct oce_dev *dev, struct oce_mbx *mbx);
353 391 struct oce_mbx *oce_mq_get_mbx(struct oce_dev *dev);
354 392 void oce_clean_mq(struct oce_mq *mq);
355 393 int oce_start_mq(struct oce_mq *mq);
356 394
357 -
358 -/* mbx functions */
395 +/* mbx helper functions */
359 396 void mbx_common_req_hdr_init(struct mbx_hdr *hdr, uint8_t dom,
360 397 uint8_t port, uint8_t subsys, uint8_t opcode,
361 - uint32_t timeout, uint32_t pyld_len);
398 + uint32_t timeout, uint32_t pyld_len, uint8_t version);
362 399 void mbx_nic_req_hdr_init(struct mbx_hdr *hdr, uint8_t dom, uint8_t port,
363 400 uint8_t opcode, uint32_t timeout, uint32_t pyld_len);
364 -int oce_get_fw_version(struct oce_dev *dev);
401 +int oce_get_fw_version(struct oce_dev *dev, uint32_t mode);
365 402 int oce_read_mac_addr(struct oce_dev *dev, uint32_t if_id, uint8_t perm,
366 - uint8_t type, struct mac_address_format *mac);
403 + uint8_t type, struct mac_address_format *mac, uint32_t mode);
367 404 int oce_if_create(struct oce_dev *dev, uint32_t cap_flags, uint32_t en_flags,
368 - uint16_t vlan_tag, uint8_t *mac_addr, uint32_t *if_id);
369 -int oce_if_del(struct oce_dev *dev, uint32_t if_id);
370 -int oce_num_intr_vectors_set(struct oce_dev *dev, uint32_t num_vectors);
405 + uint16_t vlan_tag, uint8_t *mac_addr, uint32_t *if_id, uint32_t mode);
406 +int oce_if_del(struct oce_dev *dev, uint32_t if_id, uint32_t mode);
371 407
372 -int oce_get_link_status(struct oce_dev *dev, struct link_status *link);
408 +int oce_get_link_status(struct oce_dev *dev, link_state_t *link_status,
409 + int32_t *link_speed, uint8_t *link_duplex, uint8_t cmd_ver, uint32_t mode);
373 410 int oce_set_rx_filter(struct oce_dev *dev,
374 - struct mbx_set_common_ntwk_rx_filter *filter);
411 + struct mbx_set_common_ntwk_rx_filter *filter, uint32_t mode);
375 412 int oce_set_multicast_table(struct oce_dev *dev, uint32_t if_id,
376 - struct ether_addr *mca_table, uint16_t mca_cnt, boolean_t promisc);
377 -int oce_get_fw_config(struct oce_dev *dev);
378 -int oce_get_hw_stats(struct oce_dev *dev);
379 -int oce_set_flow_control(struct oce_dev *dev, uint32_t flow_control);
380 -int oce_get_flow_control(struct oce_dev *dev, uint32_t *flow_control);
381 -int oce_set_promiscuous(struct oce_dev *dev, boolean_t enable);
413 + struct ether_addr *mca_table, uint16_t mca_cnt, boolean_t promisc,
414 + uint32_t mode);
415 +int oce_get_fw_config(struct oce_dev *dev, uint32_t mode);
416 +int oce_get_hw_stats(struct oce_dev *dev, uint32_t mode);
417 +int oce_get_pport_stats(struct oce_dev *dev, uint32_t mode);
418 +int oce_set_flow_control(struct oce_dev *dev, uint32_t flow_control,
419 + uint32_t mode);
420 +int oce_get_flow_control(struct oce_dev *dev, uint32_t *flow_control,
421 + uint32_t mode);
422 +int oce_set_promiscuous(struct oce_dev *dev, boolean_t enable, uint32_t mode);
382 423 int oce_add_mac(struct oce_dev *dev, uint32_t if_id,
383 - const uint8_t *mac, uint32_t *pmac_id);
384 -int oce_del_mac(struct oce_dev *dev, uint32_t if_id, uint32_t *pmac_id);
424 + const uint8_t *mac, uint32_t *pmac_id, uint32_t mode);
425 +int oce_del_mac(struct oce_dev *dev, uint32_t if_id, uint32_t *pmac_id,
426 + uint32_t mode);
385 427 int oce_config_vlan(struct oce_dev *dev, uint32_t if_id,
386 428 struct normal_vlan *vtag_arr,
387 429 uint8_t vtag_cnt, boolean_t untagged,
388 - boolean_t enable_promisc);
389 -int oce_config_link(struct oce_dev *dev, boolean_t enable);
430 + boolean_t enable_promisc, uint32_t mode);
431 +int oce_config_link(struct oce_dev *dev, boolean_t enable, uint32_t mode);
390 432 int oce_config_rss(struct oce_dev *dev, uint16_t if_id, char *hkey, char *itbl,
391 - int tbl_sz, uint16_t rss_type, uint8_t flush);
433 + int tbl_sz, uint16_t rss_type, uint8_t flush, uint32_t mode);
392 434 int oce_issue_mbox(struct oce_dev *dev, queue_t *wq, mblk_t *mp,
393 435 uint32_t *payload_length);
394 436 #ifdef __cplusplus
395 437 }
396 438 #endif
397 439
398 440 #endif /* _OCE_IO_H_ */
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX