1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27
28
29 /*
30 * Header file defining the HW IO elements
31 */
32
33 #ifndef _OCE_IO_H_
34 #define _OCE_IO_H_
35
36 #ifdef __cplusplus
37 extern "C" {
38 #endif
39
40 #include <sys/types.h>
41 #include <sys/dditypes.h>
42 #include <sys/mutex.h>
43 #include <sys/stream.h>
44 #include <sys/debug.h>
45 #include <sys/byteorder.h>
46 #include <oce_hw.h>
47 #include <oce_buf.h>
48
/* Timeouts and sizing constants for mailbox and statistics handling */
#define	DEFAULT_MQ_MBOX_TIMEOUT	(5 * 1000 * 1000) /* 5 sec (in usec) */
#define	MBX_READY_TIMEOUT	(1 * 1000 * 1000) /* 1 sec (in usec) */
#define	DEFAULT_DRAIN_TIME	200	/* Default Drain Time */
#define	MBX_TIMEOUT_SEC		5	/* mailbox command timeout, seconds */
#define	STAT_TIMEOUT		2000000	/* update stats every 2 sec */
#define	OCE_HDR_LEN		64	/* header length in bytes */

struct oce_dev;		/* forward declaration; defined elsewhere */
57
/* Supported event queue (EQ) depths, in number of entries */
enum eq_len {
	EQ_LEN_256  = 256,
	EQ_LEN_512  = 512,
	EQ_LEN_1024 = 1024,
	EQ_LEN_2048 = 2048,
	EQ_LEN_4096 = 4096
};
65
/* Supported event queue entry (EQE) sizes, in bytes */
enum eqe_size {
	EQE_SIZE_4  = 4,
	EQE_SIZE_16 = 16
};
70
/* Hardware queue types managed by this driver */
enum qtype {
	QTYPE_EQ,	/* event queue */
	QTYPE_MQ,	/* mailbox (MCC) queue */
	QTYPE_WQ,	/* work (transmit) queue */
	QTYPE_RQ,	/* receive queue */
	QTYPE_CQ,	/* completion queue */
	QTYPE_RSS	/* RSS receive queue */
};
79
/* Lifecycle states of a driver queue (see qstate fields below) */
typedef enum qstate_e {
	QDELETED = 0x0,	/* queue destroyed / not present */
	QCREATED,	/* created but not yet started */
	QSTARTED,	/* active */
	QSTOPPED,	/* stopped, may be restarted */
	QFINI,		/* teardown complete */
	QFINI_PENDING	/* teardown in progress */
}qstate_t;
88
/* How a mailbox command is submitted to the adapter */
enum mb_mode_e {
	MBX_BOOTSTRAP = 0x00, /* issue a command on the bootstrap mbox */
	MBX_ASYNC_MQ = 0x01 /* issue command thru MCC */
};
93
/* Driver special codes: driver-internal mailbox completion/status values */

#define	MBX_SUCCESS	0x00	/* command completed OK */
#define	MBX_FAILURE	0x01	/* command failed */
#define	MBX_COMPLETED	0x02	/* completion processed */
#define	MBX_QUEUE_FULL	0xF8	/* no free MQ entries */
#define	MBX_BUSY	0xFD	/* mailbox busy */
#define	MBX_TIMEOUT	0xFE	/* command timed out */
102
/* Event queue (EQ): delivers interrupt events from the adapter */
struct oce_eq {
	/* id assigned by the hw to this eq */
	uint32_t eq_id;
	/* handle to the creating parent dev (struct oce_dev *) */
	void *parent;
	/* ring buffer for this eq */
	oce_ring_buffer_t *ring;
	/* reference count of this structure */
	uint32_t ref_count;
	/* vector index */
	uint32_t idx;
	/*
	 * queue state (holds qstate_t values).
	 * NOTE(review): declared uint32_t here while oce_cq/oce_mq/oce_wq
	 * use qstate_t — confirm whether this is intentional.
	 */
	uint32_t qstate;
	/* protects this eq */
	kmutex_t lock;
};
117
/* Supported completion queue (CQ) depths, in number of entries */
enum cq_len {
	CQ_LEN_256  = 256,
	CQ_LEN_512  = 512,
	CQ_LEN_1024 = 1024,
	CQ_LEN_2048 = 2048
};
124
/* Configuration parameters of a completion queue */
struct cq_config {
	/* length of queue */
	enum cq_len q_len;
};

/*
 * Completion handler invoked to drain a CQ.
 * arg1 is the queue pointer; semantics of arg2/arg3 and the return value
 * depend on the specific drain routine (see oce_drain_*_cq below).
 */
typedef void * (*cq_handler_t)(void *arg1, int arg2, int arg3);
131
/* Completion queue (CQ): carries completions for an MQ, WQ or RQ */
struct oce_cq {
	/* id given by the hardware */
	uint32_t cq_id;
	/* parent device to which this cq belongs (struct oce_dev *) */
	void *parent;
	/* event queue associated with this cq */
	struct oce_eq *eq;
	/* drain routine called when this cq has work */
	cq_handler_t cq_handler;
	/* placeholder for callback context */
	void *cb_arg;
	/* ring buffer for this cq */
	oce_ring_buffer_t *ring;
	/* Queue state */
	qstate_t qstate;
	/* configuration of this cq */
	struct cq_config cq_cfg;
	/* reference count of this structure */
	uint32_t ref_count;
};
151
/* Configuration parameters of the mailbox queue */
struct mq_config {
	/* EQ delay — presumably interrupt coalescing; confirm vs usage */
	uint32_t eqd;
	/* number of MQ entries */
	uint8_t q_len;
	/* explicit padding to 32-bit alignment */
	uint8_t pad[3];

};
158
/* Mailbox queue (MQ): async command channel to the adapter via MCC */
struct oce_mq {
	/* lock for the mq */
	kmutex_t lock;
	/* handle to the parent device (struct oce_dev *) */
	void *parent;
	/* send queue */
	oce_ring_buffer_t *ring;
	/* identifier for the mq */
	uint32_t mq_id;
	/* completion queue for issued commands */
	struct oce_cq *cq;
	/* completion queue for async events */
	struct oce_cq *async_cq;
	/* free entries in Queue */
	uint32_t mq_free;
	/* Queue state */
	qstate_t qstate;

	/* configuration of this mq */
	struct mq_config cfg;
};
178
179
180 /*
181 * utility structure that handles context of mbx
182 */
183 struct oce_mbx_ctx {
184 /* pointer to mbx */
185 struct oce_mbx *mbx;
186 kmutex_t cv_lock;
187 kcondvar_t cond_var;
188 /* call back functioin [optional] */
189 void (*cb)(void *ctx);
190 /* call back context [optional] */
191 void *cb_ctx;
192 uint32_t mbx_status;
193 struct oce_mbx *mqe; /* pointer to mq entry */
194 uint32_t compl_status; /* mgmt status | addl status */
195 };
196
/* Queue servicing modes (see qmode fields in oce_wq / oce_rq) */
#define	OCE_MODE_POLL	B_FALSE	/* polled by the framework */
#define	OCE_MODE_INTR	B_TRUE	/* serviced from interrupt */
199
/* Configuration parameters of a work (transmit) queue */
struct wq_config {
	/* qtype */
	uint8_t wq_type;
	uint16_t buf_size;	/* tx copy-buffer size */
	uint8_t pad[1];		/* explicit padding */
	uint32_t q_len; /* number of wqes */
	uint16_t pd_id; /* protection domain id */
	uint16_t pci_fn_num; /* pci function number */
	uint32_t eqd; /* interrupt delay */
	uint32_t nbufs; /* copy buffers */
	uint32_t nhdl; /* preallocated memory handles */
};
212
/*
 * Work queue (WQ): transmit path state. Manages the WQE ring, a cache of
 * per-packet descriptors, preallocated copy buffers (wqb*) and DMA memory
 * handles (wqm*), each with separate alloc/free locks so the transmit and
 * completion paths do not contend on one lock.
 */
struct oce_wq {
	kmutex_t tx_lock; /* lock for the WQ */
	kmutex_t txc_lock; /* tx compl lock */
	void *parent; /* parent of this wq (struct oce_dev *) */
	mac_ring_handle_t handle; /* ring handle used by crossbow framework */

	oce_ring_buffer_t *ring; /* ring buffer managing the wqes */
	struct oce_cq *cq; 	/* cq associated with this wq */
	kmem_cache_t *wqed_cache; /* packet desc cache */
	oce_wq_bdesc_t *wq_bdesc_array; /* buffer desc array */
	uint32_t wqb_free; /* Wqb free */
	uint32_t wqbd_next_free; /* Next available wqbd index */
	uint32_t wqbd_rc_head; /* wqbd recycle head */
	kmutex_t wqb_alloc_lock; /* wqbd lock in allocation path */
	kmutex_t wqb_free_lock; /* wqbd lock in recycle path */
	oce_wq_bdesc_t **wqb_freelist;  /* Free wqbds */
	list_t wqe_desc_list; /* packet descriptor list */
	kmutex_t wqed_list_lock; /* wqed list lock */
	uint32_t wqe_pending; /* Wqe pending */
	oce_wq_mdesc_t *wq_mdesc_array; /* preallocated memory handles */
	uint32_t wqm_free; /* Wqm free */
	uint32_t wqmd_next_free; /* Next free wqmd slot */
	uint32_t wqmd_rc_head; /* wqmd recycle head */
	kmutex_t wqm_alloc_lock; /* Lock for wqm alloc path */
	kmutex_t wqm_free_lock; /* Lock for wqm free path */
	oce_wq_mdesc_t **wqm_freelist; /* Free wqmds */
	uint32_t wqm_used; /* memory handles uses */
	oce_dma_buf_t wqb; /* Tx Copy buffer dma memory */
	boolean_t resched; /* used for mac_tx_update */
	uint32_t wq_free; /* Wqe free */

	uint32_t tx_deferd; /* deferred tx count */
	uint32_t pkt_drops; /* drops */

	/* timestamps of recent events, for diagnostics */
	int64_t last_compl;
	int64_t last_armed;
	int64_t last_intr;
	int64_t last_defered;

	/* transmit statistics */
	uint64_t stat_bytes;
	uint64_t stat_pkts;

	/* Queue state */
	qstate_t qstate;
	uint_t qmode;	/* poll or interrupt mode (OCE_MODE_*) */
	uint16_t wq_id; /* wq ID */
	struct wq_config cfg; /* q config */
};
261
/* Configuration parameters of a receive queue */
struct rq_config {
	uint32_t q_len; /* q length */
	uint32_t frag_size; /* fragment size. Send log2(size) in command */
	uint32_t mtu; /* max frame size for this RQ */
	uint32_t if_id; /* interface ID to associate this RQ with */
	uint32_t is_rss_queue; /* is this RQ an RSS queue? */
	uint32_t eqd; /* interrupt delay */
	uint32_t nbufs; /* Total data buffers */
};
271
/* One shadow-ring slot tracking the buffer descriptor posted to the hw */
struct rq_shadow_entry {
	oce_rq_bdesc_t *rqbd;
};
275
/*
 * Receive queue (RQ): receive path state. Manages the RQE ring, the pool
 * of preallocated receive buffer descriptors (rqb*) and a shadow ring
 * mirroring what has been posted to the hardware.
 */
struct oce_rq {

	kmutex_t rx_lock;	/* receive path lock */
	kmutex_t rq_fini_lock;	/* teardown lock (see oce_rq_fini) */
	mac_ring_handle_t handle; /* ring handle used by framework */
	uint64_t gen_number; /* used by framework */
	boolean_t qmode;	/* poll or interrupt mode (OCE_MODE_*) */
	/* receive statistics */
	uint64_t stat_bytes;
	uint64_t stat_pkts;

	/* RQ config */
	struct rq_config cfg;

	/* ring buffer managing the RQEs */
	oce_ring_buffer_t *ring;
	/* cq associated with this queue */
	struct oce_cq *cq;
	oce_rq_bdesc_t  *rq_bdesc_array;	/* buffer desc array */
	/* shadow list of mblk for rq ring */
	oce_rq_bdesc_t **shadow_ring;
	oce_rq_bdesc_t  **rqb_freelist;		/* free rqbds */
	oce_dma_buf_t rqb; /* data buffer for the rq's */
	uint32_t rqb_free;	/* count of free rqbds */
	uint32_t rqb_next_free; /* next free slot */
	uint32_t rqb_rc_head; /* recycling head */
	uint32_t buf_avail; /* buffer available with hw */
	uint32_t pending; /* Buffers sent up */
	/* rq lock */
	kmutex_t rc_lock;

	/* parent of this rq (struct oce_dev *) */
	void *parent;
	/* RQ id */
	uint32_t rq_id;
	/* CPU ID assigned to this RQ if it is an RSS queue */
	uint32_t rss_cpuid;
	/*
	 * Queue state (holds qstate_t values).
	 * NOTE(review): declared uint32_t here while oce_cq/oce_mq/oce_wq
	 * use qstate_t — confirm whether this is intentional.
	 */
	uint32_t qstate;
	void *grp; /* group it belongs to */
};
316
/*
 * Link status as reported by the adapter; laid out to match the
 * hardware response dwords (dw0..dw2).
 */
struct link_status {
	/* dw 0 */
	uint8_t physical_port;
	uint8_t mac_duplex;
	uint8_t mac_speed;
	uint8_t mac_fault;
	/* dw 1 */
	uint8_t mgmt_mac_duplex;
	uint8_t mgmt_mac_speed;
	uint16_t qos_link_speed;
	/* dw2 */
	uint32_t logical_link_status;
};
330
/* DMA buffer and ring-buffer management */
int oce_alloc_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf,
    uint32_t size, ddi_dma_attr_t *dma_attr, uint32_t flags);
void oce_free_dma_buffer(struct oce_dev *dev, oce_dma_buf_t *dbuf);

oce_ring_buffer_t *oce_create_ring_buffer(struct oce_dev *dev,
    uint32_t num_items, uint32_t item_size,
    uint32_t flags);
void destroy_ring_buffer(struct oce_dev *dev, oce_ring_buffer_t *ring);

/* Queues */
int oce_set_eq_delay(struct oce_dev *dev, uint32_t *eq_arr,
    uint32_t eq_cnt, uint32_t eq_delay, uint32_t mode);
/* ring the EQ/CQ doorbell, acknowledging npopped entries */
void oce_arm_eq(struct oce_dev *dev, int16_t qid, int npopped,
    boolean_t rearm, boolean_t clearint);
void oce_arm_cq(struct oce_dev *dev, int16_t qid, int npopped,
    boolean_t rearm);
void oce_drain_eq(struct oce_eq *eq);
348
349
/* Bootstrap: mailbox commands issued on the bootstrap mbox */
int oce_mbox_init(struct oce_dev *dev);
int oce_mbox_fini(struct oce_dev *dev);
int oce_mbox_dispatch(struct oce_dev *dev, uint32_t tmo_sec);
int oce_mbox_wait(struct oce_dev *dev, uint32_t tmo_sec);
int oce_issue_mbox_passthru(struct oce_dev *dev, queue_t *wq, mblk_t *mp,
    uint32_t *rsp_len);
/* flag selects the submission path (enum mb_mode_e) */
int oce_issue_mbox_cmd(struct oce_dev *dev, struct oce_mbx *mbx,
    uint32_t tmo_sec, uint32_t flag);

/* Hardware: PCI setup, reset and queue creation/teardown */
boolean_t oce_is_reset_pci(struct oce_dev *dev);
int oce_pci_soft_reset(struct oce_dev *dev);
int oce_POST(struct oce_dev *dev);
int oce_pci_init(struct oce_dev *dev);
void oce_pci_fini(struct oce_dev *dev);
int oce_init_tx(struct oce_dev *dev);
void oce_fini_tx(struct oce_dev *dev);
int oce_create_queues(struct oce_dev *dev);
int oce_create_mcc_queue(struct oce_dev *dev);
void oce_delete_queues(struct oce_dev *dev);
void oce_delete_mcc_queue(struct oce_dev *dev);
int oce_reset_fun(struct oce_dev *dev);
373
/* Transmit */
/* CQ drain routine for WQs; matches cq_handler_t */
void *oce_drain_wq_cq(void *arg, int arg2, int arg3);
/* returns mp on tx resource shortage (caller requeues), NULL on success */
mblk_t *oce_send_packet(struct oce_wq *wq, mblk_t *mp);
int oce_start_wq(struct oce_wq *wq);
void oce_clean_wq(struct oce_wq *wq);

/* Receive */
/* CQ drain routine for RQs; matches cq_handler_t */
void * oce_drain_rq_cq(void *arg, int arg2, int arg3);
int oce_start_rq(struct oce_rq *rq);
void oce_clean_rq(struct oce_rq *rq);
void oce_rq_discharge(struct oce_rq *rq);
int oce_rx_pending(struct oce_dev *dev, struct oce_rq *rq, int32_t timeout);
void oce_rq_fini(struct oce_dev *dev, struct oce_rq *rq);

/* Mailbox */
/* CQ drain routine for the MQ; matches cq_handler_t */
void * oce_drain_mq_cq(void *arg, int arg2, int arg3);
int oce_issue_mq_mbox(struct oce_dev *dev, struct oce_mbx *mbx);
struct oce_mbx *oce_mq_get_mbx(struct oce_dev *dev);
void oce_clean_mq(struct oce_mq *mq);
int oce_start_mq(struct oce_mq *mq);
394
/*
 * mbx helper functions: build and issue specific mailbox commands.
 * The trailing 'mode' argument on most of these selects the submission
 * path (enum mb_mode_e: bootstrap mbox vs. async MQ).
 */
void mbx_common_req_hdr_init(struct mbx_hdr *hdr, uint8_t dom,
    uint8_t port, uint8_t subsys, uint8_t opcode,
    uint32_t timeout, uint32_t pyld_len, uint8_t version);
void mbx_nic_req_hdr_init(struct mbx_hdr *hdr, uint8_t dom, uint8_t port,
    uint8_t opcode, uint32_t timeout, uint32_t pyld_len);
int oce_get_fw_version(struct oce_dev *dev, uint32_t mode);
int oce_read_mac_addr(struct oce_dev *dev, uint32_t if_id, uint8_t perm,
    uint8_t type, struct mac_address_format *mac, uint32_t mode);
int oce_if_create(struct oce_dev *dev, uint32_t cap_flags, uint32_t en_flags,
    uint16_t vlan_tag, uint8_t *mac_addr, uint32_t *if_id, uint32_t mode);
int oce_if_del(struct oce_dev *dev, uint32_t if_id, uint32_t mode);

int oce_get_link_status(struct oce_dev *dev, link_state_t *link_status,
    int32_t *link_speed, uint8_t *link_duplex, uint8_t cmd_ver, uint32_t mode);
int oce_set_rx_filter(struct oce_dev *dev,
    struct mbx_set_common_ntwk_rx_filter *filter, uint32_t mode);
int oce_set_multicast_table(struct oce_dev *dev, uint32_t if_id,
    struct ether_addr *mca_table, uint16_t mca_cnt, boolean_t promisc,
    uint32_t mode);
int oce_get_fw_config(struct oce_dev *dev, uint32_t mode);
int oce_get_hw_stats(struct oce_dev *dev, uint32_t mode);
int oce_get_pport_stats(struct oce_dev *dev, uint32_t mode);
int oce_set_flow_control(struct oce_dev *dev, uint32_t flow_control,
    uint32_t mode);
int oce_get_flow_control(struct oce_dev *dev, uint32_t *flow_control,
    uint32_t mode);
int oce_set_promiscuous(struct oce_dev *dev, boolean_t enable, uint32_t mode);
int oce_add_mac(struct oce_dev *dev, uint32_t if_id,
    const uint8_t *mac, uint32_t *pmac_id, uint32_t mode);
int oce_del_mac(struct oce_dev *dev, uint32_t if_id, uint32_t *pmac_id,
    uint32_t mode);
int oce_config_vlan(struct oce_dev *dev, uint32_t if_id,
    struct normal_vlan *vtag_arr,
    uint8_t vtag_cnt, boolean_t untagged,
    boolean_t enable_promisc, uint32_t mode);
int oce_config_link(struct oce_dev *dev, boolean_t enable, uint32_t mode);
int oce_config_rss(struct oce_dev *dev, uint16_t if_id, char *hkey, char *itbl,
    int tbl_sz, uint16_t rss_type, uint8_t flush, uint32_t mode);
int oce_issue_mbox(struct oce_dev *dev, queue_t *wq, mblk_t *mp,
    uint32_t *payload_length);
436 #ifdef __cplusplus
437 }
438 #endif
439
440 #endif /* _OCE_IO_H_ */