1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, v.1, (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2014-2017 Cavium, Inc.
24 * The contents of this file are subject to the terms of the Common Development
25 * and Distribution License, v.1, (the "License").
26
27 * You may not use this file except in compliance with the License.
28
29 * You can obtain a copy of the License at available
30 * at http://opensource.org/licenses/CDDL-1.0
31
32 * See the License for the specific language governing permissions and
33 * limitations under the License.
34 */
35
36 /*
37 * Copyright 2018 Joyent, Inc.
38 */
39
40 #include "qede.h"
41
/*
 * Convenience wrappers around a fastpath's fp_lock.  The expansions
 * carry no trailing semicolon so that "FP_LOCK(fp);" forms exactly one
 * statement (safe in unbraced if/else bodies), and the argument is
 * parenthesized against operator-precedence surprises.
 */
#define	FP_LOCK(ptr) \
	mutex_enter(&(ptr)->fp_lock)
#define	FP_UNLOCK(ptr) \
	mutex_exit(&(ptr)->fp_lock)
46
47 int
48 qede_ucst_find(qede_t *qede, const uint8_t *mac_addr)
49 {
50 int slot;
51
52 for(slot = 0; slot < qede->ucst_total; slot++) {
53 if (bcmp(qede->ucst_mac[slot].mac_addr.ether_addr_octet,
54 mac_addr, ETHERADDRL) == 0) {
55 return (slot);
56 }
57 }
58 return (-1);
59
60 }
61
62 static int
63 qede_set_mac_addr(qede_t *qede, uint8_t *mac_addr, uint8_t fl)
64 {
65 struct ecore_filter_ucast params;
66
67 memset(¶ms, 0, sizeof (params));
68
69 params.opcode = fl;
70 params.type = ECORE_FILTER_MAC;
71 params.is_rx_filter = true;
72 params.is_tx_filter = true;
73 COPY_ETH_ADDRESS(mac_addr, params.mac);
74
75 return (ecore_filter_ucast_cmd(&qede->edev,
76 ¶ms, ECORE_SPQ_MODE_EBLOCK, NULL));
77
78
79 }
/*
 * Install mac_addr into the first free slot of the unicast shadow table
 * and program it into the device.  Returns 0 on success (or when the
 * address is already present), ENOSPC when no slot is free, or the
 * ecore error from the filter ramrod.
 */
static int
qede_add_macaddr(qede_t *qede, uint8_t *mac_addr)
{
	int i, ret = 0;

	i = qede_ucst_find(qede, mac_addr);
	if (i != -1) {
		/* Address already programmed; treat as success. */
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, "mac addr already added %d\n",
		    qede->ucst_avail);
		return (0);
	}
	if (qede->ucst_avail == 0) {
		qede_info(qede, "add macaddr ignored \n");
		return (ENOSPC);
	}
	/* Locate the first unused slot in the shadow table. */
	for (i = 0; i < qede->ucst_total; i++) {
		if (qede->ucst_mac[i].set == 0) {
			break;
		}
	}
	if (i >= qede->ucst_total) {
		qede_info(qede, "add macaddr ignored no space");
		return (ENOSPC);
	}
	ret = qede_set_mac_addr(qede, (uint8_t *)mac_addr, ECORE_FILTER_ADD);
	if (ret == 0) {
		/* Hardware accepted the filter; record it in the slot. */
		bcopy(mac_addr,
		    qede->ucst_mac[i].mac_addr.ether_addr_octet,
		    ETHERADDRL);
		qede->ucst_mac[i].set = 1;
		qede->ucst_avail--;
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, " add macaddr passed for addr "
		    "%02x:%02x:%02x:%02x:%02x:%02x",
		    mac_addr[0], mac_addr[1],
		    mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
	} else {
		/* LINTED E_ARGUMENT_MISMATCH */
		qede_info(qede, "add macaddr failed for addr "
		    "%02x:%02x:%02x:%02x:%02x:%02x",
		    mac_addr[0], mac_addr[1],
		    mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);

	}
	/*
	 * When the very first unicast address has just been installed
	 * (exactly one slot consumed), also program the broadcast
	 * address into the next free slot.
	 */
	if (qede->ucst_avail == (qede->ucst_total -1)) {
		u8 bcast_addr[] =
		{
			0xff, 0xff, 0xff, 0xff, 0xff,
			0xff
		};
		for (i = 0; i < qede->ucst_total; i++) {
			if (qede->ucst_mac[i].set == 0)
				break;
		}
		ret = qede_set_mac_addr(qede,
		    (uint8_t *)bcast_addr, ECORE_FILTER_ADD);
		if (ret == 0) {
			bcopy(bcast_addr,
			    qede->ucst_mac[i].mac_addr.ether_addr_octet,
			    ETHERADDRL);
			qede->ucst_mac[i].set = 1;
			qede->ucst_avail--;
		} else {

			/*
			 * NOTE(review): this failure message prints the
			 * caller's mac_addr, not the broadcast address
			 * whose programming actually failed.
			 */
			/* LINTED E_ARGUMENT_MISMATCH */
			qede_info(qede, "add macaddr failed for addr "
			    "%02x:%02x:%02x:%02x:%02x:%02x",
			    mac_addr[0], mac_addr[1],
			    mac_addr[2], mac_addr[3], mac_addr[4],
			    mac_addr[5]);
		}

	}

	return (ret);

}
158
159 #ifndef ILLUMOS
160 static int
161 qede_add_mac_addr(void *arg, const uint8_t *mac_addr, const uint64_t flags)
162 #else
163 static int
164 qede_add_mac_addr(void *arg, const uint8_t *mac_addr)
165 #endif
166 {
167 qede_mac_group_t *rx_group = (qede_mac_group_t *)arg;
168 qede_t *qede = rx_group->qede;
169 int ret = DDI_SUCCESS;
170
171 /* LINTED E_ARGUMENT_MISMATCH */
172 qede_info(qede, " mac addr :" MAC_STRING, MACTOSTR(mac_addr));
173
174 mutex_enter(&qede->gld_lock);
175 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
176 mutex_exit(&qede->gld_lock);
177 return (ECANCELED);
178 }
179 ret = qede_add_macaddr(qede, (uint8_t *)mac_addr);
180
181 mutex_exit(&qede->gld_lock);
182
183
184 return (ret);
185 }
186
187 static int
188 qede_rem_macaddr(qede_t *qede, uint8_t *mac_addr)
189 {
190 int ret = 0;
191 int i;
192
193 i = qede_ucst_find(qede, mac_addr);
194 if (i == -1) {
195 /* LINTED E_ARGUMENT_MISMATCH */
196 qede_info(qede,
197 "mac addr not there to remove",
198 MAC_STRING, MACTOSTR(mac_addr));
199 return (0);
200 }
201 if (qede->ucst_mac[i].set == 0) {
202 return (EINVAL);
203 }
204 ret = qede_set_mac_addr(qede, (uint8_t *)mac_addr, ECORE_FILTER_REMOVE);
205 if (ret == 0) {
206 bzero(qede->ucst_mac[i].mac_addr.ether_addr_octet,ETHERADDRL);
207 qede->ucst_mac[i].set = 0;
208 qede->ucst_avail++;
209 } else {
210 /* LINTED E_ARGUMENT_MISMATCH */
211 qede_info(qede, "mac addr remove failed",
212 MAC_STRING, MACTOSTR(mac_addr));
213 }
214 return (ret);
215
216 }
217
218
219 static int
220 qede_rem_mac_addr(void *arg, const uint8_t *mac_addr)
221 {
222 qede_mac_group_t *rx_group = (qede_mac_group_t *)arg;
223 qede_t *qede = rx_group->qede;
224 int ret = DDI_SUCCESS;
225
226 /* LINTED E_ARGUMENT_MISMATCH */
227 qede_info(qede, "mac addr remove:" MAC_STRING, MACTOSTR(mac_addr));
228 mutex_enter(&qede->gld_lock);
229 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
230 mutex_exit(&qede->gld_lock);
231 return (ECANCELED);
232 }
233 ret = qede_rem_macaddr(qede, (uint8_t *)mac_addr);
234 mutex_exit(&qede->gld_lock);
235 return (ret);
236 }
237
238
239 static int
240 qede_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
241 {
242 int ret = 0;
243
244 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
245 qede_tx_ring_t *tx_ring = fp->tx_ring[0];
246 qede_t *qede = fp->qede;
247
248
249 if (qede->qede_state == QEDE_STATE_SUSPENDED)
250 return (ECANCELED);
251
252 switch (stat) {
253 case MAC_STAT_OBYTES:
254 *val = tx_ring->tx_byte_count;
255 break;
256
257 case MAC_STAT_OPACKETS:
258 *val = tx_ring->tx_pkt_count;
259 break;
260
261 default:
262 *val = 0;
263 ret = ENOTSUP;
264 }
265
266 return (ret);
267 }
268
#ifndef ILLUMOS
static mblk_t *
qede_rx_ring_poll(void *arg, int poll_bytes, int poll_pkts)
{
#else
static mblk_t *
qede_rx_ring_poll(void *arg, int poll_bytes)
{
	/* XXX pick a value at the moment */
	int poll_pkts = 100;
#endif
	/*
	 * mri_poll callback: pull up to poll_bytes/poll_pkts worth of
	 * received packets from this fastpath and return them as an
	 * mblk chain (NULL when nothing is available).
	 */
	qede_fastpath_t *fp = (qede_fastpath_t *)arg;
	mblk_t *mp = NULL;
	int work_done = 0;
	qede_t *qede = fp->qede;

	if (poll_bytes == 0) {
		return (NULL);
	}

	mutex_enter(&fp->fp_lock);
	qede->intrSbPollCnt[fp->vect_info->vect_index]++;

	mp = qede_process_fastpath(fp, poll_bytes, poll_pkts, &work_done);
	if (mp != NULL) {
		fp->rx_ring->rx_poll_cnt++;
	} else if ((mp == NULL) && (work_done == 0)) {
		/* Poll found neither packets nor completion work. */
		qede->intrSbPollNoChangeCnt[fp->vect_info->vect_index]++;
	}

	mutex_exit(&fp->fp_lock);
	return (mp);
}
302
303 #ifndef ILLUMOS
304 static int
305 qede_rx_ring_intr_enable(mac_ring_driver_t rh)
306 #else
307 static int
308 qede_rx_ring_intr_enable(mac_intr_handle_t rh)
309 #endif
310 {
311 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
312
313 mutex_enter(&fp->qede->drv_lock);
314 if (!fp->sb_phys && (fp->sb_dma_handle == NULL)) {
315 mutex_exit(&fp->qede->drv_lock);
316 return (DDI_FAILURE);
317 }
318
319 fp->rx_ring->intrEnableCnt++;
320 qede_enable_hw_intr(fp);
321 fp->disabled_by_poll = 0;
322 mutex_exit(&fp->qede->drv_lock);
323
324 return (DDI_SUCCESS);
325 }
326
327 #ifndef ILLUMOS
328 static int
329 qede_rx_ring_intr_disable(mac_ring_driver_t rh)
330 #else
331 static int
332 qede_rx_ring_intr_disable(mac_intr_handle_t rh)
333 #endif
334 {
335 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
336
337 mutex_enter(&fp->qede->drv_lock);
338 if (!fp->sb_phys && (fp->sb_dma_handle == NULL)) {
339 mutex_exit(&fp->qede->drv_lock);
340 return (DDI_FAILURE);
341 }
342 fp->rx_ring->intrDisableCnt++;
343 qede_disable_hw_intr(fp);
344 fp->disabled_by_poll = 1;
345 mutex_exit(&fp->qede->drv_lock);
346 return (DDI_SUCCESS);
347 }
348
349 static int
350 qede_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
351 {
352
353 int ret = 0;
354
355 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
356 qede_t *qede = fp->qede;
357 qede_rx_ring_t *rx_ring = fp->rx_ring;
358
359 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
360 return (ECANCELED);
361 }
362
363 switch (stat) {
364 case MAC_STAT_RBYTES:
365 *val = rx_ring->rx_byte_cnt;
366 break;
367 case MAC_STAT_IPACKETS:
368 *val = rx_ring->rx_pkt_cnt;
369 break;
370 default:
371 *val = 0;
372 ret = ENOTSUP;
373 break;
374 }
375
376 return (ret);
377 }
378
379 static int
380 qede_get_global_ring_index(qede_t *qede, int gindex, int rindex)
381 {
382 qede_fastpath_t *fp;
383 qede_rx_ring_t *rx_ring;
384 int i = 0;
385
386 for (i = 0; i < qede->num_fp; i++) {
387 fp = &qede->fp_array[i];
388 rx_ring = fp->rx_ring;
389
390 if (rx_ring->group_index == gindex) {
391 rindex--;
392 }
393 if (rindex < 0) {
394 return (i);
395 }
396 }
397
398 return (-1);
399 }
400
401 static void
402 qede_rx_ring_stop(mac_ring_driver_t rh)
403 {
404 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
405 qede_rx_ring_t *rx_ring = fp->rx_ring;
406
407 qede_print("!%s(%d): called", __func__,fp->qede->instance);
408 mutex_enter(&fp->fp_lock);
409 rx_ring->mac_ring_started = B_FALSE;
410 mutex_exit(&fp->fp_lock);
411 }
412
413 static int
414 qede_rx_ring_start(mac_ring_driver_t rh, u64 mr_gen_num)
415 {
416 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
417 qede_rx_ring_t *rx_ring = fp->rx_ring;
418
419 qede_print("!%s(%d): called", __func__,fp->qede->instance);
420 mutex_enter(&fp->fp_lock);
421 rx_ring->mr_gen_num = mr_gen_num;
422 rx_ring->mac_ring_started = B_TRUE;
423 rx_ring->intrDisableCnt = 0;
424 rx_ring->intrEnableCnt = 0;
425 fp->disabled_by_poll = 0;
426
427 mutex_exit(&fp->fp_lock);
428
429 return (DDI_SUCCESS);
430 }
431
432 /* Callback function from mac layer to register rings */
433 void
434 qede_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
435 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
436 {
437 qede_t *qede = (qede_t *)arg;
438 mac_intr_t *mintr = &infop->mri_intr;
439
440 switch (rtype) {
441 case MAC_RING_TYPE_RX: {
442 /*
443 * Index passed as a param is the ring index within the
444 * given group index. If multiple groups are supported
445 * then need to search into all groups to find out the
446 * global ring index for the passed group relative
447 * ring index
448 */
449 int global_ring_index = qede_get_global_ring_index(qede,
450 group_index, ring_index);
451 qede_fastpath_t *fp;
452 qede_rx_ring_t *rx_ring;
453 int i;
454
455 /*
456 * global_ring_index < 0 means group index passed
457 * was registered by our driver
458 */
459 ASSERT(global_ring_index >= 0);
460
461 if (rh == NULL) {
462 cmn_err(CE_WARN, "!rx ring(%d) ring handle NULL",
463 global_ring_index);
464 }
465
466 fp = &qede->fp_array[global_ring_index];
467 rx_ring = fp->rx_ring;
468 fp->qede = qede;
469
470 rx_ring->mac_ring_handle = rh;
471
472 qede_info(qede, "rx_ring %d mac_ring_handle %p",
473 rx_ring->rss_id, rh);
474
475 /* mri_driver passed as arg to mac_ring* callbacks */
476 infop->mri_driver = (mac_ring_driver_t)fp;
477 /*
478 * mri_start callback will supply a mac rings generation
479 * number which is needed while indicating packets
480 * upstream via mac_ring_rx() call
481 */
482 infop->mri_start = qede_rx_ring_start;
483 infop->mri_stop = qede_rx_ring_stop;
484 infop->mri_poll = qede_rx_ring_poll;
485 infop->mri_stat = qede_rx_ring_stat;
486
487 mintr->mi_handle = (mac_intr_handle_t)fp;
488 mintr->mi_enable = qede_rx_ring_intr_enable;
489 mintr->mi_disable = qede_rx_ring_intr_disable;
490 if (qede->intr_ctx.intr_type_in_use &
491 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
492 mintr->mi_ddi_handle =
493 qede->intr_ctx.
494 intr_hdl_array[global_ring_index + qede->num_hwfns];
495 }
496 break;
497 }
498 case MAC_RING_TYPE_TX: {
499 qede_fastpath_t *fp;
500 qede_tx_ring_t *tx_ring;
501 int i, tc;
502
503 ASSERT(ring_index < qede->num_fp);
504
505 fp = &qede->fp_array[ring_index];
506 fp->qede = qede;
507 tx_ring = fp->tx_ring[0];
508 tx_ring->mac_ring_handle = rh;
509 qede_info(qede, "tx_ring %d mac_ring_handle %p",
510 tx_ring->tx_queue_index, rh);
511 infop->mri_driver = (mac_ring_driver_t)fp;
512 infop->mri_start = NULL;
513 infop->mri_stop = NULL;
514 infop->mri_tx = qede_ring_tx;
515 infop->mri_stat = qede_tx_ring_stat;
516 if (qede->intr_ctx.intr_type_in_use &
517 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
518 mintr->mi_ddi_handle =
519 qede->intr_ctx.
520 intr_hdl_array[ring_index + qede->num_hwfns];
521 }
522 break;
523 }
524 default:
525 break;
526 }
527 }
528
529 /*
530 * Callback function from mac layer to register group
531 */
532 void
533 qede_fill_group(void *arg, mac_ring_type_t rtype, const int index,
534 mac_group_info_t *infop, mac_group_handle_t gh)
535 {
536 qede_t *qede = (qede_t *)arg;
537
538 switch (rtype) {
539 case MAC_RING_TYPE_RX: {
540 qede_mac_group_t *rx_group;
541
542 rx_group = &qede->rx_groups[index];
543 rx_group->group_handle = gh;
544 rx_group->group_index = index;
545 rx_group->qede = qede;
546 infop->mgi_driver = (mac_group_driver_t)rx_group;
547 infop->mgi_start = NULL;
548 infop->mgi_stop = NULL;
549 #ifndef ILLUMOS
550 infop->mgi_addvlan = NULL;
551 infop->mgi_remvlan = NULL;
552 infop->mgi_getsriov_info = NULL;
553 infop->mgi_setmtu = NULL;
554 #endif
555 infop->mgi_addmac = qede_add_mac_addr;
556 infop->mgi_remmac = qede_rem_mac_addr;
557 infop->mgi_count = qede->num_fp;
558 #ifndef ILLUMOS
559 if (index == 0) {
560 infop->mgi_flags = MAC_GROUP_DEFAULT;
561 }
562 #endif
563
564 break;
565 }
566 case MAC_RING_TYPE_TX: {
567 qede_mac_group_t *tx_group;
568
569 tx_group = &qede->tx_groups[index];
570 tx_group->group_handle = gh;
571 tx_group->group_index = index;
572 tx_group->qede = qede;
573
574 infop->mgi_driver = (mac_group_driver_t)tx_group;
575 infop->mgi_start = NULL;
576 infop->mgi_stop = NULL;
577 infop->mgi_addmac = NULL;
578 infop->mgi_remmac = NULL;
579 #ifndef ILLUMOS
580 infop->mgi_addvlan = NULL;
581 infop->mgi_remvlan = NULL;
582 infop->mgi_setmtu = NULL;
583 infop->mgi_getsriov_info = NULL;
584 #endif
585
586 infop->mgi_count = qede->num_fp;
587
588 #ifndef ILLUMOS
589 if (index == 0) {
590 infop->mgi_flags = MAC_GROUP_DEFAULT;
591 }
592 #endif
593 break;
594 }
595 default:
596 break;
597 }
598 }
599
600 #ifdef ILLUMOS_NOT_YET
/*
 * MAC_CAPAB_TRANSCEIVER mct_info callback: report whether the
 * transceiver behind hwfn `id` is present and usable.  Returns EINVAL
 * on bad arguments, EIO when the PTT window cannot be acquired.
 */
static int
qede_transceiver_info(void *arg, uint_t id, mac_transceiver_info_t *infop)
{
	qede_t *qede = arg;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	struct ecore_ptt *ptt;
	uint32_t transceiver_state;

	if (id >= edev->num_hwfns || arg == NULL || infop == NULL)
		return (EINVAL);

	hwfn = &edev->hwfns[id];
	ptt = ecore_ptt_acquire(hwfn);
	if (ptt == NULL) {
		return (EIO);
	}
	/*
	 * Use the underlying raw API to get this information. While the
	 * ecore_phy routines have some ways of getting to this information, it
	 * ends up writing the raw data as ASCII characters which doesn't help
	 * us one bit.
	 */
	transceiver_state = ecore_rd(hwfn, ptt, hwfn->mcp_info->port_addr +
	    OFFSETOF(struct public_port, transceiver_data));
	transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
	ecore_ptt_release(hwfn, ptt);

	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) != 0) {
		mac_transceiver_info_set_present(infop, B_TRUE);
		/*
		 * Based on our testing, the ETH_TRANSCEIVER_STATE_VALID flag is
		 * not set, so we cannot rely on it. Instead, we have found that
		 * the ETH_TRANSCEIVER_STATE_UPDATING will be set when we cannot
		 * use the transceiver.
		 */
		if ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) != 0) {
			mac_transceiver_info_set_usable(infop, B_FALSE);
		} else {
			mac_transceiver_info_set_usable(infop, B_TRUE);
		}
	} else {
		/* No module plugged in: not present, not usable. */
		mac_transceiver_info_set_present(infop, B_FALSE);
		mac_transceiver_info_set_usable(infop, B_FALSE);
	}

	return (0);
}
649
/*
 * MAC_CAPAB_TRANSCEIVER mct_read callback: read up to 256 bytes of SFP
 * EEPROM page 0xa0 or 0xa2 for hwfn `id`.  On success *nread is set to
 * the number of bytes read.  Returns EINVAL on bad arguments or
 * out-of-range request, EIO on PTT-acquire or MCP read failure.
 */
static int
qede_transceiver_read(void *arg, uint_t id, uint_t page, void *buf,
    size_t nbytes, off_t offset, size_t *nread)
{
	qede_t *qede = arg;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	uint32_t port, lane;
	struct ecore_ptt *ptt;
	enum _ecore_status_t ret;

	if (id >= edev->num_hwfns || buf == NULL || nbytes == 0 || nread == NULL ||
	    (page != 0xa0 && page != 0xa2) || offset < 0)
		return (EINVAL);

	/*
	 * Both supported pages have a length of 256 bytes, ensure nothing asks
	 * us to go beyond that.
	 */
	if (nbytes > 256 || offset >= 256 || (offset + nbytes > 256)) {
		return (EINVAL);
	}

	hwfn = &edev->hwfns[id];
	ptt = ecore_ptt_acquire(hwfn);
	if (ptt == NULL) {
		return (EIO);
	}

	ret = ecore_mcp_phy_sfp_read(hwfn, ptt, hwfn->port_id, page, offset,
	    nbytes, buf);
	ecore_ptt_release(hwfn, ptt);
	if (ret != ECORE_SUCCESS) {
		return (EIO);
	}
	*nread = nbytes;
	return (0);
}
#endif /* ILLUMOS_NOT_YET */
689
690
691 static int
692 qede_mac_stats(void * arg,
693 uint_t stat,
694 uint64_t * value)
695 {
696 qede_t * qede = (qede_t *)arg;
697 struct ecore_eth_stats vstats;
698 struct ecore_dev *edev = &qede->edev;
699 struct qede_link_cfg lnkcfg;
700 int rc = 0;
701 qede_fastpath_t *fp = &qede->fp_array[0];
702 qede_rx_ring_t *rx_ring;
703 qede_tx_ring_t *tx_ring;
704
705 if ((qede == NULL) || (value == NULL)) {
706 return EINVAL;
707 }
708
709
710 mutex_enter(&qede->gld_lock);
711
712 if(qede->qede_state != QEDE_STATE_STARTED) {
713 mutex_exit(&qede->gld_lock);
714 return EAGAIN;
715 }
716
717 *value = 0;
718
719 memset(&vstats, 0, sizeof(struct ecore_eth_stats));
720 ecore_get_vport_stats(edev, &vstats);
721
722
723 memset(&qede->curcfg, 0, sizeof(struct qede_link_cfg));
724 qede_get_link_info(&edev->hwfns[0], &qede->curcfg);
725
726
727
728 switch (stat)
729 {
730 case MAC_STAT_IFSPEED:
731 *value = (qede->props.link_speed * 1000000ULL);
732 break;
733 case MAC_STAT_MULTIRCV:
734 *value = vstats.common.rx_mcast_pkts;
735 break;
736 case MAC_STAT_BRDCSTRCV:
737 *value = vstats.common.rx_bcast_pkts;
738 break;
739 case MAC_STAT_MULTIXMT:
740 *value = vstats.common.tx_mcast_pkts;
741 break;
742 case MAC_STAT_BRDCSTXMT:
743 *value = vstats.common.tx_bcast_pkts;
744 break;
745 case MAC_STAT_NORCVBUF:
746 *value = vstats.common.no_buff_discards;
747 break;
748 case MAC_STAT_NOXMTBUF:
749 *value = 0;
750 break;
751 case MAC_STAT_IERRORS:
752 case ETHER_STAT_MACRCV_ERRORS:
753 *value = vstats.common.mac_filter_discards +
754 vstats.common.packet_too_big_discard +
755 vstats.common.rx_crc_errors;
756 break;
757
758 case MAC_STAT_OERRORS:
759 break;
760
761 case MAC_STAT_COLLISIONS:
762 *value = vstats.bb.tx_total_collisions;
763 break;
764
765 case MAC_STAT_RBYTES:
766 *value = vstats.common.rx_ucast_bytes +
767 vstats.common.rx_mcast_bytes +
768 vstats.common.rx_bcast_bytes;
769 break;
770
771 case MAC_STAT_IPACKETS:
772 *value = vstats.common.rx_ucast_pkts +
773 vstats.common.rx_mcast_pkts +
774 vstats.common.rx_bcast_pkts;
775 break;
776
777 case MAC_STAT_OBYTES:
778 *value = vstats.common.tx_ucast_bytes +
779 vstats.common.tx_mcast_bytes +
780 vstats.common.tx_bcast_bytes;
781 break;
782
783 case MAC_STAT_OPACKETS:
784 *value = vstats.common.tx_ucast_pkts +
785 vstats.common.tx_mcast_pkts +
786 vstats.common.tx_bcast_pkts;
787 break;
788
789 case ETHER_STAT_ALIGN_ERRORS:
790 *value = vstats.common.rx_align_errors;
791 break;
792
793 case ETHER_STAT_FCS_ERRORS:
794 *value = vstats.common.rx_crc_errors;
795 break;
796
797 case ETHER_STAT_FIRST_COLLISIONS:
798 break;
799
800 case ETHER_STAT_MULTI_COLLISIONS:
801 break;
802
803 case ETHER_STAT_DEFER_XMTS:
804 break;
805
806 case ETHER_STAT_TX_LATE_COLLISIONS:
807 break;
808
809 case ETHER_STAT_EX_COLLISIONS:
810 break;
811
812 case ETHER_STAT_MACXMT_ERRORS:
813 *value = 0;
814 break;
815
816 case ETHER_STAT_CARRIER_ERRORS:
817 break;
818
819 case ETHER_STAT_TOOLONG_ERRORS:
820 *value = vstats.common.rx_oversize_packets;
821 break;
822
823 #if (MAC_VERSION > 1)
824 case ETHER_STAT_TOOSHORT_ERRORS:
825 *value = vstats.common.rx_undersize_packets;
826 break;
827 #endif
828
829 case ETHER_STAT_XCVR_ADDR:
830 *value = 0;
831 break;
832
833 case ETHER_STAT_XCVR_ID:
834 *value = 0;
835 break;
836
837 case ETHER_STAT_XCVR_INUSE:
838 switch (qede->props.link_speed) {
839 default:
840 *value = XCVR_UNDEFINED;
841 }
842 break;
843 #if (MAC_VERSION > 1)
844 case ETHER_STAT_CAP_10GFDX:
845 *value = 0;
846 break;
847 #endif
848 case ETHER_STAT_CAP_100FDX:
849 *value = 0;
850 break;
851 case ETHER_STAT_CAP_100HDX:
852 *value = 0;
853 break;
854 case ETHER_STAT_CAP_ASMPAUSE:
855 *value = 1;
856 break;
857 case ETHER_STAT_CAP_PAUSE:
858 *value = 1;
859 break;
860 case ETHER_STAT_CAP_AUTONEG:
861 *value = 1;
862 break;
863
864 #if (MAC_VERSION > 1)
865 case ETHER_STAT_CAP_REMFAULT:
866 *value = 0;
867 break;
868 #endif
869
870 #if (MAC_VERSION > 1)
871 case ETHER_STAT_ADV_CAP_10GFDX:
872 *value = 0;
873 break;
874 #endif
875 case ETHER_STAT_ADV_CAP_ASMPAUSE:
876 *value = 1;
877 break;
878
879 case ETHER_STAT_ADV_CAP_PAUSE:
880 *value = 1;
881 break;
882
883 case ETHER_STAT_ADV_CAP_AUTONEG:
884 *value = qede->curcfg.adv_capab.autoneg;
885 break;
886
887 #if (MAC_VERSION > 1)
888 case ETHER_STAT_ADV_REMFAULT:
889 *value = 0;
890 break;
891 #endif
892
893 case ETHER_STAT_LINK_AUTONEG:
894 *value = qede->curcfg.autoneg;
895 break;
896
897 case ETHER_STAT_LINK_DUPLEX:
898 *value = (qede->props.link_duplex == DUPLEX_FULL) ?
899 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
900 break;
901 /*
902 * Supported speeds. These indicate what hardware is capable of.
903 */
904 case ETHER_STAT_CAP_1000HDX:
905 *value = qede->curcfg.supp_capab.param_1000hdx;
906 break;
907
908 case ETHER_STAT_CAP_1000FDX:
909 *value = qede->curcfg.supp_capab.param_1000fdx;
910 break;
911
912 case ETHER_STAT_CAP_10GFDX:
913 *value = qede->curcfg.supp_capab.param_10000fdx;
914 break;
915
916 case ETHER_STAT_CAP_25GFDX:
917 *value = qede->curcfg.supp_capab.param_25000fdx;
918 break;
919
920 case ETHER_STAT_CAP_40GFDX:
921 *value = qede->curcfg.supp_capab.param_40000fdx;
922 break;
923
924 case ETHER_STAT_CAP_50GFDX:
925 *value = qede->curcfg.supp_capab.param_50000fdx;
926 break;
927
928 case ETHER_STAT_CAP_100GFDX:
929 *value = qede->curcfg.supp_capab.param_100000fdx;
930 break;
931
932 /*
933 * Advertised speeds. These indicate what hardware is currently sending.
934 */
935 case ETHER_STAT_ADV_CAP_1000HDX:
936 *value = qede->curcfg.adv_capab.param_1000hdx;
937 break;
938
939 case ETHER_STAT_ADV_CAP_1000FDX:
940 *value = qede->curcfg.adv_capab.param_1000fdx;
941 break;
942
943 case ETHER_STAT_ADV_CAP_10GFDX:
944 *value = qede->curcfg.adv_capab.param_10000fdx;
945 break;
946
947 case ETHER_STAT_ADV_CAP_25GFDX:
948 *value = qede->curcfg.adv_capab.param_25000fdx;
949 break;
950
951 case ETHER_STAT_ADV_CAP_40GFDX:
952 *value = qede->curcfg.adv_capab.param_40000fdx;
953 break;
954
955 case ETHER_STAT_ADV_CAP_50GFDX:
956 *value = qede->curcfg.adv_capab.param_50000fdx;
957 break;
958
959 case ETHER_STAT_ADV_CAP_100GFDX:
960 *value = qede->curcfg.adv_capab.param_100000fdx;
961 break;
962
963 default:
964 rc = ENOTSUP;
965 }
966
967 mutex_exit(&qede->gld_lock);
968 return (rc);
969 }
970
971 /* (flag) TRUE = on, FALSE = off */
972 static int
973 qede_mac_promiscuous(void *arg,
974 boolean_t on)
975 {
976 qede_t *qede = (qede_t *)arg;
977 qede_print("!%s(%d): called", __func__,qede->instance);
978 int ret = DDI_SUCCESS;
979 enum qede_filter_rx_mode_type mode;
980
981 mutex_enter(&qede->drv_lock);
982
983 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
984 ret = ECANCELED;
985 goto exit;
986 }
987
988 if (on) {
989 qede_info(qede, "Entering promiscuous mode");
990 mode = QEDE_FILTER_RX_MODE_PROMISC;
991 qede->params.promisc_fl = B_TRUE;
992 } else {
993 qede_info(qede, "Leaving promiscuous mode");
994 if(qede->params.multi_promisc_fl == B_TRUE) {
995 mode = QEDE_FILTER_RX_MODE_MULTI_PROMISC;
996 } else {
997 mode = QEDE_FILTER_RX_MODE_REGULAR;
998 }
999 qede->params.promisc_fl = B_FALSE;
1000 }
1001
1002 ret = qede_set_filter_rx_mode(qede, mode);
1003
1004 exit:
1005 mutex_exit(&qede->drv_lock);
1006 return (ret);
1007 }
1008
1009 int qede_set_rx_mac_mcast(qede_t *qede, enum ecore_filter_opcode opcode,
1010 uint8_t *mac, int mc_cnt)
1011 {
1012 struct ecore_filter_mcast cmd;
1013 int i;
1014 memset(&cmd, 0, sizeof(cmd));
1015 cmd.opcode = opcode;
1016 cmd.num_mc_addrs = mc_cnt;
1017
1018 for (i = 0; i < mc_cnt; i++, mac += ETH_ALLEN) {
1019 COPY_ETH_ADDRESS(mac, cmd.mac[i]);
1020 }
1021
1022
1023 return (ecore_filter_mcast_cmd(&qede->edev, &cmd,
1024 ECORE_SPQ_MODE_CB, NULL));
1025
1026 }
1027
1028 int
1029 qede_set_filter_rx_mode(qede_t * qede, enum qede_filter_rx_mode_type type)
1030 {
1031 struct ecore_filter_accept_flags flg;
1032
1033 memset(&flg, 0, sizeof(flg));
1034
1035 flg.update_rx_mode_config = 1;
1036 flg.update_tx_mode_config = 1;
1037 flg.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
1038 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
1039 flg.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
1040 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
1041
1042 if (type == QEDE_FILTER_RX_MODE_PROMISC)
1043 flg.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
1044 ECORE_ACCEPT_MCAST_UNMATCHED;
1045 else if (type == QEDE_FILTER_RX_MODE_MULTI_PROMISC)
1046 flg.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
1047 qede_info(qede, "rx_mode rx_filter=0x%x tx_filter=0x%x type=0x%x\n",
1048 flg.rx_accept_filter, flg.tx_accept_filter, type);
1049 return (ecore_filter_accept_cmd(&qede->edev, 0, flg,
1050 0, /* update_accept_any_vlan */
1051 0, /* accept_any_vlan */
1052 ECORE_SPQ_MODE_CB, NULL));
1053 }
1054
1055 int
1056 qede_multicast(qede_t *qede, boolean_t flag, const uint8_t *ptr_mcaddr)
1057 {
1058 int i, ret = DDI_SUCCESS;
1059 qede_mcast_list_entry_t *ptr_mlist;
1060 qede_mcast_list_entry_t *ptr_entry;
1061 int mc_cnt;
1062 unsigned char *mc_macs, *tmpmc;
1063 size_t size;
1064 boolean_t mcmac_exists = B_FALSE;
1065 enum qede_filter_rx_mode_type mode;
1066
1067 #ifdef DEBUG
1068 if (!ptr_mcaddr) {
1069 cmn_err(CE_NOTE, "Removing all multicast");
1070 } else {
1071 cmn_err(CE_NOTE,
1072 "qede=%p %s multicast: %02x:%02x:%02x:%02x:%02x:%02x",
1073 qede, (flag) ? "Adding" : "Removing", ptr_mcaddr[0],
1074 ptr_mcaddr[1],ptr_mcaddr[2],ptr_mcaddr[3],ptr_mcaddr[4],
1075 ptr_mcaddr[5]);
1076 }
1077 #endif
1078
1079
1080 if (flag && (ptr_mcaddr == NULL)) {
1081 cmn_err(CE_WARN, "ERROR: Multicast address not specified");
1082 return EINVAL;
1083 }
1084
1085
1086 /* exceeds addition of mcaddr above limit */
1087 if (flag && (qede->mc_cnt >= MAX_MC_SOFT_LIMIT)) {
1088 qede_info(qede, "Cannot add more than MAX_MC_SOFT_LIMIT");
1089 return ENOENT;
1090 }
1091
1092 size = MAX_MC_SOFT_LIMIT * ETH_ALLEN;
1093
1094 mc_macs = kmem_zalloc(size, KM_NOSLEEP);
1095 if (!mc_macs) {
1096 cmn_err(CE_WARN, "ERROR: Failed to allocate for mc_macs");
1097 return EINVAL;
1098 }
1099
1100 tmpmc = mc_macs;
1101
1102 /* remove all multicast - as flag not set and mcaddr not specified*/
1103 if (!flag && (ptr_mcaddr == NULL)) {
1104 QEDE_LIST_FOR_EACH_ENTRY(ptr_entry,
1105 &qede->mclist.head, qede_mcast_list_entry_t, mclist_entry)
1106 {
1107 if (ptr_entry != NULL) {
1108 QEDE_LIST_REMOVE(&ptr_entry->mclist_entry,
1109 &qede->mclist.head);
1110 kmem_free(ptr_entry,
1111 sizeof (qede_mcast_list_entry_t) + ETH_ALLEN);
1112 }
1113 }
1114
1115 ret = qede_set_rx_mac_mcast(qede,
1116 ECORE_FILTER_REMOVE, mc_macs, 1);
1117 qede->mc_cnt = 0;
1118 goto exit;
1119 }
1120
1121 QEDE_LIST_FOR_EACH_ENTRY(ptr_entry,
1122 &qede->mclist.head, qede_mcast_list_entry_t, mclist_entry)
1123 {
1124 if ((ptr_entry != NULL) &&
1125 IS_ETH_ADDRESS_EQUAL(ptr_mcaddr, ptr_entry->mac)) {
1126 mcmac_exists = B_TRUE;
1127 break;
1128 }
1129 }
1130 if (flag && mcmac_exists) {
1131 ret = DDI_SUCCESS;
1132 goto exit;
1133 } else if (!flag && !mcmac_exists) {
1134 ret = DDI_SUCCESS;
1135 goto exit;
1136 }
1137
1138 if (flag) {
1139 ptr_entry = kmem_zalloc((sizeof (qede_mcast_list_entry_t) +
1140 ETH_ALLEN), KM_NOSLEEP);
1141 ptr_entry->mac = (uint8_t *)ptr_entry +
1142 sizeof (qede_mcast_list_entry_t);
1143 COPY_ETH_ADDRESS(ptr_mcaddr, ptr_entry->mac);
1144 QEDE_LIST_ADD(&ptr_entry->mclist_entry, &qede->mclist.head);
1145 } else {
1146 QEDE_LIST_REMOVE(&ptr_entry->mclist_entry, &qede->mclist.head);
1147 kmem_free(ptr_entry, sizeof(qede_mcast_list_entry_t) +
1148 ETH_ALLEN);
1149 }
1150
1151 mc_cnt = 0;
1152 QEDE_LIST_FOR_EACH_ENTRY(ptr_entry, &qede->mclist.head,
1153 qede_mcast_list_entry_t, mclist_entry) {
1154 COPY_ETH_ADDRESS(ptr_entry->mac, tmpmc);
1155 tmpmc += ETH_ALLEN;
1156 mc_cnt++;
1157 }
1158 qede->mc_cnt = mc_cnt;
1159 if (mc_cnt <=64) {
1160 ret = qede_set_rx_mac_mcast(qede, ECORE_FILTER_ADD,
1161 (unsigned char *)mc_macs, mc_cnt);
1162 if ((qede->params.multi_promisc_fl == B_TRUE) &&
1163 (qede->params.promisc_fl == B_FALSE)) {
1164 mode = QEDE_FILTER_RX_MODE_REGULAR;
1165 ret = qede_set_filter_rx_mode(qede, mode);
1166 }
1167 qede->params.multi_promisc_fl = B_FALSE;
1168 } else {
1169 if ((qede->params.multi_promisc_fl == B_FALSE) &&
1170 (qede->params.promisc_fl = B_FALSE)) {
1171 ret = qede_set_filter_rx_mode(qede,
1172 QEDE_FILTER_RX_MODE_MULTI_PROMISC);
1173 }
1174 qede->params.multi_promisc_fl = B_TRUE;
1175 qede_info(qede, "mode is MULTI_PROMISC");
1176 }
1177 exit:
1178 kmem_free(mc_macs, size);
1179 qede_info(qede, "multicast ret %d mc_cnt %d\n", ret, qede->mc_cnt);
1180 return (ret);
1181 }
1182
1183 /*
1184 * This function is used to enable or disable multicast packet reception for
1185 * particular multicast addresses.
1186 * (flag) TRUE = add, FALSE = remove
1187 */
1188 static int
1189 qede_mac_multicast(void *arg,
1190 boolean_t flag,
1191 const uint8_t * mcast_addr)
1192 {
1193 qede_t *qede = (qede_t *)arg;
1194 int ret = DDI_SUCCESS;
1195
1196
1197 mutex_enter(&qede->gld_lock);
1198 if(qede->qede_state != QEDE_STATE_STARTED) {
1199 mutex_exit(&qede->gld_lock);
1200 return (EAGAIN);
1201 }
1202 ret = qede_multicast(qede, flag, mcast_addr);
1203
1204 mutex_exit(&qede->gld_lock);
1205
1206 return (ret);
1207 }
1208 int
1209 qede_clear_filters(qede_t *qede)
1210 {
1211 int ret = 0;
1212 int i;
1213 if ((qede->params.promisc_fl == B_TRUE) ||
1214 (qede->params.multi_promisc_fl == B_TRUE)) {
1215 ret = qede_set_filter_rx_mode(qede,
1216 QEDE_FILTER_RX_MODE_REGULAR);
1217 if (ret) {
1218 qede_info(qede,
1219 "qede_clear_filters failed to set rx_mode");
1220 }
1221 }
1222 for (i=0; i < qede->ucst_total; i++)
1223 {
1224 if (qede->ucst_mac[i].set) {
1225 qede_rem_macaddr(qede,
1226 qede->ucst_mac[i].mac_addr.ether_addr_octet);
1227 }
1228 }
1229 qede_multicast(qede, B_FALSE, NULL);
1230 return (ret);
1231 }
1232
1233
1234 #ifdef NO_CROSSBOW
/*
 * Stub unicast-address handler used only in NO_CROSSBOW builds.
 * MAC-address programming happens elsewhere, so this simply reports
 * success.  (The original body declared an unused local.)
 */
/* ARGSUSED */
static int
qede_mac_unicast(void *arg,
    const uint8_t * mac_addr)
{
	return (0);
}
1242
1243
1244 static mblk_t *
1245 qede_mac_tx(void *arg,
1246 mblk_t * mblk)
1247 {
1248 qede_t *qede = (qede_t *)arg;
1249 qede_fastpath_t *fp = &qede->fp_array[0];
1250
1251 mblk = qede_ring_tx((void *)fp, mblk);
1252
1253 return (mblk);
1254 }
1255 #endif /* NO_CROSSBOW */
1256
1257
/*
 * Loopback modes advertised through the LB_GET_INFO ioctl; consumed by
 * qede_loopback_ioctl() and applied by qede_set_loopback_mode().
 */
static lb_property_t loopmodes[] = {
	{ normal, "normal", QEDE_LOOP_NONE },
	{ internal, "internal", QEDE_LOOP_INTERNAL },
	{ external, "external", QEDE_LOOP_EXTERNAL },
};
1263
1264 /*
1265 * Set Loopback mode
1266 */
1267
1268 static enum ioc_reply
1269 qede_set_loopback_mode(qede_t *qede, uint32_t mode)
1270 {
1271 int i = 0;
1272 struct ecore_dev *edev = &qede->edev;
1273 struct ecore_hwfn *hwfn;
1274 struct ecore_ptt *ptt = NULL;
1275 struct ecore_mcp_link_params *link_params;
1276
1277 hwfn = &edev->hwfns[0];
1278 link_params = ecore_mcp_get_link_params(hwfn);
1279 ptt = ecore_ptt_acquire(hwfn);
1280
1281 switch(mode) {
1282 default:
1283 qede_info(qede, "unknown loopback mode !!");
1284 ecore_ptt_release(hwfn, ptt);
1285 return IOC_INVAL;
1286
1287 case QEDE_LOOP_NONE:
1288 ecore_mcp_set_link(hwfn, ptt, 0);
1289
1290 while (qede->params.link_state && i < 5000) {
1291 OSAL_MSLEEP(1);
1292 i++;
1293 }
1294 i = 0;
1295
1296 link_params->loopback_mode = ETH_LOOPBACK_NONE;
1297 qede->loop_back_mode = QEDE_LOOP_NONE;
1298 (void) ecore_mcp_set_link(hwfn, ptt, 1);
1299 ecore_ptt_release(hwfn, ptt);
1300
1301 while (!qede->params.link_state && i < 5000) {
1302 OSAL_MSLEEP(1);
1303 i++;
1304 }
1305 return IOC_REPLY;
1306
1307 case QEDE_LOOP_INTERNAL:
1308 qede_print("!%s(%d) : loopback mode (INTERNAL) is set!",
1309 __func__, qede->instance);
1310 ecore_mcp_set_link(hwfn, ptt, 0);
1311
1312 while(qede->params.link_state && i < 5000) {
1313 OSAL_MSLEEP(1);
1314 i++;
1315 }
1316 i = 0;
1317 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1318 qede->loop_back_mode = QEDE_LOOP_INTERNAL;
1319 (void) ecore_mcp_set_link(hwfn, ptt, 1);
1320 ecore_ptt_release(hwfn, ptt);
1321
1322 while(!qede->params.link_state && i < 5000) {
1323 OSAL_MSLEEP(1);
1324 i++;
1325 }
1326 return IOC_REPLY;
1327
1328 case QEDE_LOOP_EXTERNAL:
1329 qede_print("!%s(%d) : External loopback mode is not supported",
1330 __func__, qede->instance);
1331 ecore_ptt_release(hwfn, ptt);
1332 return IOC_INVAL;
1333 }
1334 }
1335
1336 static int
1337 qede_ioctl_pcicfg_rd(qede_t *qede, u32 addr, void *data,
1338 int len)
1339 {
1340 u32 crb, actual_crb;
1341 uint32_t ret = 0;
1342 int cap_offset = 0, cap_id = 0, next_cap = 0;
1343 ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;
1344 qede_ioctl_data_t * data1 = (qede_ioctl_data_t *) data;
1345
1346 cap_offset = pci_config_get8(pci_cfg_handle, PCI_CONF_CAP_PTR);
1347 while (cap_offset != 0) {
1348 /* Check for an invalid PCI read. */
1349 if (cap_offset == PCI_EINVAL8) {
1350 return DDI_FAILURE;
1351 }
1352 cap_id = pci_config_get8(pci_cfg_handle, cap_offset);
1353 if (cap_id == PCI_CAP_ID_PCI_E) {
1354 /* PCIe expr capab struct found */
1355 break;
1356 } else {
1357 next_cap = pci_config_get8(pci_cfg_handle,
1358 cap_offset + 1);
1359 cap_offset = next_cap;
1360 }
1361 }
1362
1363 switch (len) {
1364 case 1:
1365 ret = pci_config_get8(qede->pci_cfg_handle, addr);
1366 (void) memcpy(data, &ret, sizeof(uint8_t));
1367 break;
1368 case 2:
1369 ret = pci_config_get16(qede->pci_cfg_handle, addr);
1370 (void) memcpy(data, &ret, sizeof(uint16_t));
1371 break;
1372 case 4:
1373 ret = pci_config_get32(qede->pci_cfg_handle, addr);
1374 (void) memcpy(data, &ret, sizeof(uint32_t));
1375 break;
1376 default:
1377 cmn_err(CE_WARN, "bad length for pci config read\n");
1378 return (1);
1379 }
1380 return (0);
1381 }
1382
1383 static int
1384 qede_ioctl_pcicfg_wr(qede_t *qede, u32 addr, void *data,
1385 int len)
1386 {
1387 uint16_t ret = 0;
1388 int cap_offset = 0, cap_id = 0, next_cap = 0;
1389 qede_ioctl_data_t * data1 = (qede_ioctl_data_t *) data;
1390 ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;
1391 #if 1
1392 cap_offset = pci_config_get8(pci_cfg_handle, PCI_CONF_CAP_PTR);
1393 while (cap_offset != 0) {
1394 cap_id = pci_config_get8(pci_cfg_handle, cap_offset);
1395 if (cap_id == PCI_CAP_ID_PCI_E) {
1396 /* PCIe expr capab struct found */
1397 break;
1398 } else {
1399 next_cap = pci_config_get8(pci_cfg_handle,
1400 cap_offset + 1);
1401 cap_offset = next_cap;
1402 }
1403 }
1404 #endif
1405
1406 switch(len) {
1407 case 1:
1408 pci_config_put8(qede->pci_cfg_handle, addr,
1409 *(char *)&(data));
1410 break;
1411 case 2:
1412 ret = pci_config_get16(qede->pci_cfg_handle, addr);
1413 ret = ret | *(uint16_t *)data1->uabc;
1414
1415 pci_config_put16(qede->pci_cfg_handle, addr,
1416 ret);
1417 break;
1418 case 4:
1419 pci_config_put32(qede->pci_cfg_handle, addr, *(uint32_t *)data1->uabc);
1420 break;
1421
1422 default:
1423 return (1);
1424 }
1425 return (0);
1426 }
1427
1428 static int
1429 qede_ioctl_rd_wr_reg(qede_t *qede, void *data)
1430 {
1431 struct ecore_hwfn *p_hwfn;
1432 struct ecore_dev *edev = &qede->edev;
1433 struct ecore_ptt *ptt;
1434 qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
1435 uint32_t ret = 0;
1436 uint8_t cmd = (uint8_t) data1->unused1;
1437 uint32_t addr = data1->off;
1438 uint32_t val = *(uint32_t *)&data1->uabc[1];
1439 uint32_t hwfn_index = *(uint32_t *)&data1->uabc[5];
1440 uint32_t *reg_addr;
1441
1442 if (hwfn_index > qede->num_hwfns) {
1443 cmn_err(CE_WARN, "invalid hwfn index from application\n");
1444 return (EINVAL);
1445 }
1446 p_hwfn = &edev->hwfns[hwfn_index];
1447
1448 switch(cmd) {
1449 case QEDE_REG_READ:
1450 ret = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, addr);
1451 (void) memcpy(data1->uabc, &ret, sizeof(uint32_t));
1452 break;
1453
1454 case QEDE_REG_WRITE:
1455 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, addr, val);
1456 break;
1457
1458 default:
1459 cmn_err(CE_WARN,
1460 "wrong command in register read/write from application\n");
1461 break;
1462 }
1463 return (ret);
1464 }
1465
/*
 * Worker for the QEDE_RW_NVRAM ioctl: read from or write to the
 * adapter NVRAM on behalf of a user utility.  The ioctl payload may be
 * split across several linked mblks, so reads scatter the result over
 * the chain and writes gather it.  Writes are a multi-step protocol
 * driven by cmd2: START_NVM_WRITE allocates a staging buffer,
 * ACCUMULATE_NVM_BUF appends payload pieces to it, and a later
 * QEDE_NVRAM_CMD_PUT_FILE_DATA pushes the whole buffer to the MFW.
 */
static int
qede_ioctl_rd_wr_nvram(qede_t *qede, mblk_t *mp)
{
	qede_nvram_data_t *data1 = (qede_nvram_data_t *)(mp->b_cont->b_rptr);
	qede_nvram_data_t *data2, *next_data;
	struct ecore_dev *edev = &qede->edev;
	/* hdr_size: bytes of qede_nvram_data_t header before uabc[] —
	 * assumed to match the user tool's framing; TODO confirm. */
	uint32_t hdr_size = 24, bytes_to_copy, copy_len = 0;
	uint32_t copy_len1 = 0;
	uint32_t addr = data1->off;
	uint32_t size = data1->size, i, buf_size;
	uint8_t cmd, cmd2;
	uint8_t *buf, *tmp_buf;
	mblk_t *mp1;

	cmd = (uint8_t)data1->unused1;

	switch(cmd) {
	case QEDE_NVRAM_CMD_READ:
		/* NOTE(review): GFP_KERNEL is a Linux flag; kmem_zalloc()
		 * expects KM_SLEEP/KM_NOSLEEP — confirm the OSAL maps it. */
		buf = kmem_zalloc(size, GFP_KERNEL);
		if(buf == NULL) {
			cmn_err(CE_WARN, "memory allocation failed"
			    " in nvram read ioctl\n");
			return (DDI_FAILURE);
		}
		(void) ecore_mcp_nvm_read(edev, addr, buf, data1->size);

		/* Payload room in the first mblk, past the header. */
		copy_len = (MBLKL(mp->b_cont)) - hdr_size;
		if(copy_len > size) {
			/* Entire result fits in the first mblk. */
			(void) memcpy(data1->uabc, buf, size);
			kmem_free(buf, size);
			//OSAL_FREE(edev, buf);
			break;
		}
		/* Scatter the remainder across the following mblks. */
		(void) memcpy(data1->uabc, buf, copy_len);
		bytes_to_copy = size - copy_len;
		tmp_buf = ((uint8_t *)buf) + copy_len;
		copy_len1 = copy_len;
		mp1 = mp->b_cont;
		mp1 = mp1->b_cont;

		while (mp1) {
			copy_len = MBLKL(mp1);
			if(mp1->b_cont == NULL) {
				/* NOTE(review): last mblk is treated as 4
				 * bytes short — presumably a trailer in the
				 * user framing; confirm against the tool. */
				copy_len = MBLKL(mp1) - 4;
			}
			data2 = (qede_nvram_data_t *)mp1->b_rptr;
			if (copy_len > bytes_to_copy) {
				(void) memcpy(data2->uabc, tmp_buf,
				    bytes_to_copy);
				kmem_free(buf, size);
				//OSAL_FREE(edev, buf);
				break;
			}
			(void) memcpy(data2->uabc, tmp_buf, copy_len);
			tmp_buf = tmp_buf + copy_len;
			/* NOTE(review): this doubles copy_len before it is
			 * subtracted below; possibly meant
			 * `copy_len1 += copy_len' — confirm. */
			copy_len += copy_len;
			mp1 = mp1->b_cont;
			bytes_to_copy = bytes_to_copy - copy_len;
		}

		kmem_free(buf, size);
		//OSAL_FREE(edev, buf);
		break;

	case QEDE_NVRAM_CMD_WRITE:
		cmd2 = (uint8_t )data1->cmd2;
		size = data1->size;
		addr = data1->off;
		buf_size = size; //data1->buf_size;
		//buf_size = data1->buf_size;

		switch(cmd2){
		case START_NVM_WRITE:
			/* Allocate the staging buffer for the whole image. */
			buf = kmem_zalloc(size, GFP_KERNEL);
			//buf = qede->reserved_buf;
			qede->nvm_buf_size = data1->size;
			if(buf == NULL) {
				cmn_err(CE_WARN,
				    "memory allocation failed in START_NVM_WRITE\n");
				return DDI_FAILURE;
			}
			/* Keep both the base (for the final push/free) and
			 * the running fill pointer. */
			qede->nvm_buf_start = buf;
			qede->nvm_buf = buf;
			qede->copy_len = 0;
			//tmp_buf = buf + addr;
			break;

		case ACCUMULATE_NVM_BUF:
			/* Append this ioctl's payload at the fill pointer. */
			tmp_buf = qede->nvm_buf;
			copy_len = MBLKL(mp->b_cont) - hdr_size;
			if(copy_len > buf_size) {
				if (buf_size < qede->nvm_buf_size) {
					(void) memcpy(tmp_buf, data1->uabc, buf_size);
					qede->copy_len = qede->copy_len +
					    buf_size;
				} else {
					(void) memcpy(tmp_buf,
					    data1->uabc, qede->nvm_buf_size);
					qede->copy_len =
					    qede->copy_len + qede->nvm_buf_size;
				}
				tmp_buf = tmp_buf + buf_size;
				qede->nvm_buf = tmp_buf;
				//qede->copy_len = qede->copy_len + buf_size;
				break;
			}
			(void) memcpy(tmp_buf, data1->uabc, copy_len);
			tmp_buf = tmp_buf + copy_len;
			bytes_to_copy = buf_size - copy_len;
			mp1 = mp->b_cont;
			mp1 = mp1->b_cont;
			copy_len1 = copy_len;

			while (mp1) {
				copy_len = MBLKL(mp1);
				if (mp1->b_cont == NULL) {
					copy_len = MBLKL(mp1) - 4;
				}
				next_data = (qede_nvram_data_t *) mp1->b_rptr;
				if (copy_len > bytes_to_copy){
					(void) memcpy(tmp_buf, next_data->uabc,
					    bytes_to_copy);
					qede->copy_len = qede->copy_len +
					    bytes_to_copy;
					/* NOTE(review): tmp_buf is not
					 * advanced by bytes_to_copy before
					 * the loop exits, so qede->nvm_buf
					 * below excludes these bytes —
					 * confirm intent. */
					break;
				}
				(void) memcpy(tmp_buf, next_data->uabc,
				    copy_len);
				qede->copy_len = qede->copy_len + copy_len;
				tmp_buf = tmp_buf + copy_len;
				/* NOTE(review): mixes copy_len1 into the
				 * per-mblk length before subtracting —
				 * looks off by copy_len1; confirm. */
				copy_len = copy_len1 + copy_len;
				bytes_to_copy = bytes_to_copy - copy_len;
				mp1 = mp1->b_cont;
			}
			qede->nvm_buf = tmp_buf;
			break;

		case STOP_NVM_WRITE:
			//qede->nvm_buf = tmp_buf;
			break;
		case READ_BUF:
			/* Debug aid: dump the staging buffer byte by byte. */
			tmp_buf = (uint8_t *)qede->nvm_buf_start;
			for(i = 0; i < size ; i++){
				cmn_err(CE_NOTE,
				    "buff (%d) : %d\n", i, *tmp_buf);
				tmp_buf ++;
			}
			break;
		}
		break;
	case QEDE_NVRAM_CMD_PUT_FILE_DATA:
		/* Push the accumulated staging buffer to the MFW. */
		tmp_buf = qede->nvm_buf_start;
		(void) ecore_mcp_nvm_write(edev, ECORE_PUT_FILE_DATA,
		    addr, tmp_buf, size);
		kmem_free(qede->nvm_buf_start, size);
		//OSAL_FREE(edev, tmp_buf);
		cmn_err(CE_NOTE, "total size = %x, copied size = %x\n",
		    qede->nvm_buf_size, qede->copy_len);
		tmp_buf = NULL;
		qede->nvm_buf = NULL;
		qede->nvm_buf_start = NULL;
		break;

	case QEDE_NVRAM_CMD_SET_SECURE_MODE:
		(void) ecore_mcp_nvm_set_secure_mode(edev, addr);
		break;

	case QEDE_NVRAM_CMD_DEL_FILE:
		(void) ecore_mcp_nvm_del_file(edev, addr);
		break;

	case QEDE_NVRAM_CMD_PUT_FILE_BEGIN:
		(void) ecore_mcp_nvm_put_file_begin(edev, addr);
		break;

	case QEDE_NVRAM_CMD_GET_NVRAM_RESP:
		/* Return the MFW's last NVRAM command response. */
		buf = kmem_zalloc(size, KM_SLEEP);
		(void) ecore_mcp_nvm_resp(edev, buf);
		(void)memcpy(data1->uabc, buf, size);
		kmem_free(buf, size);
		break;

	default:
		cmn_err(CE_WARN,
		    "wrong command in NVRAM read/write from application\n");
		break;
	}
	return (DDI_SUCCESS);
}
1655
1656 static int
1657 qede_get_func_info(qede_t *qede, void *data)
1658 {
1659 qede_link_output_t link_op;
1660 qede_func_info_t func_info;
1661 qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
1662 struct ecore_dev *edev = &qede->edev;
1663 struct ecore_hwfn *hwfn;
1664 struct ecore_mcp_link_params params;
1665 struct ecore_mcp_link_state link;
1666
1667 hwfn = &edev->hwfns[0];
1668
1669 if(hwfn == NULL){
1670 cmn_err(CE_WARN, "(%s) : cannot acquire hwfn\n",
1671 __func__);
1672 return (DDI_FAILURE);
1673 }
1674 memcpy(¶ms, &hwfn->mcp_info->link_input, sizeof(params));
1675 memcpy(&link, &hwfn->mcp_info->link_output, sizeof(link));
1676
1677 if(link.link_up) {
1678 link_op.link_up = true;
1679 }
1680
1681 link_op.supported_caps = SUPPORTED_FIBRE;
1682 if(params.speed.autoneg) {
1683 link_op.supported_caps |= SUPPORTED_Autoneg;
1684 }
1685
1686 if(params.pause.autoneg ||
1687 (params.pause.forced_rx && params.pause.forced_tx)) {
1688 link_op.supported_caps |= SUPPORTED_Asym_Pause;
1689 }
1690
1691 if (params.pause.autoneg || params.pause.forced_rx ||
1692 params.pause.forced_tx) {
1693 link_op.supported_caps |= SUPPORTED_Pause;
1694 }
1695
1696 if (params.speed.advertised_speeds &
1697 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
1698 link_op.supported_caps |= SUPPORTED_1000baseT_Half |
1699 SUPPORTED_1000baseT_Full;
1700 }
1701
1702 if (params.speed.advertised_speeds &
1703 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
1704 link_op.supported_caps |= SUPPORTED_10000baseKR_Full;
1705 }
1706
1707 if (params.speed.advertised_speeds &
1708 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) {
1709 link_op.supported_caps |= SUPPORTED_40000baseLR4_Full;
1710 }
1711
1712 link_op.advertised_caps = link_op.supported_caps;
1713
1714 if(link.link_up) {
1715 link_op.speed = link.speed;
1716 } else {
1717 link_op.speed = 0;
1718 }
1719
1720 link_op.duplex = DUPLEX_FULL;
1721 link_op.port = PORT_FIBRE;
1722
1723 link_op.autoneg = params.speed.autoneg;
1724
1725 /* Link partner capabilities */
1726 if (link.partner_adv_speed &
1727 ECORE_LINK_PARTNER_SPEED_1G_HD) {
1728 link_op.lp_caps |= SUPPORTED_1000baseT_Half;
1729 }
1730
1731 if (link.partner_adv_speed &
1732 ECORE_LINK_PARTNER_SPEED_1G_FD) {
1733 link_op.lp_caps |= SUPPORTED_1000baseT_Full;
1734 }
1735
1736 if (link.partner_adv_speed &
1737 ECORE_LINK_PARTNER_SPEED_10G) {
1738 link_op.lp_caps |= SUPPORTED_10000baseKR_Full;
1739 }
1740
1741 if (link.partner_adv_speed &
1742 ECORE_LINK_PARTNER_SPEED_20G) {
1743 link_op.lp_caps |= SUPPORTED_20000baseKR2_Full;
1744 }
1745
1746 if (link.partner_adv_speed &
1747 ECORE_LINK_PARTNER_SPEED_40G) {
1748 link_op.lp_caps |= SUPPORTED_40000baseLR4_Full;
1749 }
1750
1751 if (link.an_complete) {
1752 link_op.lp_caps |= SUPPORTED_Autoneg;
1753 }
1754
1755 if (link.partner_adv_pause) {
1756 link_op.lp_caps |= SUPPORTED_Pause;
1757 }
1758
1759 if (link.partner_adv_pause == ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE ||
1760 link.partner_adv_pause == ECORE_LINK_PARTNER_BOTH_PAUSE) {
1761 link_op.lp_caps |= SUPPORTED_Asym_Pause;
1762 }
1763
1764 func_info.supported = link_op.supported_caps;
1765 func_info.advertising = link_op.advertised_caps;
1766 func_info.speed = link_op.speed;
1767 func_info.duplex = link_op.duplex;
1768 func_info.port = qede->pci_func & 0x1;
1769 func_info.autoneg = link_op.autoneg;
1770
1771 (void) memcpy(data1->uabc, &func_info, sizeof(qede_func_info_t));
1772
1773 return (0);
1774 }
1775
1776 static int
1777 qede_do_ioctl(qede_t *qede, queue_t *q, mblk_t *mp)
1778 {
1779 qede_ioctl_data_t *up_data;
1780 qede_driver_info_t driver_info;
1781 struct ecore_dev *edev = &qede->edev;
1782 struct ecore_hwfn *hwfn;
1783 struct ecore_ptt *ptt = NULL;
1784 struct mcp_file_att attrib;
1785 uint32_t flash_size;
1786 uint32_t mcp_resp, mcp_param, txn_size;
1787 uint32_t cmd, size, ret = 0;
1788 uint64_t off;
1789 int * up_data1;
1790 void * ptr;
1791 mblk_t *mp1 = mp;
1792 char mac_addr[32];
1793
1794 up_data = (qede_ioctl_data_t *)(mp->b_cont->b_rptr);
1795
1796 cmd = up_data->cmd;
1797 off = up_data->off;
1798 size = up_data->size;
1799
1800 switch (cmd) {
1801 case QEDE_DRV_INFO:
1802 hwfn = &edev->hwfns[0];
1803 ptt = ecore_ptt_acquire(hwfn);
1804
1805 snprintf(driver_info.drv_name, MAX_QEDE_NAME_LEN, "%s", "qede");
1806 snprintf(driver_info.drv_version, QEDE_STR_SIZE,
1807 "v:%s", qede->version);
1808 snprintf(driver_info.mfw_version, QEDE_STR_SIZE,
1809 "%s", qede->versionMFW);
1810 snprintf(driver_info.stormfw_version, QEDE_STR_SIZE,
1811 "%s", qede->versionFW);
1812 snprintf(driver_info.bus_info, QEDE_STR_SIZE,
1813 "%s", qede->bus_dev_func);
1814
1815
1816 /*
1817 * calling ecore_mcp_nvm_rd_cmd to find the flash length, i
1818 * 0x08 is equivalent of NVM_TYPE_MFW_TRACE1
1819 */
1820 ecore_mcp_get_flash_size(hwfn, ptt, &flash_size);
1821 driver_info.eeprom_dump_len = flash_size;
1822 (void) memcpy(up_data->uabc, &driver_info,
1823 sizeof (qede_driver_info_t));
1824 up_data->size = sizeof (qede_driver_info_t);
1825
1826 ecore_ptt_release(hwfn, ptt);
1827 break;
1828
1829 case QEDE_RD_PCICFG:
1830 ret = qede_ioctl_pcicfg_rd(qede, off, up_data->uabc, size);
1831 break;
1832
1833 case QEDE_WR_PCICFG:
1834 ret = qede_ioctl_pcicfg_wr(qede, off, up_data, size);
1835 break;
1836
1837 case QEDE_RW_REG:
1838 ret = qede_ioctl_rd_wr_reg(qede, (void *)up_data);
1839 break;
1840
1841 case QEDE_RW_NVRAM:
1842 ret = qede_ioctl_rd_wr_nvram(qede, mp1);
1843 break;
1844
1845 case QEDE_FUNC_INFO:
1846 ret = qede_get_func_info(qede, (void *)up_data);
1847 break;
1848
1849 case QEDE_MAC_ADDR:
1850 snprintf(mac_addr, sizeof(mac_addr),
1851 "%02x:%02x:%02x:%02x:%02x:%02x",
1852 qede->ether_addr[0], qede->ether_addr[1],
1853 qede->ether_addr[2], qede->ether_addr[3],
1854 qede->ether_addr[4], qede->ether_addr[5]);
1855 (void) memcpy(up_data->uabc, &mac_addr, sizeof(mac_addr));
1856 break;
1857
1858 }
1859 //if (cmd == QEDE_RW_NVRAM) {
1860 // miocack (q, mp, (sizeof(qede_ioctl_data_t)), 0);
1861 // return IOC_REPLY;
1862 //}
1863 miocack (q, mp, (sizeof(qede_ioctl_data_t)), ret);
1864 //miocack (q, mp, 0, ret);
1865 return (IOC_REPLY);
1866 }
1867
1868 static void
1869 qede_ioctl(qede_t *qede, int cmd, queue_t *q, mblk_t *mp)
1870 {
1871 void *ptr;
1872
1873 switch(cmd) {
1874 case QEDE_CMD:
1875 (void) qede_do_ioctl(qede, q, mp);
1876 break;
1877 default :
1878 cmn_err(CE_WARN, "qede ioctl command %x not supported\n", cmd);
1879 break;
1880 }
1881 return;
1882 }
1883 enum ioc_reply
1884 qede_loopback_ioctl(qede_t *qede, queue_t *wq, mblk_t *mp,
1885 struct iocblk *iocp)
1886 {
1887 lb_info_sz_t *lb_info_size;
1888 lb_property_t *lb_prop;
1889 uint32_t *lb_mode;
1890 int cmd;
1891
1892 /*
1893 * Validate format of ioctl
1894 */
1895 if(mp->b_cont == NULL) {
1896 return IOC_INVAL;
1897 }
1898
1899 cmd = iocp->ioc_cmd;
1900
1901 switch(cmd) {
1902 default:
1903 qede_print("!%s(%d): unknown ioctl command %x\n",
1904 __func__, qede->instance, cmd);
1905 return IOC_INVAL;
1906 case LB_GET_INFO_SIZE:
1907 if (iocp->ioc_count != sizeof(lb_info_sz_t)) {
1908 qede_info(qede, "error: ioc_count %d, sizeof %d",
1909 iocp->ioc_count, sizeof(lb_info_sz_t));
1910 return IOC_INVAL;
1911 }
1912 lb_info_size = (void *)mp->b_cont->b_rptr;
1913 *lb_info_size = sizeof(loopmodes);
1914 return IOC_REPLY;
1915 case LB_GET_INFO:
1916 if (iocp->ioc_count != sizeof (loopmodes)) {
1917 qede_info(qede, "error: iocp->ioc_count %d, sizepof %d",
1918 iocp->ioc_count, sizeof (loopmodes));
1919 return (IOC_INVAL);
1920 }
1921 lb_prop = (void *)mp->b_cont->b_rptr;
1922 bcopy(loopmodes, lb_prop, sizeof (loopmodes));
1923 return IOC_REPLY;
1924 case LB_GET_MODE:
1925 if (iocp->ioc_count != sizeof (uint32_t)) {
1926 qede_info(qede, "iocp->ioc_count %d, sizeof : %d\n",
1927 iocp->ioc_count, sizeof (uint32_t));
1928 return (IOC_INVAL);
1929 }
1930 lb_mode = (void *)mp->b_cont->b_rptr;
1931 *lb_mode = qede->loop_back_mode;
1932 return IOC_REPLY;
1933 case LB_SET_MODE:
1934 if (iocp->ioc_count != sizeof (uint32_t)) {
1935 qede_info(qede, "iocp->ioc_count %d, sizeof : %d\n",
1936 iocp->ioc_count, sizeof (uint32_t));
1937 return (IOC_INVAL);
1938 }
1939 lb_mode = (void *)mp->b_cont->b_rptr;
1940 return (qede_set_loopback_mode(qede,*lb_mode));
1941 }
1942 }
1943
/*
 * GLDv3 mc_ioctl entry point.  Validates device state, enforces
 * net_config privilege where required, dispatches to the loopback or
 * driver-private handlers, and replies (or lets the handler reply)
 * according to the enum ioc_reply status.
 */
static void
qede_mac_ioctl(void * arg,
    queue_t * wq,
    mblk_t * mp)
{
	int err, cmd;
	qede_t * qede = (qede_t *)arg;
	struct iocblk *iocp = (struct iocblk *) (uintptr_t)mp->b_rptr;
	enum ioc_reply status = IOC_DONE;
	boolean_t need_privilege = B_TRUE;

	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	/* Reject ioctls while suspending/suspended. */
	mutex_enter(&qede->drv_lock);
	if ((qede->qede_state == QEDE_STATE_SUSPENDING) ||
	    (qede->qede_state == QEDE_STATE_SUSPENDED)) {
		mutex_exit(&qede->drv_lock);
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	/*
	 * First pass: decide whether the command needs privilege.  Only
	 * the read-only LB_GET_* commands are unprivileged.
	 */
	switch(cmd) {
	case QEDE_CMD:
		break;
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		need_privilege = B_FALSE;
		/* FALLTHROUGH */
	case LB_SET_MODE:
		break;
	default:
		qede_print("!%s(%d) unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(&qede->drv_lock);
		return;
	}

	if(need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if(err){
			qede_info(qede, "secpolicy() failed");
			miocnak(wq, mp, 0, err);
			mutex_exit(&qede->drv_lock);
			return;
		}
	}

	/* Second pass: dispatch the command. */
	switch (cmd) {
	default:
		/*
		 * NOTE(review): unreachable (the first switch already
		 * naks unknown commands), but if it were reached it
		 * would return without replying to the message.
		 */
		qede_print("!%s(%d) : unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		status = IOC_INVAL;
		mutex_exit(&qede->drv_lock);
		return;
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = qede_loopback_ioctl(qede, wq, mp, iocp);
		break;
	case QEDE_CMD:
		/* qede_do_ioctl() acks the message itself. */
		qede_ioctl(qede, cmd, wq, mp);
		status = IOC_DONE;
		break;
	}

	/* Generate the reply the handler asked for. */
	switch(status){
	default:
		qede_print("!%s(%d) : invalid status from ioctl",
		    __func__,qede->instance);
		break;
	case IOC_DONE:
		/*
		 * OK, Reply already sent
		 */
		break;
	case IOC_REPLY:
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	case IOC_INVAL:
		mutex_exit(&qede->drv_lock);
		//miocack(wq, mp, 0, 0);
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		return;
	}
	mutex_exit(&qede->drv_lock);
}
2037
2038 extern ddi_dma_attr_t qede_buf2k_dma_attr_txbuf;
2039 extern ddi_dma_attr_t qede_dma_attr_rxbuf;
2040 extern ddi_dma_attr_t qede_dma_attr_desc;
2041
2042 static boolean_t
2043 qede_mac_get_capability(void *arg,
2044 mac_capab_t capability,
2045 void * cap_data)
2046 {
2047 qede_t * qede = (qede_t *)arg;
2048 uint32_t *txflags = cap_data;
2049 boolean_t ret = B_FALSE;
2050
2051 switch (capability) {
2052 case MAC_CAPAB_HCKSUM: {
2053 u32 *tx_flags = cap_data;
2054 /*
2055 * Check if checksum is enabled on
2056 * tx and advertise the cksum capab
2057 * to mac layer accordingly. On Rx
2058 * side checksummed packets are
2059 * reveiced anyway
2060 */
2061 qede_info(qede, "%s tx checksum offload",
2062 (qede->checksum == DEFAULT_CKSUM_OFFLOAD) ?
2063 "Enabling":
2064 "Disabling");
2065
2066 if (qede->checksum != DEFAULT_CKSUM_OFFLOAD) {
2067 ret = B_FALSE;
2068 break;
2069 }
2070 /*
2071 * Hardware does not support ICMPv6 checksumming. Right now the
2072 * GLDv3 doesn't provide us a way to specify that we don't
2073 * support that. As such, we cannot indicate
2074 * HCKSUM_INET_FULL_V6.
2075 */
2076
2077 *tx_flags = HCKSUM_INET_FULL_V4 |
2078 HCKSUM_IPHDRCKSUM;
2079 ret = B_TRUE;
2080 break;
2081 }
2082 case MAC_CAPAB_LSO: {
2083 mac_capab_lso_t *cap_lso = (mac_capab_lso_t *)cap_data;
2084
2085 qede_info(qede, "%s large segmentation offload",
2086 qede->lso_enable ? "Enabling": "Disabling");
2087 if (qede->lso_enable) {
2088 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2089 cap_lso->lso_basic_tcp_ipv4.lso_max = QEDE_LSO_MAXLEN;
2090 ret = B_TRUE;
2091 }
2092 break;
2093 }
2094 case MAC_CAPAB_RINGS: {
2095 #ifndef NO_CROSSBOW
2096 mac_capab_rings_t *cap_rings = cap_data;
2097 #ifndef ILLUMOS
2098 cap_rings->mr_version = MAC_RINGS_VERSION_1;
2099 #endif
2100
2101 switch (cap_rings->mr_type) {
2102 case MAC_RING_TYPE_RX:
2103 #ifndef ILLUMOS
2104 cap_rings->mr_flags = MAC_RINGS_VLAN_TRANSPARENT;
2105 #endif
2106 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2107 //cap_rings->mr_rnum = 1; /* qede variable */
2108 cap_rings->mr_rnum = qede->num_fp; /* qede variable */
2109 cap_rings->mr_gnum = 1;
2110 cap_rings->mr_rget = qede_fill_ring;
2111 cap_rings->mr_gget = qede_fill_group;
2112 cap_rings->mr_gaddring = NULL;
2113 cap_rings->mr_gremring = NULL;
2114 #ifndef ILLUMOS
2115 cap_rings->mr_ggetringtc = NULL;
2116 #endif
2117 ret = B_TRUE;
2118 break;
2119 case MAC_RING_TYPE_TX:
2120 #ifndef ILLUMOS
2121 cap_rings->mr_flags = MAC_RINGS_VLAN_TRANSPARENT;
2122 #endif
2123 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2124 //cap_rings->mr_rnum = 1;
2125 cap_rings->mr_rnum = qede->num_fp;
2126 cap_rings->mr_gnum = 0;
2127 cap_rings->mr_rget = qede_fill_ring;
2128 cap_rings->mr_gget = qede_fill_group;
2129 cap_rings->mr_gaddring = NULL;
2130 cap_rings->mr_gremring = NULL;
2131 #ifndef ILLUMOS
2132 cap_rings->mr_ggetringtc = NULL;
2133 #endif
2134 ret = B_TRUE;
2135 break;
2136 default:
2137 ret = B_FALSE;
2138 break;
2139 }
2140 #endif
2141 break; /* CASE MAC_CAPAB_RINGS */
2142 }
2143 #ifdef ILLUMOS_NOT_YET
2144 case MAC_CAPAB_TRANSCEIVER: {
2145 mac_capab_transceiver_t *mct = cap_data;
2146
2147 mct->mct_flags = 0;
2148 mct->mct_ntransceivers = qede->edev.num_hwfns;
2149 mct->mct_info = qede_transceiver_info;
2150 mct->mct_read = qede_transceiver_read;
2151
2152 ret = B_TRUE;
2153 break;
2154 }
2155 #endif
2156 default:
2157 break;
2158 }
2159
2160 return (ret);
2161 }
2162
2163 int
2164 qede_configure_link(qede_t *qede, bool op);
2165
2166 static int
2167 qede_mac_set_property(void * arg,
2168 const char * pr_name,
2169 mac_prop_id_t pr_num,
2170 uint_t pr_valsize,
2171 const void * pr_val)
2172 {
2173 qede_t * qede = (qede_t *)arg;
2174 struct ecore_mcp_link_params *link_params;
2175 struct ecore_dev *edev = &qede->edev;
2176 struct ecore_hwfn *hwfn;
2177 int ret_val = 0, i;
2178 uint32_t option;
2179
2180 mutex_enter(&qede->gld_lock);
2181 switch (pr_num)
2182 {
2183 case MAC_PROP_MTU:
2184 bcopy(pr_val, &option, sizeof (option));
2185
2186 if(option == qede->mtu) {
2187 ret_val = 0;
2188 break;
2189 }
2190 if ((option != DEFAULT_JUMBO_MTU) &&
2191 (option != DEFAULT_MTU)) {
2192 ret_val = EINVAL;
2193 break;
2194 }
2195 if(qede->qede_state == QEDE_STATE_STARTED) {
2196 ret_val = EBUSY;
2197 break;
2198 }
2199
2200 ret_val = mac_maxsdu_update(qede->mac_handle, qede->mtu);
2201 if (ret_val == 0) {
2202
2203 qede->mtu = option;
2204 if (option == DEFAULT_JUMBO_MTU) {
2205 qede->jumbo_enable = B_TRUE;
2206 } else {
2207 qede->jumbo_enable = B_FALSE;
2208 }
2209
2210 hwfn = ECORE_LEADING_HWFN(edev);
2211 hwfn->hw_info.mtu = qede->mtu;
2212 ret_val = ecore_mcp_ov_update_mtu(hwfn,
2213 hwfn->p_main_ptt,
2214 hwfn->hw_info.mtu);
2215 if (ret_val != ECORE_SUCCESS) {
2216 qede_print("!%s(%d): MTU change %d option %d"
2217 "FAILED",
2218 __func__,qede->instance, qede->mtu, option);
2219 break;
2220 }
2221 qede_print("!%s(%d): MTU changed %d MTU option"
2222 " %d hwfn %d",
2223 __func__,qede->instance, qede->mtu,
2224 option, hwfn->hw_info.mtu);
2225 }
2226 break;
2227
2228 case MAC_PROP_EN_10GFDX_CAP:
2229 hwfn = &edev->hwfns[0];
2230 link_params = ecore_mcp_get_link_params(hwfn);
2231 if (*(uint8_t *) pr_val) {
2232 link_params->speed.autoneg = 0;
2233 link_params->speed.forced_speed = 10000;
2234 link_params->speed.advertised_speeds =
2235 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2236 qede->forced_speed_10G = *(uint8_t *)pr_val;
2237 }
2238 else {
2239 memcpy(link_params,
2240 &qede->link_input_params.default_link_params,
2241 sizeof (struct ecore_mcp_link_params));
2242 qede->forced_speed_10G = *(uint8_t *)pr_val;
2243 }
2244 if (qede->qede_state == QEDE_STATE_STARTED) {
2245 qede_configure_link(qede,1);
2246 } else {
2247 mutex_exit(&qede->gld_lock);
2248 return (0);
2249 }
2250 break;
2251 default:
2252 ret_val = ENOTSUP;
2253 break;
2254 }
2255 mutex_exit(&qede->gld_lock);
2256 return (ret_val);
2257 }
2258
2259 static void
2260 qede_mac_stop(void *arg)
2261 {
2262 qede_t *qede = (qede_t *)arg;
2263 int status;
2264
2265 qede_print("!%s(%d): called",
2266 __func__,qede->instance);
2267 mutex_enter(&qede->drv_lock);
2268 status = qede_stop(qede);
2269 if (status != DDI_SUCCESS) {
2270 qede_print("!%s(%d): qede_stop "
2271 "FAILED",
2272 __func__,qede->instance);
2273 }
2274
2275 mac_link_update(qede->mac_handle, LINK_STATE_UNKNOWN);
2276 mutex_exit(&qede->drv_lock);
2277 }
2278
2279 static int
2280 qede_mac_start(void *arg)
2281 {
2282 qede_t *qede = (qede_t *)arg;
2283 int status;
2284
2285 qede_print("!%s(%d): called", __func__,qede->instance);
2286 if (!mutex_tryenter(&qede->drv_lock)) {
2287 return (EAGAIN);
2288 }
2289
2290 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
2291 mutex_exit(&qede->drv_lock);
2292 return (ECANCELED);
2293 }
2294
2295 status = qede_start(qede);
2296 if (status != DDI_SUCCESS) {
2297 mutex_exit(&qede->drv_lock);
2298 return (EIO);
2299 }
2300
2301 mutex_exit(&qede->drv_lock);
2302
2303 #ifdef DBLK_DMA_PREMAP
2304 qede->pm_handle = mac_pmh_tx_get(qede->mac_handle);
2305 #endif
2306 return (0);
2307 }
2308
2309 static int
2310 qede_mac_get_property(void *arg,
2311 const char *pr_name,
2312 mac_prop_id_t pr_num,
2313 uint_t pr_valsize,
2314 void *pr_val)
2315 {
2316 qede_t *qede = (qede_t *)arg;
2317 struct ecore_dev *edev = &qede->edev;
2318 link_state_t link_state;
2319 link_duplex_t link_duplex;
2320 uint64_t link_speed;
2321 link_flowctrl_t link_flowctrl;
2322 struct qede_link_cfg link_cfg;
2323 qede_link_cfg_t *hw_cfg = &qede->hwinit;
2324 int ret_val = 0;
2325
2326 memset(&link_cfg, 0, sizeof (struct qede_link_cfg));
2327 qede_get_link_info(&edev->hwfns[0], &link_cfg);
2328
2329
2330
2331 switch (pr_num)
2332 {
2333 case MAC_PROP_MTU:
2334
2335 ASSERT(pr_valsize >= sizeof(uint32_t));
2336 bcopy(&qede->mtu, pr_val, sizeof(uint32_t));
2337 break;
2338
2339 case MAC_PROP_DUPLEX:
2340
2341 ASSERT(pr_valsize >= sizeof(link_duplex_t));
2342 link_duplex = (qede->props.link_duplex) ?
2343 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
2344 bcopy(&link_duplex, pr_val, sizeof(link_duplex_t));
2345 break;
2346
2347 case MAC_PROP_SPEED:
2348
2349 ASSERT(pr_valsize >= sizeof(link_speed));
2350
2351 link_speed = (qede->props.link_speed * 1000000ULL);
2352 bcopy(&link_speed, pr_val, sizeof(link_speed));
2353 break;
2354
2355 case MAC_PROP_STATUS:
2356
2357 ASSERT(pr_valsize >= sizeof(link_state_t));
2358
2359 link_state = (qede->params.link_state) ?
2360 LINK_STATE_UP : LINK_STATE_DOWN;
2361 bcopy(&link_state, pr_val, sizeof(link_state_t));
2362 qede_info(qede, "mac_prop_status %d\n", link_state);
2363 break;
2364
2365 case MAC_PROP_AUTONEG:
2366
2367 *(uint8_t *)pr_val = link_cfg.autoneg;
2368 break;
2369
2370 case MAC_PROP_FLOWCTRL:
2371
2372 ASSERT(pr_valsize >= sizeof(link_flowctrl_t));
2373
2374 /*
2375 * illumos does not have the notion of LINK_FLOWCTRL_AUTO at this time.
2376 */
2377 #ifndef ILLUMOS
2378 if (link_cfg.pause_cfg & QEDE_LINK_PAUSE_AUTONEG_ENABLE) {
2379 link_flowctrl = LINK_FLOWCTRL_AUTO;
2380 }
2381 #endif
2382
2383 if (!(link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
2384 !(link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
2385 link_flowctrl = LINK_FLOWCTRL_NONE;
2386 }
2387 if ((link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
2388 !(link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
2389 link_flowctrl = LINK_FLOWCTRL_RX;
2390 }
2391 if (!(link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
2392 (link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
2393 link_flowctrl = LINK_FLOWCTRL_TX;
2394 }
2395 if ((link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
2396 (link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
2397 link_flowctrl = LINK_FLOWCTRL_BI;
2398 }
2399
2400 bcopy(&link_flowctrl, pr_val, sizeof (link_flowctrl_t));
2401 break;
2402
2403 case MAC_PROP_ADV_10GFDX_CAP:
2404 *(uint8_t *)pr_val = link_cfg.adv_capab.param_10000fdx;
2405 break;
2406
2407 case MAC_PROP_EN_10GFDX_CAP:
2408 *(uint8_t *)pr_val = qede->forced_speed_10G;
2409 break;
2410
2411 case MAC_PROP_PRIVATE:
2412 default:
2413 return (ENOTSUP);
2414
2415 }
2416
2417 return (0);
2418 }
2419
2420 static void
2421 qede_mac_property_info(void *arg,
2422 const char *pr_name,
2423 mac_prop_id_t pr_num,
2424 mac_prop_info_handle_t prh)
2425 {
2426 qede_t *qede = (qede_t *)arg;
2427 qede_link_props_t *def_cfg = &qede_def_link_props;
2428 link_flowctrl_t link_flowctrl;
2429
2430
2431 switch (pr_num)
2432 {
2433
2434 case MAC_PROP_STATUS:
2435 case MAC_PROP_SPEED:
2436 case MAC_PROP_DUPLEX:
2437 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
2438 break;
2439
2440 case MAC_PROP_MTU:
2441
2442 mac_prop_info_set_range_uint32(prh,
2443 MIN_MTU,
2444 MAX_MTU);
2445 break;
2446
2447 case MAC_PROP_AUTONEG:
2448
2449 mac_prop_info_set_default_uint8(prh, def_cfg->autoneg);
2450 break;
2451
2452 case MAC_PROP_FLOWCTRL:
2453
2454 if (!def_cfg->pause) {
2455 link_flowctrl = LINK_FLOWCTRL_NONE;
2456 } else {
2457 link_flowctrl = LINK_FLOWCTRL_BI;
2458 }
2459
2460 mac_prop_info_set_default_link_flowctrl(prh, link_flowctrl);
2461 break;
2462
2463 case MAC_PROP_EN_10GFDX_CAP:
2464 mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
2465 break;
2466
2467 case MAC_PROP_ADV_10GFDX_CAP:
2468 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
2469 break;
2470
2471 default:
2472 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
2473 break;
2474
2475 }
2476 }
2477
/*
 * GLDv3 entry-point vector handed to the MAC layer by mac_register().
 * The first member is the mc_callbacks bitmask declaring which of the
 * optional callbacks below are present; the remaining members are
 * positional, so the trailing comments name each slot.
 */
static mac_callbacks_t qede_callbacks =
{
	(
	    MC_IOCTL
	    /* | MC_RESOURCES */
	    | MC_SETPROP
	    | MC_GETPROP
	    | MC_PROPINFO
	    | MC_GETCAPAB
	),
	qede_mac_stats,		/* mc_getstat */
	qede_mac_start,		/* mc_start */
	qede_mac_stop,		/* mc_stop */
	qede_mac_promiscuous,	/* mc_setpromisc */
	qede_mac_multicast,	/* mc_multicst */
	NULL,			/* mc_unicst -- presumably handled via the
				 * rings/groups capability; TODO confirm */
#ifndef NO_CROSSBOW
	NULL,			/* mc_tx (rings used instead) */
#else
	qede_mac_tx,		/* mc_tx */
#endif
	NULL, /* qede_mac_resources, */
	qede_mac_ioctl,		/* mc_ioctl */
	qede_mac_get_capability,	/* mc_getcapab */
	NULL,			/* mc_open */
	NULL,			/* mc_close */
	qede_mac_set_property,	/* mc_setprop */
	qede_mac_get_property,	/* mc_getprop */
#ifdef MC_PROPINFO
	/*
	 * NOTE(review): MC_PROPINFO is also OR'd into the mask above, so
	 * this #ifdef appears to be always true when this file compiles;
	 * the guard looks vestigial -- confirm before removing.
	 */
	qede_mac_property_info	/* mc_propinfo */
#endif
};
2510
2511 boolean_t
2512 qede_gld_init(qede_t *qede)
2513 {
2514 int status, ret;
2515 mac_register_t *macp;
2516
2517 macp = mac_alloc(MAC_VERSION);
2518 if (macp == NULL) {
2519 cmn_err(CE_NOTE, "%s: mac_alloc() failed\n", __func__);
2520 return (B_FALSE);
2521 }
2522
2523 macp->m_driver = qede;
2524 macp->m_dip = qede->dip;
2525 macp->m_instance = qede->instance;
2526 macp->m_priv_props = NULL;
2527 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2528 macp->m_src_addr = qede->ether_addr;
2529 macp->m_callbacks = &qede_callbacks;
2530 macp->m_min_sdu = 0;
2531 macp->m_max_sdu = qede->mtu;
2532 macp->m_margin = VLAN_TAGSZ;
2533 #ifdef ILLUMOS
2534 macp->m_v12n = MAC_VIRT_LEVEL1;
2535 #endif
2536
2537 status = mac_register(macp, &qede->mac_handle);
2538 if (status != 0) {
2539 cmn_err(CE_NOTE, "%s: mac_register() failed\n", __func__);
2540 }
2541
2542 mac_free(macp);
2543 if (status == 0) {
2544 return (B_TRUE);
2545 }
2546 return (B_FALSE);
2547 }
2548
2549 boolean_t qede_gld_fini(qede_t * qede)
2550 {
2551 return (B_TRUE);
2552 }
2553
2554
2555 void qede_link_update(qede_t * qede,
2556 link_state_t state)
2557 {
2558 mac_link_update(qede->mac_handle, state);
2559 }
2560