2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright © 2003-2011 Emulex. All rights reserved. */
23
24 /*
25 * Source file containing the implementation of the driver entry points
26 * and related helper functions
27 */
28
29 #include <oce_impl.h>
30 #include <oce_ioctl.h>
31
32 /* array of private properties supported by this driver */
33 char *oce_priv_props[] = {
34 "_tx_ring_size",
35 "_tx_bcopy_limit",
36 "_rx_ring_size",
37 "_rx_bcopy_limit",
38 NULL
39 };
40
41 extern int pow10[];
42
43 /* ---[ static function declarations ]----------------------------------- */
44 static int oce_set_priv_prop(struct oce_dev *dev, const char *name,
45 uint_t size, const void *val);
46
47 static int oce_get_priv_prop(struct oce_dev *dev, const char *name,
48 uint_t size, void *val);
49
50 /* ---[ GLD entry points ]----------------------------------------------- */
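/*
 * GLDv3 m_start entry point. Brings the port up via oce_start() and marks
 * the MAC started; returns 0 if already started and EIO if the device is
 * suspended or the start fails.
 */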
51 int
52 oce_m_start(void *arg)
53 {
54 struct oce_dev *dev = arg;
55 int ret;
56
57 mutex_enter(&dev->dev_lock);
58
59 if (dev->state & STATE_MAC_STARTED) {
60 mutex_exit(&dev->dev_lock);
61 return (0);
62 }
63
64 if (dev->suspended) {
65 mutex_exit(&dev->dev_lock);
66 return (EIO);
67 }
68 ret = oce_start(dev);
69 if (ret != DDI_SUCCESS) {
70 mutex_exit(&dev->dev_lock);
71 return (EIO);
72 }
73
74 dev->state |= STATE_MAC_STARTED;
75 mutex_exit(&dev->dev_lock);
76
77
78 return (DDI_SUCCESS);
79 }
80
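/*
 * Brings the port up: refreshes the link status and speed, reports the
 * link state to the MAC layer, starts the Tx/Rx/mailbox queues, enables
 * interrupts and arms the event queues.
 */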
81 int
82 oce_start(struct oce_dev *dev)
83 {
84 int qidx = 0;
85 struct link_status link = {0};
86
87 /* get link status */
88 (void) oce_get_link_status(dev, &link);
89
90 dev->link_status = (link.logical_link_status == NTWK_LOGICAL_LINK_UP) ?
91 LINK_STATE_UP : LINK_STATE_DOWN;
92
93 dev->link_speed = link.qos_link_speed ? link.qos_link_speed * 10 :
94 pow10[link.mac_speed];
95
96 mac_link_update(dev->mac_handle, dev->link_status);
97
98 for (qidx = 0; qidx < dev->nwqs; qidx++) {
99 (void) oce_start_wq(dev->wq[qidx]);
100 }
101 for (qidx = 0; qidx < dev->nrqs; qidx++) {
102 (void) oce_start_rq(dev->rq[qidx]);
103 }
104 (void) oce_start_mq(dev->mq);
105 /* enable interrupts */
106 oce_ei(dev);
107 /* arm the eqs */
108 for (qidx = 0; qidx < dev->neqs; qidx++) {
109 oce_arm_eq(dev, dev->eq[qidx]->eq_id, 0, B_TRUE, B_FALSE);
110 }
111 /* TODO update state */
112 return (DDI_SUCCESS);
113 } /* oce_start */
114
115
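/*
 * GLDv3 m_stop entry point: quiesces the port via oce_stop() and clears
 * the started state, unless the device is suspended.
 */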
116 void
117 oce_m_stop(void *arg)
118 {
119 struct oce_dev *dev = arg;
120
121 /* disable interrupts */
122
123 mutex_enter(&dev->dev_lock);
124 if (dev->suspended) {
125 mutex_exit(&dev->dev_lock);
126 return;
127 }
128 dev->state |= STATE_MAC_STOPPING;
129 oce_stop(dev);
130 dev->state &= ~(STATE_MAC_STOPPING | STATE_MAC_STARTED);
131 mutex_exit(&dev->dev_lock);
132 }
133 /* called with Tx/Rx comp locks held */
134 void
135 oce_stop(struct oce_dev *dev)
136 {
137 int qidx;
138 /* disable interrupts */
139 oce_di(dev);
140 for (qidx = 0; qidx < dev->nwqs; qidx++) {
141 mutex_enter(&dev->wq[qidx]->tx_lock);
142 }
143 mutex_enter(&dev->mq->lock);
144 /* complete the pending Tx */
145 for (qidx = 0; qidx < dev->nwqs; qidx++)
146 oce_clean_wq(dev->wq[qidx]);
147 /* Release all the locks */
148 mutex_exit(&dev->mq->lock);
149 for (qidx = 0; qidx < dev->nwqs; qidx++)
150 mutex_exit(&dev->wq[qidx]->tx_lock);
151 if (dev->link_status == LINK_STATE_UP) {
152 dev->link_status = LINK_STATE_UNKNOWN;
153 mac_link_update(dev->mac_handle, dev->link_status);
154 }
155
156 } /* oce_stop */
157
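/*
 * GLDv3 multicast entry point. Rebuilds the multicast list in a local
 * array with the given address added or removed, programs the hardware
 * multicast table (capped at OCE_MAX_MCA entries) and, on success,
 * commits the new list and count to the dev structure.
 */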
158 int
159 oce_m_multicast(void *arg, boolean_t add, const uint8_t *mca)
160 {
161 struct oce_dev *dev = (struct oce_dev *)arg;
162 struct ether_addr *mca_drv_list;
163 struct ether_addr mca_hw_list[OCE_MAX_MCA];
164 uint16_t new_mcnt = dev->num_mca;
165 int ret;
166 int i;
167
168 /* check the address */
169 if ((mca[0] & 0x1) == 0) {
170 return (EINVAL);
171 }
172 /* Allocate the local array for holding the addresses temporarily */
173 bzero(&mca_hw_list, sizeof (mca_hw_list));
174 mca_drv_list = &dev->multi_cast[0];
175
176 DEV_LOCK(dev);
177 if (add) {
178 /* check if we exceeded hw max supported */
179 if (new_mcnt < OCE_MAX_MCA) {
180 /* copy entire dev mca to the mbx */
181 bcopy((void*)mca_drv_list,
182 (void*)mca_hw_list,
183 (dev->num_mca * sizeof (struct ether_addr)));
184 /* Append the new one to local list */
185 bcopy(mca, &mca_hw_list[dev->num_mca],
186 sizeof (struct ether_addr));
187 }
188 new_mcnt++;
189 } else {
190 struct ether_addr *hwlistp = &mca_hw_list[0];
191 for (i = 0; i < dev->num_mca; i++) {
192 /* copy only if it does not match */
193 if (bcmp((mca_drv_list + i), mca, ETHERADDRL)) {
194 bcopy(mca_drv_list + i, hwlistp,
195 ETHERADDRL);
196 hwlistp++;
197 } else {
198 new_mcnt--;
199 }
200 }
201 }
202
203 if (dev->suspended) {
204 goto finish;
205 }
206 if (new_mcnt > OCE_MAX_MCA) {
207 ret = oce_set_multicast_table(dev, dev->if_id, &mca_hw_list[0],
208 OCE_MAX_MCA, B_TRUE);
209 } else {
210 ret = oce_set_multicast_table(dev, dev->if_id,
211 &mca_hw_list[0], new_mcnt, B_FALSE);
212 }
213 if (ret != 0) {
214 oce_log(dev, CE_WARN, MOD_CONFIG,
215 "mcast %s fails", add ? "ADD" : "DEL");
216 DEV_UNLOCK(dev);
217 return (EIO);
218 }
219 /*
220 * Copy the local structure to dev structure
221 */
222 finish:
223 if (new_mcnt && new_mcnt <= OCE_MAX_MCA) {
224 bcopy(mca_hw_list, mca_drv_list,
225 new_mcnt * sizeof (struct ether_addr));
226
227 dev->num_mca = (uint16_t)new_mcnt;
228 }
229 DEV_UNLOCK(dev);
230 oce_log(dev, CE_NOTE, MOD_CONFIG,
231 "mcast %s, addr=%02x:%02x:%02x:%02x:%02x:%02x, num_mca=%d",
232 add ? "ADD" : "DEL",
233 mca[0], mca[1], mca[2], mca[3], mca[4], mca[5],
234 dev->num_mca);
235 return (0);
236 } /* oce_m_multicast */
237
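/*
 * GLDv3 unicast entry point: replaces the primary MAC address by deleting
 * the previous pmac_id and adding the new address; while suspended the
 * address is only recorded for later programming.
 */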
238 int
239 oce_m_unicast(void *arg, const uint8_t *uca)
240 {
241 struct oce_dev *dev = arg;
242 int ret;
243
244 DEV_LOCK(dev);
245 if (dev->suspended) {
246 bcopy(uca, dev->unicast_addr, ETHERADDRL);
247 dev->num_smac = 0;
248 DEV_UNLOCK(dev);
249 return (DDI_SUCCESS);
250 }
251
252 /* Delete previous one and add new one */
253 ret = oce_del_mac(dev, dev->if_id, &dev->pmac_id);
254 if (ret != DDI_SUCCESS) {
255 DEV_UNLOCK(dev);
256 return (EIO);
257 }
258 dev->num_smac = 0;
259 bzero(dev->unicast_addr, ETHERADDRL);
260
261 /* Set the new MAC address; the earlier one is no longer valid */
262 ret = oce_add_mac(dev, dev->if_id, uca, &dev->pmac_id);
263 if (ret != DDI_SUCCESS) {
264 DEV_UNLOCK(dev);
265 return (EIO);
266 }
267 bcopy(uca, dev->unicast_addr, ETHERADDRL);
268 dev->num_smac = 1;
269 DEV_UNLOCK(dev);
270 return (ret);
271 } /* oce_m_unicast */
272
273 /*
274 * Hashing policy for load balancing over the set of TX rings
275 * available to the driver.
276 */
277 mblk_t *
278 oce_m_send(void *arg, mblk_t *mp)
279 {
280 struct oce_dev *dev = arg;
281 mblk_t *nxt_pkt;
282 mblk_t *rmp = NULL;
283 struct oce_wq *wq;
284
285 DEV_LOCK(dev);
286 if (dev->suspended || !(dev->state & STATE_MAC_STARTED)) {
287 DEV_UNLOCK(dev);
288 freemsg(mp);
289 return (NULL);
290 }
291 DEV_UNLOCK(dev);
292 /*
293 * Hash to pick a wq
294 */
295 wq = oce_get_wq(dev, mp);
296
297 while (mp != NULL) {
298 /* Save the Pointer since mp will be freed in case of copy */
299 nxt_pkt = mp->b_next;
300 mp->b_next = NULL;
301 /* send the packet on the wq selected above */
302 rmp = oce_send_packet(wq, mp);
303 if (rmp != NULL) {
304 /* reschedule Tx */
305 wq->resched = B_TRUE;
306 oce_arm_cq(dev, wq->cq->cq_id, 0, B_TRUE);
307 /* restore the chain */
308 rmp->b_next = nxt_pkt;
309 break;
310 }
311 mp = nxt_pkt;
312 }
313 return (rmp);
314 } /* oce_m_send */
315
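/*
 * GLDv3 capability query: advertises hardware checksum offload (full IPv4
 * and IP header checksum) and, when the device supports it, TCP/IPv4 LSO.
 */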
316 boolean_t
317 oce_m_getcap(void *arg, mac_capab_t cap, void *data)
318 {
319 struct oce_dev *dev = arg;
320 boolean_t ret = B_TRUE;
321 switch (cap) {
322
323 case MAC_CAPAB_HCKSUM: {
324 uint32_t *csum_flags = u32ptr(data);
325 *csum_flags = HCKSUM_ENABLE |
326 HCKSUM_INET_FULL_V4 |
327 HCKSUM_IPHDRCKSUM;
328 break;
329 }
330 case MAC_CAPAB_LSO: {
331 mac_capab_lso_t *mcap_lso = (mac_capab_lso_t *)data;
332 if (dev->lso_capable) {
333 mcap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
334 mcap_lso->lso_basic_tcp_ipv4.lso_max = OCE_LSO_MAX_SIZE;
335 } else {
336 ret = B_FALSE;
337 }
338 break;
339 }
340 default:
341 ret = B_FALSE;
342 break;
343 }
344 return (ret);
345 } /* oce_m_getcap */
346
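/*
 * GLDv3 set-property entry point: handles the MTU, flow control and the
 * driver private properties; all other properties return ENOTSUP.
 */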
347 int
348 oce_m_setprop(void *arg, const char *name, mac_prop_id_t id,
349 uint_t size, const void *val)
350 {
351 struct oce_dev *dev = arg;
352 int ret = 0;
353
354 DEV_LOCK(dev);
355 switch (id) {
356 case MAC_PROP_MTU: {
357 uint32_t mtu;
358
359 bcopy(val, &mtu, sizeof (uint32_t));
360
361 if (dev->mtu == mtu) {
362 ret = 0;
363 break;
364 }
365
366 if (mtu != OCE_MIN_MTU && mtu != OCE_MAX_MTU) {
367 ret = EINVAL;
368 break;
369 }
370
371 ret = mac_maxsdu_update(dev->mac_handle, mtu);
372 if (0 == ret) {
373 dev->mtu = mtu;
374 break;
375 }
376 break;
377 }
378
379 case MAC_PROP_FLOWCTRL: {
380 link_flowctrl_t flowctrl;
381 uint32_t fc = 0;
382
383 bcopy(val, &flowctrl, sizeof (link_flowctrl_t));
384
385 switch (flowctrl) {
386 case LINK_FLOWCTRL_NONE:
387 fc = 0;
388 break;
389
390 case LINK_FLOWCTRL_RX:
391 fc = OCE_FC_RX;
392 break;
393
394 case LINK_FLOWCTRL_TX:
395 fc = OCE_FC_TX;
396 break;
397
398 case LINK_FLOWCTRL_BI:
399 fc = OCE_FC_RX | OCE_FC_TX;
400 break;
401 default:
402 ret = EINVAL;
403 break;
404 } /* switch flowctrl */
405
406 if (ret)
407 break;
408
409 if (fc == dev->flow_control)
410 break;
411
412 if (dev->suspended) {
413 dev->flow_control = fc;
414 break;
415 }
416 /* call to set flow control */
417 ret = oce_set_flow_control(dev, fc);
418 /* store the new fc setting on success */
419 if (ret == 0) {
420 dev->flow_control = fc;
421 }
422 break;
423 }
424
425 case MAC_PROP_PRIVATE:
426 ret = oce_set_priv_prop(dev, name, size, val);
427 break;
428
429 default:
430 ret = ENOTSUP;
431 break;
432 } /* switch id */
433
434 DEV_UNLOCK(dev);
435 return (ret);
436 } /* oce_m_setprop */
437
438 int
439 oce_m_getprop(void *arg, const char *name, mac_prop_id_t id,
440 uint_t size, void *val)
441 {
442 struct oce_dev *dev = arg;
443 uint32_t ret = 0;
444
445 switch (id) {
446 case MAC_PROP_ADV_10GFDX_CAP:
447 case MAC_PROP_EN_10GFDX_CAP:
448 *(uint8_t *)val = 0x01;
449 break;
450
451 case MAC_PROP_DUPLEX: {
452 uint32_t *mode = (uint32_t *)val;
453
454 ASSERT(size >= sizeof (link_duplex_t));
455 if (dev->state & STATE_MAC_STARTED)
456 *mode = LINK_DUPLEX_FULL;
457 else
458 *mode = LINK_DUPLEX_UNKNOWN;
459 break;
460 }
461
462 case MAC_PROP_SPEED: {
463 uint64_t *speed = (uint64_t *)val;
464 struct link_status link = {0};
465
466 ASSERT(size >= sizeof (uint64_t));
467 *speed = 0;
468
469 if (dev->state & STATE_MAC_STARTED) {
470 if (dev->link_speed < 0) {
471 (void) oce_get_link_status(dev, &link);
472 dev->link_speed = link.qos_link_speed ?
473 link.qos_link_speed * 10 :
474 pow10[link.mac_speed];
475 }
476
477 *speed = dev->link_speed * 1000000ull;
478 }
479 break;
480 }
481
482 case MAC_PROP_FLOWCTRL: {
483 link_flowctrl_t *fc = (link_flowctrl_t *)val;
484
485 ASSERT(size >= sizeof (link_flowctrl_t));
486 if (dev->flow_control & OCE_FC_TX &&
487 dev->flow_control & OCE_FC_RX)
488 *fc = LINK_FLOWCTRL_BI;
489 else if (dev->flow_control == OCE_FC_TX)
490 *fc = LINK_FLOWCTRL_TX;
491 else if (dev->flow_control == OCE_FC_RX)
492 *fc = LINK_FLOWCTRL_RX;
493 else if (dev->flow_control == 0)
494 *fc = LINK_FLOWCTRL_NONE;
495 else
496 ret = EINVAL;
497 break;
498 }
528 case MAC_PROP_ADV_10FDX_CAP:
529 case MAC_PROP_EN_10FDX_CAP:
530 case MAC_PROP_ADV_10HDX_CAP:
531 case MAC_PROP_EN_10HDX_CAP:
532 case MAC_PROP_ADV_100T4_CAP:
533 case MAC_PROP_EN_100T4_CAP:
534 case MAC_PROP_ADV_10GFDX_CAP:
535 case MAC_PROP_EN_10GFDX_CAP:
536 case MAC_PROP_SPEED:
537 case MAC_PROP_DUPLEX:
538 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
539 break;
540
541 case MAC_PROP_MTU:
542 mac_prop_info_set_range_uint32(prh, OCE_MIN_MTU, OCE_MAX_MTU);
543 break;
544
545 case MAC_PROP_PRIVATE: {
546 char valstr[64];
547 int value;
548
549 if (strcmp(name, "_tx_ring_size") == 0) {
550 value = OCE_DEFAULT_TX_RING_SIZE;
551 } else if (strcmp(name, "_rx_ring_size") == 0) {
552 value = OCE_DEFAULT_RX_RING_SIZE;
553 } else {
554 return;
555 }
556
557 (void) snprintf(valstr, sizeof (valstr), "%d", value);
558 mac_prop_info_set_default_str(prh, valstr);
559 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
560 break;
561 }
562 }
563 } /* oce_m_propinfo */
564
565 /*
566 * function to handle dlpi streams message from GLDv3 mac layer
567 */
568 void
569 oce_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
570 {
571 struct oce_dev *dev = arg;
572 struct iocblk *iocp;
573 int cmd;
574 uint32_t payload_length;
575 int ret;
576
577 iocp = (struct iocblk *)voidptr(mp->b_rptr);
578 iocp->ioc_error = 0;
579 cmd = iocp->ioc_cmd;
580
581 DEV_LOCK(dev);
582 if (dev->suspended) {
583 miocnak(wq, mp, 0, EINVAL);
584 DEV_UNLOCK(dev);
585 return;
586 }
587 DEV_UNLOCK(dev);
588
589 switch (cmd) {
590
591 case OCE_ISSUE_MBOX: {
592 ret = oce_issue_mbox(dev, wq, mp, &payload_length);
593 miocack(wq, mp, payload_length, ret);
594 break;
595 }
596 case OCE_QUERY_DRIVER_DATA: {
597 struct oce_driver_query *drv_query =
598 (struct oce_driver_query *)(void *)mp->b_cont->b_rptr;
599
600 /* if the driver version does not match bail */
601 if (drv_query->version != OCN_VERSION_SUPPORTED) {
602 oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
603 "One Connect version mismatch");
604 miocnak(wq, mp, 0, ENOTSUP);
605 break;
606 }
607
608 /* fill the return values */
609 bcopy(OCE_MOD_NAME, drv_query->driver_name,
610 (sizeof (OCE_MOD_NAME) > 32) ?
611 31 : sizeof (OCE_MOD_NAME));
612 drv_query->driver_name[31] = '\0';
642 int
643 oce_m_promiscuous(void *arg, boolean_t enable)
644 {
645 struct oce_dev *dev = arg;
646 int ret = 0;
647
648 DEV_LOCK(dev);
649
650 if (dev->promisc == enable) {
651 DEV_UNLOCK(dev);
652 return (ret);
653 }
654
655 if (dev->suspended) {
656 /* remember the setting */
657 dev->promisc = enable;
658 DEV_UNLOCK(dev);
659 return (ret);
660 }
661
662 ret = oce_set_promiscuous(dev, enable);
663 if (ret == DDI_SUCCESS)
664 dev->promisc = enable;
665 DEV_UNLOCK(dev);
666 return (ret);
667 } /* oce_m_promiscuous */
668
669 /*
670 * function to set a private property.
671 * Called from the set_prop GLD entry point
672 *
673 * dev - software handle to the device
674 * name - string containing the property name
675 * size - length of the string in name
676 * val - pointer to a location where the value to set is stored
677 *
678 * return EINVAL => invalid value in val 0 => success
679 */
680 static int
681 oce_set_priv_prop(struct oce_dev *dev, const char *name,
682 uint_t size, const void *val)
683 {
684 int ret = ENOTSUP;
685 long result;
686
687 _NOTE(ARGUNUSED(size));
688
689 if (NULL == val) {
690 ret = EINVAL;
691 return (ret);
692 }
693
694 if (strcmp(name, "_tx_bcopy_limit") == 0) {
695 (void) ddi_strtol(val, (char **)NULL, 0, &result);
696 if (result <= OCE_WQ_BUF_SIZE) {
697 if (result != dev->tx_bcopy_limit)
698 dev->tx_bcopy_limit = (uint32_t)result;
699 ret = 0;
700 } else {
701 ret = EINVAL;
702 }
703 }
704 if (strcmp(name, "_rx_bcopy_limit") == 0) {
705 (void) ddi_strtol(val, (char **)NULL, 0, &result);
706 if (result <= OCE_RQ_BUF_SIZE) {
707 if (result != dev->rx_bcopy_limit)
708 dev->rx_bcopy_limit = (uint32_t)result;
709 ret = 0;
710 } else {
711 ret = EINVAL;
712 }
713 }
714
715 return (ret);
716 } /* oce_set_priv_prop */
717
718 /*
719 * function to get the value of a private property. Called from get_prop
720 *
721 * dev - software handle to the device
722 * name - string containing the property name
723 * size - length of the string contained name
724 * val - [OUT] pointer to the location where the result is returned
725 *
726 * return EINVAL => invalid request 0 => success
727 */
728 static int
729 oce_get_priv_prop(struct oce_dev *dev, const char *name,
730 uint_t size, void *val)
731 {
732 int value;
733
734 if (strcmp(name, "_tx_ring_size") == 0) {
735 value = dev->tx_ring_size;
736 } else if (strcmp(name, "_tx_bcopy_limit") == 0) {
737 value = dev->tx_bcopy_limit;
738 } else if (strcmp(name, "_rx_ring_size") == 0) {
739 value = dev->rx_ring_size;
740 } else if (strcmp(name, "_rx_bcopy_limit") == 0) {
741 value = dev->rx_bcopy_limit;
742 } else {
743 return (ENOTSUP);
744 }
745
746 (void) snprintf(val, size, "%d", value);
747 return (0);
748 } /* oce_get_priv_prop */
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2009-2012 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27
28
29 /*
30 * Source file containing the implementation of the driver entry points
31 * and related helper functions
32 */
33
34 #include <oce_impl.h>
35 #include <oce_ioctl.h>
36
37 /* ---[ static function declarations ]----------------------------------- */
38 static int oce_set_priv_prop(struct oce_dev *dev, const char *name,
39 uint_t size, const void *val);
40
41 static int oce_get_priv_prop(struct oce_dev *dev, const char *name,
42 uint_t size, void *val);
43
44 /* ---[ GLD entry points ]----------------------------------------------- */
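/*
 * GLDv3 m_start entry point: allocates the Tx buffer pools, brings the
 * port up via oce_start(), initializes the Rx group locks and enables
 * the watchdog timer.
 */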
45 int
46 oce_m_start(void *arg)
47 {
48 struct oce_dev *dev = arg;
49 int i;
50
51 mutex_enter(&dev->dev_lock);
52
53 if (dev->state & STATE_MAC_STARTED) {
54 mutex_exit(&dev->dev_lock);
55 return (0);
56 }
57
58 if (dev->suspended) {
59 mutex_exit(&dev->dev_lock);
60 return (EIO);
61 }
62
63 /* allocate Tx buffers */
64 if (oce_init_tx(dev) != DDI_SUCCESS) {
65 mutex_exit(&dev->dev_lock);
66 oce_log(dev, CE_WARN, MOD_CONFIG, "%s",
67 "Failed to init rings");
68 return (DDI_FAILURE);
69 }
70
71 if (oce_start(dev) != DDI_SUCCESS) {
72 oce_fini_tx(dev);
73 mutex_exit(&dev->dev_lock);
74 return (EIO);
75 }
76 dev->state |= STATE_MAC_STARTED;
77
78 /* initialise the group locks */
79 for (i = 0; i < dev->num_rx_groups; i++) {
80 mutex_init(&dev->rx_group[i].grp_lock, NULL, MUTEX_DRIVER,
81 DDI_INTR_PRI(dev->intr_pri));
82 }
83
84 mutex_exit(&dev->dev_lock);
85 oce_enable_wd_timer(dev);
86 return (DDI_SUCCESS);
87 }
88
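/* Arms every event queue and marks it started */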
89 void
90 oce_start_eqs(struct oce_dev *dev)
91 {
92 int qidx = 0;
93
94 for (qidx = 0; qidx < dev->neqs; qidx++) {
95 mutex_enter(&dev->eq[qidx].lock);
96 oce_arm_eq(dev, dev->eq[qidx].eq_id, 0, B_TRUE, B_FALSE);
97 dev->eq[qidx].qstate = QSTARTED;
98 mutex_exit(&dev->eq[qidx].lock);
99 }
100 }
101
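/* Counterpart of oce_start_eqs(): marks every event queue stopped */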
102 void
103 oce_stop_eqs(struct oce_dev *dev)
104 {
105 int qidx = 0;
106
107 for (qidx = 0; qidx < dev->neqs; qidx++) {
108 mutex_enter(&dev->eq[qidx].lock);
109 oce_arm_eq(dev, dev->eq[qidx].eq_id, 0, B_FALSE, B_FALSE);
110 dev->eq[qidx].qstate = QSTOPPED;
111 mutex_exit(&dev->eq[qidx].lock);
112 }
113 }
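/*
 * Brings the port up: programs the default flow control and promiscuous
 * settings over the bootstrap mailbox, enables interrupts, creates the
 * I/O and MCC queues, binds and starts the Tx rings, arms the event
 * queues and publishes the link state to the MAC layer.
 */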
114 int
115 oce_start(struct oce_dev *dev)
116 {
117 int qidx = 0;
118
119 /* disable the interrupts */
120 if (!LANCER_CHIP(dev))
121 oce_chip_di(dev);
122
123 /* set default flow control */
124 (void) oce_set_flow_control(dev, dev->flow_control, MBX_BOOTSTRAP);
125 (void) oce_set_promiscuous(dev, dev->promisc, MBX_BOOTSTRAP);
126
127 if (oce_ei(dev) != DDI_SUCCESS) {
128 return (DDI_FAILURE);
129 }
130
131 if (oce_create_queues(dev) != DDI_SUCCESS) {
132 goto cleanup_handler;
133 }
134
135 for (qidx = 0; qidx < dev->tx_rings; qidx++) {
136 mac_ring_intr_set(dev->default_tx_rings[qidx].tx->handle,
137 dev->htable[dev->default_tx_rings[qidx].tx->cq->eq->idx]);
138 (void) oce_start_wq(dev->default_tx_rings[qidx].tx);
139 }
140
141 if (oce_create_mcc_queue(dev) != DDI_SUCCESS) {
142 goto delete_queues;
143 }
144 (void) oce_start_mq(dev->mq);
145
146 dev->state |= STATE_INTR_ENABLED;
147
148 if (!LANCER_CHIP(dev))
149 oce_chip_ei(dev);
150
151 /* arm the eqs */
152 oce_start_eqs(dev);
153
154 /* get link status */
155 if (oce_get_link_status(dev, &dev->link_status, &dev->link_speed,
156 (uint8_t *)&dev->link_duplex, 1, MBX_ASYNC_MQ) != DDI_SUCCESS) {
157 (void) oce_get_link_status(dev, &dev->link_status,
158 &dev->link_speed, (uint8_t *)&dev->link_duplex,
159 0, MBX_ASYNC_MQ);
160 }
161 oce_log(dev, CE_NOTE, MOD_CONFIG, "link speed %d "
162 "link status %d", dev->link_speed, dev->link_status);
163
164 mac_link_update(dev->mac_handle, dev->link_status);
165 return (DDI_SUCCESS);
166
167 delete_queues:
168 oce_delete_queues(dev);
169 cleanup_handler:
170 (void) oce_di(dev);
171 return (DDI_FAILURE);
172 } /* oce_start */
173
174
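/*
 * GLDv3 m_stop entry point: clears the started state, tears the port down
 * via oce_stop(), frees the Tx buffer pools, waits for Rx buffers still
 * held upstream, destroys the Rx group locks and disables the watchdog
 * timer.
 */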
175 void
176 oce_m_stop(void *arg)
177 {
178 struct oce_dev *dev = arg;
179 int i;
180
181 mutex_enter(&dev->dev_lock);
182 if (dev->suspended) {
183 mutex_exit(&dev->dev_lock);
184 return;
185 }
186
187 dev->state &= ~STATE_MAC_STARTED;
188 oce_stop(dev);
189
190 /* free Tx buffers */
191 oce_fini_tx(dev);
192
193 for (i = 0; i < dev->rx_rings; i++) {
194 while (dev->rq[i].pending > 0) {
195 oce_log(dev, CE_NOTE, MOD_CONFIG,
196 "%d pending buffers on rq %p\n",
197 dev->rq[i].pending, (void *)&dev->rq[i]);
198 drv_usecwait(10 * 1000);
199 }
200 }
201
202 /* destroy group locks */
203 for (i = 0; i < dev->num_rx_groups; i++) {
204 mutex_destroy(&dev->rx_group[i].grp_lock);
205 }
206
207 mutex_exit(&dev->dev_lock);
208 oce_disable_wd_timer(dev);
209 }
210
211
212 /* called with Tx/Rx comp locks held */
213 void
214 oce_stop(struct oce_dev *dev)
215 {
216 int qidx;
217
218 dev->state |= STATE_MAC_STOPPING;
219
220 /* disable interrupts */
221 (void) oce_di(dev);
222 oce_stop_eqs(dev);
223 dev->state &= (~STATE_INTR_ENABLED);
224
225 for (qidx = 0; qidx < dev->nwqs; qidx++) {
226 mac_ring_intr_set(dev->default_tx_rings[qidx].tx->handle, NULL);
227 mutex_enter(&dev->wq[qidx].tx_lock);
228 }
229 mutex_enter(&dev->mq->lock);
230
231 for (qidx = 0; qidx < dev->tx_rings; qidx++) {
232 /* stop and flush the Tx */
233 (void) oce_clean_wq(dev->default_tx_rings[qidx].tx);
234 }
235
236 /* Free the pending commands */
237 oce_clean_mq(dev->mq);
238
239 /* Release all the locks */
240 mutex_exit(&dev->mq->lock);
241 for (qidx = 0; qidx < dev->nwqs; qidx++)
242 mutex_exit(&dev->wq[qidx].tx_lock);
243
244 if (dev->link_status == LINK_STATE_UP) {
245 dev->link_status = LINK_STATE_UNKNOWN;
246 mac_link_update(dev->mac_handle, dev->link_status);
247 }
248
249 oce_delete_mcc_queue(dev);
250 oce_delete_queues(dev);
251
252 dev->state &= ~STATE_MAC_STOPPING;
253 } /* oce_stop */
254
255
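/*
 * GLDv3 multicast entry point. Rebuilds the multicast list in a local
 * array with the given address added or removed, programs the hardware
 * multicast table (capped at OCE_MAX_MCA entries) and, on success,
 * commits the new list and count to the dev structure.
 */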
256 int
257 oce_m_multicast(void *arg, boolean_t add, const uint8_t *mca)
258 {
259 struct oce_dev *dev = (struct oce_dev *)arg;
260 struct ether_addr *mca_drv_list;
261 struct ether_addr mca_hw_list[OCE_MAX_MCA];
262 uint16_t new_mcnt = dev->num_mca;
263 int ret;
264 int i;
265
266 /* Allocate the local array for holding the addresses temporarily */
267 bzero(&mca_hw_list, sizeof (mca_hw_list));
268 mca_drv_list = &dev->multi_cast[0];
269
270 DEV_LOCK(dev);
271 if (add) {
272 /* check if we exceeded hw max supported */
273 if (new_mcnt < OCE_MAX_MCA) {
274 /* copy entire dev mca to the mbx */
275 bcopy((void*)mca_drv_list,
276 (void*)mca_hw_list,
277 (dev->num_mca * sizeof (struct ether_addr)));
278 /* Append the new one to local list */
279 bcopy(mca, &mca_hw_list[dev->num_mca],
280 sizeof (struct ether_addr));
281 }
282 new_mcnt++;
283 } else {
284 struct ether_addr *hwlistp = &mca_hw_list[0];
285 for (i = 0; i < dev->num_mca; i++) {
286 /* copy only if it does not match */
287 if (bcmp((mca_drv_list + i), mca, ETHERADDRL)) {
288 bcopy(mca_drv_list + i, hwlistp,
289 ETHERADDRL);
290 hwlistp++;
291 }
292 }
293 /* Decrement the count */
294 new_mcnt--;
295 }
296
297 if (dev->suspended) {
298 goto finish;
299 }
300 if (new_mcnt > OCE_MAX_MCA) {
301 ret = oce_set_multicast_table(dev, dev->if_id, &mca_hw_list[0],
302 OCE_MAX_MCA, B_TRUE, MBX_BOOTSTRAP);
303 } else {
304 ret = oce_set_multicast_table(dev, dev->if_id,
305 &mca_hw_list[0], new_mcnt, B_FALSE, MBX_BOOTSTRAP);
306 }
307 if (ret != 0) {
308 oce_log(dev, CE_WARN, MOD_CONFIG,
309 "mcast %s failed 0x%x", add ? "ADD" : "DEL", ret);
310 DEV_UNLOCK(dev);
311 return (EIO);
312 }
313 /*
314 * Copy the local structure to dev structure
315 */
316 finish:
317 if (new_mcnt && new_mcnt <= OCE_MAX_MCA) {
318 bcopy(mca_hw_list, mca_drv_list,
319 new_mcnt * sizeof (struct ether_addr));
320
321 dev->num_mca = (uint16_t)new_mcnt;
322 }
323 DEV_UNLOCK(dev);
324 oce_log(dev, CE_NOTE, MOD_CONFIG,
325 "mcast %s, addr=%02x:%02x:%02x:%02x:%02x:%02x, num_mca=%d",
326 add ? "ADD" : "DEL",
327 mca[0], mca[1], mca[2], mca[3], mca[4], mca[5],
328 dev->num_mca);
329 return (0);
330 } /* oce_m_multicast */
331
332
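/*
 * GLDv3 capability query: advertises hardware checksum offload, TCP/IPv4
 * LSO when supported, and the Rx/Tx ring capabilities.
 */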
333 boolean_t
334 oce_m_getcap(void *arg, mac_capab_t cap, void *data)
335 {
336 struct oce_dev *dev = arg;
337 boolean_t ret = B_TRUE;
338 switch (cap) {
339
340 case MAC_CAPAB_HCKSUM: {
341 uint32_t *csum_flags = u32ptr(data);
342 *csum_flags = HCKSUM_ENABLE |
343 HCKSUM_INET_FULL_V4 |
344 HCKSUM_IPHDRCKSUM;
345 break;
346 }
347 case MAC_CAPAB_LSO: {
348 mac_capab_lso_t *mcap_lso = (mac_capab_lso_t *)data;
349 if (dev->lso_capable) {
350 mcap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
351 mcap_lso->lso_basic_tcp_ipv4.lso_max = OCE_LSO_MAX_SIZE;
352 } else {
353 ret = B_FALSE;
354 }
355 break;
356 }
357 case MAC_CAPAB_RINGS:
358
359 ret = oce_fill_rings_capab(dev, (mac_capab_rings_t *)data);
360 break;
361
362 default:
363 ret = B_FALSE;
364 break;
365 }
366 return (ret);
367 } /* oce_m_getcap */
368
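/*
 * GLDv3 set-property entry point: handles the MTU (rejected with EBUSY
 * while the MAC is started), flow control and the driver private
 * properties; all other properties return ENOTSUP.
 */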
369 int
370 oce_m_setprop(void *arg, const char *name, mac_prop_id_t id,
371 uint_t size, const void *val)
372 {
373 struct oce_dev *dev = arg;
374 int ret = 0;
375
376 DEV_LOCK(dev);
377 switch (id) {
378 case MAC_PROP_MTU: {
379 uint32_t mtu;
380
381 bcopy(val, &mtu, sizeof (uint32_t));
382
383 if (dev->mtu == mtu) {
384 ret = 0;
385 break;
386 }
387
388 if (mtu != OCE_MIN_MTU && mtu != OCE_MAX_MTU) {
389 ret = EINVAL;
390 break;
391 }
392
393 if (dev->state & STATE_MAC_STARTED) {
394 ret = EBUSY;
395 break;
396 }
397
398 ret = mac_maxsdu_update(dev->mac_handle, mtu);
399 if (0 == ret) {
400 dev->mtu = mtu;
401 break;
402 }
403 break;
404 }
405
406 case MAC_PROP_FLOWCTRL: {
407 link_flowctrl_t flowctrl;
408 uint32_t fc = 0;
409
410 bcopy(val, &flowctrl, sizeof (link_flowctrl_t));
411
412 switch (flowctrl) {
413 case LINK_FLOWCTRL_NONE:
414 fc = 0;
415 break;
416
417 case LINK_FLOWCTRL_RX:
418 fc = OCE_FC_RX;
419 break;
420
421 case LINK_FLOWCTRL_TX:
422 fc = OCE_FC_TX;
423 break;
424
425 case LINK_FLOWCTRL_BI:
426 fc = OCE_FC_RX | OCE_FC_TX;
427 break;
428 default:
429 ret = EINVAL;
430 break;
431 } /* switch flowctrl */
432
433 if (ret)
434 break;
435
436 if (fc == dev->flow_control)
437 break;
438
439 if (dev->suspended) {
440 dev->flow_control = fc;
441 break;
442 }
443 /* call to set flow control */
444 ret = oce_set_flow_control(dev, fc, MBX_ASYNC_MQ);
445 /* store the new fc setting on success */
446 if (ret == 0) {
447 dev->flow_control = fc;
448 }
449 break;
450 }
451
452 case MAC_PROP_PRIVATE:
453 ret = oce_set_priv_prop(dev, name, size, val);
454 break;
455
456 default:
457 ret = ENOTSUP;
458 break;
459 } /* switch id */
460
461 DEV_UNLOCK(dev);
462 return (ret);
463 } /* oce_m_setprop */
464
465 int
466 oce_m_getprop(void *arg, const char *name, mac_prop_id_t id,
467 uint_t size, void *val)
468 {
469 struct oce_dev *dev = arg;
470 uint32_t ret = 0;
471
472 switch (id) {
473 case MAC_PROP_ADV_10GFDX_CAP:
474 case MAC_PROP_EN_10GFDX_CAP:
475 *(uint8_t *)val = 0x01;
476 break;
477
478 case MAC_PROP_DUPLEX: {
479 uint32_t *mode = (uint32_t *)val;
480
481 ASSERT(size >= sizeof (link_duplex_t));
482 if (dev->state & STATE_MAC_STARTED)
483 *mode = LINK_DUPLEX_FULL;
484 else
485 *mode = LINK_DUPLEX_UNKNOWN;
486 break;
487 }
488
489 case MAC_PROP_SPEED: {
490 uint64_t speed;
491 speed = dev->link_speed * 1000000ull;
492 bcopy(&speed, val, sizeof (speed));
493 break;
494 }
495
496 case MAC_PROP_FLOWCTRL: {
497 link_flowctrl_t *fc = (link_flowctrl_t *)val;
498
499 ASSERT(size >= sizeof (link_flowctrl_t));
500 if (dev->flow_control & OCE_FC_TX &&
501 dev->flow_control & OCE_FC_RX)
502 *fc = LINK_FLOWCTRL_BI;
503 else if (dev->flow_control == OCE_FC_TX)
504 *fc = LINK_FLOWCTRL_TX;
505 else if (dev->flow_control == OCE_FC_RX)
506 *fc = LINK_FLOWCTRL_RX;
507 else if (dev->flow_control == 0)
508 *fc = LINK_FLOWCTRL_NONE;
509 else
510 ret = EINVAL;
511 break;
512 }
542 case MAC_PROP_ADV_10FDX_CAP:
543 case MAC_PROP_EN_10FDX_CAP:
544 case MAC_PROP_ADV_10HDX_CAP:
545 case MAC_PROP_EN_10HDX_CAP:
546 case MAC_PROP_ADV_100T4_CAP:
547 case MAC_PROP_EN_100T4_CAP:
548 case MAC_PROP_ADV_10GFDX_CAP:
549 case MAC_PROP_EN_10GFDX_CAP:
550 case MAC_PROP_SPEED:
551 case MAC_PROP_DUPLEX:
552 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
553 break;
554
555 case MAC_PROP_MTU:
556 mac_prop_info_set_range_uint32(prh, OCE_MIN_MTU, OCE_MAX_MTU);
557 break;
558
559 case MAC_PROP_PRIVATE: {
560 char valstr[64];
561 int value;
562 uint_t perm = MAC_PROP_PERM_READ;
563
564 bzero(valstr, sizeof (valstr));
565 if (strcmp(name, "_tx_rings") == 0) {
566 value = OCE_DEFAULT_WQS;
567 } else if (strcmp(name, "_tx_ring_size") == 0) {
568 value = OCE_DEFAULT_TX_RING_SIZE;
569 perm = MAC_PROP_PERM_RW;
570 } else if (strcmp(name, "_tx_bcopy_limit") == 0) {
571 value = OCE_DEFAULT_TX_BCOPY_LIMIT;
572 perm = MAC_PROP_PERM_RW;
573 } else if (strcmp(name, "_tx_reclaim_threshold") == 0) {
574 value = OCE_DEFAULT_TX_RECLAIM_THRESHOLD;
575 perm = MAC_PROP_PERM_RW;
576 } else if (strcmp(name, "_rx_rings") == 0) {
577 value = OCE_DEFAULT_RQS;
578 } else if (strcmp(name, "_rx_rings_per_group") == 0) {
579 value = OCE_DEF_RING_PER_GROUP;
580 } else if (strcmp(name, "_rx_ring_size") == 0) {
581 value = OCE_DEFAULT_RX_RING_SIZE;
582 } else if (strcmp(name, "_rx_bcopy_limit") == 0) {
583 value = OCE_DEFAULT_RX_BCOPY_LIMIT;
584 perm = MAC_PROP_PERM_RW;
585 } else if (strcmp(name, "_rx_pkts_per_intr") == 0) {
586 value = OCE_DEFAULT_RX_PKTS_PER_INTR;
587 perm = MAC_PROP_PERM_RW;
588 } else if (strcmp(name, "_log_level") == 0) {
589 value = OCE_DEFAULT_LOG_SETTINGS;
590 perm = MAC_PROP_PERM_RW;
591 } else
592 return;
593
594 (void) snprintf(valstr, sizeof (valstr), "%d", value);
595 mac_prop_info_set_default_str(prh, valstr);
596 mac_prop_info_set_perm(prh, perm);
597 break;
598 }
599 }
600 } /* oce_m_propinfo */
601
602 /*
603 * function to handle dlpi streams message from GLDv3 mac layer
604 */
605 void
606 oce_m_ioctl(void *arg, queue_t *wq, mblk_t *mp)
607 {
608 struct oce_dev *dev = arg;
609 struct iocblk *iocp;
610 int cmd;
611 uint32_t payload_length;
612 int ret;
613
614 iocp = (struct iocblk *)voidptr(mp->b_rptr);
615 iocp->ioc_error = 0;
616 cmd = iocp->ioc_cmd;
617
618 DEV_LOCK(dev);
619 if (dev->suspended) {
620 miocnak(wq, mp, 0, EINVAL);
621 DEV_UNLOCK(dev);
622 return;
623 }
624 DEV_UNLOCK(dev);
625
626 switch (cmd) {
627
628 case OCE_ISSUE_MBOX: {
629 ret = oce_issue_mbox_passthru(dev, wq, mp, &payload_length);
630 miocack(wq, mp, payload_length, ret);
631 break;
632 }
633 case OCE_QUERY_DRIVER_DATA: {
634 struct oce_driver_query *drv_query =
635 (struct oce_driver_query *)(void *)mp->b_cont->b_rptr;
636
637 /* if the driver version does not match bail */
638 if (drv_query->version != OCN_VERSION_SUPPORTED) {
639 oce_log(dev, CE_NOTE, MOD_CONFIG, "%s",
640 "One Connect version mismatch");
641 miocnak(wq, mp, 0, ENOTSUP);
642 break;
643 }
644
645 /* fill the return values */
646 bcopy(OCE_MOD_NAME, drv_query->driver_name,
647 (sizeof (OCE_MOD_NAME) > 32) ?
648 31 : sizeof (OCE_MOD_NAME));
649 drv_query->driver_name[31] = '\0';
679 int
680 oce_m_promiscuous(void *arg, boolean_t enable)
681 {
682 struct oce_dev *dev = arg;
683 int ret = 0;
684
685 DEV_LOCK(dev);
686
687 if (dev->promisc == enable) {
688 DEV_UNLOCK(dev);
689 return (ret);
690 }
691
692 if (dev->suspended) {
693 /* remember the setting */
694 dev->promisc = enable;
695 DEV_UNLOCK(dev);
696 return (ret);
697 }
698
699 ret = oce_set_promiscuous(dev, enable, MBX_ASYNC_MQ);
700 if (ret == DDI_SUCCESS) {
701 dev->promisc = enable;
702 if (!(enable)) {
703 struct ether_addr *mca_drv_list;
704 mca_drv_list = &dev->multi_cast[0];
705 if (dev->num_mca > OCE_MAX_MCA) {
706 ret = oce_set_multicast_table(dev, dev->if_id,
707 &mca_drv_list[0], OCE_MAX_MCA, B_TRUE,
708 MBX_ASYNC_MQ);
709 } else {
710 ret = oce_set_multicast_table(dev, dev->if_id,
711 &mca_drv_list[0], dev->num_mca, B_FALSE,
712 MBX_ASYNC_MQ);
713 }
714 }
715 }
716 DEV_UNLOCK(dev);
717 return (ret);
718 } /* oce_m_promiscuous */
719
720 /*
721 * function to set a private property.
722 * Called from the set_prop GLD entry point
723 *
724 * dev - software handle to the device
725 * name - string containing the property name
726 * size - length of the string in name
727 * val - pointer to a location where the value to set is stored
728 *
729 * return EINVAL => invalid value in val 0 => success
730 */
731 static int
732 oce_set_priv_prop(struct oce_dev *dev, const char *name,
733 uint_t size, const void *val)
734 {
735 int ret = EINVAL;
736 long result;
737
738 _NOTE(ARGUNUSED(size));
739
740 if (NULL == val) {
741 return (EINVAL);
742 }
743 (void) ddi_strtol(val, (char **)NULL, 0, &result);
744 if (strcmp(name, "_tx_ring_size") == 0) {
745 if (result <= SIZE_2K) {
746 if (dev->tx_ring_size != result) {
747 dev->tx_ring_size = (uint32_t)result;
748 }
749 ret = 0;
750 }
751 } else if (strcmp(name, "_tx_bcopy_limit") == 0) {
752 if (result <= SIZE_2K) {
753 if (result != dev->tx_bcopy_limit)
754 dev->tx_bcopy_limit = (uint32_t)result;
755 ret = 0;
756 }
757 } else if (strcmp(name, "_tx_reclaim_threshold") == 0) {
758 if (result <= dev->tx_ring_size) {
759 if (dev->tx_reclaim_threshold != result) {
760 dev->tx_reclaim_threshold = (uint32_t)result;
761 }
762 ret = 0;
763 }
764 } else if (strcmp(name, "_rx_bcopy_limit") == 0) {
765 if (result <= dev->mtu) {
766 if (dev->rx_bcopy_limit != result) {
767 dev->rx_bcopy_limit = (uint32_t)result;
768 }
769 ret = 0;
770 }
771 } else if (strcmp(name, "_rx_pkts_per_intr") == 0) {
772 if (result <= dev->rx_ring_size) {
773 if (dev->rx_pkt_per_intr != result) {
774 dev->rx_pkt_per_intr = (uint32_t)result;
775 }
776 ret = 0;
777 }
778 } else if (strcmp(name, "_log_level") == 0) {
779 if (result <= OCE_MAX_LOG_SETTINGS) {
780 /* derive from the loglevel */
781 dev->severity = (uint16_t)(result & 0xffff);
782 dev->mod_mask = (uint16_t)(result >> 16);
783 }
784 ret = 0;
785 }
786
787 return (ret);
788 } /* oce_set_priv_prop */
789
790 /*
791 * function to get the value of a private property. Called from get_prop
792 *
793 * dev - software handle to the device
794 * name - string containing the property name
795 * size - length of the string contained name
796 * val - [OUT] pointer to the location where the result is returned
797 *
798 * return EINVAL => invalid request 0 => success
799 */
800 static int
801 oce_get_priv_prop(struct oce_dev *dev, const char *name,
802 uint_t size, void *val)
803 {
804 int value;
805
806 if (strcmp(name, "_tx_rings") == 0) {
807 value = dev->tx_rings;
808 } else if (strcmp(name, "_tx_ring_size") == 0) {
809 value = dev->tx_ring_size;
810 } else if (strcmp(name, "_tx_bcopy_limit") == 0) {
811 value = dev->tx_bcopy_limit;
812 } else if (strcmp(name, "_tx_reclaim_threshold") == 0) {
813 value = dev->tx_reclaim_threshold;
814 } else if (strcmp(name, "_rx_rings") == 0) {
815 value = dev->rx_rings;
816 } else if (strcmp(name, "_rx_rings_per_group") == 0) {
817 value = dev->rx_rings_per_group;
818 } else if (strcmp(name, "_rx_ring_size") == 0) {
819 value = dev->rx_ring_size;
820 } else if (strcmp(name, "_rx_bcopy_limit") == 0) {
821 value = dev->rx_bcopy_limit;
822 } else if (strcmp(name, "_rx_pkts_per_intr") == 0) {
823 value = dev->rx_pkt_per_intr;
824 } else if (strcmp(name, "_log_level") == 0) {
825 value = (dev->mod_mask << 16UL) | dev->severity;
826 } else {
827 return (ENOTSUP);
828 }
829
830 (void) snprintf(val, size, "%d", value);
831 return (0);
832 } /* oce_get_priv_prop */