Print this page
3534 Disable EEE support in igb for I350
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/igb/igb_gld.c
+++ new/usr/src/uts/common/io/igb/igb_gld.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 + * Copyright 2013, Nexenta Systems, Inc. All rights reserved.
28 29 */
29 30
30 31 #include "igb_sw.h"
31 32
/*
 * igb_m_stat - GLDv3 mc_getstat entry point: fetch one named statistic.
 *
 * The hardware statistics registers are clear-on-read, so each read is
 * accumulated into the matching kstat counter and the running total is
 * reported.  Returns 0 on success, ECANCELED while the device is
 * suspended, ENOTSUP for an unrecognized stat, or EIO if the register
 * access handle has faulted.
 */
int
igb_m_stat(void *arg, uint_t stat, uint64_t *val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	igb_stat_t *igb_ks;
	uint32_t low_val, high_val;

	igb_ks = (igb_stat_t *)igb->igb_ks->ks_data;

	mutex_enter(&igb->gen_lock);

	/* No register access while suspended */
	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	switch (stat) {
	case MAC_STAT_IFSPEED:
		/* link_speed is kept in Mbps; report bits per second */
		*val = igb->link_speed * 1000000ull;
		break;

	case MAC_STAT_MULTIRCV:
		igb_ks->mprc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MPRC);
		*val = igb_ks->mprc.value.ui64;
		break;

	case MAC_STAT_BRDCSTRCV:
		igb_ks->bprc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_BPRC);
		*val = igb_ks->bprc.value.ui64;
		break;

	case MAC_STAT_MULTIXMT:
		igb_ks->mptc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MPTC);
		*val = igb_ks->mptc.value.ui64;
		break;

	case MAC_STAT_BRDCSTXMT:
		igb_ks->bptc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_BPTC);
		*val = igb_ks->bptc.value.ui64;
		break;

	case MAC_STAT_NORCVBUF:
		igb_ks->rnbc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RNBC);
		*val = igb_ks->rnbc.value.ui64;
		break;

	case MAC_STAT_IERRORS:
		/*
		 * Input errors are the sum of several hardware error
		 * counters; each is accumulated before summing.
		 */
		igb_ks->rxerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RXERRC);
		igb_ks->algnerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ALGNERRC);
		igb_ks->rlec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RLEC);
		igb_ks->crcerrs.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CRCERRS);
		igb_ks->cexterr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb_ks->rxerrc.value.ui64 +
		    igb_ks->algnerrc.value.ui64 +
		    igb_ks->rlec.value.ui64 +
		    igb_ks->crcerrs.value.ui64 +
		    igb_ks->cexterr.value.ui64;
		break;

	case MAC_STAT_NOXMTBUF:
		*val = 0;
		break;

	case MAC_STAT_OERRORS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case MAC_STAT_COLLISIONS:
		igb_ks->colc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_COLC);
		*val = igb_ks->colc.value.ui64;
		break;

	case MAC_STAT_RBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TORL);
		high_val = E1000_READ_REG(hw, E1000_TORH);
		igb_ks->tor.value.ui64 +=
		    (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb_ks->tor.value.ui64;
		break;

	case MAC_STAT_IPACKETS:
		igb_ks->tpr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_TPR);
		*val = igb_ks->tpr.value.ui64;
		break;

	case MAC_STAT_OBYTES:
		/*
		 * The 64-bit register will reset whenever the upper
		 * 32 bits are read. So we need to read the lower
		 * 32 bits first, then read the upper 32 bits.
		 */
		low_val = E1000_READ_REG(hw, E1000_TOTL);
		high_val = E1000_READ_REG(hw, E1000_TOTH);
		igb_ks->tot.value.ui64 +=
		    (uint64_t)high_val << 32 | (uint64_t)low_val;
		*val = igb_ks->tot.value.ui64;
		break;

	case MAC_STAT_OPACKETS:
		igb_ks->tpt.value.ui64 +=
		    E1000_READ_REG(hw, E1000_TPT);
		*val = igb_ks->tpt.value.ui64;
		break;

	/* RFC 1643 stats */
	case ETHER_STAT_ALIGN_ERRORS:
		igb_ks->algnerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ALGNERRC);
		*val = igb_ks->algnerrc.value.ui64;
		break;

	case ETHER_STAT_FCS_ERRORS:
		igb_ks->crcerrs.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CRCERRS);
		*val = igb_ks->crcerrs.value.ui64;
		break;

	case ETHER_STAT_FIRST_COLLISIONS:
		igb_ks->scc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_SCC);
		*val = igb_ks->scc.value.ui64;
		break;

	case ETHER_STAT_MULTI_COLLISIONS:
		igb_ks->mcc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_MCC);
		*val = igb_ks->mcc.value.ui64;
		break;

	case ETHER_STAT_SQE_ERRORS:
		igb_ks->sec.value.ui64 +=
		    E1000_READ_REG(hw, E1000_SEC);
		*val = igb_ks->sec.value.ui64;
		break;

	case ETHER_STAT_DEFER_XMTS:
		igb_ks->dc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_DC);
		*val = igb_ks->dc.value.ui64;
		break;

	case ETHER_STAT_TX_LATE_COLLISIONS:
		igb_ks->latecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_LATECOL);
		*val = igb_ks->latecol.value.ui64;
		break;

	case ETHER_STAT_EX_COLLISIONS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case ETHER_STAT_MACXMT_ERRORS:
		igb_ks->ecol.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ECOL);
		*val = igb_ks->ecol.value.ui64;
		break;

	case ETHER_STAT_CARRIER_ERRORS:
		igb_ks->cexterr.value.ui64 +=
		    E1000_READ_REG(hw, E1000_CEXTERR);
		*val = igb_ks->cexterr.value.ui64;
		break;

	case ETHER_STAT_TOOLONG_ERRORS:
		igb_ks->roc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_ROC);
		*val = igb_ks->roc.value.ui64;
		break;

	case ETHER_STAT_MACRCV_ERRORS:
		igb_ks->rxerrc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RXERRC);
		*val = igb_ks->rxerrc.value.ui64;
		break;

	/* MII/GMII stats */
	case ETHER_STAT_XCVR_ADDR:
		/* The Internal PHY's MDI address for each MAC is 1 */
		*val = 1;
		break;

	case ETHER_STAT_XCVR_ID:
		*val = hw->phy.id | hw->phy.revision;
		break;

	case ETHER_STAT_XCVR_INUSE:
		/* Map current link speed/media to the XCVR_* codes */
		switch (igb->link_speed) {
		case SPEED_1000:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    XCVR_1000T : XCVR_1000X;
			break;
		case SPEED_100:
			*val =
			    (hw->phy.media_type == e1000_media_type_copper) ?
			    (igb->param_100t4_cap == 1) ?
			    XCVR_100T4 : XCVR_100T2 : XCVR_100X;
			break;
		case SPEED_10:
			*val = XCVR_10;
			break;
		default:
			*val = XCVR_NONE;
			break;
		}
		break;

	case ETHER_STAT_CAP_1000FDX:
		*val = igb->param_1000fdx_cap;
		break;

	case ETHER_STAT_CAP_1000HDX:
		*val = igb->param_1000hdx_cap;
		break;

	case ETHER_STAT_CAP_100FDX:
		*val = igb->param_100fdx_cap;
		break;

	case ETHER_STAT_CAP_100HDX:
		*val = igb->param_100hdx_cap;
		break;

	case ETHER_STAT_CAP_10FDX:
		*val = igb->param_10fdx_cap;
		break;

	case ETHER_STAT_CAP_10HDX:
		*val = igb->param_10hdx_cap;
		break;

	case ETHER_STAT_CAP_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_CAP_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_CAP_AUTONEG:
		*val = igb->param_autoneg_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000FDX:
		*val = igb->param_adv_1000fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_1000HDX:
		*val = igb->param_adv_1000hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100FDX:
		*val = igb->param_adv_100fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_100HDX:
		*val = igb->param_adv_100hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10FDX:
		*val = igb->param_adv_10fdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_10HDX:
		*val = igb->param_adv_10hdx_cap;
		break;

	case ETHER_STAT_ADV_CAP_ASMPAUSE:
		*val = igb->param_adv_asym_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_PAUSE:
		*val = igb->param_adv_pause_cap;
		break;

	case ETHER_STAT_ADV_CAP_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LP_CAP_1000FDX:
		*val = igb->param_lp_1000fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_1000HDX:
		*val = igb->param_lp_1000hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100FDX:
		*val = igb->param_lp_100fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_100HDX:
		*val = igb->param_lp_100hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10FDX:
		*val = igb->param_lp_10fdx_cap;
		break;

	case ETHER_STAT_LP_CAP_10HDX:
		*val = igb->param_lp_10hdx_cap;
		break;

	case ETHER_STAT_LP_CAP_ASMPAUSE:
		*val = igb->param_lp_asym_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_PAUSE:
		*val = igb->param_lp_pause_cap;
		break;

	case ETHER_STAT_LP_CAP_AUTONEG:
		*val = igb->param_lp_autoneg_cap;
		break;

	case ETHER_STAT_LINK_ASMPAUSE:
		*val = igb->param_asym_pause_cap;
		break;

	case ETHER_STAT_LINK_PAUSE:
		*val = igb->param_pause_cap;
		break;

	case ETHER_STAT_LINK_AUTONEG:
		*val = hw->mac.autoneg;
		break;

	case ETHER_STAT_LINK_DUPLEX:
		*val = (igb->link_duplex == FULL_DUPLEX) ?
		    LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
		break;

	case ETHER_STAT_TOOSHORT_ERRORS:
		igb_ks->ruc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RUC);
		*val = igb_ks->ruc.value.ui64;
		break;

	case ETHER_STAT_CAP_REMFAULT:
		*val = igb->param_rem_fault;
		break;

	case ETHER_STAT_ADV_REMFAULT:
		*val = igb->param_adv_rem_fault;
		break;

	case ETHER_STAT_LP_REMFAULT:
		*val = igb->param_lp_rem_fault;
		break;

	case ETHER_STAT_JABBER_ERRORS:
		igb_ks->rjc.value.ui64 +=
		    E1000_READ_REG(hw, E1000_RJC);
		*val = igb_ks->rjc.value.ui64;
		break;

	case ETHER_STAT_CAP_100T4:
		*val = igb->param_100t4_cap;
		break;

	case ETHER_STAT_ADV_CAP_100T4:
		*val = igb->param_adv_100t4_cap;
		break;

	case ETHER_STAT_LP_CAP_100T4:
		*val = igb->param_lp_100t4_cap;
		break;

	default:
		mutex_exit(&igb->gen_lock);
		return (ENOTSUP);
	}

	mutex_exit(&igb->gen_lock);

	/* Report degraded service if the register mapping has faulted */
	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (0);
}
436 437
437 438 /*
438 439 * Bring the device out of the reset/quiesced state that it
439 440 * was in when the interface was registered.
440 441 */
441 442 int
442 443 igb_m_start(void *arg)
443 444 {
444 445 igb_t *igb = (igb_t *)arg;
445 446
446 447 mutex_enter(&igb->gen_lock);
447 448
448 449 if (igb->igb_state & IGB_SUSPENDED) {
449 450 mutex_exit(&igb->gen_lock);
450 451 return (ECANCELED);
451 452 }
452 453
453 454 if (igb_start(igb, B_TRUE) != IGB_SUCCESS) {
454 455 mutex_exit(&igb->gen_lock);
455 456 return (EIO);
456 457 }
457 458
458 459 atomic_or_32(&igb->igb_state, IGB_STARTED);
459 460
460 461 mutex_exit(&igb->gen_lock);
461 462
462 463 /*
463 464 * Enable and start the watchdog timer
464 465 */
465 466 igb_enable_watchdog_timer(igb);
466 467
467 468 return (0);
468 469 }
469 470
470 471 /*
471 472 * Stop the device and put it in a reset/quiesced state such
472 473 * that the interface can be unregistered.
473 474 */
474 475 void
475 476 igb_m_stop(void *arg)
476 477 {
477 478 igb_t *igb = (igb_t *)arg;
478 479
479 480 mutex_enter(&igb->gen_lock);
480 481
481 482 if (igb->igb_state & IGB_SUSPENDED) {
482 483 mutex_exit(&igb->gen_lock);
483 484 return;
484 485 }
485 486
486 487 atomic_and_32(&igb->igb_state, ~IGB_STARTED);
487 488
488 489 igb_stop(igb, B_TRUE);
489 490
490 491 mutex_exit(&igb->gen_lock);
491 492
492 493 /*
493 494 * Disable and stop the watchdog timer
494 495 */
495 496 igb_disable_watchdog_timer(igb);
496 497 }
497 498
498 499 /*
499 500 * Set the promiscuity of the device.
500 501 */
501 502 int
502 503 igb_m_promisc(void *arg, boolean_t on)
503 504 {
504 505 igb_t *igb = (igb_t *)arg;
505 506 uint32_t reg_val;
506 507
507 508 mutex_enter(&igb->gen_lock);
508 509
509 510 if (igb->igb_state & IGB_SUSPENDED) {
510 511 mutex_exit(&igb->gen_lock);
511 512 return (ECANCELED);
512 513 }
513 514
514 515 reg_val = E1000_READ_REG(&igb->hw, E1000_RCTL);
515 516
516 517 if (on)
517 518 reg_val |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
518 519 else
519 520 reg_val &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
520 521
521 522 E1000_WRITE_REG(&igb->hw, E1000_RCTL, reg_val);
522 523
523 524 mutex_exit(&igb->gen_lock);
524 525
525 526 if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
526 527 ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
527 528 return (EIO);
528 529 }
529 530
530 531 return (0);
531 532 }
532 533
533 534 /*
534 535 * Add/remove the addresses to/from the set of multicast
535 536 * addresses for which the device will receive packets.
536 537 */
537 538 int
538 539 igb_m_multicst(void *arg, boolean_t add, const uint8_t *mcst_addr)
539 540 {
540 541 igb_t *igb = (igb_t *)arg;
541 542 int result;
542 543
543 544 mutex_enter(&igb->gen_lock);
544 545
545 546 if (igb->igb_state & IGB_SUSPENDED) {
546 547 mutex_exit(&igb->gen_lock);
547 548 return (ECANCELED);
548 549 }
549 550
550 551 result = (add) ? igb_multicst_add(igb, mcst_addr)
551 552 : igb_multicst_remove(igb, mcst_addr);
552 553
553 554 mutex_exit(&igb->gen_lock);
554 555
555 556 return (result);
556 557 }
557 558
558 559 /*
559 560 * Pass on M_IOCTL messages passed to the DLD, and support
560 561 * private IOCTLs for debugging and ndd.
561 562 */
562 563 void
563 564 igb_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
564 565 {
565 566 igb_t *igb = (igb_t *)arg;
566 567 struct iocblk *iocp;
567 568 enum ioc_reply status;
568 569
569 570 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
570 571 iocp->ioc_error = 0;
571 572
572 573 mutex_enter(&igb->gen_lock);
573 574 if (igb->igb_state & IGB_SUSPENDED) {
574 575 mutex_exit(&igb->gen_lock);
575 576 miocnak(q, mp, 0, EINVAL);
576 577 return;
577 578 }
578 579 mutex_exit(&igb->gen_lock);
579 580
580 581 switch (iocp->ioc_cmd) {
581 582 case LB_GET_INFO_SIZE:
582 583 case LB_GET_INFO:
583 584 case LB_GET_MODE:
584 585 case LB_SET_MODE:
585 586 status = igb_loopback_ioctl(igb, iocp, mp);
586 587 break;
587 588
588 589 default:
589 590 status = IOC_INVAL;
590 591 break;
591 592 }
592 593
593 594 /*
594 595 * Decide how to reply
595 596 */
596 597 switch (status) {
597 598 default:
598 599 case IOC_INVAL:
599 600 /*
600 601 * Error, reply with a NAK and EINVAL or the specified error
601 602 */
602 603 miocnak(q, mp, 0, iocp->ioc_error == 0 ?
603 604 EINVAL : iocp->ioc_error);
604 605 break;
605 606
606 607 case IOC_DONE:
607 608 /*
608 609 * OK, reply already sent
609 610 */
610 611 break;
611 612
612 613 case IOC_ACK:
613 614 /*
614 615 * OK, reply with an ACK
615 616 */
616 617 miocack(q, mp, 0, 0);
617 618 break;
618 619
619 620 case IOC_REPLY:
620 621 /*
621 622 * OK, send prepared reply as ACK or NAK
622 623 */
623 624 mp->b_datap->db_type = iocp->ioc_error == 0 ?
624 625 M_IOCACK : M_IOCNAK;
625 626 qreply(q, mp);
626 627 break;
627 628 }
628 629 }
629 630
630 631 /*
631 632 * Add a MAC address to the target RX group.
632 633 */
633 634 static int
634 635 igb_addmac(void *arg, const uint8_t *mac_addr)
635 636 {
636 637 igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
637 638 igb_t *igb = rx_group->igb;
638 639 struct e1000_hw *hw = &igb->hw;
639 640 int i, slot;
640 641
641 642 mutex_enter(&igb->gen_lock);
642 643
643 644 if (igb->igb_state & IGB_SUSPENDED) {
644 645 mutex_exit(&igb->gen_lock);
645 646 return (ECANCELED);
646 647 }
647 648
648 649 if (igb->unicst_avail == 0) {
649 650 /* no slots available */
650 651 mutex_exit(&igb->gen_lock);
651 652 return (ENOSPC);
652 653 }
653 654
654 655 /*
655 656 * The slots from 0 to igb->num_rx_groups are reserved slots which
656 657 * are 1 to 1 mapped with group index directly. The other slots are
657 658 * shared between the all of groups. While adding a MAC address,
658 659 * it will try to set the reserved slots first, then the shared slots.
659 660 */
660 661 slot = -1;
661 662 if (igb->unicst_addr[rx_group->index].mac.set == 1) {
662 663 /*
663 664 * The reserved slot for current group is used, find the free
664 665 * slots in the shared slots.
665 666 */
666 667 for (i = igb->num_rx_groups; i < igb->unicst_total; i++) {
667 668 if (igb->unicst_addr[i].mac.set == 0) {
668 669 slot = i;
669 670 break;
670 671 }
671 672 }
672 673 } else
673 674 slot = rx_group->index;
674 675
675 676 if (slot == -1) {
676 677 /* no slots available in the shared slots */
677 678 mutex_exit(&igb->gen_lock);
678 679 return (ENOSPC);
679 680 }
680 681
681 682 /* Set VMDq according to the mode supported by hardware. */
682 683 e1000_rar_set_vmdq(hw, mac_addr, slot, igb->vmdq_mode, rx_group->index);
683 684
684 685 bcopy(mac_addr, igb->unicst_addr[slot].mac.addr, ETHERADDRL);
685 686 igb->unicst_addr[slot].mac.group_index = rx_group->index;
686 687 igb->unicst_addr[slot].mac.set = 1;
687 688 igb->unicst_avail--;
688 689
689 690 mutex_exit(&igb->gen_lock);
690 691
691 692 return (0);
692 693 }
693 694
/*
 * Remove a MAC address from the specified RX group.
 *
 * Returns 0 on success, ECANCELED while suspended, or EINVAL if the
 * address is not currently programmed into any RAR slot.
 */
static int
igb_remmac(void *arg, const uint8_t *mac_addr)
{
	igb_rx_group_t *rx_group = (igb_rx_group_t *)arg;
	igb_t *igb = rx_group->igb;
	struct e1000_hw *hw = &igb->hw;
	int slot;

	mutex_enter(&igb->gen_lock);

	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	/* Locate the RAR slot holding this address */
	slot = igb_unicst_find(igb, mac_addr);
	if (slot == -1) {
		mutex_exit(&igb->gen_lock);
		return (EINVAL);
	}

	if (igb->unicst_addr[slot].mac.set == 0) {
		mutex_exit(&igb->gen_lock);
		return (EINVAL);
	}

	/* Clear the MAC address in the slot */
	e1000_rar_clear(hw, slot);
	igb->unicst_addr[slot].mac.set = 0;
	igb->unicst_avail++;

	mutex_exit(&igb->gen_lock);

	return (0);
}
732 733
733 734 /*
734 735 * Enable interrupt on the specificed rx ring.
735 736 */
736 737 int
737 738 igb_rx_ring_intr_enable(mac_intr_handle_t intrh)
738 739 {
739 740 igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
740 741 igb_t *igb = rx_ring->igb;
741 742 struct e1000_hw *hw = &igb->hw;
742 743 uint32_t index = rx_ring->index;
743 744
744 745 if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
745 746 /* Interrupt enabling for MSI-X */
746 747 igb->eims_mask |= (E1000_EICR_RX_QUEUE0 << index);
747 748 E1000_WRITE_REG(hw, E1000_EIMS, igb->eims_mask);
748 749 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
749 750 } else {
750 751 ASSERT(index == 0);
751 752 /* Interrupt enabling for MSI and legacy */
752 753 igb->ims_mask |= E1000_IMS_RXT0;
753 754 E1000_WRITE_REG(hw, E1000_IMS, igb->ims_mask);
754 755 }
755 756
756 757 E1000_WRITE_FLUSH(hw);
757 758
758 759 return (0);
759 760 }
760 761
761 762 /*
762 763 * Disable interrupt on the specificed rx ring.
763 764 */
764 765 int
765 766 igb_rx_ring_intr_disable(mac_intr_handle_t intrh)
766 767 {
767 768 igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)intrh;
768 769 igb_t *igb = rx_ring->igb;
769 770 struct e1000_hw *hw = &igb->hw;
770 771 uint32_t index = rx_ring->index;
771 772
772 773 if (igb->intr_type == DDI_INTR_TYPE_MSIX) {
773 774 /* Interrupt disabling for MSI-X */
774 775 igb->eims_mask &= ~(E1000_EICR_RX_QUEUE0 << index);
775 776 E1000_WRITE_REG(hw, E1000_EIMC,
776 777 (E1000_EICR_RX_QUEUE0 << index));
777 778 E1000_WRITE_REG(hw, E1000_EIAC, igb->eims_mask);
778 779 } else {
779 780 ASSERT(index == 0);
780 781 /* Interrupt disabling for MSI and legacy */
781 782 igb->ims_mask &= ~E1000_IMS_RXT0;
782 783 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
783 784 }
784 785
785 786 E1000_WRITE_FLUSH(hw);
786 787
787 788 return (0);
788 789 }
789 790
790 791 /*
791 792 * Get the global ring index by a ring index within a group.
792 793 */
793 794 int
794 795 igb_get_rx_ring_index(igb_t *igb, int gindex, int rindex)
795 796 {
796 797 igb_rx_ring_t *rx_ring;
797 798 int i;
798 799
799 800 for (i = 0; i < igb->num_rx_rings; i++) {
800 801 rx_ring = &igb->rx_rings[i];
801 802 if (rx_ring->group_index == gindex)
802 803 rindex--;
803 804 if (rindex < 0)
804 805 return (i);
805 806 }
806 807
807 808 return (-1);
808 809 }
809 810
810 811 static int
811 812 igb_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
812 813 {
813 814 igb_rx_ring_t *rx_ring = (igb_rx_ring_t *)rh;
814 815
815 816 mutex_enter(&rx_ring->rx_lock);
816 817 rx_ring->ring_gen_num = mr_gen_num;
817 818 mutex_exit(&rx_ring->rx_lock);
818 819 return (0);
819 820 }
820 821
/*
 * Callback function for MAC layer to register all rings.
 */
/* ARGSUSED */
void
igb_fill_ring(void *arg, mac_ring_type_t rtype, const int rg_index,
    const int index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	igb_t *igb = (igb_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		igb_rx_ring_t *rx_ring;
		int global_index;

		/*
		 * 'index' is the ring index within the group.
		 * We need the global ring index by searching in group.
		 */
		global_index = igb_get_rx_ring_index(igb, rg_index, index);

		ASSERT(global_index >= 0);

		rx_ring = &igb->rx_rings[global_index];
		rx_ring->ring_handle = rh;

		/* Wire up this rx ring's driver entry points */
		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = igb_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = (mac_ring_poll_t)igb_rx_ring_poll;
		infop->mri_stat = igb_rx_ring_stat;

		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = igb_rx_ring_intr_enable;
		mintr->mi_disable = igb_rx_ring_intr_disable;
		/* For MSI/MSI-X, expose the DDI handle of the ring's vector */
		if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    igb->htable[rx_ring->intr_vector];
		}
		break;
	}
	case MAC_RING_TYPE_TX: {
		ASSERT(index < igb->num_tx_rings);

		igb_tx_ring_t *tx_ring = &igb->tx_rings[index];
		tx_ring->ring_handle = rh;

		/* Tx rings have no start/stop; only the send entry point */
		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = igb_tx_ring_send;
		infop->mri_stat = igb_tx_ring_stat;
		if (igb->intr_type & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    igb->htable[tx_ring->intr_vector];
		}
		break;
	}
	default:
		break;
	}
}
884 885
/*
 * Callback for the MAC layer to register an RX group: hook up the
 * group handle and the group's add/remove MAC address entry points.
 * TX groups are not supported.
 */
void
igb_fill_group(void *arg, mac_ring_type_t rtype, const int index,
    mac_group_info_t *infop, mac_group_handle_t gh)
{
	igb_t *igb = (igb_t *)arg;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		igb_rx_group_t *rx_group;

		ASSERT((index >= 0) && (index < igb->num_rx_groups));

		rx_group = &igb->rx_groups[index];
		rx_group->group_handle = gh;

		infop->mgi_driver = (mac_group_driver_t)rx_group;
		infop->mgi_start = NULL;
		infop->mgi_stop = NULL;
		infop->mgi_addmac = igb_addmac;
		infop->mgi_remmac = igb_remmac;
		/* Rings are divided evenly among the groups */
		infop->mgi_count = (igb->num_rx_rings / igb->num_rx_groups);

		break;
	}
	case MAC_RING_TYPE_TX:
		break;
	default:
		break;
	}
}
915 916
/*
 * Obtain the MAC's capabilities and associated data from
 * the driver.
 *
 * Returns B_TRUE and fills in cap_data when the capability is
 * supported; B_FALSE otherwise.
 */
boolean_t
igb_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
{
	igb_t *igb = (igb_t *)arg;

	switch (cap) {
	case MAC_CAPAB_HCKSUM: {
		uint32_t *tx_hcksum_flags = cap_data;

		/*
		 * We advertise our capabilities only if tx hcksum offload is
		 * enabled. On receive, the stack will accept checksummed
		 * packets anyway, even if we haven't said we can deliver
		 * them.
		 */
		if (!igb->tx_hcksum_enable)
			return (B_FALSE);

		*tx_hcksum_flags = HCKSUM_INET_PARTIAL | HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *cap_lso = cap_data;

		/* LSO is advertised only when enabled via configuration */
		if (igb->lso_enable) {
			cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
			cap_lso->lso_basic_tcp_ipv4.lso_max = IGB_LSO_MAXLEN;
			break;
		} else {
			return (B_FALSE);
		}
	}
	case MAC_CAPAB_RINGS: {
		mac_capab_rings_t *cap_rings = cap_data;

		switch (cap_rings->mr_type) {
		case MAC_RING_TYPE_RX:
			/* Static ring-to-group assignment for rx */
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = igb->num_rx_rings;
			cap_rings->mr_gnum = igb->num_rx_groups;
			cap_rings->mr_rget = igb_fill_ring;
			cap_rings->mr_gget = igb_fill_group;
			cap_rings->mr_gaddring = NULL;
			cap_rings->mr_gremring = NULL;

			break;
		case MAC_RING_TYPE_TX:
			/* Tx rings are exposed without groups */
			cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
			cap_rings->mr_rnum = igb->num_tx_rings;
			cap_rings->mr_gnum = 0;
			cap_rings->mr_rget = igb_fill_ring;
			cap_rings->mr_gget = NULL;

			break;
		default:
			break;
		}
		break;
	}

	default:
		return (B_FALSE);
	}
	return (B_TRUE);
}
985 986
/*
 * igb_m_setprop - GLDv3 mc_setprop entry point: set a link property.
 *
 * The en_*/autoneg cases update the driver's advertised capabilities
 * and then jump to setup_link (a label inside the FLOWCTRL case) to
 * renegotiate the link.  Returns 0 on success, ECANCELED while
 * suspended, EBUSY in loopback mode or for an MTU change while
 * started, ENOTSUP for read-only or media-inappropriate properties,
 * EINVAL for bad values, EIO on a register access fault.
 */
int
igb_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
	igb_t *igb = (igb_t *)arg;
	struct e1000_hw *hw = &igb->hw;
	int err = 0;
	uint32_t flow_control;
	uint32_t cur_mtu, new_mtu;
	uint32_t rx_size;
	uint32_t tx_size;

	mutex_enter(&igb->gen_lock);
	if (igb->igb_state & IGB_SUSPENDED) {
		mutex_exit(&igb->gen_lock);
		return (ECANCELED);
	}

	if (igb->loopback_mode != IGB_LB_NONE && igb_param_locked(pr_num)) {
		/*
		 * All en_* parameters are locked (read-only)
		 * while the device is in any sort of loopback mode.
		 */
		mutex_exit(&igb->gen_lock);
		return (EBUSY);
	}

	switch (pr_num) {
	case MAC_PROP_EN_1000FDX_CAP:
		/* read/write on copper, read-only on serdes */
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_1000fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_1000fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_100FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_100fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_100fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_100HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_100hdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_100hdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_10FDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_10fdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_10fdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_EN_10HDX_CAP:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_en_10hdx_cap = *(uint8_t *)pr_val;
		igb->param_adv_10hdx_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_AUTONEG:
		if (hw->phy.media_type != e1000_media_type_copper) {
			err = ENOTSUP;
			break;
		}
		igb->param_adv_autoneg_cap = *(uint8_t *)pr_val;
		goto setup_link;
	case MAC_PROP_FLOWCTRL:
		/* Map the MAC flow-control setting onto e1000 fc modes */
		bcopy(pr_val, &flow_control, sizeof (flow_control));

		switch (flow_control) {
		default:
			err = EINVAL;
			break;
		case LINK_FLOWCTRL_NONE:
			hw->fc.requested_mode = e1000_fc_none;
			break;
		case LINK_FLOWCTRL_RX:
			hw->fc.requested_mode = e1000_fc_rx_pause;
			break;
		case LINK_FLOWCTRL_TX:
			hw->fc.requested_mode = e1000_fc_tx_pause;
			break;
		case LINK_FLOWCTRL_BI:
			hw->fc.requested_mode = e1000_fc_full;
			break;
		}
		/* Shared tail: renegotiate link for the cases above */
setup_link:
		if (err == 0) {
			if (igb_setup_link(igb, B_TRUE) != IGB_SUCCESS)
				err = EINVAL;
		}
		break;
	case MAC_PROP_ADV_1000FDX_CAP:
	case MAC_PROP_ADV_1000HDX_CAP:
	case MAC_PROP_ADV_100T4_CAP:
	case MAC_PROP_ADV_100FDX_CAP:
	case MAC_PROP_ADV_100HDX_CAP:
	case MAC_PROP_ADV_10FDX_CAP:
	case MAC_PROP_ADV_10HDX_CAP:
	case MAC_PROP_EN_1000HDX_CAP:
	case MAC_PROP_EN_100T4_CAP:
	case MAC_PROP_STATUS:
	case MAC_PROP_SPEED:
	case MAC_PROP_DUPLEX:
		err = ENOTSUP; /* read-only prop. Can't set this. */
		break;
	case MAC_PROP_MTU:
		/* adapter must be stopped for an MTU change */
		if (igb->igb_state & IGB_STARTED) {
			err = EBUSY;
			break;
		}

		cur_mtu = igb->default_mtu;
		bcopy(pr_val, &new_mtu, sizeof (new_mtu));
		if (new_mtu == cur_mtu) {
			err = 0;
			break;
		}

		if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) {
			err = EINVAL;
			break;
		}

		err = mac_maxsdu_update(igb->mac_hdl, new_mtu);
		if (err == 0) {
			igb->default_mtu = new_mtu;
			/* Frame size includes VLAN header and FCS */
			igb->max_frame_size = igb->default_mtu +
			    sizeof (struct ether_vlan_header) + ETHERFCSL;

			/*
			 * Set rx buffer size (rounded up to 1KB)
			 */
			rx_size = igb->max_frame_size + IPHDR_ALIGN_ROOM;
			igb->rx_buf_size = ((rx_size >> 10) + ((rx_size &
			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

			/*
			 * Set tx buffer size (rounded up to 1KB)
			 */
			tx_size = igb->max_frame_size;
			igb->tx_buf_size = ((tx_size >> 10) + ((tx_size &
			    (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
		}
		break;
	case MAC_PROP_PRIVATE:
		err = igb_set_priv_prop(igb, pr_name, pr_valsize, pr_val);
		break;
	default:
		err = EINVAL;
		break;
	}

	mutex_exit(&igb->gen_lock);

	if (igb_check_acc_handle(igb->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(igb->dip, DDI_SERVICE_DEGRADED);
		return (EIO);
	}

	return (err);
}
1159 1160
1160 1161 int
1161 1162 igb_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1162 1163 uint_t pr_valsize, void *pr_val)
1163 1164 {
1164 1165 igb_t *igb = (igb_t *)arg;
1165 1166 struct e1000_hw *hw = &igb->hw;
1166 1167 int err = 0;
1167 1168 uint32_t flow_control;
1168 1169 uint64_t tmp = 0;
1169 1170
1170 1171 switch (pr_num) {
1171 1172 case MAC_PROP_DUPLEX:
1172 1173 ASSERT(pr_valsize >= sizeof (link_duplex_t));
1173 1174 bcopy(&igb->link_duplex, pr_val, sizeof (link_duplex_t));
1174 1175 break;
1175 1176 case MAC_PROP_SPEED:
1176 1177 ASSERT(pr_valsize >= sizeof (uint64_t));
1177 1178 tmp = igb->link_speed * 1000000ull;
1178 1179 bcopy(&tmp, pr_val, sizeof (tmp));
1179 1180 break;
1180 1181 case MAC_PROP_AUTONEG:
1181 1182 ASSERT(pr_valsize >= sizeof (uint8_t));
1182 1183 *(uint8_t *)pr_val = igb->param_adv_autoneg_cap;
1183 1184 break;
1184 1185 case MAC_PROP_FLOWCTRL:
1185 1186 ASSERT(pr_valsize >= sizeof (uint32_t));
1186 1187 switch (hw->fc.requested_mode) {
1187 1188 case e1000_fc_none:
1188 1189 flow_control = LINK_FLOWCTRL_NONE;
1189 1190 break;
1190 1191 case e1000_fc_rx_pause:
1191 1192 flow_control = LINK_FLOWCTRL_RX;
1192 1193 break;
1193 1194 case e1000_fc_tx_pause:
1194 1195 flow_control = LINK_FLOWCTRL_TX;
1195 1196 break;
1196 1197 case e1000_fc_full:
1197 1198 flow_control = LINK_FLOWCTRL_BI;
1198 1199 break;
1199 1200 }
1200 1201 bcopy(&flow_control, pr_val, sizeof (flow_control));
1201 1202 break;
1202 1203 case MAC_PROP_ADV_1000FDX_CAP:
1203 1204 *(uint8_t *)pr_val = igb->param_adv_1000fdx_cap;
1204 1205 break;
1205 1206 case MAC_PROP_EN_1000FDX_CAP:
1206 1207 *(uint8_t *)pr_val = igb->param_en_1000fdx_cap;
1207 1208 break;
1208 1209 case MAC_PROP_ADV_1000HDX_CAP:
1209 1210 *(uint8_t *)pr_val = igb->param_adv_1000hdx_cap;
1210 1211 break;
1211 1212 case MAC_PROP_EN_1000HDX_CAP:
1212 1213 *(uint8_t *)pr_val = igb->param_en_1000hdx_cap;
1213 1214 break;
1214 1215 case MAC_PROP_ADV_100T4_CAP:
1215 1216 *(uint8_t *)pr_val = igb->param_adv_100t4_cap;
1216 1217 break;
1217 1218 case MAC_PROP_EN_100T4_CAP:
1218 1219 *(uint8_t *)pr_val = igb->param_en_100t4_cap;
1219 1220 break;
1220 1221 case MAC_PROP_ADV_100FDX_CAP:
1221 1222 *(uint8_t *)pr_val = igb->param_adv_100fdx_cap;
1222 1223 break;
1223 1224 case MAC_PROP_EN_100FDX_CAP:
1224 1225 *(uint8_t *)pr_val = igb->param_en_100fdx_cap;
1225 1226 break;
1226 1227 case MAC_PROP_ADV_100HDX_CAP:
1227 1228 *(uint8_t *)pr_val = igb->param_adv_100hdx_cap;
1228 1229 break;
1229 1230 case MAC_PROP_EN_100HDX_CAP:
1230 1231 *(uint8_t *)pr_val = igb->param_en_100hdx_cap;
1231 1232 break;
1232 1233 case MAC_PROP_ADV_10FDX_CAP:
1233 1234 *(uint8_t *)pr_val = igb->param_adv_10fdx_cap;
1234 1235 break;
1235 1236 case MAC_PROP_EN_10FDX_CAP:
1236 1237 *(uint8_t *)pr_val = igb->param_en_10fdx_cap;
1237 1238 break;
1238 1239 case MAC_PROP_ADV_10HDX_CAP:
1239 1240 *(uint8_t *)pr_val = igb->param_adv_10hdx_cap;
1240 1241 break;
1241 1242 case MAC_PROP_EN_10HDX_CAP:
1242 1243 *(uint8_t *)pr_val = igb->param_en_10hdx_cap;
1243 1244 break;
1244 1245 case MAC_PROP_PRIVATE:
1245 1246 err = igb_get_priv_prop(igb, pr_name, pr_valsize, pr_val);
1246 1247 break;
1247 1248 default:
1248 1249 err = EINVAL;
1249 1250 break;
1250 1251 }
1251 1252 return (err);
1252 1253 }
1253 1254
1254 1255 void
1255 1256 igb_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
1256 1257 mac_prop_info_handle_t prh)
1257 1258 {
1258 1259 igb_t *igb = (igb_t *)arg;
1259 1260 struct e1000_hw *hw = &igb->hw;
1260 1261 uint16_t phy_status, phy_ext_status;
1261 1262
1262 1263 switch (pr_num) {
1263 1264 case MAC_PROP_DUPLEX:
1264 1265 case MAC_PROP_SPEED:
1265 1266 case MAC_PROP_ADV_1000FDX_CAP:
1266 1267 case MAC_PROP_ADV_1000HDX_CAP:
1267 1268 case MAC_PROP_EN_1000HDX_CAP:
1268 1269 case MAC_PROP_ADV_100T4_CAP:
1269 1270 case MAC_PROP_EN_100T4_CAP:
1270 1271 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1271 1272 break;
1272 1273
1273 1274 case MAC_PROP_EN_1000FDX_CAP:
1274 1275 if (hw->phy.media_type != e1000_media_type_copper) {
1275 1276 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1276 1277 } else {
1277 1278 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
1278 1279 &phy_ext_status);
1279 1280 mac_prop_info_set_default_uint8(prh,
1280 1281 ((phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
1281 1282 (phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
1282 1283 }
1283 1284 break;
1284 1285
1285 1286 case MAC_PROP_ADV_100FDX_CAP:
1286 1287 case MAC_PROP_EN_100FDX_CAP:
1287 1288 if (hw->phy.media_type != e1000_media_type_copper) {
1288 1289 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1289 1290 } else {
1290 1291 (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1291 1292 mac_prop_info_set_default_uint8(prh,
1292 1293 ((phy_status & MII_SR_100X_FD_CAPS) ||
1293 1294 (phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0);
1294 1295 }
1295 1296 break;
1296 1297
1297 1298 case MAC_PROP_ADV_100HDX_CAP:
1298 1299 case MAC_PROP_EN_100HDX_CAP:
1299 1300 if (hw->phy.media_type != e1000_media_type_copper) {
1300 1301 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1301 1302 } else {
1302 1303 (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1303 1304 mac_prop_info_set_default_uint8(prh,
1304 1305 ((phy_status & MII_SR_100X_HD_CAPS) ||
1305 1306 (phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0);
1306 1307 }
1307 1308 break;
1308 1309
1309 1310 case MAC_PROP_ADV_10FDX_CAP:
1310 1311 case MAC_PROP_EN_10FDX_CAP:
1311 1312 if (hw->phy.media_type != e1000_media_type_copper) {
1312 1313 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1313 1314 } else {
1314 1315 (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1315 1316 mac_prop_info_set_default_uint8(prh,
1316 1317 (phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
1317 1318 }
1318 1319 break;
1319 1320
1320 1321 case MAC_PROP_ADV_10HDX_CAP:
1321 1322 case MAC_PROP_EN_10HDX_CAP:
1322 1323 if (hw->phy.media_type != e1000_media_type_copper) {
1323 1324 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1324 1325 } else {
1325 1326 (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1326 1327 mac_prop_info_set_default_uint8(prh,
1327 1328 (phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
1328 1329 }
1329 1330 break;
1330 1331
1331 1332 case MAC_PROP_AUTONEG:
1332 1333 if (hw->phy.media_type != e1000_media_type_copper) {
1333 1334 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1334 1335 } else {
1335 1336 (void) e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
1336 1337 mac_prop_info_set_default_uint8(prh,
1337 1338 (phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0);
1338 1339 }
1339 1340 break;
1340 1341
1341 1342 case MAC_PROP_FLOWCTRL:
1342 1343 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
1343 1344 break;
1344 1345
1345 1346 case MAC_PROP_MTU:
1346 1347 mac_prop_info_set_range_uint32(prh, MIN_MTU, MAX_MTU);
1347 1348 break;
1348 1349
1349 1350 case MAC_PROP_PRIVATE:
1350 1351 igb_priv_prop_info(igb, pr_name, prh);
1351 1352 break;
1352 1353 }
1353 1354
1354 1355 }
1355 1356
1356 1357 boolean_t
1357 1358 igb_param_locked(mac_prop_id_t pr_num)
1358 1359 {
1359 1360 /*
1360 1361 * All en_* parameters are locked (read-only) while
1361 1362 * the device is in any sort of loopback mode ...
1362 1363 */
1363 1364 switch (pr_num) {
1364 1365 case MAC_PROP_EN_1000FDX_CAP:
1365 1366 case MAC_PROP_EN_1000HDX_CAP:
1366 1367 case MAC_PROP_EN_100T4_CAP:
1367 1368 case MAC_PROP_EN_100FDX_CAP:
1368 1369 case MAC_PROP_EN_100HDX_CAP:
1369 1370 case MAC_PROP_EN_10FDX_CAP:
1370 1371 case MAC_PROP_EN_10HDX_CAP:
1371 1372 case MAC_PROP_AUTONEG:
1372 1373 case MAC_PROP_FLOWCTRL:
1373 1374 return (B_TRUE);
1374 1375 }
1375 1376 return (B_FALSE);
1376 1377 }
1377 1378
|
↓ open down ↓ |
1340 lines elided |
↑ open up ↑ |
1378 1379 /* ARGSUSED */
1379 1380 int
1380 1381 igb_set_priv_prop(igb_t *igb, const char *pr_name,
1381 1382 uint_t pr_valsize, const void *pr_val)
1382 1383 {
1383 1384 int err = 0;
1384 1385 long result;
1385 1386 struct e1000_hw *hw = &igb->hw;
1386 1387 int i;
1387 1388
1389 + if (strcmp(pr_name, "_eee_support") == 0) {
1390 + if (pr_val == NULL)
1391 + return (EINVAL);
1392 + (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1393 + switch (result) {
1394 + case 0:
1395 + case 1:
1396 + if (hw->mac.type != e1000_i350) {
1397 + /*
1398 + * For now, only supported on I350.
1399 + * Add new mac.type values (or use < instead)
1400 + * as new cards offer up EEE.
1401 + */
1402 + return (ENXIO);
1403 + }
1404 + /* Must set this prior to the set call. */
1405 + hw->dev_spec._82575.eee_disable = !result;
1406 + if (e1000_set_eee_i350(hw) != E1000_SUCCESS)
1407 + err = EIO;
1408 + break;
1409 + default:
1410 + err = EINVAL;
1411 + /* FALLTHRU */
1412 + }
1413 + return (err);
1414 + }
1388 1415 if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1389 1416 if (pr_val == NULL) {
1390 1417 err = EINVAL;
1391 1418 return (err);
1392 1419 }
1393 1420 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1394 1421 if (result < MIN_TX_COPY_THRESHOLD ||
1395 1422 result > MAX_TX_COPY_THRESHOLD)
1396 1423 err = EINVAL;
1397 1424 else {
1398 1425 igb->tx_copy_thresh = (uint32_t)result;
1399 1426 }
1400 1427 return (err);
1401 1428 }
1402 1429 if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1403 1430 if (pr_val == NULL) {
1404 1431 err = EINVAL;
1405 1432 return (err);
1406 1433 }
1407 1434 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1408 1435 if (result < MIN_TX_RECYCLE_THRESHOLD ||
1409 1436 result > MAX_TX_RECYCLE_THRESHOLD)
1410 1437 err = EINVAL;
1411 1438 else {
1412 1439 igb->tx_recycle_thresh = (uint32_t)result;
1413 1440 }
1414 1441 return (err);
1415 1442 }
1416 1443 if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1417 1444 if (pr_val == NULL) {
1418 1445 err = EINVAL;
1419 1446 return (err);
1420 1447 }
1421 1448 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1422 1449 if (result < MIN_TX_OVERLOAD_THRESHOLD ||
1423 1450 result > MAX_TX_OVERLOAD_THRESHOLD)
1424 1451 err = EINVAL;
1425 1452 else {
1426 1453 igb->tx_overload_thresh = (uint32_t)result;
1427 1454 }
1428 1455 return (err);
1429 1456 }
1430 1457 if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1431 1458 if (pr_val == NULL) {
1432 1459 err = EINVAL;
1433 1460 return (err);
1434 1461 }
1435 1462 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1436 1463 if (result < MIN_TX_RESCHED_THRESHOLD ||
1437 1464 result > MAX_TX_RESCHED_THRESHOLD ||
1438 1465 result > igb->tx_ring_size)
1439 1466 err = EINVAL;
1440 1467 else {
1441 1468 igb->tx_resched_thresh = (uint32_t)result;
1442 1469 }
1443 1470 return (err);
1444 1471 }
1445 1472 if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1446 1473 if (pr_val == NULL) {
1447 1474 err = EINVAL;
1448 1475 return (err);
1449 1476 }
1450 1477 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1451 1478 if (result < MIN_RX_COPY_THRESHOLD ||
1452 1479 result > MAX_RX_COPY_THRESHOLD)
1453 1480 err = EINVAL;
1454 1481 else {
1455 1482 igb->rx_copy_thresh = (uint32_t)result;
1456 1483 }
1457 1484 return (err);
1458 1485 }
1459 1486 if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1460 1487 if (pr_val == NULL) {
1461 1488 err = EINVAL;
1462 1489 return (err);
1463 1490 }
1464 1491 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1465 1492 if (result < MIN_RX_LIMIT_PER_INTR ||
1466 1493 result > MAX_RX_LIMIT_PER_INTR)
1467 1494 err = EINVAL;
1468 1495 else {
1469 1496 igb->rx_limit_per_intr = (uint32_t)result;
1470 1497 }
1471 1498 return (err);
1472 1499 }
1473 1500 if (strcmp(pr_name, "_intr_throttling") == 0) {
1474 1501 if (pr_val == NULL) {
1475 1502 err = EINVAL;
1476 1503 return (err);
1477 1504 }
1478 1505 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
1479 1506
1480 1507 if (result < igb->capab->min_intr_throttle ||
1481 1508 result > igb->capab->max_intr_throttle)
1482 1509 err = EINVAL;
1483 1510 else {
1484 1511 igb->intr_throttling[0] = (uint32_t)result;
1485 1512
1486 1513 for (i = 0; i < MAX_NUM_EITR; i++)
1487 1514 igb->intr_throttling[i] =
1488 1515 igb->intr_throttling[0];
1489 1516
1490 1517 /* Set interrupt throttling rate */
1491 1518 for (i = 0; i < igb->intr_cnt; i++)
1492 1519 E1000_WRITE_REG(hw, E1000_EITR(i),
1493 1520 igb->intr_throttling[i]);
1494 1521 }
1495 1522 return (err);
1496 1523 }
1497 1524 return (ENOTSUP);
1498 1525 }
1499 1526
|
↓ open down ↓ |
102 lines elided |
↑ open up ↑ |
1500 1527 int
1501 1528 igb_get_priv_prop(igb_t *igb, const char *pr_name, uint_t pr_valsize,
1502 1529 void *pr_val)
1503 1530 {
1504 1531 int value;
1505 1532
1506 1533 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
1507 1534 value = igb->param_adv_pause_cap;
1508 1535 } else if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1509 1536 value = igb->param_adv_asym_pause_cap;
1537 + } else if (strcmp(pr_name, "_eee_support") == 0) {
1538 + /*
1539 + * For now, only supported on I350. Add new mac.type values
1540 + * (or use < instead) as new cards offer up EEE.
1541 + */
1542 + value = (igb->hw.mac.type != e1000_i350) ? 0 :
1543 + !(igb->hw.dev_spec._82575.eee_disable);
1510 1544 } else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1511 1545 value = igb->tx_copy_thresh;
1512 1546 } else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1513 1547 value = igb->tx_recycle_thresh;
1514 1548 } else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1515 1549 value = igb->tx_overload_thresh;
1516 1550 } else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1517 1551 value = igb->tx_resched_thresh;
1518 1552 } else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1519 1553 value = igb->rx_copy_thresh;
1520 1554 } else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1521 1555 value = igb->rx_limit_per_intr;
1522 1556 } else if (strcmp(pr_name, "_intr_throttling") == 0) {
1523 1557 value = igb->intr_throttling[0];
1524 1558 } else {
1525 1559 return (ENOTSUP);
1526 1560 }
1527 1561
1528 1562 (void) snprintf(pr_val, pr_valsize, "%d", value);
1529 1563 return (0);
1530 1564 }
1531 1565
1532 1566 void
1533 1567 igb_priv_prop_info(igb_t *igb, const char *pr_name, mac_prop_info_handle_t prh)
1534 1568 {
1535 1569 char valstr[64];
1536 1570 int value;
1537 1571
1538 1572 if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
1539 1573 strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
1540 1574 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
1541 1575 return;
1542 1576 } else if (strcmp(pr_name, "_tx_copy_thresh") == 0) {
1543 1577 value = DEFAULT_TX_COPY_THRESHOLD;
1544 1578 } else if (strcmp(pr_name, "_tx_recycle_thresh") == 0) {
1545 1579 value = DEFAULT_TX_RECYCLE_THRESHOLD;
1546 1580 } else if (strcmp(pr_name, "_tx_overload_thresh") == 0) {
1547 1581 value = DEFAULT_TX_OVERLOAD_THRESHOLD;
1548 1582 } else if (strcmp(pr_name, "_tx_resched_thresh") == 0) {
1549 1583 value = DEFAULT_TX_RESCHED_THRESHOLD;
1550 1584 } else if (strcmp(pr_name, "_rx_copy_thresh") == 0) {
1551 1585 value = DEFAULT_RX_COPY_THRESHOLD;
1552 1586 } else if (strcmp(pr_name, "_rx_limit_per_intr") == 0) {
1553 1587 value = DEFAULT_RX_LIMIT_PER_INTR;
1554 1588 } else if (strcmp(pr_name, "_intr_throttling") == 0) {
1555 1589 value = igb->capab->def_intr_throttle;
1556 1590 } else {
1557 1591 return;
1558 1592 }
1559 1593
1560 1594 (void) snprintf(valstr, sizeof (valstr), "%d", value);
1561 1595 mac_prop_info_set_default_str(prh, valstr);
1562 1596 }
|
↓ open down ↓ |
43 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX