MFV: illumos-gate@5bb0bdfe588c5df0f63ff8ac292cd608a5f4492a
9950 Need support for Intel I219 v6-v9
Reviewed by: Jason King <jason.king@joyent.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Approved by: Garrett D'Amore <garrett@damore.org>
Author: Robert Mustacchi <rm@joyent.com>
4547 e1000g common code doesn't account for LockMAC
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Reviewed by: Jason King <jason.brian.king@gmail.com>
Approved by: Garrett D'Amore <garrett@damore.org>
re #12675 rb4109 some e1000g devices don't support 15 unicast addresses
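
The bulk of the change below follows a single pattern: the new e1000_pch_cnp MAC type (the I219 v6-v9 parts) is added alongside e1000_pch_spt in each hardware-type switch (flash mapping, maximum MTU, packet-buffer allocation), and the SPT-only flash-handle checks become range checks so they also cover the newer type. A condensed sketch of that pattern, using the names from the diff:

	switch (hw->mac.type) {
	case e1000_pch_spt:
	case e1000_pch_cnp:		/* new: I219 v6-v9 */
		/* SPT/CNP expose the NVM flash through BAR0 */
		osdep->ich_flash_handle = osdep->reg_handle;
		hw->flash_address = hw->hw_addr;
	default:
		break;
	}

	/* tear-down: the flash mapping is only separate on pre-SPT parts */
	if (osdep->ich_flash_handle != NULL && hw->mac.type < e1000_pch_spt)
		ddi_regs_map_free(&osdep->ich_flash_handle);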
--- old/usr/src/uts/common/io/e1000g/e1000g_main.c
+++ new/usr/src/uts/common/io/e1000g/e1000g_main.c
1 1 /*
2 2 * This file is provided under a CDDLv1 license. When using or
3 3 * redistributing this file, you may do so under this license.
4 4 * In redistributing this file this license must be included
5 5 * and no other modification of this header file is permitted.
6 6 *
7 7 * CDDL LICENSE SUMMARY
8 8 *
9 9 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
10 10 *
11 11 * The contents of this file are subject to the terms of Version
12 12 * 1.0 of the Common Development and Distribution License (the "License").
13 13 *
14 14 * You should have received a copy of the License with this software.
15 15 * You can obtain a copy of the License at
16 16 * http://www.opensolaris.org/os/licensing.
17 17 * See the License for the specific language governing permissions
18 18 * and limitations under the License.
19 19 */
20 20
21 21 /*
22 22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24
25 25 /*
26 26 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
27 27 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
28 - * Copyright (c) 2017, Joyent, Inc.
28 + * Copyright (c) 2018, Joyent, Inc.
29 29 */
30 30
31 31 /*
32 32 * **********************************************************************
33 33 * *
34 34 * Module Name: *
35 35 * e1000g_main.c *
36 36 * *
37 37 * Abstract: *
38 38 * This file contains the interface routines for the solaris OS. *
39 39 * It has all DDI entry point routines and GLD entry point routines. *
40 40 * *
 41 41 * This file also contains routines that take care of initialization, *
 42 42 * the uninit routine, and the interrupt routine. *
43 43 * *
44 44 * **********************************************************************
45 45 */
46 46
47 47 #include <sys/dlpi.h>
48 48 #include <sys/mac.h>
49 49 #include "e1000g_sw.h"
50 50 #include "e1000g_debug.h"
51 51
52 52 static char ident[] = "Intel PRO/1000 Ethernet";
53 53 /* LINTED E_STATIC_UNUSED */
54 54 static char e1000g_version[] = "Driver Ver. 5.3.24";
55 55
56 56 /*
57 57 * Proto types for DDI entry points
58 58 */
59 59 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
60 60 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
61 61 static int e1000g_quiesce(dev_info_t *);
62 62
63 63 /*
64 64 * init and intr routines prototype
65 65 */
66 66 static int e1000g_resume(dev_info_t *);
67 67 static int e1000g_suspend(dev_info_t *);
68 68 static uint_t e1000g_intr_pciexpress(caddr_t);
69 69 static uint_t e1000g_intr(caddr_t);
70 70 static void e1000g_intr_work(struct e1000g *, uint32_t);
71 71 #pragma inline(e1000g_intr_work)
72 72 static int e1000g_init(struct e1000g *);
73 73 static int e1000g_start(struct e1000g *, boolean_t);
74 74 static void e1000g_stop(struct e1000g *, boolean_t);
75 75 static int e1000g_m_start(void *);
76 76 static void e1000g_m_stop(void *);
77 77 static int e1000g_m_promisc(void *, boolean_t);
78 78 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
79 79 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
80 80 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
81 81 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
82 82 uint_t, const void *);
83 83 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
84 84 uint_t, void *);
85 85 static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t,
86 86 mac_prop_info_handle_t);
87 87 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
88 88 const void *);
89 89 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *);
90 90 static void e1000g_init_locks(struct e1000g *);
91 91 static void e1000g_destroy_locks(struct e1000g *);
92 92 static int e1000g_identify_hardware(struct e1000g *);
93 93 static int e1000g_regs_map(struct e1000g *);
94 94 static int e1000g_set_driver_params(struct e1000g *);
95 95 static void e1000g_set_bufsize(struct e1000g *);
96 96 static int e1000g_register_mac(struct e1000g *);
97 97 static boolean_t e1000g_rx_drain(struct e1000g *);
98 98 static boolean_t e1000g_tx_drain(struct e1000g *);
99 99 static void e1000g_init_unicst(struct e1000g *);
100 100 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
101 101 static int e1000g_alloc_rx_data(struct e1000g *);
102 102 static void e1000g_release_multicast(struct e1000g *);
103 103 static void e1000g_pch_limits(struct e1000g *);
104 104 static uint32_t e1000g_mtu2maxframe(uint32_t);
105 105
106 106 /*
107 107 * Local routines
108 108 */
109 109 static boolean_t e1000g_reset_adapter(struct e1000g *);
110 110 static void e1000g_tx_clean(struct e1000g *);
111 111 static void e1000g_rx_clean(struct e1000g *);
112 112 static void e1000g_link_timer(void *);
113 113 static void e1000g_local_timer(void *);
114 114 static boolean_t e1000g_link_check(struct e1000g *);
115 115 static boolean_t e1000g_stall_check(struct e1000g *);
116 116 static void e1000g_smartspeed(struct e1000g *);
117 117 static void e1000g_get_conf(struct e1000g *);
118 118 static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int,
119 119 int *);
120 120 static void enable_watchdog_timer(struct e1000g *);
121 121 static void disable_watchdog_timer(struct e1000g *);
122 122 static void start_watchdog_timer(struct e1000g *);
123 123 static void restart_watchdog_timer(struct e1000g *);
124 124 static void stop_watchdog_timer(struct e1000g *);
125 125 static void stop_link_timer(struct e1000g *);
126 126 static void stop_82547_timer(e1000g_tx_ring_t *);
127 127 static void e1000g_force_speed_duplex(struct e1000g *);
128 128 static void e1000g_setup_max_mtu(struct e1000g *);
129 129 static void e1000g_get_max_frame_size(struct e1000g *);
130 130 static boolean_t is_valid_mac_addr(uint8_t *);
131 131 static void e1000g_unattach(dev_info_t *, struct e1000g *);
132 132 static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *);
133 133 #ifdef E1000G_DEBUG
134 134 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
135 135 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
136 136 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
137 137 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
138 138 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
139 139 struct iocblk *, mblk_t *);
140 140 #endif
141 141 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
142 142 struct iocblk *, mblk_t *);
143 143 static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
144 144 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
145 145 static void e1000g_set_internal_loopback(struct e1000g *);
146 146 static void e1000g_set_external_loopback_1000(struct e1000g *);
147 147 static void e1000g_set_external_loopback_100(struct e1000g *);
148 148 static void e1000g_set_external_loopback_10(struct e1000g *);
149 149 static int e1000g_add_intrs(struct e1000g *);
150 150 static int e1000g_intr_add(struct e1000g *, int);
151 151 static int e1000g_rem_intrs(struct e1000g *);
152 152 static int e1000g_enable_intrs(struct e1000g *);
153 153 static int e1000g_disable_intrs(struct e1000g *);
154 154 static boolean_t e1000g_link_up(struct e1000g *);
155 155 #ifdef __sparc
156 156 static boolean_t e1000g_find_mac_address(struct e1000g *);
157 157 #endif
158 158 static void e1000g_get_phy_state(struct e1000g *);
159 159 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
160 160 const void *impl_data);
161 161 static void e1000g_fm_init(struct e1000g *Adapter);
162 162 static void e1000g_fm_fini(struct e1000g *Adapter);
163 163 static void e1000g_param_sync(struct e1000g *);
164 164 static void e1000g_get_driver_control(struct e1000_hw *);
165 165 static void e1000g_release_driver_control(struct e1000_hw *);
166 166 static void e1000g_restore_promisc(struct e1000g *Adapter);
167 167
168 168 char *e1000g_priv_props[] = {
169 169 "_tx_bcopy_threshold",
170 170 "_tx_interrupt_enable",
171 171 "_tx_intr_delay",
172 172 "_tx_intr_abs_delay",
173 173 "_rx_bcopy_threshold",
174 174 "_max_num_rcv_packets",
175 175 "_rx_intr_delay",
176 176 "_rx_intr_abs_delay",
177 177 "_intr_throttling_rate",
178 178 "_intr_adaptive",
179 179 "_adv_pause_cap",
180 180 "_adv_asym_pause_cap",
181 181 NULL
182 182 };
183 183
184 184 static struct cb_ops cb_ws_ops = {
185 185 nulldev, /* cb_open */
186 186 nulldev, /* cb_close */
187 187 nodev, /* cb_strategy */
188 188 nodev, /* cb_print */
189 189 nodev, /* cb_dump */
190 190 nodev, /* cb_read */
191 191 nodev, /* cb_write */
192 192 nodev, /* cb_ioctl */
193 193 nodev, /* cb_devmap */
194 194 nodev, /* cb_mmap */
195 195 nodev, /* cb_segmap */
196 196 nochpoll, /* cb_chpoll */
197 197 ddi_prop_op, /* cb_prop_op */
198 198 NULL, /* cb_stream */
199 199 D_MP | D_HOTPLUG, /* cb_flag */
200 200 CB_REV, /* cb_rev */
201 201 nodev, /* cb_aread */
202 202 nodev /* cb_awrite */
203 203 };
204 204
205 205 static struct dev_ops ws_ops = {
206 206 DEVO_REV, /* devo_rev */
207 207 0, /* devo_refcnt */
208 208 NULL, /* devo_getinfo */
209 209 nulldev, /* devo_identify */
210 210 nulldev, /* devo_probe */
211 211 e1000g_attach, /* devo_attach */
212 212 e1000g_detach, /* devo_detach */
213 213 nodev, /* devo_reset */
214 214 &cb_ws_ops, /* devo_cb_ops */
215 215 NULL, /* devo_bus_ops */
216 216 ddi_power, /* devo_power */
217 217 e1000g_quiesce /* devo_quiesce */
218 218 };
219 219
220 220 static struct modldrv modldrv = {
221 221 &mod_driverops, /* Type of module. This one is a driver */
 222 222 ident, /* Description string */
223 223 &ws_ops, /* driver ops */
224 224 };
225 225
226 226 static struct modlinkage modlinkage = {
227 227 MODREV_1, &modldrv, NULL
228 228 };
229 229
230 230 /* Access attributes for register mapping */
231 231 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
232 232 DDI_DEVICE_ATTR_V1,
233 233 DDI_STRUCTURE_LE_ACC,
234 234 DDI_STRICTORDER_ACC,
235 235 DDI_FLAGERR_ACC
236 236 };
237 237
238 238 #define E1000G_M_CALLBACK_FLAGS \
239 239 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
240 240
241 241 static mac_callbacks_t e1000g_m_callbacks = {
242 242 E1000G_M_CALLBACK_FLAGS,
243 243 e1000g_m_stat,
244 244 e1000g_m_start,
245 245 e1000g_m_stop,
246 246 e1000g_m_promisc,
247 247 e1000g_m_multicst,
248 248 NULL,
249 249 e1000g_m_tx,
250 250 NULL,
251 251 e1000g_m_ioctl,
252 252 e1000g_m_getcapab,
253 253 NULL,
254 254 NULL,
255 255 e1000g_m_setprop,
256 256 e1000g_m_getprop,
257 257 e1000g_m_propinfo
258 258 };
259 259
260 260 /*
261 261 * Global variables
262 262 */
263 263 uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K;
264 264 uint32_t e1000g_mblks_pending = 0;
265 265 /*
266 266 * Workaround for Dynamic Reconfiguration support, for x86 platform only.
267 267 * Here we maintain a private dev_info list if e1000g_force_detach is
268 268 * enabled. If we force the driver to detach while there are still some
269 269 * rx buffers retained in the upper layer, we have to keep a copy of the
270 270 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
271 271 * structure will be freed after the driver is detached. However when we
272 272 * finally free those rx buffers released by the upper layer, we need to
273 273 * refer to the dev_info to free the dma buffers. So we save a copy of
274 274 * the dev_info for this purpose. On x86 platform, we assume this copy
275 275 * of dev_info is always valid, but on SPARC platform, it could be invalid
276 276 * after the system board level DR operation. For this reason, the global
277 277 * variable e1000g_force_detach must be B_FALSE on SPARC platform.
278 278 */
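/*
 * In practice the life cycle is: e1000g_attach() copies the dev_info into
 * Adapter->priv_dip and links a private_devi_list_t node onto
 * e1000g_private_devi_list; e1000g_unattach() marks that node
 * E1000G_PRIV_DEVI_DETACH and frees it immediately if no rx buffers are
 * outstanding (pending_rx_count == 0), otherwise the node and the dev_info
 * copy are freed once the remaining rx buffers are returned, and _fini()
 * frees anything still on the list.
 */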
279 279 #ifdef __sparc
280 280 boolean_t e1000g_force_detach = B_FALSE;
281 281 #else
282 282 boolean_t e1000g_force_detach = B_TRUE;
283 283 #endif
284 284 private_devi_list_t *e1000g_private_devi_list = NULL;
285 285
286 286 /*
287 287 * The mutex e1000g_rx_detach_lock is defined to protect the processing of
288 288 * the private dev_info list, and to serialize the processing of rx buffer
289 289 * freeing and rx buffer recycling.
290 290 */
291 291 kmutex_t e1000g_rx_detach_lock;
292 292 /*
293 293 * The rwlock e1000g_dma_type_lock is defined to protect the global flag
294 294 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
295 295 * If there are many e1000g instances, the system may run out of DVMA
296 296 * resources during the initialization of the instances, then the flag will
297 297 * be changed to "USE_DMA". Because different e1000g instances are initialized
298 298 * in parallel, we need to use this lock to protect the flag.
299 299 */
300 300 krwlock_t e1000g_dma_type_lock;
301 301
302 302 /*
303 303 * The 82546 chipset is a dual-port device, both the ports share one eeprom.
304 304 * Based on the information from Intel, the 82546 chipset has some hardware
305 305 * problem. When one port is being reset and the other port is trying to
306 306 * access the eeprom, it could cause system hang or panic. To workaround this
307 307 * hardware problem, we use a global mutex to prevent such operations from
308 308 * happening simultaneously on different instances. This workaround is applied
309 309 * to all the devices supported by this driver.
310 310 */
311 311 kmutex_t e1000g_nvm_lock;
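/*
 * For example, e1000g_init() and e1000g_unattach() bracket their
 * e1000_reset_hw() calls with this mutex:
 *
 *	mutex_enter(&e1000g_nvm_lock);
 *	result = e1000_reset_hw(hw);
 *	mutex_exit(&e1000g_nvm_lock);
 *
 * so a reset issued on one 82546 port can never overlap an eeprom access
 * issued through the other port.
 */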
312 312
313 313 /*
314 314 * Loadable module configuration entry points for the driver
315 315 */
316 316
317 317 /*
318 318 * _init - module initialization
319 319 */
320 320 int
321 321 _init(void)
322 322 {
323 323 int status;
324 324
325 325 mac_init_ops(&ws_ops, WSNAME);
326 326 status = mod_install(&modlinkage);
327 327 if (status != DDI_SUCCESS)
328 328 mac_fini_ops(&ws_ops);
329 329 else {
330 330 mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL);
331 331 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
332 332 mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
333 333 }
334 334
335 335 return (status);
336 336 }
337 337
338 338 /*
339 339 * _fini - module finalization
340 340 */
341 341 int
342 342 _fini(void)
343 343 {
344 344 int status;
345 345
346 346 if (e1000g_mblks_pending != 0)
347 347 return (EBUSY);
348 348
349 349 status = mod_remove(&modlinkage);
350 350 if (status == DDI_SUCCESS) {
351 351 mac_fini_ops(&ws_ops);
352 352
353 353 if (e1000g_force_detach) {
354 354 private_devi_list_t *devi_node;
355 355
356 356 mutex_enter(&e1000g_rx_detach_lock);
357 357 while (e1000g_private_devi_list != NULL) {
358 358 devi_node = e1000g_private_devi_list;
359 359 e1000g_private_devi_list =
360 360 e1000g_private_devi_list->next;
361 361
362 362 kmem_free(devi_node->priv_dip,
363 363 sizeof (struct dev_info));
364 364 kmem_free(devi_node,
365 365 sizeof (private_devi_list_t));
366 366 }
367 367 mutex_exit(&e1000g_rx_detach_lock);
368 368 }
369 369
370 370 mutex_destroy(&e1000g_rx_detach_lock);
371 371 rw_destroy(&e1000g_dma_type_lock);
372 372 mutex_destroy(&e1000g_nvm_lock);
373 373 }
374 374
375 375 return (status);
376 376 }
377 377
378 378 /*
379 379 * _info - module information
380 380 */
381 381 int
382 382 _info(struct modinfo *modinfop)
383 383 {
384 384 return (mod_info(&modlinkage, modinfop));
385 385 }
386 386
387 387 /*
388 388 * e1000g_attach - driver attach
389 389 *
390 390 * This function is the device-specific initialization entry
391 391 * point. This entry point is required and must be written.
392 392 * The DDI_ATTACH command must be provided in the attach entry
393 393 * point. When attach() is called with cmd set to DDI_ATTACH,
394 394 * all normal kernel services (such as kmem_alloc(9F)) are
395 395 * available for use by the driver.
396 396 *
397 397 * The attach() function will be called once for each instance
398 398 * of the device on the system with cmd set to DDI_ATTACH.
399 399 * Until attach() succeeds, the only driver entry points which
400 400 * may be called are open(9E) and getinfo(9E).
401 401 */
402 402 static int
403 403 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
404 404 {
405 405 struct e1000g *Adapter;
406 406 struct e1000_hw *hw;
407 407 struct e1000g_osdep *osdep;
408 408 int instance;
409 409
410 410 switch (cmd) {
411 411 default:
412 412 e1000g_log(NULL, CE_WARN,
413 413 "Unsupported command send to e1000g_attach... ");
414 414 return (DDI_FAILURE);
415 415
416 416 case DDI_RESUME:
417 417 return (e1000g_resume(devinfo));
418 418
419 419 case DDI_ATTACH:
420 420 break;
421 421 }
422 422
423 423 /*
424 424 * get device instance number
425 425 */
426 426 instance = ddi_get_instance(devinfo);
427 427
428 428 /*
429 429 * Allocate soft data structure
430 430 */
431 431 Adapter =
432 432 (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
433 433
434 434 Adapter->dip = devinfo;
435 435 Adapter->instance = instance;
436 436 Adapter->tx_ring->adapter = Adapter;
437 437 Adapter->rx_ring->adapter = Adapter;
438 438
439 439 hw = &Adapter->shared;
440 440 osdep = &Adapter->osdep;
441 441 hw->back = osdep;
442 442 osdep->adapter = Adapter;
443 443
444 444 ddi_set_driver_private(devinfo, (caddr_t)Adapter);
445 445
446 446 /*
447 447 * Initialize for fma support
448 448 */
449 449 (void) e1000g_get_prop(Adapter, "fm-capable",
450 450 0, 0x0f,
451 451 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
452 452 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE,
453 453 &Adapter->fm_capabilities);
454 454 e1000g_fm_init(Adapter);
455 455 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT;
456 456
457 457 /*
458 458 * PCI Configure
459 459 */
460 460 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
461 461 e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
462 462 goto attach_fail;
463 463 }
464 464 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
465 465
466 466 /*
467 467 * Setup hardware
468 468 */
469 469 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
470 470 e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
471 471 goto attach_fail;
472 472 }
473 473
474 474 /*
475 475 * Map in the device registers.
476 476 */
477 477 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
478 478 e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
479 479 goto attach_fail;
480 480 }
481 481 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
482 482
483 483 /*
484 484 * Initialize driver parameters
485 485 */
486 486 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
487 487 goto attach_fail;
488 488 }
489 489 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
490 490
491 491 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
492 492 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
493 493 goto attach_fail;
494 494 }
495 495
496 496 /*
497 497 * Disable ULP support
498 498 */
499 499 (void) e1000_disable_ulp_lpt_lp(hw, TRUE);
500 500
501 501 /*
502 502 * Initialize interrupts
503 503 */
504 504 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
505 505 e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
506 506 goto attach_fail;
507 507 }
508 508 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
509 509
510 510 /*
511 511 * Initialize mutex's for this device.
512 512 * Do this before enabling the interrupt handler and
513 513 * register the softint to avoid the condition where
514 514 * interrupt handler can try using uninitialized mutex
515 515 */
516 516 e1000g_init_locks(Adapter);
517 517 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
518 518
519 519 /*
520 520 * Initialize Driver Counters
521 521 */
522 522 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
523 523 e1000g_log(Adapter, CE_WARN, "Init stats failed");
524 524 goto attach_fail;
525 525 }
526 526 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
527 527
528 528 /*
529 529 * Initialize chip hardware and software structures
530 530 */
531 531 rw_enter(&Adapter->chip_lock, RW_WRITER);
532 532 if (e1000g_init(Adapter) != DDI_SUCCESS) {
533 533 rw_exit(&Adapter->chip_lock);
534 534 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
535 535 goto attach_fail;
536 536 }
537 537 rw_exit(&Adapter->chip_lock);
538 538 Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
539 539
540 540 /*
541 541 * Register the driver to the MAC
542 542 */
543 543 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
544 544 e1000g_log(Adapter, CE_WARN, "Register MAC failed");
545 545 goto attach_fail;
546 546 }
547 547 Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
548 548
549 549 /*
550 550 * Now that mutex locks are initialized, and the chip is also
551 551 * initialized, enable interrupts.
552 552 */
553 553 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
554 554 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
555 555 goto attach_fail;
556 556 }
557 557 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
558 558
559 559 /*
 560 560 * If e1000g_force_detach is enabled, create a new entry in the
 561 561 * global private dip list; it maintains the priv_dip for DR
 562 562 * support after the driver is detached.
563 563 */
564 564 if (e1000g_force_detach) {
565 565 private_devi_list_t *devi_node;
566 566
567 567 Adapter->priv_dip =
568 568 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
569 569 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
570 570 sizeof (struct dev_info));
571 571
572 572 devi_node =
573 573 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
574 574
575 575 mutex_enter(&e1000g_rx_detach_lock);
576 576 devi_node->priv_dip = Adapter->priv_dip;
577 577 devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
578 578 devi_node->pending_rx_count = 0;
579 579
580 580 Adapter->priv_devi_node = devi_node;
581 581
582 582 if (e1000g_private_devi_list == NULL) {
583 583 devi_node->prev = NULL;
584 584 devi_node->next = NULL;
585 585 e1000g_private_devi_list = devi_node;
586 586 } else {
587 587 devi_node->prev = NULL;
588 588 devi_node->next = e1000g_private_devi_list;
589 589 e1000g_private_devi_list->prev = devi_node;
590 590 e1000g_private_devi_list = devi_node;
591 591 }
592 592 mutex_exit(&e1000g_rx_detach_lock);
593 593 }
594 594
595 595 Adapter->e1000g_state = E1000G_INITIALIZED;
596 596 return (DDI_SUCCESS);
597 597
598 598 attach_fail:
599 599 e1000g_unattach(devinfo, Adapter);
600 600 return (DDI_FAILURE);
601 601 }
602 602
603 603 static int
604 604 e1000g_register_mac(struct e1000g *Adapter)
605 605 {
606 606 struct e1000_hw *hw = &Adapter->shared;
607 607 mac_register_t *mac;
608 608 int err;
609 609
610 610 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
611 611 return (DDI_FAILURE);
612 612
613 613 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
614 614 mac->m_driver = Adapter;
615 615 mac->m_dip = Adapter->dip;
616 616 mac->m_src_addr = hw->mac.addr;
617 617 mac->m_callbacks = &e1000g_m_callbacks;
618 618 mac->m_min_sdu = 0;
619 619 mac->m_max_sdu = Adapter->default_mtu;
620 620 mac->m_margin = VLAN_TAGSZ;
621 621 mac->m_priv_props = e1000g_priv_props;
622 622 mac->m_v12n = MAC_VIRT_LEVEL1;
623 623
624 624 err = mac_register(mac, &Adapter->mh);
625 625 mac_free(mac);
626 626
627 627 return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
628 628 }
629 629
630 630 static int
631 631 e1000g_identify_hardware(struct e1000g *Adapter)
632 632 {
633 633 struct e1000_hw *hw = &Adapter->shared;
634 634 struct e1000g_osdep *osdep = &Adapter->osdep;
635 635
636 636 /* Get the device id */
637 637 hw->vendor_id =
638 638 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
639 639 hw->device_id =
640 640 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
641 641 hw->revision_id =
642 642 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
643 643 hw->subsystem_device_id =
644 644 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
645 645 hw->subsystem_vendor_id =
646 646 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
647 647
648 648 if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
649 649 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
650 650 "MAC type could not be set properly.");
651 651 return (DDI_FAILURE);
652 652 }
653 653
654 654 return (DDI_SUCCESS);
655 655 }
656 656
657 657 static int
658 658 e1000g_regs_map(struct e1000g *Adapter)
659 659 {
660 660 dev_info_t *devinfo = Adapter->dip;
661 661 struct e1000_hw *hw = &Adapter->shared;
662 662 struct e1000g_osdep *osdep = &Adapter->osdep;
663 663 off_t mem_size;
664 664 bar_info_t bar_info;
665 665 int offset, rnumber;
666 666
667 667 rnumber = ADAPTER_REG_SET;
668 668 /* Get size of adapter register memory */
669 669 if (ddi_dev_regsize(devinfo, rnumber, &mem_size) !=
670 670 DDI_SUCCESS) {
671 671 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
672 672 "ddi_dev_regsize for registers failed");
673 673 return (DDI_FAILURE);
674 674 }
675 675
676 676 /* Map adapter register memory */
677 677 if ((ddi_regs_map_setup(devinfo, rnumber,
678 678 (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
679 679 &osdep->reg_handle)) != DDI_SUCCESS) {
680 680 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
681 681 "ddi_regs_map_setup for registers failed");
682 682 goto regs_map_fail;
683 683 }
684 684
685 685 /* ICH needs to map flash memory */
686 686 switch (hw->mac.type) {
687 687 case e1000_ich8lan:
688 688 case e1000_ich9lan:
689 689 case e1000_ich10lan:
690 690 case e1000_pchlan:
691 691 case e1000_pch2lan:
692 692 case e1000_pch_lpt:
693 693 rnumber = ICH_FLASH_REG_SET;
694 694
695 695 /* get flash size */
696 696 if (ddi_dev_regsize(devinfo, rnumber,
697 697 &mem_size) != DDI_SUCCESS) {
698 698 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
699 699 "ddi_dev_regsize for ICH flash failed");
700 700 goto regs_map_fail;
701 701 }
702 702
703 703 /* map flash in */
704 704 if (ddi_regs_map_setup(devinfo, rnumber,
705 705 (caddr_t *)&hw->flash_address, 0,
706 706 mem_size, &e1000g_regs_acc_attr,
707 707 &osdep->ich_flash_handle) != DDI_SUCCESS) {
708 708 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
709 709 "ddi_regs_map_setup for ICH flash failed");
710 710 goto regs_map_fail;
711 711 }
712 712 break;
713 713 case e1000_pch_spt:
714 + case e1000_pch_cnp:
714 715 /*
715 716 * On the SPT, the device flash is actually in BAR0, not a
716 717 * separate BAR. Therefore we end up setting the
717 718 * ich_flash_handle to be the same as the register handle.
718 719 * We mark the same to reduce the confusion in the other
719 720 * functions and macros. Though this does make the set up and
720 721 * tear-down path slightly more complicated.
721 722 */
722 723 osdep->ich_flash_handle = osdep->reg_handle;
723 724 hw->flash_address = hw->hw_addr;
724 725 default:
725 726 break;
726 727 }
727 728
728 729 /* map io space */
729 730 switch (hw->mac.type) {
730 731 case e1000_82544:
731 732 case e1000_82540:
732 733 case e1000_82545:
733 734 case e1000_82546:
734 735 case e1000_82541:
735 736 case e1000_82541_rev_2:
736 737 /* find the IO bar */
737 738 rnumber = -1;
738 739 for (offset = PCI_CONF_BASE1;
739 740 offset <= PCI_CONF_BASE5; offset += 4) {
740 741 if (e1000g_get_bar_info(devinfo, offset, &bar_info)
741 742 != DDI_SUCCESS)
742 743 continue;
743 744 if (bar_info.type == E1000G_BAR_IO) {
744 745 rnumber = bar_info.rnumber;
745 746 break;
746 747 }
747 748 }
748 749
749 750 if (rnumber < 0) {
750 751 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
751 752 "No io space is found");
752 753 goto regs_map_fail;
753 754 }
754 755
755 756 /* get io space size */
756 757 if (ddi_dev_regsize(devinfo, rnumber,
757 758 &mem_size) != DDI_SUCCESS) {
758 759 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
759 760 "ddi_dev_regsize for io space failed");
760 761 goto regs_map_fail;
761 762 }
762 763
763 764 /* map io space */
764 765 if ((ddi_regs_map_setup(devinfo, rnumber,
765 766 (caddr_t *)&hw->io_base, 0, mem_size,
766 767 &e1000g_regs_acc_attr,
767 768 &osdep->io_reg_handle)) != DDI_SUCCESS) {
768 769 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
769 770 "ddi_regs_map_setup for io space failed");
770 771 goto regs_map_fail;
771 772 }
772 773 break;
773 774 default:
774 775 hw->io_base = 0;
775 776 break;
776 777 }
777 778
778 779 return (DDI_SUCCESS);
779 780
780 781 regs_map_fail:
781 782 if (osdep->reg_handle != NULL)
782 783 ddi_regs_map_free(&osdep->reg_handle);
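	/*
	 * On pch_spt and later types (now including pch_cnp) the flash space
	 * lives in BAR0 and ich_flash_handle simply aliases reg_handle, which
	 * was freed above, so only unmap the separate flash handle on the
	 * older ICH/PCH parts.
	 */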
783 - if (osdep->ich_flash_handle != NULL && hw->mac.type != e1000_pch_spt)
784 + if (osdep->ich_flash_handle != NULL && hw->mac.type < e1000_pch_spt)
784 785 ddi_regs_map_free(&osdep->ich_flash_handle);
785 786 return (DDI_FAILURE);
786 787 }
787 788
788 789 static int
789 790 e1000g_set_driver_params(struct e1000g *Adapter)
790 791 {
791 792 struct e1000_hw *hw;
792 793
793 794 hw = &Adapter->shared;
794 795
795 796 /* Set MAC type and initialize hardware functions */
796 797 if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
797 798 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
798 799 "Could not setup hardware functions");
799 800 return (DDI_FAILURE);
800 801 }
801 802
802 803 /* Get bus information */
803 804 if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
804 805 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
805 806 "Could not get bus information");
806 807 return (DDI_FAILURE);
807 808 }
808 809
809 810 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
810 811
811 812 hw->mac.autoneg_failed = B_TRUE;
812 813
813 814 /* Set the autoneg_wait_to_complete flag to B_FALSE */
814 815 hw->phy.autoneg_wait_to_complete = B_FALSE;
815 816
816 817 /* Adaptive IFS related changes */
817 818 hw->mac.adaptive_ifs = B_TRUE;
818 819
819 820 /* Enable phy init script for IGP phy of 82541/82547 */
820 821 if ((hw->mac.type == e1000_82547) ||
821 822 (hw->mac.type == e1000_82541) ||
822 823 (hw->mac.type == e1000_82547_rev_2) ||
823 824 (hw->mac.type == e1000_82541_rev_2))
824 825 e1000_init_script_state_82541(hw, B_TRUE);
825 826
826 827 /* Enable the TTL workaround for 82541/82547 */
827 828 e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
828 829
829 830 #ifdef __sparc
830 831 Adapter->strip_crc = B_TRUE;
831 832 #else
832 833 Adapter->strip_crc = B_FALSE;
833 834 #endif
834 835
835 836 /* setup the maximum MTU size of the chip */
836 837 e1000g_setup_max_mtu(Adapter);
837 838
838 839 /* Get speed/duplex settings in conf file */
839 840 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
840 841 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
841 842 e1000g_force_speed_duplex(Adapter);
842 843
843 844 /* Get Jumbo Frames settings in conf file */
844 845 e1000g_get_max_frame_size(Adapter);
845 846
846 847 /* Get conf file properties */
847 848 e1000g_get_conf(Adapter);
848 849
849 850 /* enforce PCH limits */
850 851 e1000g_pch_limits(Adapter);
851 852
852 853 /* Set Rx/Tx buffer size */
853 854 e1000g_set_bufsize(Adapter);
854 855
855 856 /* Master Latency Timer */
856 857 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
857 858
858 859 /* copper options */
859 860 if (hw->phy.media_type == e1000_media_type_copper) {
860 861 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
861 862 hw->phy.disable_polarity_correction = B_FALSE;
862 863 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */
863 864 }
864 865
865 866 /* The initial link state should be "unknown" */
866 867 Adapter->link_state = LINK_STATE_UNKNOWN;
867 868
868 869 /* Initialize rx parameters */
869 870 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY;
870 871 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY;
871 872
872 873 /* Initialize tx parameters */
873 874 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
874 875 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
875 876 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY;
876 877 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY;
877 878
878 879 /* Initialize rx parameters */
879 880 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
880 881
881 882 return (DDI_SUCCESS);
882 883 }
883 884
884 885 static void
885 886 e1000g_setup_max_mtu(struct e1000g *Adapter)
886 887 {
887 888 struct e1000_mac_info *mac = &Adapter->shared.mac;
888 889 struct e1000_phy_info *phy = &Adapter->shared.phy;
889 890
890 891 switch (mac->type) {
891 892 /* types that do not support jumbo frames */
892 893 case e1000_ich8lan:
893 894 case e1000_82573:
894 895 case e1000_82583:
895 896 Adapter->max_mtu = ETHERMTU;
896 897 break;
897 898 /* ich9 supports jumbo frames except on one phy type */
898 899 case e1000_ich9lan:
899 900 if (phy->type == e1000_phy_ife)
900 901 Adapter->max_mtu = ETHERMTU;
901 902 else
902 903 Adapter->max_mtu = MAXIMUM_MTU_9K;
903 904 break;
904 905 /* pch can do jumbo frames up to 4K */
905 906 case e1000_pchlan:
906 907 Adapter->max_mtu = MAXIMUM_MTU_4K;
907 908 break;
908 909 /* pch2 can do jumbo frames up to 9K */
909 910 case e1000_pch2lan:
910 911 case e1000_pch_lpt:
911 912 case e1000_pch_spt:
913 + case e1000_pch_cnp:
912 914 Adapter->max_mtu = MAXIMUM_MTU_9K;
913 915 break;
914 916 /* types with a special limit */
915 917 case e1000_82571:
916 918 case e1000_82572:
917 919 case e1000_82574:
918 920 case e1000_80003es2lan:
919 921 case e1000_ich10lan:
920 922 if (e1000g_jumbo_mtu >= ETHERMTU &&
921 923 e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) {
922 924 Adapter->max_mtu = e1000g_jumbo_mtu;
923 925 } else {
924 926 Adapter->max_mtu = MAXIMUM_MTU_9K;
925 927 }
926 928 break;
927 929 /* default limit is 16K */
928 930 default:
929 931 Adapter->max_mtu = FRAME_SIZE_UPTO_16K -
930 932 sizeof (struct ether_vlan_header) - ETHERFCSL;
931 933 break;
932 934 }
933 935 }
934 936
935 937 static void
936 938 e1000g_set_bufsize(struct e1000g *Adapter)
937 939 {
938 940 struct e1000_mac_info *mac = &Adapter->shared.mac;
939 941 uint64_t rx_size;
940 942 uint64_t tx_size;
941 943
942 944 dev_info_t *devinfo = Adapter->dip;
943 945 #ifdef __sparc
944 946 ulong_t iommu_pagesize;
945 947 #endif
946 948 /* Get the system page size */
947 949 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
948 950
949 951 #ifdef __sparc
950 952 iommu_pagesize = dvma_pagesize(devinfo);
951 953 if (iommu_pagesize != 0) {
952 954 if (Adapter->sys_page_sz == iommu_pagesize) {
953 955 if (iommu_pagesize > 0x4000)
954 956 Adapter->sys_page_sz = 0x4000;
955 957 } else {
956 958 if (Adapter->sys_page_sz > iommu_pagesize)
957 959 Adapter->sys_page_sz = iommu_pagesize;
958 960 }
959 961 }
960 962 if (Adapter->lso_enable) {
961 963 Adapter->dvma_page_num = E1000_LSO_MAXLEN /
962 964 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
963 965 } else {
964 966 Adapter->dvma_page_num = Adapter->max_frame_size /
965 967 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
966 968 }
967 969 ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
968 970 #endif
969 971
970 972 Adapter->min_frame_size = ETHERMIN + ETHERFCSL;
971 973
972 974 if (Adapter->mem_workaround_82546 &&
973 975 ((mac->type == e1000_82545) ||
974 976 (mac->type == e1000_82546) ||
975 977 (mac->type == e1000_82546_rev_3))) {
976 978 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
977 979 } else {
978 980 rx_size = Adapter->max_frame_size;
979 981 if ((rx_size > FRAME_SIZE_UPTO_2K) &&
980 982 (rx_size <= FRAME_SIZE_UPTO_4K))
981 983 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
982 984 else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
983 985 (rx_size <= FRAME_SIZE_UPTO_8K))
984 986 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
985 987 else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
986 988 (rx_size <= FRAME_SIZE_UPTO_16K))
987 989 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
988 990 else
989 991 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
990 992 }
991 993 Adapter->rx_buffer_size += E1000G_IPALIGNROOM;
992 994
993 995 tx_size = Adapter->max_frame_size;
994 996 if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
995 997 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
996 998 else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
997 999 (tx_size <= FRAME_SIZE_UPTO_8K))
998 1000 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
999 1001 else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
1000 1002 (tx_size <= FRAME_SIZE_UPTO_16K))
1001 1003 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
1002 1004 else
1003 1005 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
1004 1006
1005 1007 /*
 1006 1008 * For Wiseman adapters we have a requirement that receive buffers
 1007 1009 * be aligned on a 256 byte boundary. Since Livengood does not
 1008 1010 * require this, and forcing it for all hardware would have
 1009 1011 * performance implications, it is made applicable only to Wiseman,
 1010 1012 * and it only really matters with jumbo frames enabled; the rest of
 1011 1013 * the time normal frames are fine. However, an unaligned buffer
 1012 1014 * carries a potential risk of losing data, so all Wiseman boards
 1013 1015 * get 256 byte aligned buffers.
 1014 1016 *
 1015 1017 */
1016 1018 if (mac->type < e1000_82543)
1017 1019 Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
1018 1020 else
1019 1021 Adapter->rx_buf_align = 1;
1020 1022 }
1021 1023
1022 1024 /*
1023 1025 * e1000g_detach - driver detach
1024 1026 *
1025 1027 * The detach() function is the complement of the attach routine.
1026 1028 * If cmd is set to DDI_DETACH, detach() is used to remove the
1027 1029 * state associated with a given instance of a device node
1028 1030 * prior to the removal of that instance from the system.
1029 1031 *
1030 1032 * The detach() function will be called once for each instance
1031 1033 * of the device for which there has been a successful attach()
1032 1034 * once there are no longer any opens on the device.
1033 1035 *
 1034 1036 * Interrupt routines are disabled, and all memory allocated by this
 1035 1037 * driver is freed.
1036 1038 */
1037 1039 static int
1038 1040 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1039 1041 {
1040 1042 struct e1000g *Adapter;
1041 1043 boolean_t rx_drain;
1042 1044
1043 1045 switch (cmd) {
1044 1046 default:
1045 1047 return (DDI_FAILURE);
1046 1048
1047 1049 case DDI_SUSPEND:
1048 1050 return (e1000g_suspend(devinfo));
1049 1051
1050 1052 case DDI_DETACH:
1051 1053 break;
1052 1054 }
1053 1055
1054 1056 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1055 1057 if (Adapter == NULL)
1056 1058 return (DDI_FAILURE);
1057 1059
1058 1060 rx_drain = e1000g_rx_drain(Adapter);
1059 1061 if (!rx_drain && !e1000g_force_detach)
1060 1062 return (DDI_FAILURE);
1061 1063
1062 1064 if (mac_unregister(Adapter->mh) != 0) {
1063 1065 e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
1064 1066 return (DDI_FAILURE);
1065 1067 }
1066 1068 Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
1067 1069
1068 1070 ASSERT(!(Adapter->e1000g_state & E1000G_STARTED));
1069 1071
1070 1072 if (!e1000g_force_detach && !rx_drain)
1071 1073 return (DDI_FAILURE);
1072 1074
1073 1075 e1000g_unattach(devinfo, Adapter);
1074 1076
1075 1077 return (DDI_SUCCESS);
1076 1078 }
1077 1079
1078 1080 /*
1079 1081 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
1080 1082 */
1081 1083 void
1082 1084 e1000g_free_priv_devi_node(private_devi_list_t *devi_node)
1083 1085 {
1084 1086 ASSERT(e1000g_private_devi_list != NULL);
1085 1087 ASSERT(devi_node != NULL);
1086 1088
1087 1089 if (devi_node->prev != NULL)
1088 1090 devi_node->prev->next = devi_node->next;
1089 1091 if (devi_node->next != NULL)
1090 1092 devi_node->next->prev = devi_node->prev;
1091 1093 if (devi_node == e1000g_private_devi_list)
1092 1094 e1000g_private_devi_list = devi_node->next;
1093 1095
1094 1096 kmem_free(devi_node->priv_dip,
1095 1097 sizeof (struct dev_info));
1096 1098 kmem_free(devi_node,
1097 1099 sizeof (private_devi_list_t));
1098 1100 }
1099 1101
1100 1102 static void
1101 1103 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
1102 1104 {
1103 1105 private_devi_list_t *devi_node;
1104 1106 int result;
1105 1107
1106 1108 if (Adapter->e1000g_blink != NULL) {
1107 1109 ddi_periodic_delete(Adapter->e1000g_blink);
1108 1110 Adapter->e1000g_blink = NULL;
1109 1111 }
1110 1112
1111 1113 if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1112 1114 (void) e1000g_disable_intrs(Adapter);
1113 1115 }
1114 1116
1115 1117 if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
1116 1118 (void) mac_unregister(Adapter->mh);
1117 1119 }
1118 1120
1119 1121 if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1120 1122 (void) e1000g_rem_intrs(Adapter);
1121 1123 }
1122 1124
1123 1125 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
1124 1126 (void) ddi_prop_remove_all(devinfo);
1125 1127 }
1126 1128
1127 1129 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
1128 1130 kstat_delete((kstat_t *)Adapter->e1000g_ksp);
1129 1131 }
1130 1132
1131 1133 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
1132 1134 stop_link_timer(Adapter);
1133 1135
1134 1136 mutex_enter(&e1000g_nvm_lock);
1135 1137 result = e1000_reset_hw(&Adapter->shared);
1136 1138 mutex_exit(&e1000g_nvm_lock);
1137 1139
1138 1140 if (result != E1000_SUCCESS) {
1139 1141 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1140 1142 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1141 1143 }
1142 1144 }
1143 1145
1144 1146 e1000g_release_multicast(Adapter);
1145 1147
1146 1148 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
1147 1149 if (Adapter->osdep.reg_handle != NULL)
1148 1150 ddi_regs_map_free(&Adapter->osdep.reg_handle);
1149 1151 if (Adapter->osdep.ich_flash_handle != NULL &&
1150 - Adapter->shared.mac.type != e1000_pch_spt)
1152 + Adapter->shared.mac.type < e1000_pch_spt)
1151 1153 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
1152 1154 if (Adapter->osdep.io_reg_handle != NULL)
1153 1155 ddi_regs_map_free(&Adapter->osdep.io_reg_handle);
1154 1156 }
1155 1157
1156 1158 if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1157 1159 if (Adapter->osdep.cfg_handle != NULL)
1158 1160 pci_config_teardown(&Adapter->osdep.cfg_handle);
1159 1161 }
1160 1162
1161 1163 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1162 1164 e1000g_destroy_locks(Adapter);
1163 1165 }
1164 1166
1165 1167 if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) {
1166 1168 e1000g_fm_fini(Adapter);
1167 1169 }
1168 1170
1169 1171 mutex_enter(&e1000g_rx_detach_lock);
1170 1172 if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) {
1171 1173 devi_node = Adapter->priv_devi_node;
1172 1174 devi_node->flag |= E1000G_PRIV_DEVI_DETACH;
1173 1175
1174 1176 if (devi_node->pending_rx_count == 0) {
1175 1177 e1000g_free_priv_devi_node(devi_node);
1176 1178 }
1177 1179 }
1178 1180 mutex_exit(&e1000g_rx_detach_lock);
1179 1181
1180 1182 kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1181 1183
1182 1184 /*
1183 1185 * Another hotplug spec requirement,
1184 1186 * run ddi_set_driver_private(devinfo, null);
1185 1187 */
1186 1188 ddi_set_driver_private(devinfo, NULL);
1187 1189 }
1188 1190
1189 1191 /*
1190 1192 * Get the BAR type and rnumber for a given PCI BAR offset
1191 1193 */
1192 1194 static int
1193 1195 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info)
1194 1196 {
1195 1197 pci_regspec_t *regs;
1196 1198 uint_t regs_length;
1197 1199 int type, rnumber, rcount;
1198 1200
1199 1201 ASSERT((bar_offset >= PCI_CONF_BASE0) &&
1200 1202 (bar_offset <= PCI_CONF_BASE5));
1201 1203
1202 1204 /*
1203 1205 * Get the DDI "reg" property
1204 1206 */
1205 1207 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
 1206 1208 DDI_PROP_DONTPASS, "reg", (int **)&regs,
 1207 1209 &regs_length) != DDI_PROP_SUCCESS) {
1208 1210 return (DDI_FAILURE);
1209 1211 }
1210 1212
1211 1213 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t);
1212 1214 /*
1213 1215 * Check the BAR offset
1214 1216 */
1215 1217 for (rnumber = 0; rnumber < rcount; ++rnumber) {
1216 1218 if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) {
1217 1219 type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK;
1218 1220 break;
1219 1221 }
1220 1222 }
1221 1223
1222 1224 ddi_prop_free(regs);
1223 1225
1224 1226 if (rnumber >= rcount)
1225 1227 return (DDI_FAILURE);
1226 1228
1227 1229 switch (type) {
1228 1230 case PCI_ADDR_CONFIG:
1229 1231 bar_info->type = E1000G_BAR_CONFIG;
1230 1232 break;
1231 1233 case PCI_ADDR_IO:
1232 1234 bar_info->type = E1000G_BAR_IO;
1233 1235 break;
1234 1236 case PCI_ADDR_MEM32:
1235 1237 bar_info->type = E1000G_BAR_MEM32;
1236 1238 break;
1237 1239 case PCI_ADDR_MEM64:
1238 1240 bar_info->type = E1000G_BAR_MEM64;
1239 1241 break;
1240 1242 default:
1241 1243 return (DDI_FAILURE);
1242 1244 }
1243 1245 bar_info->rnumber = rnumber;
1244 1246 return (DDI_SUCCESS);
1245 1247 }
1246 1248
1247 1249 static void
1248 1250 e1000g_init_locks(struct e1000g *Adapter)
1249 1251 {
1250 1252 e1000g_tx_ring_t *tx_ring;
1251 1253 e1000g_rx_ring_t *rx_ring;
1252 1254
1253 1255 rw_init(&Adapter->chip_lock, NULL,
1254 1256 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1255 1257 mutex_init(&Adapter->link_lock, NULL,
1256 1258 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1257 1259 mutex_init(&Adapter->watchdog_lock, NULL,
1258 1260 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1259 1261
1260 1262 tx_ring = Adapter->tx_ring;
1261 1263
1262 1264 mutex_init(&tx_ring->tx_lock, NULL,
1263 1265 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1264 1266 mutex_init(&tx_ring->usedlist_lock, NULL,
1265 1267 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1266 1268 mutex_init(&tx_ring->freelist_lock, NULL,
1267 1269 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1268 1270
1269 1271 rx_ring = Adapter->rx_ring;
1270 1272
1271 1273 mutex_init(&rx_ring->rx_lock, NULL,
1272 1274 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1273 1275
1274 1276 mutex_init(&Adapter->e1000g_led_lock, NULL,
1275 1277 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1276 1278 }
1277 1279
1278 1280 static void
1279 1281 e1000g_destroy_locks(struct e1000g *Adapter)
1280 1282 {
1281 1283 e1000g_tx_ring_t *tx_ring;
1282 1284 e1000g_rx_ring_t *rx_ring;
1283 1285
1284 1286 mutex_destroy(&Adapter->e1000g_led_lock);
1285 1287
1286 1288 tx_ring = Adapter->tx_ring;
1287 1289 mutex_destroy(&tx_ring->tx_lock);
1288 1290 mutex_destroy(&tx_ring->usedlist_lock);
1289 1291 mutex_destroy(&tx_ring->freelist_lock);
1290 1292
1291 1293 rx_ring = Adapter->rx_ring;
1292 1294 mutex_destroy(&rx_ring->rx_lock);
1293 1295
1294 1296 mutex_destroy(&Adapter->link_lock);
1295 1297 mutex_destroy(&Adapter->watchdog_lock);
1296 1298 rw_destroy(&Adapter->chip_lock);
1297 1299
 1298 1300 /* destroy mutex initialized in shared code */
1299 1301 e1000_destroy_hw_mutex(&Adapter->shared);
1300 1302 }
1301 1303
1302 1304 static int
1303 1305 e1000g_resume(dev_info_t *devinfo)
1304 1306 {
1305 1307 struct e1000g *Adapter;
1306 1308
1307 1309 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1308 1310 if (Adapter == NULL)
1309 1311 e1000g_log(Adapter, CE_PANIC,
1310 1312 "Instance pointer is null\n");
1311 1313
1312 1314 if (Adapter->dip != devinfo)
1313 1315 e1000g_log(Adapter, CE_PANIC,
1314 1316 "Devinfo is not the same as saved devinfo\n");
1315 1317
1316 1318 rw_enter(&Adapter->chip_lock, RW_WRITER);
1317 1319
1318 1320 if (Adapter->e1000g_state & E1000G_STARTED) {
1319 1321 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1320 1322 rw_exit(&Adapter->chip_lock);
1321 1323 /*
1322 1324 * We note the failure, but return success, as the
1323 1325 * system is still usable without this controller.
1324 1326 */
1325 1327 e1000g_log(Adapter, CE_WARN,
1326 1328 "e1000g_resume: failed to restart controller\n");
1327 1329 return (DDI_SUCCESS);
1328 1330 }
1329 1331 /* Enable and start the watchdog timer */
1330 1332 enable_watchdog_timer(Adapter);
1331 1333 }
1332 1334
1333 1335 Adapter->e1000g_state &= ~E1000G_SUSPENDED;
1334 1336
1335 1337 rw_exit(&Adapter->chip_lock);
1336 1338
1337 1339 return (DDI_SUCCESS);
1338 1340 }
1339 1341
1340 1342 static int
1341 1343 e1000g_suspend(dev_info_t *devinfo)
1342 1344 {
1343 1345 struct e1000g *Adapter;
1344 1346
1345 1347 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1346 1348 if (Adapter == NULL)
1347 1349 return (DDI_FAILURE);
1348 1350
1349 1351 rw_enter(&Adapter->chip_lock, RW_WRITER);
1350 1352
1351 1353 Adapter->e1000g_state |= E1000G_SUSPENDED;
1352 1354
1353 1355 /* if the port isn't plumbed, we can simply return */
1354 1356 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
1355 1357 rw_exit(&Adapter->chip_lock);
1356 1358 return (DDI_SUCCESS);
1357 1359 }
1358 1360
1359 1361 e1000g_stop(Adapter, B_FALSE);
1360 1362
1361 1363 rw_exit(&Adapter->chip_lock);
1362 1364
1363 1365 /* Disable and stop all the timers */
1364 1366 disable_watchdog_timer(Adapter);
1365 1367 stop_link_timer(Adapter);
1366 1368 stop_82547_timer(Adapter->tx_ring);
1367 1369
1368 1370 return (DDI_SUCCESS);
1369 1371 }
1370 1372
1371 1373 static int
1372 1374 e1000g_init(struct e1000g *Adapter)
1373 1375 {
1374 1376 uint32_t pba;
1375 1377 uint32_t high_water;
1376 1378 struct e1000_hw *hw;
1377 1379 clock_t link_timeout;
1378 1380 int result;
1379 1381
1380 1382 hw = &Adapter->shared;
1381 1383
1382 1384 /*
1383 1385 * reset to put the hardware in a known state
1384 1386 * before we try to do anything with the eeprom
1385 1387 */
1386 1388 mutex_enter(&e1000g_nvm_lock);
1387 1389 result = e1000_reset_hw(hw);
1388 1390 mutex_exit(&e1000g_nvm_lock);
1389 1391
1390 1392 if (result != E1000_SUCCESS) {
1391 1393 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1392 1394 goto init_fail;
1393 1395 }
1394 1396
1395 1397 mutex_enter(&e1000g_nvm_lock);
1396 1398 result = e1000_validate_nvm_checksum(hw);
1397 1399 if (result < E1000_SUCCESS) {
1398 1400 /*
1399 1401 * Some PCI-E parts fail the first check due to
 1400 1402 * the link being in a sleep state. Call it again;
 1401 1403 * if it fails a second time, it's a real issue.
1402 1404 */
1403 1405 result = e1000_validate_nvm_checksum(hw);
1404 1406 }
1405 1407 mutex_exit(&e1000g_nvm_lock);
1406 1408
1407 1409 if (result < E1000_SUCCESS) {
1408 1410 e1000g_log(Adapter, CE_WARN,
1409 1411 "Invalid NVM checksum. Please contact "
1410 1412 "the vendor to update the NVM.");
1411 1413 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1412 1414 goto init_fail;
1413 1415 }
1414 1416
1415 1417 result = 0;
1416 1418 #ifdef __sparc
1417 1419 /*
1418 1420 * First, we try to get the local ethernet address from OBP. If
1419 1421 * failed, then we get it from the EEPROM of NIC card.
1420 1422 */
1421 1423 result = e1000g_find_mac_address(Adapter);
1422 1424 #endif
1423 1425 /* Get the local ethernet address. */
1424 1426 if (!result) {
1425 1427 mutex_enter(&e1000g_nvm_lock);
1426 1428 result = e1000_read_mac_addr(hw);
1427 1429 mutex_exit(&e1000g_nvm_lock);
1428 1430 }
1429 1431
1430 1432 if (result < E1000_SUCCESS) {
1431 1433 e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1432 1434 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1433 1435 goto init_fail;
1434 1436 }
1435 1437
1436 1438 /* check for valid mac address */
1437 1439 if (!is_valid_mac_addr(hw->mac.addr)) {
1438 1440 e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1439 1441 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1440 1442 goto init_fail;
1441 1443 }
1442 1444
1443 1445 /* Set LAA state for 82571 chipset */
1444 1446 e1000_set_laa_state_82571(hw, B_TRUE);
1445 1447
1446 1448 /* Master Latency Timer implementation */
1447 1449 if (Adapter->master_latency_timer) {
1448 1450 pci_config_put8(Adapter->osdep.cfg_handle,
1449 1451 PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1450 1452 }
1451 1453
1452 1454 if (hw->mac.type < e1000_82547) {
1453 1455 /*
1454 1456 * Total FIFO is 64K
1455 1457 */
1456 1458 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1457 1459 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1458 1460 else
1459 1461 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1460 1462 } else if ((hw->mac.type == e1000_82571) ||
1461 1463 (hw->mac.type == e1000_82572) ||
1462 1464 (hw->mac.type == e1000_80003es2lan)) {
1463 1465 /*
1464 1466 * Total FIFO is 48K
1465 1467 */
1466 1468 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1467 1469 pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */
1468 1470 else
1469 1471 pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */
1470 1472 } else if (hw->mac.type == e1000_82573) {
1471 1473 pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */
1472 1474 } else if (hw->mac.type == e1000_82574) {
1473 1475 /* Keep adapter default: 20K for Rx, 20K for Tx */
1474 1476 pba = E1000_READ_REG(hw, E1000_PBA);
1475 1477 } else if (hw->mac.type == e1000_ich8lan) {
1476 1478 pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */
1477 1479 } else if (hw->mac.type == e1000_ich9lan) {
1478 1480 pba = E1000_PBA_10K;
1479 1481 } else if (hw->mac.type == e1000_ich10lan) {
1480 1482 pba = E1000_PBA_10K;
1481 1483 } else if (hw->mac.type == e1000_pchlan) {
1482 1484 pba = E1000_PBA_26K;
1483 1485 } else if (hw->mac.type == e1000_pch2lan) {
1484 1486 pba = E1000_PBA_26K;
1485 1487 } else if (hw->mac.type == e1000_pch_lpt) {
1486 1488 pba = E1000_PBA_26K;
1487 1489 } else if (hw->mac.type == e1000_pch_spt) {
1488 1490 pba = E1000_PBA_26K;
1491 + } else if (hw->mac.type == e1000_pch_cnp) {
1492 + pba = E1000_PBA_26K;
1489 1493 } else {
1490 1494 /*
1491 1495 * Total FIFO is 40K
1492 1496 */
1493 1497 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1494 1498 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1495 1499 else
1496 1500 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1497 1501 }
1498 1502 E1000_WRITE_REG(hw, E1000_PBA, pba);
1499 1503
1500 1504 /*
1501 1505 * These parameters set thresholds for the adapter's generation(Tx)
1502 1506 * and response(Rx) to Ethernet PAUSE frames. These are just threshold
1503 1507 * settings. Flow control is enabled or disabled in the configuration
1504 1508 * file.
1505 1509 * High-water mark is set down from the top of the rx fifo (not
1506 1510 * sensitive to max_frame_size) and low-water is set just below
1507 1511 * high-water mark.
1508 1512 * The high water mark must be low enough to fit one full frame above
1509 1513 * it in the rx FIFO. Should be the lower of:
1510 1514 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1511 1515 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1512 1516 * Rx FIFO size minus one full frame.
1513 1517 */
1514 1518 high_water = min(((pba << 10) * 9 / 10),
1515 1519 ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 ||
1516 1520 hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ?
1517 1521 ((pba << 10) - (E1000_ERT_2048 << 3)) :
1518 1522 ((pba << 10) - Adapter->max_frame_size)));
1519 1523
1520 1524 hw->fc.high_water = high_water & 0xFFF8;
1521 1525 hw->fc.low_water = hw->fc.high_water - 8;
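	/*
	 * Worked example, assuming E1000_PBA_26K expands to 26 (KB) and a
	 * 1518-byte max frame (illustrative values only):
	 *	90% of the FIFO:   26 * 1024 * 9 / 10         = 23961
	 *	FIFO minus frame:  26 * 1024 - 1518           = 25106
	 *	high_water:        min(23961, 25106) & 0xFFF8 = 23960
	 *	low_water:         23960 - 8                  = 23952
	 */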
1522 1526
1523 1527 if (hw->mac.type == e1000_80003es2lan)
1524 1528 hw->fc.pause_time = 0xFFFF;
1525 1529 else
1526 1530 hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1527 1531 hw->fc.send_xon = B_TRUE;
1528 1532
1529 1533 /*
1530 1534 * Reset the adapter hardware the second time.
1531 1535 */
1532 1536 mutex_enter(&e1000g_nvm_lock);
1533 1537 result = e1000_reset_hw(hw);
1534 1538 mutex_exit(&e1000g_nvm_lock);
1535 1539
1536 1540 if (result != E1000_SUCCESS) {
1537 1541 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1538 1542 goto init_fail;
1539 1543 }
1540 1544
1541 1545 /* disable wakeup control by default */
1542 1546 if (hw->mac.type >= e1000_82544)
1543 1547 E1000_WRITE_REG(hw, E1000_WUC, 0);
1544 1548
1545 1549 /*
1546 1550 * MWI should be disabled on 82546.
1547 1551 */
1548 1552 if (hw->mac.type == e1000_82546)
1549 1553 e1000_pci_clear_mwi(hw);
1550 1554 else
1551 1555 e1000_pci_set_mwi(hw);
1552 1556
1553 1557 /*
1554 1558 * Configure/Initialize hardware
1555 1559 */
1556 1560 mutex_enter(&e1000g_nvm_lock);
1557 1561 result = e1000_init_hw(hw);
1558 1562 mutex_exit(&e1000g_nvm_lock);
1559 1563
1560 1564 if (result < E1000_SUCCESS) {
1561 1565 e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1562 1566 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1563 1567 goto init_fail;
1564 1568 }
1565 1569
1566 1570 /*
1567 1571 * Restore LED settings to the default from EEPROM
1568 1572 * to meet the standard for Sun platforms.
1569 1573 */
1570 1574 (void) e1000_cleanup_led(hw);
1571 1575
1572 1576 /* Disable Smart Power Down */
1573 1577 phy_spd_state(hw, B_FALSE);
1574 1578
1575 1579 /* Make sure driver has control */
1576 1580 e1000g_get_driver_control(hw);
1577 1581
1578 1582 /*
1579 1583 * Initialize unicast addresses.
1580 1584 */
1581 1585 e1000g_init_unicst(Adapter);
1582 1586
1583 1587 /*
1584 1588 	 * Set up and initialize the mctable structures. After this routine
1585 1589 	 * completes, the multicast table will be set.
1586 1590 */
1587 1591 e1000_update_mc_addr_list(hw,
1588 1592 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
1589 1593 msec_delay(5);
1590 1594
1591 1595 /*
1592 1596 * Implement Adaptive IFS
1593 1597 */
1594 1598 e1000_reset_adaptive(hw);
1595 1599
1596 1600 /* Setup Interrupt Throttling Register */
1597 1601 if (hw->mac.type >= e1000_82540) {
1598 1602 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1599 1603 } else
1600 1604 Adapter->intr_adaptive = B_FALSE;
1601 1605
1602 1606 /* Start the timer for link setup */
1603 1607 if (hw->mac.autoneg)
1604 1608 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1605 1609 else
1606 1610 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1607 1611
1608 1612 mutex_enter(&Adapter->link_lock);
1609 1613 if (hw->phy.autoneg_wait_to_complete) {
1610 1614 Adapter->link_complete = B_TRUE;
1611 1615 } else {
1612 1616 Adapter->link_complete = B_FALSE;
1613 1617 Adapter->link_tid = timeout(e1000g_link_timer,
1614 1618 (void *)Adapter, link_timeout);
1615 1619 }
1616 1620 mutex_exit(&Adapter->link_lock);
1617 1621
1618 1622 /* Save the state of the phy */
1619 1623 e1000g_get_phy_state(Adapter);
1620 1624
1621 1625 e1000g_param_sync(Adapter);
1622 1626
1623 1627 Adapter->init_count++;
1624 1628
1625 1629 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
1626 1630 goto init_fail;
1627 1631 }
1628 1632 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1629 1633 goto init_fail;
1630 1634 }
1631 1635
1632 1636 Adapter->poll_mode = e1000g_poll_mode;
1633 1637
1634 1638 return (DDI_SUCCESS);
1635 1639
1636 1640 init_fail:
1637 1641 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1638 1642 return (DDI_FAILURE);
1639 1643 }
1640 1644
1641 1645 static int
1642 1646 e1000g_alloc_rx_data(struct e1000g *Adapter)
1643 1647 {
1644 1648 e1000g_rx_ring_t *rx_ring;
1645 1649 e1000g_rx_data_t *rx_data;
1646 1650
1647 1651 rx_ring = Adapter->rx_ring;
1648 1652
1649 1653 rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP);
1650 1654
1651 1655 if (rx_data == NULL)
1652 1656 return (DDI_FAILURE);
1653 1657
1654 1658 rx_data->priv_devi_node = Adapter->priv_devi_node;
1655 1659 rx_data->rx_ring = rx_ring;
1656 1660
1657 1661 mutex_init(&rx_data->freelist_lock, NULL,
1658 1662 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1659 1663 mutex_init(&rx_data->recycle_lock, NULL,
1660 1664 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1661 1665
1662 1666 rx_ring->rx_data = rx_data;
1663 1667
1664 1668 return (DDI_SUCCESS);
1665 1669 }
1666 1670
1667 1671 void
1668 1672 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data)
1669 1673 {
1670 1674 rx_sw_packet_t *packet, *next_packet;
1671 1675
1672 1676 if (rx_data == NULL)
1673 1677 return;
1674 1678
1675 1679 packet = rx_data->packet_area;
1676 1680 while (packet != NULL) {
1677 1681 next_packet = packet->next;
1678 1682 e1000g_free_rx_sw_packet(packet, B_TRUE);
1679 1683 packet = next_packet;
1680 1684 }
1681 1685 rx_data->packet_area = NULL;
1682 1686 }
1683 1687
1684 1688 void
1685 1689 e1000g_free_rx_data(e1000g_rx_data_t *rx_data)
1686 1690 {
1687 1691 if (rx_data == NULL)
1688 1692 return;
1689 1693
1690 1694 mutex_destroy(&rx_data->freelist_lock);
1691 1695 mutex_destroy(&rx_data->recycle_lock);
1692 1696
1693 1697 kmem_free(rx_data, sizeof (e1000g_rx_data_t));
1694 1698 }
1695 1699
1696 1700 /*
1697 1701 * Check if the link is up
1698 1702 */
1699 1703 static boolean_t
1700 1704 e1000g_link_up(struct e1000g *Adapter)
1701 1705 {
1702 1706 struct e1000_hw *hw = &Adapter->shared;
1703 1707 boolean_t link_up = B_FALSE;
1704 1708
1705 1709 /*
1706 1710 	 * get_link_status is set in the interrupt handler on a
1707 1711 	 * link-status-change or rx sequence error interrupt. It remains
1708 1712 	 * set until e1000_check_for_link() establishes link; this applies
1709 1713 	 * to copper adapters only.
1710 1714 */
1711 1715 switch (hw->phy.media_type) {
1712 1716 case e1000_media_type_copper:
1713 1717 if (hw->mac.get_link_status) {
1714 1718 /*
1715 - * SPT devices need a bit of extra time before we ask
1716 - * them.
1719 + * SPT and newer devices need a bit of extra time before
1720 + * we ask them.
1717 1721 */
1718 - if (hw->mac.type == e1000_pch_spt)
1722 + if (hw->mac.type >= e1000_pch_spt)
1719 1723 msec_delay(50);
1720 1724 (void) e1000_check_for_link(hw);
1721 1725 if ((E1000_READ_REG(hw, E1000_STATUS) &
1722 1726 E1000_STATUS_LU)) {
1723 1727 link_up = B_TRUE;
1724 1728 } else {
1725 1729 link_up = !hw->mac.get_link_status;
1726 1730 }
1727 1731 } else {
1728 1732 link_up = B_TRUE;
1729 1733 }
1730 1734 break;
1731 1735 case e1000_media_type_fiber:
1732 1736 (void) e1000_check_for_link(hw);
1733 1737 link_up = (E1000_READ_REG(hw, E1000_STATUS) &
1734 1738 E1000_STATUS_LU);
1735 1739 break;
1736 1740 case e1000_media_type_internal_serdes:
1737 1741 (void) e1000_check_for_link(hw);
1738 1742 link_up = hw->mac.serdes_has_link;
1739 1743 break;
1740 1744 }
1741 1745
1742 1746 return (link_up);
1743 1747 }
1744 1748
1745 1749 static void
1746 1750 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1747 1751 {
1748 1752 struct iocblk *iocp;
1749 1753 struct e1000g *e1000gp;
1750 1754 enum ioc_reply status;
1751 1755
1752 1756 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
1753 1757 iocp->ioc_error = 0;
1754 1758 e1000gp = (struct e1000g *)arg;
1755 1759
1756 1760 ASSERT(e1000gp);
1757 1761 if (e1000gp == NULL) {
1758 1762 miocnak(q, mp, 0, EINVAL);
1759 1763 return;
1760 1764 }
1761 1765
1762 1766 rw_enter(&e1000gp->chip_lock, RW_READER);
1763 1767 if (e1000gp->e1000g_state & E1000G_SUSPENDED) {
1764 1768 rw_exit(&e1000gp->chip_lock);
1765 1769 miocnak(q, mp, 0, EINVAL);
1766 1770 return;
1767 1771 }
1768 1772 rw_exit(&e1000gp->chip_lock);
1769 1773
1770 1774 switch (iocp->ioc_cmd) {
1771 1775
1772 1776 case LB_GET_INFO_SIZE:
1773 1777 case LB_GET_INFO:
1774 1778 case LB_GET_MODE:
1775 1779 case LB_SET_MODE:
1776 1780 status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1777 1781 break;
1778 1782
1779 1783
1780 1784 #ifdef E1000G_DEBUG
1781 1785 case E1000G_IOC_REG_PEEK:
1782 1786 case E1000G_IOC_REG_POKE:
1783 1787 status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1784 1788 break;
1785 1789 case E1000G_IOC_CHIP_RESET:
1786 1790 e1000gp->reset_count++;
1787 1791 if (e1000g_reset_adapter(e1000gp))
1788 1792 status = IOC_ACK;
1789 1793 else
1790 1794 status = IOC_INVAL;
1791 1795 break;
1792 1796 #endif
1793 1797 default:
1794 1798 status = IOC_INVAL;
1795 1799 break;
1796 1800 }
1797 1801
1798 1802 /*
1799 1803 * Decide how to reply
1800 1804 */
1801 1805 switch (status) {
1802 1806 default:
1803 1807 case IOC_INVAL:
1804 1808 /*
1805 1809 * Error, reply with a NAK and EINVAL or the specified error
1806 1810 */
1807 1811 miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1808 1812 EINVAL : iocp->ioc_error);
1809 1813 break;
1810 1814
1811 1815 case IOC_DONE:
1812 1816 /*
1813 1817 * OK, reply already sent
1814 1818 */
1815 1819 break;
1816 1820
1817 1821 case IOC_ACK:
1818 1822 /*
1819 1823 * OK, reply with an ACK
1820 1824 */
1821 1825 miocack(q, mp, 0, 0);
1822 1826 break;
1823 1827
1824 1828 case IOC_REPLY:
1825 1829 /*
1826 1830 * OK, send prepared reply as ACK or NAK
1827 1831 */
1828 1832 mp->b_datap->db_type = iocp->ioc_error == 0 ?
1829 1833 M_IOCACK : M_IOCNAK;
1830 1834 qreply(q, mp);
1831 1835 break;
1832 1836 }
1833 1837 }
1834 1838
1835 1839 /*
1836 1840 * The default value of e1000g_poll_mode == 0 assumes that the NIC is
1837 1841 * capable of supporting only one interrupt and we shouldn't disable
1838 1842 * the physical interrupt. In this case we let the interrupt come and
1839 1843  * we queue the packets in the rx ring itself if we are in polling
1840 1844  * mode (better latency, but slightly lower performance and a very
1841 1845  * high interrupt count in mpstat, which is harmless).
1842 1846 *
1843 1847 * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt
1844 1848 * which can be disabled in poll mode. This gives better overall
1845 1849 * throughput (compared to the mode above), shows very low interrupt
1846 1850 * count but has slightly higher latency since we pick the packets when
1847 1851 * the poll thread does polling.
1848 1852 *
1849 1853  * Currently, this flag should be enabled only while doing performance
1850 1854  * measurement or when it can be guaranteed that the entire NIC going
1851 1855  * into poll mode will not harm any traffic, such as cluster heartbeats.
1852 1856 */
1853 1857 int e1000g_poll_mode = 0;
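/*
 * As an illustration (a sketch of one common approach, not part of this
 * change): on illumos-based systems a driver global like this is typically
 * overridden at boot time from /etc/system, e.g.
 *
 *	set e1000g:e1000g_poll_mode = 1
 *
 * followed by a reboot, or patched at runtime with mdb -kw when testing.
 */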
1854 1858
1855 1859 /*
1856 1860  * Called from the upper layers when the driver is in polling mode to
1857 1861 * pick up any queued packets. Care should be taken to not block
1858 1862 * this thread.
1859 1863 */
1860 1864 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup)
1861 1865 {
1862 1866 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg;
1863 1867 mblk_t *mp = NULL;
1864 1868 mblk_t *tail;
1865 1869 struct e1000g *adapter;
1866 1870
1867 1871 adapter = rx_ring->adapter;
1868 1872
1869 1873 rw_enter(&adapter->chip_lock, RW_READER);
1870 1874
1871 1875 if (adapter->e1000g_state & E1000G_SUSPENDED) {
1872 1876 rw_exit(&adapter->chip_lock);
1873 1877 return (NULL);
1874 1878 }
1875 1879
1876 1880 mutex_enter(&rx_ring->rx_lock);
1877 1881 mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup);
1878 1882 mutex_exit(&rx_ring->rx_lock);
1879 1883 rw_exit(&adapter->chip_lock);
1880 1884 return (mp);
1881 1885 }
1882 1886
1883 1887 static int
1884 1888 e1000g_m_start(void *arg)
1885 1889 {
1886 1890 struct e1000g *Adapter = (struct e1000g *)arg;
1887 1891
1888 1892 rw_enter(&Adapter->chip_lock, RW_WRITER);
1889 1893
1890 1894 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1891 1895 rw_exit(&Adapter->chip_lock);
1892 1896 return (ECANCELED);
1893 1897 }
1894 1898
1895 1899 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
1896 1900 rw_exit(&Adapter->chip_lock);
1897 1901 return (ENOTACTIVE);
1898 1902 }
1899 1903
1900 1904 Adapter->e1000g_state |= E1000G_STARTED;
1901 1905
1902 1906 rw_exit(&Adapter->chip_lock);
1903 1907
1904 1908 /* Enable and start the watchdog timer */
1905 1909 enable_watchdog_timer(Adapter);
1906 1910
1907 1911 return (0);
1908 1912 }
1909 1913
1910 1914 static int
1911 1915 e1000g_start(struct e1000g *Adapter, boolean_t global)
1912 1916 {
1913 1917 e1000g_rx_data_t *rx_data;
1914 1918
1915 1919 if (global) {
1916 1920 if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) {
1917 1921 e1000g_log(Adapter, CE_WARN, "Allocate rx data failed");
1918 1922 goto start_fail;
1919 1923 }
1920 1924
1921 1925 /* Allocate dma resources for descriptors and buffers */
1922 1926 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1923 1927 e1000g_log(Adapter, CE_WARN,
1924 1928 "Alloc DMA resources failed");
1925 1929 goto start_fail;
1926 1930 }
1927 1931 Adapter->rx_buffer_setup = B_FALSE;
1928 1932 }
1929 1933
1930 1934 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1931 1935 if (e1000g_init(Adapter) != DDI_SUCCESS) {
1932 1936 e1000g_log(Adapter, CE_WARN,
1933 1937 "Adapter initialization failed");
1934 1938 goto start_fail;
1935 1939 }
1936 1940 }
1937 1941
1938 1942 /* Setup and initialize the transmit structures */
1939 1943 e1000g_tx_setup(Adapter);
1940 1944 msec_delay(5);
1941 1945
1942 1946 /* Setup and initialize the receive structures */
1943 1947 e1000g_rx_setup(Adapter);
1944 1948 msec_delay(5);
1945 1949
1946 1950 /* Restore the e1000g promiscuous mode */
1947 1951 e1000g_restore_promisc(Adapter);
1948 1952
1949 1953 e1000g_mask_interrupt(Adapter);
1950 1954
1951 1955 Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1952 1956
1953 1957 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1954 1958 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1955 1959 goto start_fail;
1956 1960 }
1957 1961
1958 1962 return (DDI_SUCCESS);
1959 1963
1960 1964 start_fail:
1961 1965 rx_data = Adapter->rx_ring->rx_data;
1962 1966
1963 1967 if (global) {
1964 1968 e1000g_release_dma_resources(Adapter);
1965 1969 e1000g_free_rx_pending_buffers(rx_data);
1966 1970 e1000g_free_rx_data(rx_data);
1967 1971 }
1968 1972
1969 1973 mutex_enter(&e1000g_nvm_lock);
1970 1974 (void) e1000_reset_hw(&Adapter->shared);
1971 1975 mutex_exit(&e1000g_nvm_lock);
1972 1976
1973 1977 return (DDI_FAILURE);
1974 1978 }
1975 1979
1976 1980 /*
1977 1981 * The I219 has the curious property that if the descriptor rings are not
1978 1982 * emptied before resetting the hardware or before changing the device state
1979 1983 * based on runtime power management, it'll cause the card to hang. This can
1980 1984 * then only be fixed by a PCI reset. As such, for the I219 and it alone, we
1981 1985 * have to flush the rings if we're in this state.
1982 1986 */
1983 1987 static void
1984 1988 e1000g_flush_desc_rings(struct e1000g *Adapter)
1985 1989 {
1986 1990 struct e1000_hw *hw = &Adapter->shared;
1987 1991 u16 hang_state;
1988 1992 u32 fext_nvm11, tdlen;
1989 1993
1990 1994 /* First, disable MULR fix in FEXTNVM11 */
1991 1995 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
1992 1996 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
1993 1997 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);
1994 1998
1995 1999 /* do nothing if we're not in faulty state, or if the queue is empty */
1996 2000 tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
1997 2001 hang_state = pci_config_get16(Adapter->osdep.cfg_handle,
1998 2002 PCICFG_DESC_RING_STATUS);
1999 2003 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
2000 2004 return;
2001 2005 e1000g_flush_tx_ring(Adapter);
2002 2006
2003 2007 /* recheck, maybe the fault is caused by the rx ring */
2004 2008 hang_state = pci_config_get16(Adapter->osdep.cfg_handle,
2005 2009 PCICFG_DESC_RING_STATUS);
2006 2010 if (hang_state & FLUSH_DESC_REQUIRED)
2007 2011 e1000g_flush_rx_ring(Adapter);
2008 2012
2009 2013 }
2010 2014
2011 2015 static void
2012 2016 e1000g_m_stop(void *arg)
2013 2017 {
2014 2018 struct e1000g *Adapter = (struct e1000g *)arg;
2015 2019
2016 2020 /* Drain tx sessions */
2017 2021 (void) e1000g_tx_drain(Adapter);
2018 2022
2019 2023 rw_enter(&Adapter->chip_lock, RW_WRITER);
2020 2024
2021 2025 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2022 2026 rw_exit(&Adapter->chip_lock);
2023 2027 return;
2024 2028 }
2025 2029 Adapter->e1000g_state &= ~E1000G_STARTED;
2026 2030 e1000g_stop(Adapter, B_TRUE);
2027 2031
2028 2032 rw_exit(&Adapter->chip_lock);
2029 2033
2030 2034 /* Disable and stop all the timers */
2031 2035 disable_watchdog_timer(Adapter);
2032 2036 stop_link_timer(Adapter);
2033 2037 stop_82547_timer(Adapter->tx_ring);
2034 2038 }
2035 2039
2036 2040 static void
2037 2041 e1000g_stop(struct e1000g *Adapter, boolean_t global)
2038 2042 {
2039 2043 private_devi_list_t *devi_node;
2040 2044 e1000g_rx_data_t *rx_data;
2041 2045 int result;
2042 2046
2043 2047 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
2044 2048
2045 2049 /* Stop the chip and release pending resources */
2046 2050
2047 2051 /* Tell firmware driver is no longer in control */
2048 2052 e1000g_release_driver_control(&Adapter->shared);
2049 2053
2050 2054 e1000g_clear_all_interrupts(Adapter);
2051 2055
2052 2056 mutex_enter(&e1000g_nvm_lock);
2053 2057 result = e1000_reset_hw(&Adapter->shared);
2054 2058 mutex_exit(&e1000g_nvm_lock);
2055 2059
2056 2060 if (result != E1000_SUCCESS) {
2057 2061 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
2058 2062 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
2059 2063 }
2060 2064
2061 2065 mutex_enter(&Adapter->link_lock);
2062 2066 Adapter->link_complete = B_FALSE;
2063 2067 mutex_exit(&Adapter->link_lock);
2064 2068
2065 2069 /* Release resources still held by the TX descriptors */
2066 2070 e1000g_tx_clean(Adapter);
2067 2071
2068 2072 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2069 2073 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
2070 2074
2071 2075 /* Clean the pending rx jumbo packet fragment */
2072 2076 e1000g_rx_clean(Adapter);
2073 2077
2074 2078 /*
2075 2079 	 * The I219 (the pch_spt and newer MAC types) has bugs such that
2076 2080 	 * we must ensure that rings are flushed before we do anything else.
2077 2081 	 * This must be done before we release DMA resources.
2078 2082 */
2079 - if (Adapter->shared.mac.type == e1000_pch_spt)
2083 + if (Adapter->shared.mac.type >= e1000_pch_spt)
2080 2084 e1000g_flush_desc_rings(Adapter);
2081 2085
2082 2086 if (global) {
2083 2087 e1000g_release_dma_resources(Adapter);
2084 2088
2085 2089 mutex_enter(&e1000g_rx_detach_lock);
2086 2090 rx_data = Adapter->rx_ring->rx_data;
2087 2091 rx_data->flag |= E1000G_RX_STOPPED;
2088 2092
2089 2093 if (rx_data->pending_count == 0) {
2090 2094 e1000g_free_rx_pending_buffers(rx_data);
2091 2095 e1000g_free_rx_data(rx_data);
2092 2096 } else {
2093 2097 devi_node = rx_data->priv_devi_node;
2094 2098 if (devi_node != NULL)
2095 2099 atomic_inc_32(&devi_node->pending_rx_count);
2096 2100 else
2097 2101 atomic_inc_32(&Adapter->pending_rx_count);
2098 2102 }
2099 2103 mutex_exit(&e1000g_rx_detach_lock);
2100 2104 }
2101 2105
2102 2106 if (Adapter->link_state != LINK_STATE_UNKNOWN) {
2103 2107 Adapter->link_state = LINK_STATE_UNKNOWN;
2104 2108 if (!Adapter->reset_flag)
2105 2109 mac_link_update(Adapter->mh, Adapter->link_state);
2106 2110 }
2107 2111 }
2108 2112
2109 2113 static void
2110 2114 e1000g_rx_clean(struct e1000g *Adapter)
2111 2115 {
2112 2116 e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data;
2113 2117
2114 2118 if (rx_data == NULL)
2115 2119 return;
2116 2120
2117 2121 if (rx_data->rx_mblk != NULL) {
2118 2122 freemsg(rx_data->rx_mblk);
2119 2123 rx_data->rx_mblk = NULL;
2120 2124 rx_data->rx_mblk_tail = NULL;
2121 2125 rx_data->rx_mblk_len = 0;
2122 2126 }
2123 2127 }
2124 2128
2125 2129 static void
2126 2130 e1000g_tx_clean(struct e1000g *Adapter)
2127 2131 {
2128 2132 e1000g_tx_ring_t *tx_ring;
2129 2133 p_tx_sw_packet_t packet;
2130 2134 mblk_t *mp;
2131 2135 mblk_t *nmp;
2132 2136 uint32_t packet_count;
2133 2137
2134 2138 tx_ring = Adapter->tx_ring;
2135 2139
2136 2140 /*
2137 2141 * Here we don't need to protect the lists using
2138 2142 * the usedlist_lock and freelist_lock, for they
2139 2143 * have been protected by the chip_lock.
2140 2144 */
2141 2145 mp = NULL;
2142 2146 nmp = NULL;
2143 2147 packet_count = 0;
2144 2148 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
2145 2149 while (packet != NULL) {
2146 2150 if (packet->mp != NULL) {
2147 2151 /* Assemble the message chain */
2148 2152 if (mp == NULL) {
2149 2153 mp = packet->mp;
2150 2154 nmp = packet->mp;
2151 2155 } else {
2152 2156 nmp->b_next = packet->mp;
2153 2157 nmp = packet->mp;
2154 2158 }
2155 2159 /* Disconnect the message from the sw packet */
2156 2160 packet->mp = NULL;
2157 2161 }
2158 2162
2159 2163 e1000g_free_tx_swpkt(packet);
2160 2164 packet_count++;
2161 2165
2162 2166 packet = (p_tx_sw_packet_t)
2163 2167 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
2164 2168 }
2165 2169
2166 2170 if (mp != NULL)
2167 2171 freemsgchain(mp);
2168 2172
2169 2173 if (packet_count > 0) {
2170 2174 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
2171 2175 QUEUE_INIT_LIST(&tx_ring->used_list);
2172 2176
2173 2177 /* Setup TX descriptor pointers */
2174 2178 tx_ring->tbd_next = tx_ring->tbd_first;
2175 2179 tx_ring->tbd_oldest = tx_ring->tbd_first;
2176 2180
2177 2181 /* Setup our HW Tx Head & Tail descriptor pointers */
2178 2182 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
2179 2183 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
2180 2184 }
2181 2185 }
2182 2186
2183 2187 static boolean_t
2184 2188 e1000g_tx_drain(struct e1000g *Adapter)
2185 2189 {
2186 2190 int i;
2187 2191 boolean_t done;
2188 2192 e1000g_tx_ring_t *tx_ring;
2189 2193
2190 2194 tx_ring = Adapter->tx_ring;
2191 2195
2192 2196 	/* Allow up to TX_DRAIN_TIME for pending transmits to complete. */
2193 2197 for (i = 0; i < TX_DRAIN_TIME; i++) {
2194 2198 mutex_enter(&tx_ring->usedlist_lock);
2195 2199 done = IS_QUEUE_EMPTY(&tx_ring->used_list);
2196 2200 mutex_exit(&tx_ring->usedlist_lock);
2197 2201
2198 2202 if (done)
2199 2203 break;
2200 2204
2201 2205 msec_delay(1);
2202 2206 }
2203 2207
2204 2208 return (done);
2205 2209 }
2206 2210
2207 2211 static boolean_t
2208 2212 e1000g_rx_drain(struct e1000g *Adapter)
2209 2213 {
2210 2214 int i;
2211 2215 boolean_t done;
2212 2216
2213 2217 /*
2214 2218 * Allow up to RX_DRAIN_TIME for pending received packets to complete.
2215 2219 */
2216 2220 for (i = 0; i < RX_DRAIN_TIME; i++) {
2217 2221 done = (Adapter->pending_rx_count == 0);
2218 2222
2219 2223 if (done)
2220 2224 break;
2221 2225
2222 2226 msec_delay(1);
2223 2227 }
2224 2228
2225 2229 return (done);
2226 2230 }
2227 2231
2228 2232 static boolean_t
2229 2233 e1000g_reset_adapter(struct e1000g *Adapter)
2230 2234 {
2231 2235 /* Disable and stop all the timers */
2232 2236 disable_watchdog_timer(Adapter);
2233 2237 stop_link_timer(Adapter);
2234 2238 stop_82547_timer(Adapter->tx_ring);
2235 2239
2236 2240 rw_enter(&Adapter->chip_lock, RW_WRITER);
2237 2241
2238 2242 if (Adapter->stall_flag) {
2239 2243 Adapter->stall_flag = B_FALSE;
2240 2244 Adapter->reset_flag = B_TRUE;
2241 2245 }
2242 2246
2243 2247 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2244 2248 rw_exit(&Adapter->chip_lock);
2245 2249 return (B_TRUE);
2246 2250 }
2247 2251
2248 2252 e1000g_stop(Adapter, B_FALSE);
2249 2253
2250 2254 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
2251 2255 rw_exit(&Adapter->chip_lock);
2252 2256 e1000g_log(Adapter, CE_WARN, "Reset failed");
2253 2257 return (B_FALSE);
2254 2258 }
2255 2259
2256 2260 rw_exit(&Adapter->chip_lock);
2257 2261
2258 2262 /* Enable and start the watchdog timer */
2259 2263 enable_watchdog_timer(Adapter);
2260 2264
2261 2265 return (B_TRUE);
2262 2266 }
2263 2267
2264 2268 boolean_t
2265 2269 e1000g_global_reset(struct e1000g *Adapter)
2266 2270 {
2267 2271 /* Disable and stop all the timers */
2268 2272 disable_watchdog_timer(Adapter);
2269 2273 stop_link_timer(Adapter);
2270 2274 stop_82547_timer(Adapter->tx_ring);
2271 2275
2272 2276 rw_enter(&Adapter->chip_lock, RW_WRITER);
2273 2277
2274 2278 e1000g_stop(Adapter, B_TRUE);
2275 2279
2276 2280 Adapter->init_count = 0;
2277 2281
2278 2282 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
2279 2283 rw_exit(&Adapter->chip_lock);
2280 2284 e1000g_log(Adapter, CE_WARN, "Reset failed");
2281 2285 return (B_FALSE);
2282 2286 }
2283 2287
2284 2288 rw_exit(&Adapter->chip_lock);
2285 2289
2286 2290 /* Enable and start the watchdog timer */
2287 2291 enable_watchdog_timer(Adapter);
2288 2292
2289 2293 return (B_TRUE);
2290 2294 }
2291 2295
2292 2296 /*
2293 2297 * e1000g_intr_pciexpress - ISR for PCI Express chipsets
2294 2298 *
2295 2299 * This interrupt service routine is for PCI-Express adapters.
2296 2300  * The ICR contents are valid only when the E1000_ICR_INT_ASSERTED
2297 2301 * bit is set.
2298 2302 */
2299 2303 static uint_t
2300 2304 e1000g_intr_pciexpress(caddr_t arg)
2301 2305 {
2302 2306 struct e1000g *Adapter;
2303 2307 uint32_t icr;
2304 2308
2305 2309 Adapter = (struct e1000g *)(uintptr_t)arg;
2306 2310 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2307 2311
2308 2312 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2309 2313 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2310 2314 return (DDI_INTR_CLAIMED);
2311 2315 }
2312 2316
2313 2317 if (icr & E1000_ICR_INT_ASSERTED) {
2314 2318 /*
2315 2319 * E1000_ICR_INT_ASSERTED bit was set:
2316 2320 * Read(Clear) the ICR, claim this interrupt,
2317 2321 * look for work to do.
2318 2322 */
2319 2323 e1000g_intr_work(Adapter, icr);
2320 2324 return (DDI_INTR_CLAIMED);
2321 2325 } else {
2322 2326 /*
2323 2327 * E1000_ICR_INT_ASSERTED bit was not set:
2324 2328 * Don't claim this interrupt, return immediately.
2325 2329 */
2326 2330 return (DDI_INTR_UNCLAIMED);
2327 2331 }
2328 2332 }
2329 2333
2330 2334 /*
2331 2335 * e1000g_intr - ISR for PCI/PCI-X chipsets
2332 2336 *
2333 2337 * This interrupt service routine is for PCI/PCI-X adapters.
2334 2338  * We check the ICR contents regardless of whether the
2335 2339  * E1000_ICR_INT_ASSERTED bit is set.
2336 2340 */
2337 2341 static uint_t
2338 2342 e1000g_intr(caddr_t arg)
2339 2343 {
2340 2344 struct e1000g *Adapter;
2341 2345 uint32_t icr;
2342 2346
2343 2347 Adapter = (struct e1000g *)(uintptr_t)arg;
2344 2348 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2345 2349
2346 2350 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2347 2351 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2348 2352 return (DDI_INTR_CLAIMED);
2349 2353 }
2350 2354
2351 2355 if (icr) {
2352 2356 /*
2353 2357 * Any bit was set in ICR:
2354 2358 * Read(Clear) the ICR, claim this interrupt,
2355 2359 * look for work to do.
2356 2360 */
2357 2361 e1000g_intr_work(Adapter, icr);
2358 2362 return (DDI_INTR_CLAIMED);
2359 2363 } else {
2360 2364 /*
2361 2365 * No bit was set in ICR:
2362 2366 * Don't claim this interrupt, return immediately.
2363 2367 */
2364 2368 return (DDI_INTR_UNCLAIMED);
2365 2369 }
2366 2370 }
2367 2371
2368 2372 /*
2369 2373 * e1000g_intr_work - actual processing of ISR
2370 2374 *
2371 2375 * Read(clear) the ICR contents and call appropriate interrupt
2372 2376 * processing routines.
2373 2377 */
2374 2378 static void
2375 2379 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
2376 2380 {
2377 2381 struct e1000_hw *hw;
2378 2382 hw = &Adapter->shared;
2379 2383 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
2380 2384
2381 2385 Adapter->rx_pkt_cnt = 0;
2382 2386 Adapter->tx_pkt_cnt = 0;
2383 2387
2384 2388 rw_enter(&Adapter->chip_lock, RW_READER);
2385 2389
2386 2390 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2387 2391 rw_exit(&Adapter->chip_lock);
2388 2392 return;
2389 2393 }
2390 2394 /*
2391 2395 * Here we need to check the "e1000g_state" flag within the chip_lock to
2392 2396 * ensure the receive routine will not execute when the adapter is
2393 2397 * being reset.
2394 2398 */
2395 2399 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2396 2400 rw_exit(&Adapter->chip_lock);
2397 2401 return;
2398 2402 }
2399 2403
2400 2404 if (icr & E1000_ICR_RXT0) {
2401 2405 mblk_t *mp = NULL;
2402 2406 mblk_t *tail = NULL;
2403 2407 e1000g_rx_ring_t *rx_ring;
2404 2408
2405 2409 rx_ring = Adapter->rx_ring;
2406 2410 mutex_enter(&rx_ring->rx_lock);
2407 2411 /*
2408 2412 		 * Sometimes with legacy interrupts, it is possible that
2409 2413 		 * a single interrupt covers both Rx and Tx. In that
2410 2414 		 * case, if the poll flag is set, we shouldn't really
2411 2415 		 * be doing Rx processing.
2412 2416 */
2413 2417 if (!rx_ring->poll_flag)
2414 2418 mp = e1000g_receive(rx_ring, &tail,
2415 2419 E1000G_CHAIN_NO_LIMIT);
2416 2420 mutex_exit(&rx_ring->rx_lock);
2417 2421 rw_exit(&Adapter->chip_lock);
2418 2422 if (mp != NULL)
2419 2423 mac_rx_ring(Adapter->mh, rx_ring->mrh,
2420 2424 mp, rx_ring->ring_gen_num);
2421 2425 } else
2422 2426 rw_exit(&Adapter->chip_lock);
2423 2427
2424 2428 if (icr & E1000_ICR_TXDW) {
2425 2429 if (!Adapter->tx_intr_enable)
2426 2430 e1000g_clear_tx_interrupt(Adapter);
2427 2431
2428 2432 /* Recycle the tx descriptors */
2429 2433 rw_enter(&Adapter->chip_lock, RW_READER);
2430 2434 (void) e1000g_recycle(tx_ring);
2431 2435 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
2432 2436 rw_exit(&Adapter->chip_lock);
2433 2437
2434 2438 if (tx_ring->resched_needed &&
2435 2439 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
2436 2440 tx_ring->resched_needed = B_FALSE;
2437 2441 mac_tx_update(Adapter->mh);
2438 2442 E1000G_STAT(tx_ring->stat_reschedule);
2439 2443 }
2440 2444 }
2441 2445
2442 2446 /*
2443 2447 	 * The receive sequence error (RXSEQ) and link status change (LSC)
2444 2448 	 * interrupts are checked to detect that the cable has been pulled
2445 2449 	 * out. For the Wiseman 2.0 silicon, the receive sequence error
2446 2450 	 * interrupt is an indication that the cable is not connected.
2447 2451 */
2448 2452 if ((icr & E1000_ICR_RXSEQ) ||
2449 2453 (icr & E1000_ICR_LSC) ||
2450 2454 (icr & E1000_ICR_GPI_EN1)) {
2451 2455 boolean_t link_changed;
2452 2456 timeout_id_t tid = 0;
2453 2457
2454 2458 stop_watchdog_timer(Adapter);
2455 2459
2456 2460 rw_enter(&Adapter->chip_lock, RW_WRITER);
2457 2461
2458 2462 /*
2459 2463 * Because we got a link-status-change interrupt, force
2460 2464 * e1000_check_for_link() to look at phy
2461 2465 */
2462 2466 Adapter->shared.mac.get_link_status = B_TRUE;
2463 2467
2464 2468 /* e1000g_link_check takes care of link status change */
2465 2469 link_changed = e1000g_link_check(Adapter);
2466 2470
2467 2471 /* Get new phy state */
2468 2472 e1000g_get_phy_state(Adapter);
2469 2473
2470 2474 /*
2471 2475 * If the link timer has not timed out, we'll not notify
2472 2476 * the upper layer with any link state until the link is up.
2473 2477 */
2474 2478 if (link_changed && !Adapter->link_complete) {
2475 2479 if (Adapter->link_state == LINK_STATE_UP) {
2476 2480 mutex_enter(&Adapter->link_lock);
2477 2481 Adapter->link_complete = B_TRUE;
2478 2482 tid = Adapter->link_tid;
2479 2483 Adapter->link_tid = 0;
2480 2484 mutex_exit(&Adapter->link_lock);
2481 2485 } else {
2482 2486 link_changed = B_FALSE;
2483 2487 }
2484 2488 }
2485 2489 rw_exit(&Adapter->chip_lock);
2486 2490
2487 2491 if (link_changed) {
2488 2492 if (tid != 0)
2489 2493 (void) untimeout(tid);
2490 2494
2491 2495 /*
2492 2496 * Workaround for esb2. Data stuck in fifo on a link
2493 2497 * down event. Stop receiver here and reset in watchdog.
2494 2498 */
2495 2499 if ((Adapter->link_state == LINK_STATE_DOWN) &&
2496 2500 (Adapter->shared.mac.type == e1000_80003es2lan)) {
2497 2501 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
2498 2502 E1000_WRITE_REG(hw, E1000_RCTL,
2499 2503 rctl & ~E1000_RCTL_EN);
2500 2504 e1000g_log(Adapter, CE_WARN,
2501 2505 "ESB2 receiver disabled");
2502 2506 Adapter->esb2_workaround = B_TRUE;
2503 2507 }
2504 2508 if (!Adapter->reset_flag)
2505 2509 mac_link_update(Adapter->mh,
2506 2510 Adapter->link_state);
2507 2511 if (Adapter->link_state == LINK_STATE_UP)
2508 2512 Adapter->reset_flag = B_FALSE;
2509 2513 }
2510 2514
2511 2515 start_watchdog_timer(Adapter);
2512 2516 }
2513 2517 }
2514 2518
2515 2519 static void
2516 2520 e1000g_init_unicst(struct e1000g *Adapter)
2517 2521 {
2518 2522 struct e1000_hw *hw;
2519 2523 int slot;
2520 2524
2521 2525 hw = &Adapter->shared;
2522 2526
2523 2527 if (Adapter->init_count == 0) {
2524 2528 /* Initialize the multiple unicast addresses */
2525 2529 Adapter->unicst_total = min(hw->mac.rar_entry_count,
2526 2530 MAX_NUM_UNICAST_ADDRESSES);
2527 2531
2528 2532 /*
2529 2533 		 * The common code does not correctly calculate the number of
2530 2534 		 * RARs that could be reserved by firmware for the pch_lpt and
2531 2535 		 * newer MACs. The interface has one primary RAR and 11
2532 2536 		 * additional ones. Those 11 additional ones are not always
2533 2537 		 * available. According to the datasheet, we need to check the
2534 2538 		 * WLOCK_MAC bits in the FWSM register. If the value is
2535 2539 		 * zero, everything is available. If the value is 1, none of the
2536 2540 		 * additional registers are available. If the value is 2-7, only
2537 2541 		 * that many are available.
2538 2542 */
2539 - if (hw->mac.type == e1000_pch_lpt ||
2540 - hw->mac.type == e1000_pch_spt) {
2543 + if (hw->mac.type >= e1000_pch_lpt) {
2541 2544 uint32_t locked, rar;
2542 2545
2543 2546 locked = E1000_READ_REG(hw, E1000_FWSM) &
2544 2547 E1000_FWSM_WLOCK_MAC_MASK;
2545 2548 locked >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2546 2549 rar = 1;
2547 2550 if (locked == 0)
2548 2551 rar += 11;
2549 2552 else if (locked == 1)
2550 2553 rar += 0;
2551 2554 else
2552 2555 rar += locked;
2553 2556 Adapter->unicst_total = min(rar,
2554 2557 MAX_NUM_UNICAST_ADDRESSES);
2555 2558 }
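		/*
		 * A sketch of the resulting mapping (derived from the code
		 * above, with WLOCK_MAC being the FWSM field read here):
		 *
		 *	WLOCK_MAC == 0:        1 primary + 11 shared = 12 RARs
		 *	WLOCK_MAC == 1:        only the primary      =  1 RAR
		 *	WLOCK_MAC == k (2..7): 1 primary + k shared  = 1 + k RARs
		 *
		 * unicst_total is then capped at MAX_NUM_UNICAST_ADDRESSES.
		 */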
2556 2559
2557 2560 		/* Workaround for an erratum of the 82571 chipset */
2558 2561 if ((hw->mac.type == e1000_82571) &&
2559 2562 (e1000_get_laa_state_82571(hw) == B_TRUE))
2560 2563 Adapter->unicst_total--;
2561 2564
2562 2565 /* VMware doesn't support multiple mac addresses properly */
2563 2566 if (hw->subsystem_vendor_id == 0x15ad)
2564 2567 Adapter->unicst_total = 1;
2565 2568
2566 2569 Adapter->unicst_avail = Adapter->unicst_total;
2567 2570
2568 2571 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2569 2572 /* Clear both the flag and MAC address */
2570 2573 Adapter->unicst_addr[slot].reg.high = 0;
2571 2574 Adapter->unicst_addr[slot].reg.low = 0;
2572 2575 }
2573 2576 } else {
2574 2577 		/* Workaround for an erratum of the 82571 chipset */
2575 2578 if ((hw->mac.type == e1000_82571) &&
2576 2579 (e1000_get_laa_state_82571(hw) == B_TRUE))
2577 2580 (void) e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2578 2581
2579 2582 /* Re-configure the RAR registers */
2580 2583 for (slot = 0; slot < Adapter->unicst_total; slot++)
2581 2584 if (Adapter->unicst_addr[slot].mac.set == 1)
2582 2585 (void) e1000_rar_set(hw,
2583 2586 Adapter->unicst_addr[slot].mac.addr, slot);
2584 2587 }
2585 2588
2586 2589 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2587 2590 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2588 2591 }
2589 2592
2590 2593 static int
2591 2594 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2592 2595 int slot)
2593 2596 {
2594 2597 struct e1000_hw *hw;
2595 2598
2596 2599 hw = &Adapter->shared;
2597 2600
2598 2601 /*
2599 2602 	 * The first revision of Wiseman silicon (rev 2.0) has an erratum
2600 2603 * that requires the receiver to be in reset when any of the
2601 2604 * receive address registers (RAR regs) are accessed. The first
2602 2605 * rev of Wiseman silicon also requires MWI to be disabled when
2603 2606 * a global reset or a receive reset is issued. So before we
2604 2607 * initialize the RARs, we check the rev of the Wiseman controller
2605 2608 * and work around any necessary HW errata.
2606 2609 */
2607 2610 if ((hw->mac.type == e1000_82542) &&
2608 2611 (hw->revision_id == E1000_REVISION_2)) {
2609 2612 e1000_pci_clear_mwi(hw);
2610 2613 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2611 2614 msec_delay(5);
2612 2615 }
2613 2616 if (mac_addr == NULL) {
2614 2617 E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0);
2615 2618 E1000_WRITE_FLUSH(hw);
2616 2619 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0);
2617 2620 E1000_WRITE_FLUSH(hw);
2618 2621 /* Clear both the flag and MAC address */
2619 2622 Adapter->unicst_addr[slot].reg.high = 0;
2620 2623 Adapter->unicst_addr[slot].reg.low = 0;
2621 2624 } else {
2622 2625 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr,
2623 2626 ETHERADDRL);
2624 2627 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2625 2628 Adapter->unicst_addr[slot].mac.set = 1;
2626 2629 }
2627 2630
2628 2631 	/* Workaround for an erratum of the 82571 chipset */
2629 2632 if (slot == 0) {
2630 2633 if ((hw->mac.type == e1000_82571) &&
2631 2634 (e1000_get_laa_state_82571(hw) == B_TRUE))
2632 2635 if (mac_addr == NULL) {
2633 2636 E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2634 2637 slot << 1, 0);
2635 2638 E1000_WRITE_FLUSH(hw);
2636 2639 E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2637 2640 (slot << 1) + 1, 0);
2638 2641 E1000_WRITE_FLUSH(hw);
2639 2642 } else {
2640 2643 (void) e1000_rar_set(hw, (uint8_t *)mac_addr,
2641 2644 LAST_RAR_ENTRY);
2642 2645 }
2643 2646 }
2644 2647
2645 2648 /*
2646 2649 * If we are using Wiseman rev 2.0 silicon, we will have previously
2647 2650 	 * put the receiver in reset, and disabled MWI, to work around some
2648 2651 	 * HW errata. Now we should take the receiver out of reset, and
2649 2652 	 * re-enable MWI if it was previously enabled by the PCI BIOS.
2650 2653 */
2651 2654 if ((hw->mac.type == e1000_82542) &&
2652 2655 (hw->revision_id == E1000_REVISION_2)) {
2653 2656 E1000_WRITE_REG(hw, E1000_RCTL, 0);
2654 2657 msec_delay(1);
2655 2658 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2656 2659 e1000_pci_set_mwi(hw);
2657 2660 e1000g_rx_setup(Adapter);
2658 2661 }
2659 2662
2660 2663 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2661 2664 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2662 2665 return (EIO);
2663 2666 }
2664 2667
2665 2668 return (0);
2666 2669 }
2667 2670
2668 2671 static int
2669 2672 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2670 2673 {
2671 2674 struct e1000_hw *hw = &Adapter->shared;
2672 2675 struct ether_addr *newtable;
2673 2676 size_t new_len;
2674 2677 size_t old_len;
2675 2678 int res = 0;
2676 2679
2677 2680 if ((multiaddr[0] & 01) == 0) {
2678 2681 res = EINVAL;
2679 2682 e1000g_log(Adapter, CE_WARN, "Illegal multicast address");
2680 2683 goto done;
2681 2684 }
2682 2685
2683 2686 if (Adapter->mcast_count >= Adapter->mcast_max_num) {
2684 2687 res = ENOENT;
2685 2688 e1000g_log(Adapter, CE_WARN,
2686 2689 "Adapter requested more than %d mcast addresses",
2687 2690 Adapter->mcast_max_num);
2688 2691 goto done;
2689 2692 }
2690 2693
2691 2694
2692 2695 if (Adapter->mcast_count == Adapter->mcast_alloc_count) {
2693 2696 old_len = Adapter->mcast_alloc_count *
2694 2697 sizeof (struct ether_addr);
2695 2698 new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) *
2696 2699 sizeof (struct ether_addr);
2697 2700
2698 2701 newtable = kmem_alloc(new_len, KM_NOSLEEP);
2699 2702 if (newtable == NULL) {
2700 2703 res = ENOMEM;
2701 2704 e1000g_log(Adapter, CE_WARN,
2702 2705 "Not enough memory to alloc mcast table");
2703 2706 goto done;
2704 2707 }
2705 2708
2706 2709 if (Adapter->mcast_table != NULL) {
2707 2710 bcopy(Adapter->mcast_table, newtable, old_len);
2708 2711 kmem_free(Adapter->mcast_table, old_len);
2709 2712 }
2710 2713 Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE;
2711 2714 Adapter->mcast_table = newtable;
2712 2715 }
2713 2716
2714 2717 bcopy(multiaddr,
2715 2718 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2716 2719 Adapter->mcast_count++;
2717 2720
2718 2721 /*
2719 2722 * Update the MC table in the hardware
2720 2723 */
2721 2724 e1000g_clear_interrupt(Adapter);
2722 2725
2723 2726 e1000_update_mc_addr_list(hw,
2724 2727 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2725 2728
2726 2729 e1000g_mask_interrupt(Adapter);
2727 2730
2728 2731 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2729 2732 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2730 2733 res = EIO;
2731 2734 }
2732 2735
2733 2736 done:
2734 2737 return (res);
2735 2738 }
2736 2739
2737 2740 static int
2738 2741 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2739 2742 {
2740 2743 struct e1000_hw *hw = &Adapter->shared;
2741 2744 struct ether_addr *newtable;
2742 2745 size_t new_len;
2743 2746 size_t old_len;
2744 2747 unsigned i;
2745 2748
2746 2749 for (i = 0; i < Adapter->mcast_count; i++) {
2747 2750 if (bcmp(multiaddr, &Adapter->mcast_table[i],
2748 2751 ETHERADDRL) == 0) {
2749 2752 for (i++; i < Adapter->mcast_count; i++) {
2750 2753 Adapter->mcast_table[i - 1] =
2751 2754 Adapter->mcast_table[i];
2752 2755 }
2753 2756 Adapter->mcast_count--;
2754 2757 break;
2755 2758 }
2756 2759 }
2757 2760
2758 2761 if ((Adapter->mcast_alloc_count - Adapter->mcast_count) >
2759 2762 MCAST_ALLOC_SIZE) {
2760 2763 old_len = Adapter->mcast_alloc_count *
2761 2764 sizeof (struct ether_addr);
2762 2765 new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) *
2763 2766 sizeof (struct ether_addr);
2764 2767
2765 2768 newtable = kmem_alloc(new_len, KM_NOSLEEP);
2766 2769 if (newtable != NULL) {
2767 2770 bcopy(Adapter->mcast_table, newtable, new_len);
2768 2771 kmem_free(Adapter->mcast_table, old_len);
2769 2772
2770 2773 Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE;
2771 2774 Adapter->mcast_table = newtable;
2772 2775 }
2773 2776 }
2774 2777
2775 2778 /*
2776 2779 * Update the MC table in the hardware
2777 2780 */
2778 2781 e1000g_clear_interrupt(Adapter);
2779 2782
2780 2783 e1000_update_mc_addr_list(hw,
2781 2784 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2782 2785
2783 2786 e1000g_mask_interrupt(Adapter);
2784 2787
2785 2788 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2786 2789 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2787 2790 return (EIO);
2788 2791 }
2789 2792
2790 2793 return (0);
2791 2794 }
2792 2795
2793 2796 static void
2794 2797 e1000g_release_multicast(struct e1000g *Adapter)
2795 2798 {
2796 2799 if (Adapter->mcast_table != NULL) {
2797 2800 kmem_free(Adapter->mcast_table,
2798 2801 Adapter->mcast_alloc_count * sizeof (struct ether_addr));
2799 2802 Adapter->mcast_table = NULL;
2800 2803 }
2801 2804 }
2802 2805
2803 2806 int
2804 2807 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2805 2808 {
2806 2809 struct e1000g *Adapter = (struct e1000g *)arg;
2807 2810 int result;
2808 2811
2809 2812 rw_enter(&Adapter->chip_lock, RW_WRITER);
2810 2813
2811 2814 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2812 2815 result = ECANCELED;
2813 2816 goto done;
2814 2817 }
2815 2818
2816 2819 result = (add) ? multicst_add(Adapter, addr)
2817 2820 : multicst_remove(Adapter, addr);
2818 2821
2819 2822 done:
2820 2823 rw_exit(&Adapter->chip_lock);
2821 2824 return (result);
2822 2825
2823 2826 }
2824 2827
2825 2828 int
2826 2829 e1000g_m_promisc(void *arg, boolean_t on)
2827 2830 {
2828 2831 struct e1000g *Adapter = (struct e1000g *)arg;
2829 2832 uint32_t rctl;
2830 2833
2831 2834 rw_enter(&Adapter->chip_lock, RW_WRITER);
2832 2835
2833 2836 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2834 2837 rw_exit(&Adapter->chip_lock);
2835 2838 return (ECANCELED);
2836 2839 }
2837 2840
2838 2841 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2839 2842
2840 2843 if (on)
2841 2844 rctl |=
2842 2845 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2843 2846 else
2844 2847 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2845 2848
2846 2849 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2847 2850
2848 2851 Adapter->e1000g_promisc = on;
2849 2852
2850 2853 rw_exit(&Adapter->chip_lock);
2851 2854
2852 2855 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2853 2856 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2854 2857 return (EIO);
2855 2858 }
2856 2859
2857 2860 return (0);
2858 2861 }
2859 2862
2860 2863 /*
2861 2864 * Entry points to enable and disable interrupts at the granularity of
2862 2865 * a group.
2863 2866 * Turns the poll_mode for the whole adapter on and off to enable or
2864 2867 * override the ring level polling control over the hardware interrupts.
2865 2868 */
2866 2869 static int
2867 2870 e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
2868 2871 {
2869 2872 struct e1000g *adapter = (struct e1000g *)arg;
2870 2873 e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2871 2874
2872 2875 /*
2873 2876 	 * Later interrupts at the granularity of this ring will
2874 2877 * invoke mac_rx() with NULL, indicating the need for another
2875 2878 * software classification.
2876 2879 * We have a single ring usable per adapter now, so we only need to
2877 2880 * reset the rx handle for that one.
2878 2881 * When more RX rings can be used, we should update each one of them.
2879 2882 */
2880 2883 mutex_enter(&rx_ring->rx_lock);
2881 2884 rx_ring->mrh = NULL;
2882 2885 adapter->poll_mode = B_FALSE;
2883 2886 mutex_exit(&rx_ring->rx_lock);
2884 2887 return (0);
2885 2888 }
2886 2889
2887 2890 static int
2888 2891 e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
2889 2892 {
2890 2893 struct e1000g *adapter = (struct e1000g *)arg;
2891 2894 e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2892 2895
2893 2896 mutex_enter(&rx_ring->rx_lock);
2894 2897
2895 2898 /*
2896 2899 	 * Later interrupts at the granularity of this ring will
2897 2900 	 * invoke mac_rx() with the handle for this ring.
2898 2901 */
2899 2902 adapter->poll_mode = B_TRUE;
2900 2903 rx_ring->mrh = rx_ring->mrh_init;
2901 2904 mutex_exit(&rx_ring->rx_lock);
2902 2905 return (0);
2903 2906 }
2904 2907
2905 2908 /*
2906 2909 * Entry points to enable and disable interrupts at the granularity of
2907 2910 * a ring.
2908 2911 * adapter poll_mode controls whether we actually proceed with hardware
2909 2912 * interrupt toggling.
2910 2913 */
2911 2914 static int
2912 2915 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
2913 2916 {
2914 2917 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
2915 2918 struct e1000g *adapter = rx_ring->adapter;
2916 2919 struct e1000_hw *hw = &adapter->shared;
2917 2920 uint32_t intr_mask;
2918 2921
2919 2922 rw_enter(&adapter->chip_lock, RW_READER);
2920 2923
2921 2924 if (adapter->e1000g_state & E1000G_SUSPENDED) {
2922 2925 rw_exit(&adapter->chip_lock);
2923 2926 return (0);
2924 2927 }
2925 2928
2926 2929 mutex_enter(&rx_ring->rx_lock);
2927 2930 rx_ring->poll_flag = 0;
2928 2931 mutex_exit(&rx_ring->rx_lock);
2929 2932
2930 2933 /* Rx interrupt enabling for MSI and legacy */
2931 2934 intr_mask = E1000_READ_REG(hw, E1000_IMS);
2932 2935 intr_mask |= E1000_IMS_RXT0;
2933 2936 E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
2934 2937 E1000_WRITE_FLUSH(hw);
2935 2938
2936 2939 /* Trigger a Rx interrupt to check Rx ring */
2937 2940 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2938 2941 E1000_WRITE_FLUSH(hw);
2939 2942
2940 2943 rw_exit(&adapter->chip_lock);
2941 2944 return (0);
2942 2945 }
2943 2946
2944 2947 static int
2945 2948 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
2946 2949 {
2947 2950 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
2948 2951 struct e1000g *adapter = rx_ring->adapter;
2949 2952 struct e1000_hw *hw = &adapter->shared;
2950 2953
2951 2954 rw_enter(&adapter->chip_lock, RW_READER);
2952 2955
2953 2956 if (adapter->e1000g_state & E1000G_SUSPENDED) {
2954 2957 rw_exit(&adapter->chip_lock);
2955 2958 return (0);
2956 2959 }
2957 2960 mutex_enter(&rx_ring->rx_lock);
2958 2961 rx_ring->poll_flag = 1;
2959 2962 mutex_exit(&rx_ring->rx_lock);
2960 2963
2961 2964 /* Rx interrupt disabling for MSI and legacy */
2962 2965 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
2963 2966 E1000_WRITE_FLUSH(hw);
2964 2967
2965 2968 rw_exit(&adapter->chip_lock);
2966 2969 return (0);
2967 2970 }
2968 2971
2969 2972 /*
2970 2973 * e1000g_unicst_find - Find the slot for the specified unicast address
2971 2974 */
2972 2975 static int
2973 2976 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
2974 2977 {
2975 2978 int slot;
2976 2979
2977 2980 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2978 2981 if ((Adapter->unicst_addr[slot].mac.set == 1) &&
2979 2982 (bcmp(Adapter->unicst_addr[slot].mac.addr,
2980 2983 mac_addr, ETHERADDRL) == 0))
2981 2984 return (slot);
2982 2985 }
2983 2986
2984 2987 return (-1);
2985 2988 }
2986 2989
2987 2990 /*
2988 2991 * Entry points to add and remove a MAC address to a ring group.
2989 2992 * The caller takes care of adding and removing the MAC addresses
2990 2993 * to the filter via these two routines.
2991 2994 */
2992 2995
2993 2996 static int
2994 2997 e1000g_addmac(void *arg, const uint8_t *mac_addr)
2995 2998 {
2996 2999 struct e1000g *Adapter = (struct e1000g *)arg;
2997 3000 int slot, err;
2998 3001
2999 3002 rw_enter(&Adapter->chip_lock, RW_WRITER);
3000 3003
3001 3004 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3002 3005 rw_exit(&Adapter->chip_lock);
3003 3006 return (ECANCELED);
3004 3007 }
3005 3008
3006 3009 if (e1000g_unicst_find(Adapter, mac_addr) != -1) {
3007 3010 /* The same address is already in slot */
3008 3011 rw_exit(&Adapter->chip_lock);
3009 3012 return (0);
3010 3013 }
3011 3014
3012 3015 if (Adapter->unicst_avail == 0) {
3013 3016 /* no slots available */
3014 3017 rw_exit(&Adapter->chip_lock);
3015 3018 return (ENOSPC);
3016 3019 }
3017 3020
3018 3021 /* Search for a free slot */
3019 3022 for (slot = 0; slot < Adapter->unicst_total; slot++) {
3020 3023 if (Adapter->unicst_addr[slot].mac.set == 0)
3021 3024 break;
3022 3025 }
3023 3026 ASSERT(slot < Adapter->unicst_total);
3024 3027
3025 3028 err = e1000g_unicst_set(Adapter, mac_addr, slot);
3026 3029 if (err == 0)
3027 3030 Adapter->unicst_avail--;
3028 3031
3029 3032 rw_exit(&Adapter->chip_lock);
3030 3033
3031 3034 return (err);
3032 3035 }
3033 3036
3034 3037 static int
3035 3038 e1000g_remmac(void *arg, const uint8_t *mac_addr)
3036 3039 {
3037 3040 struct e1000g *Adapter = (struct e1000g *)arg;
3038 3041 int slot, err;
3039 3042
3040 3043 rw_enter(&Adapter->chip_lock, RW_WRITER);
3041 3044
3042 3045 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3043 3046 rw_exit(&Adapter->chip_lock);
3044 3047 return (ECANCELED);
3045 3048 }
3046 3049
3047 3050 slot = e1000g_unicst_find(Adapter, mac_addr);
3048 3051 if (slot == -1) {
3049 3052 rw_exit(&Adapter->chip_lock);
3050 3053 return (EINVAL);
3051 3054 }
3052 3055
3053 3056 ASSERT(Adapter->unicst_addr[slot].mac.set);
3054 3057
3055 3058 /* Clear this slot */
3056 3059 err = e1000g_unicst_set(Adapter, NULL, slot);
3057 3060 if (err == 0)
3058 3061 Adapter->unicst_avail++;
3059 3062
3060 3063 rw_exit(&Adapter->chip_lock);
3061 3064
3062 3065 return (err);
3063 3066 }
3064 3067
3065 3068 static int
3066 3069 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
3067 3070 {
3068 3071 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;
3069 3072
3070 3073 mutex_enter(&rx_ring->rx_lock);
3071 3074 rx_ring->ring_gen_num = mr_gen_num;
3072 3075 mutex_exit(&rx_ring->rx_lock);
3073 3076 return (0);
3074 3077 }
3075 3078
3076 3079 /*
3077 3080  * Callback function for the MAC layer to register all rings.
3078 3081 *
3079 3082 * The hardware supports a single group with currently only one ring
3080 3083 * available.
3081 3084 * Though not offering virtualization ability per se, exposing the
3082 3085 * group/ring still enables the polling and interrupt toggling.
3083 3086 */
3084 3087 /* ARGSUSED */
3085 3088 void
3086 3089 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
3087 3090 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
3088 3091 {
3089 3092 struct e1000g *Adapter = (struct e1000g *)arg;
3090 3093 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
3091 3094 mac_intr_t *mintr;
3092 3095
3093 3096 /*
3094 3097 * We advertised only RX group/rings, so the MAC framework shouldn't
3095 3098 	 * ask for anything else.
3096 3099 */
3097 3100 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0);
3098 3101
3099 3102 rx_ring->mrh = rx_ring->mrh_init = rh;
3100 3103 infop->mri_driver = (mac_ring_driver_t)rx_ring;
3101 3104 infop->mri_start = e1000g_ring_start;
3102 3105 infop->mri_stop = NULL;
3103 3106 infop->mri_poll = e1000g_poll_ring;
3104 3107 infop->mri_stat = e1000g_rx_ring_stat;
3105 3108
3106 3109 /* Ring level interrupts */
3107 3110 mintr = &infop->mri_intr;
3108 3111 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
3109 3112 mintr->mi_enable = e1000g_rx_ring_intr_enable;
3110 3113 mintr->mi_disable = e1000g_rx_ring_intr_disable;
3111 3114 if (Adapter->msi_enable)
3112 3115 mintr->mi_ddi_handle = Adapter->htable[0];
3113 3116 }
3114 3117
3115 3118 /* ARGSUSED */
3116 3119 static void
3117 3120 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index,
3118 3121 mac_group_info_t *infop, mac_group_handle_t gh)
3119 3122 {
3120 3123 struct e1000g *Adapter = (struct e1000g *)arg;
3121 3124 mac_intr_t *mintr;
3122 3125
3123 3126 /*
3124 3127 * We advertised a single RX ring. Getting a request for anything else
3125 3128 * signifies a bug in the MAC framework.
3126 3129 */
3127 3130 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0);
3128 3131
3129 3132 Adapter->rx_group = gh;
3130 3133
3131 3134 infop->mgi_driver = (mac_group_driver_t)Adapter;
3132 3135 infop->mgi_start = NULL;
3133 3136 infop->mgi_stop = NULL;
3134 3137 infop->mgi_addmac = e1000g_addmac;
3135 3138 infop->mgi_remmac = e1000g_remmac;
3136 3139 infop->mgi_count = 1;
3137 3140
3138 3141 /* Group level interrupts */
3139 3142 mintr = &infop->mgi_intr;
3140 3143 mintr->mi_handle = (mac_intr_handle_t)Adapter;
3141 3144 mintr->mi_enable = e1000g_rx_group_intr_enable;
3142 3145 mintr->mi_disable = e1000g_rx_group_intr_disable;
3143 3146 }
3144 3147
3145 3148 static void
3146 3149 e1000g_led_blink(void *arg)
3147 3150 {
3148 3151 e1000g_t *e1000g = arg;
3149 3152
3150 3153 mutex_enter(&e1000g->e1000g_led_lock);
3151 3154 VERIFY(e1000g->e1000g_emul_blink);
3152 3155 if (e1000g->e1000g_emul_state) {
3153 3156 (void) e1000_led_on(&e1000g->shared);
3154 3157 } else {
3155 3158 (void) e1000_led_off(&e1000g->shared);
3156 3159 }
3157 3160 e1000g->e1000g_emul_state = !e1000g->e1000g_emul_state;
3158 3161 mutex_exit(&e1000g->e1000g_led_lock);
3159 3162 }
3160 3163
3161 3164 static int
3162 3165 e1000g_led_set(void *arg, mac_led_mode_t mode, uint_t flags)
3163 3166 {
3164 3167 e1000g_t *e1000g = arg;
3165 3168
3166 3169 if (flags != 0)
3167 3170 return (EINVAL);
3168 3171
3169 3172 if (mode != MAC_LED_DEFAULT &&
3170 3173 mode != MAC_LED_IDENT &&
3171 3174 mode != MAC_LED_OFF &&
3172 3175 mode != MAC_LED_ON)
3173 3176 return (ENOTSUP);
3174 3177
3175 3178 mutex_enter(&e1000g->e1000g_led_lock);
3176 3179
3177 3180 if ((mode == MAC_LED_IDENT || mode == MAC_LED_OFF ||
3178 3181 mode == MAC_LED_ON) &&
3179 3182 !e1000g->e1000g_led_setup) {
3180 3183 if (e1000_setup_led(&e1000g->shared) != E1000_SUCCESS) {
3181 3184 mutex_exit(&e1000g->e1000g_led_lock);
3182 3185 return (EIO);
3183 3186 }
3184 3187
3185 3188 e1000g->e1000g_led_setup = B_TRUE;
3186 3189 }
3187 3190
3188 3191 if (mode != MAC_LED_IDENT && e1000g->e1000g_blink != NULL) {
3189 3192 ddi_periodic_t id = e1000g->e1000g_blink;
3190 3193 e1000g->e1000g_blink = NULL;
3191 3194 mutex_exit(&e1000g->e1000g_led_lock);
3192 3195 ddi_periodic_delete(id);
3193 3196 mutex_enter(&e1000g->e1000g_led_lock);
3194 3197 }
3195 3198
3196 3199 switch (mode) {
3197 3200 case MAC_LED_DEFAULT:
3198 3201 if (e1000g->e1000g_led_setup) {
3199 3202 if (e1000_cleanup_led(&e1000g->shared) !=
3200 3203 E1000_SUCCESS) {
3201 3204 mutex_exit(&e1000g->e1000g_led_lock);
3202 3205 return (EIO);
3203 3206 }
3204 3207 e1000g->e1000g_led_setup = B_FALSE;
3205 3208 }
3206 3209 break;
3207 3210 case MAC_LED_IDENT:
3208 3211 if (e1000g->e1000g_emul_blink) {
3209 3212 if (e1000g->e1000g_blink != NULL)
3210 3213 break;
3211 3214
3212 3215 /*
3213 3216 			 * Note, we use a 200 ms period here as that is the
3214 3217 			 * rate at which section 10.1.3 of the 8254x Intel
3215 3218 			 * Manual (PCI/PCI-X Family of Gigabit Ethernet
3216 3219 			 * Controllers Software Developer's Manual) indicates
3217 3220 			 * the optional blink hardware operates.
3218 3221 */
3219 3222 e1000g->e1000g_blink =
3220 3223 ddi_periodic_add(e1000g_led_blink, e1000g,
3221 3224 200ULL * (NANOSEC / MILLISEC), DDI_IPL_0);
3222 3225 } else if (e1000_blink_led(&e1000g->shared) != E1000_SUCCESS) {
3223 3226 mutex_exit(&e1000g->e1000g_led_lock);
3224 3227 return (EIO);
3225 3228 }
3226 3229 break;
3227 3230 case MAC_LED_OFF:
3228 3231 if (e1000_led_off(&e1000g->shared) != E1000_SUCCESS) {
3229 3232 mutex_exit(&e1000g->e1000g_led_lock);
3230 3233 return (EIO);
3231 3234 }
3232 3235 break;
3233 3236 case MAC_LED_ON:
3234 3237 if (e1000_led_on(&e1000g->shared) != E1000_SUCCESS) {
3235 3238 mutex_exit(&e1000g->e1000g_led_lock);
3236 3239 return (EIO);
3237 3240 }
3238 3241 break;
3239 3242 default:
3240 3243 mutex_exit(&e1000g->e1000g_led_lock);
3241 3244 return (ENOTSUP);
3242 3245 }
3243 3246
3244 3247 mutex_exit(&e1000g->e1000g_led_lock);
3245 3248 return (0);
3246 3249
3247 3250 }
3248 3251
3249 3252 static boolean_t
3250 3253 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3251 3254 {
3252 3255 struct e1000g *Adapter = (struct e1000g *)arg;
3253 3256
3254 3257 switch (cap) {
3255 3258 case MAC_CAPAB_HCKSUM: {
3256 3259 uint32_t *txflags = cap_data;
3257 3260
3258 3261 if (Adapter->tx_hcksum_enable)
3259 3262 *txflags = HCKSUM_IPHDRCKSUM |
3260 3263 HCKSUM_INET_PARTIAL;
3261 3264 else
3262 3265 return (B_FALSE);
3263 3266 break;
3264 3267 }
3265 3268
3266 3269 case MAC_CAPAB_LSO: {
3267 3270 mac_capab_lso_t *cap_lso = cap_data;
3268 3271
3269 3272 if (Adapter->lso_enable) {
3270 3273 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
3271 3274 cap_lso->lso_basic_tcp_ipv4.lso_max =
3272 3275 E1000_LSO_MAXLEN;
3273 3276 } else
3274 3277 return (B_FALSE);
3275 3278 break;
3276 3279 }
3277 3280 case MAC_CAPAB_RINGS: {
3278 3281 mac_capab_rings_t *cap_rings = cap_data;
3279 3282
3280 3283 /* No TX rings exposed yet */
3281 3284 if (cap_rings->mr_type != MAC_RING_TYPE_RX)
3282 3285 return (B_FALSE);
3283 3286
3284 3287 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3285 3288 cap_rings->mr_rnum = 1;
3286 3289 cap_rings->mr_gnum = 1;
3287 3290 cap_rings->mr_rget = e1000g_fill_ring;
3288 3291 cap_rings->mr_gget = e1000g_fill_group;
3289 3292 break;
3290 3293 }
3291 3294 case MAC_CAPAB_LED: {
3292 3295 mac_capab_led_t *cap_led = cap_data;
3293 3296
3294 3297 cap_led->mcl_flags = 0;
3295 3298 cap_led->mcl_modes = MAC_LED_DEFAULT;
3296 3299 if (Adapter->shared.mac.ops.blink_led != NULL &&
3297 3300 Adapter->shared.mac.ops.blink_led !=
3298 3301 e1000_null_ops_generic) {
3299 3302 cap_led->mcl_modes |= MAC_LED_IDENT;
3300 3303 }
3301 3304
3302 3305 if (Adapter->shared.mac.ops.led_off != NULL &&
3303 3306 Adapter->shared.mac.ops.led_off !=
3304 3307 e1000_null_ops_generic) {
3305 3308 cap_led->mcl_modes |= MAC_LED_OFF;
3306 3309 }
3307 3310
3308 3311 if (Adapter->shared.mac.ops.led_on != NULL &&
3309 3312 Adapter->shared.mac.ops.led_on !=
3310 3313 e1000_null_ops_generic) {
3311 3314 cap_led->mcl_modes |= MAC_LED_ON;
3312 3315 }
3313 3316
3314 3317 /*
3315 3318 		 * Some hardware doesn't support blinking natively because it
3316 3319 		 * lacks the optional blink circuit. If it supports both off and
3317 3320 		 * on, then we'll emulate blinking ourselves.
3318 3321 */
3319 3322 if (((cap_led->mcl_modes & MAC_LED_IDENT) == 0) &&
3320 3323 ((cap_led->mcl_modes & MAC_LED_OFF) != 0) &&
3321 3324 ((cap_led->mcl_modes & MAC_LED_ON) != 0)) {
3322 3325 cap_led->mcl_modes |= MAC_LED_IDENT;
3323 3326 Adapter->e1000g_emul_blink = B_TRUE;
3324 3327 }
3325 3328
3326 3329 cap_led->mcl_set = e1000g_led_set;
3327 3330 break;
3328 3331 }
3329 3332 default:
3330 3333 return (B_FALSE);
3331 3334 }
3332 3335 return (B_TRUE);
3333 3336 }
3334 3337
3335 3338 static boolean_t
3336 3339 e1000g_param_locked(mac_prop_id_t pr_num)
3337 3340 {
3338 3341 /*
3339 3342 * All en_* parameters are locked (read-only) while
3340 3343 * the device is in any sort of loopback mode ...
3341 3344 */
3342 3345 switch (pr_num) {
3343 3346 case MAC_PROP_EN_1000FDX_CAP:
3344 3347 case MAC_PROP_EN_1000HDX_CAP:
3345 3348 case MAC_PROP_EN_100FDX_CAP:
3346 3349 case MAC_PROP_EN_100HDX_CAP:
3347 3350 case MAC_PROP_EN_10FDX_CAP:
3348 3351 case MAC_PROP_EN_10HDX_CAP:
3349 3352 case MAC_PROP_AUTONEG:
3350 3353 case MAC_PROP_FLOWCTRL:
3351 3354 return (B_TRUE);
3352 3355 }
3353 3356 return (B_FALSE);
3354 3357 }
3355 3358
3356 3359 /*
3357 3360 * callback function for set/get of properties
3358 3361 */
3359 3362 static int
3360 3363 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3361 3364 uint_t pr_valsize, const void *pr_val)
3362 3365 {
3363 3366 struct e1000g *Adapter = arg;
3364 3367 struct e1000_hw *hw = &Adapter->shared;
3365 3368 struct e1000_fc_info *fc = &Adapter->shared.fc;
3366 3369 int err = 0;
3367 3370 link_flowctrl_t flowctrl;
3368 3371 uint32_t cur_mtu, new_mtu;
3369 3372
3370 3373 rw_enter(&Adapter->chip_lock, RW_WRITER);
3371 3374
3372 3375 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3373 3376 rw_exit(&Adapter->chip_lock);
3374 3377 return (ECANCELED);
3375 3378 }
3376 3379
3377 3380 if (Adapter->loopback_mode != E1000G_LB_NONE &&
3378 3381 e1000g_param_locked(pr_num)) {
3379 3382 /*
3380 3383 * All en_* parameters are locked (read-only)
3381 3384 * while the device is in any sort of loopback mode.
3382 3385 */
3383 3386 rw_exit(&Adapter->chip_lock);
3384 3387 return (EBUSY);
3385 3388 }
3386 3389
3387 3390 switch (pr_num) {
3388 3391 case MAC_PROP_EN_1000FDX_CAP:
3389 3392 if (hw->phy.media_type != e1000_media_type_copper) {
3390 3393 err = ENOTSUP;
3391 3394 break;
3392 3395 }
3393 3396 Adapter->param_en_1000fdx = *(uint8_t *)pr_val;
3394 3397 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val;
3395 3398 goto reset;
3396 3399 case MAC_PROP_EN_100FDX_CAP:
3397 3400 if (hw->phy.media_type != e1000_media_type_copper) {
3398 3401 err = ENOTSUP;
3399 3402 break;
3400 3403 }
3401 3404 Adapter->param_en_100fdx = *(uint8_t *)pr_val;
3402 3405 Adapter->param_adv_100fdx = *(uint8_t *)pr_val;
3403 3406 goto reset;
3404 3407 case MAC_PROP_EN_100HDX_CAP:
3405 3408 if (hw->phy.media_type != e1000_media_type_copper) {
3406 3409 err = ENOTSUP;
3407 3410 break;
3408 3411 }
3409 3412 Adapter->param_en_100hdx = *(uint8_t *)pr_val;
3410 3413 Adapter->param_adv_100hdx = *(uint8_t *)pr_val;
3411 3414 goto reset;
3412 3415 case MAC_PROP_EN_10FDX_CAP:
3413 3416 if (hw->phy.media_type != e1000_media_type_copper) {
3414 3417 err = ENOTSUP;
3415 3418 break;
3416 3419 }
3417 3420 Adapter->param_en_10fdx = *(uint8_t *)pr_val;
3418 3421 Adapter->param_adv_10fdx = *(uint8_t *)pr_val;
3419 3422 goto reset;
3420 3423 case MAC_PROP_EN_10HDX_CAP:
3421 3424 if (hw->phy.media_type != e1000_media_type_copper) {
3422 3425 err = ENOTSUP;
3423 3426 break;
3424 3427 }
3425 3428 Adapter->param_en_10hdx = *(uint8_t *)pr_val;
3426 3429 Adapter->param_adv_10hdx = *(uint8_t *)pr_val;
3427 3430 goto reset;
3428 3431 case MAC_PROP_AUTONEG:
3429 3432 if (hw->phy.media_type != e1000_media_type_copper) {
3430 3433 err = ENOTSUP;
3431 3434 break;
3432 3435 }
3433 3436 Adapter->param_adv_autoneg = *(uint8_t *)pr_val;
3434 3437 goto reset;
3435 3438 case MAC_PROP_FLOWCTRL:
3436 3439 fc->send_xon = B_TRUE;
3437 3440 bcopy(pr_val, &flowctrl, sizeof (flowctrl));
3438 3441
3439 3442 switch (flowctrl) {
3440 3443 default:
3441 3444 err = EINVAL;
3442 3445 break;
3443 3446 case LINK_FLOWCTRL_NONE:
3444 3447 fc->requested_mode = e1000_fc_none;
3445 3448 break;
3446 3449 case LINK_FLOWCTRL_RX:
3447 3450 fc->requested_mode = e1000_fc_rx_pause;
3448 3451 break;
3449 3452 case LINK_FLOWCTRL_TX:
3450 3453 fc->requested_mode = e1000_fc_tx_pause;
3451 3454 break;
3452 3455 case LINK_FLOWCTRL_BI:
3453 3456 fc->requested_mode = e1000_fc_full;
3454 3457 break;
3455 3458 }
3456 3459 reset:
3457 3460 if (err == 0) {
3458 3461 /* check PCH limits & reset the link */
3459 3462 e1000g_pch_limits(Adapter);
3460 3463 if (e1000g_reset_link(Adapter) != DDI_SUCCESS)
3461 3464 err = EINVAL;
3462 3465 }
3463 3466 break;
3464 3467 case MAC_PROP_ADV_1000FDX_CAP:
3465 3468 case MAC_PROP_ADV_1000HDX_CAP:
3466 3469 case MAC_PROP_ADV_100FDX_CAP:
3467 3470 case MAC_PROP_ADV_100HDX_CAP:
3468 3471 case MAC_PROP_ADV_10FDX_CAP:
3469 3472 case MAC_PROP_ADV_10HDX_CAP:
3470 3473 case MAC_PROP_EN_1000HDX_CAP:
3471 3474 case MAC_PROP_STATUS:
3472 3475 case MAC_PROP_SPEED:
3473 3476 case MAC_PROP_DUPLEX:
3474 3477 err = ENOTSUP; /* read-only prop. Can't set this. */
3475 3478 break;
3476 3479 case MAC_PROP_MTU:
3477 3480 /* adapter must be stopped for an MTU change */
3478 3481 if (Adapter->e1000g_state & E1000G_STARTED) {
3479 3482 err = EBUSY;
3480 3483 break;
3481 3484 }
3482 3485
3483 3486 cur_mtu = Adapter->default_mtu;
3484 3487
3485 3488 /* get new requested MTU */
3486 3489 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3487 3490 if (new_mtu == cur_mtu) {
3488 3491 err = 0;
3489 3492 break;
3490 3493 }
3491 3494
3492 3495 if ((new_mtu < DEFAULT_MTU) ||
3493 3496 (new_mtu > Adapter->max_mtu)) {
3494 3497 err = EINVAL;
3495 3498 break;
3496 3499 }
3497 3500
3498 3501 /* inform MAC framework of new MTU */
3499 3502 err = mac_maxsdu_update(Adapter->mh, new_mtu);
3500 3503
3501 3504 if (err == 0) {
3502 3505 Adapter->default_mtu = new_mtu;
3503 3506 Adapter->max_frame_size =
3504 3507 e1000g_mtu2maxframe(new_mtu);
3505 3508
3506 3509 /*
3507 3510 * check PCH limits & set buffer sizes to
3508 3511 * match new MTU
3509 3512 */
3510 3513 e1000g_pch_limits(Adapter);
3511 3514 e1000g_set_bufsize(Adapter);
3512 3515
3513 3516 /*
3514 3517 * decrease the number of descriptors and free
3515 3518 * packets for jumbo frames to reduce tx/rx
3516 3519 * resource consumption
3517 3520 */
3518 3521 if (Adapter->max_frame_size >=
3519 3522 (FRAME_SIZE_UPTO_4K)) {
3520 3523 if (Adapter->tx_desc_num_flag == 0)
3521 3524 Adapter->tx_desc_num =
3522 3525 DEFAULT_JUMBO_NUM_TX_DESC;
3523 3526
3524 3527 if (Adapter->rx_desc_num_flag == 0)
3525 3528 Adapter->rx_desc_num =
3526 3529 DEFAULT_JUMBO_NUM_RX_DESC;
3527 3530
3528 3531 if (Adapter->tx_buf_num_flag == 0)
3529 3532 Adapter->tx_freelist_num =
3530 3533 DEFAULT_JUMBO_NUM_TX_BUF;
3531 3534
3532 3535 if (Adapter->rx_buf_num_flag == 0)
3533 3536 Adapter->rx_freelist_limit =
3534 3537 DEFAULT_JUMBO_NUM_RX_BUF;
3535 3538 } else {
3536 3539 if (Adapter->tx_desc_num_flag == 0)
3537 3540 Adapter->tx_desc_num =
3538 3541 DEFAULT_NUM_TX_DESCRIPTOR;
3539 3542
3540 3543 if (Adapter->rx_desc_num_flag == 0)
3541 3544 Adapter->rx_desc_num =
3542 3545 DEFAULT_NUM_RX_DESCRIPTOR;
3543 3546
3544 3547 if (Adapter->tx_buf_num_flag == 0)
3545 3548 Adapter->tx_freelist_num =
3546 3549 DEFAULT_NUM_TX_FREELIST;
3547 3550
3548 3551 if (Adapter->rx_buf_num_flag == 0)
3549 3552 Adapter->rx_freelist_limit =
3550 3553 DEFAULT_NUM_RX_FREELIST;
3551 3554 }
3552 3555 }
3553 3556 break;
3554 3557 case MAC_PROP_PRIVATE:
3555 3558 err = e1000g_set_priv_prop(Adapter, pr_name,
3556 3559 pr_valsize, pr_val);
3557 3560 break;
3558 3561 default:
3559 3562 err = ENOTSUP;
3560 3563 break;
3561 3564 }
3562 3565 rw_exit(&Adapter->chip_lock);
3563 3566 return (err);
3564 3567 }
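The properties handled above are driven through the GLDv3 link-property interface. A hedged usage sketch, assuming the first instance is named e1000g0 and that the device supports the requested jumbo MTU (both assumptions, not taken from this file):

	# dladm set-linkprop -p flowctrl=bi e1000g0
	# dladm set-linkprop -p mtu=4074 e1000g0

The flowctrl values no/rx/tx/bi map onto the e1000_fc_* requested modes in the MAC_PROP_FLOWCTRL case, and the mtu case returns EBUSY while the adapter is started, matching the E1000G_STARTED check above.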
3565 3568
3566 3569 static int
3567 3570 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3568 3571 uint_t pr_valsize, void *pr_val)
3569 3572 {
3570 3573 struct e1000g *Adapter = arg;
3571 3574 struct e1000_fc_info *fc = &Adapter->shared.fc;
3572 3575 int err = 0;
3573 3576 link_flowctrl_t flowctrl;
3574 3577 uint64_t tmp = 0;
3575 3578
3576 3579 switch (pr_num) {
3577 3580 case MAC_PROP_DUPLEX:
3578 3581 ASSERT(pr_valsize >= sizeof (link_duplex_t));
3579 3582 bcopy(&Adapter->link_duplex, pr_val,
3580 3583 sizeof (link_duplex_t));
3581 3584 break;
3582 3585 case MAC_PROP_SPEED:
3583 3586 ASSERT(pr_valsize >= sizeof (uint64_t));
3584 3587 tmp = Adapter->link_speed * 1000000ull;
3585 3588 bcopy(&tmp, pr_val, sizeof (tmp));
3586 3589 break;
3587 3590 case MAC_PROP_AUTONEG:
3588 3591 *(uint8_t *)pr_val = Adapter->param_adv_autoneg;
3589 3592 break;
3590 3593 case MAC_PROP_FLOWCTRL:
3591 3594 ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3592 3595 switch (fc->current_mode) {
3593 3596 case e1000_fc_none:
3594 3597 flowctrl = LINK_FLOWCTRL_NONE;
3595 3598 break;
3596 3599 case e1000_fc_rx_pause:
3597 3600 flowctrl = LINK_FLOWCTRL_RX;
3598 3601 break;
3599 3602 case e1000_fc_tx_pause:
3600 3603 flowctrl = LINK_FLOWCTRL_TX;
3601 3604 break;
3602 3605 case e1000_fc_full:
3603 3606 flowctrl = LINK_FLOWCTRL_BI;
3604 3607 break;
3605 3608 }
3606 3609 bcopy(&flowctrl, pr_val, sizeof (flowctrl));
3607 3610 break;
3608 3611 case MAC_PROP_ADV_1000FDX_CAP:
3609 3612 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx;
3610 3613 break;
3611 3614 case MAC_PROP_EN_1000FDX_CAP:
3612 3615 *(uint8_t *)pr_val = Adapter->param_en_1000fdx;
3613 3616 break;
3614 3617 case MAC_PROP_ADV_1000HDX_CAP:
3615 3618 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx;
3616 3619 break;
3617 3620 case MAC_PROP_EN_1000HDX_CAP:
3618 3621 *(uint8_t *)pr_val = Adapter->param_en_1000hdx;
3619 3622 break;
3620 3623 case MAC_PROP_ADV_100FDX_CAP:
3621 3624 *(uint8_t *)pr_val = Adapter->param_adv_100fdx;
3622 3625 break;
3623 3626 case MAC_PROP_EN_100FDX_CAP:
3624 3627 *(uint8_t *)pr_val = Adapter->param_en_100fdx;
3625 3628 break;
3626 3629 case MAC_PROP_ADV_100HDX_CAP:
3627 3630 *(uint8_t *)pr_val = Adapter->param_adv_100hdx;
3628 3631 break;
3629 3632 case MAC_PROP_EN_100HDX_CAP:
3630 3633 *(uint8_t *)pr_val = Adapter->param_en_100hdx;
3631 3634 break;
3632 3635 case MAC_PROP_ADV_10FDX_CAP:
3633 3636 *(uint8_t *)pr_val = Adapter->param_adv_10fdx;
3634 3637 break;
3635 3638 case MAC_PROP_EN_10FDX_CAP:
3636 3639 *(uint8_t *)pr_val = Adapter->param_en_10fdx;
3637 3640 break;
3638 3641 case MAC_PROP_ADV_10HDX_CAP:
3639 3642 *(uint8_t *)pr_val = Adapter->param_adv_10hdx;
3640 3643 break;
3641 3644 case MAC_PROP_EN_10HDX_CAP:
3642 3645 *(uint8_t *)pr_val = Adapter->param_en_10hdx;
3643 3646 break;
3644 3647 case MAC_PROP_ADV_100T4_CAP:
3645 3648 case MAC_PROP_EN_100T4_CAP:
3646 3649 *(uint8_t *)pr_val = Adapter->param_adv_100t4;
3647 3650 break;
3648 3651 case MAC_PROP_PRIVATE:
3649 3652 err = e1000g_get_priv_prop(Adapter, pr_name,
3650 3653 pr_valsize, pr_val);
3651 3654 break;
3652 3655 default:
3653 3656 err = ENOTSUP;
3654 3657 break;
3655 3658 }
3656 3659
3657 3660 return (err);
3658 3661 }
3659 3662
3660 3663 static void
3661 3664 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3662 3665 mac_prop_info_handle_t prh)
3663 3666 {
3664 3667 struct e1000g *Adapter = arg;
3665 3668 struct e1000_hw *hw = &Adapter->shared;
3666 3669
3667 3670 switch (pr_num) {
3668 3671 case MAC_PROP_DUPLEX:
3669 3672 case MAC_PROP_SPEED:
3670 3673 case MAC_PROP_ADV_1000FDX_CAP:
3671 3674 case MAC_PROP_ADV_1000HDX_CAP:
3672 3675 case MAC_PROP_ADV_100FDX_CAP:
3673 3676 case MAC_PROP_ADV_100HDX_CAP:
3674 3677 case MAC_PROP_ADV_10FDX_CAP:
3675 3678 case MAC_PROP_ADV_10HDX_CAP:
3676 3679 case MAC_PROP_ADV_100T4_CAP:
3677 3680 case MAC_PROP_EN_100T4_CAP:
3678 3681 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3679 3682 break;
3680 3683
3681 3684 case MAC_PROP_EN_1000FDX_CAP:
3682 3685 if (hw->phy.media_type != e1000_media_type_copper) {
3683 3686 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3684 3687 } else {
3685 3688 mac_prop_info_set_default_uint8(prh,
3686 3689 ((Adapter->phy_ext_status &
3687 3690 IEEE_ESR_1000T_FD_CAPS) ||
3688 3691 (Adapter->phy_ext_status &
3689 3692 IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
3690 3693 }
3691 3694 break;
3692 3695
3693 3696 case MAC_PROP_EN_100FDX_CAP:
3694 3697 if (hw->phy.media_type != e1000_media_type_copper) {
3695 3698 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3696 3699 } else {
3697 3700 mac_prop_info_set_default_uint8(prh,
3698 3701 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
3699 3702 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
3700 3703 ? 1 : 0);
3701 3704 }
3702 3705 break;
3703 3706
3704 3707 case MAC_PROP_EN_100HDX_CAP:
3705 3708 if (hw->phy.media_type != e1000_media_type_copper) {
3706 3709 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3707 3710 } else {
3708 3711 mac_prop_info_set_default_uint8(prh,
3709 3712 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
3710 3713 (Adapter->phy_status & MII_SR_100T2_HD_CAPS))
3711 3714 ? 1 : 0);
3712 3715 }
3713 3716 break;
3714 3717
3715 3718 case MAC_PROP_EN_10FDX_CAP:
3716 3719 if (hw->phy.media_type != e1000_media_type_copper) {
3717 3720 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3718 3721 } else {
3719 3722 mac_prop_info_set_default_uint8(prh,
3720 3723 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
3721 3724 }
3722 3725 break;
3723 3726
3724 3727 case MAC_PROP_EN_10HDX_CAP:
3725 3728 if (hw->phy.media_type != e1000_media_type_copper) {
3726 3729 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3727 3730 } else {
3728 3731 mac_prop_info_set_default_uint8(prh,
3729 3732 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
3730 3733 }
3731 3734 break;
3732 3735
3733 3736 case MAC_PROP_EN_1000HDX_CAP:
3734 3737 if (hw->phy.media_type != e1000_media_type_copper)
3735 3738 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3736 3739 break;
3737 3740
3738 3741 case MAC_PROP_AUTONEG:
3739 3742 if (hw->phy.media_type != e1000_media_type_copper) {
3740 3743 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3741 3744 } else {
3742 3745 mac_prop_info_set_default_uint8(prh,
3743 3746 (Adapter->phy_status & MII_SR_AUTONEG_CAPS)
3744 3747 ? 1 : 0);
3745 3748 }
3746 3749 break;
3747 3750
3748 3751 case MAC_PROP_FLOWCTRL:
3749 3752 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
3750 3753 break;
3751 3754
3752 3755 case MAC_PROP_MTU: {
3753 3756 struct e1000_mac_info *mac = &Adapter->shared.mac;
3754 3757 struct e1000_phy_info *phy = &Adapter->shared.phy;
3755 3758 uint32_t max;
3756 3759
3757 3760 /* some MAC types do not support jumbo frames */
3758 3761 if ((mac->type == e1000_ich8lan) ||
3759 3762 ((mac->type == e1000_ich9lan) && (phy->type ==
3760 3763 e1000_phy_ife))) {
3761 3764 max = DEFAULT_MTU;
3762 3765 } else {
3763 3766 max = Adapter->max_mtu;
3764 3767 }
3765 3768
3766 3769 mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max);
3767 3770 break;
3768 3771 }
3769 3772 case MAC_PROP_PRIVATE: {
3770 3773 char valstr[64];
3771 3774 int value;
3772 3775
3773 3776 if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
3774 3777 strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3775 3778 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3776 3779 return;
3777 3780 } else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3778 3781 value = DEFAULT_TX_BCOPY_THRESHOLD;
3779 3782 } else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3780 3783 value = DEFAULT_TX_INTR_ENABLE;
3781 3784 } else if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3782 3785 value = DEFAULT_TX_INTR_DELAY;
3783 3786 } else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3784 3787 value = DEFAULT_TX_INTR_ABS_DELAY;
3785 3788 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3786 3789 value = DEFAULT_RX_BCOPY_THRESHOLD;
3787 3790 } else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3788 3791 value = DEFAULT_RX_LIMIT_ON_INTR;
3789 3792 } else if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3790 3793 value = DEFAULT_RX_INTR_DELAY;
3791 3794 } else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3792 3795 value = DEFAULT_RX_INTR_ABS_DELAY;
3793 3796 } else if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3794 3797 value = DEFAULT_INTR_THROTTLING;
3795 3798 } else if (strcmp(pr_name, "_intr_adaptive") == 0) {
3796 3799 value = 1;
3797 3800 } else {
3798 3801 return;
3799 3802 }
3800 3803
3801 3804 (void) snprintf(valstr, sizeof (valstr), "%d", value);
3802 3805 mac_prop_info_set_default_str(prh, valstr);
3803 3806 break;
3804 3807 }
3805 3808 }
3806 3809 }
3807 3810
3808 3811 /* ARGSUSED2 */
3809 3812 static int
3810 3813 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name,
3811 3814 uint_t pr_valsize, const void *pr_val)
3812 3815 {
3813 3816 int err = 0;
3814 3817 long result;
3815 3818 struct e1000_hw *hw = &Adapter->shared;
3816 3819
3817 3820 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3818 3821 if (pr_val == NULL) {
3819 3822 err = EINVAL;
3820 3823 return (err);
3821 3824 }
3822 3825 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3823 3826 if (result < MIN_TX_BCOPY_THRESHOLD ||
3824 3827 result > MAX_TX_BCOPY_THRESHOLD)
3825 3828 err = EINVAL;
3826 3829 else {
3827 3830 Adapter->tx_bcopy_thresh = (uint32_t)result;
3828 3831 }
3829 3832 return (err);
3830 3833 }
3831 3834 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3832 3835 if (pr_val == NULL) {
3833 3836 err = EINVAL;
3834 3837 return (err);
3835 3838 }
3836 3839 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3837 3840 if (result < 0 || result > 1)
3838 3841 err = EINVAL;
3839 3842 else {
3840 3843 Adapter->tx_intr_enable = (result == 1) ?
3841 3844 B_TRUE: B_FALSE;
3842 3845 if (Adapter->tx_intr_enable)
3843 3846 e1000g_mask_tx_interrupt(Adapter);
3844 3847 else
3845 3848 e1000g_clear_tx_interrupt(Adapter);
3846 3849 if (e1000g_check_acc_handle(
3847 3850 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3848 3851 ddi_fm_service_impact(Adapter->dip,
3849 3852 DDI_SERVICE_DEGRADED);
3850 3853 err = EIO;
3851 3854 }
3852 3855 }
3853 3856 return (err);
3854 3857 }
3855 3858 if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3856 3859 if (pr_val == NULL) {
3857 3860 err = EINVAL;
3858 3861 return (err);
3859 3862 }
3860 3863 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3861 3864 if (result < MIN_TX_INTR_DELAY ||
3862 3865 result > MAX_TX_INTR_DELAY)
3863 3866 err = EINVAL;
3864 3867 else {
3865 3868 Adapter->tx_intr_delay = (uint32_t)result;
3866 3869 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
3867 3870 if (e1000g_check_acc_handle(
3868 3871 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3869 3872 ddi_fm_service_impact(Adapter->dip,
3870 3873 DDI_SERVICE_DEGRADED);
3871 3874 err = EIO;
3872 3875 }
3873 3876 }
3874 3877 return (err);
3875 3878 }
3876 3879 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3877 3880 if (pr_val == NULL) {
3878 3881 err = EINVAL;
3879 3882 return (err);
3880 3883 }
3881 3884 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3882 3885 if (result < MIN_TX_INTR_ABS_DELAY ||
3883 3886 result > MAX_TX_INTR_ABS_DELAY)
3884 3887 err = EINVAL;
3885 3888 else {
3886 3889 Adapter->tx_intr_abs_delay = (uint32_t)result;
3887 3890 E1000_WRITE_REG(hw, E1000_TADV,
3888 3891 Adapter->tx_intr_abs_delay);
3889 3892 if (e1000g_check_acc_handle(
3890 3893 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3891 3894 ddi_fm_service_impact(Adapter->dip,
3892 3895 DDI_SERVICE_DEGRADED);
3893 3896 err = EIO;
3894 3897 }
3895 3898 }
3896 3899 return (err);
3897 3900 }
3898 3901 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3899 3902 if (pr_val == NULL) {
3900 3903 err = EINVAL;
3901 3904 return (err);
3902 3905 }
3903 3906 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3904 3907 if (result < MIN_RX_BCOPY_THRESHOLD ||
3905 3908 result > MAX_RX_BCOPY_THRESHOLD)
3906 3909 err = EINVAL;
3907 3910 else
3908 3911 Adapter->rx_bcopy_thresh = (uint32_t)result;
3909 3912 return (err);
3910 3913 }
3911 3914 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3912 3915 if (pr_val == NULL) {
3913 3916 err = EINVAL;
3914 3917 return (err);
3915 3918 }
3916 3919 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3917 3920 if (result < MIN_RX_LIMIT_ON_INTR ||
3918 3921 result > MAX_RX_LIMIT_ON_INTR)
3919 3922 err = EINVAL;
3920 3923 else
3921 3924 Adapter->rx_limit_onintr = (uint32_t)result;
3922 3925 return (err);
3923 3926 }
3924 3927 if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3925 3928 if (pr_val == NULL) {
3926 3929 err = EINVAL;
3927 3930 return (err);
3928 3931 }
3929 3932 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3930 3933 if (result < MIN_RX_INTR_DELAY ||
3931 3934 result > MAX_RX_INTR_DELAY)
3932 3935 err = EINVAL;
3933 3936 else {
3934 3937 Adapter->rx_intr_delay = (uint32_t)result;
3935 3938 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay);
3936 3939 if (e1000g_check_acc_handle(
3937 3940 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3938 3941 ddi_fm_service_impact(Adapter->dip,
3939 3942 DDI_SERVICE_DEGRADED);
3940 3943 err = EIO;
3941 3944 }
3942 3945 }
3943 3946 return (err);
3944 3947 }
3945 3948 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3946 3949 if (pr_val == NULL) {
3947 3950 err = EINVAL;
3948 3951 return (err);
3949 3952 }
3950 3953 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3951 3954 if (result < MIN_RX_INTR_ABS_DELAY ||
3952 3955 result > MAX_RX_INTR_ABS_DELAY)
3953 3956 err = EINVAL;
3954 3957 else {
3955 3958 Adapter->rx_intr_abs_delay = (uint32_t)result;
3956 3959 E1000_WRITE_REG(hw, E1000_RADV,
3957 3960 Adapter->rx_intr_abs_delay);
3958 3961 if (e1000g_check_acc_handle(
3959 3962 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3960 3963 ddi_fm_service_impact(Adapter->dip,
3961 3964 DDI_SERVICE_DEGRADED);
3962 3965 err = EIO;
3963 3966 }
3964 3967 }
3965 3968 return (err);
3966 3969 }
3967 3970 if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3968 3971 if (pr_val == NULL) {
3969 3972 err = EINVAL;
3970 3973 return (err);
3971 3974 }
3972 3975 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3973 3976 if (result < MIN_INTR_THROTTLING ||
3974 3977 result > MAX_INTR_THROTTLING)
3975 3978 err = EINVAL;
3976 3979 else {
3977 3980 if (hw->mac.type >= e1000_82540) {
3978 3981 Adapter->intr_throttling_rate =
3979 3982 (uint32_t)result;
3980 3983 E1000_WRITE_REG(hw, E1000_ITR,
3981 3984 Adapter->intr_throttling_rate);
3982 3985 if (e1000g_check_acc_handle(
3983 3986 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3984 3987 ddi_fm_service_impact(Adapter->dip,
3985 3988 DDI_SERVICE_DEGRADED);
3986 3989 err = EIO;
3987 3990 }
3988 3991 } else
3989 3992 err = EINVAL;
3990 3993 }
3991 3994 return (err);
3992 3995 }
3993 3996 if (strcmp(pr_name, "_intr_adaptive") == 0) {
3994 3997 if (pr_val == NULL) {
3995 3998 err = EINVAL;
3996 3999 return (err);
3997 4000 }
3998 4001 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3999 4002 if (result < 0 || result > 1)
4000 4003 err = EINVAL;
4001 4004 else {
4002 4005 if (hw->mac.type >= e1000_82540) {
4003 4006 Adapter->intr_adaptive = (result == 1) ?
4004 4007 B_TRUE : B_FALSE;
4005 4008 } else {
4006 4009 err = EINVAL;
4007 4010 }
4008 4011 }
4009 4012 return (err);
4010 4013 }
4011 4014 return (ENOTSUP);
4012 4015 }
4013 4016
4014 4017 static int
4015 4018 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name,
4016 4019 uint_t pr_valsize, void *pr_val)
4017 4020 {
4018 4021 int err = ENOTSUP;
4019 4022 int value;
4020 4023
4021 4024 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
4022 4025 value = Adapter->param_adv_pause;
4023 4026 err = 0;
4024 4027 goto done;
4025 4028 }
4026 4029 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
4027 4030 value = Adapter->param_adv_asym_pause;
4028 4031 err = 0;
4029 4032 goto done;
4030 4033 }
4031 4034 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
4032 4035 value = Adapter->tx_bcopy_thresh;
4033 4036 err = 0;
4034 4037 goto done;
4035 4038 }
4036 4039 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
4037 4040 value = Adapter->tx_intr_enable;
4038 4041 err = 0;
4039 4042 goto done;
4040 4043 }
4041 4044 if (strcmp(pr_name, "_tx_intr_delay") == 0) {
4042 4045 value = Adapter->tx_intr_delay;
4043 4046 err = 0;
4044 4047 goto done;
4045 4048 }
4046 4049 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
4047 4050 value = Adapter->tx_intr_abs_delay;
4048 4051 err = 0;
4049 4052 goto done;
4050 4053 }
4051 4054 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
4052 4055 value = Adapter->rx_bcopy_thresh;
4053 4056 err = 0;
4054 4057 goto done;
4055 4058 }
4056 4059 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
4057 4060 value = Adapter->rx_limit_onintr;
4058 4061 err = 0;
4059 4062 goto done;
4060 4063 }
4061 4064 if (strcmp(pr_name, "_rx_intr_delay") == 0) {
4062 4065 value = Adapter->rx_intr_delay;
4063 4066 err = 0;
4064 4067 goto done;
4065 4068 }
4066 4069 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
4067 4070 value = Adapter->rx_intr_abs_delay;
4068 4071 err = 0;
4069 4072 goto done;
4070 4073 }
4071 4074 if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
4072 4075 value = Adapter->intr_throttling_rate;
4073 4076 err = 0;
4074 4077 goto done;
4075 4078 }
4076 4079 if (strcmp(pr_name, "_intr_adaptive") == 0) {
4077 4080 value = Adapter->intr_adaptive;
4078 4081 err = 0;
4079 4082 goto done;
4080 4083 }
4081 4084 done:
4082 4085 if (err == 0) {
4083 4086 (void) snprintf(pr_val, pr_valsize, "%d", value);
4084 4087 }
4085 4088 return (err);
4086 4089 }
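The private properties above are exposed to userland with their leading underscore intact. A hedged sketch of inspecting and tuning one of them, again assuming a link named e1000g0 and using a placeholder value:

	# dladm show-linkprop -p _tx_intr_delay e1000g0
	# dladm set-linkprop -p _tx_intr_delay=<value> e1000g0

Set requests are parsed with ddi_strtol() in e1000g_set_priv_prop() and rejected with EINVAL when the value falls outside the MIN_/MAX_ bounds.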
4087 4090
4088 4091 /*
4089 4092 * e1000g_get_conf - get configurations set in e1000g.conf
4090 4093 * This routine gets user-configured values out of the configuration
4091 4094 * file e1000g.conf.
4092 4095 *
4093 4096 * For each configurable value, there is a minimum, a maximum, and a
4094 4097 * default.
4095 4098  * If the user does not configure a value, use the default.
4096 4099  * If the user configures below the minimum, use the minimum.
4097 4100  * If the user configures above the maximum, use the maximum.
4098 4101 */
4099 4102 static void
4100 4103 e1000g_get_conf(struct e1000g *Adapter)
4101 4104 {
4102 4105 struct e1000_hw *hw = &Adapter->shared;
4103 4106 boolean_t tbi_compatibility = B_FALSE;
4104 4107 boolean_t is_jumbo = B_FALSE;
4105 4108 int propval;
4106 4109 /*
4107 4110 * decrease the number of descriptors and free packets
4108 4111 * for jumbo frames to reduce tx/rx resource consumption
4109 4112 */
4110 4113 if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) {
4111 4114 is_jumbo = B_TRUE;
4112 4115 }
4113 4116
4114 4117 /*
4115 4118 * get each configurable property from e1000g.conf
4116 4119 */
4117 4120
4118 4121 /*
4119 4122 * NumTxDescriptors
4120 4123 */
4121 4124 Adapter->tx_desc_num_flag =
4122 4125 e1000g_get_prop(Adapter, "NumTxDescriptors",
4123 4126 MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
4124 4127 is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC
4125 4128 : DEFAULT_NUM_TX_DESCRIPTOR, &propval);
4126 4129 Adapter->tx_desc_num = propval;
4127 4130
4128 4131 /*
4129 4132 * NumRxDescriptors
4130 4133 */
4131 4134 Adapter->rx_desc_num_flag =
4132 4135 e1000g_get_prop(Adapter, "NumRxDescriptors",
4133 4136 MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
4134 4137 is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC
4135 4138 : DEFAULT_NUM_RX_DESCRIPTOR, &propval);
4136 4139 Adapter->rx_desc_num = propval;
4137 4140
4138 4141 /*
4139 4142 * NumRxFreeList
4140 4143 */
4141 4144 Adapter->rx_buf_num_flag =
4142 4145 e1000g_get_prop(Adapter, "NumRxFreeList",
4143 4146 MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
4144 4147 is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF
4145 4148 : DEFAULT_NUM_RX_FREELIST, &propval);
4146 4149 Adapter->rx_freelist_limit = propval;
4147 4150
4148 4151 /*
4149 4152 * NumTxPacketList
4150 4153 */
4151 4154 Adapter->tx_buf_num_flag =
4152 4155 e1000g_get_prop(Adapter, "NumTxPacketList",
4153 4156 MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
4154 4157 is_jumbo ? DEFAULT_JUMBO_NUM_TX_BUF
4155 4158 : DEFAULT_NUM_TX_FREELIST, &propval);
4156 4159 Adapter->tx_freelist_num = propval;
4157 4160
4158 4161 /*
4159 4162 * FlowControl
4160 4163 */
4161 4164 hw->fc.send_xon = B_TRUE;
4162 4165 (void) e1000g_get_prop(Adapter, "FlowControl",
4163 4166 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval);
4164 4167 hw->fc.requested_mode = propval;
4165 4168 /* 4 is the setting that says "let the eeprom decide" */
4166 4169 if (hw->fc.requested_mode == 4)
4167 4170 hw->fc.requested_mode = e1000_fc_default;
4168 4171
4169 4172 /*
4170 4173 * Max Num Receive Packets on Interrupt
4171 4174 */
4172 4175 (void) e1000g_get_prop(Adapter, "MaxNumReceivePackets",
4173 4176 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
4174 4177 DEFAULT_RX_LIMIT_ON_INTR, &propval);
4175 4178 Adapter->rx_limit_onintr = propval;
4176 4179
4177 4180 /*
4178 4181 * PHY master slave setting
4179 4182 */
4180 4183 (void) e1000g_get_prop(Adapter, "SetMasterSlave",
4181 4184 e1000_ms_hw_default, e1000_ms_auto,
4182 4185 e1000_ms_hw_default, &propval);
4183 4186 hw->phy.ms_type = propval;
4184 4187
4185 4188 /*
4186 4189 	 * Parameter which controls the TBI mode workaround, which is only
4187 4190 	 * needed on certain switches such as Cisco 6500/Foundry.
4188 4191 */
4189 4192 (void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
4190 4193 0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval);
4191 4194 tbi_compatibility = (propval == 1);
4192 4195 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
4193 4196
4194 4197 /*
4195 4198 * MSI Enable
4196 4199 */
4197 4200 (void) e1000g_get_prop(Adapter, "MSIEnable",
4198 4201 0, 1, DEFAULT_MSI_ENABLE, &propval);
4199 4202 Adapter->msi_enable = (propval == 1);
4200 4203
4201 4204 /*
4202 4205 * Interrupt Throttling Rate
4203 4206 */
4204 4207 (void) e1000g_get_prop(Adapter, "intr_throttling_rate",
4205 4208 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
4206 4209 DEFAULT_INTR_THROTTLING, &propval);
4207 4210 Adapter->intr_throttling_rate = propval;
4208 4211
4209 4212 /*
4210 4213 * Adaptive Interrupt Blanking Enable/Disable
4211 4214 * It is enabled by default
4212 4215 */
4213 4216 (void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1,
4214 4217 &propval);
4215 4218 Adapter->intr_adaptive = (propval == 1);
4216 4219
4217 4220 /*
4218 4221 * Hardware checksum enable/disable parameter
4219 4222 */
4220 4223 (void) e1000g_get_prop(Adapter, "tx_hcksum_enable",
4221 4224 0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval);
4222 4225 Adapter->tx_hcksum_enable = (propval == 1);
4223 4226 /*
4224 4227 * Checksum on/off selection via global parameters.
4225 4228 *
4226 4229 * If the chip is flagged as not capable of (correctly)
4227 4230 * handling checksumming, we don't enable it on either
4228 4231 * Rx or Tx side. Otherwise, we take this chip's settings
4229 4232 * from the patchable global defaults.
4230 4233 *
4231 4234 * We advertise our capabilities only if TX offload is
4232 4235 * enabled. On receive, the stack will accept checksummed
4233 4236 * packets anyway, even if we haven't said we can deliver
4234 4237 * them.
4235 4238 */
4236 4239 switch (hw->mac.type) {
4237 4240 case e1000_82540:
4238 4241 case e1000_82544:
4239 4242 case e1000_82545:
4240 4243 case e1000_82545_rev_3:
4241 4244 case e1000_82546:
4242 4245 case e1000_82546_rev_3:
4243 4246 case e1000_82571:
4244 4247 case e1000_82572:
4245 4248 case e1000_82573:
4246 4249 case e1000_80003es2lan:
4247 4250 break;
4248 4251 /*
4249 4252 * For the following Intel PRO/1000 chipsets, we have not
4250 4253 * tested the hardware checksum offload capability, so we
4251 4254 * disable the capability for them.
4252 4255 * e1000_82542,
4253 4256 * e1000_82543,
4254 4257 * e1000_82541,
4255 4258 * e1000_82541_rev_2,
4256 4259 * e1000_82547,
4257 4260 * e1000_82547_rev_2,
4258 4261 */
4259 4262 default:
4260 4263 Adapter->tx_hcksum_enable = B_FALSE;
4261 4264 }
4262 4265
4263 4266 /*
4264 4267 * Large Send Offloading(LSO) Enable/Disable
4265 4268 * If the tx hardware checksum is not enabled, LSO should be
4266 4269 * disabled.
4267 4270 */
4268 4271 (void) e1000g_get_prop(Adapter, "lso_enable",
4269 4272 0, 1, DEFAULT_LSO_ENABLE, &propval);
4270 4273 Adapter->lso_enable = (propval == 1);
4271 4274
4272 4275 switch (hw->mac.type) {
4273 4276 case e1000_82546:
4274 4277 case e1000_82546_rev_3:
4275 4278 if (Adapter->lso_enable)
4276 4279 Adapter->lso_premature_issue = B_TRUE;
4277 4280 /* FALLTHRU */
4278 4281 case e1000_82571:
4279 4282 case e1000_82572:
4280 4283 case e1000_82573:
4281 4284 case e1000_80003es2lan:
4282 4285 break;
4283 4286 default:
4284 4287 Adapter->lso_enable = B_FALSE;
4285 4288 }
4286 4289
4287 4290 if (!Adapter->tx_hcksum_enable) {
4288 4291 Adapter->lso_premature_issue = B_FALSE;
4289 4292 Adapter->lso_enable = B_FALSE;
4290 4293 }
4291 4294
4292 4295 /*
4293 4296 	 * If mem_workaround_82546 is enabled, the rx buffers allocated for
4294 4297 	 * e1000_82545, e1000_82546 and e1000_82546_rev_3
4295 4298 	 * will not cross a 64k boundary.
4296 4299 */
4297 4300 (void) e1000g_get_prop(Adapter, "mem_workaround_82546",
4298 4301 0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval);
4299 4302 Adapter->mem_workaround_82546 = (propval == 1);
4300 4303
4301 4304 /*
4302 4305 * Max number of multicast addresses
4303 4306 */
4304 4307 (void) e1000g_get_prop(Adapter, "mcast_max_num",
4305 4308 MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32,
4306 4309 &propval);
4307 4310 Adapter->mcast_max_num = propval;
4308 4311 }
4309 4312
4310 4313 /*
4311 4314 * e1000g_get_prop - routine to read properties
4312 4315 *
4313 4316  * Get a user-configured property value out of the configuration
4314 4317 * file e1000g.conf.
4315 4318 *
4316 4319  * The caller provides the name of the property, a minimum value, a
4317 4320  * maximum value, a default value and a pointer to the returned
4318 4321  * property value.
4319 4322 *
4320 4323 * Return B_TRUE if the configured value of the property is not a default
4321 4324 * value, otherwise return B_FALSE.
4322 4325 */
4323 4326 static boolean_t
4324 4327 e1000g_get_prop(struct e1000g *Adapter, /* point to per-adapter structure */
4325 4328 char *propname, /* name of the property */
4326 4329 int minval, /* minimum acceptable value */
4327 4330     int maxval,		/* maximum acceptable value */
4328 4331 int defval, /* default value */
4329 4332 int *propvalue) /* property value return to caller */
4330 4333 {
4331 4334 int propval; /* value returned for requested property */
4332 4335 int *props; /* point to array of properties returned */
4333 4336 	uint_t nprops;		/* number of property values returned */
4334 4337 boolean_t ret = B_TRUE;
4335 4338
4336 4339 /*
4337 4340 * get the array of properties from the config file
4338 4341 */
4339 4342 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
4340 4343 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
4341 4344 /* got some properties, test if we got enough */
4342 4345 if (Adapter->instance < nprops) {
4343 4346 propval = props[Adapter->instance];
4344 4347 } else {
4345 4348 /* not enough properties configured */
4346 4349 propval = defval;
4347 4350 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4348 4351 "Not Enough %s values found in e1000g.conf"
4349 4352 " - set to %d\n",
4350 4353 propname, propval);
4351 4354 ret = B_FALSE;
4352 4355 }
4353 4356
4354 4357 /* free memory allocated for properties */
4355 4358 ddi_prop_free(props);
4356 4359
4357 4360 } else {
4358 4361 propval = defval;
4359 4362 ret = B_FALSE;
4360 4363 }
4361 4364
4362 4365 /*
4363 4366 * enforce limits
4364 4367 */
4365 4368 if (propval > maxval) {
4366 4369 propval = maxval;
4367 4370 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4368 4371 "Too High %s value in e1000g.conf - set to %d\n",
4369 4372 propname, propval);
4370 4373 }
4371 4374
4372 4375 if (propval < minval) {
4373 4376 propval = minval;
4374 4377 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4375 4378 "Too Low %s value in e1000g.conf - set to %d\n",
4376 4379 propname, propval);
4377 4380 }
4378 4381
4379 4382 *propvalue = propval;
4380 4383 return (ret);
4381 4384 }
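Since ddi_prop_lookup_int_array() is indexed by Adapter->instance, each property in e1000g.conf is an integer array with one entry per instance. A hypothetical two-instance override (values chosen for illustration only) might look like:

	NumTxDescriptors=4096,2048;
	NumRxDescriptors=4096,2048;
	MaxNumReceivePackets=256,256;

Instances beyond the end of an array fall back to the default, and out-of-range entries are clamped to the MIN_/MAX_ limits by the checks above.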
4382 4385
4383 4386 static boolean_t
4384 4387 e1000g_link_check(struct e1000g *Adapter)
4385 4388 {
4386 4389 uint16_t speed, duplex, phydata;
4387 4390 boolean_t link_changed = B_FALSE;
4388 4391 struct e1000_hw *hw;
4389 4392 uint32_t reg_tarc;
4390 4393
4391 4394 hw = &Adapter->shared;
4392 4395
4393 4396 if (e1000g_link_up(Adapter)) {
4394 4397 /*
4395 4398 * The Link is up, check whether it was marked as down earlier
4396 4399 */
4397 4400 if (Adapter->link_state != LINK_STATE_UP) {
4398 4401 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
4399 4402 Adapter->link_speed = speed;
4400 4403 Adapter->link_duplex = duplex;
4401 4404 Adapter->link_state = LINK_STATE_UP;
4402 4405 link_changed = B_TRUE;
4403 4406
4404 4407 if (Adapter->link_speed == SPEED_1000)
4405 4408 Adapter->stall_threshold = TX_STALL_TIME_2S;
4406 4409 else
4407 4410 Adapter->stall_threshold = TX_STALL_TIME_8S;
4408 4411
4409 4412 Adapter->tx_link_down_timeout = 0;
4410 4413
4411 4414 if ((hw->mac.type == e1000_82571) ||
4412 4415 (hw->mac.type == e1000_82572)) {
4413 4416 reg_tarc = E1000_READ_REG(hw, E1000_TARC(0));
4414 4417 if (speed == SPEED_1000)
4415 4418 reg_tarc |= (1 << 21);
4416 4419 else
4417 4420 reg_tarc &= ~(1 << 21);
4418 4421 E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc);
4419 4422 }
4420 4423 }
4421 4424 Adapter->smartspeed = 0;
4422 4425 } else {
4423 4426 if (Adapter->link_state != LINK_STATE_DOWN) {
4424 4427 Adapter->link_speed = 0;
4425 4428 Adapter->link_duplex = 0;
4426 4429 Adapter->link_state = LINK_STATE_DOWN;
4427 4430 link_changed = B_TRUE;
4428 4431
4429 4432 /*
4430 4433 			 * SmartSpeed workaround for Tabor/TanaX: when the
4431 4434 			 * driver loses link, disable auto master/slave
4432 4435 			 * resolution.
4433 4436 */
4434 4437 if (hw->phy.type == e1000_phy_igp) {
4435 4438 (void) e1000_read_phy_reg(hw,
4436 4439 PHY_1000T_CTRL, &phydata);
4437 4440 phydata |= CR_1000T_MS_ENABLE;
4438 4441 (void) e1000_write_phy_reg(hw,
4439 4442 PHY_1000T_CTRL, phydata);
4440 4443 }
4441 4444 } else {
4442 4445 e1000g_smartspeed(Adapter);
4443 4446 }
4444 4447
4445 4448 if (Adapter->e1000g_state & E1000G_STARTED) {
4446 4449 if (Adapter->tx_link_down_timeout <
4447 4450 MAX_TX_LINK_DOWN_TIMEOUT) {
4448 4451 Adapter->tx_link_down_timeout++;
4449 4452 } else if (Adapter->tx_link_down_timeout ==
4450 4453 MAX_TX_LINK_DOWN_TIMEOUT) {
4451 4454 e1000g_tx_clean(Adapter);
4452 4455 Adapter->tx_link_down_timeout++;
4453 4456 }
4454 4457 }
4455 4458 }
4456 4459
4457 4460 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4458 4461 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4459 4462
4460 4463 return (link_changed);
4461 4464 }
4462 4465
4463 4466 /*
4464 4467 * e1000g_reset_link - Using the link properties to setup the link
4465 4468 */
4466 4469 int
4467 4470 e1000g_reset_link(struct e1000g *Adapter)
4468 4471 {
4469 4472 struct e1000_mac_info *mac;
4470 4473 struct e1000_phy_info *phy;
4471 4474 struct e1000_hw *hw;
4472 4475 boolean_t invalid;
4473 4476
4474 4477 mac = &Adapter->shared.mac;
4475 4478 phy = &Adapter->shared.phy;
4476 4479 hw = &Adapter->shared;
4477 4480 invalid = B_FALSE;
4478 4481
4479 4482 if (hw->phy.media_type != e1000_media_type_copper)
4480 4483 goto out;
4481 4484
4482 4485 if (Adapter->param_adv_autoneg == 1) {
4483 4486 mac->autoneg = B_TRUE;
4484 4487 phy->autoneg_advertised = 0;
4485 4488
4486 4489 /*
4487 4490 * 1000hdx is not supported for autonegotiation
4488 4491 */
4489 4492 if (Adapter->param_adv_1000fdx == 1)
4490 4493 phy->autoneg_advertised |= ADVERTISE_1000_FULL;
4491 4494
4492 4495 if (Adapter->param_adv_100fdx == 1)
4493 4496 phy->autoneg_advertised |= ADVERTISE_100_FULL;
4494 4497
4495 4498 if (Adapter->param_adv_100hdx == 1)
4496 4499 phy->autoneg_advertised |= ADVERTISE_100_HALF;
4497 4500
4498 4501 if (Adapter->param_adv_10fdx == 1)
4499 4502 phy->autoneg_advertised |= ADVERTISE_10_FULL;
4500 4503
4501 4504 if (Adapter->param_adv_10hdx == 1)
4502 4505 phy->autoneg_advertised |= ADVERTISE_10_HALF;
4503 4506
4504 4507 if (phy->autoneg_advertised == 0)
4505 4508 invalid = B_TRUE;
4506 4509 } else {
4507 4510 mac->autoneg = B_FALSE;
4508 4511
4509 4512 /*
4510 4513 * For Intel copper cards, 1000fdx and 1000hdx are not
4511 4514 * supported for forced link
4512 4515 */
4513 4516 if (Adapter->param_adv_100fdx == 1)
4514 4517 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4515 4518 else if (Adapter->param_adv_100hdx == 1)
4516 4519 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4517 4520 else if (Adapter->param_adv_10fdx == 1)
4518 4521 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4519 4522 else if (Adapter->param_adv_10hdx == 1)
4520 4523 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4521 4524 else
4522 4525 invalid = B_TRUE;
4523 4526
4524 4527 }
4525 4528
4526 4529 if (invalid) {
4527 4530 e1000g_log(Adapter, CE_WARN,
4528 4531 "Invalid link settings. Setup link to "
4529 4532 "support autonegotiation with all link capabilities.");
4530 4533 mac->autoneg = B_TRUE;
4531 4534 phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
4532 4535 }
4533 4536
4534 4537 out:
4535 4538 return (e1000_setup_link(&Adapter->shared));
4536 4539 }
4537 4540
4538 4541 static void
4539 4542 e1000g_timer_tx_resched(struct e1000g *Adapter)
4540 4543 {
4541 4544 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
4542 4545
4543 4546 rw_enter(&Adapter->chip_lock, RW_READER);
4544 4547
4545 4548 if (tx_ring->resched_needed &&
4546 4549 ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
4547 4550 drv_usectohz(1000000)) &&
4548 4551 (Adapter->e1000g_state & E1000G_STARTED) &&
4549 4552 (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
4550 4553 tx_ring->resched_needed = B_FALSE;
4551 4554 mac_tx_update(Adapter->mh);
4552 4555 E1000G_STAT(tx_ring->stat_reschedule);
4553 4556 E1000G_STAT(tx_ring->stat_timer_reschedule);
4554 4557 }
4555 4558
4556 4559 rw_exit(&Adapter->chip_lock);
4557 4560 }
4558 4561
4559 4562 static void
4560 4563 e1000g_local_timer(void *ws)
4561 4564 {
4562 4565 struct e1000g *Adapter = (struct e1000g *)ws;
4563 4566 struct e1000_hw *hw;
4564 4567 e1000g_ether_addr_t ether_addr;
4565 4568 boolean_t link_changed;
4566 4569
4567 4570 hw = &Adapter->shared;
4568 4571
4569 4572 if (Adapter->e1000g_state & E1000G_ERROR) {
4570 4573 rw_enter(&Adapter->chip_lock, RW_WRITER);
4571 4574 Adapter->e1000g_state &= ~E1000G_ERROR;
4572 4575 rw_exit(&Adapter->chip_lock);
4573 4576
4574 4577 Adapter->reset_count++;
4575 4578 if (e1000g_global_reset(Adapter)) {
4576 4579 ddi_fm_service_impact(Adapter->dip,
4577 4580 DDI_SERVICE_RESTORED);
4578 4581 e1000g_timer_tx_resched(Adapter);
4579 4582 } else
4580 4583 ddi_fm_service_impact(Adapter->dip,
4581 4584 DDI_SERVICE_LOST);
4582 4585 return;
4583 4586 }
4584 4587
4585 4588 if (e1000g_stall_check(Adapter)) {
4586 4589 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4587 4590 "Tx stall detected. Activate automatic recovery.\n");
4588 4591 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
4589 4592 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
4590 4593 Adapter->reset_count++;
4591 4594 if (e1000g_reset_adapter(Adapter)) {
4592 4595 ddi_fm_service_impact(Adapter->dip,
4593 4596 DDI_SERVICE_RESTORED);
4594 4597 e1000g_timer_tx_resched(Adapter);
4595 4598 }
4596 4599 return;
4597 4600 }
4598 4601
4599 4602 link_changed = B_FALSE;
4600 4603 rw_enter(&Adapter->chip_lock, RW_READER);
4601 4604 if (Adapter->link_complete)
4602 4605 link_changed = e1000g_link_check(Adapter);
4603 4606 rw_exit(&Adapter->chip_lock);
4604 4607
4605 4608 if (link_changed) {
4606 4609 if (!Adapter->reset_flag &&
4607 4610 (Adapter->e1000g_state & E1000G_STARTED) &&
4608 4611 !(Adapter->e1000g_state & E1000G_SUSPENDED))
4609 4612 mac_link_update(Adapter->mh, Adapter->link_state);
4610 4613 if (Adapter->link_state == LINK_STATE_UP)
4611 4614 Adapter->reset_flag = B_FALSE;
4612 4615 }
4613 4616 /*
4614 4617 * Workaround for esb2. Data stuck in fifo on a link
4615 4618 * down event. Reset the adapter to recover it.
4616 4619 */
4617 4620 if (Adapter->esb2_workaround) {
4618 4621 Adapter->esb2_workaround = B_FALSE;
4619 4622 (void) e1000g_reset_adapter(Adapter);
4620 4623 return;
4621 4624 }
4622 4625
4623 4626 /*
4624 4627 * With 82571 controllers, any locally administered address will
4625 4628 * be overwritten when there is a reset on the other port.
4626 4629 * Detect this circumstance and correct it.
4627 4630 */
4628 4631 if ((hw->mac.type == e1000_82571) &&
4629 4632 (e1000_get_laa_state_82571(hw) == B_TRUE)) {
4630 4633 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
4631 4634 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
4632 4635
4633 4636 ether_addr.reg.low = ntohl(ether_addr.reg.low);
4634 4637 ether_addr.reg.high = ntohl(ether_addr.reg.high);
4635 4638
4636 4639 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
4637 4640 (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
4638 4641 (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
4639 4642 (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
4640 4643 (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
4641 4644 (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
4642 4645 (void) e1000_rar_set(hw, hw->mac.addr, 0);
4643 4646 }
4644 4647 }
4645 4648
4646 4649 /*
4647 4650 * Long TTL workaround for 82541/82547
4648 4651 */
4649 4652 (void) e1000_igp_ttl_workaround_82547(hw);
4650 4653
4651 4654 /*
4652 4655 	 * Check the Adaptive IFS settings. If there are lots of collisions,
4653 4656 	 * change the value in steps.
4654 4657 	 * These settings should only be applied at 10/100 speeds.
4655 4658 */
4656 4659 if ((hw->phy.media_type == e1000_media_type_copper) &&
4657 4660 ((Adapter->link_speed == SPEED_100) ||
4658 4661 (Adapter->link_speed == SPEED_10))) {
4659 4662 e1000_update_adaptive(hw);
4660 4663 }
4661 4664 /*
4662 4665 * Set Timer Interrupts
4663 4666 */
4664 4667 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
4665 4668
4666 4669 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4667 4670 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4668 4671 else
4669 4672 e1000g_timer_tx_resched(Adapter);
4670 4673
4671 4674 restart_watchdog_timer(Adapter);
4672 4675 }
4673 4676
4674 4677 /*
4675 4678  * The function e1000g_link_timer() is called when the timer for link setup
4676 4679  * expires, which indicates that link setup has completed. The link
4677 4680  * state is not updated until link setup completes, nor is it
4678 4681  * sent to the upper layer through mac_link_update()
4679 4682  * from this function. It will be updated in the local timer routine or the
4680 4683 * interrupt service routine after the interface is started (plumbed).
4681 4684 */
4682 4685 static void
4683 4686 e1000g_link_timer(void *arg)
4684 4687 {
4685 4688 struct e1000g *Adapter = (struct e1000g *)arg;
4686 4689
4687 4690 mutex_enter(&Adapter->link_lock);
4688 4691 Adapter->link_complete = B_TRUE;
4689 4692 Adapter->link_tid = 0;
4690 4693 mutex_exit(&Adapter->link_lock);
4691 4694 }
4692 4695
4693 4696 /*
4694 4697 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
4695 4698 *
4696 4699  * This function reads the forced speed and duplex for 10/100 Mbps speeds
4697 4700  * and also for 1000 Mbps speeds from the e1000g.conf file.
4698 4701 */
4699 4702 static void
4700 4703 e1000g_force_speed_duplex(struct e1000g *Adapter)
4701 4704 {
4702 4705 int forced;
4703 4706 int propval;
4704 4707 struct e1000_mac_info *mac = &Adapter->shared.mac;
4705 4708 struct e1000_phy_info *phy = &Adapter->shared.phy;
4706 4709
4707 4710 /*
4708 4711 * get value out of config file
4709 4712 */
4710 4713 (void) e1000g_get_prop(Adapter, "ForceSpeedDuplex",
4711 4714 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced);
4712 4715
4713 4716 switch (forced) {
4714 4717 case GDIAG_10_HALF:
4715 4718 /*
4716 4719 * Disable Auto Negotiation
4717 4720 */
4718 4721 mac->autoneg = B_FALSE;
4719 4722 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4720 4723 break;
4721 4724 case GDIAG_10_FULL:
4722 4725 /*
4723 4726 * Disable Auto Negotiation
4724 4727 */
4725 4728 mac->autoneg = B_FALSE;
4726 4729 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4727 4730 break;
4728 4731 case GDIAG_100_HALF:
4729 4732 /*
4730 4733 * Disable Auto Negotiation
4731 4734 */
4732 4735 mac->autoneg = B_FALSE;
4733 4736 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4734 4737 break;
4735 4738 case GDIAG_100_FULL:
4736 4739 /*
4737 4740 * Disable Auto Negotiation
4738 4741 */
4739 4742 mac->autoneg = B_FALSE;
4740 4743 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4741 4744 break;
4742 4745 case GDIAG_1000_FULL:
4743 4746 /*
4744 4747 * The gigabit spec requires autonegotiation. Therefore,
4745 4748 * when the user wants to force the speed to 1000Mbps, we
4746 4749 		 * enable AutoNeg, but only allow the hardware to advertise
4747 4750 * 1000Mbps. This is different from 10/100 operation, where
4748 4751 * we are allowed to link without any negotiation.
4749 4752 */
4750 4753 mac->autoneg = B_TRUE;
4751 4754 phy->autoneg_advertised = ADVERTISE_1000_FULL;
4752 4755 break;
4753 4756 default: /* obey the setting of AutoNegAdvertised */
4754 4757 mac->autoneg = B_TRUE;
4755 4758 (void) e1000g_get_prop(Adapter, "AutoNegAdvertised",
4756 4759 0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4757 4760 AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval);
4758 4761 phy->autoneg_advertised = (uint16_t)propval;
4759 4762 break;
4760 4763 } /* switch */
4761 4764 }
4762 4765
4763 4766 /*
4764 4767 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4765 4768 *
4766 4769 * This function reads MaxFrameSize from e1000g.conf
4767 4770 */
4768 4771 static void
4769 4772 e1000g_get_max_frame_size(struct e1000g *Adapter)
4770 4773 {
4771 4774 int max_frame;
4772 4775
4773 4776 /*
4774 4777 * get value out of config file
4775 4778 */
4776 4779 (void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0,
4777 4780 &max_frame);
4778 4781
4779 4782 switch (max_frame) {
4780 4783 case 0:
4781 4784 Adapter->default_mtu = ETHERMTU;
4782 4785 break;
4783 4786 case 1:
4784 4787 Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4785 4788 sizeof (struct ether_vlan_header) - ETHERFCSL;
4786 4789 break;
4787 4790 case 2:
4788 4791 Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4789 4792 sizeof (struct ether_vlan_header) - ETHERFCSL;
4790 4793 break;
4791 4794 case 3:
4792 4795 Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4793 4796 sizeof (struct ether_vlan_header) - ETHERFCSL;
4794 4797 break;
4795 4798 default:
4796 4799 Adapter->default_mtu = ETHERMTU;
4797 4800 break;
4798 4801 } /* switch */
4799 4802
4800 4803 /*
4801 4804 	 * If the user-configured MTU is larger than the device's maximum MTU,
4802 4805 	 * the MTU is set to the device's maximum value.
4803 4806 */
4804 4807 if (Adapter->default_mtu > Adapter->max_mtu)
4805 4808 Adapter->default_mtu = Adapter->max_mtu;
4806 4809
4807 4810 Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu);
4808 4811 }
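For illustration, the MaxFrameSize codes select jumbo-frame classes rather than byte counts: a hypothetical e1000g.conf line such as

	MaxFrameSize=2,2;

asks both instances for the 8K class, and the default MTU becomes that frame size minus the VLAN-tagged Ethernet header and FCS, subject to the Adapter->max_mtu cap applied above.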
4809 4812
4810 4813 /*
4811 4814 * e1000g_pch_limits - Apply limits of the PCH silicon type
4812 4815 *
4813 4816 * At any frame size larger than the ethernet default,
4814 4817 * prevent linking at 10/100 speeds.
4815 4818 */
4816 4819 static void
4817 4820 e1000g_pch_limits(struct e1000g *Adapter)
4818 4821 {
4819 4822 struct e1000_hw *hw = &Adapter->shared;
4820 4823
4821 4824 /* only applies to PCH silicon type */
4822 4825 if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan)
4823 4826 return;
4824 4827
4825 4828 /* only applies to frames larger than ethernet default */
4826 4829 if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) {
4827 4830 hw->mac.autoneg = B_TRUE;
4828 4831 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
4829 4832
4830 4833 Adapter->param_adv_autoneg = 1;
4831 4834 Adapter->param_adv_1000fdx = 1;
4832 4835
4833 4836 Adapter->param_adv_100fdx = 0;
4834 4837 Adapter->param_adv_100hdx = 0;
4835 4838 Adapter->param_adv_10fdx = 0;
4836 4839 Adapter->param_adv_10hdx = 0;
4837 4840
4838 4841 e1000g_param_sync(Adapter);
4839 4842 }
4840 4843 }
4841 4844
4842 4845 /*
4843 4846 * e1000g_mtu2maxframe - convert given MTU to maximum frame size
4844 4847 */
4845 4848 static uint32_t
4846 4849 e1000g_mtu2maxframe(uint32_t mtu)
4847 4850 {
4848 4851 uint32_t maxframe;
4849 4852
4850 4853 maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL;
4851 4854
4852 4855 return (maxframe);
4853 4856 }
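A quick worked example, assuming the conventional 18-byte ether_vlan_header and an ETHERFCSL of 4:

	e1000g_mtu2maxframe(1500) == 1500 + 18 + 4 == 1522
	e1000g_mtu2maxframe(9000) == 9000 + 18 + 4 == 9022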
4854 4857
4855 4858 static void
4856 4859 arm_watchdog_timer(struct e1000g *Adapter)
4857 4860 {
4858 4861 Adapter->watchdog_tid =
4859 4862 timeout(e1000g_local_timer,
4860 4863 (void *)Adapter, 1 * drv_usectohz(1000000));
4861 4864 }
4862 4865 #pragma inline(arm_watchdog_timer)
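As a worked note, drv_usectohz(1000000) converts one second's worth of microseconds into clock ticks for timeout(9F), so the watchdog re-arms itself roughly once per second; on a system running at the common hz value of 100 that is 100 ticks, though hz is tunable and that exact figure is an assumption.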
4863 4866
4864 4867 static void
4865 4868 enable_watchdog_timer(struct e1000g *Adapter)
4866 4869 {
4867 4870 mutex_enter(&Adapter->watchdog_lock);
4868 4871
4869 4872 if (!Adapter->watchdog_timer_enabled) {
4870 4873 Adapter->watchdog_timer_enabled = B_TRUE;
4871 4874 Adapter->watchdog_timer_started = B_TRUE;
4872 4875 arm_watchdog_timer(Adapter);
4873 4876 }
4874 4877
4875 4878 mutex_exit(&Adapter->watchdog_lock);
4876 4879 }
4877 4880
4878 4881 static void
4879 4882 disable_watchdog_timer(struct e1000g *Adapter)
4880 4883 {
4881 4884 timeout_id_t tid;
4882 4885
4883 4886 mutex_enter(&Adapter->watchdog_lock);
4884 4887
4885 4888 Adapter->watchdog_timer_enabled = B_FALSE;
4886 4889 Adapter->watchdog_timer_started = B_FALSE;
4887 4890 tid = Adapter->watchdog_tid;
4888 4891 Adapter->watchdog_tid = 0;
4889 4892
4890 4893 mutex_exit(&Adapter->watchdog_lock);
4891 4894
4892 4895 if (tid != 0)
4893 4896 (void) untimeout(tid);
4894 4897 }
4895 4898
4896 4899 static void
4897 4900 start_watchdog_timer(struct e1000g *Adapter)
4898 4901 {
4899 4902 mutex_enter(&Adapter->watchdog_lock);
4900 4903
4901 4904 if (Adapter->watchdog_timer_enabled) {
4902 4905 if (!Adapter->watchdog_timer_started) {
4903 4906 Adapter->watchdog_timer_started = B_TRUE;
4904 4907 arm_watchdog_timer(Adapter);
4905 4908 }
4906 4909 }
4907 4910
4908 4911 mutex_exit(&Adapter->watchdog_lock);
4909 4912 }
4910 4913
4911 4914 static void
4912 4915 restart_watchdog_timer(struct e1000g *Adapter)
4913 4916 {
4914 4917 mutex_enter(&Adapter->watchdog_lock);
4915 4918
4916 4919 if (Adapter->watchdog_timer_started)
4917 4920 arm_watchdog_timer(Adapter);
4918 4921
4919 4922 mutex_exit(&Adapter->watchdog_lock);
4920 4923 }
4921 4924
4922 4925 static void
4923 4926 stop_watchdog_timer(struct e1000g *Adapter)
4924 4927 {
4925 4928 timeout_id_t tid;
4926 4929
4927 4930 mutex_enter(&Adapter->watchdog_lock);
4928 4931
4929 4932 Adapter->watchdog_timer_started = B_FALSE;
4930 4933 tid = Adapter->watchdog_tid;
4931 4934 Adapter->watchdog_tid = 0;
4932 4935
4933 4936 mutex_exit(&Adapter->watchdog_lock);
4934 4937
4935 4938 if (tid != 0)
4936 4939 (void) untimeout(tid);
4937 4940 }
4938 4941
4939 4942 static void
4940 4943 stop_link_timer(struct e1000g *Adapter)
4941 4944 {
4942 4945 timeout_id_t tid;
4943 4946
4944 4947 /* Disable the link timer */
4945 4948 mutex_enter(&Adapter->link_lock);
4946 4949
4947 4950 tid = Adapter->link_tid;
4948 4951 Adapter->link_tid = 0;
4949 4952
4950 4953 mutex_exit(&Adapter->link_lock);
4951 4954
4952 4955 if (tid != 0)
4953 4956 (void) untimeout(tid);
4954 4957 }
4955 4958
4956 4959 static void
4957 4960 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
4958 4961 {
4959 4962 timeout_id_t tid;
4960 4963
4961 4964 /* Disable the tx timer for 82547 chipset */
4962 4965 mutex_enter(&tx_ring->tx_lock);
4963 4966
4964 4967 tx_ring->timer_enable_82547 = B_FALSE;
4965 4968 tid = tx_ring->timer_id_82547;
4966 4969 tx_ring->timer_id_82547 = 0;
4967 4970
4968 4971 mutex_exit(&tx_ring->tx_lock);
4969 4972
4970 4973 if (tid != 0)
4971 4974 (void) untimeout(tid);
4972 4975 }
4973 4976
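/*
 * Interrupt mask helpers. Writing a bit to E1000_IMC disables that interrupt
 * cause, while writing it to E1000_IMS enables it. So e1000g_clear_interrupt()
 * disables every cause except RXSEQ, and e1000g_mask_interrupt() enables the
 * normal working set (plus the TX descriptor writeback interrupt when
 * tx_intr_enable is set).
 */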
4974 4977 void
4975 4978 e1000g_clear_interrupt(struct e1000g *Adapter)
4976 4979 {
4977 4980 E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
4978 4981 0xffffffff & ~E1000_IMS_RXSEQ);
4979 4982 }
4980 4983
4981 4984 void
4982 4985 e1000g_mask_interrupt(struct e1000g *Adapter)
4983 4986 {
4984 4987 E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
4985 4988 IMS_ENABLE_MASK & ~E1000_IMS_TXDW);
4986 4989
4987 4990 if (Adapter->tx_intr_enable)
4988 4991 e1000g_mask_tx_interrupt(Adapter);
4989 4992 }
4990 4993
4991 4994 /*
4992 4995  * This routine is called by e1000g_quiesce() and therefore must not block.
4993 4996 */
4994 4997 void
4995 4998 e1000g_clear_all_interrupts(struct e1000g *Adapter)
4996 4999 {
4997 5000 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
4998 5001 }
4999 5002
5000 5003 void
5001 5004 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
5002 5005 {
5003 5006 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
5004 5007 }
5005 5008
5006 5009 void
5007 5010 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
5008 5011 {
5009 5012 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
5010 5013 }
5011 5014
5012 5015 static void
5013 5016 e1000g_smartspeed(struct e1000g *Adapter)
5014 5017 {
5015 5018 struct e1000_hw *hw = &Adapter->shared;
5016 5019 uint16_t phy_status;
5017 5020 uint16_t phy_ctrl;
5018 5021
5019 5022 /*
5020 5023 * If we're not T-or-T, or we're not autoneg'ing, or we're not
5021 5024 * advertising 1000Full, we don't even use the workaround
5022 5025 */
5023 5026 if ((hw->phy.type != e1000_phy_igp) ||
5024 5027 !hw->mac.autoneg ||
5025 5028 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
5026 5029 return;
5027 5030
5028 5031 /*
5029 5032 * True if this is the first call of this function or after every
5030 5033 * 30 seconds of not having link
5031 5034 */
5032 5035 if (Adapter->smartspeed == 0) {
5033 5036 /*
5034 5037 * If Master/Slave config fault is asserted twice, we
5035 5038 * assume back-to-back
5036 5039 */
5037 5040 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
5038 5041 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
5039 5042 return;
5040 5043
5041 5044 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
5042 5045 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
5043 5046 return;
5044 5047 /*
5045 5048 		 * We're assuming back-to-back faults because our status
5046 5049 		 * register insists there's a fault in the master/slave
5047 5050 		 * relationship that was "negotiated"
5048 5051 */
5049 5052 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
5050 5053 /*
5051 5054 * Is the phy configured for manual configuration of
5052 5055 * master/slave?
5053 5056 */
5054 5057 if (phy_ctrl & CR_1000T_MS_ENABLE) {
5055 5058 /*
5056 5059 * Yes. Then disable manual configuration (enable
5057 5060 * auto configuration) of master/slave
5058 5061 */
5059 5062 phy_ctrl &= ~CR_1000T_MS_ENABLE;
5060 5063 (void) e1000_write_phy_reg(hw,
5061 5064 PHY_1000T_CTRL, phy_ctrl);
5062 5065 /*
5063 5066 * Effectively starting the clock
5064 5067 */
5065 5068 Adapter->smartspeed++;
5066 5069 /*
5067 5070 * Restart autonegotiation
5068 5071 */
5069 5072 if (!e1000_phy_setup_autoneg(hw) &&
5070 5073 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
5071 5074 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
5072 5075 MII_CR_RESTART_AUTO_NEG);
5073 5076 (void) e1000_write_phy_reg(hw,
5074 5077 PHY_CONTROL, phy_ctrl);
5075 5078 }
5076 5079 }
5077 5080 return;
5078 5081 /*
5079 5082 * Has 6 seconds transpired still without link? Remember,
5080 5083 * you should reset the smartspeed counter once you obtain
5081 5084 * link
5082 5085 */
5083 5086 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
5084 5087 /*
5085 5088 * Yes. Remember, we did at the start determine that
5086 5089 * there's a master/slave configuration fault, so we're
5087 5090 * still assuming there's someone on the other end, but we
5088 5091 		 * just haven't yet been able to talk to it. We then switch
5089 5092 		 * the PHY to manual configuration of master/slave to see if
5090 5093 		 * we're running 2/3 pair cables.
5091 5094 */
5092 5095 /*
5093 5096 * If still no link, perhaps using 2/3 pair cable
5094 5097 */
5095 5098 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
5096 5099 phy_ctrl |= CR_1000T_MS_ENABLE;
5097 5100 (void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
5098 5101 /*
5099 5102 * Restart autoneg with phy enabled for manual
5100 5103 * configuration of master/slave
5101 5104 */
5102 5105 if (!e1000_phy_setup_autoneg(hw) &&
5103 5106 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
5104 5107 phy_ctrl |=
5105 5108 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
5106 5109 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
5107 5110 }
5108 5111 /*
5109 5112 * Hopefully, there are no more faults and we've obtained
5110 5113 * link as a result.
5111 5114 */
5112 5115 }
5113 5116 /*
5114 5117 * Restart process after E1000_SMARTSPEED_MAX iterations (30
5115 5118 * seconds)
5116 5119 */
5117 5120 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
5118 5121 Adapter->smartspeed = 0;
5119 5122 }
5120 5123
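/*
 * is_valid_mac_addr - reject the all-zeros and broadcast (all-ones) addresses
 */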
5121 5124 static boolean_t
5122 5125 is_valid_mac_addr(uint8_t *mac_addr)
5123 5126 {
5124 5127 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
5125 5128 const uint8_t addr_test2[6] =
5126 5129 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5127 5130
5128 5131 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
5129 5132 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
5130 5133 return (B_FALSE);
5131 5134
5132 5135 return (B_TRUE);
5133 5136 }
5134 5137
5135 5138 /*
5136 5139 * e1000g_stall_check - check for tx stall
5137 5140 *
5138 5141 * This function checks if the adapter is stalled (in transmit).
5139 5142 *
5140 5143 * It is called each time the watchdog timeout is invoked.
5141 5144 * If the transmit descriptor reclaim continuously fails,
5142 5145 * the watchdog value will increment by 1. If the watchdog
5143 5146 * value exceeds the threshold, the adapter is assumed to
5144 5147  * have stalled and must be reset.
5145 5148 */
5146 5149 static boolean_t
5147 5150 e1000g_stall_check(struct e1000g *Adapter)
5148 5151 {
5149 5152 e1000g_tx_ring_t *tx_ring;
5150 5153
5151 5154 tx_ring = Adapter->tx_ring;
5152 5155
5153 5156 if (Adapter->link_state != LINK_STATE_UP)
5154 5157 return (B_FALSE);
5155 5158
5156 5159 (void) e1000g_recycle(tx_ring);
5157 5160
5158 5161 if (Adapter->stall_flag)
5159 5162 return (B_TRUE);
5160 5163
5161 5164 return (B_FALSE);
5162 5165 }
5163 5166
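/*
 * Debug-only (E1000G_DEBUG) ioctl support for peeking and poking device
 * registers and driver soft state.
 */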
5164 5167 #ifdef E1000G_DEBUG
5165 5168 static enum ioc_reply
5166 5169 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
5167 5170 {
5168 5171 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
5169 5172 e1000g_peekpoke_t *ppd;
5170 5173 uint64_t mem_va;
5171 5174 uint64_t maxoff;
5172 5175 boolean_t peek;
5173 5176
5174 5177 switch (iocp->ioc_cmd) {
5175 5178
5176 5179 case E1000G_IOC_REG_PEEK:
5177 5180 peek = B_TRUE;
5178 5181 break;
5179 5182
5180 5183 case E1000G_IOC_REG_POKE:
5181 5184 peek = B_FALSE;
5182 5185 break;
5183 5186
5184 5187 	default:
5185 5188 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
5186 5189 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
5187 5190 iocp->ioc_cmd);
5188 5191 return (IOC_INVAL);
5189 5192 }
5190 5193
5191 5194 /*
5192 5195 * Validate format of ioctl
5193 5196 */
5194 5197 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
5195 5198 return (IOC_INVAL);
5196 5199 if (mp->b_cont == NULL)
5197 5200 return (IOC_INVAL);
5198 5201
5199 5202 ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
5200 5203
5201 5204 /*
5202 5205 * Validate request parameters
5203 5206 */
5204 5207 switch (ppd->pp_acc_space) {
5205 5208
5206 5209 default:
5207 5210 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
5208 5211 "e1000g_diag_ioctl: invalid access space 0x%X\n",
5209 5212 ppd->pp_acc_space);
5210 5213 return (IOC_INVAL);
5211 5214
5212 5215 case E1000G_PP_SPACE_REG:
5213 5216 /*
5214 5217 * Memory-mapped I/O space
5215 5218 */
5216 5219 ASSERT(ppd->pp_acc_size == 4);
5217 5220 if (ppd->pp_acc_size != 4)
5218 5221 return (IOC_INVAL);
5219 5222
5220 5223 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
5221 5224 return (IOC_INVAL);
5222 5225
5223 5226 mem_va = 0;
5224 5227 maxoff = 0x10000;
5225 5228 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
5226 5229 break;
5227 5230
5228 5231 case E1000G_PP_SPACE_E1000G:
5229 5232 /*
5230 5233 * E1000g data structure!
5231 5234 */
5232 5235 mem_va = (uintptr_t)e1000gp;
5233 5236 maxoff = sizeof (struct e1000g);
5234 5237 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
5235 5238 break;
5236 5239
5237 5240 }
5238 5241
5239 5242 if (ppd->pp_acc_offset >= maxoff)
5240 5243 return (IOC_INVAL);
5241 5244
5242 5245 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
5243 5246 return (IOC_INVAL);
5244 5247
5245 5248 /*
5246 5249 * All OK - go!
5247 5250 */
5248 5251 ppd->pp_acc_offset += mem_va;
5249 5252 (*ppfn)(e1000gp, ppd);
5250 5253 return (peek ? IOC_REPLY : IOC_ACK);
5251 5254 }
5252 5255
5253 5256 static void
5254 5257 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5255 5258 {
5256 5259 ddi_acc_handle_t handle;
5257 5260 uint32_t *regaddr;
5258 5261
5259 5262 handle = e1000gp->osdep.reg_handle;
5260 5263 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5261 5264 (uintptr_t)ppd->pp_acc_offset);
5262 5265
5263 5266 ppd->pp_acc_data = ddi_get32(handle, regaddr);
5264 5267 }
5265 5268
5266 5269 static void
5267 5270 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5268 5271 {
5269 5272 ddi_acc_handle_t handle;
5270 5273 uint32_t *regaddr;
5271 5274 uint32_t value;
5272 5275
5273 5276 handle = e1000gp->osdep.reg_handle;
5274 5277 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5275 5278 (uintptr_t)ppd->pp_acc_offset);
5276 5279 value = (uint32_t)ppd->pp_acc_data;
5277 5280
5278 5281 ddi_put32(handle, regaddr, value);
5279 5282 }
5280 5283
5281 5284 static void
5282 5285 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5283 5286 {
5284 5287 uint64_t value;
5285 5288 void *vaddr;
5286 5289
5287 5290 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5288 5291
5289 5292 switch (ppd->pp_acc_size) {
5290 5293 case 1:
5291 5294 value = *(uint8_t *)vaddr;
5292 5295 break;
5293 5296
5294 5297 case 2:
5295 5298 value = *(uint16_t *)vaddr;
5296 5299 break;
5297 5300
5298 5301 case 4:
5299 5302 value = *(uint32_t *)vaddr;
5300 5303 break;
5301 5304
5302 5305 case 8:
5303 5306 value = *(uint64_t *)vaddr;
5304 5307 break;
5305 5308 }
5306 5309
5307 5310 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5308 5311 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
5309 5312 (void *)e1000gp, (void *)ppd, value, vaddr);
5310 5313
5311 5314 ppd->pp_acc_data = value;
5312 5315 }
5313 5316
5314 5317 static void
5315 5318 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5316 5319 {
5317 5320 uint64_t value;
5318 5321 void *vaddr;
5319 5322
5320 5323 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5321 5324 value = ppd->pp_acc_data;
5322 5325
5323 5326 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5324 5327 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
5325 5328 (void *)e1000gp, (void *)ppd, value, vaddr);
5326 5329
5327 5330 switch (ppd->pp_acc_size) {
5328 5331 case 1:
5329 5332 *(uint8_t *)vaddr = (uint8_t)value;
5330 5333 break;
5331 5334
5332 5335 case 2:
5333 5336 *(uint16_t *)vaddr = (uint16_t)value;
5334 5337 break;
5335 5338
5336 5339 case 4:
5337 5340 *(uint32_t *)vaddr = (uint32_t)value;
5338 5341 break;
5339 5342
5340 5343 case 8:
5341 5344 *(uint64_t *)vaddr = (uint64_t)value;
5342 5345 break;
5343 5346 }
5344 5347 }
5345 5348 #endif
5346 5349
5347 5350 /*
5348 5351 * Loopback Support
5349 5352 */
5350 5353 static lb_property_t lb_normal =
5351 5354 { normal, "normal", E1000G_LB_NONE };
5352 5355 static lb_property_t lb_external1000 =
5353 5356 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 };
5354 5357 static lb_property_t lb_external100 =
5355 5358 { external, "100Mbps", E1000G_LB_EXTERNAL_100 };
5356 5359 static lb_property_t lb_external10 =
5357 5360 { external, "10Mbps", E1000G_LB_EXTERNAL_10 };
5358 5361 static lb_property_t lb_phy =
5359 5362 { internal, "PHY", E1000G_LB_INTERNAL_PHY };
5360 5363
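/*
 * e1000g_loopback_ioctl - handle the LB_GET_INFO_SIZE, LB_GET_INFO,
 * LB_GET_MODE and LB_SET_MODE ioctls, advertising only the loopback modes
 * the current MAC/PHY combination supports.
 */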
5361 5364 static enum ioc_reply
5362 5365 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
5363 5366 {
5364 5367 lb_info_sz_t *lbsp;
5365 5368 lb_property_t *lbpp;
5366 5369 struct e1000_hw *hw;
5367 5370 uint32_t *lbmp;
5368 5371 uint32_t size;
5369 5372 uint32_t value;
5370 5373
5371 5374 hw = &Adapter->shared;
5372 5375
5373 5376 if (mp->b_cont == NULL)
5374 5377 return (IOC_INVAL);
5375 5378
5376 5379 if (!e1000g_check_loopback_support(hw)) {
5377 5380 e1000g_log(NULL, CE_WARN,
5378 5381 "Loopback is not supported on e1000g%d", Adapter->instance);
5379 5382 return (IOC_INVAL);
5380 5383 }
5381 5384
5382 5385 switch (iocp->ioc_cmd) {
5383 5386 default:
5384 5387 return (IOC_INVAL);
5385 5388
5386 5389 case LB_GET_INFO_SIZE:
5387 5390 size = sizeof (lb_info_sz_t);
5388 5391 if (iocp->ioc_count != size)
5389 5392 return (IOC_INVAL);
5390 5393
5391 5394 rw_enter(&Adapter->chip_lock, RW_WRITER);
5392 5395 e1000g_get_phy_state(Adapter);
5393 5396
5394 5397 /*
5395 5398 * Workaround for hardware faults. In order to get a stable
5396 5399 * state of phy, we will wait for a specific interval and
5397 5400 		 * try again. The time delay is an empirical value based
5398 5401 * on our testing.
5399 5402 */
5400 5403 msec_delay(100);
5401 5404 e1000g_get_phy_state(Adapter);
5402 5405 rw_exit(&Adapter->chip_lock);
5403 5406
5404 5407 value = sizeof (lb_normal);
5405 5408 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5406 5409 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5407 5410 (hw->phy.media_type == e1000_media_type_fiber) ||
5408 5411 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5409 5412 value += sizeof (lb_phy);
5410 5413 switch (hw->mac.type) {
5411 5414 case e1000_82571:
5412 5415 case e1000_82572:
5413 5416 case e1000_80003es2lan:
5414 5417 value += sizeof (lb_external1000);
5415 5418 break;
5416 5419 }
5417 5420 }
5418 5421 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5419 5422 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5420 5423 value += sizeof (lb_external100);
5421 5424 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5422 5425 value += sizeof (lb_external10);
5423 5426
5424 5427 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
5425 5428 *lbsp = value;
5426 5429 break;
5427 5430
5428 5431 case LB_GET_INFO:
5429 5432 value = sizeof (lb_normal);
5430 5433 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5431 5434 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5432 5435 (hw->phy.media_type == e1000_media_type_fiber) ||
5433 5436 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5434 5437 value += sizeof (lb_phy);
5435 5438 switch (hw->mac.type) {
5436 5439 case e1000_82571:
5437 5440 case e1000_82572:
5438 5441 case e1000_80003es2lan:
5439 5442 value += sizeof (lb_external1000);
5440 5443 break;
5441 5444 }
5442 5445 }
5443 5446 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5444 5447 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5445 5448 value += sizeof (lb_external100);
5446 5449 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5447 5450 value += sizeof (lb_external10);
5448 5451
5449 5452 size = value;
5450 5453 if (iocp->ioc_count != size)
5451 5454 return (IOC_INVAL);
5452 5455
5453 5456 value = 0;
5454 5457 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
5455 5458 lbpp[value++] = lb_normal;
5456 5459 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5457 5460 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5458 5461 (hw->phy.media_type == e1000_media_type_fiber) ||
5459 5462 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5460 5463 lbpp[value++] = lb_phy;
5461 5464 switch (hw->mac.type) {
5462 5465 case e1000_82571:
5463 5466 case e1000_82572:
5464 5467 case e1000_80003es2lan:
5465 5468 lbpp[value++] = lb_external1000;
5466 5469 break;
5467 5470 }
5468 5471 }
5469 5472 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5470 5473 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5471 5474 lbpp[value++] = lb_external100;
5472 5475 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5473 5476 lbpp[value++] = lb_external10;
5474 5477 break;
5475 5478
5476 5479 case LB_GET_MODE:
5477 5480 size = sizeof (uint32_t);
5478 5481 if (iocp->ioc_count != size)
5479 5482 return (IOC_INVAL);
5480 5483
5481 5484 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5482 5485 *lbmp = Adapter->loopback_mode;
5483 5486 break;
5484 5487
5485 5488 case LB_SET_MODE:
5486 5489 size = 0;
5487 5490 if (iocp->ioc_count != sizeof (uint32_t))
5488 5491 return (IOC_INVAL);
5489 5492
5490 5493 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5491 5494 if (!e1000g_set_loopback_mode(Adapter, *lbmp))
5492 5495 return (IOC_INVAL);
5493 5496 break;
5494 5497 }
5495 5498
5496 5499 iocp->ioc_count = size;
5497 5500 iocp->ioc_error = 0;
5498 5501
5499 5502 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
5500 5503 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
5501 5504 return (IOC_INVAL);
5502 5505 }
5503 5506
5504 5507 return (IOC_REPLY);
5505 5508 }
5506 5509
5507 5510 static boolean_t
5508 5511 e1000g_check_loopback_support(struct e1000_hw *hw)
5509 5512 {
5510 5513 switch (hw->mac.type) {
5511 5514 case e1000_82540:
5512 5515 case e1000_82545:
5513 5516 case e1000_82545_rev_3:
5514 5517 case e1000_82546:
5515 5518 case e1000_82546_rev_3:
5516 5519 case e1000_82541:
5517 5520 case e1000_82541_rev_2:
5518 5521 case e1000_82547:
5519 5522 case e1000_82547_rev_2:
5520 5523 case e1000_82571:
5521 5524 case e1000_82572:
5522 5525 case e1000_82573:
5523 5526 case e1000_82574:
5524 5527 case e1000_80003es2lan:
5525 5528 case e1000_ich9lan:
5526 5529 case e1000_ich10lan:
5527 5530 return (B_TRUE);
5528 5531 }
5529 5532 return (B_FALSE);
5530 5533 }
5531 5534
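/*
 * e1000g_set_loopback_mode - program the requested loopback mode. If the
 * link does not come up afterwards, the chip is reset and the mode is
 * retried once; a second failure reverts the driver to E1000G_LB_NONE.
 */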
5532 5535 static boolean_t
5533 5536 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
5534 5537 {
5535 5538 struct e1000_hw *hw;
5536 5539 int i, times;
5537 5540 boolean_t link_up;
5538 5541
5539 5542 if (mode == Adapter->loopback_mode)
5540 5543 return (B_TRUE);
5541 5544
5542 5545 hw = &Adapter->shared;
5543 5546 times = 0;
5544 5547
5545 5548 Adapter->loopback_mode = mode;
5546 5549
5547 5550 if (mode == E1000G_LB_NONE) {
5548 5551 /* Reset the chip */
5549 5552 hw->phy.autoneg_wait_to_complete = B_TRUE;
5550 5553 (void) e1000g_reset_adapter(Adapter);
5551 5554 hw->phy.autoneg_wait_to_complete = B_FALSE;
5552 5555 return (B_TRUE);
5553 5556 }
5554 5557
5555 5558 again:
5556 5559
5557 5560 rw_enter(&Adapter->chip_lock, RW_WRITER);
5558 5561
5559 5562 switch (mode) {
5560 5563 default:
5561 5564 rw_exit(&Adapter->chip_lock);
5562 5565 return (B_FALSE);
5563 5566
5564 5567 case E1000G_LB_EXTERNAL_1000:
5565 5568 e1000g_set_external_loopback_1000(Adapter);
5566 5569 break;
5567 5570
5568 5571 case E1000G_LB_EXTERNAL_100:
5569 5572 e1000g_set_external_loopback_100(Adapter);
5570 5573 break;
5571 5574
5572 5575 case E1000G_LB_EXTERNAL_10:
5573 5576 e1000g_set_external_loopback_10(Adapter);
5574 5577 break;
5575 5578
5576 5579 case E1000G_LB_INTERNAL_PHY:
5577 5580 e1000g_set_internal_loopback(Adapter);
5578 5581 break;
5579 5582 }
5580 5583
5581 5584 times++;
5582 5585
5583 5586 rw_exit(&Adapter->chip_lock);
5584 5587
5585 5588 /* Wait for link up */
5586 5589 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
5587 5590 msec_delay(100);
5588 5591
5589 5592 rw_enter(&Adapter->chip_lock, RW_WRITER);
5590 5593
5591 5594 link_up = e1000g_link_up(Adapter);
5592 5595
5593 5596 rw_exit(&Adapter->chip_lock);
5594 5597
5595 5598 if (!link_up) {
5596 5599 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5597 5600 "Failed to get the link up");
5598 5601 if (times < 2) {
5599 5602 /* Reset the link */
5600 5603 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5601 5604 "Reset the link ...");
5602 5605 (void) e1000g_reset_adapter(Adapter);
5603 5606 goto again;
5604 5607 }
5605 5608
5606 5609 /*
5607 5610 * Reset driver to loopback none when set loopback failed
5608 5611 * for the second time.
5609 5612 */
5610 5613 Adapter->loopback_mode = E1000G_LB_NONE;
5611 5614
5612 5615 /* Reset the chip */
5613 5616 hw->phy.autoneg_wait_to_complete = B_TRUE;
5614 5617 (void) e1000g_reset_adapter(Adapter);
5615 5618 hw->phy.autoneg_wait_to_complete = B_FALSE;
5616 5619
5617 5620 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5618 5621 "Set loopback mode failed, reset to loopback none");
5619 5622
5620 5623 return (B_FALSE);
5621 5624 }
5622 5625
5623 5626 return (B_TRUE);
5624 5627 }
5625 5628
5626 5629 /*
5627 5630 * The following loopback settings are from Intel's technical
5628 5631 * document - "How To Loopback". All the register settings and
5629 5632  * time delay values are taken directly from that document;
5630 5633  * no further explanation is available.
5631 5634 */
5632 5635 static void
5633 5636 e1000g_set_internal_loopback(struct e1000g *Adapter)
5634 5637 {
5635 5638 struct e1000_hw *hw;
5636 5639 uint32_t ctrl;
5637 5640 uint32_t status;
5638 5641 uint16_t phy_ctrl;
5639 5642 uint16_t phy_reg;
5640 5643 uint32_t txcw;
5641 5644
5642 5645 hw = &Adapter->shared;
5643 5646
5644 5647 /* Disable Smart Power Down */
5645 5648 phy_spd_state(hw, B_FALSE);
5646 5649
5647 5650 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
5648 5651 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
5649 5652 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
5650 5653
5651 5654 switch (hw->mac.type) {
5652 5655 case e1000_82540:
5653 5656 case e1000_82545:
5654 5657 case e1000_82545_rev_3:
5655 5658 case e1000_82546:
5656 5659 case e1000_82546_rev_3:
5657 5660 case e1000_82573:
5658 5661 /* Auto-MDI/MDIX off */
5659 5662 (void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
5660 5663 /* Reset PHY to update Auto-MDI/MDIX */
5661 5664 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5662 5665 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
5663 5666 /* Reset PHY to auto-neg off and force 1000 */
5664 5667 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5665 5668 phy_ctrl | MII_CR_RESET);
5666 5669 /*
5667 5670 * Disable PHY receiver for 82540/545/546 and 82573 Family.
5668 5671 * See comments above e1000g_set_internal_loopback() for the
5669 5672 * background.
5670 5673 */
5671 5674 (void) e1000_write_phy_reg(hw, 29, 0x001F);
5672 5675 (void) e1000_write_phy_reg(hw, 30, 0x8FFC);
5673 5676 (void) e1000_write_phy_reg(hw, 29, 0x001A);
5674 5677 (void) e1000_write_phy_reg(hw, 30, 0x8FF0);
5675 5678 break;
5676 5679 case e1000_80003es2lan:
5677 5680 /* Force Link Up */
5678 5681 (void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
5679 5682 0x1CC);
5680 5683 /* Sets PCS loopback at 1Gbs */
5681 5684 (void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
5682 5685 0x1046);
5683 5686 break;
5684 5687 }
5685 5688
5686 5689 /*
5687 5690 * The following registers should be set for e1000_phy_bm phy type.
5688 5691 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy.
5689 5692 * For others, we do not need to set these registers.
5690 5693 */
5691 5694 if (hw->phy.type == e1000_phy_bm) {
5692 5695 /* Set Default MAC Interface speed to 1GB */
5693 5696 (void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg);
5694 5697 phy_reg &= ~0x0007;
5695 5698 phy_reg |= 0x006;
5696 5699 (void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg);
5697 5700 /* Assert SW reset for above settings to take effect */
5698 5701 (void) e1000_phy_commit(hw);
5699 5702 msec_delay(1);
5700 5703 /* Force Full Duplex */
5701 5704 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5702 5705 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5703 5706 phy_reg | 0x000C);
5704 5707 /* Set Link Up (in force link) */
5705 5708 (void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg);
5706 5709 (void) e1000_write_phy_reg(hw, PHY_REG(776, 16),
5707 5710 phy_reg | 0x0040);
5708 5711 /* Force Link */
5709 5712 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5710 5713 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5711 5714 phy_reg | 0x0040);
5712 5715 /* Set Early Link Enable */
5713 5716 (void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg);
5714 5717 (void) e1000_write_phy_reg(hw, PHY_REG(769, 20),
5715 5718 phy_reg | 0x0400);
5716 5719 }
5717 5720
5718 5721 /* Set loopback */
5719 5722 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
5720 5723
5721 5724 msec_delay(250);
5722 5725
5723 5726 /* Now set up the MAC to the same speed/duplex as the PHY. */
5724 5727 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5725 5728 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5726 5729 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5727 5730 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5728 5731 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
5729 5732 E1000_CTRL_FD); /* Force Duplex to FULL */
5730 5733
5731 5734 switch (hw->mac.type) {
5732 5735 case e1000_82540:
5733 5736 case e1000_82545:
5734 5737 case e1000_82545_rev_3:
5735 5738 case e1000_82546:
5736 5739 case e1000_82546_rev_3:
5737 5740 /*
5738 5741 * For some serdes we'll need to commit the writes now
5739 5742 * so that the status is updated on link
5740 5743 */
5741 5744 if (hw->phy.media_type == e1000_media_type_internal_serdes) {
5742 5745 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5743 5746 msec_delay(100);
5744 5747 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5745 5748 }
5746 5749
5747 5750 if (hw->phy.media_type == e1000_media_type_copper) {
5748 5751 /* Invert Loss of Signal */
5749 5752 ctrl |= E1000_CTRL_ILOS;
5750 5753 } else {
5751 5754 /* Set ILOS on fiber nic if half duplex is detected */
5752 5755 status = E1000_READ_REG(hw, E1000_STATUS);
5753 5756 if ((status & E1000_STATUS_FD) == 0)
5754 5757 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5755 5758 }
5756 5759 break;
5757 5760
5758 5761 case e1000_82571:
5759 5762 case e1000_82572:
5760 5763 /*
5761 5764 * The fiber/SerDes versions of this adapter do not contain an
5762 5765 * accessible PHY. Therefore, loopback beyond MAC must be done
5763 5766 * using SerDes analog loopback.
5764 5767 */
5765 5768 if (hw->phy.media_type != e1000_media_type_copper) {
5766 5769 /* Disable autoneg by setting bit 31 of TXCW to zero */
5767 5770 txcw = E1000_READ_REG(hw, E1000_TXCW);
5768 5771 txcw &= ~((uint32_t)1 << 31);
5769 5772 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5770 5773
5771 5774 /*
5772 5775 * Write 0x410 to Serdes Control register
5773 5776 * to enable Serdes analog loopback
5774 5777 */
5775 5778 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5776 5779 msec_delay(10);
5777 5780 }
5778 5781
5779 5782 status = E1000_READ_REG(hw, E1000_STATUS);
5780 5783 /* Set ILOS on fiber nic if half duplex is detected */
5781 5784 if ((hw->phy.media_type == e1000_media_type_fiber) &&
5782 5785 ((status & E1000_STATUS_FD) == 0 ||
5783 5786 (status & E1000_STATUS_LU) == 0))
5784 5787 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5785 5788 else if (hw->phy.media_type == e1000_media_type_internal_serdes)
5786 5789 ctrl |= E1000_CTRL_SLU;
5787 5790 break;
5788 5791
5789 5792 case e1000_82573:
5790 5793 ctrl |= E1000_CTRL_ILOS;
5791 5794 break;
5792 5795 case e1000_ich9lan:
5793 5796 case e1000_ich10lan:
5794 5797 ctrl |= E1000_CTRL_SLU;
5795 5798 break;
5796 5799 }
5797 5800 if (hw->phy.type == e1000_phy_bm)
5798 5801 ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS;
5799 5802
5800 5803 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5801 5804 }
5802 5805
5803 5806 static void
5804 5807 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
5805 5808 {
5806 5809 struct e1000_hw *hw;
5807 5810 uint32_t rctl;
5808 5811 uint32_t ctrl_ext;
5809 5812 uint32_t ctrl;
5810 5813 uint32_t status;
5811 5814 uint32_t txcw;
5812 5815 uint16_t phydata;
5813 5816
5814 5817 hw = &Adapter->shared;
5815 5818
5816 5819 /* Disable Smart Power Down */
5817 5820 phy_spd_state(hw, B_FALSE);
5818 5821
5819 5822 switch (hw->mac.type) {
5820 5823 case e1000_82571:
5821 5824 case e1000_82572:
5822 5825 switch (hw->phy.media_type) {
5823 5826 case e1000_media_type_copper:
5824 5827 /* Force link up (Must be done before the PHY writes) */
5825 5828 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5826 5829 ctrl |= E1000_CTRL_SLU; /* Force Link Up */
5827 5830 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5828 5831
5829 5832 rctl = E1000_READ_REG(hw, E1000_RCTL);
5830 5833 rctl |= (E1000_RCTL_EN |
5831 5834 E1000_RCTL_SBP |
5832 5835 E1000_RCTL_UPE |
5833 5836 E1000_RCTL_MPE |
5834 5837 E1000_RCTL_LPE |
5835 5838 E1000_RCTL_BAM); /* 0x803E */
5836 5839 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
5837 5840
5838 5841 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5839 5842 ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
5840 5843 E1000_CTRL_EXT_SDP6_DATA |
5841 5844 E1000_CTRL_EXT_SDP3_DATA |
5842 5845 E1000_CTRL_EXT_SDP4_DIR |
5843 5846 E1000_CTRL_EXT_SDP6_DIR |
5844 5847 E1000_CTRL_EXT_SDP3_DIR); /* 0x0DD0 */
5845 5848 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5846 5849
5847 5850 /*
5848 5851 			 * This sequence tunes the PHY's SDP; these are not
5849 5852 			 * customer-settable values. For background, see comments above
5850 5853 * e1000g_set_internal_loopback().
5851 5854 */
5852 5855 (void) e1000_write_phy_reg(hw, 0x0, 0x140);
5853 5856 msec_delay(10);
5854 5857 (void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
5855 5858 (void) e1000_write_phy_reg(hw, 0x12, 0xC10);
5856 5859 (void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
5857 5860 (void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
5858 5861 (void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
5859 5862 (void) e1000_write_phy_reg(hw, 0x1F33, 0x0);
5860 5863
5861 5864 (void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
5862 5865 (void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
5863 5866 (void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
5864 5867 (void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
5865 5868 (void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
5866 5869
5867 5870 msec_delay(50);
5868 5871 break;
5869 5872 case e1000_media_type_fiber:
5870 5873 case e1000_media_type_internal_serdes:
5871 5874 status = E1000_READ_REG(hw, E1000_STATUS);
5872 5875 if (((status & E1000_STATUS_LU) == 0) ||
5873 5876 (hw->phy.media_type ==
5874 5877 e1000_media_type_internal_serdes)) {
5875 5878 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5876 5879 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5877 5880 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5878 5881 }
5879 5882
5880 5883 /* Disable autoneg by setting bit 31 of TXCW to zero */
5881 5884 txcw = E1000_READ_REG(hw, E1000_TXCW);
5882 5885 txcw &= ~((uint32_t)1 << 31);
5883 5886 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5884 5887
5885 5888 /*
5886 5889 * Write 0x410 to Serdes Control register
5887 5890 * to enable Serdes analog loopback
5888 5891 */
5889 5892 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5890 5893 msec_delay(10);
5891 5894 break;
5892 5895 default:
5893 5896 break;
5894 5897 }
5895 5898 break;
5896 5899 case e1000_82574:
5897 5900 case e1000_80003es2lan:
5898 5901 case e1000_ich9lan:
5899 5902 case e1000_ich10lan:
5900 5903 (void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
5901 5904 (void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
5902 5905 phydata | (1 << 5));
5903 5906 Adapter->param_adv_autoneg = 1;
5904 5907 Adapter->param_adv_1000fdx = 1;
5905 5908 (void) e1000g_reset_link(Adapter);
5906 5909 break;
5907 5910 }
5908 5911 }
5909 5912
5910 5913 static void
5911 5914 e1000g_set_external_loopback_100(struct e1000g *Adapter)
5912 5915 {
5913 5916 struct e1000_hw *hw;
5914 5917 uint32_t ctrl;
5915 5918 uint16_t phy_ctrl;
5916 5919
5917 5920 hw = &Adapter->shared;
5918 5921
5919 5922 /* Disable Smart Power Down */
5920 5923 phy_spd_state(hw, B_FALSE);
5921 5924
5922 5925 phy_ctrl = (MII_CR_FULL_DUPLEX |
5923 5926 MII_CR_SPEED_100);
5924 5927
5925 5928 /* Force 100/FD, reset PHY */
5926 5929 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5927 5930 phy_ctrl | MII_CR_RESET); /* 0xA100 */
5928 5931 msec_delay(10);
5929 5932
5930 5933 /* Force 100/FD */
5931 5934 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5932 5935 phy_ctrl); /* 0x2100 */
5933 5936 msec_delay(10);
5934 5937
5935 5938 /* Now setup the MAC to the same speed/duplex as the PHY. */
5936 5939 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5937 5940 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5938 5941 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */
5939 5942 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5940 5943 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5941 5944 E1000_CTRL_SPD_100 | /* Force Speed to 100 */
5942 5945 E1000_CTRL_FD); /* Force Duplex to FULL */
5943 5946
5944 5947 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5945 5948 }
5946 5949
5947 5950 static void
5948 5951 e1000g_set_external_loopback_10(struct e1000g *Adapter)
5949 5952 {
5950 5953 struct e1000_hw *hw;
5951 5954 uint32_t ctrl;
5952 5955 uint16_t phy_ctrl;
5953 5956
5954 5957 hw = &Adapter->shared;
5955 5958
5956 5959 /* Disable Smart Power Down */
5957 5960 phy_spd_state(hw, B_FALSE);
5958 5961
5959 5962 phy_ctrl = (MII_CR_FULL_DUPLEX |
5960 5963 MII_CR_SPEED_10);
5961 5964
5962 5965 /* Force 10/FD, reset PHY */
5963 5966 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5964 5967 phy_ctrl | MII_CR_RESET); /* 0x8100 */
5965 5968 msec_delay(10);
5966 5969
5967 5970 /* Force 10/FD */
5968 5971 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5969 5972 phy_ctrl); /* 0x0100 */
5970 5973 msec_delay(10);
5971 5974
5972 5975 /* Now setup the MAC to the same speed/duplex as the PHY. */
5973 5976 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5974 5977 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5975 5978 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */
5976 5979 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5977 5980 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5978 5981 E1000_CTRL_SPD_10 | /* Force Speed to 10 */
5979 5982 E1000_CTRL_FD); /* Force Duplex to FULL */
5980 5983
5981 5984 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5982 5985 }
5983 5986
5984 5987 #ifdef __sparc
5985 5988 static boolean_t
5986 5989 e1000g_find_mac_address(struct e1000g *Adapter)
5987 5990 {
5988 5991 struct e1000_hw *hw = &Adapter->shared;
5989 5992 uchar_t *bytes;
5990 5993 struct ether_addr sysaddr;
5991 5994 uint_t nelts;
5992 5995 int err;
5993 5996 boolean_t found = B_FALSE;
5994 5997
5995 5998 /*
5996 5999 * The "vendor's factory-set address" may already have
5997 6000 * been extracted from the chip, but if the property
5998 6001 * "local-mac-address" is set we use that instead.
5999 6002 *
6000 6003 * We check whether it looks like an array of 6
6001 6004 * bytes (which it should, if OBP set it). If we can't
6002 6005 * make sense of it this way, we'll ignore it.
6003 6006 */
6004 6007 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
6005 6008 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
6006 6009 if (err == DDI_PROP_SUCCESS) {
6007 6010 if (nelts == ETHERADDRL) {
6008 6011 while (nelts--)
6009 6012 hw->mac.addr[nelts] = bytes[nelts];
6010 6013 found = B_TRUE;
6011 6014 }
6012 6015 ddi_prop_free(bytes);
6013 6016 }
6014 6017
6015 6018 /*
6016 6019 * Look up the OBP property "local-mac-address?". If the user has set
6017 6020 * 'local-mac-address? = false', use "the system address" instead.
6018 6021 */
6019 6022 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
6020 6023 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
6021 6024 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
6022 6025 if (localetheraddr(NULL, &sysaddr) != 0) {
6023 6026 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
6024 6027 found = B_TRUE;
6025 6028 }
6026 6029 }
6027 6030 ddi_prop_free(bytes);
6028 6031 }
6029 6032
6030 6033 /*
6031 6034 * Finally(!), if there's a valid "mac-address" property (created
6032 6035 * if we netbooted from this interface), we must use this instead
6033 6036 * of any of the above to ensure that the NFS/install server doesn't
6034 6037 * get confused by the address changing as Solaris takes over!
6035 6038 */
6036 6039 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
6037 6040 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
6038 6041 if (err == DDI_PROP_SUCCESS) {
6039 6042 if (nelts == ETHERADDRL) {
6040 6043 while (nelts--)
6041 6044 hw->mac.addr[nelts] = bytes[nelts];
6042 6045 found = B_TRUE;
6043 6046 }
6044 6047 ddi_prop_free(bytes);
6045 6048 }
6046 6049
6047 6050 if (found) {
6048 6051 bcopy(hw->mac.addr, hw->mac.perm_addr,
6049 6052 ETHERADDRL);
6050 6053 }
6051 6054
6052 6055 return (found);
6053 6056 }
6054 6057 #endif
6055 6058
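/*
 * e1000g_add_intrs - allocate and register the driver interrupt, using MSI
 * on PCI-E (82571 and later) devices and falling back to a legacy fixed
 * interrupt otherwise.
 */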
6056 6059 static int
6057 6060 e1000g_add_intrs(struct e1000g *Adapter)
6058 6061 {
6059 6062 dev_info_t *devinfo;
6060 6063 int intr_types;
6061 6064 int rc;
6062 6065
6063 6066 devinfo = Adapter->dip;
6064 6067
6065 6068 /* Get supported interrupt types */
6066 6069 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
6067 6070
6068 6071 if (rc != DDI_SUCCESS) {
6069 6072 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6070 6073 "Get supported interrupt types failed: %d\n", rc);
6071 6074 return (DDI_FAILURE);
6072 6075 }
6073 6076
6074 6077 /*
6075 6078 * Based on Intel Technical Advisory document (TA-160), there are some
6076 6079 * cases where some older Intel PCI-X NICs may "advertise" to the OS
6077 6080 	 * that they support MSI, but in fact have problems.
6078 6081 * So we should only enable MSI for PCI-E NICs and disable MSI for old
6079 6082 * PCI/PCI-X NICs.
6080 6083 */
6081 6084 if (Adapter->shared.mac.type < e1000_82571)
6082 6085 Adapter->msi_enable = B_FALSE;
6083 6086
6084 6087 if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
6085 6088 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
6086 6089
6087 6090 if (rc != DDI_SUCCESS) {
6088 6091 /* EMPTY */
6089 6092 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6090 6093 "Add MSI failed, trying Legacy interrupts\n");
6091 6094 } else {
6092 6095 Adapter->intr_type = DDI_INTR_TYPE_MSI;
6093 6096 }
6094 6097 }
6095 6098
6096 6099 if ((Adapter->intr_type == 0) &&
6097 6100 (intr_types & DDI_INTR_TYPE_FIXED)) {
6098 6101 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
6099 6102
6100 6103 if (rc != DDI_SUCCESS) {
6101 6104 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6102 6105 "Add Legacy interrupts failed\n");
6103 6106 return (DDI_FAILURE);
6104 6107 }
6105 6108
6106 6109 Adapter->intr_type = DDI_INTR_TYPE_FIXED;
6107 6110 }
6108 6111
6109 6112 if (Adapter->intr_type == 0) {
6110 6113 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6111 6114 "No interrupts registered\n");
6112 6115 return (DDI_FAILURE);
6113 6116 }
6114 6117
6115 6118 return (DDI_SUCCESS);
6116 6119 }
6117 6120
6118 6121 /*
6119 6122 * e1000g_intr_add() handles MSI/Legacy interrupts
6120 6123 */
6121 6124 static int
6122 6125 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
6123 6126 {
6124 6127 dev_info_t *devinfo;
6125 6128 int count, avail, actual;
6126 6129 int x, y, rc, inum = 0;
6127 6130 int flag;
6128 6131 ddi_intr_handler_t *intr_handler;
6129 6132
6130 6133 devinfo = Adapter->dip;
6131 6134
6132 6135 /* get number of interrupts */
6133 6136 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
6134 6137 if ((rc != DDI_SUCCESS) || (count == 0)) {
6135 6138 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6136 6139 "Get interrupt number failed. Return: %d, count: %d\n",
6137 6140 rc, count);
6138 6141 return (DDI_FAILURE);
6139 6142 }
6140 6143
6141 6144 /* get number of available interrupts */
6142 6145 rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
6143 6146 if ((rc != DDI_SUCCESS) || (avail == 0)) {
6144 6147 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6145 6148 "Get interrupt available number failed. "
6146 6149 "Return: %d, available: %d\n", rc, avail);
6147 6150 return (DDI_FAILURE);
6148 6151 }
6149 6152
6150 6153 if (avail < count) {
6151 6154 /* EMPTY */
6152 6155 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6153 6156 "Interrupts count: %d, available: %d\n",
6154 6157 count, avail);
6155 6158 }
6156 6159
6157 6160 /* Allocate an array of interrupt handles */
6158 6161 Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
6159 6162 Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
6160 6163
6161 6164 /* Set NORMAL behavior for both MSI and FIXED interrupt */
6162 6165 flag = DDI_INTR_ALLOC_NORMAL;
6163 6166
6164 6167 /* call ddi_intr_alloc() */
6165 6168 rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
6166 6169 count, &actual, flag);
6167 6170
6168 6171 if ((rc != DDI_SUCCESS) || (actual == 0)) {
6169 6172 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6170 6173 "Allocate interrupts failed: %d\n", rc);
6171 6174
6172 6175 kmem_free(Adapter->htable, Adapter->intr_size);
6173 6176 return (DDI_FAILURE);
6174 6177 }
6175 6178
6176 6179 if (actual < count) {
6177 6180 /* EMPTY */
6178 6181 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6179 6182 "Interrupts requested: %d, received: %d\n",
6180 6183 count, actual);
6181 6184 }
6182 6185
6183 6186 Adapter->intr_cnt = actual;
6184 6187
6185 6188 /* Get priority for first msi, assume remaining are all the same */
6186 6189 rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
6187 6190
6188 6191 if (rc != DDI_SUCCESS) {
6189 6192 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6190 6193 "Get interrupt priority failed: %d\n", rc);
6191 6194
6192 6195 /* Free already allocated intr */
6193 6196 for (y = 0; y < actual; y++)
6194 6197 (void) ddi_intr_free(Adapter->htable[y]);
6195 6198
6196 6199 kmem_free(Adapter->htable, Adapter->intr_size);
6197 6200 return (DDI_FAILURE);
6198 6201 }
6199 6202
6200 6203 /*
6201 6204 * In Legacy Interrupt mode, for PCI-Express adapters, we should
6202 6205 * use the interrupt service routine e1000g_intr_pciexpress()
6203 6206 	 * to avoid interrupt stealing when sharing an interrupt with other
6204 6207 * devices.
6205 6208 */
6206 6209 if (Adapter->shared.mac.type < e1000_82571)
6207 6210 intr_handler = (ddi_intr_handler_t *)e1000g_intr;
6208 6211 else
6209 6212 intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
6210 6213
6211 6214 /* Call ddi_intr_add_handler() */
6212 6215 for (x = 0; x < actual; x++) {
6213 6216 rc = ddi_intr_add_handler(Adapter->htable[x],
6214 6217 intr_handler, (caddr_t)Adapter, NULL);
6215 6218
6216 6219 if (rc != DDI_SUCCESS) {
6217 6220 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6218 6221 "Add interrupt handler failed: %d\n", rc);
6219 6222
6220 6223 /* Remove already added handler */
6221 6224 for (y = 0; y < x; y++)
6222 6225 (void) ddi_intr_remove_handler(
6223 6226 Adapter->htable[y]);
6224 6227
6225 6228 /* Free already allocated intr */
6226 6229 for (y = 0; y < actual; y++)
6227 6230 (void) ddi_intr_free(Adapter->htable[y]);
6228 6231
6229 6232 kmem_free(Adapter->htable, Adapter->intr_size);
6230 6233 return (DDI_FAILURE);
6231 6234 }
6232 6235 }
6233 6236
6234 6237 rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
6235 6238
6236 6239 if (rc != DDI_SUCCESS) {
6237 6240 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6238 6241 "Get interrupt cap failed: %d\n", rc);
6239 6242
6240 6243 /* Free already allocated intr */
6241 6244 for (y = 0; y < actual; y++) {
6242 6245 (void) ddi_intr_remove_handler(Adapter->htable[y]);
6243 6246 (void) ddi_intr_free(Adapter->htable[y]);
6244 6247 }
6245 6248
6246 6249 kmem_free(Adapter->htable, Adapter->intr_size);
6247 6250 return (DDI_FAILURE);
6248 6251 }
6249 6252
6250 6253 return (DDI_SUCCESS);
6251 6254 }
6252 6255
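/*
 * e1000g_rem_intrs - remove the registered interrupt handlers and free the
 * allocated interrupt handles.
 */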
6253 6256 static int
6254 6257 e1000g_rem_intrs(struct e1000g *Adapter)
6255 6258 {
6256 6259 int x;
6257 6260 int rc;
6258 6261
6259 6262 for (x = 0; x < Adapter->intr_cnt; x++) {
6260 6263 rc = ddi_intr_remove_handler(Adapter->htable[x]);
6261 6264 if (rc != DDI_SUCCESS) {
6262 6265 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6263 6266 "Remove intr handler failed: %d\n", rc);
6264 6267 return (DDI_FAILURE);
6265 6268 }
6266 6269
6267 6270 rc = ddi_intr_free(Adapter->htable[x]);
6268 6271 if (rc != DDI_SUCCESS) {
6269 6272 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6270 6273 "Free intr failed: %d\n", rc);
6271 6274 return (DDI_FAILURE);
6272 6275 }
6273 6276 }
6274 6277
6275 6278 kmem_free(Adapter->htable, Adapter->intr_size);
6276 6279
6277 6280 return (DDI_SUCCESS);
6278 6281 }
6279 6282
6280 6283 static int
6281 6284 e1000g_enable_intrs(struct e1000g *Adapter)
6282 6285 {
6283 6286 int x;
6284 6287 int rc;
6285 6288
6286 6289 /* Enable interrupts */
6287 6290 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6288 6291 /* Call ddi_intr_block_enable() for MSI */
6289 6292 rc = ddi_intr_block_enable(Adapter->htable,
6290 6293 Adapter->intr_cnt);
6291 6294 if (rc != DDI_SUCCESS) {
6292 6295 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6293 6296 "Enable block intr failed: %d\n", rc);
6294 6297 return (DDI_FAILURE);
6295 6298 }
6296 6299 } else {
6297 6300 /* Call ddi_intr_enable() for Legacy/MSI non block enable */
6298 6301 for (x = 0; x < Adapter->intr_cnt; x++) {
6299 6302 rc = ddi_intr_enable(Adapter->htable[x]);
6300 6303 if (rc != DDI_SUCCESS) {
6301 6304 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6302 6305 "Enable intr failed: %d\n", rc);
6303 6306 return (DDI_FAILURE);
6304 6307 }
6305 6308 }
6306 6309 }
6307 6310
6308 6311 return (DDI_SUCCESS);
6309 6312 }
6310 6313
6311 6314 static int
6312 6315 e1000g_disable_intrs(struct e1000g *Adapter)
6313 6316 {
6314 6317 int x;
6315 6318 int rc;
6316 6319
6317 6320 /* Disable all interrupts */
6318 6321 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6319 6322 rc = ddi_intr_block_disable(Adapter->htable,
6320 6323 Adapter->intr_cnt);
6321 6324 if (rc != DDI_SUCCESS) {
6322 6325 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6323 6326 "Disable block intr failed: %d\n", rc);
6324 6327 return (DDI_FAILURE);
6325 6328 }
6326 6329 } else {
6327 6330 for (x = 0; x < Adapter->intr_cnt; x++) {
6328 6331 rc = ddi_intr_disable(Adapter->htable[x]);
6329 6332 if (rc != DDI_SUCCESS) {
6330 6333 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6331 6334 "Disable intr failed: %d\n", rc);
6332 6335 return (DDI_FAILURE);
6333 6336 }
6334 6337 }
6335 6338 }
6336 6339
6337 6340 return (DDI_SUCCESS);
6338 6341 }
6339 6342
6340 6343 /*
6341 6344 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter
6342 6345 */
6343 6346 static void
6344 6347 e1000g_get_phy_state(struct e1000g *Adapter)
6345 6348 {
6346 6349 struct e1000_hw *hw = &Adapter->shared;
6347 6350
6348 6351 if (hw->phy.media_type == e1000_media_type_copper) {
6349 6352 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
6350 6353 (void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
6351 6354 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
6352 6355 &Adapter->phy_an_adv);
6353 6356 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP,
6354 6357 &Adapter->phy_an_exp);
6355 6358 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
6356 6359 &Adapter->phy_ext_status);
6357 6360 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL,
6358 6361 &Adapter->phy_1000t_ctrl);
6359 6362 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
6360 6363 &Adapter->phy_1000t_status);
6361 6364 (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY,
6362 6365 &Adapter->phy_lp_able);
6363 6366
6364 6367 Adapter->param_autoneg_cap =
6365 6368 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
6366 6369 Adapter->param_pause_cap =
6367 6370 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
6368 6371 Adapter->param_asym_pause_cap =
6369 6372 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
6370 6373 Adapter->param_1000fdx_cap =
6371 6374 ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
6372 6375 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
6373 6376 Adapter->param_1000hdx_cap =
6374 6377 ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
6375 6378 (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
6376 6379 Adapter->param_100t4_cap =
6377 6380 (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
6378 6381 Adapter->param_100fdx_cap =
6379 6382 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
6380 6383 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
6381 6384 Adapter->param_100hdx_cap =
6382 6385 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
6383 6386 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
6384 6387 Adapter->param_10fdx_cap =
6385 6388 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
6386 6389 Adapter->param_10hdx_cap =
6387 6390 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
6388 6391
6389 6392 Adapter->param_adv_autoneg = hw->mac.autoneg;
6390 6393 Adapter->param_adv_pause =
6391 6394 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
6392 6395 Adapter->param_adv_asym_pause =
6393 6396 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
6394 6397 Adapter->param_adv_1000hdx =
6395 6398 (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
6396 6399 Adapter->param_adv_100t4 =
6397 6400 (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
6398 6401 if (Adapter->param_adv_autoneg == 1) {
6399 6402 Adapter->param_adv_1000fdx =
6400 6403 (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS)
6401 6404 ? 1 : 0;
6402 6405 Adapter->param_adv_100fdx =
6403 6406 (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS)
6404 6407 ? 1 : 0;
6405 6408 Adapter->param_adv_100hdx =
6406 6409 (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS)
6407 6410 ? 1 : 0;
6408 6411 Adapter->param_adv_10fdx =
6409 6412 (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
6410 6413 Adapter->param_adv_10hdx =
6411 6414 (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
6412 6415 }
6413 6416
6414 6417 Adapter->param_lp_autoneg =
6415 6418 (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
6416 6419 Adapter->param_lp_pause =
6417 6420 (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
6418 6421 Adapter->param_lp_asym_pause =
6419 6422 (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
6420 6423 Adapter->param_lp_1000fdx =
6421 6424 (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
6422 6425 Adapter->param_lp_1000hdx =
6423 6426 (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
6424 6427 Adapter->param_lp_100t4 =
6425 6428 (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
6426 6429 Adapter->param_lp_100fdx =
6427 6430 (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
6428 6431 Adapter->param_lp_100hdx =
6429 6432 (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
6430 6433 Adapter->param_lp_10fdx =
6431 6434 (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
6432 6435 Adapter->param_lp_10hdx =
6433 6436 (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
6434 6437 } else {
6435 6438 /*
6436 6439 		 * A 1Gig fiber adapter only offers 1Gig full duplex, meaning
6437 6440 		 * it can only work with a 1Gig full-duplex link partner.
6438 6441 */
6439 6442 Adapter->param_autoneg_cap = 0;
6440 6443 Adapter->param_pause_cap = 1;
6441 6444 Adapter->param_asym_pause_cap = 1;
6442 6445 Adapter->param_1000fdx_cap = 1;
6443 6446 Adapter->param_1000hdx_cap = 0;
6444 6447 Adapter->param_100t4_cap = 0;
6445 6448 Adapter->param_100fdx_cap = 0;
6446 6449 Adapter->param_100hdx_cap = 0;
6447 6450 Adapter->param_10fdx_cap = 0;
6448 6451 Adapter->param_10hdx_cap = 0;
6449 6452
6450 6453 Adapter->param_adv_autoneg = 0;
6451 6454 Adapter->param_adv_pause = 1;
6452 6455 Adapter->param_adv_asym_pause = 1;
6453 6456 Adapter->param_adv_1000fdx = 1;
6454 6457 Adapter->param_adv_1000hdx = 0;
6455 6458 Adapter->param_adv_100t4 = 0;
6456 6459 Adapter->param_adv_100fdx = 0;
6457 6460 Adapter->param_adv_100hdx = 0;
6458 6461 Adapter->param_adv_10fdx = 0;
6459 6462 Adapter->param_adv_10hdx = 0;
6460 6463
6461 6464 Adapter->param_lp_autoneg = 0;
6462 6465 Adapter->param_lp_pause = 0;
6463 6466 Adapter->param_lp_asym_pause = 0;
6464 6467 Adapter->param_lp_1000fdx = 0;
6465 6468 Adapter->param_lp_1000hdx = 0;
6466 6469 Adapter->param_lp_100t4 = 0;
6467 6470 Adapter->param_lp_100fdx = 0;
6468 6471 Adapter->param_lp_100hdx = 0;
6469 6472 Adapter->param_lp_10fdx = 0;
6470 6473 Adapter->param_lp_10hdx = 0;
6471 6474 }
6472 6475 }
6473 6476
6474 6477 /*
6475 6478 * FMA support
6476 6479 */
6477 6480
6478 6481 int
6479 6482 e1000g_check_acc_handle(ddi_acc_handle_t handle)
6480 6483 {
6481 6484 ddi_fm_error_t de;
6482 6485
6483 6486 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
6484 6487 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
6485 6488 return (de.fme_status);
6486 6489 }
6487 6490
6488 6491 int
6489 6492 e1000g_check_dma_handle(ddi_dma_handle_t handle)
6490 6493 {
6491 6494 ddi_fm_error_t de;
6492 6495
6493 6496 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
6494 6497 return (de.fme_status);
6495 6498 }
6496 6499
6497 6500 /*
6498 6501 * The IO fault service error handling callback function
6499 6502 */
6500 6503 /* ARGSUSED2 */
6501 6504 static int
6502 6505 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6503 6506 {
6504 6507 /*
6505 6508 * as the driver can always deal with an error in any dma or
6506 6509 * access handle, we can just return the fme_status value.
6507 6510 */
6508 6511 pci_ereport_post(dip, err, NULL);
6509 6512 return (err->fme_status);
6510 6513 }
6511 6514
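/*
 * e1000g_fm_init - register the driver's FMA capabilities: flagged register
 * access and DMA handles, PCI ereports and the error callback, as selected
 * by fm_capabilities.
 */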
6512 6515 static void
6513 6516 e1000g_fm_init(struct e1000g *Adapter)
6514 6517 {
6515 6518 ddi_iblock_cookie_t iblk;
6516 6519 int fma_dma_flag;
6517 6520
6518 6521 /* Only register with IO Fault Services if we have some capability */
6519 6522 if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
6520 6523 e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
6521 6524 } else {
6522 6525 e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
6523 6526 }
6524 6527
6525 6528 if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
6526 6529 fma_dma_flag = 1;
6527 6530 } else {
6528 6531 fma_dma_flag = 0;
6529 6532 }
6530 6533
6531 6534 (void) e1000g_set_fma_flags(fma_dma_flag);
6532 6535
6533 6536 if (Adapter->fm_capabilities) {
6534 6537
6535 6538 /* Register capabilities with IO Fault Services */
6536 6539 ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);
6537 6540
6538 6541 /*
6539 6542 * Initialize pci ereport capabilities if ereport capable
6540 6543 */
6541 6544 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
6542 6545 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6543 6546 pci_ereport_setup(Adapter->dip);
6544 6547
6545 6548 /*
6546 6549 * Register error callback if error callback capable
6547 6550 */
6548 6551 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6549 6552 ddi_fm_handler_register(Adapter->dip,
6550 6553 e1000g_fm_error_cb, (void*) Adapter);
6551 6554 }
6552 6555 }
6553 6556
6554 6557 static void
6555 6558 e1000g_fm_fini(struct e1000g *Adapter)
6556 6559 {
6557 6560 /* Only unregister FMA capabilities if we registered some */
6558 6561 if (Adapter->fm_capabilities) {
6559 6562
6560 6563 /*
6561 6564 * Release any resources allocated by pci_ereport_setup()
6562 6565 */
6563 6566 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
6564 6567 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6565 6568 pci_ereport_teardown(Adapter->dip);
6566 6569
6567 6570 /*
6568 6571 * Un-register error callback if error callback capable
6569 6572 */
6570 6573 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6571 6574 ddi_fm_handler_unregister(Adapter->dip);
6572 6575
6573 6576 /* Unregister from IO Fault Services */
6574 6577 mutex_enter(&e1000g_rx_detach_lock);
6575 6578 ddi_fm_fini(Adapter->dip);
6576 6579 if (Adapter->priv_dip != NULL) {
6577 6580 DEVI(Adapter->priv_dip)->devi_fmhdl = NULL;
6578 6581 }
6579 6582 mutex_exit(&e1000g_rx_detach_lock);
6580 6583 }
6581 6584 }
6582 6585
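/*
 * e1000g_fm_ereport - post a device ereport of the given detail class if the
 * driver is ereport-capable.
 */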
6583 6586 void
6584 6587 e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
6585 6588 {
6586 6589 uint64_t ena;
6587 6590 char buf[FM_MAX_CLASS];
6588 6591
6589 6592 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6590 6593 ena = fm_ena_generate(0, FM_ENA_FMT1);
6591 6594 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
6592 6595 ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
6593 6596 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
6594 6597 }
6595 6598 }
6596 6599
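A minimal caller sketch (illustrative only; the ereport class and service impact chosen here are examples, not taken from this hunk): post a device ereport and then tell the FMA framework how the service was affected.

	/* Illustrative: report an invalid device state and degrade the service. */
	e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
	ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);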
6597 6600 /*
6598 6601 * quiesce(9E) entry point.
6599 6602 *
6600 6603 * This function is called when the system is single-threaded at high
6601 6604  * PIL with preemption disabled. Therefore, this function must not
6602 6605  * block.
6603 6606 *
6604 6607 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6605 6608 * DDI_FAILURE indicates an error condition and should almost never happen.
6606 6609 */
6607 6610 static int
6608 6611 e1000g_quiesce(dev_info_t *devinfo)
6609 6612 {
6610 6613 struct e1000g *Adapter;
6611 6614
6612 6615 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
6613 6616
6614 6617 if (Adapter == NULL)
6615 6618 return (DDI_FAILURE);
6616 6619
6617 6620 e1000g_clear_all_interrupts(Adapter);
6618 6621
6619 6622 (void) e1000_reset_hw(&Adapter->shared);
6620 6623
6621 6624 	/* Set up our HW Tx Head & Tail descriptor pointers */
6622 6625 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
6623 6626 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
6624 6627
6625 6628 	/* Set up our HW Rx Head & Tail descriptor pointers */
6626 6629 E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
6627 6630 E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);
6628 6631
6629 6632 return (DDI_SUCCESS);
6630 6633 }
6631 6634
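As a sketch of how this entry point is typically wired up, quiesce(9E) is passed as the final argument of DDI_DEFINE_STREAM_OPS; the ws_ops name and the NULL stream table below are assumptions, not taken from this hunk.

	/* Assumed dev_ops wiring; only the e1000g_* entry points are from this file. */
	DDI_DEFINE_STREAM_OPS(ws_ops, nulldev, nulldev, e1000g_attach,
	    e1000g_detach, nodev, NULL, D_MP, NULL, e1000g_quiesce);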
6632 6635 /*
6633 6636  * Synchronize the adv* and en* parameters.
6634 6637 *
6635 6638 * See comments in <sys/dld.h> for details of the *_en_*
6636 6639  * parameters. Setting the adv parameters via ndd synchronizes all
6637 6640  * the en parameters with them, implicitly disabling any settings
6638 6641  * made via dladm.
6639 6642 */
6640 6643 static void
6641 6644 e1000g_param_sync(struct e1000g *Adapter)
6642 6645 {
6643 6646 Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
6644 6647 Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
6645 6648 Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
6646 6649 Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
6647 6650 Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
6648 6651 Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
6649 6652 }
6650 6653
6651 6654 /*
6652 6655 * e1000g_get_driver_control - tell manageability firmware that the driver
6653 6656 * has control.
6654 6657 */
6655 6658 static void
6656 6659 e1000g_get_driver_control(struct e1000_hw *hw)
6657 6660 {
6658 6661 uint32_t ctrl_ext;
6659 6662 uint32_t swsm;
6660 6663
6661 6664 /* tell manageability firmware the driver has taken over */
6662 6665 switch (hw->mac.type) {
6663 6666 case e1000_82573:
6664 6667 swsm = E1000_READ_REG(hw, E1000_SWSM);
6665 6668 E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
6666 6669 break;
6667 6670 case e1000_82571:
6668 6671 case e1000_82572:
6669 6672 case e1000_82574:
6670 6673 case e1000_80003es2lan:
6671 6674 case e1000_ich8lan:
6672 6675 case e1000_ich9lan:
6673 6676 case e1000_ich10lan:
6674 6677 case e1000_pchlan:
6675 6678 case e1000_pch2lan:
6676 6679 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6677 6680 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6678 6681 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
6679 6682 break;
6680 6683 default:
6681 6684 /* no manageability firmware: do nothing */
6682 6685 break;
6683 6686 }
6684 6687 }
6685 6688
6686 6689 /*
6687 6690 * e1000g_release_driver_control - tell manageability firmware that the driver
6688 6691 * has released control.
6689 6692 */
6690 6693 static void
6691 6694 e1000g_release_driver_control(struct e1000_hw *hw)
6692 6695 {
6693 6696 uint32_t ctrl_ext;
6694 6697 uint32_t swsm;
6695 6698
6696 6699 /* tell manageability firmware the driver has released control */
6697 6700 switch (hw->mac.type) {
6698 6701 case e1000_82573:
6699 6702 swsm = E1000_READ_REG(hw, E1000_SWSM);
6700 6703 E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
6701 6704 break;
6702 6705 case e1000_82571:
6703 6706 case e1000_82572:
6704 6707 case e1000_82574:
6705 6708 case e1000_80003es2lan:
6706 6709 case e1000_ich8lan:
6707 6710 case e1000_ich9lan:
6708 6711 case e1000_ich10lan:
6709 6712 case e1000_pchlan:
6710 6713 case e1000_pch2lan:
6711 6714 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6712 6715 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6713 6716 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
6714 6717 break;
6715 6718 default:
6716 6719 /* no manageability firmware: do nothing */
6717 6720 break;
6718 6721 }
6719 6722 }
6720 6723
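These two routines bracket the period in which the driver owns the device. A rough sketch of the pattern (the call sites are assumptions and do not appear in this hunk):

	/* At start/init time: claim the device from manageability firmware. */
	e1000g_get_driver_control(&Adapter->shared);
	/* ... device is up and passing traffic ... */
	/* At stop/detach time: hand the device back to the firmware. */
	e1000g_release_driver_control(&Adapter->shared);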
6721 6724 /*
6722 6725 * Restore e1000g promiscuous mode.
6723 6726 */
6724 6727 static void
6725 6728 e1000g_restore_promisc(struct e1000g *Adapter)
6726 6729 {
6727 6730 if (Adapter->e1000g_promisc) {
6728 6731 uint32_t rctl;
6729 6732
6730 6733 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
6731 6734 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
6732 6735 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
6733 6736 }
6734 6737 }
|
↓ open down ↓ |
4184 lines elided |
↑ open up ↑ |