1 /*
2 * This file is provided under a CDDLv1 license. When using or
3 * redistributing this file, you may do so under this license.
4 * In redistributing this file this license must be included
5 * and no other modification of this header file is permitted.
6 *
7 * CDDL LICENSE SUMMARY
8 *
9 * Copyright(c) 1999 - 2009 Intel Corporation. All rights reserved.
10 *
11 * The contents of this file are subject to the terms of Version
12 * 1.0 of the Common Development and Distribution License (the "License").
13 *
14 * You should have received a copy of the License with this software.
15 * You can obtain a copy of the License at
16 * http://www.opensolaris.org/os/licensing.
17 * See the License for the specific language governing permissions
18 * and limitations under the License.
19 */
20
21 /*
22 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * Copyright 2012 DEY Storage Systems, Inc. All rights reserved.
27 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
28 * Copyright (c) 2018, Joyent, Inc.
29 */
30
31 /*
32 * **********************************************************************
33 * *
34 * Module Name: *
35 * e1000g_main.c *
36 * *
37 * Abstract: *
38  *   This file contains the interface routines for the Solaris OS.    *
39 * It has all DDI entry point routines and GLD entry point routines. *
40 * *
41  *   This file also contains the routines that handle initialization, *
42  *   the uninit routine and the interrupt routine.                     *
43 * *
44 * **********************************************************************
45 */
46
47 #include <sys/dlpi.h>
48 #include <sys/mac.h>
49 #include "e1000g_sw.h"
50 #include "e1000g_debug.h"
51
52 static char ident[] = "Intel PRO/1000 Ethernet";
53 /* LINTED E_STATIC_UNUSED */
54 static char e1000g_version[] = "Driver Ver. 5.3.24";
55
56 /*
57  * Prototypes for DDI entry points
58 */
59 static int e1000g_attach(dev_info_t *, ddi_attach_cmd_t);
60 static int e1000g_detach(dev_info_t *, ddi_detach_cmd_t);
61 static int e1000g_quiesce(dev_info_t *);
62
63 /*
64  * Prototypes for the init and intr routines
65 */
66 static int e1000g_resume(dev_info_t *);
67 static int e1000g_suspend(dev_info_t *);
68 static uint_t e1000g_intr_pciexpress(caddr_t);
69 static uint_t e1000g_intr(caddr_t);
70 static void e1000g_intr_work(struct e1000g *, uint32_t);
71 #pragma inline(e1000g_intr_work)
72 static int e1000g_init(struct e1000g *);
73 static int e1000g_start(struct e1000g *, boolean_t);
74 static void e1000g_stop(struct e1000g *, boolean_t);
75 static int e1000g_m_start(void *);
76 static void e1000g_m_stop(void *);
77 static int e1000g_m_promisc(void *, boolean_t);
78 static boolean_t e1000g_m_getcapab(void *, mac_capab_t, void *);
79 static int e1000g_m_multicst(void *, boolean_t, const uint8_t *);
80 static void e1000g_m_ioctl(void *, queue_t *, mblk_t *);
81 static int e1000g_m_setprop(void *, const char *, mac_prop_id_t,
82 uint_t, const void *);
83 static int e1000g_m_getprop(void *, const char *, mac_prop_id_t,
84 uint_t, void *);
85 static void e1000g_m_propinfo(void *, const char *, mac_prop_id_t,
86 mac_prop_info_handle_t);
87 static int e1000g_set_priv_prop(struct e1000g *, const char *, uint_t,
88 const void *);
89 static int e1000g_get_priv_prop(struct e1000g *, const char *, uint_t, void *);
90 static void e1000g_init_locks(struct e1000g *);
91 static void e1000g_destroy_locks(struct e1000g *);
92 static int e1000g_identify_hardware(struct e1000g *);
93 static int e1000g_regs_map(struct e1000g *);
94 static int e1000g_set_driver_params(struct e1000g *);
95 static void e1000g_set_bufsize(struct e1000g *);
96 static int e1000g_register_mac(struct e1000g *);
97 static boolean_t e1000g_rx_drain(struct e1000g *);
98 static boolean_t e1000g_tx_drain(struct e1000g *);
99 static void e1000g_init_unicst(struct e1000g *);
100 static int e1000g_unicst_set(struct e1000g *, const uint8_t *, int);
101 static int e1000g_alloc_rx_data(struct e1000g *);
102 static void e1000g_release_multicast(struct e1000g *);
103 static void e1000g_pch_limits(struct e1000g *);
104 static uint32_t e1000g_mtu2maxframe(uint32_t);
105
106 /*
107 * Local routines
108 */
109 static boolean_t e1000g_reset_adapter(struct e1000g *);
110 static void e1000g_tx_clean(struct e1000g *);
111 static void e1000g_rx_clean(struct e1000g *);
112 static void e1000g_link_timer(void *);
113 static void e1000g_local_timer(void *);
114 static boolean_t e1000g_link_check(struct e1000g *);
115 static boolean_t e1000g_stall_check(struct e1000g *);
116 static void e1000g_smartspeed(struct e1000g *);
117 static void e1000g_get_conf(struct e1000g *);
118 static boolean_t e1000g_get_prop(struct e1000g *, char *, int, int, int,
119 int *);
120 static void enable_watchdog_timer(struct e1000g *);
121 static void disable_watchdog_timer(struct e1000g *);
122 static void start_watchdog_timer(struct e1000g *);
123 static void restart_watchdog_timer(struct e1000g *);
124 static void stop_watchdog_timer(struct e1000g *);
125 static void stop_link_timer(struct e1000g *);
126 static void stop_82547_timer(e1000g_tx_ring_t *);
127 static void e1000g_force_speed_duplex(struct e1000g *);
128 static void e1000g_setup_max_mtu(struct e1000g *);
129 static void e1000g_get_max_frame_size(struct e1000g *);
130 static boolean_t is_valid_mac_addr(uint8_t *);
131 static void e1000g_unattach(dev_info_t *, struct e1000g *);
132 static int e1000g_get_bar_info(dev_info_t *, int, bar_info_t *);
133 #ifdef E1000G_DEBUG
134 static void e1000g_ioc_peek_reg(struct e1000g *, e1000g_peekpoke_t *);
135 static void e1000g_ioc_poke_reg(struct e1000g *, e1000g_peekpoke_t *);
136 static void e1000g_ioc_peek_mem(struct e1000g *, e1000g_peekpoke_t *);
137 static void e1000g_ioc_poke_mem(struct e1000g *, e1000g_peekpoke_t *);
138 static enum ioc_reply e1000g_pp_ioctl(struct e1000g *,
139 struct iocblk *, mblk_t *);
140 #endif
141 static enum ioc_reply e1000g_loopback_ioctl(struct e1000g *,
142 struct iocblk *, mblk_t *);
143 static boolean_t e1000g_check_loopback_support(struct e1000_hw *);
144 static boolean_t e1000g_set_loopback_mode(struct e1000g *, uint32_t);
145 static void e1000g_set_internal_loopback(struct e1000g *);
146 static void e1000g_set_external_loopback_1000(struct e1000g *);
147 static void e1000g_set_external_loopback_100(struct e1000g *);
148 static void e1000g_set_external_loopback_10(struct e1000g *);
149 static int e1000g_add_intrs(struct e1000g *);
150 static int e1000g_intr_add(struct e1000g *, int);
151 static int e1000g_rem_intrs(struct e1000g *);
152 static int e1000g_enable_intrs(struct e1000g *);
153 static int e1000g_disable_intrs(struct e1000g *);
154 static boolean_t e1000g_link_up(struct e1000g *);
155 #ifdef __sparc
156 static boolean_t e1000g_find_mac_address(struct e1000g *);
157 #endif
158 static void e1000g_get_phy_state(struct e1000g *);
159 static int e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
160 const void *impl_data);
161 static void e1000g_fm_init(struct e1000g *Adapter);
162 static void e1000g_fm_fini(struct e1000g *Adapter);
163 static void e1000g_param_sync(struct e1000g *);
164 static void e1000g_get_driver_control(struct e1000_hw *);
165 static void e1000g_release_driver_control(struct e1000_hw *);
166 static void e1000g_restore_promisc(struct e1000g *Adapter);
167
168 char *e1000g_priv_props[] = {
169 "_tx_bcopy_threshold",
170 "_tx_interrupt_enable",
171 "_tx_intr_delay",
172 "_tx_intr_abs_delay",
173 "_rx_bcopy_threshold",
174 "_max_num_rcv_packets",
175 "_rx_intr_delay",
176 "_rx_intr_abs_delay",
177 "_intr_throttling_rate",
178 "_intr_adaptive",
179 "_adv_pause_cap",
180 "_adv_asym_pause_cap",
181 NULL
182 };
183
184 static struct cb_ops cb_ws_ops = {
185 nulldev, /* cb_open */
186 nulldev, /* cb_close */
187 nodev, /* cb_strategy */
188 nodev, /* cb_print */
189 nodev, /* cb_dump */
190 nodev, /* cb_read */
191 nodev, /* cb_write */
192 nodev, /* cb_ioctl */
193 nodev, /* cb_devmap */
194 nodev, /* cb_mmap */
195 nodev, /* cb_segmap */
196 nochpoll, /* cb_chpoll */
197 ddi_prop_op, /* cb_prop_op */
198 NULL, /* cb_stream */
199 D_MP | D_HOTPLUG, /* cb_flag */
200 CB_REV, /* cb_rev */
201 nodev, /* cb_aread */
202 nodev /* cb_awrite */
203 };
204
205 static struct dev_ops ws_ops = {
206 DEVO_REV, /* devo_rev */
207 0, /* devo_refcnt */
208 NULL, /* devo_getinfo */
209 nulldev, /* devo_identify */
210 nulldev, /* devo_probe */
211 e1000g_attach, /* devo_attach */
212 e1000g_detach, /* devo_detach */
213 nodev, /* devo_reset */
214 &cb_ws_ops, /* devo_cb_ops */
215 NULL, /* devo_bus_ops */
216 ddi_power, /* devo_power */
217 e1000g_quiesce /* devo_quiesce */
218 };
219
220 static struct modldrv modldrv = {
221 &mod_driverops, /* Type of module. This one is a driver */
222 	ident,			/* Description string */
223 &ws_ops, /* driver ops */
224 };
225
226 static struct modlinkage modlinkage = {
227 MODREV_1, &modldrv, NULL
228 };
229
230 /* Access attributes for register mapping */
231 static ddi_device_acc_attr_t e1000g_regs_acc_attr = {
232 DDI_DEVICE_ATTR_V1,
233 DDI_STRUCTURE_LE_ACC,
234 DDI_STRICTORDER_ACC,
235 DDI_FLAGERR_ACC
236 };
237
238 #define E1000G_M_CALLBACK_FLAGS \
239 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
240
241 static mac_callbacks_t e1000g_m_callbacks = {
242 E1000G_M_CALLBACK_FLAGS,
243 e1000g_m_stat,
244 e1000g_m_start,
245 e1000g_m_stop,
246 e1000g_m_promisc,
247 e1000g_m_multicst,
248 NULL,
249 e1000g_m_tx,
250 NULL,
251 e1000g_m_ioctl,
252 e1000g_m_getcapab,
253 NULL,
254 NULL,
255 e1000g_m_setprop,
256 e1000g_m_getprop,
257 e1000g_m_propinfo
258 };
259
260 /*
261 * Global variables
262 */
263 uint32_t e1000g_jumbo_mtu = MAXIMUM_MTU_9K;
264 uint32_t e1000g_mblks_pending = 0;
265 /*
266 * Workaround for Dynamic Reconfiguration support, for x86 platform only.
267 * Here we maintain a private dev_info list if e1000g_force_detach is
268 * enabled. If we force the driver to detach while there are still some
269 * rx buffers retained in the upper layer, we have to keep a copy of the
270 * dev_info. In some cases (Dynamic Reconfiguration), the dev_info data
271 * structure will be freed after the driver is detached. However when we
272 * finally free those rx buffers released by the upper layer, we need to
273 * refer to the dev_info to free the dma buffers. So we save a copy of
274 * the dev_info for this purpose. On x86 platform, we assume this copy
275 * of dev_info is always valid, but on SPARC platform, it could be invalid
276 * after the system board level DR operation. For this reason, the global
277 * variable e1000g_force_detach must be B_FALSE on SPARC platform.
278 */
279 #ifdef __sparc
280 boolean_t e1000g_force_detach = B_FALSE;
281 #else
282 boolean_t e1000g_force_detach = B_TRUE;
283 #endif
284 private_devi_list_t *e1000g_private_devi_list = NULL;
285
286 /*
287 * The mutex e1000g_rx_detach_lock is defined to protect the processing of
288 * the private dev_info list, and to serialize the processing of rx buffer
289 * freeing and rx buffer recycling.
290 */
291 kmutex_t e1000g_rx_detach_lock;
292 /*
293 * The rwlock e1000g_dma_type_lock is defined to protect the global flag
294 * e1000g_dma_type. For SPARC, the initial value of the flag is "USE_DVMA".
295 * If there are many e1000g instances, the system may run out of DVMA
296  * resources during the initialization of the instances; in that case the flag
297 * be changed to "USE_DMA". Because different e1000g instances are initialized
298 * in parallel, we need to use this lock to protect the flag.
299 */
300 krwlock_t e1000g_dma_type_lock;
301
302 /*
303  * The 82546 chipset is a dual-port device and both ports share one eeprom.
304  * Based on information from Intel, the 82546 chipset has a hardware problem:
305  * when one port is being reset while the other port is trying to access the
306  * eeprom, the system could hang or panic. To work around this hardware
307  * problem, we use a global mutex to prevent such operations from
308 * happening simultaneously on different instances. This workaround is applied
309 * to all the devices supported by this driver.
310 */
311 kmutex_t e1000g_nvm_lock;
312
313 /*
314 * Loadable module configuration entry points for the driver
315 */
316
317 /*
318 * _init - module initialization
319 */
320 int
321 _init(void)
322 {
323 int status;
324
325 mac_init_ops(&ws_ops, WSNAME);
326 status = mod_install(&modlinkage);
327 if (status != DDI_SUCCESS)
328 mac_fini_ops(&ws_ops);
329 else {
330 mutex_init(&e1000g_rx_detach_lock, NULL, MUTEX_DRIVER, NULL);
331 rw_init(&e1000g_dma_type_lock, NULL, RW_DRIVER, NULL);
332 mutex_init(&e1000g_nvm_lock, NULL, MUTEX_DRIVER, NULL);
333 }
334
335 return (status);
336 }
337
338 /*
339 * _fini - module finalization
340 */
341 int
342 _fini(void)
343 {
344 int status;
345
346 if (e1000g_mblks_pending != 0)
347 return (EBUSY);
348
349 status = mod_remove(&modlinkage);
350 if (status == DDI_SUCCESS) {
351 mac_fini_ops(&ws_ops);
352
353 if (e1000g_force_detach) {
354 private_devi_list_t *devi_node;
355
356 mutex_enter(&e1000g_rx_detach_lock);
357 while (e1000g_private_devi_list != NULL) {
358 devi_node = e1000g_private_devi_list;
359 e1000g_private_devi_list =
360 e1000g_private_devi_list->next;
361
362 kmem_free(devi_node->priv_dip,
363 sizeof (struct dev_info));
364 kmem_free(devi_node,
365 sizeof (private_devi_list_t));
366 }
367 mutex_exit(&e1000g_rx_detach_lock);
368 }
369
370 mutex_destroy(&e1000g_rx_detach_lock);
371 rw_destroy(&e1000g_dma_type_lock);
372 mutex_destroy(&e1000g_nvm_lock);
373 }
374
375 return (status);
376 }
377
378 /*
379 * _info - module information
380 */
381 int
382 _info(struct modinfo *modinfop)
383 {
384 return (mod_info(&modlinkage, modinfop));
385 }
386
387 /*
388 * e1000g_attach - driver attach
389 *
390 * This function is the device-specific initialization entry
391 * point. This entry point is required and must be written.
392 * The DDI_ATTACH command must be provided in the attach entry
393 * point. When attach() is called with cmd set to DDI_ATTACH,
394 * all normal kernel services (such as kmem_alloc(9F)) are
395 * available for use by the driver.
396 *
397 * The attach() function will be called once for each instance
398 * of the device on the system with cmd set to DDI_ATTACH.
399 * Until attach() succeeds, the only driver entry points which
400 * may be called are open(9E) and getinfo(9E).
401 */
402 static int
403 e1000g_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
404 {
405 struct e1000g *Adapter;
406 struct e1000_hw *hw;
407 struct e1000g_osdep *osdep;
408 int instance;
409
410 switch (cmd) {
411 default:
412 e1000g_log(NULL, CE_WARN,
413 "Unsupported command send to e1000g_attach... ");
414 return (DDI_FAILURE);
415
416 case DDI_RESUME:
417 return (e1000g_resume(devinfo));
418
419 case DDI_ATTACH:
420 break;
421 }
422
423 /*
424 * get device instance number
425 */
426 instance = ddi_get_instance(devinfo);
427
428 /*
429 * Allocate soft data structure
430 */
431 Adapter =
432 (struct e1000g *)kmem_zalloc(sizeof (*Adapter), KM_SLEEP);
433
434 Adapter->dip = devinfo;
435 Adapter->instance = instance;
436 Adapter->tx_ring->adapter = Adapter;
437 Adapter->rx_ring->adapter = Adapter;
438
439 hw = &Adapter->shared;
440 osdep = &Adapter->osdep;
441 hw->back = osdep;
442 osdep->adapter = Adapter;
443
444 ddi_set_driver_private(devinfo, (caddr_t)Adapter);
445
446 /*
447 * Initialize for fma support
448 */
449 (void) e1000g_get_prop(Adapter, "fm-capable",
450 0, 0x0f,
451 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
452 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE,
453 &Adapter->fm_capabilities);
454 e1000g_fm_init(Adapter);
455 Adapter->attach_progress |= ATTACH_PROGRESS_FMINIT;
456
457 /*
458 * PCI Configure
459 */
460 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
461 e1000g_log(Adapter, CE_WARN, "PCI configuration failed");
462 goto attach_fail;
463 }
464 Adapter->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
465
466 /*
467 * Setup hardware
468 */
469 if (e1000g_identify_hardware(Adapter) != DDI_SUCCESS) {
470 e1000g_log(Adapter, CE_WARN, "Identify hardware failed");
471 goto attach_fail;
472 }
473
474 /*
475 * Map in the device registers.
476 */
477 if (e1000g_regs_map(Adapter) != DDI_SUCCESS) {
478 e1000g_log(Adapter, CE_WARN, "Mapping registers failed");
479 goto attach_fail;
480 }
481 Adapter->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
482
483 /*
484 * Initialize driver parameters
485 */
486 if (e1000g_set_driver_params(Adapter) != DDI_SUCCESS) {
487 goto attach_fail;
488 }
489 Adapter->attach_progress |= ATTACH_PROGRESS_SETUP;
490
491 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
492 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
493 goto attach_fail;
494 }
495
496 /*
497 * Disable ULP support
498 */
499 (void) e1000_disable_ulp_lpt_lp(hw, TRUE);
500
501 /*
502 * Initialize interrupts
503 */
504 if (e1000g_add_intrs(Adapter) != DDI_SUCCESS) {
505 e1000g_log(Adapter, CE_WARN, "Add interrupts failed");
506 goto attach_fail;
507 }
508 Adapter->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
509
510 /*
511 	 * Initialize mutexes for this device.
512 	 * Do this before enabling the interrupt handler and
513 	 * registering the softint, to avoid the condition where the
514 	 * interrupt handler could try to use an uninitialized mutex.
515 */
516 e1000g_init_locks(Adapter);
517 Adapter->attach_progress |= ATTACH_PROGRESS_LOCKS;
518
519 /*
520 * Initialize Driver Counters
521 */
522 if (e1000g_init_stats(Adapter) != DDI_SUCCESS) {
523 e1000g_log(Adapter, CE_WARN, "Init stats failed");
524 goto attach_fail;
525 }
526 Adapter->attach_progress |= ATTACH_PROGRESS_KSTATS;
527
528 /*
529 * Initialize chip hardware and software structures
530 */
531 rw_enter(&Adapter->chip_lock, RW_WRITER);
532 if (e1000g_init(Adapter) != DDI_SUCCESS) {
533 rw_exit(&Adapter->chip_lock);
534 e1000g_log(Adapter, CE_WARN, "Adapter initialization failed");
535 goto attach_fail;
536 }
537 rw_exit(&Adapter->chip_lock);
538 Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
539
540 /*
541 * Register the driver to the MAC
542 */
543 if (e1000g_register_mac(Adapter) != DDI_SUCCESS) {
544 e1000g_log(Adapter, CE_WARN, "Register MAC failed");
545 goto attach_fail;
546 }
547 Adapter->attach_progress |= ATTACH_PROGRESS_MAC;
548
549 /*
550 * Now that mutex locks are initialized, and the chip is also
551 * initialized, enable interrupts.
552 */
553 if (e1000g_enable_intrs(Adapter) != DDI_SUCCESS) {
554 e1000g_log(Adapter, CE_WARN, "Enable DDI interrupts failed");
555 goto attach_fail;
556 }
557 Adapter->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
558
559 /*
560 	 * If e1000g_force_detach is enabled, we will create a new entry in the
561 	 * global private dip list, which maintains the priv_dip for DR support
562 	 * after the driver is detached.
563 */
564 if (e1000g_force_detach) {
565 private_devi_list_t *devi_node;
566
567 Adapter->priv_dip =
568 kmem_zalloc(sizeof (struct dev_info), KM_SLEEP);
569 bcopy(DEVI(devinfo), DEVI(Adapter->priv_dip),
570 sizeof (struct dev_info));
571
572 devi_node =
573 kmem_zalloc(sizeof (private_devi_list_t), KM_SLEEP);
574
575 mutex_enter(&e1000g_rx_detach_lock);
576 devi_node->priv_dip = Adapter->priv_dip;
577 devi_node->flag = E1000G_PRIV_DEVI_ATTACH;
578 devi_node->pending_rx_count = 0;
579
580 Adapter->priv_devi_node = devi_node;
581
582 if (e1000g_private_devi_list == NULL) {
583 devi_node->prev = NULL;
584 devi_node->next = NULL;
585 e1000g_private_devi_list = devi_node;
586 } else {
587 devi_node->prev = NULL;
588 devi_node->next = e1000g_private_devi_list;
589 e1000g_private_devi_list->prev = devi_node;
590 e1000g_private_devi_list = devi_node;
591 }
592 mutex_exit(&e1000g_rx_detach_lock);
593 }
594
595 Adapter->e1000g_state = E1000G_INITIALIZED;
596 return (DDI_SUCCESS);
597
598 attach_fail:
599 e1000g_unattach(devinfo, Adapter);
600 return (DDI_FAILURE);
601 }
602
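/*
 * e1000g_register_mac - register this driver instance with the GLDv3
 * MAC layer
 */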
603 static int
604 e1000g_register_mac(struct e1000g *Adapter)
605 {
606 struct e1000_hw *hw = &Adapter->shared;
607 mac_register_t *mac;
608 int err;
609
610 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
611 return (DDI_FAILURE);
612
613 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
614 mac->m_driver = Adapter;
615 mac->m_dip = Adapter->dip;
616 mac->m_src_addr = hw->mac.addr;
617 mac->m_callbacks = &e1000g_m_callbacks;
618 mac->m_min_sdu = 0;
619 mac->m_max_sdu = Adapter->default_mtu;
620 mac->m_margin = VLAN_TAGSZ;
621 mac->m_priv_props = e1000g_priv_props;
622 mac->m_v12n = MAC_VIRT_LEVEL1;
623
624 err = mac_register(mac, &Adapter->mh);
625 mac_free(mac);
626
627 return (err == 0 ? DDI_SUCCESS : DDI_FAILURE);
628 }
629
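/*
 * e1000g_identify_hardware - read the PCI IDs from config space and let
 * the shared code set the MAC type accordingly
 */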
630 static int
631 e1000g_identify_hardware(struct e1000g *Adapter)
632 {
633 struct e1000_hw *hw = &Adapter->shared;
634 struct e1000g_osdep *osdep = &Adapter->osdep;
635
636 /* Get the device id */
637 hw->vendor_id =
638 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
639 hw->device_id =
640 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
641 hw->revision_id =
642 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
643 hw->subsystem_device_id =
644 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
645 hw->subsystem_vendor_id =
646 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
647
648 if (e1000_set_mac_type(hw) != E1000_SUCCESS) {
649 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
650 "MAC type could not be set properly.");
651 return (DDI_FAILURE);
652 }
653
654 return (DDI_SUCCESS);
655 }
656
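/*
 * e1000g_regs_map - map the adapter registers, the ICH/PCH flash region
 * (where present) and the I/O space BAR (older chipsets) into kernel
 * virtual memory
 */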
657 static int
658 e1000g_regs_map(struct e1000g *Adapter)
659 {
660 dev_info_t *devinfo = Adapter->dip;
661 struct e1000_hw *hw = &Adapter->shared;
662 struct e1000g_osdep *osdep = &Adapter->osdep;
663 off_t mem_size;
664 bar_info_t bar_info;
665 int offset, rnumber;
666
667 rnumber = ADAPTER_REG_SET;
668 /* Get size of adapter register memory */
669 if (ddi_dev_regsize(devinfo, rnumber, &mem_size) !=
670 DDI_SUCCESS) {
671 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
672 "ddi_dev_regsize for registers failed");
673 return (DDI_FAILURE);
674 }
675
676 /* Map adapter register memory */
677 if ((ddi_regs_map_setup(devinfo, rnumber,
678 (caddr_t *)&hw->hw_addr, 0, mem_size, &e1000g_regs_acc_attr,
679 &osdep->reg_handle)) != DDI_SUCCESS) {
680 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
681 "ddi_regs_map_setup for registers failed");
682 goto regs_map_fail;
683 }
684
685 /* ICH needs to map flash memory */
686 switch (hw->mac.type) {
687 case e1000_ich8lan:
688 case e1000_ich9lan:
689 case e1000_ich10lan:
690 case e1000_pchlan:
691 case e1000_pch2lan:
692 case e1000_pch_lpt:
693 rnumber = ICH_FLASH_REG_SET;
694
695 /* get flash size */
696 if (ddi_dev_regsize(devinfo, rnumber,
697 &mem_size) != DDI_SUCCESS) {
698 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
699 "ddi_dev_regsize for ICH flash failed");
700 goto regs_map_fail;
701 }
702
703 /* map flash in */
704 if (ddi_regs_map_setup(devinfo, rnumber,
705 (caddr_t *)&hw->flash_address, 0,
706 mem_size, &e1000g_regs_acc_attr,
707 &osdep->ich_flash_handle) != DDI_SUCCESS) {
708 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
709 "ddi_regs_map_setup for ICH flash failed");
710 goto regs_map_fail;
711 }
712 break;
713 case e1000_pch_spt:
714 case e1000_pch_cnp:
715 /*
716 * On the SPT, the device flash is actually in BAR0, not a
717 * separate BAR. Therefore we end up setting the
718 * ich_flash_handle to be the same as the register handle.
719 		 * We do this to reduce confusion in the other functions and
720 		 * macros, though it does make the setup and tear-down paths
721 		 * slightly more complicated.
722 */
723 osdep->ich_flash_handle = osdep->reg_handle;
724 hw->flash_address = hw->hw_addr;
725 default:
726 break;
727 }
728
729 /* map io space */
730 switch (hw->mac.type) {
731 case e1000_82544:
732 case e1000_82540:
733 case e1000_82545:
734 case e1000_82546:
735 case e1000_82541:
736 case e1000_82541_rev_2:
737 /* find the IO bar */
738 rnumber = -1;
739 for (offset = PCI_CONF_BASE1;
740 offset <= PCI_CONF_BASE5; offset += 4) {
741 if (e1000g_get_bar_info(devinfo, offset, &bar_info)
742 != DDI_SUCCESS)
743 continue;
744 if (bar_info.type == E1000G_BAR_IO) {
745 rnumber = bar_info.rnumber;
746 break;
747 }
748 }
749
750 if (rnumber < 0) {
751 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
752 "No io space is found");
753 goto regs_map_fail;
754 }
755
756 /* get io space size */
757 if (ddi_dev_regsize(devinfo, rnumber,
758 &mem_size) != DDI_SUCCESS) {
759 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
760 "ddi_dev_regsize for io space failed");
761 goto regs_map_fail;
762 }
763
764 /* map io space */
765 if ((ddi_regs_map_setup(devinfo, rnumber,
766 (caddr_t *)&hw->io_base, 0, mem_size,
767 &e1000g_regs_acc_attr,
768 &osdep->io_reg_handle)) != DDI_SUCCESS) {
769 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
770 "ddi_regs_map_setup for io space failed");
771 goto regs_map_fail;
772 }
773 break;
774 default:
775 hw->io_base = 0;
776 break;
777 }
778
779 return (DDI_SUCCESS);
780
781 regs_map_fail:
782 if (osdep->reg_handle != NULL)
783 ddi_regs_map_free(&osdep->reg_handle);
784 if (osdep->ich_flash_handle != NULL && hw->mac.type < e1000_pch_spt)
785 ddi_regs_map_free(&osdep->ich_flash_handle);
786 return (DDI_FAILURE);
787 }
788
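/*
 * e1000g_set_driver_params - set up the shared code function pointers and
 * initialize the driver tunables to their default or configured values
 */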
789 static int
790 e1000g_set_driver_params(struct e1000g *Adapter)
791 {
792 struct e1000_hw *hw;
793
794 hw = &Adapter->shared;
795
796 /* Set MAC type and initialize hardware functions */
797 if (e1000_setup_init_funcs(hw, B_TRUE) != E1000_SUCCESS) {
798 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
799 "Could not setup hardware functions");
800 return (DDI_FAILURE);
801 }
802
803 /* Get bus information */
804 if (e1000_get_bus_info(hw) != E1000_SUCCESS) {
805 E1000G_DEBUGLOG_0(Adapter, CE_WARN,
806 "Could not get bus information");
807 return (DDI_FAILURE);
808 }
809
810 e1000_read_pci_cfg(hw, PCI_COMMAND_REGISTER, &hw->bus.pci_cmd_word);
811
812 hw->mac.autoneg_failed = B_TRUE;
813
814 /* Set the autoneg_wait_to_complete flag to B_FALSE */
815 hw->phy.autoneg_wait_to_complete = B_FALSE;
816
817 /* Adaptive IFS related changes */
818 hw->mac.adaptive_ifs = B_TRUE;
819
820 /* Enable phy init script for IGP phy of 82541/82547 */
821 if ((hw->mac.type == e1000_82547) ||
822 (hw->mac.type == e1000_82541) ||
823 (hw->mac.type == e1000_82547_rev_2) ||
824 (hw->mac.type == e1000_82541_rev_2))
825 e1000_init_script_state_82541(hw, B_TRUE);
826
827 /* Enable the TTL workaround for 82541/82547 */
828 e1000_set_ttl_workaround_state_82541(hw, B_TRUE);
829
830 #ifdef __sparc
831 Adapter->strip_crc = B_TRUE;
832 #else
833 Adapter->strip_crc = B_FALSE;
834 #endif
835
836 /* setup the maximum MTU size of the chip */
837 e1000g_setup_max_mtu(Adapter);
838
839 /* Get speed/duplex settings in conf file */
840 hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
841 hw->phy.autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
842 e1000g_force_speed_duplex(Adapter);
843
844 /* Get Jumbo Frames settings in conf file */
845 e1000g_get_max_frame_size(Adapter);
846
847 /* Get conf file properties */
848 e1000g_get_conf(Adapter);
849
850 /* enforce PCH limits */
851 e1000g_pch_limits(Adapter);
852
853 /* Set Rx/Tx buffer size */
854 e1000g_set_bufsize(Adapter);
855
856 /* Master Latency Timer */
857 Adapter->master_latency_timer = DEFAULT_MASTER_LATENCY_TIMER;
858
859 /* copper options */
860 if (hw->phy.media_type == e1000_media_type_copper) {
861 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
862 hw->phy.disable_polarity_correction = B_FALSE;
863 hw->phy.ms_type = e1000_ms_hw_default; /* E1000_MASTER_SLAVE */
864 }
865
866 /* The initial link state should be "unknown" */
867 Adapter->link_state = LINK_STATE_UNKNOWN;
868
869 /* Initialize rx parameters */
870 Adapter->rx_intr_delay = DEFAULT_RX_INTR_DELAY;
871 Adapter->rx_intr_abs_delay = DEFAULT_RX_INTR_ABS_DELAY;
872
873 /* Initialize tx parameters */
874 Adapter->tx_intr_enable = DEFAULT_TX_INTR_ENABLE;
875 Adapter->tx_bcopy_thresh = DEFAULT_TX_BCOPY_THRESHOLD;
876 Adapter->tx_intr_delay = DEFAULT_TX_INTR_DELAY;
877 Adapter->tx_intr_abs_delay = DEFAULT_TX_INTR_ABS_DELAY;
878
879 /* Initialize rx parameters */
880 Adapter->rx_bcopy_thresh = DEFAULT_RX_BCOPY_THRESHOLD;
881
882 return (DDI_SUCCESS);
883 }
884
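/*
 * e1000g_setup_max_mtu - determine the maximum MTU supported by this
 * MAC/PHY combination
 */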
885 static void
886 e1000g_setup_max_mtu(struct e1000g *Adapter)
887 {
888 struct e1000_mac_info *mac = &Adapter->shared.mac;
889 struct e1000_phy_info *phy = &Adapter->shared.phy;
890
891 switch (mac->type) {
892 /* types that do not support jumbo frames */
893 case e1000_ich8lan:
894 case e1000_82573:
895 case e1000_82583:
896 Adapter->max_mtu = ETHERMTU;
897 break;
898 /* ich9 supports jumbo frames except on one phy type */
899 case e1000_ich9lan:
900 if (phy->type == e1000_phy_ife)
901 Adapter->max_mtu = ETHERMTU;
902 else
903 Adapter->max_mtu = MAXIMUM_MTU_9K;
904 break;
905 /* pch can do jumbo frames up to 4K */
906 case e1000_pchlan:
907 Adapter->max_mtu = MAXIMUM_MTU_4K;
908 break;
909 /* pch2 can do jumbo frames up to 9K */
910 case e1000_pch2lan:
911 case e1000_pch_lpt:
912 case e1000_pch_spt:
913 case e1000_pch_cnp:
914 Adapter->max_mtu = MAXIMUM_MTU_9K;
915 break;
916 /* types with a special limit */
917 case e1000_82571:
918 case e1000_82572:
919 case e1000_82574:
920 case e1000_80003es2lan:
921 case e1000_ich10lan:
922 if (e1000g_jumbo_mtu >= ETHERMTU &&
923 e1000g_jumbo_mtu <= MAXIMUM_MTU_9K) {
924 Adapter->max_mtu = e1000g_jumbo_mtu;
925 } else {
926 Adapter->max_mtu = MAXIMUM_MTU_9K;
927 }
928 break;
929 /* default limit is 16K */
930 default:
931 Adapter->max_mtu = FRAME_SIZE_UPTO_16K -
932 sizeof (struct ether_vlan_header) - ETHERFCSL;
933 break;
934 }
935 }
936
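/*
 * e1000g_set_bufsize - select the rx/tx buffer sizes and the rx buffer
 * alignment based on the maximum frame size
 */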
937 static void
938 e1000g_set_bufsize(struct e1000g *Adapter)
939 {
940 struct e1000_mac_info *mac = &Adapter->shared.mac;
941 uint64_t rx_size;
942 uint64_t tx_size;
943
944 dev_info_t *devinfo = Adapter->dip;
945 #ifdef __sparc
946 ulong_t iommu_pagesize;
947 #endif
948 /* Get the system page size */
949 Adapter->sys_page_sz = ddi_ptob(devinfo, (ulong_t)1);
950
951 #ifdef __sparc
952 iommu_pagesize = dvma_pagesize(devinfo);
953 if (iommu_pagesize != 0) {
954 if (Adapter->sys_page_sz == iommu_pagesize) {
955 if (iommu_pagesize > 0x4000)
956 Adapter->sys_page_sz = 0x4000;
957 } else {
958 if (Adapter->sys_page_sz > iommu_pagesize)
959 Adapter->sys_page_sz = iommu_pagesize;
960 }
961 }
962 if (Adapter->lso_enable) {
963 Adapter->dvma_page_num = E1000_LSO_MAXLEN /
964 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
965 } else {
966 Adapter->dvma_page_num = Adapter->max_frame_size /
967 Adapter->sys_page_sz + E1000G_DEFAULT_DVMA_PAGE_NUM;
968 }
969 ASSERT(Adapter->dvma_page_num >= E1000G_DEFAULT_DVMA_PAGE_NUM);
970 #endif
971
972 Adapter->min_frame_size = ETHERMIN + ETHERFCSL;
973
974 if (Adapter->mem_workaround_82546 &&
975 ((mac->type == e1000_82545) ||
976 (mac->type == e1000_82546) ||
977 (mac->type == e1000_82546_rev_3))) {
978 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
979 } else {
980 rx_size = Adapter->max_frame_size;
981 if ((rx_size > FRAME_SIZE_UPTO_2K) &&
982 (rx_size <= FRAME_SIZE_UPTO_4K))
983 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_4K;
984 else if ((rx_size > FRAME_SIZE_UPTO_4K) &&
985 (rx_size <= FRAME_SIZE_UPTO_8K))
986 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_8K;
987 else if ((rx_size > FRAME_SIZE_UPTO_8K) &&
988 (rx_size <= FRAME_SIZE_UPTO_16K))
989 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_16K;
990 else
991 Adapter->rx_buffer_size = E1000_RX_BUFFER_SIZE_2K;
992 }
993 Adapter->rx_buffer_size += E1000G_IPALIGNROOM;
994
995 tx_size = Adapter->max_frame_size;
996 if ((tx_size > FRAME_SIZE_UPTO_2K) && (tx_size <= FRAME_SIZE_UPTO_4K))
997 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_4K;
998 else if ((tx_size > FRAME_SIZE_UPTO_4K) &&
999 (tx_size <= FRAME_SIZE_UPTO_8K))
1000 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_8K;
1001 else if ((tx_size > FRAME_SIZE_UPTO_8K) &&
1002 (tx_size <= FRAME_SIZE_UPTO_16K))
1003 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_16K;
1004 else
1005 Adapter->tx_buffer_size = E1000_TX_BUFFER_SIZE_2K;
1006
1007 /*
1008 	 * Wiseman adapters require receive buffers aligned on a 256-byte
1009 	 * boundary. Since Livengood does not require this, and forcing it
1010 	 * for all hardware would have performance implications, we make it
1011 	 * applicable only to Wiseman and to jumbo-frame mode; the rest of
1012 	 * the time normal frames are okay, but an unaligned buffer carries
1013 	 * a potential risk of data loss, so all Wiseman boards get 256-byte
1014 	 * aligned buffers.
1017 */
1018 if (mac->type < e1000_82543)
1019 Adapter->rx_buf_align = RECEIVE_BUFFER_ALIGN_SIZE;
1020 else
1021 Adapter->rx_buf_align = 1;
1022 }
1023
1024 /*
1025 * e1000g_detach - driver detach
1026 *
1027 * The detach() function is the complement of the attach routine.
1028 * If cmd is set to DDI_DETACH, detach() is used to remove the
1029 * state associated with a given instance of a device node
1030 * prior to the removal of that instance from the system.
1031 *
1032 * The detach() function will be called once for each instance
1033 * of the device for which there has been a successful attach()
1034 * once there are no longer any opens on the device.
1035 *
1036  * Interrupt routines are disabled, and all memory allocated by this
1037  * driver is freed.
1038 */
1039 static int
1040 e1000g_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
1041 {
1042 struct e1000g *Adapter;
1043 boolean_t rx_drain;
1044
1045 switch (cmd) {
1046 default:
1047 return (DDI_FAILURE);
1048
1049 case DDI_SUSPEND:
1050 return (e1000g_suspend(devinfo));
1051
1052 case DDI_DETACH:
1053 break;
1054 }
1055
1056 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1057 if (Adapter == NULL)
1058 return (DDI_FAILURE);
1059
1060 rx_drain = e1000g_rx_drain(Adapter);
1061 if (!rx_drain && !e1000g_force_detach)
1062 return (DDI_FAILURE);
1063
1064 if (mac_unregister(Adapter->mh) != 0) {
1065 e1000g_log(Adapter, CE_WARN, "Unregister MAC failed");
1066 return (DDI_FAILURE);
1067 }
1068 Adapter->attach_progress &= ~ATTACH_PROGRESS_MAC;
1069
1070 ASSERT(!(Adapter->e1000g_state & E1000G_STARTED));
1071
1072 if (!e1000g_force_detach && !rx_drain)
1073 return (DDI_FAILURE);
1074
1075 e1000g_unattach(devinfo, Adapter);
1076
1077 return (DDI_SUCCESS);
1078 }
1079
1080 /*
1081 * e1000g_free_priv_devi_node - free a priv_dip entry for driver instance
1082 */
1083 void
1084 e1000g_free_priv_devi_node(private_devi_list_t *devi_node)
1085 {
1086 ASSERT(e1000g_private_devi_list != NULL);
1087 ASSERT(devi_node != NULL);
1088
1089 if (devi_node->prev != NULL)
1090 devi_node->prev->next = devi_node->next;
1091 if (devi_node->next != NULL)
1092 devi_node->next->prev = devi_node->prev;
1093 if (devi_node == e1000g_private_devi_list)
1094 e1000g_private_devi_list = devi_node->next;
1095
1096 kmem_free(devi_node->priv_dip,
1097 sizeof (struct dev_info));
1098 kmem_free(devi_node,
1099 sizeof (private_devi_list_t));
1100 }
1101
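/*
 * e1000g_unattach - roll back whatever stages of attach have completed,
 * as recorded in attach_progress
 */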
1102 static void
1103 e1000g_unattach(dev_info_t *devinfo, struct e1000g *Adapter)
1104 {
1105 private_devi_list_t *devi_node;
1106 int result;
1107
1108 if (Adapter->e1000g_blink != NULL) {
1109 ddi_periodic_delete(Adapter->e1000g_blink);
1110 Adapter->e1000g_blink = NULL;
1111 }
1112
1113 if (Adapter->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1114 (void) e1000g_disable_intrs(Adapter);
1115 }
1116
1117 if (Adapter->attach_progress & ATTACH_PROGRESS_MAC) {
1118 (void) mac_unregister(Adapter->mh);
1119 }
1120
1121 if (Adapter->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1122 (void) e1000g_rem_intrs(Adapter);
1123 }
1124
1125 if (Adapter->attach_progress & ATTACH_PROGRESS_SETUP) {
1126 (void) ddi_prop_remove_all(devinfo);
1127 }
1128
1129 if (Adapter->attach_progress & ATTACH_PROGRESS_KSTATS) {
1130 kstat_delete((kstat_t *)Adapter->e1000g_ksp);
1131 }
1132
1133 if (Adapter->attach_progress & ATTACH_PROGRESS_INIT) {
1134 stop_link_timer(Adapter);
1135
1136 mutex_enter(&e1000g_nvm_lock);
1137 result = e1000_reset_hw(&Adapter->shared);
1138 mutex_exit(&e1000g_nvm_lock);
1139
1140 if (result != E1000_SUCCESS) {
1141 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1142 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1143 }
1144 }
1145
1146 e1000g_release_multicast(Adapter);
1147
1148 if (Adapter->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
1149 if (Adapter->osdep.reg_handle != NULL)
1150 ddi_regs_map_free(&Adapter->osdep.reg_handle);
1151 if (Adapter->osdep.ich_flash_handle != NULL &&
1152 Adapter->shared.mac.type < e1000_pch_spt)
1153 ddi_regs_map_free(&Adapter->osdep.ich_flash_handle);
1154 if (Adapter->osdep.io_reg_handle != NULL)
1155 ddi_regs_map_free(&Adapter->osdep.io_reg_handle);
1156 }
1157
1158 if (Adapter->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
1159 if (Adapter->osdep.cfg_handle != NULL)
1160 pci_config_teardown(&Adapter->osdep.cfg_handle);
1161 }
1162
1163 if (Adapter->attach_progress & ATTACH_PROGRESS_LOCKS) {
1164 e1000g_destroy_locks(Adapter);
1165 }
1166
1167 if (Adapter->attach_progress & ATTACH_PROGRESS_FMINIT) {
1168 e1000g_fm_fini(Adapter);
1169 }
1170
1171 mutex_enter(&e1000g_rx_detach_lock);
1172 if (e1000g_force_detach && (Adapter->priv_devi_node != NULL)) {
1173 devi_node = Adapter->priv_devi_node;
1174 devi_node->flag |= E1000G_PRIV_DEVI_DETACH;
1175
1176 if (devi_node->pending_rx_count == 0) {
1177 e1000g_free_priv_devi_node(devi_node);
1178 }
1179 }
1180 mutex_exit(&e1000g_rx_detach_lock);
1181
1182 kmem_free((caddr_t)Adapter, sizeof (struct e1000g));
1183
1184 /*
1185 * Another hotplug spec requirement,
1186 	 * run ddi_set_driver_private(devinfo, NULL);
1187 */
1188 ddi_set_driver_private(devinfo, NULL);
1189 }
1190
1191 /*
1192 * Get the BAR type and rnumber for a given PCI BAR offset
1193 */
1194 static int
1195 e1000g_get_bar_info(dev_info_t *dip, int bar_offset, bar_info_t *bar_info)
1196 {
1197 pci_regspec_t *regs;
1198 uint_t regs_length;
1199 int type, rnumber, rcount;
1200
1201 ASSERT((bar_offset >= PCI_CONF_BASE0) &&
1202 (bar_offset <= PCI_CONF_BASE5));
1203
1204 /*
1205 * Get the DDI "reg" property
1206 */
1207 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
1208 	    DDI_PROP_DONTPASS, "reg", (int **)&regs,
1209 	    &regs_length) != DDI_PROP_SUCCESS) {
1210 return (DDI_FAILURE);
1211 }
1212
1213 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t);
1214 /*
1215 * Check the BAR offset
1216 */
1217 for (rnumber = 0; rnumber < rcount; ++rnumber) {
1218 if (PCI_REG_REG_G(regs[rnumber].pci_phys_hi) == bar_offset) {
1219 type = regs[rnumber].pci_phys_hi & PCI_ADDR_MASK;
1220 break;
1221 }
1222 }
1223
1224 ddi_prop_free(regs);
1225
1226 if (rnumber >= rcount)
1227 return (DDI_FAILURE);
1228
1229 switch (type) {
1230 case PCI_ADDR_CONFIG:
1231 bar_info->type = E1000G_BAR_CONFIG;
1232 break;
1233 case PCI_ADDR_IO:
1234 bar_info->type = E1000G_BAR_IO;
1235 break;
1236 case PCI_ADDR_MEM32:
1237 bar_info->type = E1000G_BAR_MEM32;
1238 break;
1239 case PCI_ADDR_MEM64:
1240 bar_info->type = E1000G_BAR_MEM64;
1241 break;
1242 default:
1243 return (DDI_FAILURE);
1244 }
1245 bar_info->rnumber = rnumber;
1246 return (DDI_SUCCESS);
1247 }
1248
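/*
 * e1000g_init_locks - initialize the per-instance mutexes and rwlocks
 */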
1249 static void
1250 e1000g_init_locks(struct e1000g *Adapter)
1251 {
1252 e1000g_tx_ring_t *tx_ring;
1253 e1000g_rx_ring_t *rx_ring;
1254
1255 rw_init(&Adapter->chip_lock, NULL,
1256 RW_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1257 mutex_init(&Adapter->link_lock, NULL,
1258 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1259 mutex_init(&Adapter->watchdog_lock, NULL,
1260 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1261
1262 tx_ring = Adapter->tx_ring;
1263
1264 mutex_init(&tx_ring->tx_lock, NULL,
1265 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1266 mutex_init(&tx_ring->usedlist_lock, NULL,
1267 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1268 mutex_init(&tx_ring->freelist_lock, NULL,
1269 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1270
1271 rx_ring = Adapter->rx_ring;
1272
1273 mutex_init(&rx_ring->rx_lock, NULL,
1274 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1275
1276 mutex_init(&Adapter->e1000g_led_lock, NULL,
1277 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1278 }
1279
1280 static void
1281 e1000g_destroy_locks(struct e1000g *Adapter)
1282 {
1283 e1000g_tx_ring_t *tx_ring;
1284 e1000g_rx_ring_t *rx_ring;
1285
1286 mutex_destroy(&Adapter->e1000g_led_lock);
1287
1288 tx_ring = Adapter->tx_ring;
1289 mutex_destroy(&tx_ring->tx_lock);
1290 mutex_destroy(&tx_ring->usedlist_lock);
1291 mutex_destroy(&tx_ring->freelist_lock);
1292
1293 rx_ring = Adapter->rx_ring;
1294 mutex_destroy(&rx_ring->rx_lock);
1295
1296 mutex_destroy(&Adapter->link_lock);
1297 mutex_destroy(&Adapter->watchdog_lock);
1298 rw_destroy(&Adapter->chip_lock);
1299
1300 	/* destroy mutexes initialized in shared code */
1301 e1000_destroy_hw_mutex(&Adapter->shared);
1302 }
1303
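/*
 * e1000g_resume - DDI_RESUME handling; restart the adapter and the
 * watchdog timer if the interface was started before the suspend
 */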
1304 static int
1305 e1000g_resume(dev_info_t *devinfo)
1306 {
1307 struct e1000g *Adapter;
1308
1309 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1310 if (Adapter == NULL)
1311 e1000g_log(Adapter, CE_PANIC,
1312 "Instance pointer is null\n");
1313
1314 if (Adapter->dip != devinfo)
1315 e1000g_log(Adapter, CE_PANIC,
1316 "Devinfo is not the same as saved devinfo\n");
1317
1318 rw_enter(&Adapter->chip_lock, RW_WRITER);
1319
1320 if (Adapter->e1000g_state & E1000G_STARTED) {
1321 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
1322 rw_exit(&Adapter->chip_lock);
1323 /*
1324 * We note the failure, but return success, as the
1325 * system is still usable without this controller.
1326 */
1327 e1000g_log(Adapter, CE_WARN,
1328 "e1000g_resume: failed to restart controller\n");
1329 return (DDI_SUCCESS);
1330 }
1331 /* Enable and start the watchdog timer */
1332 enable_watchdog_timer(Adapter);
1333 }
1334
1335 Adapter->e1000g_state &= ~E1000G_SUSPENDED;
1336
1337 rw_exit(&Adapter->chip_lock);
1338
1339 return (DDI_SUCCESS);
1340 }
1341
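/*
 * e1000g_suspend - DDI_SUSPEND handling; stop the chip and all timers
 * if the interface is currently plumbed
 */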
1342 static int
1343 e1000g_suspend(dev_info_t *devinfo)
1344 {
1345 struct e1000g *Adapter;
1346
1347 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
1348 if (Adapter == NULL)
1349 return (DDI_FAILURE);
1350
1351 rw_enter(&Adapter->chip_lock, RW_WRITER);
1352
1353 Adapter->e1000g_state |= E1000G_SUSPENDED;
1354
1355 /* if the port isn't plumbed, we can simply return */
1356 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
1357 rw_exit(&Adapter->chip_lock);
1358 return (DDI_SUCCESS);
1359 }
1360
1361 e1000g_stop(Adapter, B_FALSE);
1362
1363 rw_exit(&Adapter->chip_lock);
1364
1365 /* Disable and stop all the timers */
1366 disable_watchdog_timer(Adapter);
1367 stop_link_timer(Adapter);
1368 stop_82547_timer(Adapter->tx_ring);
1369
1370 return (DDI_SUCCESS);
1371 }
1372
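/*
 * e1000g_init - bring the chip to a known good state: reset it, validate
 * the NVM checksum, read the MAC address, program the packet buffer
 * allocation and flow control thresholds, and initialize the hardware
 */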
1373 static int
1374 e1000g_init(struct e1000g *Adapter)
1375 {
1376 uint32_t pba;
1377 uint32_t high_water;
1378 struct e1000_hw *hw;
1379 clock_t link_timeout;
1380 int result;
1381
1382 hw = &Adapter->shared;
1383
1384 /*
1385 * reset to put the hardware in a known state
1386 * before we try to do anything with the eeprom
1387 */
1388 mutex_enter(&e1000g_nvm_lock);
1389 result = e1000_reset_hw(hw);
1390 mutex_exit(&e1000g_nvm_lock);
1391
1392 if (result != E1000_SUCCESS) {
1393 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1394 goto init_fail;
1395 }
1396
1397 mutex_enter(&e1000g_nvm_lock);
1398 result = e1000_validate_nvm_checksum(hw);
1399 if (result < E1000_SUCCESS) {
1400 /*
1401 * Some PCI-E parts fail the first check due to
1402 		 * the link being in a sleep state. Call it again;
1403 		 * if it fails a second time, it's a real issue.
1404 */
1405 result = e1000_validate_nvm_checksum(hw);
1406 }
1407 mutex_exit(&e1000g_nvm_lock);
1408
1409 if (result < E1000_SUCCESS) {
1410 e1000g_log(Adapter, CE_WARN,
1411 "Invalid NVM checksum. Please contact "
1412 "the vendor to update the NVM.");
1413 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1414 goto init_fail;
1415 }
1416
1417 result = 0;
1418 #ifdef __sparc
1419 /*
1420 	 * First, we try to get the local ethernet address from OBP. If that
1421 	 * fails, we get it from the EEPROM of the NIC card.
1422 */
1423 result = e1000g_find_mac_address(Adapter);
1424 #endif
1425 /* Get the local ethernet address. */
1426 if (!result) {
1427 mutex_enter(&e1000g_nvm_lock);
1428 result = e1000_read_mac_addr(hw);
1429 mutex_exit(&e1000g_nvm_lock);
1430 }
1431
1432 if (result < E1000_SUCCESS) {
1433 e1000g_log(Adapter, CE_WARN, "Read mac addr failed");
1434 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1435 goto init_fail;
1436 }
1437
1438 /* check for valid mac address */
1439 if (!is_valid_mac_addr(hw->mac.addr)) {
1440 e1000g_log(Adapter, CE_WARN, "Invalid mac addr");
1441 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1442 goto init_fail;
1443 }
1444
1445 /* Set LAA state for 82571 chipset */
1446 e1000_set_laa_state_82571(hw, B_TRUE);
1447
1448 /* Master Latency Timer implementation */
1449 if (Adapter->master_latency_timer) {
1450 pci_config_put8(Adapter->osdep.cfg_handle,
1451 PCI_CONF_LATENCY_TIMER, Adapter->master_latency_timer);
1452 }
1453
1454 if (hw->mac.type < e1000_82547) {
1455 /*
1456 * Total FIFO is 64K
1457 */
1458 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1459 pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
1460 else
1461 pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */
1462 } else if ((hw->mac.type == e1000_82571) ||
1463 (hw->mac.type == e1000_82572) ||
1464 (hw->mac.type == e1000_80003es2lan)) {
1465 /*
1466 * Total FIFO is 48K
1467 */
1468 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1469 pba = E1000_PBA_30K; /* 30K for Rx, 18K for Tx */
1470 else
1471 pba = E1000_PBA_38K; /* 38K for Rx, 10K for Tx */
1472 } else if (hw->mac.type == e1000_82573) {
1473 pba = E1000_PBA_20K; /* 20K for Rx, 12K for Tx */
1474 } else if (hw->mac.type == e1000_82574) {
1475 /* Keep adapter default: 20K for Rx, 20K for Tx */
1476 pba = E1000_READ_REG(hw, E1000_PBA);
1477 } else if (hw->mac.type == e1000_ich8lan) {
1478 pba = E1000_PBA_8K; /* 8K for Rx, 12K for Tx */
1479 } else if (hw->mac.type == e1000_ich9lan) {
1480 pba = E1000_PBA_10K;
1481 } else if (hw->mac.type == e1000_ich10lan) {
1482 pba = E1000_PBA_10K;
1483 } else if (hw->mac.type == e1000_pchlan) {
1484 pba = E1000_PBA_26K;
1485 } else if (hw->mac.type == e1000_pch2lan) {
1486 pba = E1000_PBA_26K;
1487 } else if (hw->mac.type == e1000_pch_lpt) {
1488 pba = E1000_PBA_26K;
1489 } else if (hw->mac.type == e1000_pch_spt) {
1490 pba = E1000_PBA_26K;
1491 } else if (hw->mac.type == e1000_pch_cnp) {
1492 pba = E1000_PBA_26K;
1493 } else {
1494 /*
1495 * Total FIFO is 40K
1496 */
1497 if (Adapter->max_frame_size > FRAME_SIZE_UPTO_8K)
1498 pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */
1499 else
1500 pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */
1501 }
1502 E1000_WRITE_REG(hw, E1000_PBA, pba);
1503
1504 /*
1505 	 * These parameters set thresholds for the adapter's generation (Tx)
1506 	 * and response (Rx) to Ethernet PAUSE frames.  These are just threshold
1507 * settings. Flow control is enabled or disabled in the configuration
1508 * file.
1509 * High-water mark is set down from the top of the rx fifo (not
1510 * sensitive to max_frame_size) and low-water is set just below
1511 * high-water mark.
1512 * The high water mark must be low enough to fit one full frame above
1513 * it in the rx FIFO. Should be the lower of:
1514 * 90% of the Rx FIFO size and the full Rx FIFO size minus the early
1515 * receive size (assuming ERT set to E1000_ERT_2048), or the full
1516 * Rx FIFO size minus one full frame.
1517 */
1518 high_water = min(((pba << 10) * 9 / 10),
1519 ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_82574 ||
1520 hw->mac.type == e1000_ich9lan || hw->mac.type == e1000_ich10lan) ?
1521 ((pba << 10) - (E1000_ERT_2048 << 3)) :
1522 ((pba << 10) - Adapter->max_frame_size)));
1523
1524 hw->fc.high_water = high_water & 0xFFF8;
1525 hw->fc.low_water = hw->fc.high_water - 8;
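	/*
	 * Illustrative example only (actual values depend on the part and
	 * configuration): assuming E1000_PBA_48K is 48 (in 1 KB units) and a
	 * standard 1518-byte max frame, 90% of the 49152-byte FIFO is 44236
	 * bytes and FIFO-minus-one-frame is 47634 bytes, so high_water becomes
	 * 44236 & 0xFFF8 = 44232 and low_water 44224.
	 */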
1526
1527 if (hw->mac.type == e1000_80003es2lan)
1528 hw->fc.pause_time = 0xFFFF;
1529 else
1530 hw->fc.pause_time = E1000_FC_PAUSE_TIME;
1531 hw->fc.send_xon = B_TRUE;
1532
1533 /*
1534 * Reset the adapter hardware the second time.
1535 */
1536 mutex_enter(&e1000g_nvm_lock);
1537 result = e1000_reset_hw(hw);
1538 mutex_exit(&e1000g_nvm_lock);
1539
1540 if (result != E1000_SUCCESS) {
1541 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1542 goto init_fail;
1543 }
1544
1545 /* disable wakeup control by default */
1546 if (hw->mac.type >= e1000_82544)
1547 E1000_WRITE_REG(hw, E1000_WUC, 0);
1548
1549 /*
1550 * MWI should be disabled on 82546.
1551 */
1552 if (hw->mac.type == e1000_82546)
1553 e1000_pci_clear_mwi(hw);
1554 else
1555 e1000_pci_set_mwi(hw);
1556
1557 /*
1558 * Configure/Initialize hardware
1559 */
1560 mutex_enter(&e1000g_nvm_lock);
1561 result = e1000_init_hw(hw);
1562 mutex_exit(&e1000g_nvm_lock);
1563
1564 if (result < E1000_SUCCESS) {
1565 e1000g_log(Adapter, CE_WARN, "Initialize hw failed");
1566 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
1567 goto init_fail;
1568 }
1569
1570 /*
1571 * Restore LED settings to the default from EEPROM
1572 * to meet the standard for Sun platforms.
1573 */
1574 (void) e1000_cleanup_led(hw);
1575
1576 /* Disable Smart Power Down */
1577 phy_spd_state(hw, B_FALSE);
1578
1579 /* Make sure driver has control */
1580 e1000g_get_driver_control(hw);
1581
1582 /*
1583 * Initialize unicast addresses.
1584 */
1585 e1000g_init_unicst(Adapter);
1586
1587 /*
1588 	 * Set up and initialize the mctable structures.  After this routine
1589 	 * completes, the multicast table will be set.
1590 */
1591 e1000_update_mc_addr_list(hw,
1592 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
1593 msec_delay(5);
1594
1595 /*
1596 * Implement Adaptive IFS
1597 */
1598 e1000_reset_adaptive(hw);
1599
1600 /* Setup Interrupt Throttling Register */
1601 if (hw->mac.type >= e1000_82540) {
1602 E1000_WRITE_REG(hw, E1000_ITR, Adapter->intr_throttling_rate);
1603 } else
1604 Adapter->intr_adaptive = B_FALSE;
1605
1606 /* Start the timer for link setup */
1607 if (hw->mac.autoneg)
1608 link_timeout = PHY_AUTO_NEG_LIMIT * drv_usectohz(100000);
1609 else
1610 link_timeout = PHY_FORCE_LIMIT * drv_usectohz(100000);
1611
1612 mutex_enter(&Adapter->link_lock);
1613 if (hw->phy.autoneg_wait_to_complete) {
1614 Adapter->link_complete = B_TRUE;
1615 } else {
1616 Adapter->link_complete = B_FALSE;
1617 Adapter->link_tid = timeout(e1000g_link_timer,
1618 (void *)Adapter, link_timeout);
1619 }
1620 mutex_exit(&Adapter->link_lock);
1621
1622 /* Save the state of the phy */
1623 e1000g_get_phy_state(Adapter);
1624
1625 e1000g_param_sync(Adapter);
1626
1627 Adapter->init_count++;
1628
1629 if (e1000g_check_acc_handle(Adapter->osdep.cfg_handle) != DDI_FM_OK) {
1630 goto init_fail;
1631 }
1632 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1633 goto init_fail;
1634 }
1635
1636 Adapter->poll_mode = e1000g_poll_mode;
1637
1638 return (DDI_SUCCESS);
1639
1640 init_fail:
1641 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1642 return (DDI_FAILURE);
1643 }
1644
1645 static int
1646 e1000g_alloc_rx_data(struct e1000g *Adapter)
1647 {
1648 e1000g_rx_ring_t *rx_ring;
1649 e1000g_rx_data_t *rx_data;
1650
1651 rx_ring = Adapter->rx_ring;
1652
1653 rx_data = kmem_zalloc(sizeof (e1000g_rx_data_t), KM_NOSLEEP);
1654
1655 if (rx_data == NULL)
1656 return (DDI_FAILURE);
1657
1658 rx_data->priv_devi_node = Adapter->priv_devi_node;
1659 rx_data->rx_ring = rx_ring;
1660
1661 mutex_init(&rx_data->freelist_lock, NULL,
1662 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1663 mutex_init(&rx_data->recycle_lock, NULL,
1664 MUTEX_DRIVER, DDI_INTR_PRI(Adapter->intr_pri));
1665
1666 rx_ring->rx_data = rx_data;
1667
1668 return (DDI_SUCCESS);
1669 }
1670
1671 void
1672 e1000g_free_rx_pending_buffers(e1000g_rx_data_t *rx_data)
1673 {
1674 rx_sw_packet_t *packet, *next_packet;
1675
1676 if (rx_data == NULL)
1677 return;
1678
1679 packet = rx_data->packet_area;
1680 while (packet != NULL) {
1681 next_packet = packet->next;
1682 e1000g_free_rx_sw_packet(packet, B_TRUE);
1683 packet = next_packet;
1684 }
1685 rx_data->packet_area = NULL;
1686 }
1687
1688 void
1689 e1000g_free_rx_data(e1000g_rx_data_t *rx_data)
1690 {
1691 if (rx_data == NULL)
1692 return;
1693
1694 mutex_destroy(&rx_data->freelist_lock);
1695 mutex_destroy(&rx_data->recycle_lock);
1696
1697 kmem_free(rx_data, sizeof (e1000g_rx_data_t));
1698 }
1699
1700 /*
1701 * Check if the link is up
1702 */
1703 static boolean_t
1704 e1000g_link_up(struct e1000g *Adapter)
1705 {
1706 struct e1000_hw *hw = &Adapter->shared;
1707 boolean_t link_up = B_FALSE;
1708
1709 /*
1710 * get_link_status is set in the interrupt handler on link-status-change
1711 * or rx sequence error interrupt. get_link_status will stay
1712 * false until the e1000_check_for_link establishes link only
1713 * for copper adapters.
1714 */
1715 switch (hw->phy.media_type) {
1716 case e1000_media_type_copper:
1717 if (hw->mac.get_link_status) {
1718 /*
1719 * SPT and newer devices need a bit of extra time before
1720 * we ask them.
1721 */
1722 if (hw->mac.type >= e1000_pch_spt)
1723 msec_delay(50);
1724 (void) e1000_check_for_link(hw);
1725 if ((E1000_READ_REG(hw, E1000_STATUS) &
1726 E1000_STATUS_LU)) {
1727 link_up = B_TRUE;
1728 } else {
1729 link_up = !hw->mac.get_link_status;
1730 }
1731 } else {
1732 link_up = B_TRUE;
1733 }
1734 break;
1735 case e1000_media_type_fiber:
1736 (void) e1000_check_for_link(hw);
1737 link_up = (E1000_READ_REG(hw, E1000_STATUS) &
1738 E1000_STATUS_LU);
1739 break;
1740 case e1000_media_type_internal_serdes:
1741 (void) e1000_check_for_link(hw);
1742 link_up = hw->mac.serdes_has_link;
1743 break;
1744 }
1745
1746 return (link_up);
1747 }
1748
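/*
 * e1000g_m_ioctl - GLDv3 mc_ioctl entry point; handles the loopback
 * ioctls and, in debug builds, the register/memory peek-poke and chip
 * reset ioctls
 */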
1749 static void
1750 e1000g_m_ioctl(void *arg, queue_t *q, mblk_t *mp)
1751 {
1752 struct iocblk *iocp;
1753 struct e1000g *e1000gp;
1754 enum ioc_reply status;
1755
1756 iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
1757 iocp->ioc_error = 0;
1758 e1000gp = (struct e1000g *)arg;
1759
1760 ASSERT(e1000gp);
1761 if (e1000gp == NULL) {
1762 miocnak(q, mp, 0, EINVAL);
1763 return;
1764 }
1765
1766 rw_enter(&e1000gp->chip_lock, RW_READER);
1767 if (e1000gp->e1000g_state & E1000G_SUSPENDED) {
1768 rw_exit(&e1000gp->chip_lock);
1769 miocnak(q, mp, 0, EINVAL);
1770 return;
1771 }
1772 rw_exit(&e1000gp->chip_lock);
1773
1774 switch (iocp->ioc_cmd) {
1775
1776 case LB_GET_INFO_SIZE:
1777 case LB_GET_INFO:
1778 case LB_GET_MODE:
1779 case LB_SET_MODE:
1780 status = e1000g_loopback_ioctl(e1000gp, iocp, mp);
1781 break;
1782
1783
1784 #ifdef E1000G_DEBUG
1785 case E1000G_IOC_REG_PEEK:
1786 case E1000G_IOC_REG_POKE:
1787 status = e1000g_pp_ioctl(e1000gp, iocp, mp);
1788 break;
1789 case E1000G_IOC_CHIP_RESET:
1790 e1000gp->reset_count++;
1791 if (e1000g_reset_adapter(e1000gp))
1792 status = IOC_ACK;
1793 else
1794 status = IOC_INVAL;
1795 break;
1796 #endif
1797 default:
1798 status = IOC_INVAL;
1799 break;
1800 }
1801
1802 /*
1803 * Decide how to reply
1804 */
1805 switch (status) {
1806 default:
1807 case IOC_INVAL:
1808 /*
1809 * Error, reply with a NAK and EINVAL or the specified error
1810 */
1811 miocnak(q, mp, 0, iocp->ioc_error == 0 ?
1812 EINVAL : iocp->ioc_error);
1813 break;
1814
1815 case IOC_DONE:
1816 /*
1817 * OK, reply already sent
1818 */
1819 break;
1820
1821 case IOC_ACK:
1822 /*
1823 * OK, reply with an ACK
1824 */
1825 miocack(q, mp, 0, 0);
1826 break;
1827
1828 case IOC_REPLY:
1829 /*
1830 * OK, send prepared reply as ACK or NAK
1831 */
1832 mp->b_datap->db_type = iocp->ioc_error == 0 ?
1833 M_IOCACK : M_IOCNAK;
1834 qreply(q, mp);
1835 break;
1836 }
1837 }
1838
1839 /*
1840 * The default value of e1000g_poll_mode == 0 assumes that the NIC is
1841 * capable of supporting only one interrupt and we shouldn't disable
1842 * the physical interrupt. In this case we let the interrupt come and
1843 * we queue the packets in the rx ring itself in case we are in polling
1844 * mode (better latency but slightly lower performance and a very
1845  * high interrupt count in mpstat, which is harmless).
1846 *
1847 * e1000g_poll_mode == 1 assumes that we have per Rx ring interrupt
1848 * which can be disabled in poll mode. This gives better overall
1849 * throughput (compared to the mode above), shows very low interrupt
1850 * count but has slightly higher latency since we pick the packets when
1851 * the poll thread does polling.
1852 *
1853 * Currently, this flag should be enabled only while doing performance
1854  * measurement or when it can be guaranteed that the entire NIC going
1855  * into poll mode will not harm any traffic such as cluster heartbeats.
1856 */
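/*
 * As a module global, this can typically be overridden at boot via
 * /etc/system, e.g. "set e1000g:e1000g_poll_mode = 1" (shown here only
 * as an illustration of the mechanism).
 */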
1857 int e1000g_poll_mode = 0;
1858
1859 /*
1860 * Called from the upper layers when driver is in polling mode to
1861 * pick up any queued packets. Care should be taken to not block
1862 * this thread.
1863 */
1864 static mblk_t *e1000g_poll_ring(void *arg, int bytes_to_pickup)
1865 {
1866 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)arg;
1867 mblk_t *mp = NULL;
1868 mblk_t *tail;
1869 struct e1000g *adapter;
1870
1871 adapter = rx_ring->adapter;
1872
1873 rw_enter(&adapter->chip_lock, RW_READER);
1874
1875 if (adapter->e1000g_state & E1000G_SUSPENDED) {
1876 rw_exit(&adapter->chip_lock);
1877 return (NULL);
1878 }
1879
1880 mutex_enter(&rx_ring->rx_lock);
1881 mp = e1000g_receive(rx_ring, &tail, bytes_to_pickup);
1882 mutex_exit(&rx_ring->rx_lock);
1883 rw_exit(&adapter->chip_lock);
1884 return (mp);
1885 }
1886
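/*
 * e1000g_m_start - GLDv3 mc_start entry point
 *
 * Bring the adapter into the started state and enable the watchdog
 * timer.  Fails with ECANCELED if the adapter is suspended.
 */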
1887 static int
1888 e1000g_m_start(void *arg)
1889 {
1890 struct e1000g *Adapter = (struct e1000g *)arg;
1891
1892 rw_enter(&Adapter->chip_lock, RW_WRITER);
1893
1894 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
1895 rw_exit(&Adapter->chip_lock);
1896 return (ECANCELED);
1897 }
1898
1899 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
1900 rw_exit(&Adapter->chip_lock);
1901 return (ENOTACTIVE);
1902 }
1903
1904 Adapter->e1000g_state |= E1000G_STARTED;
1905
1906 rw_exit(&Adapter->chip_lock);
1907
1908 /* Enable and start the watchdog timer */
1909 enable_watchdog_timer(Adapter);
1910
1911 return (0);
1912 }
1913
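/*
 * e1000g_start - bring the chip to an operational state
 *
 * For a global start, rx data and DMA resources are allocated first.
 * The adapter is then (re)initialized if needed, the tx and rx
 * structures are set up, promiscuous mode is restored and interrupts
 * are unmasked.  Called with chip_lock held as writer.
 */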
1914 static int
1915 e1000g_start(struct e1000g *Adapter, boolean_t global)
1916 {
1917 e1000g_rx_data_t *rx_data;
1918
1919 if (global) {
1920 if (e1000g_alloc_rx_data(Adapter) != DDI_SUCCESS) {
1921 e1000g_log(Adapter, CE_WARN, "Allocate rx data failed");
1922 goto start_fail;
1923 }
1924
1925 /* Allocate dma resources for descriptors and buffers */
1926 if (e1000g_alloc_dma_resources(Adapter) != DDI_SUCCESS) {
1927 e1000g_log(Adapter, CE_WARN,
1928 "Alloc DMA resources failed");
1929 goto start_fail;
1930 }
1931 Adapter->rx_buffer_setup = B_FALSE;
1932 }
1933
1934 if (!(Adapter->attach_progress & ATTACH_PROGRESS_INIT)) {
1935 if (e1000g_init(Adapter) != DDI_SUCCESS) {
1936 e1000g_log(Adapter, CE_WARN,
1937 "Adapter initialization failed");
1938 goto start_fail;
1939 }
1940 }
1941
1942 /* Setup and initialize the transmit structures */
1943 e1000g_tx_setup(Adapter);
1944 msec_delay(5);
1945
1946 /* Setup and initialize the receive structures */
1947 e1000g_rx_setup(Adapter);
1948 msec_delay(5);
1949
1950 /* Restore the e1000g promiscuous mode */
1951 e1000g_restore_promisc(Adapter);
1952
1953 e1000g_mask_interrupt(Adapter);
1954
1955 Adapter->attach_progress |= ATTACH_PROGRESS_INIT;
1956
1957 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
1958 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
1959 goto start_fail;
1960 }
1961
1962 return (DDI_SUCCESS);
1963
1964 start_fail:
1965 rx_data = Adapter->rx_ring->rx_data;
1966
1967 if (global) {
1968 e1000g_release_dma_resources(Adapter);
1969 e1000g_free_rx_pending_buffers(rx_data);
1970 e1000g_free_rx_data(rx_data);
1971 }
1972
1973 mutex_enter(&e1000g_nvm_lock);
1974 (void) e1000_reset_hw(&Adapter->shared);
1975 mutex_exit(&e1000g_nvm_lock);
1976
1977 return (DDI_FAILURE);
1978 }
1979
1980 /*
1981 * The I219 has the curious property that if the descriptor rings are not
1982 * emptied before resetting the hardware or before changing the device state
1983 * based on runtime power management, it'll cause the card to hang. This can
1984 * then only be fixed by a PCI reset. As such, for the I219 and it alone, we
1985 * have to flush the rings if we're in this state.
1986 */
1987 static void
1988 e1000g_flush_desc_rings(struct e1000g *Adapter)
1989 {
1990 struct e1000_hw *hw = &Adapter->shared;
1991 u16 hang_state;
1992 u32 fext_nvm11, tdlen;
1993
1994 /* First, disable MULR fix in FEXTNVM11 */
1995 fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
1996 fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
1997 E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);
1998
1999 /* do nothing if we're not in a faulty state, or if the queue is empty */
2000 tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
2001 hang_state = pci_config_get16(Adapter->osdep.cfg_handle,
2002 PCICFG_DESC_RING_STATUS);
2003 if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
2004 return;
2005 e1000g_flush_tx_ring(Adapter);
2006
2007 /* recheck, maybe the fault is caused by the rx ring */
2008 hang_state = pci_config_get16(Adapter->osdep.cfg_handle,
2009 PCICFG_DESC_RING_STATUS);
2010 if (hang_state & FLUSH_DESC_REQUIRED)
2011 e1000g_flush_rx_ring(Adapter);
2012
2013 }
2014
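/*
 * e1000g_m_stop - GLDv3 mc_stop entry point
 *
 * Drain pending transmits, stop the chip and stop all the timers.
 */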
2015 static void
2016 e1000g_m_stop(void *arg)
2017 {
2018 struct e1000g *Adapter = (struct e1000g *)arg;
2019
2020 /* Drain tx sessions */
2021 (void) e1000g_tx_drain(Adapter);
2022
2023 rw_enter(&Adapter->chip_lock, RW_WRITER);
2024
2025 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2026 rw_exit(&Adapter->chip_lock);
2027 return;
2028 }
2029 Adapter->e1000g_state &= ~E1000G_STARTED;
2030 e1000g_stop(Adapter, B_TRUE);
2031
2032 rw_exit(&Adapter->chip_lock);
2033
2034 /* Disable and stop all the timers */
2035 disable_watchdog_timer(Adapter);
2036 stop_link_timer(Adapter);
2037 stop_82547_timer(Adapter->tx_ring);
2038 }
2039
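/*
 * e1000g_stop - stop the chip and release pending resources
 *
 * Reset the hardware and reclaim the tx and rx resources.  For a
 * global stop, the rx DMA resources are released as well (or deferred
 * until all pending rx buffers have been returned).
 */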
2040 static void
2041 e1000g_stop(struct e1000g *Adapter, boolean_t global)
2042 {
2043 private_devi_list_t *devi_node;
2044 e1000g_rx_data_t *rx_data;
2045 int result;
2046
2047 Adapter->attach_progress &= ~ATTACH_PROGRESS_INIT;
2048
2049 /* Stop the chip and release pending resources */
2050
2051 /* Tell firmware driver is no longer in control */
2052 e1000g_release_driver_control(&Adapter->shared);
2053
2054 e1000g_clear_all_interrupts(Adapter);
2055
2056 mutex_enter(&e1000g_nvm_lock);
2057 result = e1000_reset_hw(&Adapter->shared);
2058 mutex_exit(&e1000g_nvm_lock);
2059
2060 if (result != E1000_SUCCESS) {
2061 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_INVAL_STATE);
2062 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
2063 }
2064
2065 mutex_enter(&Adapter->link_lock);
2066 Adapter->link_complete = B_FALSE;
2067 mutex_exit(&Adapter->link_lock);
2068
2069 /* Release resources still held by the TX descriptors */
2070 e1000g_tx_clean(Adapter);
2071
2072 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2073 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
2074
2075 /* Clean the pending rx jumbo packet fragment */
2076 e1000g_rx_clean(Adapter);
2077
2078 /*
2079  * The I219, e.g. the pch_spt, has bugs such that we must ensure that
2080 * rings are flushed before we do anything else. This must be done
2081 * before we release DMA resources.
2082 */
2083 if (Adapter->shared.mac.type >= e1000_pch_spt)
2084 e1000g_flush_desc_rings(Adapter);
2085
2086 if (global) {
2087 e1000g_release_dma_resources(Adapter);
2088
2089 mutex_enter(&e1000g_rx_detach_lock);
2090 rx_data = Adapter->rx_ring->rx_data;
2091 rx_data->flag |= E1000G_RX_STOPPED;
2092
2093 if (rx_data->pending_count == 0) {
2094 e1000g_free_rx_pending_buffers(rx_data);
2095 e1000g_free_rx_data(rx_data);
2096 } else {
2097 devi_node = rx_data->priv_devi_node;
2098 if (devi_node != NULL)
2099 atomic_inc_32(&devi_node->pending_rx_count);
2100 else
2101 atomic_inc_32(&Adapter->pending_rx_count);
2102 }
2103 mutex_exit(&e1000g_rx_detach_lock);
2104 }
2105
2106 if (Adapter->link_state != LINK_STATE_UNKNOWN) {
2107 Adapter->link_state = LINK_STATE_UNKNOWN;
2108 if (!Adapter->reset_flag)
2109 mac_link_update(Adapter->mh, Adapter->link_state);
2110 }
2111 }
2112
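/*
 * e1000g_rx_clean - discard any partially assembled rx jumbo packet
 */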
2113 static void
2114 e1000g_rx_clean(struct e1000g *Adapter)
2115 {
2116 e1000g_rx_data_t *rx_data = Adapter->rx_ring->rx_data;
2117
2118 if (rx_data == NULL)
2119 return;
2120
2121 if (rx_data->rx_mblk != NULL) {
2122 freemsg(rx_data->rx_mblk);
2123 rx_data->rx_mblk = NULL;
2124 rx_data->rx_mblk_tail = NULL;
2125 rx_data->rx_mblk_len = 0;
2126 }
2127 }
2128
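/*
 * e1000g_tx_clean - reclaim resources still held by the tx descriptors
 *
 * Free the mblks and software packets left on the used list after the
 * chip has been stopped, then reset the descriptor ring pointers.
 */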
2129 static void
2130 e1000g_tx_clean(struct e1000g *Adapter)
2131 {
2132 e1000g_tx_ring_t *tx_ring;
2133 p_tx_sw_packet_t packet;
2134 mblk_t *mp;
2135 mblk_t *nmp;
2136 uint32_t packet_count;
2137
2138 tx_ring = Adapter->tx_ring;
2139
2140 /*
2141 * Here we don't need to protect the lists using
2142 * the usedlist_lock and freelist_lock, for they
2143 * have been protected by the chip_lock.
2144 */
2145 mp = NULL;
2146 nmp = NULL;
2147 packet_count = 0;
2148 packet = (p_tx_sw_packet_t)QUEUE_GET_HEAD(&tx_ring->used_list);
2149 while (packet != NULL) {
2150 if (packet->mp != NULL) {
2151 /* Assemble the message chain */
2152 if (mp == NULL) {
2153 mp = packet->mp;
2154 nmp = packet->mp;
2155 } else {
2156 nmp->b_next = packet->mp;
2157 nmp = packet->mp;
2158 }
2159 /* Disconnect the message from the sw packet */
2160 packet->mp = NULL;
2161 }
2162
2163 e1000g_free_tx_swpkt(packet);
2164 packet_count++;
2165
2166 packet = (p_tx_sw_packet_t)
2167 QUEUE_GET_NEXT(&tx_ring->used_list, &packet->Link);
2168 }
2169
2170 if (mp != NULL)
2171 freemsgchain(mp);
2172
2173 if (packet_count > 0) {
2174 QUEUE_APPEND(&tx_ring->free_list, &tx_ring->used_list);
2175 QUEUE_INIT_LIST(&tx_ring->used_list);
2176
2177 /* Setup TX descriptor pointers */
2178 tx_ring->tbd_next = tx_ring->tbd_first;
2179 tx_ring->tbd_oldest = tx_ring->tbd_first;
2180
2181 /* Setup our HW Tx Head & Tail descriptor pointers */
2182 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
2183 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
2184 }
2185 }
2186
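/*
 * e1000g_tx_drain - wait for pending transmits to complete
 *
 * Poll the tx used list for up to TX_DRAIN_TIME milliseconds and
 * return B_TRUE if it drained within that time.
 */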
2187 static boolean_t
2188 e1000g_tx_drain(struct e1000g *Adapter)
2189 {
2190 int i;
2191 boolean_t done;
2192 e1000g_tx_ring_t *tx_ring;
2193
2194 tx_ring = Adapter->tx_ring;
2195
2196 /* Allow up to TX_DRAIN_TIME ms for pending transmits to complete. */
2197 for (i = 0; i < TX_DRAIN_TIME; i++) {
2198 mutex_enter(&tx_ring->usedlist_lock);
2199 done = IS_QUEUE_EMPTY(&tx_ring->used_list);
2200 mutex_exit(&tx_ring->usedlist_lock);
2201
2202 if (done)
2203 break;
2204
2205 msec_delay(1);
2206 }
2207
2208 return (done);
2209 }
2210
2211 static boolean_t
2212 e1000g_rx_drain(struct e1000g *Adapter)
2213 {
2214 int i;
2215 boolean_t done;
2216
2217 /*
2218 * Allow up to RX_DRAIN_TIME for pending received packets to complete.
2219 */
2220 for (i = 0; i < RX_DRAIN_TIME; i++) {
2221 done = (Adapter->pending_rx_count == 0);
2222
2223 if (done)
2224 break;
2225
2226 msec_delay(1);
2227 }
2228
2229 return (done);
2230 }
2231
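/*
 * e1000g_reset_adapter - local reset of the adapter
 *
 * Stop and restart the chip without releasing the DMA resources.
 * Returns B_TRUE on success.
 */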
2232 static boolean_t
2233 e1000g_reset_adapter(struct e1000g *Adapter)
2234 {
2235 /* Disable and stop all the timers */
2236 disable_watchdog_timer(Adapter);
2237 stop_link_timer(Adapter);
2238 stop_82547_timer(Adapter->tx_ring);
2239
2240 rw_enter(&Adapter->chip_lock, RW_WRITER);
2241
2242 if (Adapter->stall_flag) {
2243 Adapter->stall_flag = B_FALSE;
2244 Adapter->reset_flag = B_TRUE;
2245 }
2246
2247 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2248 rw_exit(&Adapter->chip_lock);
2249 return (B_TRUE);
2250 }
2251
2252 e1000g_stop(Adapter, B_FALSE);
2253
2254 if (e1000g_start(Adapter, B_FALSE) != DDI_SUCCESS) {
2255 rw_exit(&Adapter->chip_lock);
2256 e1000g_log(Adapter, CE_WARN, "Reset failed");
2257 return (B_FALSE);
2258 }
2259
2260 rw_exit(&Adapter->chip_lock);
2261
2262 /* Enable and start the watchdog timer */
2263 enable_watchdog_timer(Adapter);
2264
2265 return (B_TRUE);
2266 }
2267
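/*
 * e1000g_global_reset - full reset of the adapter
 *
 * Stop and restart the chip, releasing and reallocating the rx data
 * and DMA resources.  Returns B_TRUE on success.
 */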
2268 boolean_t
2269 e1000g_global_reset(struct e1000g *Adapter)
2270 {
2271 /* Disable and stop all the timers */
2272 disable_watchdog_timer(Adapter);
2273 stop_link_timer(Adapter);
2274 stop_82547_timer(Adapter->tx_ring);
2275
2276 rw_enter(&Adapter->chip_lock, RW_WRITER);
2277
2278 e1000g_stop(Adapter, B_TRUE);
2279
2280 Adapter->init_count = 0;
2281
2282 if (e1000g_start(Adapter, B_TRUE) != DDI_SUCCESS) {
2283 rw_exit(&Adapter->chip_lock);
2284 e1000g_log(Adapter, CE_WARN, "Reset failed");
2285 return (B_FALSE);
2286 }
2287
2288 rw_exit(&Adapter->chip_lock);
2289
2290 /* Enable and start the watchdog timer */
2291 enable_watchdog_timer(Adapter);
2292
2293 return (B_TRUE);
2294 }
2295
2296 /*
2297 * e1000g_intr_pciexpress - ISR for PCI Express chipsets
2298 *
2299 * This interrupt service routine is for PCI-Express adapters.
2300 * The ICR contents is valid only when the E1000_ICR_INT_ASSERTED
2301 * bit is set.
2302 */
2303 static uint_t
2304 e1000g_intr_pciexpress(caddr_t arg)
2305 {
2306 struct e1000g *Adapter;
2307 uint32_t icr;
2308
2309 Adapter = (struct e1000g *)(uintptr_t)arg;
2310 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2311
2312 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2313 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2314 return (DDI_INTR_CLAIMED);
2315 }
2316
2317 if (icr & E1000_ICR_INT_ASSERTED) {
2318 /*
2319 * E1000_ICR_INT_ASSERTED bit was set:
2320 * Read(Clear) the ICR, claim this interrupt,
2321 * look for work to do.
2322 */
2323 e1000g_intr_work(Adapter, icr);
2324 return (DDI_INTR_CLAIMED);
2325 } else {
2326 /*
2327 * E1000_ICR_INT_ASSERTED bit was not set:
2328 * Don't claim this interrupt, return immediately.
2329 */
2330 return (DDI_INTR_UNCLAIMED);
2331 }
2332 }
2333
2334 /*
2335 * e1000g_intr - ISR for PCI/PCI-X chipsets
2336 *
2337 * This interrupt service routine is for PCI/PCI-X adapters.
2338 * We check the ICR contents no matter the E1000_ICR_INT_ASSERTED
2339 * bit is set or not.
2340 */
2341 static uint_t
2342 e1000g_intr(caddr_t arg)
2343 {
2344 struct e1000g *Adapter;
2345 uint32_t icr;
2346
2347 Adapter = (struct e1000g *)(uintptr_t)arg;
2348 icr = E1000_READ_REG(&Adapter->shared, E1000_ICR);
2349
2350 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2351 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2352 return (DDI_INTR_CLAIMED);
2353 }
2354
2355 if (icr) {
2356 /*
2357 * Any bit was set in ICR:
2358 * Read(Clear) the ICR, claim this interrupt,
2359 * look for work to do.
2360 */
2361 e1000g_intr_work(Adapter, icr);
2362 return (DDI_INTR_CLAIMED);
2363 } else {
2364 /*
2365 * No bit was set in ICR:
2366 * Don't claim this interrupt, return immediately.
2367 */
2368 return (DDI_INTR_UNCLAIMED);
2369 }
2370 }
2371
2372 /*
2373 * e1000g_intr_work - actual processing of ISR
2374 *
2375 * Read(clear) the ICR contents and call appropriate interrupt
2376 * processing routines.
2377 */
2378 static void
2379 e1000g_intr_work(struct e1000g *Adapter, uint32_t icr)
2380 {
2381 struct e1000_hw *hw;
2382 hw = &Adapter->shared;
2383 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
2384
2385 Adapter->rx_pkt_cnt = 0;
2386 Adapter->tx_pkt_cnt = 0;
2387
2388 rw_enter(&Adapter->chip_lock, RW_READER);
2389
2390 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2391 rw_exit(&Adapter->chip_lock);
2392 return;
2393 }
2394 /*
2395 * Here we need to check the "e1000g_state" flag within the chip_lock to
2396 * ensure the receive routine will not execute when the adapter is
2397 * being reset.
2398 */
2399 if (!(Adapter->e1000g_state & E1000G_STARTED)) {
2400 rw_exit(&Adapter->chip_lock);
2401 return;
2402 }
2403
2404 if (icr & E1000_ICR_RXT0) {
2405 mblk_t *mp = NULL;
2406 mblk_t *tail = NULL;
2407 e1000g_rx_ring_t *rx_ring;
2408
2409 rx_ring = Adapter->rx_ring;
2410 mutex_enter(&rx_ring->rx_lock);
2411 /*
2412 * Sometimes with legacy interrupts, it is possible that
2413 * there is a single interrupt for both Rx and Tx. In that
2414 * case, if the poll flag is set, we shouldn't really
2415 * be doing Rx processing here.
2416 */
2417 if (!rx_ring->poll_flag)
2418 mp = e1000g_receive(rx_ring, &tail,
2419 E1000G_CHAIN_NO_LIMIT);
2420 mutex_exit(&rx_ring->rx_lock);
2421 rw_exit(&Adapter->chip_lock);
2422 if (mp != NULL)
2423 mac_rx_ring(Adapter->mh, rx_ring->mrh,
2424 mp, rx_ring->ring_gen_num);
2425 } else
2426 rw_exit(&Adapter->chip_lock);
2427
2428 if (icr & E1000_ICR_TXDW) {
2429 if (!Adapter->tx_intr_enable)
2430 e1000g_clear_tx_interrupt(Adapter);
2431
2432 /* Recycle the tx descriptors */
2433 rw_enter(&Adapter->chip_lock, RW_READER);
2434 (void) e1000g_recycle(tx_ring);
2435 E1000G_DEBUG_STAT(tx_ring->stat_recycle_intr);
2436 rw_exit(&Adapter->chip_lock);
2437
2438 if (tx_ring->resched_needed &&
2439 (tx_ring->tbd_avail > DEFAULT_TX_UPDATE_THRESHOLD)) {
2440 tx_ring->resched_needed = B_FALSE;
2441 mac_tx_update(Adapter->mh);
2442 E1000G_STAT(tx_ring->stat_reschedule);
2443 }
2444 }
2445
2446 /*
2447 * The receive sequence error (RXSEQ) and link status change (LSC)
2448 * interrupts are checked to detect that the cable has been pulled
2449 * out. For the Wiseman 2.0 silicon, the receive sequence error
2450 * interrupt is an indication that the cable is not connected.
2451 */
2452 if ((icr & E1000_ICR_RXSEQ) ||
2453 (icr & E1000_ICR_LSC) ||
2454 (icr & E1000_ICR_GPI_EN1)) {
2455 boolean_t link_changed;
2456 timeout_id_t tid = 0;
2457
2458 stop_watchdog_timer(Adapter);
2459
2460 rw_enter(&Adapter->chip_lock, RW_WRITER);
2461
2462 /*
2463 * Because we got a link-status-change interrupt, force
2464 * e1000_check_for_link() to look at phy
2465 */
2466 Adapter->shared.mac.get_link_status = B_TRUE;
2467
2468 /* e1000g_link_check takes care of link status change */
2469 link_changed = e1000g_link_check(Adapter);
2470
2471 /* Get new phy state */
2472 e1000g_get_phy_state(Adapter);
2473
2474 /*
2475 * If the link timer has not timed out, we'll not notify
2476 * the upper layer with any link state until the link is up.
2477 */
2478 if (link_changed && !Adapter->link_complete) {
2479 if (Adapter->link_state == LINK_STATE_UP) {
2480 mutex_enter(&Adapter->link_lock);
2481 Adapter->link_complete = B_TRUE;
2482 tid = Adapter->link_tid;
2483 Adapter->link_tid = 0;
2484 mutex_exit(&Adapter->link_lock);
2485 } else {
2486 link_changed = B_FALSE;
2487 }
2488 }
2489 rw_exit(&Adapter->chip_lock);
2490
2491 if (link_changed) {
2492 if (tid != 0)
2493 (void) untimeout(tid);
2494
2495 /*
2496 * Workaround for esb2. Data stuck in fifo on a link
2497 * down event. Stop receiver here and reset in watchdog.
2498 */
2499 if ((Adapter->link_state == LINK_STATE_DOWN) &&
2500 (Adapter->shared.mac.type == e1000_80003es2lan)) {
2501 uint32_t rctl = E1000_READ_REG(hw, E1000_RCTL);
2502 E1000_WRITE_REG(hw, E1000_RCTL,
2503 rctl & ~E1000_RCTL_EN);
2504 e1000g_log(Adapter, CE_WARN,
2505 "ESB2 receiver disabled");
2506 Adapter->esb2_workaround = B_TRUE;
2507 }
2508 if (!Adapter->reset_flag)
2509 mac_link_update(Adapter->mh,
2510 Adapter->link_state);
2511 if (Adapter->link_state == LINK_STATE_UP)
2512 Adapter->reset_flag = B_FALSE;
2513 }
2514
2515 start_watchdog_timer(Adapter);
2516 }
2517 }
2518
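/*
 * e1000g_init_unicst - initialize the unicast address slots
 *
 * On the first call, work out how many receive address registers
 * (RARs) are usable and clear the software slots; on later calls,
 * re-program the RARs from the saved slot contents.
 */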
2519 static void
2520 e1000g_init_unicst(struct e1000g *Adapter)
2521 {
2522 struct e1000_hw *hw;
2523 int slot;
2524
2525 hw = &Adapter->shared;
2526
2527 if (Adapter->init_count == 0) {
2528 /* Initialize the multiple unicast addresses */
2529 Adapter->unicst_total = min(hw->mac.rar_entry_count,
2530 MAX_NUM_UNICAST_ADDRESSES);
2531
2532 /*
2533 * The common code does not correctly calculate the number of
2534 * RARs that could be reserved by firmware for the pch_lpt and
2535 * pch_spt MACs. The interface has one primary RAR, and 11
2536 * additional ones. Those 11 additional ones are not always
2537 * available. According to the datasheet, we need to check a
2538 * few of the bits in the FWSM register. If the value is
2539 * zero, everything is available. If the value is 1, none of the
2540 * additional registers are available. If the value is 2-7, only
2541 * that many are available.
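 *
 * For example, a WLOCK_MAC value of 3 leaves 1 + 3 = 4 usable
 * RARs, while a value of 0 leaves all 1 + 11 = 12 usable.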
2542 */
2543 if (hw->mac.type >= e1000_pch_lpt) {
2544 uint32_t locked, rar;
2545
2546 locked = E1000_READ_REG(hw, E1000_FWSM) &
2547 E1000_FWSM_WLOCK_MAC_MASK;
2548 locked >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2549 rar = 1;
2550 if (locked == 0)
2551 rar += 11;
2552 else if (locked == 1)
2553 rar += 0;
2554 else
2555 rar += locked;
2556 Adapter->unicst_total = min(rar,
2557 MAX_NUM_UNICAST_ADDRESSES);
2558 }
2559
2560 /* Workaround for an erratum of 82571 chipset */
2561 if ((hw->mac.type == e1000_82571) &&
2562 (e1000_get_laa_state_82571(hw) == B_TRUE))
2563 Adapter->unicst_total--;
2564
2565 /* VMware doesn't support multiple mac addresses properly */
2566 if (hw->subsystem_vendor_id == 0x15ad)
2567 Adapter->unicst_total = 1;
2568
2569 Adapter->unicst_avail = Adapter->unicst_total;
2570
2571 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2572 /* Clear both the flag and MAC address */
2573 Adapter->unicst_addr[slot].reg.high = 0;
2574 Adapter->unicst_addr[slot].reg.low = 0;
2575 }
2576 } else {
2577 /* Workaround for an erratum of 82571 chipset */
2578 if ((hw->mac.type == e1000_82571) &&
2579 (e1000_get_laa_state_82571(hw) == B_TRUE))
2580 (void) e1000_rar_set(hw, hw->mac.addr, LAST_RAR_ENTRY);
2581
2582 /* Re-configure the RAR registers */
2583 for (slot = 0; slot < Adapter->unicst_total; slot++)
2584 if (Adapter->unicst_addr[slot].mac.set == 1)
2585 (void) e1000_rar_set(hw,
2586 Adapter->unicst_addr[slot].mac.addr, slot);
2587 }
2588
2589 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
2590 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2591 }
2592
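/*
 * e1000g_unicst_set - program one receive address register slot
 *
 * A NULL mac_addr clears the slot.  The Wiseman rev 2.0 and 82571
 * LAA errata are worked around here as well.
 */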
2593 static int
2594 e1000g_unicst_set(struct e1000g *Adapter, const uint8_t *mac_addr,
2595 int slot)
2596 {
2597 struct e1000_hw *hw;
2598
2599 hw = &Adapter->shared;
2600
2601 /*
2602 * The first revision of Wiseman silicon (rev 2.0) has an erratum
2603 * that requires the receiver to be in reset when any of the
2604 * receive address registers (RAR regs) are accessed. The first
2605 * rev of Wiseman silicon also requires MWI to be disabled when
2606 * a global reset or a receive reset is issued. So before we
2607 * initialize the RARs, we check the rev of the Wiseman controller
2608 * and work around any necessary HW errata.
2609 */
2610 if ((hw->mac.type == e1000_82542) &&
2611 (hw->revision_id == E1000_REVISION_2)) {
2612 e1000_pci_clear_mwi(hw);
2613 E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
2614 msec_delay(5);
2615 }
2616 if (mac_addr == NULL) {
2617 E1000_WRITE_REG_ARRAY(hw, E1000_RA, slot << 1, 0);
2618 E1000_WRITE_FLUSH(hw);
2619 E1000_WRITE_REG_ARRAY(hw, E1000_RA, (slot << 1) + 1, 0);
2620 E1000_WRITE_FLUSH(hw);
2621 /* Clear both the flag and MAC address */
2622 Adapter->unicst_addr[slot].reg.high = 0;
2623 Adapter->unicst_addr[slot].reg.low = 0;
2624 } else {
2625 bcopy(mac_addr, Adapter->unicst_addr[slot].mac.addr,
2626 ETHERADDRL);
2627 (void) e1000_rar_set(hw, (uint8_t *)mac_addr, slot);
2628 Adapter->unicst_addr[slot].mac.set = 1;
2629 }
2630
2631 /* Workaround for an erratum of 82571 chipset */
2632 if (slot == 0) {
2633 if ((hw->mac.type == e1000_82571) &&
2634 (e1000_get_laa_state_82571(hw) == B_TRUE))
2635 if (mac_addr == NULL) {
2636 E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2637 slot << 1, 0);
2638 E1000_WRITE_FLUSH(hw);
2639 E1000_WRITE_REG_ARRAY(hw, E1000_RA,
2640 (slot << 1) + 1, 0);
2641 E1000_WRITE_FLUSH(hw);
2642 } else {
2643 (void) e1000_rar_set(hw, (uint8_t *)mac_addr,
2644 LAST_RAR_ENTRY);
2645 }
2646 }
2647
2648 /*
2649 * If we are using Wiseman rev 2.0 silicon, we will have previously
2650 * put the receiver in reset, and disabled MWI, to work around some
2651 * HW errata. Now we should take the receiver out of reset, and
2652 * re-enable MWI if it was previously enabled by the PCI BIOS.
2653 */
2654 if ((hw->mac.type == e1000_82542) &&
2655 (hw->revision_id == E1000_REVISION_2)) {
2656 E1000_WRITE_REG(hw, E1000_RCTL, 0);
2657 msec_delay(1);
2658 if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
2659 e1000_pci_set_mwi(hw);
2660 e1000g_rx_setup(Adapter);
2661 }
2662
2663 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2664 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2665 return (EIO);
2666 }
2667
2668 return (0);
2669 }
2670
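/*
 * multicst_add - add a multicast address to the filter
 *
 * The software table grows in MCAST_ALLOC_SIZE chunks as needed and
 * the updated list is then pushed down to the hardware.
 */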
2671 static int
2672 multicst_add(struct e1000g *Adapter, const uint8_t *multiaddr)
2673 {
2674 struct e1000_hw *hw = &Adapter->shared;
2675 struct ether_addr *newtable;
2676 size_t new_len;
2677 size_t old_len;
2678 int res = 0;
2679
2680 if ((multiaddr[0] & 01) == 0) {
2681 res = EINVAL;
2682 e1000g_log(Adapter, CE_WARN, "Illegal multicast address");
2683 goto done;
2684 }
2685
2686 if (Adapter->mcast_count >= Adapter->mcast_max_num) {
2687 res = ENOENT;
2688 e1000g_log(Adapter, CE_WARN,
2689 "Adapter requested more than %d mcast addresses",
2690 Adapter->mcast_max_num);
2691 goto done;
2692 }
2693
2694
2695 if (Adapter->mcast_count == Adapter->mcast_alloc_count) {
2696 old_len = Adapter->mcast_alloc_count *
2697 sizeof (struct ether_addr);
2698 new_len = (Adapter->mcast_alloc_count + MCAST_ALLOC_SIZE) *
2699 sizeof (struct ether_addr);
2700
2701 newtable = kmem_alloc(new_len, KM_NOSLEEP);
2702 if (newtable == NULL) {
2703 res = ENOMEM;
2704 e1000g_log(Adapter, CE_WARN,
2705 "Not enough memory to alloc mcast table");
2706 goto done;
2707 }
2708
2709 if (Adapter->mcast_table != NULL) {
2710 bcopy(Adapter->mcast_table, newtable, old_len);
2711 kmem_free(Adapter->mcast_table, old_len);
2712 }
2713 Adapter->mcast_alloc_count += MCAST_ALLOC_SIZE;
2714 Adapter->mcast_table = newtable;
2715 }
2716
2717 bcopy(multiaddr,
2718 &Adapter->mcast_table[Adapter->mcast_count], ETHERADDRL);
2719 Adapter->mcast_count++;
2720
2721 /*
2722 * Update the MC table in the hardware
2723 */
2724 e1000g_clear_interrupt(Adapter);
2725
2726 e1000_update_mc_addr_list(hw,
2727 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2728
2729 e1000g_mask_interrupt(Adapter);
2730
2731 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2732 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2733 res = EIO;
2734 }
2735
2736 done:
2737 return (res);
2738 }
2739
2740 static int
2741 multicst_remove(struct e1000g *Adapter, const uint8_t *multiaddr)
2742 {
2743 struct e1000_hw *hw = &Adapter->shared;
2744 struct ether_addr *newtable;
2745 size_t new_len;
2746 size_t old_len;
2747 unsigned i;
2748
2749 for (i = 0; i < Adapter->mcast_count; i++) {
2750 if (bcmp(multiaddr, &Adapter->mcast_table[i],
2751 ETHERADDRL) == 0) {
2752 for (i++; i < Adapter->mcast_count; i++) {
2753 Adapter->mcast_table[i - 1] =
2754 Adapter->mcast_table[i];
2755 }
2756 Adapter->mcast_count--;
2757 break;
2758 }
2759 }
2760
2761 if ((Adapter->mcast_alloc_count - Adapter->mcast_count) >
2762 MCAST_ALLOC_SIZE) {
2763 old_len = Adapter->mcast_alloc_count *
2764 sizeof (struct ether_addr);
2765 new_len = (Adapter->mcast_alloc_count - MCAST_ALLOC_SIZE) *
2766 sizeof (struct ether_addr);
2767
2768 newtable = kmem_alloc(new_len, KM_NOSLEEP);
2769 if (newtable != NULL) {
2770 bcopy(Adapter->mcast_table, newtable, new_len);
2771 kmem_free(Adapter->mcast_table, old_len);
2772
2773 Adapter->mcast_alloc_count -= MCAST_ALLOC_SIZE;
2774 Adapter->mcast_table = newtable;
2775 }
2776 }
2777
2778 /*
2779 * Update the MC table in the hardware
2780 */
2781 e1000g_clear_interrupt(Adapter);
2782
2783 e1000_update_mc_addr_list(hw,
2784 (uint8_t *)Adapter->mcast_table, Adapter->mcast_count);
2785
2786 e1000g_mask_interrupt(Adapter);
2787
2788 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2789 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2790 return (EIO);
2791 }
2792
2793 return (0);
2794 }
2795
2796 static void
2797 e1000g_release_multicast(struct e1000g *Adapter)
2798 {
2799 if (Adapter->mcast_table != NULL) {
2800 kmem_free(Adapter->mcast_table,
2801 Adapter->mcast_alloc_count * sizeof (struct ether_addr));
2802 Adapter->mcast_table = NULL;
2803 }
2804 }
2805
2806 int
2807 e1000g_m_multicst(void *arg, boolean_t add, const uint8_t *addr)
2808 {
2809 struct e1000g *Adapter = (struct e1000g *)arg;
2810 int result;
2811
2812 rw_enter(&Adapter->chip_lock, RW_WRITER);
2813
2814 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2815 result = ECANCELED;
2816 goto done;
2817 }
2818
2819 result = (add) ? multicst_add(Adapter, addr)
2820 : multicst_remove(Adapter, addr);
2821
2822 done:
2823 rw_exit(&Adapter->chip_lock);
2824 return (result);
2825
2826 }
2827
2828 int
2829 e1000g_m_promisc(void *arg, boolean_t on)
2830 {
2831 struct e1000g *Adapter = (struct e1000g *)arg;
2832 uint32_t rctl;
2833
2834 rw_enter(&Adapter->chip_lock, RW_WRITER);
2835
2836 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
2837 rw_exit(&Adapter->chip_lock);
2838 return (ECANCELED);
2839 }
2840
2841 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
2842
2843 if (on)
2844 rctl |=
2845 (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
2846 else
2847 rctl &= (~(E1000_RCTL_UPE | E1000_RCTL_MPE));
2848
2849 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
2850
2851 Adapter->e1000g_promisc = on;
2852
2853 rw_exit(&Adapter->chip_lock);
2854
2855 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
2856 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
2857 return (EIO);
2858 }
2859
2860 return (0);
2861 }
2862
2863 /*
2864 * Entry points to enable and disable interrupts at the granularity of
2865 * a group.
2866 * Turns the poll_mode for the whole adapter on and off to enable or
2867 * override the ring level polling control over the hardware interrupts.
2868 */
2869 static int
2870 e1000g_rx_group_intr_enable(mac_intr_handle_t arg)
2871 {
2872 struct e1000g *adapter = (struct e1000g *)arg;
2873 e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2874
2875 /*
2876 * Later interrupts at the granularity of this ring will
2877 * invoke mac_rx() with NULL, indicating the need for another
2878 * software classification.
2879 * We have a single ring usable per adapter now, so we only need to
2880 * reset the rx handle for that one.
2881 * When more RX rings can be used, we should update each one of them.
2882 */
2883 mutex_enter(&rx_ring->rx_lock);
2884 rx_ring->mrh = NULL;
2885 adapter->poll_mode = B_FALSE;
2886 mutex_exit(&rx_ring->rx_lock);
2887 return (0);
2888 }
2889
2890 static int
2891 e1000g_rx_group_intr_disable(mac_intr_handle_t arg)
2892 {
2893 struct e1000g *adapter = (struct e1000g *)arg;
2894 e1000g_rx_ring_t *rx_ring = adapter->rx_ring;
2895
2896 mutex_enter(&rx_ring->rx_lock);
2897
2898 /*
2899 * Later interrupts at the granularity of this ring will
2900 * invoke mac_rx() with the handle for this ring.
2901 */
2902 adapter->poll_mode = B_TRUE;
2903 rx_ring->mrh = rx_ring->mrh_init;
2904 mutex_exit(&rx_ring->rx_lock);
2905 return (0);
2906 }
2907
2908 /*
2909 * Entry points to enable and disable interrupts at the granularity of
2910 * a ring.
2911 * adapter poll_mode controls whether we actually proceed with hardware
2912 * interrupt toggling.
2913 */
2914 static int
2915 e1000g_rx_ring_intr_enable(mac_intr_handle_t intrh)
2916 {
2917 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
2918 struct e1000g *adapter = rx_ring->adapter;
2919 struct e1000_hw *hw = &adapter->shared;
2920 uint32_t intr_mask;
2921
2922 rw_enter(&adapter->chip_lock, RW_READER);
2923
2924 if (adapter->e1000g_state & E1000G_SUSPENDED) {
2925 rw_exit(&adapter->chip_lock);
2926 return (0);
2927 }
2928
2929 mutex_enter(&rx_ring->rx_lock);
2930 rx_ring->poll_flag = 0;
2931 mutex_exit(&rx_ring->rx_lock);
2932
2933 /* Rx interrupt enabling for MSI and legacy */
2934 intr_mask = E1000_READ_REG(hw, E1000_IMS);
2935 intr_mask |= E1000_IMS_RXT0;
2936 E1000_WRITE_REG(hw, E1000_IMS, intr_mask);
2937 E1000_WRITE_FLUSH(hw);
2938
2939 /* Trigger a Rx interrupt to check Rx ring */
2940 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
2941 E1000_WRITE_FLUSH(hw);
2942
2943 rw_exit(&adapter->chip_lock);
2944 return (0);
2945 }
2946
2947 static int
2948 e1000g_rx_ring_intr_disable(mac_intr_handle_t intrh)
2949 {
2950 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)intrh;
2951 struct e1000g *adapter = rx_ring->adapter;
2952 struct e1000_hw *hw = &adapter->shared;
2953
2954 rw_enter(&adapter->chip_lock, RW_READER);
2955
2956 if (adapter->e1000g_state & E1000G_SUSPENDED) {
2957 rw_exit(&adapter->chip_lock);
2958 return (0);
2959 }
2960 mutex_enter(&rx_ring->rx_lock);
2961 rx_ring->poll_flag = 1;
2962 mutex_exit(&rx_ring->rx_lock);
2963
2964 /* Rx interrupt disabling for MSI and legacy */
2965 E1000_WRITE_REG(hw, E1000_IMC, E1000_IMS_RXT0);
2966 E1000_WRITE_FLUSH(hw);
2967
2968 rw_exit(&adapter->chip_lock);
2969 return (0);
2970 }
2971
2972 /*
2973 * e1000g_unicst_find - Find the slot for the specified unicast address
2974 */
2975 static int
2976 e1000g_unicst_find(struct e1000g *Adapter, const uint8_t *mac_addr)
2977 {
2978 int slot;
2979
2980 for (slot = 0; slot < Adapter->unicst_total; slot++) {
2981 if ((Adapter->unicst_addr[slot].mac.set == 1) &&
2982 (bcmp(Adapter->unicst_addr[slot].mac.addr,
2983 mac_addr, ETHERADDRL) == 0))
2984 return (slot);
2985 }
2986
2987 return (-1);
2988 }
2989
2990 /*
2991 * Entry points to add a MAC address to and remove it from a ring group.
2992 * The caller takes care of adding MAC addresses to and removing them
2993 * from the filter via these two routines.
2994 */
2995
2996 static int
2997 e1000g_addmac(void *arg, const uint8_t *mac_addr)
2998 {
2999 struct e1000g *Adapter = (struct e1000g *)arg;
3000 int slot, err;
3001
3002 rw_enter(&Adapter->chip_lock, RW_WRITER);
3003
3004 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3005 rw_exit(&Adapter->chip_lock);
3006 return (ECANCELED);
3007 }
3008
3009 if (e1000g_unicst_find(Adapter, mac_addr) != -1) {
3010 /* The same address is already in slot */
3011 rw_exit(&Adapter->chip_lock);
3012 return (0);
3013 }
3014
3015 if (Adapter->unicst_avail == 0) {
3016 /* no slots available */
3017 rw_exit(&Adapter->chip_lock);
3018 return (ENOSPC);
3019 }
3020
3021 /* Search for a free slot */
3022 for (slot = 0; slot < Adapter->unicst_total; slot++) {
3023 if (Adapter->unicst_addr[slot].mac.set == 0)
3024 break;
3025 }
3026 ASSERT(slot < Adapter->unicst_total);
3027
3028 err = e1000g_unicst_set(Adapter, mac_addr, slot);
3029 if (err == 0)
3030 Adapter->unicst_avail--;
3031
3032 rw_exit(&Adapter->chip_lock);
3033
3034 return (err);
3035 }
3036
3037 static int
3038 e1000g_remmac(void *arg, const uint8_t *mac_addr)
3039 {
3040 struct e1000g *Adapter = (struct e1000g *)arg;
3041 int slot, err;
3042
3043 rw_enter(&Adapter->chip_lock, RW_WRITER);
3044
3045 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3046 rw_exit(&Adapter->chip_lock);
3047 return (ECANCELED);
3048 }
3049
3050 slot = e1000g_unicst_find(Adapter, mac_addr);
3051 if (slot == -1) {
3052 rw_exit(&Adapter->chip_lock);
3053 return (EINVAL);
3054 }
3055
3056 ASSERT(Adapter->unicst_addr[slot].mac.set);
3057
3058 /* Clear this slot */
3059 err = e1000g_unicst_set(Adapter, NULL, slot);
3060 if (err == 0)
3061 Adapter->unicst_avail++;
3062
3063 rw_exit(&Adapter->chip_lock);
3064
3065 return (err);
3066 }
3067
3068 static int
3069 e1000g_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
3070 {
3071 e1000g_rx_ring_t *rx_ring = (e1000g_rx_ring_t *)rh;
3072
3073 mutex_enter(&rx_ring->rx_lock);
3074 rx_ring->ring_gen_num = mr_gen_num;
3075 mutex_exit(&rx_ring->rx_lock);
3076 return (0);
3077 }
3078
3079 /*
3080 * Callback function for the MAC layer to register all rings.
3081 *
3082 * The hardware supports a single group with currently only one ring
3083 * available.
3084 * Though not offering virtualization ability per se, exposing the
3085 * group/ring still enables the polling and interrupt toggling.
3086 */
3087 /* ARGSUSED */
3088 void
3089 e1000g_fill_ring(void *arg, mac_ring_type_t rtype, const int grp_index,
3090 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
3091 {
3092 struct e1000g *Adapter = (struct e1000g *)arg;
3093 e1000g_rx_ring_t *rx_ring = Adapter->rx_ring;
3094 mac_intr_t *mintr;
3095
3096 /*
3097 * We advertised only RX group/rings, so the MAC framework shouldn't
3098 * ask for anything else.
3099 */
3100 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0 && ring_index == 0);
3101
3102 rx_ring->mrh = rx_ring->mrh_init = rh;
3103 infop->mri_driver = (mac_ring_driver_t)rx_ring;
3104 infop->mri_start = e1000g_ring_start;
3105 infop->mri_stop = NULL;
3106 infop->mri_poll = e1000g_poll_ring;
3107 infop->mri_stat = e1000g_rx_ring_stat;
3108
3109 /* Ring level interrupts */
3110 mintr = &infop->mri_intr;
3111 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
3112 mintr->mi_enable = e1000g_rx_ring_intr_enable;
3113 mintr->mi_disable = e1000g_rx_ring_intr_disable;
3114 if (Adapter->msi_enable)
3115 mintr->mi_ddi_handle = Adapter->htable[0];
3116 }
3117
3118 /* ARGSUSED */
3119 static void
3120 e1000g_fill_group(void *arg, mac_ring_type_t rtype, const int grp_index,
3121 mac_group_info_t *infop, mac_group_handle_t gh)
3122 {
3123 struct e1000g *Adapter = (struct e1000g *)arg;
3124 mac_intr_t *mintr;
3125
3126 /*
3127 * We advertised a single RX ring. Getting a request for anything else
3128 * signifies a bug in the MAC framework.
3129 */
3130 ASSERT(rtype == MAC_RING_TYPE_RX && grp_index == 0);
3131
3132 Adapter->rx_group = gh;
3133
3134 infop->mgi_driver = (mac_group_driver_t)Adapter;
3135 infop->mgi_start = NULL;
3136 infop->mgi_stop = NULL;
3137 infop->mgi_addmac = e1000g_addmac;
3138 infop->mgi_remmac = e1000g_remmac;
3139 infop->mgi_count = 1;
3140
3141 /* Group level interrupts */
3142 mintr = &infop->mgi_intr;
3143 mintr->mi_handle = (mac_intr_handle_t)Adapter;
3144 mintr->mi_enable = e1000g_rx_group_intr_enable;
3145 mintr->mi_disable = e1000g_rx_group_intr_disable;
3146 }
3147
3148 static void
3149 e1000g_led_blink(void *arg)
3150 {
3151 e1000g_t *e1000g = arg;
3152
3153 mutex_enter(&e1000g->e1000g_led_lock);
3154 VERIFY(e1000g->e1000g_emul_blink);
3155 if (e1000g->e1000g_emul_state) {
3156 (void) e1000_led_on(&e1000g->shared);
3157 } else {
3158 (void) e1000_led_off(&e1000g->shared);
3159 }
3160 e1000g->e1000g_emul_state = !e1000g->e1000g_emul_state;
3161 mutex_exit(&e1000g->e1000g_led_lock);
3162 }
3163
3164 static int
3165 e1000g_led_set(void *arg, mac_led_mode_t mode, uint_t flags)
3166 {
3167 e1000g_t *e1000g = arg;
3168
3169 if (flags != 0)
3170 return (EINVAL);
3171
3172 if (mode != MAC_LED_DEFAULT &&
3173 mode != MAC_LED_IDENT &&
3174 mode != MAC_LED_OFF &&
3175 mode != MAC_LED_ON)
3176 return (ENOTSUP);
3177
3178 mutex_enter(&e1000g->e1000g_led_lock);
3179
3180 if ((mode == MAC_LED_IDENT || mode == MAC_LED_OFF ||
3181 mode == MAC_LED_ON) &&
3182 !e1000g->e1000g_led_setup) {
3183 if (e1000_setup_led(&e1000g->shared) != E1000_SUCCESS) {
3184 mutex_exit(&e1000g->e1000g_led_lock);
3185 return (EIO);
3186 }
3187
3188 e1000g->e1000g_led_setup = B_TRUE;
3189 }
3190
3191 if (mode != MAC_LED_IDENT && e1000g->e1000g_blink != NULL) {
3192 ddi_periodic_t id = e1000g->e1000g_blink;
3193 e1000g->e1000g_blink = NULL;
3194 mutex_exit(&e1000g->e1000g_led_lock);
3195 ddi_periodic_delete(id);
3196 mutex_enter(&e1000g->e1000g_led_lock);
3197 }
3198
3199 switch (mode) {
3200 case MAC_LED_DEFAULT:
3201 if (e1000g->e1000g_led_setup) {
3202 if (e1000_cleanup_led(&e1000g->shared) !=
3203 E1000_SUCCESS) {
3204 mutex_exit(&e1000g->e1000g_led_lock);
3205 return (EIO);
3206 }
3207 e1000g->e1000g_led_setup = B_FALSE;
3208 }
3209 break;
3210 case MAC_LED_IDENT:
3211 if (e1000g->e1000g_emul_blink) {
3212 if (e1000g->e1000g_blink != NULL)
3213 break;
3214
3215 /*
3216 * Note, we use a 200 ms period here as that's what
3217 * section 10.1.3 of the 8254x Intel Manual (PCI/PCI-X Family
3218 * of Gigabit Ethernet Controllers Software Developer's
3219 * Manual) indicates the optional blink hardware
3220 * operates at.
3221 */
3222 e1000g->e1000g_blink =
3223 ddi_periodic_add(e1000g_led_blink, e1000g,
3224 200ULL * (NANOSEC / MILLISEC), DDI_IPL_0);
3225 } else if (e1000_blink_led(&e1000g->shared) != E1000_SUCCESS) {
3226 mutex_exit(&e1000g->e1000g_led_lock);
3227 return (EIO);
3228 }
3229 break;
3230 case MAC_LED_OFF:
3231 if (e1000_led_off(&e1000g->shared) != E1000_SUCCESS) {
3232 mutex_exit(&e1000g->e1000g_led_lock);
3233 return (EIO);
3234 }
3235 break;
3236 case MAC_LED_ON:
3237 if (e1000_led_on(&e1000g->shared) != E1000_SUCCESS) {
3238 mutex_exit(&e1000g->e1000g_led_lock);
3239 return (EIO);
3240 }
3241 break;
3242 default:
3243 mutex_exit(&e1000g->e1000g_led_lock);
3244 return (ENOTSUP);
3245 }
3246
3247 mutex_exit(&e1000g->e1000g_led_lock);
3248 return (0);
3250 }
3251
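/*
 * e1000g_m_getcapab - GLDv3 mc_getcapab entry point
 *
 * Report the hardware checksum, LSO, ring and LED capabilities
 * supported by this instance.
 */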
3252 static boolean_t
3253 e1000g_m_getcapab(void *arg, mac_capab_t cap, void *cap_data)
3254 {
3255 struct e1000g *Adapter = (struct e1000g *)arg;
3256
3257 switch (cap) {
3258 case MAC_CAPAB_HCKSUM: {
3259 uint32_t *txflags = cap_data;
3260
3261 if (Adapter->tx_hcksum_enable)
3262 *txflags = HCKSUM_IPHDRCKSUM |
3263 HCKSUM_INET_PARTIAL;
3264 else
3265 return (B_FALSE);
3266 break;
3267 }
3268
3269 case MAC_CAPAB_LSO: {
3270 mac_capab_lso_t *cap_lso = cap_data;
3271
3272 if (Adapter->lso_enable) {
3273 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
3274 cap_lso->lso_basic_tcp_ipv4.lso_max =
3275 E1000_LSO_MAXLEN;
3276 } else
3277 return (B_FALSE);
3278 break;
3279 }
3280 case MAC_CAPAB_RINGS: {
3281 mac_capab_rings_t *cap_rings = cap_data;
3282
3283 /* No TX rings exposed yet */
3284 if (cap_rings->mr_type != MAC_RING_TYPE_RX)
3285 return (B_FALSE);
3286
3287 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
3288 cap_rings->mr_rnum = 1;
3289 cap_rings->mr_gnum = 1;
3290 cap_rings->mr_rget = e1000g_fill_ring;
3291 cap_rings->mr_gget = e1000g_fill_group;
3292 break;
3293 }
3294 case MAC_CAPAB_LED: {
3295 mac_capab_led_t *cap_led = cap_data;
3296
3297 cap_led->mcl_flags = 0;
3298 cap_led->mcl_modes = MAC_LED_DEFAULT;
3299 if (Adapter->shared.mac.ops.blink_led != NULL &&
3300 Adapter->shared.mac.ops.blink_led !=
3301 e1000_null_ops_generic) {
3302 cap_led->mcl_modes |= MAC_LED_IDENT;
3303 }
3304
3305 if (Adapter->shared.mac.ops.led_off != NULL &&
3306 Adapter->shared.mac.ops.led_off !=
3307 e1000_null_ops_generic) {
3308 cap_led->mcl_modes |= MAC_LED_OFF;
3309 }
3310
3311 if (Adapter->shared.mac.ops.led_on != NULL &&
3312 Adapter->shared.mac.ops.led_on !=
3313 e1000_null_ops_generic) {
3314 cap_led->mcl_modes |= MAC_LED_ON;
3315 }
3316
3317 /*
3318 * Some hardware doesn't support blinking natively as it's
3319 * missing the optional blink circuit. If it supports both off and
3320 * on, then we'll emulate blinking ourselves.
3321 */
3322 if (((cap_led->mcl_modes & MAC_LED_IDENT) == 0) &&
3323 ((cap_led->mcl_modes & MAC_LED_OFF) != 0) &&
3324 ((cap_led->mcl_modes & MAC_LED_ON) != 0)) {
3325 cap_led->mcl_modes |= MAC_LED_IDENT;
3326 Adapter->e1000g_emul_blink = B_TRUE;
3327 }
3328
3329 cap_led->mcl_set = e1000g_led_set;
3330 break;
3331 }
3332 default:
3333 return (B_FALSE);
3334 }
3335 return (B_TRUE);
3336 }
3337
3338 static boolean_t
3339 e1000g_param_locked(mac_prop_id_t pr_num)
3340 {
3341 /*
3342 * All en_* parameters are locked (read-only) while
3343 * the device is in any sort of loopback mode ...
3344 */
3345 switch (pr_num) {
3346 case MAC_PROP_EN_1000FDX_CAP:
3347 case MAC_PROP_EN_1000HDX_CAP:
3348 case MAC_PROP_EN_100FDX_CAP:
3349 case MAC_PROP_EN_100HDX_CAP:
3350 case MAC_PROP_EN_10FDX_CAP:
3351 case MAC_PROP_EN_10HDX_CAP:
3352 case MAC_PROP_AUTONEG:
3353 case MAC_PROP_FLOWCTRL:
3354 return (B_TRUE);
3355 }
3356 return (B_FALSE);
3357 }
3358
3359 /*
3360 * callback function for set/get of properties
3361 */
3362 static int
3363 e1000g_m_setprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3364 uint_t pr_valsize, const void *pr_val)
3365 {
3366 struct e1000g *Adapter = arg;
3367 struct e1000_hw *hw = &Adapter->shared;
3368 struct e1000_fc_info *fc = &Adapter->shared.fc;
3369 int err = 0;
3370 link_flowctrl_t flowctrl;
3371 uint32_t cur_mtu, new_mtu;
3372
3373 rw_enter(&Adapter->chip_lock, RW_WRITER);
3374
3375 if (Adapter->e1000g_state & E1000G_SUSPENDED) {
3376 rw_exit(&Adapter->chip_lock);
3377 return (ECANCELED);
3378 }
3379
3380 if (Adapter->loopback_mode != E1000G_LB_NONE &&
3381 e1000g_param_locked(pr_num)) {
3382 /*
3383 * All en_* parameters are locked (read-only)
3384 * while the device is in any sort of loopback mode.
3385 */
3386 rw_exit(&Adapter->chip_lock);
3387 return (EBUSY);
3388 }
3389
3390 switch (pr_num) {
3391 case MAC_PROP_EN_1000FDX_CAP:
3392 if (hw->phy.media_type != e1000_media_type_copper) {
3393 err = ENOTSUP;
3394 break;
3395 }
3396 Adapter->param_en_1000fdx = *(uint8_t *)pr_val;
3397 Adapter->param_adv_1000fdx = *(uint8_t *)pr_val;
3398 goto reset;
3399 case MAC_PROP_EN_100FDX_CAP:
3400 if (hw->phy.media_type != e1000_media_type_copper) {
3401 err = ENOTSUP;
3402 break;
3403 }
3404 Adapter->param_en_100fdx = *(uint8_t *)pr_val;
3405 Adapter->param_adv_100fdx = *(uint8_t *)pr_val;
3406 goto reset;
3407 case MAC_PROP_EN_100HDX_CAP:
3408 if (hw->phy.media_type != e1000_media_type_copper) {
3409 err = ENOTSUP;
3410 break;
3411 }
3412 Adapter->param_en_100hdx = *(uint8_t *)pr_val;
3413 Adapter->param_adv_100hdx = *(uint8_t *)pr_val;
3414 goto reset;
3415 case MAC_PROP_EN_10FDX_CAP:
3416 if (hw->phy.media_type != e1000_media_type_copper) {
3417 err = ENOTSUP;
3418 break;
3419 }
3420 Adapter->param_en_10fdx = *(uint8_t *)pr_val;
3421 Adapter->param_adv_10fdx = *(uint8_t *)pr_val;
3422 goto reset;
3423 case MAC_PROP_EN_10HDX_CAP:
3424 if (hw->phy.media_type != e1000_media_type_copper) {
3425 err = ENOTSUP;
3426 break;
3427 }
3428 Adapter->param_en_10hdx = *(uint8_t *)pr_val;
3429 Adapter->param_adv_10hdx = *(uint8_t *)pr_val;
3430 goto reset;
3431 case MAC_PROP_AUTONEG:
3432 if (hw->phy.media_type != e1000_media_type_copper) {
3433 err = ENOTSUP;
3434 break;
3435 }
3436 Adapter->param_adv_autoneg = *(uint8_t *)pr_val;
3437 goto reset;
3438 case MAC_PROP_FLOWCTRL:
3439 fc->send_xon = B_TRUE;
3440 bcopy(pr_val, &flowctrl, sizeof (flowctrl));
3441
3442 switch (flowctrl) {
3443 default:
3444 err = EINVAL;
3445 break;
3446 case LINK_FLOWCTRL_NONE:
3447 fc->requested_mode = e1000_fc_none;
3448 break;
3449 case LINK_FLOWCTRL_RX:
3450 fc->requested_mode = e1000_fc_rx_pause;
3451 break;
3452 case LINK_FLOWCTRL_TX:
3453 fc->requested_mode = e1000_fc_tx_pause;
3454 break;
3455 case LINK_FLOWCTRL_BI:
3456 fc->requested_mode = e1000_fc_full;
3457 break;
3458 }
3459 reset:
3460 if (err == 0) {
3461 /* check PCH limits & reset the link */
3462 e1000g_pch_limits(Adapter);
3463 if (e1000g_reset_link(Adapter) != DDI_SUCCESS)
3464 err = EINVAL;
3465 }
3466 break;
3467 case MAC_PROP_ADV_1000FDX_CAP:
3468 case MAC_PROP_ADV_1000HDX_CAP:
3469 case MAC_PROP_ADV_100FDX_CAP:
3470 case MAC_PROP_ADV_100HDX_CAP:
3471 case MAC_PROP_ADV_10FDX_CAP:
3472 case MAC_PROP_ADV_10HDX_CAP:
3473 case MAC_PROP_EN_1000HDX_CAP:
3474 case MAC_PROP_STATUS:
3475 case MAC_PROP_SPEED:
3476 case MAC_PROP_DUPLEX:
3477 err = ENOTSUP; /* read-only prop. Can't set this. */
3478 break;
3479 case MAC_PROP_MTU:
3480 /* adapter must be stopped for an MTU change */
3481 if (Adapter->e1000g_state & E1000G_STARTED) {
3482 err = EBUSY;
3483 break;
3484 }
3485
3486 cur_mtu = Adapter->default_mtu;
3487
3488 /* get new requested MTU */
3489 bcopy(pr_val, &new_mtu, sizeof (new_mtu));
3490 if (new_mtu == cur_mtu) {
3491 err = 0;
3492 break;
3493 }
3494
3495 if ((new_mtu < DEFAULT_MTU) ||
3496 (new_mtu > Adapter->max_mtu)) {
3497 err = EINVAL;
3498 break;
3499 }
3500
3501 /* inform MAC framework of new MTU */
3502 err = mac_maxsdu_update(Adapter->mh, new_mtu);
3503
3504 if (err == 0) {
3505 Adapter->default_mtu = new_mtu;
3506 Adapter->max_frame_size =
3507 e1000g_mtu2maxframe(new_mtu);
3508
3509 /*
3510 * check PCH limits & set buffer sizes to
3511 * match new MTU
3512 */
3513 e1000g_pch_limits(Adapter);
3514 e1000g_set_bufsize(Adapter);
3515
3516 /*
3517 * decrease the number of descriptors and free
3518 * packets for jumbo frames to reduce tx/rx
3519 * resource consumption
3520 */
3521 if (Adapter->max_frame_size >=
3522 (FRAME_SIZE_UPTO_4K)) {
3523 if (Adapter->tx_desc_num_flag == 0)
3524 Adapter->tx_desc_num =
3525 DEFAULT_JUMBO_NUM_TX_DESC;
3526
3527 if (Adapter->rx_desc_num_flag == 0)
3528 Adapter->rx_desc_num =
3529 DEFAULT_JUMBO_NUM_RX_DESC;
3530
3531 if (Adapter->tx_buf_num_flag == 0)
3532 Adapter->tx_freelist_num =
3533 DEFAULT_JUMBO_NUM_TX_BUF;
3534
3535 if (Adapter->rx_buf_num_flag == 0)
3536 Adapter->rx_freelist_limit =
3537 DEFAULT_JUMBO_NUM_RX_BUF;
3538 } else {
3539 if (Adapter->tx_desc_num_flag == 0)
3540 Adapter->tx_desc_num =
3541 DEFAULT_NUM_TX_DESCRIPTOR;
3542
3543 if (Adapter->rx_desc_num_flag == 0)
3544 Adapter->rx_desc_num =
3545 DEFAULT_NUM_RX_DESCRIPTOR;
3546
3547 if (Adapter->tx_buf_num_flag == 0)
3548 Adapter->tx_freelist_num =
3549 DEFAULT_NUM_TX_FREELIST;
3550
3551 if (Adapter->rx_buf_num_flag == 0)
3552 Adapter->rx_freelist_limit =
3553 DEFAULT_NUM_RX_FREELIST;
3554 }
3555 }
3556 break;
3557 case MAC_PROP_PRIVATE:
3558 err = e1000g_set_priv_prop(Adapter, pr_name,
3559 pr_valsize, pr_val);
3560 break;
3561 default:
3562 err = ENOTSUP;
3563 break;
3564 }
3565 rw_exit(&Adapter->chip_lock);
3566 return (err);
3567 }
3568
3569 static int
3570 e1000g_m_getprop(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3571 uint_t pr_valsize, void *pr_val)
3572 {
3573 struct e1000g *Adapter = arg;
3574 struct e1000_fc_info *fc = &Adapter->shared.fc;
3575 int err = 0;
3576 link_flowctrl_t flowctrl;
3577 uint64_t tmp = 0;
3578
3579 switch (pr_num) {
3580 case MAC_PROP_DUPLEX:
3581 ASSERT(pr_valsize >= sizeof (link_duplex_t));
3582 bcopy(&Adapter->link_duplex, pr_val,
3583 sizeof (link_duplex_t));
3584 break;
3585 case MAC_PROP_SPEED:
3586 ASSERT(pr_valsize >= sizeof (uint64_t));
3587 tmp = Adapter->link_speed * 1000000ull;
3588 bcopy(&tmp, pr_val, sizeof (tmp));
3589 break;
3590 case MAC_PROP_AUTONEG:
3591 *(uint8_t *)pr_val = Adapter->param_adv_autoneg;
3592 break;
3593 case MAC_PROP_FLOWCTRL:
3594 ASSERT(pr_valsize >= sizeof (link_flowctrl_t));
3595 switch (fc->current_mode) {
3596 case e1000_fc_none:
3597 flowctrl = LINK_FLOWCTRL_NONE;
3598 break;
3599 case e1000_fc_rx_pause:
3600 flowctrl = LINK_FLOWCTRL_RX;
3601 break;
3602 case e1000_fc_tx_pause:
3603 flowctrl = LINK_FLOWCTRL_TX;
3604 break;
3605 case e1000_fc_full:
3606 flowctrl = LINK_FLOWCTRL_BI;
3607 break;
3608 }
3609 bcopy(&flowctrl, pr_val, sizeof (flowctrl));
3610 break;
3611 case MAC_PROP_ADV_1000FDX_CAP:
3612 *(uint8_t *)pr_val = Adapter->param_adv_1000fdx;
3613 break;
3614 case MAC_PROP_EN_1000FDX_CAP:
3615 *(uint8_t *)pr_val = Adapter->param_en_1000fdx;
3616 break;
3617 case MAC_PROP_ADV_1000HDX_CAP:
3618 *(uint8_t *)pr_val = Adapter->param_adv_1000hdx;
3619 break;
3620 case MAC_PROP_EN_1000HDX_CAP:
3621 *(uint8_t *)pr_val = Adapter->param_en_1000hdx;
3622 break;
3623 case MAC_PROP_ADV_100FDX_CAP:
3624 *(uint8_t *)pr_val = Adapter->param_adv_100fdx;
3625 break;
3626 case MAC_PROP_EN_100FDX_CAP:
3627 *(uint8_t *)pr_val = Adapter->param_en_100fdx;
3628 break;
3629 case MAC_PROP_ADV_100HDX_CAP:
3630 *(uint8_t *)pr_val = Adapter->param_adv_100hdx;
3631 break;
3632 case MAC_PROP_EN_100HDX_CAP:
3633 *(uint8_t *)pr_val = Adapter->param_en_100hdx;
3634 break;
3635 case MAC_PROP_ADV_10FDX_CAP:
3636 *(uint8_t *)pr_val = Adapter->param_adv_10fdx;
3637 break;
3638 case MAC_PROP_EN_10FDX_CAP:
3639 *(uint8_t *)pr_val = Adapter->param_en_10fdx;
3640 break;
3641 case MAC_PROP_ADV_10HDX_CAP:
3642 *(uint8_t *)pr_val = Adapter->param_adv_10hdx;
3643 break;
3644 case MAC_PROP_EN_10HDX_CAP:
3645 *(uint8_t *)pr_val = Adapter->param_en_10hdx;
3646 break;
3647 case MAC_PROP_ADV_100T4_CAP:
3648 case MAC_PROP_EN_100T4_CAP:
3649 *(uint8_t *)pr_val = Adapter->param_adv_100t4;
3650 break;
3651 case MAC_PROP_PRIVATE:
3652 err = e1000g_get_priv_prop(Adapter, pr_name,
3653 pr_valsize, pr_val);
3654 break;
3655 default:
3656 err = ENOTSUP;
3657 break;
3658 }
3659
3660 return (err);
3661 }
3662
3663 static void
3664 e1000g_m_propinfo(void *arg, const char *pr_name, mac_prop_id_t pr_num,
3665 mac_prop_info_handle_t prh)
3666 {
3667 struct e1000g *Adapter = arg;
3668 struct e1000_hw *hw = &Adapter->shared;
3669
3670 switch (pr_num) {
3671 case MAC_PROP_DUPLEX:
3672 case MAC_PROP_SPEED:
3673 case MAC_PROP_ADV_1000FDX_CAP:
3674 case MAC_PROP_ADV_1000HDX_CAP:
3675 case MAC_PROP_ADV_100FDX_CAP:
3676 case MAC_PROP_ADV_100HDX_CAP:
3677 case MAC_PROP_ADV_10FDX_CAP:
3678 case MAC_PROP_ADV_10HDX_CAP:
3679 case MAC_PROP_ADV_100T4_CAP:
3680 case MAC_PROP_EN_100T4_CAP:
3681 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3682 break;
3683
3684 case MAC_PROP_EN_1000FDX_CAP:
3685 if (hw->phy.media_type != e1000_media_type_copper) {
3686 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3687 } else {
3688 mac_prop_info_set_default_uint8(prh,
3689 ((Adapter->phy_ext_status &
3690 IEEE_ESR_1000T_FD_CAPS) ||
3691 (Adapter->phy_ext_status &
3692 IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0);
3693 }
3694 break;
3695
3696 case MAC_PROP_EN_100FDX_CAP:
3697 if (hw->phy.media_type != e1000_media_type_copper) {
3698 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3699 } else {
3700 mac_prop_info_set_default_uint8(prh,
3701 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
3702 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
3703 ? 1 : 0);
3704 }
3705 break;
3706
3707 case MAC_PROP_EN_100HDX_CAP:
3708 if (hw->phy.media_type != e1000_media_type_copper) {
3709 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3710 } else {
3711 mac_prop_info_set_default_uint8(prh,
3712 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
3713 (Adapter->phy_status & MII_SR_100T2_HD_CAPS))
3714 ? 1 : 0);
3715 }
3716 break;
3717
3718 case MAC_PROP_EN_10FDX_CAP:
3719 if (hw->phy.media_type != e1000_media_type_copper) {
3720 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3721 } else {
3722 mac_prop_info_set_default_uint8(prh,
3723 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0);
3724 }
3725 break;
3726
3727 case MAC_PROP_EN_10HDX_CAP:
3728 if (hw->phy.media_type != e1000_media_type_copper) {
3729 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3730 } else {
3731 mac_prop_info_set_default_uint8(prh,
3732 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0);
3733 }
3734 break;
3735
3736 case MAC_PROP_EN_1000HDX_CAP:
3737 if (hw->phy.media_type != e1000_media_type_copper)
3738 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3739 break;
3740
3741 case MAC_PROP_AUTONEG:
3742 if (hw->phy.media_type != e1000_media_type_copper) {
3743 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3744 } else {
3745 mac_prop_info_set_default_uint8(prh,
3746 (Adapter->phy_status & MII_SR_AUTONEG_CAPS)
3747 ? 1 : 0);
3748 }
3749 break;
3750
3751 case MAC_PROP_FLOWCTRL:
3752 mac_prop_info_set_default_link_flowctrl(prh, LINK_FLOWCTRL_BI);
3753 break;
3754
3755 case MAC_PROP_MTU: {
3756 struct e1000_mac_info *mac = &Adapter->shared.mac;
3757 struct e1000_phy_info *phy = &Adapter->shared.phy;
3758 uint32_t max;
3759
3760 /* some MAC types do not support jumbo frames */
3761 if ((mac->type == e1000_ich8lan) ||
3762 ((mac->type == e1000_ich9lan) && (phy->type ==
3763 e1000_phy_ife))) {
3764 max = DEFAULT_MTU;
3765 } else {
3766 max = Adapter->max_mtu;
3767 }
3768
3769 mac_prop_info_set_range_uint32(prh, DEFAULT_MTU, max);
3770 break;
3771 }
3772 case MAC_PROP_PRIVATE: {
3773 char valstr[64];
3774 int value;
3775
3776 if (strcmp(pr_name, "_adv_pause_cap") == 0 ||
3777 strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
3778 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
3779 return;
3780 } else if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3781 value = DEFAULT_TX_BCOPY_THRESHOLD;
3782 } else if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3783 value = DEFAULT_TX_INTR_ENABLE;
3784 } else if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3785 value = DEFAULT_TX_INTR_DELAY;
3786 } else if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3787 value = DEFAULT_TX_INTR_ABS_DELAY;
3788 } else if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3789 value = DEFAULT_RX_BCOPY_THRESHOLD;
3790 } else if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3791 value = DEFAULT_RX_LIMIT_ON_INTR;
3792 } else if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3793 value = DEFAULT_RX_INTR_DELAY;
3794 } else if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3795 value = DEFAULT_RX_INTR_ABS_DELAY;
3796 } else if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3797 value = DEFAULT_INTR_THROTTLING;
3798 } else if (strcmp(pr_name, "_intr_adaptive") == 0) {
3799 value = 1;
3800 } else {
3801 return;
3802 }
3803
3804 (void) snprintf(valstr, sizeof (valstr), "%d", value);
3805 mac_prop_info_set_default_str(prh, valstr);
3806 break;
3807 }
3808 }
3809 }
3810
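/*
 * e1000g_set_priv_prop - set a driver-private property
 *
 * Each property value is parsed with ddi_strtol() and range-checked
 * before the corresponding soft state (and, where applicable, the
 * hardware register) is updated.
 */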
3811 /* ARGSUSED2 */
3812 static int
3813 e1000g_set_priv_prop(struct e1000g *Adapter, const char *pr_name,
3814 uint_t pr_valsize, const void *pr_val)
3815 {
3816 int err = 0;
3817 long result;
3818 struct e1000_hw *hw = &Adapter->shared;
3819
3820 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
3821 if (pr_val == NULL) {
3822 err = EINVAL;
3823 return (err);
3824 }
3825 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3826 if (result < MIN_TX_BCOPY_THRESHOLD ||
3827 result > MAX_TX_BCOPY_THRESHOLD)
3828 err = EINVAL;
3829 else {
3830 Adapter->tx_bcopy_thresh = (uint32_t)result;
3831 }
3832 return (err);
3833 }
3834 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
3835 if (pr_val == NULL) {
3836 err = EINVAL;
3837 return (err);
3838 }
3839 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3840 if (result < 0 || result > 1)
3841 err = EINVAL;
3842 else {
3843 Adapter->tx_intr_enable = (result == 1) ?
3844 B_TRUE: B_FALSE;
3845 if (Adapter->tx_intr_enable)
3846 e1000g_mask_tx_interrupt(Adapter);
3847 else
3848 e1000g_clear_tx_interrupt(Adapter);
3849 if (e1000g_check_acc_handle(
3850 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3851 ddi_fm_service_impact(Adapter->dip,
3852 DDI_SERVICE_DEGRADED);
3853 err = EIO;
3854 }
3855 }
3856 return (err);
3857 }
3858 if (strcmp(pr_name, "_tx_intr_delay") == 0) {
3859 if (pr_val == NULL) {
3860 err = EINVAL;
3861 return (err);
3862 }
3863 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3864 if (result < MIN_TX_INTR_DELAY ||
3865 result > MAX_TX_INTR_DELAY)
3866 err = EINVAL;
3867 else {
3868 Adapter->tx_intr_delay = (uint32_t)result;
3869 E1000_WRITE_REG(hw, E1000_TIDV, Adapter->tx_intr_delay);
3870 if (e1000g_check_acc_handle(
3871 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3872 ddi_fm_service_impact(Adapter->dip,
3873 DDI_SERVICE_DEGRADED);
3874 err = EIO;
3875 }
3876 }
3877 return (err);
3878 }
3879 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
3880 if (pr_val == NULL) {
3881 err = EINVAL;
3882 return (err);
3883 }
3884 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3885 if (result < MIN_TX_INTR_ABS_DELAY ||
3886 result > MAX_TX_INTR_ABS_DELAY)
3887 err = EINVAL;
3888 else {
3889 Adapter->tx_intr_abs_delay = (uint32_t)result;
3890 E1000_WRITE_REG(hw, E1000_TADV,
3891 Adapter->tx_intr_abs_delay);
3892 if (e1000g_check_acc_handle(
3893 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3894 ddi_fm_service_impact(Adapter->dip,
3895 DDI_SERVICE_DEGRADED);
3896 err = EIO;
3897 }
3898 }
3899 return (err);
3900 }
3901 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
3902 if (pr_val == NULL) {
3903 err = EINVAL;
3904 return (err);
3905 }
3906 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3907 if (result < MIN_RX_BCOPY_THRESHOLD ||
3908 result > MAX_RX_BCOPY_THRESHOLD)
3909 err = EINVAL;
3910 else
3911 Adapter->rx_bcopy_thresh = (uint32_t)result;
3912 return (err);
3913 }
3914 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
3915 if (pr_val == NULL) {
3916 err = EINVAL;
3917 return (err);
3918 }
3919 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3920 if (result < MIN_RX_LIMIT_ON_INTR ||
3921 result > MAX_RX_LIMIT_ON_INTR)
3922 err = EINVAL;
3923 else
3924 Adapter->rx_limit_onintr = (uint32_t)result;
3925 return (err);
3926 }
3927 if (strcmp(pr_name, "_rx_intr_delay") == 0) {
3928 if (pr_val == NULL) {
3929 err = EINVAL;
3930 return (err);
3931 }
3932 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3933 if (result < MIN_RX_INTR_DELAY ||
3934 result > MAX_RX_INTR_DELAY)
3935 err = EINVAL;
3936 else {
3937 Adapter->rx_intr_delay = (uint32_t)result;
3938 E1000_WRITE_REG(hw, E1000_RDTR, Adapter->rx_intr_delay);
3939 if (e1000g_check_acc_handle(
3940 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3941 ddi_fm_service_impact(Adapter->dip,
3942 DDI_SERVICE_DEGRADED);
3943 err = EIO;
3944 }
3945 }
3946 return (err);
3947 }
3948 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
3949 if (pr_val == NULL) {
3950 err = EINVAL;
3951 return (err);
3952 }
3953 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3954 if (result < MIN_RX_INTR_ABS_DELAY ||
3955 result > MAX_RX_INTR_ABS_DELAY)
3956 err = EINVAL;
3957 else {
3958 Adapter->rx_intr_abs_delay = (uint32_t)result;
3959 E1000_WRITE_REG(hw, E1000_RADV,
3960 Adapter->rx_intr_abs_delay);
3961 if (e1000g_check_acc_handle(
3962 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3963 ddi_fm_service_impact(Adapter->dip,
3964 DDI_SERVICE_DEGRADED);
3965 err = EIO;
3966 }
3967 }
3968 return (err);
3969 }
3970 if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
3971 if (pr_val == NULL) {
3972 err = EINVAL;
3973 return (err);
3974 }
3975 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
3976 if (result < MIN_INTR_THROTTLING ||
3977 result > MAX_INTR_THROTTLING)
3978 err = EINVAL;
3979 else {
3980 if (hw->mac.type >= e1000_82540) {
3981 Adapter->intr_throttling_rate =
3982 (uint32_t)result;
3983 E1000_WRITE_REG(hw, E1000_ITR,
3984 Adapter->intr_throttling_rate);
3985 if (e1000g_check_acc_handle(
3986 Adapter->osdep.reg_handle) != DDI_FM_OK) {
3987 ddi_fm_service_impact(Adapter->dip,
3988 DDI_SERVICE_DEGRADED);
3989 err = EIO;
3990 }
3991 } else
3992 err = EINVAL;
3993 }
3994 return (err);
3995 }
3996 if (strcmp(pr_name, "_intr_adaptive") == 0) {
3997 if (pr_val == NULL) {
3998 err = EINVAL;
3999 return (err);
4000 }
4001 (void) ddi_strtol(pr_val, (char **)NULL, 0, &result);
4002 if (result < 0 || result > 1)
4003 err = EINVAL;
4004 else {
4005 if (hw->mac.type >= e1000_82540) {
4006 Adapter->intr_adaptive = (result == 1) ?
4007 B_TRUE : B_FALSE;
4008 } else {
4009 err = EINVAL;
4010 }
4011 }
4012 return (err);
4013 }
4014 return (ENOTSUP);
4015 }
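/*
 * Illustrative usage only (not part of the driver): on a running system
 * the private properties handled above can typically be tuned with
 * dladm(1M), e.g.
 *
 *	# dladm set-linkprop -p _tx_intr_delay=32 e1000g0
 *
 * The property name, value and link name are examples; every value is
 * range-checked by e1000g_set_priv_prop() before it is applied.
 */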
4016
4017 static int
4018 e1000g_get_priv_prop(struct e1000g *Adapter, const char *pr_name,
4019 uint_t pr_valsize, void *pr_val)
4020 {
4021 int err = ENOTSUP;
4022 int value;
4023
4024 if (strcmp(pr_name, "_adv_pause_cap") == 0) {
4025 value = Adapter->param_adv_pause;
4026 err = 0;
4027 goto done;
4028 }
4029 if (strcmp(pr_name, "_adv_asym_pause_cap") == 0) {
4030 value = Adapter->param_adv_asym_pause;
4031 err = 0;
4032 goto done;
4033 }
4034 if (strcmp(pr_name, "_tx_bcopy_threshold") == 0) {
4035 value = Adapter->tx_bcopy_thresh;
4036 err = 0;
4037 goto done;
4038 }
4039 if (strcmp(pr_name, "_tx_interrupt_enable") == 0) {
4040 value = Adapter->tx_intr_enable;
4041 err = 0;
4042 goto done;
4043 }
4044 if (strcmp(pr_name, "_tx_intr_delay") == 0) {
4045 value = Adapter->tx_intr_delay;
4046 err = 0;
4047 goto done;
4048 }
4049 if (strcmp(pr_name, "_tx_intr_abs_delay") == 0) {
4050 value = Adapter->tx_intr_abs_delay;
4051 err = 0;
4052 goto done;
4053 }
4054 if (strcmp(pr_name, "_rx_bcopy_threshold") == 0) {
4055 value = Adapter->rx_bcopy_thresh;
4056 err = 0;
4057 goto done;
4058 }
4059 if (strcmp(pr_name, "_max_num_rcv_packets") == 0) {
4060 value = Adapter->rx_limit_onintr;
4061 err = 0;
4062 goto done;
4063 }
4064 if (strcmp(pr_name, "_rx_intr_delay") == 0) {
4065 value = Adapter->rx_intr_delay;
4066 err = 0;
4067 goto done;
4068 }
4069 if (strcmp(pr_name, "_rx_intr_abs_delay") == 0) {
4070 value = Adapter->rx_intr_abs_delay;
4071 err = 0;
4072 goto done;
4073 }
4074 if (strcmp(pr_name, "_intr_throttling_rate") == 0) {
4075 value = Adapter->intr_throttling_rate;
4076 err = 0;
4077 goto done;
4078 }
4079 if (strcmp(pr_name, "_intr_adaptive") == 0) {
4080 value = Adapter->intr_adaptive;
4081 err = 0;
4082 goto done;
4083 }
4084 done:
4085 if (err == 0) {
4086 (void) snprintf(pr_val, pr_valsize, "%d", value);
4087 }
4088 return (err);
4089 }
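/*
 * Illustrative read-back of a private property (names are examples):
 *
 *	# dladm show-linkprop -p _rx_intr_delay e1000g0
 *
 * The current value shown comes from e1000g_get_priv_prop() above.
 */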
4090
4091 /*
4092 * e1000g_get_conf - get configurations set in e1000g.conf
4093 * This routine gets user-configured values out of the configuration
4094 * file e1000g.conf.
4095 *
4096 * For each configurable value, there is a minimum, a maximum, and a
4097 * default.
4098 * If the user does not configure a value, use the default.
4099 * If the user configures below the minimum, use the minimum.
4100 * If the user configures above the maximum, use the maximum.
4101 */
4102 static void
4103 e1000g_get_conf(struct e1000g *Adapter)
4104 {
4105 struct e1000_hw *hw = &Adapter->shared;
4106 boolean_t tbi_compatibility = B_FALSE;
4107 boolean_t is_jumbo = B_FALSE;
4108 int propval;
4109 /*
4110 * decrease the number of descriptors and free packets
4111 * for jumbo frames to reduce tx/rx resource consumption
4112 */
4113 if (Adapter->max_frame_size >= FRAME_SIZE_UPTO_4K) {
4114 is_jumbo = B_TRUE;
4115 }
4116
4117 /*
4118 * get each configurable property from e1000g.conf
4119 */
4120
4121 /*
4122 * NumTxDescriptors
4123 */
4124 Adapter->tx_desc_num_flag =
4125 e1000g_get_prop(Adapter, "NumTxDescriptors",
4126 MIN_NUM_TX_DESCRIPTOR, MAX_NUM_TX_DESCRIPTOR,
4127 is_jumbo ? DEFAULT_JUMBO_NUM_TX_DESC
4128 : DEFAULT_NUM_TX_DESCRIPTOR, &propval);
4129 Adapter->tx_desc_num = propval;
4130
4131 /*
4132 * NumRxDescriptors
4133 */
4134 Adapter->rx_desc_num_flag =
4135 e1000g_get_prop(Adapter, "NumRxDescriptors",
4136 MIN_NUM_RX_DESCRIPTOR, MAX_NUM_RX_DESCRIPTOR,
4137 is_jumbo ? DEFAULT_JUMBO_NUM_RX_DESC
4138 : DEFAULT_NUM_RX_DESCRIPTOR, &propval);
4139 Adapter->rx_desc_num = propval;
4140
4141 /*
4142 * NumRxFreeList
4143 */
4144 Adapter->rx_buf_num_flag =
4145 e1000g_get_prop(Adapter, "NumRxFreeList",
4146 MIN_NUM_RX_FREELIST, MAX_NUM_RX_FREELIST,
4147 is_jumbo ? DEFAULT_JUMBO_NUM_RX_BUF
4148 : DEFAULT_NUM_RX_FREELIST, &propval);
4149 Adapter->rx_freelist_limit = propval;
4150
4151 /*
4152 * NumTxPacketList
4153 */
4154 Adapter->tx_buf_num_flag =
4155 e1000g_get_prop(Adapter, "NumTxPacketList",
4156 MIN_NUM_TX_FREELIST, MAX_NUM_TX_FREELIST,
4157 is_jumbo ? DEFAULT_JUMBO_NUM_TX_BUF
4158 : DEFAULT_NUM_TX_FREELIST, &propval);
4159 Adapter->tx_freelist_num = propval;
4160
4161 /*
4162 * FlowControl
4163 */
4164 hw->fc.send_xon = B_TRUE;
4165 (void) e1000g_get_prop(Adapter, "FlowControl",
4166 e1000_fc_none, 4, DEFAULT_FLOW_CONTROL, &propval);
4167 hw->fc.requested_mode = propval;
4168 /* 4 is the setting that says "let the eeprom decide" */
4169 if (hw->fc.requested_mode == 4)
4170 hw->fc.requested_mode = e1000_fc_default;
4171
4172 /*
4173 * Max Num Receive Packets on Interrupt
4174 */
4175 (void) e1000g_get_prop(Adapter, "MaxNumReceivePackets",
4176 MIN_RX_LIMIT_ON_INTR, MAX_RX_LIMIT_ON_INTR,
4177 DEFAULT_RX_LIMIT_ON_INTR, &propval);
4178 Adapter->rx_limit_onintr = propval;
4179
4180 /*
4181 * PHY master slave setting
4182 */
4183 (void) e1000g_get_prop(Adapter, "SetMasterSlave",
4184 e1000_ms_hw_default, e1000_ms_auto,
4185 e1000_ms_hw_default, &propval);
4186 hw->phy.ms_type = propval;
4187
4188 /*
4189 * Parameter that controls the TBI mode workaround, which is only
4190 * needed on certain switches such as Cisco 6500/Foundry.
4191 */
4192 (void) e1000g_get_prop(Adapter, "TbiCompatibilityEnable",
4193 0, 1, DEFAULT_TBI_COMPAT_ENABLE, &propval);
4194 tbi_compatibility = (propval == 1);
4195 e1000_set_tbi_compatibility_82543(hw, tbi_compatibility);
4196
4197 /*
4198 * MSI Enable
4199 */
4200 (void) e1000g_get_prop(Adapter, "MSIEnable",
4201 0, 1, DEFAULT_MSI_ENABLE, &propval);
4202 Adapter->msi_enable = (propval == 1);
4203
4204 /*
4205 * Interrupt Throttling Rate
4206 */
4207 (void) e1000g_get_prop(Adapter, "intr_throttling_rate",
4208 MIN_INTR_THROTTLING, MAX_INTR_THROTTLING,
4209 DEFAULT_INTR_THROTTLING, &propval);
4210 Adapter->intr_throttling_rate = propval;
4211
4212 /*
4213 * Adaptive Interrupt Blanking Enable/Disable
4214 * It is enabled by default
4215 */
4216 (void) e1000g_get_prop(Adapter, "intr_adaptive", 0, 1, 1,
4217 &propval);
4218 Adapter->intr_adaptive = (propval == 1);
4219
4220 /*
4221 * Hardware checksum enable/disable parameter
4222 */
4223 (void) e1000g_get_prop(Adapter, "tx_hcksum_enable",
4224 0, 1, DEFAULT_TX_HCKSUM_ENABLE, &propval);
4225 Adapter->tx_hcksum_enable = (propval == 1);
4226 /*
4227 * Checksum on/off selection via global parameters.
4228 *
4229 * If the chip is flagged as not capable of (correctly)
4230 * handling checksumming, we don't enable it on either
4231 * Rx or Tx side. Otherwise, we take this chip's settings
4232 * from the patchable global defaults.
4233 *
4234 * We advertise our capabilities only if TX offload is
4235 * enabled. On receive, the stack will accept checksummed
4236 * packets anyway, even if we haven't said we can deliver
4237 * them.
4238 */
4239 switch (hw->mac.type) {
4240 case e1000_82540:
4241 case e1000_82544:
4242 case e1000_82545:
4243 case e1000_82545_rev_3:
4244 case e1000_82546:
4245 case e1000_82546_rev_3:
4246 case e1000_82571:
4247 case e1000_82572:
4248 case e1000_82573:
4249 case e1000_80003es2lan:
4250 break;
4251 /*
4252 * For the following Intel PRO/1000 chipsets, we have not
4253 * tested the hardware checksum offload capability, so we
4254 * disable the capability for them.
4255 * e1000_82542,
4256 * e1000_82543,
4257 * e1000_82541,
4258 * e1000_82541_rev_2,
4259 * e1000_82547,
4260 * e1000_82547_rev_2,
4261 */
4262 default:
4263 Adapter->tx_hcksum_enable = B_FALSE;
4264 }
4265
4266 /*
4267 * Large Send Offload (LSO) Enable/Disable
4268 * If the tx hardware checksum is not enabled, LSO should be
4269 * disabled.
4270 */
4271 (void) e1000g_get_prop(Adapter, "lso_enable",
4272 0, 1, DEFAULT_LSO_ENABLE, &propval);
4273 Adapter->lso_enable = (propval == 1);
4274
4275 switch (hw->mac.type) {
4276 case e1000_82546:
4277 case e1000_82546_rev_3:
4278 if (Adapter->lso_enable)
4279 Adapter->lso_premature_issue = B_TRUE;
4280 /* FALLTHRU */
4281 case e1000_82571:
4282 case e1000_82572:
4283 case e1000_82573:
4284 case e1000_80003es2lan:
4285 break;
4286 default:
4287 Adapter->lso_enable = B_FALSE;
4288 }
4289
4290 if (!Adapter->tx_hcksum_enable) {
4291 Adapter->lso_premature_issue = B_FALSE;
4292 Adapter->lso_enable = B_FALSE;
4293 }
4294
4295 /*
4296 * If mem_workaround_82546 is enabled, the rx buffers allocated for
4297 * e1000_82545, e1000_82546 and e1000_82546_rev_3
4298 * will not cross a 64k boundary.
4299 */
4300 (void) e1000g_get_prop(Adapter, "mem_workaround_82546",
4301 0, 1, DEFAULT_MEM_WORKAROUND_82546, &propval);
4302 Adapter->mem_workaround_82546 = (propval == 1);
4303
4304 /*
4305 * Max number of multicast addresses
4306 */
4307 (void) e1000g_get_prop(Adapter, "mcast_max_num",
4308 MIN_MCAST_NUM, MAX_MCAST_NUM, hw->mac.mta_reg_count * 32,
4309 &propval);
4310 Adapter->mcast_max_num = propval;
4311 }
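/*
 * Illustrative e1000g.conf fragment (driver.conf(4) syntax) for the
 * properties read above. The names match the code; the values are
 * examples only and are clamped to the ranges passed to e1000g_get_prop():
 *
 *	NumTxDescriptors=2048,2048;
 *	NumRxDescriptors=2048,2048;
 *	MaxNumReceivePackets=256;
 *	FlowControl=3;
 *
 * Integer-array properties are per-instance: the n-th entry applies to
 * instance n of the driver.
 */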
4312
4313 /*
4314 * e1000g_get_prop - routine to read properties
4315 *
4316 * Get a user-configured property value out of the configuration
4317 * file e1000g.conf.
4318 *
4319 * The caller provides the name of the property, a minimum value, a
4320 * maximum value, a default value and a pointer to the returned
4321 * property value.
4322 *
4323 * Return B_TRUE if the configured value of the property is not a default
4324 * value, otherwise return B_FALSE.
4325 */
4326 static boolean_t
4327 e1000g_get_prop(struct e1000g *Adapter, /* point to per-adapter structure */
4328 char *propname, /* name of the property */
4329 int minval, /* minimum acceptable value */
4330 int maxval, /* maximum acceptable value */
4331 int defval, /* default value */
4332 int *propvalue) /* property value returned to caller */
4333 {
4334 int propval; /* value returned for requested property */
4335 int *props; /* pointer to array of properties returned */
4336 uint_t nprops; /* number of property values returned */
4337 boolean_t ret = B_TRUE;
4338
4339 /*
4340 * get the array of properties from the config file
4341 */
4342 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, Adapter->dip,
4343 DDI_PROP_DONTPASS, propname, &props, &nprops) == DDI_PROP_SUCCESS) {
4344 /* got some properties, test if we got enough */
4345 if (Adapter->instance < nprops) {
4346 propval = props[Adapter->instance];
4347 } else {
4348 /* not enough properties configured */
4349 propval = defval;
4350 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4351 "Not Enough %s values found in e1000g.conf"
4352 " - set to %d\n",
4353 propname, propval);
4354 ret = B_FALSE;
4355 }
4356
4357 /* free memory allocated for properties */
4358 ddi_prop_free(props);
4359
4360 } else {
4361 propval = defval;
4362 ret = B_FALSE;
4363 }
4364
4365 /*
4366 * enforce limits
4367 */
4368 if (propval > maxval) {
4369 propval = maxval;
4370 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4371 "Too High %s value in e1000g.conf - set to %d\n",
4372 propname, propval);
4373 }
4374
4375 if (propval < minval) {
4376 propval = minval;
4377 E1000G_DEBUGLOG_2(Adapter, E1000G_INFO_LEVEL,
4378 "Too Low %s value in e1000g.conf - set to %d\n",
4379 propname, propval);
4380 }
4381
4382 *propvalue = propval;
4383 return (ret);
4384 }
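/*
 * For example (illustrative), if e1000g.conf sets NumRxFreeList above
 * MAX_NUM_RX_FREELIST, the lookup above succeeds but the value is clamped
 * to MAX_NUM_RX_FREELIST and a message is logged; if the property is
 * absent, the caller's default is used and B_FALSE is returned.
 */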
4385
4386 static boolean_t
4387 e1000g_link_check(struct e1000g *Adapter)
4388 {
4389 uint16_t speed, duplex, phydata;
4390 boolean_t link_changed = B_FALSE;
4391 struct e1000_hw *hw;
4392 uint32_t reg_tarc;
4393
4394 hw = &Adapter->shared;
4395
4396 if (e1000g_link_up(Adapter)) {
4397 /*
4398 * The link is up; check whether it was marked as down earlier
4399 */
4400 if (Adapter->link_state != LINK_STATE_UP) {
4401 (void) e1000_get_speed_and_duplex(hw, &speed, &duplex);
4402 Adapter->link_speed = speed;
4403 Adapter->link_duplex = duplex;
4404 Adapter->link_state = LINK_STATE_UP;
4405 link_changed = B_TRUE;
4406
4407 if (Adapter->link_speed == SPEED_1000)
4408 Adapter->stall_threshold = TX_STALL_TIME_2S;
4409 else
4410 Adapter->stall_threshold = TX_STALL_TIME_8S;
4411
4412 Adapter->tx_link_down_timeout = 0;
4413
4414 if ((hw->mac.type == e1000_82571) ||
4415 (hw->mac.type == e1000_82572)) {
4416 reg_tarc = E1000_READ_REG(hw, E1000_TARC(0));
4417 if (speed == SPEED_1000)
4418 reg_tarc |= (1 << 21);
4419 else
4420 reg_tarc &= ~(1 << 21);
4421 E1000_WRITE_REG(hw, E1000_TARC(0), reg_tarc);
4422 }
4423 }
4424 Adapter->smartspeed = 0;
4425 } else {
4426 if (Adapter->link_state != LINK_STATE_DOWN) {
4427 Adapter->link_speed = 0;
4428 Adapter->link_duplex = 0;
4429 Adapter->link_state = LINK_STATE_DOWN;
4430 link_changed = B_TRUE;
4431
4432 /*
4433 * SmartSpeed workaround for Tabor/TanaX: when the
4434 * driver loses link, disable auto master/slave
4435 * resolution.
4436 */
4437 if (hw->phy.type == e1000_phy_igp) {
4438 (void) e1000_read_phy_reg(hw,
4439 PHY_1000T_CTRL, &phydata);
4440 phydata |= CR_1000T_MS_ENABLE;
4441 (void) e1000_write_phy_reg(hw,
4442 PHY_1000T_CTRL, phydata);
4443 }
4444 } else {
4445 e1000g_smartspeed(Adapter);
4446 }
4447
4448 if (Adapter->e1000g_state & E1000G_STARTED) {
4449 if (Adapter->tx_link_down_timeout <
4450 MAX_TX_LINK_DOWN_TIMEOUT) {
4451 Adapter->tx_link_down_timeout++;
4452 } else if (Adapter->tx_link_down_timeout ==
4453 MAX_TX_LINK_DOWN_TIMEOUT) {
4454 e1000g_tx_clean(Adapter);
4455 Adapter->tx_link_down_timeout++;
4456 }
4457 }
4458 }
4459
4460 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4461 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4462
4463 return (link_changed);
4464 }
4465
4466 /*
4467 * e1000g_reset_link - Use the link properties to set up the link
4468 */
4469 int
4470 e1000g_reset_link(struct e1000g *Adapter)
4471 {
4472 struct e1000_mac_info *mac;
4473 struct e1000_phy_info *phy;
4474 struct e1000_hw *hw;
4475 boolean_t invalid;
4476
4477 mac = &Adapter->shared.mac;
4478 phy = &Adapter->shared.phy;
4479 hw = &Adapter->shared;
4480 invalid = B_FALSE;
4481
4482 if (hw->phy.media_type != e1000_media_type_copper)
4483 goto out;
4484
4485 if (Adapter->param_adv_autoneg == 1) {
4486 mac->autoneg = B_TRUE;
4487 phy->autoneg_advertised = 0;
4488
4489 /*
4490 * 1000hdx is not supported for autonegotiation
4491 */
4492 if (Adapter->param_adv_1000fdx == 1)
4493 phy->autoneg_advertised |= ADVERTISE_1000_FULL;
4494
4495 if (Adapter->param_adv_100fdx == 1)
4496 phy->autoneg_advertised |= ADVERTISE_100_FULL;
4497
4498 if (Adapter->param_adv_100hdx == 1)
4499 phy->autoneg_advertised |= ADVERTISE_100_HALF;
4500
4501 if (Adapter->param_adv_10fdx == 1)
4502 phy->autoneg_advertised |= ADVERTISE_10_FULL;
4503
4504 if (Adapter->param_adv_10hdx == 1)
4505 phy->autoneg_advertised |= ADVERTISE_10_HALF;
4506
4507 if (phy->autoneg_advertised == 0)
4508 invalid = B_TRUE;
4509 } else {
4510 mac->autoneg = B_FALSE;
4511
4512 /*
4513 * For Intel copper cards, 1000fdx and 1000hdx are not
4514 * supported for forced link
4515 */
4516 if (Adapter->param_adv_100fdx == 1)
4517 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4518 else if (Adapter->param_adv_100hdx == 1)
4519 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4520 else if (Adapter->param_adv_10fdx == 1)
4521 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4522 else if (Adapter->param_adv_10hdx == 1)
4523 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4524 else
4525 invalid = B_TRUE;
4526
4527 }
4528
4529 if (invalid) {
4530 e1000g_log(Adapter, CE_WARN,
4531 "Invalid link settings. Setup link to "
4532 "support autonegotiation with all link capabilities.");
4533 mac->autoneg = B_TRUE;
4534 phy->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
4535 }
4536
4537 out:
4538 return (e1000_setup_link(&Adapter->shared));
4539 }
4540
4541 static void
4542 e1000g_timer_tx_resched(struct e1000g *Adapter)
4543 {
4544 e1000g_tx_ring_t *tx_ring = Adapter->tx_ring;
4545
4546 rw_enter(&Adapter->chip_lock, RW_READER);
4547
4548 if (tx_ring->resched_needed &&
4549 ((ddi_get_lbolt() - tx_ring->resched_timestamp) >
4550 drv_usectohz(1000000)) &&
4551 (Adapter->e1000g_state & E1000G_STARTED) &&
4552 (tx_ring->tbd_avail >= DEFAULT_TX_NO_RESOURCE)) {
4553 tx_ring->resched_needed = B_FALSE;
4554 mac_tx_update(Adapter->mh);
4555 E1000G_STAT(tx_ring->stat_reschedule);
4556 E1000G_STAT(tx_ring->stat_timer_reschedule);
4557 }
4558
4559 rw_exit(&Adapter->chip_lock);
4560 }
4561
4562 static void
4563 e1000g_local_timer(void *ws)
4564 {
4565 struct e1000g *Adapter = (struct e1000g *)ws;
4566 struct e1000_hw *hw;
4567 e1000g_ether_addr_t ether_addr;
4568 boolean_t link_changed;
4569
4570 hw = &Adapter->shared;
4571
4572 if (Adapter->e1000g_state & E1000G_ERROR) {
4573 rw_enter(&Adapter->chip_lock, RW_WRITER);
4574 Adapter->e1000g_state &= ~E1000G_ERROR;
4575 rw_exit(&Adapter->chip_lock);
4576
4577 Adapter->reset_count++;
4578 if (e1000g_global_reset(Adapter)) {
4579 ddi_fm_service_impact(Adapter->dip,
4580 DDI_SERVICE_RESTORED);
4581 e1000g_timer_tx_resched(Adapter);
4582 } else
4583 ddi_fm_service_impact(Adapter->dip,
4584 DDI_SERVICE_LOST);
4585 return;
4586 }
4587
4588 if (e1000g_stall_check(Adapter)) {
4589 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
4590 "Tx stall detected. Activate automatic recovery.\n");
4591 e1000g_fm_ereport(Adapter, DDI_FM_DEVICE_STALL);
4592 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_LOST);
4593 Adapter->reset_count++;
4594 if (e1000g_reset_adapter(Adapter)) {
4595 ddi_fm_service_impact(Adapter->dip,
4596 DDI_SERVICE_RESTORED);
4597 e1000g_timer_tx_resched(Adapter);
4598 }
4599 return;
4600 }
4601
4602 link_changed = B_FALSE;
4603 rw_enter(&Adapter->chip_lock, RW_READER);
4604 if (Adapter->link_complete)
4605 link_changed = e1000g_link_check(Adapter);
4606 rw_exit(&Adapter->chip_lock);
4607
4608 if (link_changed) {
4609 if (!Adapter->reset_flag &&
4610 (Adapter->e1000g_state & E1000G_STARTED) &&
4611 !(Adapter->e1000g_state & E1000G_SUSPENDED))
4612 mac_link_update(Adapter->mh, Adapter->link_state);
4613 if (Adapter->link_state == LINK_STATE_UP)
4614 Adapter->reset_flag = B_FALSE;
4615 }
4616 /*
4617 * Workaround for esb2. Data stuck in fifo on a link
4618 * down event. Reset the adapter to recover it.
4619 */
4620 if (Adapter->esb2_workaround) {
4621 Adapter->esb2_workaround = B_FALSE;
4622 (void) e1000g_reset_adapter(Adapter);
4623 return;
4624 }
4625
4626 /*
4627 * With 82571 controllers, any locally administered address will
4628 * be overwritten when there is a reset on the other port.
4629 * Detect this circumstance and correct it.
4630 */
4631 if ((hw->mac.type == e1000_82571) &&
4632 (e1000_get_laa_state_82571(hw) == B_TRUE)) {
4633 ether_addr.reg.low = E1000_READ_REG_ARRAY(hw, E1000_RA, 0);
4634 ether_addr.reg.high = E1000_READ_REG_ARRAY(hw, E1000_RA, 1);
4635
4636 ether_addr.reg.low = ntohl(ether_addr.reg.low);
4637 ether_addr.reg.high = ntohl(ether_addr.reg.high);
4638
4639 if ((ether_addr.mac.addr[5] != hw->mac.addr[0]) ||
4640 (ether_addr.mac.addr[4] != hw->mac.addr[1]) ||
4641 (ether_addr.mac.addr[3] != hw->mac.addr[2]) ||
4642 (ether_addr.mac.addr[2] != hw->mac.addr[3]) ||
4643 (ether_addr.mac.addr[1] != hw->mac.addr[4]) ||
4644 (ether_addr.mac.addr[0] != hw->mac.addr[5])) {
4645 (void) e1000_rar_set(hw, hw->mac.addr, 0);
4646 }
4647 }
4648
4649 /*
4650 * Long TTL workaround for 82541/82547
4651 */
4652 (void) e1000_igp_ttl_workaround_82547(hw);
4653
4654 /*
4655 * Check for Adaptive IFS settings. If there are lots of collisions,
4656 * change the value in steps...
4657 * These properties should only be set for 10/100.
4658 */
4659 if ((hw->phy.media_type == e1000_media_type_copper) &&
4660 ((Adapter->link_speed == SPEED_100) ||
4661 (Adapter->link_speed == SPEED_10))) {
4662 e1000_update_adaptive(hw);
4663 }
4664 /*
4665 * Set Timer Interrupts
4666 */
4667 E1000_WRITE_REG(hw, E1000_ICS, E1000_IMS_RXT0);
4668
4669 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK)
4670 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
4671 else
4672 e1000g_timer_tx_resched(Adapter);
4673
4674 restart_watchdog_timer(Adapter);
4675 }
4676
4677 /*
4678 * The function e1000g_link_timer() is called when the timer for link setup
4679 * expires, which indicates the completion of the link setup. The link
4680 * state will not be updated until the link setup is completed, and the
4681 * link state will not be sent to the upper layer through mac_link_update()
4682 * in this function. It will be updated in the local timer routine or the
4683 * interrupt service routine after the interface is started (plumbed).
4684 */
4685 static void
4686 e1000g_link_timer(void *arg)
4687 {
4688 struct e1000g *Adapter = (struct e1000g *)arg;
4689
4690 mutex_enter(&Adapter->link_lock);
4691 Adapter->link_complete = B_TRUE;
4692 Adapter->link_tid = 0;
4693 mutex_exit(&Adapter->link_lock);
4694 }
4695
4696 /*
4697 * e1000g_force_speed_duplex - read forced speed/duplex out of e1000g.conf
4698 *
4699 * This function reads the forced speed and duplex settings for 10/100 Mbps
4700 * and 1000 Mbps operation from the e1000g.conf file.
4701 */
4702 static void
4703 e1000g_force_speed_duplex(struct e1000g *Adapter)
4704 {
4705 int forced;
4706 int propval;
4707 struct e1000_mac_info *mac = &Adapter->shared.mac;
4708 struct e1000_phy_info *phy = &Adapter->shared.phy;
4709
4710 /*
4711 * get value out of config file
4712 */
4713 (void) e1000g_get_prop(Adapter, "ForceSpeedDuplex",
4714 GDIAG_10_HALF, GDIAG_ANY, GDIAG_ANY, &forced);
4715
4716 switch (forced) {
4717 case GDIAG_10_HALF:
4718 /*
4719 * Disable Auto Negotiation
4720 */
4721 mac->autoneg = B_FALSE;
4722 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4723 break;
4724 case GDIAG_10_FULL:
4725 /*
4726 * Disable Auto Negotiation
4727 */
4728 mac->autoneg = B_FALSE;
4729 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4730 break;
4731 case GDIAG_100_HALF:
4732 /*
4733 * Disable Auto Negotiation
4734 */
4735 mac->autoneg = B_FALSE;
4736 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4737 break;
4738 case GDIAG_100_FULL:
4739 /*
4740 * Disable Auto Negotiation
4741 */
4742 mac->autoneg = B_FALSE;
4743 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4744 break;
4745 case GDIAG_1000_FULL:
4746 /*
4747 * The gigabit spec requires autonegotiation. Therefore,
4748 * when the user wants to force the speed to 1000Mbps, we
4749 * enable AutoNeg, but only allow the hardware to advertise
4750 * 1000Mbps. This is different from 10/100 operation, where
4751 * we are allowed to link without any negotiation.
4752 */
4753 mac->autoneg = B_TRUE;
4754 phy->autoneg_advertised = ADVERTISE_1000_FULL;
4755 break;
4756 default: /* obey the setting of AutoNegAdvertised */
4757 mac->autoneg = B_TRUE;
4758 (void) e1000g_get_prop(Adapter, "AutoNegAdvertised",
4759 0, AUTONEG_ADVERTISE_SPEED_DEFAULT,
4760 AUTONEG_ADVERTISE_SPEED_DEFAULT, &propval);
4761 phy->autoneg_advertised = (uint16_t)propval;
4762 break;
4763 } /* switch */
4764 }
4765
4766 /*
4767 * e1000g_get_max_frame_size - get jumbo frame setting from e1000g.conf
4768 *
4769 * This function reads MaxFrameSize from e1000g.conf
4770 */
4771 static void
4772 e1000g_get_max_frame_size(struct e1000g *Adapter)
4773 {
4774 int max_frame;
4775
4776 /*
4777 * get value out of config file
4778 */
4779 (void) e1000g_get_prop(Adapter, "MaxFrameSize", 0, 3, 0,
4780 &max_frame);
4781
4782 switch (max_frame) {
4783 case 0:
4784 Adapter->default_mtu = ETHERMTU;
4785 break;
4786 case 1:
4787 Adapter->default_mtu = FRAME_SIZE_UPTO_4K -
4788 sizeof (struct ether_vlan_header) - ETHERFCSL;
4789 break;
4790 case 2:
4791 Adapter->default_mtu = FRAME_SIZE_UPTO_8K -
4792 sizeof (struct ether_vlan_header) - ETHERFCSL;
4793 break;
4794 case 3:
4795 Adapter->default_mtu = FRAME_SIZE_UPTO_16K -
4796 sizeof (struct ether_vlan_header) - ETHERFCSL;
4797 break;
4798 default:
4799 Adapter->default_mtu = ETHERMTU;
4800 break;
4801 } /* switch */
4802
4803 /*
4804 * If the user-configured MTU is larger than the device's maximum MTU,
4805 * the MTU is set to the device's maximum value.
4806 */
4807 if (Adapter->default_mtu > Adapter->max_mtu)
4808 Adapter->default_mtu = Adapter->max_mtu;
4809
4810 Adapter->max_frame_size = e1000g_mtu2maxframe(Adapter->default_mtu);
4811 }
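/*
 * Summary of the MaxFrameSize mapping above: 0 (the default) selects the
 * standard 1500-byte MTU, while 1, 2 and 3 select jumbo MTUs derived from
 * the 4K, 8K and 16K frame-size limits minus the VLAN header and FCS.
 * The result is further capped at the device's max_mtu.
 */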
4812
4813 /*
4814 * e1000g_pch_limits - Apply limits of the PCH silicon type
4815 *
4816 * At any frame size larger than the ethernet default,
4817 * prevent linking at 10/100 speeds.
4818 */
4819 static void
4820 e1000g_pch_limits(struct e1000g *Adapter)
4821 {
4822 struct e1000_hw *hw = &Adapter->shared;
4823
4824 /* only applies to PCH silicon type */
4825 if (hw->mac.type != e1000_pchlan && hw->mac.type != e1000_pch2lan)
4826 return;
4827
4828 /* only applies to frames larger than ethernet default */
4829 if (Adapter->max_frame_size > DEFAULT_FRAME_SIZE) {
4830 hw->mac.autoneg = B_TRUE;
4831 hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
4832
4833 Adapter->param_adv_autoneg = 1;
4834 Adapter->param_adv_1000fdx = 1;
4835
4836 Adapter->param_adv_100fdx = 0;
4837 Adapter->param_adv_100hdx = 0;
4838 Adapter->param_adv_10fdx = 0;
4839 Adapter->param_adv_10hdx = 0;
4840
4841 e1000g_param_sync(Adapter);
4842 }
4843 }
4844
4845 /*
4846 * e1000g_mtu2maxframe - convert given MTU to maximum frame size
4847 */
4848 static uint32_t
4849 e1000g_mtu2maxframe(uint32_t mtu)
4850 {
4851 uint32_t maxframe;
4852
4853 maxframe = mtu + sizeof (struct ether_vlan_header) + ETHERFCSL;
4854
4855 return (maxframe);
4856 }
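/*
 * For example, with the standard 1500-byte MTU the maximum frame size is
 * 1500 + sizeof (struct ether_vlan_header) + ETHERFCSL, i.e.
 * 1500 + 18 + 4 = 1522 bytes.
 */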
4857
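/*
 * arm_watchdog_timer() schedules e1000g_local_timer() to run again after
 * one second; drv_usectohz(1000000) converts that interval to clock ticks.
 */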
4858 static void
4859 arm_watchdog_timer(struct e1000g *Adapter)
4860 {
4861 Adapter->watchdog_tid =
4862 timeout(e1000g_local_timer,
4863 (void *)Adapter, 1 * drv_usectohz(1000000));
4864 }
4865 #pragma inline(arm_watchdog_timer)
4866
4867 static void
4868 enable_watchdog_timer(struct e1000g *Adapter)
4869 {
4870 mutex_enter(&Adapter->watchdog_lock);
4871
4872 if (!Adapter->watchdog_timer_enabled) {
4873 Adapter->watchdog_timer_enabled = B_TRUE;
4874 Adapter->watchdog_timer_started = B_TRUE;
4875 arm_watchdog_timer(Adapter);
4876 }
4877
4878 mutex_exit(&Adapter->watchdog_lock);
4879 }
4880
4881 static void
4882 disable_watchdog_timer(struct e1000g *Adapter)
4883 {
4884 timeout_id_t tid;
4885
4886 mutex_enter(&Adapter->watchdog_lock);
4887
4888 Adapter->watchdog_timer_enabled = B_FALSE;
4889 Adapter->watchdog_timer_started = B_FALSE;
4890 tid = Adapter->watchdog_tid;
4891 Adapter->watchdog_tid = 0;
4892
4893 mutex_exit(&Adapter->watchdog_lock);
4894
4895 if (tid != 0)
4896 (void) untimeout(tid);
4897 }
4898
4899 static void
4900 start_watchdog_timer(struct e1000g *Adapter)
4901 {
4902 mutex_enter(&Adapter->watchdog_lock);
4903
4904 if (Adapter->watchdog_timer_enabled) {
4905 if (!Adapter->watchdog_timer_started) {
4906 Adapter->watchdog_timer_started = B_TRUE;
4907 arm_watchdog_timer(Adapter);
4908 }
4909 }
4910
4911 mutex_exit(&Adapter->watchdog_lock);
4912 }
4913
4914 static void
4915 restart_watchdog_timer(struct e1000g *Adapter)
4916 {
4917 mutex_enter(&Adapter->watchdog_lock);
4918
4919 if (Adapter->watchdog_timer_started)
4920 arm_watchdog_timer(Adapter);
4921
4922 mutex_exit(&Adapter->watchdog_lock);
4923 }
4924
4925 static void
4926 stop_watchdog_timer(struct e1000g *Adapter)
4927 {
4928 timeout_id_t tid;
4929
4930 mutex_enter(&Adapter->watchdog_lock);
4931
4932 Adapter->watchdog_timer_started = B_FALSE;
4933 tid = Adapter->watchdog_tid;
4934 Adapter->watchdog_tid = 0;
4935
4936 mutex_exit(&Adapter->watchdog_lock);
4937
4938 if (tid != 0)
4939 (void) untimeout(tid);
4940 }
4941
4942 static void
4943 stop_link_timer(struct e1000g *Adapter)
4944 {
4945 timeout_id_t tid;
4946
4947 /* Disable the link timer */
4948 mutex_enter(&Adapter->link_lock);
4949
4950 tid = Adapter->link_tid;
4951 Adapter->link_tid = 0;
4952
4953 mutex_exit(&Adapter->link_lock);
4954
4955 if (tid != 0)
4956 (void) untimeout(tid);
4957 }
4958
4959 static void
4960 stop_82547_timer(e1000g_tx_ring_t *tx_ring)
4961 {
4962 timeout_id_t tid;
4963
4964 /* Disable the tx timer for 82547 chipset */
4965 mutex_enter(&tx_ring->tx_lock);
4966
4967 tx_ring->timer_enable_82547 = B_FALSE;
4968 tid = tx_ring->timer_id_82547;
4969 tx_ring->timer_id_82547 = 0;
4970
4971 mutex_exit(&tx_ring->tx_lock);
4972
4973 if (tid != 0)
4974 (void) untimeout(tid);
4975 }
4976
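/*
 * Interrupt mask helpers: writing a bit to E1000_IMS enables (unmasks)
 * the corresponding interrupt cause, while writing the same bit to
 * E1000_IMC disables (masks) it.
 */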
4977 void
4978 e1000g_clear_interrupt(struct e1000g *Adapter)
4979 {
4980 E1000_WRITE_REG(&Adapter->shared, E1000_IMC,
4981 0xffffffff & ~E1000_IMS_RXSEQ);
4982 }
4983
4984 void
4985 e1000g_mask_interrupt(struct e1000g *Adapter)
4986 {
4987 E1000_WRITE_REG(&Adapter->shared, E1000_IMS,
4988 IMS_ENABLE_MASK & ~E1000_IMS_TXDW);
4989
4990 if (Adapter->tx_intr_enable)
4991 e1000g_mask_tx_interrupt(Adapter);
4992 }
4993
4994 /*
4995 * This routine is called by e1000g_quiesce(), therefore must not block.
4996 */
4997 void
4998 e1000g_clear_all_interrupts(struct e1000g *Adapter)
4999 {
5000 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, 0xffffffff);
5001 }
5002
5003 void
5004 e1000g_mask_tx_interrupt(struct e1000g *Adapter)
5005 {
5006 E1000_WRITE_REG(&Adapter->shared, E1000_IMS, E1000_IMS_TXDW);
5007 }
5008
5009 void
5010 e1000g_clear_tx_interrupt(struct e1000g *Adapter)
5011 {
5012 E1000_WRITE_REG(&Adapter->shared, E1000_IMC, E1000_IMS_TXDW);
5013 }
5014
5015 static void
5016 e1000g_smartspeed(struct e1000g *Adapter)
5017 {
5018 struct e1000_hw *hw = &Adapter->shared;
5019 uint16_t phy_status;
5020 uint16_t phy_ctrl;
5021
5022 /*
5023 * If we're not T-or-T, or we're not autoneg'ing, or we're not
5024 * advertising 1000Full, we don't even use the workaround
5025 */
5026 if ((hw->phy.type != e1000_phy_igp) ||
5027 !hw->mac.autoneg ||
5028 !(hw->phy.autoneg_advertised & ADVERTISE_1000_FULL))
5029 return;
5030
5031 /*
5032 * True if this is the first call of this function or after every
5033 * 30 seconds of not having link
5034 */
5035 if (Adapter->smartspeed == 0) {
5036 /*
5037 * If Master/Slave config fault is asserted twice, we
5038 * assume back-to-back
5039 */
5040 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
5041 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
5042 return;
5043
5044 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
5045 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
5046 return;
5047 /*
5048 * We're assuming back-to-back because our status register
5049 * insists there's a fault in the master/slave
5050 * relationship that was "negotiated".
5051 */
5052 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
5053 /*
5054 * Is the phy configured for manual configuration of
5055 * master/slave?
5056 */
5057 if (phy_ctrl & CR_1000T_MS_ENABLE) {
5058 /*
5059 * Yes. Then disable manual configuration (enable
5060 * auto configuration) of master/slave
5061 */
5062 phy_ctrl &= ~CR_1000T_MS_ENABLE;
5063 (void) e1000_write_phy_reg(hw,
5064 PHY_1000T_CTRL, phy_ctrl);
5065 /*
5066 * Effectively starting the clock
5067 */
5068 Adapter->smartspeed++;
5069 /*
5070 * Restart autonegotiation
5071 */
5072 if (!e1000_phy_setup_autoneg(hw) &&
5073 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
5074 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
5075 MII_CR_RESTART_AUTO_NEG);
5076 (void) e1000_write_phy_reg(hw,
5077 PHY_CONTROL, phy_ctrl);
5078 }
5079 }
5080 return;
5081 /*
5082 * Has 6 seconds transpired still without link? Remember,
5083 * you should reset the smartspeed counter once you obtain
5084 * link
5085 */
5086 } else if (Adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
5087 /*
5088 * Yes. Remember, we determined at the start that
5089 * there's a master/slave configuration fault, so we're
5090 * still assuming there's someone on the other end, but we
5091 * just haven't yet been able to talk to it. We then
5092 * re-enable auto configuration of master/slave to see if
5093 * we're running 2/3 pair cables.
5094 */
5095 /*
5096 * If still no link, perhaps using 2/3 pair cable
5097 */
5098 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
5099 phy_ctrl |= CR_1000T_MS_ENABLE;
5100 (void) e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
5101 /*
5102 * Restart autoneg with phy enabled for manual
5103 * configuration of master/slave
5104 */
5105 if (!e1000_phy_setup_autoneg(hw) &&
5106 !e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl)) {
5107 phy_ctrl |=
5108 (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
5109 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
5110 }
5111 /*
5112 * Hopefully, there are no more faults and we've obtained
5113 * link as a result.
5114 */
5115 }
5116 /*
5117 * Restart process after E1000_SMARTSPEED_MAX iterations (30
5118 * seconds)
5119 */
5120 if (Adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
5121 Adapter->smartspeed = 0;
5122 }
5123
5124 static boolean_t
5125 is_valid_mac_addr(uint8_t *mac_addr)
5126 {
5127 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
5128 const uint8_t addr_test2[6] =
5129 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
5130
5131 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
5132 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
5133 return (B_FALSE);
5134
5135 return (B_TRUE);
5136 }
5137
5138 /*
5139 * e1000g_stall_check - check for tx stall
5140 *
5141 * This function checks if the adapter is stalled (in transmit).
5142 *
5143 * It is called each time the watchdog timeout is invoked.
5144 * If the transmit descriptor reclaim continuously fails,
5145 * the watchdog value will increment by 1. If the watchdog
5146 * value exceeds the threshold, the adapter is assumed to
5147 * have stalled and needs to be reset.
5148 */
5149 static boolean_t
5150 e1000g_stall_check(struct e1000g *Adapter)
5151 {
5152 e1000g_tx_ring_t *tx_ring;
5153
5154 tx_ring = Adapter->tx_ring;
5155
5156 if (Adapter->link_state != LINK_STATE_UP)
5157 return (B_FALSE);
5158
5159 (void) e1000g_recycle(tx_ring);
5160
5161 if (Adapter->stall_flag)
5162 return (B_TRUE);
5163
5164 return (B_FALSE);
5165 }
5166
5167 #ifdef E1000G_DEBUG
5168 static enum ioc_reply
5169 e1000g_pp_ioctl(struct e1000g *e1000gp, struct iocblk *iocp, mblk_t *mp)
5170 {
5171 void (*ppfn)(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd);
5172 e1000g_peekpoke_t *ppd;
5173 uint64_t mem_va;
5174 uint64_t maxoff;
5175 boolean_t peek;
5176
5177 switch (iocp->ioc_cmd) {
5178
5179 case E1000G_IOC_REG_PEEK:
5180 peek = B_TRUE;
5181 break;
5182
5183 case E1000G_IOC_REG_POKE:
5184 peek = B_FALSE;
5185 break;
5186
5187 default:
5188 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
5189 "e1000g_diag_ioctl: invalid ioctl command 0x%X\n",
5190 iocp->ioc_cmd);
5191 return (IOC_INVAL);
5192 }
5193
5194 /*
5195 * Validate format of ioctl
5196 */
5197 if (iocp->ioc_count != sizeof (e1000g_peekpoke_t))
5198 return (IOC_INVAL);
5199 if (mp->b_cont == NULL)
5200 return (IOC_INVAL);
5201
5202 ppd = (e1000g_peekpoke_t *)(uintptr_t)mp->b_cont->b_rptr;
5203
5204 /*
5205 * Validate request parameters
5206 */
5207 switch (ppd->pp_acc_space) {
5208
5209 default:
5210 E1000G_DEBUGLOG_1(e1000gp, E1000G_INFO_LEVEL,
5211 "e1000g_diag_ioctl: invalid access space 0x%X\n",
5212 ppd->pp_acc_space);
5213 return (IOC_INVAL);
5214
5215 case E1000G_PP_SPACE_REG:
5216 /*
5217 * Memory-mapped I/O space
5218 */
5219 ASSERT(ppd->pp_acc_size == 4);
5220 if (ppd->pp_acc_size != 4)
5221 return (IOC_INVAL);
5222
5223 if ((ppd->pp_acc_offset % ppd->pp_acc_size) != 0)
5224 return (IOC_INVAL);
5225
5226 mem_va = 0;
5227 maxoff = 0x10000;
5228 ppfn = peek ? e1000g_ioc_peek_reg : e1000g_ioc_poke_reg;
5229 break;
5230
5231 case E1000G_PP_SPACE_E1000G:
5232 /*
5233 * E1000g data structure!
5234 */
5235 mem_va = (uintptr_t)e1000gp;
5236 maxoff = sizeof (struct e1000g);
5237 ppfn = peek ? e1000g_ioc_peek_mem : e1000g_ioc_poke_mem;
5238 break;
5239
5240 }
5241
5242 if (ppd->pp_acc_offset >= maxoff)
5243 return (IOC_INVAL);
5244
5245 if (ppd->pp_acc_offset + ppd->pp_acc_size > maxoff)
5246 return (IOC_INVAL);
5247
5248 /*
5249 * All OK - go!
5250 */
5251 ppd->pp_acc_offset += mem_va;
5252 (*ppfn)(e1000gp, ppd);
5253 return (peek ? IOC_REPLY : IOC_ACK);
5254 }
5255
5256 static void
5257 e1000g_ioc_peek_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5258 {
5259 ddi_acc_handle_t handle;
5260 uint32_t *regaddr;
5261
5262 handle = e1000gp->osdep.reg_handle;
5263 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5264 (uintptr_t)ppd->pp_acc_offset);
5265
5266 ppd->pp_acc_data = ddi_get32(handle, regaddr);
5267 }
5268
5269 static void
5270 e1000g_ioc_poke_reg(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5271 {
5272 ddi_acc_handle_t handle;
5273 uint32_t *regaddr;
5274 uint32_t value;
5275
5276 handle = e1000gp->osdep.reg_handle;
5277 regaddr = (uint32_t *)((uintptr_t)e1000gp->shared.hw_addr +
5278 (uintptr_t)ppd->pp_acc_offset);
5279 value = (uint32_t)ppd->pp_acc_data;
5280
5281 ddi_put32(handle, regaddr, value);
5282 }
5283
5284 static void
5285 e1000g_ioc_peek_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5286 {
5287 uint64_t value;
5288 void *vaddr;
5289
5290 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5291
5292 switch (ppd->pp_acc_size) {
5293 case 1:
5294 value = *(uint8_t *)vaddr;
5295 break;
5296
5297 case 2:
5298 value = *(uint16_t *)vaddr;
5299 break;
5300
5301 case 4:
5302 value = *(uint32_t *)vaddr;
5303 break;
5304
5305 case 8:
5306 value = *(uint64_t *)vaddr;
5307 break;
5308 }
5309
5310 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5311 "e1000g_ioc_peek_mem($%p, $%p) peeked 0x%llx from $%p\n",
5312 (void *)e1000gp, (void *)ppd, value, vaddr);
5313
5314 ppd->pp_acc_data = value;
5315 }
5316
5317 static void
5318 e1000g_ioc_poke_mem(struct e1000g *e1000gp, e1000g_peekpoke_t *ppd)
5319 {
5320 uint64_t value;
5321 void *vaddr;
5322
5323 vaddr = (void *)(uintptr_t)ppd->pp_acc_offset;
5324 value = ppd->pp_acc_data;
5325
5326 E1000G_DEBUGLOG_4(e1000gp, E1000G_INFO_LEVEL,
5327 "e1000g_ioc_poke_mem($%p, $%p) poking 0x%llx at $%p\n",
5328 (void *)e1000gp, (void *)ppd, value, vaddr);
5329
5330 switch (ppd->pp_acc_size) {
5331 case 1:
5332 *(uint8_t *)vaddr = (uint8_t)value;
5333 break;
5334
5335 case 2:
5336 *(uint16_t *)vaddr = (uint16_t)value;
5337 break;
5338
5339 case 4:
5340 *(uint32_t *)vaddr = (uint32_t)value;
5341 break;
5342
5343 case 8:
5344 *(uint64_t *)vaddr = (uint64_t)value;
5345 break;
5346 }
5347 }
5348 #endif
5349
5350 /*
5351 * Loopback Support
5352 */
5353 static lb_property_t lb_normal =
5354 { normal, "normal", E1000G_LB_NONE };
5355 static lb_property_t lb_external1000 =
5356 { external, "1000Mbps", E1000G_LB_EXTERNAL_1000 };
5357 static lb_property_t lb_external100 =
5358 { external, "100Mbps", E1000G_LB_EXTERNAL_100 };
5359 static lb_property_t lb_external10 =
5360 { external, "10Mbps", E1000G_LB_EXTERNAL_10 };
5361 static lb_property_t lb_phy =
5362 { internal, "PHY", E1000G_LB_INTERNAL_PHY };
5363
5364 static enum ioc_reply
5365 e1000g_loopback_ioctl(struct e1000g *Adapter, struct iocblk *iocp, mblk_t *mp)
5366 {
5367 lb_info_sz_t *lbsp;
5368 lb_property_t *lbpp;
5369 struct e1000_hw *hw;
5370 uint32_t *lbmp;
5371 uint32_t size;
5372 uint32_t value;
5373
5374 hw = &Adapter->shared;
5375
5376 if (mp->b_cont == NULL)
5377 return (IOC_INVAL);
5378
5379 if (!e1000g_check_loopback_support(hw)) {
5380 e1000g_log(NULL, CE_WARN,
5381 "Loopback is not supported on e1000g%d", Adapter->instance);
5382 return (IOC_INVAL);
5383 }
5384
5385 switch (iocp->ioc_cmd) {
5386 default:
5387 return (IOC_INVAL);
5388
5389 case LB_GET_INFO_SIZE:
5390 size = sizeof (lb_info_sz_t);
5391 if (iocp->ioc_count != size)
5392 return (IOC_INVAL);
5393
5394 rw_enter(&Adapter->chip_lock, RW_WRITER);
5395 e1000g_get_phy_state(Adapter);
5396
5397 /*
5398 * Workaround for hardware faults. In order to get a stable
5399 * state of phy, we will wait for a specific interval and
5400 * try again. The time delay is an empirical value based
5401 * on our testing.
5402 */
5403 msec_delay(100);
5404 e1000g_get_phy_state(Adapter);
5405 rw_exit(&Adapter->chip_lock);
5406
5407 value = sizeof (lb_normal);
5408 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5409 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5410 (hw->phy.media_type == e1000_media_type_fiber) ||
5411 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5412 value += sizeof (lb_phy);
5413 switch (hw->mac.type) {
5414 case e1000_82571:
5415 case e1000_82572:
5416 case e1000_80003es2lan:
5417 value += sizeof (lb_external1000);
5418 break;
5419 }
5420 }
5421 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5422 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5423 value += sizeof (lb_external100);
5424 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5425 value += sizeof (lb_external10);
5426
5427 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
5428 *lbsp = value;
5429 break;
5430
5431 case LB_GET_INFO:
5432 value = sizeof (lb_normal);
5433 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5434 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5435 (hw->phy.media_type == e1000_media_type_fiber) ||
5436 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5437 value += sizeof (lb_phy);
5438 switch (hw->mac.type) {
5439 case e1000_82571:
5440 case e1000_82572:
5441 case e1000_80003es2lan:
5442 value += sizeof (lb_external1000);
5443 break;
5444 }
5445 }
5446 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5447 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5448 value += sizeof (lb_external100);
5449 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5450 value += sizeof (lb_external10);
5451
5452 size = value;
5453 if (iocp->ioc_count != size)
5454 return (IOC_INVAL);
5455
5456 value = 0;
5457 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
5458 lbpp[value++] = lb_normal;
5459 if ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
5460 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS) ||
5461 (hw->phy.media_type == e1000_media_type_fiber) ||
5462 (hw->phy.media_type == e1000_media_type_internal_serdes)) {
5463 lbpp[value++] = lb_phy;
5464 switch (hw->mac.type) {
5465 case e1000_82571:
5466 case e1000_82572:
5467 case e1000_80003es2lan:
5468 lbpp[value++] = lb_external1000;
5469 break;
5470 }
5471 }
5472 if ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
5473 (Adapter->phy_status & MII_SR_100T2_FD_CAPS))
5474 lbpp[value++] = lb_external100;
5475 if (Adapter->phy_status & MII_SR_10T_FD_CAPS)
5476 lbpp[value++] = lb_external10;
5477 break;
5478
5479 case LB_GET_MODE:
5480 size = sizeof (uint32_t);
5481 if (iocp->ioc_count != size)
5482 return (IOC_INVAL);
5483
5484 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5485 *lbmp = Adapter->loopback_mode;
5486 break;
5487
5488 case LB_SET_MODE:
5489 size = 0;
5490 if (iocp->ioc_count != sizeof (uint32_t))
5491 return (IOC_INVAL);
5492
5493 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
5494 if (!e1000g_set_loopback_mode(Adapter, *lbmp))
5495 return (IOC_INVAL);
5496 break;
5497 }
5498
5499 iocp->ioc_count = size;
5500 iocp->ioc_error = 0;
5501
5502 if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
5503 ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
5504 return (IOC_INVAL);
5505 }
5506
5507 return (IOC_REPLY);
5508 }
5509
5510 static boolean_t
5511 e1000g_check_loopback_support(struct e1000_hw *hw)
5512 {
5513 switch (hw->mac.type) {
5514 case e1000_82540:
5515 case e1000_82545:
5516 case e1000_82545_rev_3:
5517 case e1000_82546:
5518 case e1000_82546_rev_3:
5519 case e1000_82541:
5520 case e1000_82541_rev_2:
5521 case e1000_82547:
5522 case e1000_82547_rev_2:
5523 case e1000_82571:
5524 case e1000_82572:
5525 case e1000_82573:
5526 case e1000_82574:
5527 case e1000_80003es2lan:
5528 case e1000_ich9lan:
5529 case e1000_ich10lan:
5530 return (B_TRUE);
5531 }
5532 return (B_FALSE);
5533 }
5534
5535 static boolean_t
5536 e1000g_set_loopback_mode(struct e1000g *Adapter, uint32_t mode)
5537 {
5538 struct e1000_hw *hw;
5539 int i, times;
5540 boolean_t link_up;
5541
5542 if (mode == Adapter->loopback_mode)
5543 return (B_TRUE);
5544
5545 hw = &Adapter->shared;
5546 times = 0;
5547
5548 Adapter->loopback_mode = mode;
5549
5550 if (mode == E1000G_LB_NONE) {
5551 /* Reset the chip */
5552 hw->phy.autoneg_wait_to_complete = B_TRUE;
5553 (void) e1000g_reset_adapter(Adapter);
5554 hw->phy.autoneg_wait_to_complete = B_FALSE;
5555 return (B_TRUE);
5556 }
5557
5558 again:
5559
5560 rw_enter(&Adapter->chip_lock, RW_WRITER);
5561
5562 switch (mode) {
5563 default:
5564 rw_exit(&Adapter->chip_lock);
5565 return (B_FALSE);
5566
5567 case E1000G_LB_EXTERNAL_1000:
5568 e1000g_set_external_loopback_1000(Adapter);
5569 break;
5570
5571 case E1000G_LB_EXTERNAL_100:
5572 e1000g_set_external_loopback_100(Adapter);
5573 break;
5574
5575 case E1000G_LB_EXTERNAL_10:
5576 e1000g_set_external_loopback_10(Adapter);
5577 break;
5578
5579 case E1000G_LB_INTERNAL_PHY:
5580 e1000g_set_internal_loopback(Adapter);
5581 break;
5582 }
5583
5584 times++;
5585
5586 rw_exit(&Adapter->chip_lock);
5587
5588 /* Wait for link up */
5589 for (i = (PHY_FORCE_LIMIT * 2); i > 0; i--)
5590 msec_delay(100);
5591
5592 rw_enter(&Adapter->chip_lock, RW_WRITER);
5593
5594 link_up = e1000g_link_up(Adapter);
5595
5596 rw_exit(&Adapter->chip_lock);
5597
5598 if (!link_up) {
5599 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5600 "Failed to get the link up");
5601 if (times < 2) {
5602 /* Reset the link */
5603 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5604 "Reset the link ...");
5605 (void) e1000g_reset_adapter(Adapter);
5606 goto again;
5607 }
5608
5609 /*
5610 * Reset the driver to loopback none when setting the loopback
5611 * mode has failed for the second time.
5612 */
5613 Adapter->loopback_mode = E1000G_LB_NONE;
5614
5615 /* Reset the chip */
5616 hw->phy.autoneg_wait_to_complete = B_TRUE;
5617 (void) e1000g_reset_adapter(Adapter);
5618 hw->phy.autoneg_wait_to_complete = B_FALSE;
5619
5620 E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
5621 "Set loopback mode failed, reset to loopback none");
5622
5623 return (B_FALSE);
5624 }
5625
5626 return (B_TRUE);
5627 }
5628
5629 /*
5630 * The following loopback settings are from Intel's technical
5631 * document - "How To Loopback". All the register settings and
5632 * time delay values are directly inherited from the document
5633 * without further explanation.
5634 */
5635 static void
5636 e1000g_set_internal_loopback(struct e1000g *Adapter)
5637 {
5638 struct e1000_hw *hw;
5639 uint32_t ctrl;
5640 uint32_t status;
5641 uint16_t phy_ctrl;
5642 uint16_t phy_reg;
5643 uint32_t txcw;
5644
5645 hw = &Adapter->shared;
5646
5647 /* Disable Smart Power Down */
5648 phy_spd_state(hw, B_FALSE);
5649
5650 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
5651 phy_ctrl &= ~(MII_CR_AUTO_NEG_EN | MII_CR_SPEED_100 | MII_CR_SPEED_10);
5652 phy_ctrl |= MII_CR_FULL_DUPLEX | MII_CR_SPEED_1000;
5653
5654 switch (hw->mac.type) {
5655 case e1000_82540:
5656 case e1000_82545:
5657 case e1000_82545_rev_3:
5658 case e1000_82546:
5659 case e1000_82546_rev_3:
5660 case e1000_82573:
5661 /* Auto-MDI/MDIX off */
5662 (void) e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
5663 /* Reset PHY to update Auto-MDI/MDIX */
5664 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5665 phy_ctrl | MII_CR_RESET | MII_CR_AUTO_NEG_EN);
5666 /* Reset PHY to auto-neg off and force 1000 */
5667 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5668 phy_ctrl | MII_CR_RESET);
5669 /*
5670 * Disable PHY receiver for 82540/545/546 and 82573 Family.
5671 * See comments above e1000g_set_internal_loopback() for the
5672 * background.
5673 */
5674 (void) e1000_write_phy_reg(hw, 29, 0x001F);
5675 (void) e1000_write_phy_reg(hw, 30, 0x8FFC);
5676 (void) e1000_write_phy_reg(hw, 29, 0x001A);
5677 (void) e1000_write_phy_reg(hw, 30, 0x8FF0);
5678 break;
5679 case e1000_80003es2lan:
5680 /* Force Link Up */
5681 (void) e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
5682 0x1CC);
5683 /* Sets PCS loopback at 1Gbs */
5684 (void) e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
5685 0x1046);
5686 break;
5687 }
5688
5689 /*
5690 * The following registers should be set for e1000_phy_bm phy type.
5691 * e1000_82574, e1000_ich10lan and some e1000_ich9lan use this phy.
5692 * For others, we do not need to set these registers.
5693 */
5694 if (hw->phy.type == e1000_phy_bm) {
5695 /* Set Default MAC Interface speed to 1GB */
5696 (void) e1000_read_phy_reg(hw, PHY_REG(2, 21), &phy_reg);
5697 phy_reg &= ~0x0007;
5698 phy_reg |= 0x006;
5699 (void) e1000_write_phy_reg(hw, PHY_REG(2, 21), phy_reg);
5700 /* Assert SW reset for above settings to take effect */
5701 (void) e1000_phy_commit(hw);
5702 msec_delay(1);
5703 /* Force Full Duplex */
5704 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5705 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5706 phy_reg | 0x000C);
5707 /* Set Link Up (in force link) */
5708 (void) e1000_read_phy_reg(hw, PHY_REG(776, 16), &phy_reg);
5709 (void) e1000_write_phy_reg(hw, PHY_REG(776, 16),
5710 phy_reg | 0x0040);
5711 /* Force Link */
5712 (void) e1000_read_phy_reg(hw, PHY_REG(769, 16), &phy_reg);
5713 (void) e1000_write_phy_reg(hw, PHY_REG(769, 16),
5714 phy_reg | 0x0040);
5715 /* Set Early Link Enable */
5716 (void) e1000_read_phy_reg(hw, PHY_REG(769, 20), &phy_reg);
5717 (void) e1000_write_phy_reg(hw, PHY_REG(769, 20),
5718 phy_reg | 0x0400);
5719 }
5720
5721 /* Set loopback */
5722 (void) e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl | MII_CR_LOOPBACK);
5723
5724 msec_delay(250);
5725
5726 /* Now set up the MAC to the same speed/duplex as the PHY. */
5727 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5728 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5729 ctrl |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5730 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5731 E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */
5732 E1000_CTRL_FD); /* Force Duplex to FULL */
5733
5734 switch (hw->mac.type) {
5735 case e1000_82540:
5736 case e1000_82545:
5737 case e1000_82545_rev_3:
5738 case e1000_82546:
5739 case e1000_82546_rev_3:
5740 /*
5741 * For some serdes we'll need to commit the writes now
5742 * so that the status is updated on link
5743 */
5744 if (hw->phy.media_type == e1000_media_type_internal_serdes) {
5745 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5746 msec_delay(100);
5747 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5748 }
5749
5750 if (hw->phy.media_type == e1000_media_type_copper) {
5751 /* Invert Loss of Signal */
5752 ctrl |= E1000_CTRL_ILOS;
5753 } else {
5754 /* Set ILOS on fiber nic if half duplex is detected */
5755 status = E1000_READ_REG(hw, E1000_STATUS);
5756 if ((status & E1000_STATUS_FD) == 0)
5757 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5758 }
5759 break;
5760
5761 case e1000_82571:
5762 case e1000_82572:
5763 /*
5764 * The fiber/SerDes versions of this adapter do not contain an
5765 * accessible PHY. Therefore, loopback beyond MAC must be done
5766 * using SerDes analog loopback.
5767 */
5768 if (hw->phy.media_type != e1000_media_type_copper) {
5769 /* Disable autoneg by setting bit 31 of TXCW to zero */
5770 txcw = E1000_READ_REG(hw, E1000_TXCW);
5771 txcw &= ~((uint32_t)1 << 31);
5772 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5773
5774 /*
5775 * Write 0x410 to Serdes Control register
5776 * to enable Serdes analog loopback
5777 */
5778 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5779 msec_delay(10);
5780 }
5781
5782 status = E1000_READ_REG(hw, E1000_STATUS);
5783 /* Set ILOS on fiber nic if half duplex is detected */
5784 if ((hw->phy.media_type == e1000_media_type_fiber) &&
5785 ((status & E1000_STATUS_FD) == 0 ||
5786 (status & E1000_STATUS_LU) == 0))
5787 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5788 else if (hw->phy.media_type == e1000_media_type_internal_serdes)
5789 ctrl |= E1000_CTRL_SLU;
5790 break;
5791
5792 case e1000_82573:
5793 ctrl |= E1000_CTRL_ILOS;
5794 break;
5795 case e1000_ich9lan:
5796 case e1000_ich10lan:
5797 ctrl |= E1000_CTRL_SLU;
5798 break;
5799 }
5800 if (hw->phy.type == e1000_phy_bm)
5801 ctrl |= E1000_CTRL_SLU | E1000_CTRL_ILOS;
5802
5803 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5804 }
5805
5806 static void
5807 e1000g_set_external_loopback_1000(struct e1000g *Adapter)
5808 {
5809 struct e1000_hw *hw;
5810 uint32_t rctl;
5811 uint32_t ctrl_ext;
5812 uint32_t ctrl;
5813 uint32_t status;
5814 uint32_t txcw;
5815 uint16_t phydata;
5816
5817 hw = &Adapter->shared;
5818
5819 /* Disable Smart Power Down */
5820 phy_spd_state(hw, B_FALSE);
5821
5822 switch (hw->mac.type) {
5823 case e1000_82571:
5824 case e1000_82572:
5825 switch (hw->phy.media_type) {
5826 case e1000_media_type_copper:
5827 /* Force link up (Must be done before the PHY writes) */
5828 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5829 ctrl |= E1000_CTRL_SLU; /* Force Link Up */
5830 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5831
5832 rctl = E1000_READ_REG(hw, E1000_RCTL);
5833 rctl |= (E1000_RCTL_EN |
5834 E1000_RCTL_SBP |
5835 E1000_RCTL_UPE |
5836 E1000_RCTL_MPE |
5837 E1000_RCTL_LPE |
5838 E1000_RCTL_BAM); /* 0x803E */
5839 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
5840
5841 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5842 ctrl_ext |= (E1000_CTRL_EXT_SDP4_DATA |
5843 E1000_CTRL_EXT_SDP6_DATA |
5844 E1000_CTRL_EXT_SDP3_DATA |
5845 E1000_CTRL_EXT_SDP4_DIR |
5846 E1000_CTRL_EXT_SDP6_DIR |
5847 E1000_CTRL_EXT_SDP3_DIR); /* 0x0DD0 */
5848 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5849
			/*
			 * This sequence tunes the PHY's SDP and contains no
			 * customer-settable values. For background, see the
			 * comments above e1000g_set_internal_loopback().
			 */
5855 (void) e1000_write_phy_reg(hw, 0x0, 0x140);
5856 msec_delay(10);
5857 (void) e1000_write_phy_reg(hw, 0x9, 0x1A00);
5858 (void) e1000_write_phy_reg(hw, 0x12, 0xC10);
5859 (void) e1000_write_phy_reg(hw, 0x12, 0x1C10);
5860 (void) e1000_write_phy_reg(hw, 0x1F37, 0x76);
5861 (void) e1000_write_phy_reg(hw, 0x1F33, 0x1);
5862 (void) e1000_write_phy_reg(hw, 0x1F33, 0x0);
5863
5864 (void) e1000_write_phy_reg(hw, 0x1F35, 0x65);
5865 (void) e1000_write_phy_reg(hw, 0x1837, 0x3F7C);
5866 (void) e1000_write_phy_reg(hw, 0x1437, 0x3FDC);
5867 (void) e1000_write_phy_reg(hw, 0x1237, 0x3F7C);
5868 (void) e1000_write_phy_reg(hw, 0x1137, 0x3FDC);
5869
5870 msec_delay(50);
5871 break;
5872 case e1000_media_type_fiber:
5873 case e1000_media_type_internal_serdes:
5874 status = E1000_READ_REG(hw, E1000_STATUS);
5875 if (((status & E1000_STATUS_LU) == 0) ||
5876 (hw->phy.media_type ==
5877 e1000_media_type_internal_serdes)) {
5878 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5879 ctrl |= E1000_CTRL_ILOS | E1000_CTRL_SLU;
5880 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5881 }
5882
5883 /* Disable autoneg by setting bit 31 of TXCW to zero */
5884 txcw = E1000_READ_REG(hw, E1000_TXCW);
5885 txcw &= ~((uint32_t)1 << 31);
5886 E1000_WRITE_REG(hw, E1000_TXCW, txcw);
5887
5888 /*
5889 * Write 0x410 to Serdes Control register
5890 * to enable Serdes analog loopback
5891 */
5892 E1000_WRITE_REG(hw, E1000_SCTL, 0x0410);
5893 msec_delay(10);
5894 break;
5895 default:
5896 break;
5897 }
5898 break;
5899 case e1000_82574:
5900 case e1000_80003es2lan:
5901 case e1000_ich9lan:
5902 case e1000_ich10lan:
5903 (void) e1000_read_phy_reg(hw, GG82563_REG(6, 16), &phydata);
5904 (void) e1000_write_phy_reg(hw, GG82563_REG(6, 16),
5905 phydata | (1 << 5));
5906 Adapter->param_adv_autoneg = 1;
5907 Adapter->param_adv_1000fdx = 1;
5908 (void) e1000g_reset_link(Adapter);
5909 break;
5910 }
5911 }
5912
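/*
 * e1000g_set_external_loopback_100 - force the PHY to 100Mbps full duplex
 * and set up the MAC to match, for external loopback testing.
 */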
5913 static void
5914 e1000g_set_external_loopback_100(struct e1000g *Adapter)
5915 {
5916 struct e1000_hw *hw;
5917 uint32_t ctrl;
5918 uint16_t phy_ctrl;
5919
5920 hw = &Adapter->shared;
5921
5922 /* Disable Smart Power Down */
5923 phy_spd_state(hw, B_FALSE);
5924
5925 phy_ctrl = (MII_CR_FULL_DUPLEX |
5926 MII_CR_SPEED_100);
5927
5928 /* Force 100/FD, reset PHY */
5929 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5930 phy_ctrl | MII_CR_RESET); /* 0xA100 */
5931 msec_delay(10);
5932
5933 /* Force 100/FD */
5934 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5935 phy_ctrl); /* 0x2100 */
5936 msec_delay(10);
5937
5938 /* Now setup the MAC to the same speed/duplex as the PHY. */
5939 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5940 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5941 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */
5942 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5943 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5944 E1000_CTRL_SPD_100 | /* Force Speed to 100 */
5945 E1000_CTRL_FD); /* Force Duplex to FULL */
5946
5947 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5948 }
5949
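/*
 * e1000g_set_external_loopback_10 - force the PHY to 10Mbps full duplex
 * and set up the MAC to match, for external loopback testing.
 */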
5950 static void
5951 e1000g_set_external_loopback_10(struct e1000g *Adapter)
5952 {
5953 struct e1000_hw *hw;
5954 uint32_t ctrl;
5955 uint16_t phy_ctrl;
5956
5957 hw = &Adapter->shared;
5958
5959 /* Disable Smart Power Down */
5960 phy_spd_state(hw, B_FALSE);
5961
5962 phy_ctrl = (MII_CR_FULL_DUPLEX |
5963 MII_CR_SPEED_10);
5964
5965 /* Force 10/FD, reset PHY */
5966 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5967 phy_ctrl | MII_CR_RESET); /* 0x8100 */
5968 msec_delay(10);
5969
5970 /* Force 10/FD */
5971 (void) e1000_write_phy_reg(hw, PHY_CONTROL,
5972 phy_ctrl); /* 0x0100 */
5973 msec_delay(10);
5974
5975 /* Now setup the MAC to the same speed/duplex as the PHY. */
5976 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5977 ctrl &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
5978 ctrl |= (E1000_CTRL_SLU | /* Force Link Up */
5979 E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
5980 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
5981 E1000_CTRL_SPD_10 | /* Force Speed to 10 */
5982 E1000_CTRL_FD); /* Force Duplex to FULL */
5983
5984 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5985 }
5986
5987 #ifdef __sparc
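/*
 * e1000g_find_mac_address - on SPARC, check the "local-mac-address",
 * "local-mac-address?" and "mac-address" OBP properties for a MAC address
 * that should be used instead of the vendor's factory-set address.
 */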
5988 static boolean_t
5989 e1000g_find_mac_address(struct e1000g *Adapter)
5990 {
5991 struct e1000_hw *hw = &Adapter->shared;
5992 uchar_t *bytes;
5993 struct ether_addr sysaddr;
5994 uint_t nelts;
5995 int err;
5996 boolean_t found = B_FALSE;
5997
5998 /*
5999 * The "vendor's factory-set address" may already have
6000 * been extracted from the chip, but if the property
6001 * "local-mac-address" is set we use that instead.
6002 *
6003 * We check whether it looks like an array of 6
6004 * bytes (which it should, if OBP set it). If we can't
6005 * make sense of it this way, we'll ignore it.
6006 */
6007 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
6008 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
6009 if (err == DDI_PROP_SUCCESS) {
6010 if (nelts == ETHERADDRL) {
6011 while (nelts--)
6012 hw->mac.addr[nelts] = bytes[nelts];
6013 found = B_TRUE;
6014 }
6015 ddi_prop_free(bytes);
6016 }
6017
6018 /*
6019 * Look up the OBP property "local-mac-address?". If the user has set
6020 * 'local-mac-address? = false', use "the system address" instead.
6021 */
6022 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip, 0,
6023 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
6024 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
6025 if (localetheraddr(NULL, &sysaddr) != 0) {
6026 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
6027 found = B_TRUE;
6028 }
6029 }
6030 ddi_prop_free(bytes);
6031 }
6032
6033 /*
6034 * Finally(!), if there's a valid "mac-address" property (created
6035 * if we netbooted from this interface), we must use this instead
6036 * of any of the above to ensure that the NFS/install server doesn't
6037 * get confused by the address changing as Solaris takes over!
6038 */
6039 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, Adapter->dip,
6040 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
6041 if (err == DDI_PROP_SUCCESS) {
6042 if (nelts == ETHERADDRL) {
6043 while (nelts--)
6044 hw->mac.addr[nelts] = bytes[nelts];
6045 found = B_TRUE;
6046 }
6047 ddi_prop_free(bytes);
6048 }
6049
6050 if (found) {
6051 bcopy(hw->mac.addr, hw->mac.perm_addr,
6052 ETHERADDRL);
6053 }
6054
6055 return (found);
6056 }
6057 #endif
6058
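/*
 * e1000g_add_intrs - query the supported interrupt types and register
 * either MSI or FIXED (legacy) interrupts through the DDI interrupt
 * framework.
 */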
6059 static int
6060 e1000g_add_intrs(struct e1000g *Adapter)
6061 {
6062 dev_info_t *devinfo;
6063 int intr_types;
6064 int rc;
6065
6066 devinfo = Adapter->dip;
6067
6068 /* Get supported interrupt types */
6069 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
6070
6071 if (rc != DDI_SUCCESS) {
6072 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6073 "Get supported interrupt types failed: %d\n", rc);
6074 return (DDI_FAILURE);
6075 }
6076
	/*
	 * Per Intel Technical Advisory document (TA-160), there are some
	 * cases where older Intel PCI-X NICs may "advertise" to the OS
	 * that they support MSI, but in fact have problems with it.
	 * So we only enable MSI for PCI-E NICs and disable MSI for older
	 * PCI/PCI-X NICs.
	 */
6084 if (Adapter->shared.mac.type < e1000_82571)
6085 Adapter->msi_enable = B_FALSE;
6086
6087 if ((intr_types & DDI_INTR_TYPE_MSI) && Adapter->msi_enable) {
6088 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_MSI);
6089
6090 if (rc != DDI_SUCCESS) {
6091 /* EMPTY */
6092 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6093 "Add MSI failed, trying Legacy interrupts\n");
6094 } else {
6095 Adapter->intr_type = DDI_INTR_TYPE_MSI;
6096 }
6097 }
6098
6099 if ((Adapter->intr_type == 0) &&
6100 (intr_types & DDI_INTR_TYPE_FIXED)) {
6101 rc = e1000g_intr_add(Adapter, DDI_INTR_TYPE_FIXED);
6102
6103 if (rc != DDI_SUCCESS) {
6104 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6105 "Add Legacy interrupts failed\n");
6106 return (DDI_FAILURE);
6107 }
6108
6109 Adapter->intr_type = DDI_INTR_TYPE_FIXED;
6110 }
6111
6112 if (Adapter->intr_type == 0) {
6113 E1000G_DEBUGLOG_0(Adapter, E1000G_WARN_LEVEL,
6114 "No interrupts registered\n");
6115 return (DDI_FAILURE);
6116 }
6117
6118 return (DDI_SUCCESS);
6119 }
6120
6121 /*
6122 * e1000g_intr_add() handles MSI/Legacy interrupts
6123 */
6124 static int
6125 e1000g_intr_add(struct e1000g *Adapter, int intr_type)
6126 {
6127 dev_info_t *devinfo;
6128 int count, avail, actual;
6129 int x, y, rc, inum = 0;
6130 int flag;
6131 ddi_intr_handler_t *intr_handler;
6132
6133 devinfo = Adapter->dip;
6134
6135 /* get number of interrupts */
6136 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
6137 if ((rc != DDI_SUCCESS) || (count == 0)) {
6138 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6139 "Get interrupt number failed. Return: %d, count: %d\n",
6140 rc, count);
6141 return (DDI_FAILURE);
6142 }
6143
6144 /* get number of available interrupts */
6145 rc = ddi_intr_get_navail(devinfo, intr_type, &avail);
6146 if ((rc != DDI_SUCCESS) || (avail == 0)) {
6147 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6148 "Get interrupt available number failed. "
6149 "Return: %d, available: %d\n", rc, avail);
6150 return (DDI_FAILURE);
6151 }
6152
6153 if (avail < count) {
6154 /* EMPTY */
6155 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6156 "Interrupts count: %d, available: %d\n",
6157 count, avail);
6158 }
6159
6160 /* Allocate an array of interrupt handles */
6161 Adapter->intr_size = count * sizeof (ddi_intr_handle_t);
6162 Adapter->htable = kmem_alloc(Adapter->intr_size, KM_SLEEP);
6163
	/* Set NORMAL behavior for both MSI and FIXED interrupts */
6165 flag = DDI_INTR_ALLOC_NORMAL;
6166
6167 /* call ddi_intr_alloc() */
6168 rc = ddi_intr_alloc(devinfo, Adapter->htable, intr_type, inum,
6169 count, &actual, flag);
6170
6171 if ((rc != DDI_SUCCESS) || (actual == 0)) {
6172 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6173 "Allocate interrupts failed: %d\n", rc);
6174
6175 kmem_free(Adapter->htable, Adapter->intr_size);
6176 return (DDI_FAILURE);
6177 }
6178
6179 if (actual < count) {
6180 /* EMPTY */
6181 E1000G_DEBUGLOG_2(Adapter, E1000G_WARN_LEVEL,
6182 "Interrupts requested: %d, received: %d\n",
6183 count, actual);
6184 }
6185
6186 Adapter->intr_cnt = actual;
6187
	/* Get the priority of the first vector, assume the rest are the same */
6189 rc = ddi_intr_get_pri(Adapter->htable[0], &Adapter->intr_pri);
6190
6191 if (rc != DDI_SUCCESS) {
6192 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6193 "Get interrupt priority failed: %d\n", rc);
6194
6195 /* Free already allocated intr */
6196 for (y = 0; y < actual; y++)
6197 (void) ddi_intr_free(Adapter->htable[y]);
6198
6199 kmem_free(Adapter->htable, Adapter->intr_size);
6200 return (DDI_FAILURE);
6201 }
6202
	/*
	 * For PCI-Express adapters we use the interrupt service routine
	 * e1000g_intr_pciexpress(), which in Legacy Interrupt mode avoids
	 * interrupt stealing when sharing an interrupt line with other
	 * devices.
	 */
6209 if (Adapter->shared.mac.type < e1000_82571)
6210 intr_handler = (ddi_intr_handler_t *)e1000g_intr;
6211 else
6212 intr_handler = (ddi_intr_handler_t *)e1000g_intr_pciexpress;
6213
6214 /* Call ddi_intr_add_handler() */
6215 for (x = 0; x < actual; x++) {
6216 rc = ddi_intr_add_handler(Adapter->htable[x],
6217 intr_handler, (caddr_t)Adapter, NULL);
6218
6219 if (rc != DDI_SUCCESS) {
6220 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6221 "Add interrupt handler failed: %d\n", rc);
6222
6223 /* Remove already added handler */
6224 for (y = 0; y < x; y++)
6225 (void) ddi_intr_remove_handler(
6226 Adapter->htable[y]);
6227
6228 /* Free already allocated intr */
6229 for (y = 0; y < actual; y++)
6230 (void) ddi_intr_free(Adapter->htable[y]);
6231
6232 kmem_free(Adapter->htable, Adapter->intr_size);
6233 return (DDI_FAILURE);
6234 }
6235 }
6236
6237 rc = ddi_intr_get_cap(Adapter->htable[0], &Adapter->intr_cap);
6238
6239 if (rc != DDI_SUCCESS) {
6240 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6241 "Get interrupt cap failed: %d\n", rc);
6242
6243 /* Free already allocated intr */
6244 for (y = 0; y < actual; y++) {
6245 (void) ddi_intr_remove_handler(Adapter->htable[y]);
6246 (void) ddi_intr_free(Adapter->htable[y]);
6247 }
6248
6249 kmem_free(Adapter->htable, Adapter->intr_size);
6250 return (DDI_FAILURE);
6251 }
6252
6253 return (DDI_SUCCESS);
6254 }
6255
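/*
 * e1000g_rem_intrs - remove the interrupt handlers and free the interrupt
 * handles allocated by e1000g_intr_add().
 */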
6256 static int
6257 e1000g_rem_intrs(struct e1000g *Adapter)
6258 {
6259 int x;
6260 int rc;
6261
6262 for (x = 0; x < Adapter->intr_cnt; x++) {
6263 rc = ddi_intr_remove_handler(Adapter->htable[x]);
6264 if (rc != DDI_SUCCESS) {
6265 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6266 "Remove intr handler failed: %d\n", rc);
6267 return (DDI_FAILURE);
6268 }
6269
6270 rc = ddi_intr_free(Adapter->htable[x]);
6271 if (rc != DDI_SUCCESS) {
6272 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6273 "Free intr failed: %d\n", rc);
6274 return (DDI_FAILURE);
6275 }
6276 }
6277
6278 kmem_free(Adapter->htable, Adapter->intr_size);
6279
6280 return (DDI_SUCCESS);
6281 }
6282
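/*
 * e1000g_enable_intrs - enable all allocated interrupts, using block enable
 * when the DDI_INTR_FLAG_BLOCK capability is available.
 */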
6283 static int
6284 e1000g_enable_intrs(struct e1000g *Adapter)
6285 {
6286 int x;
6287 int rc;
6288
6289 /* Enable interrupts */
6290 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6291 /* Call ddi_intr_block_enable() for MSI */
6292 rc = ddi_intr_block_enable(Adapter->htable,
6293 Adapter->intr_cnt);
6294 if (rc != DDI_SUCCESS) {
6295 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6296 "Enable block intr failed: %d\n", rc);
6297 return (DDI_FAILURE);
6298 }
6299 } else {
		/* Call ddi_intr_enable() for Legacy/MSI non-block enable */
6301 for (x = 0; x < Adapter->intr_cnt; x++) {
6302 rc = ddi_intr_enable(Adapter->htable[x]);
6303 if (rc != DDI_SUCCESS) {
6304 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6305 "Enable intr failed: %d\n", rc);
6306 return (DDI_FAILURE);
6307 }
6308 }
6309 }
6310
6311 return (DDI_SUCCESS);
6312 }
6313
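/*
 * e1000g_disable_intrs - disable all allocated interrupts, the counterpart
 * of e1000g_enable_intrs().
 */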
6314 static int
6315 e1000g_disable_intrs(struct e1000g *Adapter)
6316 {
6317 int x;
6318 int rc;
6319
6320 /* Disable all interrupts */
6321 if (Adapter->intr_cap & DDI_INTR_FLAG_BLOCK) {
6322 rc = ddi_intr_block_disable(Adapter->htable,
6323 Adapter->intr_cnt);
6324 if (rc != DDI_SUCCESS) {
6325 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6326 "Disable block intr failed: %d\n", rc);
6327 return (DDI_FAILURE);
6328 }
6329 } else {
6330 for (x = 0; x < Adapter->intr_cnt; x++) {
6331 rc = ddi_intr_disable(Adapter->htable[x]);
6332 if (rc != DDI_SUCCESS) {
6333 E1000G_DEBUGLOG_1(Adapter, E1000G_WARN_LEVEL,
6334 "Disable intr failed: %d\n", rc);
6335 return (DDI_FAILURE);
6336 }
6337 }
6338 }
6339
6340 return (DDI_SUCCESS);
6341 }
6342
6343 /*
6344 * e1000g_get_phy_state - get the state of PHY registers, save in the adapter
6345 */
6346 static void
6347 e1000g_get_phy_state(struct e1000g *Adapter)
6348 {
6349 struct e1000_hw *hw = &Adapter->shared;
6350
6351 if (hw->phy.media_type == e1000_media_type_copper) {
6352 (void) e1000_read_phy_reg(hw, PHY_CONTROL, &Adapter->phy_ctrl);
6353 (void) e1000_read_phy_reg(hw, PHY_STATUS, &Adapter->phy_status);
6354 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
6355 &Adapter->phy_an_adv);
6356 (void) e1000_read_phy_reg(hw, PHY_AUTONEG_EXP,
6357 &Adapter->phy_an_exp);
6358 (void) e1000_read_phy_reg(hw, PHY_EXT_STATUS,
6359 &Adapter->phy_ext_status);
6360 (void) e1000_read_phy_reg(hw, PHY_1000T_CTRL,
6361 &Adapter->phy_1000t_ctrl);
6362 (void) e1000_read_phy_reg(hw, PHY_1000T_STATUS,
6363 &Adapter->phy_1000t_status);
6364 (void) e1000_read_phy_reg(hw, PHY_LP_ABILITY,
6365 &Adapter->phy_lp_able);
6366
6367 Adapter->param_autoneg_cap =
6368 (Adapter->phy_status & MII_SR_AUTONEG_CAPS) ? 1 : 0;
6369 Adapter->param_pause_cap =
6370 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
6371 Adapter->param_asym_pause_cap =
6372 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
6373 Adapter->param_1000fdx_cap =
6374 ((Adapter->phy_ext_status & IEEE_ESR_1000T_FD_CAPS) ||
6375 (Adapter->phy_ext_status & IEEE_ESR_1000X_FD_CAPS)) ? 1 : 0;
6376 Adapter->param_1000hdx_cap =
6377 ((Adapter->phy_ext_status & IEEE_ESR_1000T_HD_CAPS) ||
6378 (Adapter->phy_ext_status & IEEE_ESR_1000X_HD_CAPS)) ? 1 : 0;
6379 Adapter->param_100t4_cap =
6380 (Adapter->phy_status & MII_SR_100T4_CAPS) ? 1 : 0;
6381 Adapter->param_100fdx_cap =
6382 ((Adapter->phy_status & MII_SR_100X_FD_CAPS) ||
6383 (Adapter->phy_status & MII_SR_100T2_FD_CAPS)) ? 1 : 0;
6384 Adapter->param_100hdx_cap =
6385 ((Adapter->phy_status & MII_SR_100X_HD_CAPS) ||
6386 (Adapter->phy_status & MII_SR_100T2_HD_CAPS)) ? 1 : 0;
6387 Adapter->param_10fdx_cap =
6388 (Adapter->phy_status & MII_SR_10T_FD_CAPS) ? 1 : 0;
6389 Adapter->param_10hdx_cap =
6390 (Adapter->phy_status & MII_SR_10T_HD_CAPS) ? 1 : 0;
6391
6392 Adapter->param_adv_autoneg = hw->mac.autoneg;
6393 Adapter->param_adv_pause =
6394 (Adapter->phy_an_adv & NWAY_AR_PAUSE) ? 1 : 0;
6395 Adapter->param_adv_asym_pause =
6396 (Adapter->phy_an_adv & NWAY_AR_ASM_DIR) ? 1 : 0;
6397 Adapter->param_adv_1000hdx =
6398 (Adapter->phy_1000t_ctrl & CR_1000T_HD_CAPS) ? 1 : 0;
6399 Adapter->param_adv_100t4 =
6400 (Adapter->phy_an_adv & NWAY_AR_100T4_CAPS) ? 1 : 0;
6401 if (Adapter->param_adv_autoneg == 1) {
6402 Adapter->param_adv_1000fdx =
6403 (Adapter->phy_1000t_ctrl & CR_1000T_FD_CAPS)
6404 ? 1 : 0;
6405 Adapter->param_adv_100fdx =
6406 (Adapter->phy_an_adv & NWAY_AR_100TX_FD_CAPS)
6407 ? 1 : 0;
6408 Adapter->param_adv_100hdx =
6409 (Adapter->phy_an_adv & NWAY_AR_100TX_HD_CAPS)
6410 ? 1 : 0;
6411 Adapter->param_adv_10fdx =
6412 (Adapter->phy_an_adv & NWAY_AR_10T_FD_CAPS) ? 1 : 0;
6413 Adapter->param_adv_10hdx =
6414 (Adapter->phy_an_adv & NWAY_AR_10T_HD_CAPS) ? 1 : 0;
6415 }
6416
6417 Adapter->param_lp_autoneg =
6418 (Adapter->phy_an_exp & NWAY_ER_LP_NWAY_CAPS) ? 1 : 0;
6419 Adapter->param_lp_pause =
6420 (Adapter->phy_lp_able & NWAY_LPAR_PAUSE) ? 1 : 0;
6421 Adapter->param_lp_asym_pause =
6422 (Adapter->phy_lp_able & NWAY_LPAR_ASM_DIR) ? 1 : 0;
6423 Adapter->param_lp_1000fdx =
6424 (Adapter->phy_1000t_status & SR_1000T_LP_FD_CAPS) ? 1 : 0;
6425 Adapter->param_lp_1000hdx =
6426 (Adapter->phy_1000t_status & SR_1000T_LP_HD_CAPS) ? 1 : 0;
6427 Adapter->param_lp_100t4 =
6428 (Adapter->phy_lp_able & NWAY_LPAR_100T4_CAPS) ? 1 : 0;
6429 Adapter->param_lp_100fdx =
6430 (Adapter->phy_lp_able & NWAY_LPAR_100TX_FD_CAPS) ? 1 : 0;
6431 Adapter->param_lp_100hdx =
6432 (Adapter->phy_lp_able & NWAY_LPAR_100TX_HD_CAPS) ? 1 : 0;
6433 Adapter->param_lp_10fdx =
6434 (Adapter->phy_lp_able & NWAY_LPAR_10T_FD_CAPS) ? 1 : 0;
6435 Adapter->param_lp_10hdx =
6436 (Adapter->phy_lp_able & NWAY_LPAR_10T_HD_CAPS) ? 1 : 0;
6437 } else {
		/*
		 * A 1Gig fiber adapter only offers 1Gig full duplex, so it
		 * can only work with a 1Gig full-duplex link partner.
		 */
6442 Adapter->param_autoneg_cap = 0;
6443 Adapter->param_pause_cap = 1;
6444 Adapter->param_asym_pause_cap = 1;
6445 Adapter->param_1000fdx_cap = 1;
6446 Adapter->param_1000hdx_cap = 0;
6447 Adapter->param_100t4_cap = 0;
6448 Adapter->param_100fdx_cap = 0;
6449 Adapter->param_100hdx_cap = 0;
6450 Adapter->param_10fdx_cap = 0;
6451 Adapter->param_10hdx_cap = 0;
6452
6453 Adapter->param_adv_autoneg = 0;
6454 Adapter->param_adv_pause = 1;
6455 Adapter->param_adv_asym_pause = 1;
6456 Adapter->param_adv_1000fdx = 1;
6457 Adapter->param_adv_1000hdx = 0;
6458 Adapter->param_adv_100t4 = 0;
6459 Adapter->param_adv_100fdx = 0;
6460 Adapter->param_adv_100hdx = 0;
6461 Adapter->param_adv_10fdx = 0;
6462 Adapter->param_adv_10hdx = 0;
6463
6464 Adapter->param_lp_autoneg = 0;
6465 Adapter->param_lp_pause = 0;
6466 Adapter->param_lp_asym_pause = 0;
6467 Adapter->param_lp_1000fdx = 0;
6468 Adapter->param_lp_1000hdx = 0;
6469 Adapter->param_lp_100t4 = 0;
6470 Adapter->param_lp_100fdx = 0;
6471 Adapter->param_lp_100hdx = 0;
6472 Adapter->param_lp_10fdx = 0;
6473 Adapter->param_lp_10hdx = 0;
6474 }
6475 }
6476
6477 /*
6478 * FMA support
6479 */
6480
6481 int
6482 e1000g_check_acc_handle(ddi_acc_handle_t handle)
6483 {
6484 ddi_fm_error_t de;
6485
6486 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
6487 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
6488 return (de.fme_status);
6489 }
6490
6491 int
6492 e1000g_check_dma_handle(ddi_dma_handle_t handle)
6493 {
6494 ddi_fm_error_t de;
6495
6496 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
6497 return (de.fme_status);
6498 }
6499
6500 /*
6501 * The IO fault service error handling callback function
6502 */
6503 /* ARGSUSED2 */
6504 static int
6505 e1000g_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6506 {
	/*
	 * As the driver can always deal with an error in any DMA or
	 * access handle, we can just return the fme_status value.
	 */
6511 pci_ereport_post(dip, err, NULL);
6512 return (err->fme_status);
6513 }
6514
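/*
 * e1000g_fm_init - register the driver's FMA capabilities (access/DMA
 * checking, ereports and the error callback) with the I/O Fault Services
 * framework.
 */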
6515 static void
6516 e1000g_fm_init(struct e1000g *Adapter)
6517 {
6518 ddi_iblock_cookie_t iblk;
6519 int fma_dma_flag;
6520
6521 /* Only register with IO Fault Services if we have some capability */
6522 if (Adapter->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
6523 e1000g_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
6524 } else {
6525 e1000g_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
6526 }
6527
6528 if (Adapter->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
6529 fma_dma_flag = 1;
6530 } else {
6531 fma_dma_flag = 0;
6532 }
6533
6534 (void) e1000g_set_fma_flags(fma_dma_flag);
6535
6536 if (Adapter->fm_capabilities) {
6537
6538 /* Register capabilities with IO Fault Services */
6539 ddi_fm_init(Adapter->dip, &Adapter->fm_capabilities, &iblk);
6540
6541 /*
6542 * Initialize pci ereport capabilities if ereport capable
6543 */
6544 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
6545 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6546 pci_ereport_setup(Adapter->dip);
6547
6548 /*
6549 * Register error callback if error callback capable
6550 */
6551 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6552 ddi_fm_handler_register(Adapter->dip,
6553 e1000g_fm_error_cb, (void*) Adapter);
6554 }
6555 }
6556
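/*
 * e1000g_fm_fini - unregister the FMA capabilities registered by
 * e1000g_fm_init().
 */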
6557 static void
6558 e1000g_fm_fini(struct e1000g *Adapter)
6559 {
6560 /* Only unregister FMA capabilities if we registered some */
6561 if (Adapter->fm_capabilities) {
6562
6563 /*
6564 * Release any resources allocated by pci_ereport_setup()
6565 */
6566 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities) ||
6567 DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6568 pci_ereport_teardown(Adapter->dip);
6569
6570 /*
6571 * Un-register error callback if error callback capable
6572 */
6573 if (DDI_FM_ERRCB_CAP(Adapter->fm_capabilities))
6574 ddi_fm_handler_unregister(Adapter->dip);
6575
6576 /* Unregister from IO Fault Services */
6577 mutex_enter(&e1000g_rx_detach_lock);
6578 ddi_fm_fini(Adapter->dip);
6579 if (Adapter->priv_dip != NULL) {
6580 DEVI(Adapter->priv_dip)->devi_fmhdl = NULL;
6581 }
6582 mutex_exit(&e1000g_rx_detach_lock);
6583 }
6584 }
6585
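/*
 * e1000g_fm_ereport - post a device ereport of the given class if the
 * driver is ereport capable.
 */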
6586 void
6587 e1000g_fm_ereport(struct e1000g *Adapter, char *detail)
6588 {
6589 uint64_t ena;
6590 char buf[FM_MAX_CLASS];
6591
6592 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6593 ena = fm_ena_generate(0, FM_ENA_FMT1);
6594 if (DDI_FM_EREPORT_CAP(Adapter->fm_capabilities)) {
6595 ddi_fm_ereport_post(Adapter->dip, buf, ena, DDI_NOSLEEP,
6596 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
6597 }
6598 }
6599
6600 /*
6601 * quiesce(9E) entry point.
6602 *
 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
6606 *
6607 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6608 * DDI_FAILURE indicates an error condition and should almost never happen.
6609 */
6610 static int
6611 e1000g_quiesce(dev_info_t *devinfo)
6612 {
6613 struct e1000g *Adapter;
6614
6615 Adapter = (struct e1000g *)ddi_get_driver_private(devinfo);
6616
6617 if (Adapter == NULL)
6618 return (DDI_FAILURE);
6619
6620 e1000g_clear_all_interrupts(Adapter);
6621
6622 (void) e1000_reset_hw(&Adapter->shared);
6623
6624 /* Setup our HW Tx Head & Tail descriptor pointers */
6625 E1000_WRITE_REG(&Adapter->shared, E1000_TDH(0), 0);
6626 E1000_WRITE_REG(&Adapter->shared, E1000_TDT(0), 0);
6627
6628 /* Setup our HW Rx Head & Tail descriptor pointers */
6629 E1000_WRITE_REG(&Adapter->shared, E1000_RDH(0), 0);
6630 E1000_WRITE_REG(&Adapter->shared, E1000_RDT(0), 0);
6631
6632 return (DDI_SUCCESS);
6633 }
6634
6635 /*
6636 * synchronize the adv* and en* parameters.
6637 *
6638 * See comments in <sys/dld.h> for details of the *_en_*
6639 * parameters. The usage of ndd for setting adv parameters will
6640 * synchronize all the en parameters with the e1000g parameters,
6641 * implicitly disabling any settings made via dladm.
6642 */
6643 static void
6644 e1000g_param_sync(struct e1000g *Adapter)
6645 {
6646 Adapter->param_en_1000fdx = Adapter->param_adv_1000fdx;
6647 Adapter->param_en_1000hdx = Adapter->param_adv_1000hdx;
6648 Adapter->param_en_100fdx = Adapter->param_adv_100fdx;
6649 Adapter->param_en_100hdx = Adapter->param_adv_100hdx;
6650 Adapter->param_en_10fdx = Adapter->param_adv_10fdx;
6651 Adapter->param_en_10hdx = Adapter->param_adv_10hdx;
6652 }
6653
6654 /*
6655 * e1000g_get_driver_control - tell manageability firmware that the driver
6656 * has control.
6657 */
6658 static void
6659 e1000g_get_driver_control(struct e1000_hw *hw)
6660 {
6661 uint32_t ctrl_ext;
6662 uint32_t swsm;
6663
6664 /* tell manageability firmware the driver has taken over */
6665 switch (hw->mac.type) {
6666 case e1000_82573:
6667 swsm = E1000_READ_REG(hw, E1000_SWSM);
6668 E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_DRV_LOAD);
6669 break;
6670 case e1000_82571:
6671 case e1000_82572:
6672 case e1000_82574:
6673 case e1000_80003es2lan:
6674 case e1000_ich8lan:
6675 case e1000_ich9lan:
6676 case e1000_ich10lan:
6677 case e1000_pchlan:
6678 case e1000_pch2lan:
6679 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6680 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6681 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
6682 break;
6683 default:
6684 /* no manageability firmware: do nothing */
6685 break;
6686 }
6687 }
6688
6689 /*
6690 * e1000g_release_driver_control - tell manageability firmware that the driver
6691 * has released control.
6692 */
6693 static void
6694 e1000g_release_driver_control(struct e1000_hw *hw)
6695 {
6696 uint32_t ctrl_ext;
6697 uint32_t swsm;
6698
6699 /* tell manageability firmware the driver has released control */
6700 switch (hw->mac.type) {
6701 case e1000_82573:
6702 swsm = E1000_READ_REG(hw, E1000_SWSM);
6703 E1000_WRITE_REG(hw, E1000_SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
6704 break;
6705 case e1000_82571:
6706 case e1000_82572:
6707 case e1000_82574:
6708 case e1000_80003es2lan:
6709 case e1000_ich8lan:
6710 case e1000_ich9lan:
6711 case e1000_ich10lan:
6712 case e1000_pchlan:
6713 case e1000_pch2lan:
6714 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
6715 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
6716 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
6717 break;
6718 default:
6719 /* no manageability firmware: do nothing */
6720 break;
6721 }
6722 }
6723
6724 /*
6725 * Restore e1000g promiscuous mode.
6726 */
6727 static void
6728 e1000g_restore_promisc(struct e1000g *Adapter)
6729 {
6730 if (Adapter->e1000g_promisc) {
6731 uint32_t rctl;
6732
6733 rctl = E1000_READ_REG(&Adapter->shared, E1000_RCTL);
6734 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_BAM);
6735 E1000_WRITE_REG(&Adapter->shared, E1000_RCTL, rctl);
6736 }
6737 }