1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Copyright (c) 2017, Joyent, Inc.
29 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
30 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
31 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
32 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
33 */
34
35 #include "ixgbe_sw.h"
36
37 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
38
39 /*
40 * Local function protoypes
41 */
42 static int ixgbe_register_mac(ixgbe_t *);
43 static int ixgbe_identify_hardware(ixgbe_t *);
44 static int ixgbe_regs_map(ixgbe_t *);
45 static void ixgbe_init_properties(ixgbe_t *);
46 static int ixgbe_init_driver_settings(ixgbe_t *);
47 static void ixgbe_init_locks(ixgbe_t *);
48 static void ixgbe_destroy_locks(ixgbe_t *);
49 static int ixgbe_init(ixgbe_t *);
50 static int ixgbe_chip_start(ixgbe_t *);
51 static void ixgbe_chip_stop(ixgbe_t *);
52 static int ixgbe_reset(ixgbe_t *);
53 static void ixgbe_tx_clean(ixgbe_t *);
54 static boolean_t ixgbe_tx_drain(ixgbe_t *);
55 static boolean_t ixgbe_rx_drain(ixgbe_t *);
56 static int ixgbe_alloc_rings(ixgbe_t *);
57 static void ixgbe_free_rings(ixgbe_t *);
58 static int ixgbe_alloc_rx_data(ixgbe_t *);
59 static void ixgbe_free_rx_data(ixgbe_t *);
60 static void ixgbe_setup_rings(ixgbe_t *);
61 static void ixgbe_setup_rx(ixgbe_t *);
62 static void ixgbe_setup_tx(ixgbe_t *);
63 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
64 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
65 static void ixgbe_setup_rss(ixgbe_t *);
66 static void ixgbe_setup_vmdq(ixgbe_t *);
67 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
68 static void ixgbe_setup_rss_table(ixgbe_t *);
69 static void ixgbe_init_unicst(ixgbe_t *);
70 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
71 static void ixgbe_setup_multicst(ixgbe_t *);
72 static void ixgbe_get_hw_state(ixgbe_t *);
73 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
74 static void ixgbe_get_conf(ixgbe_t *);
75 static void ixgbe_init_params(ixgbe_t *);
76 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
77 static void ixgbe_driver_link_check(ixgbe_t *);
78 static void ixgbe_sfp_check(void *);
79 static void ixgbe_overtemp_check(void *);
80 static void ixgbe_phy_check(void *);
81 static void ixgbe_link_timer(void *);
82 static void ixgbe_local_timer(void *);
83 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
84 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
85 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
86 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
87 static boolean_t is_valid_mac_addr(uint8_t *);
88 static boolean_t ixgbe_stall_check(ixgbe_t *);
89 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
90 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
91 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
92 static int ixgbe_alloc_intrs(ixgbe_t *);
93 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
94 static int ixgbe_add_intr_handlers(ixgbe_t *);
95 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
96 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
97 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
98 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
99 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
100 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
101 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
102 static void ixgbe_setup_adapter_vector(ixgbe_t *);
103 static void ixgbe_rem_intr_handlers(ixgbe_t *);
104 static void ixgbe_rem_intrs(ixgbe_t *);
105 static int ixgbe_enable_intrs(ixgbe_t *);
106 static int ixgbe_disable_intrs(ixgbe_t *);
107 static uint_t ixgbe_intr_legacy(void *, void *);
108 static uint_t ixgbe_intr_msi(void *, void *);
109 static uint_t ixgbe_intr_msix(void *, void *);
110 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
111 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
112 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
113 static void ixgbe_get_driver_control(struct ixgbe_hw *);
114 static int ixgbe_addmac(void *, const uint8_t *);
115 static int ixgbe_remmac(void *, const uint8_t *);
116 static void ixgbe_release_driver_control(struct ixgbe_hw *);
117
118 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
119 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
120 static int ixgbe_resume(dev_info_t *);
121 static int ixgbe_suspend(dev_info_t *);
122 static int ixgbe_quiesce(dev_info_t *);
123 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
124 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
125 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
126 static int ixgbe_intr_cb_register(ixgbe_t *);
127 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
128
129 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
130 const void *impl_data);
131 static void ixgbe_fm_init(ixgbe_t *);
132 static void ixgbe_fm_fini(ixgbe_t *);
133
134 char *ixgbe_priv_props[] = {
135 "_tx_copy_thresh",
136 "_tx_recycle_thresh",
137 "_tx_overload_thresh",
138 "_tx_resched_thresh",
139 "_rx_copy_thresh",
140 "_rx_limit_per_intr",
141 "_intr_throttling",
142 "_adv_pause_cap",
143 "_adv_asym_pause_cap",
144 NULL
145 };
146
147 #define IXGBE_MAX_PRIV_PROPS \
148 (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
149
150 static struct cb_ops ixgbe_cb_ops = {
151 nulldev, /* cb_open */
152 nulldev, /* cb_close */
153 nodev, /* cb_strategy */
154 nodev, /* cb_print */
155 nodev, /* cb_dump */
156 nodev, /* cb_read */
157 nodev, /* cb_write */
158 nodev, /* cb_ioctl */
159 nodev, /* cb_devmap */
160 nodev, /* cb_mmap */
161 nodev, /* cb_segmap */
162 nochpoll, /* cb_chpoll */
163 ddi_prop_op, /* cb_prop_op */
164 NULL, /* cb_stream */
165 D_MP | D_HOTPLUG, /* cb_flag */
166 CB_REV, /* cb_rev */
167 nodev, /* cb_aread */
168 nodev /* cb_awrite */
169 };
170
171 static struct dev_ops ixgbe_dev_ops = {
172 DEVO_REV, /* devo_rev */
173 0, /* devo_refcnt */
174 NULL, /* devo_getinfo */
175 nulldev, /* devo_identify */
176 nulldev, /* devo_probe */
177 ixgbe_attach, /* devo_attach */
178 ixgbe_detach, /* devo_detach */
179 nodev, /* devo_reset */
180 &ixgbe_cb_ops, /* devo_cb_ops */
181 NULL, /* devo_bus_ops */
182 ddi_power, /* devo_power */
183 ixgbe_quiesce, /* devo_quiesce */
184 };
185
186 static struct modldrv ixgbe_modldrv = {
187 &mod_driverops, /* Type of module. This one is a driver */
188 ixgbe_ident, /* Discription string */
189 &ixgbe_dev_ops /* driver ops */
190 };
191
192 static struct modlinkage ixgbe_modlinkage = {
193 MODREV_1, &ixgbe_modldrv, NULL
194 };
195
196 /*
197 * Access attributes for register mapping
198 */
199 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
200 DDI_DEVICE_ATTR_V1,
201 DDI_STRUCTURE_LE_ACC,
202 DDI_STRICTORDER_ACC,
203 DDI_FLAGERR_ACC
204 };
205
206 /*
207 * Loopback property
208 */
209 static lb_property_t lb_normal = {
210 normal, "normal", IXGBE_LB_NONE
211 };
212
213 static lb_property_t lb_mac = {
214 internal, "MAC", IXGBE_LB_INTERNAL_MAC
215 };
216
217 static lb_property_t lb_external = {
218 external, "External", IXGBE_LB_EXTERNAL
219 };
220
221 #define IXGBE_M_CALLBACK_FLAGS \
222 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
223
224 static mac_callbacks_t ixgbe_m_callbacks = {
225 IXGBE_M_CALLBACK_FLAGS,
226 ixgbe_m_stat,
227 ixgbe_m_start,
228 ixgbe_m_stop,
229 ixgbe_m_promisc,
230 ixgbe_m_multicst,
231 NULL,
232 NULL,
233 NULL,
234 ixgbe_m_ioctl,
235 ixgbe_m_getcapab,
236 NULL,
237 NULL,
238 ixgbe_m_setprop,
239 ixgbe_m_getprop,
240 ixgbe_m_propinfo
241 };
242
243 /*
244 * Initialize capabilities of each supported adapter type
245 */
246 static adapter_info_t ixgbe_82598eb_cap = {
247 64, /* maximum number of rx queues */
248 1, /* minimum number of rx queues */
249 64, /* default number of rx queues */
250 16, /* maximum number of rx groups */
251 1, /* minimum number of rx groups */
252 1, /* default number of rx groups */
253 32, /* maximum number of tx queues */
254 1, /* minimum number of tx queues */
255 8, /* default number of tx queues */
256 16366, /* maximum MTU size */
257 0xFFFF, /* maximum interrupt throttle rate */
258 0, /* minimum interrupt throttle rate */
259 200, /* default interrupt throttle rate */
260 18, /* maximum total msix vectors */
261 16, /* maximum number of ring vectors */
262 2, /* maximum number of other vectors */
263 IXGBE_EICR_LSC, /* "other" interrupt types handled */
264 0, /* "other" interrupt types enable mask */
265 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */
266 | IXGBE_FLAG_RSS_CAPABLE
267 | IXGBE_FLAG_VMDQ_CAPABLE)
268 };
269
270 static adapter_info_t ixgbe_82599eb_cap = {
271 128, /* maximum number of rx queues */
272 1, /* minimum number of rx queues */
273 128, /* default number of rx queues */
274 64, /* maximum number of rx groups */
275 1, /* minimum number of rx groups */
276 1, /* default number of rx groups */
277 128, /* maximum number of tx queues */
278 1, /* minimum number of tx queues */
279 8, /* default number of tx queues */
280 15500, /* maximum MTU size */
281 0xFF8, /* maximum interrupt throttle rate */
282 0, /* minimum interrupt throttle rate */
283 200, /* default interrupt throttle rate */
284 64, /* maximum total msix vectors */
285 16, /* maximum number of ring vectors */
286 2, /* maximum number of other vectors */
287 (IXGBE_EICR_LSC
288 | IXGBE_EICR_GPI_SDP1
289 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
290
291 (IXGBE_SDP1_GPIEN
292 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
293
294 (IXGBE_FLAG_DCA_CAPABLE
295 | IXGBE_FLAG_RSS_CAPABLE
296 | IXGBE_FLAG_VMDQ_CAPABLE
297 | IXGBE_FLAG_RSC_CAPABLE
298 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
299 };
300
301 static adapter_info_t ixgbe_X540_cap = {
302 128, /* maximum number of rx queues */
303 1, /* minimum number of rx queues */
304 128, /* default number of rx queues */
305 64, /* maximum number of rx groups */
306 1, /* minimum number of rx groups */
307 1, /* default number of rx groups */
308 128, /* maximum number of tx queues */
309 1, /* minimum number of tx queues */
310 8, /* default number of tx queues */
311 15500, /* maximum MTU size */
312 0xFF8, /* maximum interrupt throttle rate */
313 0, /* minimum interrupt throttle rate */
314 200, /* default interrupt throttle rate */
315 64, /* maximum total msix vectors */
316 16, /* maximum number of ring vectors */
317 2, /* maximum number of other vectors */
318 (IXGBE_EICR_LSC
319 | IXGBE_EICR_GPI_SDP1_X540
320 | IXGBE_EICR_GPI_SDP2_X540), /* "other" interrupt types handled */
321
322 (IXGBE_SDP1_GPIEN_X540
323 | IXGBE_SDP2_GPIEN_X540), /* "other" interrupt types enable mask */
324
325 (IXGBE_FLAG_DCA_CAPABLE
326 | IXGBE_FLAG_RSS_CAPABLE
327 | IXGBE_FLAG_VMDQ_CAPABLE
328 | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
329 };
330
331 static adapter_info_t ixgbe_X550_cap = {
332 128, /* maximum number of rx queues */
333 1, /* minimum number of rx queues */
334 128, /* default number of rx queues */
335 64, /* maximum number of rx groups */
336 1, /* minimum number of rx groups */
337 1, /* default number of rx groups */
338 128, /* maximum number of tx queues */
339 1, /* minimum number of tx queues */
340 8, /* default number of tx queues */
341 15500, /* maximum MTU size */
342 0xFF8, /* maximum interrupt throttle rate */
343 0, /* minimum interrupt throttle rate */
344 0x200, /* default interrupt throttle rate */
345 64, /* maximum total msix vectors */
346 16, /* maximum number of ring vectors */
347 2, /* maximum number of other vectors */
348 IXGBE_EICR_LSC, /* "other" interrupt types handled */
349 0, /* "other" interrupt types enable mask */
350 (IXGBE_FLAG_RSS_CAPABLE
351 | IXGBE_FLAG_VMDQ_CAPABLE
352 | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
353 };
354
355 /*
356 * Module Initialization Functions.
357 */
358
359 int
360 _init(void)
361 {
362 int status;
363
364 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
365
366 status = mod_install(&ixgbe_modlinkage);
367
368 if (status != DDI_SUCCESS) {
369 mac_fini_ops(&ixgbe_dev_ops);
370 }
371
372 return (status);
373 }
374
375 int
376 _fini(void)
377 {
378 int status;
379
380 status = mod_remove(&ixgbe_modlinkage);
381
382 if (status == DDI_SUCCESS) {
383 mac_fini_ops(&ixgbe_dev_ops);
384 }
385
386 return (status);
387 }
388
389 int
390 _info(struct modinfo *modinfop)
391 {
392 int status;
393
394 status = mod_info(&ixgbe_modlinkage, modinfop);
395
396 return (status);
397 }
398
399 /*
400 * ixgbe_attach - Driver attach.
401 *
402 * This function is the device specific initialization entry
403 * point. This entry point is required and must be written.
404 * The DDI_ATTACH command must be provided in the attach entry
405 * point. When attach() is called with cmd set to DDI_ATTACH,
406 * all normal kernel services (such as kmem_alloc(9F)) are
407 * available for use by the driver.
408 *
409 * The attach() function will be called once for each instance
410 * of the device on the system with cmd set to DDI_ATTACH.
411 * Until attach() succeeds, the only driver entry points which
412 * may be called are open(9E) and getinfo(9E).
413 */
414 static int
415 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
416 {
417 ixgbe_t *ixgbe;
418 struct ixgbe_osdep *osdep;
419 struct ixgbe_hw *hw;
420 int instance;
421 char taskqname[32];
422
423 /*
424 * Check the command and perform corresponding operations
425 */
426 switch (cmd) {
427 default:
428 return (DDI_FAILURE);
429
430 case DDI_RESUME:
431 return (ixgbe_resume(devinfo));
432
433 case DDI_ATTACH:
434 break;
435 }
436
437 /* Get the device instance */
438 instance = ddi_get_instance(devinfo);
439
440 /* Allocate memory for the instance data structure */
441 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
442
443 ixgbe->dip = devinfo;
444 ixgbe->instance = instance;
445
446 hw = &ixgbe->hw;
447 osdep = &ixgbe->osdep;
448 hw->back = osdep;
449 osdep->ixgbe = ixgbe;
450
451 /* Attach the instance pointer to the dev_info data structure */
452 ddi_set_driver_private(devinfo, ixgbe);
453
454 /*
455 * Initialize for FMA support
456 */
457 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
458 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
459 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
460 ixgbe_fm_init(ixgbe);
461 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
462
463 /*
464 * Map PCI config space registers
465 */
466 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
467 ixgbe_error(ixgbe, "Failed to map PCI configurations");
468 goto attach_fail;
469 }
470 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
471
472 /*
473 * Identify the chipset family
474 */
475 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
476 ixgbe_error(ixgbe, "Failed to identify hardware");
477 goto attach_fail;
478 }
479
480 /*
481 * Map device registers
482 */
483 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
484 ixgbe_error(ixgbe, "Failed to map device registers");
485 goto attach_fail;
486 }
487 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
488
489 /*
490 * Initialize driver parameters
491 */
492 ixgbe_init_properties(ixgbe);
493 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
494
495 /*
496 * Allocate interrupts
497 */
498 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
499 ixgbe_error(ixgbe, "Failed to allocate interrupts");
500 goto attach_fail;
501 }
502 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
503
504 /*
505 * Allocate rx/tx rings based on the ring numbers.
506 * The actual numbers of rx/tx rings are decided by the number of
507 * allocated interrupt vectors, so we should allocate the rings after
508 * interrupts are allocated.
509 */
510 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
511 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
512 goto attach_fail;
513 }
514 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
515
516 /*
517 * Map rings to interrupt vectors
518 */
519 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
520 ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
521 goto attach_fail;
522 }
523
524 /*
525 * Add interrupt handlers
526 */
527 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
528 ixgbe_error(ixgbe, "Failed to add interrupt handlers");
529 goto attach_fail;
530 }
531 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
532
533 /*
534 * Create a taskq for sfp-change
535 */
536 (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
537 if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
538 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
539 ixgbe_error(ixgbe, "sfp_taskq create failed");
540 goto attach_fail;
541 }
542 ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
543
544 /*
545 * Create a taskq for over-temp
546 */
547 (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
548 if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
549 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
550 ixgbe_error(ixgbe, "overtemp_taskq create failed");
551 goto attach_fail;
552 }
553 ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;
554
555 /*
556 * Create a taskq for processing external PHY interrupts
557 */
558 (void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance);
559 if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname,
560 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
561 ixgbe_error(ixgbe, "phy_taskq create failed");
562 goto attach_fail;
563 }
564 ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ;
565
566 /*
567 * Initialize driver parameters
568 */
569 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
570 ixgbe_error(ixgbe, "Failed to initialize driver settings");
571 goto attach_fail;
572 }
573
574 /*
575 * Initialize mutexes for this device.
576 * Do this before enabling the interrupt handler and
577 * register the softint to avoid the condition where
578 * interrupt handler can try using uninitialized mutex.
579 */
580 ixgbe_init_locks(ixgbe);
581 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
582
583 /*
584 * Initialize chipset hardware
585 */
586 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
587 ixgbe_error(ixgbe, "Failed to initialize adapter");
588 goto attach_fail;
589 }
590 ixgbe->link_check_complete = B_FALSE;
591 ixgbe->link_check_hrtime = gethrtime() +
592 (IXGBE_LINK_UP_TIME * 100000000ULL);
593 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
594
595 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
596 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
597 goto attach_fail;
598 }
599
600 /*
601 * Initialize adapter capabilities
602 */
603 ixgbe_init_params(ixgbe);
604
605 /*
606 * Initialize statistics
607 */
608 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
609 ixgbe_error(ixgbe, "Failed to initialize statistics");
610 goto attach_fail;
611 }
612 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
613
614 /*
615 * Register the driver to the MAC
616 */
617 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
618 ixgbe_error(ixgbe, "Failed to register MAC");
619 goto attach_fail;
620 }
621 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
622 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
623
624 ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
625 IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
626 if (ixgbe->periodic_id == 0) {
627 ixgbe_error(ixgbe, "Failed to add the link check timer");
628 goto attach_fail;
629 }
630 ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
631
632 /*
633 * Now that mutex locks are initialized, and the chip is also
634 * initialized, enable interrupts.
635 */
636 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
637 ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
638 goto attach_fail;
639 }
640 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
641
642 ixgbe_log(ixgbe, "%s", ixgbe_ident);
643 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
644
645 /*
646 * Register interrupt callback
647 */
648 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX)
649 if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
650 ixgbe_error(ixgbe,
651 "Failed to register interrupt callback");
652 }
653
654 return (DDI_SUCCESS);
655
656 attach_fail:
657 ixgbe_unconfigure(devinfo, ixgbe);
658 return (DDI_FAILURE);
659 }
660
661 /*
662 * ixgbe_detach - Driver detach.
663 *
664 * The detach() function is the complement of the attach routine.
665 * If cmd is set to DDI_DETACH, detach() is used to remove the
666 * state associated with a given instance of a device node
667 * prior to the removal of that instance from the system.
668 *
669 * The detach() function will be called once for each instance
670 * of the device for which there has been a successful attach()
671 * once there are no longer any opens on the device.
672 *
673 * Interrupts routine are disabled, All memory allocated by this
674 * driver are freed.
675 */
676 static int
677 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
678 {
679 ixgbe_t *ixgbe;
680
681 /*
682 * Check detach command
683 */
684 switch (cmd) {
685 default:
686 return (DDI_FAILURE);
687
688 case DDI_SUSPEND:
689 return (ixgbe_suspend(devinfo));
690
691 case DDI_DETACH:
692 break;
693 }
694
695 /*
696 * Get the pointer to the driver private data structure
697 */
698 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
699 if (ixgbe == NULL)
700 return (DDI_FAILURE);
701
702 /*
703 * If the device is still running, it needs to be stopped first.
704 * This check is necessary because under some specific circumstances,
705 * the detach routine can be called without stopping the interface
706 * first.
707 */
708 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
709 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
710 mutex_enter(&ixgbe->gen_lock);
711 ixgbe_stop(ixgbe, B_TRUE);
712 mutex_exit(&ixgbe->gen_lock);
713 /* Disable and stop the watchdog timer */
714 ixgbe_disable_watchdog_timer(ixgbe);
715 }
716
717 /*
718 * Check if there are still rx buffers held by the upper layer.
719 * If so, fail the detach.
720 */
721 if (!ixgbe_rx_drain(ixgbe))
722 return (DDI_FAILURE);
723
724 /*
725 * Do the remaining unconfigure routines
726 */
727 ixgbe_unconfigure(devinfo, ixgbe);
728
729 return (DDI_SUCCESS);
730 }
731
732 /*
733 * quiesce(9E) entry point.
734 *
735 * This function is called when the system is single-threaded at high
736 * PIL with preemption disabled. Therefore, this function must not be
737 * blocked.
738 *
739 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
740 * DDI_FAILURE indicates an error condition and should almost never happen.
741 */
742 static int
743 ixgbe_quiesce(dev_info_t *devinfo)
744 {
745 ixgbe_t *ixgbe;
746 struct ixgbe_hw *hw;
747
748 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
749
750 if (ixgbe == NULL)
751 return (DDI_FAILURE);
752
753 hw = &ixgbe->hw;
754
755 /*
756 * Disable the adapter interrupts
757 */
758 ixgbe_disable_adapter_interrupts(ixgbe);
759
760 /*
761 * Tell firmware driver is no longer in control
762 */
763 ixgbe_release_driver_control(hw);
764
765 /*
766 * Reset the chipset
767 */
768 (void) ixgbe_reset_hw(hw);
769
770 /*
771 * Reset PHY
772 */
773 (void) ixgbe_reset_phy(hw);
774
775 return (DDI_SUCCESS);
776 }
777
778 static void
779 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
780 {
781 /*
782 * Unregister interrupt callback handler
783 */
784 if (ixgbe->cb_hdl != NULL)
785 (void) ddi_cb_unregister(ixgbe->cb_hdl);
786
787 /*
788 * Disable interrupt
789 */
790 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
791 (void) ixgbe_disable_intrs(ixgbe);
792 }
793
794 /*
795 * remove the link check timer
796 */
797 if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
798 if (ixgbe->periodic_id != NULL) {
799 ddi_periodic_delete(ixgbe->periodic_id);
800 ixgbe->periodic_id = NULL;
801 }
802 }
803
804 /*
805 * Unregister MAC
806 */
807 if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
808 (void) mac_unregister(ixgbe->mac_hdl);
809 }
810
811 /*
812 * Free statistics
813 */
814 if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
815 kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
816 }
817
818 /*
819 * Remove interrupt handlers
820 */
821 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
822 ixgbe_rem_intr_handlers(ixgbe);
823 }
824
825 /*
826 * Remove taskq for sfp-status-change
827 */
828 if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
829 ddi_taskq_destroy(ixgbe->sfp_taskq);
830 }
831
832 /*
833 * Remove taskq for over-temp
834 */
835 if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
836 ddi_taskq_destroy(ixgbe->overtemp_taskq);
837 }
838
839 /*
840 * Remove taskq for external PHYs
841 */
842 if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) {
843 ddi_taskq_destroy(ixgbe->phy_taskq);
844 }
845
846 /*
847 * Remove interrupts
848 */
849 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
850 ixgbe_rem_intrs(ixgbe);
851 }
852
853 /*
854 * Unregister interrupt callback handler
855 */
856 if (ixgbe->cb_hdl != NULL) {
857 (void) ddi_cb_unregister(ixgbe->cb_hdl);
858 }
859
860 /*
861 * Remove driver properties
862 */
863 if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
864 (void) ddi_prop_remove_all(devinfo);
865 }
866
867 /*
868 * Stop the chipset
869 */
870 if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
871 mutex_enter(&ixgbe->gen_lock);
872 ixgbe_chip_stop(ixgbe);
873 mutex_exit(&ixgbe->gen_lock);
874 }
875
876 /*
877 * Free register handle
878 */
879 if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
880 if (ixgbe->osdep.reg_handle != NULL)
881 ddi_regs_map_free(&ixgbe->osdep.reg_handle);
882 }
883
884 /*
885 * Free PCI config handle
886 */
887 if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
888 if (ixgbe->osdep.cfg_handle != NULL)
889 pci_config_teardown(&ixgbe->osdep.cfg_handle);
890 }
891
892 /*
893 * Free locks
894 */
895 if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
896 ixgbe_destroy_locks(ixgbe);
897 }
898
899 /*
900 * Free the rx/tx rings
901 */
902 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
903 ixgbe_free_rings(ixgbe);
904 }
905
906 /*
907 * Unregister FMA capabilities
908 */
909 if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
910 ixgbe_fm_fini(ixgbe);
911 }
912
913 /*
914 * Free the driver data structure
915 */
916 kmem_free(ixgbe, sizeof (ixgbe_t));
917
918 ddi_set_driver_private(devinfo, NULL);
919 }
920
921 /*
922 * ixgbe_register_mac - Register the driver and its function pointers with
923 * the GLD interface.
924 */
925 static int
926 ixgbe_register_mac(ixgbe_t *ixgbe)
927 {
928 struct ixgbe_hw *hw = &ixgbe->hw;
929 mac_register_t *mac;
930 int status;
931
932 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
933 return (IXGBE_FAILURE);
934
935 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
936 mac->m_driver = ixgbe;
937 mac->m_dip = ixgbe->dip;
938 mac->m_src_addr = hw->mac.addr;
939 mac->m_callbacks = &ixgbe_m_callbacks;
940 mac->m_min_sdu = 0;
941 mac->m_max_sdu = ixgbe->default_mtu;
942 mac->m_margin = VLAN_TAGSZ;
943 mac->m_priv_props = ixgbe_priv_props;
944 mac->m_v12n = MAC_VIRT_LEVEL1;
945
946 status = mac_register(mac, &ixgbe->mac_hdl);
947
948 mac_free(mac);
949
950 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
951 }
952
953 /*
954 * ixgbe_identify_hardware - Identify the type of the chipset.
955 */
956 static int
957 ixgbe_identify_hardware(ixgbe_t *ixgbe)
958 {
959 struct ixgbe_hw *hw = &ixgbe->hw;
960 struct ixgbe_osdep *osdep = &ixgbe->osdep;
961
962 /*
963 * Get the device id
964 */
965 hw->vendor_id =
966 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
967 hw->device_id =
968 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
969 hw->revision_id =
970 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
971 hw->subsystem_device_id =
972 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
973 hw->subsystem_vendor_id =
974 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
975
976 /*
977 * Set the mac type of the adapter based on the device id
978 */
979 if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
980 return (IXGBE_FAILURE);
981 }
982
983 /*
984 * Install adapter capabilities
985 */
986 switch (hw->mac.type) {
987 case ixgbe_mac_82598EB:
988 IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
989 ixgbe->capab = &ixgbe_82598eb_cap;
990
991 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
992 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
993 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
994 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
995 }
996 break;
997
998 case ixgbe_mac_82599EB:
999 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
1000 ixgbe->capab = &ixgbe_82599eb_cap;
1001
1002 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
1003 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
1004 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
1005 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
1006 }
1007 break;
1008
1009 case ixgbe_mac_X540:
1010 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
1011 ixgbe->capab = &ixgbe_X540_cap;
1012 /*
1013 * For now, X540 is all set in its capab structure.
1014 * As other X540 variants show up, things can change here.
1015 */
1016 break;
1017
1018 case ixgbe_mac_X550:
1019 case ixgbe_mac_X550EM_x:
1020 IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
1021 ixgbe->capab = &ixgbe_X550_cap;
1022
1023 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
1024 ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;
1025
1026 /*
1027 * Link detection on X552 SFP+ and X552/X557-AT
1028 */
1029 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
1030 hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
1031 ixgbe->capab->other_intr |=
1032 IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
1033 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540;
1034 }
1035 break;
1036
1037 default:
1038 IXGBE_DEBUGLOG_1(ixgbe,
1039 "adapter not supported in ixgbe_identify_hardware(): %d\n",
1040 hw->mac.type);
1041 return (IXGBE_FAILURE);
1042 }
1043
1044 return (IXGBE_SUCCESS);
1045 }
1046
1047 /*
1048 * ixgbe_regs_map - Map the device registers.
1049 *
1050 */
1051 static int
1052 ixgbe_regs_map(ixgbe_t *ixgbe)
1053 {
1054 dev_info_t *devinfo = ixgbe->dip;
1055 struct ixgbe_hw *hw = &ixgbe->hw;
1056 struct ixgbe_osdep *osdep = &ixgbe->osdep;
1057 off_t mem_size;
1058
1059 /*
1060 * First get the size of device registers to be mapped.
1061 */
1062 if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
1063 != DDI_SUCCESS) {
1064 return (IXGBE_FAILURE);
1065 }
1066
1067 /*
1068 * Call ddi_regs_map_setup() to map registers
1069 */
1070 if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
1071 (caddr_t *)&hw->hw_addr, 0,
1072 mem_size, &ixgbe_regs_acc_attr,
1073 &osdep->reg_handle)) != DDI_SUCCESS) {
1074 return (IXGBE_FAILURE);
1075 }
1076
1077 return (IXGBE_SUCCESS);
1078 }
1079
1080 /*
1081 * ixgbe_init_properties - Initialize driver properties.
1082 */
1083 static void
1084 ixgbe_init_properties(ixgbe_t *ixgbe)
1085 {
1086 /*
1087 * Get conf file properties, including link settings
1088 * jumbo frames, ring number, descriptor number, etc.
1089 */
1090 ixgbe_get_conf(ixgbe);
1091 }
1092
1093 /*
1094 * ixgbe_init_driver_settings - Initialize driver settings.
1095 *
1096 * The settings include hardware function pointers, bus information,
1097 * rx/tx rings settings, link state, and any other parameters that
1098 * need to be setup during driver initialization.
1099 */
1100 static int
1101 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
1102 {
1103 struct ixgbe_hw *hw = &ixgbe->hw;
1104 dev_info_t *devinfo = ixgbe->dip;
1105 ixgbe_rx_ring_t *rx_ring;
1106 ixgbe_rx_group_t *rx_group;
1107 ixgbe_tx_ring_t *tx_ring;
1108 uint32_t rx_size;
1109 uint32_t tx_size;
1110 uint32_t ring_per_group;
1111 int i;
1112
1113 /*
1114 * Initialize chipset specific hardware function pointers
1115 */
1116 if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
1117 return (IXGBE_FAILURE);
1118 }
1119
1120 /*
1121 * Get the system page size
1122 */
1123 ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
1124
1125 /*
1126 * Set rx buffer size
1127 *
1128 * The IP header alignment room is counted in the calculation.
1129 * The rx buffer size is in unit of 1K that is required by the
1130 * chipset hardware.
1131 */
1132 rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
1133 ixgbe->rx_buf_size = ((rx_size >> 10) +
1134 ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1135
1136 /*
1137 * Set tx buffer size
1138 */
1139 tx_size = ixgbe->max_frame_size;
1140 ixgbe->tx_buf_size = ((tx_size >> 10) +
1141 ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1142
1143 /*
1144 * Initialize rx/tx rings/groups parameters
1145 */
1146 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
1147 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1148 rx_ring = &ixgbe->rx_rings[i];
1149 rx_ring->index = i;
1150 rx_ring->ixgbe = ixgbe;
1151 rx_ring->group_index = i / ring_per_group;
1152 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
1153 }
1154
1155 for (i = 0; i < ixgbe->num_rx_groups; i++) {
1156 rx_group = &ixgbe->rx_groups[i];
1157 rx_group->index = i;
1158 rx_group->ixgbe = ixgbe;
1159 }
1160
1161 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1162 tx_ring = &ixgbe->tx_rings[i];
1163 tx_ring->index = i;
1164 tx_ring->ixgbe = ixgbe;
1165 if (ixgbe->tx_head_wb_enable)
1166 tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
1167 else
1168 tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
1169
1170 tx_ring->ring_size = ixgbe->tx_ring_size;
1171 tx_ring->free_list_size = ixgbe->tx_ring_size +
1172 (ixgbe->tx_ring_size >> 1);
1173 }
1174
1175 /*
1176 * Initialize values of interrupt throttling rate
1177 */
1178 for (i = 1; i < MAX_INTR_VECTOR; i++)
1179 ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
1180
1181 /*
1182 * The initial link state should be "unknown"
1183 */
1184 ixgbe->link_state = LINK_STATE_UNKNOWN;
1185
1186 return (IXGBE_SUCCESS);
1187 }
1188
1189 /*
1190 * ixgbe_init_locks - Initialize locks.
1191 */
1192 static void
1193 ixgbe_init_locks(ixgbe_t *ixgbe)
1194 {
1195 ixgbe_rx_ring_t *rx_ring;
1196 ixgbe_tx_ring_t *tx_ring;
1197 int i;
1198
1199 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1200 rx_ring = &ixgbe->rx_rings[i];
1201 mutex_init(&rx_ring->rx_lock, NULL,
1202 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1203 }
1204
1205 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1206 tx_ring = &ixgbe->tx_rings[i];
1207 mutex_init(&tx_ring->tx_lock, NULL,
1208 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1209 mutex_init(&tx_ring->recycle_lock, NULL,
1210 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1211 mutex_init(&tx_ring->tcb_head_lock, NULL,
1212 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1213 mutex_init(&tx_ring->tcb_tail_lock, NULL,
1214 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1215 }
1216
1217 mutex_init(&ixgbe->gen_lock, NULL,
1218 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1219
1220 mutex_init(&ixgbe->watchdog_lock, NULL,
1221 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1222 }
1223
1224 /*
1225 * ixgbe_destroy_locks - Destroy locks.
1226 */
1227 static void
1228 ixgbe_destroy_locks(ixgbe_t *ixgbe)
1229 {
1230 ixgbe_rx_ring_t *rx_ring;
1231 ixgbe_tx_ring_t *tx_ring;
1232 int i;
1233
1234 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1235 rx_ring = &ixgbe->rx_rings[i];
1236 mutex_destroy(&rx_ring->rx_lock);
1237 }
1238
1239 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1240 tx_ring = &ixgbe->tx_rings[i];
1241 mutex_destroy(&tx_ring->tx_lock);
1242 mutex_destroy(&tx_ring->recycle_lock);
1243 mutex_destroy(&tx_ring->tcb_head_lock);
1244 mutex_destroy(&tx_ring->tcb_tail_lock);
1245 }
1246
1247 mutex_destroy(&ixgbe->gen_lock);
1248 mutex_destroy(&ixgbe->watchdog_lock);
1249 }
1250
1251 /*
1252 * We need to try and determine which LED index in hardware corresponds to the
1253 * link/activity LED. This is the one that'll be overwritten when we perform
1254 * GLDv3 LED activity.
1255 */
1256 static void
1257 ixgbe_led_init(ixgbe_t *ixgbe)
1258 {
1259 uint32_t reg, i;
1260 struct ixgbe_hw *hw = &ixgbe->hw;
1261
1262 reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1263 for (i = 0; i < 4; i++) {
1264 if (((reg >> IXGBE_LED_MODE_SHIFT(i)) &
1265 IXGBE_LED_MODE_MASK_BASE) == IXGBE_LED_LINK_ACTIVE) {
1266 ixgbe->ixgbe_led_index = i;
1267 return;
1268 }
1269 }
1270
1271 /*
1272 * If we couldn't determine this, we use the default for various MACs
1273 * based on information Intel has inserted into other drivers over the
1274 * years. Note, when we have support for the X553 which should add the
1275 * ixgbe_x550_em_a mac type, that should be at index 0.
1276 */
1277 switch (hw->mac.type) {
1278 case ixgbe_mac_X550EM_x:
1279 ixgbe->ixgbe_led_index = 1;
1280 break;
1281 default:
1282 ixgbe->ixgbe_led_index = 2;
1283 break;
1284 }
1285 }
1286
1287 static int
1288 ixgbe_resume(dev_info_t *devinfo)
1289 {
1290 ixgbe_t *ixgbe;
1291 int i;
1292
1293 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1294 if (ixgbe == NULL)
1295 return (DDI_FAILURE);
1296
1297 mutex_enter(&ixgbe->gen_lock);
1298
1299 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1300 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1301 mutex_exit(&ixgbe->gen_lock);
1302 return (DDI_FAILURE);
1303 }
1304
1305 /*
1306 * Enable and start the watchdog timer
1307 */
1308 ixgbe_enable_watchdog_timer(ixgbe);
1309 }
1310
1311 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);
1312
1313 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1314 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1315 mac_tx_ring_update(ixgbe->mac_hdl,
1316 ixgbe->tx_rings[i].ring_handle);
1317 }
1318 }
1319
1320 mutex_exit(&ixgbe->gen_lock);
1321
1322 return (DDI_SUCCESS);
1323 }
1324
1325 static int
1326 ixgbe_suspend(dev_info_t *devinfo)
1327 {
1328 ixgbe_t *ixgbe;
1329
1330 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1331 if (ixgbe == NULL)
1332 return (DDI_FAILURE);
1333
1334 mutex_enter(&ixgbe->gen_lock);
1335
1336 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
1337 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1338 mutex_exit(&ixgbe->gen_lock);
1339 return (DDI_SUCCESS);
1340 }
1341 ixgbe_stop(ixgbe, B_FALSE);
1342
1343 mutex_exit(&ixgbe->gen_lock);
1344
1345 /*
1346 * Disable and stop the watchdog timer
1347 */
1348 ixgbe_disable_watchdog_timer(ixgbe);
1349
1350 return (DDI_SUCCESS);
1351 }
1352
1353 /*
1354 * ixgbe_init - Initialize the device.
1355 */
1356 static int
1357 ixgbe_init(ixgbe_t *ixgbe)
1358 {
1359 struct ixgbe_hw *hw = &ixgbe->hw;
1360 u8 pbanum[IXGBE_PBANUM_LENGTH];
1361 int rv;
1362
1363 mutex_enter(&ixgbe->gen_lock);
1364
1365 /*
1366 * Configure/Initialize hardware
1367 */
1368 rv = ixgbe_init_hw(hw);
1369 if (rv != IXGBE_SUCCESS) {
1370 switch (rv) {
1371
1372 /*
1373 * The first three errors are not prohibitive to us progressing
1374 * further, and are maily advisory in nature. In the case of a
1375 * SFP module not being present or not deemed supported by the
1376 * common code, we adivse the operator of this fact but carry on
1377 * instead of failing hard, as SFPs can be inserted or replaced
1378 * while the driver is running. In the case of a unknown error,
1379 * we fail-hard, logging the reason and emitting a FMA event.
1380 */
1381 case IXGBE_ERR_EEPROM_VERSION:
1382 ixgbe_error(ixgbe,
1383 "This Intel 10Gb Ethernet device is pre-release and"
1384 " contains outdated firmware. Please contact your"
1385 " hardware vendor for a replacement.");
1386 break;
1387 case IXGBE_ERR_SFP_NOT_PRESENT:
1388 ixgbe_error(ixgbe,
1389 "No SFP+ module detected on this interface. Please "
1390 "install a supported SFP+ module for this "
1391 "interface to become operational.");
1392 break;
1393 case IXGBE_ERR_SFP_NOT_SUPPORTED:
1394 ixgbe_error(ixgbe,
1395 "Unsupported SFP+ module detected. Please replace "
1396 "it with a supported SFP+ module per Intel "
1397 "documentation, or bypass this check with "
1398 "allow_unsupported_sfp=1 in ixgbe.conf.");
1399 break;
1400 default:
1401 ixgbe_error(ixgbe,
1402 "Failed to initialize hardware. ixgbe_init_hw "
1403 "returned %d", rv);
1404 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1405 goto init_fail;
1406 }
1407 }
1408
1409 /*
1410 * Need to init eeprom before validating the checksum.
1411 */
1412 if (ixgbe_init_eeprom_params(hw) < 0) {
1413 ixgbe_error(ixgbe,
1414 "Unable to intitialize the eeprom interface.");
1415 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1416 goto init_fail;
1417 }
1418
1419 /*
1420 * NVM validation
1421 */
1422 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1423 /*
1424 * Some PCI-E parts fail the first check due to
1425 * the link being in sleep state. Call it again,
1426 * if it fails a second time it's a real issue.
1427 */
1428 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1429 ixgbe_error(ixgbe,
1430 "Invalid NVM checksum. Please contact "
1431 "the vendor to update the NVM.");
1432 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1433 goto init_fail;
1434 }
1435 }
1436
1437 /*
1438 * Setup default flow control thresholds - enable/disable
1439 * & flow control type is controlled by ixgbe.conf
1440 */
1441 {
1442 uint32_t rxpb, frame, size, hitmp, lotmp;
1443
1444 frame = ixgbe->max_frame_size;
1445
1446 /* Calculate High and Low Water */
1447 if (hw->mac.type == ixgbe_mac_X540) {
1448 hitmp = IXGBE_DV_X540(frame, frame);
1449 lotmp = IXGBE_LOW_DV_X540(frame);
1450 } else {
1451 hitmp = IXGBE_DV(frame, frame);
1452 lotmp = IXGBE_LOW_DV(frame);
1453 }
1454 size = IXGBE_BT2KB(hitmp);
1455 rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
1456 hw->fc.high_water[0] = rxpb - size;
1457 hw->fc.low_water[0] = IXGBE_BT2KB(lotmp);
1458 }
1459
1460 hw->fc.pause_time = DEFAULT_FCPAUSE;
1461 hw->fc.send_xon = B_TRUE;
1462
1463 /*
1464 * Initialize flow control
1465 */
1466 (void) ixgbe_start_hw(hw);
1467
1468 /*
1469 * Initialize link settings
1470 */
1471 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1472
1473 /*
1474 * Initialize the chipset hardware
1475 */
1476 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1477 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1478 goto init_fail;
1479 }
1480
1481 /*
1482 * Read identifying information and place in devinfo.
1483 */
1484 pbanum[0] = '\0';
1485 (void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
1486 if (*pbanum != '\0') {
1487 (void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
1488 "printed-board-assembly", (char *)pbanum);
1489 }
1490
1491 /*
1492 * Determine LED index.
1493 */
1494 ixgbe_led_init(ixgbe);
1495
1496 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1497 goto init_fail;
1498 }
1499
1500 mutex_exit(&ixgbe->gen_lock);
1501 return (IXGBE_SUCCESS);
1502
1503 init_fail:
1504 /*
1505 * Reset PHY
1506 */
1507 (void) ixgbe_reset_phy(hw);
1508
1509 mutex_exit(&ixgbe->gen_lock);
1510 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1511 return (IXGBE_FAILURE);
1512 }
1513
1514 /*
1515 * ixgbe_chip_start - Initialize and start the chipset hardware.
1516 */
1517 static int
1518 ixgbe_chip_start(ixgbe_t *ixgbe)
1519 {
1520 struct ixgbe_hw *hw = &ixgbe->hw;
1521 int i;
1522
1523 ASSERT(mutex_owned(&ixgbe->gen_lock));
1524
1525 /*
1526 * Get the mac address
1527 * This function should handle SPARC case correctly.
1528 */
1529 if (!ixgbe_find_mac_address(ixgbe)) {
1530 ixgbe_error(ixgbe, "Failed to get the mac address");
1531 return (IXGBE_FAILURE);
1532 }
1533
1534 /*
1535 * Validate the mac address
1536 */
1537 (void) ixgbe_init_rx_addrs(hw);
1538 if (!is_valid_mac_addr(hw->mac.addr)) {
1539 ixgbe_error(ixgbe, "Invalid mac address");
1540 return (IXGBE_FAILURE);
1541 }
1542
1543 /*
1544 * Re-enable relaxed ordering for performance. It is disabled
1545 * by default in the hardware init.
1546 */
1547 if (ixgbe->relax_order_enable == B_TRUE)
1548 ixgbe_enable_relaxed_ordering(hw);
1549
1550 /*
1551 * Setup adapter interrupt vectors
1552 */
1553 ixgbe_setup_adapter_vector(ixgbe);
1554
1555 /*
1556 * Initialize unicast addresses.
1557 */
1558 ixgbe_init_unicst(ixgbe);
1559
1560 /*
1561 * Setup and initialize the mctable structures.
1562 */
1563 ixgbe_setup_multicst(ixgbe);
1564
1565 /*
1566 * Set interrupt throttling rate
1567 */
1568 for (i = 0; i < ixgbe->intr_cnt; i++) {
1569 IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1570 }
1571
1572 /*
1573 * Disable Wake-on-LAN
1574 */
1575 IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);
1576
1577 /*
1578 * Some adapters offer Energy Efficient Ethernet (EEE) support.
1579 * Due to issues with EEE in e1000g/igb, we disable this by default
1580 * as a precautionary measure.
1581 *
1582 * Currently, the only known adapter which supports EEE in the ixgbe
1583 * line is 8086,15AB (IXGBE_DEV_ID_X550EM_X_KR), and only after the
1584 * first revision of it, as well as any X550 with MAC type 6 (non-EM)
1585 */
1586 (void) ixgbe_setup_eee(hw, B_FALSE);
1587
1588 /*
1589 * Turn on any present SFP Tx laser
1590 */
1591 ixgbe_enable_tx_laser(hw);
1592
1593 /*
1594 * Power on the PHY
1595 */
1596 (void) ixgbe_set_phy_power(hw, B_TRUE);
1597
1598 /*
1599 * Save the state of the PHY
1600 */
1601 ixgbe_get_hw_state(ixgbe);
1602
1603 /*
1604 * Make sure driver has control
1605 */
1606 ixgbe_get_driver_control(hw);
1607
1608 return (IXGBE_SUCCESS);
1609 }
1610
1611 /*
1612 * ixgbe_chip_stop - Stop the chipset hardware
1613 */
1614 static void
1615 ixgbe_chip_stop(ixgbe_t *ixgbe)
1616 {
1617 struct ixgbe_hw *hw = &ixgbe->hw;
1618 int rv;
1619
1620 ASSERT(mutex_owned(&ixgbe->gen_lock));
1621
1622 /*
1623 * Stop interupt generation and disable Tx unit
1624 */
1625 hw->adapter_stopped = B_FALSE;
1626 (void) ixgbe_stop_adapter(hw);
1627
1628 /*
1629 * Reset the chipset
1630 */
1631 (void) ixgbe_reset_hw(hw);
1632
1633 /*
1634 * Reset PHY
1635 */
1636 (void) ixgbe_reset_phy(hw);
1637
1638 /*
1639 * Enter LPLU (Low Power, Link Up) mode, if available. Avoid resetting
1640 * the PHY while doing so. Else, just power down the PHY.
1641 */
1642 if (hw->phy.ops.enter_lplu != NULL) {
1643 hw->phy.reset_disable = B_TRUE;
1644 rv = hw->phy.ops.enter_lplu(hw);
1645 if (rv != IXGBE_SUCCESS)
1646 ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv);
1647 hw->phy.reset_disable = B_FALSE;
1648 } else {
1649 (void) ixgbe_set_phy_power(hw, B_FALSE);
1650 }
1651
1652 /*
1653 * Turn off any present SFP Tx laser
1654 * Expected for health and safety reasons
1655 */
1656 ixgbe_disable_tx_laser(hw);
1657
1658 /*
1659 * Tell firmware driver is no longer in control
1660 */
1661 ixgbe_release_driver_control(hw);
1662
1663 }
1664
1665 /*
1666 * ixgbe_reset - Reset the chipset and re-start the driver.
1667 *
1668 * It involves stopping and re-starting the chipset,
1669 * and re-configuring the rx/tx rings.
1670 */
1671 static int
1672 ixgbe_reset(ixgbe_t *ixgbe)
1673 {
1674 int i;
1675
1676 /*
1677 * Disable and stop the watchdog timer
1678 */
1679 ixgbe_disable_watchdog_timer(ixgbe);
1680
1681 mutex_enter(&ixgbe->gen_lock);
1682
1683 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1684 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
1685
1686 ixgbe_stop(ixgbe, B_FALSE);
1687
1688 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1689 mutex_exit(&ixgbe->gen_lock);
1690 return (IXGBE_FAILURE);
1691 }
1692
1693 /*
1694 * After resetting, need to recheck the link status.
1695 */
1696 ixgbe->link_check_complete = B_FALSE;
1697 ixgbe->link_check_hrtime = gethrtime() +
1698 (IXGBE_LINK_UP_TIME * 100000000ULL);
1699
1700 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);
1701
1702 if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
1703 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1704 mac_tx_ring_update(ixgbe->mac_hdl,
1705 ixgbe->tx_rings[i].ring_handle);
1706 }
1707 }
1708
1709 mutex_exit(&ixgbe->gen_lock);
1710
1711 /*
1712 * Enable and start the watchdog timer
1713 */
1714 ixgbe_enable_watchdog_timer(ixgbe);
1715
1716 return (IXGBE_SUCCESS);
1717 }
1718
1719 /*
1720 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1721 */
1722 static void
1723 ixgbe_tx_clean(ixgbe_t *ixgbe)
1724 {
1725 ixgbe_tx_ring_t *tx_ring;
1726 tx_control_block_t *tcb;
1727 link_list_t pending_list;
1728 uint32_t desc_num;
1729 int i, j;
1730
1731 LINK_LIST_INIT(&pending_list);
1732
1733 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1734 tx_ring = &ixgbe->tx_rings[i];
1735
1736 mutex_enter(&tx_ring->recycle_lock);
1737
1738 /*
1739 * Clean the pending tx data - the pending packets in the
1740 * work_list that have no chances to be transmitted again.
1741 *
1742 * We must ensure the chipset is stopped or the link is down
1743 * before cleaning the transmit packets.
1744 */
1745 desc_num = 0;
1746 for (j = 0; j < tx_ring->ring_size; j++) {
1747 tcb = tx_ring->work_list[j];
1748 if (tcb != NULL) {
1749 desc_num += tcb->desc_num;
1750
1751 tx_ring->work_list[j] = NULL;
1752
1753 ixgbe_free_tcb(tcb);
1754
1755 LIST_PUSH_TAIL(&pending_list, &tcb->link);
1756 }
1757 }
1758
1759 if (desc_num > 0) {
1760 atomic_add_32(&tx_ring->tbd_free, desc_num);
1761 ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1762
1763 /*
1764 * Reset the head and tail pointers of the tbd ring;
1765 * Reset the writeback head if it's enable.
1766 */
1767 tx_ring->tbd_head = 0;
1768 tx_ring->tbd_tail = 0;
1769 if (ixgbe->tx_head_wb_enable)
1770 *tx_ring->tbd_head_wb = 0;
1771
1772 IXGBE_WRITE_REG(&ixgbe->hw,
1773 IXGBE_TDH(tx_ring->index), 0);
1774 IXGBE_WRITE_REG(&ixgbe->hw,
1775 IXGBE_TDT(tx_ring->index), 0);
1776 }
1777
1778 mutex_exit(&tx_ring->recycle_lock);
1779
1780 /*
1781 * Add the tx control blocks in the pending list to
1782 * the free list.
1783 */
1784 ixgbe_put_free_list(tx_ring, &pending_list);
1785 }
1786 }
1787
1788 /*
1789 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1790 * transmitted.
1791 */
1792 static boolean_t
1793 ixgbe_tx_drain(ixgbe_t *ixgbe)
1794 {
1795 ixgbe_tx_ring_t *tx_ring;
1796 boolean_t done;
1797 int i, j;
1798
1799 /*
1800 * Wait for a specific time to allow pending tx packets
1801 * to be transmitted.
1802 *
1803 * Check the counter tbd_free to see if transmission is done.
1804 * No lock protection is needed here.
1805 *
1806 * Return B_TRUE if all pending packets have been transmitted;
1807 * Otherwise return B_FALSE;
1808 */
1809 for (i = 0; i < TX_DRAIN_TIME; i++) {
1810
1811 done = B_TRUE;
1812 for (j = 0; j < ixgbe->num_tx_rings; j++) {
1813 tx_ring = &ixgbe->tx_rings[j];
1814 done = done &&
1815 (tx_ring->tbd_free == tx_ring->ring_size);
1816 }
1817
1818 if (done)
1819 break;
1820
1821 msec_delay(1);
1822 }
1823
1824 return (done);
1825 }
1826
1827 /*
1828 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1829 */
1830 static boolean_t
1831 ixgbe_rx_drain(ixgbe_t *ixgbe)
1832 {
1833 boolean_t done = B_TRUE;
1834 int i;
1835
1836 /*
1837 * Polling the rx free list to check if those rx buffers held by
1838 * the upper layer are released.
1839 *
1840 * Check the counter rcb_free to see if all pending buffers are
1841 * released. No lock protection is needed here.
1842 *
1843 * Return B_TRUE if all pending buffers have been released;
1844 * Otherwise return B_FALSE;
1845 */
1846 for (i = 0; i < RX_DRAIN_TIME; i++) {
1847 done = (ixgbe->rcb_pending == 0);
1848
1849 if (done)
1850 break;
1851
1852 msec_delay(1);
1853 }
1854
1855 return (done);
1856 }
1857
1858 /*
1859 * ixgbe_start - Start the driver/chipset.
1860 */
1861 int
1862 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
1863 {
1864 struct ixgbe_hw *hw = &ixgbe->hw;
1865 int i;
1866
1867 ASSERT(mutex_owned(&ixgbe->gen_lock));
1868
1869 if (alloc_buffer) {
1870 if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
1871 ixgbe_error(ixgbe,
1872 "Failed to allocate software receive rings");
1873 return (IXGBE_FAILURE);
1874 }
1875
1876 /* Allocate buffers for all the rx/tx rings */
1877 if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
1878 ixgbe_error(ixgbe, "Failed to allocate DMA resource");
1879 return (IXGBE_FAILURE);
1880 }
1881
1882 ixgbe->tx_ring_init = B_TRUE;
1883 } else {
1884 ixgbe->tx_ring_init = B_FALSE;
1885 }
1886
1887 for (i = 0; i < ixgbe->num_rx_rings; i++)
1888 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1889 for (i = 0; i < ixgbe->num_tx_rings; i++)
1890 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1891
1892 /*
1893 * Start the chipset hardware
1894 */
1895 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1896 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1897 goto start_failure;
1898 }
1899
1900 /*
1901 * Configure link now for X550
1902 *
1903 * X550 possesses a LPLU (Low-Power Link Up) mode which keeps the
1904 * resting state of the adapter at a 1Gb FDX speed. Prior to the X550,
1905 * the resting state of the link would be the maximum speed that
1906 * autonegotiation will allow (usually 10Gb, infrastructure allowing)
1907 * so we never bothered with explicitly setting the link to 10Gb as it
1908 * would already be at that state on driver attach. With X550, we must
1909 * trigger a re-negotiation of the link in order to switch from a LPLU
1910 * 1Gb link to 10Gb (cable and link partner permitting.)
1911 */
1912 if (hw->mac.type == ixgbe_mac_X550 ||
1913 hw->mac.type == ixgbe_mac_X550EM_x) {
1914 (void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
1915 ixgbe_get_hw_state(ixgbe);
1916 }
1917
1918 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1919 goto start_failure;
1920 }
1921
1922 /*
1923 * Setup the rx/tx rings
1924 */
1925 ixgbe_setup_rings(ixgbe);
1926
1927 /*
1928 * ixgbe_start() will be called when resetting, however if reset
1929 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
1930 * before enabling the interrupts.
1931 */
1932 atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
1933 | IXGBE_STALL| IXGBE_OVERTEMP));
1934
1935 /*
1936 * Enable adapter interrupts
1937 * The interrupts must be enabled after the driver state is START
1938 */
1939 ixgbe_enable_adapter_interrupts(ixgbe);
1940
1941 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1942 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1943 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1944 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1945
1946 return (IXGBE_SUCCESS);
1947
1948 start_failure:
1949 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1950 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1951 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1952 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1953
1954 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1955
1956 return (IXGBE_FAILURE);
1957 }
1958
1959 /*
1960 * ixgbe_stop - Stop the driver/chipset.
1961 */
1962 void
1963 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
1964 {
1965 int i;
1966
1967 ASSERT(mutex_owned(&ixgbe->gen_lock));
1968
1969 /*
1970 * Disable the adapter interrupts
1971 */
1972 ixgbe_disable_adapter_interrupts(ixgbe);
1973
1974 /*
1975 * Drain the pending tx packets
1976 */
1977 (void) ixgbe_tx_drain(ixgbe);
1978
1979 for (i = 0; i < ixgbe->num_rx_rings; i++)
1980 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1981 for (i = 0; i < ixgbe->num_tx_rings; i++)
1982 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1983
1984 /*
1985 * Stop the chipset hardware
1986 */
1987 ixgbe_chip_stop(ixgbe);
1988
1989 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1990 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1991 }
1992
1993 /*
1994 * Clean the pending tx data/resources
1995 */
1996 ixgbe_tx_clean(ixgbe);
1997
1998 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1999 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
2000 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
2001 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
2002
2003 if (ixgbe->link_state == LINK_STATE_UP) {
2004 ixgbe->link_state = LINK_STATE_UNKNOWN;
2005 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
2006 }
2007
2008 if (free_buffer) {
2009 /*
2010 * Release the DMA/memory resources of rx/tx rings
2011 */
2012 ixgbe_free_dma(ixgbe);
2013 ixgbe_free_rx_data(ixgbe);
2014 }
2015 }
2016
2017 /*
2018 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
2019 */
2020 /* ARGSUSED */
2021 static int
2022 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
2023 void *arg1, void *arg2)
2024 {
2025 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
2026
2027 switch (cbaction) {
2028 /* IRM callback */
2029 int count;
2030 case DDI_CB_INTR_ADD:
2031 case DDI_CB_INTR_REMOVE:
2032 count = (int)(uintptr_t)cbarg;
2033 ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
2034 DTRACE_PROBE2(ixgbe__irm__callback, int, count,
2035 int, ixgbe->intr_cnt);
2036 if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
2037 DDI_SUCCESS) {
2038 ixgbe_error(ixgbe,
2039 "IRM CB: Failed to adjust interrupts");
2040 goto cb_fail;
2041 }
2042 break;
2043 default:
2044 IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
2045 cbaction);
2046 return (DDI_ENOTSUP);
2047 }
2048 return (DDI_SUCCESS);
2049 cb_fail:
2050 return (DDI_FAILURE);
2051 }
2052
2053 /*
2054 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
2055 */
2056 static int
2057 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
2058 {
2059 int i, rc, actual;
2060 uint32_t started;
2061
2062 if (!(ixgbe->ixgbe_state & IXGBE_INITIALIZED)) {
2063 return (DDI_FAILURE);
2064 }
2065
2066 if (cbaction == DDI_CB_INTR_REMOVE &&
2067 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min)
2068 return (DDI_FAILURE);
2069
2070 if (cbaction == DDI_CB_INTR_ADD &&
2071 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max)
2072 count = ixgbe->intr_cnt_max - ixgbe->intr_cnt;
2073
2074 if (count == 0)
2075 return (DDI_SUCCESS);
2076
2077 for (i = 0; i < ixgbe->num_rx_rings; i++)
2078 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
2079 for (i = 0; i < ixgbe->num_tx_rings; i++)
2080 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
2081
2082 mutex_enter(&ixgbe->gen_lock);
2083 started = ixgbe->ixgbe_state & IXGBE_STARTED;
2084 ixgbe->ixgbe_state &= ~IXGBE_STARTED;
2085 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
2086 ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
2087 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
2088
2089 if (started)
2090 ixgbe_stop(ixgbe, B_FALSE);
2091 /*
2092 * Disable interrupts
2093 */
2094 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
2095 rc = ixgbe_disable_intrs(ixgbe);
2096 ASSERT(rc == IXGBE_SUCCESS);
2097 }
2098 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
2099
2100 /*
2101 * Remove interrupt handlers
2102 */
2103 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
2104 ixgbe_rem_intr_handlers(ixgbe);
2105 }
2106 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
2107
2108 /*
2109 * Clear vect_map
2110 */
2111 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
2112 switch (cbaction) {
2113 case DDI_CB_INTR_ADD:
2114 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
2115 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
2116 DDI_INTR_ALLOC_NORMAL);
2117 if (rc != DDI_SUCCESS || actual != count) {
2118 ixgbe_log(ixgbe, "Adjust interrupts failed."
2119 "return: %d, irm cb size: %d, actual: %d",
2120 rc, count, actual);
2121 goto intr_adjust_fail;
2122 }
2123 ixgbe->intr_cnt += count;
2124 break;
2125
2126 case DDI_CB_INTR_REMOVE:
2127 for (i = ixgbe->intr_cnt - count;
2128 i < ixgbe->intr_cnt; i ++) {
2129 rc = ddi_intr_free(ixgbe->htable[i]);
2130 ixgbe->htable[i] = NULL;
2131 if (rc != DDI_SUCCESS) {
2132 ixgbe_log(ixgbe, "Adjust interrupts failed."
2133 "return: %d, irm cb size: %d, actual: %d",
2134 rc, count, actual);
2135 goto intr_adjust_fail;
2136 }
2137 }
2138 ixgbe->intr_cnt -= count;
2139 break;
2140 }
2141
2142 /*
2143 * Get priority for first vector, assume remaining are all the same
2144 */
2145 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
2146 if (rc != DDI_SUCCESS) {
2147 ixgbe_log(ixgbe,
2148 "Get interrupt priority failed: %d", rc);
2149 goto intr_adjust_fail;
2150 }
2151 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
2152 if (rc != DDI_SUCCESS) {
2153 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
2154 goto intr_adjust_fail;
2155 }
2156 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
2157
2158 /*
2159 * Map rings to interrupt vectors
2160 */
2161 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
2162 ixgbe_error(ixgbe,
2163 "IRM CB: Failed to map interrupts to vectors");
2164 goto intr_adjust_fail;
2165 }
2166
2167 /*
2168 * Add interrupt handlers
2169 */
2170 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
2171 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
2172 goto intr_adjust_fail;
2173 }
2174 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
2175
2176 /*
2177 * Now that mutex locks are initialized, and the chip is also
2178 * initialized, enable interrupts.
2179 */
2180 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
2181 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
2182 goto intr_adjust_fail;
2183 }
2184 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
2185 if (started)
2186 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
2187 ixgbe_error(ixgbe, "IRM CB: Failed to start");
2188 goto intr_adjust_fail;
2189 }
2190 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
2191 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
2192 ixgbe->ixgbe_state |= started;
2193 mutex_exit(&ixgbe->gen_lock);
2194
2195 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2196 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
2197 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
2198 }
2199 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2200 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
2201 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
2202 }
2203
2204 /* Wakeup all Tx rings */
2205 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2206 mac_tx_ring_update(ixgbe->mac_hdl,
2207 ixgbe->tx_rings[i].ring_handle);
2208 }
2209
2210 IXGBE_DEBUGLOG_3(ixgbe,
2211 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
2212 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
2213 return (DDI_SUCCESS);
2214
2215 intr_adjust_fail:
2216 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
2217 mutex_exit(&ixgbe->gen_lock);
2218 return (DDI_FAILURE);
2219 }
2220
2221 /*
2222 * ixgbe_intr_cb_register - Register interrupt callback function.
2223 */
2224 static int
2225 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
2226 {
2227 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
2228 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
2229 return (IXGBE_FAILURE);
2230 }
2231 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
2232 return (IXGBE_SUCCESS);
2233 }
2234
2235 /*
2236 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
2237 */
2238 static int
2239 ixgbe_alloc_rings(ixgbe_t *ixgbe)
2240 {
2241 /*
2242 * Allocate memory space for rx rings
2243 */
2244 ixgbe->rx_rings = kmem_zalloc(
2245 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
2246 KM_NOSLEEP);
2247
2248 if (ixgbe->rx_rings == NULL) {
2249 return (IXGBE_FAILURE);
2250 }
2251
2252 /*
2253 * Allocate memory space for tx rings
2254 */
2255 ixgbe->tx_rings = kmem_zalloc(
2256 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
2257 KM_NOSLEEP);
2258
2259 if (ixgbe->tx_rings == NULL) {
2260 kmem_free(ixgbe->rx_rings,
2261 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2262 ixgbe->rx_rings = NULL;
2263 return (IXGBE_FAILURE);
2264 }
2265
2266 /*
2267 * Allocate memory space for rx ring groups
2268 */
2269 ixgbe->rx_groups = kmem_zalloc(
2270 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
2271 KM_NOSLEEP);
2272
2273 if (ixgbe->rx_groups == NULL) {
2274 kmem_free(ixgbe->rx_rings,
2275 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2276 kmem_free(ixgbe->tx_rings,
2277 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2278 ixgbe->rx_rings = NULL;
2279 ixgbe->tx_rings = NULL;
2280 return (IXGBE_FAILURE);
2281 }
2282
2283 return (IXGBE_SUCCESS);
2284 }
2285
2286 /*
2287 * ixgbe_free_rings - Free the memory space of rx/tx rings.
2288 */
2289 static void
2290 ixgbe_free_rings(ixgbe_t *ixgbe)
2291 {
2292 if (ixgbe->rx_rings != NULL) {
2293 kmem_free(ixgbe->rx_rings,
2294 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2295 ixgbe->rx_rings = NULL;
2296 }
2297
2298 if (ixgbe->tx_rings != NULL) {
2299 kmem_free(ixgbe->tx_rings,
2300 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2301 ixgbe->tx_rings = NULL;
2302 }
2303
2304 if (ixgbe->rx_groups != NULL) {
2305 kmem_free(ixgbe->rx_groups,
2306 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
2307 ixgbe->rx_groups = NULL;
2308 }
2309 }
2310
2311 static int
2312 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
2313 {
2314 ixgbe_rx_ring_t *rx_ring;
2315 int i;
2316
2317 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2318 rx_ring = &ixgbe->rx_rings[i];
2319 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
2320 goto alloc_rx_rings_failure;
2321 }
2322 return (IXGBE_SUCCESS);
2323
2324 alloc_rx_rings_failure:
2325 ixgbe_free_rx_data(ixgbe);
2326 return (IXGBE_FAILURE);
2327 }
2328
2329 static void
2330 ixgbe_free_rx_data(ixgbe_t *ixgbe)
2331 {
2332 ixgbe_rx_ring_t *rx_ring;
2333 ixgbe_rx_data_t *rx_data;
2334 int i;
2335
2336 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2337 rx_ring = &ixgbe->rx_rings[i];
2338
2339 mutex_enter(&ixgbe->rx_pending_lock);
2340 rx_data = rx_ring->rx_data;
2341
2342 if (rx_data != NULL) {
2343 rx_data->flag |= IXGBE_RX_STOPPED;
2344
2345 if (rx_data->rcb_pending == 0) {
2346 ixgbe_free_rx_ring_data(rx_data);
2347 rx_ring->rx_data = NULL;
2348 }
2349 }
2350
2351 mutex_exit(&ixgbe->rx_pending_lock);
2352 }
2353 }
2354
2355 /*
2356 * ixgbe_setup_rings - Setup rx/tx rings.
2357 */
2358 static void
2359 ixgbe_setup_rings(ixgbe_t *ixgbe)
2360 {
2361 /*
2362 * Setup the rx/tx rings, including the following:
2363 *
2364 * 1. Setup the descriptor ring and the control block buffers;
2365 * 2. Initialize necessary registers for receive/transmit;
2366 * 3. Initialize software pointers/parameters for receive/transmit;
2367 */
2368 ixgbe_setup_rx(ixgbe);
2369
2370 ixgbe_setup_tx(ixgbe);
2371 }
2372
2373 static void
2374 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2375 {
2376 ixgbe_t *ixgbe = rx_ring->ixgbe;
2377 ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2378 struct ixgbe_hw *hw = &ixgbe->hw;
2379 rx_control_block_t *rcb;
2380 union ixgbe_adv_rx_desc *rbd;
2381 uint32_t size;
2382 uint32_t buf_low;
2383 uint32_t buf_high;
2384 uint32_t reg_val;
2385 int i;
2386
2387 ASSERT(mutex_owned(&rx_ring->rx_lock));
2388 ASSERT(mutex_owned(&ixgbe->gen_lock));
2389
2390 for (i = 0; i < ixgbe->rx_ring_size; i++) {
2391 rcb = rx_data->work_list[i];
2392 rbd = &rx_data->rbd_ring[i];
2393
2394 rbd->read.pkt_addr = rcb->rx_buf.dma_address;
2395 rbd->read.hdr_addr = NULL;
2396 }
2397
2398 /*
2399 * Initialize the length register
2400 */
2401 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
2402 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);
2403
2404 /*
2405 * Initialize the base address registers
2406 */
2407 buf_low = (uint32_t)rx_data->rbd_area.dma_address;
2408 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
2409 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
2410 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);
2411
2412 /*
2413 * Setup head & tail pointers
2414 */
2415 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2416 rx_data->ring_size - 1);
2417 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2418
2419 rx_data->rbd_next = 0;
2420 rx_data->lro_first = 0;
2421
2422 /*
2423 * Setup the Receive Descriptor Control Register (RXDCTL)
2424 * PTHRESH=32 descriptors (half the internal cache)
2425 * HTHRESH=0 descriptors (to minimize latency on fetch)
2426 * WTHRESH defaults to 1 (writeback each descriptor)
2427 */
2428 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2429 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2430
2431 /* Not a valid value for 82599, X540 or X550 */
2432 if (hw->mac.type == ixgbe_mac_82598EB) {
2433 reg_val |= 0x0020; /* pthresh */
2434 }
2435 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2436
2437 if (hw->mac.type == ixgbe_mac_82599EB ||
2438 hw->mac.type == ixgbe_mac_X540 ||
2439 hw->mac.type == ixgbe_mac_X550 ||
2440 hw->mac.type == ixgbe_mac_X550EM_x) {
2441 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2442 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2443 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2444 }
2445
2446 /*
2447 * Setup the Split and Replication Receive Control Register.
2448 * Set the rx buffer size and the advanced descriptor type.
2449 */
2450 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2451 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2452 reg_val |= IXGBE_SRRCTL_DROP_EN;
2453 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2454 }
2455
2456 static void
2457 ixgbe_setup_rx(ixgbe_t *ixgbe)
2458 {
2459 ixgbe_rx_ring_t *rx_ring;
2460 struct ixgbe_hw *hw = &ixgbe->hw;
2461 uint32_t reg_val;
2462 uint32_t ring_mapping;
2463 uint32_t i, index;
2464 uint32_t psrtype_rss_bit;
2465
2466 /*
2467 * Ensure that Rx is disabled while setting up
2468 * the Rx unit and Rx descriptor ring(s)
2469 */
2470 ixgbe_disable_rx(hw);
2471
2472 /* PSRTYPE must be configured for 82599 */
2473 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2474 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2475 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2476 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2477 reg_val |= IXGBE_PSRTYPE_L2HDR;
2478 reg_val |= 0x80000000;
2479 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2480 } else {
2481 if (ixgbe->num_rx_groups > 32) {
2482 psrtype_rss_bit = 0x20000000;
2483 } else {
2484 psrtype_rss_bit = 0x40000000;
2485 }
2486 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
2487 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2488 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2489 reg_val |= IXGBE_PSRTYPE_L2HDR;
2490 reg_val |= psrtype_rss_bit;
2491 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
2492 }
2493 }
2494
2495 /*
2496 * Set filter control in FCTRL to determine types of packets are passed
2497 * up to the driver.
2498 * - Pass broadcast packets.
2499 * - Do not pass flow control pause frames (82598-specific)
2500 */
2501 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2502 reg_val |= IXGBE_FCTRL_BAM; /* Broadcast Accept Mode */
2503 if (hw->mac.type == ixgbe_mac_82598EB) {
2504 reg_val |= IXGBE_FCTRL_DPF; /* Discard Pause Frames */
2505 }
2506 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
2507
2508 /*
2509 * Hardware checksum settings
2510 */
2511 if (ixgbe->rx_hcksum_enable) {
2512 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2513 reg_val |= IXGBE_RXCSUM_IPPCSE; /* IP checksum */
2514 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
2515 }
2516
2517 /*
2518 * Setup VMDq and RSS for multiple receive queues
2519 */
2520 switch (ixgbe->classify_mode) {
2521 case IXGBE_CLASSIFY_RSS:
2522 /*
2523 * One group, only RSS is needed when more than
2524 * one ring enabled.
2525 */
2526 ixgbe_setup_rss(ixgbe);
2527 break;
2528
2529 case IXGBE_CLASSIFY_VMDQ:
2530 /*
2531 * Multiple groups, each group has one ring,
2532 * only VMDq is needed.
2533 */
2534 ixgbe_setup_vmdq(ixgbe);
2535 break;
2536
2537 case IXGBE_CLASSIFY_VMDQ_RSS:
2538 /*
2539 * Multiple groups and multiple rings, both
2540 * VMDq and RSS are needed.
2541 */
2542 ixgbe_setup_vmdq_rss(ixgbe);
2543 break;
2544
2545 default:
2546 break;
2547 }
2548
2549 /*
2550 * Enable the receive unit. This must be done after filter
2551 * control is set in FCTRL. On 82598, we disable the descriptor monitor.
2552 * 82598 is the only adapter which defines this RXCTRL option.
2553 */
2554 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
2555 if (hw->mac.type == ixgbe_mac_82598EB)
2556 reg_val |= IXGBE_RXCTRL_DMBYPS; /* descriptor monitor bypass */
2557 reg_val |= IXGBE_RXCTRL_RXEN;
2558 (void) ixgbe_enable_rx_dma(hw, reg_val);
2559
2560 /*
2561 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2562 */
2563 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2564 rx_ring = &ixgbe->rx_rings[i];
2565 ixgbe_setup_rx_ring(rx_ring);
2566 }
2567
2568 /*
2569 * Setup the per-ring statistics mapping.
2570 */
2571 ring_mapping = 0;
2572 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2573 index = ixgbe->rx_rings[i].hw_index;
2574 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2575 ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2576 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2577 }
2578
2579 /*
2580 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2581 * by four bytes if the packet has a VLAN field, so includes MTU,
2582 * ethernet header and frame check sequence.
2583 * Register is MAXFRS in 82599.
2584 */
2585 reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD);
2586 reg_val &= ~IXGBE_MHADD_MFS_MASK;
2587 reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header)
2588 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2589 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
2590
2591 /*
2592 * Setup Jumbo Frame enable bit
2593 */
2594 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2595 if (ixgbe->default_mtu > ETHERMTU)
2596 reg_val |= IXGBE_HLREG0_JUMBOEN;
2597 else
2598 reg_val &= ~IXGBE_HLREG0_JUMBOEN;
2599 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2600
2601 /*
2602 * Setup RSC for multiple receive queues.
2603 */
2604 if (ixgbe->lro_enable) {
2605 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2606 /*
2607 * Make sure rx_buf_size * MAXDESC not greater
2608 * than 65535.
2609 * Intel recommends 4 for MAXDESC field value.
2610 */
2611 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2612 reg_val |= IXGBE_RSCCTL_RSCEN;
2613 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2614 reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2615 else
2616 reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2617 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
2618 }
2619
2620 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2621 reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2622 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2623
2624 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2625 reg_val |= IXGBE_RDRXCTL_RSCACKC;
2626 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2627 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2628
2629 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2630 }
2631 }
2632
2633 static void
2634 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2635 {
2636 ixgbe_t *ixgbe = tx_ring->ixgbe;
2637 struct ixgbe_hw *hw = &ixgbe->hw;
2638 uint32_t size;
2639 uint32_t buf_low;
2640 uint32_t buf_high;
2641 uint32_t reg_val;
2642
2643 ASSERT(mutex_owned(&tx_ring->tx_lock));
2644 ASSERT(mutex_owned(&ixgbe->gen_lock));
2645
2646 /*
2647 * Initialize the length register
2648 */
2649 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2650 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
2651
2652 /*
2653 * Initialize the base address registers
2654 */
2655 buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
2656 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
2657 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
2658 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
2659
2660 /*
2661 * Setup head & tail pointers
2662 */
2663 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
2664 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
2665
2666 /*
2667 * Setup head write-back
2668 */
2669 if (ixgbe->tx_head_wb_enable) {
2670 /*
2671 * The memory of the head write-back is allocated using
2672 * the extra tbd beyond the tail of the tbd ring.
2673 */
2674 tx_ring->tbd_head_wb = (uint32_t *)
2675 ((uintptr_t)tx_ring->tbd_area.address + size);
2676 *tx_ring->tbd_head_wb = 0;
2677
2678 buf_low = (uint32_t)
2679 (tx_ring->tbd_area.dma_address + size);
2680 buf_high = (uint32_t)
2681 ((tx_ring->tbd_area.dma_address + size) >> 32);
2682
2683 /* Set the head write-back enable bit */
2684 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2685
2686 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2687 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2688
2689 /*
2690 * Turn off relaxed ordering for head write back or it will
2691 * cause problems with the tx recycling
2692 */
2693
2694 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2695 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2696 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2697 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2698 if (hw->mac.type == ixgbe_mac_82598EB) {
2699 IXGBE_WRITE_REG(hw,
2700 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2701 } else {
2702 IXGBE_WRITE_REG(hw,
2703 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2704 }
2705 } else {
2706 tx_ring->tbd_head_wb = NULL;
2707 }
2708
2709 tx_ring->tbd_head = 0;
2710 tx_ring->tbd_tail = 0;
2711 tx_ring->tbd_free = tx_ring->ring_size;
2712
2713 if (ixgbe->tx_ring_init == B_TRUE) {
2714 tx_ring->tcb_head = 0;
2715 tx_ring->tcb_tail = 0;
2716 tx_ring->tcb_free = tx_ring->free_list_size;
2717 }
2718
2719 /*
2720 * Initialize the s/w context structure
2721 */
2722 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2723 }
2724
2725 static void
2726 ixgbe_setup_tx(ixgbe_t *ixgbe)
2727 {
2728 struct ixgbe_hw *hw = &ixgbe->hw;
2729 ixgbe_tx_ring_t *tx_ring;
2730 uint32_t reg_val;
2731 uint32_t ring_mapping;
2732 int i;
2733
2734 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2735 tx_ring = &ixgbe->tx_rings[i];
2736 ixgbe_setup_tx_ring(tx_ring);
2737 }
2738
2739 /*
2740 * Setup the per-ring statistics mapping.
2741 */
2742 ring_mapping = 0;
2743 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2744 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2745 if ((i & 0x3) == 0x3) {
2746 switch (hw->mac.type) {
2747 case ixgbe_mac_82598EB:
2748 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2749 ring_mapping);
2750 break;
2751
2752 case ixgbe_mac_82599EB:
2753 case ixgbe_mac_X540:
2754 case ixgbe_mac_X550:
2755 case ixgbe_mac_X550EM_x:
2756 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2757 ring_mapping);
2758 break;
2759
2760 default:
2761 break;
2762 }
2763
2764 ring_mapping = 0;
2765 }
2766 }
2767 if (i & 0x3) {
2768 switch (hw->mac.type) {
2769 case ixgbe_mac_82598EB:
2770 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2771 break;
2772
2773 case ixgbe_mac_82599EB:
2774 case ixgbe_mac_X540:
2775 case ixgbe_mac_X550:
2776 case ixgbe_mac_X550EM_x:
2777 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2778 break;
2779
2780 default:
2781 break;
2782 }
2783 }
2784
2785 /*
2786 * Enable CRC appending and TX padding (for short tx frames)
2787 */
2788 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2789 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2790 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2791
2792 /*
2793 * enable DMA for 82599, X540 and X550 parts
2794 */
2795 if (hw->mac.type == ixgbe_mac_82599EB ||
2796 hw->mac.type == ixgbe_mac_X540 ||
2797 hw->mac.type == ixgbe_mac_X550 ||
2798 hw->mac.type == ixgbe_mac_X550EM_x) {
2799 /* DMATXCTL.TE must be set after all Tx config is complete */
2800 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2801 reg_val |= IXGBE_DMATXCTL_TE;
2802 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2803
2804 /* Disable arbiter to set MTQC */
2805 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2806 reg_val |= IXGBE_RTTDCS_ARBDIS;
2807 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2808 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2809 reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2810 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2811 }
2812
2813 /*
2814 * Enabling tx queues ..
2815 * For 82599 must be done after DMATXCTL.TE is set
2816 */
2817 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2818 tx_ring = &ixgbe->tx_rings[i];
2819 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2820 reg_val |= IXGBE_TXDCTL_ENABLE;
2821 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2822 }
2823 }
2824
2825 /*
2826 * ixgbe_setup_rss - Setup receive-side scaling feature.
2827 */
2828 static void
2829 ixgbe_setup_rss(ixgbe_t *ixgbe)
2830 {
2831 struct ixgbe_hw *hw = &ixgbe->hw;
2832 uint32_t mrqc;
2833
2834 /*
2835 * Initialize RETA/ERETA table
2836 */
2837 ixgbe_setup_rss_table(ixgbe);
2838
2839 /*
2840 * Enable RSS & perform hash on these packet types
2841 */
2842 mrqc = IXGBE_MRQC_RSSEN |
2843 IXGBE_MRQC_RSS_FIELD_IPV4 |
2844 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2845 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2846 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2847 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2848 IXGBE_MRQC_RSS_FIELD_IPV6 |
2849 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2850 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2851 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2852 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2853 }
2854
2855 /*
2856 * ixgbe_setup_vmdq - Setup MAC classification feature
2857 */
2858 static void
2859 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2860 {
2861 struct ixgbe_hw *hw = &ixgbe->hw;
2862 uint32_t vmdctl, i, vtctl;
2863
2864 /*
2865 * Setup the VMDq Control register, enable VMDq based on
2866 * packet destination MAC address:
2867 */
2868 switch (hw->mac.type) {
2869 case ixgbe_mac_82598EB:
2870 /*
2871 * VMDq Enable = 1;
2872 * VMDq Filter = 0; MAC filtering
2873 * Default VMDq output index = 0;
2874 */
2875 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2876 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2877 break;
2878
2879 case ixgbe_mac_82599EB:
2880 case ixgbe_mac_X540:
2881 case ixgbe_mac_X550:
2882 case ixgbe_mac_X550EM_x:
2883 /*
2884 * Enable VMDq-only.
2885 */
2886 vmdctl = IXGBE_MRQC_VMDQEN;
2887 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2888
2889 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2890 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2891 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2892 }
2893
2894 /*
2895 * Enable Virtualization and Replication.
2896 */
2897 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2898 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2899
2900 /*
2901 * Enable receiving packets to all VFs
2902 */
2903 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2904 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2905 break;
2906
2907 default:
2908 break;
2909 }
2910 }
2911
2912 /*
2913 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2914 */
2915 static void
2916 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2917 {
2918 struct ixgbe_hw *hw = &ixgbe->hw;
2919 uint32_t i, mrqc;
2920 uint32_t vtctl, vmdctl;
2921
2922 /*
2923 * Initialize RETA/ERETA table
2924 */
2925 ixgbe_setup_rss_table(ixgbe);
2926
2927 /*
2928 * Enable and setup RSS and VMDq
2929 */
2930 switch (hw->mac.type) {
2931 case ixgbe_mac_82598EB:
2932 /*
2933 * Enable RSS & Setup RSS Hash functions
2934 */
2935 mrqc = IXGBE_MRQC_RSSEN |
2936 IXGBE_MRQC_RSS_FIELD_IPV4 |
2937 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2938 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2939 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2940 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2941 IXGBE_MRQC_RSS_FIELD_IPV6 |
2942 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2943 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2944 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2945 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2946
2947 /*
2948 * Enable and Setup VMDq
2949 * VMDq Filter = 0; MAC filtering
2950 * Default VMDq output index = 0;
2951 */
2952 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2953 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2954 break;
2955
2956 case ixgbe_mac_82599EB:
2957 case ixgbe_mac_X540:
2958 case ixgbe_mac_X550:
2959 case ixgbe_mac_X550EM_x:
2960 /*
2961 * Enable RSS & Setup RSS Hash functions
2962 */
2963 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2964 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2965 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2966 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2967 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2968 IXGBE_MRQC_RSS_FIELD_IPV6 |
2969 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2970 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2971 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2972
2973 /*
2974 * Enable VMDq+RSS.
2975 */
2976 if (ixgbe->num_rx_groups > 32) {
2977 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2978 } else {
2979 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2980 }
2981
2982 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2983
2984 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2985 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2986 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2987 }
2988 break;
2989
2990 default:
2991 break;
2992
2993 }
2994
2995 if (hw->mac.type == ixgbe_mac_82599EB ||
2996 hw->mac.type == ixgbe_mac_X540 ||
2997 hw->mac.type == ixgbe_mac_X550 ||
2998 hw->mac.type == ixgbe_mac_X550EM_x) {
2999 /*
3000 * Enable Virtualization and Replication.
3001 */
3002 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
3003 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
3004
3005 /*
3006 * Enable receiving packets to all VFs
3007 */
3008 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
3009 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
3010 }
3011 }
3012
3013 /*
3014 * ixgbe_setup_rss_table - Setup RSS table
3015 */
3016 static void
3017 ixgbe_setup_rss_table(ixgbe_t *ixgbe)
3018 {
3019 struct ixgbe_hw *hw = &ixgbe->hw;
3020 uint32_t i, j;
3021 uint32_t random;
3022 uint32_t reta;
3023 uint32_t ring_per_group;
3024 uint32_t ring;
3025 uint32_t table_size;
3026 uint32_t index_mult;
3027 uint32_t rxcsum;
3028
3029 /*
3030 * Set multiplier for RETA setup and table size based on MAC type.
3031 * RETA table sizes vary by model:
3032 *
3033 * 82598, 82599, X540: 128 table entries.
3034 * X550: 512 table entries.
3035 */
3036 index_mult = 0x1;
3037 table_size = 128;
3038 switch (ixgbe->hw.mac.type) {
3039 case ixgbe_mac_82598EB:
3040 index_mult = 0x11;
3041 break;
3042 case ixgbe_mac_X550:
3043 case ixgbe_mac_X550EM_x:
3044 table_size = 512;
3045 break;
3046 default:
3047 break;
3048 }
3049
3050 /*
3051 * Fill out RSS redirection table. The configuation of the indices is
3052 * hardware-dependent.
3053 *
3054 * 82598: 8 bits wide containing two 4 bit RSS indices
3055 * 82599, X540: 8 bits wide containing one 4 bit RSS index
3056 * X550: 8 bits wide containing one 6 bit RSS index
3057 */
3058 reta = 0;
3059 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3060
3061 for (i = 0, j = 0; i < table_size; i++, j++) {
3062 if (j == ring_per_group) j = 0;
3063
3064 /*
3065 * The low 8 bits are for hash value (n+0);
3066 * The next 8 bits are for hash value (n+1), etc.
3067 */
3068 ring = (j * index_mult);
3069 reta = reta >> 8;
3070 reta = reta | (((uint32_t)ring) << 24);
3071
3072 if ((i & 3) == 3) {
3073 /*
3074 * The first 128 table entries are programmed into the
3075 * RETA register, with any beyond that (eg; on X550)
3076 * into ERETA.
3077 */
3078 if (i < 128)
3079 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3080 else
3081 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3082 reta);
3083 reta = 0;
3084 }
3085 }
3086
3087 /*
3088 * Fill out hash function seeds with a random constant
3089 */
3090 for (i = 0; i < 10; i++) {
3091 (void) random_get_pseudo_bytes((uint8_t *)&random,
3092 sizeof (uint32_t));
3093 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
3094 }
3095
3096 /*
3097 * Disable Packet Checksum to enable RSS for multiple receive queues.
3098 * It is an adapter hardware limitation that Packet Checksum is
3099 * mutually exclusive with RSS.
3100 */
3101 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3102 rxcsum |= IXGBE_RXCSUM_PCSD;
3103 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
3104 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3105 }
3106
3107 /*
3108 * ixgbe_init_unicst - Initialize the unicast addresses.
3109 */
3110 static void
3111 ixgbe_init_unicst(ixgbe_t *ixgbe)
3112 {
3113 struct ixgbe_hw *hw = &ixgbe->hw;
3114 uint8_t *mac_addr;
3115 int slot;
3116 /*
3117 * Here we should consider two situations:
3118 *
3119 * 1. Chipset is initialized at the first time,
3120 * Clear all the multiple unicast addresses.
3121 *
3122 * 2. Chipset is reset
3123 * Recover the multiple unicast addresses from the
3124 * software data structure to the RAR registers.
3125 */
3126 if (!ixgbe->unicst_init) {
3127 /*
3128 * Initialize the multiple unicast addresses
3129 */
3130 ixgbe->unicst_total = hw->mac.num_rar_entries;
3131 ixgbe->unicst_avail = ixgbe->unicst_total;
3132 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3133 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
3134 bzero(mac_addr, ETHERADDRL);
3135 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
3136 ixgbe->unicst_addr[slot].mac.set = 0;
3137 }
3138 ixgbe->unicst_init = B_TRUE;
3139 } else {
3140 /* Re-configure the RAR registers */
3141 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3142 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
3143 if (ixgbe->unicst_addr[slot].mac.set == 1) {
3144 (void) ixgbe_set_rar(hw, slot, mac_addr,
3145 ixgbe->unicst_addr[slot].mac.group_index,
3146 IXGBE_RAH_AV);
3147 } else {
3148 bzero(mac_addr, ETHERADDRL);
3149 (void) ixgbe_set_rar(hw, slot, mac_addr,
3150 NULL, NULL);
3151 }
3152 }
3153 }
3154 }
3155
3156 /*
3157 * ixgbe_unicst_find - Find the slot for the specified unicast address
3158 */
3159 int
3160 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
3161 {
3162 int slot;
3163
3164 ASSERT(mutex_owned(&ixgbe->gen_lock));
3165
3166 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3167 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
3168 mac_addr, ETHERADDRL) == 0)
3169 return (slot);
3170 }
3171
3172 return (-1);
3173 }
3174
3175 /*
3176 * ixgbe_multicst_add - Add a multicst address.
3177 */
3178 int
3179 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3180 {
3181 ASSERT(mutex_owned(&ixgbe->gen_lock));
3182
3183 if ((multiaddr[0] & 01) == 0) {
3184 return (EINVAL);
3185 }
3186
3187 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
3188 return (ENOENT);
3189 }
3190
3191 bcopy(multiaddr,
3192 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
3193 ixgbe->mcast_count++;
3194
3195 /*
3196 * Update the multicast table in the hardware
3197 */
3198 ixgbe_setup_multicst(ixgbe);
3199
3200 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3201 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3202 return (EIO);
3203 }
3204
3205 return (0);
3206 }
3207
3208 /*
3209 * ixgbe_multicst_remove - Remove a multicst address.
3210 */
3211 int
3212 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3213 {
3214 int i;
3215
3216 ASSERT(mutex_owned(&ixgbe->gen_lock));
3217
3218 for (i = 0; i < ixgbe->mcast_count; i++) {
3219 if (bcmp(multiaddr, &ixgbe->mcast_table[i],
3220 ETHERADDRL) == 0) {
3221 for (i++; i < ixgbe->mcast_count; i++) {
3222 ixgbe->mcast_table[i - 1] =
3223 ixgbe->mcast_table[i];
3224 }
3225 ixgbe->mcast_count--;
3226 break;
3227 }
3228 }
3229
3230 /*
3231 * Update the multicast table in the hardware
3232 */
3233 ixgbe_setup_multicst(ixgbe);
3234
3235 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3236 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3237 return (EIO);
3238 }
3239
3240 return (0);
3241 }
3242
3243 /*
3244 * ixgbe_setup_multicast - Setup multicast data structures.
3245 *
3246 * This routine initializes all of the multicast related structures
3247 * and save them in the hardware registers.
3248 */
3249 static void
3250 ixgbe_setup_multicst(ixgbe_t *ixgbe)
3251 {
3252 uint8_t *mc_addr_list;
3253 uint32_t mc_addr_count;
3254 struct ixgbe_hw *hw = &ixgbe->hw;
3255
3256 ASSERT(mutex_owned(&ixgbe->gen_lock));
3257
3258 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
3259
3260 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
3261 mc_addr_count = ixgbe->mcast_count;
3262
3263 /*
3264 * Update the multicast addresses to the MTA registers
3265 */
3266 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
3267 ixgbe_mc_table_itr, TRUE);
3268 }
3269
3270 /*
3271 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
3272 *
3273 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
3274 * Different chipsets may have different allowed configuration of vmdq and rss.
3275 */
3276 static void
3277 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
3278 {
3279 struct ixgbe_hw *hw = &ixgbe->hw;
3280 uint32_t ring_per_group;
3281
3282 switch (hw->mac.type) {
3283 case ixgbe_mac_82598EB:
3284 /*
3285 * 82598 supports the following combination:
3286 * vmdq no. x rss no.
3287 * [5..16] x 1
3288 * [1..4] x [1..16]
3289 * However 8 rss queue per pool (vmdq) is sufficient for
3290 * most cases.
3291 */
3292 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3293 if (ixgbe->num_rx_groups > 4) {
3294 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
3295 } else {
3296 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3297 min(8, ring_per_group);
3298 }
3299
3300 break;
3301
3302 case ixgbe_mac_82599EB:
3303 case ixgbe_mac_X540:
3304 case ixgbe_mac_X550:
3305 case ixgbe_mac_X550EM_x:
3306 /*
3307 * 82599 supports the following combination:
3308 * vmdq no. x rss no.
3309 * [33..64] x [1..2]
3310 * [2..32] x [1..4]
3311 * 1 x [1..16]
3312 * However 8 rss queue per pool (vmdq) is sufficient for
3313 * most cases.
3314 *
3315 * For now, treat X540 and X550 like the 82599.
3316 */
3317 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3318 if (ixgbe->num_rx_groups == 1) {
3319 ixgbe->num_rx_rings = min(8, ring_per_group);
3320 } else if (ixgbe->num_rx_groups <= 32) {
3321 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3322 min(4, ring_per_group);
3323 } else if (ixgbe->num_rx_groups <= 64) {
3324 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3325 min(2, ring_per_group);
3326 }
3327 break;
3328
3329 default:
3330 break;
3331 }
3332
3333 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3334
3335 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3336 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3337 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
3338 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
3339 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
3340 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
3341 } else {
3342 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
3343 }
3344
3345 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
3346 ixgbe->num_rx_groups, ixgbe->num_rx_rings);
3347 }
3348
3349 /*
3350 * ixgbe_get_conf - Get driver configurations set in driver.conf.
3351 *
3352 * This routine gets user-configured values out of the configuration
3353 * file ixgbe.conf.
3354 *
3355 * For each configurable value, there is a minimum, a maximum, and a
3356 * default.
3357 * If user does not configure a value, use the default.
3358 * If user configures below the minimum, use the minumum.
3359 * If user configures above the maximum, use the maxumum.
3360 */
3361 static void
3362 ixgbe_get_conf(ixgbe_t *ixgbe)
3363 {
3364 struct ixgbe_hw *hw = &ixgbe->hw;
3365 uint32_t flow_control;
3366
3367 /*
3368 * ixgbe driver supports the following user configurations:
3369 *
3370 * Jumbo frame configuration:
3371 * default_mtu
3372 *
3373 * Ethernet flow control configuration:
3374 * flow_control
3375 *
3376 * Multiple rings configurations:
3377 * tx_queue_number
3378 * tx_ring_size
3379 * rx_queue_number
3380 * rx_ring_size
3381 *
3382 * Call ixgbe_get_prop() to get the value for a specific
3383 * configuration parameter.
3384 */
3385
3386 /*
3387 * Jumbo frame configuration - max_frame_size controls host buffer
3388 * allocation, so includes MTU, ethernet header, vlan tag and
3389 * frame check sequence.
3390 */
3391 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
3392 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
3393
3394 ixgbe->max_frame_size = ixgbe->default_mtu +
3395 sizeof (struct ether_vlan_header) + ETHERFCSL;
3396
3397 /*
3398 * Ethernet flow control configuration
3399 */
3400 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
3401 ixgbe_fc_none, 3, ixgbe_fc_none);
3402 if (flow_control == 3)
3403 flow_control = ixgbe_fc_default;
3404
3405 /*
3406 * fc.requested mode is what the user requests. After autoneg,
3407 * fc.current_mode will be the flow_control mode that was negotiated.
3408 */
3409 hw->fc.requested_mode = flow_control;
3410
3411 /*
3412 * Multiple rings configurations
3413 */
3414 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
3415 ixgbe->capab->min_tx_que_num,
3416 ixgbe->capab->max_tx_que_num,
3417 ixgbe->capab->def_tx_que_num);
3418 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
3419 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
3420
3421 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
3422 ixgbe->capab->min_rx_que_num,
3423 ixgbe->capab->max_rx_que_num,
3424 ixgbe->capab->def_rx_que_num);
3425 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
3426 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
3427
3428 /*
3429 * Multiple groups configuration
3430 */
3431 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3432 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3433 ixgbe->capab->def_rx_grp_num);
3434
3435 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3436 0, 1, DEFAULT_MR_ENABLE);
3437
3438 if (ixgbe->mr_enable == B_FALSE) {
3439 ixgbe->num_tx_rings = 1;
3440 ixgbe->num_rx_rings = 1;
3441 ixgbe->num_rx_groups = 1;
3442 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3443 } else {
3444 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3445 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3446 /*
3447 * The combination of num_rx_rings and num_rx_groups
3448 * may be not supported by h/w. We need to adjust
3449 * them to appropriate values.
3450 */
3451 ixgbe_setup_vmdq_rss_conf(ixgbe);
3452 }
3453
3454 /*
3455 * Tunable used to force an interrupt type. The only use is
3456 * for testing of the lesser interrupt types.
3457 * 0 = don't force interrupt type
3458 * 1 = force interrupt type MSI-X
3459 * 2 = force interrupt type MSI
3460 * 3 = force interrupt type Legacy
3461 */
3462 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3463 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3464
3465 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3466 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3467 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3468 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3469 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3470 0, 1, DEFAULT_LSO_ENABLE);
3471 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3472 0, 1, DEFAULT_LRO_ENABLE);
3473 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3474 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3475 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3476 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3477
3478 /* Head Write Back not recommended for 82599, X540 and X550 */
3479 if (hw->mac.type == ixgbe_mac_82599EB ||
3480 hw->mac.type == ixgbe_mac_X540 ||
3481 hw->mac.type == ixgbe_mac_X550 ||
3482 hw->mac.type == ixgbe_mac_X550EM_x) {
3483 ixgbe->tx_head_wb_enable = B_FALSE;
3484 }
3485
3486 /*
3487 * ixgbe LSO needs the tx h/w checksum support.
3488 * LSO will be disabled if tx h/w checksum is not
3489 * enabled.
3490 */
3491 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3492 ixgbe->lso_enable = B_FALSE;
3493 }
3494
3495 /*
3496 * ixgbe LRO needs the rx h/w checksum support.
3497 * LRO will be disabled if rx h/w checksum is not
3498 * enabled.
3499 */
3500 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3501 ixgbe->lro_enable = B_FALSE;
3502 }
3503
3504 /*
3505 * ixgbe LRO only supported by 82599, X540 and X550
3506 */
3507 if (hw->mac.type == ixgbe_mac_82598EB) {
3508 ixgbe->lro_enable = B_FALSE;
3509 }
3510 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3511 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3512 DEFAULT_TX_COPY_THRESHOLD);
3513 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3514 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3515 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3516 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3517 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3518 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3519 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3520 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3521 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3522
3523 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3524 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3525 DEFAULT_RX_COPY_THRESHOLD);
3526 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3527 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3528 DEFAULT_RX_LIMIT_PER_INTR);
3529
3530 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3531 ixgbe->capab->min_intr_throttle,
3532 ixgbe->capab->max_intr_throttle,
3533 ixgbe->capab->def_intr_throttle);
3534 /*
3535 * 82599, X540 and X550 require the interrupt throttling rate is
3536 * a multiple of 8. This is enforced by the register definiton.
3537 */
3538 if (hw->mac.type == ixgbe_mac_82599EB ||
3539 hw->mac.type == ixgbe_mac_X540 ||
3540 hw->mac.type == ixgbe_mac_X550 ||
3541 hw->mac.type == ixgbe_mac_X550EM_x)
3542 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3543
3544 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe,
3545 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP);
3546 }
3547
3548 static void
3549 ixgbe_init_params(ixgbe_t *ixgbe)
3550 {
3551 struct ixgbe_hw *hw = &ixgbe->hw;
3552 ixgbe_link_speed speeds_supported = 0;
3553 boolean_t negotiate;
3554
3555 /*
3556 * Get a list of speeds the adapter supports. If the hw struct hasn't
3557 * been populated with this information yet, retrieve it from the
3558 * adapter and save it to our own variable.
3559 *
3560 * On certain adapters, such as ones which use SFPs, the contents of
3561 * hw->phy.speeds_supported (and hw->phy.autoneg_advertised) are not
3562 * updated, so we must rely on calling ixgbe_get_link_capabilities()
3563 * in order to ascertain the speeds which we are capable of supporting,
3564 * and in the case of SFP-equipped adapters, which speed we are
3565 * advertising. If ixgbe_get_link_capabilities() fails for some reason,
3566 * we'll go with a default list of speeds as a last resort.
3567 */
3568 speeds_supported = hw->phy.speeds_supported;
3569
3570 if (speeds_supported == 0) {
3571 if (ixgbe_get_link_capabilities(hw, &speeds_supported,
3572 &negotiate) != IXGBE_SUCCESS) {
3573 if (hw->mac.type == ixgbe_mac_82598EB) {
3574 speeds_supported =
3575 IXGBE_LINK_SPEED_82598_AUTONEG;
3576 } else {
3577 speeds_supported =
3578 IXGBE_LINK_SPEED_82599_AUTONEG;
3579 }
3580 }
3581 }
3582 ixgbe->speeds_supported = speeds_supported;
3583
3584 /*
3585 * By default, all supported speeds are enabled and advertised.
3586 */
3587 if (speeds_supported & IXGBE_LINK_SPEED_10GB_FULL) {
3588 ixgbe->param_en_10000fdx_cap = 1;
3589 ixgbe->param_adv_10000fdx_cap = 1;
3590 } else {
3591 ixgbe->param_en_10000fdx_cap = 0;
3592 ixgbe->param_adv_10000fdx_cap = 0;
3593 }
3594
3595 if (speeds_supported & IXGBE_LINK_SPEED_5GB_FULL) {
3596 ixgbe->param_en_5000fdx_cap = 1;
3597 ixgbe->param_adv_5000fdx_cap = 1;
3598 } else {
3599 ixgbe->param_en_5000fdx_cap = 0;
3600 ixgbe->param_adv_5000fdx_cap = 0;
3601 }
3602
3603 if (speeds_supported & IXGBE_LINK_SPEED_2_5GB_FULL) {
3604 ixgbe->param_en_2500fdx_cap = 1;
3605 ixgbe->param_adv_2500fdx_cap = 1;
3606 } else {
3607 ixgbe->param_en_2500fdx_cap = 0;
3608 ixgbe->param_adv_2500fdx_cap = 0;
3609 }
3610
3611 if (speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) {
3612 ixgbe->param_en_1000fdx_cap = 1;
3613 ixgbe->param_adv_1000fdx_cap = 1;
3614 } else {
3615 ixgbe->param_en_1000fdx_cap = 0;
3616 ixgbe->param_adv_1000fdx_cap = 0;
3617 }
3618
3619 if (speeds_supported & IXGBE_LINK_SPEED_100_FULL) {
3620 ixgbe->param_en_100fdx_cap = 1;
3621 ixgbe->param_adv_100fdx_cap = 1;
3622 } else {
3623 ixgbe->param_en_100fdx_cap = 0;
3624 ixgbe->param_adv_100fdx_cap = 0;
3625 }
3626
3627 ixgbe->param_pause_cap = 1;
3628 ixgbe->param_asym_pause_cap = 1;
3629 ixgbe->param_rem_fault = 0;
3630
3631 ixgbe->param_adv_autoneg_cap = 1;
3632 ixgbe->param_adv_pause_cap = 1;
3633 ixgbe->param_adv_asym_pause_cap = 1;
3634 ixgbe->param_adv_rem_fault = 0;
3635
3636 ixgbe->param_lp_10000fdx_cap = 0;
3637 ixgbe->param_lp_5000fdx_cap = 0;
3638 ixgbe->param_lp_2500fdx_cap = 0;
3639 ixgbe->param_lp_1000fdx_cap = 0;
3640 ixgbe->param_lp_100fdx_cap = 0;
3641 ixgbe->param_lp_autoneg_cap = 0;
3642 ixgbe->param_lp_pause_cap = 0;
3643 ixgbe->param_lp_asym_pause_cap = 0;
3644 ixgbe->param_lp_rem_fault = 0;
3645 }
3646
3647 /*
3648 * ixgbe_get_prop - Get a property value out of the configuration file
3649 * ixgbe.conf.
3650 *
3651 * Caller provides the name of the property, a default value, a minimum
3652 * value, and a maximum value.
3653 *
3654 * Return configured value of the property, with default, minimum and
3655 * maximum properly applied.
3656 */
3657 static int
3658 ixgbe_get_prop(ixgbe_t *ixgbe,
3659 char *propname, /* name of the property */
3660 int minval, /* minimum acceptable value */
3661 int maxval, /* maximim acceptable value */
3662 int defval) /* default value */
3663 {
3664 int value;
3665
3666 /*
3667 * Call ddi_prop_get_int() to read the conf settings
3668 */
3669 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3670 DDI_PROP_DONTPASS, propname, defval);
3671 if (value > maxval)
3672 value = maxval;
3673
3674 if (value < minval)
3675 value = minval;
3676
3677 return (value);
3678 }
3679
3680 /*
3681 * ixgbe_driver_setup_link - Using the link properties to setup the link.
3682 */
3683 int
3684 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3685 {
3686 struct ixgbe_hw *hw = &ixgbe->hw;
3687 ixgbe_link_speed advertised = 0;
3688
3689 /*
3690 * Assemble a list of enabled speeds to auto-negotiate with.
3691 */
3692 if (ixgbe->param_en_10000fdx_cap == 1)
3693 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3694
3695 if (ixgbe->param_en_5000fdx_cap == 1)
3696 advertised |= IXGBE_LINK_SPEED_5GB_FULL;
3697
3698 if (ixgbe->param_en_2500fdx_cap == 1)
3699 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
3700
3701 if (ixgbe->param_en_1000fdx_cap == 1)
3702 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3703
3704 if (ixgbe->param_en_100fdx_cap == 1)
3705 advertised |= IXGBE_LINK_SPEED_100_FULL;
3706
3707 /*
3708 * As a last resort, autoneg with a default list of speeds.
3709 */
3710 if (ixgbe->param_adv_autoneg_cap == 1 && advertised == 0) {
3711 ixgbe_notice(ixgbe, "Invalid link settings. Setting link "
3712 "to autonegotiate with full capabilities.");
3713
3714 if (hw->mac.type == ixgbe_mac_82598EB)
3715 advertised = IXGBE_LINK_SPEED_82598_AUTONEG;
3716 else
3717 advertised = IXGBE_LINK_SPEED_82599_AUTONEG;
3718 }
3719
3720 if (setup_hw) {
3721 if (ixgbe_setup_link(&ixgbe->hw, advertised,
3722 ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) {
3723 ixgbe_notice(ixgbe, "Setup link failed on this "
3724 "device.");
3725 return (IXGBE_FAILURE);
3726 }
3727 }
3728
3729 return (IXGBE_SUCCESS);
3730 }
3731
3732 /*
3733 * ixgbe_driver_link_check - Link status processing.
3734 *
3735 * This function can be called in both kernel context and interrupt context
3736 */
3737 static void
3738 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3739 {
3740 struct ixgbe_hw *hw = &ixgbe->hw;
3741 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3742 boolean_t link_up = B_FALSE;
3743 boolean_t link_changed = B_FALSE;
3744
3745 ASSERT(mutex_owned(&ixgbe->gen_lock));
3746
3747 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
3748 if (link_up) {
3749 ixgbe->link_check_complete = B_TRUE;
3750
3751 /*
3752 * The Link is up, check whether it was marked as down earlier
3753 */
3754 if (ixgbe->link_state != LINK_STATE_UP) {
3755
3756 /* Link is up, enable flow control settings */
3757 (void) ixgbe_fc_enable(hw);
3758
3759 switch (speed) {
3760 case IXGBE_LINK_SPEED_10GB_FULL:
3761 ixgbe->link_speed = SPEED_10GB;
3762 break;
3763 case IXGBE_LINK_SPEED_5GB_FULL:
3764 ixgbe->link_speed = SPEED_5GB;
3765 break;
3766 case IXGBE_LINK_SPEED_2_5GB_FULL:
3767 ixgbe->link_speed = SPEED_2_5GB;
3768 break;
3769 case IXGBE_LINK_SPEED_1GB_FULL:
3770 ixgbe->link_speed = SPEED_1GB;
3771 break;
3772 case IXGBE_LINK_SPEED_100_FULL:
3773 ixgbe->link_speed = SPEED_100;
3774 }
3775 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3776 ixgbe->link_state = LINK_STATE_UP;
3777 link_changed = B_TRUE;
3778 }
3779 } else {
3780 if (ixgbe->link_check_complete == B_TRUE ||
3781 (ixgbe->link_check_complete == B_FALSE &&
3782 gethrtime() >= ixgbe->link_check_hrtime)) {
3783 /*
3784 * The link is really down
3785 */
3786 ixgbe->link_check_complete = B_TRUE;
3787
3788 if (ixgbe->link_state != LINK_STATE_DOWN) {
3789 ixgbe->link_speed = 0;
3790 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
3791 ixgbe->link_state = LINK_STATE_DOWN;
3792 link_changed = B_TRUE;
3793 }
3794 }
3795 }
3796
3797 /*
3798 * If we are in an interrupt context, need to re-enable the
3799 * interrupt, which was automasked
3800 */
3801 if (servicing_interrupt() != 0) {
3802 ixgbe->eims |= IXGBE_EICR_LSC;
3803 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3804 }
3805
3806 if (link_changed) {
3807 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3808 }
3809 }
3810
3811 /*
3812 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
3813 */
3814 static void
3815 ixgbe_sfp_check(void *arg)
3816 {
3817 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3818 uint32_t eicr = ixgbe->eicr;
3819 struct ixgbe_hw *hw = &ixgbe->hw;
3820
3821 mutex_enter(&ixgbe->gen_lock);
3822 (void) hw->phy.ops.identify_sfp(hw);
3823 if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
3824 /* clear the interrupt */
3825 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
3826
3827 /* if link up, do multispeed fiber setup */
3828 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3829 B_TRUE);
3830 ixgbe_driver_link_check(ixgbe);
3831 ixgbe_get_hw_state(ixgbe);
3832 } else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) {
3833 /* clear the interrupt */
3834 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw));
3835
3836 /* if link up, do sfp module setup */
3837 (void) hw->mac.ops.setup_sfp(hw);
3838
3839 /* do multispeed fiber setup */
3840 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3841 B_TRUE);
3842 ixgbe_driver_link_check(ixgbe);
3843 ixgbe_get_hw_state(ixgbe);
3844 }
3845 mutex_exit(&ixgbe->gen_lock);
3846
3847 /*
3848 * We need to fully re-check the link later.
3849 */
3850 ixgbe->link_check_complete = B_FALSE;
3851 ixgbe->link_check_hrtime = gethrtime() +
3852 (IXGBE_LINK_UP_TIME * 100000000ULL);
3853 }
3854
3855 /*
3856 * ixgbe_overtemp_check - overtemp module processing done in taskq
3857 *
3858 * This routine will only be called on adapters with temperature sensor.
3859 * The indication of over-temperature can be either SDP0 interrupt or the link
3860 * status change interrupt.
3861 */
3862 static void
3863 ixgbe_overtemp_check(void *arg)
3864 {
3865 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3866 struct ixgbe_hw *hw = &ixgbe->hw;
3867 uint32_t eicr = ixgbe->eicr;
3868 ixgbe_link_speed speed;
3869 boolean_t link_up;
3870
3871 mutex_enter(&ixgbe->gen_lock);
3872
3873 /* make sure we know current state of link */
3874 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
3875
3876 /* check over-temp condition */
3877 if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) ||
3878 (eicr & IXGBE_EICR_LSC)) {
3879 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3880 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3881
3882 /*
3883 * Disable the adapter interrupts
3884 */
3885 ixgbe_disable_adapter_interrupts(ixgbe);
3886
3887 /*
3888 * Disable Rx/Tx units
3889 */
3890 (void) ixgbe_stop_adapter(hw);
3891
3892 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3893 ixgbe_error(ixgbe,
3894 "Problem: Network adapter has been stopped "
3895 "because it has overheated");
3896 ixgbe_error(ixgbe,
3897 "Action: Restart the computer. "
3898 "If the problem persists, power off the system "
3899 "and replace the adapter");
3900 }
3901 }
3902
3903 /* write to clear the interrupt */
3904 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3905
3906 mutex_exit(&ixgbe->gen_lock);
3907 }
3908
3909 /*
3910 * ixgbe_phy_check - taskq to process interrupts from an external PHY
3911 *
3912 * This routine will only be called on adapters with external PHYs
3913 * (such as X550) that may be trying to raise our attention to some event.
3914 * Currently, this is limited to claiming PHY overtemperature and link status
3915 * change (LSC) events, however this may expand to include other things in
3916 * future adapters.
3917 */
3918 static void
3919 ixgbe_phy_check(void *arg)
3920 {
3921 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3922 struct ixgbe_hw *hw = &ixgbe->hw;
3923 int rv;
3924
3925 mutex_enter(&ixgbe->gen_lock);
3926
3927 /*
3928 * X550 baseT PHY overtemp and LSC events are handled here.
3929 *
3930 * If an overtemp event occurs, it will be reflected in the
3931 * return value of phy.ops.handle_lasi() and the common code will
3932 * automatically power off the baseT PHY. This is our cue to trigger
3933 * an FMA event.
3934 *
3935 * If a link status change event occurs, phy.ops.handle_lasi() will
3936 * automatically initiate a link setup between the integrated KR PHY
3937 * and the external X557 PHY to ensure that the link speed between
3938 * them matches the link speed of the baseT link.
3939 */
3940 rv = ixgbe_handle_lasi(hw);
3941
3942 if (rv == IXGBE_ERR_OVERTEMP) {
3943 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3944
3945 /*
3946 * Disable the adapter interrupts
3947 */
3948 ixgbe_disable_adapter_interrupts(ixgbe);
3949
3950 /*
3951 * Disable Rx/Tx units
3952 */
3953 (void) ixgbe_stop_adapter(hw);
3954
3955 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3956 ixgbe_error(ixgbe,
3957 "Problem: Network adapter has been stopped due to a "
3958 "overtemperature event being detected.");
3959 ixgbe_error(ixgbe,
3960 "Action: Shut down or restart the computer. If the issue "
3961 "persists, please take action in accordance with the "
3962 "recommendations from your system vendor.");
3963 }
3964
3965 mutex_exit(&ixgbe->gen_lock);
3966 }
3967
3968 /*
3969 * ixgbe_link_timer - timer for link status detection
3970 */
3971 static void
3972 ixgbe_link_timer(void *arg)
3973 {
3974 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3975
3976 mutex_enter(&ixgbe->gen_lock);
3977 ixgbe_driver_link_check(ixgbe);
3978 mutex_exit(&ixgbe->gen_lock);
3979 }
3980
3981 /*
3982 * ixgbe_local_timer - Driver watchdog function.
3983 *
3984 * This function will handle the transmit stall check and other routines.
3985 */
3986 static void
3987 ixgbe_local_timer(void *arg)
3988 {
3989 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3990
3991 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3992 goto out;
3993
3994 if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3995 ixgbe->reset_count++;
3996 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3997 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3998 goto out;
3999 }
4000
4001 if (ixgbe_stall_check(ixgbe)) {
4002 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
4003 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4004
4005 ixgbe->reset_count++;
4006 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
4007 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
4008 }
4009
4010 out:
4011 ixgbe_restart_watchdog_timer(ixgbe);
4012 }
4013
4014 /*
4015 * ixgbe_stall_check - Check for transmit stall.
4016 *
4017 * This function checks if the adapter is stalled (in transmit).
4018 *
4019 * It is called each time the watchdog timeout is invoked.
4020 * If the transmit descriptor reclaim continuously fails,
4021 * the watchdog value will increment by 1. If the watchdog
4022 * value exceeds the threshold, the ixgbe is assumed to
4023 * have stalled and need to be reset.
4024 */
4025 static boolean_t
4026 ixgbe_stall_check(ixgbe_t *ixgbe)
4027 {
4028 ixgbe_tx_ring_t *tx_ring;
4029 boolean_t result;
4030 int i;
4031
4032 if (ixgbe->link_state != LINK_STATE_UP)
4033 return (B_FALSE);
4034
4035 /*
4036 * If any tx ring is stalled, we'll reset the chipset
4037 */
4038 result = B_FALSE;
4039 for (i = 0; i < ixgbe->num_tx_rings; i++) {
4040 tx_ring = &ixgbe->tx_rings[i];
4041 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
4042 tx_ring->tx_recycle(tx_ring);
4043 }
4044
4045 if (tx_ring->recycle_fail > 0)
4046 tx_ring->stall_watchdog++;
4047 else
4048 tx_ring->stall_watchdog = 0;
4049
4050 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
4051 result = B_TRUE;
4052 break;
4053 }
4054 }
4055
4056 if (result) {
4057 tx_ring->stall_watchdog = 0;
4058 tx_ring->recycle_fail = 0;
4059 }
4060
4061 return (result);
4062 }
4063
4064
4065 /*
4066 * is_valid_mac_addr - Check if the mac address is valid.
4067 */
4068 static boolean_t
4069 is_valid_mac_addr(uint8_t *mac_addr)
4070 {
4071 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
4072 const uint8_t addr_test2[6] =
4073 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4074
4075 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
4076 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
4077 return (B_FALSE);
4078
4079 return (B_TRUE);
4080 }
4081
4082 static boolean_t
4083 ixgbe_find_mac_address(ixgbe_t *ixgbe)
4084 {
4085 #ifdef __sparc
4086 struct ixgbe_hw *hw = &ixgbe->hw;
4087 uchar_t *bytes;
4088 struct ether_addr sysaddr;
4089 uint_t nelts;
4090 int err;
4091 boolean_t found = B_FALSE;
4092
4093 /*
4094 * The "vendor's factory-set address" may already have
4095 * been extracted from the chip, but if the property
4096 * "local-mac-address" is set we use that instead.
4097 *
4098 * We check whether it looks like an array of 6
4099 * bytes (which it should, if OBP set it). If we can't
4100 * make sense of it this way, we'll ignore it.
4101 */
4102 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
4103 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
4104 if (err == DDI_PROP_SUCCESS) {
4105 if (nelts == ETHERADDRL) {
4106 while (nelts--)
4107 hw->mac.addr[nelts] = bytes[nelts];
4108 found = B_TRUE;
4109 }
4110 ddi_prop_free(bytes);
4111 }
4112
4113 /*
4114 * Look up the OBP property "local-mac-address?". If the user has set
4115 * 'local-mac-address? = false', use "the system address" instead.
4116 */
4117 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
4118 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
4119 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
4120 if (localetheraddr(NULL, &sysaddr) != 0) {
4121 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
4122 found = B_TRUE;
4123 }
4124 }
4125 ddi_prop_free(bytes);
4126 }
4127
4128 /*
4129 * Finally(!), if there's a valid "mac-address" property (created
4130 * if we netbooted from this interface), we must use this instead
4131 * of any of the above to ensure that the NFS/install server doesn't
4132 * get confused by the address changing as illumos takes over!
4133 */
4134 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
4135 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
4136 if (err == DDI_PROP_SUCCESS) {
4137 if (nelts == ETHERADDRL) {
4138 while (nelts--)
4139 hw->mac.addr[nelts] = bytes[nelts];
4140 found = B_TRUE;
4141 }
4142 ddi_prop_free(bytes);
4143 }
4144
4145 if (found) {
4146 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
4147 return (B_TRUE);
4148 }
4149 #else
4150 _NOTE(ARGUNUSED(ixgbe));
4151 #endif
4152
4153 return (B_TRUE);
4154 }
4155
4156 #pragma inline(ixgbe_arm_watchdog_timer)
4157 static void
4158 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
4159 {
4160 /*
4161 * Fire a watchdog timer
4162 */
4163 ixgbe->watchdog_tid =
4164 timeout(ixgbe_local_timer,
4165 (void *)ixgbe, 1 * drv_usectohz(1000000));
4166
4167 }
4168
4169 /*
4170 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
4171 */
4172 void
4173 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
4174 {
4175 mutex_enter(&ixgbe->watchdog_lock);
4176
4177 if (!ixgbe->watchdog_enable) {
4178 ixgbe->watchdog_enable = B_TRUE;
4179 ixgbe->watchdog_start = B_TRUE;
4180 ixgbe_arm_watchdog_timer(ixgbe);
4181 }
4182
4183 mutex_exit(&ixgbe->watchdog_lock);
4184 }
4185
4186 /*
4187 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
4188 */
4189 void
4190 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
4191 {
4192 timeout_id_t tid;
4193
4194 mutex_enter(&ixgbe->watchdog_lock);
4195
4196 ixgbe->watchdog_enable = B_FALSE;
4197 ixgbe->watchdog_start = B_FALSE;
4198 tid = ixgbe->watchdog_tid;
4199 ixgbe->watchdog_tid = 0;
4200
4201 mutex_exit(&ixgbe->watchdog_lock);
4202
4203 if (tid != 0)
4204 (void) untimeout(tid);
4205 }
4206
4207 /*
4208 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
4209 */
4210 void
4211 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
4212 {
4213 mutex_enter(&ixgbe->watchdog_lock);
4214
4215 if (ixgbe->watchdog_enable) {
4216 if (!ixgbe->watchdog_start) {
4217 ixgbe->watchdog_start = B_TRUE;
4218 ixgbe_arm_watchdog_timer(ixgbe);
4219 }
4220 }
4221
4222 mutex_exit(&ixgbe->watchdog_lock);
4223 }
4224
4225 /*
4226 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
4227 */
4228 static void
4229 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
4230 {
4231 mutex_enter(&ixgbe->watchdog_lock);
4232
4233 if (ixgbe->watchdog_start)
4234 ixgbe_arm_watchdog_timer(ixgbe);
4235
4236 mutex_exit(&ixgbe->watchdog_lock);
4237 }
4238
4239 /*
4240 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
4241 */
4242 void
4243 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
4244 {
4245 timeout_id_t tid;
4246
4247 mutex_enter(&ixgbe->watchdog_lock);
4248
4249 ixgbe->watchdog_start = B_FALSE;
4250 tid = ixgbe->watchdog_tid;
4251 ixgbe->watchdog_tid = 0;
4252
4253 mutex_exit(&ixgbe->watchdog_lock);
4254
4255 if (tid != 0)
4256 (void) untimeout(tid);
4257 }
4258
4259 /*
4260 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
4261 */
4262 static void
4263 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
4264 {
4265 struct ixgbe_hw *hw = &ixgbe->hw;
4266
4267 /*
4268 * mask all interrupts off
4269 */
4270 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
4271
4272 /*
4273 * for MSI-X, also disable autoclear
4274 */
4275 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
4276 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
4277 }
4278
4279 IXGBE_WRITE_FLUSH(hw);
4280 }
4281
4282 /*
4283 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
4284 */
4285 static void
4286 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
4287 {
4288 struct ixgbe_hw *hw = &ixgbe->hw;
4289 uint32_t eiac, eiam;
4290 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
4291
4292 /* interrupt types to enable */
4293 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */
4294 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */
4295 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
4296
4297 /* enable automask on "other" causes that this adapter can generate */
4298 eiam = ixgbe->capab->other_intr;
4299
4300 /*
4301 * msi-x mode
4302 */
4303 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
4304 /* enable autoclear but not on bits 29:20 */
4305 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
4306
4307 /* general purpose interrupt enable */
4308 gpie |= (IXGBE_GPIE_MSIX_MODE
4309 | IXGBE_GPIE_PBA_SUPPORT
4310 | IXGBE_GPIE_OCD
4311 | IXGBE_GPIE_EIAME);
4312 /*
4313 * non-msi-x mode
4314 */
4315 } else {
4316
4317 /* disable autoclear, leave gpie at default */
4318 eiac = 0;
4319
4320 /*
4321 * General purpose interrupt enable.
4322 * For 82599, X540 and X550, extended interrupt
4323 * automask enable only in MSI or MSI-X mode
4324 */
4325 if ((hw->mac.type == ixgbe_mac_82598EB) ||
4326 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
4327 gpie |= IXGBE_GPIE_EIAME;
4328 }
4329 }
4330
4331 /* Enable specific "other" interrupt types */
4332 switch (hw->mac.type) {
4333 case ixgbe_mac_82598EB:
4334 gpie |= ixgbe->capab->other_gpie;
4335 break;
4336
4337 case ixgbe_mac_82599EB:
4338 case ixgbe_mac_X540:
4339 case ixgbe_mac_X550:
4340 case ixgbe_mac_X550EM_x:
4341 gpie |= ixgbe->capab->other_gpie;
4342
4343 /* Enable RSC Delay 8us when LRO enabled */
4344 if (ixgbe->lro_enable) {
4345 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
4346 }
4347 break;
4348
4349 default:
4350 break;
4351 }
4352
4353 /* write to interrupt control registers */
4354 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4355 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
4356 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
4357 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
4358 IXGBE_WRITE_FLUSH(hw);
4359 }
4360
4361 /*
4362 * ixgbe_loopback_ioctl - Loopback support.
4363 */
4364 enum ioc_reply
4365 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
4366 {
4367 lb_info_sz_t *lbsp;
4368 lb_property_t *lbpp;
4369 uint32_t *lbmp;
4370 uint32_t size;
4371 uint32_t value;
4372
4373 if (mp->b_cont == NULL)
4374 return (IOC_INVAL);
4375
4376 switch (iocp->ioc_cmd) {
4377 default:
4378 return (IOC_INVAL);
4379
4380 case LB_GET_INFO_SIZE:
4381 size = sizeof (lb_info_sz_t);
4382 if (iocp->ioc_count != size)
4383 return (IOC_INVAL);
4384
4385 value = sizeof (lb_normal);
4386 value += sizeof (lb_mac);
4387 value += sizeof (lb_external);
4388
4389 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
4390 *lbsp = value;
4391 break;
4392
4393 case LB_GET_INFO:
4394 value = sizeof (lb_normal);
4395 value += sizeof (lb_mac);
4396 value += sizeof (lb_external);
4397
4398 size = value;
4399 if (iocp->ioc_count != size)
4400 return (IOC_INVAL);
4401
4402 value = 0;
4403 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
4404
4405 lbpp[value++] = lb_normal;
4406 lbpp[value++] = lb_mac;
4407 lbpp[value++] = lb_external;
4408 break;
4409
4410 case LB_GET_MODE:
4411 size = sizeof (uint32_t);
4412 if (iocp->ioc_count != size)
4413 return (IOC_INVAL);
4414
4415 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4416 *lbmp = ixgbe->loopback_mode;
4417 break;
4418
4419 case LB_SET_MODE:
4420 size = 0;
4421 if (iocp->ioc_count != sizeof (uint32_t))
4422 return (IOC_INVAL);
4423
4424 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
4425 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
4426 return (IOC_INVAL);
4427 break;
4428 }
4429
4430 iocp->ioc_count = size;
4431 iocp->ioc_error = 0;
4432
4433 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4434 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4435 return (IOC_INVAL);
4436 }
4437
4438 return (IOC_REPLY);
4439 }
4440
4441 /*
4442 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
4443 */
4444 static boolean_t
4445 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
4446 {
4447 if (mode == ixgbe->loopback_mode)
4448 return (B_TRUE);
4449
4450 ixgbe->loopback_mode = mode;
4451
4452 if (mode == IXGBE_LB_NONE) {
4453 /*
4454 * Reset the chip
4455 */
4456 (void) ixgbe_reset(ixgbe);
4457 return (B_TRUE);
4458 }
4459
4460 mutex_enter(&ixgbe->gen_lock);
4461
4462 switch (mode) {
4463 default:
4464 mutex_exit(&ixgbe->gen_lock);
4465 return (B_FALSE);
4466
4467 case IXGBE_LB_EXTERNAL:
4468 break;
4469
4470 case IXGBE_LB_INTERNAL_MAC:
4471 ixgbe_set_internal_mac_loopback(ixgbe);
4472 break;
4473 }
4474
4475 mutex_exit(&ixgbe->gen_lock);
4476
4477 return (B_TRUE);
4478 }
4479
4480 /*
4481 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
4482 */
4483 static void
4484 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
4485 {
4486 struct ixgbe_hw *hw;
4487 uint32_t reg;
4488 uint8_t atlas;
4489
4490 hw = &ixgbe->hw;
4491
4492 /*
4493 * Setup MAC loopback
4494 */
4495 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
4496 reg |= IXGBE_HLREG0_LPBK;
4497 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
4498
4499 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4500 reg &= ~IXGBE_AUTOC_LMS_MASK;
4501 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4502
4503 /*
4504 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
4505 */
4506 switch (hw->mac.type) {
4507 case ixgbe_mac_82598EB:
4508 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
4509 &atlas);
4510 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
4511 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
4512 atlas);
4513
4514 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4515 &atlas);
4516 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
4517 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
4518 atlas);
4519
4520 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4521 &atlas);
4522 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
4523 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
4524 atlas);
4525
4526 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4527 &atlas);
4528 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
4529 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4530 atlas);
4531 break;
4532
4533 case ixgbe_mac_82599EB:
4534 case ixgbe_mac_X540:
4535 case ixgbe_mac_X550:
4536 case ixgbe_mac_X550EM_x:
4537 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4538 reg |= (IXGBE_AUTOC_FLU |
4539 IXGBE_AUTOC_10G_KX4);
4540 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4541
4542 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
4543 B_FALSE);
4544 break;
4545
4546 default:
4547 break;
4548 }
4549 }
4550
4551 #pragma inline(ixgbe_intr_rx_work)
4552 /*
4553 * ixgbe_intr_rx_work - RX processing of ISR.
4554 */
4555 static void
4556 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4557 {
4558 mblk_t *mp;
4559
4560 mutex_enter(&rx_ring->rx_lock);
4561
4562 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4563 mutex_exit(&rx_ring->rx_lock);
4564
4565 if (mp != NULL)
4566 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4567 rx_ring->ring_gen_num);
4568 }
4569
4570 #pragma inline(ixgbe_intr_tx_work)
4571 /*
4572 * ixgbe_intr_tx_work - TX processing of ISR.
4573 */
4574 static void
4575 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
4576 {
4577 ixgbe_t *ixgbe = tx_ring->ixgbe;
4578
4579 /*
4580 * Recycle the tx descriptors
4581 */
4582 tx_ring->tx_recycle(tx_ring);
4583
4584 /*
4585 * Schedule the re-transmit
4586 */
4587 if (tx_ring->reschedule &&
4588 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
4589 tx_ring->reschedule = B_FALSE;
4590 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
4591 tx_ring->ring_handle);
4592 tx_ring->stat_reschedule++;
4593 }
4594 }
4595
4596 #pragma inline(ixgbe_intr_other_work)
4597 /*
4598 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
4599 */
4600 static void
4601 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
4602 {
4603 struct ixgbe_hw *hw = &ixgbe->hw;
4604
4605 ASSERT(mutex_owned(&ixgbe->gen_lock));
4606
4607 /*
4608 * handle link status change
4609 */
4610 if (eicr & IXGBE_EICR_LSC) {
4611 ixgbe_driver_link_check(ixgbe);
4612 ixgbe_get_hw_state(ixgbe);
4613 }
4614
4615 /*
4616 * check for fan failure on adapters with fans
4617 */
4618 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
4619 (eicr & IXGBE_EICR_GPI_SDP1)) {
4620 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
4621
4622 /*
4623 * Disable the adapter interrupts
4624 */
4625 ixgbe_disable_adapter_interrupts(ixgbe);
4626
4627 /*
4628 * Disable Rx/Tx units
4629 */
4630 (void) ixgbe_stop_adapter(&ixgbe->hw);
4631
4632 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
4633 ixgbe_error(ixgbe,
4634 "Problem: Network adapter has been stopped "
4635 "because the fan has stopped.\n");
4636 ixgbe_error(ixgbe,
4637 "Action: Replace the adapter.\n");
4638
4639 /* re-enable the interrupt, which was automasked */
4640 ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
4641 }
4642
4643 /*
4644 * Do SFP check for adapters with hot-plug capability
4645 */
4646 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
4647 ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) ||
4648 (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) {
4649 ixgbe->eicr = eicr;
4650 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
4651 ixgbe_sfp_check, (void *)ixgbe,
4652 DDI_NOSLEEP)) != DDI_SUCCESS) {
4653 ixgbe_log(ixgbe, "No memory available to dispatch "
4654 "taskq for SFP check");
4655 }
4656 }
4657
4658 /*
4659 * Do over-temperature check for adapters with temp sensor
4660 */
4661 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
4662 ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) ||
4663 (eicr & IXGBE_EICR_LSC))) {
4664 ixgbe->eicr = eicr;
4665 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
4666 ixgbe_overtemp_check, (void *)ixgbe,
4667 DDI_NOSLEEP)) != DDI_SUCCESS) {
4668 ixgbe_log(ixgbe, "No memory available to dispatch "
4669 "taskq for overtemp check");
4670 }
4671 }
4672
4673 /*
4674 * Process an external PHY interrupt
4675 */
4676 if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
4677 (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
4678 ixgbe->eicr = eicr;
4679 if ((ddi_taskq_dispatch(ixgbe->phy_taskq,
4680 ixgbe_phy_check, (void *)ixgbe,
4681 DDI_NOSLEEP)) != DDI_SUCCESS) {
4682 ixgbe_log(ixgbe, "No memory available to dispatch "
4683 "taskq for PHY check");
4684 }
4685 }
4686 }
4687
4688 /*
4689 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
4690 */
4691 static uint_t
4692 ixgbe_intr_legacy(void *arg1, void *arg2)
4693 {
4694 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4695 struct ixgbe_hw *hw = &ixgbe->hw;
4696 ixgbe_tx_ring_t *tx_ring;
4697 ixgbe_rx_ring_t *rx_ring;
4698 uint32_t eicr;
4699 mblk_t *mp;
4700 boolean_t tx_reschedule;
4701 uint_t result;
4702
4703 _NOTE(ARGUNUSED(arg2));
4704
4705 mutex_enter(&ixgbe->gen_lock);
4706 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4707 mutex_exit(&ixgbe->gen_lock);
4708 return (DDI_INTR_UNCLAIMED);
4709 }
4710
4711 mp = NULL;
4712 tx_reschedule = B_FALSE;
4713
4714 /*
4715 * Any bit set in eicr: claim this interrupt
4716 */
4717 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4718
4719 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4720 mutex_exit(&ixgbe->gen_lock);
4721 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4722 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4723 return (DDI_INTR_CLAIMED);
4724 }
4725
4726 if (eicr) {
4727 /*
4728 * For legacy interrupt, we have only one interrupt,
4729 * so we have only one rx ring and one tx ring enabled.
4730 */
4731 ASSERT(ixgbe->num_rx_rings == 1);
4732 ASSERT(ixgbe->num_tx_rings == 1);
4733
4734 /*
4735 * For legacy interrupt, rx rings[0] will use RTxQ[0].
4736 */
4737 if (eicr & 0x1) {
4738 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
4739 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4740 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4741 /*
4742 * Clean the rx descriptors
4743 */
4744 rx_ring = &ixgbe->rx_rings[0];
4745 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4746 }
4747
4748 /*
4749 * For legacy interrupt, tx rings[0] will use RTxQ[1].
4750 */
4751 if (eicr & 0x2) {
4752 /*
4753 * Recycle the tx descriptors
4754 */
4755 tx_ring = &ixgbe->tx_rings[0];
4756 tx_ring->tx_recycle(tx_ring);
4757
4758 /*
4759 * Schedule the re-transmit
4760 */
4761 tx_reschedule = (tx_ring->reschedule &&
4762 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4763 }
4764
4765 /* any interrupt type other than tx/rx */
4766 if (eicr & ixgbe->capab->other_intr) {
4767 switch (hw->mac.type) {
4768 case ixgbe_mac_82598EB:
4769 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4770 break;
4771
4772 case ixgbe_mac_82599EB:
4773 case ixgbe_mac_X540:
4774 case ixgbe_mac_X550:
4775 case ixgbe_mac_X550EM_x:
4776 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4777 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4778 break;
4779
4780 default:
4781 break;
4782 }
4783 ixgbe_intr_other_work(ixgbe, eicr);
4784 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4785 }
4786
4787 mutex_exit(&ixgbe->gen_lock);
4788
4789 result = DDI_INTR_CLAIMED;
4790 } else {
4791 mutex_exit(&ixgbe->gen_lock);
4792
4793 /*
4794 * No interrupt cause bits set: don't claim this interrupt.
4795 */
4796 result = DDI_INTR_UNCLAIMED;
4797 }
4798
4799 /* re-enable the interrupts which were automasked */
4800 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4801
4802 /*
4803 * Do the following work outside of the gen_lock
4804 */
4805 if (mp != NULL) {
4806 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4807 rx_ring->ring_gen_num);
4808 }
4809
4810 if (tx_reschedule) {
4811 tx_ring->reschedule = B_FALSE;
4812 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
4813 tx_ring->stat_reschedule++;
4814 }
4815
4816 return (result);
4817 }
4818
4819 /*
4820 * ixgbe_intr_msi - Interrupt handler for MSI.
4821 */
4822 static uint_t
4823 ixgbe_intr_msi(void *arg1, void *arg2)
4824 {
4825 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4826 struct ixgbe_hw *hw = &ixgbe->hw;
4827 uint32_t eicr;
4828
4829 _NOTE(ARGUNUSED(arg2));
4830
4831 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4832
4833 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4834 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4835 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4836 return (DDI_INTR_CLAIMED);
4837 }
4838
4839 /*
4840 * For MSI interrupt, we have only one vector,
4841 * so we have only one rx ring and one tx ring enabled.
4842 */
4843 ASSERT(ixgbe->num_rx_rings == 1);
4844 ASSERT(ixgbe->num_tx_rings == 1);
4845
4846 /*
4847 * For MSI interrupt, rx rings[0] will use RTxQ[0].
4848 */
4849 if (eicr & 0x1) {
4850 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4851 }
4852
4853 /*
4854 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4855 */
4856 if (eicr & 0x2) {
4857 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4858 }
4859
4860 /* any interrupt type other than tx/rx */
4861 if (eicr & ixgbe->capab->other_intr) {
4862 mutex_enter(&ixgbe->gen_lock);
4863 switch (hw->mac.type) {
4864 case ixgbe_mac_82598EB:
4865 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4866 break;
4867
4868 case ixgbe_mac_82599EB:
4869 case ixgbe_mac_X540:
4870 case ixgbe_mac_X550:
4871 case ixgbe_mac_X550EM_x:
4872 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4873 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4874 break;
4875
4876 default:
4877 break;
4878 }
4879 ixgbe_intr_other_work(ixgbe, eicr);
4880 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4881 mutex_exit(&ixgbe->gen_lock);
4882 }
4883
4884 /* re-enable the interrupts which were automasked */
4885 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4886
4887 return (DDI_INTR_CLAIMED);
4888 }
4889
4890 /*
4891 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4892 */
4893 static uint_t
4894 ixgbe_intr_msix(void *arg1, void *arg2)
4895 {
4896 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
4897 ixgbe_t *ixgbe = vect->ixgbe;
4898 struct ixgbe_hw *hw = &ixgbe->hw;
4899 uint32_t eicr;
4900 int r_idx = 0;
4901
4902 _NOTE(ARGUNUSED(arg2));
4903
4904 /*
4905 * Clean each rx ring that has its bit set in the map
4906 */
4907 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
4908 while (r_idx >= 0) {
4909 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
4910 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4911 (ixgbe->num_rx_rings - 1));
4912 }
4913
4914 /*
4915 * Clean each tx ring that has its bit set in the map
4916 */
4917 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
4918 while (r_idx >= 0) {
4919 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
4920 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4921 (ixgbe->num_tx_rings - 1));
4922 }
4923
4924
4925 /*
4926 * Clean other interrupt (link change) that has its bit set in the map
4927 */
4928 if (BT_TEST(vect->other_map, 0) == 1) {
4929 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4930
4931 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4932 DDI_FM_OK) {
4933 ddi_fm_service_impact(ixgbe->dip,
4934 DDI_SERVICE_DEGRADED);
4935 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4936 return (DDI_INTR_CLAIMED);
4937 }
4938
4939 /*
4940 * Check "other" cause bits: any interrupt type other than tx/rx
4941 */
4942 if (eicr & ixgbe->capab->other_intr) {
4943 mutex_enter(&ixgbe->gen_lock);
4944 switch (hw->mac.type) {
4945 case ixgbe_mac_82598EB:
4946 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4947 ixgbe_intr_other_work(ixgbe, eicr);
4948 break;
4949
4950 case ixgbe_mac_82599EB:
4951 case ixgbe_mac_X540:
4952 case ixgbe_mac_X550:
4953 case ixgbe_mac_X550EM_x:
4954 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4955 ixgbe_intr_other_work(ixgbe, eicr);
4956 break;
4957
4958 default:
4959 break;
4960 }
4961 mutex_exit(&ixgbe->gen_lock);
4962 }
4963
4964 /* re-enable the interrupts which were automasked */
4965 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4966 }
4967
4968 return (DDI_INTR_CLAIMED);
4969 }
4970
4971 /*
4972 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4973 *
4974 * Normal sequence is to try MSI-X; if not sucessful, try MSI;
4975 * if not successful, try Legacy.
4976 * ixgbe->intr_force can be used to force sequence to start with
4977 * any of the 3 types.
4978 * If MSI-X is not used, number of tx/rx rings is forced to 1.
4979 */
4980 static int
4981 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4982 {
4983 dev_info_t *devinfo;
4984 int intr_types;
4985 int rc;
4986
4987 devinfo = ixgbe->dip;
4988
4989 /*
4990 * Get supported interrupt types
4991 */
4992 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4993
4994 if (rc != DDI_SUCCESS) {
4995 ixgbe_log(ixgbe,
4996 "Get supported interrupt types failed: %d", rc);
4997 return (IXGBE_FAILURE);
4998 }
4999 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
5000
5001 ixgbe->intr_type = 0;
5002
5003 /*
5004 * Install MSI-X interrupts
5005 */
5006 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
5007 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
5008 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
5009 if (rc == IXGBE_SUCCESS)
5010 return (IXGBE_SUCCESS);
5011
5012 ixgbe_log(ixgbe,
5013 "Allocate MSI-X failed, trying MSI interrupts...");
5014 }
5015
5016 /*
5017 * MSI-X not used, force rings and groups to 1
5018 */
5019 ixgbe->num_rx_rings = 1;
5020 ixgbe->num_rx_groups = 1;
5021 ixgbe->num_tx_rings = 1;
5022 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
5023 ixgbe_log(ixgbe,
5024 "MSI-X not used, force rings and groups number to 1");
5025
5026 /*
5027 * Install MSI interrupts
5028 */
5029 if ((intr_types & DDI_INTR_TYPE_MSI) &&
5030 (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
5031 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
5032 if (rc == IXGBE_SUCCESS)
5033 return (IXGBE_SUCCESS);
5034
5035 ixgbe_log(ixgbe,
5036 "Allocate MSI failed, trying Legacy interrupts...");
5037 }
5038
5039 /*
5040 * Install legacy interrupts
5041 */
5042 if (intr_types & DDI_INTR_TYPE_FIXED) {
5043 /*
5044 * Disallow legacy interrupts for X550. X550 has a silicon
5045 * bug which prevents Shared Legacy interrupts from working.
5046 * For details, please reference:
5047 *
5048 * Intel Ethernet Controller X550 Specification Update rev. 2.1
5049 * May 2016, erratum 22: PCIe Interrupt Status Bit
5050 */
5051 if (ixgbe->hw.mac.type == ixgbe_mac_X550 ||
5052 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x ||
5053 ixgbe->hw.mac.type == ixgbe_mac_X550_vf ||
5054 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x_vf) {
5055 ixgbe_log(ixgbe,
5056 "Legacy interrupts are not supported on this "
5057 "adapter. Please use MSI or MSI-X instead.");
5058 return (IXGBE_FAILURE);
5059 }
5060 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
5061 if (rc == IXGBE_SUCCESS)
5062 return (IXGBE_SUCCESS);
5063
5064 ixgbe_log(ixgbe,
5065 "Allocate Legacy interrupts failed");
5066 }
5067
5068 /*
5069 * If none of the 3 types succeeded, return failure
5070 */
5071 return (IXGBE_FAILURE);
5072 }
5073
5074 /*
5075 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
5076 *
5077 * For legacy and MSI, only 1 handle is needed. For MSI-X,
5078 * if fewer than 2 handles are available, return failure.
5079 * Upon success, this maps the vectors to rx and tx rings for
5080 * interrupts.
5081 */
5082 static int
5083 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
5084 {
5085 dev_info_t *devinfo;
5086 int request, count, actual;
5087 int minimum;
5088 int rc;
5089 uint32_t ring_per_group;
5090
5091 devinfo = ixgbe->dip;
5092
5093 switch (intr_type) {
5094 case DDI_INTR_TYPE_FIXED:
5095 request = 1; /* Request 1 legacy interrupt handle */
5096 minimum = 1;
5097 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
5098 break;
5099
5100 case DDI_INTR_TYPE_MSI:
5101 request = 1; /* Request 1 MSI interrupt handle */
5102 minimum = 1;
5103 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
5104 break;
5105
5106 case DDI_INTR_TYPE_MSIX:
5107 /*
5108 * Best number of vectors for the adapter is
5109 * (# rx rings + # tx rings), however we will
5110 * limit the request number.
5111 */
5112 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
5113 if (request > ixgbe->capab->max_ring_vect)
5114 request = ixgbe->capab->max_ring_vect;
5115 minimum = 1;
5116 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
5117 break;
5118
5119 default:
5120 ixgbe_log(ixgbe,
5121 "invalid call to ixgbe_alloc_intr_handles(): %d\n",
5122 intr_type);
5123 return (IXGBE_FAILURE);
5124 }
5125 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d",
5126 request, minimum);
5127
5128 /*
5129 * Get number of supported interrupts
5130 */
5131 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
5132 if ((rc != DDI_SUCCESS) || (count < minimum)) {
5133 ixgbe_log(ixgbe,
5134 "Get interrupt number failed. Return: %d, count: %d",
5135 rc, count);
5136 return (IXGBE_FAILURE);
5137 }
5138 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
5139
5140 actual = 0;
5141 ixgbe->intr_cnt = 0;
5142 ixgbe->intr_cnt_max = 0;
5143 ixgbe->intr_cnt_min = 0;
5144
5145 /*
5146 * Allocate an array of interrupt handles
5147 */
5148 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
5149 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
5150
5151 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
5152 request, &actual, DDI_INTR_ALLOC_NORMAL);
5153 if (rc != DDI_SUCCESS) {
5154 ixgbe_log(ixgbe, "Allocate interrupts failed. "
5155 "return: %d, request: %d, actual: %d",
5156 rc, request, actual);
5157 goto alloc_handle_fail;
5158 }
5159 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
5160
5161 /*
5162 * upper/lower limit of interrupts
5163 */
5164 ixgbe->intr_cnt = actual;
5165 ixgbe->intr_cnt_max = request;
5166 ixgbe->intr_cnt_min = minimum;
5167
5168 /*
5169 * rss number per group should not exceed the rx interrupt number,
5170 * else need to adjust rx ring number.
5171 */
5172 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5173 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
5174 if (actual < ring_per_group) {
5175 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
5176 ixgbe_setup_vmdq_rss_conf(ixgbe);
5177 }
5178
5179 /*
5180 * Now we know the actual number of vectors. Here we map the vector
5181 * to other, rx rings and tx ring.
5182 */
5183 if (actual < minimum) {
5184 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
5185 actual);
5186 goto alloc_handle_fail;
5187 }
5188
5189 /*
5190 * Get priority for first vector, assume remaining are all the same
5191 */
5192 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
5193 if (rc != DDI_SUCCESS) {
5194 ixgbe_log(ixgbe,
5195 "Get interrupt priority failed: %d", rc);
5196 goto alloc_handle_fail;
5197 }
5198
5199 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
5200 if (rc != DDI_SUCCESS) {
5201 ixgbe_log(ixgbe,
5202 "Get interrupt cap failed: %d", rc);
5203 goto alloc_handle_fail;
5204 }
5205
5206 ixgbe->intr_type = intr_type;
5207
5208 return (IXGBE_SUCCESS);
5209
5210 alloc_handle_fail:
5211 ixgbe_rem_intrs(ixgbe);
5212
5213 return (IXGBE_FAILURE);
5214 }
5215
5216 /*
5217 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
5218 *
5219 * Before adding the interrupt handlers, the interrupt vectors have
5220 * been allocated, and the rx/tx rings have also been allocated.
5221 */
5222 static int
5223 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
5224 {
5225 int vector = 0;
5226 int rc;
5227
5228 switch (ixgbe->intr_type) {
5229 case DDI_INTR_TYPE_MSIX:
5230 /*
5231 * Add interrupt handler for all vectors
5232 */
5233 for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
5234 /*
5235 * install pointer to vect_map[vector]
5236 */
5237 rc = ddi_intr_add_handler(ixgbe->htable[vector],
5238 (ddi_intr_handler_t *)ixgbe_intr_msix,
5239 (void *)&ixgbe->vect_map[vector], NULL);
5240
5241 if (rc != DDI_SUCCESS) {
5242 ixgbe_log(ixgbe,
5243 "Add interrupt handler failed. "
5244 "return: %d, vector: %d", rc, vector);
5245 for (vector--; vector >= 0; vector--) {
5246 (void) ddi_intr_remove_handler(
5247 ixgbe->htable[vector]);
5248 }
5249 return (IXGBE_FAILURE);
5250 }
5251 }
5252
5253 break;
5254
5255 case DDI_INTR_TYPE_MSI:
5256 /*
5257 * Add interrupt handlers for the only vector
5258 */
5259 rc = ddi_intr_add_handler(ixgbe->htable[vector],
5260 (ddi_intr_handler_t *)ixgbe_intr_msi,
5261 (void *)ixgbe, NULL);
5262
5263 if (rc != DDI_SUCCESS) {
5264 ixgbe_log(ixgbe,
5265 "Add MSI interrupt handler failed: %d", rc);
5266 return (IXGBE_FAILURE);
5267 }
5268
5269 break;
5270
5271 case DDI_INTR_TYPE_FIXED:
5272 /*
5273 * Add interrupt handlers for the only vector
5274 */
5275 rc = ddi_intr_add_handler(ixgbe->htable[vector],
5276 (ddi_intr_handler_t *)ixgbe_intr_legacy,
5277 (void *)ixgbe, NULL);
5278
5279 if (rc != DDI_SUCCESS) {
5280 ixgbe_log(ixgbe,
5281 "Add legacy interrupt handler failed: %d", rc);
5282 return (IXGBE_FAILURE);
5283 }
5284
5285 break;
5286
5287 default:
5288 return (IXGBE_FAILURE);
5289 }
5290
5291 return (IXGBE_SUCCESS);
5292 }
5293
5294 #pragma inline(ixgbe_map_rxring_to_vector)
5295 /*
5296 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
5297 */
5298 static void
5299 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
5300 {
5301 /*
5302 * Set bit in map
5303 */
5304 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
5305
5306 /*
5307 * Count bits set
5308 */
5309 ixgbe->vect_map[v_idx].rxr_cnt++;
5310
5311 /*
5312 * Remember bit position
5313 */
5314 ixgbe->rx_rings[r_idx].intr_vector = v_idx;
5315 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
5316 }
5317
5318 #pragma inline(ixgbe_map_txring_to_vector)
5319 /*
5320 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
5321 */
5322 static void
5323 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
5324 {
5325 /*
5326 * Set bit in map
5327 */
5328 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
5329
5330 /*
5331 * Count bits set
5332 */
5333 ixgbe->vect_map[v_idx].txr_cnt++;
5334
5335 /*
5336 * Remember bit position
5337 */
5338 ixgbe->tx_rings[t_idx].intr_vector = v_idx;
5339 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
5340 }
5341
5342 /*
5343 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
5344 * allocation register (IVAR).
5345 * cause:
5346 * -1 : other cause
5347 * 0 : rx
5348 * 1 : tx
5349 */
5350 static void
5351 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
5352 int8_t cause)
5353 {
5354 struct ixgbe_hw *hw = &ixgbe->hw;
5355 u32 ivar, index;
5356
5357 switch (hw->mac.type) {
5358 case ixgbe_mac_82598EB:
5359 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5360 if (cause == -1) {
5361 cause = 0;
5362 }
5363 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5364 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5365 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
5366 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
5367 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5368 break;
5369
5370 case ixgbe_mac_82599EB:
5371 case ixgbe_mac_X540:
5372 case ixgbe_mac_X550:
5373 case ixgbe_mac_X550EM_x:
5374 if (cause == -1) {
5375 /* other causes */
5376 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5377 index = (intr_alloc_entry & 1) * 8;
5378 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5379 ivar &= ~(0xFF << index);
5380 ivar |= (msix_vector << index);
5381 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5382 } else {
5383 /* tx or rx causes */
5384 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5385 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5386 ivar = IXGBE_READ_REG(hw,
5387 IXGBE_IVAR(intr_alloc_entry >> 1));
5388 ivar &= ~(0xFF << index);
5389 ivar |= (msix_vector << index);
5390 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5391 ivar);
5392 }
5393 break;
5394
5395 default:
5396 break;
5397 }
5398 }
5399
5400 /*
5401 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
5402 * given interrupt vector allocation register (IVAR).
5403 * cause:
5404 * -1 : other cause
5405 * 0 : rx
5406 * 1 : tx
5407 */
5408 static void
5409 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
5410 {
5411 struct ixgbe_hw *hw = &ixgbe->hw;
5412 u32 ivar, index;
5413
5414 switch (hw->mac.type) {
5415 case ixgbe_mac_82598EB:
5416 if (cause == -1) {
5417 cause = 0;
5418 }
5419 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5420 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5421 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
5422 (intr_alloc_entry & 0x3)));
5423 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5424 break;
5425
5426 case ixgbe_mac_82599EB:
5427 case ixgbe_mac_X540:
5428 case ixgbe_mac_X550:
5429 case ixgbe_mac_X550EM_x:
5430 if (cause == -1) {
5431 /* other causes */
5432 index = (intr_alloc_entry & 1) * 8;
5433 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5434 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
5435 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5436 } else {
5437 /* tx or rx causes */
5438 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5439 ivar = IXGBE_READ_REG(hw,
5440 IXGBE_IVAR(intr_alloc_entry >> 1));
5441 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
5442 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5443 ivar);
5444 }
5445 break;
5446
5447 default:
5448 break;
5449 }
5450 }
5451
5452 /*
5453 * ixgbe_disable_ivar - Disble the given entry by clearing the VAL bit of
5454 * given interrupt vector allocation register (IVAR).
5455 * cause:
5456 * -1 : other cause
5457 * 0 : rx
5458 * 1 : tx
5459 */
5460 static void
5461 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
5462 {
5463 struct ixgbe_hw *hw = &ixgbe->hw;
5464 u32 ivar, index;
5465
5466 switch (hw->mac.type) {
5467 case ixgbe_mac_82598EB:
5468 if (cause == -1) {
5469 cause = 0;
5470 }
5471 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5472 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5473 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
5474 (intr_alloc_entry & 0x3)));
5475 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5476 break;
5477
5478 case ixgbe_mac_82599EB:
5479 case ixgbe_mac_X540:
5480 case ixgbe_mac_X550:
5481 case ixgbe_mac_X550EM_x:
5482 if (cause == -1) {
5483 /* other causes */
5484 index = (intr_alloc_entry & 1) * 8;
5485 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5486 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
5487 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5488 } else {
5489 /* tx or rx causes */
5490 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5491 ivar = IXGBE_READ_REG(hw,
5492 IXGBE_IVAR(intr_alloc_entry >> 1));
5493 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
5494 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5495 ivar);
5496 }
5497 break;
5498
5499 default:
5500 break;
5501 }
5502 }
5503
5504 /*
5505 * Convert the rx ring index driver maintained to the rx ring index
5506 * in h/w.
5507 */
5508 static uint32_t
5509 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
5510 {
5511
5512 struct ixgbe_hw *hw = &ixgbe->hw;
5513 uint32_t rx_ring_per_group, hw_rx_index;
5514
5515 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
5516 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
5517 return (sw_rx_index);
5518 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
5519 switch (hw->mac.type) {
5520 case ixgbe_mac_82598EB:
5521 return (sw_rx_index);
5522
5523 case ixgbe_mac_82599EB:
5524 case ixgbe_mac_X540:
5525 case ixgbe_mac_X550:
5526 case ixgbe_mac_X550EM_x:
5527 return (sw_rx_index * 2);
5528
5529 default:
5530 break;
5531 }
5532 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
5533 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5534
5535 switch (hw->mac.type) {
5536 case ixgbe_mac_82598EB:
5537 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
5538 16 + (sw_rx_index % rx_ring_per_group);
5539 return (hw_rx_index);
5540
5541 case ixgbe_mac_82599EB:
5542 case ixgbe_mac_X540:
5543 case ixgbe_mac_X550:
5544 case ixgbe_mac_X550EM_x:
5545 if (ixgbe->num_rx_groups > 32) {
5546 hw_rx_index = (sw_rx_index /
5547 rx_ring_per_group) * 2 +
5548 (sw_rx_index % rx_ring_per_group);
5549 } else {
5550 hw_rx_index = (sw_rx_index /
5551 rx_ring_per_group) * 4 +
5552 (sw_rx_index % rx_ring_per_group);
5553 }
5554 return (hw_rx_index);
5555
5556 default:
5557 break;
5558 }
5559 }
5560
5561 /*
5562 * Should never reach. Just to make compiler happy.
5563 */
5564 return (sw_rx_index);
5565 }
5566
5567 /*
5568 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
5569 *
5570 * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt
5571 * to vector[0 - (intr_cnt -1)].
5572 */
5573 static int
5574 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
5575 {
5576 int i, vector = 0;
5577
5578 /* initialize vector map */
5579 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
5580 for (i = 0; i < ixgbe->intr_cnt; i++) {
5581 ixgbe->vect_map[i].ixgbe = ixgbe;
5582 }
5583
5584 /*
5585 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
5586 * tx rings[0] on RTxQ[1].
5587 */
5588 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5589 ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
5590 ixgbe_map_txring_to_vector(ixgbe, 0, 1);
5591 return (IXGBE_SUCCESS);
5592 }
5593
5594 /*
5595 * Interrupts/vectors mapping for MSI-X
5596 */
5597
5598 /*
5599 * Map other interrupt to vector 0,
5600 * Set bit in map and count the bits set.
5601 */
5602 BT_SET(ixgbe->vect_map[vector].other_map, 0);
5603 ixgbe->vect_map[vector].other_cnt++;
5604
5605 /*
5606 * Map rx ring interrupts to vectors
5607 */
5608 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5609 ixgbe_map_rxring_to_vector(ixgbe, i, vector);
5610 vector = (vector +1) % ixgbe->intr_cnt;
5611 }
5612
5613 /*
5614 * Map tx ring interrupts to vectors
5615 */
5616 for (i = 0; i < ixgbe->num_tx_rings; i++) {
5617 ixgbe_map_txring_to_vector(ixgbe, i, vector);
5618 vector = (vector +1) % ixgbe->intr_cnt;
5619 }
5620
5621 return (IXGBE_SUCCESS);
5622 }
5623
5624 /*
5625 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
5626 *
5627 * This relies on ring/vector mapping already set up in the
5628 * vect_map[] structures
5629 */
5630 static void
5631 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
5632 {
5633 struct ixgbe_hw *hw = &ixgbe->hw;
5634 ixgbe_intr_vector_t *vect; /* vector bitmap */
5635 int r_idx; /* ring index */
5636 int v_idx; /* vector index */
5637 uint32_t hw_index;
5638
5639 /*
5640 * Clear any previous entries
5641 */
5642 switch (hw->mac.type) {
5643 case ixgbe_mac_82598EB:
5644 for (v_idx = 0; v_idx < 25; v_idx++)
5645 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5646 break;
5647
5648 case ixgbe_mac_82599EB:
5649 case ixgbe_mac_X540:
5650 case ixgbe_mac_X550:
5651 case ixgbe_mac_X550EM_x:
5652 for (v_idx = 0; v_idx < 64; v_idx++)
5653 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5654 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5655 break;
5656
5657 default:
5658 break;
5659 }
5660
5661 /*
5662 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5663 * tx rings[0] will use RTxQ[1].
5664 */
5665 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5666 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5667 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5668 return;
5669 }
5670
5671 /*
5672 * For MSI-X interrupt, "Other" is always on vector[0].
5673 */
5674 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
5675
5676 /*
5677 * For each interrupt vector, populate the IVAR table
5678 */
5679 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
5680 vect = &ixgbe->vect_map[v_idx];
5681
5682 /*
5683 * For each rx ring bit set
5684 */
5685 r_idx = bt_getlowbit(vect->rx_map, 0,
5686 (ixgbe->num_rx_rings - 1));
5687
5688 while (r_idx >= 0) {
5689 hw_index = ixgbe->rx_rings[r_idx].hw_index;
5690 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
5691 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
5692 (ixgbe->num_rx_rings - 1));
5693 }
5694
5695 /*
5696 * For each tx ring bit set
5697 */
5698 r_idx = bt_getlowbit(vect->tx_map, 0,
5699 (ixgbe->num_tx_rings - 1));
5700
5701 while (r_idx >= 0) {
5702 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
5703 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
5704 (ixgbe->num_tx_rings - 1));
5705 }
5706 }
5707 }
5708
5709 /*
5710 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
5711 */
5712 static void
5713 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
5714 {
5715 int i;
5716 int rc;
5717
5718 for (i = 0; i < ixgbe->intr_cnt; i++) {
5719 rc = ddi_intr_remove_handler(ixgbe->htable[i]);
5720 if (rc != DDI_SUCCESS) {
5721 IXGBE_DEBUGLOG_1(ixgbe,
5722 "Remove intr handler failed: %d", rc);
5723 }
5724 }
5725 }
5726
5727 /*
5728 * ixgbe_rem_intrs - Remove the allocated interrupts.
5729 */
5730 static void
5731 ixgbe_rem_intrs(ixgbe_t *ixgbe)
5732 {
5733 int i;
5734 int rc;
5735
5736 for (i = 0; i < ixgbe->intr_cnt; i++) {
5737 rc = ddi_intr_free(ixgbe->htable[i]);
5738 if (rc != DDI_SUCCESS) {
5739 IXGBE_DEBUGLOG_1(ixgbe,
5740 "Free intr failed: %d", rc);
5741 }
5742 }
5743
5744 kmem_free(ixgbe->htable, ixgbe->intr_size);
5745 ixgbe->htable = NULL;
5746 }
5747
5748 /*
5749 * ixgbe_enable_intrs - Enable all the ddi interrupts.
5750 */
5751 static int
5752 ixgbe_enable_intrs(ixgbe_t *ixgbe)
5753 {
5754 int i;
5755 int rc;
5756
5757 /*
5758 * Enable interrupts
5759 */
5760 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5761 /*
5762 * Call ddi_intr_block_enable() for MSI
5763 */
5764 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
5765 if (rc != DDI_SUCCESS) {
5766 ixgbe_log(ixgbe,
5767 "Enable block intr failed: %d", rc);
5768 return (IXGBE_FAILURE);
5769 }
5770 } else {
5771 /*
5772 * Call ddi_intr_enable() for Legacy/MSI non block enable
5773 */
5774 for (i = 0; i < ixgbe->intr_cnt; i++) {
5775 rc = ddi_intr_enable(ixgbe->htable[i]);
5776 if (rc != DDI_SUCCESS) {
5777 ixgbe_log(ixgbe,
5778 "Enable intr failed: %d", rc);
5779 return (IXGBE_FAILURE);
5780 }
5781 }
5782 }
5783
5784 return (IXGBE_SUCCESS);
5785 }
5786
5787 /*
5788 * ixgbe_disable_intrs - Disable all the interrupts.
5789 */
5790 static int
5791 ixgbe_disable_intrs(ixgbe_t *ixgbe)
5792 {
5793 int i;
5794 int rc;
5795
5796 /*
5797 * Disable all interrupts
5798 */
5799 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5800 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
5801 if (rc != DDI_SUCCESS) {
5802 ixgbe_log(ixgbe,
5803 "Disable block intr failed: %d", rc);
5804 return (IXGBE_FAILURE);
5805 }
5806 } else {
5807 for (i = 0; i < ixgbe->intr_cnt; i++) {
5808 rc = ddi_intr_disable(ixgbe->htable[i]);
5809 if (rc != DDI_SUCCESS) {
5810 ixgbe_log(ixgbe,
5811 "Disable intr failed: %d", rc);
5812 return (IXGBE_FAILURE);
5813 }
5814 }
5815 }
5816
5817 return (IXGBE_SUCCESS);
5818 }
5819
5820 /*
5821 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
5822 */
5823 static void
5824 ixgbe_get_hw_state(ixgbe_t *ixgbe)
5825 {
5826 struct ixgbe_hw *hw = &ixgbe->hw;
5827 ixgbe_link_speed speed = 0;
5828 boolean_t link_up = B_FALSE;
5829 uint32_t pcs1g_anlp = 0;
5830
5831 ASSERT(mutex_owned(&ixgbe->gen_lock));
5832 ixgbe->param_lp_1000fdx_cap = 0;
5833 ixgbe->param_lp_100fdx_cap = 0;
5834
5835 /* check for link, don't wait */
5836 (void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
5837
5838 /*
5839 * Update the observed Link Partner's capabilities. Not all adapters
5840 * can provide full information on the LP's capable speeds, so we
5841 * provide what we can.
5842 */
5843 if (link_up) {
5844 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
5845
5846 ixgbe->param_lp_1000fdx_cap =
5847 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5848 ixgbe->param_lp_100fdx_cap =
5849 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5850 }
5851
5852 /*
5853 * Update GLD's notion of the adapter's currently advertised speeds.
5854 * Since the common code doesn't always record the current autonegotiate
5855 * settings in the phy struct for all parts (specifically, adapters with
5856 * SFPs) we first test to see if it is 0, and if so, we fall back to
5857 * using the adapter's speed capabilities which we saved during instance
5858 * init in ixgbe_init_params().
5859 *
5860 * Adapters with SFPs will always be shown as advertising all of their
5861 * supported speeds, and adapters with baseT PHYs (where the phy struct
5862 * is maintained by the common code) will always have a factual view of
5863 * their currently-advertised speeds. In the case of SFPs, this is
5864 * acceptable as we default to advertising all speeds that the adapter
5865 * claims to support, and those properties are immutable; unlike on
5866 * baseT (copper) PHYs, where speeds can be enabled or disabled at will.
5867 */
5868 speed = hw->phy.autoneg_advertised;
5869 if (speed == 0)
5870 speed = ixgbe->speeds_supported;
5871
5872 ixgbe->param_adv_10000fdx_cap =
5873 (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 1 : 0;
5874 ixgbe->param_adv_5000fdx_cap =
5875 (speed & IXGBE_LINK_SPEED_5GB_FULL) ? 1 : 0;
5876 ixgbe->param_adv_2500fdx_cap =
5877 (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 1 : 0;
5878 ixgbe->param_adv_1000fdx_cap =
5879 (speed & IXGBE_LINK_SPEED_1GB_FULL) ? 1 : 0;
5880 ixgbe->param_adv_100fdx_cap =
5881 (speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0;
5882 }
5883
5884 /*
5885 * ixgbe_get_driver_control - Notify that driver is in control of device.
5886 */
5887 static void
5888 ixgbe_get_driver_control(struct ixgbe_hw *hw)
5889 {
5890 uint32_t ctrl_ext;
5891
5892 /*
5893 * Notify firmware that driver is in control of device
5894 */
5895 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5896 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
5897 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5898 }
5899
5900 /*
5901 * ixgbe_release_driver_control - Notify that driver is no longer in control
5902 * of device.
5903 */
5904 static void
5905 ixgbe_release_driver_control(struct ixgbe_hw *hw)
5906 {
5907 uint32_t ctrl_ext;
5908
5909 /*
5910 * Notify firmware that driver is no longer in control of device
5911 */
5912 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5913 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
5914 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5915 }
5916
5917 /*
5918 * ixgbe_atomic_reserve - Atomic decrease operation.
5919 */
5920 int
5921 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
5922 {
5923 uint32_t oldval;
5924 uint32_t newval;
5925
5926 /*
5927 * ATOMICALLY
5928 */
5929 do {
5930 oldval = *count_p;
5931 if (oldval < n)
5932 return (-1);
5933 newval = oldval - n;
5934 } while (atomic_cas_32(count_p, oldval, newval) != oldval);
5935
5936 return (newval);
5937 }
5938
5939 /*
5940 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5941 */
5942 static uint8_t *
5943 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
5944 {
5945 uint8_t *addr = *upd_ptr;
5946 uint8_t *new_ptr;
5947
5948 _NOTE(ARGUNUSED(hw));
5949 _NOTE(ARGUNUSED(vmdq));
5950
5951 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
5952 *upd_ptr = new_ptr;
5953 return (addr);
5954 }
5955
5956 /*
5957 * FMA support
5958 */
5959 int
5960 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
5961 {
5962 ddi_fm_error_t de;
5963
5964 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5965 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5966 return (de.fme_status);
5967 }
5968
5969 int
5970 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
5971 {
5972 ddi_fm_error_t de;
5973
5974 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5975 return (de.fme_status);
5976 }
5977
5978 /*
5979 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
5980 */
5981 static int
5982 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5983 {
5984 _NOTE(ARGUNUSED(impl_data));
5985 /*
5986 * as the driver can always deal with an error in any dma or
5987 * access handle, we can just return the fme_status value.
5988 */
5989 pci_ereport_post(dip, err, NULL);
5990 return (err->fme_status);
5991 }
5992
5993 static void
5994 ixgbe_fm_init(ixgbe_t *ixgbe)
5995 {
5996 ddi_iblock_cookie_t iblk;
5997 int fma_dma_flag;
5998
5999 /*
6000 * Only register with IO Fault Services if we have some capability
6001 */
6002 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
6003 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
6004 } else {
6005 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
6006 }
6007
6008 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
6009 fma_dma_flag = 1;
6010 } else {
6011 fma_dma_flag = 0;
6012 }
6013
6014 ixgbe_set_fma_flags(fma_dma_flag);
6015
6016 if (ixgbe->fm_capabilities) {
6017
6018 /*
6019 * Register capabilities with IO Fault Services
6020 */
6021 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
6022
6023 /*
6024 * Initialize pci ereport capabilities if ereport capable
6025 */
6026 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
6027 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
6028 pci_ereport_setup(ixgbe->dip);
6029
6030 /*
6031 * Register error callback if error callback capable
6032 */
6033 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
6034 ddi_fm_handler_register(ixgbe->dip,
6035 ixgbe_fm_error_cb, (void*) ixgbe);
6036 }
6037 }
6038
6039 static void
6040 ixgbe_fm_fini(ixgbe_t *ixgbe)
6041 {
6042 /*
6043 * Only unregister FMA capabilities if they are registered
6044 */
6045 if (ixgbe->fm_capabilities) {
6046
6047 /*
6048 * Release any resources allocated by pci_ereport_setup()
6049 */
6050 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
6051 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
6052 pci_ereport_teardown(ixgbe->dip);
6053
6054 /*
6055 * Un-register error callback if error callback capable
6056 */
6057 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
6058 ddi_fm_handler_unregister(ixgbe->dip);
6059
6060 /*
6061 * Unregister from IO Fault Service
6062 */
6063 ddi_fm_fini(ixgbe->dip);
6064 }
6065 }
6066
6067 void
6068 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
6069 {
6070 uint64_t ena;
6071 char buf[FM_MAX_CLASS];
6072
6073 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6074 ena = fm_ena_generate(0, FM_ENA_FMT1);
6075 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
6076 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
6077 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
6078 }
6079 }
6080
6081 static int
6082 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
6083 {
6084 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
6085
6086 mutex_enter(&rx_ring->rx_lock);
6087 rx_ring->ring_gen_num = mr_gen_num;
6088 mutex_exit(&rx_ring->rx_lock);
6089 return (0);
6090 }
6091
6092 /*
6093 * Get the global ring index by a ring index within a group.
6094 */
6095 static int
6096 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
6097 {
6098 ixgbe_rx_ring_t *rx_ring;
6099 int i;
6100
6101 for (i = 0; i < ixgbe->num_rx_rings; i++) {
6102 rx_ring = &ixgbe->rx_rings[i];
6103 if (rx_ring->group_index == gindex)
6104 rindex--;
6105 if (rindex < 0)
6106 return (i);
6107 }
6108
6109 return (-1);
6110 }
6111
6112 /*
6113 * Callback funtion for MAC layer to register all rings.
6114 */
6115 /* ARGSUSED */
6116 void
6117 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
6118 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
6119 {
6120 ixgbe_t *ixgbe = (ixgbe_t *)arg;
6121 mac_intr_t *mintr = &infop->mri_intr;
6122
6123 switch (rtype) {
6124 case MAC_RING_TYPE_RX: {
6125 /*
6126 * 'index' is the ring index within the group.
6127 * Need to get the global ring index by searching in groups.
6128 */
6129 int global_ring_index = ixgbe_get_rx_ring_index(
6130 ixgbe, group_index, ring_index);
6131
6132 ASSERT(global_ring_index >= 0);
6133
6134 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
6135 rx_ring->ring_handle = rh;
6136
6137 infop->mri_driver = (mac_ring_driver_t)rx_ring;
6138 infop->mri_start = ixgbe_ring_start;
6139 infop->mri_stop = NULL;
6140 infop->mri_poll = ixgbe_ring_rx_poll;
6141 infop->mri_stat = ixgbe_rx_ring_stat;
6142
6143 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
6144 mintr->mi_enable = ixgbe_rx_ring_intr_enable;
6145 mintr->mi_disable = ixgbe_rx_ring_intr_disable;
6146 if (ixgbe->intr_type &
6147 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
6148 mintr->mi_ddi_handle =
6149 ixgbe->htable[rx_ring->intr_vector];
6150 }
6151
6152 break;
6153 }
6154 case MAC_RING_TYPE_TX: {
6155 ASSERT(group_index == -1);
6156 ASSERT(ring_index < ixgbe->num_tx_rings);
6157
6158 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
6159 tx_ring->ring_handle = rh;
6160
6161 infop->mri_driver = (mac_ring_driver_t)tx_ring;
6162 infop->mri_start = NULL;
6163 infop->mri_stop = NULL;
6164 infop->mri_tx = ixgbe_ring_tx;
6165 infop->mri_stat = ixgbe_tx_ring_stat;
6166 if (ixgbe->intr_type &
6167 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
6168 mintr->mi_ddi_handle =
6169 ixgbe->htable[tx_ring->intr_vector];
6170 }
6171 break;
6172 }
6173 default:
6174 break;
6175 }
6176 }
6177
6178 /*
6179 * Callback funtion for MAC layer to register all groups.
6180 */
6181 void
6182 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
6183 mac_group_info_t *infop, mac_group_handle_t gh)
6184 {
6185 ixgbe_t *ixgbe = (ixgbe_t *)arg;
6186
6187 switch (rtype) {
6188 case MAC_RING_TYPE_RX: {
6189 ixgbe_rx_group_t *rx_group;
6190
6191 rx_group = &ixgbe->rx_groups[index];
6192 rx_group->group_handle = gh;
6193
6194 infop->mgi_driver = (mac_group_driver_t)rx_group;
6195 infop->mgi_start = NULL;
6196 infop->mgi_stop = NULL;
6197 infop->mgi_addmac = ixgbe_addmac;
6198 infop->mgi_remmac = ixgbe_remmac;
6199 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
6200
6201 break;
6202 }
6203 case MAC_RING_TYPE_TX:
6204 break;
6205 default:
6206 break;
6207 }
6208 }
6209
6210 /*
6211 * Enable interrupt on the specificed rx ring.
6212 */
6213 int
6214 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
6215 {
6216 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
6217 ixgbe_t *ixgbe = rx_ring->ixgbe;
6218 int r_idx = rx_ring->index;
6219 int hw_r_idx = rx_ring->hw_index;
6220 int v_idx = rx_ring->intr_vector;
6221
6222 mutex_enter(&ixgbe->gen_lock);
6223 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
6224 mutex_exit(&ixgbe->gen_lock);
6225 /*
6226 * Simply return 0.
6227 * Interrupts are being adjusted. ixgbe_intr_adjust()
6228 * will eventually re-enable the interrupt when it's
6229 * done with the adjustment.
6230 */
6231 return (0);
6232 }
6233
6234 /*
6235 * To enable interrupt by setting the VAL bit of given interrupt
6236 * vector allocation register (IVAR).
6237 */
6238 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);
6239
6240 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
6241
6242 /*
6243 * Trigger a Rx interrupt on this ring
6244 */
6245 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
6246 IXGBE_WRITE_FLUSH(&ixgbe->hw);
6247
6248 mutex_exit(&ixgbe->gen_lock);
6249
6250 return (0);
6251 }
6252
6253 /*
6254 * Disable interrupt on the specificed rx ring.
6255 */
6256 int
6257 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
6258 {
6259 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
6260 ixgbe_t *ixgbe = rx_ring->ixgbe;
6261 int r_idx = rx_ring->index;
6262 int hw_r_idx = rx_ring->hw_index;
6263 int v_idx = rx_ring->intr_vector;
6264
6265 mutex_enter(&ixgbe->gen_lock);
6266 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
6267 mutex_exit(&ixgbe->gen_lock);
6268 /*
6269 * Simply return 0.
6270 * In the rare case where an interrupt is being
6271 * disabled while interrupts are being adjusted,
6272 * we don't fail the operation. No interrupts will
6273 * be generated while they are adjusted, and
6274 * ixgbe_intr_adjust() will cause the interrupts
6275 * to be re-enabled once it completes. Note that
6276 * in this case, packets may be delivered to the
6277 * stack via interrupts before xgbe_rx_ring_intr_enable()
6278 * is called again. This is acceptable since interrupt
6279 * adjustment is infrequent, and the stack will be
6280 * able to handle these packets.
6281 */
6282 return (0);
6283 }
6284
6285 /*
6286 * To disable interrupt by clearing the VAL bit of given interrupt
6287 * vector allocation register (IVAR).
6288 */
6289 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
6290
6291 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
6292
6293 mutex_exit(&ixgbe->gen_lock);
6294
6295 return (0);
6296 }
6297
6298 /*
6299 * Add a mac address.
6300 */
6301 static int
6302 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
6303 {
6304 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
6305 ixgbe_t *ixgbe = rx_group->ixgbe;
6306 struct ixgbe_hw *hw = &ixgbe->hw;
6307 int slot, i;
6308
6309 mutex_enter(&ixgbe->gen_lock);
6310
6311 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6312 mutex_exit(&ixgbe->gen_lock);
6313 return (ECANCELED);
6314 }
6315
6316 if (ixgbe->unicst_avail == 0) {
6317 /* no slots available */
6318 mutex_exit(&ixgbe->gen_lock);
6319 return (ENOSPC);
6320 }
6321
6322 /*
6323 * The first ixgbe->num_rx_groups slots are reserved for each respective
6324 * group. The rest slots are shared by all groups. While adding a
6325 * MAC address, reserved slots are firstly checked then the shared
6326 * slots are searched.
6327 */
6328 slot = -1;
6329 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
6330 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
6331 if (ixgbe->unicst_addr[i].mac.set == 0) {
6332 slot = i;
6333 break;
6334 }
6335 }
6336 } else {
6337 slot = rx_group->index;
6338 }
6339
6340 if (slot == -1) {
6341 /* no slots available */
6342 mutex_exit(&ixgbe->gen_lock);
6343 return (ENOSPC);
6344 }
6345
6346 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
6347 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
6348 rx_group->index, IXGBE_RAH_AV);
6349 ixgbe->unicst_addr[slot].mac.set = 1;
6350 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
6351 ixgbe->unicst_avail--;
6352
6353 mutex_exit(&ixgbe->gen_lock);
6354
6355 return (0);
6356 }
6357
6358 /*
6359 * Remove a mac address.
6360 */
6361 static int
6362 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
6363 {
6364 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
6365 ixgbe_t *ixgbe = rx_group->ixgbe;
6366 struct ixgbe_hw *hw = &ixgbe->hw;
6367 int slot;
6368
6369 mutex_enter(&ixgbe->gen_lock);
6370
6371 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
6372 mutex_exit(&ixgbe->gen_lock);
6373 return (ECANCELED);
6374 }
6375
6376 slot = ixgbe_unicst_find(ixgbe, mac_addr);
6377 if (slot == -1) {
6378 mutex_exit(&ixgbe->gen_lock);
6379 return (EINVAL);
6380 }
6381
6382 if (ixgbe->unicst_addr[slot].mac.set == 0) {
6383 mutex_exit(&ixgbe->gen_lock);
6384 return (EINVAL);
6385 }
6386
6387 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
6388 (void) ixgbe_clear_rar(hw, slot);
6389 ixgbe->unicst_addr[slot].mac.set = 0;
6390 ixgbe->unicst_avail++;
6391
6392 mutex_exit(&ixgbe->gen_lock);
6393
6394 return (0);
6395 }