1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
30 */
31
32 #include "ixgbe_sw.h"
33
34 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
35 static char ixgbe_version[] = "ixgbe 1.1.7";
36
37 /*
38 * Local function protoypes
39 */
40 static int ixgbe_register_mac(ixgbe_t *);
41 static int ixgbe_identify_hardware(ixgbe_t *);
42 static int ixgbe_regs_map(ixgbe_t *);
43 static void ixgbe_init_properties(ixgbe_t *);
44 static int ixgbe_init_driver_settings(ixgbe_t *);
45 static void ixgbe_init_locks(ixgbe_t *);
46 static void ixgbe_destroy_locks(ixgbe_t *);
47 static int ixgbe_init(ixgbe_t *);
48 static int ixgbe_chip_start(ixgbe_t *);
49 static void ixgbe_chip_stop(ixgbe_t *);
50 static int ixgbe_reset(ixgbe_t *);
51 static void ixgbe_tx_clean(ixgbe_t *);
52 static boolean_t ixgbe_tx_drain(ixgbe_t *);
53 static boolean_t ixgbe_rx_drain(ixgbe_t *);
54 static int ixgbe_alloc_rings(ixgbe_t *);
55 static void ixgbe_free_rings(ixgbe_t *);
56 static int ixgbe_alloc_rx_data(ixgbe_t *);
57 static void ixgbe_free_rx_data(ixgbe_t *);
58 static void ixgbe_setup_rings(ixgbe_t *);
59 static void ixgbe_setup_rx(ixgbe_t *);
60 static void ixgbe_setup_tx(ixgbe_t *);
61 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
62 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
63 static void ixgbe_setup_rss(ixgbe_t *);
64 static void ixgbe_setup_vmdq(ixgbe_t *);
65 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
66 static void ixgbe_init_unicst(ixgbe_t *);
67 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
68 static void ixgbe_setup_multicst(ixgbe_t *);
69 static void ixgbe_get_hw_state(ixgbe_t *);
70 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
71 static void ixgbe_get_conf(ixgbe_t *);
72 static void ixgbe_init_params(ixgbe_t *);
73 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
74 static void ixgbe_driver_link_check(ixgbe_t *);
75 static void ixgbe_sfp_check(void *);
76 static void ixgbe_overtemp_check(void *);
77 static void ixgbe_link_timer(void *);
78 static void ixgbe_local_timer(void *);
79 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
80 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
81 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
82 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
83 static boolean_t is_valid_mac_addr(uint8_t *);
84 static boolean_t ixgbe_stall_check(ixgbe_t *);
85 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
86 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
87 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
88 static int ixgbe_alloc_intrs(ixgbe_t *);
89 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
90 static int ixgbe_add_intr_handlers(ixgbe_t *);
91 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
92 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
93 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
94 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
95 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
96 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
97 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
98 static void ixgbe_setup_adapter_vector(ixgbe_t *);
99 static void ixgbe_rem_intr_handlers(ixgbe_t *);
100 static void ixgbe_rem_intrs(ixgbe_t *);
101 static int ixgbe_enable_intrs(ixgbe_t *);
102 static int ixgbe_disable_intrs(ixgbe_t *);
103 static uint_t ixgbe_intr_legacy(void *, void *);
104 static uint_t ixgbe_intr_msi(void *, void *);
105 static uint_t ixgbe_intr_msix(void *, void *);
106 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
107 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
108 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
109 static void ixgbe_get_driver_control(struct ixgbe_hw *);
110 static int ixgbe_addmac(void *, const uint8_t *);
111 static int ixgbe_remmac(void *, const uint8_t *);
112 static void ixgbe_release_driver_control(struct ixgbe_hw *);
113
114 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
115 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
116 static int ixgbe_resume(dev_info_t *);
117 static int ixgbe_suspend(dev_info_t *);
118 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
119 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
120 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
121 static int ixgbe_intr_cb_register(ixgbe_t *);
122 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
123
124 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
125 const void *impl_data);
126 static void ixgbe_fm_init(ixgbe_t *);
127 static void ixgbe_fm_fini(ixgbe_t *);
128
129 char *ixgbe_priv_props[] = {
130 "_tx_copy_thresh",
131 "_tx_recycle_thresh",
132 "_tx_overload_thresh",
133 "_tx_resched_thresh",
134 "_rx_copy_thresh",
135 "_rx_limit_per_intr",
136 "_intr_throttling",
137 "_adv_pause_cap",
138 "_adv_asym_pause_cap",
139 NULL
140 };
141
142 #define IXGBE_MAX_PRIV_PROPS \
143 (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
144
145 static struct cb_ops ixgbe_cb_ops = {
146 nulldev, /* cb_open */
147 nulldev, /* cb_close */
148 nodev, /* cb_strategy */
149 nodev, /* cb_print */
150 nodev, /* cb_dump */
151 nodev, /* cb_read */
152 nodev, /* cb_write */
153 nodev, /* cb_ioctl */
154 nodev, /* cb_devmap */
155 nodev, /* cb_mmap */
156 nodev, /* cb_segmap */
157 nochpoll, /* cb_chpoll */
158 ddi_prop_op, /* cb_prop_op */
159 NULL, /* cb_stream */
160 D_MP | D_HOTPLUG, /* cb_flag */
161 CB_REV, /* cb_rev */
162 nodev, /* cb_aread */
163 nodev /* cb_awrite */
164 };
165
166 static struct dev_ops ixgbe_dev_ops = {
167 DEVO_REV, /* devo_rev */
168 0, /* devo_refcnt */
169 NULL, /* devo_getinfo */
170 nulldev, /* devo_identify */
171 nulldev, /* devo_probe */
172 ixgbe_attach, /* devo_attach */
173 ixgbe_detach, /* devo_detach */
174 nodev, /* devo_reset */
175 &ixgbe_cb_ops, /* devo_cb_ops */
176 NULL, /* devo_bus_ops */
177 ddi_power, /* devo_power */
178 ddi_quiesce_not_supported, /* devo_quiesce */
179 };
180
181 static struct modldrv ixgbe_modldrv = {
182 &mod_driverops, /* Type of module. This one is a driver */
183 ixgbe_ident, /* Discription string */
184 &ixgbe_dev_ops /* driver ops */
185 };
186
187 static struct modlinkage ixgbe_modlinkage = {
188 MODREV_1, &ixgbe_modldrv, NULL
189 };
190
191 /*
192 * Access attributes for register mapping
193 */
194 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
195 DDI_DEVICE_ATTR_V1,
196 DDI_STRUCTURE_LE_ACC,
197 DDI_STRICTORDER_ACC,
198 DDI_FLAGERR_ACC
199 };
200
201 /*
202 * Loopback property
203 */
204 static lb_property_t lb_normal = {
205 normal, "normal", IXGBE_LB_NONE
206 };
207
208 static lb_property_t lb_mac = {
209 internal, "MAC", IXGBE_LB_INTERNAL_MAC
210 };
211
212 static lb_property_t lb_external = {
213 external, "External", IXGBE_LB_EXTERNAL
214 };
215
216 #define IXGBE_M_CALLBACK_FLAGS \
217 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
218
219 static mac_callbacks_t ixgbe_m_callbacks = {
220 IXGBE_M_CALLBACK_FLAGS,
221 ixgbe_m_stat,
222 ixgbe_m_start,
223 ixgbe_m_stop,
224 ixgbe_m_promisc,
225 ixgbe_m_multicst,
226 NULL,
227 NULL,
228 NULL,
229 ixgbe_m_ioctl,
230 ixgbe_m_getcapab,
231 NULL,
232 NULL,
233 ixgbe_m_setprop,
234 ixgbe_m_getprop,
235 ixgbe_m_propinfo
236 };
237
238 /*
239 * Initialize capabilities of each supported adapter type
240 */
241 static adapter_info_t ixgbe_82598eb_cap = {
242 64, /* maximum number of rx queues */
243 1, /* minimum number of rx queues */
244 64, /* default number of rx queues */
245 16, /* maximum number of rx groups */
246 1, /* minimum number of rx groups */
247 1, /* default number of rx groups */
248 32, /* maximum number of tx queues */
249 1, /* minimum number of tx queues */
250 8, /* default number of tx queues */
251 16366, /* maximum MTU size */
252 0xFFFF, /* maximum interrupt throttle rate */
253 0, /* minimum interrupt throttle rate */
254 200, /* default interrupt throttle rate */
255 18, /* maximum total msix vectors */
256 16, /* maximum number of ring vectors */
257 2, /* maximum number of other vectors */
258 IXGBE_EICR_LSC, /* "other" interrupt types handled */
259 0, /* "other" interrupt types enable mask */
260 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */
261 | IXGBE_FLAG_RSS_CAPABLE
262 | IXGBE_FLAG_VMDQ_CAPABLE)
263 };
264
265 static adapter_info_t ixgbe_82599eb_cap = {
266 128, /* maximum number of rx queues */
267 1, /* minimum number of rx queues */
268 128, /* default number of rx queues */
269 64, /* maximum number of rx groups */
270 1, /* minimum number of rx groups */
271 1, /* default number of rx groups */
272 128, /* maximum number of tx queues */
273 1, /* minimum number of tx queues */
274 8, /* default number of tx queues */
275 15500, /* maximum MTU size */
276 0xFF8, /* maximum interrupt throttle rate */
277 0, /* minimum interrupt throttle rate */
278 200, /* default interrupt throttle rate */
279 64, /* maximum total msix vectors */
280 16, /* maximum number of ring vectors */
281 2, /* maximum number of other vectors */
282 (IXGBE_EICR_LSC
283 | IXGBE_EICR_GPI_SDP1
284 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
285
286 (IXGBE_SDP1_GPIEN
287 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
288
289 (IXGBE_FLAG_DCA_CAPABLE
290 | IXGBE_FLAG_RSS_CAPABLE
291 | IXGBE_FLAG_VMDQ_CAPABLE
292 | IXGBE_FLAG_RSC_CAPABLE
293 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
294 };
295
296 static adapter_info_t ixgbe_X540_cap = {
297 128, /* maximum number of rx queues */
298 1, /* minimum number of rx queues */
299 128, /* default number of rx queues */
300 64, /* maximum number of rx groups */
301 1, /* minimum number of rx groups */
302 1, /* default number of rx groups */
303 128, /* maximum number of tx queues */
304 1, /* minimum number of tx queues */
305 8, /* default number of tx queues */
306 15500, /* maximum MTU size */
307 0xFF8, /* maximum interrupt throttle rate */
308 0, /* minimum interrupt throttle rate */
309 200, /* default interrupt throttle rate */
310 64, /* maximum total msix vectors */
311 16, /* maximum number of ring vectors */
312 2, /* maximum number of other vectors */
313 (IXGBE_EICR_LSC
314 | IXGBE_EICR_GPI_SDP1
315 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
316
317 (IXGBE_SDP1_GPIEN
318 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
319
320 (IXGBE_FLAG_DCA_CAPABLE
321 | IXGBE_FLAG_RSS_CAPABLE
322 | IXGBE_FLAG_VMDQ_CAPABLE
323 | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
324 };
325
326 /*
327 * Module Initialization Functions.
328 */
329
330 int
331 _init(void)
332 {
333 int status;
334
335 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
336
337 status = mod_install(&ixgbe_modlinkage);
338
339 if (status != DDI_SUCCESS) {
340 mac_fini_ops(&ixgbe_dev_ops);
341 }
342
343 return (status);
344 }
345
346 int
347 _fini(void)
348 {
349 int status;
350
351 status = mod_remove(&ixgbe_modlinkage);
352
353 if (status == DDI_SUCCESS) {
354 mac_fini_ops(&ixgbe_dev_ops);
355 }
356
357 return (status);
358 }
359
360 int
361 _info(struct modinfo *modinfop)
362 {
363 int status;
364
365 status = mod_info(&ixgbe_modlinkage, modinfop);
366
367 return (status);
368 }
369
370 /*
371 * ixgbe_attach - Driver attach.
372 *
373 * This function is the device specific initialization entry
374 * point. This entry point is required and must be written.
375 * The DDI_ATTACH command must be provided in the attach entry
376 * point. When attach() is called with cmd set to DDI_ATTACH,
377 * all normal kernel services (such as kmem_alloc(9F)) are
378 * available for use by the driver.
379 *
380 * The attach() function will be called once for each instance
381 * of the device on the system with cmd set to DDI_ATTACH.
382 * Until attach() succeeds, the only driver entry points which
383 * may be called are open(9E) and getinfo(9E).
384 */
385 static int
386 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
387 {
388 ixgbe_t *ixgbe;
389 struct ixgbe_osdep *osdep;
390 struct ixgbe_hw *hw;
391 int instance;
392 char taskqname[32];
393
394 /*
395 * Check the command and perform corresponding operations
396 */
397 switch (cmd) {
398 default:
399 return (DDI_FAILURE);
400
401 case DDI_RESUME:
402 return (ixgbe_resume(devinfo));
403
404 case DDI_ATTACH:
405 break;
406 }
407
408 /* Get the device instance */
409 instance = ddi_get_instance(devinfo);
410
411 /* Allocate memory for the instance data structure */
412 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
413
414 ixgbe->dip = devinfo;
415 ixgbe->instance = instance;
416
417 hw = &ixgbe->hw;
418 osdep = &ixgbe->osdep;
419 hw->back = osdep;
420 osdep->ixgbe = ixgbe;
421
422 /* Attach the instance pointer to the dev_info data structure */
423 ddi_set_driver_private(devinfo, ixgbe);
424
425 /*
426 * Initialize for fma support
427 */
428 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
429 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
430 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
431 ixgbe_fm_init(ixgbe);
432 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
433
434 /*
435 * Map PCI config space registers
436 */
437 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
438 ixgbe_error(ixgbe, "Failed to map PCI configurations");
439 goto attach_fail;
440 }
441 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
442
443 /*
444 * Identify the chipset family
445 */
446 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
447 ixgbe_error(ixgbe, "Failed to identify hardware");
448 goto attach_fail;
449 }
450
451 /*
452 * Map device registers
453 */
454 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
455 ixgbe_error(ixgbe, "Failed to map device registers");
456 goto attach_fail;
457 }
458 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
459
460 /*
461 * Initialize driver parameters
462 */
463 ixgbe_init_properties(ixgbe);
464 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
465
466 /*
467 * Register interrupt callback
468 */
469 if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
470 ixgbe_error(ixgbe, "Failed to register interrupt callback");
471 goto attach_fail;
472 }
473
474 /*
475 * Allocate interrupts
476 */
477 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
478 ixgbe_error(ixgbe, "Failed to allocate interrupts");
479 goto attach_fail;
480 }
481 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
482
483 /*
484 * Allocate rx/tx rings based on the ring numbers.
485 * The actual numbers of rx/tx rings are decided by the number of
486 * allocated interrupt vectors, so we should allocate the rings after
487 * interrupts are allocated.
488 */
489 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
490 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
491 goto attach_fail;
492 }
493 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
494
495 /*
496 * Map rings to interrupt vectors
497 */
498 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
499 ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
500 goto attach_fail;
501 }
502
503 /*
504 * Add interrupt handlers
505 */
506 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
507 ixgbe_error(ixgbe, "Failed to add interrupt handlers");
508 goto attach_fail;
509 }
510 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
511
512 /*
513 * Create a taskq for sfp-change
514 */
515 (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
516 if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
517 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
518 ixgbe_error(ixgbe, "sfp_taskq create failed");
519 goto attach_fail;
520 }
521 ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
522
523 /*
524 * Create a taskq for over-temp
525 */
526 (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
527 if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
528 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
529 ixgbe_error(ixgbe, "overtemp_taskq create failed");
530 goto attach_fail;
531 }
532 ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;
533
534 /*
535 * Initialize driver parameters
536 */
537 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
538 ixgbe_error(ixgbe, "Failed to initialize driver settings");
539 goto attach_fail;
540 }
541
542 /*
543 * Initialize mutexes for this device.
544 * Do this before enabling the interrupt handler and
545 * register the softint to avoid the condition where
546 * interrupt handler can try using uninitialized mutex.
547 */
548 ixgbe_init_locks(ixgbe);
549 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
550
551 /*
552 * Initialize chipset hardware
553 */
554 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
555 ixgbe_error(ixgbe, "Failed to initialize adapter");
556 goto attach_fail;
557 }
558 ixgbe->link_check_complete = B_FALSE;
559 ixgbe->link_check_hrtime = gethrtime() +
560 (IXGBE_LINK_UP_TIME * 100000000ULL);
561 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
562
563 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
564 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
565 goto attach_fail;
566 }
567
568 /*
569 * Initialize statistics
570 */
571 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
572 ixgbe_error(ixgbe, "Failed to initialize statistics");
573 goto attach_fail;
574 }
575 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
576
577 /*
578 * Register the driver to the MAC
579 */
580 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
581 ixgbe_error(ixgbe, "Failed to register MAC");
582 goto attach_fail;
583 }
584 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
585 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
586
587 ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
588 IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
589 if (ixgbe->periodic_id == 0) {
590 ixgbe_error(ixgbe, "Failed to add the link check timer");
591 goto attach_fail;
592 }
593 ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
594
595 /*
596 * Now that mutex locks are initialized, and the chip is also
597 * initialized, enable interrupts.
598 */
599 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
600 ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
601 goto attach_fail;
602 }
603 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
604
605 ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version);
606 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
607
608 return (DDI_SUCCESS);
609
610 attach_fail:
611 ixgbe_unconfigure(devinfo, ixgbe);
612 return (DDI_FAILURE);
613 }
614
615 /*
616 * ixgbe_detach - Driver detach.
617 *
618 * The detach() function is the complement of the attach routine.
619 * If cmd is set to DDI_DETACH, detach() is used to remove the
620 * state associated with a given instance of a device node
621 * prior to the removal of that instance from the system.
622 *
623 * The detach() function will be called once for each instance
624 * of the device for which there has been a successful attach()
625 * once there are no longer any opens on the device.
626 *
627 * Interrupts routine are disabled, All memory allocated by this
628 * driver are freed.
629 */
630 static int
631 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
632 {
633 ixgbe_t *ixgbe;
634
635 /*
636 * Check detach command
637 */
638 switch (cmd) {
639 default:
640 return (DDI_FAILURE);
641
642 case DDI_SUSPEND:
643 return (ixgbe_suspend(devinfo));
644
645 case DDI_DETACH:
646 break;
647 }
648
649 /*
650 * Get the pointer to the driver private data structure
651 */
652 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
653 if (ixgbe == NULL)
654 return (DDI_FAILURE);
655
656 /*
657 * If the device is still running, it needs to be stopped first.
658 * This check is necessary because under some specific circumstances,
659 * the detach routine can be called without stopping the interface
660 * first.
661 */
662 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
663 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
664 mutex_enter(&ixgbe->gen_lock);
665 ixgbe_stop(ixgbe, B_TRUE);
666 mutex_exit(&ixgbe->gen_lock);
667 /* Disable and stop the watchdog timer */
668 ixgbe_disable_watchdog_timer(ixgbe);
669 }
670
671 /*
672 * Check if there are still rx buffers held by the upper layer.
673 * If so, fail the detach.
674 */
675 if (!ixgbe_rx_drain(ixgbe))
676 return (DDI_FAILURE);
677
678 /*
679 * Do the remaining unconfigure routines
680 */
681 ixgbe_unconfigure(devinfo, ixgbe);
682
683 return (DDI_SUCCESS);
684 }
685
686 static void
687 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
688 {
689 /*
690 * Disable interrupt
691 */
692 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
693 (void) ixgbe_disable_intrs(ixgbe);
694 }
695
696 /*
697 * remove the link check timer
698 */
699 if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
700 if (ixgbe->periodic_id != NULL) {
701 ddi_periodic_delete(ixgbe->periodic_id);
702 ixgbe->periodic_id = NULL;
703 }
704 }
705
706 /*
707 * Unregister MAC
708 */
709 if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
710 (void) mac_unregister(ixgbe->mac_hdl);
711 }
712
713 /*
714 * Free statistics
715 */
716 if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
717 kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
718 }
719
720 /*
721 * Remove interrupt handlers
722 */
723 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
724 ixgbe_rem_intr_handlers(ixgbe);
725 }
726
727 /*
728 * Remove taskq for sfp-status-change
729 */
730 if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
731 ddi_taskq_destroy(ixgbe->sfp_taskq);
732 }
733
734 /*
735 * Remove taskq for over-temp
736 */
737 if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
738 ddi_taskq_destroy(ixgbe->overtemp_taskq);
739 }
740
741 /*
742 * Remove interrupts
743 */
744 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
745 ixgbe_rem_intrs(ixgbe);
746 }
747
748 /*
749 * Unregister interrupt callback handler
750 */
751 (void) ddi_cb_unregister(ixgbe->cb_hdl);
752
753 /*
754 * Remove driver properties
755 */
756 if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
757 (void) ddi_prop_remove_all(devinfo);
758 }
759
760 /*
761 * Stop the chipset
762 */
763 if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
764 mutex_enter(&ixgbe->gen_lock);
765 ixgbe_chip_stop(ixgbe);
766 mutex_exit(&ixgbe->gen_lock);
767 }
768
769 /*
770 * Free register handle
771 */
772 if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
773 if (ixgbe->osdep.reg_handle != NULL)
774 ddi_regs_map_free(&ixgbe->osdep.reg_handle);
775 }
776
777 /*
778 * Free PCI config handle
779 */
780 if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
781 if (ixgbe->osdep.cfg_handle != NULL)
782 pci_config_teardown(&ixgbe->osdep.cfg_handle);
783 }
784
785 /*
786 * Free locks
787 */
788 if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
789 ixgbe_destroy_locks(ixgbe);
790 }
791
792 /*
793 * Free the rx/tx rings
794 */
795 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
796 ixgbe_free_rings(ixgbe);
797 }
798
799 /*
800 * Unregister FMA capabilities
801 */
802 if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
803 ixgbe_fm_fini(ixgbe);
804 }
805
806 /*
807 * Free the driver data structure
808 */
809 kmem_free(ixgbe, sizeof (ixgbe_t));
810
811 ddi_set_driver_private(devinfo, NULL);
812 }
813
814 /*
815 * ixgbe_register_mac - Register the driver and its function pointers with
816 * the GLD interface.
817 */
818 static int
819 ixgbe_register_mac(ixgbe_t *ixgbe)
820 {
821 struct ixgbe_hw *hw = &ixgbe->hw;
822 mac_register_t *mac;
823 int status;
824
825 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
826 return (IXGBE_FAILURE);
827
828 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
829 mac->m_driver = ixgbe;
830 mac->m_dip = ixgbe->dip;
831 mac->m_src_addr = hw->mac.addr;
832 mac->m_callbacks = &ixgbe_m_callbacks;
833 mac->m_min_sdu = 0;
834 mac->m_max_sdu = ixgbe->default_mtu;
835 mac->m_margin = VLAN_TAGSZ;
836 mac->m_priv_props = ixgbe_priv_props;
837 mac->m_v12n = MAC_VIRT_LEVEL1;
838
839 status = mac_register(mac, &ixgbe->mac_hdl);
840
841 mac_free(mac);
842
843 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
844 }
845
846 /*
847 * ixgbe_identify_hardware - Identify the type of the chipset.
848 */
849 static int
850 ixgbe_identify_hardware(ixgbe_t *ixgbe)
851 {
852 struct ixgbe_hw *hw = &ixgbe->hw;
853 struct ixgbe_osdep *osdep = &ixgbe->osdep;
854
855 /*
856 * Get the device id
857 */
858 hw->vendor_id =
859 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
860 hw->device_id =
861 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
862 hw->revision_id =
863 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
864 hw->subsystem_device_id =
865 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
866 hw->subsystem_vendor_id =
867 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
868
869 /*
870 * Set the mac type of the adapter based on the device id
871 */
872 if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
873 return (IXGBE_FAILURE);
874 }
875
876 /*
877 * Install adapter capabilities
878 */
879 switch (hw->mac.type) {
880 case ixgbe_mac_82598EB:
881 IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
882 ixgbe->capab = &ixgbe_82598eb_cap;
883
884 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
885 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
886 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
887 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
888 }
889 break;
890
891 case ixgbe_mac_82599EB:
892 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
893 ixgbe->capab = &ixgbe_82599eb_cap;
894
895 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
896 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
897 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
898 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
899 }
900 break;
901
902 case ixgbe_mac_X540:
903 IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
904 ixgbe->capab = &ixgbe_X540_cap;
905 /*
906 * For now, X540 is all set in its capab structure.
907 * As other X540 variants show up, things can change here.
908 */
909 break;
910
911 default:
912 IXGBE_DEBUGLOG_1(ixgbe,
913 "adapter not supported in ixgbe_identify_hardware(): %d\n",
914 hw->mac.type);
915 return (IXGBE_FAILURE);
916 }
917
918 return (IXGBE_SUCCESS);
919 }
920
921 /*
922 * ixgbe_regs_map - Map the device registers.
923 *
924 */
925 static int
926 ixgbe_regs_map(ixgbe_t *ixgbe)
927 {
928 dev_info_t *devinfo = ixgbe->dip;
929 struct ixgbe_hw *hw = &ixgbe->hw;
930 struct ixgbe_osdep *osdep = &ixgbe->osdep;
931 off_t mem_size;
932
933 /*
934 * First get the size of device registers to be mapped.
935 */
936 if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
937 != DDI_SUCCESS) {
938 return (IXGBE_FAILURE);
939 }
940
941 /*
942 * Call ddi_regs_map_setup() to map registers
943 */
944 if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
945 (caddr_t *)&hw->hw_addr, 0,
946 mem_size, &ixgbe_regs_acc_attr,
947 &osdep->reg_handle)) != DDI_SUCCESS) {
948 return (IXGBE_FAILURE);
949 }
950
951 return (IXGBE_SUCCESS);
952 }
953
954 /*
955 * ixgbe_init_properties - Initialize driver properties.
956 */
957 static void
958 ixgbe_init_properties(ixgbe_t *ixgbe)
959 {
960 /*
961 * Get conf file properties, including link settings
962 * jumbo frames, ring number, descriptor number, etc.
963 */
964 ixgbe_get_conf(ixgbe);
965
966 ixgbe_init_params(ixgbe);
967 }
968
969 /*
970 * ixgbe_init_driver_settings - Initialize driver settings.
971 *
972 * The settings include hardware function pointers, bus information,
973 * rx/tx rings settings, link state, and any other parameters that
974 * need to be setup during driver initialization.
975 */
976 static int
977 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
978 {
979 struct ixgbe_hw *hw = &ixgbe->hw;
980 dev_info_t *devinfo = ixgbe->dip;
981 ixgbe_rx_ring_t *rx_ring;
982 ixgbe_rx_group_t *rx_group;
983 ixgbe_tx_ring_t *tx_ring;
984 uint32_t rx_size;
985 uint32_t tx_size;
986 uint32_t ring_per_group;
987 int i;
988
989 /*
990 * Initialize chipset specific hardware function pointers
991 */
992 if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
993 return (IXGBE_FAILURE);
994 }
995
996 /*
997 * Get the system page size
998 */
999 ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
1000
1001 /*
1002 * Set rx buffer size
1003 *
1004 * The IP header alignment room is counted in the calculation.
1005 * The rx buffer size is in unit of 1K that is required by the
1006 * chipset hardware.
1007 */
1008 rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
1009 ixgbe->rx_buf_size = ((rx_size >> 10) +
1010 ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1011
1012 /*
1013 * Set tx buffer size
1014 */
1015 tx_size = ixgbe->max_frame_size;
1016 ixgbe->tx_buf_size = ((tx_size >> 10) +
1017 ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
1018
1019 /*
1020 * Initialize rx/tx rings/groups parameters
1021 */
1022 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
1023 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1024 rx_ring = &ixgbe->rx_rings[i];
1025 rx_ring->index = i;
1026 rx_ring->ixgbe = ixgbe;
1027 rx_ring->group_index = i / ring_per_group;
1028 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
1029 }
1030
1031 for (i = 0; i < ixgbe->num_rx_groups; i++) {
1032 rx_group = &ixgbe->rx_groups[i];
1033 rx_group->index = i;
1034 rx_group->ixgbe = ixgbe;
1035 }
1036
1037 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1038 tx_ring = &ixgbe->tx_rings[i];
1039 tx_ring->index = i;
1040 tx_ring->ixgbe = ixgbe;
1041 if (ixgbe->tx_head_wb_enable)
1042 tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
1043 else
1044 tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
1045
1046 tx_ring->ring_size = ixgbe->tx_ring_size;
1047 tx_ring->free_list_size = ixgbe->tx_ring_size +
1048 (ixgbe->tx_ring_size >> 1);
1049 }
1050
1051 /*
1052 * Initialize values of interrupt throttling rate
1053 */
1054 for (i = 1; i < MAX_INTR_VECTOR; i++)
1055 ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
1056
1057 /*
1058 * The initial link state should be "unknown"
1059 */
1060 ixgbe->link_state = LINK_STATE_UNKNOWN;
1061
1062 return (IXGBE_SUCCESS);
1063 }
1064
1065 /*
1066 * ixgbe_init_locks - Initialize locks.
1067 */
1068 static void
1069 ixgbe_init_locks(ixgbe_t *ixgbe)
1070 {
1071 ixgbe_rx_ring_t *rx_ring;
1072 ixgbe_tx_ring_t *tx_ring;
1073 int i;
1074
1075 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1076 rx_ring = &ixgbe->rx_rings[i];
1077 mutex_init(&rx_ring->rx_lock, NULL,
1078 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1079 }
1080
1081 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1082 tx_ring = &ixgbe->tx_rings[i];
1083 mutex_init(&tx_ring->tx_lock, NULL,
1084 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1085 mutex_init(&tx_ring->recycle_lock, NULL,
1086 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1087 mutex_init(&tx_ring->tcb_head_lock, NULL,
1088 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1089 mutex_init(&tx_ring->tcb_tail_lock, NULL,
1090 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1091 }
1092
1093 mutex_init(&ixgbe->gen_lock, NULL,
1094 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1095
1096 mutex_init(&ixgbe->watchdog_lock, NULL,
1097 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1098 }
1099
1100 /*
1101 * ixgbe_destroy_locks - Destroy locks.
1102 */
1103 static void
1104 ixgbe_destroy_locks(ixgbe_t *ixgbe)
1105 {
1106 ixgbe_rx_ring_t *rx_ring;
1107 ixgbe_tx_ring_t *tx_ring;
1108 int i;
1109
1110 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1111 rx_ring = &ixgbe->rx_rings[i];
1112 mutex_destroy(&rx_ring->rx_lock);
1113 }
1114
1115 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1116 tx_ring = &ixgbe->tx_rings[i];
1117 mutex_destroy(&tx_ring->tx_lock);
1118 mutex_destroy(&tx_ring->recycle_lock);
1119 mutex_destroy(&tx_ring->tcb_head_lock);
1120 mutex_destroy(&tx_ring->tcb_tail_lock);
1121 }
1122
1123 mutex_destroy(&ixgbe->gen_lock);
1124 mutex_destroy(&ixgbe->watchdog_lock);
1125 }
1126
1127 static int
1128 ixgbe_resume(dev_info_t *devinfo)
1129 {
1130 ixgbe_t *ixgbe;
1131 int i;
1132
1133 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1134 if (ixgbe == NULL)
1135 return (DDI_FAILURE);
1136
1137 mutex_enter(&ixgbe->gen_lock);
1138
1139 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1140 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1141 mutex_exit(&ixgbe->gen_lock);
1142 return (DDI_FAILURE);
1143 }
1144
1145 /*
1146 * Enable and start the watchdog timer
1147 */
1148 ixgbe_enable_watchdog_timer(ixgbe);
1149 }
1150
1151 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);
1152
1153 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1154 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1155 mac_tx_ring_update(ixgbe->mac_hdl,
1156 ixgbe->tx_rings[i].ring_handle);
1157 }
1158 }
1159
1160 mutex_exit(&ixgbe->gen_lock);
1161
1162 return (DDI_SUCCESS);
1163 }
1164
1165 static int
1166 ixgbe_suspend(dev_info_t *devinfo)
1167 {
1168 ixgbe_t *ixgbe;
1169
1170 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1171 if (ixgbe == NULL)
1172 return (DDI_FAILURE);
1173
1174 mutex_enter(&ixgbe->gen_lock);
1175
1176 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
1177 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1178 mutex_exit(&ixgbe->gen_lock);
1179 return (DDI_SUCCESS);
1180 }
1181 ixgbe_stop(ixgbe, B_FALSE);
1182
1183 mutex_exit(&ixgbe->gen_lock);
1184
1185 /*
1186 * Disable and stop the watchdog timer
1187 */
1188 ixgbe_disable_watchdog_timer(ixgbe);
1189
1190 return (DDI_SUCCESS);
1191 }
1192
1193 /*
1194 * ixgbe_init - Initialize the device.
1195 */
1196 static int
1197 ixgbe_init(ixgbe_t *ixgbe)
1198 {
1199 struct ixgbe_hw *hw = &ixgbe->hw;
1200
1201 mutex_enter(&ixgbe->gen_lock);
1202
1203 /*
1204 * Reset chipset to put the hardware in a known state
1205 * before we try to do anything with the eeprom.
1206 */
1207 if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1208 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1209 goto init_fail;
1210 }
1211
1212 /*
1213 * Need to init eeprom before validating the checksum.
1214 */
1215 if (ixgbe_init_eeprom_params(hw) < 0) {
1216 ixgbe_error(ixgbe,
1217 "Unable to intitialize the eeprom interface.");
1218 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1219 goto init_fail;
1220 }
1221
1222 /*
1223 * NVM validation
1224 */
1225 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1226 /*
1227 * Some PCI-E parts fail the first check due to
1228 * the link being in sleep state. Call it again,
1229 * if it fails a second time it's a real issue.
1230 */
1231 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1232 ixgbe_error(ixgbe,
1233 "Invalid NVM checksum. Please contact "
1234 "the vendor to update the NVM.");
1235 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1236 goto init_fail;
1237 }
1238 }
1239
1240 /*
1241 * Setup default flow control thresholds - enable/disable
1242 * & flow control type is controlled by ixgbe.conf
1243 */
1244 hw->fc.high_water[0] = DEFAULT_FCRTH;
1245 hw->fc.low_water[0] = DEFAULT_FCRTL;
1246 hw->fc.pause_time = DEFAULT_FCPAUSE;
1247 hw->fc.send_xon = B_TRUE;
1248
1249 /*
1250 * Initialize link settings
1251 */
1252 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1253
1254 /*
1255 * Initialize the chipset hardware
1256 */
1257 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1258 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1259 goto init_fail;
1260 }
1261
1262 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1263 goto init_fail;
1264 }
1265
1266 mutex_exit(&ixgbe->gen_lock);
1267 return (IXGBE_SUCCESS);
1268
1269 init_fail:
1270 /*
1271 * Reset PHY
1272 */
1273 (void) ixgbe_reset_phy(hw);
1274
1275 mutex_exit(&ixgbe->gen_lock);
1276 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1277 return (IXGBE_FAILURE);
1278 }
1279
1280 /*
1281 * ixgbe_chip_start - Initialize and start the chipset hardware.
1282 */
1283 static int
1284 ixgbe_chip_start(ixgbe_t *ixgbe)
1285 {
1286 struct ixgbe_hw *hw = &ixgbe->hw;
1287 int ret_val, i;
1288
1289 ASSERT(mutex_owned(&ixgbe->gen_lock));
1290
1291 /*
1292 * Get the mac address
1293 * This function should handle SPARC case correctly.
1294 */
1295 if (!ixgbe_find_mac_address(ixgbe)) {
1296 ixgbe_error(ixgbe, "Failed to get the mac address");
1297 return (IXGBE_FAILURE);
1298 }
1299
1300 /*
1301 * Validate the mac address
1302 */
1303 (void) ixgbe_init_rx_addrs(hw);
1304 if (!is_valid_mac_addr(hw->mac.addr)) {
1305 ixgbe_error(ixgbe, "Invalid mac address");
1306 return (IXGBE_FAILURE);
1307 }
1308
1309 /*
1310 * Configure/Initialize hardware
1311 */
1312 ret_val = ixgbe_init_hw(hw);
1313 if (ret_val != IXGBE_SUCCESS) {
1314 if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
1315 ixgbe_error(ixgbe,
1316 "This 82599 device is pre-release and contains"
1317 " outdated firmware, please contact your hardware"
1318 " vendor for a replacement.");
1319 } else {
1320 ixgbe_error(ixgbe, "Failed to initialize hardware");
1321 return (IXGBE_FAILURE);
1322 }
1323 }
1324
1325 /*
1326 * Re-enable relaxed ordering for performance. It is disabled
1327 * by default in the hardware init.
1328 */
1329 if (ixgbe->relax_order_enable == B_TRUE)
1330 ixgbe_enable_relaxed_ordering(hw);
1331
1332 /*
1333 * Setup adapter interrupt vectors
1334 */
1335 ixgbe_setup_adapter_vector(ixgbe);
1336
1337 /*
1338 * Initialize unicast addresses.
1339 */
1340 ixgbe_init_unicst(ixgbe);
1341
1342 /*
1343 * Setup and initialize the mctable structures.
1344 */
1345 ixgbe_setup_multicst(ixgbe);
1346
1347 /*
1348 * Set interrupt throttling rate
1349 */
1350 for (i = 0; i < ixgbe->intr_cnt; i++) {
1351 IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1352 }
1353
1354 /*
1355 * Save the state of the phy
1356 */
1357 ixgbe_get_hw_state(ixgbe);
1358
1359 /*
1360 * Make sure driver has control
1361 */
1362 ixgbe_get_driver_control(hw);
1363
1364 return (IXGBE_SUCCESS);
1365 }
1366
1367 /*
1368 * ixgbe_chip_stop - Stop the chipset hardware
1369 */
1370 static void
1371 ixgbe_chip_stop(ixgbe_t *ixgbe)
1372 {
1373 struct ixgbe_hw *hw = &ixgbe->hw;
1374
1375 ASSERT(mutex_owned(&ixgbe->gen_lock));
1376
1377 /*
1378 * Tell firmware driver is no longer in control
1379 */
1380 ixgbe_release_driver_control(hw);
1381
1382 /*
1383 * Reset the chipset
1384 */
1385 (void) ixgbe_reset_hw(hw);
1386
1387 /*
1388 * Reset PHY
1389 */
1390 (void) ixgbe_reset_phy(hw);
1391 }
1392
1393 /*
1394 * ixgbe_reset - Reset the chipset and re-start the driver.
1395 *
1396 * It involves stopping and re-starting the chipset,
1397 * and re-configuring the rx/tx rings.
1398 */
1399 static int
1400 ixgbe_reset(ixgbe_t *ixgbe)
1401 {
1402 int i;
1403
1404 /*
1405 * Disable and stop the watchdog timer
1406 */
1407 ixgbe_disable_watchdog_timer(ixgbe);
1408
1409 mutex_enter(&ixgbe->gen_lock);
1410
1411 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1412 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
1413
1414 ixgbe_stop(ixgbe, B_FALSE);
1415
1416 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1417 mutex_exit(&ixgbe->gen_lock);
1418 return (IXGBE_FAILURE);
1419 }
1420
1421 /*
1422 * After resetting, need to recheck the link status.
1423 */
1424 ixgbe->link_check_complete = B_FALSE;
1425 ixgbe->link_check_hrtime = gethrtime() +
1426 (IXGBE_LINK_UP_TIME * 100000000ULL);
1427
1428 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);
1429
1430 if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
1431 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1432 mac_tx_ring_update(ixgbe->mac_hdl,
1433 ixgbe->tx_rings[i].ring_handle);
1434 }
1435 }
1436
1437 mutex_exit(&ixgbe->gen_lock);
1438
1439 /*
1440 * Enable and start the watchdog timer
1441 */
1442 ixgbe_enable_watchdog_timer(ixgbe);
1443
1444 return (IXGBE_SUCCESS);
1445 }
1446
1447 /*
1448 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1449 */
1450 static void
1451 ixgbe_tx_clean(ixgbe_t *ixgbe)
1452 {
1453 ixgbe_tx_ring_t *tx_ring;
1454 tx_control_block_t *tcb;
1455 link_list_t pending_list;
1456 uint32_t desc_num;
1457 int i, j;
1458
1459 LINK_LIST_INIT(&pending_list);
1460
1461 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1462 tx_ring = &ixgbe->tx_rings[i];
1463
1464 mutex_enter(&tx_ring->recycle_lock);
1465
1466 /*
1467 * Clean the pending tx data - the pending packets in the
1468 * work_list that have no chances to be transmitted again.
1469 *
1470 * We must ensure the chipset is stopped or the link is down
1471 * before cleaning the transmit packets.
1472 */
1473 desc_num = 0;
1474 for (j = 0; j < tx_ring->ring_size; j++) {
1475 tcb = tx_ring->work_list[j];
1476 if (tcb != NULL) {
1477 desc_num += tcb->desc_num;
1478
1479 tx_ring->work_list[j] = NULL;
1480
1481 ixgbe_free_tcb(tcb);
1482
1483 LIST_PUSH_TAIL(&pending_list, &tcb->link);
1484 }
1485 }
1486
1487 if (desc_num > 0) {
1488 atomic_add_32(&tx_ring->tbd_free, desc_num);
1489 ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1490
1491 /*
1492 * Reset the head and tail pointers of the tbd ring;
1493 * Reset the writeback head if it's enable.
1494 */
1495 tx_ring->tbd_head = 0;
1496 tx_ring->tbd_tail = 0;
1497 if (ixgbe->tx_head_wb_enable)
1498 *tx_ring->tbd_head_wb = 0;
1499
1500 IXGBE_WRITE_REG(&ixgbe->hw,
1501 IXGBE_TDH(tx_ring->index), 0);
1502 IXGBE_WRITE_REG(&ixgbe->hw,
1503 IXGBE_TDT(tx_ring->index), 0);
1504 }
1505
1506 mutex_exit(&tx_ring->recycle_lock);
1507
1508 /*
1509 * Add the tx control blocks in the pending list to
1510 * the free list.
1511 */
1512 ixgbe_put_free_list(tx_ring, &pending_list);
1513 }
1514 }
1515
1516 /*
1517 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1518 * transmitted.
1519 */
1520 static boolean_t
1521 ixgbe_tx_drain(ixgbe_t *ixgbe)
1522 {
1523 ixgbe_tx_ring_t *tx_ring;
1524 boolean_t done;
1525 int i, j;
1526
1527 /*
1528 * Wait for a specific time to allow pending tx packets
1529 * to be transmitted.
1530 *
1531 * Check the counter tbd_free to see if transmission is done.
1532 * No lock protection is needed here.
1533 *
1534 * Return B_TRUE if all pending packets have been transmitted;
1535 * Otherwise return B_FALSE;
1536 */
1537 for (i = 0; i < TX_DRAIN_TIME; i++) {
1538
1539 done = B_TRUE;
1540 for (j = 0; j < ixgbe->num_tx_rings; j++) {
1541 tx_ring = &ixgbe->tx_rings[j];
1542 done = done &&
1543 (tx_ring->tbd_free == tx_ring->ring_size);
1544 }
1545
1546 if (done)
1547 break;
1548
1549 msec_delay(1);
1550 }
1551
1552 return (done);
1553 }
1554
1555 /*
1556 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1557 */
1558 static boolean_t
1559 ixgbe_rx_drain(ixgbe_t *ixgbe)
1560 {
1561 boolean_t done = B_TRUE;
1562 int i;
1563
1564 /*
1565 * Polling the rx free list to check if those rx buffers held by
1566 * the upper layer are released.
1567 *
1568 * Check the counter rcb_free to see if all pending buffers are
1569 * released. No lock protection is needed here.
1570 *
1571 * Return B_TRUE if all pending buffers have been released;
1572 * Otherwise return B_FALSE;
1573 */
1574 for (i = 0; i < RX_DRAIN_TIME; i++) {
1575 done = (ixgbe->rcb_pending == 0);
1576
1577 if (done)
1578 break;
1579
1580 msec_delay(1);
1581 }
1582
1583 return (done);
1584 }
1585
1586 /*
1587 * ixgbe_start - Start the driver/chipset.
1588 */
1589 int
1590 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
1591 {
1592 int i;
1593
1594 ASSERT(mutex_owned(&ixgbe->gen_lock));
1595
1596 if (alloc_buffer) {
1597 if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
1598 ixgbe_error(ixgbe,
1599 "Failed to allocate software receive rings");
1600 return (IXGBE_FAILURE);
1601 }
1602
1603 /* Allocate buffers for all the rx/tx rings */
1604 if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
1605 ixgbe_error(ixgbe, "Failed to allocate DMA resource");
1606 return (IXGBE_FAILURE);
1607 }
1608
1609 ixgbe->tx_ring_init = B_TRUE;
1610 } else {
1611 ixgbe->tx_ring_init = B_FALSE;
1612 }
1613
1614 for (i = 0; i < ixgbe->num_rx_rings; i++)
1615 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1616 for (i = 0; i < ixgbe->num_tx_rings; i++)
1617 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1618
1619 /*
1620 * Start the chipset hardware
1621 */
1622 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1623 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1624 goto start_failure;
1625 }
1626
1627 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1628 goto start_failure;
1629 }
1630
1631 /*
1632 * Setup the rx/tx rings
1633 */
1634 ixgbe_setup_rings(ixgbe);
1635
1636 /*
1637 * ixgbe_start() will be called when resetting, however if reset
1638 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
1639 * before enabling the interrupts.
1640 */
1641 atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
1642 | IXGBE_STALL| IXGBE_OVERTEMP));
1643
1644 /*
1645 * Enable adapter interrupts
1646 * The interrupts must be enabled after the driver state is START
1647 */
1648 ixgbe_enable_adapter_interrupts(ixgbe);
1649
1650 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1651 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1652 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1653 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1654
1655 return (IXGBE_SUCCESS);
1656
1657 start_failure:
1658 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1659 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1660 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1661 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1662
1663 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1664
1665 return (IXGBE_FAILURE);
1666 }
1667
1668 /*
1669 * ixgbe_stop - Stop the driver/chipset.
1670 */
1671 void
1672 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
1673 {
1674 int i;
1675
1676 ASSERT(mutex_owned(&ixgbe->gen_lock));
1677
1678 /*
1679 * Disable the adapter interrupts
1680 */
1681 ixgbe_disable_adapter_interrupts(ixgbe);
1682
1683 /*
1684 * Drain the pending tx packets
1685 */
1686 (void) ixgbe_tx_drain(ixgbe);
1687
1688 for (i = 0; i < ixgbe->num_rx_rings; i++)
1689 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1690 for (i = 0; i < ixgbe->num_tx_rings; i++)
1691 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1692
1693 /*
1694 * Stop the chipset hardware
1695 */
1696 ixgbe_chip_stop(ixgbe);
1697
1698 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1699 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1700 }
1701
1702 /*
1703 * Clean the pending tx data/resources
1704 */
1705 ixgbe_tx_clean(ixgbe);
1706
1707 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1708 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1709 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1710 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1711
1712 if (ixgbe->link_state == LINK_STATE_UP) {
1713 ixgbe->link_state = LINK_STATE_UNKNOWN;
1714 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
1715 }
1716
1717 if (free_buffer) {
1718 /*
1719 * Release the DMA/memory resources of rx/tx rings
1720 */
1721 ixgbe_free_dma(ixgbe);
1722 ixgbe_free_rx_data(ixgbe);
1723 }
1724 }
1725
1726 /*
1727 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
1728 */
1729 /* ARGSUSED */
1730 static int
1731 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
1732 void *arg1, void *arg2)
1733 {
1734 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
1735
1736 switch (cbaction) {
1737 /* IRM callback */
1738 int count;
1739 case DDI_CB_INTR_ADD:
1740 case DDI_CB_INTR_REMOVE:
1741 count = (int)(uintptr_t)cbarg;
1742 ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
1743 DTRACE_PROBE2(ixgbe__irm__callback, int, count,
1744 int, ixgbe->intr_cnt);
1745 if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
1746 DDI_SUCCESS) {
1747 ixgbe_error(ixgbe,
1748 "IRM CB: Failed to adjust interrupts");
1749 goto cb_fail;
1750 }
1751 break;
1752 default:
1753 IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
1754 cbaction);
1755 return (DDI_ENOTSUP);
1756 }
1757 return (DDI_SUCCESS);
1758 cb_fail:
1759 return (DDI_FAILURE);
1760 }
1761
1762 /*
1763 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
1764 */
1765 static int
1766 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
1767 {
1768 int i, rc, actual;
1769
1770 if (count == 0)
1771 return (DDI_SUCCESS);
1772
1773 if ((cbaction == DDI_CB_INTR_ADD &&
1774 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
1775 (cbaction == DDI_CB_INTR_REMOVE &&
1776 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
1777 return (DDI_FAILURE);
1778
1779 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1780 return (DDI_FAILURE);
1781 }
1782
1783 for (i = 0; i < ixgbe->num_rx_rings; i++)
1784 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
1785 for (i = 0; i < ixgbe->num_tx_rings; i++)
1786 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
1787
1788 mutex_enter(&ixgbe->gen_lock);
1789 ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1790 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
1791 ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
1792 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
1793
1794 ixgbe_stop(ixgbe, B_FALSE);
1795 /*
1796 * Disable interrupts
1797 */
1798 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1799 rc = ixgbe_disable_intrs(ixgbe);
1800 ASSERT(rc == IXGBE_SUCCESS);
1801 }
1802 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
1803
1804 /*
1805 * Remove interrupt handlers
1806 */
1807 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1808 ixgbe_rem_intr_handlers(ixgbe);
1809 }
1810 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
1811
1812 /*
1813 * Clear vect_map
1814 */
1815 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
1816 switch (cbaction) {
1817 case DDI_CB_INTR_ADD:
1818 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
1819 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
1820 DDI_INTR_ALLOC_NORMAL);
1821 if (rc != DDI_SUCCESS || actual != count) {
1822 ixgbe_log(ixgbe, "Adjust interrupts failed."
1823 "return: %d, irm cb size: %d, actual: %d",
1824 rc, count, actual);
1825 goto intr_adjust_fail;
1826 }
1827 ixgbe->intr_cnt += count;
1828 break;
1829
1830 case DDI_CB_INTR_REMOVE:
1831 for (i = ixgbe->intr_cnt - count;
1832 i < ixgbe->intr_cnt; i ++) {
1833 rc = ddi_intr_free(ixgbe->htable[i]);
1834 ixgbe->htable[i] = NULL;
1835 if (rc != DDI_SUCCESS) {
1836 ixgbe_log(ixgbe, "Adjust interrupts failed."
1837 "return: %d, irm cb size: %d, actual: %d",
1838 rc, count, actual);
1839 goto intr_adjust_fail;
1840 }
1841 }
1842 ixgbe->intr_cnt -= count;
1843 break;
1844 }
1845
1846 /*
1847 * Get priority for first vector, assume remaining are all the same
1848 */
1849 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
1850 if (rc != DDI_SUCCESS) {
1851 ixgbe_log(ixgbe,
1852 "Get interrupt priority failed: %d", rc);
1853 goto intr_adjust_fail;
1854 }
1855 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
1856 if (rc != DDI_SUCCESS) {
1857 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
1858 goto intr_adjust_fail;
1859 }
1860 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
1861
1862 /*
1863 * Map rings to interrupt vectors
1864 */
1865 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
1866 ixgbe_error(ixgbe,
1867 "IRM CB: Failed to map interrupts to vectors");
1868 goto intr_adjust_fail;
1869 }
1870
1871 /*
1872 * Add interrupt handlers
1873 */
1874 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
1875 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
1876 goto intr_adjust_fail;
1877 }
1878 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
1879
1880 /*
1881 * Now that mutex locks are initialized, and the chip is also
1882 * initialized, enable interrupts.
1883 */
1884 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
1885 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
1886 goto intr_adjust_fail;
1887 }
1888 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
1889 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1890 ixgbe_error(ixgbe, "IRM CB: Failed to start");
1891 goto intr_adjust_fail;
1892 }
1893 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
1894 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
1895 ixgbe->ixgbe_state |= IXGBE_STARTED;
1896 mutex_exit(&ixgbe->gen_lock);
1897
1898 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1899 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
1900 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
1901 }
1902 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1903 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
1904 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
1905 }
1906
1907 /* Wakeup all Tx rings */
1908 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1909 mac_tx_ring_update(ixgbe->mac_hdl,
1910 ixgbe->tx_rings[i].ring_handle);
1911 }
1912
1913 IXGBE_DEBUGLOG_3(ixgbe,
1914 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
1915 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
1916 return (DDI_SUCCESS);
1917
1918 intr_adjust_fail:
1919 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1920 mutex_exit(&ixgbe->gen_lock);
1921 return (DDI_FAILURE);
1922 }
1923
1924 /*
1925 * ixgbe_intr_cb_register - Register interrupt callback function.
1926 */
1927 static int
1928 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
1929 {
1930 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
1931 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
1932 return (IXGBE_FAILURE);
1933 }
1934 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
1935 return (IXGBE_SUCCESS);
1936 }
1937
1938 /*
1939 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1940 */
1941 static int
1942 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1943 {
1944 /*
1945 * Allocate memory space for rx rings
1946 */
1947 ixgbe->rx_rings = kmem_zalloc(
1948 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1949 KM_NOSLEEP);
1950
1951 if (ixgbe->rx_rings == NULL) {
1952 return (IXGBE_FAILURE);
1953 }
1954
1955 /*
1956 * Allocate memory space for tx rings
1957 */
1958 ixgbe->tx_rings = kmem_zalloc(
1959 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1960 KM_NOSLEEP);
1961
1962 if (ixgbe->tx_rings == NULL) {
1963 kmem_free(ixgbe->rx_rings,
1964 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1965 ixgbe->rx_rings = NULL;
1966 return (IXGBE_FAILURE);
1967 }
1968
1969 /*
1970 * Allocate memory space for rx ring groups
1971 */
1972 ixgbe->rx_groups = kmem_zalloc(
1973 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1974 KM_NOSLEEP);
1975
1976 if (ixgbe->rx_groups == NULL) {
1977 kmem_free(ixgbe->rx_rings,
1978 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1979 kmem_free(ixgbe->tx_rings,
1980 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1981 ixgbe->rx_rings = NULL;
1982 ixgbe->tx_rings = NULL;
1983 return (IXGBE_FAILURE);
1984 }
1985
1986 return (IXGBE_SUCCESS);
1987 }
1988
1989 /*
1990 * ixgbe_free_rings - Free the memory space of rx/tx rings.
1991 */
1992 static void
1993 ixgbe_free_rings(ixgbe_t *ixgbe)
1994 {
1995 if (ixgbe->rx_rings != NULL) {
1996 kmem_free(ixgbe->rx_rings,
1997 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1998 ixgbe->rx_rings = NULL;
1999 }
2000
2001 if (ixgbe->tx_rings != NULL) {
2002 kmem_free(ixgbe->tx_rings,
2003 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2004 ixgbe->tx_rings = NULL;
2005 }
2006
2007 if (ixgbe->rx_groups != NULL) {
2008 kmem_free(ixgbe->rx_groups,
2009 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
2010 ixgbe->rx_groups = NULL;
2011 }
2012 }
2013
2014 static int
2015 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
2016 {
2017 ixgbe_rx_ring_t *rx_ring;
2018 int i;
2019
2020 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2021 rx_ring = &ixgbe->rx_rings[i];
2022 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
2023 goto alloc_rx_rings_failure;
2024 }
2025 return (IXGBE_SUCCESS);
2026
2027 alloc_rx_rings_failure:
2028 ixgbe_free_rx_data(ixgbe);
2029 return (IXGBE_FAILURE);
2030 }
2031
2032 static void
2033 ixgbe_free_rx_data(ixgbe_t *ixgbe)
2034 {
2035 ixgbe_rx_ring_t *rx_ring;
2036 ixgbe_rx_data_t *rx_data;
2037 int i;
2038
2039 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2040 rx_ring = &ixgbe->rx_rings[i];
2041
2042 mutex_enter(&ixgbe->rx_pending_lock);
2043 rx_data = rx_ring->rx_data;
2044
2045 if (rx_data != NULL) {
2046 rx_data->flag |= IXGBE_RX_STOPPED;
2047
2048 if (rx_data->rcb_pending == 0) {
2049 ixgbe_free_rx_ring_data(rx_data);
2050 rx_ring->rx_data = NULL;
2051 }
2052 }
2053
2054 mutex_exit(&ixgbe->rx_pending_lock);
2055 }
2056 }
2057
2058 /*
2059 * ixgbe_setup_rings - Setup rx/tx rings.
2060 */
2061 static void
2062 ixgbe_setup_rings(ixgbe_t *ixgbe)
2063 {
2064 /*
2065 * Setup the rx/tx rings, including the following:
2066 *
2067 * 1. Setup the descriptor ring and the control block buffers;
2068 * 2. Initialize necessary registers for receive/transmit;
2069 * 3. Initialize software pointers/parameters for receive/transmit;
2070 */
2071 ixgbe_setup_rx(ixgbe);
2072
2073 ixgbe_setup_tx(ixgbe);
2074 }
2075
2076 static void
2077 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2078 {
2079 ixgbe_t *ixgbe = rx_ring->ixgbe;
2080 ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2081 struct ixgbe_hw *hw = &ixgbe->hw;
2082 rx_control_block_t *rcb;
2083 union ixgbe_adv_rx_desc *rbd;
2084 uint32_t size;
2085 uint32_t buf_low;
2086 uint32_t buf_high;
2087 uint32_t reg_val;
2088 int i;
2089
2090 ASSERT(mutex_owned(&rx_ring->rx_lock));
2091 ASSERT(mutex_owned(&ixgbe->gen_lock));
2092
2093 for (i = 0; i < ixgbe->rx_ring_size; i++) {
2094 rcb = rx_data->work_list[i];
2095 rbd = &rx_data->rbd_ring[i];
2096
2097 rbd->read.pkt_addr = rcb->rx_buf.dma_address;
2098 rbd->read.hdr_addr = NULL;
2099 }
2100
2101 /*
2102 * Initialize the length register
2103 */
2104 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
2105 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);
2106
2107 /*
2108 * Initialize the base address registers
2109 */
2110 buf_low = (uint32_t)rx_data->rbd_area.dma_address;
2111 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
2112 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
2113 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);
2114
2115 /*
2116 * Setup head & tail pointers
2117 */
2118 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2119 rx_data->ring_size - 1);
2120 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2121
2122 rx_data->rbd_next = 0;
2123 rx_data->lro_first = 0;
2124
2125 /*
2126 * Setup the Receive Descriptor Control Register (RXDCTL)
2127 * PTHRESH=32 descriptors (half the internal cache)
2128 * HTHRESH=0 descriptors (to minimize latency on fetch)
2129 * WTHRESH defaults to 1 (writeback each descriptor)
2130 */
2131 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2132 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2133
2134 /* Not a valid value for 82599 */
2135 if (hw->mac.type < ixgbe_mac_82599EB) {
2136 reg_val |= 0x0020; /* pthresh */
2137 }
2138 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2139
2140 if (hw->mac.type >= ixgbe_mac_82599EB) {
2141 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2142 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2143 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2144 }
2145
2146 /*
2147 * Setup the Split and Replication Receive Control Register.
2148 * Set the rx buffer size and the advanced descriptor type.
2149 */
2150 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2151 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2152 reg_val |= IXGBE_SRRCTL_DROP_EN;
2153 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2154 }
2155
2156 static void
2157 ixgbe_setup_rx(ixgbe_t *ixgbe)
2158 {
2159 ixgbe_rx_ring_t *rx_ring;
2160 struct ixgbe_hw *hw = &ixgbe->hw;
2161 uint32_t reg_val;
2162 uint32_t ring_mapping;
2163 uint32_t i, index;
2164 uint32_t psrtype_rss_bit;
2165
2166 /* PSRTYPE must be configured for 82599 */
2167 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2168 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2169 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2170 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2171 reg_val |= IXGBE_PSRTYPE_L2HDR;
2172 reg_val |= 0x80000000;
2173 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2174 } else {
2175 if (ixgbe->num_rx_groups > 32) {
2176 psrtype_rss_bit = 0x20000000;
2177 } else {
2178 psrtype_rss_bit = 0x40000000;
2179 }
2180 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
2181 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2182 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2183 reg_val |= IXGBE_PSRTYPE_L2HDR;
2184 reg_val |= psrtype_rss_bit;
2185 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
2186 }
2187 }
2188
2189 /*
2190 * Set filter control in FCTRL to accept broadcast packets and do
2191 * not pass pause frames to host. Flow control settings are already
2192 * in this register, so preserve them.
2193 */
2194 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2195 reg_val |= IXGBE_FCTRL_BAM; /* broadcast accept mode */
2196 reg_val |= IXGBE_FCTRL_DPF; /* discard pause frames */
2197 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
2198
2199 /*
2200 * Hardware checksum settings
2201 */
2202 if (ixgbe->rx_hcksum_enable) {
2203 reg_val = IXGBE_RXCSUM_IPPCSE; /* IP checksum */
2204 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
2205 }
2206
2207 /*
2208 * Setup VMDq and RSS for multiple receive queues
2209 */
2210 switch (ixgbe->classify_mode) {
2211 case IXGBE_CLASSIFY_RSS:
2212 /*
2213 * One group, only RSS is needed when more than
2214 * one ring enabled.
2215 */
2216 ixgbe_setup_rss(ixgbe);
2217 break;
2218
2219 case IXGBE_CLASSIFY_VMDQ:
2220 /*
2221 * Multiple groups, each group has one ring,
2222 * only VMDq is needed.
2223 */
2224 ixgbe_setup_vmdq(ixgbe);
2225 break;
2226
2227 case IXGBE_CLASSIFY_VMDQ_RSS:
2228 /*
2229 * Multiple groups and multiple rings, both
2230 * VMDq and RSS are needed.
2231 */
2232 ixgbe_setup_vmdq_rss(ixgbe);
2233 break;
2234
2235 default:
2236 break;
2237 }
2238
2239 /*
2240 * Enable the receive unit. This must be done after filter
2241 * control is set in FCTRL.
2242 */
2243 reg_val = (IXGBE_RXCTRL_RXEN /* Enable Receive Unit */
2244 | IXGBE_RXCTRL_DMBYPS); /* descriptor monitor bypass */
2245 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
2246
2247 /*
2248 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2249 */
2250 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2251 rx_ring = &ixgbe->rx_rings[i];
2252 ixgbe_setup_rx_ring(rx_ring);
2253 }
2254
2255 /*
2256 * Setup the per-ring statistics mapping.
2257 */
2258 ring_mapping = 0;
2259 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2260 index = ixgbe->rx_rings[i].hw_index;
2261 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2262 ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2263 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2264 }
2265
2266 /*
2267 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2268 * by four bytes if the packet has a VLAN field, so includes MTU,
2269 * ethernet header and frame check sequence.
2270 * Register is MAXFRS in 82599.
2271 */
2272 reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
2273 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2274 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
2275
2276 /*
2277 * Setup Jumbo Frame enable bit
2278 */
2279 if (ixgbe->default_mtu > ETHERMTU) {
2280 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2281 reg_val |= IXGBE_HLREG0_JUMBOEN;
2282 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2283 }
2284
2285 /*
2286 * Setup RSC for multiple receive queues.
2287 */
2288 if (ixgbe->lro_enable) {
2289 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2290 /*
2291 * Make sure rx_buf_size * MAXDESC not greater
2292 * than 65535.
2293 * Intel recommends 4 for MAXDESC field value.
2294 */
2295 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2296 reg_val |= IXGBE_RSCCTL_RSCEN;
2297 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2298 reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2299 else
2300 reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2301 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
2302 }
2303
2304 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2305 reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2306 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2307
2308 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2309 reg_val |= IXGBE_RDRXCTL_RSCACKC;
2310 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2311 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2312
2313 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2314 }
2315 }
2316
2317 static void
2318 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2319 {
2320 ixgbe_t *ixgbe = tx_ring->ixgbe;
2321 struct ixgbe_hw *hw = &ixgbe->hw;
2322 uint32_t size;
2323 uint32_t buf_low;
2324 uint32_t buf_high;
2325 uint32_t reg_val;
2326
2327 ASSERT(mutex_owned(&tx_ring->tx_lock));
2328 ASSERT(mutex_owned(&ixgbe->gen_lock));
2329
2330 /*
2331 * Initialize the length register
2332 */
2333 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2334 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
2335
2336 /*
2337 * Initialize the base address registers
2338 */
2339 buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
2340 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
2341 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
2342 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
2343
2344 /*
2345 * Setup head & tail pointers
2346 */
2347 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
2348 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
2349
2350 /*
2351 * Setup head write-back
2352 */
2353 if (ixgbe->tx_head_wb_enable) {
2354 /*
2355 * The memory of the head write-back is allocated using
2356 * the extra tbd beyond the tail of the tbd ring.
2357 */
2358 tx_ring->tbd_head_wb = (uint32_t *)
2359 ((uintptr_t)tx_ring->tbd_area.address + size);
2360 *tx_ring->tbd_head_wb = 0;
2361
2362 buf_low = (uint32_t)
2363 (tx_ring->tbd_area.dma_address + size);
2364 buf_high = (uint32_t)
2365 ((tx_ring->tbd_area.dma_address + size) >> 32);
2366
2367 /* Set the head write-back enable bit */
2368 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2369
2370 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2371 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2372
2373 /*
2374 * Turn off relaxed ordering for head write back or it will
2375 * cause problems with the tx recycling
2376 */
2377
2378 reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2379 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2380 IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2381 reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2382 if (hw->mac.type == ixgbe_mac_82598EB) {
2383 IXGBE_WRITE_REG(hw,
2384 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2385 } else {
2386 IXGBE_WRITE_REG(hw,
2387 IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2388 }
2389 } else {
2390 tx_ring->tbd_head_wb = NULL;
2391 }
2392
2393 tx_ring->tbd_head = 0;
2394 tx_ring->tbd_tail = 0;
2395 tx_ring->tbd_free = tx_ring->ring_size;
2396
2397 if (ixgbe->tx_ring_init == B_TRUE) {
2398 tx_ring->tcb_head = 0;
2399 tx_ring->tcb_tail = 0;
2400 tx_ring->tcb_free = tx_ring->free_list_size;
2401 }
2402
2403 /*
2404 * Initialize the s/w context structure
2405 */
2406 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2407 }
2408
2409 static void
2410 ixgbe_setup_tx(ixgbe_t *ixgbe)
2411 {
2412 struct ixgbe_hw *hw = &ixgbe->hw;
2413 ixgbe_tx_ring_t *tx_ring;
2414 uint32_t reg_val;
2415 uint32_t ring_mapping;
2416 int i;
2417
2418 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2419 tx_ring = &ixgbe->tx_rings[i];
2420 ixgbe_setup_tx_ring(tx_ring);
2421 }
2422
2423 /*
2424 * Setup the per-ring statistics mapping.
2425 */
2426 ring_mapping = 0;
2427 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2428 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2429 if ((i & 0x3) == 0x3) {
2430 switch (hw->mac.type) {
2431 case ixgbe_mac_82598EB:
2432 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2433 ring_mapping);
2434 break;
2435
2436 case ixgbe_mac_82599EB:
2437 case ixgbe_mac_X540:
2438 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2439 ring_mapping);
2440 break;
2441
2442 default:
2443 break;
2444 }
2445
2446 ring_mapping = 0;
2447 }
2448 }
2449 if (i & 0x3) {
2450 switch (hw->mac.type) {
2451 case ixgbe_mac_82598EB:
2452 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2453 break;
2454
2455 case ixgbe_mac_82599EB:
2456 case ixgbe_mac_X540:
2457 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2458 break;
2459
2460 default:
2461 break;
2462 }
2463 }
2464
2465 /*
2466 * Enable CRC appending and TX padding (for short tx frames)
2467 */
2468 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2469 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2470 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2471
2472 /*
2473 * enable DMA for 82599 and X540 parts
2474 */
2475 if (hw->mac.type >= ixgbe_mac_82599EB) {
2476 /* DMATXCTL.TE must be set after all Tx config is complete */
2477 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2478 reg_val |= IXGBE_DMATXCTL_TE;
2479 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2480
2481 /* Disable arbiter to set MTQC */
2482 reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2483 reg_val |= IXGBE_RTTDCS_ARBDIS;
2484 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2485 IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2486 reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2487 IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2488 }
2489
2490 /*
2491 * Enabling tx queues ..
2492 * For 82599 must be done after DMATXCTL.TE is set
2493 */
2494 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2495 tx_ring = &ixgbe->tx_rings[i];
2496 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2497 reg_val |= IXGBE_TXDCTL_ENABLE;
2498 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2499 }
2500 }
2501
2502 /*
2503 * ixgbe_setup_rss - Setup receive-side scaling feature.
2504 */
2505 static void
2506 ixgbe_setup_rss(ixgbe_t *ixgbe)
2507 {
2508 struct ixgbe_hw *hw = &ixgbe->hw;
2509 uint32_t i, mrqc, rxcsum;
2510 uint32_t random;
2511 uint32_t reta;
2512 uint32_t ring_per_group;
2513
2514 /*
2515 * Fill out redirection table
2516 */
2517 reta = 0;
2518 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2519
2520 for (i = 0; i < 128; i++) {
2521 reta = (reta << 8) | (i % ring_per_group) |
2522 ((i % ring_per_group) << 4);
2523 if ((i & 3) == 3)
2524 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2525 }
2526
2527 /*
2528 * Fill out hash function seeds with a random constant
2529 */
2530 for (i = 0; i < 10; i++) {
2531 (void) random_get_pseudo_bytes((uint8_t *)&random,
2532 sizeof (uint32_t));
2533 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2534 }
2535
2536 /*
2537 * Enable RSS & perform hash on these packet types
2538 */
2539 mrqc = IXGBE_MRQC_RSSEN |
2540 IXGBE_MRQC_RSS_FIELD_IPV4 |
2541 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2542 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2543 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2544 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2545 IXGBE_MRQC_RSS_FIELD_IPV6 |
2546 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2547 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2548 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2549 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2550
2551 /*
2552 * Disable Packet Checksum to enable RSS for multiple receive queues.
2553 * It is an adapter hardware limitation that Packet Checksum is
2554 * mutually exclusive with RSS.
2555 */
2556 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2557 rxcsum |= IXGBE_RXCSUM_PCSD;
2558 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2559 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2560 }
2561
2562 /*
2563 * ixgbe_setup_vmdq - Setup MAC classification feature
2564 */
2565 static void
2566 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2567 {
2568 struct ixgbe_hw *hw = &ixgbe->hw;
2569 uint32_t vmdctl, i, vtctl;
2570
2571 /*
2572 * Setup the VMDq Control register, enable VMDq based on
2573 * packet destination MAC address:
2574 */
2575 switch (hw->mac.type) {
2576 case ixgbe_mac_82598EB:
2577 /*
2578 * VMDq Enable = 1;
2579 * VMDq Filter = 0; MAC filtering
2580 * Default VMDq output index = 0;
2581 */
2582 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2583 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2584 break;
2585
2586 case ixgbe_mac_82599EB:
2587 case ixgbe_mac_X540:
2588 /*
2589 * Enable VMDq-only.
2590 */
2591 vmdctl = IXGBE_MRQC_VMDQEN;
2592 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2593
2594 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2595 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2596 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2597 }
2598
2599 /*
2600 * Enable Virtualization and Replication.
2601 */
2602 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2603 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2604
2605 /*
2606 * Enable receiving packets to all VFs
2607 */
2608 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2609 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2610 break;
2611
2612 default:
2613 break;
2614 }
2615 }
2616
2617 /*
2618 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2619 */
2620 static void
2621 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2622 {
2623 struct ixgbe_hw *hw = &ixgbe->hw;
2624 uint32_t i, mrqc, rxcsum;
2625 uint32_t random;
2626 uint32_t reta;
2627 uint32_t ring_per_group;
2628 uint32_t vmdctl, vtctl;
2629
2630 /*
2631 * Fill out redirection table
2632 */
2633 reta = 0;
2634 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2635 for (i = 0; i < 128; i++) {
2636 reta = (reta << 8) | (i % ring_per_group) |
2637 ((i % ring_per_group) << 4);
2638 if ((i & 3) == 3)
2639 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2640 }
2641
2642 /*
2643 * Fill out hash function seeds with a random constant
2644 */
2645 for (i = 0; i < 10; i++) {
2646 (void) random_get_pseudo_bytes((uint8_t *)&random,
2647 sizeof (uint32_t));
2648 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2649 }
2650
2651 /*
2652 * Enable and setup RSS and VMDq
2653 */
2654 switch (hw->mac.type) {
2655 case ixgbe_mac_82598EB:
2656 /*
2657 * Enable RSS & Setup RSS Hash functions
2658 */
2659 mrqc = IXGBE_MRQC_RSSEN |
2660 IXGBE_MRQC_RSS_FIELD_IPV4 |
2661 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2662 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2663 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2664 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2665 IXGBE_MRQC_RSS_FIELD_IPV6 |
2666 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2667 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2668 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2669 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2670
2671 /*
2672 * Enable and Setup VMDq
2673 * VMDq Filter = 0; MAC filtering
2674 * Default VMDq output index = 0;
2675 */
2676 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2677 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2678 break;
2679
2680 case ixgbe_mac_82599EB:
2681 case ixgbe_mac_X540:
2682 /*
2683 * Enable RSS & Setup RSS Hash functions
2684 */
2685 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2686 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2687 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2688 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2689 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2690 IXGBE_MRQC_RSS_FIELD_IPV6 |
2691 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2692 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2693 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2694
2695 /*
2696 * Enable VMDq+RSS.
2697 */
2698 if (ixgbe->num_rx_groups > 32) {
2699 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2700 } else {
2701 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2702 }
2703
2704 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2705
2706 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2707 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2708 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2709 }
2710 break;
2711
2712 default:
2713 break;
2714
2715 }
2716
2717 /*
2718 * Disable Packet Checksum to enable RSS for multiple receive queues.
2719 * It is an adapter hardware limitation that Packet Checksum is
2720 * mutually exclusive with RSS.
2721 */
2722 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2723 rxcsum |= IXGBE_RXCSUM_PCSD;
2724 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2725 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2726
2727 if (hw->mac.type >= ixgbe_mac_82599EB) {
2728 /*
2729 * Enable Virtualization and Replication.
2730 */
2731 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2732 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2733
2734 /*
2735 * Enable receiving packets to all VFs
2736 */
2737 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2738 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2739 }
2740 }
2741
2742 /*
2743 * ixgbe_init_unicst - Initialize the unicast addresses.
2744 */
2745 static void
2746 ixgbe_init_unicst(ixgbe_t *ixgbe)
2747 {
2748 struct ixgbe_hw *hw = &ixgbe->hw;
2749 uint8_t *mac_addr;
2750 int slot;
2751 /*
2752 * Here we should consider two situations:
2753 *
2754 * 1. Chipset is initialized at the first time,
2755 * Clear all the multiple unicast addresses.
2756 *
2757 * 2. Chipset is reset
2758 * Recover the multiple unicast addresses from the
2759 * software data structure to the RAR registers.
2760 */
2761 if (!ixgbe->unicst_init) {
2762 /*
2763 * Initialize the multiple unicast addresses
2764 */
2765 ixgbe->unicst_total = hw->mac.num_rar_entries;
2766 ixgbe->unicst_avail = ixgbe->unicst_total;
2767 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2768 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2769 bzero(mac_addr, ETHERADDRL);
2770 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
2771 ixgbe->unicst_addr[slot].mac.set = 0;
2772 }
2773 ixgbe->unicst_init = B_TRUE;
2774 } else {
2775 /* Re-configure the RAR registers */
2776 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2777 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2778 if (ixgbe->unicst_addr[slot].mac.set == 1) {
2779 (void) ixgbe_set_rar(hw, slot, mac_addr,
2780 ixgbe->unicst_addr[slot].mac.group_index,
2781 IXGBE_RAH_AV);
2782 } else {
2783 bzero(mac_addr, ETHERADDRL);
2784 (void) ixgbe_set_rar(hw, slot, mac_addr,
2785 NULL, NULL);
2786 }
2787 }
2788 }
2789 }
2790
2791 /*
2792 * ixgbe_unicst_find - Find the slot for the specified unicast address
2793 */
2794 int
2795 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2796 {
2797 int slot;
2798
2799 ASSERT(mutex_owned(&ixgbe->gen_lock));
2800
2801 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2802 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2803 mac_addr, ETHERADDRL) == 0)
2804 return (slot);
2805 }
2806
2807 return (-1);
2808 }
2809
2810 /*
2811 * ixgbe_multicst_add - Add a multicst address.
2812 */
2813 int
2814 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2815 {
2816 ASSERT(mutex_owned(&ixgbe->gen_lock));
2817
2818 if ((multiaddr[0] & 01) == 0) {
2819 return (EINVAL);
2820 }
2821
2822 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2823 return (ENOENT);
2824 }
2825
2826 bcopy(multiaddr,
2827 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2828 ixgbe->mcast_count++;
2829
2830 /*
2831 * Update the multicast table in the hardware
2832 */
2833 ixgbe_setup_multicst(ixgbe);
2834
2835 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2836 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2837 return (EIO);
2838 }
2839
2840 return (0);
2841 }
2842
2843 /*
2844 * ixgbe_multicst_remove - Remove a multicst address.
2845 */
2846 int
2847 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2848 {
2849 int i;
2850
2851 ASSERT(mutex_owned(&ixgbe->gen_lock));
2852
2853 for (i = 0; i < ixgbe->mcast_count; i++) {
2854 if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2855 ETHERADDRL) == 0) {
2856 for (i++; i < ixgbe->mcast_count; i++) {
2857 ixgbe->mcast_table[i - 1] =
2858 ixgbe->mcast_table[i];
2859 }
2860 ixgbe->mcast_count--;
2861 break;
2862 }
2863 }
2864
2865 /*
2866 * Update the multicast table in the hardware
2867 */
2868 ixgbe_setup_multicst(ixgbe);
2869
2870 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2871 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2872 return (EIO);
2873 }
2874
2875 return (0);
2876 }
2877
2878 /*
2879 * ixgbe_setup_multicast - Setup multicast data structures.
2880 *
2881 * This routine initializes all of the multicast related structures
2882 * and save them in the hardware registers.
2883 */
2884 static void
2885 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2886 {
2887 uint8_t *mc_addr_list;
2888 uint32_t mc_addr_count;
2889 struct ixgbe_hw *hw = &ixgbe->hw;
2890
2891 ASSERT(mutex_owned(&ixgbe->gen_lock));
2892
2893 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2894
2895 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2896 mc_addr_count = ixgbe->mcast_count;
2897
2898 /*
2899 * Update the multicast addresses to the MTA registers
2900 */
2901 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2902 ixgbe_mc_table_itr, TRUE);
2903 }
2904
2905 /*
2906 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2907 *
2908 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2909 * Different chipsets may have different allowed configuration of vmdq and rss.
2910 */
2911 static void
2912 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2913 {
2914 struct ixgbe_hw *hw = &ixgbe->hw;
2915 uint32_t ring_per_group;
2916
2917 switch (hw->mac.type) {
2918 case ixgbe_mac_82598EB:
2919 /*
2920 * 82598 supports the following combination:
2921 * vmdq no. x rss no.
2922 * [5..16] x 1
2923 * [1..4] x [1..16]
2924 * However 8 rss queue per pool (vmdq) is sufficient for
2925 * most cases.
2926 */
2927 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2928 if (ixgbe->num_rx_groups > 4) {
2929 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2930 } else {
2931 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2932 min(8, ring_per_group);
2933 }
2934
2935 break;
2936
2937 case ixgbe_mac_82599EB:
2938 case ixgbe_mac_X540:
2939 /*
2940 * 82599 supports the following combination:
2941 * vmdq no. x rss no.
2942 * [33..64] x [1..2]
2943 * [2..32] x [1..4]
2944 * 1 x [1..16]
2945 * However 8 rss queue per pool (vmdq) is sufficient for
2946 * most cases.
2947 *
2948 * For now, treat X540 like the 82599.
2949 */
2950 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2951 if (ixgbe->num_rx_groups == 1) {
2952 ixgbe->num_rx_rings = min(8, ring_per_group);
2953 } else if (ixgbe->num_rx_groups <= 32) {
2954 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2955 min(4, ring_per_group);
2956 } else if (ixgbe->num_rx_groups <= 64) {
2957 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2958 min(2, ring_per_group);
2959 }
2960 break;
2961
2962 default:
2963 break;
2964 }
2965
2966 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2967
2968 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
2969 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
2970 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
2971 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
2972 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
2973 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
2974 } else {
2975 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
2976 }
2977
2978 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
2979 ixgbe->num_rx_groups, ixgbe->num_rx_rings);
2980 }
2981
2982 /*
2983 * ixgbe_get_conf - Get driver configurations set in driver.conf.
2984 *
2985 * This routine gets user-configured values out of the configuration
2986 * file ixgbe.conf.
2987 *
2988 * For each configurable value, there is a minimum, a maximum, and a
2989 * default.
2990 * If user does not configure a value, use the default.
2991 * If user configures below the minimum, use the minumum.
2992 * If user configures above the maximum, use the maxumum.
2993 */
2994 static void
2995 ixgbe_get_conf(ixgbe_t *ixgbe)
2996 {
2997 struct ixgbe_hw *hw = &ixgbe->hw;
2998 uint32_t flow_control;
2999
3000 /*
3001 * ixgbe driver supports the following user configurations:
3002 *
3003 * Jumbo frame configuration:
3004 * default_mtu
3005 *
3006 * Ethernet flow control configuration:
3007 * flow_control
3008 *
3009 * Multiple rings configurations:
3010 * tx_queue_number
3011 * tx_ring_size
3012 * rx_queue_number
3013 * rx_ring_size
3014 *
3015 * Call ixgbe_get_prop() to get the value for a specific
3016 * configuration parameter.
3017 */
3018
3019 /*
3020 * Jumbo frame configuration - max_frame_size controls host buffer
3021 * allocation, so includes MTU, ethernet header, vlan tag and
3022 * frame check sequence.
3023 */
3024 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
3025 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
3026
3027 ixgbe->max_frame_size = ixgbe->default_mtu +
3028 sizeof (struct ether_vlan_header) + ETHERFCSL;
3029
3030 /*
3031 * Ethernet flow control configuration
3032 */
3033 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
3034 ixgbe_fc_none, 3, ixgbe_fc_none);
3035 if (flow_control == 3)
3036 flow_control = ixgbe_fc_default;
3037
3038 /*
3039 * fc.requested mode is what the user requests. After autoneg,
3040 * fc.current_mode will be the flow_control mode that was negotiated.
3041 */
3042 hw->fc.requested_mode = flow_control;
3043
3044 /*
3045 * Multiple rings configurations
3046 */
3047 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
3048 ixgbe->capab->min_tx_que_num,
3049 ixgbe->capab->max_tx_que_num,
3050 ixgbe->capab->def_tx_que_num);
3051 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
3052 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
3053
3054 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
3055 ixgbe->capab->min_rx_que_num,
3056 ixgbe->capab->max_rx_que_num,
3057 ixgbe->capab->def_rx_que_num);
3058 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
3059 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
3060
3061 /*
3062 * Multiple groups configuration
3063 */
3064 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3065 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3066 ixgbe->capab->def_rx_grp_num);
3067
3068 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3069 0, 1, DEFAULT_MR_ENABLE);
3070
3071 if (ixgbe->mr_enable == B_FALSE) {
3072 ixgbe->num_tx_rings = 1;
3073 ixgbe->num_rx_rings = 1;
3074 ixgbe->num_rx_groups = 1;
3075 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3076 } else {
3077 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3078 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3079 /*
3080 * The combination of num_rx_rings and num_rx_groups
3081 * may be not supported by h/w. We need to adjust
3082 * them to appropriate values.
3083 */
3084 ixgbe_setup_vmdq_rss_conf(ixgbe);
3085 }
3086
3087 /*
3088 * Tunable used to force an interrupt type. The only use is
3089 * for testing of the lesser interrupt types.
3090 * 0 = don't force interrupt type
3091 * 1 = force interrupt type MSI-X
3092 * 2 = force interrupt type MSI
3093 * 3 = force interrupt type Legacy
3094 */
3095 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3096 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3097
3098 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3099 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3100 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3101 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3102 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3103 0, 1, DEFAULT_LSO_ENABLE);
3104 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3105 0, 1, DEFAULT_LRO_ENABLE);
3106 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3107 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3108 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3109 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3110
3111 /* Head Write Back not recommended for 82599 and X540 */
3112 if (hw->mac.type >= ixgbe_mac_82599EB) {
3113 ixgbe->tx_head_wb_enable = B_FALSE;
3114 }
3115
3116 /*
3117 * ixgbe LSO needs the tx h/w checksum support.
3118 * LSO will be disabled if tx h/w checksum is not
3119 * enabled.
3120 */
3121 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3122 ixgbe->lso_enable = B_FALSE;
3123 }
3124
3125 /*
3126 * ixgbe LRO needs the rx h/w checksum support.
3127 * LRO will be disabled if rx h/w checksum is not
3128 * enabled.
3129 */
3130 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3131 ixgbe->lro_enable = B_FALSE;
3132 }
3133
3134 /*
3135 * ixgbe LRO only been supported by 82599 and X540 now
3136 */
3137 if (hw->mac.type < ixgbe_mac_82599EB) {
3138 ixgbe->lro_enable = B_FALSE;
3139 }
3140 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3141 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3142 DEFAULT_TX_COPY_THRESHOLD);
3143 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3144 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3145 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3146 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3147 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3148 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3149 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3150 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3151 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3152
3153 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3154 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3155 DEFAULT_RX_COPY_THRESHOLD);
3156 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3157 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3158 DEFAULT_RX_LIMIT_PER_INTR);
3159
3160 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3161 ixgbe->capab->min_intr_throttle,
3162 ixgbe->capab->max_intr_throttle,
3163 ixgbe->capab->def_intr_throttle);
3164 /*
3165 * 82599 and X540 require the interupt throttling rate is
3166 * a multiple of 8. This is enforced by the register
3167 * definiton.
3168 */
3169 if (hw->mac.type >= ixgbe_mac_82599EB)
3170 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3171 }
3172
3173 static void
3174 ixgbe_init_params(ixgbe_t *ixgbe)
3175 {
3176 ixgbe->param_en_10000fdx_cap = 1;
3177 ixgbe->param_en_1000fdx_cap = 1;
3178 ixgbe->param_en_100fdx_cap = 1;
3179 ixgbe->param_adv_10000fdx_cap = 1;
3180 ixgbe->param_adv_1000fdx_cap = 1;
3181 ixgbe->param_adv_100fdx_cap = 1;
3182
3183 ixgbe->param_pause_cap = 1;
3184 ixgbe->param_asym_pause_cap = 1;
3185 ixgbe->param_rem_fault = 0;
3186
3187 ixgbe->param_adv_autoneg_cap = 1;
3188 ixgbe->param_adv_pause_cap = 1;
3189 ixgbe->param_adv_asym_pause_cap = 1;
3190 ixgbe->param_adv_rem_fault = 0;
3191
3192 ixgbe->param_lp_10000fdx_cap = 0;
3193 ixgbe->param_lp_1000fdx_cap = 0;
3194 ixgbe->param_lp_100fdx_cap = 0;
3195 ixgbe->param_lp_autoneg_cap = 0;
3196 ixgbe->param_lp_pause_cap = 0;
3197 ixgbe->param_lp_asym_pause_cap = 0;
3198 ixgbe->param_lp_rem_fault = 0;
3199 }
3200
3201 /*
3202 * ixgbe_get_prop - Get a property value out of the configuration file
3203 * ixgbe.conf.
3204 *
3205 * Caller provides the name of the property, a default value, a minimum
3206 * value, and a maximum value.
3207 *
3208 * Return configured value of the property, with default, minimum and
3209 * maximum properly applied.
3210 */
3211 static int
3212 ixgbe_get_prop(ixgbe_t *ixgbe,
3213 char *propname, /* name of the property */
3214 int minval, /* minimum acceptable value */
3215 int maxval, /* maximim acceptable value */
3216 int defval) /* default value */
3217 {
3218 int value;
3219
3220 /*
3221 * Call ddi_prop_get_int() to read the conf settings
3222 */
3223 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3224 DDI_PROP_DONTPASS, propname, defval);
3225 if (value > maxval)
3226 value = maxval;
3227
3228 if (value < minval)
3229 value = minval;
3230
3231 return (value);
3232 }
3233
3234 /*
3235 * ixgbe_driver_setup_link - Using the link properties to setup the link.
3236 */
3237 int
3238 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3239 {
3240 u32 autoneg_advertised = 0;
3241
3242 /*
3243 * No half duplex support with 10Gb parts
3244 */
3245 if (ixgbe->param_adv_10000fdx_cap == 1)
3246 autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3247
3248 if (ixgbe->param_adv_1000fdx_cap == 1)
3249 autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3250
3251 if (ixgbe->param_adv_100fdx_cap == 1)
3252 autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
3253
3254 if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
3255 ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
3256 "to autonegotiation with full link capabilities.");
3257
3258 autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
3259 IXGBE_LINK_SPEED_1GB_FULL |
3260 IXGBE_LINK_SPEED_100_FULL;
3261 }
3262
3263 if (setup_hw) {
3264 if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
3265 ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
3266 ixgbe_notice(ixgbe, "Setup link failed on this "
3267 "device.");
3268 return (IXGBE_FAILURE);
3269 }
3270 }
3271
3272 return (IXGBE_SUCCESS);
3273 }
3274
3275 /*
3276 * ixgbe_driver_link_check - Link status processing.
3277 *
3278 * This function can be called in both kernel context and interrupt context
3279 */
3280 static void
3281 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3282 {
3283 struct ixgbe_hw *hw = &ixgbe->hw;
3284 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3285 boolean_t link_up = B_FALSE;
3286 boolean_t link_changed = B_FALSE;
3287
3288 ASSERT(mutex_owned(&ixgbe->gen_lock));
3289
3290 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3291 if (link_up) {
3292 ixgbe->link_check_complete = B_TRUE;
3293
3294 /* Link is up, enable flow control settings */
3295 (void) ixgbe_fc_enable(hw);
3296
3297 /*
3298 * The Link is up, check whether it was marked as down earlier
3299 */
3300 if (ixgbe->link_state != LINK_STATE_UP) {
3301 switch (speed) {
3302 case IXGBE_LINK_SPEED_10GB_FULL:
3303 ixgbe->link_speed = SPEED_10GB;
3304 break;
3305 case IXGBE_LINK_SPEED_1GB_FULL:
3306 ixgbe->link_speed = SPEED_1GB;
3307 break;
3308 case IXGBE_LINK_SPEED_100_FULL:
3309 ixgbe->link_speed = SPEED_100;
3310 }
3311 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3312 ixgbe->link_state = LINK_STATE_UP;
3313 link_changed = B_TRUE;
3314 }
3315 } else {
3316 if (ixgbe->link_check_complete == B_TRUE ||
3317 (ixgbe->link_check_complete == B_FALSE &&
3318 gethrtime() >= ixgbe->link_check_hrtime)) {
3319 /*
3320 * The link is really down
3321 */
3322 ixgbe->link_check_complete = B_TRUE;
3323
3324 if (ixgbe->link_state != LINK_STATE_DOWN) {
3325 ixgbe->link_speed = 0;
3326 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
3327 ixgbe->link_state = LINK_STATE_DOWN;
3328 link_changed = B_TRUE;
3329 }
3330 }
3331 }
3332
3333 /*
3334 * If we are in an interrupt context, need to re-enable the
3335 * interrupt, which was automasked
3336 */
3337 if (servicing_interrupt() != 0) {
3338 ixgbe->eims |= IXGBE_EICR_LSC;
3339 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3340 }
3341
3342 if (link_changed) {
3343 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3344 }
3345 }
3346
3347 /*
3348 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
3349 */
3350 static void
3351 ixgbe_sfp_check(void *arg)
3352 {
3353 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3354 uint32_t eicr = ixgbe->eicr;
3355 struct ixgbe_hw *hw = &ixgbe->hw;
3356
3357 mutex_enter(&ixgbe->gen_lock);
3358 if (eicr & IXGBE_EICR_GPI_SDP1) {
3359 /* clear the interrupt */
3360 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
3361
3362 /* if link up, do multispeed fiber setup */
3363 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3364 B_TRUE, B_TRUE);
3365 ixgbe_driver_link_check(ixgbe);
3366 ixgbe_get_hw_state(ixgbe);
3367 } else if (eicr & IXGBE_EICR_GPI_SDP2) {
3368 /* clear the interrupt */
3369 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
3370
3371 /* if link up, do sfp module setup */
3372 (void) hw->mac.ops.setup_sfp(hw);
3373
3374 /* do multispeed fiber setup */
3375 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3376 B_TRUE, B_TRUE);
3377 ixgbe_driver_link_check(ixgbe);
3378 ixgbe_get_hw_state(ixgbe);
3379 }
3380 mutex_exit(&ixgbe->gen_lock);
3381
3382 /*
3383 * We need to fully re-check the link later.
3384 */
3385 ixgbe->link_check_complete = B_FALSE;
3386 ixgbe->link_check_hrtime = gethrtime() +
3387 (IXGBE_LINK_UP_TIME * 100000000ULL);
3388 }
3389
3390 /*
3391 * ixgbe_overtemp_check - overtemp module processing done in taskq
3392 *
3393 * This routine will only be called on adapters with temperature sensor.
3394 * The indication of over-temperature can be either SDP0 interrupt or the link
3395 * status change interrupt.
3396 */
3397 static void
3398 ixgbe_overtemp_check(void *arg)
3399 {
3400 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3401 struct ixgbe_hw *hw = &ixgbe->hw;
3402 uint32_t eicr = ixgbe->eicr;
3403 ixgbe_link_speed speed;
3404 boolean_t link_up;
3405
3406 mutex_enter(&ixgbe->gen_lock);
3407
3408 /* make sure we know current state of link */
3409 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3410
3411 /* check over-temp condition */
3412 if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
3413 (eicr & IXGBE_EICR_LSC)) {
3414 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3415 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3416
3417 /*
3418 * Disable the adapter interrupts
3419 */
3420 ixgbe_disable_adapter_interrupts(ixgbe);
3421
3422 /*
3423 * Disable Rx/Tx units
3424 */
3425 (void) ixgbe_stop_adapter(hw);
3426
3427 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3428 ixgbe_error(ixgbe,
3429 "Problem: Network adapter has been stopped "
3430 "because it has overheated");
3431 ixgbe_error(ixgbe,
3432 "Action: Restart the computer. "
3433 "If the problem persists, power off the system "
3434 "and replace the adapter");
3435 }
3436 }
3437
3438 /* write to clear the interrupt */
3439 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3440
3441 mutex_exit(&ixgbe->gen_lock);
3442 }
3443
3444 /*
3445 * ixgbe_link_timer - timer for link status detection
3446 */
3447 static void
3448 ixgbe_link_timer(void *arg)
3449 {
3450 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3451
3452 mutex_enter(&ixgbe->gen_lock);
3453 ixgbe_driver_link_check(ixgbe);
3454 mutex_exit(&ixgbe->gen_lock);
3455 }
3456
3457 /*
3458 * ixgbe_local_timer - Driver watchdog function.
3459 *
3460 * This function will handle the transmit stall check and other routines.
3461 */
3462 static void
3463 ixgbe_local_timer(void *arg)
3464 {
3465 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3466
3467 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3468 goto out;
3469
3470 if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3471 ixgbe->reset_count++;
3472 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3473 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3474 goto out;
3475 }
3476
3477 if (ixgbe_stall_check(ixgbe)) {
3478 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3479 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3480
3481 ixgbe->reset_count++;
3482 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3483 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3484 }
3485
3486 out:
3487 ixgbe_restart_watchdog_timer(ixgbe);
3488 }
3489
3490 /*
3491 * ixgbe_stall_check - Check for transmit stall.
3492 *
3493 * This function checks if the adapter is stalled (in transmit).
3494 *
3495 * It is called each time the watchdog timeout is invoked.
3496 * If the transmit descriptor reclaim continuously fails,
3497 * the watchdog value will increment by 1. If the watchdog
3498 * value exceeds the threshold, the ixgbe is assumed to
3499 * have stalled and need to be reset.
3500 */
3501 static boolean_t
3502 ixgbe_stall_check(ixgbe_t *ixgbe)
3503 {
3504 ixgbe_tx_ring_t *tx_ring;
3505 boolean_t result;
3506 int i;
3507
3508 if (ixgbe->link_state != LINK_STATE_UP)
3509 return (B_FALSE);
3510
3511 /*
3512 * If any tx ring is stalled, we'll reset the chipset
3513 */
3514 result = B_FALSE;
3515 for (i = 0; i < ixgbe->num_tx_rings; i++) {
3516 tx_ring = &ixgbe->tx_rings[i];
3517 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
3518 tx_ring->tx_recycle(tx_ring);
3519 }
3520
3521 if (tx_ring->recycle_fail > 0)
3522 tx_ring->stall_watchdog++;
3523 else
3524 tx_ring->stall_watchdog = 0;
3525
3526 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
3527 result = B_TRUE;
3528 break;
3529 }
3530 }
3531
3532 if (result) {
3533 tx_ring->stall_watchdog = 0;
3534 tx_ring->recycle_fail = 0;
3535 }
3536
3537 return (result);
3538 }
3539
3540
3541 /*
3542 * is_valid_mac_addr - Check if the mac address is valid.
3543 */
3544 static boolean_t
3545 is_valid_mac_addr(uint8_t *mac_addr)
3546 {
3547 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
3548 const uint8_t addr_test2[6] =
3549 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3550
3551 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
3552 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
3553 return (B_FALSE);
3554
3555 return (B_TRUE);
3556 }
3557
3558 static boolean_t
3559 ixgbe_find_mac_address(ixgbe_t *ixgbe)
3560 {
3561 #ifdef __sparc
3562 struct ixgbe_hw *hw = &ixgbe->hw;
3563 uchar_t *bytes;
3564 struct ether_addr sysaddr;
3565 uint_t nelts;
3566 int err;
3567 boolean_t found = B_FALSE;
3568
3569 /*
3570 * The "vendor's factory-set address" may already have
3571 * been extracted from the chip, but if the property
3572 * "local-mac-address" is set we use that instead.
3573 *
3574 * We check whether it looks like an array of 6
3575 * bytes (which it should, if OBP set it). If we can't
3576 * make sense of it this way, we'll ignore it.
3577 */
3578 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3579 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
3580 if (err == DDI_PROP_SUCCESS) {
3581 if (nelts == ETHERADDRL) {
3582 while (nelts--)
3583 hw->mac.addr[nelts] = bytes[nelts];
3584 found = B_TRUE;
3585 }
3586 ddi_prop_free(bytes);
3587 }
3588
3589 /*
3590 * Look up the OBP property "local-mac-address?". If the user has set
3591 * 'local-mac-address? = false', use "the system address" instead.
3592 */
3593 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
3594 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3595 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
3596 if (localetheraddr(NULL, &sysaddr) != 0) {
3597 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
3598 found = B_TRUE;
3599 }
3600 }
3601 ddi_prop_free(bytes);
3602 }
3603
3604 /*
3605 * Finally(!), if there's a valid "mac-address" property (created
3606 * if we netbooted from this interface), we must use this instead
3607 * of any of the above to ensure that the NFS/install server doesn't
3608 * get confused by the address changing as Solaris takes over!
3609 */
3610 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3611 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
3612 if (err == DDI_PROP_SUCCESS) {
3613 if (nelts == ETHERADDRL) {
3614 while (nelts--)
3615 hw->mac.addr[nelts] = bytes[nelts];
3616 found = B_TRUE;
3617 }
3618 ddi_prop_free(bytes);
3619 }
3620
3621 if (found) {
3622 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
3623 return (B_TRUE);
3624 }
3625 #else
3626 _NOTE(ARGUNUSED(ixgbe));
3627 #endif
3628
3629 return (B_TRUE);
3630 }
3631
3632 #pragma inline(ixgbe_arm_watchdog_timer)
3633 static void
3634 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
3635 {
3636 /*
3637 * Fire a watchdog timer
3638 */
3639 ixgbe->watchdog_tid =
3640 timeout(ixgbe_local_timer,
3641 (void *)ixgbe, 1 * drv_usectohz(1000000));
3642
3643 }
3644
3645 /*
3646 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
3647 */
3648 void
3649 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
3650 {
3651 mutex_enter(&ixgbe->watchdog_lock);
3652
3653 if (!ixgbe->watchdog_enable) {
3654 ixgbe->watchdog_enable = B_TRUE;
3655 ixgbe->watchdog_start = B_TRUE;
3656 ixgbe_arm_watchdog_timer(ixgbe);
3657 }
3658
3659 mutex_exit(&ixgbe->watchdog_lock);
3660 }
3661
3662 /*
3663 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
3664 */
3665 void
3666 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
3667 {
3668 timeout_id_t tid;
3669
3670 mutex_enter(&ixgbe->watchdog_lock);
3671
3672 ixgbe->watchdog_enable = B_FALSE;
3673 ixgbe->watchdog_start = B_FALSE;
3674 tid = ixgbe->watchdog_tid;
3675 ixgbe->watchdog_tid = 0;
3676
3677 mutex_exit(&ixgbe->watchdog_lock);
3678
3679 if (tid != 0)
3680 (void) untimeout(tid);
3681 }
3682
3683 /*
3684 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
3685 */
3686 void
3687 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
3688 {
3689 mutex_enter(&ixgbe->watchdog_lock);
3690
3691 if (ixgbe->watchdog_enable) {
3692 if (!ixgbe->watchdog_start) {
3693 ixgbe->watchdog_start = B_TRUE;
3694 ixgbe_arm_watchdog_timer(ixgbe);
3695 }
3696 }
3697
3698 mutex_exit(&ixgbe->watchdog_lock);
3699 }
3700
3701 /*
3702 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
3703 */
3704 static void
3705 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
3706 {
3707 mutex_enter(&ixgbe->watchdog_lock);
3708
3709 if (ixgbe->watchdog_start)
3710 ixgbe_arm_watchdog_timer(ixgbe);
3711
3712 mutex_exit(&ixgbe->watchdog_lock);
3713 }
3714
3715 /*
3716 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
3717 */
3718 void
3719 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
3720 {
3721 timeout_id_t tid;
3722
3723 mutex_enter(&ixgbe->watchdog_lock);
3724
3725 ixgbe->watchdog_start = B_FALSE;
3726 tid = ixgbe->watchdog_tid;
3727 ixgbe->watchdog_tid = 0;
3728
3729 mutex_exit(&ixgbe->watchdog_lock);
3730
3731 if (tid != 0)
3732 (void) untimeout(tid);
3733 }
3734
3735 /*
3736 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
3737 */
3738 static void
3739 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
3740 {
3741 struct ixgbe_hw *hw = &ixgbe->hw;
3742
3743 /*
3744 * mask all interrupts off
3745 */
3746 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
3747
3748 /*
3749 * for MSI-X, also disable autoclear
3750 */
3751 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3752 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
3753 }
3754
3755 IXGBE_WRITE_FLUSH(hw);
3756 }
3757
3758 /*
3759 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
3760 */
3761 static void
3762 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
3763 {
3764 struct ixgbe_hw *hw = &ixgbe->hw;
3765 uint32_t eiac, eiam;
3766 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3767
3768 /* interrupt types to enable */
3769 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */
3770 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */
3771 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
3772
3773 /* enable automask on "other" causes that this adapter can generate */
3774 eiam = ixgbe->capab->other_intr;
3775
3776 /*
3777 * msi-x mode
3778 */
3779 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3780 /* enable autoclear but not on bits 29:20 */
3781 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3782
3783 /* general purpose interrupt enable */
3784 gpie |= (IXGBE_GPIE_MSIX_MODE
3785 | IXGBE_GPIE_PBA_SUPPORT
3786 | IXGBE_GPIE_OCD
3787 | IXGBE_GPIE_EIAME);
3788 /*
3789 * non-msi-x mode
3790 */
3791 } else {
3792
3793 /* disable autoclear, leave gpie at default */
3794 eiac = 0;
3795
3796 /*
3797 * General purpose interrupt enable.
3798 * For 82599, extended interrupt automask enable
3799 * only in MSI or MSI-X mode
3800 */
3801 if ((hw->mac.type < ixgbe_mac_82599EB) ||
3802 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3803 gpie |= IXGBE_GPIE_EIAME;
3804 }
3805 }
3806
3807 /* Enable specific "other" interrupt types */
3808 switch (hw->mac.type) {
3809 case ixgbe_mac_82598EB:
3810 gpie |= ixgbe->capab->other_gpie;
3811 break;
3812
3813 case ixgbe_mac_82599EB:
3814 case ixgbe_mac_X540:
3815 gpie |= ixgbe->capab->other_gpie;
3816
3817 /* Enable RSC Delay 8us when LRO enabled */
3818 if (ixgbe->lro_enable) {
3819 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3820 }
3821 break;
3822
3823 default:
3824 break;
3825 }
3826
3827 /* write to interrupt control registers */
3828 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3829 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3830 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3831 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3832 IXGBE_WRITE_FLUSH(hw);
3833 }
3834
3835 /*
3836 * ixgbe_loopback_ioctl - Loopback support.
3837 */
3838 enum ioc_reply
3839 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
3840 {
3841 lb_info_sz_t *lbsp;
3842 lb_property_t *lbpp;
3843 uint32_t *lbmp;
3844 uint32_t size;
3845 uint32_t value;
3846
3847 if (mp->b_cont == NULL)
3848 return (IOC_INVAL);
3849
3850 switch (iocp->ioc_cmd) {
3851 default:
3852 return (IOC_INVAL);
3853
3854 case LB_GET_INFO_SIZE:
3855 size = sizeof (lb_info_sz_t);
3856 if (iocp->ioc_count != size)
3857 return (IOC_INVAL);
3858
3859 value = sizeof (lb_normal);
3860 value += sizeof (lb_mac);
3861 value += sizeof (lb_external);
3862
3863 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
3864 *lbsp = value;
3865 break;
3866
3867 case LB_GET_INFO:
3868 value = sizeof (lb_normal);
3869 value += sizeof (lb_mac);
3870 value += sizeof (lb_external);
3871
3872 size = value;
3873 if (iocp->ioc_count != size)
3874 return (IOC_INVAL);
3875
3876 value = 0;
3877 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
3878
3879 lbpp[value++] = lb_normal;
3880 lbpp[value++] = lb_mac;
3881 lbpp[value++] = lb_external;
3882 break;
3883
3884 case LB_GET_MODE:
3885 size = sizeof (uint32_t);
3886 if (iocp->ioc_count != size)
3887 return (IOC_INVAL);
3888
3889 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3890 *lbmp = ixgbe->loopback_mode;
3891 break;
3892
3893 case LB_SET_MODE:
3894 size = 0;
3895 if (iocp->ioc_count != sizeof (uint32_t))
3896 return (IOC_INVAL);
3897
3898 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3899 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
3900 return (IOC_INVAL);
3901 break;
3902 }
3903
3904 iocp->ioc_count = size;
3905 iocp->ioc_error = 0;
3906
3907 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3908 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3909 return (IOC_INVAL);
3910 }
3911
3912 return (IOC_REPLY);
3913 }
3914
3915 /*
3916 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3917 */
3918 static boolean_t
3919 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3920 {
3921 if (mode == ixgbe->loopback_mode)
3922 return (B_TRUE);
3923
3924 ixgbe->loopback_mode = mode;
3925
3926 if (mode == IXGBE_LB_NONE) {
3927 /*
3928 * Reset the chip
3929 */
3930 (void) ixgbe_reset(ixgbe);
3931 return (B_TRUE);
3932 }
3933
3934 mutex_enter(&ixgbe->gen_lock);
3935
3936 switch (mode) {
3937 default:
3938 mutex_exit(&ixgbe->gen_lock);
3939 return (B_FALSE);
3940
3941 case IXGBE_LB_EXTERNAL:
3942 break;
3943
3944 case IXGBE_LB_INTERNAL_MAC:
3945 ixgbe_set_internal_mac_loopback(ixgbe);
3946 break;
3947 }
3948
3949 mutex_exit(&ixgbe->gen_lock);
3950
3951 return (B_TRUE);
3952 }
3953
3954 /*
3955 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
3956 */
3957 static void
3958 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
3959 {
3960 struct ixgbe_hw *hw;
3961 uint32_t reg;
3962 uint8_t atlas;
3963
3964 hw = &ixgbe->hw;
3965
3966 /*
3967 * Setup MAC loopback
3968 */
3969 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
3970 reg |= IXGBE_HLREG0_LPBK;
3971 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
3972
3973 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3974 reg &= ~IXGBE_AUTOC_LMS_MASK;
3975 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3976
3977 /*
3978 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
3979 */
3980 switch (hw->mac.type) {
3981 case ixgbe_mac_82598EB:
3982 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3983 &atlas);
3984 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
3985 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3986 atlas);
3987
3988 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3989 &atlas);
3990 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3991 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3992 atlas);
3993
3994 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3995 &atlas);
3996 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3997 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3998 atlas);
3999
4000 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4001 &atlas);
4002 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
4003 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
4004 atlas);
4005 break;
4006
4007 case ixgbe_mac_82599EB:
4008 case ixgbe_mac_X540:
4009 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
4010 reg |= (IXGBE_AUTOC_FLU |
4011 IXGBE_AUTOC_10G_KX4);
4012 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
4013
4014 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
4015 B_FALSE, B_TRUE);
4016 break;
4017
4018 default:
4019 break;
4020 }
4021 }
4022
4023 #pragma inline(ixgbe_intr_rx_work)
4024 /*
4025 * ixgbe_intr_rx_work - RX processing of ISR.
4026 */
4027 static void
4028 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4029 {
4030 mblk_t *mp;
4031
4032 mutex_enter(&rx_ring->rx_lock);
4033
4034 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4035 mutex_exit(&rx_ring->rx_lock);
4036
4037 if (mp != NULL)
4038 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4039 rx_ring->ring_gen_num);
4040 }
4041
4042 #pragma inline(ixgbe_intr_tx_work)
4043 /*
4044 * ixgbe_intr_tx_work - TX processing of ISR.
4045 */
4046 static void
4047 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
4048 {
4049 ixgbe_t *ixgbe = tx_ring->ixgbe;
4050
4051 /*
4052 * Recycle the tx descriptors
4053 */
4054 tx_ring->tx_recycle(tx_ring);
4055
4056 /*
4057 * Schedule the re-transmit
4058 */
4059 if (tx_ring->reschedule &&
4060 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
4061 tx_ring->reschedule = B_FALSE;
4062 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
4063 tx_ring->ring_handle);
4064 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4065 }
4066 }
4067
4068 #pragma inline(ixgbe_intr_other_work)
4069 /*
4070 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
4071 */
4072 static void
4073 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
4074 {
4075 ASSERT(mutex_owned(&ixgbe->gen_lock));
4076
4077 /*
4078 * handle link status change
4079 */
4080 if (eicr & IXGBE_EICR_LSC) {
4081 ixgbe_driver_link_check(ixgbe);
4082 ixgbe_get_hw_state(ixgbe);
4083 }
4084
4085 /*
4086 * check for fan failure on adapters with fans
4087 */
4088 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
4089 (eicr & IXGBE_EICR_GPI_SDP1)) {
4090 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
4091
4092 /*
4093 * Disable the adapter interrupts
4094 */
4095 ixgbe_disable_adapter_interrupts(ixgbe);
4096
4097 /*
4098 * Disable Rx/Tx units
4099 */
4100 (void) ixgbe_stop_adapter(&ixgbe->hw);
4101
4102 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
4103 ixgbe_error(ixgbe,
4104 "Problem: Network adapter has been stopped "
4105 "because the fan has stopped.\n");
4106 ixgbe_error(ixgbe,
4107 "Action: Replace the adapter.\n");
4108
4109 /* re-enable the interrupt, which was automasked */
4110 ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
4111 }
4112
4113 /*
4114 * Do SFP check for adapters with hot-plug capability
4115 */
4116 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
4117 ((eicr & IXGBE_EICR_GPI_SDP1) || (eicr & IXGBE_EICR_GPI_SDP2))) {
4118 ixgbe->eicr = eicr;
4119 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
4120 ixgbe_sfp_check, (void *)ixgbe,
4121 DDI_NOSLEEP)) != DDI_SUCCESS) {
4122 ixgbe_log(ixgbe, "No memory available to dispatch "
4123 "taskq for SFP check");
4124 }
4125 }
4126
4127 /*
4128 * Do over-temperature check for adapters with temp sensor
4129 */
4130 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
4131 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
4132 ixgbe->eicr = eicr;
4133 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
4134 ixgbe_overtemp_check, (void *)ixgbe,
4135 DDI_NOSLEEP)) != DDI_SUCCESS) {
4136 ixgbe_log(ixgbe, "No memory available to dispatch "
4137 "taskq for overtemp check");
4138 }
4139 }
4140 }
4141
4142 /*
4143 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
4144 */
4145 static uint_t
4146 ixgbe_intr_legacy(void *arg1, void *arg2)
4147 {
4148 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4149 struct ixgbe_hw *hw = &ixgbe->hw;
4150 ixgbe_tx_ring_t *tx_ring;
4151 ixgbe_rx_ring_t *rx_ring;
4152 uint32_t eicr;
4153 mblk_t *mp;
4154 boolean_t tx_reschedule;
4155 uint_t result;
4156
4157 _NOTE(ARGUNUSED(arg2));
4158
4159 mutex_enter(&ixgbe->gen_lock);
4160 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4161 mutex_exit(&ixgbe->gen_lock);
4162 return (DDI_INTR_UNCLAIMED);
4163 }
4164
4165 mp = NULL;
4166 tx_reschedule = B_FALSE;
4167
4168 /*
4169 * Any bit set in eicr: claim this interrupt
4170 */
4171 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4172
4173 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4174 mutex_exit(&ixgbe->gen_lock);
4175 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4176 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4177 return (DDI_INTR_CLAIMED);
4178 }
4179
4180 if (eicr) {
4181 /*
4182 * For legacy interrupt, we have only one interrupt,
4183 * so we have only one rx ring and one tx ring enabled.
4184 */
4185 ASSERT(ixgbe->num_rx_rings == 1);
4186 ASSERT(ixgbe->num_tx_rings == 1);
4187
4188 /*
4189 * For legacy interrupt, rx rings[0] will use RTxQ[0].
4190 */
4191 if (eicr & 0x1) {
4192 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
4193 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4194 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4195 /*
4196 * Clean the rx descriptors
4197 */
4198 rx_ring = &ixgbe->rx_rings[0];
4199 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4200 }
4201
4202 /*
4203 * For legacy interrupt, tx rings[0] will use RTxQ[1].
4204 */
4205 if (eicr & 0x2) {
4206 /*
4207 * Recycle the tx descriptors
4208 */
4209 tx_ring = &ixgbe->tx_rings[0];
4210 tx_ring->tx_recycle(tx_ring);
4211
4212 /*
4213 * Schedule the re-transmit
4214 */
4215 tx_reschedule = (tx_ring->reschedule &&
4216 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4217 }
4218
4219 /* any interrupt type other than tx/rx */
4220 if (eicr & ixgbe->capab->other_intr) {
4221 switch (hw->mac.type) {
4222 case ixgbe_mac_82598EB:
4223 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4224 break;
4225
4226 case ixgbe_mac_82599EB:
4227 case ixgbe_mac_X540:
4228 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4229 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4230 break;
4231
4232 default:
4233 break;
4234 }
4235 ixgbe_intr_other_work(ixgbe, eicr);
4236 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4237 }
4238
4239 mutex_exit(&ixgbe->gen_lock);
4240
4241 result = DDI_INTR_CLAIMED;
4242 } else {
4243 mutex_exit(&ixgbe->gen_lock);
4244
4245 /*
4246 * No interrupt cause bits set: don't claim this interrupt.
4247 */
4248 result = DDI_INTR_UNCLAIMED;
4249 }
4250
4251 /* re-enable the interrupts which were automasked */
4252 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4253
4254 /*
4255 * Do the following work outside of the gen_lock
4256 */
4257 if (mp != NULL) {
4258 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4259 rx_ring->ring_gen_num);
4260 }
4261
4262 if (tx_reschedule) {
4263 tx_ring->reschedule = B_FALSE;
4264 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
4265 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4266 }
4267
4268 return (result);
4269 }
4270
4271 /*
4272 * ixgbe_intr_msi - Interrupt handler for MSI.
4273 */
4274 static uint_t
4275 ixgbe_intr_msi(void *arg1, void *arg2)
4276 {
4277 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4278 struct ixgbe_hw *hw = &ixgbe->hw;
4279 uint32_t eicr;
4280
4281 _NOTE(ARGUNUSED(arg2));
4282
4283 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4284
4285 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4286 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4287 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4288 return (DDI_INTR_CLAIMED);
4289 }
4290
4291 /*
4292 * For MSI interrupt, we have only one vector,
4293 * so we have only one rx ring and one tx ring enabled.
4294 */
4295 ASSERT(ixgbe->num_rx_rings == 1);
4296 ASSERT(ixgbe->num_tx_rings == 1);
4297
4298 /*
4299 * For MSI interrupt, rx rings[0] will use RTxQ[0].
4300 */
4301 if (eicr & 0x1) {
4302 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4303 }
4304
4305 /*
4306 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4307 */
4308 if (eicr & 0x2) {
4309 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4310 }
4311
4312 /* any interrupt type other than tx/rx */
4313 if (eicr & ixgbe->capab->other_intr) {
4314 mutex_enter(&ixgbe->gen_lock);
4315 switch (hw->mac.type) {
4316 case ixgbe_mac_82598EB:
4317 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4318 break;
4319
4320 case ixgbe_mac_82599EB:
4321 case ixgbe_mac_X540:
4322 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4323 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4324 break;
4325
4326 default:
4327 break;
4328 }
4329 ixgbe_intr_other_work(ixgbe, eicr);
4330 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4331 mutex_exit(&ixgbe->gen_lock);
4332 }
4333
4334 /* re-enable the interrupts which were automasked */
4335 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4336
4337 return (DDI_INTR_CLAIMED);
4338 }
4339
4340 /*
4341 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4342 */
4343 static uint_t
4344 ixgbe_intr_msix(void *arg1, void *arg2)
4345 {
4346 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
4347 ixgbe_t *ixgbe = vect->ixgbe;
4348 struct ixgbe_hw *hw = &ixgbe->hw;
4349 uint32_t eicr;
4350 int r_idx = 0;
4351
4352 _NOTE(ARGUNUSED(arg2));
4353
4354 /*
4355 * Clean each rx ring that has its bit set in the map
4356 */
4357 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
4358 while (r_idx >= 0) {
4359 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
4360 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4361 (ixgbe->num_rx_rings - 1));
4362 }
4363
4364 /*
4365 * Clean each tx ring that has its bit set in the map
4366 */
4367 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
4368 while (r_idx >= 0) {
4369 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
4370 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4371 (ixgbe->num_tx_rings - 1));
4372 }
4373
4374
4375 /*
4376 * Clean other interrupt (link change) that has its bit set in the map
4377 */
4378 if (BT_TEST(vect->other_map, 0) == 1) {
4379 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4380
4381 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4382 DDI_FM_OK) {
4383 ddi_fm_service_impact(ixgbe->dip,
4384 DDI_SERVICE_DEGRADED);
4385 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4386 return (DDI_INTR_CLAIMED);
4387 }
4388
4389 /*
4390 * Check "other" cause bits: any interrupt type other than tx/rx
4391 */
4392 if (eicr & ixgbe->capab->other_intr) {
4393 mutex_enter(&ixgbe->gen_lock);
4394 switch (hw->mac.type) {
4395 case ixgbe_mac_82598EB:
4396 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4397 ixgbe_intr_other_work(ixgbe, eicr);
4398 break;
4399
4400 case ixgbe_mac_82599EB:
4401 case ixgbe_mac_X540:
4402 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4403 ixgbe_intr_other_work(ixgbe, eicr);
4404 break;
4405
4406 default:
4407 break;
4408 }
4409 mutex_exit(&ixgbe->gen_lock);
4410 }
4411
4412 /* re-enable the interrupts which were automasked */
4413 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4414 }
4415
4416 return (DDI_INTR_CLAIMED);
4417 }
4418
4419 /*
4420 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4421 *
4422 * Normal sequence is to try MSI-X; if not sucessful, try MSI;
4423 * if not successful, try Legacy.
4424 * ixgbe->intr_force can be used to force sequence to start with
4425 * any of the 3 types.
4426 * If MSI-X is not used, number of tx/rx rings is forced to 1.
4427 */
4428 static int
4429 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4430 {
4431 dev_info_t *devinfo;
4432 int intr_types;
4433 int rc;
4434
4435 devinfo = ixgbe->dip;
4436
4437 /*
4438 * Get supported interrupt types
4439 */
4440 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4441
4442 if (rc != DDI_SUCCESS) {
4443 ixgbe_log(ixgbe,
4444 "Get supported interrupt types failed: %d", rc);
4445 return (IXGBE_FAILURE);
4446 }
4447 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
4448
4449 ixgbe->intr_type = 0;
4450
4451 /*
4452 * Install MSI-X interrupts
4453 */
4454 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4455 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
4456 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
4457 if (rc == IXGBE_SUCCESS)
4458 return (IXGBE_SUCCESS);
4459
4460 ixgbe_log(ixgbe,
4461 "Allocate MSI-X failed, trying MSI interrupts...");
4462 }
4463
4464 /*
4465 * MSI-X not used, force rings and groups to 1
4466 */
4467 ixgbe->num_rx_rings = 1;
4468 ixgbe->num_rx_groups = 1;
4469 ixgbe->num_tx_rings = 1;
4470 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
4471 ixgbe_log(ixgbe,
4472 "MSI-X not used, force rings and groups number to 1");
4473
4474 /*
4475 * Install MSI interrupts
4476 */
4477 if ((intr_types & DDI_INTR_TYPE_MSI) &&
4478 (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
4479 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
4480 if (rc == IXGBE_SUCCESS)
4481 return (IXGBE_SUCCESS);
4482
4483 ixgbe_log(ixgbe,
4484 "Allocate MSI failed, trying Legacy interrupts...");
4485 }
4486
4487 /*
4488 * Install legacy interrupts
4489 */
4490 if (intr_types & DDI_INTR_TYPE_FIXED) {
4491 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
4492 if (rc == IXGBE_SUCCESS)
4493 return (IXGBE_SUCCESS);
4494
4495 ixgbe_log(ixgbe,
4496 "Allocate Legacy interrupts failed");
4497 }
4498
4499 /*
4500 * If none of the 3 types succeeded, return failure
4501 */
4502 return (IXGBE_FAILURE);
4503 }
4504
4505 /*
4506 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
4507 *
4508 * For legacy and MSI, only 1 handle is needed. For MSI-X,
4509 * if fewer than 2 handles are available, return failure.
4510 * Upon success, this maps the vectors to rx and tx rings for
4511 * interrupts.
4512 */
4513 static int
4514 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
4515 {
4516 dev_info_t *devinfo;
4517 int request, count, actual;
4518 int minimum;
4519 int rc;
4520 uint32_t ring_per_group;
4521
4522 devinfo = ixgbe->dip;
4523
4524 switch (intr_type) {
4525 case DDI_INTR_TYPE_FIXED:
4526 request = 1; /* Request 1 legacy interrupt handle */
4527 minimum = 1;
4528 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
4529 break;
4530
4531 case DDI_INTR_TYPE_MSI:
4532 request = 1; /* Request 1 MSI interrupt handle */
4533 minimum = 1;
4534 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
4535 break;
4536
4537 case DDI_INTR_TYPE_MSIX:
4538 /*
4539 * Best number of vectors for the adapter is
4540 * (# rx rings + # tx rings), however we will
4541 * limit the request number.
4542 */
4543 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
4544 if (request > ixgbe->capab->max_ring_vect)
4545 request = ixgbe->capab->max_ring_vect;
4546 minimum = 1;
4547 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
4548 break;
4549
4550 default:
4551 ixgbe_log(ixgbe,
4552 "invalid call to ixgbe_alloc_intr_handles(): %d\n",
4553 intr_type);
4554 return (IXGBE_FAILURE);
4555 }
4556 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d",
4557 request, minimum);
4558
4559 /*
4560 * Get number of supported interrupts
4561 */
4562 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4563 if ((rc != DDI_SUCCESS) || (count < minimum)) {
4564 ixgbe_log(ixgbe,
4565 "Get interrupt number failed. Return: %d, count: %d",
4566 rc, count);
4567 return (IXGBE_FAILURE);
4568 }
4569 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
4570
4571 actual = 0;
4572 ixgbe->intr_cnt = 0;
4573 ixgbe->intr_cnt_max = 0;
4574 ixgbe->intr_cnt_min = 0;
4575
4576 /*
4577 * Allocate an array of interrupt handles
4578 */
4579 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
4580 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
4581
4582 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
4583 request, &actual, DDI_INTR_ALLOC_NORMAL);
4584 if (rc != DDI_SUCCESS) {
4585 ixgbe_log(ixgbe, "Allocate interrupts failed. "
4586 "return: %d, request: %d, actual: %d",
4587 rc, request, actual);
4588 goto alloc_handle_fail;
4589 }
4590 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
4591
4592 /*
4593 * upper/lower limit of interrupts
4594 */
4595 ixgbe->intr_cnt = actual;
4596 ixgbe->intr_cnt_max = request;
4597 ixgbe->intr_cnt_min = minimum;
4598
4599 /*
4600 * rss number per group should not exceed the rx interrupt number,
4601 * else need to adjust rx ring number.
4602 */
4603 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4604 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
4605 if (actual < ring_per_group) {
4606 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
4607 ixgbe_setup_vmdq_rss_conf(ixgbe);
4608 }
4609
4610 /*
4611 * Now we know the actual number of vectors. Here we map the vector
4612 * to other, rx rings and tx ring.
4613 */
4614 if (actual < minimum) {
4615 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
4616 actual);
4617 goto alloc_handle_fail;
4618 }
4619
4620 /*
4621 * Get priority for first vector, assume remaining are all the same
4622 */
4623 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
4624 if (rc != DDI_SUCCESS) {
4625 ixgbe_log(ixgbe,
4626 "Get interrupt priority failed: %d", rc);
4627 goto alloc_handle_fail;
4628 }
4629
4630 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
4631 if (rc != DDI_SUCCESS) {
4632 ixgbe_log(ixgbe,
4633 "Get interrupt cap failed: %d", rc);
4634 goto alloc_handle_fail;
4635 }
4636
4637 ixgbe->intr_type = intr_type;
4638
4639 return (IXGBE_SUCCESS);
4640
4641 alloc_handle_fail:
4642 ixgbe_rem_intrs(ixgbe);
4643
4644 return (IXGBE_FAILURE);
4645 }
4646
4647 /*
4648 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
4649 *
4650 * Before adding the interrupt handlers, the interrupt vectors have
4651 * been allocated, and the rx/tx rings have also been allocated.
4652 */
4653 static int
4654 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
4655 {
4656 int vector = 0;
4657 int rc;
4658
4659 switch (ixgbe->intr_type) {
4660 case DDI_INTR_TYPE_MSIX:
4661 /*
4662 * Add interrupt handler for all vectors
4663 */
4664 for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
4665 /*
4666 * install pointer to vect_map[vector]
4667 */
4668 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4669 (ddi_intr_handler_t *)ixgbe_intr_msix,
4670 (void *)&ixgbe->vect_map[vector], NULL);
4671
4672 if (rc != DDI_SUCCESS) {
4673 ixgbe_log(ixgbe,
4674 "Add interrupt handler failed. "
4675 "return: %d, vector: %d", rc, vector);
4676 for (vector--; vector >= 0; vector--) {
4677 (void) ddi_intr_remove_handler(
4678 ixgbe->htable[vector]);
4679 }
4680 return (IXGBE_FAILURE);
4681 }
4682 }
4683
4684 break;
4685
4686 case DDI_INTR_TYPE_MSI:
4687 /*
4688 * Add interrupt handlers for the only vector
4689 */
4690 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4691 (ddi_intr_handler_t *)ixgbe_intr_msi,
4692 (void *)ixgbe, NULL);
4693
4694 if (rc != DDI_SUCCESS) {
4695 ixgbe_log(ixgbe,
4696 "Add MSI interrupt handler failed: %d", rc);
4697 return (IXGBE_FAILURE);
4698 }
4699
4700 break;
4701
4702 case DDI_INTR_TYPE_FIXED:
4703 /*
4704 * Add interrupt handlers for the only vector
4705 */
4706 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4707 (ddi_intr_handler_t *)ixgbe_intr_legacy,
4708 (void *)ixgbe, NULL);
4709
4710 if (rc != DDI_SUCCESS) {
4711 ixgbe_log(ixgbe,
4712 "Add legacy interrupt handler failed: %d", rc);
4713 return (IXGBE_FAILURE);
4714 }
4715
4716 break;
4717
4718 default:
4719 return (IXGBE_FAILURE);
4720 }
4721
4722 return (IXGBE_SUCCESS);
4723 }
4724
4725 #pragma inline(ixgbe_map_rxring_to_vector)
4726 /*
4727 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
4728 */
4729 static void
4730 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
4731 {
4732 /*
4733 * Set bit in map
4734 */
4735 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4736
4737 /*
4738 * Count bits set
4739 */
4740 ixgbe->vect_map[v_idx].rxr_cnt++;
4741
4742 /*
4743 * Remember bit position
4744 */
4745 ixgbe->rx_rings[r_idx].intr_vector = v_idx;
4746 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
4747 }
4748
4749 #pragma inline(ixgbe_map_txring_to_vector)
4750 /*
4751 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
4752 */
4753 static void
4754 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
4755 {
4756 /*
4757 * Set bit in map
4758 */
4759 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
4760
4761 /*
4762 * Count bits set
4763 */
4764 ixgbe->vect_map[v_idx].txr_cnt++;
4765
4766 /*
4767 * Remember bit position
4768 */
4769 ixgbe->tx_rings[t_idx].intr_vector = v_idx;
4770 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
4771 }
4772
4773 /*
4774 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
4775 * allocation register (IVAR).
4776 * cause:
4777 * -1 : other cause
4778 * 0 : rx
4779 * 1 : tx
4780 */
4781 static void
4782 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4783 int8_t cause)
4784 {
4785 struct ixgbe_hw *hw = &ixgbe->hw;
4786 u32 ivar, index;
4787
4788 switch (hw->mac.type) {
4789 case ixgbe_mac_82598EB:
4790 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4791 if (cause == -1) {
4792 cause = 0;
4793 }
4794 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4795 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4796 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4797 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4798 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4799 break;
4800
4801 case ixgbe_mac_82599EB:
4802 case ixgbe_mac_X540:
4803 if (cause == -1) {
4804 /* other causes */
4805 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4806 index = (intr_alloc_entry & 1) * 8;
4807 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4808 ivar &= ~(0xFF << index);
4809 ivar |= (msix_vector << index);
4810 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4811 } else {
4812 /* tx or rx causes */
4813 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4814 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4815 ivar = IXGBE_READ_REG(hw,
4816 IXGBE_IVAR(intr_alloc_entry >> 1));
4817 ivar &= ~(0xFF << index);
4818 ivar |= (msix_vector << index);
4819 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4820 ivar);
4821 }
4822 break;
4823
4824 default:
4825 break;
4826 }
4827 }
4828
4829 /*
4830 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
4831 * given interrupt vector allocation register (IVAR).
4832 * cause:
4833 * -1 : other cause
4834 * 0 : rx
4835 * 1 : tx
4836 */
4837 static void
4838 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4839 {
4840 struct ixgbe_hw *hw = &ixgbe->hw;
4841 u32 ivar, index;
4842
4843 switch (hw->mac.type) {
4844 case ixgbe_mac_82598EB:
4845 if (cause == -1) {
4846 cause = 0;
4847 }
4848 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4849 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4850 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4851 (intr_alloc_entry & 0x3)));
4852 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4853 break;
4854
4855 case ixgbe_mac_82599EB:
4856 case ixgbe_mac_X540:
4857 if (cause == -1) {
4858 /* other causes */
4859 index = (intr_alloc_entry & 1) * 8;
4860 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4861 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4862 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4863 } else {
4864 /* tx or rx causes */
4865 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4866 ivar = IXGBE_READ_REG(hw,
4867 IXGBE_IVAR(intr_alloc_entry >> 1));
4868 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4869 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4870 ivar);
4871 }
4872 break;
4873
4874 default:
4875 break;
4876 }
4877 }
4878
4879 /*
4880 * ixgbe_disable_ivar - Disble the given entry by clearing the VAL bit of
4881 * given interrupt vector allocation register (IVAR).
4882 * cause:
4883 * -1 : other cause
4884 * 0 : rx
4885 * 1 : tx
4886 */
4887 static void
4888 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4889 {
4890 struct ixgbe_hw *hw = &ixgbe->hw;
4891 u32 ivar, index;
4892
4893 switch (hw->mac.type) {
4894 case ixgbe_mac_82598EB:
4895 if (cause == -1) {
4896 cause = 0;
4897 }
4898 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4899 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4900 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
4901 (intr_alloc_entry & 0x3)));
4902 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4903 break;
4904
4905 case ixgbe_mac_82599EB:
4906 case ixgbe_mac_X540:
4907 if (cause == -1) {
4908 /* other causes */
4909 index = (intr_alloc_entry & 1) * 8;
4910 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4911 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4912 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4913 } else {
4914 /* tx or rx causes */
4915 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4916 ivar = IXGBE_READ_REG(hw,
4917 IXGBE_IVAR(intr_alloc_entry >> 1));
4918 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4919 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4920 ivar);
4921 }
4922 break;
4923
4924 default:
4925 break;
4926 }
4927 }
4928
4929 /*
4930 * Convert the rx ring index driver maintained to the rx ring index
4931 * in h/w.
4932 */
4933 static uint32_t
4934 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
4935 {
4936
4937 struct ixgbe_hw *hw = &ixgbe->hw;
4938 uint32_t rx_ring_per_group, hw_rx_index;
4939
4940 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
4941 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
4942 return (sw_rx_index);
4943 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
4944 switch (hw->mac.type) {
4945 case ixgbe_mac_82598EB:
4946 return (sw_rx_index);
4947
4948 case ixgbe_mac_82599EB:
4949 case ixgbe_mac_X540:
4950 return (sw_rx_index * 2);
4951
4952 default:
4953 break;
4954 }
4955 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
4956 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4957
4958 switch (hw->mac.type) {
4959 case ixgbe_mac_82598EB:
4960 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
4961 16 + (sw_rx_index % rx_ring_per_group);
4962 return (hw_rx_index);
4963
4964 case ixgbe_mac_82599EB:
4965 case ixgbe_mac_X540:
4966 if (ixgbe->num_rx_groups > 32) {
4967 hw_rx_index = (sw_rx_index /
4968 rx_ring_per_group) * 2 +
4969 (sw_rx_index % rx_ring_per_group);
4970 } else {
4971 hw_rx_index = (sw_rx_index /
4972 rx_ring_per_group) * 4 +
4973 (sw_rx_index % rx_ring_per_group);
4974 }
4975 return (hw_rx_index);
4976
4977 default:
4978 break;
4979 }
4980 }
4981
4982 /*
4983 * Should never reach. Just to make compiler happy.
4984 */
4985 return (sw_rx_index);
4986 }
4987
4988 /*
4989 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
4990 *
4991 * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt
4992 * to vector[0 - (intr_cnt -1)].
4993 */
4994 static int
4995 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
4996 {
4997 int i, vector = 0;
4998
4999 /* initialize vector map */
5000 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
5001 for (i = 0; i < ixgbe->intr_cnt; i++) {
5002 ixgbe->vect_map[i].ixgbe = ixgbe;
5003 }
5004
5005 /*
5006 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
5007 * tx rings[0] on RTxQ[1].
5008 */
5009 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5010 ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
5011 ixgbe_map_txring_to_vector(ixgbe, 0, 1);
5012 return (IXGBE_SUCCESS);
5013 }
5014
5015 /*
5016 * Interrupts/vectors mapping for MSI-X
5017 */
5018
5019 /*
5020 * Map other interrupt to vector 0,
5021 * Set bit in map and count the bits set.
5022 */
5023 BT_SET(ixgbe->vect_map[vector].other_map, 0);
5024 ixgbe->vect_map[vector].other_cnt++;
5025
5026 /*
5027 * Map rx ring interrupts to vectors
5028 */
5029 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5030 ixgbe_map_rxring_to_vector(ixgbe, i, vector);
5031 vector = (vector +1) % ixgbe->intr_cnt;
5032 }
5033
5034 /*
5035 * Map tx ring interrupts to vectors
5036 */
5037 for (i = 0; i < ixgbe->num_tx_rings; i++) {
5038 ixgbe_map_txring_to_vector(ixgbe, i, vector);
5039 vector = (vector +1) % ixgbe->intr_cnt;
5040 }
5041
5042 return (IXGBE_SUCCESS);
5043 }
5044
5045 /*
5046 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
5047 *
5048 * This relies on ring/vector mapping already set up in the
5049 * vect_map[] structures
5050 */
5051 static void
5052 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
5053 {
5054 struct ixgbe_hw *hw = &ixgbe->hw;
5055 ixgbe_intr_vector_t *vect; /* vector bitmap */
5056 int r_idx; /* ring index */
5057 int v_idx; /* vector index */
5058 uint32_t hw_index;
5059
5060 /*
5061 * Clear any previous entries
5062 */
5063 switch (hw->mac.type) {
5064 case ixgbe_mac_82598EB:
5065 for (v_idx = 0; v_idx < 25; v_idx++)
5066 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5067 break;
5068
5069 case ixgbe_mac_82599EB:
5070 case ixgbe_mac_X540:
5071 for (v_idx = 0; v_idx < 64; v_idx++)
5072 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
5073 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5074 break;
5075
5076 default:
5077 break;
5078 }
5079
5080 /*
5081 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5082 * tx rings[0] will use RTxQ[1].
5083 */
5084 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5085 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5086 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5087 return;
5088 }
5089
5090 /*
5091 * For MSI-X interrupt, "Other" is always on vector[0].
5092 */
5093 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
5094
5095 /*
5096 * For each interrupt vector, populate the IVAR table
5097 */
5098 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
5099 vect = &ixgbe->vect_map[v_idx];
5100
5101 /*
5102 * For each rx ring bit set
5103 */
5104 r_idx = bt_getlowbit(vect->rx_map, 0,
5105 (ixgbe->num_rx_rings - 1));
5106
5107 while (r_idx >= 0) {
5108 hw_index = ixgbe->rx_rings[r_idx].hw_index;
5109 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
5110 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
5111 (ixgbe->num_rx_rings - 1));
5112 }
5113
5114 /*
5115 * For each tx ring bit set
5116 */
5117 r_idx = bt_getlowbit(vect->tx_map, 0,
5118 (ixgbe->num_tx_rings - 1));
5119
5120 while (r_idx >= 0) {
5121 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
5122 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
5123 (ixgbe->num_tx_rings - 1));
5124 }
5125 }
5126 }
5127
5128 /*
5129 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
5130 */
5131 static void
5132 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
5133 {
5134 int i;
5135 int rc;
5136
5137 for (i = 0; i < ixgbe->intr_cnt; i++) {
5138 rc = ddi_intr_remove_handler(ixgbe->htable[i]);
5139 if (rc != DDI_SUCCESS) {
5140 IXGBE_DEBUGLOG_1(ixgbe,
5141 "Remove intr handler failed: %d", rc);
5142 }
5143 }
5144 }
5145
5146 /*
5147 * ixgbe_rem_intrs - Remove the allocated interrupts.
5148 */
5149 static void
5150 ixgbe_rem_intrs(ixgbe_t *ixgbe)
5151 {
5152 int i;
5153 int rc;
5154
5155 for (i = 0; i < ixgbe->intr_cnt; i++) {
5156 rc = ddi_intr_free(ixgbe->htable[i]);
5157 if (rc != DDI_SUCCESS) {
5158 IXGBE_DEBUGLOG_1(ixgbe,
5159 "Free intr failed: %d", rc);
5160 }
5161 }
5162
5163 kmem_free(ixgbe->htable, ixgbe->intr_size);
5164 ixgbe->htable = NULL;
5165 }
5166
5167 /*
5168 * ixgbe_enable_intrs - Enable all the ddi interrupts.
5169 */
5170 static int
5171 ixgbe_enable_intrs(ixgbe_t *ixgbe)
5172 {
5173 int i;
5174 int rc;
5175
5176 /*
5177 * Enable interrupts
5178 */
5179 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5180 /*
5181 * Call ddi_intr_block_enable() for MSI
5182 */
5183 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
5184 if (rc != DDI_SUCCESS) {
5185 ixgbe_log(ixgbe,
5186 "Enable block intr failed: %d", rc);
5187 return (IXGBE_FAILURE);
5188 }
5189 } else {
5190 /*
5191 * Call ddi_intr_enable() for Legacy/MSI non block enable
5192 */
5193 for (i = 0; i < ixgbe->intr_cnt; i++) {
5194 rc = ddi_intr_enable(ixgbe->htable[i]);
5195 if (rc != DDI_SUCCESS) {
5196 ixgbe_log(ixgbe,
5197 "Enable intr failed: %d", rc);
5198 return (IXGBE_FAILURE);
5199 }
5200 }
5201 }
5202
5203 return (IXGBE_SUCCESS);
5204 }
5205
5206 /*
5207 * ixgbe_disable_intrs - Disable all the interrupts.
5208 */
5209 static int
5210 ixgbe_disable_intrs(ixgbe_t *ixgbe)
5211 {
5212 int i;
5213 int rc;
5214
5215 /*
5216 * Disable all interrupts
5217 */
5218 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5219 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
5220 if (rc != DDI_SUCCESS) {
5221 ixgbe_log(ixgbe,
5222 "Disable block intr failed: %d", rc);
5223 return (IXGBE_FAILURE);
5224 }
5225 } else {
5226 for (i = 0; i < ixgbe->intr_cnt; i++) {
5227 rc = ddi_intr_disable(ixgbe->htable[i]);
5228 if (rc != DDI_SUCCESS) {
5229 ixgbe_log(ixgbe,
5230 "Disable intr failed: %d", rc);
5231 return (IXGBE_FAILURE);
5232 }
5233 }
5234 }
5235
5236 return (IXGBE_SUCCESS);
5237 }
5238
5239 /*
5240 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
5241 */
5242 static void
5243 ixgbe_get_hw_state(ixgbe_t *ixgbe)
5244 {
5245 struct ixgbe_hw *hw = &ixgbe->hw;
5246 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
5247 boolean_t link_up = B_FALSE;
5248 uint32_t pcs1g_anlp = 0;
5249 uint32_t pcs1g_ana = 0;
5250 boolean_t autoneg = B_FALSE;
5251
5252 ASSERT(mutex_owned(&ixgbe->gen_lock));
5253 ixgbe->param_lp_1000fdx_cap = 0;
5254 ixgbe->param_lp_100fdx_cap = 0;
5255
5256 /* check for link, don't wait */
5257 (void) ixgbe_check_link(hw, &speed, &link_up, false);
5258 pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
5259
5260 if (link_up) {
5261 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
5262
5263 ixgbe->param_lp_1000fdx_cap =
5264 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5265 ixgbe->param_lp_100fdx_cap =
5266 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5267 }
5268
5269 (void) ixgbe_get_link_capabilities(hw, &speed, &autoneg);
5270
5271 ixgbe->param_adv_1000fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
5272 (speed & IXGBE_LINK_SPEED_1GB_FULL)) ? 1 : 0;
5273 ixgbe->param_adv_100fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
5274 (speed & IXGBE_LINK_SPEED_100_FULL)) ? 1 : 0;
5275 }
5276
5277 /*
5278 * ixgbe_get_driver_control - Notify that driver is in control of device.
5279 */
5280 static void
5281 ixgbe_get_driver_control(struct ixgbe_hw *hw)
5282 {
5283 uint32_t ctrl_ext;
5284
5285 /*
5286 * Notify firmware that driver is in control of device
5287 */
5288 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5289 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
5290 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5291 }
5292
5293 /*
5294 * ixgbe_release_driver_control - Notify that driver is no longer in control
5295 * of device.
5296 */
5297 static void
5298 ixgbe_release_driver_control(struct ixgbe_hw *hw)
5299 {
5300 uint32_t ctrl_ext;
5301
5302 /*
5303 * Notify firmware that driver is no longer in control of device
5304 */
5305 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5306 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
5307 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5308 }
5309
5310 /*
5311 * ixgbe_atomic_reserve - Atomic decrease operation.
5312 */
5313 int
5314 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
5315 {
5316 uint32_t oldval;
5317 uint32_t newval;
5318
5319 /*
5320 * ATOMICALLY
5321 */
5322 do {
5323 oldval = *count_p;
5324 if (oldval < n)
5325 return (-1);
5326 newval = oldval - n;
5327 } while (atomic_cas_32(count_p, oldval, newval) != oldval);
5328
5329 return (newval);
5330 }
5331
5332 /*
5333 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5334 */
5335 static uint8_t *
5336 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
5337 {
5338 uint8_t *addr = *upd_ptr;
5339 uint8_t *new_ptr;
5340
5341 _NOTE(ARGUNUSED(hw));
5342 _NOTE(ARGUNUSED(vmdq));
5343
5344 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
5345 *upd_ptr = new_ptr;
5346 return (addr);
5347 }
5348
5349 /*
5350 * FMA support
5351 */
5352 int
5353 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
5354 {
5355 ddi_fm_error_t de;
5356
5357 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5358 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5359 return (de.fme_status);
5360 }
5361
5362 int
5363 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
5364 {
5365 ddi_fm_error_t de;
5366
5367 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5368 return (de.fme_status);
5369 }
5370
5371 /*
5372 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
5373 */
5374 static int
5375 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5376 {
5377 _NOTE(ARGUNUSED(impl_data));
5378 /*
5379 * as the driver can always deal with an error in any dma or
5380 * access handle, we can just return the fme_status value.
5381 */
5382 pci_ereport_post(dip, err, NULL);
5383 return (err->fme_status);
5384 }
5385
5386 static void
5387 ixgbe_fm_init(ixgbe_t *ixgbe)
5388 {
5389 ddi_iblock_cookie_t iblk;
5390 int fma_dma_flag;
5391
5392 /*
5393 * Only register with IO Fault Services if we have some capability
5394 */
5395 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5396 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5397 } else {
5398 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5399 }
5400
5401 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5402 fma_dma_flag = 1;
5403 } else {
5404 fma_dma_flag = 0;
5405 }
5406
5407 ixgbe_set_fma_flags(fma_dma_flag);
5408
5409 if (ixgbe->fm_capabilities) {
5410
5411 /*
5412 * Register capabilities with IO Fault Services
5413 */
5414 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
5415
5416 /*
5417 * Initialize pci ereport capabilities if ereport capable
5418 */
5419 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5420 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5421 pci_ereport_setup(ixgbe->dip);
5422
5423 /*
5424 * Register error callback if error callback capable
5425 */
5426 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5427 ddi_fm_handler_register(ixgbe->dip,
5428 ixgbe_fm_error_cb, (void*) ixgbe);
5429 }
5430 }
5431
5432 static void
5433 ixgbe_fm_fini(ixgbe_t *ixgbe)
5434 {
5435 /*
5436 * Only unregister FMA capabilities if they are registered
5437 */
5438 if (ixgbe->fm_capabilities) {
5439
5440 /*
5441 * Release any resources allocated by pci_ereport_setup()
5442 */
5443 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5444 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5445 pci_ereport_teardown(ixgbe->dip);
5446
5447 /*
5448 * Un-register error callback if error callback capable
5449 */
5450 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5451 ddi_fm_handler_unregister(ixgbe->dip);
5452
5453 /*
5454 * Unregister from IO Fault Service
5455 */
5456 ddi_fm_fini(ixgbe->dip);
5457 }
5458 }
5459
5460 void
5461 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
5462 {
5463 uint64_t ena;
5464 char buf[FM_MAX_CLASS];
5465
5466 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5467 ena = fm_ena_generate(0, FM_ENA_FMT1);
5468 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
5469 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
5470 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5471 }
5472 }
5473
5474 static int
5475 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
5476 {
5477 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
5478
5479 mutex_enter(&rx_ring->rx_lock);
5480 rx_ring->ring_gen_num = mr_gen_num;
5481 mutex_exit(&rx_ring->rx_lock);
5482 return (0);
5483 }
5484
5485 /*
5486 * Get the global ring index by a ring index within a group.
5487 */
5488 static int
5489 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
5490 {
5491 ixgbe_rx_ring_t *rx_ring;
5492 int i;
5493
5494 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5495 rx_ring = &ixgbe->rx_rings[i];
5496 if (rx_ring->group_index == gindex)
5497 rindex--;
5498 if (rindex < 0)
5499 return (i);
5500 }
5501
5502 return (-1);
5503 }
5504
5505 /*
5506 * Callback funtion for MAC layer to register all rings.
5507 */
5508 /* ARGSUSED */
5509 void
5510 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
5511 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5512 {
5513 ixgbe_t *ixgbe = (ixgbe_t *)arg;
5514 mac_intr_t *mintr = &infop->mri_intr;
5515
5516 switch (rtype) {
5517 case MAC_RING_TYPE_RX: {
5518 /*
5519 * 'index' is the ring index within the group.
5520 * Need to get the global ring index by searching in groups.
5521 */
5522 int global_ring_index = ixgbe_get_rx_ring_index(
5523 ixgbe, group_index, ring_index);
5524
5525 ASSERT(global_ring_index >= 0);
5526
5527 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
5528 rx_ring->ring_handle = rh;
5529
5530 infop->mri_driver = (mac_ring_driver_t)rx_ring;
5531 infop->mri_start = ixgbe_ring_start;
5532 infop->mri_stop = NULL;
5533 infop->mri_poll = ixgbe_ring_rx_poll;
5534 infop->mri_stat = ixgbe_rx_ring_stat;
5535
5536 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
5537 mintr->mi_enable = ixgbe_rx_ring_intr_enable;
5538 mintr->mi_disable = ixgbe_rx_ring_intr_disable;
5539 if (ixgbe->intr_type &
5540 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5541 mintr->mi_ddi_handle =
5542 ixgbe->htable[rx_ring->intr_vector];
5543 }
5544
5545 break;
5546 }
5547 case MAC_RING_TYPE_TX: {
5548 ASSERT(group_index == -1);
5549 ASSERT(ring_index < ixgbe->num_tx_rings);
5550
5551 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
5552 tx_ring->ring_handle = rh;
5553
5554 infop->mri_driver = (mac_ring_driver_t)tx_ring;
5555 infop->mri_start = NULL;
5556 infop->mri_stop = NULL;
5557 infop->mri_tx = ixgbe_ring_tx;
5558 infop->mri_stat = ixgbe_tx_ring_stat;
5559 if (ixgbe->intr_type &
5560 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5561 mintr->mi_ddi_handle =
5562 ixgbe->htable[tx_ring->intr_vector];
5563 }
5564 break;
5565 }
5566 default:
5567 break;
5568 }
5569 }
5570
5571 /*
5572 * Callback funtion for MAC layer to register all groups.
5573 */
5574 void
5575 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
5576 mac_group_info_t *infop, mac_group_handle_t gh)
5577 {
5578 ixgbe_t *ixgbe = (ixgbe_t *)arg;
5579
5580 switch (rtype) {
5581 case MAC_RING_TYPE_RX: {
5582 ixgbe_rx_group_t *rx_group;
5583
5584 rx_group = &ixgbe->rx_groups[index];
5585 rx_group->group_handle = gh;
5586
5587 infop->mgi_driver = (mac_group_driver_t)rx_group;
5588 infop->mgi_start = NULL;
5589 infop->mgi_stop = NULL;
5590 infop->mgi_addmac = ixgbe_addmac;
5591 infop->mgi_remmac = ixgbe_remmac;
5592 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
5593
5594 break;
5595 }
5596 case MAC_RING_TYPE_TX:
5597 break;
5598 default:
5599 break;
5600 }
5601 }
5602
5603 /*
5604 * Enable interrupt on the specificed rx ring.
5605 */
5606 int
5607 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
5608 {
5609 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5610 ixgbe_t *ixgbe = rx_ring->ixgbe;
5611 int r_idx = rx_ring->index;
5612 int hw_r_idx = rx_ring->hw_index;
5613 int v_idx = rx_ring->intr_vector;
5614
5615 mutex_enter(&ixgbe->gen_lock);
5616 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5617 mutex_exit(&ixgbe->gen_lock);
5618 /*
5619 * Simply return 0.
5620 * Interrupts are being adjusted. ixgbe_intr_adjust()
5621 * will eventually re-enable the interrupt when it's
5622 * done with the adjustment.
5623 */
5624 return (0);
5625 }
5626
5627 /*
5628 * To enable interrupt by setting the VAL bit of given interrupt
5629 * vector allocation register (IVAR).
5630 */
5631 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);
5632
5633 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
5634
5635 /*
5636 * Trigger a Rx interrupt on this ring
5637 */
5638 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
5639 IXGBE_WRITE_FLUSH(&ixgbe->hw);
5640
5641 mutex_exit(&ixgbe->gen_lock);
5642
5643 return (0);
5644 }
5645
5646 /*
5647 * Disable interrupt on the specificed rx ring.
5648 */
5649 int
5650 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
5651 {
5652 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5653 ixgbe_t *ixgbe = rx_ring->ixgbe;
5654 int r_idx = rx_ring->index;
5655 int hw_r_idx = rx_ring->hw_index;
5656 int v_idx = rx_ring->intr_vector;
5657
5658 mutex_enter(&ixgbe->gen_lock);
5659 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5660 mutex_exit(&ixgbe->gen_lock);
5661 /*
5662 * Simply return 0.
5663 * In the rare case where an interrupt is being
5664 * disabled while interrupts are being adjusted,
5665 * we don't fail the operation. No interrupts will
5666 * be generated while they are adjusted, and
5667 * ixgbe_intr_adjust() will cause the interrupts
5668 * to be re-enabled once it completes. Note that
5669 * in this case, packets may be delivered to the
5670 * stack via interrupts before xgbe_rx_ring_intr_enable()
5671 * is called again. This is acceptable since interrupt
5672 * adjustment is infrequent, and the stack will be
5673 * able to handle these packets.
5674 */
5675 return (0);
5676 }
5677
5678 /*
5679 * To disable interrupt by clearing the VAL bit of given interrupt
5680 * vector allocation register (IVAR).
5681 */
5682 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
5683
5684 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
5685
5686 mutex_exit(&ixgbe->gen_lock);
5687
5688 return (0);
5689 }
5690
5691 /*
5692 * Add a mac address.
5693 */
5694 static int
5695 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
5696 {
5697 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5698 ixgbe_t *ixgbe = rx_group->ixgbe;
5699 struct ixgbe_hw *hw = &ixgbe->hw;
5700 int slot, i;
5701
5702 mutex_enter(&ixgbe->gen_lock);
5703
5704 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5705 mutex_exit(&ixgbe->gen_lock);
5706 return (ECANCELED);
5707 }
5708
5709 if (ixgbe->unicst_avail == 0) {
5710 /* no slots available */
5711 mutex_exit(&ixgbe->gen_lock);
5712 return (ENOSPC);
5713 }
5714
5715 /*
5716 * The first ixgbe->num_rx_groups slots are reserved for each respective
5717 * group. The rest slots are shared by all groups. While adding a
5718 * MAC address, reserved slots are firstly checked then the shared
5719 * slots are searched.
5720 */
5721 slot = -1;
5722 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
5723 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
5724 if (ixgbe->unicst_addr[i].mac.set == 0) {
5725 slot = i;
5726 break;
5727 }
5728 }
5729 } else {
5730 slot = rx_group->index;
5731 }
5732
5733 if (slot == -1) {
5734 /* no slots available */
5735 mutex_exit(&ixgbe->gen_lock);
5736 return (ENOSPC);
5737 }
5738
5739 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5740 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
5741 rx_group->index, IXGBE_RAH_AV);
5742 ixgbe->unicst_addr[slot].mac.set = 1;
5743 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
5744 ixgbe->unicst_avail--;
5745
5746 mutex_exit(&ixgbe->gen_lock);
5747
5748 return (0);
5749 }
5750
5751 /*
5752 * Remove a mac address.
5753 */
5754 static int
5755 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
5756 {
5757 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5758 ixgbe_t *ixgbe = rx_group->ixgbe;
5759 struct ixgbe_hw *hw = &ixgbe->hw;
5760 int slot;
5761
5762 mutex_enter(&ixgbe->gen_lock);
5763
5764 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5765 mutex_exit(&ixgbe->gen_lock);
5766 return (ECANCELED);
5767 }
5768
5769 slot = ixgbe_unicst_find(ixgbe, mac_addr);
5770 if (slot == -1) {
5771 mutex_exit(&ixgbe->gen_lock);
5772 return (EINVAL);
5773 }
5774
5775 if (ixgbe->unicst_addr[slot].mac.set == 0) {
5776 mutex_exit(&ixgbe->gen_lock);
5777 return (EINVAL);
5778 }
5779
5780 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5781 (void) ixgbe_clear_rar(hw, slot);
5782 ixgbe->unicst_addr[slot].mac.set = 0;
5783 ixgbe->unicst_avail++;
5784
5785 mutex_exit(&ixgbe->gen_lock);
5786
5787 return (0);
5788 }