Print this page
9095 ixgbe MAC_CAPAB_LED support
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Sebastian Wiedenroth <sebastian.wiedenroth@skylime.net>
Reviewed by: Toomas Soome <tsoome@me.com>
Approved by: Dan McDonald <danmcd@joyent.com>
NEX-2081 ixgbe triggers warning when receiving too many interrupt vectors from DDI
SUP-479 10 Gigabit CX4 Dual Port Server Adapter EXPX9502CX4 unresponsive to external pings after upgrade from 3.1.2 to 3.1.3.5
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/ixgbe/ixgbe_main.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
|
↓ open down ↓ |
18 lines elided |
↑ open up ↑ |
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 28 * Copyright (c) 2017, Joyent, Inc.
29 - * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
29 + * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
30 30 * Copyright (c) 2013 Saso Kiselkov. All rights reserved.
31 31 * Copyright (c) 2013 OSN Online Service Nuernberg GmbH. All rights reserved.
32 32 * Copyright 2016 OmniTI Computer Consulting, Inc. All rights reserved.
33 33 */
34 34
35 35 #include "ixgbe_sw.h"
36 36
37 37 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
38 38
39 39 /*
40 40 * Local function protoypes
41 41 */
42 42 static int ixgbe_register_mac(ixgbe_t *);
43 43 static int ixgbe_identify_hardware(ixgbe_t *);
44 44 static int ixgbe_regs_map(ixgbe_t *);
45 45 static void ixgbe_init_properties(ixgbe_t *);
46 46 static int ixgbe_init_driver_settings(ixgbe_t *);
47 47 static void ixgbe_init_locks(ixgbe_t *);
48 48 static void ixgbe_destroy_locks(ixgbe_t *);
49 49 static int ixgbe_init(ixgbe_t *);
50 50 static int ixgbe_chip_start(ixgbe_t *);
51 51 static void ixgbe_chip_stop(ixgbe_t *);
52 52 static int ixgbe_reset(ixgbe_t *);
53 53 static void ixgbe_tx_clean(ixgbe_t *);
54 54 static boolean_t ixgbe_tx_drain(ixgbe_t *);
55 55 static boolean_t ixgbe_rx_drain(ixgbe_t *);
56 56 static int ixgbe_alloc_rings(ixgbe_t *);
57 57 static void ixgbe_free_rings(ixgbe_t *);
58 58 static int ixgbe_alloc_rx_data(ixgbe_t *);
59 59 static void ixgbe_free_rx_data(ixgbe_t *);
60 60 static void ixgbe_setup_rings(ixgbe_t *);
61 61 static void ixgbe_setup_rx(ixgbe_t *);
62 62 static void ixgbe_setup_tx(ixgbe_t *);
63 63 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
64 64 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
65 65 static void ixgbe_setup_rss(ixgbe_t *);
66 66 static void ixgbe_setup_vmdq(ixgbe_t *);
67 67 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
68 68 static void ixgbe_setup_rss_table(ixgbe_t *);
69 69 static void ixgbe_init_unicst(ixgbe_t *);
70 70 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
71 71 static void ixgbe_setup_multicst(ixgbe_t *);
72 72 static void ixgbe_get_hw_state(ixgbe_t *);
73 73 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
74 74 static void ixgbe_get_conf(ixgbe_t *);
75 75 static void ixgbe_init_params(ixgbe_t *);
76 76 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
77 77 static void ixgbe_driver_link_check(ixgbe_t *);
78 78 static void ixgbe_sfp_check(void *);
79 79 static void ixgbe_overtemp_check(void *);
80 80 static void ixgbe_phy_check(void *);
81 81 static void ixgbe_link_timer(void *);
82 82 static void ixgbe_local_timer(void *);
83 83 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
84 84 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
85 85 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
86 86 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
87 87 static boolean_t is_valid_mac_addr(uint8_t *);
88 88 static boolean_t ixgbe_stall_check(ixgbe_t *);
89 89 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
90 90 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
91 91 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
92 92 static int ixgbe_alloc_intrs(ixgbe_t *);
93 93 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
94 94 static int ixgbe_add_intr_handlers(ixgbe_t *);
95 95 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
96 96 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
97 97 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
98 98 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
99 99 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
100 100 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
101 101 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
102 102 static void ixgbe_setup_adapter_vector(ixgbe_t *);
103 103 static void ixgbe_rem_intr_handlers(ixgbe_t *);
104 104 static void ixgbe_rem_intrs(ixgbe_t *);
105 105 static int ixgbe_enable_intrs(ixgbe_t *);
106 106 static int ixgbe_disable_intrs(ixgbe_t *);
107 107 static uint_t ixgbe_intr_legacy(void *, void *);
108 108 static uint_t ixgbe_intr_msi(void *, void *);
109 109 static uint_t ixgbe_intr_msix(void *, void *);
110 110 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
111 111 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
112 112 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
113 113 static void ixgbe_get_driver_control(struct ixgbe_hw *);
114 114 static int ixgbe_addmac(void *, const uint8_t *);
115 115 static int ixgbe_remmac(void *, const uint8_t *);
116 116 static void ixgbe_release_driver_control(struct ixgbe_hw *);
117 117
118 118 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
119 119 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
120 120 static int ixgbe_resume(dev_info_t *);
121 121 static int ixgbe_suspend(dev_info_t *);
122 122 static int ixgbe_quiesce(dev_info_t *);
123 123 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
124 124 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
125 125 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
126 126 static int ixgbe_intr_cb_register(ixgbe_t *);
127 127 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
128 128
129 129 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
130 130 const void *impl_data);
131 131 static void ixgbe_fm_init(ixgbe_t *);
132 132 static void ixgbe_fm_fini(ixgbe_t *);
133 133
/*
 * Names of the driver-private properties exposed through the MAC
 * framework (dladm show-linkprop).  The list is NULL-terminated.
 */
char *ixgbe_priv_props[] = {
	"_tx_copy_thresh",
	"_tx_recycle_thresh",
	"_tx_overload_thresh",
	"_tx_resched_thresh",
	"_rx_copy_thresh",
	"_rx_limit_per_intr",
	"_intr_throttling",
	"_adv_pause_cap",
	"_adv_asym_pause_cap",
	NULL
};

/*
 * Number of entries in ixgbe_priv_props, including the NULL terminator.
 * The count must be derived from the array's own element type; the
 * previous definition divided by sizeof (mac_priv_prop_t), which is only
 * correct when that type happens to be pointer-sized.
 */
#define	IXGBE_MAX_PRIV_PROPS \
	(sizeof (ixgbe_priv_props) / sizeof (ixgbe_priv_props[0]))
149 149
/*
 * Character/block device entry points.  ixgbe is a pure GLDv3 NIC
 * driver, so every open/read/write/ioctl slot is a stub; only the
 * flags and revision carry information.
 */
static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
170 170
/*
 * Device operations vector.  attach/detach/quiesce are the only entry
 * points implemented here; getinfo is filled in by mac_init_ops().
 */
static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ixgbe_quiesce,		/* devo_quiesce */
};
185 185
static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ixgbe_ident,		/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};
191 191
/* Module linkage: a single driver module. */
static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};
195 195
/*
 * Access attributes for register mapping: little-endian device
 * registers, strictly ordered accesses, FM error protection enabled.
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};
205 205
/*
 * Loopback properties: the three loopback modes this driver can report
 * (none, internal MAC loopback, external loopback).
 */
static lb_property_t lb_normal = {
	normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
	external, "External", IXGBE_LB_EXTERNAL
};
220 220
#define	IXGBE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

/*
 * GLDv3 callback vector.  NULL slots are optional callbacks this driver
 * does not supply through the table (slot comments follow the standard
 * mac_callbacks_t layout -- verify against <sys/mac_provider.h>).
 */
static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,	/* mc_callbacks */
	ixgbe_m_stat,		/* mc_getstat */
	ixgbe_m_start,		/* mc_start */
	ixgbe_m_stop,		/* mc_stop */
	ixgbe_m_promisc,	/* mc_setpromisc */
	ixgbe_m_multicst,	/* mc_multicst */
	NULL,			/* mc_unicst */
	NULL,			/* mc_tx */
	NULL,			/* mc_reserved */
	ixgbe_m_ioctl,		/* mc_ioctl */
	ixgbe_m_getcapab,	/* mc_getcapab */
	NULL,			/* mc_open */
	NULL,			/* mc_close */
	ixgbe_m_setprop,	/* mc_setprop */
	ixgbe_m_getprop,	/* mc_getprop */
	ixgbe_m_propinfo	/* mc_propinfo */
};
242 242
/*
 * Initialize capabilities of each supported adapter type.
 */

/* Intel 82598EB limits and capabilities */
static adapter_info_t ixgbe_82598eb_cap = {
	64,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	64,		/* default number of rx queues */
	16,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	32,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	16366,		/* maximum MTU size */
	0xFFFF,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	18,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};
269 269
/* Intel 82599EB limits and capabilities */
static adapter_info_t ixgbe_82599eb_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1
	| IXGBE_EICR_GPI_SDP2),	/* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN
	| IXGBE_SDP2_GPIEN),	/* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE
	| IXGBE_FLAG_SFP_PLUG_CAPABLE)	/* capability flags */
};
300 300
/* Intel X540 limits and capabilities */
static adapter_info_t ixgbe_X540_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1_X540
	| IXGBE_EICR_GPI_SDP2_X540), /* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN_X540
	| IXGBE_SDP2_GPIEN_X540), /* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE)	/* capability flags */
};
330 330
/* Intel X550 limits and capabilities */
static adapter_info_t ixgbe_X550_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	0x200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE)	/* capability flags */
};
354 354
355 355 /*
356 356 * Module Initialization Functions.
357 357 */
358 358
359 359 int
360 360 _init(void)
361 361 {
362 362 int status;
363 363
364 364 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
365 365
366 366 status = mod_install(&ixgbe_modlinkage);
367 367
368 368 if (status != DDI_SUCCESS) {
369 369 mac_fini_ops(&ixgbe_dev_ops);
370 370 }
371 371
372 372 return (status);
373 373 }
374 374
375 375 int
376 376 _fini(void)
377 377 {
378 378 int status;
379 379
380 380 status = mod_remove(&ixgbe_modlinkage);
381 381
382 382 if (status == DDI_SUCCESS) {
383 383 mac_fini_ops(&ixgbe_dev_ops);
384 384 }
385 385
386 386 return (status);
387 387 }
388 388
389 389 int
390 390 _info(struct modinfo *modinfop)
391 391 {
392 392 int status;
393 393
394 394 status = mod_info(&ixgbe_modlinkage, modinfop);
395 395
396 396 return (status);
397 397 }
398 398
399 399 /*
400 400 * ixgbe_attach - Driver attach.
401 401 *
402 402 * This function is the device specific initialization entry
403 403 * point. This entry point is required and must be written.
404 404 * The DDI_ATTACH command must be provided in the attach entry
405 405 * point. When attach() is called with cmd set to DDI_ATTACH,
406 406 * all normal kernel services (such as kmem_alloc(9F)) are
407 407 * available for use by the driver.
408 408 *
409 409 * The attach() function will be called once for each instance
410 410 * of the device on the system with cmd set to DDI_ATTACH.
411 411 * Until attach() succeeds, the only driver entry points which
412 412 * may be called are open(9E) and getinfo(9E).
413 413 */
414 414 static int
415 415 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
416 416 {
417 417 ixgbe_t *ixgbe;
418 418 struct ixgbe_osdep *osdep;
419 419 struct ixgbe_hw *hw;
420 420 int instance;
421 421 char taskqname[32];
422 422
423 423 /*
424 424 * Check the command and perform corresponding operations
425 425 */
426 426 switch (cmd) {
427 427 default:
428 428 return (DDI_FAILURE);
429 429
430 430 case DDI_RESUME:
431 431 return (ixgbe_resume(devinfo));
432 432
433 433 case DDI_ATTACH:
434 434 break;
435 435 }
436 436
437 437 /* Get the device instance */
438 438 instance = ddi_get_instance(devinfo);
439 439
440 440 /* Allocate memory for the instance data structure */
441 441 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
442 442
443 443 ixgbe->dip = devinfo;
444 444 ixgbe->instance = instance;
445 445
446 446 hw = &ixgbe->hw;
447 447 osdep = &ixgbe->osdep;
448 448 hw->back = osdep;
449 449 osdep->ixgbe = ixgbe;
450 450
451 451 /* Attach the instance pointer to the dev_info data structure */
452 452 ddi_set_driver_private(devinfo, ixgbe);
453 453
454 454 /*
455 455 * Initialize for FMA support
456 456 */
457 457 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
458 458 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
459 459 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
460 460 ixgbe_fm_init(ixgbe);
461 461 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
462 462
463 463 /*
464 464 * Map PCI config space registers
465 465 */
466 466 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
467 467 ixgbe_error(ixgbe, "Failed to map PCI configurations");
468 468 goto attach_fail;
469 469 }
470 470 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
471 471
472 472 /*
473 473 * Identify the chipset family
474 474 */
475 475 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
476 476 ixgbe_error(ixgbe, "Failed to identify hardware");
477 477 goto attach_fail;
478 478 }
479 479
480 480 /*
481 481 * Map device registers
482 482 */
483 483 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
484 484 ixgbe_error(ixgbe, "Failed to map device registers");
485 485 goto attach_fail;
|
↓ open down ↓ |
446 lines elided |
↑ open up ↑ |
486 486 }
487 487 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
488 488
489 489 /*
490 490 * Initialize driver parameters
491 491 */
492 492 ixgbe_init_properties(ixgbe);
493 493 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
494 494
495 495 /*
496 - * Register interrupt callback
497 - */
498 - if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
499 - ixgbe_error(ixgbe, "Failed to register interrupt callback");
500 - goto attach_fail;
501 - }
502 -
503 - /*
504 496 * Allocate interrupts
505 497 */
506 498 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
507 499 ixgbe_error(ixgbe, "Failed to allocate interrupts");
508 500 goto attach_fail;
509 501 }
510 502 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
511 503
512 504 /*
513 505 * Allocate rx/tx rings based on the ring numbers.
514 506 * The actual numbers of rx/tx rings are decided by the number of
515 507 * allocated interrupt vectors, so we should allocate the rings after
516 508 * interrupts are allocated.
517 509 */
518 510 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
519 511 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
520 512 goto attach_fail;
521 513 }
522 514 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
523 515
524 516 /*
525 517 * Map rings to interrupt vectors
526 518 */
527 519 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
528 520 ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
529 521 goto attach_fail;
530 522 }
531 523
532 524 /*
533 525 * Add interrupt handlers
534 526 */
535 527 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
536 528 ixgbe_error(ixgbe, "Failed to add interrupt handlers");
537 529 goto attach_fail;
538 530 }
539 531 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
540 532
541 533 /*
542 534 * Create a taskq for sfp-change
543 535 */
544 536 (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
545 537 if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
546 538 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
547 539 ixgbe_error(ixgbe, "sfp_taskq create failed");
548 540 goto attach_fail;
549 541 }
550 542 ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
551 543
552 544 /*
553 545 * Create a taskq for over-temp
554 546 */
555 547 (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
556 548 if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
557 549 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
558 550 ixgbe_error(ixgbe, "overtemp_taskq create failed");
559 551 goto attach_fail;
560 552 }
561 553 ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;
562 554
563 555 /*
564 556 * Create a taskq for processing external PHY interrupts
565 557 */
566 558 (void) sprintf(taskqname, "ixgbe%d_phy_taskq", instance);
567 559 if ((ixgbe->phy_taskq = ddi_taskq_create(devinfo, taskqname,
568 560 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
569 561 ixgbe_error(ixgbe, "phy_taskq create failed");
570 562 goto attach_fail;
571 563 }
572 564 ixgbe->attach_progress |= ATTACH_PROGRESS_PHY_TASKQ;
573 565
574 566 /*
575 567 * Initialize driver parameters
576 568 */
577 569 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
578 570 ixgbe_error(ixgbe, "Failed to initialize driver settings");
579 571 goto attach_fail;
580 572 }
581 573
582 574 /*
583 575 * Initialize mutexes for this device.
584 576 * Do this before enabling the interrupt handler and
585 577 * register the softint to avoid the condition where
586 578 * interrupt handler can try using uninitialized mutex.
587 579 */
588 580 ixgbe_init_locks(ixgbe);
589 581 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
590 582
591 583 /*
592 584 * Initialize chipset hardware
593 585 */
594 586 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
595 587 ixgbe_error(ixgbe, "Failed to initialize adapter");
596 588 goto attach_fail;
597 589 }
598 590 ixgbe->link_check_complete = B_FALSE;
599 591 ixgbe->link_check_hrtime = gethrtime() +
600 592 (IXGBE_LINK_UP_TIME * 100000000ULL);
601 593 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
602 594
603 595 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
604 596 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
605 597 goto attach_fail;
606 598 }
607 599
608 600 /*
609 601 * Initialize adapter capabilities
610 602 */
611 603 ixgbe_init_params(ixgbe);
612 604
613 605 /*
614 606 * Initialize statistics
615 607 */
616 608 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
617 609 ixgbe_error(ixgbe, "Failed to initialize statistics");
618 610 goto attach_fail;
619 611 }
620 612 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
621 613
622 614 /*
623 615 * Register the driver to the MAC
624 616 */
625 617 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
626 618 ixgbe_error(ixgbe, "Failed to register MAC");
627 619 goto attach_fail;
628 620 }
629 621 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
630 622 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
631 623
632 624 ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
633 625 IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
634 626 if (ixgbe->periodic_id == 0) {
635 627 ixgbe_error(ixgbe, "Failed to add the link check timer");
636 628 goto attach_fail;
637 629 }
638 630 ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
639 631
640 632 /*
641 633 * Now that mutex locks are initialized, and the chip is also
642 634 * initialized, enable interrupts.
|
↓ open down ↓ |
129 lines elided |
↑ open up ↑ |
643 635 */
644 636 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
645 637 ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
646 638 goto attach_fail;
647 639 }
648 640 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
649 641
650 642 ixgbe_log(ixgbe, "%s", ixgbe_ident);
651 643 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
652 644
645 + /*
646 + * Register interrupt callback
647 + */
648 + if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX)
649 + if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
650 + ixgbe_error(ixgbe,
651 + "Failed to register interrupt callback");
652 + }
653 +
653 654 return (DDI_SUCCESS);
654 655
655 656 attach_fail:
656 657 ixgbe_unconfigure(devinfo, ixgbe);
657 658 return (DDI_FAILURE);
658 659 }
659 660
660 661 /*
661 662 * ixgbe_detach - Driver detach.
662 663 *
663 664 * The detach() function is the complement of the attach routine.
664 665 * If cmd is set to DDI_DETACH, detach() is used to remove the
665 666 * state associated with a given instance of a device node
666 667 * prior to the removal of that instance from the system.
667 668 *
668 669 * The detach() function will be called once for each instance
669 670 * of the device for which there has been a successful attach()
670 671 * once there are no longer any opens on the device.
671 672 *
672 673 * Interrupts routine are disabled, All memory allocated by this
673 674 * driver are freed.
674 675 */
675 676 static int
676 677 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
677 678 {
678 679 ixgbe_t *ixgbe;
679 680
680 681 /*
681 682 * Check detach command
682 683 */
683 684 switch (cmd) {
684 685 default:
685 686 return (DDI_FAILURE);
686 687
687 688 case DDI_SUSPEND:
688 689 return (ixgbe_suspend(devinfo));
689 690
690 691 case DDI_DETACH:
691 692 break;
692 693 }
693 694
694 695 /*
695 696 * Get the pointer to the driver private data structure
696 697 */
697 698 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
698 699 if (ixgbe == NULL)
699 700 return (DDI_FAILURE);
700 701
701 702 /*
702 703 * If the device is still running, it needs to be stopped first.
703 704 * This check is necessary because under some specific circumstances,
704 705 * the detach routine can be called without stopping the interface
705 706 * first.
706 707 */
707 708 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
708 709 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
709 710 mutex_enter(&ixgbe->gen_lock);
710 711 ixgbe_stop(ixgbe, B_TRUE);
711 712 mutex_exit(&ixgbe->gen_lock);
712 713 /* Disable and stop the watchdog timer */
713 714 ixgbe_disable_watchdog_timer(ixgbe);
714 715 }
715 716
716 717 /*
717 718 * Check if there are still rx buffers held by the upper layer.
718 719 * If so, fail the detach.
719 720 */
720 721 if (!ixgbe_rx_drain(ixgbe))
721 722 return (DDI_FAILURE);
722 723
723 724 /*
724 725 * Do the remaining unconfigure routines
725 726 */
726 727 ixgbe_unconfigure(devinfo, ixgbe);
727 728
728 729 return (DDI_SUCCESS);
729 730 }
730 731
731 732 /*
732 733 * quiesce(9E) entry point.
733 734 *
734 735 * This function is called when the system is single-threaded at high
735 736 * PIL with preemption disabled. Therefore, this function must not be
736 737 * blocked.
737 738 *
738 739 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
739 740 * DDI_FAILURE indicates an error condition and should almost never happen.
740 741 */
741 742 static int
742 743 ixgbe_quiesce(dev_info_t *devinfo)
743 744 {
744 745 ixgbe_t *ixgbe;
745 746 struct ixgbe_hw *hw;
746 747
747 748 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
748 749
749 750 if (ixgbe == NULL)
750 751 return (DDI_FAILURE);
751 752
752 753 hw = &ixgbe->hw;
753 754
754 755 /*
755 756 * Disable the adapter interrupts
756 757 */
757 758 ixgbe_disable_adapter_interrupts(ixgbe);
758 759
759 760 /*
760 761 * Tell firmware driver is no longer in control
761 762 */
762 763 ixgbe_release_driver_control(hw);
763 764
764 765 /*
765 766 * Reset the chipset
766 767 */
767 768 (void) ixgbe_reset_hw(hw);
768 769
769 770 /*
770 771 * Reset PHY
|
↓ open down ↓ |
108 lines elided |
↑ open up ↑ |
771 772 */
772 773 (void) ixgbe_reset_phy(hw);
773 774
774 775 return (DDI_SUCCESS);
775 776 }
776 777
777 778 static void
778 779 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
779 780 {
780 781 /*
782 + * Unregister interrupt callback handler
783 + */
784 + if (ixgbe->cb_hdl != NULL)
785 + (void) ddi_cb_unregister(ixgbe->cb_hdl);
786 +
787 + /*
781 788 * Disable interrupt
782 789 */
783 790 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
784 791 (void) ixgbe_disable_intrs(ixgbe);
785 792 }
786 793
787 794 /*
788 795 * remove the link check timer
789 796 */
790 797 if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
791 798 if (ixgbe->periodic_id != NULL) {
792 799 ddi_periodic_delete(ixgbe->periodic_id);
793 800 ixgbe->periodic_id = NULL;
794 801 }
795 802 }
796 803
797 804 /*
798 805 * Unregister MAC
799 806 */
800 807 if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
801 808 (void) mac_unregister(ixgbe->mac_hdl);
802 809 }
803 810
804 811 /*
805 812 * Free statistics
806 813 */
807 814 if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
808 815 kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
809 816 }
810 817
811 818 /*
812 819 * Remove interrupt handlers
813 820 */
814 821 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
815 822 ixgbe_rem_intr_handlers(ixgbe);
816 823 }
817 824
818 825 /*
819 826 * Remove taskq for sfp-status-change
820 827 */
821 828 if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
822 829 ddi_taskq_destroy(ixgbe->sfp_taskq);
823 830 }
824 831
825 832 /*
826 833 * Remove taskq for over-temp
827 834 */
828 835 if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
829 836 ddi_taskq_destroy(ixgbe->overtemp_taskq);
830 837 }
831 838
832 839 /*
833 840 * Remove taskq for external PHYs
834 841 */
835 842 if (ixgbe->attach_progress & ATTACH_PROGRESS_PHY_TASKQ) {
836 843 ddi_taskq_destroy(ixgbe->phy_taskq);
837 844 }
838 845
839 846 /*
840 847 * Remove interrupts
841 848 */
842 849 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
843 850 ixgbe_rem_intrs(ixgbe);
844 851 }
845 852
846 853 /*
847 854 * Unregister interrupt callback handler
848 855 */
849 856 if (ixgbe->cb_hdl != NULL) {
850 857 (void) ddi_cb_unregister(ixgbe->cb_hdl);
851 858 }
852 859
853 860 /*
854 861 * Remove driver properties
855 862 */
856 863 if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
857 864 (void) ddi_prop_remove_all(devinfo);
858 865 }
859 866
860 867 /*
861 868 * Stop the chipset
862 869 */
863 870 if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
864 871 mutex_enter(&ixgbe->gen_lock);
865 872 ixgbe_chip_stop(ixgbe);
866 873 mutex_exit(&ixgbe->gen_lock);
867 874 }
868 875
869 876 /*
870 877 * Free register handle
871 878 */
872 879 if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
873 880 if (ixgbe->osdep.reg_handle != NULL)
874 881 ddi_regs_map_free(&ixgbe->osdep.reg_handle);
875 882 }
876 883
877 884 /*
878 885 * Free PCI config handle
879 886 */
880 887 if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
881 888 if (ixgbe->osdep.cfg_handle != NULL)
882 889 pci_config_teardown(&ixgbe->osdep.cfg_handle);
883 890 }
884 891
885 892 /*
886 893 * Free locks
887 894 */
888 895 if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
889 896 ixgbe_destroy_locks(ixgbe);
890 897 }
891 898
892 899 /*
893 900 * Free the rx/tx rings
894 901 */
895 902 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
896 903 ixgbe_free_rings(ixgbe);
897 904 }
898 905
899 906 /*
900 907 * Unregister FMA capabilities
901 908 */
902 909 if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
903 910 ixgbe_fm_fini(ixgbe);
904 911 }
905 912
906 913 /*
907 914 * Free the driver data structure
908 915 */
909 916 kmem_free(ixgbe, sizeof (ixgbe_t));
910 917
911 918 ddi_set_driver_private(devinfo, NULL);
912 919 }
913 920
914 921 /*
915 922 * ixgbe_register_mac - Register the driver and its function pointers with
916 923 * the GLD interface.
917 924 */
918 925 static int
919 926 ixgbe_register_mac(ixgbe_t *ixgbe)
920 927 {
921 928 struct ixgbe_hw *hw = &ixgbe->hw;
922 929 mac_register_t *mac;
923 930 int status;
924 931
925 932 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
926 933 return (IXGBE_FAILURE);
927 934
928 935 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
929 936 mac->m_driver = ixgbe;
930 937 mac->m_dip = ixgbe->dip;
931 938 mac->m_src_addr = hw->mac.addr;
932 939 mac->m_callbacks = &ixgbe_m_callbacks;
933 940 mac->m_min_sdu = 0;
934 941 mac->m_max_sdu = ixgbe->default_mtu;
935 942 mac->m_margin = VLAN_TAGSZ;
936 943 mac->m_priv_props = ixgbe_priv_props;
937 944 mac->m_v12n = MAC_VIRT_LEVEL1;
938 945
939 946 status = mac_register(mac, &ixgbe->mac_hdl);
940 947
941 948 mac_free(mac);
942 949
943 950 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
944 951 }
945 952
946 953 /*
947 954 * ixgbe_identify_hardware - Identify the type of the chipset.
948 955 */
/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 *
 * Reads the PCI identity registers from config space, resolves the MAC
 * type via the common code, and selects the matching capability table.
 * Returns IXGBE_FAILURE for unrecognized/unsupported parts.
 *
 * NOTE(review): ixgbe->capab is set to point at a shared, statically
 * allocated capability table (e.g. ixgbe_82598eb_cap), and the |= below
 * mutates that shared table in place — presumably acceptable because all
 * instances of a given MAC type want the same flags; confirm if multiple
 * heterogeneous instances are a concern.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Set the mac type of the adapter based on the device id
	 */
	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Install adapter capabilities
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
		ixgbe->capab = &ixgbe_82598eb_cap;

		/* Copper 82598 parts add fan-failure detection via SDP1. */
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
			ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
		}
		break;

	case ixgbe_mac_82599EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
		ixgbe->capab = &ixgbe_82599eb_cap;

		/* The T3 LOM variant exposes a temperature sensor on SDP0. */
		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
			ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
		}
		break;

	case ixgbe_mac_X540:
		IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
		ixgbe->capab = &ixgbe_X540_cap;
		/*
		 * For now, X540 is all set in its capab structure.
		 * As other X540 variants show up, things can change here.
		 */
		break;

	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		IXGBE_DEBUGLOG_0(ixgbe, "identify X550 adapter\n");
		ixgbe->capab = &ixgbe_X550_cap;

		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
			ixgbe->capab->flags |= IXGBE_FLAG_SFP_PLUG_CAPABLE;

		/*
		 * Link detection on X552 SFP+ and X552/X557-AT
		 */
		if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
		    hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
			ixgbe->capab->other_intr |=
			    IXGBE_EIMS_GPI_SDP0_BY_MAC(hw);
			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN_X540;
		}
		break;

	default:
		IXGBE_DEBUGLOG_1(ixgbe,
		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
		    hw->mac.type);
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}
1039 1046
1040 1047 /*
1041 1048 * ixgbe_regs_map - Map the device registers.
1042 1049 *
1043 1050 */
1044 1051 static int
1045 1052 ixgbe_regs_map(ixgbe_t *ixgbe)
1046 1053 {
1047 1054 dev_info_t *devinfo = ixgbe->dip;
1048 1055 struct ixgbe_hw *hw = &ixgbe->hw;
1049 1056 struct ixgbe_osdep *osdep = &ixgbe->osdep;
1050 1057 off_t mem_size;
1051 1058
1052 1059 /*
1053 1060 * First get the size of device registers to be mapped.
1054 1061 */
1055 1062 if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
1056 1063 != DDI_SUCCESS) {
1057 1064 return (IXGBE_FAILURE);
1058 1065 }
1059 1066
1060 1067 /*
1061 1068 * Call ddi_regs_map_setup() to map registers
1062 1069 */
1063 1070 if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
1064 1071 (caddr_t *)&hw->hw_addr, 0,
1065 1072 mem_size, &ixgbe_regs_acc_attr,
1066 1073 &osdep->reg_handle)) != DDI_SUCCESS) {
1067 1074 return (IXGBE_FAILURE);
1068 1075 }
1069 1076
1070 1077 return (IXGBE_SUCCESS);
1071 1078 }
1072 1079
1073 1080 /*
1074 1081 * ixgbe_init_properties - Initialize driver properties.
1075 1082 */
/*
 * Thin wrapper around ixgbe_get_conf(); kept as a separate attach step so
 * property setup has its own ATTACH_PROGRESS_PROPS stage.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);
}
1085 1092
1086 1093 /*
1087 1094 * ixgbe_init_driver_settings - Initialize driver settings.
1088 1095 *
1089 1096 * The settings include hardware function pointers, bus information,
1090 1097 * rx/tx rings settings, link state, and any other parameters that
1091 1098 * need to be setup during driver initialization.
1092 1099 */
/*
 * ixgbe_init_driver_settings - Initialize driver settings.
 *
 * The settings include hardware function pointers, bus information,
 * rx/tx rings settings, link state, and any other parameters that
 * need to be setup during driver initialization.
 *
 * Returns IXGBE_FAILURE only if the shared-code function table cannot
 * be initialized for this MAC type; everything else is plain assignment.
 */
static int
ixgbe_init_driver_settings(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	dev_info_t *devinfo = ixgbe->dip;
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_group_t *rx_group;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t rx_size;
	uint32_t tx_size;
	uint32_t ring_per_group;
	int i;

	/*
	 * Initialize chipset specific hardware function pointers
	 */
	if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Get the system page size
	 */
	ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);

	/*
	 * Set rx buffer size
	 *
	 * The IP header alignment room is counted in the calculation.
	 * The rx buffer size is in unit of 1K that is required by the
	 * chipset hardware.
	 */
	/* Round up to the next 1 KiB multiple (hardware granularity). */
	rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
	ixgbe->rx_buf_size = ((rx_size >> 10) +
	    ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	/*
	 * Set tx buffer size, also rounded up to a 1 KiB multiple.
	 */
	tx_size = ixgbe->max_frame_size;
	ixgbe->tx_buf_size = ((tx_size >> 10) +
	    ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	/*
	 * Initialize rx/tx rings/groups parameters.  Rings are distributed
	 * evenly across groups; hw_index maps the logical ring to its
	 * hardware queue.
	 */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		rx_ring->index = i;
		rx_ring->ixgbe = ixgbe;
		rx_ring->group_index = i / ring_per_group;
		rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
	}

	for (i = 0; i < ixgbe->num_rx_groups; i++) {
		rx_group = &ixgbe->rx_groups[i];
		rx_group->index = i;
		rx_group->ixgbe = ixgbe;
	}

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		tx_ring->index = i;
		tx_ring->ixgbe = ixgbe;
		/* Choose the recycle routine per the head-writeback setting. */
		if (ixgbe->tx_head_wb_enable)
			tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
		else
			tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;

		tx_ring->ring_size = ixgbe->tx_ring_size;
		/* Free list is sized at 1.5x the ring size. */
		tx_ring->free_list_size = ixgbe->tx_ring_size +
		    (ixgbe->tx_ring_size >> 1);
	}

	/*
	 * Initialize values of interrupt throttling rate: every vector
	 * inherits the configured rate of vector 0.
	 */
	for (i = 1; i < MAX_INTR_VECTOR; i++)
		ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];

	/*
	 * The initial link state should be "unknown"
	 */
	ixgbe->link_state = LINK_STATE_UNKNOWN;

	return (IXGBE_SUCCESS);
}
1181 1188
1182 1189 /*
1183 1190 * ixgbe_init_locks - Initialize locks.
1184 1191 */
1185 1192 static void
1186 1193 ixgbe_init_locks(ixgbe_t *ixgbe)
1187 1194 {
1188 1195 ixgbe_rx_ring_t *rx_ring;
1189 1196 ixgbe_tx_ring_t *tx_ring;
1190 1197 int i;
1191 1198
1192 1199 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1193 1200 rx_ring = &ixgbe->rx_rings[i];
1194 1201 mutex_init(&rx_ring->rx_lock, NULL,
1195 1202 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1196 1203 }
1197 1204
1198 1205 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1199 1206 tx_ring = &ixgbe->tx_rings[i];
1200 1207 mutex_init(&tx_ring->tx_lock, NULL,
1201 1208 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1202 1209 mutex_init(&tx_ring->recycle_lock, NULL,
1203 1210 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1204 1211 mutex_init(&tx_ring->tcb_head_lock, NULL,
1205 1212 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1206 1213 mutex_init(&tx_ring->tcb_tail_lock, NULL,
1207 1214 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1208 1215 }
1209 1216
1210 1217 mutex_init(&ixgbe->gen_lock, NULL,
1211 1218 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1212 1219
1213 1220 mutex_init(&ixgbe->watchdog_lock, NULL,
1214 1221 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1215 1222 }
1216 1223
1217 1224 /*
1218 1225 * ixgbe_destroy_locks - Destroy locks.
1219 1226 */
1220 1227 static void
1221 1228 ixgbe_destroy_locks(ixgbe_t *ixgbe)
1222 1229 {
1223 1230 ixgbe_rx_ring_t *rx_ring;
1224 1231 ixgbe_tx_ring_t *tx_ring;
1225 1232 int i;
1226 1233
1227 1234 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1228 1235 rx_ring = &ixgbe->rx_rings[i];
1229 1236 mutex_destroy(&rx_ring->rx_lock);
1230 1237 }
1231 1238
1232 1239 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1233 1240 tx_ring = &ixgbe->tx_rings[i];
|
↓ open down ↓ |
443 lines elided |
↑ open up ↑ |
1234 1241 mutex_destroy(&tx_ring->tx_lock);
1235 1242 mutex_destroy(&tx_ring->recycle_lock);
1236 1243 mutex_destroy(&tx_ring->tcb_head_lock);
1237 1244 mutex_destroy(&tx_ring->tcb_tail_lock);
1238 1245 }
1239 1246
1240 1247 mutex_destroy(&ixgbe->gen_lock);
1241 1248 mutex_destroy(&ixgbe->watchdog_lock);
1242 1249 }
1243 1250
1251 +/*
1252 + * We need to try and determine which LED index in hardware corresponds to the
1253 + * link/activity LED. This is the one that'll be overwritten when we perform
1254 + * GLDv3 LED activity.
1255 + */
1256 +static void
1257 +ixgbe_led_init(ixgbe_t *ixgbe)
1258 +{
1259 + uint32_t reg, i;
1260 + struct ixgbe_hw *hw = &ixgbe->hw;
1261 +
1262 + reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
1263 + for (i = 0; i < 4; i++) {
1264 + if (((reg >> IXGBE_LED_MODE_SHIFT(i)) &
1265 + IXGBE_LED_MODE_MASK_BASE) == IXGBE_LED_LINK_ACTIVE) {
1266 + ixgbe->ixgbe_led_index = i;
1267 + return;
1268 + }
1269 + }
1270 +
1271 + /*
1272 + * If we couldn't determine this, we use the default for various MACs
1273 + * based on information Intel has inserted into other drivers over the
1274 + * years. Note, when we have support for the X553 which should add the
1275 + * ixgbe_x550_em_a mac type, that should be at index 0.
1276 + */
1277 + switch (hw->mac.type) {
1278 + case ixgbe_mac_X550EM_x:
1279 + ixgbe->ixgbe_led_index = 1;
1280 + break;
1281 + default:
1282 + ixgbe->ixgbe_led_index = 2;
1283 + break;
1284 + }
1285 +}
1286 +
/*
 * DDI_RESUME handler: restart the chipset if the interface was running
 * when it was suspended, clear the SUSPENDED state, and kick the MAC
 * layer so any tx rings blocked during suspend resume transmitting.
 */
static int
ixgbe_resume(dev_info_t *devinfo)
{
	ixgbe_t *ixgbe;
	int i;

	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		/* B_FALSE: reuse the rx/tx buffers allocated before suspend */
		if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
			mutex_exit(&ixgbe->gen_lock);
			return (DDI_FAILURE);
		}

		/*
		 * Enable and start the watchdog timer
		 */
		ixgbe_enable_watchdog_timer(ixgbe);
	}

	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);

	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		/* Tell the MAC layer the tx rings can be used again. */
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	return (DDI_SUCCESS);
}
1281 1324
1282 1325 static int
1283 1326 ixgbe_suspend(dev_info_t *devinfo)
1284 1327 {
1285 1328 ixgbe_t *ixgbe;
1286 1329
1287 1330 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1288 1331 if (ixgbe == NULL)
1289 1332 return (DDI_FAILURE);
1290 1333
1291 1334 mutex_enter(&ixgbe->gen_lock);
1292 1335
1293 1336 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
1294 1337 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1295 1338 mutex_exit(&ixgbe->gen_lock);
1296 1339 return (DDI_SUCCESS);
1297 1340 }
1298 1341 ixgbe_stop(ixgbe, B_FALSE);
1299 1342
1300 1343 mutex_exit(&ixgbe->gen_lock);
1301 1344
1302 1345 /*
1303 1346 * Disable and stop the watchdog timer
1304 1347 */
1305 1348 ixgbe_disable_watchdog_timer(ixgbe);
1306 1349
1307 1350 return (DDI_SUCCESS);
1308 1351 }
1309 1352
1310 1353 /*
1311 1354 * ixgbe_init - Initialize the device.
1312 1355 */
/*
 * ixgbe_init - Initialize the device.
 *
 * Runs the common-code hardware init, validates the NVM, programs flow
 * control thresholds, brings up the link and chipset, records the board
 * identity, and determines the activity-LED index.  On any fatal error
 * the PHY is reset, an FMA service-lost impact is reported and
 * IXGBE_FAILURE is returned.
 */
static int
ixgbe_init(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u8 pbanum[IXGBE_PBANUM_LENGTH];
	int rv;

	mutex_enter(&ixgbe->gen_lock);

	/*
	 * Configure/Initialize hardware
	 */
	rv = ixgbe_init_hw(hw);
	if (rv != IXGBE_SUCCESS) {
		switch (rv) {

		/*
		 * The first three errors are not prohibitive to us progressing
		 * further, and are mainly advisory in nature. In the case of a
		 * SFP module not being present or not deemed supported by the
		 * common code, we advise the operator of this fact but carry on
		 * instead of failing hard, as SFPs can be inserted or replaced
		 * while the driver is running. In the case of an unknown error,
		 * we fail-hard, logging the reason and emitting a FMA event.
		 */
		case IXGBE_ERR_EEPROM_VERSION:
			ixgbe_error(ixgbe,
			    "This Intel 10Gb Ethernet device is pre-release and"
			    " contains outdated firmware. Please contact your"
			    " hardware vendor for a replacement.");
			break;
		case IXGBE_ERR_SFP_NOT_PRESENT:
			ixgbe_error(ixgbe,
			    "No SFP+ module detected on this interface. Please "
			    "install a supported SFP+ module for this "
			    "interface to become operational.");
			break;
		case IXGBE_ERR_SFP_NOT_SUPPORTED:
			ixgbe_error(ixgbe,
			    "Unsupported SFP+ module detected. Please replace "
			    "it with a supported SFP+ module per Intel "
			    "documentation, or bypass this check with "
			    "allow_unsupported_sfp=1 in ixgbe.conf.");
			break;
		default:
			ixgbe_error(ixgbe,
			    "Failed to initialize hardware. ixgbe_init_hw "
			    "returned %d", rv);
			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}

	/*
	 * Need to init eeprom before validating the checksum.
	 */
	if (ixgbe_init_eeprom_params(hw) < 0) {
		ixgbe_error(ixgbe,
		    "Unable to intitialize the eeprom interface.");
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * NVM validation
	 */
	if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state. Call it again,
		 * if it fails a second time it's a real issue.
		 */
		if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
			ixgbe_error(ixgbe,
			    "Invalid NVM checksum. Please contact "
			    "the vendor to update the NVM.");
			ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
			goto init_fail;
		}
	}

	/*
	 * Setup default flow control thresholds - enable/disable
	 * & flow control type is controlled by ixgbe.conf
	 */
	{
		uint32_t rxpb, frame, size, hitmp, lotmp;

		frame = ixgbe->max_frame_size;

		/*
		 * Calculate High and Low Water marks from the delay-value
		 * macros; the X540 uses its own variants of the formulas.
		 */
		if (hw->mac.type == ixgbe_mac_X540) {
			hitmp = IXGBE_DV_X540(frame, frame);
			lotmp = IXGBE_LOW_DV_X540(frame);
		} else {
			hitmp = IXGBE_DV(frame, frame);
			lotmp = IXGBE_LOW_DV(frame);
		}
		/*
		 * High water = rx packet buffer size (in KB) minus the
		 * delay value (converted from bytes to KB).
		 */
		size = IXGBE_BT2KB(hitmp);
		rxpb = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) >> 10;
		hw->fc.high_water[0] = rxpb - size;
		hw->fc.low_water[0] = IXGBE_BT2KB(lotmp);
	}

	hw->fc.pause_time = DEFAULT_FCPAUSE;
	hw->fc.send_xon = B_TRUE;

	/*
	 * Initialize flow control
	 */
	(void) ixgbe_start_hw(hw);

	/*
	 * Initialize link settings
	 */
	(void) ixgbe_driver_setup_link(ixgbe, B_FALSE);

	/*
	 * Initialize the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto init_fail;
	}

	/*
	 * Read identifying information and place in devinfo.
	 */
	pbanum[0] = '\0';
	(void) ixgbe_read_pba_string(hw, pbanum, sizeof (pbanum));
	if (*pbanum != '\0') {
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, ixgbe->dip,
		    "printed-board-assembly", (char *)pbanum);
	}

	/*
	 * Determine LED index.
	 */
	ixgbe_led_init(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto init_fail;
	}

	mutex_exit(&ixgbe->gen_lock);
	return (IXGBE_SUCCESS);

init_fail:
	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	mutex_exit(&ixgbe->gen_lock);
	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	return (IXGBE_FAILURE);
}
1448 1513
1449 1514 /*
1450 1515 * ixgbe_chip_start - Initialize and start the chipset hardware.
1451 1516 */
/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 *
 * Caller must hold gen_lock.  Programs addressing, interrupt vectors
 * and throttling, disables WoL/EEE, powers up the laser/PHY, and takes
 * driver control of the device.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Get the mac address
	 * This function should handle SPARC case correctly.
	 */
	if (!ixgbe_find_mac_address(ixgbe)) {
		ixgbe_error(ixgbe, "Failed to get the mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Validate the mac address
	 */
	(void) ixgbe_init_rx_addrs(hw);
	if (!is_valid_mac_addr(hw->mac.addr)) {
		ixgbe_error(ixgbe, "Invalid mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Re-enable relaxed ordering for performance. It is disabled
	 * by default in the hardware init.
	 */
	if (ixgbe->relax_order_enable == B_TRUE)
		ixgbe_enable_relaxed_ordering(hw);

	/*
	 * Setup adapter interrupt vectors
	 */
	ixgbe_setup_adapter_vector(ixgbe);

	/*
	 * Initialize unicast addresses.
	 */
	ixgbe_init_unicst(ixgbe);

	/*
	 * Setup and initialize the mctable structures.
	 */
	ixgbe_setup_multicst(ixgbe);

	/*
	 * Set interrupt throttling rate
	 */
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
	}

	/*
	 * Disable Wake-on-LAN
	 */
	IXGBE_WRITE_REG(hw, IXGBE_WUC, 0);

	/*
	 * Some adapters offer Energy Efficient Ethernet (EEE) support.
	 * Due to issues with EEE in e1000g/igb, we disable this by default
	 * as a precautionary measure.
	 *
	 * Currently, the only known adapter which supports EEE in the ixgbe
	 * line is 8086,15AB (IXGBE_DEV_ID_X550EM_X_KR), and only after the
	 * first revision of it, as well as any X550 with MAC type 6 (non-EM)
	 */
	(void) ixgbe_setup_eee(hw, B_FALSE);

	/*
	 * Turn on any present SFP Tx laser
	 */
	ixgbe_enable_tx_laser(hw);

	/*
	 * Power on the PHY
	 */
	(void) ixgbe_set_phy_power(hw, B_TRUE);

	/*
	 * Save the state of the PHY
	 */
	ixgbe_get_hw_state(ixgbe);

	/*
	 * Make sure driver has control
	 */
	ixgbe_get_driver_control(hw);

	return (IXGBE_SUCCESS);
}
1545 1610
1546 1611 /*
1547 1612 * ixgbe_chip_stop - Stop the chipset hardware
1548 1613 */
/*
 * ixgbe_chip_stop - Stop the chipset hardware
 *
 * Caller must hold gen_lock.  Stops the adapter, resets chip and PHY,
 * enters low-power link-up mode (or powers the PHY down), kills the SFP
 * laser and releases driver control back to firmware.
 */
static void
ixgbe_chip_stop(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int rv;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Stop interrupt generation and disable Tx unit.
	 * adapter_stopped is cleared first so ixgbe_stop_adapter()
	 * does not short-circuit as already stopped.
	 */
	hw->adapter_stopped = B_FALSE;
	(void) ixgbe_stop_adapter(hw);

	/*
	 * Reset the chipset
	 */
	(void) ixgbe_reset_hw(hw);

	/*
	 * Reset PHY
	 */
	(void) ixgbe_reset_phy(hw);

	/*
	 * Enter LPLU (Low Power, Link Up) mode, if available. Avoid resetting
	 * the PHY while doing so. Else, just power down the PHY.
	 */
	if (hw->phy.ops.enter_lplu != NULL) {
		hw->phy.reset_disable = B_TRUE;
		rv = hw->phy.ops.enter_lplu(hw);
		if (rv != IXGBE_SUCCESS)
			ixgbe_error(ixgbe, "Error while entering LPLU: %d", rv);
		hw->phy.reset_disable = B_FALSE;
	} else {
		(void) ixgbe_set_phy_power(hw, B_FALSE);
	}

	/*
	 * Turn off any present SFP Tx laser
	 * Expected for health and safety reasons
	 */
	ixgbe_disable_tx_laser(hw);

	/*
	 * Tell firmware driver is no longer in control
	 */
	ixgbe_release_driver_control(hw);

}
1599 1664
1600 1665 /*
1601 1666 * ixgbe_reset - Reset the chipset and re-start the driver.
1602 1667 *
1603 1668 * It involves stopping and re-starting the chipset,
1604 1669 * and re-configuring the rx/tx rings.
1605 1670 */
/*
 * ixgbe_reset - Reset the chipset and re-start the driver.
 *
 * It involves stopping and re-starting the chipset,
 * and re-configuring the rx/tx rings.
 *
 * Returns IXGBE_FAILURE (with gen_lock released and the watchdog still
 * stopped) if the restart fails; otherwise IXGBE_SUCCESS.
 */
static int
ixgbe_reset(ixgbe_t *ixgbe)
{
	int i;

	/*
	 * Disable and stop the watchdog timer
	 */
	ixgbe_disable_watchdog_timer(ixgbe);

	mutex_enter(&ixgbe->gen_lock);

	ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
	atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);

	/* B_FALSE: keep the DMA buffers across the stop/start cycle. */
	ixgbe_stop(ixgbe, B_FALSE);

	if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
		mutex_exit(&ixgbe->gen_lock);
		return (IXGBE_FAILURE);
	}

	/*
	 * After resetting, need to recheck the link status.
	 */
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);

	atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);

	/* Wake any tx rings the MAC layer blocked while we were down. */
	if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
		for (i = 0; i < ixgbe->num_tx_rings; i++) {
			mac_tx_ring_update(ixgbe->mac_hdl,
			    ixgbe->tx_rings[i].ring_handle);
		}
	}

	mutex_exit(&ixgbe->gen_lock);

	/*
	 * Enable and start the watchdog timer
	 */
	ixgbe_enable_watchdog_timer(ixgbe);

	return (IXGBE_SUCCESS);
}
1653 1718
1654 1719 /*
1655 1720 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1656 1721 */
/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 *
 * Must only run once the chipset is stopped or the link is down, since
 * it discards every packet still sitting in the tx work lists and
 * rewinds the descriptor rings to empty.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	uint32_t desc_num;
	int i, j;

	LINK_LIST_INIT(&pending_list);

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];

		mutex_enter(&tx_ring->recycle_lock);

		/*
		 * Clean the pending tx data - the pending packets in the
		 * work_list that have no chances to be transmitted again.
		 *
		 * We must ensure the chipset is stopped or the link is down
		 * before cleaning the transmit packets.
		 */
		desc_num = 0;
		for (j = 0; j < tx_ring->ring_size; j++) {
			tcb = tx_ring->work_list[j];
			if (tcb != NULL) {
				desc_num += tcb->desc_num;

				tx_ring->work_list[j] = NULL;

				/*
				 * NOTE(review): ixgbe_free_tcb() presumably
				 * releases the tcb's mblk/DMA resources but
				 * not the tcb structure itself, since the tcb
				 * is linked onto pending_list afterwards —
				 * confirm against ixgbe_tx.c.
				 */
				ixgbe_free_tcb(tcb);

				LIST_PUSH_TAIL(&pending_list, &tcb->link);
			}
		}

		if (desc_num > 0) {
			atomic_add_32(&tx_ring->tbd_free, desc_num);
			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

			/*
			 * Reset the head and tail pointers of the tbd ring;
			 * Reset the writeback head if it's enable.
			 */
			tx_ring->tbd_head = 0;
			tx_ring->tbd_tail = 0;
			if (ixgbe->tx_head_wb_enable)
				*tx_ring->tbd_head_wb = 0;

			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDH(tx_ring->index), 0);
			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDT(tx_ring->index), 0);
		}

		mutex_exit(&tx_ring->recycle_lock);

		/*
		 * Add the tx control blocks in the pending list to
		 * the free list.
		 */
		ixgbe_put_free_list(tx_ring, &pending_list);
	}
}
1722 1787
1723 1788 /*
1724 1789 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1725 1790 * transmitted.
1726 1791 */
1727 1792 static boolean_t
1728 1793 ixgbe_tx_drain(ixgbe_t *ixgbe)
1729 1794 {
1730 1795 ixgbe_tx_ring_t *tx_ring;
1731 1796 boolean_t done;
1732 1797 int i, j;
1733 1798
1734 1799 /*
1735 1800 * Wait for a specific time to allow pending tx packets
1736 1801 * to be transmitted.
1737 1802 *
1738 1803 * Check the counter tbd_free to see if transmission is done.
1739 1804 * No lock protection is needed here.
1740 1805 *
1741 1806 * Return B_TRUE if all pending packets have been transmitted;
1742 1807 * Otherwise return B_FALSE;
1743 1808 */
1744 1809 for (i = 0; i < TX_DRAIN_TIME; i++) {
1745 1810
1746 1811 done = B_TRUE;
1747 1812 for (j = 0; j < ixgbe->num_tx_rings; j++) {
1748 1813 tx_ring = &ixgbe->tx_rings[j];
1749 1814 done = done &&
1750 1815 (tx_ring->tbd_free == tx_ring->ring_size);
1751 1816 }
1752 1817
1753 1818 if (done)
1754 1819 break;
1755 1820
1756 1821 msec_delay(1);
1757 1822 }
1758 1823
1759 1824 return (done);
1760 1825 }
1761 1826
1762 1827 /*
1763 1828 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1764 1829 */
1765 1830 static boolean_t
1766 1831 ixgbe_rx_drain(ixgbe_t *ixgbe)
1767 1832 {
1768 1833 boolean_t done = B_TRUE;
1769 1834 int i;
1770 1835
1771 1836 /*
1772 1837 * Polling the rx free list to check if those rx buffers held by
1773 1838 * the upper layer are released.
1774 1839 *
1775 1840 * Check the counter rcb_free to see if all pending buffers are
1776 1841 * released. No lock protection is needed here.
1777 1842 *
1778 1843 * Return B_TRUE if all pending buffers have been released;
1779 1844 * Otherwise return B_FALSE;
1780 1845 */
1781 1846 for (i = 0; i < RX_DRAIN_TIME; i++) {
1782 1847 done = (ixgbe->rcb_pending == 0);
1783 1848
1784 1849 if (done)
1785 1850 break;
1786 1851
1787 1852 msec_delay(1);
1788 1853 }
1789 1854
1790 1855 return (done);
1791 1856 }
1792 1857
1793 1858 /*
1794 1859 * ixgbe_start - Start the driver/chipset.
1795 1860 */
/*
 * ixgbe_start - Start the driver/chipset.
 *
 * Caller must hold gen_lock.  When alloc_buffer is B_TRUE the software
 * rx rings and DMA buffers are (re)allocated; B_FALSE reuses existing
 * buffers (resume/reset paths).  All ring locks are held across chip
 * start and ring setup, and released in reverse order on both paths.
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	if (alloc_buffer) {
		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe,
			    "Failed to allocate software receive rings");
			return (IXGBE_FAILURE);
		}

		/* Allocate buffers for all the rx/tx rings */
		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
			return (IXGBE_FAILURE);
		}

		ixgbe->tx_ring_init = B_TRUE;
	} else {
		ixgbe->tx_ring_init = B_FALSE;
	}

	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	/*
	 * Configure link now for X550
	 *
	 * X550 possesses a LPLU (Low-Power Link Up) mode which keeps the
	 * resting state of the adapter at a 1Gb FDX speed. Prior to the X550,
	 * the resting state of the link would be the maximum speed that
	 * autonegotiation will allow (usually 10Gb, infrastructure allowing)
	 * so we never bothered with explicitly setting the link to 10Gb as it
	 * would already be at that state on driver attach. With X550, we must
	 * trigger a re-negotiation of the link in order to switch from a LPLU
	 * 1Gb link to 10Gb (cable and link partner permitting.)
	 */
	if (hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x) {
		(void) ixgbe_driver_setup_link(ixgbe, B_TRUE);
		ixgbe_get_hw_state(ixgbe);
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * ixgbe_start() will be called when resetting, however if reset
	 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
	 * before enabling the interrupts.
	 */
	atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
	    | IXGBE_STALL| IXGBE_OVERTEMP));

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}
1893 1958
/*
 * ixgbe_stop - Stop the driver/chipset.
 *
 * Quiesces the adapter: masks adapter interrupts, drains the pending tx
 * traffic, stops the MAC under the ring locks, reclaims tx resources and
 * reports link-down to the MAC layer.  When free_buffer is B_TRUE the
 * rx/tx DMA resources are released as well.
 *
 * Caller must hold gen_lock.
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	/* Take every ring lock so no rx/tx path races with the stop. */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	/* FMA: report lost service if register access has gone bad. */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	/* Drop the ring locks in reverse acquisition order. */
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	/* Tell the MAC layer the link is no longer known to be up. */
	if (ixgbe->link_state == LINK_STATE_UP) {
		ixgbe->link_state = LINK_STATE_UNKNOWN;
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}

	if (free_buffer) {
		/*
		 * Release the DMA/memory resources of rx/tx rings
		 */
		ixgbe_free_dma(ixgbe);
		ixgbe_free_rx_data(ixgbe);
	}
}
1951 2016
1952 2017 /*
1953 2018 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
1954 2019 */
1955 2020 /* ARGSUSED */
1956 2021 static int
1957 2022 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
1958 2023 void *arg1, void *arg2)
1959 2024 {
1960 2025 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
1961 2026
1962 2027 switch (cbaction) {
1963 2028 /* IRM callback */
1964 2029 int count;
1965 2030 case DDI_CB_INTR_ADD:
1966 2031 case DDI_CB_INTR_REMOVE:
1967 2032 count = (int)(uintptr_t)cbarg;
1968 2033 ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
1969 2034 DTRACE_PROBE2(ixgbe__irm__callback, int, count,
1970 2035 int, ixgbe->intr_cnt);
1971 2036 if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
1972 2037 DDI_SUCCESS) {
1973 2038 ixgbe_error(ixgbe,
1974 2039 "IRM CB: Failed to adjust interrupts");
1975 2040 goto cb_fail;
1976 2041 }
1977 2042 break;
1978 2043 default:
1979 2044 IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
1980 2045 cbaction);
1981 2046 return (DDI_ENOTSUP);
1982 2047 }
1983 2048 return (DDI_SUCCESS);
1984 2049 cb_fail:
|
↓ open down ↓ |
544 lines elided |
↑ open up ↑ |
1985 2050 return (DDI_FAILURE);
1986 2051 }
1987 2052
1988 2053 /*
1989 2054 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
1990 2055 */
1991 2056 static int
1992 2057 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
1993 2058 {
1994 2059 int i, rc, actual;
2060 + uint32_t started;
1995 2061
1996 - if (count == 0)
1997 - return (DDI_SUCCESS);
1998 -
1999 - if ((cbaction == DDI_CB_INTR_ADD &&
2000 - ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
2001 - (cbaction == DDI_CB_INTR_REMOVE &&
2002 - ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
2062 + if (!(ixgbe->ixgbe_state & IXGBE_INITIALIZED)) {
2003 2063 return (DDI_FAILURE);
2064 + }
2004 2065
2005 - if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
2066 + if (cbaction == DDI_CB_INTR_REMOVE &&
2067 + ixgbe->intr_cnt - count < ixgbe->intr_cnt_min)
2006 2068 return (DDI_FAILURE);
2007 - }
2008 2069
2070 + if (cbaction == DDI_CB_INTR_ADD &&
2071 + ixgbe->intr_cnt + count > ixgbe->intr_cnt_max)
2072 + count = ixgbe->intr_cnt_max - ixgbe->intr_cnt;
2073 +
2074 + if (count == 0)
2075 + return (DDI_SUCCESS);
2076 +
2009 2077 for (i = 0; i < ixgbe->num_rx_rings; i++)
2010 2078 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
2011 2079 for (i = 0; i < ixgbe->num_tx_rings; i++)
2012 2080 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
2013 2081
2014 2082 mutex_enter(&ixgbe->gen_lock);
2083 + started = ixgbe->ixgbe_state & IXGBE_STARTED;
2015 2084 ixgbe->ixgbe_state &= ~IXGBE_STARTED;
2016 2085 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
2017 2086 ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
2018 2087 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
2019 2088
2020 - ixgbe_stop(ixgbe, B_FALSE);
2089 + if (started)
2090 + ixgbe_stop(ixgbe, B_FALSE);
2021 2091 /*
2022 2092 * Disable interrupts
2023 2093 */
2024 2094 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
2025 2095 rc = ixgbe_disable_intrs(ixgbe);
2026 2096 ASSERT(rc == IXGBE_SUCCESS);
2027 2097 }
2028 2098 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
2029 2099
2030 2100 /*
2031 2101 * Remove interrupt handlers
2032 2102 */
2033 2103 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
2034 2104 ixgbe_rem_intr_handlers(ixgbe);
2035 2105 }
2036 2106 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
2037 2107
2038 2108 /*
2039 2109 * Clear vect_map
2040 2110 */
2041 2111 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
2042 2112 switch (cbaction) {
2043 2113 case DDI_CB_INTR_ADD:
2044 2114 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
2045 2115 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
2046 2116 DDI_INTR_ALLOC_NORMAL);
2047 2117 if (rc != DDI_SUCCESS || actual != count) {
2048 2118 ixgbe_log(ixgbe, "Adjust interrupts failed."
2049 2119 "return: %d, irm cb size: %d, actual: %d",
2050 2120 rc, count, actual);
2051 2121 goto intr_adjust_fail;
2052 2122 }
2053 2123 ixgbe->intr_cnt += count;
2054 2124 break;
2055 2125
2056 2126 case DDI_CB_INTR_REMOVE:
2057 2127 for (i = ixgbe->intr_cnt - count;
2058 2128 i < ixgbe->intr_cnt; i ++) {
2059 2129 rc = ddi_intr_free(ixgbe->htable[i]);
2060 2130 ixgbe->htable[i] = NULL;
2061 2131 if (rc != DDI_SUCCESS) {
2062 2132 ixgbe_log(ixgbe, "Adjust interrupts failed."
2063 2133 "return: %d, irm cb size: %d, actual: %d",
2064 2134 rc, count, actual);
2065 2135 goto intr_adjust_fail;
2066 2136 }
2067 2137 }
2068 2138 ixgbe->intr_cnt -= count;
2069 2139 break;
2070 2140 }
2071 2141
2072 2142 /*
2073 2143 * Get priority for first vector, assume remaining are all the same
2074 2144 */
2075 2145 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
2076 2146 if (rc != DDI_SUCCESS) {
2077 2147 ixgbe_log(ixgbe,
2078 2148 "Get interrupt priority failed: %d", rc);
2079 2149 goto intr_adjust_fail;
2080 2150 }
2081 2151 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
2082 2152 if (rc != DDI_SUCCESS) {
2083 2153 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
2084 2154 goto intr_adjust_fail;
2085 2155 }
2086 2156 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
2087 2157
2088 2158 /*
2089 2159 * Map rings to interrupt vectors
2090 2160 */
2091 2161 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
2092 2162 ixgbe_error(ixgbe,
2093 2163 "IRM CB: Failed to map interrupts to vectors");
2094 2164 goto intr_adjust_fail;
2095 2165 }
2096 2166
2097 2167 /*
2098 2168 * Add interrupt handlers
2099 2169 */
2100 2170 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
2101 2171 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
2102 2172 goto intr_adjust_fail;
2103 2173 }
2104 2174 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
|
↓ open down ↓ |
74 lines elided |
↑ open up ↑ |
2105 2175
2106 2176 /*
2107 2177 * Now that mutex locks are initialized, and the chip is also
2108 2178 * initialized, enable interrupts.
2109 2179 */
2110 2180 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
2111 2181 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
2112 2182 goto intr_adjust_fail;
2113 2183 }
2114 2184 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
2115 - if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
2116 - ixgbe_error(ixgbe, "IRM CB: Failed to start");
2117 - goto intr_adjust_fail;
2118 - }
2185 + if (started)
2186 + if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
2187 + ixgbe_error(ixgbe, "IRM CB: Failed to start");
2188 + goto intr_adjust_fail;
2189 + }
2119 2190 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
2120 2191 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
2121 - ixgbe->ixgbe_state |= IXGBE_STARTED;
2192 + ixgbe->ixgbe_state |= started;
2122 2193 mutex_exit(&ixgbe->gen_lock);
2123 2194
2124 2195 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2125 2196 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
2126 2197 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
2127 2198 }
2128 2199 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2129 2200 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
2130 2201 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
2131 2202 }
2132 2203
2133 2204 /* Wakeup all Tx rings */
2134 2205 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2135 2206 mac_tx_ring_update(ixgbe->mac_hdl,
2136 2207 ixgbe->tx_rings[i].ring_handle);
2137 2208 }
2138 2209
2139 2210 IXGBE_DEBUGLOG_3(ixgbe,
2140 2211 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
2141 2212 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
2142 2213 return (DDI_SUCCESS);
2143 2214
2144 2215 intr_adjust_fail:
2145 2216 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
2146 2217 mutex_exit(&ixgbe->gen_lock);
2147 2218 return (DDI_FAILURE);
2148 2219 }
2149 2220
2150 2221 /*
2151 2222 * ixgbe_intr_cb_register - Register interrupt callback function.
2152 2223 */
2153 2224 static int
2154 2225 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
2155 2226 {
2156 2227 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
2157 2228 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
2158 2229 return (IXGBE_FAILURE);
2159 2230 }
2160 2231 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
2161 2232 return (IXGBE_SUCCESS);
2162 2233 }
2163 2234
2164 2235 /*
2165 2236 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
2166 2237 */
2167 2238 static int
2168 2239 ixgbe_alloc_rings(ixgbe_t *ixgbe)
2169 2240 {
2170 2241 /*
2171 2242 * Allocate memory space for rx rings
2172 2243 */
2173 2244 ixgbe->rx_rings = kmem_zalloc(
2174 2245 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
2175 2246 KM_NOSLEEP);
2176 2247
2177 2248 if (ixgbe->rx_rings == NULL) {
2178 2249 return (IXGBE_FAILURE);
2179 2250 }
2180 2251
2181 2252 /*
2182 2253 * Allocate memory space for tx rings
2183 2254 */
2184 2255 ixgbe->tx_rings = kmem_zalloc(
2185 2256 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
2186 2257 KM_NOSLEEP);
2187 2258
2188 2259 if (ixgbe->tx_rings == NULL) {
2189 2260 kmem_free(ixgbe->rx_rings,
2190 2261 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2191 2262 ixgbe->rx_rings = NULL;
2192 2263 return (IXGBE_FAILURE);
2193 2264 }
2194 2265
2195 2266 /*
2196 2267 * Allocate memory space for rx ring groups
2197 2268 */
2198 2269 ixgbe->rx_groups = kmem_zalloc(
2199 2270 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
2200 2271 KM_NOSLEEP);
2201 2272
2202 2273 if (ixgbe->rx_groups == NULL) {
2203 2274 kmem_free(ixgbe->rx_rings,
2204 2275 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2205 2276 kmem_free(ixgbe->tx_rings,
2206 2277 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2207 2278 ixgbe->rx_rings = NULL;
2208 2279 ixgbe->tx_rings = NULL;
2209 2280 return (IXGBE_FAILURE);
2210 2281 }
2211 2282
2212 2283 return (IXGBE_SUCCESS);
2213 2284 }
2214 2285
2215 2286 /*
2216 2287 * ixgbe_free_rings - Free the memory space of rx/tx rings.
2217 2288 */
2218 2289 static void
2219 2290 ixgbe_free_rings(ixgbe_t *ixgbe)
2220 2291 {
2221 2292 if (ixgbe->rx_rings != NULL) {
2222 2293 kmem_free(ixgbe->rx_rings,
2223 2294 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
2224 2295 ixgbe->rx_rings = NULL;
2225 2296 }
2226 2297
2227 2298 if (ixgbe->tx_rings != NULL) {
2228 2299 kmem_free(ixgbe->tx_rings,
2229 2300 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
2230 2301 ixgbe->tx_rings = NULL;
2231 2302 }
2232 2303
2233 2304 if (ixgbe->rx_groups != NULL) {
2234 2305 kmem_free(ixgbe->rx_groups,
2235 2306 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
2236 2307 ixgbe->rx_groups = NULL;
2237 2308 }
2238 2309 }
2239 2310
2240 2311 static int
2241 2312 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
2242 2313 {
2243 2314 ixgbe_rx_ring_t *rx_ring;
2244 2315 int i;
2245 2316
2246 2317 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2247 2318 rx_ring = &ixgbe->rx_rings[i];
2248 2319 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
2249 2320 goto alloc_rx_rings_failure;
2250 2321 }
2251 2322 return (IXGBE_SUCCESS);
2252 2323
2253 2324 alloc_rx_rings_failure:
2254 2325 ixgbe_free_rx_data(ixgbe);
2255 2326 return (IXGBE_FAILURE);
2256 2327 }
2257 2328
/*
 * ixgbe_free_rx_data - Release the software receive data of all rx rings.
 *
 * Each ring's data is freed only when no receive control blocks are still
 * loaned out (rcb_pending == 0); otherwise the IXGBE_RX_STOPPED flag is
 * set and the final consumer frees the data when the last rcb returns.
 */
static void
ixgbe_free_rx_data(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	ixgbe_rx_data_t *rx_data;
	int i;

	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];

		/* rx_pending_lock serializes against returning rcbs. */
		mutex_enter(&ixgbe->rx_pending_lock);
		rx_data = rx_ring->rx_data;

		if (rx_data != NULL) {
			rx_data->flag |= IXGBE_RX_STOPPED;

			/* Free now only if no buffers are still outstanding. */
			if (rx_data->rcb_pending == 0) {
				ixgbe_free_rx_ring_data(rx_data);
				rx_ring->rx_data = NULL;
			}
		}

		mutex_exit(&ixgbe->rx_pending_lock);
	}
}
2283 2354
2284 2355 /*
2285 2356 * ixgbe_setup_rings - Setup rx/tx rings.
2286 2357 */
2287 2358 static void
2288 2359 ixgbe_setup_rings(ixgbe_t *ixgbe)
2289 2360 {
2290 2361 /*
2291 2362 * Setup the rx/tx rings, including the following:
2292 2363 *
2293 2364 * 1. Setup the descriptor ring and the control block buffers;
2294 2365 * 2. Initialize necessary registers for receive/transmit;
2295 2366 * 3. Initialize software pointers/parameters for receive/transmit;
2296 2367 */
2297 2368 ixgbe_setup_rx(ixgbe);
2298 2369
2299 2370 ixgbe_setup_tx(ixgbe);
2300 2371 }
2301 2372
/*
 * ixgbe_setup_rx_ring - Program one receive ring into the hardware.
 *
 * Loads the descriptor ring with the DMA addresses of the pre-allocated
 * receive buffers, programs the ring's length/base/head/tail registers,
 * enables the queue via RXDCTL, and configures SRRCTL (buffer size and
 * advanced one-buffer descriptor format).
 *
 * Caller must hold both the ring's rx_lock and gen_lock.
 */
static void
ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
{
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
	struct ixgbe_hw *hw = &ixgbe->hw;
	rx_control_block_t *rcb;
	union ixgbe_adv_rx_desc *rbd;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;
	int i;

	ASSERT(mutex_owned(&rx_ring->rx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/* Point each descriptor at its control block's DMA buffer. */
	for (i = 0; i < ixgbe->rx_ring_size; i++) {
		rcb = rx_data->work_list[i];
		rbd = &rx_data->rbd_ring[i];

		rbd->read.pkt_addr = rcb->rx_buf.dma_address;
		/* NOTE(review): NULL assigned to a 64-bit address field;
		 * works out to 0 here but reads oddly — confirm upstream. */
		rbd->read.hdr_addr = NULL;
	}

	/*
	 * Initialize the length register
	 */
	size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)rx_data->rbd_area.dma_address;
	buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
	IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
	    rx_data->ring_size - 1);
	IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);

	rx_data->rbd_next = 0;
	rx_data->lro_first = 0;

	/*
	 * Setup the Receive Descriptor Control Register (RXDCTL)
	 * PTHRESH=32 descriptors (half the internal cache)
	 * HTHRESH=0 descriptors (to minimize latency on fetch)
	 * WTHRESH defaults to 1 (writeback each descriptor)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
	reg_val |= IXGBE_RXDCTL_ENABLE;	/* enable queue */

	/* Not a valid value for 82599, X540 or X550 */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		reg_val |= 0x0020;	/* pthresh */
	}
	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);

	/* CRC stripping / RSC aggregation-disable for the newer MACs. */
	if (hw->mac.type == ixgbe_mac_82599EB ||
	    hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
	}

	/*
	 * Setup the Split and Replication Receive Control Register.
	 * Set the rx buffer size and the advanced descriptor type.
	 */
	reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
	    IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
	reg_val |= IXGBE_SRRCTL_DROP_EN;
	IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
}
2384 2455
/*
 * ixgbe_setup_rx - Program the receive unit.
 *
 * With the receive unit disabled, configures packet-split types (PSRTYPE),
 * filter control (FCTRL), hardware rx checksum, the VMDq/RSS classification
 * mode, per-ring registers, statistics mapping, max frame size, jumbo
 * frames, and (optionally) RSC/LRO.  Ordering matters: RXCTRL.RXEN must be
 * set after FCTRL, and the rings are programmed only after RXCTRL.
 */
static void
ixgbe_setup_rx(ixgbe_t *ixgbe)
{
	ixgbe_rx_ring_t *rx_ring;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t reg_val;
	uint32_t ring_mapping;
	uint32_t i, index;
	uint32_t psrtype_rss_bit;

	/*
	 * Ensure that Rx is disabled while setting up
	 * the Rx unit and Rx descriptor ring(s)
	 */
	ixgbe_disable_rx(hw);

	/* PSRTYPE must be configured for 82599 */
	if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
	    ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
		reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
		    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
		reg_val |= IXGBE_PSRTYPE_L2HDR;
		reg_val |= 0x80000000;
		IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
	} else {
		/* RSS-queue count bit depends on the group count. */
		if (ixgbe->num_rx_groups > 32) {
			psrtype_rss_bit = 0x20000000;
		} else {
			psrtype_rss_bit = 0x40000000;
		}
		for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
			reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
			    IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
			reg_val |= IXGBE_PSRTYPE_L2HDR;
			reg_val |= psrtype_rss_bit;
			IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
		}
	}

	/*
	 * Set filter control in FCTRL to determine types of packets are passed
	 * up to the driver.
	 * - Pass broadcast packets.
	 * - Do not pass flow control pause frames (82598-specific)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_val |= IXGBE_FCTRL_BAM;	/* Broadcast Accept Mode */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		reg_val |= IXGBE_FCTRL_DPF;	/* Discard Pause Frames */
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);

	/*
	 * Hardware checksum settings
	 */
	if (ixgbe->rx_hcksum_enable) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
		reg_val |= IXGBE_RXCSUM_IPPCSE;	/* IP checksum */
		IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
	}

	/*
	 * Setup VMDq and RSS for multiple receive queues
	 */
	switch (ixgbe->classify_mode) {
	case IXGBE_CLASSIFY_RSS:
		/*
		 * One group, only RSS is needed when more than
		 * one ring enabled.
		 */
		ixgbe_setup_rss(ixgbe);
		break;

	case IXGBE_CLASSIFY_VMDQ:
		/*
		 * Multiple groups, each group has one ring,
		 * only VMDq is needed.
		 */
		ixgbe_setup_vmdq(ixgbe);
		break;

	case IXGBE_CLASSIFY_VMDQ_RSS:
		/*
		 * Multiple groups and multiple rings, both
		 * VMDq and RSS are needed.
		 */
		ixgbe_setup_vmdq_rss(ixgbe);
		break;

	default:
		break;
	}

	/*
	 * Enable the receive unit. This must be done after filter
	 * control is set in FCTRL. On 82598, we disable the descriptor monitor.
	 * 82598 is the only adapter which defines this RXCTRL option.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (hw->mac.type == ixgbe_mac_82598EB)
		reg_val |= IXGBE_RXCTRL_DMBYPS;	/* descriptor monitor bypass */
	reg_val |= IXGBE_RXCTRL_RXEN;
	(void) ixgbe_enable_rx_dma(hw, reg_val);

	/*
	 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		rx_ring = &ixgbe->rx_rings[i];
		ixgbe_setup_rx_ring(rx_ring);
	}

	/*
	 * Setup the per-ring statistics mapping.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_rx_rings; i++) {
		index = ixgbe->rx_rings[i].hw_index;
		/* Four 8-bit map fields per RQSMR register. */
		ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
		ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
	}

	/*
	 * The Max Frame Size in MHADD/MAXFRS will be internally increased
	 * by four bytes if the packet has a VLAN field, so includes MTU,
	 * ethernet header and frame check sequence.
	 * Register is MAXFRS in 82599.
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_MHADD);
	reg_val &= ~IXGBE_MHADD_MFS_MASK;
	reg_val |= (ixgbe->default_mtu + sizeof (struct ether_header)
	    + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);

	/*
	 * Setup Jumbo Frame enable bit
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	if (ixgbe->default_mtu > ETHERMTU)
		reg_val |= IXGBE_HLREG0_JUMBOEN;
	else
		reg_val &= ~IXGBE_HLREG0_JUMBOEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);

	/*
	 * Setup RSC for multiple receive queues.
	 */
	if (ixgbe->lro_enable) {
		for (i = 0; i < ixgbe->num_rx_rings; i++) {
			/*
			 * Make sure rx_buf_size * MAXDESC not greater
			 * than 65535.
			 * Intel recommends 4 for MAXDESC field value.
			 */
			reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
			reg_val |= IXGBE_RSCCTL_RSCEN;
			if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
				reg_val |= IXGBE_RSCCTL_MAXDESC_1;
			else
				reg_val |= IXGBE_RSCCTL_MAXDESC_4;
			IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
		}

		reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
		reg_val |= IXGBE_RSCDBU_RSCACKDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);

		reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
		reg_val |= IXGBE_RDRXCTL_RSCACKC;
		reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
		reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;

		IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
	}
}
2561 2632
/*
 * ixgbe_setup_tx_ring - Program one transmit ring into the hardware.
 *
 * Programs the ring's length/base/head/tail registers, optionally enables
 * head write-back (using the spare descriptor slot past the ring's tail as
 * the write-back target), and resets the ring's software state.
 *
 * Caller must hold both the ring's tx_lock and gen_lock.
 */
static void
ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
{
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t size;
	uint32_t buf_low;
	uint32_t buf_high;
	uint32_t reg_val;

	ASSERT(mutex_owned(&tx_ring->tx_lock));
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Initialize the length register
	 */
	size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
	IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);

	/*
	 * Initialize the base address registers
	 */
	buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
	buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
	IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);

	/*
	 * Setup head & tail pointers
	 */
	IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
	IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);

	/*
	 * Setup head write-back
	 */
	if (ixgbe->tx_head_wb_enable) {
		/*
		 * The memory of the head write-back is allocated using
		 * the extra tbd beyond the tail of the tbd ring.
		 */
		tx_ring->tbd_head_wb = (uint32_t *)
		    ((uintptr_t)tx_ring->tbd_area.address + size);
		*tx_ring->tbd_head_wb = 0;

		buf_low = (uint32_t)
		    (tx_ring->tbd_area.dma_address + size);
		buf_high = (uint32_t)
		    ((tx_ring->tbd_area.dma_address + size) >> 32);

		/* Set the head write-back enable bit */
		buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;

		IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
		IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);

		/*
		 * Turn off relaxed ordering for head write back or it will
		 * cause problems with the tx recycling
		 */

		/* 82598 and 82599+ expose the TXCTRL register differently. */
		reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
		    IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
		    IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
		reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
		if (hw->mac.type == ixgbe_mac_82598EB) {
			IXGBE_WRITE_REG(hw,
			    IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
		} else {
			IXGBE_WRITE_REG(hw,
			    IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
		}
	} else {
		tx_ring->tbd_head_wb = NULL;
	}

	/* Reset the software view of the descriptor ring. */
	tx_ring->tbd_head = 0;
	tx_ring->tbd_tail = 0;
	tx_ring->tbd_free = tx_ring->ring_size;

	/* The free list is only re-initialized on a full (re)allocation. */
	if (ixgbe->tx_ring_init == B_TRUE) {
		tx_ring->tcb_head = 0;
		tx_ring->tcb_tail = 0;
		tx_ring->tcb_free = tx_ring->free_list_size;
	}

	/*
	 * Initialize the s/w context structure
	 */
	bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
}
2653 2724
/*
 * ixgbe_setup_tx - Program the transmit unit.
 *
 * Programs every tx ring, sets the per-ring statistics mapping, enables
 * CRC append/padding, turns on the transmit DMA engine (82599 and later),
 * and finally enables each tx queue.  Ordering matters: on 82599+ the
 * queues may only be enabled after DMATXCTL.TE is set, and MTQC may only
 * be written with the arbiter disabled.
 */
static void
ixgbe_setup_tx(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	uint32_t reg_val;
	uint32_t ring_mapping;
	int i;

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		ixgbe_setup_tx_ring(tx_ring);
	}

	/*
	 * Setup the per-ring statistics mapping.
	 */
	ring_mapping = 0;
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		/* Four 8-bit map fields accumulate per TQSMR/TQSM register. */
		ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
		if ((i & 0x3) == 0x3) {
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
				    ring_mapping);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
			case ixgbe_mac_X550:
			case ixgbe_mac_X550EM_x:
				IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
				    ring_mapping);
				break;

			default:
				break;
			}

			ring_mapping = 0;
		}
	}
	/* Flush the final, partially filled mapping register. */
	if (i & 0x3) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
			break;

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
			break;

		default:
			break;
		}
	}

	/*
	 * Enable CRC appending and TX padding (for short tx frames)
	 */
	reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);

	/*
	 * enable DMA for 82599, X540 and X550 parts
	 */
	if (hw->mac.type == ixgbe_mac_82599EB ||
	    hw->mac.type == ixgbe_mac_X540 ||
	    hw->mac.type == ixgbe_mac_X550 ||
	    hw->mac.type == ixgbe_mac_X550EM_x) {
		/* DMATXCTL.TE must be set after all Tx config is complete */
		reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_val |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);

		/* Disable arbiter to set MTQC */
		reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		reg_val |= IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
		IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
		reg_val &= ~IXGBE_RTTDCS_ARBDIS;
		IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
	}

	/*
	 * Enabling tx queues ..
	 * For 82599 must be done after DMATXCTL.TE is set
	 */
	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];
		reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
		reg_val |= IXGBE_TXDCTL_ENABLE;
		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
	}
}
2753 2824
2754 2825 /*
2755 2826 * ixgbe_setup_rss - Setup receive-side scaling feature.
2756 2827 */
2757 2828 static void
2758 2829 ixgbe_setup_rss(ixgbe_t *ixgbe)
2759 2830 {
2760 2831 struct ixgbe_hw *hw = &ixgbe->hw;
2761 2832 uint32_t mrqc;
2762 2833
2763 2834 /*
2764 2835 * Initialize RETA/ERETA table
2765 2836 */
2766 2837 ixgbe_setup_rss_table(ixgbe);
2767 2838
2768 2839 /*
2769 2840 * Enable RSS & perform hash on these packet types
2770 2841 */
2771 2842 mrqc = IXGBE_MRQC_RSSEN |
2772 2843 IXGBE_MRQC_RSS_FIELD_IPV4 |
2773 2844 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2774 2845 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2775 2846 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2776 2847 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2777 2848 IXGBE_MRQC_RSS_FIELD_IPV6 |
2778 2849 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2779 2850 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2780 2851 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2781 2852 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2782 2853 }
2783 2854
2784 2855 /*
2785 2856 * ixgbe_setup_vmdq - Setup MAC classification feature
2786 2857 */
2787 2858 static void
2788 2859 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2789 2860 {
2790 2861 struct ixgbe_hw *hw = &ixgbe->hw;
2791 2862 uint32_t vmdctl, i, vtctl;
2792 2863
2793 2864 /*
2794 2865 * Setup the VMDq Control register, enable VMDq based on
2795 2866 * packet destination MAC address:
2796 2867 */
2797 2868 switch (hw->mac.type) {
2798 2869 case ixgbe_mac_82598EB:
2799 2870 /*
2800 2871 * VMDq Enable = 1;
2801 2872 * VMDq Filter = 0; MAC filtering
2802 2873 * Default VMDq output index = 0;
2803 2874 */
2804 2875 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2805 2876 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2806 2877 break;
2807 2878
2808 2879 case ixgbe_mac_82599EB:
2809 2880 case ixgbe_mac_X540:
2810 2881 case ixgbe_mac_X550:
2811 2882 case ixgbe_mac_X550EM_x:
2812 2883 /*
2813 2884 * Enable VMDq-only.
2814 2885 */
2815 2886 vmdctl = IXGBE_MRQC_VMDQEN;
2816 2887 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2817 2888
2818 2889 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2819 2890 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2820 2891 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2821 2892 }
2822 2893
2823 2894 /*
2824 2895 * Enable Virtualization and Replication.
2825 2896 */
2826 2897 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2827 2898 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2828 2899
2829 2900 /*
2830 2901 * Enable receiving packets to all VFs
2831 2902 */
2832 2903 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2833 2904 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2834 2905 break;
2835 2906
2836 2907 default:
2837 2908 break;
2838 2909 }
2839 2910 }
2840 2911
2841 2912 /*
2842 2913 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2843 2914 */
2844 2915 static void
2845 2916 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2846 2917 {
2847 2918 struct ixgbe_hw *hw = &ixgbe->hw;
2848 2919 uint32_t i, mrqc;
2849 2920 uint32_t vtctl, vmdctl;
2850 2921
2851 2922 /*
2852 2923 * Initialize RETA/ERETA table
2853 2924 */
2854 2925 ixgbe_setup_rss_table(ixgbe);
2855 2926
2856 2927 /*
2857 2928 * Enable and setup RSS and VMDq
2858 2929 */
2859 2930 switch (hw->mac.type) {
2860 2931 case ixgbe_mac_82598EB:
2861 2932 /*
2862 2933 * Enable RSS & Setup RSS Hash functions
2863 2934 */
2864 2935 mrqc = IXGBE_MRQC_RSSEN |
2865 2936 IXGBE_MRQC_RSS_FIELD_IPV4 |
2866 2937 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2867 2938 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2868 2939 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2869 2940 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2870 2941 IXGBE_MRQC_RSS_FIELD_IPV6 |
2871 2942 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2872 2943 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2873 2944 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2874 2945 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2875 2946
2876 2947 /*
2877 2948 * Enable and Setup VMDq
2878 2949 * VMDq Filter = 0; MAC filtering
2879 2950 * Default VMDq output index = 0;
2880 2951 */
2881 2952 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2882 2953 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2883 2954 break;
2884 2955
2885 2956 case ixgbe_mac_82599EB:
2886 2957 case ixgbe_mac_X540:
2887 2958 case ixgbe_mac_X550:
2888 2959 case ixgbe_mac_X550EM_x:
2889 2960 /*
2890 2961 * Enable RSS & Setup RSS Hash functions
2891 2962 */
2892 2963 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2893 2964 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2894 2965 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2895 2966 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2896 2967 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2897 2968 IXGBE_MRQC_RSS_FIELD_IPV6 |
2898 2969 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2899 2970 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2900 2971 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2901 2972
2902 2973 /*
2903 2974 * Enable VMDq+RSS.
2904 2975 */
2905 2976 if (ixgbe->num_rx_groups > 32) {
2906 2977 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2907 2978 } else {
2908 2979 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2909 2980 }
2910 2981
2911 2982 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2912 2983
2913 2984 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2914 2985 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2915 2986 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2916 2987 }
2917 2988 break;
2918 2989
2919 2990 default:
2920 2991 break;
2921 2992
2922 2993 }
2923 2994
2924 2995 if (hw->mac.type == ixgbe_mac_82599EB ||
2925 2996 hw->mac.type == ixgbe_mac_X540 ||
2926 2997 hw->mac.type == ixgbe_mac_X550 ||
2927 2998 hw->mac.type == ixgbe_mac_X550EM_x) {
2928 2999 /*
2929 3000 * Enable Virtualization and Replication.
2930 3001 */
2931 3002 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2932 3003 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2933 3004
2934 3005 /*
2935 3006 * Enable receiving packets to all VFs
2936 3007 */
2937 3008 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2938 3009 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2939 3010 }
2940 3011 }
2941 3012
2942 3013 /*
2943 3014 * ixgbe_setup_rss_table - Setup RSS table
2944 3015 */
2945 3016 static void
2946 3017 ixgbe_setup_rss_table(ixgbe_t *ixgbe)
2947 3018 {
2948 3019 struct ixgbe_hw *hw = &ixgbe->hw;
2949 3020 uint32_t i, j;
2950 3021 uint32_t random;
2951 3022 uint32_t reta;
2952 3023 uint32_t ring_per_group;
2953 3024 uint32_t ring;
2954 3025 uint32_t table_size;
2955 3026 uint32_t index_mult;
2956 3027 uint32_t rxcsum;
2957 3028
2958 3029 /*
2959 3030 * Set multiplier for RETA setup and table size based on MAC type.
2960 3031 * RETA table sizes vary by model:
2961 3032 *
2962 3033 * 82598, 82599, X540: 128 table entries.
2963 3034 * X550: 512 table entries.
2964 3035 */
2965 3036 index_mult = 0x1;
2966 3037 table_size = 128;
2967 3038 switch (ixgbe->hw.mac.type) {
2968 3039 case ixgbe_mac_82598EB:
2969 3040 index_mult = 0x11;
2970 3041 break;
2971 3042 case ixgbe_mac_X550:
2972 3043 case ixgbe_mac_X550EM_x:
2973 3044 table_size = 512;
2974 3045 break;
2975 3046 default:
2976 3047 break;
2977 3048 }
2978 3049
2979 3050 /*
2980 3051 * Fill out RSS redirection table. The configuation of the indices is
2981 3052 * hardware-dependent.
2982 3053 *
2983 3054 * 82598: 8 bits wide containing two 4 bit RSS indices
2984 3055 * 82599, X540: 8 bits wide containing one 4 bit RSS index
2985 3056 * X550: 8 bits wide containing one 6 bit RSS index
2986 3057 */
2987 3058 reta = 0;
2988 3059 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2989 3060
2990 3061 for (i = 0, j = 0; i < table_size; i++, j++) {
2991 3062 if (j == ring_per_group) j = 0;
2992 3063
2993 3064 /*
2994 3065 * The low 8 bits are for hash value (n+0);
2995 3066 * The next 8 bits are for hash value (n+1), etc.
2996 3067 */
2997 3068 ring = (j * index_mult);
2998 3069 reta = reta >> 8;
2999 3070 reta = reta | (((uint32_t)ring) << 24);
3000 3071
3001 3072 if ((i & 3) == 3) {
3002 3073 /*
3003 3074 * The first 128 table entries are programmed into the
3004 3075 * RETA register, with any beyond that (eg; on X550)
3005 3076 * into ERETA.
3006 3077 */
3007 3078 if (i < 128)
3008 3079 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
3009 3080 else
3010 3081 IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
3011 3082 reta);
3012 3083 reta = 0;
3013 3084 }
3014 3085 }
3015 3086
3016 3087 /*
3017 3088 * Fill out hash function seeds with a random constant
3018 3089 */
3019 3090 for (i = 0; i < 10; i++) {
3020 3091 (void) random_get_pseudo_bytes((uint8_t *)&random,
3021 3092 sizeof (uint32_t));
3022 3093 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
3023 3094 }
3024 3095
3025 3096 /*
3026 3097 * Disable Packet Checksum to enable RSS for multiple receive queues.
3027 3098 * It is an adapter hardware limitation that Packet Checksum is
3028 3099 * mutually exclusive with RSS.
3029 3100 */
3030 3101 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
3031 3102 rxcsum |= IXGBE_RXCSUM_PCSD;
3032 3103 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
3033 3104 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
3034 3105 }
3035 3106
3036 3107 /*
3037 3108 * ixgbe_init_unicst - Initialize the unicast addresses.
3038 3109 */
3039 3110 static void
3040 3111 ixgbe_init_unicst(ixgbe_t *ixgbe)
3041 3112 {
3042 3113 struct ixgbe_hw *hw = &ixgbe->hw;
3043 3114 uint8_t *mac_addr;
3044 3115 int slot;
3045 3116 /*
3046 3117 * Here we should consider two situations:
3047 3118 *
3048 3119 * 1. Chipset is initialized at the first time,
3049 3120 * Clear all the multiple unicast addresses.
3050 3121 *
3051 3122 * 2. Chipset is reset
3052 3123 * Recover the multiple unicast addresses from the
3053 3124 * software data structure to the RAR registers.
3054 3125 */
3055 3126 if (!ixgbe->unicst_init) {
3056 3127 /*
3057 3128 * Initialize the multiple unicast addresses
3058 3129 */
3059 3130 ixgbe->unicst_total = hw->mac.num_rar_entries;
3060 3131 ixgbe->unicst_avail = ixgbe->unicst_total;
3061 3132 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3062 3133 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
3063 3134 bzero(mac_addr, ETHERADDRL);
3064 3135 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
3065 3136 ixgbe->unicst_addr[slot].mac.set = 0;
3066 3137 }
3067 3138 ixgbe->unicst_init = B_TRUE;
3068 3139 } else {
3069 3140 /* Re-configure the RAR registers */
3070 3141 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3071 3142 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
3072 3143 if (ixgbe->unicst_addr[slot].mac.set == 1) {
3073 3144 (void) ixgbe_set_rar(hw, slot, mac_addr,
3074 3145 ixgbe->unicst_addr[slot].mac.group_index,
3075 3146 IXGBE_RAH_AV);
3076 3147 } else {
3077 3148 bzero(mac_addr, ETHERADDRL);
3078 3149 (void) ixgbe_set_rar(hw, slot, mac_addr,
3079 3150 NULL, NULL);
3080 3151 }
3081 3152 }
3082 3153 }
3083 3154 }
3084 3155
3085 3156 /*
3086 3157 * ixgbe_unicst_find - Find the slot for the specified unicast address
3087 3158 */
3088 3159 int
3089 3160 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
3090 3161 {
3091 3162 int slot;
3092 3163
3093 3164 ASSERT(mutex_owned(&ixgbe->gen_lock));
3094 3165
3095 3166 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
3096 3167 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
3097 3168 mac_addr, ETHERADDRL) == 0)
3098 3169 return (slot);
3099 3170 }
3100 3171
3101 3172 return (-1);
3102 3173 }
3103 3174
3104 3175 /*
3105 3176 * ixgbe_multicst_add - Add a multicst address.
3106 3177 */
3107 3178 int
3108 3179 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3109 3180 {
3110 3181 ASSERT(mutex_owned(&ixgbe->gen_lock));
3111 3182
3112 3183 if ((multiaddr[0] & 01) == 0) {
3113 3184 return (EINVAL);
3114 3185 }
3115 3186
3116 3187 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
3117 3188 return (ENOENT);
3118 3189 }
3119 3190
3120 3191 bcopy(multiaddr,
3121 3192 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
3122 3193 ixgbe->mcast_count++;
3123 3194
3124 3195 /*
3125 3196 * Update the multicast table in the hardware
3126 3197 */
3127 3198 ixgbe_setup_multicst(ixgbe);
3128 3199
3129 3200 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3130 3201 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3131 3202 return (EIO);
3132 3203 }
3133 3204
3134 3205 return (0);
3135 3206 }
3136 3207
3137 3208 /*
3138 3209 * ixgbe_multicst_remove - Remove a multicst address.
3139 3210 */
3140 3211 int
3141 3212 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
3142 3213 {
3143 3214 int i;
3144 3215
3145 3216 ASSERT(mutex_owned(&ixgbe->gen_lock));
3146 3217
3147 3218 for (i = 0; i < ixgbe->mcast_count; i++) {
3148 3219 if (bcmp(multiaddr, &ixgbe->mcast_table[i],
3149 3220 ETHERADDRL) == 0) {
3150 3221 for (i++; i < ixgbe->mcast_count; i++) {
3151 3222 ixgbe->mcast_table[i - 1] =
3152 3223 ixgbe->mcast_table[i];
3153 3224 }
3154 3225 ixgbe->mcast_count--;
3155 3226 break;
3156 3227 }
3157 3228 }
3158 3229
3159 3230 /*
3160 3231 * Update the multicast table in the hardware
3161 3232 */
3162 3233 ixgbe_setup_multicst(ixgbe);
3163 3234
3164 3235 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3165 3236 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3166 3237 return (EIO);
3167 3238 }
3168 3239
3169 3240 return (0);
3170 3241 }
3171 3242
3172 3243 /*
3173 3244 * ixgbe_setup_multicast - Setup multicast data structures.
3174 3245 *
3175 3246 * This routine initializes all of the multicast related structures
3176 3247 * and save them in the hardware registers.
3177 3248 */
3178 3249 static void
3179 3250 ixgbe_setup_multicst(ixgbe_t *ixgbe)
3180 3251 {
3181 3252 uint8_t *mc_addr_list;
3182 3253 uint32_t mc_addr_count;
3183 3254 struct ixgbe_hw *hw = &ixgbe->hw;
3184 3255
3185 3256 ASSERT(mutex_owned(&ixgbe->gen_lock));
3186 3257
3187 3258 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
3188 3259
3189 3260 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
3190 3261 mc_addr_count = ixgbe->mcast_count;
3191 3262
3192 3263 /*
3193 3264 * Update the multicast addresses to the MTA registers
3194 3265 */
3195 3266 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
3196 3267 ixgbe_mc_table_itr, TRUE);
3197 3268 }
3198 3269
3199 3270 /*
3200 3271 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
3201 3272 *
3202 3273 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
3203 3274 * Different chipsets may have different allowed configuration of vmdq and rss.
3204 3275 */
3205 3276 static void
3206 3277 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
3207 3278 {
3208 3279 struct ixgbe_hw *hw = &ixgbe->hw;
3209 3280 uint32_t ring_per_group;
3210 3281
3211 3282 switch (hw->mac.type) {
3212 3283 case ixgbe_mac_82598EB:
3213 3284 /*
3214 3285 * 82598 supports the following combination:
3215 3286 * vmdq no. x rss no.
3216 3287 * [5..16] x 1
3217 3288 * [1..4] x [1..16]
3218 3289 * However 8 rss queue per pool (vmdq) is sufficient for
3219 3290 * most cases.
3220 3291 */
3221 3292 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3222 3293 if (ixgbe->num_rx_groups > 4) {
3223 3294 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
3224 3295 } else {
3225 3296 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3226 3297 min(8, ring_per_group);
3227 3298 }
3228 3299
3229 3300 break;
3230 3301
3231 3302 case ixgbe_mac_82599EB:
3232 3303 case ixgbe_mac_X540:
3233 3304 case ixgbe_mac_X550:
3234 3305 case ixgbe_mac_X550EM_x:
3235 3306 /*
3236 3307 * 82599 supports the following combination:
3237 3308 * vmdq no. x rss no.
3238 3309 * [33..64] x [1..2]
3239 3310 * [2..32] x [1..4]
3240 3311 * 1 x [1..16]
3241 3312 * However 8 rss queue per pool (vmdq) is sufficient for
3242 3313 * most cases.
3243 3314 *
3244 3315 * For now, treat X540 and X550 like the 82599.
3245 3316 */
3246 3317 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3247 3318 if (ixgbe->num_rx_groups == 1) {
3248 3319 ixgbe->num_rx_rings = min(8, ring_per_group);
3249 3320 } else if (ixgbe->num_rx_groups <= 32) {
3250 3321 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3251 3322 min(4, ring_per_group);
3252 3323 } else if (ixgbe->num_rx_groups <= 64) {
3253 3324 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3254 3325 min(2, ring_per_group);
3255 3326 }
3256 3327 break;
3257 3328
3258 3329 default:
3259 3330 break;
3260 3331 }
3261 3332
3262 3333 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
3263 3334
3264 3335 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
3265 3336 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3266 3337 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
3267 3338 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
3268 3339 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
3269 3340 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
3270 3341 } else {
3271 3342 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
3272 3343 }
3273 3344
3274 3345 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
3275 3346 ixgbe->num_rx_groups, ixgbe->num_rx_rings);
3276 3347 }
3277 3348
3278 3349 /*
3279 3350 * ixgbe_get_conf - Get driver configurations set in driver.conf.
3280 3351 *
3281 3352 * This routine gets user-configured values out of the configuration
3282 3353 * file ixgbe.conf.
3283 3354 *
3284 3355 * For each configurable value, there is a minimum, a maximum, and a
3285 3356 * default.
3286 3357 * If user does not configure a value, use the default.
3287 3358 * If user configures below the minimum, use the minumum.
3288 3359 * If user configures above the maximum, use the maxumum.
3289 3360 */
3290 3361 static void
3291 3362 ixgbe_get_conf(ixgbe_t *ixgbe)
3292 3363 {
3293 3364 struct ixgbe_hw *hw = &ixgbe->hw;
3294 3365 uint32_t flow_control;
3295 3366
3296 3367 /*
3297 3368 * ixgbe driver supports the following user configurations:
3298 3369 *
3299 3370 * Jumbo frame configuration:
3300 3371 * default_mtu
3301 3372 *
3302 3373 * Ethernet flow control configuration:
3303 3374 * flow_control
3304 3375 *
3305 3376 * Multiple rings configurations:
3306 3377 * tx_queue_number
3307 3378 * tx_ring_size
3308 3379 * rx_queue_number
3309 3380 * rx_ring_size
3310 3381 *
3311 3382 * Call ixgbe_get_prop() to get the value for a specific
3312 3383 * configuration parameter.
3313 3384 */
3314 3385
3315 3386 /*
3316 3387 * Jumbo frame configuration - max_frame_size controls host buffer
3317 3388 * allocation, so includes MTU, ethernet header, vlan tag and
3318 3389 * frame check sequence.
3319 3390 */
3320 3391 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
3321 3392 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
3322 3393
3323 3394 ixgbe->max_frame_size = ixgbe->default_mtu +
3324 3395 sizeof (struct ether_vlan_header) + ETHERFCSL;
3325 3396
3326 3397 /*
3327 3398 * Ethernet flow control configuration
3328 3399 */
3329 3400 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
3330 3401 ixgbe_fc_none, 3, ixgbe_fc_none);
3331 3402 if (flow_control == 3)
3332 3403 flow_control = ixgbe_fc_default;
3333 3404
3334 3405 /*
3335 3406 * fc.requested mode is what the user requests. After autoneg,
3336 3407 * fc.current_mode will be the flow_control mode that was negotiated.
3337 3408 */
3338 3409 hw->fc.requested_mode = flow_control;
3339 3410
3340 3411 /*
3341 3412 * Multiple rings configurations
3342 3413 */
3343 3414 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
3344 3415 ixgbe->capab->min_tx_que_num,
3345 3416 ixgbe->capab->max_tx_que_num,
3346 3417 ixgbe->capab->def_tx_que_num);
3347 3418 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
3348 3419 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
3349 3420
3350 3421 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
3351 3422 ixgbe->capab->min_rx_que_num,
3352 3423 ixgbe->capab->max_rx_que_num,
3353 3424 ixgbe->capab->def_rx_que_num);
3354 3425 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
3355 3426 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
3356 3427
3357 3428 /*
3358 3429 * Multiple groups configuration
3359 3430 */
3360 3431 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3361 3432 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3362 3433 ixgbe->capab->def_rx_grp_num);
3363 3434
3364 3435 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3365 3436 0, 1, DEFAULT_MR_ENABLE);
3366 3437
3367 3438 if (ixgbe->mr_enable == B_FALSE) {
3368 3439 ixgbe->num_tx_rings = 1;
3369 3440 ixgbe->num_rx_rings = 1;
3370 3441 ixgbe->num_rx_groups = 1;
3371 3442 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3372 3443 } else {
3373 3444 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3374 3445 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3375 3446 /*
3376 3447 * The combination of num_rx_rings and num_rx_groups
3377 3448 * may be not supported by h/w. We need to adjust
3378 3449 * them to appropriate values.
3379 3450 */
3380 3451 ixgbe_setup_vmdq_rss_conf(ixgbe);
3381 3452 }
3382 3453
3383 3454 /*
3384 3455 * Tunable used to force an interrupt type. The only use is
3385 3456 * for testing of the lesser interrupt types.
3386 3457 * 0 = don't force interrupt type
3387 3458 * 1 = force interrupt type MSI-X
3388 3459 * 2 = force interrupt type MSI
3389 3460 * 3 = force interrupt type Legacy
3390 3461 */
3391 3462 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3392 3463 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3393 3464
3394 3465 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3395 3466 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3396 3467 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3397 3468 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3398 3469 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3399 3470 0, 1, DEFAULT_LSO_ENABLE);
3400 3471 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3401 3472 0, 1, DEFAULT_LRO_ENABLE);
3402 3473 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3403 3474 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3404 3475 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3405 3476 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3406 3477
3407 3478 /* Head Write Back not recommended for 82599, X540 and X550 */
3408 3479 if (hw->mac.type == ixgbe_mac_82599EB ||
3409 3480 hw->mac.type == ixgbe_mac_X540 ||
3410 3481 hw->mac.type == ixgbe_mac_X550 ||
3411 3482 hw->mac.type == ixgbe_mac_X550EM_x) {
3412 3483 ixgbe->tx_head_wb_enable = B_FALSE;
3413 3484 }
3414 3485
3415 3486 /*
3416 3487 * ixgbe LSO needs the tx h/w checksum support.
3417 3488 * LSO will be disabled if tx h/w checksum is not
3418 3489 * enabled.
3419 3490 */
3420 3491 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3421 3492 ixgbe->lso_enable = B_FALSE;
3422 3493 }
3423 3494
3424 3495 /*
3425 3496 * ixgbe LRO needs the rx h/w checksum support.
3426 3497 * LRO will be disabled if rx h/w checksum is not
3427 3498 * enabled.
3428 3499 */
3429 3500 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3430 3501 ixgbe->lro_enable = B_FALSE;
3431 3502 }
3432 3503
3433 3504 /*
3434 3505 * ixgbe LRO only supported by 82599, X540 and X550
3435 3506 */
3436 3507 if (hw->mac.type == ixgbe_mac_82598EB) {
3437 3508 ixgbe->lro_enable = B_FALSE;
3438 3509 }
3439 3510 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3440 3511 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3441 3512 DEFAULT_TX_COPY_THRESHOLD);
3442 3513 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3443 3514 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3444 3515 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3445 3516 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3446 3517 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3447 3518 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3448 3519 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3449 3520 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3450 3521 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3451 3522
3452 3523 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3453 3524 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3454 3525 DEFAULT_RX_COPY_THRESHOLD);
3455 3526 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3456 3527 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3457 3528 DEFAULT_RX_LIMIT_PER_INTR);
3458 3529
3459 3530 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3460 3531 ixgbe->capab->min_intr_throttle,
3461 3532 ixgbe->capab->max_intr_throttle,
3462 3533 ixgbe->capab->def_intr_throttle);
3463 3534 /*
3464 3535 * 82599, X540 and X550 require the interrupt throttling rate is
3465 3536 * a multiple of 8. This is enforced by the register definiton.
3466 3537 */
3467 3538 if (hw->mac.type == ixgbe_mac_82599EB ||
3468 3539 hw->mac.type == ixgbe_mac_X540 ||
3469 3540 hw->mac.type == ixgbe_mac_X550 ||
3470 3541 hw->mac.type == ixgbe_mac_X550EM_x)
3471 3542 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3472 3543
3473 3544 hw->allow_unsupported_sfp = ixgbe_get_prop(ixgbe,
3474 3545 PROP_ALLOW_UNSUPPORTED_SFP, 0, 1, DEFAULT_ALLOW_UNSUPPORTED_SFP);
3475 3546 }
3476 3547
3477 3548 static void
3478 3549 ixgbe_init_params(ixgbe_t *ixgbe)
3479 3550 {
3480 3551 struct ixgbe_hw *hw = &ixgbe->hw;
3481 3552 ixgbe_link_speed speeds_supported = 0;
3482 3553 boolean_t negotiate;
3483 3554
3484 3555 /*
3485 3556 * Get a list of speeds the adapter supports. If the hw struct hasn't
3486 3557 * been populated with this information yet, retrieve it from the
3487 3558 * adapter and save it to our own variable.
3488 3559 *
3489 3560 * On certain adapters, such as ones which use SFPs, the contents of
3490 3561 * hw->phy.speeds_supported (and hw->phy.autoneg_advertised) are not
3491 3562 * updated, so we must rely on calling ixgbe_get_link_capabilities()
3492 3563 * in order to ascertain the speeds which we are capable of supporting,
3493 3564 * and in the case of SFP-equipped adapters, which speed we are
3494 3565 * advertising. If ixgbe_get_link_capabilities() fails for some reason,
3495 3566 * we'll go with a default list of speeds as a last resort.
3496 3567 */
3497 3568 speeds_supported = hw->phy.speeds_supported;
3498 3569
3499 3570 if (speeds_supported == 0) {
3500 3571 if (ixgbe_get_link_capabilities(hw, &speeds_supported,
3501 3572 &negotiate) != IXGBE_SUCCESS) {
3502 3573 if (hw->mac.type == ixgbe_mac_82598EB) {
3503 3574 speeds_supported =
3504 3575 IXGBE_LINK_SPEED_82598_AUTONEG;
3505 3576 } else {
3506 3577 speeds_supported =
3507 3578 IXGBE_LINK_SPEED_82599_AUTONEG;
3508 3579 }
3509 3580 }
3510 3581 }
3511 3582 ixgbe->speeds_supported = speeds_supported;
3512 3583
3513 3584 /*
3514 3585 * By default, all supported speeds are enabled and advertised.
3515 3586 */
3516 3587 if (speeds_supported & IXGBE_LINK_SPEED_10GB_FULL) {
3517 3588 ixgbe->param_en_10000fdx_cap = 1;
3518 3589 ixgbe->param_adv_10000fdx_cap = 1;
3519 3590 } else {
3520 3591 ixgbe->param_en_10000fdx_cap = 0;
3521 3592 ixgbe->param_adv_10000fdx_cap = 0;
3522 3593 }
3523 3594
3524 3595 if (speeds_supported & IXGBE_LINK_SPEED_5GB_FULL) {
3525 3596 ixgbe->param_en_5000fdx_cap = 1;
3526 3597 ixgbe->param_adv_5000fdx_cap = 1;
3527 3598 } else {
3528 3599 ixgbe->param_en_5000fdx_cap = 0;
3529 3600 ixgbe->param_adv_5000fdx_cap = 0;
3530 3601 }
3531 3602
3532 3603 if (speeds_supported & IXGBE_LINK_SPEED_2_5GB_FULL) {
3533 3604 ixgbe->param_en_2500fdx_cap = 1;
3534 3605 ixgbe->param_adv_2500fdx_cap = 1;
3535 3606 } else {
3536 3607 ixgbe->param_en_2500fdx_cap = 0;
3537 3608 ixgbe->param_adv_2500fdx_cap = 0;
3538 3609 }
3539 3610
3540 3611 if (speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) {
3541 3612 ixgbe->param_en_1000fdx_cap = 1;
3542 3613 ixgbe->param_adv_1000fdx_cap = 1;
3543 3614 } else {
3544 3615 ixgbe->param_en_1000fdx_cap = 0;
3545 3616 ixgbe->param_adv_1000fdx_cap = 0;
3546 3617 }
3547 3618
3548 3619 if (speeds_supported & IXGBE_LINK_SPEED_100_FULL) {
3549 3620 ixgbe->param_en_100fdx_cap = 1;
3550 3621 ixgbe->param_adv_100fdx_cap = 1;
3551 3622 } else {
3552 3623 ixgbe->param_en_100fdx_cap = 0;
3553 3624 ixgbe->param_adv_100fdx_cap = 0;
3554 3625 }
3555 3626
3556 3627 ixgbe->param_pause_cap = 1;
3557 3628 ixgbe->param_asym_pause_cap = 1;
3558 3629 ixgbe->param_rem_fault = 0;
3559 3630
3560 3631 ixgbe->param_adv_autoneg_cap = 1;
3561 3632 ixgbe->param_adv_pause_cap = 1;
3562 3633 ixgbe->param_adv_asym_pause_cap = 1;
3563 3634 ixgbe->param_adv_rem_fault = 0;
3564 3635
3565 3636 ixgbe->param_lp_10000fdx_cap = 0;
3566 3637 ixgbe->param_lp_5000fdx_cap = 0;
3567 3638 ixgbe->param_lp_2500fdx_cap = 0;
3568 3639 ixgbe->param_lp_1000fdx_cap = 0;
3569 3640 ixgbe->param_lp_100fdx_cap = 0;
3570 3641 ixgbe->param_lp_autoneg_cap = 0;
3571 3642 ixgbe->param_lp_pause_cap = 0;
3572 3643 ixgbe->param_lp_asym_pause_cap = 0;
3573 3644 ixgbe->param_lp_rem_fault = 0;
3574 3645 }
3575 3646
3576 3647 /*
3577 3648 * ixgbe_get_prop - Get a property value out of the configuration file
3578 3649 * ixgbe.conf.
3579 3650 *
3580 3651 * Caller provides the name of the property, a default value, a minimum
3581 3652 * value, and a maximum value.
3582 3653 *
3583 3654 * Return configured value of the property, with default, minimum and
3584 3655 * maximum properly applied.
3585 3656 */
3586 3657 static int
3587 3658 ixgbe_get_prop(ixgbe_t *ixgbe,
3588 3659 char *propname, /* name of the property */
3589 3660 int minval, /* minimum acceptable value */
3590 3661 int maxval, /* maximim acceptable value */
3591 3662 int defval) /* default value */
3592 3663 {
3593 3664 int value;
3594 3665
3595 3666 /*
3596 3667 * Call ddi_prop_get_int() to read the conf settings
3597 3668 */
3598 3669 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3599 3670 DDI_PROP_DONTPASS, propname, defval);
3600 3671 if (value > maxval)
3601 3672 value = maxval;
3602 3673
3603 3674 if (value < minval)
3604 3675 value = minval;
3605 3676
3606 3677 return (value);
3607 3678 }
3608 3679
3609 3680 /*
3610 3681 * ixgbe_driver_setup_link - Using the link properties to setup the link.
3611 3682 */
3612 3683 int
3613 3684 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3614 3685 {
3615 3686 struct ixgbe_hw *hw = &ixgbe->hw;
3616 3687 ixgbe_link_speed advertised = 0;
3617 3688
3618 3689 /*
3619 3690 * Assemble a list of enabled speeds to auto-negotiate with.
3620 3691 */
3621 3692 if (ixgbe->param_en_10000fdx_cap == 1)
3622 3693 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3623 3694
3624 3695 if (ixgbe->param_en_5000fdx_cap == 1)
3625 3696 advertised |= IXGBE_LINK_SPEED_5GB_FULL;
3626 3697
3627 3698 if (ixgbe->param_en_2500fdx_cap == 1)
3628 3699 advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
3629 3700
3630 3701 if (ixgbe->param_en_1000fdx_cap == 1)
3631 3702 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3632 3703
3633 3704 if (ixgbe->param_en_100fdx_cap == 1)
3634 3705 advertised |= IXGBE_LINK_SPEED_100_FULL;
3635 3706
3636 3707 /*
3637 3708 * As a last resort, autoneg with a default list of speeds.
3638 3709 */
3639 3710 if (ixgbe->param_adv_autoneg_cap == 1 && advertised == 0) {
3640 3711 ixgbe_notice(ixgbe, "Invalid link settings. Setting link "
3641 3712 "to autonegotiate with full capabilities.");
3642 3713
3643 3714 if (hw->mac.type == ixgbe_mac_82598EB)
3644 3715 advertised = IXGBE_LINK_SPEED_82598_AUTONEG;
3645 3716 else
3646 3717 advertised = IXGBE_LINK_SPEED_82599_AUTONEG;
3647 3718 }
3648 3719
3649 3720 if (setup_hw) {
3650 3721 if (ixgbe_setup_link(&ixgbe->hw, advertised,
3651 3722 ixgbe->param_adv_autoneg_cap) != IXGBE_SUCCESS) {
3652 3723 ixgbe_notice(ixgbe, "Setup link failed on this "
3653 3724 "device.");
3654 3725 return (IXGBE_FAILURE);
3655 3726 }
3656 3727 }
3657 3728
3658 3729 return (IXGBE_SUCCESS);
3659 3730 }
3660 3731
/*
 * ixgbe_driver_link_check - Link status processing.
 *
 * This function can be called in both kernel context and interrupt context.
 * Callers must hold gen_lock (asserted below).  It queries the hardware for
 * the current link state, updates the soft-state link fields on a change,
 * and notifies the MAC layer when the reported state actually changed.
 */
static void
ixgbe_driver_link_check(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
	boolean_t link_up = B_FALSE;
	boolean_t link_changed = B_FALSE;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	(void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);
	if (link_up) {
		ixgbe->link_check_complete = B_TRUE;

		/*
		 * The Link is up, check whether it was marked as down earlier
		 */
		if (ixgbe->link_state != LINK_STATE_UP) {

			/* Link is up, enable flow control settings */
			(void) ixgbe_fc_enable(hw);

			switch (speed) {
			case IXGBE_LINK_SPEED_10GB_FULL:
				ixgbe->link_speed = SPEED_10GB;
				break;
			case IXGBE_LINK_SPEED_5GB_FULL:
				ixgbe->link_speed = SPEED_5GB;
				break;
			case IXGBE_LINK_SPEED_2_5GB_FULL:
				ixgbe->link_speed = SPEED_2_5GB;
				break;
			case IXGBE_LINK_SPEED_1GB_FULL:
				ixgbe->link_speed = SPEED_1GB;
				break;
			case IXGBE_LINK_SPEED_100_FULL:
				ixgbe->link_speed = SPEED_100;
			}
			ixgbe->link_duplex = LINK_DUPLEX_FULL;
			ixgbe->link_state = LINK_STATE_UP;
			link_changed = B_TRUE;
		}
	} else {
		/*
		 * Only declare the link down once the initial check has
		 * completed or the grace period (link_check_hrtime, set by
		 * the SFP taskq) has expired; this avoids reporting a
		 * transient down state while link setup is still running.
		 */
		if (ixgbe->link_check_complete == B_TRUE ||
		    (ixgbe->link_check_complete == B_FALSE &&
		    gethrtime() >= ixgbe->link_check_hrtime)) {
			/*
			 * The link is really down
			 */
			ixgbe->link_check_complete = B_TRUE;

			if (ixgbe->link_state != LINK_STATE_DOWN) {
				ixgbe->link_speed = 0;
				ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
				ixgbe->link_state = LINK_STATE_DOWN;
				link_changed = B_TRUE;
			}
		}
	}

	/*
	 * If we are in an interrupt context, need to re-enable the
	 * interrupt, which was automasked
	 */
	if (servicing_interrupt() != 0) {
		ixgbe->eims |= IXGBE_EICR_LSC;
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	}

	/* Tell the MAC layer about the new state only if it changed. */
	if (link_changed) {
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}
}
3738 3810
/*
 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
 *
 * Runs in taskq context with the interrupt cause bits snapshotted in
 * ixgbe->eicr.  Identifies the (possibly newly plugged) SFP module and,
 * depending on which GPI raised the interrupt, performs module and/or
 * multispeed fiber link setup, then refreshes the driver's view of the
 * hardware state.
 */
static void
ixgbe_sfp_check(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	uint32_t eicr = ixgbe->eicr;
	struct ixgbe_hw *hw = &ixgbe->hw;

	mutex_enter(&ixgbe->gen_lock);
	(void) hw->phy.ops.identify_sfp(hw);
	if (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) {
		/* clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));

		/* if link up, do multispeed fiber setup */
		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
		    B_TRUE);
		ixgbe_driver_link_check(ixgbe);
		ixgbe_get_hw_state(ixgbe);
	} else if (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)) {
		/* clear the interrupt */
		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2_BY_MAC(hw));

		/* if link up, do sfp module setup */
		(void) hw->mac.ops.setup_sfp(hw);

		/* do multispeed fiber setup */
		(void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
		    B_TRUE);
		ixgbe_driver_link_check(ixgbe);
		ixgbe_get_hw_state(ixgbe);
	}
	mutex_exit(&ixgbe->gen_lock);

	/*
	 * We need to fully re-check the link later: arm a grace period so
	 * ixgbe_driver_link_check() does not report a spurious link-down
	 * while the setup just initiated is still completing.
	 */
	ixgbe->link_check_complete = B_FALSE;
	ixgbe->link_check_hrtime = gethrtime() +
	    (IXGBE_LINK_UP_TIME * 100000000ULL);
}
3782 3854
/*
 * ixgbe_overtemp_check - overtemp module processing done in taskq
 *
 * This routine will only be called on adapters with temperature sensor.
 * The indication of over-temperature can be either SDP0 interrupt or the link
 * status change interrupt.  If the PHY confirms the over-temperature
 * condition, the adapter is shut down and the fault is reported via FMA.
 */
static void
ixgbe_overtemp_check(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr = ixgbe->eicr;
	ixgbe_link_speed speed;
	boolean_t link_up;

	mutex_enter(&ixgbe->gen_lock);

	/* make sure we know current state of link */
	(void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);

	/*
	 * check over-temp condition: SDP0 while the link is down, or a
	 * link status change, may both signal overheating on these parts.
	 */
	if (((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) && (!link_up)) ||
	    (eicr & IXGBE_EICR_LSC)) {
		if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);

			/*
			 * Disable the adapter interrupts
			 */
			ixgbe_disable_adapter_interrupts(ixgbe);

			/*
			 * Disable Rx/Tx units
			 */
			(void) ixgbe_stop_adapter(hw);

			ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
			ixgbe_error(ixgbe,
			    "Problem: Network adapter has been stopped "
			    "because it has overheated");
			ixgbe_error(ixgbe,
			    "Action: Restart the computer. "
			    "If the problem persists, power off the system "
			    "and replace the adapter");
		}
	}

	/* write to clear the interrupt */
	IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);

	mutex_exit(&ixgbe->gen_lock);
}
3836 3908
/*
 * ixgbe_phy_check - taskq to process interrupts from an external PHY
 *
 * This routine will only be called on adapters with external PHYs
 * (such as X550) that may be trying to raise our attention to some event.
 * Currently, this is limited to claiming PHY overtemperature and link status
 * change (LSC) events, however this may expand to include other things in
 * future adapters.
 */
static void
ixgbe_phy_check(void *arg)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int rv;

	mutex_enter(&ixgbe->gen_lock);

	/*
	 * X550 baseT PHY overtemp and LSC events are handled here.
	 *
	 * If an overtemp event occurs, it will be reflected in the
	 * return value of phy.ops.handle_lasi() and the common code will
	 * automatically power off the baseT PHY. This is our cue to trigger
	 * an FMA event.
	 *
	 * If a link status change event occurs, phy.ops.handle_lasi() will
	 * automatically initiate a link setup between the integrated KR PHY
	 * and the external X557 PHY to ensure that the link speed between
	 * them matches the link speed of the baseT link.
	 */
	rv = ixgbe_handle_lasi(hw);

	/* Only the overtemp return code requires action here; LSC was
	 * already handled inside ixgbe_handle_lasi(). */
	if (rv == IXGBE_ERR_OVERTEMP) {
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);

		/*
		 * Disable the adapter interrupts
		 */
		ixgbe_disable_adapter_interrupts(ixgbe);

		/*
		 * Disable Rx/Tx units
		 */
		(void) ixgbe_stop_adapter(hw);

		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		ixgbe_error(ixgbe,
		    "Problem: Network adapter has been stopped due to a "
		    "overtemperature event being detected.");
		ixgbe_error(ixgbe,
		    "Action: Shut down or restart the computer. If the issue "
		    "persists, please take action in accordance with the "
		    "recommendations from your system vendor.");
	}

	mutex_exit(&ixgbe->gen_lock);
}
3895 3967
3896 3968 /*
3897 3969 * ixgbe_link_timer - timer for link status detection
3898 3970 */
3899 3971 static void
3900 3972 ixgbe_link_timer(void *arg)
3901 3973 {
3902 3974 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3903 3975
3904 3976 mutex_enter(&ixgbe->gen_lock);
3905 3977 ixgbe_driver_link_check(ixgbe);
3906 3978 mutex_exit(&ixgbe->gen_lock);
3907 3979 }
3908 3980
3909 3981 /*
3910 3982 * ixgbe_local_timer - Driver watchdog function.
3911 3983 *
3912 3984 * This function will handle the transmit stall check and other routines.
3913 3985 */
3914 3986 static void
3915 3987 ixgbe_local_timer(void *arg)
3916 3988 {
3917 3989 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3918 3990
3919 3991 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3920 3992 goto out;
3921 3993
3922 3994 if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3923 3995 ixgbe->reset_count++;
3924 3996 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3925 3997 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3926 3998 goto out;
3927 3999 }
3928 4000
3929 4001 if (ixgbe_stall_check(ixgbe)) {
3930 4002 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3931 4003 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3932 4004
3933 4005 ixgbe->reset_count++;
3934 4006 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3935 4007 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3936 4008 }
3937 4009
3938 4010 out:
3939 4011 ixgbe_restart_watchdog_timer(ixgbe);
3940 4012 }
3941 4013
3942 4014 /*
3943 4015 * ixgbe_stall_check - Check for transmit stall.
3944 4016 *
3945 4017 * This function checks if the adapter is stalled (in transmit).
3946 4018 *
3947 4019 * It is called each time the watchdog timeout is invoked.
3948 4020 * If the transmit descriptor reclaim continuously fails,
3949 4021 * the watchdog value will increment by 1. If the watchdog
3950 4022 * value exceeds the threshold, the ixgbe is assumed to
3951 4023 * have stalled and need to be reset.
3952 4024 */
3953 4025 static boolean_t
3954 4026 ixgbe_stall_check(ixgbe_t *ixgbe)
3955 4027 {
3956 4028 ixgbe_tx_ring_t *tx_ring;
3957 4029 boolean_t result;
3958 4030 int i;
3959 4031
3960 4032 if (ixgbe->link_state != LINK_STATE_UP)
3961 4033 return (B_FALSE);
3962 4034
3963 4035 /*
3964 4036 * If any tx ring is stalled, we'll reset the chipset
3965 4037 */
3966 4038 result = B_FALSE;
3967 4039 for (i = 0; i < ixgbe->num_tx_rings; i++) {
3968 4040 tx_ring = &ixgbe->tx_rings[i];
3969 4041 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
3970 4042 tx_ring->tx_recycle(tx_ring);
3971 4043 }
3972 4044
3973 4045 if (tx_ring->recycle_fail > 0)
3974 4046 tx_ring->stall_watchdog++;
3975 4047 else
3976 4048 tx_ring->stall_watchdog = 0;
3977 4049
3978 4050 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
3979 4051 result = B_TRUE;
3980 4052 break;
3981 4053 }
3982 4054 }
3983 4055
3984 4056 if (result) {
3985 4057 tx_ring->stall_watchdog = 0;
3986 4058 tx_ring->recycle_fail = 0;
3987 4059 }
3988 4060
3989 4061 return (result);
3990 4062 }
3991 4063
3992 4064
3993 4065 /*
3994 4066 * is_valid_mac_addr - Check if the mac address is valid.
3995 4067 */
3996 4068 static boolean_t
3997 4069 is_valid_mac_addr(uint8_t *mac_addr)
3998 4070 {
3999 4071 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
4000 4072 const uint8_t addr_test2[6] =
4001 4073 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
4002 4074
4003 4075 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
4004 4076 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
4005 4077 return (B_FALSE);
4006 4078
4007 4079 return (B_TRUE);
4008 4080 }
4009 4081
/*
 * ixgbe_find_mac_address - Apply any platform override of the MAC address.
 *
 * On SPARC, the OBP properties "local-mac-address", "local-mac-address?"
 * and "mac-address" may override the address already read from the chip;
 * the properties are consulted in that order, and the last valid one wins
 * (see the per-property comments below).  On other platforms the
 * EEPROM-derived address already in hw->mac.addr is used unchanged.
 * Always returns B_TRUE.
 */
static boolean_t
ixgbe_find_mac_address(ixgbe_t *ixgbe)
{
#ifdef __sparc
	struct ixgbe_hw *hw = &ixgbe->hw;
	uchar_t *bytes;
	struct ether_addr sysaddr;
	uint_t nelts;
	int err;
	boolean_t found = B_FALSE;

	/*
	 * The "vendor's factory-set address" may already have
	 * been extracted from the chip, but if the property
	 * "local-mac-address" is set we use that instead.
	 *
	 * We check whether it looks like an array of 6
	 * bytes (which it should, if OBP set it). If we can't
	 * make sense of it this way, we'll ignore it.
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Look up the OBP property "local-mac-address?". If the user has set
	 * 'local-mac-address? = false', use "the system address" instead.
	 */
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
	    "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
		if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
			if (localetheraddr(NULL, &sysaddr) != 0) {
				bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
				found = B_TRUE;
			}
		}
		ddi_prop_free(bytes);
	}

	/*
	 * Finally(!), if there's a valid "mac-address" property (created
	 * if we netbooted from this interface), we must use this instead
	 * of any of the above to ensure that the NFS/install server doesn't
	 * get confused by the address changing as illumos takes over!
	 */
	err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
	    DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
	if (err == DDI_PROP_SUCCESS) {
		if (nelts == ETHERADDRL) {
			while (nelts--)
				hw->mac.addr[nelts] = bytes[nelts];
			found = B_TRUE;
		}
		ddi_prop_free(bytes);
	}

	if (found) {
		/* Keep the permanent address in sync with the override. */
		bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
		return (B_TRUE);
	}
#else
	_NOTE(ARGUNUSED(ixgbe));
#endif

	return (B_TRUE);
}
4083 4155
4084 4156 #pragma inline(ixgbe_arm_watchdog_timer)
4085 4157 static void
4086 4158 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
4087 4159 {
4088 4160 /*
4089 4161 * Fire a watchdog timer
4090 4162 */
4091 4163 ixgbe->watchdog_tid =
4092 4164 timeout(ixgbe_local_timer,
4093 4165 (void *)ixgbe, 1 * drv_usectohz(1000000));
4094 4166
4095 4167 }
4096 4168
4097 4169 /*
4098 4170 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
4099 4171 */
4100 4172 void
4101 4173 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
4102 4174 {
4103 4175 mutex_enter(&ixgbe->watchdog_lock);
4104 4176
4105 4177 if (!ixgbe->watchdog_enable) {
4106 4178 ixgbe->watchdog_enable = B_TRUE;
4107 4179 ixgbe->watchdog_start = B_TRUE;
4108 4180 ixgbe_arm_watchdog_timer(ixgbe);
4109 4181 }
4110 4182
4111 4183 mutex_exit(&ixgbe->watchdog_lock);
4112 4184 }
4113 4185
4114 4186 /*
4115 4187 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
4116 4188 */
4117 4189 void
4118 4190 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
4119 4191 {
4120 4192 timeout_id_t tid;
4121 4193
4122 4194 mutex_enter(&ixgbe->watchdog_lock);
4123 4195
4124 4196 ixgbe->watchdog_enable = B_FALSE;
4125 4197 ixgbe->watchdog_start = B_FALSE;
4126 4198 tid = ixgbe->watchdog_tid;
4127 4199 ixgbe->watchdog_tid = 0;
4128 4200
4129 4201 mutex_exit(&ixgbe->watchdog_lock);
4130 4202
4131 4203 if (tid != 0)
4132 4204 (void) untimeout(tid);
4133 4205 }
4134 4206
4135 4207 /*
4136 4208 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
4137 4209 */
4138 4210 void
4139 4211 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
4140 4212 {
4141 4213 mutex_enter(&ixgbe->watchdog_lock);
4142 4214
4143 4215 if (ixgbe->watchdog_enable) {
4144 4216 if (!ixgbe->watchdog_start) {
4145 4217 ixgbe->watchdog_start = B_TRUE;
4146 4218 ixgbe_arm_watchdog_timer(ixgbe);
4147 4219 }
4148 4220 }
4149 4221
4150 4222 mutex_exit(&ixgbe->watchdog_lock);
4151 4223 }
4152 4224
4153 4225 /*
4154 4226 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
4155 4227 */
4156 4228 static void
4157 4229 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
4158 4230 {
4159 4231 mutex_enter(&ixgbe->watchdog_lock);
4160 4232
4161 4233 if (ixgbe->watchdog_start)
4162 4234 ixgbe_arm_watchdog_timer(ixgbe);
4163 4235
4164 4236 mutex_exit(&ixgbe->watchdog_lock);
4165 4237 }
4166 4238
4167 4239 /*
4168 4240 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
4169 4241 */
4170 4242 void
4171 4243 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
4172 4244 {
4173 4245 timeout_id_t tid;
4174 4246
4175 4247 mutex_enter(&ixgbe->watchdog_lock);
4176 4248
4177 4249 ixgbe->watchdog_start = B_FALSE;
4178 4250 tid = ixgbe->watchdog_tid;
4179 4251 ixgbe->watchdog_tid = 0;
4180 4252
4181 4253 mutex_exit(&ixgbe->watchdog_lock);
4182 4254
4183 4255 if (tid != 0)
4184 4256 (void) untimeout(tid);
4185 4257 }
4186 4258
/*
 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
 */
static void
ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	/*
	 * mask all interrupts off
	 */
	IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);

	/*
	 * for MSI-X, also disable autoclear
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
	}

	/* Flush posted writes so the masking takes effect immediately. */
	IXGBE_WRITE_FLUSH(hw);
}
4209 4281
/*
 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
 *
 * Computes the interrupt mask (cached in ixgbe->eims), the autoclear and
 * automask sets, and the GPIE configuration appropriate for the current
 * interrupt type (MSI-X vs. MSI/legacy) and MAC generation, then writes
 * them all to the hardware.
 */
static void
ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eiac, eiam;
	uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* interrupt types to enable */
	ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */
	ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */
	ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */

	/* enable automask on "other" causes that this adapter can generate */
	eiam = ixgbe->capab->other_intr;

	/*
	 * msi-x mode
	 */
	if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
		/* enable autoclear but not on bits 29:20 */
		eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);

		/* general purpose interrupt enable */
		gpie |= (IXGBE_GPIE_MSIX_MODE
		    | IXGBE_GPIE_PBA_SUPPORT
		    | IXGBE_GPIE_OCD
		    | IXGBE_GPIE_EIAME);
	/*
	 * non-msi-x mode
	 */
	} else {

		/* disable autoclear, leave gpie at default */
		eiac = 0;

		/*
		 * General purpose interrupt enable.
		 * For 82599, X540 and X550, extended interrupt
		 * automask enable only in MSI or MSI-X mode
		 */
		if ((hw->mac.type == ixgbe_mac_82598EB) ||
		    (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
			gpie |= IXGBE_GPIE_EIAME;
		}
	}

	/* Enable specific "other" interrupt types */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		gpie |= ixgbe->capab->other_gpie;
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		gpie |= ixgbe->capab->other_gpie;

		/* Enable RSC Delay 8us when LRO enabled */
		if (ixgbe->lro_enable) {
			gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
		}
		break;

	default:
		break;
	}

	/* write to interrupt control registers */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
	IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
	IXGBE_WRITE_FLUSH(hw);
}
4288 4360
/*
 * ixgbe_loopback_ioctl - Loopback support.
 *
 * Handles the LB_* ioctls: reporting the size of the supported-modes list,
 * listing the supported modes (normal, internal MAC, external), and getting
 * or setting the current mode.  The payload travels in mp->b_cont; every
 * command validates the caller-supplied ioc_count before touching it.
 */
enum ioc_reply
ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
{
	lb_info_sz_t *lbsp;
	lb_property_t *lbpp;
	uint32_t *lbmp;
	uint32_t size;
	uint32_t value;

	if (mp->b_cont == NULL)
		return (IOC_INVAL);

	switch (iocp->ioc_cmd) {
	default:
		return (IOC_INVAL);

	case LB_GET_INFO_SIZE:
		/* Report the byte size of the mode list returned below. */
		size = sizeof (lb_info_sz_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = sizeof (lb_normal);
		value += sizeof (lb_mac);
		value += sizeof (lb_external);

		lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbsp = value;
		break;

	case LB_GET_INFO:
		/* Copy out the supported loopback mode descriptors. */
		value = sizeof (lb_normal);
		value += sizeof (lb_mac);
		value += sizeof (lb_external);

		size = value;
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		value = 0;
		lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;

		lbpp[value++] = lb_normal;
		lbpp[value++] = lb_mac;
		lbpp[value++] = lb_external;
		break;

	case LB_GET_MODE:
		size = sizeof (uint32_t);
		if (iocp->ioc_count != size)
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		*lbmp = ixgbe->loopback_mode;
		break;

	case LB_SET_MODE:
		size = 0;
		if (iocp->ioc_count != sizeof (uint32_t))
			return (IOC_INVAL);

		lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
		if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
			return (IOC_INVAL);
		break;
	}

	iocp->ioc_count = size;
	iocp->ioc_error = 0;

	/* FMA check of the register handle before acknowledging success. */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		return (IOC_INVAL);
	}

	return (IOC_REPLY);
}
4368 4440
4369 4441 /*
4370 4442 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
4371 4443 */
4372 4444 static boolean_t
4373 4445 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
4374 4446 {
4375 4447 if (mode == ixgbe->loopback_mode)
4376 4448 return (B_TRUE);
4377 4449
4378 4450 ixgbe->loopback_mode = mode;
4379 4451
4380 4452 if (mode == IXGBE_LB_NONE) {
4381 4453 /*
4382 4454 * Reset the chip
4383 4455 */
4384 4456 (void) ixgbe_reset(ixgbe);
4385 4457 return (B_TRUE);
4386 4458 }
4387 4459
4388 4460 mutex_enter(&ixgbe->gen_lock);
4389 4461
4390 4462 switch (mode) {
4391 4463 default:
4392 4464 mutex_exit(&ixgbe->gen_lock);
4393 4465 return (B_FALSE);
4394 4466
4395 4467 case IXGBE_LB_EXTERNAL:
4396 4468 break;
4397 4469
4398 4470 case IXGBE_LB_INTERNAL_MAC:
4399 4471 ixgbe_set_internal_mac_loopback(ixgbe);
4400 4472 break;
4401 4473 }
4402 4474
4403 4475 mutex_exit(&ixgbe->gen_lock);
4404 4476
4405 4477 return (B_TRUE);
4406 4478 }
4407 4479
/*
 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
 *
 * Enables the MAC loopback bit in HLREG0 and then applies the per-MAC
 * steps needed to keep looped-back packets off the wire: on 82598 the
 * Atlas analog Tx lanes are powered down; on 82599 and later the link is
 * forced up (AUTOC FLU) and set up at 10G.
 */
static void
ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw;
	uint32_t reg;
	uint8_t atlas;

	hw = &ixgbe->hw;

	/*
	 * Setup MAC loopback
	 */
	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
	reg |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);

	/* Clear the link-mode-select field of AUTOC. */
	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
	reg &= ~IXGBE_AUTOC_LMS_MASK;
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);

	/*
	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
		    atlas);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		/* Force link up at 10G KX4 so loopback traffic flows. */
		reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
		reg |= (IXGBE_AUTOC_FLU |
		    IXGBE_AUTOC_10G_KX4);
		IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);

		(void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
		    B_FALSE);
		break;

	default:
		break;
	}
}
4478 4550
4479 4551 #pragma inline(ixgbe_intr_rx_work)
4480 4552 /*
4481 4553 * ixgbe_intr_rx_work - RX processing of ISR.
4482 4554 */
4483 4555 static void
4484 4556 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
4485 4557 {
4486 4558 mblk_t *mp;
4487 4559
4488 4560 mutex_enter(&rx_ring->rx_lock);
4489 4561
4490 4562 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4491 4563 mutex_exit(&rx_ring->rx_lock);
4492 4564
4493 4565 if (mp != NULL)
4494 4566 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4495 4567 rx_ring->ring_gen_num);
4496 4568 }
4497 4569
4498 4570 #pragma inline(ixgbe_intr_tx_work)
4499 4571 /*
4500 4572 * ixgbe_intr_tx_work - TX processing of ISR.
4501 4573 */
4502 4574 static void
4503 4575 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
4504 4576 {
4505 4577 ixgbe_t *ixgbe = tx_ring->ixgbe;
4506 4578
4507 4579 /*
4508 4580 * Recycle the tx descriptors
4509 4581 */
4510 4582 tx_ring->tx_recycle(tx_ring);
4511 4583
4512 4584 /*
4513 4585 * Schedule the re-transmit
4514 4586 */
4515 4587 if (tx_ring->reschedule &&
4516 4588 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
4517 4589 tx_ring->reschedule = B_FALSE;
4518 4590 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
4519 4591 tx_ring->ring_handle);
4520 4592 tx_ring->stat_reschedule++;
4521 4593 }
4522 4594 }
4523 4595
#pragma inline(ixgbe_intr_other_work)
/*
 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
 *
 * Called with gen_lock held, with eicr holding the snapshot of the cause
 * bits.  Handles link status changes inline and dispatches taskqs for the
 * causes that need more work than is safe in interrupt context (SFP
 * hot-plug, over-temperature, external PHY events).
 */
static void
ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
{
	struct ixgbe_hw *hw = &ixgbe->hw;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * handle link status change
	 */
	if (eicr & IXGBE_EICR_LSC) {
		ixgbe_driver_link_check(ixgbe);
		ixgbe_get_hw_state(ixgbe);
	}

	/*
	 * check for fan failure on adapters with fans
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);

		/*
		 * Disable the adapter interrupts
		 */
		ixgbe_disable_adapter_interrupts(ixgbe);

		/*
		 * Disable Rx/Tx units
		 */
		(void) ixgbe_stop_adapter(&ixgbe->hw);

		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		ixgbe_error(ixgbe,
		    "Problem: Network adapter has been stopped "
		    "because the fan has stopped.\n");
		ixgbe_error(ixgbe,
		    "Action: Replace the adapter.\n");

		/* re-enable the interrupt, which was automasked */
		ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
	}

	/*
	 * Do SFP check for adapters with hot-plug capability.
	 * The eicr snapshot is saved for the taskq; DDI_NOSLEEP
	 * dispatch can fail under memory pressure, which is only logged.
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
	    ((eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw)) ||
	    (eicr & IXGBE_EICR_GPI_SDP2_BY_MAC(hw)))) {
		ixgbe->eicr = eicr;
		if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
		    ixgbe_sfp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for SFP check");
		}
	}

	/*
	 * Do over-temperature check for adapters with temp sensor
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
	    ((eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw)) ||
	    (eicr & IXGBE_EICR_LSC))) {
		ixgbe->eicr = eicr;
		if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
		    ixgbe_overtemp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for overtemp check");
		}
	}

	/*
	 * Process an external PHY interrupt
	 */
	if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T &&
	    (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
		ixgbe->eicr = eicr;
		if ((ddi_taskq_dispatch(ixgbe->phy_taskq,
		    ixgbe_phy_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for PHY check");
		}
	}
}
4615 4687
/*
 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
 *
 * Reads EICR (which also clears the asserted causes), and when any bit is
 * set claims the interrupt and processes rx, tx and "other" causes.  With
 * a legacy interrupt there is exactly one rx and one tx ring, mapped to
 * RTxQ[0] and RTxQ[1] respectively.  The MAC-layer upcalls (mac_rx_ring,
 * mac_tx_ring_update) are deferred until after gen_lock is dropped.
 */
static uint_t
ixgbe_intr_legacy(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	ixgbe_rx_ring_t *rx_ring;
	uint32_t eicr;
	mblk_t *mp;
	boolean_t tx_reschedule;
	uint_t result;

	_NOTE(ARGUNUSED(arg2));

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	mp = NULL;
	tx_reschedule = B_FALSE;

	/*
	 * Any bit set in eicr: claim this interrupt
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	/* FMA: a bad register handle means the eicr value is untrustworthy */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		mutex_exit(&ixgbe->gen_lock);
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (DDI_INTR_CLAIMED);
	}

	if (eicr) {
		/*
		 * For legacy interrupt, we have only one interrupt,
		 * so we have only one rx ring and one tx ring enabled.
		 */
		ASSERT(ixgbe->num_rx_rings == 1);
		ASSERT(ixgbe->num_tx_rings == 1);

		/*
		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
		 */
		if (eicr & 0x1) {
			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
			/*
			 * Clean the rx descriptors
			 */
			rx_ring = &ixgbe->rx_rings[0];
			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
		}

		/*
		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
		 */
		if (eicr & 0x2) {
			/*
			 * Recycle the tx descriptors
			 */
			tx_ring = &ixgbe->tx_rings[0];
			tx_ring->tx_recycle(tx_ring);

			/*
			 * Schedule the re-transmit
			 */
			tx_reschedule = (tx_ring->reschedule &&
			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
		}

		/* any interrupt type other than tx/rx */
		if (eicr & ixgbe->capab->other_intr) {
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
			case ixgbe_mac_X550:
			case ixgbe_mac_X550EM_x:
				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
				break;

			default:
				break;
			}
			ixgbe_intr_other_work(ixgbe, eicr);
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		}

		mutex_exit(&ixgbe->gen_lock);

		result = DDI_INTR_CLAIMED;
	} else {
		mutex_exit(&ixgbe->gen_lock);

		/*
		 * No interrupt cause bits set: don't claim this interrupt.
		 */
		result = DDI_INTR_UNCLAIMED;
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	/*
	 * Do the following work outside of the gen_lock
	 */
	if (mp != NULL) {
		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
		    rx_ring->ring_gen_num);
	}

	if (tx_reschedule) {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
		tx_ring->stat_reschedule++;
	}

	return (result);
}
4746 4818
/*
 * ixgbe_intr_msi - Interrupt handler for MSI.
 *
 * A single MSI vector is shared by the one rx ring, the one tx ring,
 * and all "other" causes (link change etc.), so each cause bit read
 * from EICR is checked in turn.  Causes asserted in EICR are
 * automasked by the hardware and re-enabled through EIMS on the way
 * out.  Always returns DDI_INTR_CLAIMED since MSI is not shared.
 */
static uint_t
ixgbe_intr_msi(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;

	_NOTE(ARGUNUSED(arg2));

	/* reading EICR returns (and clears) the pending cause bits */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	/*
	 * If the register access failed the FMA check, record the fault
	 * and claim the interrupt without doing any further work.
	 */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * For MSI interrupt, we have only one vector,
	 * so we have only one rx ring and one tx ring enabled.
	 */
	ASSERT(ixgbe->num_rx_rings == 1);
	ASSERT(ixgbe->num_tx_rings == 1);

	/*
	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
	 */
	if (eicr & 0x1) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
	}

	/*
	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
	 */
	if (eicr & 0x2) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
	}

	/* any interrupt type other than tx/rx */
	if (eicr & ixgbe->capab->other_intr) {
		mutex_enter(&ixgbe->gen_lock);
		/*
		 * Mask the "other" causes while they are serviced; the
		 * mechanism differs by MAC generation (EIMS shadow on
		 * 82598, explicit EIMC write on 82599 and later).
		 */
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
			break;

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			break;

		default:
			break;
		}
		ixgbe_intr_other_work(ixgbe, eicr);
		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		mutex_exit(&ixgbe->gen_lock);
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	return (DDI_INTR_CLAIMED);
}
4817 4889
/*
 * ixgbe_intr_msix - Interrupt handler for MSI-X.
 *
 * arg1 is the per-vector ixgbe_intr_vector_t; its rx_map, tx_map and
 * other_map bitmaps record which rx rings, tx rings and "other" causes
 * were assigned to this vector by ixgbe_map_intrs_to_vectors().
 * Always returns DDI_INTR_CLAIMED since MSI-X vectors are not shared.
 */
static uint_t
ixgbe_intr_msix(void *arg1, void *arg2)
{
	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
	ixgbe_t *ixgbe = vect->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;
	int r_idx = 0;

	_NOTE(ARGUNUSED(arg2));

	/*
	 * Clean each rx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
		    (ixgbe->num_rx_rings - 1));
	}

	/*
	 * Clean each tx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
		    (ixgbe->num_tx_rings - 1));
	}


	/*
	 * Clean other interrupt (link change) that has its bit set in the map
	 */
	if (BT_TEST(vect->other_map, 0) == 1) {
		/* EICR is only consulted on the vector carrying "other" */
		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

		/*
		 * If the register access failed the FMA check, record the
		 * fault and bail out early, still claiming the interrupt.
		 */
		if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(ixgbe->dip,
			    DDI_SERVICE_DEGRADED);
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Check "other" cause bits: any interrupt type other than tx/rx
		 */
		if (eicr & ixgbe->capab->other_intr) {
			mutex_enter(&ixgbe->gen_lock);
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				ixgbe_intr_other_work(ixgbe, eicr);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
			case ixgbe_mac_X550:
			case ixgbe_mac_X550EM_x:
				ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
				ixgbe_intr_other_work(ixgbe, eicr);
				break;

			default:
				break;
			}
			mutex_exit(&ixgbe->gen_lock);
		}

		/* re-enable the interrupts which were automasked */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	}

	return (DDI_INTR_CLAIMED);
}
4898 4970
4899 4971 /*
4900 4972 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4901 4973 *
4902 4974 * Normal sequence is to try MSI-X; if not sucessful, try MSI;
4903 4975 * if not successful, try Legacy.
4904 4976 * ixgbe->intr_force can be used to force sequence to start with
4905 4977 * any of the 3 types.
4906 4978 * If MSI-X is not used, number of tx/rx rings is forced to 1.
4907 4979 */
4908 4980 static int
4909 4981 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4910 4982 {
4911 4983 dev_info_t *devinfo;
4912 4984 int intr_types;
4913 4985 int rc;
4914 4986
4915 4987 devinfo = ixgbe->dip;
4916 4988
4917 4989 /*
4918 4990 * Get supported interrupt types
4919 4991 */
4920 4992 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4921 4993
4922 4994 if (rc != DDI_SUCCESS) {
4923 4995 ixgbe_log(ixgbe,
4924 4996 "Get supported interrupt types failed: %d", rc);
4925 4997 return (IXGBE_FAILURE);
4926 4998 }
4927 4999 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
4928 5000
4929 5001 ixgbe->intr_type = 0;
4930 5002
4931 5003 /*
4932 5004 * Install MSI-X interrupts
4933 5005 */
4934 5006 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4935 5007 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
4936 5008 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
4937 5009 if (rc == IXGBE_SUCCESS)
4938 5010 return (IXGBE_SUCCESS);
4939 5011
4940 5012 ixgbe_log(ixgbe,
4941 5013 "Allocate MSI-X failed, trying MSI interrupts...");
4942 5014 }
4943 5015
4944 5016 /*
4945 5017 * MSI-X not used, force rings and groups to 1
4946 5018 */
4947 5019 ixgbe->num_rx_rings = 1;
4948 5020 ixgbe->num_rx_groups = 1;
4949 5021 ixgbe->num_tx_rings = 1;
4950 5022 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
4951 5023 ixgbe_log(ixgbe,
4952 5024 "MSI-X not used, force rings and groups number to 1");
4953 5025
4954 5026 /*
4955 5027 * Install MSI interrupts
4956 5028 */
4957 5029 if ((intr_types & DDI_INTR_TYPE_MSI) &&
4958 5030 (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
4959 5031 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
4960 5032 if (rc == IXGBE_SUCCESS)
4961 5033 return (IXGBE_SUCCESS);
4962 5034
4963 5035 ixgbe_log(ixgbe,
4964 5036 "Allocate MSI failed, trying Legacy interrupts...");
4965 5037 }
4966 5038
4967 5039 /*
4968 5040 * Install legacy interrupts
4969 5041 */
4970 5042 if (intr_types & DDI_INTR_TYPE_FIXED) {
4971 5043 /*
4972 5044 * Disallow legacy interrupts for X550. X550 has a silicon
4973 5045 * bug which prevents Shared Legacy interrupts from working.
4974 5046 * For details, please reference:
4975 5047 *
4976 5048 * Intel Ethernet Controller X550 Specification Update rev. 2.1
4977 5049 * May 2016, erratum 22: PCIe Interrupt Status Bit
4978 5050 */
4979 5051 if (ixgbe->hw.mac.type == ixgbe_mac_X550 ||
4980 5052 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x ||
4981 5053 ixgbe->hw.mac.type == ixgbe_mac_X550_vf ||
4982 5054 ixgbe->hw.mac.type == ixgbe_mac_X550EM_x_vf) {
4983 5055 ixgbe_log(ixgbe,
4984 5056 "Legacy interrupts are not supported on this "
4985 5057 "adapter. Please use MSI or MSI-X instead.");
4986 5058 return (IXGBE_FAILURE);
4987 5059 }
4988 5060 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
4989 5061 if (rc == IXGBE_SUCCESS)
4990 5062 return (IXGBE_SUCCESS);
4991 5063
4992 5064 ixgbe_log(ixgbe,
4993 5065 "Allocate Legacy interrupts failed");
4994 5066 }
4995 5067
4996 5068 /*
4997 5069 * If none of the 3 types succeeded, return failure
4998 5070 */
4999 5071 return (IXGBE_FAILURE);
5000 5072 }
5001 5073
/*
 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
 *
 * For legacy and MSI, only 1 handle is needed. For MSI-X,
 * if fewer than 2 handles are available, return failure.
 * Upon success, this maps the vectors to rx and tx rings for
 * interrupts.
 *
 * On any failure after allocation has begun, control jumps to
 * alloc_handle_fail, which frees whatever handles were allocated.
 */
static int
ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
{
	dev_info_t *devinfo;
	int request, count, actual;
	int minimum;
	int rc;
	uint32_t ring_per_group;

	devinfo = ixgbe->dip;

	/*
	 * Decide how many handles to request, and the minimum acceptable,
	 * for the given interrupt type.
	 */
	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
		request = 1;	/* Request 1 legacy interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
		break;

	case DDI_INTR_TYPE_MSI:
		request = 1;	/* Request 1 MSI interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
		break;

	case DDI_INTR_TYPE_MSIX:
		/*
		 * Best number of vectors for the adapter is
		 * (# rx rings + # tx rings), however we will
		 * limit the request number.
		 */
		request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
		if (request > ixgbe->capab->max_ring_vect)
			request = ixgbe->capab->max_ring_vect;
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
		break;

	default:
		ixgbe_log(ixgbe,
		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
		    intr_type);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d",
	    request, minimum);

	/*
	 * Get number of supported interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if ((rc != DDI_SUCCESS) || (count < minimum)) {
		ixgbe_log(ixgbe,
		    "Get interrupt number failed. Return: %d, count: %d",
		    rc, count);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);

	actual = 0;
	ixgbe->intr_cnt = 0;
	ixgbe->intr_cnt_max = 0;
	ixgbe->intr_cnt_min = 0;

	/*
	 * Allocate an array of interrupt handles
	 */
	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);

	/* DDI may grant fewer handles than requested; 'actual' records it */
	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
	    request, &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe, "Allocate interrupts failed. "
		    "return: %d, request: %d, actual: %d",
		    rc, request, actual);
		goto alloc_handle_fail;
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);

	/*
	 * upper/lower limit of interrupts
	 */
	ixgbe->intr_cnt = actual;
	ixgbe->intr_cnt_max = request;
	ixgbe->intr_cnt_min = minimum;

	/*
	 * rss number per group should not exceed the rx interrupt number,
	 * else need to adjust rx ring number.
	 */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
	if (actual < ring_per_group) {
		ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
		ixgbe_setup_vmdq_rss_conf(ixgbe);
	}

	/*
	 * Now we know the actual number of vectors. Here we map the vector
	 * to other, rx rings and tx ring.
	 */
	if (actual < minimum) {
		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
		    actual);
		goto alloc_handle_fail;
	}

	/*
	 * Get priority for first vector, assume remaining are all the same
	 */
	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt priority failed: %d", rc);
		goto alloc_handle_fail;
	}

	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt cap failed: %d", rc);
		goto alloc_handle_fail;
	}

	ixgbe->intr_type = intr_type;

	return (IXGBE_SUCCESS);

alloc_handle_fail:
	/* release any handles allocated before the failure */
	ixgbe_rem_intrs(ixgbe);

	return (IXGBE_FAILURE);
}
5143 5215
5144 5216 /*
5145 5217 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
5146 5218 *
5147 5219 * Before adding the interrupt handlers, the interrupt vectors have
5148 5220 * been allocated, and the rx/tx rings have also been allocated.
5149 5221 */
5150 5222 static int
5151 5223 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
5152 5224 {
5153 5225 int vector = 0;
5154 5226 int rc;
5155 5227
5156 5228 switch (ixgbe->intr_type) {
5157 5229 case DDI_INTR_TYPE_MSIX:
5158 5230 /*
5159 5231 * Add interrupt handler for all vectors
5160 5232 */
5161 5233 for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
5162 5234 /*
5163 5235 * install pointer to vect_map[vector]
5164 5236 */
5165 5237 rc = ddi_intr_add_handler(ixgbe->htable[vector],
5166 5238 (ddi_intr_handler_t *)ixgbe_intr_msix,
5167 5239 (void *)&ixgbe->vect_map[vector], NULL);
5168 5240
5169 5241 if (rc != DDI_SUCCESS) {
5170 5242 ixgbe_log(ixgbe,
5171 5243 "Add interrupt handler failed. "
5172 5244 "return: %d, vector: %d", rc, vector);
5173 5245 for (vector--; vector >= 0; vector--) {
5174 5246 (void) ddi_intr_remove_handler(
5175 5247 ixgbe->htable[vector]);
5176 5248 }
5177 5249 return (IXGBE_FAILURE);
5178 5250 }
5179 5251 }
5180 5252
5181 5253 break;
5182 5254
5183 5255 case DDI_INTR_TYPE_MSI:
5184 5256 /*
5185 5257 * Add interrupt handlers for the only vector
5186 5258 */
5187 5259 rc = ddi_intr_add_handler(ixgbe->htable[vector],
5188 5260 (ddi_intr_handler_t *)ixgbe_intr_msi,
5189 5261 (void *)ixgbe, NULL);
5190 5262
5191 5263 if (rc != DDI_SUCCESS) {
5192 5264 ixgbe_log(ixgbe,
5193 5265 "Add MSI interrupt handler failed: %d", rc);
5194 5266 return (IXGBE_FAILURE);
5195 5267 }
5196 5268
5197 5269 break;
5198 5270
5199 5271 case DDI_INTR_TYPE_FIXED:
5200 5272 /*
5201 5273 * Add interrupt handlers for the only vector
5202 5274 */
5203 5275 rc = ddi_intr_add_handler(ixgbe->htable[vector],
5204 5276 (ddi_intr_handler_t *)ixgbe_intr_legacy,
5205 5277 (void *)ixgbe, NULL);
5206 5278
5207 5279 if (rc != DDI_SUCCESS) {
5208 5280 ixgbe_log(ixgbe,
5209 5281 "Add legacy interrupt handler failed: %d", rc);
5210 5282 return (IXGBE_FAILURE);
5211 5283 }
5212 5284
5213 5285 break;
5214 5286
5215 5287 default:
5216 5288 return (IXGBE_FAILURE);
5217 5289 }
5218 5290
5219 5291 return (IXGBE_SUCCESS);
5220 5292 }
5221 5293
5222 5294 #pragma inline(ixgbe_map_rxring_to_vector)
5223 5295 /*
5224 5296 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
5225 5297 */
5226 5298 static void
5227 5299 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
5228 5300 {
5229 5301 /*
5230 5302 * Set bit in map
5231 5303 */
5232 5304 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
5233 5305
5234 5306 /*
5235 5307 * Count bits set
5236 5308 */
5237 5309 ixgbe->vect_map[v_idx].rxr_cnt++;
5238 5310
5239 5311 /*
5240 5312 * Remember bit position
5241 5313 */
5242 5314 ixgbe->rx_rings[r_idx].intr_vector = v_idx;
5243 5315 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
5244 5316 }
5245 5317
5246 5318 #pragma inline(ixgbe_map_txring_to_vector)
5247 5319 /*
5248 5320 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
5249 5321 */
5250 5322 static void
5251 5323 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
5252 5324 {
5253 5325 /*
5254 5326 * Set bit in map
5255 5327 */
5256 5328 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
5257 5329
5258 5330 /*
5259 5331 * Count bits set
5260 5332 */
5261 5333 ixgbe->vect_map[v_idx].txr_cnt++;
5262 5334
5263 5335 /*
5264 5336 * Remember bit position
5265 5337 */
5266 5338 ixgbe->tx_rings[t_idx].intr_vector = v_idx;
5267 5339 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
5268 5340 }
5269 5341
5270 5342 /*
5271 5343 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
5272 5344 * allocation register (IVAR).
5273 5345 * cause:
5274 5346 * -1 : other cause
5275 5347 * 0 : rx
5276 5348 * 1 : tx
5277 5349 */
5278 5350 static void
5279 5351 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
5280 5352 int8_t cause)
5281 5353 {
5282 5354 struct ixgbe_hw *hw = &ixgbe->hw;
5283 5355 u32 ivar, index;
5284 5356
5285 5357 switch (hw->mac.type) {
5286 5358 case ixgbe_mac_82598EB:
5287 5359 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5288 5360 if (cause == -1) {
5289 5361 cause = 0;
5290 5362 }
5291 5363 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5292 5364 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5293 5365 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
5294 5366 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
5295 5367 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5296 5368 break;
5297 5369
5298 5370 case ixgbe_mac_82599EB:
5299 5371 case ixgbe_mac_X540:
5300 5372 case ixgbe_mac_X550:
5301 5373 case ixgbe_mac_X550EM_x:
5302 5374 if (cause == -1) {
5303 5375 /* other causes */
5304 5376 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5305 5377 index = (intr_alloc_entry & 1) * 8;
5306 5378 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5307 5379 ivar &= ~(0xFF << index);
5308 5380 ivar |= (msix_vector << index);
5309 5381 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5310 5382 } else {
5311 5383 /* tx or rx causes */
5312 5384 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
5313 5385 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5314 5386 ivar = IXGBE_READ_REG(hw,
5315 5387 IXGBE_IVAR(intr_alloc_entry >> 1));
5316 5388 ivar &= ~(0xFF << index);
5317 5389 ivar |= (msix_vector << index);
5318 5390 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5319 5391 ivar);
5320 5392 }
5321 5393 break;
5322 5394
5323 5395 default:
5324 5396 break;
5325 5397 }
5326 5398 }
5327 5399
5328 5400 /*
5329 5401 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
5330 5402 * given interrupt vector allocation register (IVAR).
5331 5403 * cause:
5332 5404 * -1 : other cause
5333 5405 * 0 : rx
5334 5406 * 1 : tx
5335 5407 */
5336 5408 static void
5337 5409 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
5338 5410 {
5339 5411 struct ixgbe_hw *hw = &ixgbe->hw;
5340 5412 u32 ivar, index;
5341 5413
5342 5414 switch (hw->mac.type) {
5343 5415 case ixgbe_mac_82598EB:
5344 5416 if (cause == -1) {
5345 5417 cause = 0;
5346 5418 }
5347 5419 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5348 5420 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5349 5421 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
5350 5422 (intr_alloc_entry & 0x3)));
5351 5423 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5352 5424 break;
5353 5425
5354 5426 case ixgbe_mac_82599EB:
5355 5427 case ixgbe_mac_X540:
5356 5428 case ixgbe_mac_X550:
5357 5429 case ixgbe_mac_X550EM_x:
5358 5430 if (cause == -1) {
5359 5431 /* other causes */
5360 5432 index = (intr_alloc_entry & 1) * 8;
5361 5433 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5362 5434 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
5363 5435 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5364 5436 } else {
5365 5437 /* tx or rx causes */
5366 5438 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5367 5439 ivar = IXGBE_READ_REG(hw,
5368 5440 IXGBE_IVAR(intr_alloc_entry >> 1));
5369 5441 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
5370 5442 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5371 5443 ivar);
5372 5444 }
5373 5445 break;
5374 5446
5375 5447 default:
5376 5448 break;
5377 5449 }
5378 5450 }
5379 5451
5380 5452 /*
5381 5453 * ixgbe_disable_ivar - Disble the given entry by clearing the VAL bit of
5382 5454 * given interrupt vector allocation register (IVAR).
5383 5455 * cause:
5384 5456 * -1 : other cause
5385 5457 * 0 : rx
5386 5458 * 1 : tx
5387 5459 */
5388 5460 static void
5389 5461 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
5390 5462 {
5391 5463 struct ixgbe_hw *hw = &ixgbe->hw;
5392 5464 u32 ivar, index;
5393 5465
5394 5466 switch (hw->mac.type) {
5395 5467 case ixgbe_mac_82598EB:
5396 5468 if (cause == -1) {
5397 5469 cause = 0;
5398 5470 }
5399 5471 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
5400 5472 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
5401 5473 ivar &= ~(IXGBE_IVAR_ALLOC_VAL<< (8 *
5402 5474 (intr_alloc_entry & 0x3)));
5403 5475 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
5404 5476 break;
5405 5477
5406 5478 case ixgbe_mac_82599EB:
5407 5479 case ixgbe_mac_X540:
5408 5480 case ixgbe_mac_X550:
5409 5481 case ixgbe_mac_X550EM_x:
5410 5482 if (cause == -1) {
5411 5483 /* other causes */
5412 5484 index = (intr_alloc_entry & 1) * 8;
5413 5485 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
5414 5486 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
5415 5487 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
5416 5488 } else {
5417 5489 /* tx or rx causes */
5418 5490 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
5419 5491 ivar = IXGBE_READ_REG(hw,
5420 5492 IXGBE_IVAR(intr_alloc_entry >> 1));
5421 5493 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
5422 5494 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
5423 5495 ivar);
5424 5496 }
5425 5497 break;
5426 5498
5427 5499 default:
5428 5500 break;
5429 5501 }
5430 5502 }
5431 5503
5432 5504 /*
5433 5505 * Convert the rx ring index driver maintained to the rx ring index
5434 5506 * in h/w.
5435 5507 */
5436 5508 static uint32_t
5437 5509 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
5438 5510 {
5439 5511
5440 5512 struct ixgbe_hw *hw = &ixgbe->hw;
5441 5513 uint32_t rx_ring_per_group, hw_rx_index;
5442 5514
5443 5515 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
5444 5516 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
5445 5517 return (sw_rx_index);
5446 5518 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
5447 5519 switch (hw->mac.type) {
5448 5520 case ixgbe_mac_82598EB:
5449 5521 return (sw_rx_index);
5450 5522
5451 5523 case ixgbe_mac_82599EB:
5452 5524 case ixgbe_mac_X540:
5453 5525 case ixgbe_mac_X550:
5454 5526 case ixgbe_mac_X550EM_x:
5455 5527 return (sw_rx_index * 2);
5456 5528
5457 5529 default:
5458 5530 break;
5459 5531 }
5460 5532 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
5461 5533 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
5462 5534
5463 5535 switch (hw->mac.type) {
5464 5536 case ixgbe_mac_82598EB:
5465 5537 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
5466 5538 16 + (sw_rx_index % rx_ring_per_group);
5467 5539 return (hw_rx_index);
5468 5540
5469 5541 case ixgbe_mac_82599EB:
5470 5542 case ixgbe_mac_X540:
5471 5543 case ixgbe_mac_X550:
5472 5544 case ixgbe_mac_X550EM_x:
5473 5545 if (ixgbe->num_rx_groups > 32) {
5474 5546 hw_rx_index = (sw_rx_index /
5475 5547 rx_ring_per_group) * 2 +
5476 5548 (sw_rx_index % rx_ring_per_group);
5477 5549 } else {
5478 5550 hw_rx_index = (sw_rx_index /
5479 5551 rx_ring_per_group) * 4 +
5480 5552 (sw_rx_index % rx_ring_per_group);
5481 5553 }
5482 5554 return (hw_rx_index);
5483 5555
5484 5556 default:
5485 5557 break;
5486 5558 }
5487 5559 }
5488 5560
5489 5561 /*
5490 5562 * Should never reach. Just to make compiler happy.
5491 5563 */
5492 5564 return (sw_rx_index);
5493 5565 }
5494 5566
5495 5567 /*
5496 5568 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
5497 5569 *
5498 5570 * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt
5499 5571 * to vector[0 - (intr_cnt -1)].
5500 5572 */
5501 5573 static int
5502 5574 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
5503 5575 {
5504 5576 int i, vector = 0;
5505 5577
5506 5578 /* initialize vector map */
5507 5579 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
5508 5580 for (i = 0; i < ixgbe->intr_cnt; i++) {
5509 5581 ixgbe->vect_map[i].ixgbe = ixgbe;
5510 5582 }
5511 5583
5512 5584 /*
5513 5585 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
5514 5586 * tx rings[0] on RTxQ[1].
5515 5587 */
5516 5588 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5517 5589 ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
5518 5590 ixgbe_map_txring_to_vector(ixgbe, 0, 1);
5519 5591 return (IXGBE_SUCCESS);
5520 5592 }
5521 5593
5522 5594 /*
5523 5595 * Interrupts/vectors mapping for MSI-X
5524 5596 */
5525 5597
5526 5598 /*
5527 5599 * Map other interrupt to vector 0,
5528 5600 * Set bit in map and count the bits set.
5529 5601 */
5530 5602 BT_SET(ixgbe->vect_map[vector].other_map, 0);
5531 5603 ixgbe->vect_map[vector].other_cnt++;
5532 5604
5533 5605 /*
5534 5606 * Map rx ring interrupts to vectors
5535 5607 */
5536 5608 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5537 5609 ixgbe_map_rxring_to_vector(ixgbe, i, vector);
5538 5610 vector = (vector +1) % ixgbe->intr_cnt;
5539 5611 }
5540 5612
5541 5613 /*
5542 5614 * Map tx ring interrupts to vectors
5543 5615 */
5544 5616 for (i = 0; i < ixgbe->num_tx_rings; i++) {
5545 5617 ixgbe_map_txring_to_vector(ixgbe, i, vector);
5546 5618 vector = (vector +1) % ixgbe->intr_cnt;
5547 5619 }
5548 5620
5549 5621 return (IXGBE_SUCCESS);
5550 5622 }
5551 5623
/*
 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
 *
 * This relies on ring/vector mapping already set up in the
 * vect_map[] structures; it writes that mapping into the hardware
 * IVAR tables via ixgbe_setup_ivar().
 */
static void
ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_intr_vector_t *vect;	/* vector bitmap */
	int r_idx;	/* ring index */
	int v_idx;	/* vector index */
	uint32_t hw_index;

	/*
	 * Clear any previous entries
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598: clear the 25 IVAR entries; there is no IVAR_MISC */
		for (v_idx = 0; v_idx < 25; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
		/* later MACs: clear 64 IVAR entries plus IVAR_MISC */
		for (v_idx = 0; v_idx < 64; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
		break;

	default:
		break;
	}

	/*
	 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
	 * tx rings[0] will use RTxQ[1].
	 */
	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
		return;
	}

	/*
	 * For MSI-X interrupt, "Other" is always on vector[0].
	 */
	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);

	/*
	 * For each interrupt vector, populate the IVAR table
	 */
	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
		vect = &ixgbe->vect_map[v_idx];

		/*
		 * For each rx ring bit set
		 */
		r_idx = bt_getlowbit(vect->rx_map, 0,
		    (ixgbe->num_rx_rings - 1));

		while (r_idx >= 0) {
			/* rx entries are keyed by the h/w ring index */
			hw_index = ixgbe->rx_rings[r_idx].hw_index;
			ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
			    (ixgbe->num_rx_rings - 1));
		}

		/*
		 * For each tx ring bit set
		 */
		r_idx = bt_getlowbit(vect->tx_map, 0,
		    (ixgbe->num_tx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
			    (ixgbe->num_tx_rings - 1));
		}
	}
}
5636 5708
5637 5709 /*
5638 5710 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
5639 5711 */
5640 5712 static void
5641 5713 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
5642 5714 {
5643 5715 int i;
5644 5716 int rc;
5645 5717
5646 5718 for (i = 0; i < ixgbe->intr_cnt; i++) {
5647 5719 rc = ddi_intr_remove_handler(ixgbe->htable[i]);
5648 5720 if (rc != DDI_SUCCESS) {
5649 5721 IXGBE_DEBUGLOG_1(ixgbe,
5650 5722 "Remove intr handler failed: %d", rc);
5651 5723 }
5652 5724 }
5653 5725 }
5654 5726
5655 5727 /*
5656 5728 * ixgbe_rem_intrs - Remove the allocated interrupts.
5657 5729 */
5658 5730 static void
5659 5731 ixgbe_rem_intrs(ixgbe_t *ixgbe)
5660 5732 {
5661 5733 int i;
5662 5734 int rc;
5663 5735
5664 5736 for (i = 0; i < ixgbe->intr_cnt; i++) {
5665 5737 rc = ddi_intr_free(ixgbe->htable[i]);
5666 5738 if (rc != DDI_SUCCESS) {
5667 5739 IXGBE_DEBUGLOG_1(ixgbe,
5668 5740 "Free intr failed: %d", rc);
5669 5741 }
5670 5742 }
5671 5743
5672 5744 kmem_free(ixgbe->htable, ixgbe->intr_size);
5673 5745 ixgbe->htable = NULL;
5674 5746 }
5675 5747
5676 5748 /*
5677 5749 * ixgbe_enable_intrs - Enable all the ddi interrupts.
5678 5750 */
5679 5751 static int
5680 5752 ixgbe_enable_intrs(ixgbe_t *ixgbe)
5681 5753 {
5682 5754 int i;
5683 5755 int rc;
5684 5756
5685 5757 /*
5686 5758 * Enable interrupts
5687 5759 */
5688 5760 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5689 5761 /*
5690 5762 * Call ddi_intr_block_enable() for MSI
5691 5763 */
5692 5764 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
5693 5765 if (rc != DDI_SUCCESS) {
5694 5766 ixgbe_log(ixgbe,
5695 5767 "Enable block intr failed: %d", rc);
5696 5768 return (IXGBE_FAILURE);
5697 5769 }
5698 5770 } else {
5699 5771 /*
5700 5772 * Call ddi_intr_enable() for Legacy/MSI non block enable
5701 5773 */
5702 5774 for (i = 0; i < ixgbe->intr_cnt; i++) {
5703 5775 rc = ddi_intr_enable(ixgbe->htable[i]);
5704 5776 if (rc != DDI_SUCCESS) {
5705 5777 ixgbe_log(ixgbe,
5706 5778 "Enable intr failed: %d", rc);
5707 5779 return (IXGBE_FAILURE);
5708 5780 }
5709 5781 }
5710 5782 }
5711 5783
5712 5784 return (IXGBE_SUCCESS);
5713 5785 }
5714 5786
5715 5787 /*
5716 5788 * ixgbe_disable_intrs - Disable all the interrupts.
5717 5789 */
5718 5790 static int
5719 5791 ixgbe_disable_intrs(ixgbe_t *ixgbe)
5720 5792 {
5721 5793 int i;
5722 5794 int rc;
5723 5795
5724 5796 /*
5725 5797 * Disable all interrupts
5726 5798 */
5727 5799 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5728 5800 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
5729 5801 if (rc != DDI_SUCCESS) {
5730 5802 ixgbe_log(ixgbe,
5731 5803 "Disable block intr failed: %d", rc);
5732 5804 return (IXGBE_FAILURE);
5733 5805 }
5734 5806 } else {
5735 5807 for (i = 0; i < ixgbe->intr_cnt; i++) {
5736 5808 rc = ddi_intr_disable(ixgbe->htable[i]);
5737 5809 if (rc != DDI_SUCCESS) {
5738 5810 ixgbe_log(ixgbe,
5739 5811 "Disable intr failed: %d", rc);
5740 5812 return (IXGBE_FAILURE);
5741 5813 }
5742 5814 }
5743 5815 }
5744 5816
5745 5817 return (IXGBE_SUCCESS);
5746 5818 }
5747 5819
/*
 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
 *
 * Refreshes the link-partner capability and currently-advertised-speed
 * parameters from hardware.  Caller must hold gen_lock.
 */
static void
ixgbe_get_hw_state(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_link_speed speed = 0;
	boolean_t link_up = B_FALSE;
	uint32_t pcs1g_anlp = 0;

	ASSERT(mutex_owned(&ixgbe->gen_lock));
	/* Assume no LP capabilities until link-up proves otherwise */
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap = 0;

	/* check for link, don't wait */
	(void) ixgbe_check_link(hw, &speed, &link_up, B_FALSE);

	/*
	 * Update the observed Link Partner's capabilities. Not all adapters
	 * can provide full information on the LP's capable speeds, so we
	 * provide what we can.
	 */
	if (link_up) {
		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

		ixgbe->param_lp_1000fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
		/*
		 * NOTE(review): the 100fdx capability is derived from the
		 * same LPFD bit as 1000fdx above — presumably because
		 * PCS1GANLP carries no distinct 100Mb indication.  Confirm
		 * this mirroring is intentional against the 82599 datasheet.
		 */
		ixgbe->param_lp_100fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
	}

	/*
	 * Update GLD's notion of the adapter's currently advertised speeds.
	 * Since the common code doesn't always record the current autonegotiate
	 * settings in the phy struct for all parts (specifically, adapters with
	 * SFPs) we first test to see if it is 0, and if so, we fall back to
	 * using the adapter's speed capabilities which we saved during instance
	 * init in ixgbe_init_params().
	 *
	 * Adapters with SFPs will always be shown as advertising all of their
	 * supported speeds, and adapters with baseT PHYs (where the phy struct
	 * is maintained by the common code) will always have a factual view of
	 * their currently-advertised speeds. In the case of SFPs, this is
	 * acceptable as we default to advertising all speeds that the adapter
	 * claims to support, and those properties are immutable; unlike on
	 * baseT (copper) PHYs, where speeds can be enabled or disabled at will.
	 */
	speed = hw->phy.autoneg_advertised;
	if (speed == 0)
		speed = ixgbe->speeds_supported;

	/* Map each advertised link-speed bit onto its GLD parameter */
	ixgbe->param_adv_10000fdx_cap =
	    (speed & IXGBE_LINK_SPEED_10GB_FULL) ? 1 : 0;
	ixgbe->param_adv_5000fdx_cap =
	    (speed & IXGBE_LINK_SPEED_5GB_FULL) ? 1 : 0;
	ixgbe->param_adv_2500fdx_cap =
	    (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ? 1 : 0;
	ixgbe->param_adv_1000fdx_cap =
	    (speed & IXGBE_LINK_SPEED_1GB_FULL) ? 1 : 0;
	ixgbe->param_adv_100fdx_cap =
	    (speed & IXGBE_LINK_SPEED_100_FULL) ? 1 : 0;
}
5811 5883
5812 5884 /*
5813 5885 * ixgbe_get_driver_control - Notify that driver is in control of device.
5814 5886 */
5815 5887 static void
5816 5888 ixgbe_get_driver_control(struct ixgbe_hw *hw)
5817 5889 {
5818 5890 uint32_t ctrl_ext;
5819 5891
5820 5892 /*
5821 5893 * Notify firmware that driver is in control of device
5822 5894 */
5823 5895 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5824 5896 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
5825 5897 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5826 5898 }
5827 5899
5828 5900 /*
5829 5901 * ixgbe_release_driver_control - Notify that driver is no longer in control
5830 5902 * of device.
5831 5903 */
5832 5904 static void
5833 5905 ixgbe_release_driver_control(struct ixgbe_hw *hw)
5834 5906 {
5835 5907 uint32_t ctrl_ext;
5836 5908
5837 5909 /*
5838 5910 * Notify firmware that driver is no longer in control of device
5839 5911 */
5840 5912 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5841 5913 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
5842 5914 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5843 5915 }
5844 5916
5845 5917 /*
5846 5918 * ixgbe_atomic_reserve - Atomic decrease operation.
5847 5919 */
5848 5920 int
5849 5921 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
5850 5922 {
5851 5923 uint32_t oldval;
5852 5924 uint32_t newval;
5853 5925
5854 5926 /*
5855 5927 * ATOMICALLY
5856 5928 */
5857 5929 do {
5858 5930 oldval = *count_p;
5859 5931 if (oldval < n)
5860 5932 return (-1);
5861 5933 newval = oldval - n;
5862 5934 } while (atomic_cas_32(count_p, oldval, newval) != oldval);
5863 5935
5864 5936 return (newval);
5865 5937 }
5866 5938
5867 5939 /*
5868 5940 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5869 5941 */
5870 5942 static uint8_t *
5871 5943 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
5872 5944 {
5873 5945 uint8_t *addr = *upd_ptr;
5874 5946 uint8_t *new_ptr;
5875 5947
5876 5948 _NOTE(ARGUNUSED(hw));
5877 5949 _NOTE(ARGUNUSED(vmdq));
5878 5950
5879 5951 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
5880 5952 *upd_ptr = new_ptr;
5881 5953 return (addr);
5882 5954 }
5883 5955
5884 5956 /*
5885 5957 * FMA support
5886 5958 */
5887 5959 int
5888 5960 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
5889 5961 {
5890 5962 ddi_fm_error_t de;
5891 5963
5892 5964 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5893 5965 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5894 5966 return (de.fme_status);
5895 5967 }
5896 5968
/*
 * ixgbe_check_dma_handle - Retrieve the FMA error status of a DMA handle.
 *
 * NOTE(review): unlike ixgbe_check_acc_handle(), the error is not cleared
 * here (no ddi_fm_dma_err_clear() call) — confirm this asymmetry is
 * intentional.
 */
int
ixgbe_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}
5905 5977
/*
 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
 *
 * Registered via ddi_fm_handler_register() in ixgbe_fm_init(); posts the
 * ereport and passes the framework's status straight back.
 */
static int
ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	_NOTE(ARGUNUSED(impl_data));
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
5920 5992
/*
 * ixgbe_fm_init - Register the driver's FMA capabilities with the I/O
 * fault services framework.
 *
 * Configures the register-access and DMA attributes according to the
 * configured fm_capabilities, then (if any capability is enabled)
 * registers with ddi_fm_init(), sets up PCI ereports, and installs the
 * error callback.  The registration order here is significant and is
 * mirrored in reverse by ixgbe_fm_fini().
 */
static void
ixgbe_fm_init(ixgbe_t *ixgbe)
{
	ddi_iblock_cookie_t iblk;
	int fma_dma_flag;

	/*
	 * Only register with IO Fault Services if we have some capability
	 */
	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		fma_dma_flag = 1;
	} else {
		fma_dma_flag = 0;
	}

	/* Propagate the DMA error-checking choice to the DMA attr setup */
	ixgbe_set_fma_flags(fma_dma_flag);

	if (ixgbe->fm_capabilities) {

		/*
		 * Register capabilities with IO Fault Services
		 */
		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_setup(ixgbe->dip);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_register(ixgbe->dip,
			    ixgbe_fm_error_cb, (void*) ixgbe);
	}
}
5966 6038
/*
 * ixgbe_fm_fini - Unregister from the I/O fault services framework.
 *
 * Tears down, in reverse order of ixgbe_fm_init(): PCI ereports, the
 * error callback, and finally the FMA registration itself.
 */
static void
ixgbe_fm_fini(ixgbe_t *ixgbe)
{
	/*
	 * Only unregister FMA capabilities if they are registered
	 */
	if (ixgbe->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_teardown(ixgbe->dip);

		/*
		 * Un-register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_unregister(ixgbe->dip);

		/*
		 * Unregister from IO Fault Service
		 */
		ddi_fm_fini(ixgbe->dip);
	}
}
5994 6066
5995 6067 void
5996 6068 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
5997 6069 {
5998 6070 uint64_t ena;
5999 6071 char buf[FM_MAX_CLASS];
6000 6072
6001 6073 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
6002 6074 ena = fm_ena_generate(0, FM_ENA_FMT1);
6003 6075 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
6004 6076 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
6005 6077 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
6006 6078 }
6007 6079 }
6008 6080
6009 6081 static int
6010 6082 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
6011 6083 {
6012 6084 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
6013 6085
6014 6086 mutex_enter(&rx_ring->rx_lock);
6015 6087 rx_ring->ring_gen_num = mr_gen_num;
6016 6088 mutex_exit(&rx_ring->rx_lock);
6017 6089 return (0);
6018 6090 }
6019 6091
6020 6092 /*
6021 6093 * Get the global ring index by a ring index within a group.
6022 6094 */
6023 6095 static int
6024 6096 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
6025 6097 {
6026 6098 ixgbe_rx_ring_t *rx_ring;
6027 6099 int i;
6028 6100
6029 6101 for (i = 0; i < ixgbe->num_rx_rings; i++) {
6030 6102 rx_ring = &ixgbe->rx_rings[i];
6031 6103 if (rx_ring->group_index == gindex)
6032 6104 rindex--;
6033 6105 if (rindex < 0)
6034 6106 return (i);
6035 6107 }
6036 6108
6037 6109 return (-1);
6038 6110 }
6039 6111
/*
 * Callback function for MAC layer to register all rings.
 *
 * Fills in the mac_ring_info_t for one rx or tx ring: driver handle,
 * start/stop/poll/stat entry points, and — for MSI/MSI-X — the DDI
 * interrupt handle so the MAC layer can retarget the vector.
 */
/* ARGSUSED */
void
ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
    const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		/*
		 * 'index' is the ring index within the group.
		 * Need to get the global ring index by searching in groups.
		 */
		int global_ring_index = ixgbe_get_rx_ring_index(
		    ixgbe, group_index, ring_index);

		ASSERT(global_ring_index >= 0);

		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
		rx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = ixgbe_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = ixgbe_ring_rx_poll;
		infop->mri_stat = ixgbe_rx_ring_stat;

		/* Per-ring interrupt enable/disable hooks for polling mode */
		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
		if (ixgbe->intr_type &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    ixgbe->htable[rx_ring->intr_vector];
		}

		break;
	}
	case MAC_RING_TYPE_TX: {
		/* tx rings are not grouped, so group_index is always -1 */
		ASSERT(group_index == -1);
		ASSERT(ring_index < ixgbe->num_tx_rings);

		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
		tx_ring->ring_handle = rh;

		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = ixgbe_ring_tx;
		infop->mri_stat = ixgbe_tx_ring_stat;
		if (ixgbe->intr_type &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    ixgbe->htable[tx_ring->intr_vector];
		}
		break;
	}
	default:
		break;
	}
}
6105 6177
6106 6178 /*
6107 6179 * Callback funtion for MAC layer to register all groups.
6108 6180 */
6109 6181 void
6110 6182 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
6111 6183 mac_group_info_t *infop, mac_group_handle_t gh)
6112 6184 {
6113 6185 ixgbe_t *ixgbe = (ixgbe_t *)arg;
6114 6186
6115 6187 switch (rtype) {
6116 6188 case MAC_RING_TYPE_RX: {
6117 6189 ixgbe_rx_group_t *rx_group;
6118 6190
6119 6191 rx_group = &ixgbe->rx_groups[index];
6120 6192 rx_group->group_handle = gh;
6121 6193
6122 6194 infop->mgi_driver = (mac_group_driver_t)rx_group;
6123 6195 infop->mgi_start = NULL;
6124 6196 infop->mgi_stop = NULL;
6125 6197 infop->mgi_addmac = ixgbe_addmac;
6126 6198 infop->mgi_remmac = ixgbe_remmac;
6127 6199 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
6128 6200
6129 6201 break;
6130 6202 }
6131 6203 case MAC_RING_TYPE_TX:
6132 6204 break;
6133 6205 default:
6134 6206 break;
6135 6207 }
6136 6208 }
6137 6209
/*
 * Enable interrupt on the specified rx ring.
 *
 * MAC layer mi_enable callback: sets the ring's IVAR bit, marks the ring
 * in the vector's rx_map, and fires a software interrupt (EICS) so any
 * packets that arrived while polling are picked up immediately.
 */
int
ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int hw_r_idx = rx_ring->hw_index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
		mutex_exit(&ixgbe->gen_lock);
		/*
		 * Simply return 0.
		 * Interrupts are being adjusted. ixgbe_intr_adjust()
		 * will eventually re-enable the interrupt when it's
		 * done with the adjustment.
		 */
		return (0);
	}

	/*
	 * To enable interrupt by setting the VAL bit of given interrupt
	 * vector allocation register (IVAR).
	 */
	ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);

	/* Remember this ring is interrupt-driven on vector v_idx */
	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);

	/*
	 * Trigger a Rx interrupt on this ring
	 */
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
	IXGBE_WRITE_FLUSH(&ixgbe->hw);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
6180 6252
/*
 * Disable interrupt on the specified rx ring.
 *
 * MAC layer mi_disable callback: clears the ring's IVAR bit and removes
 * the ring from the vector's rx_map so the ring is serviced by polling.
 */
int
ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int hw_r_idx = rx_ring->hw_index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
		mutex_exit(&ixgbe->gen_lock);
		/*
		 * Simply return 0.
		 * In the rare case where an interrupt is being
		 * disabled while interrupts are being adjusted,
		 * we don't fail the operation. No interrupts will
		 * be generated while they are adjusted, and
		 * ixgbe_intr_adjust() will cause the interrupts
		 * to be re-enabled once it completes. Note that
		 * in this case, packets may be delivered to the
		 * stack via interrupts before ixgbe_rx_ring_intr_enable()
		 * is called again. This is acceptable since interrupt
		 * adjustment is infrequent, and the stack will be
		 * able to handle these packets.
		 */
		return (0);
	}

	/*
	 * To disable interrupt by clearing the VAL bit of given interrupt
	 * vector allocation register (IVAR).
	 */
	ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);

	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
6225 6297
/*
 * Add a mac address.
 *
 * MAC layer mgi_addmac callback for an rx group.  Finds a free RAR slot
 * (the group's own reserved slot first, then the shared pool), programs
 * the address into hardware, and books it in the software table.
 * Returns 0, ECANCELED if suspended, or ENOSPC when no slot is free.
 */
static int
ixgbe_addmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot, i;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	if (ixgbe->unicst_avail == 0) {
		/* no slots available */
		mutex_exit(&ixgbe->gen_lock);
		return (ENOSPC);
	}

	/*
	 * The first ixgbe->num_rx_groups slots are reserved for each respective
	 * group. The rest slots are shared by all groups. While adding a
	 * MAC address, reserved slots are firstly checked then the shared
	 * slots are searched.
	 */
	slot = -1;
	if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
		/* reserved slot taken: scan the shared pool */
		for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
			if (ixgbe->unicst_addr[i].mac.set == 0) {
				slot = i;
				break;
			}
		}
	} else {
		slot = rx_group->index;
	}

	if (slot == -1) {
		/* no slots available */
		mutex_exit(&ixgbe->gen_lock);
		return (ENOSPC);
	}

	/* Program the RAR and record the slot as in use by this group */
	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
	(void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
	    rx_group->index, IXGBE_RAH_AV);
	ixgbe->unicst_addr[slot].mac.set = 1;
	ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
	ixgbe->unicst_avail--;

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
6285 6357
/*
 * Remove a mac address.
 *
 * MAC layer mgi_remmac callback: looks up the slot holding mac_addr,
 * clears the hardware RAR, and releases the slot back to the pool.
 * Returns 0, ECANCELED if suspended, or EINVAL if the address is not
 * currently programmed.
 */
static int
ixgbe_remmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	slot = ixgbe_unicst_find(ixgbe, mac_addr);
	if (slot == -1) {
		mutex_exit(&ixgbe->gen_lock);
		return (EINVAL);
	}

	if (ixgbe->unicst_addr[slot].mac.set == 0) {
		mutex_exit(&ixgbe->gen_lock);
		return (EINVAL);
	}

	/* Clear both the hardware RAR and the software bookkeeping */
	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
	(void) ixgbe_clear_rar(hw, slot);
	ixgbe->unicst_addr[slot].mac.set = 0;
	ixgbe->unicst_avail++;

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
|
↓ open down ↓ |
2627 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX