3014 Intel X540 Support
--- old/usr/src/uts/common/io/ixgbe/ixgbe_main.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 + * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 + * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
28 30 */
29 31
30 32 #include "ixgbe_sw.h"
31 33
32 34 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
33 35 static char ixgbe_version[] = "ixgbe 1.1.7";
34 36
35 37 /*
36 38 * Local function prototypes
37 39 */
38 40 static int ixgbe_register_mac(ixgbe_t *);
39 41 static int ixgbe_identify_hardware(ixgbe_t *);
40 42 static int ixgbe_regs_map(ixgbe_t *);
41 43 static void ixgbe_init_properties(ixgbe_t *);
42 44 static int ixgbe_init_driver_settings(ixgbe_t *);
43 45 static void ixgbe_init_locks(ixgbe_t *);
44 46 static void ixgbe_destroy_locks(ixgbe_t *);
45 47 static int ixgbe_init(ixgbe_t *);
46 48 static int ixgbe_chip_start(ixgbe_t *);
47 49 static void ixgbe_chip_stop(ixgbe_t *);
48 50 static int ixgbe_reset(ixgbe_t *);
49 51 static void ixgbe_tx_clean(ixgbe_t *);
50 52 static boolean_t ixgbe_tx_drain(ixgbe_t *);
51 53 static boolean_t ixgbe_rx_drain(ixgbe_t *);
52 54 static int ixgbe_alloc_rings(ixgbe_t *);
53 55 static void ixgbe_free_rings(ixgbe_t *);
54 56 static int ixgbe_alloc_rx_data(ixgbe_t *);
55 57 static void ixgbe_free_rx_data(ixgbe_t *);
56 58 static void ixgbe_setup_rings(ixgbe_t *);
57 59 static void ixgbe_setup_rx(ixgbe_t *);
58 60 static void ixgbe_setup_tx(ixgbe_t *);
59 61 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
60 62 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
61 63 static void ixgbe_setup_rss(ixgbe_t *);
62 64 static void ixgbe_setup_vmdq(ixgbe_t *);
63 65 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
64 66 static void ixgbe_init_unicst(ixgbe_t *);
65 67 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
66 68 static void ixgbe_setup_multicst(ixgbe_t *);
67 69 static void ixgbe_get_hw_state(ixgbe_t *);
68 70 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
69 71 static void ixgbe_get_conf(ixgbe_t *);
70 72 static void ixgbe_init_params(ixgbe_t *);
71 73 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
72 74 static void ixgbe_driver_link_check(ixgbe_t *);
73 75 static void ixgbe_sfp_check(void *);
74 76 static void ixgbe_overtemp_check(void *);
75 77 static void ixgbe_link_timer(void *);
76 78 static void ixgbe_local_timer(void *);
77 79 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
78 80 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
79 81 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
80 82 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
81 83 static boolean_t is_valid_mac_addr(uint8_t *);
82 84 static boolean_t ixgbe_stall_check(ixgbe_t *);
83 85 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
84 86 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
85 87 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
86 88 static int ixgbe_alloc_intrs(ixgbe_t *);
87 89 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
88 90 static int ixgbe_add_intr_handlers(ixgbe_t *);
89 91 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
90 92 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
91 93 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
92 94 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
93 95 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
94 96 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
95 97 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
96 98 static void ixgbe_setup_adapter_vector(ixgbe_t *);
97 99 static void ixgbe_rem_intr_handlers(ixgbe_t *);
98 100 static void ixgbe_rem_intrs(ixgbe_t *);
99 101 static int ixgbe_enable_intrs(ixgbe_t *);
100 102 static int ixgbe_disable_intrs(ixgbe_t *);
101 103 static uint_t ixgbe_intr_legacy(void *, void *);
102 104 static uint_t ixgbe_intr_msi(void *, void *);
103 105 static uint_t ixgbe_intr_msix(void *, void *);
104 106 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
105 107 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
106 108 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
107 109 static void ixgbe_get_driver_control(struct ixgbe_hw *);
108 110 static int ixgbe_addmac(void *, const uint8_t *);
109 111 static int ixgbe_remmac(void *, const uint8_t *);
110 112 static void ixgbe_release_driver_control(struct ixgbe_hw *);
111 113
112 114 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
113 115 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
114 116 static int ixgbe_resume(dev_info_t *);
115 117 static int ixgbe_suspend(dev_info_t *);
116 118 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
117 119 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
118 120 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
119 121 static int ixgbe_intr_cb_register(ixgbe_t *);
120 122 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
121 123
122 124 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
123 125 const void *impl_data);
124 126 static void ixgbe_fm_init(ixgbe_t *);
125 127 static void ixgbe_fm_fini(ixgbe_t *);
126 128
127 129 char *ixgbe_priv_props[] = {
128 130 "_tx_copy_thresh",
129 131 "_tx_recycle_thresh",
130 132 "_tx_overload_thresh",
131 133 "_tx_resched_thresh",
132 134 "_rx_copy_thresh",
133 135 "_rx_limit_per_intr",
134 136 "_intr_throttling",
135 137 "_adv_pause_cap",
136 138 "_adv_asym_pause_cap",
137 139 NULL
138 140 };
139 141
140 142 #define IXGBE_MAX_PRIV_PROPS \
141 143 (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
142 144
143 145 static struct cb_ops ixgbe_cb_ops = {
144 146 nulldev, /* cb_open */
145 147 nulldev, /* cb_close */
146 148 nodev, /* cb_strategy */
147 149 nodev, /* cb_print */
148 150 nodev, /* cb_dump */
149 151 nodev, /* cb_read */
150 152 nodev, /* cb_write */
151 153 nodev, /* cb_ioctl */
152 154 nodev, /* cb_devmap */
153 155 nodev, /* cb_mmap */
154 156 nodev, /* cb_segmap */
155 157 nochpoll, /* cb_chpoll */
156 158 ddi_prop_op, /* cb_prop_op */
157 159 NULL, /* cb_stream */
158 160 D_MP | D_HOTPLUG, /* cb_flag */
159 161 CB_REV, /* cb_rev */
160 162 nodev, /* cb_aread */
161 163 nodev /* cb_awrite */
162 164 };
163 165
164 166 static struct dev_ops ixgbe_dev_ops = {
165 167 DEVO_REV, /* devo_rev */
166 168 0, /* devo_refcnt */
167 169 NULL, /* devo_getinfo */
168 170 nulldev, /* devo_identify */
169 171 nulldev, /* devo_probe */
170 172 ixgbe_attach, /* devo_attach */
171 173 ixgbe_detach, /* devo_detach */
172 174 nodev, /* devo_reset */
173 175 &ixgbe_cb_ops, /* devo_cb_ops */
174 176 NULL, /* devo_bus_ops */
175 177 ddi_power, /* devo_power */
176 178 ddi_quiesce_not_supported, /* devo_quiesce */
177 179 };
178 180
179 181 static struct modldrv ixgbe_modldrv = {
180 182 &mod_driverops, /* Type of module. This one is a driver */
181 183 ixgbe_ident, /* Description string */
182 184 &ixgbe_dev_ops /* driver ops */
183 185 };
184 186
185 187 static struct modlinkage ixgbe_modlinkage = {
186 188 MODREV_1, &ixgbe_modldrv, NULL
187 189 };
188 190
189 191 /*
190 192 * Access attributes for register mapping
191 193 */
192 194 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
193 195 DDI_DEVICE_ATTR_V1,
194 196 DDI_STRUCTURE_LE_ACC,
195 197 DDI_STRICTORDER_ACC,
196 198 DDI_FLAGERR_ACC
197 199 };
198 200
199 201 /*
200 202 * Loopback property
201 203 */
202 204 static lb_property_t lb_normal = {
203 205 normal, "normal", IXGBE_LB_NONE
204 206 };
205 207
206 208 static lb_property_t lb_mac = {
207 209 internal, "MAC", IXGBE_LB_INTERNAL_MAC
208 210 };
209 211
210 212 static lb_property_t lb_external = {
211 213 external, "External", IXGBE_LB_EXTERNAL
212 214 };
213 215
214 216 #define IXGBE_M_CALLBACK_FLAGS \
215 217 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
216 218
217 219 static mac_callbacks_t ixgbe_m_callbacks = {
218 220 IXGBE_M_CALLBACK_FLAGS,
219 221 ixgbe_m_stat,
220 222 ixgbe_m_start,
221 223 ixgbe_m_stop,
222 224 ixgbe_m_promisc,
223 225 ixgbe_m_multicst,
224 226 NULL,
225 227 NULL,
226 228 NULL,
227 229 ixgbe_m_ioctl,
228 230 ixgbe_m_getcapab,
229 231 NULL,
230 232 NULL,
231 233 ixgbe_m_setprop,
232 234 ixgbe_m_getprop,
233 235 ixgbe_m_propinfo
234 236 };
235 237
236 238 /*
237 239 * Initialize capabilities of each supported adapter type
238 240 */
239 241 static adapter_info_t ixgbe_82598eb_cap = {
240 242 64, /* maximum number of rx queues */
241 243 1, /* minimum number of rx queues */
242 244 64, /* default number of rx queues */
243 245 16, /* maximum number of rx groups */
244 246 1, /* minimum number of rx groups */
245 247 1, /* default number of rx groups */
246 248 32, /* maximum number of tx queues */
247 249 1, /* minimum number of tx queues */
248 250 8, /* default number of tx queues */
249 251 16366, /* maximum MTU size */
250 252 0xFFFF, /* maximum interrupt throttle rate */
251 253 0, /* minimum interrupt throttle rate */
252 254 200, /* default interrupt throttle rate */
253 255 18, /* maximum total msix vectors */
254 256 16, /* maximum number of ring vectors */
255 257 2, /* maximum number of other vectors */
256 258 IXGBE_EICR_LSC, /* "other" interrupt types handled */
257 259 0, /* "other" interrupt types enable mask */
258 260 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */
259 261 | IXGBE_FLAG_RSS_CAPABLE
260 262 | IXGBE_FLAG_VMDQ_CAPABLE)
261 263 };
262 264
263 265 static adapter_info_t ixgbe_82599eb_cap = {
264 266 128, /* maximum number of rx queues */
265 267 1, /* minimum number of rx queues */
266 268 128, /* default number of rx queues */
267 269 64, /* maximum number of rx groups */
268 270 1, /* minimum number of rx groups */
269 271 1, /* default number of rx groups */
270 272 128, /* maximum number of tx queues */
271 273 1, /* minimum number of tx queues */
272 274 8, /* default number of tx queues */
273 275 15500, /* maximum MTU size */
274 276 0xFF8, /* maximum interrupt throttle rate */
275 277 0, /* minimum interrupt throttle rate */
276 278 200, /* default interrupt throttle rate */
277 279 64, /* maximum total msix vectors */
278 280 16, /* maximum number of ring vectors */
279 281 2, /* maximum number of other vectors */
280 282 (IXGBE_EICR_LSC
281 283 | IXGBE_EICR_GPI_SDP1
282 284 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
283 285
284 286 (IXGBE_SDP1_GPIEN
285 287 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
286 288
287 289 (IXGBE_FLAG_DCA_CAPABLE
288 290 | IXGBE_FLAG_RSS_CAPABLE
289 291 | IXGBE_FLAG_VMDQ_CAPABLE
290 292 | IXGBE_FLAG_RSC_CAPABLE
291 293 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
292 294 };
293 295
296 +static adapter_info_t ixgbe_X540_cap = {
297 + 128, /* maximum number of rx queues */
298 + 1, /* minimum number of rx queues */
299 + 128, /* default number of rx queues */
300 + 64, /* maximum number of rx groups */
301 + 1, /* minimum number of rx groups */
302 + 1, /* default number of rx groups */
303 + 128, /* maximum number of tx queues */
304 + 1, /* minimum number of tx queues */
305 + 8, /* default number of tx queues */
306 + 15500, /* maximum MTU size */
307 + 0xFF8, /* maximum interrupt throttle rate */
308 + 0, /* minimum interrupt throttle rate */
309 + 200, /* default interrupt throttle rate */
310 + 64, /* maximum total msix vectors */
311 + 16, /* maximum number of ring vectors */
312 + 2, /* maximum number of other vectors */
313 + (IXGBE_EICR_LSC
314 + | IXGBE_EICR_GPI_SDP1
315 + | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
316 +
317 + (IXGBE_SDP1_GPIEN
318 + | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
319 +
320 + (IXGBE_FLAG_DCA_CAPABLE
321 + | IXGBE_FLAG_RSS_CAPABLE
322 + | IXGBE_FLAG_VMDQ_CAPABLE
323 + | IXGBE_FLAG_RSC_CAPABLE) /* capability flags */
324 +};
325 +
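The new X540 table mirrors the 82599 limits above; the notable difference is the final capability word, which drops IXGBE_FLAG_SFP_PLUG_CAPABLE because the X540 is a 10GBASE-T copper MAC with an integrated PHY rather than an SFP+ part. Elsewhere in the driver such flags gate optional work; an illustrative (not verbatim) use of this one:

        /* Illustrative: SFP hot-plug handling is gated on the capab flag. */
        if (ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) {
                (void) ddi_taskq_dispatch(ixgbe->sfp_taskq,
                    ixgbe_sfp_check, (void *)ixgbe, DDI_SLEEP);
        }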
294 326 /*
295 327 * Module Initialization Functions.
296 328 */
297 329
298 330 int
299 331 _init(void)
300 332 {
301 333 int status;
302 334
303 335 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
304 336
305 337 status = mod_install(&ixgbe_modlinkage);
306 338
307 339 if (status != DDI_SUCCESS) {
308 340 mac_fini_ops(&ixgbe_dev_ops);
309 341 }
310 342
311 343 return (status);
312 344 }
313 345
314 346 int
315 347 _fini(void)
316 348 {
317 349 int status;
318 350
319 351 status = mod_remove(&ixgbe_modlinkage);
320 352
321 353 if (status == DDI_SUCCESS) {
322 354 mac_fini_ops(&ixgbe_dev_ops);
323 355 }
324 356
325 357 return (status);
326 358 }
327 359
328 360 int
329 361 _info(struct modinfo *modinfop)
330 362 {
331 363 int status;
332 364
333 365 status = mod_info(&ixgbe_modlinkage, modinfop);
334 366
335 367 return (status);
336 368 }
337 369
338 370 /*
339 371 * ixgbe_attach - Driver attach.
340 372 *
341 373 * This function is the device specific initialization entry
342 374 * point. This entry point is required and must be written.
343 375 * The DDI_ATTACH command must be provided in the attach entry
344 376 * point. When attach() is called with cmd set to DDI_ATTACH,
345 377 * all normal kernel services (such as kmem_alloc(9F)) are
346 378 * available for use by the driver.
347 379 *
348 380 * The attach() function will be called once for each instance
349 381 * of the device on the system with cmd set to DDI_ATTACH.
350 382 * Until attach() succeeds, the only driver entry points which
351 383 * may be called are open(9E) and getinfo(9E).
352 384 */
353 385 static int
354 386 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
355 387 {
356 388 ixgbe_t *ixgbe;
357 389 struct ixgbe_osdep *osdep;
358 390 struct ixgbe_hw *hw;
359 391 int instance;
360 392 char taskqname[32];
361 393
362 394 /*
363 395 * Check the command and perform corresponding operations
364 396 */
365 397 switch (cmd) {
366 398 default:
367 399 return (DDI_FAILURE);
368 400
369 401 case DDI_RESUME:
370 402 return (ixgbe_resume(devinfo));
371 403
372 404 case DDI_ATTACH:
373 405 break;
374 406 }
375 407
376 408 /* Get the device instance */
377 409 instance = ddi_get_instance(devinfo);
378 410
379 411 /* Allocate memory for the instance data structure */
380 412 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
381 413
382 414 ixgbe->dip = devinfo;
383 415 ixgbe->instance = instance;
384 416
385 417 hw = &ixgbe->hw;
386 418 osdep = &ixgbe->osdep;
387 419 hw->back = osdep;
388 420 osdep->ixgbe = ixgbe;
389 421
390 422 /* Attach the instance pointer to the dev_info data structure */
391 423 ddi_set_driver_private(devinfo, ixgbe);
392 424
393 425 /*
394 426 * Initialize for fma support
395 427 */
396 428 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
397 429 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
398 430 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
399 431 ixgbe_fm_init(ixgbe);
400 432 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
401 433
402 434 /*
403 435 * Map PCI config space registers
404 436 */
405 437 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
406 438 ixgbe_error(ixgbe, "Failed to map PCI configurations");
407 439 goto attach_fail;
408 440 }
409 441 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
410 442
411 443 /*
412 444 * Identify the chipset family
413 445 */
414 446 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
415 447 ixgbe_error(ixgbe, "Failed to identify hardware");
416 448 goto attach_fail;
417 449 }
418 450
419 451 /*
420 452 * Map device registers
421 453 */
422 454 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
423 455 ixgbe_error(ixgbe, "Failed to map device registers");
424 456 goto attach_fail;
425 457 }
426 458 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
427 459
428 460 /*
429 461 * Initialize driver parameters
430 462 */
431 463 ixgbe_init_properties(ixgbe);
432 464 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
433 465
434 466 /*
435 467 * Register interrupt callback
436 468 */
437 469 if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
438 470 ixgbe_error(ixgbe, "Failed to register interrupt callback");
439 471 goto attach_fail;
440 472 }
441 473
442 474 /*
443 475 * Allocate interrupts
444 476 */
445 477 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
446 478 ixgbe_error(ixgbe, "Failed to allocate interrupts");
447 479 goto attach_fail;
448 480 }
449 481 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
450 482
451 483 /*
452 484 * Allocate rx/tx rings based on the ring numbers.
453 485 * The actual numbers of rx/tx rings are decided by the number of
454 486 * allocated interrupt vectors, so we should allocate the rings after
455 487 * interrupts are allocated.
456 488 */
457 489 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
458 490 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
459 491 goto attach_fail;
460 492 }
461 493 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
462 494
463 495 /*
464 496 * Map rings to interrupt vectors
465 497 */
466 498 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
467 499 ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
468 500 goto attach_fail;
469 501 }
470 502
471 503 /*
472 504 * Add interrupt handlers
473 505 */
474 506 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
475 507 ixgbe_error(ixgbe, "Failed to add interrupt handlers");
476 508 goto attach_fail;
477 509 }
478 510 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
479 511
480 512 /*
481 513 * Create a taskq for sfp-change
482 514 */
483 515 (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
484 516 if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
485 517 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
486 518 ixgbe_error(ixgbe, "sfp_taskq create failed");
487 519 goto attach_fail;
488 520 }
489 521 ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
490 522
491 523 /*
492 524 * Create a taskq for over-temp
493 525 */
494 526 (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
495 527 if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
496 528 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
497 529 ixgbe_error(ixgbe, "overtemp_taskq create failed");
498 530 goto attach_fail;
499 531 }
500 532 ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;
501 533
502 534 /*
503 535 * Initialize driver parameters
504 536 */
505 537 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
506 538 ixgbe_error(ixgbe, "Failed to initialize driver settings");
507 539 goto attach_fail;
508 540 }
509 541
510 542 /*
511 543 * Initialize mutexes for this device.
512 544 * Do this before enabling the interrupt handler and
513 545 * register the softint to avoid the condition where
514 546 * interrupt handler can try using uninitialized mutex.
515 547 */
516 548 ixgbe_init_locks(ixgbe);
517 549 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
518 550
519 551 /*
520 552 * Initialize chipset hardware
521 553 */
522 554 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
523 555 ixgbe_error(ixgbe, "Failed to initialize adapter");
524 556 goto attach_fail;
525 557 }
526 558 ixgbe->link_check_complete = B_FALSE;
527 559 ixgbe->link_check_hrtime = gethrtime() +
528 560 (IXGBE_LINK_UP_TIME * 100000000ULL);
529 561 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
530 562
531 563 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
532 564 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
533 565 goto attach_fail;
534 566 }
535 567
536 568 /*
537 569 * Initialize statistics
538 570 */
539 571 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
540 572 ixgbe_error(ixgbe, "Failed to initialize statistics");
541 573 goto attach_fail;
542 574 }
543 575 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
544 576
545 577 /*
546 578 * Register the driver to the MAC
547 579 */
548 580 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
549 581 ixgbe_error(ixgbe, "Failed to register MAC");
550 582 goto attach_fail;
551 583 }
552 584 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
553 585 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
554 586
555 587 ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
556 588 IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
557 589 if (ixgbe->periodic_id == 0) {
558 590 ixgbe_error(ixgbe, "Failed to add the link check timer");
559 591 goto attach_fail;
560 592 }
561 593 ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
562 594
563 595 /*
564 596 * Now that mutex locks are initialized, and the chip is also
565 597 * initialized, enable interrupts.
566 598 */
567 599 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
568 600 ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
569 601 goto attach_fail;
570 602 }
571 603 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
572 604
573 605 ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version);
574 606 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
575 607
576 608 return (DDI_SUCCESS);
577 609
578 610 attach_fail:
579 611 ixgbe_unconfigure(devinfo, ixgbe);
580 612 return (DDI_FAILURE);
581 613 }
582 614
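ixgbe_attach() follows the usual attach_progress idiom: each completed step sets a bit, and the single attach_fail path calls ixgbe_unconfigure(), which tears down only the steps whose bits are set. A minimal sketch of the pattern, using names from this file:

        /* On success, record the step ... */
        ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;

        /* ... so that teardown later undoes exactly what was done. */
        if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR)
                ixgbe_rem_intrs(ixgbe);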
583 615 /*
584 616 * ixgbe_detach - Driver detach.
585 617 *
586 618 * The detach() function is the complement of the attach routine.
587 619 * If cmd is set to DDI_DETACH, detach() is used to remove the
588 620 * state associated with a given instance of a device node
589 621 * prior to the removal of that instance from the system.
590 622 *
591 623 * The detach() function will be called once for each instance
592 624 * of the device for which there has been a successful attach()
593 625 * once there are no longer any opens on the device.
594 626 *
595 627 * Interrupt routines are disabled. All memory allocated by this
596 628 * driver is freed.
597 629 */
598 630 static int
599 631 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
600 632 {
601 633 ixgbe_t *ixgbe;
602 634
603 635 /*
604 636 * Check detach command
605 637 */
606 638 switch (cmd) {
607 639 default:
608 640 return (DDI_FAILURE);
609 641
610 642 case DDI_SUSPEND:
611 643 return (ixgbe_suspend(devinfo));
612 644
613 645 case DDI_DETACH:
614 646 break;
615 647 }
616 648
617 649 /*
618 650 * Get the pointer to the driver private data structure
619 651 */
620 652 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
621 653 if (ixgbe == NULL)
622 654 return (DDI_FAILURE);
623 655
624 656 /*
625 657 * If the device is still running, it needs to be stopped first.
626 658 * This check is necessary because under some specific circumstances,
627 659 * the detach routine can be called without stopping the interface
628 660 * first.
629 661 */
630 662 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
631 663 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
632 664 mutex_enter(&ixgbe->gen_lock);
633 665 ixgbe_stop(ixgbe, B_TRUE);
634 666 mutex_exit(&ixgbe->gen_lock);
635 667 /* Disable and stop the watchdog timer */
636 668 ixgbe_disable_watchdog_timer(ixgbe);
637 669 }
638 670
639 671 /*
640 672 * Check if there are still rx buffers held by the upper layer.
641 673 * If so, fail the detach.
642 674 */
643 675 if (!ixgbe_rx_drain(ixgbe))
644 676 return (DDI_FAILURE);
645 677
646 678 /*
647 679 * Do the remaining unconfigure routines
648 680 */
649 681 ixgbe_unconfigure(devinfo, ixgbe);
650 682
651 683 return (DDI_SUCCESS);
652 684 }
653 685
654 686 static void
655 687 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
656 688 {
657 689 /*
658 690 * Disable interrupt
659 691 */
660 692 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
661 693 (void) ixgbe_disable_intrs(ixgbe);
662 694 }
663 695
664 696 /*
665 697 * remove the link check timer
666 698 */
667 699 if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
668 700 if (ixgbe->periodic_id != NULL) {
669 701 ddi_periodic_delete(ixgbe->periodic_id);
670 702 ixgbe->periodic_id = NULL;
671 703 }
672 704 }
673 705
674 706 /*
675 707 * Unregister MAC
676 708 */
677 709 if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
678 710 (void) mac_unregister(ixgbe->mac_hdl);
679 711 }
680 712
681 713 /*
682 714 * Free statistics
683 715 */
684 716 if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
685 717 kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
686 718 }
687 719
688 720 /*
689 721 * Remove interrupt handlers
690 722 */
691 723 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
692 724 ixgbe_rem_intr_handlers(ixgbe);
693 725 }
694 726
695 727 /*
696 728 * Remove taskq for sfp-status-change
697 729 */
698 730 if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
699 731 ddi_taskq_destroy(ixgbe->sfp_taskq);
700 732 }
701 733
702 734 /*
703 735 * Remove taskq for over-temp
704 736 */
705 737 if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
706 738 ddi_taskq_destroy(ixgbe->overtemp_taskq);
707 739 }
708 740
709 741 /*
710 742 * Remove interrupts
711 743 */
712 744 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
713 745 ixgbe_rem_intrs(ixgbe);
714 746 }
715 747
716 748 /*
717 749 * Unregister interrupt callback handler
718 750 */
719 751 (void) ddi_cb_unregister(ixgbe->cb_hdl);
720 752
721 753 /*
722 754 * Remove driver properties
723 755 */
724 756 if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
725 757 (void) ddi_prop_remove_all(devinfo);
726 758 }
727 759
728 760 /*
729 761 * Stop the chipset
730 762 */
731 763 if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
732 764 mutex_enter(&ixgbe->gen_lock);
733 765 ixgbe_chip_stop(ixgbe);
734 766 mutex_exit(&ixgbe->gen_lock);
735 767 }
736 768
737 769 /*
738 770 * Free register handle
739 771 */
740 772 if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
741 773 if (ixgbe->osdep.reg_handle != NULL)
742 774 ddi_regs_map_free(&ixgbe->osdep.reg_handle);
743 775 }
744 776
745 777 /*
746 778 * Free PCI config handle
747 779 */
748 780 if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
749 781 if (ixgbe->osdep.cfg_handle != NULL)
750 782 pci_config_teardown(&ixgbe->osdep.cfg_handle);
751 783 }
752 784
753 785 /*
754 786 * Free locks
755 787 */
756 788 if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
757 789 ixgbe_destroy_locks(ixgbe);
758 790 }
759 791
760 792 /*
761 793 * Free the rx/tx rings
762 794 */
763 795 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
764 796 ixgbe_free_rings(ixgbe);
765 797 }
766 798
767 799 /*
768 800 * Unregister FMA capabilities
769 801 */
770 802 if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
771 803 ixgbe_fm_fini(ixgbe);
772 804 }
773 805
774 806 /*
775 807 * Free the driver data structure
776 808 */
777 809 kmem_free(ixgbe, sizeof (ixgbe_t));
778 810
779 811 ddi_set_driver_private(devinfo, NULL);
780 812 }
781 813
782 814 /*
783 815 * ixgbe_register_mac - Register the driver and its function pointers with
784 816 * the GLD interface.
785 817 */
786 818 static int
787 819 ixgbe_register_mac(ixgbe_t *ixgbe)
788 820 {
789 821 struct ixgbe_hw *hw = &ixgbe->hw;
790 822 mac_register_t *mac;
791 823 int status;
792 824
793 825 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
794 826 return (IXGBE_FAILURE);
795 827
796 828 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
797 829 mac->m_driver = ixgbe;
798 830 mac->m_dip = ixgbe->dip;
799 831 mac->m_src_addr = hw->mac.addr;
800 832 mac->m_callbacks = &ixgbe_m_callbacks;
801 833 mac->m_min_sdu = 0;
802 834 mac->m_max_sdu = ixgbe->default_mtu;
803 835 mac->m_margin = VLAN_TAGSZ;
804 836 mac->m_priv_props = ixgbe_priv_props;
805 837 mac->m_v12n = MAC_VIRT_LEVEL1;
806 838
807 839 status = mac_register(mac, &ixgbe->mac_hdl);
808 840
809 841 mac_free(mac);
810 842
811 843 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
812 844 }
813 845
814 846 /*
815 847 * ixgbe_identify_hardware - Identify the type of the chipset.
816 848 */
817 849 static int
818 850 ixgbe_identify_hardware(ixgbe_t *ixgbe)
819 851 {
820 852 struct ixgbe_hw *hw = &ixgbe->hw;
821 853 struct ixgbe_osdep *osdep = &ixgbe->osdep;
822 854
823 855 /*
824 856 * Get the device id
825 857 */
826 858 hw->vendor_id =
827 859 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
828 860 hw->device_id =
829 861 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
830 862 hw->revision_id =
831 863 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
832 864 hw->subsystem_device_id =
833 865 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
834 866 hw->subsystem_vendor_id =
835 867 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
836 868
837 869 /*
838 870 * Set the mac type of the adapter based on the device id
839 871 */
840 872 if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
841 873 return (IXGBE_FAILURE);
842 874 }
843 875
844 876 /*
845 877 * Install adapter capabilities
846 878 */
847 879 switch (hw->mac.type) {
848 880 case ixgbe_mac_82598EB:
849 881 IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
850 882 ixgbe->capab = &ixgbe_82598eb_cap;
851 883
852 884 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
853 885 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
854 886 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
855 887 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
856 888 }
857 889 break;
858 890
859 891 case ixgbe_mac_82599EB:
860 892 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
861 893 ixgbe->capab = &ixgbe_82599eb_cap;
862 894
863 895 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
864 896 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
865 897 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
866 898 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
867 899 }
868 900 break;
869 901
902 + case ixgbe_mac_X540:
903 + IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
904 + ixgbe->capab = &ixgbe_X540_cap;
905 + /*
906 + * For now, X540 is all set in its capab structure.
907 + * As other X540 variants show up, things can change here.
908 + */
909 + break;
910 +
870 911 default:
871 912 IXGBE_DEBUGLOG_1(ixgbe,
872 913 "adapter not supported in ixgbe_identify_hardware(): %d\n",
873 914 hw->mac.type);
874 915 return (IXGBE_FAILURE);
875 916 }
876 917
877 918 return (IXGBE_SUCCESS);
878 919 }
879 920
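ixgbe_set_mac_type(), in the Intel shared code rather than this file, maps the PCI device ID read above to a mac type, and the new ixgbe_mac_X540 case then selects ixgbe_X540_cap. A sketch of that dispatch, assuming the shared-code device-ID name (IXGBE_DEV_ID_X540T, 0x1528, for the X540-AT2):

        /* Sketch of the shared-code mapping; not the verbatim source. */
        switch (hw->device_id) {
        case IXGBE_DEV_ID_X540T:        /* 0x1528, X540-AT2 */
                hw->mac.type = ixgbe_mac_X540;
                break;
        /* ... 82598/82599 device IDs elided ... */
        }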
880 921 /*
881 922 * ixgbe_regs_map - Map the device registers.
882 923 *
883 924 */
884 925 static int
885 926 ixgbe_regs_map(ixgbe_t *ixgbe)
886 927 {
887 928 dev_info_t *devinfo = ixgbe->dip;
888 929 struct ixgbe_hw *hw = &ixgbe->hw;
889 930 struct ixgbe_osdep *osdep = &ixgbe->osdep;
890 931 off_t mem_size;
891 932
892 933 /*
893 934 * First get the size of device registers to be mapped.
894 935 */
895 936 if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
896 937 != DDI_SUCCESS) {
897 938 return (IXGBE_FAILURE);
898 939 }
899 940
900 941 /*
901 942 * Call ddi_regs_map_setup() to map registers
902 943 */
903 944 if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
904 945 (caddr_t *)&hw->hw_addr, 0,
905 946 mem_size, &ixgbe_regs_acc_attr,
906 947 &osdep->reg_handle)) != DDI_SUCCESS) {
907 948 return (IXGBE_FAILURE);
908 949 }
909 950
910 951 return (IXGBE_SUCCESS);
911 952 }
912 953
913 954 /*
914 955 * ixgbe_init_properties - Initialize driver properties.
915 956 */
916 957 static void
917 958 ixgbe_init_properties(ixgbe_t *ixgbe)
918 959 {
919 960 /*
920 961 * Get conf file properties, including link settings,
921 962 * jumbo frames, ring number, descriptor number, etc.
922 963 */
923 964 ixgbe_get_conf(ixgbe);
924 965
925 966 ixgbe_init_params(ixgbe);
926 967 }
927 968
928 969 /*
929 970 * ixgbe_init_driver_settings - Initialize driver settings.
930 971 *
931 972 * The settings include hardware function pointers, bus information,
932 973 * rx/tx rings settings, link state, and any other parameters that
933 974 * need to be setup during driver initialization.
934 975 */
935 976 static int
936 977 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
937 978 {
938 979 struct ixgbe_hw *hw = &ixgbe->hw;
939 980 dev_info_t *devinfo = ixgbe->dip;
940 981 ixgbe_rx_ring_t *rx_ring;
941 982 ixgbe_rx_group_t *rx_group;
942 983 ixgbe_tx_ring_t *tx_ring;
943 984 uint32_t rx_size;
944 985 uint32_t tx_size;
945 986 uint32_t ring_per_group;
946 987 int i;
947 988
948 989 /*
949 990 * Initialize chipset specific hardware function pointers
950 991 */
951 992 if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
952 993 return (IXGBE_FAILURE);
953 994 }
954 995
955 996 /*
956 997 * Get the system page size
957 998 */
958 999 ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
959 1000
960 1001 /*
961 1002 * Set rx buffer size
962 1003 *
963 1004 * The IP header alignment room is counted in the calculation.
964 1005 * The rx buffer size is in unit of 1K that is required by the
965 1006 * chipset hardware.
966 1007 */
967 1008 rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
968 1009 ixgbe->rx_buf_size = ((rx_size >> 10) +
969 1010 ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
970 1011
971 1012 /*
972 1013 * Set tx buffer size
973 1014 */
974 1015 tx_size = ixgbe->max_frame_size;
975 1016 ixgbe->tx_buf_size = ((tx_size >> 10) +
976 1017 ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
977 1018
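Both expressions above round a byte count up to the next 1 KB boundary, since the hardware takes buffer sizes in 1 KB units: ((x >> 10) + ((x & 1023) ? 1 : 0)) << 10 is ceil(x / 1024) * 1024. A worked example, assuming the usual IPHDR_ALIGN_ROOM of 2 and a 1500-byte MTU (max_frame_size = 1518):

        /* rx_size = 1518 + 2 = 1520; 1520 >> 10 = 1, remainder nonzero, */
        /* so rx_buf_size = (1 + 1) << 10 = 2048 bytes (2 KB).           */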
978 1019 /*
979 1020 * Initialize rx/tx rings/groups parameters
980 1021 */
981 1022 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
982 1023 for (i = 0; i < ixgbe->num_rx_rings; i++) {
983 1024 rx_ring = &ixgbe->rx_rings[i];
984 1025 rx_ring->index = i;
985 1026 rx_ring->ixgbe = ixgbe;
986 1027 rx_ring->group_index = i / ring_per_group;
987 1028 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
988 1029 }
989 1030
990 1031 for (i = 0; i < ixgbe->num_rx_groups; i++) {
991 1032 rx_group = &ixgbe->rx_groups[i];
992 1033 rx_group->index = i;
993 1034 rx_group->ixgbe = ixgbe;
994 1035 }
995 1036
996 1037 for (i = 0; i < ixgbe->num_tx_rings; i++) {
997 1038 tx_ring = &ixgbe->tx_rings[i];
998 1039 tx_ring->index = i;
999 1040 tx_ring->ixgbe = ixgbe;
1000 1041 if (ixgbe->tx_head_wb_enable)
1001 1042 tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
1002 1043 else
1003 1044 tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
1004 1045
1005 1046 tx_ring->ring_size = ixgbe->tx_ring_size;
1006 1047 tx_ring->free_list_size = ixgbe->tx_ring_size +
1007 1048 (ixgbe->tx_ring_size >> 1);
1008 1049 }
1009 1050
1010 1051 /*
1011 1052 * Initialize values of interrupt throttling rate
1012 1053 */
1013 1054 for (i = 1; i < MAX_INTR_VECTOR; i++)
1014 1055 ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
1015 1056
1016 1057 /*
1017 1058 * The initial link state should be "unknown"
1018 1059 */
1019 1060 ixgbe->link_state = LINK_STATE_UNKNOWN;
1020 1061
1021 1062 return (IXGBE_SUCCESS);
1022 1063 }
1023 1064
1024 1065 /*
1025 1066 * ixgbe_init_locks - Initialize locks.
1026 1067 */
1027 1068 static void
1028 1069 ixgbe_init_locks(ixgbe_t *ixgbe)
1029 1070 {
1030 1071 ixgbe_rx_ring_t *rx_ring;
1031 1072 ixgbe_tx_ring_t *tx_ring;
1032 1073 int i;
1033 1074
1034 1075 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1035 1076 rx_ring = &ixgbe->rx_rings[i];
1036 1077 mutex_init(&rx_ring->rx_lock, NULL,
1037 1078 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1038 1079 }
1039 1080
1040 1081 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1041 1082 tx_ring = &ixgbe->tx_rings[i];
1042 1083 mutex_init(&tx_ring->tx_lock, NULL,
1043 1084 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1044 1085 mutex_init(&tx_ring->recycle_lock, NULL,
1045 1086 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1046 1087 mutex_init(&tx_ring->tcb_head_lock, NULL,
1047 1088 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1048 1089 mutex_init(&tx_ring->tcb_tail_lock, NULL,
1049 1090 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1050 1091 }
1051 1092
1052 1093 mutex_init(&ixgbe->gen_lock, NULL,
1053 1094 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1054 1095
1055 1096 mutex_init(&ixgbe->watchdog_lock, NULL,
1056 1097 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1057 1098 }
1058 1099
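Every mutex here is initialized at interrupt priority via DDI_INTR_PRI(ixgbe->intr_pri), as mutex_init(9F) requires for any lock an interrupt handler may acquire; an ordinary adaptive mutex taken at interrupt level can deadlock. The priority comes from the allocated interrupt handles; a sketch of how the two pieces connect:

        kmutex_t lock;          /* placeholder for any handler-shared lock */
        uint_t pri;

        /* The priority is queried from the first allocated handle ... */
        (void) ddi_intr_get_pri(ixgbe->htable[0], &pri);

        /* ... and every lock the handler may take is initialized at it. */
        mutex_init(&lock, NULL, MUTEX_DRIVER, DDI_INTR_PRI(pri));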
1059 1100 /*
1060 1101 * ixgbe_destroy_locks - Destroy locks.
1061 1102 */
1062 1103 static void
1063 1104 ixgbe_destroy_locks(ixgbe_t *ixgbe)
1064 1105 {
1065 1106 ixgbe_rx_ring_t *rx_ring;
1066 1107 ixgbe_tx_ring_t *tx_ring;
1067 1108 int i;
1068 1109
1069 1110 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1070 1111 rx_ring = &ixgbe->rx_rings[i];
1071 1112 mutex_destroy(&rx_ring->rx_lock);
1072 1113 }
1073 1114
1074 1115 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1075 1116 tx_ring = &ixgbe->tx_rings[i];
1076 1117 mutex_destroy(&tx_ring->tx_lock);
1077 1118 mutex_destroy(&tx_ring->recycle_lock);
1078 1119 mutex_destroy(&tx_ring->tcb_head_lock);
1079 1120 mutex_destroy(&tx_ring->tcb_tail_lock);
1080 1121 }
1081 1122
1082 1123 mutex_destroy(&ixgbe->gen_lock);
1083 1124 mutex_destroy(&ixgbe->watchdog_lock);
1084 1125 }
1085 1126
1086 1127 static int
1087 1128 ixgbe_resume(dev_info_t *devinfo)
1088 1129 {
1089 1130 ixgbe_t *ixgbe;
1090 1131 int i;
1091 1132
1092 1133 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1093 1134 if (ixgbe == NULL)
1094 1135 return (DDI_FAILURE);
1095 1136
1096 1137 mutex_enter(&ixgbe->gen_lock);
1097 1138
1098 1139 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1099 1140 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1100 1141 mutex_exit(&ixgbe->gen_lock);
1101 1142 return (DDI_FAILURE);
1102 1143 }
1103 1144
1104 1145 /*
1105 1146 * Enable and start the watchdog timer
1106 1147 */
1107 1148 ixgbe_enable_watchdog_timer(ixgbe);
1108 1149 }
1109 1150
1110 1151 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);
1111 1152
1112 1153 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1113 1154 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1114 1155 mac_tx_ring_update(ixgbe->mac_hdl,
1115 1156 ixgbe->tx_rings[i].ring_handle);
1116 1157 }
1117 1158 }
1118 1159
1119 1160 mutex_exit(&ixgbe->gen_lock);
1120 1161
1121 1162 return (DDI_SUCCESS);
1122 1163 }
1123 1164
1124 1165 static int
1125 1166 ixgbe_suspend(dev_info_t *devinfo)
1126 1167 {
1127 1168 ixgbe_t *ixgbe;
1128 1169
1129 1170 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1130 1171 if (ixgbe == NULL)
1131 1172 return (DDI_FAILURE);
1132 1173
1133 1174 mutex_enter(&ixgbe->gen_lock);
1134 1175
1135 1176 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
1136 1177 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1137 1178 mutex_exit(&ixgbe->gen_lock);
1138 1179 return (DDI_SUCCESS);
1139 1180 }
1140 1181 ixgbe_stop(ixgbe, B_FALSE);
1141 1182
1142 1183 mutex_exit(&ixgbe->gen_lock);
1143 1184
1144 1185 /*
1145 1186 * Disable and stop the watchdog timer
1146 1187 */
1147 1188 ixgbe_disable_watchdog_timer(ixgbe);
1148 1189
1149 1190 return (DDI_SUCCESS);
1150 1191 }
1151 1192
1152 1193 /*
1153 1194 * ixgbe_init - Initialize the device.
1154 1195 */
1155 1196 static int
1156 1197 ixgbe_init(ixgbe_t *ixgbe)
1157 1198 {
1158 1199 struct ixgbe_hw *hw = &ixgbe->hw;
1159 1200
1160 1201 mutex_enter(&ixgbe->gen_lock);
1161 1202
1162 1203 /*
1163 1204 * Reset chipset to put the hardware in a known state
1164 1205 * before we try to do anything with the eeprom.
1165 1206 */
1166 1207 if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1167 1208 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1168 1209 goto init_fail;
1169 1210 }
1170 1211
1171 1212 /*
1172 1213 * Need to init eeprom before validating the checksum.
1173 1214 */
1174 1215 if (ixgbe_init_eeprom_params(hw) < 0) {
1175 1216 ixgbe_error(ixgbe,
1176 1217 "Unable to intitialize the eeprom interface.");
1177 1218 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1178 1219 goto init_fail;
1179 1220 }
1180 1221
1181 1222 /*
1182 1223 * NVM validation
1183 1224 */
1184 1225 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1185 1226 /*
1186 1227 * Some PCI-E parts fail the first check due to
1187 1228 * the link being in sleep state. Call it again,
1188 1229 * if it fails a second time it's a real issue.
1189 1230 */
1190 1231 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1191 1232 ixgbe_error(ixgbe,
1192 1233 "Invalid NVM checksum. Please contact "
1193 1234 "the vendor to update the NVM.");
1194 1235 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1195 1236 goto init_fail;
1196 1237 }
1197 1238 }
1198 1239
1199 1240 /*
1200 1241 * Setup default flow control thresholds - enable/disable
1201 1242 * & flow control type is controlled by ixgbe.conf
1202 1243 */
1203 - hw->fc.high_water = DEFAULT_FCRTH;
1204 - hw->fc.low_water = DEFAULT_FCRTL;
1244 + hw->fc.high_water[0] = DEFAULT_FCRTH;
1245 + hw->fc.low_water[0] = DEFAULT_FCRTL;
1205 1246 hw->fc.pause_time = DEFAULT_FCPAUSE;
1206 1247 hw->fc.send_xon = B_TRUE;
1207 1248
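The change above tracks the updated Intel shared code that comes with X540 support: the flow-control watermarks became per-traffic-class arrays (DCB gives each of up to eight traffic classes its own FCRTH/FCRTL register pair), so the driver now fills in entry 0, the only class used in this non-DCB configuration. Roughly, and assuming the shared-code definition:

        /* Sketch of the ixgbe_fc_info change (not the verbatim header). */
        struct ixgbe_fc_info {
                u32 high_water[MAX_TRAFFIC_CLASS];  /* was: u32 high_water; */
                u32 low_water[MAX_TRAFFIC_CLASS];   /* was: u32 low_water;  */
                /* pause_time, send_xon, ... unchanged */
        };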
1208 1249 /*
1209 1250 * Initialize link settings
1210 1251 */
1211 1252 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1212 1253
1213 1254 /*
1214 1255 * Initialize the chipset hardware
1215 1256 */
1216 1257 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1217 1258 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1218 1259 goto init_fail;
1219 1260 }
1220 1261
1221 1262 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1222 1263 goto init_fail;
1223 1264 }
1224 1265
1225 1266 mutex_exit(&ixgbe->gen_lock);
1226 1267 return (IXGBE_SUCCESS);
1227 1268
1228 1269 init_fail:
1229 1270 /*
1230 1271 * Reset PHY
1231 1272 */
1232 1273 (void) ixgbe_reset_phy(hw);
1233 1274
1234 1275 mutex_exit(&ixgbe->gen_lock);
1235 1276 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1236 1277 return (IXGBE_FAILURE);
1237 1278 }
1238 1279
1239 1280 /*
1240 1281 * ixgbe_chip_start - Initialize and start the chipset hardware.
1241 1282 */
1242 1283 static int
1243 1284 ixgbe_chip_start(ixgbe_t *ixgbe)
1244 1285 {
1245 1286 struct ixgbe_hw *hw = &ixgbe->hw;
1246 1287 int ret_val, i;
1247 1288
1248 1289 ASSERT(mutex_owned(&ixgbe->gen_lock));
1249 1290
1250 1291 /*
1251 1292 * Get the mac address
1252 1293 * This function should handle SPARC case correctly.
1253 1294 */
1254 1295 if (!ixgbe_find_mac_address(ixgbe)) {
1255 1296 ixgbe_error(ixgbe, "Failed to get the mac address");
1256 1297 return (IXGBE_FAILURE);
1257 1298 }
1258 1299
1259 1300 /*
1260 1301 * Validate the mac address
1261 1302 */
1262 1303 (void) ixgbe_init_rx_addrs(hw);
1263 1304 if (!is_valid_mac_addr(hw->mac.addr)) {
1264 1305 ixgbe_error(ixgbe, "Invalid mac address");
1265 1306 return (IXGBE_FAILURE);
1266 1307 }
1267 1308
1268 1309 /*
1269 1310 * Configure/Initialize hardware
1270 1311 */
1271 1312 ret_val = ixgbe_init_hw(hw);
1272 1313 if (ret_val != IXGBE_SUCCESS) {
1273 1314 if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
1274 1315 ixgbe_error(ixgbe,
1275 1316 "This 82599 device is pre-release and contains"
1276 1317 " outdated firmware, please contact your hardware"
1277 1318 " vendor for a replacement.");
1278 1319 } else {
1279 1320 ixgbe_error(ixgbe, "Failed to initialize hardware");
1280 1321 return (IXGBE_FAILURE);
1281 1322 }
1282 1323 }
1283 1324
1284 1325 /*
1285 1326 * Re-enable relaxed ordering for performance. It is disabled
1286 1327 * by default in the hardware init.
1287 1328 */
1288 1329 if (ixgbe->relax_order_enable == B_TRUE)
1289 1330 ixgbe_enable_relaxed_ordering(hw);
1290 1331
1291 1332 /*
1292 1333 * Setup adapter interrupt vectors
1293 1334 */
1294 1335 ixgbe_setup_adapter_vector(ixgbe);
1295 1336
1296 1337 /*
1297 1338 * Initialize unicast addresses.
1298 1339 */
1299 1340 ixgbe_init_unicst(ixgbe);
1300 1341
1301 1342 /*
1302 1343 * Setup and initialize the mctable structures.
1303 1344 */
1304 1345 ixgbe_setup_multicst(ixgbe);
1305 1346
1306 1347 /*
1307 1348 * Set interrupt throttling rate
1308 1349 */
1309 1350 for (i = 0; i < ixgbe->intr_cnt; i++) {
1310 1351 IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1311 1352 }
1312 1353
1313 1354 /*
1314 1355 * Save the state of the phy
1315 1356 */
1316 1357 ixgbe_get_hw_state(ixgbe);
1317 1358
1318 1359 /*
1319 1360 * Make sure driver has control
1320 1361 */
1321 1362 ixgbe_get_driver_control(hw);
1322 1363
1323 1364 return (IXGBE_SUCCESS);
1324 1365 }
1325 1366
1326 1367 /*
1327 1368 * ixgbe_chip_stop - Stop the chipset hardware
1328 1369 */
1329 1370 static void
1330 1371 ixgbe_chip_stop(ixgbe_t *ixgbe)
1331 1372 {
1332 1373 struct ixgbe_hw *hw = &ixgbe->hw;
1333 1374
1334 1375 ASSERT(mutex_owned(&ixgbe->gen_lock));
1335 1376
1336 1377 /*
1337 1378 * Tell firmware driver is no longer in control
1338 1379 */
1339 1380 ixgbe_release_driver_control(hw);
1340 1381
1341 1382 /*
1342 1383 * Reset the chipset
1343 1384 */
1344 1385 (void) ixgbe_reset_hw(hw);
1345 1386
1346 1387 /*
1347 1388 * Reset PHY
1348 1389 */
1349 1390 (void) ixgbe_reset_phy(hw);
1350 1391 }
1351 1392
1352 1393 /*
1353 1394 * ixgbe_reset - Reset the chipset and re-start the driver.
1354 1395 *
1355 1396 * It involves stopping and re-starting the chipset,
1356 1397 * and re-configuring the rx/tx rings.
1357 1398 */
1358 1399 static int
1359 1400 ixgbe_reset(ixgbe_t *ixgbe)
1360 1401 {
1361 1402 int i;
1362 1403
1363 1404 /*
1364 1405 * Disable and stop the watchdog timer
1365 1406 */
1366 1407 ixgbe_disable_watchdog_timer(ixgbe);
1367 1408
1368 1409 mutex_enter(&ixgbe->gen_lock);
1369 1410
1370 1411 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1371 1412 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
1372 1413
1373 1414 ixgbe_stop(ixgbe, B_FALSE);
1374 1415
1375 1416 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1376 1417 mutex_exit(&ixgbe->gen_lock);
1377 1418 return (IXGBE_FAILURE);
1378 1419 }
1379 1420
1380 1421 /*
1381 1422 * After resetting, need to recheck the link status.
1382 1423 */
1383 1424 ixgbe->link_check_complete = B_FALSE;
1384 1425 ixgbe->link_check_hrtime = gethrtime() +
1385 1426 (IXGBE_LINK_UP_TIME * 100000000ULL);
1386 1427
1387 1428 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);
1388 1429
1389 1430 if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
1390 1431 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1391 1432 mac_tx_ring_update(ixgbe->mac_hdl,
1392 1433 ixgbe->tx_rings[i].ring_handle);
1393 1434 }
1394 1435 }
1395 1436
1396 1437 mutex_exit(&ixgbe->gen_lock);
1397 1438
1398 1439 /*
1399 1440 * Enable and start the watchdog timer
1400 1441 */
1401 1442 ixgbe_enable_watchdog_timer(ixgbe);
1402 1443
1403 1444 return (IXGBE_SUCCESS);
1404 1445 }
1405 1446
1406 1447 /*
1407 1448 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1408 1449 */
1409 1450 static void
1410 1451 ixgbe_tx_clean(ixgbe_t *ixgbe)
1411 1452 {
1412 1453 ixgbe_tx_ring_t *tx_ring;
1413 1454 tx_control_block_t *tcb;
1414 1455 link_list_t pending_list;
1415 1456 uint32_t desc_num;
1416 1457 int i, j;
1417 1458
1418 1459 LINK_LIST_INIT(&pending_list);
1419 1460
1420 1461 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1421 1462 tx_ring = &ixgbe->tx_rings[i];
1422 1463
1423 1464 mutex_enter(&tx_ring->recycle_lock);
1424 1465
1425 1466 /*
1426 1467 * Clean the pending tx data - the pending packets in the
1428 1469 * work_list that have no chance of being transmitted again.
1428 1469 *
1429 1470 * We must ensure the chipset is stopped or the link is down
1430 1471 * before cleaning the transmit packets.
1431 1472 */
1432 1473 desc_num = 0;
1433 1474 for (j = 0; j < tx_ring->ring_size; j++) {
1434 1475 tcb = tx_ring->work_list[j];
1435 1476 if (tcb != NULL) {
1436 1477 desc_num += tcb->desc_num;
1437 1478
1438 1479 tx_ring->work_list[j] = NULL;
1439 1480
1440 1481 ixgbe_free_tcb(tcb);
1441 1482
1442 1483 LIST_PUSH_TAIL(&pending_list, &tcb->link);
1443 1484 }
1444 1485 }
1445 1486
1446 1487 if (desc_num > 0) {
1447 1488 atomic_add_32(&tx_ring->tbd_free, desc_num);
1448 1489 ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1449 1490
1450 1491 /*
1451 1492 * Reset the head and tail pointers of the tbd ring;
1452 1493 * Reset the writeback head if it's enabled.
1453 1494 */
1454 1495 tx_ring->tbd_head = 0;
1455 1496 tx_ring->tbd_tail = 0;
1456 1497 if (ixgbe->tx_head_wb_enable)
1457 1498 *tx_ring->tbd_head_wb = 0;
1458 1499
1459 1500 IXGBE_WRITE_REG(&ixgbe->hw,
1460 1501 IXGBE_TDH(tx_ring->index), 0);
1461 1502 IXGBE_WRITE_REG(&ixgbe->hw,
1462 1503 IXGBE_TDT(tx_ring->index), 0);
1463 1504 }
1464 1505
1465 1506 mutex_exit(&tx_ring->recycle_lock);
1466 1507
1467 1508 /*
1468 1509 * Add the tx control blocks in the pending list to
1469 1510 * the free list.
1470 1511 */
1471 1512 ixgbe_put_free_list(tx_ring, &pending_list);
1472 1513 }
1473 1514 }
1474 1515
1475 1516 /*
1476 1517 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1477 1518 * transmitted.
1478 1519 */
1479 1520 static boolean_t
1480 1521 ixgbe_tx_drain(ixgbe_t *ixgbe)
1481 1522 {
1482 1523 ixgbe_tx_ring_t *tx_ring;
1483 1524 boolean_t done;
1484 1525 int i, j;
1485 1526
1486 1527 /*
1487 1528 * Wait for a specific time to allow pending tx packets
1488 1529 * to be transmitted.
1489 1530 *
1490 1531 * Check the counter tbd_free to see if transmission is done.
1491 1532 * No lock protection is needed here.
1492 1533 *
1493 1534 * Return B_TRUE if all pending packets have been transmitted;
1494 1535 * Otherwise return B_FALSE;
1495 1536 */
1496 1537 for (i = 0; i < TX_DRAIN_TIME; i++) {
1497 1538
1498 1539 done = B_TRUE;
1499 1540 for (j = 0; j < ixgbe->num_tx_rings; j++) {
1500 1541 tx_ring = &ixgbe->tx_rings[j];
1501 1542 done = done &&
1502 1543 (tx_ring->tbd_free == tx_ring->ring_size);
1503 1544 }
1504 1545
1505 1546 if (done)
1506 1547 break;
1507 1548
1508 1549 msec_delay(1);
1509 1550 }
1510 1551
1511 1552 return (done);
1512 1553 }
1513 1554
1514 1555 /*
1515 1556 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1516 1557 */
1517 1558 static boolean_t
1518 1559 ixgbe_rx_drain(ixgbe_t *ixgbe)
1519 1560 {
1520 1561 boolean_t done = B_TRUE;
1521 1562 int i;
1522 1563
1523 1564 /*
1524 1565 * Polling the rx free list to check if those rx buffers held by
1525 1566 * the upper layer are released.
1526 1567 *
1527 1568 * Check the counter rcb_pending to see if all pending buffers are
1528 1569 * released. No lock protection is needed here.
1529 1570 *
1530 1571 * Return B_TRUE if all pending buffers have been released;
1531 1572 * Otherwise return B_FALSE;
1532 1573 */
1533 1574 for (i = 0; i < RX_DRAIN_TIME; i++) {
1534 1575 done = (ixgbe->rcb_pending == 0);
1535 1576
1536 1577 if (done)
1537 1578 break;
1538 1579
1539 1580 msec_delay(1);
1540 1581 }
1541 1582
1542 1583 return (done);
1543 1584 }
1544 1585
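Both drain routines above share a bounded-poll idiom: test a lock-free condition, sleep 1 ms, and give up after a fixed budget (TX_DRAIN_TIME / RX_DRAIN_TIME iterations), returning whether the condition was met so the caller decides what a timeout means; ixgbe_detach(), for example, fails outright if rx buffers are still held upstream. The skeleton, with cond() and budget as placeholders:

        for (i = 0; i < budget; i++) {
                done = cond();          /* e.g. ixgbe->rcb_pending == 0 */
                if (done)
                        break;
                msec_delay(1);
        }
        return (done);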
1545 1586 /*
1546 1587 * ixgbe_start - Start the driver/chipset.
1547 1588 */
1548 1589 int
1549 1590 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
1550 1591 {
1551 1592 int i;
1552 1593
1553 1594 ASSERT(mutex_owned(&ixgbe->gen_lock));
1554 1595
1555 1596 if (alloc_buffer) {
1556 1597 if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
1557 1598 ixgbe_error(ixgbe,
1558 1599 "Failed to allocate software receive rings");
1559 1600 return (IXGBE_FAILURE);
1560 1601 }
1561 1602
1562 1603 /* Allocate buffers for all the rx/tx rings */
1563 1604 if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
1564 1605 ixgbe_error(ixgbe, "Failed to allocate DMA resource");
1565 1606 return (IXGBE_FAILURE);
1566 1607 }
1567 1608
1568 1609 ixgbe->tx_ring_init = B_TRUE;
1569 1610 } else {
1570 1611 ixgbe->tx_ring_init = B_FALSE;
1571 1612 }
1572 1613
1573 1614 for (i = 0; i < ixgbe->num_rx_rings; i++)
1574 1615 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1575 1616 for (i = 0; i < ixgbe->num_tx_rings; i++)
1576 1617 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1577 1618
1578 1619 /*
1579 1620 * Start the chipset hardware
1580 1621 */
1581 1622 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1582 1623 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1583 1624 goto start_failure;
1584 1625 }
1585 1626
1586 1627 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1587 1628 goto start_failure;
1588 1629 }
1589 1630
1590 1631 /*
1591 1632 * Setup the rx/tx rings
1592 1633 */
1593 1634 ixgbe_setup_rings(ixgbe);
1594 1635
1595 1636 /*
1596 1637 * ixgbe_start() will be called when resetting, however if reset
1597 1638 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
1598 1639 * before enabling the interrupts.
1599 1640 */
1600 1641 atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
1601 1642 | IXGBE_STALL| IXGBE_OVERTEMP));
1602 1643
1603 1644 /*
1604 1645 * Enable adapter interrupts
1605 1646 * The interrupts must be enabled after the driver state is START
1606 1647 */
1607 1648 ixgbe_enable_adapter_interrupts(ixgbe);
1608 1649
1609 1650 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1610 1651 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1611 1652 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1612 1653 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1613 1654
1614 1655 return (IXGBE_SUCCESS);
1615 1656
1616 1657 start_failure:
1617 1658 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1618 1659 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1619 1660 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1620 1661 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1621 1662
1622 1663 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1623 1664
1624 1665 return (IXGBE_FAILURE);
1625 1666 }
1626 1667
1627 1668 /*
1628 1669 * ixgbe_stop - Stop the driver/chipset.
1629 1670 */
1630 1671 void
1631 1672 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
1632 1673 {
1633 1674 int i;
1634 1675
1635 1676 ASSERT(mutex_owned(&ixgbe->gen_lock));
1636 1677
1637 1678 /*
1638 1679 * Disable the adapter interrupts
1639 1680 */
1640 1681 ixgbe_disable_adapter_interrupts(ixgbe);
1641 1682
1642 1683 /*
1643 1684 * Drain the pending tx packets
1644 1685 */
1645 1686 (void) ixgbe_tx_drain(ixgbe);
1646 1687
1647 1688 for (i = 0; i < ixgbe->num_rx_rings; i++)
1648 1689 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1649 1690 for (i = 0; i < ixgbe->num_tx_rings; i++)
1650 1691 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1651 1692
1652 1693 /*
1653 1694 * Stop the chipset hardware
1654 1695 */
1655 1696 ixgbe_chip_stop(ixgbe);
1656 1697
1657 1698 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1658 1699 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1659 1700 }
1660 1701
1661 1702 /*
1662 1703 * Clean the pending tx data/resources
1663 1704 */
1664 1705 ixgbe_tx_clean(ixgbe);
1665 1706
1666 1707 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1667 1708 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1668 1709 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1669 1710 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1670 1711
1671 1712 if (ixgbe->link_state == LINK_STATE_UP) {
1672 1713 ixgbe->link_state = LINK_STATE_UNKNOWN;
1673 1714 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
1674 1715 }
1675 1716
1676 1717 if (free_buffer) {
1677 1718 /*
1678 1719 * Release the DMA/memory resources of rx/tx rings
1679 1720 */
1680 1721 ixgbe_free_dma(ixgbe);
1681 1722 ixgbe_free_rx_data(ixgbe);
1682 1723 }
1683 1724 }
1684 1725
1685 1726 /*
1686 1727 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
1687 1728 */
1688 1729 /* ARGSUSED */
1689 1730 static int
1690 1731 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
1691 1732 void *arg1, void *arg2)
1692 1733 {
1693 1734 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
1694 1735
1695 1736 switch (cbaction) {
1696 1737 /* IRM callback */
1697 1738 int count;
1698 1739 case DDI_CB_INTR_ADD:
1699 1740 case DDI_CB_INTR_REMOVE:
1700 1741 count = (int)(uintptr_t)cbarg;
1701 1742 ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
1702 1743 DTRACE_PROBE2(ixgbe__irm__callback, int, count,
1703 1744 int, ixgbe->intr_cnt);
1704 1745 if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
1705 1746 DDI_SUCCESS) {
1706 1747 ixgbe_error(ixgbe,
1707 1748 "IRM CB: Failed to adjust interrupts");
1708 1749 goto cb_fail;
1709 1750 }
1710 1751 break;
1711 1752 default:
1712 1753 IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
1713 1754 cbaction);
1714 1755 return (DDI_ENOTSUP);
1715 1756 }
1716 1757 return (DDI_SUCCESS);
1717 1758 cb_fail:
1718 1759 return (DDI_FAILURE);
1719 1760 }
1720 1761
1721 1762 /*
1722 1763 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
1723 1764 */
1724 1765 static int
1725 1766 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
1726 1767 {
1727 1768 int i, rc, actual;
1728 1769
1729 1770 if (count == 0)
1730 1771 return (DDI_SUCCESS);
1731 1772
1732 1773 if ((cbaction == DDI_CB_INTR_ADD &&
1733 1774 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
1734 1775 (cbaction == DDI_CB_INTR_REMOVE &&
1735 1776 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
1736 1777 return (DDI_FAILURE);
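	/*
	 * Editorial worked example (values assumed, not from this webrev):
	 * with intr_cnt == 4, intr_cnt_min == 2 and intr_cnt_max == 8, a
	 * DDI_CB_INTR_REMOVE with count == 3 would leave 4 - 3 == 1 vector,
	 * below the minimum, so the check above rejects it; count == 2
	 * passes and the adjustment proceeds.
	 */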
1737 1778
1738 1779 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1739 1780 return (DDI_FAILURE);
1740 1781 }
1741 1782
1742 1783 for (i = 0; i < ixgbe->num_rx_rings; i++)
1743 1784 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
1744 1785 for (i = 0; i < ixgbe->num_tx_rings; i++)
1745 1786 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
1746 1787
1747 1788 mutex_enter(&ixgbe->gen_lock);
1748 1789 ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1749 1790 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
1750 1791 ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
1751 1792 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
1752 1793
1753 1794 ixgbe_stop(ixgbe, B_FALSE);
1754 1795 /*
1755 1796 * Disable interrupts
1756 1797 */
1757 1798 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1758 1799 rc = ixgbe_disable_intrs(ixgbe);
1759 1800 ASSERT(rc == IXGBE_SUCCESS);
1760 1801 }
1761 1802 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
1762 1803
1763 1804 /*
1764 1805 * Remove interrupt handlers
1765 1806 */
1766 1807 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1767 1808 ixgbe_rem_intr_handlers(ixgbe);
1768 1809 }
1769 1810 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
1770 1811
1771 1812 /*
1772 1813 * Clear vect_map
1773 1814 */
1774 1815 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
1775 1816 switch (cbaction) {
1776 1817 case DDI_CB_INTR_ADD:
1777 1818 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
1778 1819 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
1779 1820 DDI_INTR_ALLOC_NORMAL);
1780 1821 if (rc != DDI_SUCCESS || actual != count) {
1781 1822 ixgbe_log(ixgbe, "Adjust interrupts failed. "
1782 1823 "return: %d, irm cb size: %d, actual: %d",
1783 1824 rc, count, actual);
1784 1825 goto intr_adjust_fail;
1785 1826 }
1786 1827 ixgbe->intr_cnt += count;
1787 1828 break;
1788 1829
1789 1830 case DDI_CB_INTR_REMOVE:
1790 1831 for (i = ixgbe->intr_cnt - count;
1791 1832 i < ixgbe->intr_cnt; i++) {
1792 1833 rc = ddi_intr_free(ixgbe->htable[i]);
1793 1834 ixgbe->htable[i] = NULL;
1794 1835 if (rc != DDI_SUCCESS) {
1795 1836 ixgbe_log(ixgbe, "Adjust interrupts failed. "
1796 1837 "return: %d, irm cb size: %d, actual: %d",
1797 1838 rc, count, actual);
1798 1839 goto intr_adjust_fail;
1799 1840 }
1800 1841 }
1801 1842 ixgbe->intr_cnt -= count;
1802 1843 break;
1803 1844 }
1804 1845
1805 1846 /*
1806 1847 * Get priority for first vector, assume remaining are all the same
1807 1848 */
1808 1849 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
1809 1850 if (rc != DDI_SUCCESS) {
1810 1851 ixgbe_log(ixgbe,
1811 1852 "Get interrupt priority failed: %d", rc);
1812 1853 goto intr_adjust_fail;
1813 1854 }
1814 1855 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
1815 1856 if (rc != DDI_SUCCESS) {
1816 1857 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
1817 1858 goto intr_adjust_fail;
1818 1859 }
1819 1860 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
1820 1861
1821 1862 /*
1822 1863 * Map rings to interrupt vectors
1823 1864 */
1824 1865 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
1825 1866 ixgbe_error(ixgbe,
1826 1867 "IRM CB: Failed to map interrupts to vectors");
1827 1868 goto intr_adjust_fail;
1828 1869 }
1829 1870
1830 1871 /*
1831 1872 * Add interrupt handlers
1832 1873 */
1833 1874 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
1834 1875 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
1835 1876 goto intr_adjust_fail;
1836 1877 }
1837 1878 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
1838 1879
1839 1880 /*
1840 1881 * With the vectors remapped and handlers re-added, it is now
1841 1882 * safe to re-enable interrupts.
1842 1883 */
1843 1884 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
1844 1885 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
1845 1886 goto intr_adjust_fail;
1846 1887 }
1847 1888 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
1848 1889 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1849 1890 ixgbe_error(ixgbe, "IRM CB: Failed to start");
1850 1891 goto intr_adjust_fail;
1851 1892 }
1852 1893 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
1853 1894 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
1854 1895 ixgbe->ixgbe_state |= IXGBE_STARTED;
1855 1896 mutex_exit(&ixgbe->gen_lock);
1856 1897
1857 1898 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1858 1899 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
1859 1900 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
1860 1901 }
1861 1902 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1862 1903 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
1863 1904 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
1864 1905 }
1865 1906
1866 1907 /* Wakeup all Tx rings */
1867 1908 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1868 1909 mac_tx_ring_update(ixgbe->mac_hdl,
1869 1910 ixgbe->tx_rings[i].ring_handle);
1870 1911 }
1871 1912
1872 1913 IXGBE_DEBUGLOG_3(ixgbe,
1873 1914 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
1874 1915 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
1875 1916 return (DDI_SUCCESS);
1876 1917
1877 1918 intr_adjust_fail:
1878 1919 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1879 1920 mutex_exit(&ixgbe->gen_lock);
1880 1921 return (DDI_FAILURE);
1881 1922 }
1882 1923
1883 1924 /*
1884 1925 * ixgbe_intr_cb_register - Register interrupt callback function.
1885 1926 */
1886 1927 static int
1887 1928 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
1888 1929 {
1889 1930 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
1890 1931 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
1891 1932 return (IXGBE_FAILURE);
1892 1933 }
1893 1934 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
1894 1935 return (IXGBE_SUCCESS);
1895 1936 }
1896 1937
1897 1938 /*
1898 1939 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1899 1940 */
1900 1941 static int
1901 1942 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1902 1943 {
1903 1944 /*
1904 1945 * Allocate memory space for rx rings
1905 1946 */
1906 1947 ixgbe->rx_rings = kmem_zalloc(
1907 1948 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1908 1949 KM_NOSLEEP);
1909 1950
1910 1951 if (ixgbe->rx_rings == NULL) {
1911 1952 return (IXGBE_FAILURE);
1912 1953 }
1913 1954
1914 1955 /*
1915 1956 * Allocate memory space for tx rings
1916 1957 */
1917 1958 ixgbe->tx_rings = kmem_zalloc(
1918 1959 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1919 1960 KM_NOSLEEP);
1920 1961
1921 1962 if (ixgbe->tx_rings == NULL) {
1922 1963 kmem_free(ixgbe->rx_rings,
1923 1964 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1924 1965 ixgbe->rx_rings = NULL;
1925 1966 return (IXGBE_FAILURE);
1926 1967 }
1927 1968
1928 1969 /*
1929 1970 * Allocate memory space for rx ring groups
1930 1971 */
1931 1972 ixgbe->rx_groups = kmem_zalloc(
1932 1973 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1933 1974 KM_NOSLEEP);
1934 1975
1935 1976 if (ixgbe->rx_groups == NULL) {
1936 1977 kmem_free(ixgbe->rx_rings,
1937 1978 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1938 1979 kmem_free(ixgbe->tx_rings,
1939 1980 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1940 1981 ixgbe->rx_rings = NULL;
1941 1982 ixgbe->tx_rings = NULL;
1942 1983 return (IXGBE_FAILURE);
1943 1984 }
1944 1985
1945 1986 return (IXGBE_SUCCESS);
1946 1987 }
1947 1988
1948 1989 /*
1949 1990 * ixgbe_free_rings - Free the memory space of rx/tx rings.
1950 1991 */
1951 1992 static void
1952 1993 ixgbe_free_rings(ixgbe_t *ixgbe)
1953 1994 {
1954 1995 if (ixgbe->rx_rings != NULL) {
1955 1996 kmem_free(ixgbe->rx_rings,
1956 1997 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1957 1998 ixgbe->rx_rings = NULL;
1958 1999 }
1959 2000
1960 2001 if (ixgbe->tx_rings != NULL) {
1961 2002 kmem_free(ixgbe->tx_rings,
1962 2003 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1963 2004 ixgbe->tx_rings = NULL;
1964 2005 }
1965 2006
1966 2007 if (ixgbe->rx_groups != NULL) {
1967 2008 kmem_free(ixgbe->rx_groups,
1968 2009 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
1969 2010 ixgbe->rx_groups = NULL;
1970 2011 }
1971 2012 }
1972 2013
1973 2014 static int
1974 2015 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
1975 2016 {
1976 2017 ixgbe_rx_ring_t *rx_ring;
1977 2018 int i;
1978 2019
1979 2020 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1980 2021 rx_ring = &ixgbe->rx_rings[i];
1981 2022 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
1982 2023 goto alloc_rx_rings_failure;
1983 2024 }
1984 2025 return (IXGBE_SUCCESS);
1985 2026
1986 2027 alloc_rx_rings_failure:
1987 2028 ixgbe_free_rx_data(ixgbe);
1988 2029 return (IXGBE_FAILURE);
1989 2030 }
1990 2031
1991 2032 static void
1992 2033 ixgbe_free_rx_data(ixgbe_t *ixgbe)
1993 2034 {
1994 2035 ixgbe_rx_ring_t *rx_ring;
1995 2036 ixgbe_rx_data_t *rx_data;
1996 2037 int i;
1997 2038
1998 2039 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1999 2040 rx_ring = &ixgbe->rx_rings[i];
2000 2041
2001 2042 mutex_enter(&ixgbe->rx_pending_lock);
2002 2043 rx_data = rx_ring->rx_data;
2003 2044
2004 2045 if (rx_data != NULL) {
2005 2046 rx_data->flag |= IXGBE_RX_STOPPED;
2006 2047
2007 2048 if (rx_data->rcb_pending == 0) {
2008 2049 ixgbe_free_rx_ring_data(rx_data);
2009 2050 rx_ring->rx_data = NULL;
2010 2051 }
2011 2052 }
2012 2053
2013 2054 mutex_exit(&ixgbe->rx_pending_lock);
2014 2055 }
2015 2056 }
2016 2057
2017 2058 /*
2018 2059 * ixgbe_setup_rings - Setup rx/tx rings.
2019 2060 */
2020 2061 static void
2021 2062 ixgbe_setup_rings(ixgbe_t *ixgbe)
2022 2063 {
2023 2064 /*
2024 2065 * Setup the rx/tx rings, including the following:
2025 2066 *
2026 2067 * 1. Setup the descriptor ring and the control block buffers;
2027 2068 * 2. Initialize necessary registers for receive/transmit;
2028 2069 * 3. Initialize software pointers/parameters for receive/transmit;
2029 2070 */
2030 2071 ixgbe_setup_rx(ixgbe);
2031 2072
2032 2073 ixgbe_setup_tx(ixgbe);
2033 2074 }
2034 2075
2035 2076 static void
2036 2077 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2037 2078 {
2038 2079 ixgbe_t *ixgbe = rx_ring->ixgbe;
2039 2080 ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2040 2081 struct ixgbe_hw *hw = &ixgbe->hw;
2041 2082 rx_control_block_t *rcb;
2042 2083 union ixgbe_adv_rx_desc *rbd;
2043 2084 uint32_t size;
2044 2085 uint32_t buf_low;
2045 2086 uint32_t buf_high;
2046 2087 uint32_t reg_val;
2047 2088 int i;
2048 2089
2049 2090 ASSERT(mutex_owned(&rx_ring->rx_lock));
2050 2091 ASSERT(mutex_owned(&ixgbe->gen_lock));
2051 2092
2052 2093 for (i = 0; i < ixgbe->rx_ring_size; i++) {
2053 2094 rcb = rx_data->work_list[i];
2054 2095 rbd = &rx_data->rbd_ring[i];
2055 2096
2056 2097 rbd->read.pkt_addr = rcb->rx_buf.dma_address;
2057 2098 rbd->read.hdr_addr = NULL;
2058 2099 }
2059 2100
2060 2101 /*
2061 2102 * Initialize the length register
2062 2103 */
2063 2104 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
2064 2105 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);
2065 2106
2066 2107 /*
2067 2108 * Initialize the base address registers
2068 2109 */
2069 2110 buf_low = (uint32_t)rx_data->rbd_area.dma_address;
2070 2111 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
2071 2112 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
2072 2113 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);
2073 2114
2074 2115 /*
2075 2116 * Setup head & tail pointers
2076 2117 */
2077 2118 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2078 2119 rx_data->ring_size - 1);
2079 2120 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2080 2121
2081 2122 rx_data->rbd_next = 0;
2082 2123 rx_data->lro_first = 0;
2083 2124
2084 2125 /*
2085 2126 * Setup the Receive Descriptor Control Register (RXDCTL)
2086 2127 * PTHRESH=32 descriptors (half the internal cache)
2087 2128 * HTHRESH=0 descriptors (to minimize latency on fetch)
2088 2129 * WTHRESH defaults to 1 (writeback each descriptor)
2089 2130 */
2090 2131 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2091 2132 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2092 2133
2093 - /* Not a valid value for 82599 */
2094 - if (hw->mac.type < ixgbe_mac_82599EB) {
2134 + /* Not a valid value for 82599 or X540 */
2135 + if (hw->mac.type == ixgbe_mac_82598EB) {
2095 2136 reg_val |= 0x0020; /* pthresh */
2096 2137 }
2097 2138 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2098 2139
2099 - if (hw->mac.type == ixgbe_mac_82599EB) {
2140 + if (hw->mac.type == ixgbe_mac_82599EB ||
2141 + hw->mac.type == ixgbe_mac_X540) {
2100 2142 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2101 2143 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2102 2144 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2103 2145 }
2104 2146
2105 2147 /*
2106 2148 * Setup the Split and Replication Receive Control Register.
2107 2149 * Set the rx buffer size and the advanced descriptor type.
2108 2150 */
2109 2151 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2110 2152 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2111 2153 reg_val |= IXGBE_SRRCTL_DROP_EN;
2112 2154 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2113 2155 }
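As an editorial aside, the two magic values above decode as follows. This is a minimal sketch assuming the usual Intel shared-code definitions (IXGBE_SRRCTL_BSIZEPKT_SHIFT of 10, i.e. the buffer-size field counts 1KB units, and PTHRESH occupying the low bits of RXDCTL); the names and values are illustrative, not taken from this webrev.

#include <stdint.h>

/* Hypothetical illustration only -- not part of the driver. */
static void
rx_ring_reg_decode(void)
{
	uint32_t bsizepkt = 2048 >> 10;	/* a 2KB rx buffer -> BSIZEPKT == 2 */
	uint32_t pthresh = 0x0020;	/* == 32 descriptors, per the comment */
	(void) bsizepkt;
	(void) pthresh;
}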
2114 2156
2115 2157 static void
2116 2158 ixgbe_setup_rx(ixgbe_t *ixgbe)
2117 2159 {
2118 2160 ixgbe_rx_ring_t *rx_ring;
2119 2161 struct ixgbe_hw *hw = &ixgbe->hw;
2120 2162 uint32_t reg_val;
2121 2163 uint32_t ring_mapping;
2122 2164 uint32_t i, index;
2123 2165 uint32_t psrtype_rss_bit;
2124 2166
2125 2167 /* PSRTYPE must be configured for 82599 */
2126 2168 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2127 2169 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2128 2170 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2129 2171 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2130 2172 reg_val |= IXGBE_PSRTYPE_L2HDR;
2131 2173 reg_val |= 0x80000000;
2132 2174 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2133 2175 } else {
2134 2176 if (ixgbe->num_rx_groups > 32) {
2135 2177 psrtype_rss_bit = 0x20000000;
2136 2178 } else {
2137 2179 psrtype_rss_bit = 0x40000000;
2138 2180 }
2139 2181 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
2140 2182 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2141 2183 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2142 2184 reg_val |= IXGBE_PSRTYPE_L2HDR;
2143 2185 reg_val |= psrtype_rss_bit;
2144 2186 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
2145 2187 }
2146 2188 }
2147 2189
2148 2190 /*
2149 2191 * Set filter control in FCTRL to accept broadcast packets and do
2150 2192 * not pass pause frames to host. Flow control settings are already
2151 2193 * in this register, so preserve them.
2152 2194 */
2153 2195 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2154 2196 reg_val |= IXGBE_FCTRL_BAM; /* broadcast accept mode */
2155 2197 reg_val |= IXGBE_FCTRL_DPF; /* discard pause frames */
2156 2198 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
2157 2199
2158 2200 /*
2159 2201 * Hardware checksum settings
2160 2202 */
2161 2203 if (ixgbe->rx_hcksum_enable) {
2162 2204 reg_val = IXGBE_RXCSUM_IPPCSE; /* IP checksum */
2163 2205 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
2164 2206 }
2165 2207
2166 2208 /*
2167 2209 * Setup VMDq and RSS for multiple receive queues
2168 2210 */
2169 2211 switch (ixgbe->classify_mode) {
2170 2212 case IXGBE_CLASSIFY_RSS:
2171 2213 /*
2172 2214 * One group, only RSS is needed when more than
2173 2215 * one ring enabled.
2174 2216 */
2175 2217 ixgbe_setup_rss(ixgbe);
2176 2218 break;
2177 2219
2178 2220 case IXGBE_CLASSIFY_VMDQ:
2179 2221 /*
2180 2222 * Multiple groups, each group has one ring,
2181 2223 * only VMDq is needed.
2182 2224 */
2183 2225 ixgbe_setup_vmdq(ixgbe);
2184 2226 break;
2185 2227
2186 2228 case IXGBE_CLASSIFY_VMDQ_RSS:
2187 2229 /*
2188 2230 * Multiple groups and multiple rings, both
2189 2231 * VMDq and RSS are needed.
2190 2232 */
2191 2233 ixgbe_setup_vmdq_rss(ixgbe);
2192 2234 break;
2193 2235
2194 2236 default:
2195 2237 break;
2196 2238 }
2197 2239
2198 2240 /*
2199 2241 * Enable the receive unit. This must be done after filter
2200 2242 * control is set in FCTRL.
2201 2243 */
2202 2244 reg_val = (IXGBE_RXCTRL_RXEN /* Enable Receive Unit */
2203 2245 | IXGBE_RXCTRL_DMBYPS); /* descriptor monitor bypass */
2204 2246 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
2205 2247
2206 2248 /*
2207 2249 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2208 2250 */
2209 2251 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2210 2252 rx_ring = &ixgbe->rx_rings[i];
2211 2253 ixgbe_setup_rx_ring(rx_ring);
2212 2254 }
2213 2255
2214 2256 /*
2215 2257 * Setup the per-ring statistics mapping.
2216 2258 */
2217 2259 ring_mapping = 0;
2218 2260 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2219 2261 index = ixgbe->rx_rings[i].hw_index;
2220 2262 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2221 2263 ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2222 2264 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2223 2265 }
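	/*
	 * Editorial worked example (assuming a hw_index of 5): four rings
	 * share each 32-bit RQSMR register, one byte per ring, with only
	 * the low nibble of each byte used. index >> 2 selects RQSMR(1),
	 * and (5 & 0xF) << (8 * (5 & 0x3)) == 0x5 << 8 == 0x00000500,
	 * i.e. software ring 5 recorded in byte 1 of that register.
	 */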
2224 2266
2225 2267 /*
2226 2268 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2227 2269 * by four bytes if the packet has a VLAN field, so the value set
2228 2270 * here includes the MTU, ethernet header and frame check sequence.
2229 2271 * Register is MAXFRS in 82599.
2230 2272 */
2231 2273 reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
2232 2274 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2233 2275 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
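	/*
	 * Editorial worked example (assuming the shared-code shift value
	 * of 16 for IXGBE_MHADD_MFS_SHIFT): with the default 1500-byte
	 * MTU, a 14-byte ethernet header and a 4-byte FCS, reg_val is
	 * (1500 + 14 + 4) << 16 == 0x05EE0000, i.e. a max frame size of
	 * 1518 placed in the upper half of MHADD.
	 */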
2234 2276
2235 2277 /*
2236 2278 * Setup Jumbo Frame enable bit
2237 2279 */
2238 2280 if (ixgbe->default_mtu > ETHERMTU) {
2239 2281 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2240 2282 reg_val |= IXGBE_HLREG0_JUMBOEN;
2241 2283 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2242 2284 }
2243 2285
2244 2286 /*
2245 2287 * Setup RSC for multiple receive queues.
2246 2288 */
2247 2289 if (ixgbe->lro_enable) {
2248 2290 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2249 2291 /*
2250 2292 * Make sure rx_buf_size * MAXDESC is not greater
2251 2293 * than 65535.
2252 2294 * Intel recommends 4 for MAXDESC field value.
2253 2295 */
2254 2296 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2255 2297 reg_val |= IXGBE_RSCCTL_RSCEN;
2256 2298 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2257 2299 reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2258 2300 else
2259 2301 reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2260 2302 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
2261 2303 }
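		/*
		 * Editorial worked example: with hypothetical 16KB buffers,
		 * 16384 * 4 == 65536 just exceeds the 65535 limit, so the
		 * loop above drops to MAXDESC_1; any smaller buffer keeps
		 * Intel's recommended 4 (e.g. 4096 * 4 == 16384 is safe).
		 */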
2262 2304
2263 2305 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2264 2306 reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2265 2307 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2266 2308
2267 2309 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2268 2310 reg_val |= IXGBE_RDRXCTL_RSCACKC;
2269 2311 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2270 2312 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2271 2313
2272 2314 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2273 2315 }
2274 2316 }
2275 2317
2276 2318 static void
2277 2319 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2278 2320 {
2279 2321 ixgbe_t *ixgbe = tx_ring->ixgbe;
2280 2322 struct ixgbe_hw *hw = &ixgbe->hw;
2281 2323 uint32_t size;
2282 2324 uint32_t buf_low;
2283 2325 uint32_t buf_high;
2284 2326 uint32_t reg_val;
2285 2327
2286 2328 ASSERT(mutex_owned(&tx_ring->tx_lock));
2287 2329 ASSERT(mutex_owned(&ixgbe->gen_lock));
2288 2330
2289 2331 /*
2290 2332 * Initialize the length register
2291 2333 */
2292 2334 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2293 2335 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
2294 2336
2295 2337 /*
2296 2338 * Initialize the base address registers
2297 2339 */
2298 2340 buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
2299 2341 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
2300 2342 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
2301 2343 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
2302 2344
2303 2345 /*
2304 2346 * Setup head & tail pointers
2305 2347 */
2306 2348 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
2307 2349 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
2308 2350
2309 2351 /*
2310 2352 * Setup head write-back
2311 2353 */
2312 2354 if (ixgbe->tx_head_wb_enable) {
2313 2355 /*
2314 2356 * The memory of the head write-back is allocated using
2315 2357 * the extra tbd beyond the tail of the tbd ring.
2316 2358 */
2317 2359 tx_ring->tbd_head_wb = (uint32_t *)
2318 2360 ((uintptr_t)tx_ring->tbd_area.address + size);
2319 2361 *tx_ring->tbd_head_wb = 0;
2320 2362
2321 2363 buf_low = (uint32_t)
2322 2364 (tx_ring->tbd_area.dma_address + size);
2323 2365 buf_high = (uint32_t)
2324 2366 ((tx_ring->tbd_area.dma_address + size) >> 32);
2325 2367
2326 2368 /* Set the head write-back enable bit */
2327 2369 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2328 2370
2329 2371 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2330 2372 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2331 2373
2332 2374 /*
2333 2375 * Turn off relaxed ordering for head write back or it will
2334 2376 * cause problems with the tx recycling
2335 2377 */
2336 - reg_val = IXGBE_READ_REG(hw,
2337 - IXGBE_DCA_TXCTRL(tx_ring->index));
2338 - reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2339 - IXGBE_WRITE_REG(hw,
2340 - IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2378 +
2379 + reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2380 + IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2381 + IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2382 + reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2383 + if (hw->mac.type == ixgbe_mac_82598EB) {
2384 + IXGBE_WRITE_REG(hw,
2385 + IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2386 + } else {
2387 + IXGBE_WRITE_REG(hw,
2388 + IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2389 + }
2341 2390 } else {
2342 2391 tx_ring->tbd_head_wb = NULL;
2343 2392 }
2344 2393
2345 2394 tx_ring->tbd_head = 0;
2346 2395 tx_ring->tbd_tail = 0;
2347 2396 tx_ring->tbd_free = tx_ring->ring_size;
2348 2397
2349 2398 if (ixgbe->tx_ring_init == B_TRUE) {
2350 2399 tx_ring->tcb_head = 0;
2351 2400 tx_ring->tcb_tail = 0;
2352 2401 tx_ring->tcb_free = tx_ring->free_list_size;
2353 2402 }
2354 2403
2355 2404 /*
2356 2405 * Initialize the s/w context structure
2357 2406 */
2358 2407 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2359 2408 }
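An editorial sketch of the head write-back address math above, under stated assumptions only (16-byte advanced tx descriptors, and IXGBE_TDWBAL_HEAD_WB_ENABLE being bit 0 as in the Intel shared code, which is why the flag can be OR'd straight into the low half of the address):

#include <stdint.h>

/* Hypothetical illustration only -- not part of the driver. */
static void
head_wb_layout(uint64_t dma_address, uint32_t ring_size)
{
	uint32_t size = ring_size * 16;		/* descriptor area, 16B each */
	uint64_t wb = dma_address + size;	/* spare slot past the tail */
	uint32_t low = (uint32_t)wb | 0x1;	/* bit 0 doubles as enable */
	uint32_t high = (uint32_t)(wb >> 32);
	(void) low;
	(void) high;
}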
2360 2409
2361 2410 static void
2362 2411 ixgbe_setup_tx(ixgbe_t *ixgbe)
2363 2412 {
2364 2413 struct ixgbe_hw *hw = &ixgbe->hw;
2365 2414 ixgbe_tx_ring_t *tx_ring;
2366 2415 uint32_t reg_val;
2367 2416 uint32_t ring_mapping;
2368 2417 int i;
2369 2418
2370 2419 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2371 2420 tx_ring = &ixgbe->tx_rings[i];
2372 2421 ixgbe_setup_tx_ring(tx_ring);
2373 2422 }
2374 2423
2375 2424 /*
2376 2425 * Setup the per-ring statistics mapping.
2377 2426 */
2378 2427 ring_mapping = 0;
2379 2428 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2380 2429 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2381 2430 if ((i & 0x3) == 0x3) {
2382 2431 switch (hw->mac.type) {
2383 2432 case ixgbe_mac_82598EB:
2384 2433 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2385 2434 ring_mapping);
2386 2435 break;
2387 2436
2388 2437 case ixgbe_mac_82599EB:
2438 + case ixgbe_mac_X540:
2389 2439 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2390 2440 ring_mapping);
2391 2441 break;
2392 2442
2393 2443 default:
2394 2444 break;
2395 2445 }
2396 2446
2397 2447 ring_mapping = 0;
2398 2448 }
2399 2449 }
2400 2450 if (i & 0x3) {
2401 2451 switch (hw->mac.type) {
2402 2452 case ixgbe_mac_82598EB:
2403 2453 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2404 2454 break;
2405 2455
2406 2456 case ixgbe_mac_82599EB:
2457 + case ixgbe_mac_X540:
2407 2458 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2408 2459 break;
2409 2460
2410 2461 default:
2411 2462 break;
2412 2463 }
2413 2464 }
2414 2465
2415 2466 /*
2416 2467 * Enable CRC appending and TX padding (for short tx frames)
2417 2468 */
2418 2469 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2419 2470 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2420 2471 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2421 2472
2422 2473 /*
2423 - * enable DMA for 82599 parts
2474 + * enable DMA for 82599 and X540 parts
2424 2475 */
2425 - if (hw->mac.type == ixgbe_mac_82599EB) {
2426 - /* DMATXCTL.TE must be set after all Tx config is complete */
2476 + if (hw->mac.type == ixgbe_mac_82599EB ||
2477 + hw->mac.type == ixgbe_mac_X540) {
2478 + /* DMATXCTL.TE must be set after all Tx config is complete */
2427 2479 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2428 2480 reg_val |= IXGBE_DMATXCTL_TE;
2429 2481 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2482 +
2483 + /* Disable arbiter to set MTQC */
2484 + reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2485 + reg_val |= IXGBE_RTTDCS_ARBDIS;
2486 + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2487 + IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2488 + reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2489 + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2430 2490 }
2431 2491
2432 2492 /*
2433 2493 * Enabling tx queues.
2434 2494 * For 82599 this must be done after DMATXCTL.TE is set.
2435 2495 */
2436 2496 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2437 2497 tx_ring = &ixgbe->tx_rings[i];
2438 2498 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2439 2499 reg_val |= IXGBE_TXDCTL_ENABLE;
2440 2500 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2441 2501 }
2442 2502 }
2443 2503
2444 2504 /*
2445 2505 * ixgbe_setup_rss - Setup receive-side scaling feature.
2446 2506 */
2447 2507 static void
2448 2508 ixgbe_setup_rss(ixgbe_t *ixgbe)
2449 2509 {
2450 2510 struct ixgbe_hw *hw = &ixgbe->hw;
2451 2511 uint32_t i, mrqc, rxcsum;
2452 2512 uint32_t random;
2453 2513 uint32_t reta;
2454 2514 uint32_t ring_per_group;
2455 2515
2456 2516 /*
2457 2517 * Fill out redirection table
2458 2518 */
2459 2519 reta = 0;
2460 2520 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2461 2521
2462 2522 for (i = 0; i < 128; i++) {
2463 2523 reta = (reta << 8) | (i % ring_per_group) |
2464 2524 ((i % ring_per_group) << 4);
2465 2525 if ((i & 3) == 3)
2466 2526 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2467 2527 }
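	/*
	 * Editorial worked example (assuming ring_per_group == 4): each
	 * byte of reta carries one queue index, repeated in both nibbles,
	 * and a register is flushed every fourth entry. Entries 0..3
	 * yield bytes 0x00, 0x11, 0x22, 0x33, so RETA(0) is written with
	 * 0x00112233 -- the earliest entry lands in the top byte.
	 */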
2468 2528
2469 2529 /*
2470 2530 * Fill out hash function seeds with a random constant
2471 2531 */
2472 2532 for (i = 0; i < 10; i++) {
2473 2533 (void) random_get_pseudo_bytes((uint8_t *)&random,
2474 2534 sizeof (uint32_t));
2475 2535 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2476 2536 }
2477 2537
2478 2538 /*
2479 2539 * Enable RSS & perform hash on these packet types
2480 2540 */
2481 2541 mrqc = IXGBE_MRQC_RSSEN |
2482 2542 IXGBE_MRQC_RSS_FIELD_IPV4 |
2483 2543 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2484 2544 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2485 2545 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2486 2546 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2487 2547 IXGBE_MRQC_RSS_FIELD_IPV6 |
2488 2548 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2489 2549 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2490 2550 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2491 2551 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2492 2552
2493 2553 /*
2494 2554 * Disable Packet Checksum to enable RSS for multiple receive queues.
2495 2555 * It is an adapter hardware limitation that Packet Checksum is
2496 2556 * mutually exclusive with RSS.
2497 2557 */
2498 2558 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2499 2559 rxcsum |= IXGBE_RXCSUM_PCSD;
2500 2560 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2501 2561 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2502 2562 }
2503 2563
2504 2564 /*
2505 2565 * ixgbe_setup_vmdq - Setup MAC classification feature
2506 2566 */
2507 2567 static void
2508 2568 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2509 2569 {
2510 2570 struct ixgbe_hw *hw = &ixgbe->hw;
2511 2571 uint32_t vmdctl, i, vtctl;
2512 2572
2513 2573 /*
2514 2574 * Setup the VMDq Control register, enable VMDq based on
2515 2575 * packet destination MAC address:
2516 2576 */
2517 2577 switch (hw->mac.type) {
2518 2578 case ixgbe_mac_82598EB:
2519 2579 /*
2520 2580 * VMDq Enable = 1;
2521 2581 * VMDq Filter = 0; MAC filtering
2522 2582 * Default VMDq output index = 0;
2523 2583 */
2524 2584 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2525 2585 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2526 2586 break;
2527 2587
2528 2588 case ixgbe_mac_82599EB:
2589 + case ixgbe_mac_X540:
2529 2590 /*
2530 2591 * Enable VMDq-only.
2531 2592 */
2532 2593 vmdctl = IXGBE_MRQC_VMDQEN;
2533 2594 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2534 2595
2535 2596 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2536 2597 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2537 2598 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2538 2599 }
2539 2600
2540 2601 /*
2541 2602 * Enable Virtualization and Replication.
2542 2603 */
2543 2604 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2544 2605 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2545 2606
2546 2607 /*
2547 2608 * Enable receiving packets to all VFs
2548 2609 */
2549 2610 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2550 2611 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2551 2612 break;
2552 2613
2553 2614 default:
2554 2615 break;
2555 2616 }
2556 2617 }
2557 2618
2558 2619 /*
2559 2620 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2560 2621 */
2561 2622 static void
2562 2623 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2563 2624 {
2564 2625 struct ixgbe_hw *hw = &ixgbe->hw;
2565 2626 uint32_t i, mrqc, rxcsum;
2566 2627 uint32_t random;
2567 2628 uint32_t reta;
2568 2629 uint32_t ring_per_group;
2569 2630 uint32_t vmdctl, vtctl;
2570 2631
2571 2632 /*
2572 2633 * Fill out redirection table
2573 2634 */
2574 2635 reta = 0;
2575 2636 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2576 2637 for (i = 0; i < 128; i++) {
2577 2638 reta = (reta << 8) | (i % ring_per_group) |
2578 2639 ((i % ring_per_group) << 4);
2579 2640 if ((i & 3) == 3)
2580 2641 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2581 2642 }
2582 2643
2583 2644 /*
2584 2645 * Fill out hash function seeds with a random constant
2585 2646 */
2586 2647 for (i = 0; i < 10; i++) {
2587 2648 (void) random_get_pseudo_bytes((uint8_t *)&random,
2588 2649 sizeof (uint32_t));
2589 2650 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2590 2651 }
2591 2652
2592 2653 /*
2593 2654 * Enable and setup RSS and VMDq
2594 2655 */
2595 2656 switch (hw->mac.type) {
2596 2657 case ixgbe_mac_82598EB:
2597 2658 /*
2598 2659 * Enable RSS & Setup RSS Hash functions
2599 2660 */
2600 2661 mrqc = IXGBE_MRQC_RSSEN |
2601 2662 IXGBE_MRQC_RSS_FIELD_IPV4 |
2602 2663 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2603 2664 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2604 2665 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2605 2666 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2606 2667 IXGBE_MRQC_RSS_FIELD_IPV6 |
2607 2668 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2608 2669 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2609 2670 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2610 2671 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2611 2672
2612 2673 /*
2613 2674 * Enable and Setup VMDq
2614 2675 * VMDq Filter = 0; MAC filtering
2615 2676 * Default VMDq output index = 0;
2616 2677 */
2617 2678 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2618 2679 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2619 2680 break;
2620 2681
2621 2682 case ixgbe_mac_82599EB:
2683 + case ixgbe_mac_X540:
2622 2684 /*
2623 2685 * Enable RSS & Setup RSS Hash functions
2624 2686 */
2625 2687 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2626 2688 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2627 2689 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2628 2690 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2629 2691 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2630 2692 IXGBE_MRQC_RSS_FIELD_IPV6 |
2631 2693 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2632 2694 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2633 2695 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2634 2696
2635 2697 /*
2636 2698 * Enable VMDq+RSS.
2637 2699 */
2638 2700 if (ixgbe->num_rx_groups > 32) {
2639 2701 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2640 2702 } else {
2641 2703 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2642 2704 }
2643 2705
2644 2706 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2645 2707
2646 2708 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2647 2709 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2648 2710 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2649 2711 }
2650 2712 break;
2651 2713
2652 2714 default:
2653 2715 break;
2654 2716
2655 2717 }
2656 2718
2657 2719 /*
2658 2720 * Disable Packet Checksum to enable RSS for multiple receive queues.
2659 2721 * It is an adapter hardware limitation that Packet Checksum is
2660 2722 * mutually exclusive with RSS.
2661 2723 */
2662 2724 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2663 2725 rxcsum |= IXGBE_RXCSUM_PCSD;
2664 2726 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2665 2727 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2666 2728
2667 - if (hw->mac.type == ixgbe_mac_82599EB) {
2729 + if (hw->mac.type == ixgbe_mac_82599EB ||
2730 + hw->mac.type == ixgbe_mac_X540) {
2668 2731 /*
2669 2732 * Enable Virtualization and Replication.
2670 2733 */
2671 2734 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2672 2735 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2673 2736
2674 2737 /*
2675 2738 * Enable receiving packets to all VFs
2676 2739 */
2677 2740 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2678 2741 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2679 2742 }
2680 2743 }
2681 2744
2682 2745 /*
2683 2746 * ixgbe_init_unicst - Initialize the unicast addresses.
2684 2747 */
2685 2748 static void
2686 2749 ixgbe_init_unicst(ixgbe_t *ixgbe)
2687 2750 {
2688 2751 struct ixgbe_hw *hw = &ixgbe->hw;
2689 2752 uint8_t *mac_addr;
2690 2753 int slot;
2691 2754 /*
2692 2755 * Here we should consider two situations:
2693 2756 *
2694 2757 * 1. Chipset is initialized for the first time:
2695 2758 * Clear all the multiple unicast addresses.
2696 2759 *
2697 2760 * 2. Chipset is reset
2698 2761 * Recover the multiple unicast addresses from the
2699 2762 * software data structure to the RAR registers.
2700 2763 */
2701 2764 if (!ixgbe->unicst_init) {
2702 2765 /*
2703 2766 * Initialize the multiple unicast addresses
2704 2767 */
2705 2768 ixgbe->unicst_total = hw->mac.num_rar_entries;
2706 2769 ixgbe->unicst_avail = ixgbe->unicst_total;
2707 2770 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2708 2771 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2709 2772 bzero(mac_addr, ETHERADDRL);
2710 2773 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
2711 2774 ixgbe->unicst_addr[slot].mac.set = 0;
2712 2775 }
2713 2776 ixgbe->unicst_init = B_TRUE;
2714 2777 } else {
2715 2778 /* Re-configure the RAR registers */
2716 2779 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2717 2780 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2718 2781 if (ixgbe->unicst_addr[slot].mac.set == 1) {
2719 2782 (void) ixgbe_set_rar(hw, slot, mac_addr,
2720 2783 ixgbe->unicst_addr[slot].mac.group_index,
2721 2784 IXGBE_RAH_AV);
2722 2785 } else {
2723 2786 bzero(mac_addr, ETHERADDRL);
2724 2787 (void) ixgbe_set_rar(hw, slot, mac_addr,
2725 2788 NULL, NULL);
2726 2789 }
2727 2790 }
2728 2791 }
2729 2792 }
2730 2793
2731 2794 /*
2732 2795 * ixgbe_unicst_find - Find the slot for the specified unicast address
2733 2796 */
2734 2797 int
2735 2798 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2736 2799 {
2737 2800 int slot;
2738 2801
2739 2802 ASSERT(mutex_owned(&ixgbe->gen_lock));
2740 2803
2741 2804 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2742 2805 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2743 2806 mac_addr, ETHERADDRL) == 0)
2744 2807 return (slot);
2745 2808 }
2746 2809
2747 2810 return (-1);
2748 2811 }
2749 2812
2750 2813 /*
2751 2814 * ixgbe_multicst_add - Add a multicst address.
2752 2815 */
2753 2816 int
2754 2817 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2755 2818 {
2756 2819 ASSERT(mutex_owned(&ixgbe->gen_lock));
2757 2820
2758 2821 if ((multiaddr[0] & 01) == 0) {
2759 2822 return (EINVAL);
2760 2823 }
2761 2824
2762 2825 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2763 2826 return (ENOENT);
2764 2827 }
2765 2828
2766 2829 bcopy(multiaddr,
2767 2830 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2768 2831 ixgbe->mcast_count++;
2769 2832
2770 2833 /*
2771 2834 * Update the multicast table in the hardware
2772 2835 */
2773 2836 ixgbe_setup_multicst(ixgbe);
2774 2837
2775 2838 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2776 2839 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2777 2840 return (EIO);
2778 2841 }
2779 2842
2780 2843 return (0);
2781 2844 }
2782 2845
2783 2846 /*
2784 2847 * ixgbe_multicst_remove - Remove a multicst address.
2785 2848 */
2786 2849 int
2787 2850 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2788 2851 {
2789 2852 int i;
2790 2853
2791 2854 ASSERT(mutex_owned(&ixgbe->gen_lock));
2792 2855
2793 2856 for (i = 0; i < ixgbe->mcast_count; i++) {
2794 2857 if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2795 2858 ETHERADDRL) == 0) {
2796 2859 for (i++; i < ixgbe->mcast_count; i++) {
2797 2860 ixgbe->mcast_table[i - 1] =
2798 2861 ixgbe->mcast_table[i];
2799 2862 }
2800 2863 ixgbe->mcast_count--;
2801 2864 break;
2802 2865 }
2803 2866 }
2804 2867
2805 2868 /*
2806 2869 * Update the multicast table in the hardware
2807 2870 */
2808 2871 ixgbe_setup_multicst(ixgbe);
2809 2872
2810 2873 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2811 2874 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2812 2875 return (EIO);
2813 2876 }
2814 2877
2815 2878 return (0);
2816 2879 }
2817 2880
2818 2881 /*
2819 2882 * ixgbe_setup_multicst - Setup multicast data structures.
2820 2883 *
2821 2884 * This routine initializes all of the multicast related structures
2822 2885 * and saves them in the hardware registers.
2823 2886 */
2824 2887 static void
2825 2888 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2826 2889 {
2827 2890 uint8_t *mc_addr_list;
2828 2891 uint32_t mc_addr_count;
2829 2892 struct ixgbe_hw *hw = &ixgbe->hw;
2830 2893
2831 2894 ASSERT(mutex_owned(&ixgbe->gen_lock));
2832 2895
2833 2896 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2834 2897
2835 2898 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2836 2899 mc_addr_count = ixgbe->mcast_count;
2837 2900
2838 2901 /*
2839 2902 * Update the multicast addresses to the MTA registers
2840 2903 */
2841 2904 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2842 - ixgbe_mc_table_itr);
2905 + ixgbe_mc_table_itr, TRUE);
2843 2906 }
2844 2907
2845 2908 /*
2846 2909 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2847 2910 *
2848 2911 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2849 2912 * Different chipsets may have different allowed configuration of vmdq and rss.
2850 2913 */
2851 2914 static void
2852 2915 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2853 2916 {
2854 2917 struct ixgbe_hw *hw = &ixgbe->hw;
2855 2918 uint32_t ring_per_group;
2856 2919
2857 2920 switch (hw->mac.type) {
2858 2921 case ixgbe_mac_82598EB:
2859 2922 /*
2860 2923 * 82598 supports the following combination:
2861 2924 * vmdq no. x rss no.
2862 2925 * [5..16] x 1
2863 2926 * [1..4] x [1..16]
2864 2927 * However 8 rss queue per pool (vmdq) is sufficient for
2865 2928 * most cases.
2866 2929 */
2867 2930 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2868 2931 if (ixgbe->num_rx_groups > 4) {
2869 2932 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2870 2933 } else {
2871 2934 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2872 2935 min(8, ring_per_group);
2873 2936 }
2874 2937
2875 2938 break;
2876 2939
2877 2940 case ixgbe_mac_82599EB:
2941 + case ixgbe_mac_X540:
2878 2942 /*
2879 2943 * 82599 supports the following combination:
2880 2944 * vmdq no. x rss no.
2881 2945 * [33..64] x [1..2]
2882 2946 * [2..32] x [1..4]
2883 2947 * 1 x [1..16]
2884 2948 * However, 8 rss queues per pool (vmdq) are sufficient for
2885 2949 * most cases.
2950 + *
2951 + * For now, treat X540 like the 82599.
2886 2952 */
2887 2953 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2888 2954 if (ixgbe->num_rx_groups == 1) {
2889 2955 ixgbe->num_rx_rings = min(8, ring_per_group);
2890 2956 } else if (ixgbe->num_rx_groups <= 32) {
2891 2957 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2892 2958 min(4, ring_per_group);
2893 2959 } else if (ixgbe->num_rx_groups <= 64) {
2894 2960 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2895 2961 min(2, ring_per_group);
2896 2962 }
2897 2963 break;
2898 2964
2899 2965 default:
2900 2966 break;
2901 2967 }
2902 2968
2903 2969 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2904 2970
2905 2971 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
2906 2972 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
2907 2973 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
2908 2974 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
2909 2975 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
2910 2976 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
2911 2977 } else {
2912 2978 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
2913 2979 }
2914 2980
2915 2981 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
2916 2982 ixgbe->num_rx_groups, ixgbe->num_rx_rings);
2917 2983 }
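To make the clamping above concrete, here is an editorial walk-through with assumed numbers (eight groups and sixty-four requested rings on an 82599/X540); none of these values come from the webrev itself.

#include <stdint.h>

/* Hypothetical illustration of the 82599/X540 ring clamp. */
static void
vmdq_rss_clamp_example(void)
{
	uint32_t groups = 8, rings = 64;
	uint32_t per_group = rings / groups;			/* 8 */
	rings = groups * ((per_group < 4) ? per_group : 4);	/* 8 * 4 == 32 */
	/* 8 groups x 4 rings per group -> IXGBE_CLASSIFY_VMDQ_RSS */
	(void) rings;
}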
2918 2984
2919 2985 /*
2920 2986 * ixgbe_get_conf - Get driver configurations set in driver.conf.
2921 2987 *
2922 2988 * This routine gets user-configured values out of the configuration
2923 2989 * file ixgbe.conf.
2924 2990 *
2925 2991 * For each configurable value, there is a minimum, a maximum, and a
2926 2992 * default.
2927 2993 * If user does not configure a value, use the default.
2928 2994 * If user configures below the minimum, use the minimum.
2929 2995 * If user configures above the maximum, use the maximum.
2930 2996 */
2931 2997 static void
2932 2998 ixgbe_get_conf(ixgbe_t *ixgbe)
2933 2999 {
2934 3000 struct ixgbe_hw *hw = &ixgbe->hw;
2935 3001 uint32_t flow_control;
2936 3002
2937 3003 /*
2938 3004 * ixgbe driver supports the following user configurations:
2939 3005 *
2940 3006 * Jumbo frame configuration:
2941 3007 * default_mtu
2942 3008 *
2943 3009 * Ethernet flow control configuration:
2944 3010 * flow_control
2945 3011 *
2946 3012 * Multiple rings configurations:
2947 3013 * tx_queue_number
2948 3014 * tx_ring_size
2949 3015 * rx_queue_number
2950 3016 * rx_ring_size
2951 3017 *
2952 3018 * Call ixgbe_get_prop() to get the value for a specific
2953 3019 * configuration parameter.
2954 3020 */
2955 3021
2956 3022 /*
2957 3023 * Jumbo frame configuration - max_frame_size controls host buffer
2958 3024 * allocation, so it includes the MTU, ethernet header, vlan tag and
2959 3025 * frame check sequence.
2960 3026 */
2961 3027 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
2962 3028 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
2963 3029
2964 3030 ixgbe->max_frame_size = ixgbe->default_mtu +
2965 3031 sizeof (struct ether_vlan_header) + ETHERFCSL;
2966 3032
2967 3033 /*
2968 3034 * Ethernet flow control configuration
2969 3035 */
2970 3036 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
2971 3037 ixgbe_fc_none, 3, ixgbe_fc_none);
2972 3038 if (flow_control == 3)
2973 3039 flow_control = ixgbe_fc_default;
2974 3040
2975 3041 /*
2976 3042 * fc.requested mode is what the user requests. After autoneg,
2977 3043 * fc.current_mode will be the flow_control mode that was negotiated.
2978 3044 */
2979 3045 hw->fc.requested_mode = flow_control;
2980 3046
2981 3047 /*
2982 3048 * Multiple rings configurations
2983 3049 */
2984 3050 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
2985 3051 ixgbe->capab->min_tx_que_num,
2986 3052 ixgbe->capab->max_tx_que_num,
2987 3053 ixgbe->capab->def_tx_que_num);
2988 3054 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
2989 3055 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
2990 3056
2991 3057 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
2992 3058 ixgbe->capab->min_rx_que_num,
2993 3059 ixgbe->capab->max_rx_que_num,
2994 3060 ixgbe->capab->def_rx_que_num);
2995 3061 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
2996 3062 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
2997 3063
2998 3064 /*
2999 3065 * Multiple groups configuration
3000 3066 */
3001 3067 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3002 3068 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3003 3069 ixgbe->capab->def_rx_grp_num);
3004 3070
3005 3071 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3006 3072 0, 1, DEFAULT_MR_ENABLE);
3007 3073
3008 3074 if (ixgbe->mr_enable == B_FALSE) {
3009 3075 ixgbe->num_tx_rings = 1;
3010 3076 ixgbe->num_rx_rings = 1;
3011 3077 ixgbe->num_rx_groups = 1;
3012 3078 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3013 3079 } else {
3014 3080 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3015 3081 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3016 3082 /*
3017 3083 * The combination of num_rx_rings and num_rx_groups
3018 3084 * may be not supported by h/w. We need to adjust
3019 3085 * them to appropriate values.
3020 3086 */
3021 3087 ixgbe_setup_vmdq_rss_conf(ixgbe);
3022 3088 }
3023 3089
3024 3090 /*
3025 3091 * Tunable used to force an interrupt type. The only use is
3026 3092 * for testing of the lesser interrupt types.
3027 3093 * 0 = don't force interrupt type
3028 3094 * 1 = force interrupt type MSI-X
3029 3095 * 2 = force interrupt type MSI
3030 3096 * 3 = force interrupt type Legacy
3031 3097 */
3032 3098 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3033 3099 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3034 3100
3035 3101 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3036 3102 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3037 3103 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3038 3104 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3039 3105 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3040 3106 0, 1, DEFAULT_LSO_ENABLE);
3041 3107 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3042 3108 0, 1, DEFAULT_LRO_ENABLE);
3043 3109 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3044 3110 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3045 3111 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3046 3112 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3047 3113
3048 - /* Head Write Back not recommended for 82599 */
3049 - if (hw->mac.type >= ixgbe_mac_82599EB) {
3114 + /* Head Write Back not recommended for 82599 and X540 */
3115 + if (hw->mac.type == ixgbe_mac_82599EB ||
3116 + hw->mac.type == ixgbe_mac_X540) {
3050 3117 ixgbe->tx_head_wb_enable = B_FALSE;
3051 3118 }
3052 3119
3053 3120 /*
3054 3121 * ixgbe LSO needs the tx h/w checksum support.
3055 3122 * LSO will be disabled if tx h/w checksum is not
3056 3123 * enabled.
3057 3124 */
3058 3125 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3059 3126 ixgbe->lso_enable = B_FALSE;
3060 3127 }
3061 3128
3062 3129 /*
3063 3130 * ixgbe LRO needs the rx h/w checksum support.
3064 3131 * LRO will be disabled if rx h/w checksum is not
3065 3132 * enabled.
3066 3133 */
3067 3134 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3068 3135 ixgbe->lro_enable = B_FALSE;
3069 3136 }
3070 3137
3071 3138 /*
3072 - * ixgbe LRO only been supported by 82599 now
3139 + * ixgbe LRO is only supported by 82599 and X540 for now
3073 3140 */
3074 - if (hw->mac.type != ixgbe_mac_82599EB) {
3141 + if (hw->mac.type == ixgbe_mac_82598EB) {
3075 3142 ixgbe->lro_enable = B_FALSE;
3076 3143 }
3077 3144 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3078 3145 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3079 3146 DEFAULT_TX_COPY_THRESHOLD);
3080 3147 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3081 3148 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3082 3149 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3083 3150 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3084 3151 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3085 3152 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3086 3153 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3087 3154 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3088 3155 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3089 3156
3090 3157 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3091 3158 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3092 3159 DEFAULT_RX_COPY_THRESHOLD);
3093 3160 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3094 3161 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3095 3162 DEFAULT_RX_LIMIT_PER_INTR);
3096 3163
3097 3164 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3098 3165 ixgbe->capab->min_intr_throttle,
3099 3166 ixgbe->capab->max_intr_throttle,
3100 3167 ixgbe->capab->def_intr_throttle);
3101 3168 /*
3102 - * 82599 requires the interupt throttling rate is
3169 + * 82599 and X540 require the interrupt throttling rate to be
3103 3170 * a multiple of 8. This is enforced by the register
3104 3171 * definition.
3105 3172 */
3106 - if (hw->mac.type == ixgbe_mac_82599EB)
3173 + if (hw->mac.type == ixgbe_mac_82599EB || hw->mac.type == ixgbe_mac_X540)
3107 3174 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3108 3175 }
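A single worked value for the mask above (editorial; the setting of 1234 is hypothetical): 0xFF8 clears the low three bits, rounding the configured throttle down to a multiple of 8 within the register's 12-bit field.

#include <stdint.h>

/* Hypothetical illustration: 0x4D2 & 0xFF8 == 0x4D0. */
static const uint32_t throttle_example = 1234 & 0xFF8;	/* == 1232 */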
3109 3176
3110 3177 static void
3111 3178 ixgbe_init_params(ixgbe_t *ixgbe)
3112 3179 {
3113 3180 ixgbe->param_en_10000fdx_cap = 1;
3114 3181 ixgbe->param_en_1000fdx_cap = 1;
3115 3182 ixgbe->param_en_100fdx_cap = 1;
3116 3183 ixgbe->param_adv_10000fdx_cap = 1;
3117 3184 ixgbe->param_adv_1000fdx_cap = 1;
3118 3185 ixgbe->param_adv_100fdx_cap = 1;
3119 3186
3120 3187 ixgbe->param_pause_cap = 1;
3121 3188 ixgbe->param_asym_pause_cap = 1;
3122 3189 ixgbe->param_rem_fault = 0;
3123 3190
3124 3191 ixgbe->param_adv_autoneg_cap = 1;
3125 3192 ixgbe->param_adv_pause_cap = 1;
3126 3193 ixgbe->param_adv_asym_pause_cap = 1;
3127 3194 ixgbe->param_adv_rem_fault = 0;
3128 3195
3129 3196 ixgbe->param_lp_10000fdx_cap = 0;
3130 3197 ixgbe->param_lp_1000fdx_cap = 0;
3131 3198 ixgbe->param_lp_100fdx_cap = 0;
3132 3199 ixgbe->param_lp_autoneg_cap = 0;
3133 3200 ixgbe->param_lp_pause_cap = 0;
3134 3201 ixgbe->param_lp_asym_pause_cap = 0;
3135 3202 ixgbe->param_lp_rem_fault = 0;
3136 3203 }
3137 3204
3138 3205 /*
3139 3206 * ixgbe_get_prop - Get a property value out of the configuration file
3140 3207 * ixgbe.conf.
3141 3208 *
3142 3209 * Caller provides the name of the property, a default value, a minimum
3143 3210 * value, and a maximum value.
3144 3211 *
3145 3212 * Return configured value of the property, with default, minimum and
3146 3213 * maximum properly applied.
3147 3214 */
3148 3215 static int
3149 3216 ixgbe_get_prop(ixgbe_t *ixgbe,
3150 3217 char *propname, /* name of the property */
3151 3218 int minval, /* minimum acceptable value */
3152 3219 int maxval, /* maximum acceptable value */
3153 3220 int defval) /* default value */
3154 3221 {
3155 3222 int value;
3156 3223
3157 3224 /*
3158 3225 * Call ddi_prop_get_int() to read the conf settings
3159 3226 */
3160 3227 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3161 3228 DDI_PROP_DONTPASS, propname, defval);
3162 3229 if (value > maxval)
3163 3230 value = maxval;
3164 3231
3165 3232 if (value < minval)
3166 3233 value = minval;
3167 3234
3168 3235 return (value);
3169 3236 }
3170 3237
3171 3238 /*
3172 3239 * ixgbe_driver_setup_link - Using the link properties to setup the link.
3173 3240 */
3174 3241 int
3175 3242 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3176 3243 {
3177 3244 u32 autoneg_advertised = 0;
3178 3245
3179 3246 /*
3180 3247 * No half duplex support with 10Gb parts
3181 3248 */
3182 3249 if (ixgbe->param_adv_10000fdx_cap == 1)
3183 3250 autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3184 3251
3185 3252 if (ixgbe->param_adv_1000fdx_cap == 1)
3186 3253 autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3187 3254
3188 3255 if (ixgbe->param_adv_100fdx_cap == 1)
3189 3256 autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
3190 3257
3191 3258 if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
3192 3259 ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
3193 3260 "to autonegotiation with full link capabilities.");
3194 3261
3195 3262 autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
3196 3263 IXGBE_LINK_SPEED_1GB_FULL |
3197 3264 IXGBE_LINK_SPEED_100_FULL;
3198 3265 }
3199 3266
3200 3267 if (setup_hw) {
3201 3268 if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
3202 3269 ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
3203 3270 ixgbe_notice(ixgbe, "Setup link failed on this "
3204 3271 "device.");
3205 3272 return (IXGBE_FAILURE);
3206 3273 }
3207 3274 }
3208 3275
3209 3276 return (IXGBE_SUCCESS);
3210 3277 }
3211 3278
3212 3279 /*
3213 3280 * ixgbe_driver_link_check - Link status processing.
3214 3281 *
3215 3282 * This function can be called in both kernel context and interrupt context
3216 3283 */
3217 3284 static void
3218 3285 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3219 3286 {
3220 3287 struct ixgbe_hw *hw = &ixgbe->hw;
3221 3288 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3222 3289 boolean_t link_up = B_FALSE;
3223 3290 boolean_t link_changed = B_FALSE;
3224 3291
3225 3292 ASSERT(mutex_owned(&ixgbe->gen_lock));
3226 3293
3227 3294 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3228 3295 if (link_up) {
3229 3296 ixgbe->link_check_complete = B_TRUE;
3230 3297
3231 3298 /* Link is up, enable flow control settings */
3232 - (void) ixgbe_fc_enable(hw, 0);
3299 + (void) ixgbe_fc_enable(hw);
3233 3300
3234 3301 /*
3235 3302 * The Link is up, check whether it was marked as down earlier
3236 3303 */
3237 3304 if (ixgbe->link_state != LINK_STATE_UP) {
3238 3305 switch (speed) {
3239 3306 case IXGBE_LINK_SPEED_10GB_FULL:
3240 3307 ixgbe->link_speed = SPEED_10GB;
3241 3308 break;
3242 3309 case IXGBE_LINK_SPEED_1GB_FULL:
3243 3310 ixgbe->link_speed = SPEED_1GB;
3244 3311 break;
3245 3312 case IXGBE_LINK_SPEED_100_FULL:
3246 3313 ixgbe->link_speed = SPEED_100;
3247 3314 }
3248 3315 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3249 3316 ixgbe->link_state = LINK_STATE_UP;
3250 3317 link_changed = B_TRUE;
3251 3318 }
3252 3319 } else {
3253 3320 if (ixgbe->link_check_complete == B_TRUE ||
3254 3321 (ixgbe->link_check_complete == B_FALSE &&
3255 3322 gethrtime() >= ixgbe->link_check_hrtime)) {
3256 3323 /*
3257 3324 * The link is really down
3258 3325 */
3259 3326 ixgbe->link_check_complete = B_TRUE;
3260 3327
3261 3328 if (ixgbe->link_state != LINK_STATE_DOWN) {
3262 3329 ixgbe->link_speed = 0;
3263 3330 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
3264 3331 ixgbe->link_state = LINK_STATE_DOWN;
3265 3332 link_changed = B_TRUE;
3266 3333 }
3267 3334 }
3268 3335 }
3269 3336
3270 3337 /*
3271 3338 * If we are in an interrupt context, need to re-enable the
3272 3339 * interrupt, which was automasked
3273 3340 */
3274 3341 if (servicing_interrupt() != 0) {
3275 3342 ixgbe->eims |= IXGBE_EICR_LSC;
3276 3343 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3277 3344 }
3278 3345
3279 3346 if (link_changed) {
3280 3347 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3281 3348 }
3282 3349 }
3283 3350
3284 3351 /*
3285 3352 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
3286 3353 */
3287 3354 static void
3288 3355 ixgbe_sfp_check(void *arg)
3289 3356 {
3290 3357 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3291 3358 uint32_t eicr = ixgbe->eicr;
3292 3359 struct ixgbe_hw *hw = &ixgbe->hw;
3293 3360
3294 3361 mutex_enter(&ixgbe->gen_lock);
3295 3362 if (eicr & IXGBE_EICR_GPI_SDP1) {
3296 3363 /* clear the interrupt */
3297 3364 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
3298 3365
3299 3366 /* if link up, do multispeed fiber setup */
3300 3367 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3301 3368 B_TRUE, B_TRUE);
3302 3369 ixgbe_driver_link_check(ixgbe);
3303 3370 ixgbe_get_hw_state(ixgbe);
3304 3371 } else if (eicr & IXGBE_EICR_GPI_SDP2) {
3305 3372 /* clear the interrupt */
3306 3373 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
3307 3374
3308 3375 /* if link up, do sfp module setup */
3309 3376 (void) hw->mac.ops.setup_sfp(hw);
3310 3377
3311 3378 /* do multispeed fiber setup */
3312 3379 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3313 3380 B_TRUE, B_TRUE);
3314 3381 ixgbe_driver_link_check(ixgbe);
3315 3382 ixgbe_get_hw_state(ixgbe);
3316 3383 }
3317 3384 mutex_exit(&ixgbe->gen_lock);
3318 3385
3319 3386 /*
3320 3387 * We need to fully re-check the link later.
3321 3388 */
3322 3389 ixgbe->link_check_complete = B_FALSE;
3323 3390 ixgbe->link_check_hrtime = gethrtime() +
3324 3391 (IXGBE_LINK_UP_TIME * 100000000ULL);
3325 3392 }
3326 3393
3327 3394 /*
3328 3395 * ixgbe_overtemp_check - overtemp module processing done in taskq
3329 3396 *
3330 3397  * This routine will only be called on adapters with a temperature sensor.
3331 3398 * The indication of over-temperature can be either SDP0 interrupt or the link
3332 3399 * status change interrupt.
3333 3400 */
3334 3401 static void
3335 3402 ixgbe_overtemp_check(void *arg)
3336 3403 {
3337 3404 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3338 3405 struct ixgbe_hw *hw = &ixgbe->hw;
3339 3406 uint32_t eicr = ixgbe->eicr;
3340 3407 ixgbe_link_speed speed;
3341 3408 boolean_t link_up;
3342 3409
3343 3410 mutex_enter(&ixgbe->gen_lock);
3344 3411
3345 3412 /* make sure we know current state of link */
3346 3413 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3347 3414
3348 3415 /* check over-temp condition */
3349 3416 if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
3350 3417 (eicr & IXGBE_EICR_LSC)) {
3351 3418 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3352 3419 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3353 3420
3354 3421 /*
3355 3422 * Disable the adapter interrupts
3356 3423 */
3357 3424 ixgbe_disable_adapter_interrupts(ixgbe);
3358 3425
3359 3426 /*
3360 3427 * Disable Rx/Tx units
3361 3428 */
3362 3429 (void) ixgbe_stop_adapter(hw);
3363 3430
3364 3431 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3365 3432 ixgbe_error(ixgbe,
3366 3433 "Problem: Network adapter has been stopped "
3367 3434 "because it has overheated");
3368 3435 ixgbe_error(ixgbe,
3369 3436 "Action: Restart the computer. "
3370 3437 "If the problem persists, power off the system "
3371 3438 "and replace the adapter");
3372 3439 }
3373 3440 }
3374 3441
3375 3442 /* write to clear the interrupt */
3376 3443 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3377 3444
3378 3445 mutex_exit(&ixgbe->gen_lock);
3379 3446 }
3380 3447
3381 3448 /*
3382 3449 * ixgbe_link_timer - timer for link status detection
3383 3450 */
3384 3451 static void
3385 3452 ixgbe_link_timer(void *arg)
3386 3453 {
3387 3454 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3388 3455
3389 3456 mutex_enter(&ixgbe->gen_lock);
3390 3457 ixgbe_driver_link_check(ixgbe);
3391 3458 mutex_exit(&ixgbe->gen_lock);
3392 3459 }
3393 3460
3394 3461 /*
3395 3462 * ixgbe_local_timer - Driver watchdog function.
3396 3463 *
3397 3464  * This function handles the transmit stall check, error recovery, and re-arms the watchdog.
3398 3465 */
3399 3466 static void
3400 3467 ixgbe_local_timer(void *arg)
3401 3468 {
3402 3469 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3403 3470
3404 3471 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3405 3472 goto out;
3406 3473
3407 3474 if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3408 3475 ixgbe->reset_count++;
3409 3476 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3410 3477 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3411 3478 goto out;
3412 3479 }
3413 3480
3414 3481 if (ixgbe_stall_check(ixgbe)) {
3415 3482 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3416 3483 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3417 3484
3418 3485 ixgbe->reset_count++;
3419 3486 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3420 3487 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3421 3488 }
3422 3489
3423 3490 out:
3424 3491 ixgbe_restart_watchdog_timer(ixgbe);
3425 3492 }
3426 3493
3427 3494 /*
3428 3495 * ixgbe_stall_check - Check for transmit stall.
3429 3496 *
3430 3497 * This function checks if the adapter is stalled (in transmit).
3431 3498 *
3432 3499 * It is called each time the watchdog timeout is invoked.
3433 3500 * If the transmit descriptor reclaim continuously fails,
3434 3501 * the watchdog value will increment by 1. If the watchdog
3435 3502 * value exceeds the threshold, the ixgbe is assumed to
3436 3503  * have stalled and needs to be reset.
3437 3504 */
3438 3505 static boolean_t
3439 3506 ixgbe_stall_check(ixgbe_t *ixgbe)
3440 3507 {
3441 3508 ixgbe_tx_ring_t *tx_ring;
3442 3509 boolean_t result;
3443 3510 int i;
3444 3511
3445 3512 if (ixgbe->link_state != LINK_STATE_UP)
3446 3513 return (B_FALSE);
3447 3514
3448 3515 /*
3449 3516 * If any tx ring is stalled, we'll reset the chipset
3450 3517 */
3451 3518 result = B_FALSE;
3452 3519 for (i = 0; i < ixgbe->num_tx_rings; i++) {
3453 3520 tx_ring = &ixgbe->tx_rings[i];
3454 3521 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
3455 3522 tx_ring->tx_recycle(tx_ring);
3456 3523 }
3457 3524
3458 3525 if (tx_ring->recycle_fail > 0)
3459 3526 tx_ring->stall_watchdog++;
3460 3527 else
3461 3528 tx_ring->stall_watchdog = 0;
3462 3529
3463 3530 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
3464 3531 result = B_TRUE;
3465 3532 break;
3466 3533 }
3467 3534 }
3468 3535
3469 3536 if (result) {
3470 3537 tx_ring->stall_watchdog = 0;
3471 3538 tx_ring->recycle_fail = 0;
3472 3539 }
3473 3540
3474 3541 return (result);
3475 3542 }
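
The stall detection above is a plain consecutive-failure counter: each one-second watchdog tick with recycle_fail set bumps stall_watchdog, and any tick without failures zeroes it. A minimal standalone model of that logic follows; the 8-tick threshold is illustrative only, the driver's real value is STALL_WATCHDOG_TIMEOUT from its headers.

	#include <stdio.h>

	#define	STALL_TICKS	8	/* illustrative; the driver uses STALL_WATCHDOG_TIMEOUT */

	/* Returns nonzero once 'fail' has been true for STALL_TICKS consecutive ticks. */
	static int
	stall_tick(int *watchdog, int fail)
	{
		*watchdog = fail ? *watchdog + 1 : 0;
		return (*watchdog >= STALL_TICKS);
	}

	int
	main(void)
	{
		int wd = 0, t;

		for (t = 1; t <= 10; t++) {
			if (stall_tick(&wd, 1)) {
				printf("stalled after %d ticks\n", t);	/* fires at t == 8 */
				break;
			}
		}
		return (0);
	}
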
3476 3543
3477 3544
3478 3545 /*
3479 3546 * is_valid_mac_addr - Check if the mac address is valid.
3480 3547 */
3481 3548 static boolean_t
3482 3549 is_valid_mac_addr(uint8_t *mac_addr)
3483 3550 {
3484 3551 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
3485 3552 const uint8_t addr_test2[6] =
3486 3553 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3487 3554
3488 3555 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
3489 3556 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
3490 3557 return (B_FALSE);
3491 3558
3492 3559 return (B_TRUE);
3493 3560 }
3494 3561
3495 3562 static boolean_t
3496 3563 ixgbe_find_mac_address(ixgbe_t *ixgbe)
3497 3564 {
3498 3565 #ifdef __sparc
3499 3566 struct ixgbe_hw *hw = &ixgbe->hw;
3500 3567 uchar_t *bytes;
3501 3568 struct ether_addr sysaddr;
3502 3569 uint_t nelts;
3503 3570 int err;
3504 3571 boolean_t found = B_FALSE;
3505 3572
3506 3573 /*
3507 3574 * The "vendor's factory-set address" may already have
3508 3575 * been extracted from the chip, but if the property
3509 3576 * "local-mac-address" is set we use that instead.
3510 3577 *
3511 3578 * We check whether it looks like an array of 6
3512 3579 * bytes (which it should, if OBP set it). If we can't
3513 3580 * make sense of it this way, we'll ignore it.
3514 3581 */
3515 3582 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3516 3583 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
3517 3584 if (err == DDI_PROP_SUCCESS) {
3518 3585 if (nelts == ETHERADDRL) {
3519 3586 while (nelts--)
3520 3587 hw->mac.addr[nelts] = bytes[nelts];
3521 3588 found = B_TRUE;
3522 3589 }
3523 3590 ddi_prop_free(bytes);
3524 3591 }
3525 3592
3526 3593 /*
3527 3594 * Look up the OBP property "local-mac-address?". If the user has set
3528 3595 * 'local-mac-address? = false', use "the system address" instead.
3529 3596 */
3530 3597 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
3531 3598 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3532 3599 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
3533 3600 if (localetheraddr(NULL, &sysaddr) != 0) {
3534 3601 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
3535 3602 found = B_TRUE;
3536 3603 }
3537 3604 }
3538 3605 ddi_prop_free(bytes);
3539 3606 }
3540 3607
3541 3608 /*
3542 3609 * Finally(!), if there's a valid "mac-address" property (created
3543 3610 * if we netbooted from this interface), we must use this instead
3544 3611 * of any of the above to ensure that the NFS/install server doesn't
3545 3612 * get confused by the address changing as Solaris takes over!
3546 3613 */
3547 3614 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3548 3615 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
3549 3616 if (err == DDI_PROP_SUCCESS) {
3550 3617 if (nelts == ETHERADDRL) {
3551 3618 while (nelts--)
3552 3619 hw->mac.addr[nelts] = bytes[nelts];
3553 3620 found = B_TRUE;
3554 3621 }
3555 3622 ddi_prop_free(bytes);
3556 3623 }
3557 3624
3558 3625 if (found) {
3559 3626 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
3560 3627 return (B_TRUE);
3561 3628 }
3562 3629 #else
3563 3630 _NOTE(ARGUNUSED(ixgbe));
3564 3631 #endif
3565 3632
3566 3633 return (B_TRUE);
3567 3634 }
3568 3635
3569 3636 #pragma inline(ixgbe_arm_watchdog_timer)
3570 3637 static void
3571 3638 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
3572 3639 {
3573 3640 /*
3574 3641 	 * Fire a watchdog timer that expires in one second
3575 3642 */
3576 3643 ixgbe->watchdog_tid =
3577 3644 timeout(ixgbe_local_timer,
3578 3645 (void *)ixgbe, 1 * drv_usectohz(1000000));
3579 3646
3580 3647 }
3581 3648
3582 3649 /*
3583 3650 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
3584 3651 */
3585 3652 void
3586 3653 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
3587 3654 {
3588 3655 mutex_enter(&ixgbe->watchdog_lock);
3589 3656
3590 3657 if (!ixgbe->watchdog_enable) {
3591 3658 ixgbe->watchdog_enable = B_TRUE;
3592 3659 ixgbe->watchdog_start = B_TRUE;
3593 3660 ixgbe_arm_watchdog_timer(ixgbe);
3594 3661 }
3595 3662
3596 3663 mutex_exit(&ixgbe->watchdog_lock);
3597 3664 }
3598 3665
3599 3666 /*
3600 3667 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
3601 3668 */
3602 3669 void
3603 3670 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
3604 3671 {
3605 3672 timeout_id_t tid;
3606 3673
3607 3674 mutex_enter(&ixgbe->watchdog_lock);
3608 3675
3609 3676 ixgbe->watchdog_enable = B_FALSE;
3610 3677 ixgbe->watchdog_start = B_FALSE;
3611 3678 tid = ixgbe->watchdog_tid;
3612 3679 ixgbe->watchdog_tid = 0;
3613 3680
3614 3681 mutex_exit(&ixgbe->watchdog_lock);
3615 3682
3616 3683 if (tid != 0)
3617 3684 (void) untimeout(tid);
3618 3685 }
3619 3686
3620 3687 /*
3621 3688 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
3622 3689 */
3623 3690 void
3624 3691 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
3625 3692 {
3626 3693 mutex_enter(&ixgbe->watchdog_lock);
3627 3694
3628 3695 if (ixgbe->watchdog_enable) {
3629 3696 if (!ixgbe->watchdog_start) {
3630 3697 ixgbe->watchdog_start = B_TRUE;
3631 3698 ixgbe_arm_watchdog_timer(ixgbe);
3632 3699 }
3633 3700 }
3634 3701
3635 3702 mutex_exit(&ixgbe->watchdog_lock);
3636 3703 }
3637 3704
3638 3705 /*
3639 3706 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
3640 3707 */
3641 3708 static void
3642 3709 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
3643 3710 {
3644 3711 mutex_enter(&ixgbe->watchdog_lock);
3645 3712
3646 3713 if (ixgbe->watchdog_start)
3647 3714 ixgbe_arm_watchdog_timer(ixgbe);
3648 3715
3649 3716 mutex_exit(&ixgbe->watchdog_lock);
3650 3717 }
3651 3718
3652 3719 /*
3653 3720 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
3654 3721 */
3655 3722 void
3656 3723 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
3657 3724 {
3658 3725 timeout_id_t tid;
3659 3726
3660 3727 mutex_enter(&ixgbe->watchdog_lock);
3661 3728
3662 3729 ixgbe->watchdog_start = B_FALSE;
3663 3730 tid = ixgbe->watchdog_tid;
3664 3731 ixgbe->watchdog_tid = 0;
3665 3732
3666 3733 mutex_exit(&ixgbe->watchdog_lock);
3667 3734
3668 3735 if (tid != 0)
3669 3736 (void) untimeout(tid);
3670 3737 }
3671 3738
3672 3739 /*
3673 3740 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
3674 3741 */
3675 3742 static void
3676 3743 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
3677 3744 {
3678 3745 struct ixgbe_hw *hw = &ixgbe->hw;
3679 3746
3680 3747 /*
3681 3748 * mask all interrupts off
3682 3749 */
3683 3750 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
3684 3751
3685 3752 /*
3686 3753 * for MSI-X, also disable autoclear
3687 3754 */
3688 3755 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3689 3756 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
3690 3757 }
3691 3758
3692 3759 IXGBE_WRITE_FLUSH(hw);
3693 3760 }
3694 3761
3695 3762 /*
3696 3763 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
3697 3764 */
3698 3765 static void
3699 3766 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
3700 3767 {
3701 3768 struct ixgbe_hw *hw = &ixgbe->hw;
3702 3769 uint32_t eiac, eiam;
3703 3770 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3704 3771
3705 3772 /* interrupt types to enable */
3706 3773 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */
3707 3774 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */
3708 3775 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
3709 3776
3710 3777 /* enable automask on "other" causes that this adapter can generate */
3711 3778 eiam = ixgbe->capab->other_intr;
3712 3779
3713 3780 /*
3714 3781 * msi-x mode
3715 3782 */
3716 3783 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3717 3784 /* enable autoclear but not on bits 29:20 */
3718 3785 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3719 3786
3720 3787 /* general purpose interrupt enable */
3721 3788 gpie |= (IXGBE_GPIE_MSIX_MODE
3722 3789 | IXGBE_GPIE_PBA_SUPPORT
3723 3790 | IXGBE_GPIE_OCD
3724 3791 | IXGBE_GPIE_EIAME);
3725 3792 /*
3726 3793 * non-msi-x mode
3727 3794 */
3728 3795 } else {
3729 3796
3730 3797 /* disable autoclear, leave gpie at default */
3731 3798 eiac = 0;
3732 3799
3733 3800 /*
3734 3801 * General purpose interrupt enable.
3735 - * For 82599, extended interrupt automask enable
3802 + * For 82599 or X540, extended interrupt automask enable
3736 3803 * only in MSI or MSI-X mode
3737 3804 */
3738 - if ((hw->mac.type < ixgbe_mac_82599EB) ||
3805 + if ((hw->mac.type == ixgbe_mac_82598EB) ||
3739 3806 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3740 3807 gpie |= IXGBE_GPIE_EIAME;
3741 3808 }
3742 3809 }
3743 3810
3744 3811 /* Enable specific "other" interrupt types */
3745 3812 switch (hw->mac.type) {
3746 3813 case ixgbe_mac_82598EB:
3747 3814 gpie |= ixgbe->capab->other_gpie;
3748 3815 break;
3749 3816
3750 3817 case ixgbe_mac_82599EB:
3818 + case ixgbe_mac_X540:
3751 3819 gpie |= ixgbe->capab->other_gpie;
3752 3820
3753 3821 /* Enable RSC Delay 8us when LRO enabled */
3754 3822 if (ixgbe->lro_enable) {
3755 3823 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3756 3824 }
3757 3825 break;
3758 3826
3759 3827 default:
3760 3828 break;
3761 3829 }
3762 3830
3763 3831 /* write to interrupt control registers */
3764 3832 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3765 3833 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3766 3834 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3767 3835 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3768 3836 IXGBE_WRITE_FLUSH(hw);
3769 3837 }
3770 3838
3771 3839 /*
3772 3840 * ixgbe_loopback_ioctl - Loopback support.
3773 3841 */
3774 3842 enum ioc_reply
3775 3843 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
3776 3844 {
3777 3845 lb_info_sz_t *lbsp;
3778 3846 lb_property_t *lbpp;
3779 3847 uint32_t *lbmp;
3780 3848 uint32_t size;
3781 3849 uint32_t value;
3782 3850
3783 3851 if (mp->b_cont == NULL)
3784 3852 return (IOC_INVAL);
3785 3853
3786 3854 switch (iocp->ioc_cmd) {
3787 3855 default:
3788 3856 return (IOC_INVAL);
3789 3857
3790 3858 case LB_GET_INFO_SIZE:
3791 3859 size = sizeof (lb_info_sz_t);
3792 3860 if (iocp->ioc_count != size)
3793 3861 return (IOC_INVAL);
3794 3862
3795 3863 value = sizeof (lb_normal);
3796 3864 value += sizeof (lb_mac);
3797 3865 value += sizeof (lb_external);
3798 3866
3799 3867 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
3800 3868 *lbsp = value;
3801 3869 break;
3802 3870
3803 3871 case LB_GET_INFO:
3804 3872 value = sizeof (lb_normal);
3805 3873 value += sizeof (lb_mac);
3806 3874 value += sizeof (lb_external);
3807 3875
3808 3876 size = value;
3809 3877 if (iocp->ioc_count != size)
3810 3878 return (IOC_INVAL);
3811 3879
3812 3880 value = 0;
3813 3881 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
3814 3882
3815 3883 lbpp[value++] = lb_normal;
3816 3884 lbpp[value++] = lb_mac;
3817 3885 lbpp[value++] = lb_external;
3818 3886 break;
3819 3887
3820 3888 case LB_GET_MODE:
3821 3889 size = sizeof (uint32_t);
3822 3890 if (iocp->ioc_count != size)
3823 3891 return (IOC_INVAL);
3824 3892
3825 3893 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3826 3894 *lbmp = ixgbe->loopback_mode;
3827 3895 break;
3828 3896
3829 3897 case LB_SET_MODE:
3830 3898 size = 0;
3831 3899 if (iocp->ioc_count != sizeof (uint32_t))
3832 3900 return (IOC_INVAL);
3833 3901
3834 3902 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3835 3903 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
3836 3904 return (IOC_INVAL);
3837 3905 break;
3838 3906 }
3839 3907
3840 3908 iocp->ioc_count = size;
3841 3909 iocp->ioc_error = 0;
3842 3910
3843 3911 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3844 3912 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3845 3913 return (IOC_INVAL);
3846 3914 }
3847 3915
3848 3916 return (IOC_REPLY);
3849 3917 }
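
For context, the commands handled above are the <sys/netlb.h> loopback protocol: a consumer first asks for the property-list size, then fetches the list, then may set a mode. A rough user-side sketch follows; the device path and the STREAMS I_STR plumbing are assumptions about the consumer and are not defined by this change.

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <stropts.h>
	#include <unistd.h>
	#include <sys/netlb.h>

	int
	main(void)
	{
		int fd = open("/dev/ixgbe0", O_RDWR);	/* hypothetical device path */
		struct strioctl ic;
		lb_info_sz_t sz;
		lb_property_t *props;

		if (fd < 0)
			return (1);

		/* LB_GET_INFO_SIZE: how many bytes of lb_property_t follow? */
		ic.ic_cmd = LB_GET_INFO_SIZE;
		ic.ic_timout = 0;
		ic.ic_len = sizeof (sz);
		ic.ic_dp = (char *)&sz;
		if (ioctl(fd, I_STR, &ic) < 0)
			return (1);

		/* LB_GET_INFO: fetch the lb_normal, lb_mac, lb_external entries. */
		props = malloc(sz);
		ic.ic_cmd = LB_GET_INFO;
		ic.ic_len = (int)sz;
		ic.ic_dp = (char *)props;
		if (ioctl(fd, I_STR, &ic) < 0)
			return (1);

		printf("%d loopback properties\n", (int)(sz / sizeof (lb_property_t)));
		free(props);
		(void) close(fd);
		return (0);
	}
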
3850 3918
3851 3919 /*
3852 3920 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3853 3921 */
3854 3922 static boolean_t
3855 3923 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3856 3924 {
3857 3925 if (mode == ixgbe->loopback_mode)
3858 3926 return (B_TRUE);
3859 3927
3860 3928 ixgbe->loopback_mode = mode;
3861 3929
3862 3930 if (mode == IXGBE_LB_NONE) {
3863 3931 /*
3864 3932 * Reset the chip
3865 3933 */
3866 3934 (void) ixgbe_reset(ixgbe);
3867 3935 return (B_TRUE);
3868 3936 }
3869 3937
3870 3938 mutex_enter(&ixgbe->gen_lock);
3871 3939
3872 3940 switch (mode) {
3873 3941 default:
3874 3942 mutex_exit(&ixgbe->gen_lock);
3875 3943 return (B_FALSE);
3876 3944
3877 3945 case IXGBE_LB_EXTERNAL:
3878 3946 break;
3879 3947
3880 3948 case IXGBE_LB_INTERNAL_MAC:
3881 3949 ixgbe_set_internal_mac_loopback(ixgbe);
3882 3950 break;
3883 3951 }
3884 3952
3885 3953 mutex_exit(&ixgbe->gen_lock);
3886 3954
3887 3955 return (B_TRUE);
3888 3956 }
3889 3957
3890 3958 /*
3891 3959 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
3892 3960 */
3893 3961 static void
3894 3962 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
3895 3963 {
3896 3964 struct ixgbe_hw *hw;
3897 3965 uint32_t reg;
3898 3966 uint8_t atlas;
3899 3967
3900 3968 hw = &ixgbe->hw;
3901 3969
3902 3970 /*
3903 3971 * Setup MAC loopback
3904 3972 */
3905 3973 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
3906 3974 reg |= IXGBE_HLREG0_LPBK;
3907 3975 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
3908 3976
3909 3977 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3910 3978 reg &= ~IXGBE_AUTOC_LMS_MASK;
3911 3979 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3912 3980
3913 3981 /*
3914 3982 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
3915 3983 */
3916 3984 switch (hw->mac.type) {
3917 3985 case ixgbe_mac_82598EB:
3918 3986 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3919 3987 &atlas);
3920 3988 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
3921 3989 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3922 3990 atlas);
3923 3991
3924 3992 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3925 3993 &atlas);
3926 3994 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3927 3995 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3928 3996 atlas);
3929 3997
3930 3998 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3931 3999 &atlas);
3932 4000 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3933 4001 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3934 4002 atlas);
3935 4003
3936 4004 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3937 4005 &atlas);
3938 4006 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3939 4007 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3940 4008 atlas);
3941 4009 break;
3942 4010
3943 4011 case ixgbe_mac_82599EB:
4012 + case ixgbe_mac_X540:
3944 4013 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3945 4014 reg |= (IXGBE_AUTOC_FLU |
3946 4015 IXGBE_AUTOC_10G_KX4);
3947 4016 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3948 4017
3949 4018 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
3950 4019 B_FALSE, B_TRUE);
3951 4020 break;
3952 4021
3953 4022 default:
3954 4023 break;
3955 4024 }
3956 4025 }
3957 4026
3958 4027 #pragma inline(ixgbe_intr_rx_work)
3959 4028 /*
3960 4029 * ixgbe_intr_rx_work - RX processing of ISR.
3961 4030 */
3962 4031 static void
3963 4032 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
3964 4033 {
3965 4034 mblk_t *mp;
3966 4035
3967 4036 mutex_enter(&rx_ring->rx_lock);
3968 4037
3969 4038 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3970 4039 mutex_exit(&rx_ring->rx_lock);
3971 4040
3972 4041 if (mp != NULL)
3973 4042 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3974 4043 rx_ring->ring_gen_num);
3975 4044 }
3976 4045
3977 4046 #pragma inline(ixgbe_intr_tx_work)
3978 4047 /*
3979 4048 * ixgbe_intr_tx_work - TX processing of ISR.
3980 4049 */
3981 4050 static void
3982 4051 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
3983 4052 {
3984 4053 ixgbe_t *ixgbe = tx_ring->ixgbe;
3985 4054
3986 4055 /*
3987 4056 * Recycle the tx descriptors
3988 4057 */
3989 4058 tx_ring->tx_recycle(tx_ring);
3990 4059
3991 4060 /*
3992 4061 * Schedule the re-transmit
3993 4062 */
3994 4063 if (tx_ring->reschedule &&
3995 4064 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
3996 4065 tx_ring->reschedule = B_FALSE;
3997 4066 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
3998 4067 tx_ring->ring_handle);
3999 4068 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4000 4069 }
4001 4070 }
4002 4071
4003 4072 #pragma inline(ixgbe_intr_other_work)
4004 4073 /*
4005 4074 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
4006 4075 */
4007 4076 static void
4008 4077 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
4009 4078 {
4010 4079 ASSERT(mutex_owned(&ixgbe->gen_lock));
4011 4080
4012 4081 /*
4013 4082 * handle link status change
4014 4083 */
4015 4084 if (eicr & IXGBE_EICR_LSC) {
4016 4085 ixgbe_driver_link_check(ixgbe);
4017 4086 ixgbe_get_hw_state(ixgbe);
4018 4087 }
4019 4088
4020 4089 /*
4021 4090 * check for fan failure on adapters with fans
4022 4091 */
4023 4092 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
4024 4093 (eicr & IXGBE_EICR_GPI_SDP1)) {
4025 4094 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
4026 4095
4027 4096 /*
4028 4097 * Disable the adapter interrupts
4029 4098 */
4030 4099 ixgbe_disable_adapter_interrupts(ixgbe);
4031 4100
4032 4101 /*
4033 4102 * Disable Rx/Tx units
4034 4103 */
4035 4104 (void) ixgbe_stop_adapter(&ixgbe->hw);
4036 4105
4037 4106 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
4038 4107 ixgbe_error(ixgbe,
4039 4108 "Problem: Network adapter has been stopped "
4040 4109 "because the fan has stopped.\n");
4041 4110 ixgbe_error(ixgbe,
4042 4111 "Action: Replace the adapter.\n");
4043 4112
4044 4113 /* re-enable the interrupt, which was automasked */
4045 4114 ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
4046 4115 }
4047 4116
4048 4117 /*
4049 4118 * Do SFP check for adapters with hot-plug capability
4050 4119 */
4051 4120 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
4052 4121 ((eicr & IXGBE_EICR_GPI_SDP1) || (eicr & IXGBE_EICR_GPI_SDP2))) {
4053 4122 ixgbe->eicr = eicr;
4054 4123 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
4055 4124 ixgbe_sfp_check, (void *)ixgbe,
4056 4125 DDI_NOSLEEP)) != DDI_SUCCESS) {
4057 4126 ixgbe_log(ixgbe, "No memory available to dispatch "
4058 4127 "taskq for SFP check");
4059 4128 }
4060 4129 }
4061 4130
4062 4131 /*
4063 4132 * Do over-temperature check for adapters with temp sensor
4064 4133 */
4065 4134 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
4066 4135 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
4067 4136 ixgbe->eicr = eicr;
4068 4137 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
4069 4138 ixgbe_overtemp_check, (void *)ixgbe,
4070 4139 DDI_NOSLEEP)) != DDI_SUCCESS) {
4071 4140 ixgbe_log(ixgbe, "No memory available to dispatch "
4072 4141 "taskq for overtemp check");
4073 4142 }
4074 4143 }
4075 4144 }
4076 4145
4077 4146 /*
4078 4147 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
4079 4148 */
4080 4149 static uint_t
4081 4150 ixgbe_intr_legacy(void *arg1, void *arg2)
4082 4151 {
4083 4152 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4084 4153 struct ixgbe_hw *hw = &ixgbe->hw;
4085 4154 ixgbe_tx_ring_t *tx_ring;
4086 4155 ixgbe_rx_ring_t *rx_ring;
4087 4156 uint32_t eicr;
4088 4157 mblk_t *mp;
4089 4158 boolean_t tx_reschedule;
4090 4159 uint_t result;
4091 4160
4092 4161 _NOTE(ARGUNUSED(arg2));
4093 4162
4094 4163 mutex_enter(&ixgbe->gen_lock);
4095 4164 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4096 4165 mutex_exit(&ixgbe->gen_lock);
4097 4166 return (DDI_INTR_UNCLAIMED);
4098 4167 }
4099 4168
4100 4169 mp = NULL;
4101 4170 tx_reschedule = B_FALSE;
4102 4171
4103 4172 /*
4104 4173 * Any bit set in eicr: claim this interrupt
4105 4174 */
4106 4175 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4107 4176
4108 4177 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4109 4178 mutex_exit(&ixgbe->gen_lock);
4110 4179 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4111 4180 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4112 4181 return (DDI_INTR_CLAIMED);
4113 4182 }
4114 4183
4115 4184 if (eicr) {
4116 4185 /*
4117 4186 * For legacy interrupt, we have only one interrupt,
4118 4187 * so we have only one rx ring and one tx ring enabled.
4119 4188 */
4120 4189 ASSERT(ixgbe->num_rx_rings == 1);
4121 4190 ASSERT(ixgbe->num_tx_rings == 1);
4122 4191
4123 4192 /*
4124 4193 * For legacy interrupt, rx rings[0] will use RTxQ[0].
4125 4194 */
4126 4195 if (eicr & 0x1) {
4127 4196 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
4128 4197 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4129 4198 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4130 4199 /*
4131 4200 * Clean the rx descriptors
4132 4201 */
4133 4202 rx_ring = &ixgbe->rx_rings[0];
4134 4203 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4135 4204 }
4136 4205
4137 4206 /*
4138 4207 * For legacy interrupt, tx rings[0] will use RTxQ[1].
4139 4208 */
4140 4209 if (eicr & 0x2) {
4141 4210 /*
4142 4211 * Recycle the tx descriptors
4143 4212 */
4144 4213 tx_ring = &ixgbe->tx_rings[0];
4145 4214 tx_ring->tx_recycle(tx_ring);
4146 4215
4147 4216 /*
4148 4217 * Schedule the re-transmit
4149 4218 */
4150 4219 tx_reschedule = (tx_ring->reschedule &&
4151 4220 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4152 4221 }
4153 4222
4154 4223 /* any interrupt type other than tx/rx */
4155 4224 if (eicr & ixgbe->capab->other_intr) {
4156 4225 switch (hw->mac.type) {
4157 4226 case ixgbe_mac_82598EB:
4158 4227 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4159 4228 break;
4160 4229
4161 4230 case ixgbe_mac_82599EB:
4231 + case ixgbe_mac_X540:
4162 4232 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4163 4233 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4164 4234 break;
4165 4235
4166 4236 default:
4167 4237 break;
4168 4238 }
4169 4239 ixgbe_intr_other_work(ixgbe, eicr);
4170 4240 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4171 4241 }
4172 4242
4173 4243 mutex_exit(&ixgbe->gen_lock);
4174 4244
4175 4245 result = DDI_INTR_CLAIMED;
4176 4246 } else {
4177 4247 mutex_exit(&ixgbe->gen_lock);
4178 4248
4179 4249 /*
4180 4250 * No interrupt cause bits set: don't claim this interrupt.
4181 4251 */
4182 4252 result = DDI_INTR_UNCLAIMED;
4183 4253 }
4184 4254
4185 4255 /* re-enable the interrupts which were automasked */
4186 4256 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4187 4257
4188 4258 /*
4189 4259 * Do the following work outside of the gen_lock
4190 4260 */
4191 4261 if (mp != NULL) {
4192 4262 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4193 4263 rx_ring->ring_gen_num);
4194 4264 }
4195 4265
4196 4266 if (tx_reschedule) {
4197 4267 tx_ring->reschedule = B_FALSE;
4198 4268 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
4199 4269 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4200 4270 }
4201 4271
4202 4272 return (result);
4203 4273 }
4204 4274
4205 4275 /*
4206 4276 * ixgbe_intr_msi - Interrupt handler for MSI.
4207 4277 */
4208 4278 static uint_t
4209 4279 ixgbe_intr_msi(void *arg1, void *arg2)
4210 4280 {
4211 4281 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4212 4282 struct ixgbe_hw *hw = &ixgbe->hw;
4213 4283 uint32_t eicr;
4214 4284
4215 4285 _NOTE(ARGUNUSED(arg2));
4216 4286
4217 4287 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4218 4288
4219 4289 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4220 4290 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4221 4291 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4222 4292 return (DDI_INTR_CLAIMED);
4223 4293 }
4224 4294
4225 4295 /*
4226 4296 * For MSI interrupt, we have only one vector,
4227 4297 * so we have only one rx ring and one tx ring enabled.
4228 4298 */
4229 4299 ASSERT(ixgbe->num_rx_rings == 1);
4230 4300 ASSERT(ixgbe->num_tx_rings == 1);
4231 4301
4232 4302 /*
4233 4303 * For MSI interrupt, rx rings[0] will use RTxQ[0].
4234 4304 */
4235 4305 if (eicr & 0x1) {
4236 4306 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4237 4307 }
4238 4308
4239 4309 /*
4240 4310 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4241 4311 */
4242 4312 if (eicr & 0x2) {
4243 4313 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4244 4314 }
4245 4315
4246 4316 /* any interrupt type other than tx/rx */
4247 4317 if (eicr & ixgbe->capab->other_intr) {
4248 4318 mutex_enter(&ixgbe->gen_lock);
4249 4319 switch (hw->mac.type) {
4250 4320 case ixgbe_mac_82598EB:
4251 4321 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4252 4322 break;
4253 4323
4254 4324 case ixgbe_mac_82599EB:
4325 + case ixgbe_mac_X540:
4255 4326 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4256 4327 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4257 4328 break;
4258 4329
4259 4330 default:
4260 4331 break;
4261 4332 }
4262 4333 ixgbe_intr_other_work(ixgbe, eicr);
4263 4334 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4264 4335 mutex_exit(&ixgbe->gen_lock);
4265 4336 }
4266 4337
4267 4338 /* re-enable the interrupts which were automasked */
4268 4339 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4269 4340
4270 4341 return (DDI_INTR_CLAIMED);
4271 4342 }
4272 4343
4273 4344 /*
4274 4345 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4275 4346 */
4276 4347 static uint_t
4277 4348 ixgbe_intr_msix(void *arg1, void *arg2)
4278 4349 {
4279 4350 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
4280 4351 ixgbe_t *ixgbe = vect->ixgbe;
4281 4352 struct ixgbe_hw *hw = &ixgbe->hw;
4282 4353 uint32_t eicr;
4283 4354 int r_idx = 0;
4284 4355
4285 4356 _NOTE(ARGUNUSED(arg2));
4286 4357
4287 4358 /*
4288 4359 * Clean each rx ring that has its bit set in the map
4289 4360 */
4290 4361 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
4291 4362 while (r_idx >= 0) {
4292 4363 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
4293 4364 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4294 4365 (ixgbe->num_rx_rings - 1));
4295 4366 }
4296 4367
4297 4368 /*
4298 4369 * Clean each tx ring that has its bit set in the map
4299 4370 */
4300 4371 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
4301 4372 while (r_idx >= 0) {
4302 4373 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
4303 4374 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4304 4375 (ixgbe->num_tx_rings - 1));
4305 4376 }
4306 4377
4307 4378
4308 4379 /*
4309 4380 * Clean other interrupt (link change) that has its bit set in the map
4310 4381 */
4311 4382 if (BT_TEST(vect->other_map, 0) == 1) {
4312 4383 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4313 4384
4314 4385 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4315 4386 DDI_FM_OK) {
4316 4387 ddi_fm_service_impact(ixgbe->dip,
4317 4388 DDI_SERVICE_DEGRADED);
4318 4389 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4319 4390 return (DDI_INTR_CLAIMED);
4320 4391 }
4321 4392
4322 4393 /*
4323 4394 * Check "other" cause bits: any interrupt type other than tx/rx
4324 4395 */
4325 4396 if (eicr & ixgbe->capab->other_intr) {
4326 4397 mutex_enter(&ixgbe->gen_lock);
4327 4398 switch (hw->mac.type) {
4328 4399 case ixgbe_mac_82598EB:
4329 4400 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4330 4401 ixgbe_intr_other_work(ixgbe, eicr);
4331 4402 break;
4332 4403
4333 4404 case ixgbe_mac_82599EB:
4405 + case ixgbe_mac_X540:
4334 4406 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4335 4407 ixgbe_intr_other_work(ixgbe, eicr);
4336 4408 break;
4337 4409
4338 4410 default:
4339 4411 break;
4340 4412 }
4341 4413 mutex_exit(&ixgbe->gen_lock);
4342 4414 }
4343 4415
4344 4416 /* re-enable the interrupts which were automasked */
4345 4417 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4346 4418 }
4347 4419
4348 4420 return (DDI_INTR_CLAIMED);
4349 4421 }
4350 4422
4351 4423 /*
4352 4424 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4353 4425 *
4354 4426  * Normal sequence is to try MSI-X; if not successful, try MSI;
4355 4427 * if not successful, try Legacy.
4356 4428 * ixgbe->intr_force can be used to force sequence to start with
4357 4429 * any of the 3 types.
4358 4430 * If MSI-X is not used, number of tx/rx rings is forced to 1.
4359 4431 */
4360 4432 static int
4361 4433 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4362 4434 {
4363 4435 dev_info_t *devinfo;
4364 4436 int intr_types;
4365 4437 int rc;
4366 4438
4367 4439 devinfo = ixgbe->dip;
4368 4440
4369 4441 /*
4370 4442 * Get supported interrupt types
4371 4443 */
4372 4444 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4373 4445
4374 4446 if (rc != DDI_SUCCESS) {
4375 4447 ixgbe_log(ixgbe,
4376 4448 "Get supported interrupt types failed: %d", rc);
4377 4449 return (IXGBE_FAILURE);
4378 4450 }
4379 4451 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
4380 4452
4381 4453 ixgbe->intr_type = 0;
4382 4454
4383 4455 /*
4384 4456 * Install MSI-X interrupts
4385 4457 */
4386 4458 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4387 4459 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
4388 4460 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
4389 4461 if (rc == IXGBE_SUCCESS)
4390 4462 return (IXGBE_SUCCESS);
4391 4463
4392 4464 ixgbe_log(ixgbe,
4393 4465 "Allocate MSI-X failed, trying MSI interrupts...");
4394 4466 }
4395 4467
4396 4468 /*
4397 4469 * MSI-X not used, force rings and groups to 1
4398 4470 */
4399 4471 ixgbe->num_rx_rings = 1;
4400 4472 ixgbe->num_rx_groups = 1;
4401 4473 ixgbe->num_tx_rings = 1;
4402 4474 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
4403 4475 ixgbe_log(ixgbe,
4404 4476 "MSI-X not used, force rings and groups number to 1");
4405 4477
4406 4478 /*
4407 4479 * Install MSI interrupts
4408 4480 */
4409 4481 if ((intr_types & DDI_INTR_TYPE_MSI) &&
4410 4482 (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
4411 4483 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
4412 4484 if (rc == IXGBE_SUCCESS)
4413 4485 return (IXGBE_SUCCESS);
4414 4486
4415 4487 ixgbe_log(ixgbe,
4416 4488 "Allocate MSI failed, trying Legacy interrupts...");
4417 4489 }
4418 4490
4419 4491 /*
4420 4492 * Install legacy interrupts
4421 4493 */
4422 4494 if (intr_types & DDI_INTR_TYPE_FIXED) {
4423 4495 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
4424 4496 if (rc == IXGBE_SUCCESS)
4425 4497 return (IXGBE_SUCCESS);
4426 4498
4427 4499 ixgbe_log(ixgbe,
4428 4500 "Allocate Legacy interrupts failed");
4429 4501 }
4430 4502
4431 4503 /*
4432 4504 * If none of the 3 types succeeded, return failure
4433 4505 */
4434 4506 return (IXGBE_FAILURE);
4435 4507 }
4436 4508
4437 4509 /*
4438 4510 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
4439 4511 *
4440 4512 * For legacy and MSI, only 1 handle is needed. For MSI-X,
4441 4513  * if fewer handles than the required minimum are available, return failure.
4442 4514 * Upon success, this maps the vectors to rx and tx rings for
4443 4515 * interrupts.
4444 4516 */
4445 4517 static int
4446 4518 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
4447 4519 {
4448 4520 dev_info_t *devinfo;
4449 4521 int request, count, actual;
4450 4522 int minimum;
4451 4523 int rc;
4452 4524 uint32_t ring_per_group;
4453 4525
4454 4526 devinfo = ixgbe->dip;
4455 4527
4456 4528 switch (intr_type) {
4457 4529 case DDI_INTR_TYPE_FIXED:
4458 4530 request = 1; /* Request 1 legacy interrupt handle */
4459 4531 minimum = 1;
4460 4532 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
4461 4533 break;
4462 4534
4463 4535 case DDI_INTR_TYPE_MSI:
4464 4536 request = 1; /* Request 1 MSI interrupt handle */
4465 4537 minimum = 1;
4466 4538 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
4467 4539 break;
4468 4540
4469 4541 case DDI_INTR_TYPE_MSIX:
4470 4542 /*
4471 4543 * Best number of vectors for the adapter is
4472 4544 * (# rx rings + # tx rings), however we will
4473 4545 * limit the request number.
4474 4546 */
4475 4547 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
4476 4548 if (request > ixgbe->capab->max_ring_vect)
4477 4549 request = ixgbe->capab->max_ring_vect;
4478 4550 minimum = 1;
4479 4551 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
4480 4552 break;
4481 4553
4482 4554 default:
4483 4555 ixgbe_log(ixgbe,
4484 4556 "invalid call to ixgbe_alloc_intr_handles(): %d\n",
4485 4557 intr_type);
4486 4558 return (IXGBE_FAILURE);
4487 4559 }
4488 4560 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d",
4489 4561 request, minimum);
4490 4562
4491 4563 /*
4492 4564 * Get number of supported interrupts
4493 4565 */
4494 4566 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4495 4567 if ((rc != DDI_SUCCESS) || (count < minimum)) {
4496 4568 ixgbe_log(ixgbe,
4497 4569 "Get interrupt number failed. Return: %d, count: %d",
4498 4570 rc, count);
4499 4571 return (IXGBE_FAILURE);
4500 4572 }
4501 4573 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
4502 4574
4503 4575 actual = 0;
4504 4576 ixgbe->intr_cnt = 0;
4505 4577 ixgbe->intr_cnt_max = 0;
4506 4578 ixgbe->intr_cnt_min = 0;
4507 4579
4508 4580 /*
4509 4581 * Allocate an array of interrupt handles
4510 4582 */
4511 4583 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
4512 4584 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
4513 4585
4514 4586 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
4515 4587 request, &actual, DDI_INTR_ALLOC_NORMAL);
4516 4588 if (rc != DDI_SUCCESS) {
4517 4589 ixgbe_log(ixgbe, "Allocate interrupts failed. "
4518 4590 "return: %d, request: %d, actual: %d",
4519 4591 rc, request, actual);
4520 4592 goto alloc_handle_fail;
4521 4593 }
4522 4594 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
4523 4595
4524 4596 /*
4525 4597 * upper/lower limit of interrupts
4526 4598 */
4527 4599 ixgbe->intr_cnt = actual;
4528 4600 ixgbe->intr_cnt_max = request;
4529 4601 ixgbe->intr_cnt_min = minimum;
4530 4602
4531 4603 /*
4532 4604 * rss number per group should not exceed the rx interrupt number,
4533 4605 * else need to adjust rx ring number.
4534 4606 */
4535 4607 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4536 4608 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
4537 4609 if (actual < ring_per_group) {
4538 4610 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
4539 4611 ixgbe_setup_vmdq_rss_conf(ixgbe);
4540 4612 }
4541 4613
4542 4614 /*
4543 4615 * Now we know the actual number of vectors. Here we map the vector
4544 4616 * to other, rx rings and tx ring.
4545 4617 */
4546 4618 if (actual < minimum) {
4547 4619 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
4548 4620 actual);
4549 4621 goto alloc_handle_fail;
4550 4622 }
4551 4623
4552 4624 /*
4553 4625 * Get priority for first vector, assume remaining are all the same
4554 4626 */
4555 4627 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
4556 4628 if (rc != DDI_SUCCESS) {
4557 4629 ixgbe_log(ixgbe,
4558 4630 "Get interrupt priority failed: %d", rc);
4559 4631 goto alloc_handle_fail;
4560 4632 }
4561 4633
4562 4634 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
4563 4635 if (rc != DDI_SUCCESS) {
4564 4636 ixgbe_log(ixgbe,
4565 4637 "Get interrupt cap failed: %d", rc);
4566 4638 goto alloc_handle_fail;
4567 4639 }
4568 4640
4569 4641 ixgbe->intr_type = intr_type;
4570 4642
4571 4643 return (IXGBE_SUCCESS);
4572 4644
4573 4645 alloc_handle_fail:
4574 4646 ixgbe_rem_intrs(ixgbe);
4575 4647
4576 4648 return (IXGBE_FAILURE);
4577 4649 }
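
One subtlety above is the post-allocation adjustment: if ddi_intr_alloc() grants fewer vectors than there are RSS rings per group, the rx ring count is shrunk so each group gets at most one ring per vector. A worked model of that arithmetic, with illustrative numbers:

	#include <stdio.h>

	int
	main(void)
	{
		int num_rx_groups = 2;
		int num_rx_rings = 8;	/* 4 RSS rings per group requested */
		int actual = 3;		/* vectors actually granted (illustrative) */
		int ring_per_group = num_rx_rings / num_rx_groups;

		if (actual < ring_per_group)
			num_rx_rings = num_rx_groups * actual;

		/* 2 groups x 3 vectors -> 6 rx rings */
		printf("rx rings after adjustment: %d\n", num_rx_rings);
		return (0);
	}
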
4578 4650
4579 4651 /*
4580 4652 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
4581 4653 *
4582 4654 * Before adding the interrupt handlers, the interrupt vectors have
4583 4655 * been allocated, and the rx/tx rings have also been allocated.
4584 4656 */
4585 4657 static int
4586 4658 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
4587 4659 {
4588 4660 int vector = 0;
4589 4661 int rc;
4590 4662
4591 4663 switch (ixgbe->intr_type) {
4592 4664 case DDI_INTR_TYPE_MSIX:
4593 4665 /*
4594 4666 * Add interrupt handler for all vectors
4595 4667 */
4596 4668 for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
4597 4669 /*
4598 4670 * install pointer to vect_map[vector]
4599 4671 */
4600 4672 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4601 4673 (ddi_intr_handler_t *)ixgbe_intr_msix,
4602 4674 (void *)&ixgbe->vect_map[vector], NULL);
4603 4675
4604 4676 if (rc != DDI_SUCCESS) {
4605 4677 ixgbe_log(ixgbe,
4606 4678 "Add interrupt handler failed. "
4607 4679 "return: %d, vector: %d", rc, vector);
4608 4680 for (vector--; vector >= 0; vector--) {
4609 4681 (void) ddi_intr_remove_handler(
4610 4682 ixgbe->htable[vector]);
4611 4683 }
4612 4684 return (IXGBE_FAILURE);
4613 4685 }
4614 4686 }
4615 4687
4616 4688 break;
4617 4689
4618 4690 case DDI_INTR_TYPE_MSI:
4619 4691 /*
4620 4692 * Add interrupt handlers for the only vector
4621 4693 */
4622 4694 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4623 4695 (ddi_intr_handler_t *)ixgbe_intr_msi,
4624 4696 (void *)ixgbe, NULL);
4625 4697
4626 4698 if (rc != DDI_SUCCESS) {
4627 4699 ixgbe_log(ixgbe,
4628 4700 "Add MSI interrupt handler failed: %d", rc);
4629 4701 return (IXGBE_FAILURE);
4630 4702 }
4631 4703
4632 4704 break;
4633 4705
4634 4706 case DDI_INTR_TYPE_FIXED:
4635 4707 /*
4636 4708 * Add interrupt handlers for the only vector
4637 4709 */
4638 4710 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4639 4711 (ddi_intr_handler_t *)ixgbe_intr_legacy,
4640 4712 (void *)ixgbe, NULL);
4641 4713
4642 4714 if (rc != DDI_SUCCESS) {
4643 4715 ixgbe_log(ixgbe,
4644 4716 "Add legacy interrupt handler failed: %d", rc);
4645 4717 return (IXGBE_FAILURE);
4646 4718 }
4647 4719
4648 4720 break;
4649 4721
4650 4722 default:
4651 4723 return (IXGBE_FAILURE);
4652 4724 }
4653 4725
4654 4726 return (IXGBE_SUCCESS);
4655 4727 }
4656 4728
4657 4729 #pragma inline(ixgbe_map_rxring_to_vector)
4658 4730 /*
4659 4731 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
4660 4732 */
4661 4733 static void
4662 4734 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
4663 4735 {
4664 4736 /*
4665 4737 * Set bit in map
4666 4738 */
4667 4739 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4668 4740
4669 4741 /*
4670 4742 * Count bits set
4671 4743 */
4672 4744 ixgbe->vect_map[v_idx].rxr_cnt++;
4673 4745
4674 4746 /*
4675 4747 * Remember bit position
4676 4748 */
4677 4749 ixgbe->rx_rings[r_idx].intr_vector = v_idx;
4678 4750 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
4679 4751 }
4680 4752
4681 4753 #pragma inline(ixgbe_map_txring_to_vector)
4682 4754 /*
4683 4755 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
4684 4756 */
4685 4757 static void
4686 4758 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
4687 4759 {
4688 4760 /*
4689 4761 * Set bit in map
4690 4762 */
4691 4763 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
4692 4764
4693 4765 /*
4694 4766 * Count bits set
4695 4767 */
4696 4768 ixgbe->vect_map[v_idx].txr_cnt++;
4697 4769
4698 4770 /*
4699 4771 * Remember bit position
4700 4772 */
4701 4773 ixgbe->tx_rings[t_idx].intr_vector = v_idx;
4702 4774 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
4703 4775 }
4704 4776
4705 4777 /*
4706 4778 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
4707 4779 * allocation register (IVAR).
4708 4780 * cause:
4709 4781 * -1 : other cause
4710 4782 * 0 : rx
4711 4783 * 1 : tx
4712 4784 */
4713 4785 static void
4714 4786 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4715 4787 int8_t cause)
4716 4788 {
4717 4789 struct ixgbe_hw *hw = &ixgbe->hw;
4718 4790 u32 ivar, index;
4719 4791
4720 4792 switch (hw->mac.type) {
4721 4793 case ixgbe_mac_82598EB:
4722 4794 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4723 4795 if (cause == -1) {
4724 4796 cause = 0;
4725 4797 }
4726 4798 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4727 4799 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4728 4800 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4729 4801 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4730 4802 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4731 4803 break;
4732 4804
4733 4805 case ixgbe_mac_82599EB:
4806 + case ixgbe_mac_X540:
4734 4807 if (cause == -1) {
4735 4808 /* other causes */
4736 4809 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4737 4810 index = (intr_alloc_entry & 1) * 8;
4738 4811 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4739 4812 ivar &= ~(0xFF << index);
4740 4813 ivar |= (msix_vector << index);
4741 4814 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4742 4815 } else {
4743 4816 /* tx or rx causes */
4744 4817 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4745 4818 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4746 4819 ivar = IXGBE_READ_REG(hw,
4747 4820 IXGBE_IVAR(intr_alloc_entry >> 1));
4748 4821 ivar &= ~(0xFF << index);
4749 4822 ivar |= (msix_vector << index);
4750 4823 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4751 4824 ivar);
4752 4825 }
4753 4826 break;
4754 4827
4755 4828 default:
4756 4829 break;
4757 4830 }
4758 4831 }
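
The shifting above packs one 8-bit vector field per cause into each 32-bit IVAR register; on 82599/X540, IVAR(n) holds the rx and tx fields for queue entries 2n and 2n+1. A standalone check of that encoding (the entry numbers are illustrative):

	#include <stdio.h>

	/* 82599/X540 layout: IVAR(entry >> 1) holds rx and tx fields for two queues. */
	static void
	ivar_slot(unsigned entry, unsigned cause /* 0 = rx, 1 = tx */)
	{
		unsigned reg = entry >> 1;
		unsigned shift = (16 * (entry & 1)) + (8 * cause);

		printf("entry %u %s -> IVAR(%u) bits %u:%u\n",
		    entry, cause ? "tx" : "rx", reg, shift + 7, shift);
	}

	int
	main(void)
	{
		ivar_slot(4, 0);	/* IVAR(2) bits 7:0   */
		ivar_slot(4, 1);	/* IVAR(2) bits 15:8  */
		ivar_slot(5, 0);	/* IVAR(2) bits 23:16 */
		ivar_slot(5, 1);	/* IVAR(2) bits 31:24 */
		return (0);
	}
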
4759 4832
4760 4833 /*
4761 4834 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
4762 4835 * given interrupt vector allocation register (IVAR).
4763 4836 * cause:
4764 4837 * -1 : other cause
4765 4838 * 0 : rx
4766 4839 * 1 : tx
4767 4840 */
4768 4841 static void
4769 4842 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4770 4843 {
4771 4844 struct ixgbe_hw *hw = &ixgbe->hw;
4772 4845 u32 ivar, index;
4773 4846
4774 4847 switch (hw->mac.type) {
4775 4848 case ixgbe_mac_82598EB:
4776 4849 if (cause == -1) {
4777 4850 cause = 0;
4778 4851 }
4779 4852 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4780 4853 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4781 4854 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4782 4855 (intr_alloc_entry & 0x3)));
4783 4856 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4784 4857 break;
4785 4858
4786 4859 case ixgbe_mac_82599EB:
4860 + case ixgbe_mac_X540:
4787 4861 if (cause == -1) {
4788 4862 /* other causes */
4789 4863 index = (intr_alloc_entry & 1) * 8;
4790 4864 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4791 4865 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4792 4866 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4793 4867 } else {
4794 4868 /* tx or rx causes */
4795 4869 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4796 4870 ivar = IXGBE_READ_REG(hw,
4797 4871 IXGBE_IVAR(intr_alloc_entry >> 1));
4798 4872 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4799 4873 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4800 4874 ivar);
4801 4875 }
4802 4876 break;
4803 4877
4804 4878 default:
4805 4879 break;
4806 4880 }
4807 4881 }
4808 4882
4809 4883 /*
4810 4884  * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
4811 4885 * given interrupt vector allocation register (IVAR).
4812 4886 * cause:
4813 4887 * -1 : other cause
4814 4888 * 0 : rx
4815 4889 * 1 : tx
4816 4890 */
4817 4891 static void
4818 4892 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4819 4893 {
4820 4894 struct ixgbe_hw *hw = &ixgbe->hw;
4821 4895 u32 ivar, index;
4822 4896
4823 4897 switch (hw->mac.type) {
4824 4898 case ixgbe_mac_82598EB:
4825 4899 if (cause == -1) {
4826 4900 cause = 0;
4827 4901 }
4828 4902 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4829 4903 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4830 4904 		ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
4831 4905 (intr_alloc_entry & 0x3)));
4832 4906 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4833 4907 break;
4834 4908
4835 4909 case ixgbe_mac_82599EB:
4910 + case ixgbe_mac_X540:
4836 4911 if (cause == -1) {
4837 4912 /* other causes */
4838 4913 index = (intr_alloc_entry & 1) * 8;
4839 4914 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4840 4915 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4841 4916 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4842 4917 } else {
4843 4918 /* tx or rx causes */
4844 4919 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4845 4920 ivar = IXGBE_READ_REG(hw,
4846 4921 IXGBE_IVAR(intr_alloc_entry >> 1));
4847 4922 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4848 4923 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4849 4924 ivar);
4850 4925 }
4851 4926 break;
4852 4927
4853 4928 default:
4854 4929 break;
4855 4930 }
4856 4931 }
4857 4932
4858 4933 /*
4859 4934 * Convert the rx ring index driver maintained to the rx ring index
4860 4935 * in h/w.
4861 4936 */
4862 4937 static uint32_t
4863 4938 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
4864 4939 {
4865 4940
4866 4941 struct ixgbe_hw *hw = &ixgbe->hw;
4867 4942 uint32_t rx_ring_per_group, hw_rx_index;
4868 4943
4869 4944 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
4870 4945 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
4871 4946 return (sw_rx_index);
4872 4947 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
4873 4948 switch (hw->mac.type) {
4874 4949 case ixgbe_mac_82598EB:
4875 4950 return (sw_rx_index);
4876 4951
4877 4952 case ixgbe_mac_82599EB:
4953 + case ixgbe_mac_X540:
4878 4954 return (sw_rx_index * 2);
4879 4955
4880 4956 default:
4881 4957 break;
4882 4958 }
4883 4959 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
4884 4960 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4885 4961
4886 4962 switch (hw->mac.type) {
4887 4963 case ixgbe_mac_82598EB:
4888 4964 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
4889 4965 16 + (sw_rx_index % rx_ring_per_group);
4890 4966 return (hw_rx_index);
4891 4967
4892 4968 case ixgbe_mac_82599EB:
4969 + case ixgbe_mac_X540:
4893 4970 if (ixgbe->num_rx_groups > 32) {
4894 4971 hw_rx_index = (sw_rx_index /
4895 4972 rx_ring_per_group) * 2 +
4896 4973 (sw_rx_index % rx_ring_per_group);
4897 4974 } else {
4898 4975 hw_rx_index = (sw_rx_index /
4899 4976 rx_ring_per_group) * 4 +
4900 4977 (sw_rx_index % rx_ring_per_group);
4901 4978 }
4902 4979 return (hw_rx_index);
4903 4980
4904 4981 default:
4905 4982 break;
4906 4983 }
4907 4984 }
4908 4985
4909 4986 /*
4910 4987 * Should never reach. Just to make compiler happy.
4911 4988 */
4912 4989 return (sw_rx_index);
4913 4990 }
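
A quick standalone check of the VMDQ+RSS branch above for 82599/X540 with 32 or fewer groups, where each group starts on a 4-ring hardware boundary (the ring counts are illustrative):

	#include <stdio.h>

	/* 82599/X540, VMDQ+RSS, num_rx_groups <= 32. */
	static unsigned
	hw_rx_index(unsigned sw, unsigned ring_per_group)
	{
		return ((sw / ring_per_group) * 4 + (sw % ring_per_group));
	}

	int
	main(void)
	{
		unsigned ring_per_group = 2;	/* e.g. 4 groups x 2 RSS rings */
		unsigned sw;

		for (sw = 0; sw < 8; sw++)
			printf("sw %u -> hw %u\n", sw, hw_rx_index(sw, ring_per_group));
		/* Prints 0,1,4,5,8,9,12,13: each group skips to the next 4-ring slot. */
		return (0);
	}
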
4914 4991
4915 4992 /*
4916 4993 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
4917 4994 *
4918 4995 * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt
4919 4996 * to vector[0 - (intr_cnt -1)].
4920 4997 */
4921 4998 static int
4922 4999 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
4923 5000 {
4924 5001 int i, vector = 0;
4925 5002
4926 5003 /* initialize vector map */
4927 5004 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
4928 5005 for (i = 0; i < ixgbe->intr_cnt; i++) {
4929 5006 ixgbe->vect_map[i].ixgbe = ixgbe;
4930 5007 }
4931 5008
4932 5009 /*
4933 5010 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
4934 5011 * tx rings[0] on RTxQ[1].
4935 5012 */
4936 5013 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4937 5014 ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
4938 5015 ixgbe_map_txring_to_vector(ixgbe, 0, 1);
4939 5016 return (IXGBE_SUCCESS);
4940 5017 }
4941 5018
4942 5019 /*
4943 5020 * Interrupts/vectors mapping for MSI-X
4944 5021 */
4945 5022
4946 5023 /*
4947 5024 * Map other interrupt to vector 0,
4948 5025 * Set bit in map and count the bits set.
4949 5026 */
4950 5027 BT_SET(ixgbe->vect_map[vector].other_map, 0);
4951 5028 ixgbe->vect_map[vector].other_cnt++;
4952 5029
4953 5030 /*
4954 5031 * Map rx ring interrupts to vectors
4955 5032 */
4956 5033 for (i = 0; i < ixgbe->num_rx_rings; i++) {
4957 5034 ixgbe_map_rxring_to_vector(ixgbe, i, vector);
4958 5035 vector = (vector +1) % ixgbe->intr_cnt;
4959 5036 }
4960 5037
4961 5038 /*
4962 5039 * Map tx ring interrupts to vectors
4963 5040 */
4964 5041 for (i = 0; i < ixgbe->num_tx_rings; i++) {
4965 5042 ixgbe_map_txring_to_vector(ixgbe, i, vector);
4966 5043 vector = (vector +1) % ixgbe->intr_cnt;
4967 5044 }
4968 5045
4969 5046 return (IXGBE_SUCCESS);
4970 5047 }
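
The MSI-X mapping above is a simple round-robin over the allocated vectors: rx rings are assigned first, starting at vector 0 (which also carries the "other" cause), and tx rings continue from wherever the rx pass left off. A model run with illustrative counts:

	#include <stdio.h>

	int
	main(void)
	{
		int intr_cnt = 3, num_rx = 4, num_tx = 2;
		int vector = 0, i;

		/* "other" goes on vector 0, then rx and tx rings round-robin. */
		printf("other -> v0\n");
		for (i = 0; i < num_rx; i++) {
			printf("rx[%d] -> v%d\n", i, vector);
			vector = (vector + 1) % intr_cnt;
		}
		for (i = 0; i < num_tx; i++) {
			printf("tx[%d] -> v%d\n", i, vector);
			vector = (vector + 1) % intr_cnt;
		}
		/* rx: v0,v1,v2,v0; tx: v1,v2 */
		return (0);
	}
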
4971 5048
4972 5049 /*
4973 5050 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
4974 5051 *
4975 5052 * This relies on ring/vector mapping already set up in the
4976 5053 * vect_map[] structures
4977 5054 */
4978 5055 static void
4979 5056 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
4980 5057 {
4981 5058 struct ixgbe_hw *hw = &ixgbe->hw;
4982 5059 ixgbe_intr_vector_t *vect; /* vector bitmap */
4983 5060 int r_idx; /* ring index */
4984 5061 int v_idx; /* vector index */
4985 5062 uint32_t hw_index;
4986 5063
4987 5064 /*
4988 5065 * Clear any previous entries
4989 5066 */
4990 5067 switch (hw->mac.type) {
4991 5068 case ixgbe_mac_82598EB:
4992 5069 for (v_idx = 0; v_idx < 25; v_idx++)
4993 5070 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4994 5071 break;
4995 5072
4996 5073 case ixgbe_mac_82599EB:
5074 + case ixgbe_mac_X540:
4997 5075 for (v_idx = 0; v_idx < 64; v_idx++)
4998 5076 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4999 5077 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5000 5078 break;
5001 5079
5002 5080 default:
5003 5081 break;
5004 5082 }
5005 5083
5006 5084 /*
5007 5085 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5008 5086 * tx rings[0] will use RTxQ[1].
5009 5087 */
5010 5088 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5011 5089 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5012 5090 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5013 5091 return;
5014 5092 }
5015 5093
5016 5094 /*
5017 5095 * For MSI-X interrupt, "Other" is always on vector[0].
5018 5096 */
5019 5097 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
5020 5098
5021 5099 /*
5022 5100 * For each interrupt vector, populate the IVAR table
5023 5101 */
5024 5102 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
5025 5103 vect = &ixgbe->vect_map[v_idx];
5026 5104
5027 5105 /*
5028 5106 * For each rx ring bit set
5029 5107 */
5030 5108 r_idx = bt_getlowbit(vect->rx_map, 0,
5031 5109 (ixgbe->num_rx_rings - 1));
5032 5110
5033 5111 while (r_idx >= 0) {
5034 5112 hw_index = ixgbe->rx_rings[r_idx].hw_index;
5035 5113 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
5036 5114 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
5037 5115 (ixgbe->num_rx_rings - 1));
5038 5116 }
5039 5117
5040 5118 /*
5041 5119 * For each tx ring bit set
5042 5120 */
5043 5121 r_idx = bt_getlowbit(vect->tx_map, 0,
5044 5122 (ixgbe->num_tx_rings - 1));
5045 5123
5046 5124 while (r_idx >= 0) {
5047 5125 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
5048 5126 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
5049 5127 (ixgbe->num_tx_rings - 1));
5050 5128 }
5051 5129 }
5052 5130 }
5053 5131
5054 5132 /*
5055 5133 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
5056 5134 */
5057 5135 static void
5058 5136 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
5059 5137 {
5060 5138 int i;
5061 5139 int rc;
5062 5140
5063 5141 for (i = 0; i < ixgbe->intr_cnt; i++) {
5064 5142 rc = ddi_intr_remove_handler(ixgbe->htable[i]);
5065 5143 if (rc != DDI_SUCCESS) {
5066 5144 IXGBE_DEBUGLOG_1(ixgbe,
5067 5145 "Remove intr handler failed: %d", rc);
5068 5146 }
5069 5147 }
5070 5148 }
5071 5149
5072 5150 /*
5073 5151 * ixgbe_rem_intrs - Remove the allocated interrupts.
5074 5152 */
5075 5153 static void
5076 5154 ixgbe_rem_intrs(ixgbe_t *ixgbe)
5077 5155 {
5078 5156 int i;
5079 5157 int rc;
5080 5158
5081 5159 for (i = 0; i < ixgbe->intr_cnt; i++) {
5082 5160 rc = ddi_intr_free(ixgbe->htable[i]);
5083 5161 if (rc != DDI_SUCCESS) {
5084 5162 IXGBE_DEBUGLOG_1(ixgbe,
5085 5163 "Free intr failed: %d", rc);
5086 5164 }
5087 5165 }
5088 5166
5089 5167 kmem_free(ixgbe->htable, ixgbe->intr_size);
5090 5168 ixgbe->htable = NULL;
5091 5169 }
5092 5170
5093 5171 /*
5094 5172 * ixgbe_enable_intrs - Enable all the ddi interrupts.
5095 5173 */
5096 5174 static int
5097 5175 ixgbe_enable_intrs(ixgbe_t *ixgbe)
5098 5176 {
5099 5177 int i;
5100 5178 int rc;
5101 5179
5102 5180 /*
5103 5181 * Enable interrupts
5104 5182 */
5105 5183 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5106 5184 /*
5107 5185 * Call ddi_intr_block_enable() for MSI
5108 5186 */
5109 5187 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
5110 5188 if (rc != DDI_SUCCESS) {
5111 5189 ixgbe_log(ixgbe,
5112 5190 "Enable block intr failed: %d", rc);
5113 5191 return (IXGBE_FAILURE);
5114 5192 }
5115 5193 } else {
5116 5194 /*
5117 5195 * Call ddi_intr_enable() for Legacy/MSI non block enable
5118 5196 */
5119 5197 for (i = 0; i < ixgbe->intr_cnt; i++) {
5120 5198 rc = ddi_intr_enable(ixgbe->htable[i]);
5121 5199 if (rc != DDI_SUCCESS) {
5122 5200 ixgbe_log(ixgbe,
5123 5201 "Enable intr failed: %d", rc);
5124 5202 return (IXGBE_FAILURE);
5125 5203 }
5126 5204 }
5127 5205 }
5128 5206
5129 5207 return (IXGBE_SUCCESS);
5130 5208 }
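
The DDI_INTR_FLAG_BLOCK test above assumes ixgbe->intr_cap was captured
when the vectors were allocated. A minimal sketch of the usual idiom,
assuming htable[0] already holds an allocated handle:

    int intr_cap;

    /* Ask the DDI which capabilities the allocated vectors carry. */
    if (ddi_intr_get_cap(ixgbe->htable[0], &intr_cap) != DDI_SUCCESS)
        intr_cap = 0;
    ixgbe->intr_cap = intr_cap;    /* e.g. DDI_INTR_FLAG_BLOCK */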
5131 5209
5132 5210 /*
5133 5211 * ixgbe_disable_intrs - Disable all the interrupts.
5134 5212 */
5135 5213 static int
5136 5214 ixgbe_disable_intrs(ixgbe_t *ixgbe)
5137 5215 {
5138 5216 int i;
5139 5217 int rc;
5140 5218
5141 5219 /*
5142 5220 * Disable all interrupts
5143 5221 */
5144 5222 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5145 5223 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
5146 5224 if (rc != DDI_SUCCESS) {
5147 5225 ixgbe_log(ixgbe,
5148 5226 "Disable block intr failed: %d", rc);
5149 5227 return (IXGBE_FAILURE);
5150 5228 }
5151 5229 } else {
5152 5230 for (i = 0; i < ixgbe->intr_cnt; i++) {
5153 5231 rc = ddi_intr_disable(ixgbe->htable[i]);
5154 5232 if (rc != DDI_SUCCESS) {
5155 5233 ixgbe_log(ixgbe,
5156 5234 "Disable intr failed: %d", rc);
5157 5235 return (IXGBE_FAILURE);
5158 5236 }
5159 5237 }
5160 5238 }
5161 5239
5162 5240 return (IXGBE_SUCCESS);
5163 5241 }
5164 5242
5165 5243 /*
5166 5244 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
5167 5245 */
5168 5246 static void
5169 5247 ixgbe_get_hw_state(ixgbe_t *ixgbe)
5170 5248 {
5171 5249 struct ixgbe_hw *hw = &ixgbe->hw;
5172 5250 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
5173 5251 boolean_t link_up = B_FALSE;
5174 5252 uint32_t pcs1g_anlp = 0;
5175 5253 uint32_t pcs1g_ana = 0;
5176 5254 boolean_t autoneg = B_FALSE;
5177 5255
5178 5256 ASSERT(mutex_owned(&ixgbe->gen_lock));
5179 5257 ixgbe->param_lp_1000fdx_cap = 0;
5180 5258 ixgbe->param_lp_100fdx_cap = 0;
5181 5259
5182 5260 /* check for link, don't wait */
5183 5261 (void) ixgbe_check_link(hw, &speed, &link_up, false);
5184 5262 pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
5185 5263
5186 5264 if (link_up) {
5187 5265 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
5188 5266
5189 5267 ixgbe->param_lp_1000fdx_cap =
5190 5268 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5191 5269 ixgbe->param_lp_100fdx_cap =
5192 5270 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5193 5271 }
5194 5272
5195 5273 (void) ixgbe_get_link_capabilities(hw, &speed, &autoneg);
5196 5274
5197 5275 ixgbe->param_adv_1000fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
5198 5276 (speed & IXGBE_LINK_SPEED_1GB_FULL)) ? 1 : 0;
5199 5277 ixgbe->param_adv_100fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
5200 5278 (speed & IXGBE_LINK_SPEED_100_FULL)) ? 1 : 0;
5201 5279 }
5202 5280
5203 5281 /*
5204 5282 * ixgbe_get_driver_control - Notify that driver is in control of device.
5205 5283 */
5206 5284 static void
5207 5285 ixgbe_get_driver_control(struct ixgbe_hw *hw)
5208 5286 {
5209 5287 uint32_t ctrl_ext;
5210 5288
5211 5289 /*
5212 5290 * Notify firmware that driver is in control of device
5213 5291 */
5214 5292 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5215 5293 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
5216 5294 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5217 5295 }
5218 5296
5219 5297 /*
5220 5298 * ixgbe_release_driver_control - Notify that driver is no longer in control
5221 5299 * of device.
5222 5300 */
5223 5301 static void
5224 5302 ixgbe_release_driver_control(struct ixgbe_hw *hw)
5225 5303 {
5226 5304 uint32_t ctrl_ext;
5227 5305
5228 5306 /*
5229 5307 * Notify firmware that driver is no longer in control of device
5230 5308 */
5231 5309 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5232 5310 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
5233 5311 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5234 5312 }
5235 5313
5236 5314 /*
5237 5315 * ixgbe_atomic_reserve - Atomic decrease operation.
5238 5316 */
5239 5317 int
5240 5318 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
5241 5319 {
5242 5320 uint32_t oldval;
5243 5321 uint32_t newval;
5244 5322
5245 5323 /*
5246 5324 * ATOMICALLY
5247 5325 */
5248 5326 do {
5249 5327 oldval = *count_p;
5250 5328 if (oldval < n)
5251 5329 return (-1);
5252 5330 newval = oldval - n;
5253 5331 } while (atomic_cas_32(count_p, oldval, newval) != oldval);
5254 5332
5255 5333 return (newval);
5256 5334 }
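
A typical use of ixgbe_atomic_reserve() is taking transmit descriptors
from a free count without blocking; the wrapper below is a sketch, and
tbd_free stands in for a ring's free-descriptor counter:

    /*
     * Either all desc_num descriptors are reserved, or none are and
     * the caller can punt the packet back to the stack for a retry.
     */
    static boolean_t
    example_reserve_descs(uint32_t *tbd_free, uint32_t desc_num)
    {
        return (ixgbe_atomic_reserve(tbd_free, desc_num) < 0 ?
            B_FALSE : B_TRUE);
    }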
5257 5335
5258 5336 /*
5259 5337 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5260 5338 */
5261 5339 static uint8_t *
5262 5340 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
5263 5341 {
5264 5342 uint8_t *addr = *upd_ptr;
5265 5343 uint8_t *new_ptr;
5266 5344
5267 5345 _NOTE(ARGUNUSED(hw));
5268 5346 _NOTE(ARGUNUSED(vmdq));
5269 5347
5270 5348 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
5271 5349 *upd_ptr = new_ptr;
5272 5350 return (addr);
5273 5351 }
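
The iterator exists to be handed to the shared code when the multicast
table is pushed to hardware. A hedged sketch of the call site; the
shared-code signature shown follows the contemporary common code and
may differ by version:

    /*
     * Sketch: push the flat multicast table to hardware; the shared
     * code calls ixgbe_mc_table_itr() once per address to walk it.
     */
    (void) ixgbe_update_mc_addr_list(hw,
        (uint8_t *)ixgbe->mcast_table, ixgbe->mcast_count,
        ixgbe_mc_table_itr, TRUE);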
5274 5352
5275 5353 /*
5276 5354 * FMA support
5277 5355 */
5278 5356 int
5279 5357 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
5280 5358 {
5281 5359 ddi_fm_error_t de;
5282 5360
5283 5361 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5284 5362 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5285 5363 return (de.fme_status);
5286 5364 }
5287 5365
5288 5366 int
5289 5367 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
5290 5368 {
5291 5369 ddi_fm_error_t de;
5292 5370
5293 5371 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5294 5372 return (de.fme_status);
5295 5373 }
5296 5374
5297 5375 /*
5298 5376 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
5299 5377 */
5300 5378 static int
5301 5379 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5302 5380 {
5303 5381 _NOTE(ARGUNUSED(impl_data));
5304 5382 /*
5305 5383 * As the driver can always deal with an error in any DMA or
5306 5384 * access handle, we can just return the fme_status value.
5307 5385 */
5308 5386 pci_ereport_post(dip, err, NULL);
5309 5387 return (err->fme_status);
5310 5388 }
5311 5389
5312 5390 static void
5313 5391 ixgbe_fm_init(ixgbe_t *ixgbe)
5314 5392 {
5315 5393 ddi_iblock_cookie_t iblk;
5316 5394 int fma_dma_flag;
5317 5395
5318 5396 /*
5319 5397 * Only register with IO Fault Services if we have some capability
5320 5398 */
5321 5399 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5322 5400 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5323 5401 } else {
5324 5402 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5325 5403 }
5326 5404
5327 5405 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5328 5406 fma_dma_flag = 1;
5329 5407 } else {
5330 5408 fma_dma_flag = 0;
5331 5409 }
5332 5410
5333 5411 ixgbe_set_fma_flags(fma_dma_flag);
5334 5412
5335 5413 if (ixgbe->fm_capabilities) {
5336 5414
5337 5415 /*
5338 5416 * Register capabilities with IO Fault Services
5339 5417 */
5340 5418 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
5341 5419
5342 5420 /*
5343 5421 * Initialize pci ereport capabilities if ereport capable
5344 5422 */
5345 5423 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5346 5424 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5347 5425 pci_ereport_setup(ixgbe->dip);
5348 5426
5349 5427 /*
5350 5428 * Register error callback if error callback capable
5351 5429 */
5352 5430 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5353 5431 ddi_fm_handler_register(ixgbe->dip,
5354 5432 ixgbe_fm_error_cb, (void *)ixgbe);
5355 5433 }
5356 5434 }
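
ixgbe_fm_init() expects ixgbe->fm_capabilities to be populated before it
runs. A minimal sketch of the conventional "fm-capable" property lookup;
the default mask and its placement within attach are assumptions here:

    /*
     * Conventional idiom: let the "fm-capable" driver property
     * override a fully-capable default mask.
     */
    ixgbe->fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
        DDI_PROP_DONTPASS, "fm-capable",
        DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
        DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);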
5357 5435
5358 5436 static void
5359 5437 ixgbe_fm_fini(ixgbe_t *ixgbe)
5360 5438 {
5361 5439 /*
5362 5440 * Only unregister FMA capabilities if they are registered
5363 5441 */
5364 5442 if (ixgbe->fm_capabilities) {
5365 5443
5366 5444 /*
5367 5445 * Release any resources allocated by pci_ereport_setup()
5368 5446 */
5369 5447 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5370 5448 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5371 5449 pci_ereport_teardown(ixgbe->dip);
5372 5450
5373 5451 /*
5374 5452 * Un-register error callback if error callback capable
5375 5453 */
5376 5454 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5377 5455 ddi_fm_handler_unregister(ixgbe->dip);
5378 5456
5379 5457 /*
5380 5458 * Unregister from IO Fault Service
5381 5459 */
5382 5460 ddi_fm_fini(ixgbe->dip);
5383 5461 }
5384 5462 }
5385 5463
5386 5464 void
5387 5465 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
5388 5466 {
5389 5467 uint64_t ena;
5390 5468 char buf[FM_MAX_CLASS];
5391 5469
5392 5470 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5393 5471 ena = fm_ena_generate(0, FM_ENA_FMT1);
5394 5472 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
5395 5473 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
5396 5474 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5397 5475 }
5398 5476 }
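
Callers normally pair the ereport with a service-impact report so the
fault manager sees both the error and its effect; a short sketch:

    /* Sketch: flag an invalid device state and degrade the service. */
    ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
    ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);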
5399 5477
5400 5478 static int
5401 5479 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
5402 5480 {
5403 5481 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
5404 5482
5405 5483 mutex_enter(&rx_ring->rx_lock);
5406 5484 rx_ring->ring_gen_num = mr_gen_num;
5407 5485 mutex_exit(&rx_ring->rx_lock);
5408 5486 return (0);
5409 5487 }
5410 5488
5411 5489 /*
5412 5490 * Get the global ring index by a ring index within a group.
5413 5491 */
5414 5492 static int
5415 5493 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
5416 5494 {
5417 5495 ixgbe_rx_ring_t *rx_ring;
5418 5496 int i;
5419 5497
5420 5498 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5421 5499 rx_ring = &ixgbe->rx_rings[i];
5422 5500 if (rx_ring->group_index == gindex)
5423 5501 rindex--;
5424 5502 if (rindex < 0)
5425 5503 return (i);
5426 5504 }
5427 5505
5428 5506 return (-1);
5429 5507 }
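
A worked example of the search, under an assumed layout of eight rx
rings assigned contiguously to two groups:

    /*
     * Assumed: group_index == 0 for rings 0..3, 1 for rings 4..7.
     *
     *    ixgbe_get_rx_ring_index(ixgbe, 0, 2) == 2
     *    ixgbe_get_rx_ring_index(ixgbe, 1, 2) == 6
     *
     * i.e. the third ring of group 1 is global ring 6.
     */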
5430 5508
5431 5509 /*
5432 5510 * Callback function for the MAC layer to register all rings.
5433 5511 */
5434 5512 /* ARGSUSED */
5435 5513 void
5436 5514 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
5437 5515 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5438 5516 {
5439 5517 ixgbe_t *ixgbe = (ixgbe_t *)arg;
5440 5518 mac_intr_t *mintr = &infop->mri_intr;
5441 5519
5442 5520 switch (rtype) {
5443 5521 case MAC_RING_TYPE_RX: {
5444 5522 /*
5445 5523 * 'index' is the ring index within the group.
5446 5524 * Need to get the global ring index by searching in groups.
5447 5525 */
5448 5526 int global_ring_index = ixgbe_get_rx_ring_index(
5449 5527 ixgbe, group_index, ring_index);
5450 5528
5451 5529 ASSERT(global_ring_index >= 0);
5452 5530
5453 5531 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
5454 5532 rx_ring->ring_handle = rh;
5455 5533
5456 5534 infop->mri_driver = (mac_ring_driver_t)rx_ring;
5457 5535 infop->mri_start = ixgbe_ring_start;
5458 5536 infop->mri_stop = NULL;
5459 5537 infop->mri_poll = ixgbe_ring_rx_poll;
5460 5538 infop->mri_stat = ixgbe_rx_ring_stat;
5461 5539
5462 5540 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
5463 5541 mintr->mi_enable = ixgbe_rx_ring_intr_enable;
5464 5542 mintr->mi_disable = ixgbe_rx_ring_intr_disable;
5465 5543 if (ixgbe->intr_type &
5466 5544 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5467 5545 mintr->mi_ddi_handle =
5468 5546 ixgbe->htable[rx_ring->intr_vector];
5469 5547 }
5470 5548
5471 5549 break;
5472 5550 }
5473 5551 case MAC_RING_TYPE_TX: {
5474 5552 ASSERT(group_index == -1);
5475 5553 ASSERT(ring_index < ixgbe->num_tx_rings);
5476 5554
5477 5555 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
5478 5556 tx_ring->ring_handle = rh;
5479 5557
5480 5558 infop->mri_driver = (mac_ring_driver_t)tx_ring;
5481 5559 infop->mri_start = NULL;
5482 5560 infop->mri_stop = NULL;
5483 5561 infop->mri_tx = ixgbe_ring_tx;
5484 5562 infop->mri_stat = ixgbe_tx_ring_stat;
5485 5563 if (ixgbe->intr_type &
5486 5564 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5487 5565 mintr->mi_ddi_handle =
5488 5566 ixgbe->htable[tx_ring->intr_vector];
5489 5567 }
5490 5568 break;
5491 5569 }
5492 5570 default:
5493 5571 break;
5494 5572 }
5495 5573 }
5496 5574
5497 5575 /*
5498 5576 * Callback function for the MAC layer to register all groups.
5499 5577 */
5500 5578 void
5501 5579 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
5502 5580 mac_group_info_t *infop, mac_group_handle_t gh)
5503 5581 {
5504 5582 ixgbe_t *ixgbe = (ixgbe_t *)arg;
5505 5583
5506 5584 switch (rtype) {
5507 5585 case MAC_RING_TYPE_RX: {
5508 5586 ixgbe_rx_group_t *rx_group;
5509 5587
5510 5588 rx_group = &ixgbe->rx_groups[index];
5511 5589 rx_group->group_handle = gh;
5512 5590
5513 5591 infop->mgi_driver = (mac_group_driver_t)rx_group;
5514 5592 infop->mgi_start = NULL;
5515 5593 infop->mgi_stop = NULL;
5516 5594 infop->mgi_addmac = ixgbe_addmac;
5517 5595 infop->mgi_remmac = ixgbe_remmac;
5518 5596 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
5519 5597
5520 5598 break;
5521 5599 }
5522 5600 case MAC_RING_TYPE_TX:
5523 5601 break;
5524 5602 default:
5525 5603 break;
5526 5604 }
5527 5605 }
5528 5606
5529 5607 /*
5530 5608 * Enable the interrupt on the specified rx ring.
5531 5609 */
5532 5610 int
5533 5611 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
5534 5612 {
5535 5613 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5536 5614 ixgbe_t *ixgbe = rx_ring->ixgbe;
5537 5615 int r_idx = rx_ring->index;
5538 5616 int hw_r_idx = rx_ring->hw_index;
5539 5617 int v_idx = rx_ring->intr_vector;
5540 5618
5541 5619 mutex_enter(&ixgbe->gen_lock);
5542 5620 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5543 5621 mutex_exit(&ixgbe->gen_lock);
5544 5622 /*
5545 5623 * Simply return 0.
5546 5624 * Interrupts are being adjusted. ixgbe_intr_adjust()
5547 5625 * will eventually re-enable the interrupt when it's
5548 5626 * done with the adjustment.
5549 5627 */
5550 5628 return (0);
5551 5629 }
5552 5630
5553 5631 /*
5554 5632 * Enable the interrupt by setting the VAL bit of the given
5555 5633 * interrupt vector allocation register (IVAR).
5556 5634 */
5557 5635 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);
5558 5636
5559 5637 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
5560 5638
5561 5639 /*
5562 5640 * Trigger a Rx interrupt on this ring
5563 5641 */
5564 5642 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
5565 5643 IXGBE_WRITE_FLUSH(&ixgbe->hw);
5566 5644
5567 5645 mutex_exit(&ixgbe->gen_lock);
5568 5646
5569 5647 return (0);
5570 5648 }
5571 5649
5572 5650 /*
5573 5651 * Disable the interrupt on the specified rx ring.
5574 5652 */
5575 5653 int
5576 5654 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
5577 5655 {
5578 5656 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5579 5657 ixgbe_t *ixgbe = rx_ring->ixgbe;
5580 5658 int r_idx = rx_ring->index;
5581 5659 int hw_r_idx = rx_ring->hw_index;
5582 5660 int v_idx = rx_ring->intr_vector;
5583 5661
5584 5662 mutex_enter(&ixgbe->gen_lock);
5585 5663 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5586 5664 mutex_exit(&ixgbe->gen_lock);
5587 5665 /*
5588 5666 * Simply return 0.
5589 5667 * In the rare case where an interrupt is being
5590 5668 * disabled while interrupts are being adjusted,
5591 5669 * we don't fail the operation. No interrupts will
5592 5670 * be generated while they are adjusted, and
5593 5671 * ixgbe_intr_adjust() will cause the interrupts
5594 5672 * to be re-enabled once it completes. Note that
5595 5673 * in this case, packets may be delivered to the
5596 5674 * stack via interrupts before ixgbe_rx_ring_intr_enable()
5597 5675 * is called again. This is acceptable since interrupt
5598 5676 * adjustment is infrequent, and the stack will be
5599 5677 * able to handle these packets.
5600 5678 */
5601 5679 return (0);
5602 5680 }
5603 5681
5604 5682 /*
5605 5683 * Disable the interrupt by clearing the VAL bit of the given
5606 5684 * interrupt vector allocation register (IVAR).
5607 5685 */
5608 5686 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
5609 5687
5610 5688 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
5611 5689
5612 5690 mutex_exit(&ixgbe->gen_lock);
5613 5691
5614 5692 return (0);
5615 5693 }
5616 5694
5617 5695 /*
5618 5696 * Add a mac address.
5619 5697 */
5620 5698 static int
5621 5699 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
5622 5700 {
5623 5701 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5624 5702 ixgbe_t *ixgbe = rx_group->ixgbe;
5625 5703 struct ixgbe_hw *hw = &ixgbe->hw;
5626 5704 int slot, i;
5627 5705
5628 5706 mutex_enter(&ixgbe->gen_lock);
5629 5707
5630 5708 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5631 5709 mutex_exit(&ixgbe->gen_lock);
5632 5710 return (ECANCELED);
5633 5711 }
5634 5712
5635 5713 if (ixgbe->unicst_avail == 0) {
5636 5714 /* no slots available */
5637 5715 mutex_exit(&ixgbe->gen_lock);
5638 5716 return (ENOSPC);
5639 5717 }
5640 5718
5641 5719 /*
5642 5720 * The first ixgbe->num_rx_groups slots are reserved, one per
5643 5721 * group.  The remaining slots are shared by all groups.  When
5644 5722 * adding a MAC address, the group's reserved slot is checked
5645 5723 * first, then the shared slots are searched.
5646 5724 */
5647 5725 slot = -1;
5648 5726 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
5649 5727 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
5650 5728 if (ixgbe->unicst_addr[i].mac.set == 0) {
5651 5729 slot = i;
5652 5730 break;
5653 5731 }
5654 5732 }
5655 5733 } else {
5656 5734 slot = rx_group->index;
5657 5735 }
5658 5736
5659 5737 if (slot == -1) {
5660 5738 /* no slots available */
5661 5739 mutex_exit(&ixgbe->gen_lock);
5662 5740 return (ENOSPC);
5663 5741 }
5664 5742
5665 5743 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5666 5744 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
5667 5745 rx_group->index, IXGBE_RAH_AV);
5668 5746 ixgbe->unicst_addr[slot].mac.set = 1;
5669 5747 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
5670 5748 ixgbe->unicst_avail--;
5671 5749
5672 5750 mutex_exit(&ixgbe->gen_lock);
5673 5751
5674 5752 return (0);
5675 5753 }
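
Concretely, under an assumed configuration of four rx groups and sixteen
RAR slots:

    /*
     * Assumed: num_rx_groups = 4, unicst_total = 16.
     *
     *    slots  0..3     reserved, group i owns slot i
     *    slots  4..15    shared pool, lowest-free search
     *
     * The first address added to group 2 lands in slot 2; any further
     * address, for any group, takes the lowest free shared slot.
     */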
5676 5754
5677 5755 /*
5678 5756 * Remove a mac address.
5679 5757 */
5680 5758 static int
5681 5759 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
5682 5760 {
5683 5761 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5684 5762 ixgbe_t *ixgbe = rx_group->ixgbe;
5685 5763 struct ixgbe_hw *hw = &ixgbe->hw;
5686 5764 int slot;
5687 5765
5688 5766 mutex_enter(&ixgbe->gen_lock);
5689 5767
5690 5768 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5691 5769 mutex_exit(&ixgbe->gen_lock);
5692 5770 return (ECANCELED);
5693 5771 }
5694 5772
5695 5773 slot = ixgbe_unicst_find(ixgbe, mac_addr);
5696 5774 if (slot == -1) {
5697 5775 mutex_exit(&ixgbe->gen_lock);
5698 5776 return (EINVAL);
5699 5777 }
5700 5778
5701 5779 if (ixgbe->unicst_addr[slot].mac.set == 0) {
5702 5780 mutex_exit(&ixgbe->gen_lock);
5703 5781 return (EINVAL);
5704 5782 }
5705 5783
5706 5784 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5707 5785 (void) ixgbe_clear_rar(hw, slot);
5708 5786 ixgbe->unicst_addr[slot].mac.set = 0;
5709 5787 ixgbe->unicst_avail++;
5710 5788
5711 5789 mutex_exit(&ixgbe->gen_lock);
5712 5790
5713 5791 return (0);
5714 5792 }
708 lines elided