Print this page
XXXX Intel X540 support
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/ixgbe/ixgbe_main.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
|
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 + * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 + * Copyright 2012, Nexenta Systems, Inc. All rights reserved.
28 30 */
29 31
30 32 #include "ixgbe_sw.h"
31 33
32 34 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
33 35 static char ixgbe_version[] = "ixgbe 1.1.7";
34 36
35 37 /*
36 38 * Local function protoypes
37 39 */
38 40 static int ixgbe_register_mac(ixgbe_t *);
39 41 static int ixgbe_identify_hardware(ixgbe_t *);
40 42 static int ixgbe_regs_map(ixgbe_t *);
41 43 static void ixgbe_init_properties(ixgbe_t *);
42 44 static int ixgbe_init_driver_settings(ixgbe_t *);
43 45 static void ixgbe_init_locks(ixgbe_t *);
44 46 static void ixgbe_destroy_locks(ixgbe_t *);
45 47 static int ixgbe_init(ixgbe_t *);
46 48 static int ixgbe_chip_start(ixgbe_t *);
47 49 static void ixgbe_chip_stop(ixgbe_t *);
48 50 static int ixgbe_reset(ixgbe_t *);
49 51 static void ixgbe_tx_clean(ixgbe_t *);
50 52 static boolean_t ixgbe_tx_drain(ixgbe_t *);
51 53 static boolean_t ixgbe_rx_drain(ixgbe_t *);
52 54 static int ixgbe_alloc_rings(ixgbe_t *);
53 55 static void ixgbe_free_rings(ixgbe_t *);
54 56 static int ixgbe_alloc_rx_data(ixgbe_t *);
55 57 static void ixgbe_free_rx_data(ixgbe_t *);
56 58 static void ixgbe_setup_rings(ixgbe_t *);
57 59 static void ixgbe_setup_rx(ixgbe_t *);
58 60 static void ixgbe_setup_tx(ixgbe_t *);
59 61 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
60 62 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
61 63 static void ixgbe_setup_rss(ixgbe_t *);
62 64 static void ixgbe_setup_vmdq(ixgbe_t *);
63 65 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
64 66 static void ixgbe_init_unicst(ixgbe_t *);
65 67 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
66 68 static void ixgbe_setup_multicst(ixgbe_t *);
67 69 static void ixgbe_get_hw_state(ixgbe_t *);
68 70 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
69 71 static void ixgbe_get_conf(ixgbe_t *);
70 72 static void ixgbe_init_params(ixgbe_t *);
71 73 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
72 74 static void ixgbe_driver_link_check(ixgbe_t *);
73 75 static void ixgbe_sfp_check(void *);
74 76 static void ixgbe_overtemp_check(void *);
75 77 static void ixgbe_link_timer(void *);
76 78 static void ixgbe_local_timer(void *);
77 79 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
78 80 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
79 81 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
80 82 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
81 83 static boolean_t is_valid_mac_addr(uint8_t *);
82 84 static boolean_t ixgbe_stall_check(ixgbe_t *);
83 85 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
84 86 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
85 87 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
86 88 static int ixgbe_alloc_intrs(ixgbe_t *);
87 89 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
88 90 static int ixgbe_add_intr_handlers(ixgbe_t *);
89 91 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
90 92 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
91 93 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
92 94 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
93 95 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
94 96 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
95 97 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
96 98 static void ixgbe_setup_adapter_vector(ixgbe_t *);
97 99 static void ixgbe_rem_intr_handlers(ixgbe_t *);
98 100 static void ixgbe_rem_intrs(ixgbe_t *);
99 101 static int ixgbe_enable_intrs(ixgbe_t *);
100 102 static int ixgbe_disable_intrs(ixgbe_t *);
101 103 static uint_t ixgbe_intr_legacy(void *, void *);
102 104 static uint_t ixgbe_intr_msi(void *, void *);
103 105 static uint_t ixgbe_intr_msix(void *, void *);
104 106 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
105 107 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
106 108 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
107 109 static void ixgbe_get_driver_control(struct ixgbe_hw *);
108 110 static int ixgbe_addmac(void *, const uint8_t *);
109 111 static int ixgbe_remmac(void *, const uint8_t *);
110 112 static void ixgbe_release_driver_control(struct ixgbe_hw *);
111 113
112 114 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
113 115 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
114 116 static int ixgbe_resume(dev_info_t *);
115 117 static int ixgbe_suspend(dev_info_t *);
116 118 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
117 119 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
118 120 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
119 121 static int ixgbe_intr_cb_register(ixgbe_t *);
120 122 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
121 123
122 124 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
123 125 const void *impl_data);
124 126 static void ixgbe_fm_init(ixgbe_t *);
125 127 static void ixgbe_fm_fini(ixgbe_t *);
126 128
/*
 * Names of the driver-private MAC properties exported through the
 * mc_setprop/mc_getprop/mc_propinfo callbacks.  The list is
 * NULL-terminated for the benefit of iterating callers.
 */
char *ixgbe_priv_props[] = {
	"_tx_copy_thresh",
	"_tx_recycle_thresh",
	"_tx_overload_thresh",
	"_tx_resched_thresh",
	"_rx_copy_thresh",
	"_rx_limit_per_intr",
	"_intr_throttling",
	"_adv_pause_cap",
	"_adv_asym_pause_cap",
	NULL
};

/*
 * Number of entries in ixgbe_priv_props[], including the terminating
 * NULL.  The count must be derived from the array's own element type;
 * the previous divisor, sizeof (mac_priv_prop_t), was only correct by
 * coincidence since the array holds char * pointers.
 */
#define	IXGBE_MAX_PRIV_PROPS \
	(sizeof (ixgbe_priv_props) / sizeof (ixgbe_priv_props[0]))
142 144
/*
 * Character/block driver entry points.  ixgbe is a GLDv3 NIC driver, so
 * the MAC framework supplies the real user-visible entry points; nearly
 * everything here is deliberately stubbed out with nulldev/nodev.
 */
static struct cb_ops ixgbe_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

/*
 * Device operations vector.  attach/detach are the only interesting
 * entries; mac_init_ops() (called from _init()) fills in the
 * GLDv3-specific pieces before mod_install().
 */
static struct dev_ops ixgbe_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	ixgbe_attach,		/* devo_attach */
	ixgbe_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&ixgbe_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported,	/* devo_quiesce */
};

/* Module linkage: this module is a device driver. */
static struct modldrv ixgbe_modldrv = {
	&mod_driverops,		/* Type of module.  This one is a driver */
	ixgbe_ident,		/* Description string */
	&ixgbe_dev_ops		/* driver ops */
};

static struct modlinkage ixgbe_modlinkage = {
	MODREV_1, &ixgbe_modldrv, NULL
};
188 190
/*
 * Access attributes for register mapping: little-endian, strictly
 * ordered accesses, with FMA error flagging enabled.
 */
ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Loopback property descriptors for the loopback modes the driver
 * supports (none, internal MAC, external).
 */
static lb_property_t lb_normal = {
	normal, "normal", IXGBE_LB_NONE
};

static lb_property_t lb_mac = {
	internal, "MAC", IXGBE_LB_INTERNAL_MAC
};

static lb_property_t lb_external = {
	external, "External", IXGBE_LB_EXTERNAL
};

#define	IXGBE_M_CALLBACK_FLAGS \
	(MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)

/*
 * GLDv3 callback vector.  The NULL slots are entry points this driver
 * does not provide directly; the flags above advertise which optional
 * callbacks are present.
 */
static mac_callbacks_t ixgbe_m_callbacks = {
	IXGBE_M_CALLBACK_FLAGS,
	ixgbe_m_stat,
	ixgbe_m_start,
	ixgbe_m_stop,
	ixgbe_m_promisc,
	ixgbe_m_multicst,
	NULL,
	NULL,
	NULL,
	ixgbe_m_ioctl,
	ixgbe_m_getcapab,
	NULL,
	NULL,
	ixgbe_m_setprop,
	ixgbe_m_getprop,
	ixgbe_m_propinfo
};
235 237
/*
 * Initialize capabilities of each supported adapter type
 */

/*
 * 82598EB capabilities.  NOTE(review): this is a shared static that
 * ixgbe_identify_hardware() mutates at runtime for copper parts (to add
 * fan-failure interrupt support), so all 82598 instances observe the
 * modified copy -- confirm the cross-instance sharing is intended.
 */
static adapter_info_t ixgbe_82598eb_cap = {
	64,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	64,		/* default number of rx queues */
	16,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	32,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	16366,		/* maximum MTU size */
	0xFFFF,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	18,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	IXGBE_EICR_LSC,	/* "other" interrupt types handled */
	0,		/* "other" interrupt types enable mask */
	(IXGBE_FLAG_DCA_CAPABLE	/* capability flags */
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE)
};
262 264
/*
 * 82599EB capabilities.  NOTE(review): also a shared static;
 * ixgbe_identify_hardware() adds temperature-sensor bits at runtime for
 * the T3 LOM variant, affecting every 82599 instance.
 */
static adapter_info_t ixgbe_82599eb_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP1
	| IXGBE_EICR_GPI_SDP2),	/* "other" interrupt types handled */

	(IXGBE_SDP1_GPIEN
	| IXGBE_SDP2_GPIEN),	/* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE
	| IXGBE_FLAG_SFP_PLUG_CAPABLE)	/* capability flags */
};
293 295
/*
 * X540 capabilities.  Modeled on the 82599 entry above, with SDP0 also
 * listed among the handled "other" interrupts.
 *
 * NOTE(review): SDP2 is listed as handled in the other_intr word below
 * but is commented out of the GPIE enable mask -- confirm this
 * asymmetry is intentional.
 */
static adapter_info_t ixgbe_X540_cap = {
	128,		/* maximum number of rx queues */
	1,		/* minimum number of rx queues */
	128,		/* default number of rx queues */
	64,		/* maximum number of rx groups */
	1,		/* minimum number of rx groups */
	1,		/* default number of rx groups */
	128,		/* maximum number of tx queues */
	1,		/* minimum number of tx queues */
	8,		/* default number of tx queues */
	15500,		/* maximum MTU size */
	0xFF8,		/* maximum interrupt throttle rate */
	0,		/* minimum interrupt throttle rate */
	200,		/* default interrupt throttle rate */
	64,		/* maximum total msix vectors */
	16,		/* maximum number of ring vectors */
	2,		/* maximum number of other vectors */
	/* NOTE(review): is the X540's SDP3 interrupt needed here? */
	(IXGBE_EICR_LSC
	| IXGBE_EICR_GPI_SDP0
	| IXGBE_EICR_GPI_SDP1
	| IXGBE_EICR_GPI_SDP2
	/* | IXGBE_EICR_GPI_SDP3 */),	/* "other" interrupt types handled */

	(IXGBE_SDP0_GPIEN
	| IXGBE_SDP1_GPIEN
	/* | IXGBE_SDP2_GPIEN
	| IXGBE_SDP3_GPIEN */),	/* "other" interrupt types enable mask */

	(IXGBE_FLAG_DCA_CAPABLE
	| IXGBE_FLAG_RSS_CAPABLE
	| IXGBE_FLAG_VMDQ_CAPABLE
	| IXGBE_FLAG_RSC_CAPABLE
	| IXGBE_FLAG_SFP_PLUG_CAPABLE)	/* capability flags */
	/* NOTE(review): confirm the X540 is really SFP-plug capable. */
};
332 +
294 333 /*
295 334 * Module Initialization Functions.
296 335 */
297 336
298 337 int
299 338 _init(void)
300 339 {
301 340 int status;
302 341
303 342 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
304 343
305 344 status = mod_install(&ixgbe_modlinkage);
306 345
307 346 if (status != DDI_SUCCESS) {
308 347 mac_fini_ops(&ixgbe_dev_ops);
309 348 }
310 349
311 350 return (status);
312 351 }
313 352
314 353 int
315 354 _fini(void)
316 355 {
317 356 int status;
318 357
319 358 status = mod_remove(&ixgbe_modlinkage);
320 359
321 360 if (status == DDI_SUCCESS) {
322 361 mac_fini_ops(&ixgbe_dev_ops);
323 362 }
324 363
325 364 return (status);
326 365 }
327 366
328 367 int
329 368 _info(struct modinfo *modinfop)
330 369 {
331 370 int status;
332 371
333 372 status = mod_info(&ixgbe_modlinkage, modinfop);
334 373
335 374 return (status);
336 375 }
337 376
338 377 /*
339 378 * ixgbe_attach - Driver attach.
340 379 *
341 380 * This function is the device specific initialization entry
342 381 * point. This entry point is required and must be written.
343 382 * The DDI_ATTACH command must be provided in the attach entry
344 383 * point. When attach() is called with cmd set to DDI_ATTACH,
345 384 * all normal kernel services (such as kmem_alloc(9F)) are
346 385 * available for use by the driver.
347 386 *
348 387 * The attach() function will be called once for each instance
349 388 * of the device on the system with cmd set to DDI_ATTACH.
350 389 * Until attach() succeeds, the only driver entry points which
351 390 * may be called are open(9E) and getinfo(9E).
352 391 */
353 392 static int
354 393 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
355 394 {
356 395 ixgbe_t *ixgbe;
357 396 struct ixgbe_osdep *osdep;
358 397 struct ixgbe_hw *hw;
359 398 int instance;
360 399 char taskqname[32];
361 400
362 401 /*
363 402 * Check the command and perform corresponding operations
364 403 */
365 404 switch (cmd) {
366 405 default:
367 406 return (DDI_FAILURE);
368 407
369 408 case DDI_RESUME:
370 409 return (ixgbe_resume(devinfo));
371 410
372 411 case DDI_ATTACH:
373 412 break;
374 413 }
375 414
376 415 /* Get the device instance */
377 416 instance = ddi_get_instance(devinfo);
378 417
379 418 /* Allocate memory for the instance data structure */
380 419 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
381 420
382 421 ixgbe->dip = devinfo;
383 422 ixgbe->instance = instance;
384 423
385 424 hw = &ixgbe->hw;
386 425 osdep = &ixgbe->osdep;
387 426 hw->back = osdep;
388 427 osdep->ixgbe = ixgbe;
389 428
390 429 /* Attach the instance pointer to the dev_info data structure */
391 430 ddi_set_driver_private(devinfo, ixgbe);
392 431
393 432 /*
394 433 * Initialize for fma support
395 434 */
396 435 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
397 436 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
398 437 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
399 438 ixgbe_fm_init(ixgbe);
400 439 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
401 440
402 441 /*
403 442 * Map PCI config space registers
404 443 */
405 444 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
406 445 ixgbe_error(ixgbe, "Failed to map PCI configurations");
407 446 goto attach_fail;
408 447 }
409 448 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
410 449
411 450 /*
412 451 * Identify the chipset family
413 452 */
414 453 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
415 454 ixgbe_error(ixgbe, "Failed to identify hardware");
416 455 goto attach_fail;
417 456 }
418 457
419 458 /*
420 459 * Map device registers
421 460 */
422 461 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
423 462 ixgbe_error(ixgbe, "Failed to map device registers");
424 463 goto attach_fail;
425 464 }
426 465 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
427 466
428 467 /*
429 468 * Initialize driver parameters
430 469 */
431 470 ixgbe_init_properties(ixgbe);
432 471 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
433 472
434 473 /*
435 474 * Register interrupt callback
436 475 */
437 476 if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
438 477 ixgbe_error(ixgbe, "Failed to register interrupt callback");
439 478 goto attach_fail;
440 479 }
441 480
442 481 /*
443 482 * Allocate interrupts
444 483 */
445 484 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
446 485 ixgbe_error(ixgbe, "Failed to allocate interrupts");
447 486 goto attach_fail;
448 487 }
449 488 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
450 489
451 490 /*
452 491 * Allocate rx/tx rings based on the ring numbers.
453 492 * The actual numbers of rx/tx rings are decided by the number of
454 493 * allocated interrupt vectors, so we should allocate the rings after
455 494 * interrupts are allocated.
456 495 */
457 496 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
458 497 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
459 498 goto attach_fail;
460 499 }
461 500 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
462 501
463 502 /*
464 503 * Map rings to interrupt vectors
465 504 */
466 505 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
467 506 ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
468 507 goto attach_fail;
469 508 }
470 509
471 510 /*
472 511 * Add interrupt handlers
473 512 */
474 513 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
475 514 ixgbe_error(ixgbe, "Failed to add interrupt handlers");
476 515 goto attach_fail;
477 516 }
478 517 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
479 518
480 519 /*
481 520 * Create a taskq for sfp-change
482 521 */
483 522 (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
484 523 if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
485 524 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
486 525 ixgbe_error(ixgbe, "sfp_taskq create failed");
487 526 goto attach_fail;
488 527 }
489 528 ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
490 529
491 530 /*
492 531 * Create a taskq for over-temp
493 532 */
494 533 (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
495 534 if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
496 535 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
497 536 ixgbe_error(ixgbe, "overtemp_taskq create failed");
498 537 goto attach_fail;
499 538 }
500 539 ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;
501 540
502 541 /*
503 542 * Initialize driver parameters
504 543 */
505 544 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
506 545 ixgbe_error(ixgbe, "Failed to initialize driver settings");
507 546 goto attach_fail;
508 547 }
509 548
510 549 /*
511 550 * Initialize mutexes for this device.
512 551 * Do this before enabling the interrupt handler and
513 552 * register the softint to avoid the condition where
514 553 * interrupt handler can try using uninitialized mutex.
515 554 */
516 555 ixgbe_init_locks(ixgbe);
517 556 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
518 557
519 558 /*
520 559 * Initialize chipset hardware
521 560 */
522 561 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
523 562 ixgbe_error(ixgbe, "Failed to initialize adapter");
524 563 goto attach_fail;
525 564 }
526 565 ixgbe->link_check_complete = B_FALSE;
527 566 ixgbe->link_check_hrtime = gethrtime() +
528 567 (IXGBE_LINK_UP_TIME * 100000000ULL);
529 568 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
530 569
531 570 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
532 571 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
533 572 goto attach_fail;
534 573 }
535 574
536 575 /*
537 576 * Initialize statistics
538 577 */
539 578 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
540 579 ixgbe_error(ixgbe, "Failed to initialize statistics");
541 580 goto attach_fail;
542 581 }
543 582 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
544 583
545 584 /*
546 585 * Register the driver to the MAC
547 586 */
548 587 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
549 588 ixgbe_error(ixgbe, "Failed to register MAC");
550 589 goto attach_fail;
551 590 }
552 591 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
553 592 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
554 593
555 594 ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
556 595 IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
557 596 if (ixgbe->periodic_id == 0) {
558 597 ixgbe_error(ixgbe, "Failed to add the link check timer");
559 598 goto attach_fail;
560 599 }
561 600 ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
562 601
563 602 /*
564 603 * Now that mutex locks are initialized, and the chip is also
565 604 * initialized, enable interrupts.
566 605 */
567 606 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
568 607 ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
569 608 goto attach_fail;
570 609 }
571 610 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
572 611
573 612 ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version);
574 613 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
575 614
576 615 return (DDI_SUCCESS);
577 616
578 617 attach_fail:
579 618 ixgbe_unconfigure(devinfo, ixgbe);
580 619 return (DDI_FAILURE);
581 620 }
582 621
/*
 * ixgbe_detach - Driver detach.
 *
 * The detach() function is the complement of the attach routine.
 * If cmd is set to DDI_DETACH, detach() is used to remove the
 * state associated with a given instance of a device node
 * prior to the removal of that instance from the system.
 *
 * The detach() function will be called once for each instance
 * of the device for which there has been a successful attach()
 * once there are no longer any opens on the device.
 *
 * Interrupts routine are disabled, All memory allocated by this
 * driver are freed.
 */
static int
ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	ixgbe_t *ixgbe;

	/*
	 * Check detach command
	 */
	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_SUSPEND:
		return (ixgbe_suspend(devinfo));

	case DDI_DETACH:
		break;
	}

	/*
	 * Get the pointer to the driver private data structure
	 */
	ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
	if (ixgbe == NULL)
		return (DDI_FAILURE);

	/*
	 * If the device is still running, it needs to be stopped first.
	 * This check is necessary because under some specific circumstances,
	 * the detach routine can be called without stopping the interface
	 * first.  The STARTED bit is cleared before taking gen_lock so new
	 * entry points see the instance as stopped.
	 */
	if (ixgbe->ixgbe_state & IXGBE_STARTED) {
		atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
		mutex_enter(&ixgbe->gen_lock);
		ixgbe_stop(ixgbe, B_TRUE);
		mutex_exit(&ixgbe->gen_lock);
		/* Disable and stop the watchdog timer */
		ixgbe_disable_watchdog_timer(ixgbe);
	}

	/*
	 * Check if there are still rx buffers held by the upper layer.
	 * If so, fail the detach; the instance cannot be torn down while
	 * loaned buffers are outstanding.
	 */
	if (!ixgbe_rx_drain(ixgbe))
		return (DDI_FAILURE);

	/*
	 * Do the remaining unconfigure routines
	 */
	ixgbe_unconfigure(devinfo, ixgbe);

	return (DDI_SUCCESS);
}
653 692
654 693 static void
655 694 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
656 695 {
657 696 /*
658 697 * Disable interrupt
659 698 */
660 699 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
661 700 (void) ixgbe_disable_intrs(ixgbe);
662 701 }
663 702
664 703 /*
665 704 * remove the link check timer
666 705 */
667 706 if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
668 707 if (ixgbe->periodic_id != NULL) {
669 708 ddi_periodic_delete(ixgbe->periodic_id);
670 709 ixgbe->periodic_id = NULL;
671 710 }
672 711 }
673 712
674 713 /*
675 714 * Unregister MAC
676 715 */
677 716 if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
678 717 (void) mac_unregister(ixgbe->mac_hdl);
679 718 }
680 719
681 720 /*
682 721 * Free statistics
683 722 */
684 723 if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
685 724 kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
686 725 }
687 726
688 727 /*
689 728 * Remove interrupt handlers
690 729 */
691 730 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
692 731 ixgbe_rem_intr_handlers(ixgbe);
693 732 }
694 733
695 734 /*
696 735 * Remove taskq for sfp-status-change
697 736 */
698 737 if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
699 738 ddi_taskq_destroy(ixgbe->sfp_taskq);
700 739 }
701 740
702 741 /*
703 742 * Remove taskq for over-temp
704 743 */
705 744 if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
706 745 ddi_taskq_destroy(ixgbe->overtemp_taskq);
707 746 }
708 747
709 748 /*
710 749 * Remove interrupts
711 750 */
712 751 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
713 752 ixgbe_rem_intrs(ixgbe);
714 753 }
715 754
716 755 /*
717 756 * Unregister interrupt callback handler
718 757 */
719 758 (void) ddi_cb_unregister(ixgbe->cb_hdl);
720 759
721 760 /*
722 761 * Remove driver properties
723 762 */
724 763 if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
725 764 (void) ddi_prop_remove_all(devinfo);
726 765 }
727 766
728 767 /*
729 768 * Stop the chipset
730 769 */
731 770 if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
732 771 mutex_enter(&ixgbe->gen_lock);
733 772 ixgbe_chip_stop(ixgbe);
734 773 mutex_exit(&ixgbe->gen_lock);
735 774 }
736 775
737 776 /*
738 777 * Free register handle
739 778 */
740 779 if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
741 780 if (ixgbe->osdep.reg_handle != NULL)
742 781 ddi_regs_map_free(&ixgbe->osdep.reg_handle);
743 782 }
744 783
745 784 /*
746 785 * Free PCI config handle
747 786 */
748 787 if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
749 788 if (ixgbe->osdep.cfg_handle != NULL)
750 789 pci_config_teardown(&ixgbe->osdep.cfg_handle);
751 790 }
752 791
753 792 /*
754 793 * Free locks
755 794 */
756 795 if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
757 796 ixgbe_destroy_locks(ixgbe);
758 797 }
759 798
760 799 /*
761 800 * Free the rx/tx rings
762 801 */
763 802 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
764 803 ixgbe_free_rings(ixgbe);
765 804 }
766 805
767 806 /*
768 807 * Unregister FMA capabilities
769 808 */
770 809 if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
771 810 ixgbe_fm_fini(ixgbe);
772 811 }
773 812
774 813 /*
775 814 * Free the driver data structure
776 815 */
777 816 kmem_free(ixgbe, sizeof (ixgbe_t));
778 817
779 818 ddi_set_driver_private(devinfo, NULL);
780 819 }
781 820
782 821 /*
783 822 * ixgbe_register_mac - Register the driver and its function pointers with
784 823 * the GLD interface.
785 824 */
786 825 static int
787 826 ixgbe_register_mac(ixgbe_t *ixgbe)
788 827 {
789 828 struct ixgbe_hw *hw = &ixgbe->hw;
790 829 mac_register_t *mac;
791 830 int status;
792 831
793 832 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
794 833 return (IXGBE_FAILURE);
795 834
796 835 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
797 836 mac->m_driver = ixgbe;
798 837 mac->m_dip = ixgbe->dip;
799 838 mac->m_src_addr = hw->mac.addr;
800 839 mac->m_callbacks = &ixgbe_m_callbacks;
801 840 mac->m_min_sdu = 0;
802 841 mac->m_max_sdu = ixgbe->default_mtu;
803 842 mac->m_margin = VLAN_TAGSZ;
804 843 mac->m_priv_props = ixgbe_priv_props;
805 844 mac->m_v12n = MAC_VIRT_LEVEL1;
806 845
807 846 status = mac_register(mac, &ixgbe->mac_hdl);
808 847
809 848 mac_free(mac);
810 849
811 850 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
812 851 }
813 852
/*
 * ixgbe_identify_hardware - Identify the type of the chipset.
 *
 * Reads the PCI identity from config space, lets the shared code set
 * hw->mac.type, and points ixgbe->capab at the matching static
 * capability table.  Returns IXGBE_SUCCESS or IXGBE_FAILURE.
 */
static int
ixgbe_identify_hardware(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	struct ixgbe_osdep *osdep = &ixgbe->osdep;

	/*
	 * Get the device id
	 */
	hw->vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
	hw->device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
	hw->revision_id =
	    pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Set the mac type of the adapter based on the device id
	 */
	if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
		return (IXGBE_FAILURE);
	}

	/*
	 * Install adapter capabilities.
	 *
	 * NOTE(review): capab points at a shared static adapter_info_t,
	 * and the |= updates below mutate that shared table for every
	 * instance of the same MAC type -- confirm this cross-instance
	 * side effect is intended.
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
		ixgbe->capab = &ixgbe_82598eb_cap;

		/* Copper 82598 parts get fan-failure interrupt support. */
		if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
			ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
			ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
		}
		break;

	case ixgbe_mac_82599EB:
		IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
		ixgbe->capab = &ixgbe_82599eb_cap;

		/* The T3 LOM variant adds a temperature sensor on SDP0. */
		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
			ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
			ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
			ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
		}
		break;

	case ixgbe_mac_X540:
		IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
		ixgbe->capab = &ixgbe_X540_cap;
		/*
		 * For now, X540 is all set in its capab structure.
		 * As other X540 variants show up, things can change here.
		 */
		break;

	default:
		IXGBE_DEBUGLOG_1(ixgbe,
		    "adapter not supported in ixgbe_identify_hardware(): %d\n",
		    hw->mac.type);
		return (IXGBE_FAILURE);
	}

	return (IXGBE_SUCCESS);
}
879 927
880 928 /*
881 929 * ixgbe_regs_map - Map the device registers.
882 930 *
883 931 */
884 932 static int
885 933 ixgbe_regs_map(ixgbe_t *ixgbe)
886 934 {
887 935 dev_info_t *devinfo = ixgbe->dip;
888 936 struct ixgbe_hw *hw = &ixgbe->hw;
889 937 struct ixgbe_osdep *osdep = &ixgbe->osdep;
890 938 off_t mem_size;
891 939
892 940 /*
893 941 * First get the size of device registers to be mapped.
894 942 */
895 943 if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
896 944 != DDI_SUCCESS) {
897 945 return (IXGBE_FAILURE);
898 946 }
899 947
900 948 /*
901 949 * Call ddi_regs_map_setup() to map registers
902 950 */
903 951 if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
904 952 (caddr_t *)&hw->hw_addr, 0,
905 953 mem_size, &ixgbe_regs_acc_attr,
906 954 &osdep->reg_handle)) != DDI_SUCCESS) {
907 955 return (IXGBE_FAILURE);
908 956 }
909 957
910 958 return (IXGBE_SUCCESS);
911 959 }
912 960
/*
 * ixgbe_init_properties - Initialize driver properties.
 *
 * Reads the driver's tunables and then derives the remaining runtime
 * parameters from them.  Called during attach before the hardware is
 * brought up.
 */
static void
ixgbe_init_properties(ixgbe_t *ixgbe)
{
	/*
	 * Get conf file properties, including link settings
	 * jumbo frames, ring number, descriptor number, etc.
	 */
	ixgbe_get_conf(ixgbe);

	/*
	 * Initialize the remaining driver parameters; presumably these
	 * depend on the values just read by ixgbe_get_conf() — the helper
	 * body is outside this file's view.
	 */
	ixgbe_init_params(ixgbe);
}
927 975
928 976 /*
929 977 * ixgbe_init_driver_settings - Initialize driver settings.
930 978 *
931 979 * The settings include hardware function pointers, bus information,
932 980 * rx/tx rings settings, link state, and any other parameters that
933 981 * need to be setup during driver initialization.
934 982 */
935 983 static int
936 984 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
937 985 {
938 986 struct ixgbe_hw *hw = &ixgbe->hw;
939 987 dev_info_t *devinfo = ixgbe->dip;
940 988 ixgbe_rx_ring_t *rx_ring;
941 989 ixgbe_rx_group_t *rx_group;
942 990 ixgbe_tx_ring_t *tx_ring;
943 991 uint32_t rx_size;
944 992 uint32_t tx_size;
945 993 uint32_t ring_per_group;
946 994 int i;
947 995
948 996 /*
949 997 * Initialize chipset specific hardware function pointers
950 998 */
951 999 if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
952 1000 return (IXGBE_FAILURE);
953 1001 }
954 1002
955 1003 /*
956 1004 * Get the system page size
957 1005 */
958 1006 ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
959 1007
960 1008 /*
961 1009 * Set rx buffer size
962 1010 *
963 1011 * The IP header alignment room is counted in the calculation.
964 1012 * The rx buffer size is in unit of 1K that is required by the
965 1013 * chipset hardware.
966 1014 */
967 1015 rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
968 1016 ixgbe->rx_buf_size = ((rx_size >> 10) +
969 1017 ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
970 1018
971 1019 /*
972 1020 * Set tx buffer size
973 1021 */
974 1022 tx_size = ixgbe->max_frame_size;
975 1023 ixgbe->tx_buf_size = ((tx_size >> 10) +
976 1024 ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
977 1025
978 1026 /*
979 1027 * Initialize rx/tx rings/groups parameters
980 1028 */
981 1029 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
982 1030 for (i = 0; i < ixgbe->num_rx_rings; i++) {
983 1031 rx_ring = &ixgbe->rx_rings[i];
984 1032 rx_ring->index = i;
985 1033 rx_ring->ixgbe = ixgbe;
986 1034 rx_ring->group_index = i / ring_per_group;
987 1035 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
988 1036 }
989 1037
990 1038 for (i = 0; i < ixgbe->num_rx_groups; i++) {
991 1039 rx_group = &ixgbe->rx_groups[i];
992 1040 rx_group->index = i;
993 1041 rx_group->ixgbe = ixgbe;
994 1042 }
995 1043
996 1044 for (i = 0; i < ixgbe->num_tx_rings; i++) {
997 1045 tx_ring = &ixgbe->tx_rings[i];
998 1046 tx_ring->index = i;
999 1047 tx_ring->ixgbe = ixgbe;
1000 1048 if (ixgbe->tx_head_wb_enable)
1001 1049 tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
1002 1050 else
1003 1051 tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
1004 1052
1005 1053 tx_ring->ring_size = ixgbe->tx_ring_size;
1006 1054 tx_ring->free_list_size = ixgbe->tx_ring_size +
1007 1055 (ixgbe->tx_ring_size >> 1);
1008 1056 }
1009 1057
1010 1058 /*
1011 1059 * Initialize values of interrupt throttling rate
1012 1060 */
1013 1061 for (i = 1; i < MAX_INTR_VECTOR; i++)
1014 1062 ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
1015 1063
1016 1064 /*
1017 1065 * The initial link state should be "unknown"
1018 1066 */
1019 1067 ixgbe->link_state = LINK_STATE_UNKNOWN;
1020 1068
1021 1069 return (IXGBE_SUCCESS);
1022 1070 }
1023 1071
1024 1072 /*
1025 1073 * ixgbe_init_locks - Initialize locks.
1026 1074 */
1027 1075 static void
1028 1076 ixgbe_init_locks(ixgbe_t *ixgbe)
1029 1077 {
1030 1078 ixgbe_rx_ring_t *rx_ring;
1031 1079 ixgbe_tx_ring_t *tx_ring;
1032 1080 int i;
1033 1081
1034 1082 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1035 1083 rx_ring = &ixgbe->rx_rings[i];
1036 1084 mutex_init(&rx_ring->rx_lock, NULL,
1037 1085 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1038 1086 }
1039 1087
1040 1088 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1041 1089 tx_ring = &ixgbe->tx_rings[i];
1042 1090 mutex_init(&tx_ring->tx_lock, NULL,
1043 1091 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1044 1092 mutex_init(&tx_ring->recycle_lock, NULL,
1045 1093 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1046 1094 mutex_init(&tx_ring->tcb_head_lock, NULL,
1047 1095 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1048 1096 mutex_init(&tx_ring->tcb_tail_lock, NULL,
1049 1097 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1050 1098 }
1051 1099
1052 1100 mutex_init(&ixgbe->gen_lock, NULL,
1053 1101 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1054 1102
1055 1103 mutex_init(&ixgbe->watchdog_lock, NULL,
1056 1104 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1057 1105 }
1058 1106
1059 1107 /*
1060 1108 * ixgbe_destroy_locks - Destroy locks.
1061 1109 */
1062 1110 static void
1063 1111 ixgbe_destroy_locks(ixgbe_t *ixgbe)
1064 1112 {
1065 1113 ixgbe_rx_ring_t *rx_ring;
1066 1114 ixgbe_tx_ring_t *tx_ring;
1067 1115 int i;
1068 1116
1069 1117 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1070 1118 rx_ring = &ixgbe->rx_rings[i];
1071 1119 mutex_destroy(&rx_ring->rx_lock);
1072 1120 }
1073 1121
1074 1122 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1075 1123 tx_ring = &ixgbe->tx_rings[i];
1076 1124 mutex_destroy(&tx_ring->tx_lock);
1077 1125 mutex_destroy(&tx_ring->recycle_lock);
1078 1126 mutex_destroy(&tx_ring->tcb_head_lock);
1079 1127 mutex_destroy(&tx_ring->tcb_tail_lock);
1080 1128 }
1081 1129
1082 1130 mutex_destroy(&ixgbe->gen_lock);
1083 1131 mutex_destroy(&ixgbe->watchdog_lock);
1084 1132 }
1085 1133
1086 1134 static int
1087 1135 ixgbe_resume(dev_info_t *devinfo)
1088 1136 {
1089 1137 ixgbe_t *ixgbe;
1090 1138 int i;
1091 1139
1092 1140 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1093 1141 if (ixgbe == NULL)
1094 1142 return (DDI_FAILURE);
1095 1143
1096 1144 mutex_enter(&ixgbe->gen_lock);
1097 1145
1098 1146 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1099 1147 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1100 1148 mutex_exit(&ixgbe->gen_lock);
1101 1149 return (DDI_FAILURE);
1102 1150 }
1103 1151
1104 1152 /*
1105 1153 * Enable and start the watchdog timer
1106 1154 */
1107 1155 ixgbe_enable_watchdog_timer(ixgbe);
1108 1156 }
1109 1157
1110 1158 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);
1111 1159
1112 1160 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1113 1161 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1114 1162 mac_tx_ring_update(ixgbe->mac_hdl,
1115 1163 ixgbe->tx_rings[i].ring_handle);
1116 1164 }
1117 1165 }
1118 1166
1119 1167 mutex_exit(&ixgbe->gen_lock);
1120 1168
1121 1169 return (DDI_SUCCESS);
1122 1170 }
1123 1171
1124 1172 static int
1125 1173 ixgbe_suspend(dev_info_t *devinfo)
1126 1174 {
1127 1175 ixgbe_t *ixgbe;
1128 1176
1129 1177 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1130 1178 if (ixgbe == NULL)
1131 1179 return (DDI_FAILURE);
1132 1180
1133 1181 mutex_enter(&ixgbe->gen_lock);
1134 1182
1135 1183 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
1136 1184 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1137 1185 mutex_exit(&ixgbe->gen_lock);
1138 1186 return (DDI_SUCCESS);
1139 1187 }
1140 1188 ixgbe_stop(ixgbe, B_FALSE);
1141 1189
1142 1190 mutex_exit(&ixgbe->gen_lock);
1143 1191
1144 1192 /*
1145 1193 * Disable and stop the watchdog timer
1146 1194 */
1147 1195 ixgbe_disable_watchdog_timer(ixgbe);
1148 1196
1149 1197 return (DDI_SUCCESS);
1150 1198 }
1151 1199
1152 1200 /*
1153 1201 * ixgbe_init - Initialize the device.
1154 1202 */
1155 1203 static int
1156 1204 ixgbe_init(ixgbe_t *ixgbe)
1157 1205 {
1158 1206 struct ixgbe_hw *hw = &ixgbe->hw;
1159 1207
1160 1208 mutex_enter(&ixgbe->gen_lock);
1161 1209
1162 1210 /*
1163 1211 * Reset chipset to put the hardware in a known state
1164 1212 * before we try to do anything with the eeprom.
1165 1213 */
1166 1214 if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1167 1215 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1168 1216 goto init_fail;
1169 1217 }
1170 1218
1171 1219 /*
1172 1220 * Need to init eeprom before validating the checksum.
1173 1221 */
1174 1222 if (ixgbe_init_eeprom_params(hw) < 0) {
1175 1223 ixgbe_error(ixgbe,
1176 1224 "Unable to intitialize the eeprom interface.");
1177 1225 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1178 1226 goto init_fail;
1179 1227 }
1180 1228
1181 1229 /*
1182 1230 * NVM validation
1183 1231 */
1184 1232 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1185 1233 /*
1186 1234 * Some PCI-E parts fail the first check due to
1187 1235 * the link being in sleep state. Call it again,
1188 1236 * if it fails a second time it's a real issue.
1189 1237 */
1190 1238 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1191 1239 ixgbe_error(ixgbe,
1192 1240 "Invalid NVM checksum. Please contact "
|
↓ open down ↓ |
313 lines elided |
↑ open up ↑ |
1193 1241 "the vendor to update the NVM.");
1194 1242 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1195 1243 goto init_fail;
1196 1244 }
1197 1245 }
1198 1246
1199 1247 /*
1200 1248 * Setup default flow control thresholds - enable/disable
1201 1249 * & flow control type is controlled by ixgbe.conf
1202 1250 */
1203 - hw->fc.high_water = DEFAULT_FCRTH;
1204 - hw->fc.low_water = DEFAULT_FCRTL;
1251 + hw->fc.high_water[0] = DEFAULT_FCRTH;
1252 + hw->fc.low_water[0] = DEFAULT_FCRTL;
1205 1253 hw->fc.pause_time = DEFAULT_FCPAUSE;
1206 1254 hw->fc.send_xon = B_TRUE;
1207 1255
1208 1256 /*
1209 1257 * Initialize link settings
1210 1258 */
1211 1259 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1212 1260
1213 1261 /*
1214 1262 * Initialize the chipset hardware
1215 1263 */
1216 1264 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1217 1265 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1218 1266 goto init_fail;
1219 1267 }
1220 1268
1221 1269 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1222 1270 goto init_fail;
1223 1271 }
1224 1272
1225 1273 mutex_exit(&ixgbe->gen_lock);
1226 1274 return (IXGBE_SUCCESS);
1227 1275
1228 1276 init_fail:
1229 1277 /*
1230 1278 * Reset PHY
1231 1279 */
1232 1280 (void) ixgbe_reset_phy(hw);
1233 1281
1234 1282 mutex_exit(&ixgbe->gen_lock);
1235 1283 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1236 1284 return (IXGBE_FAILURE);
1237 1285 }
1238 1286
/*
 * ixgbe_chip_start - Initialize and start the chipset hardware.
 *
 * Must be called with gen_lock held.  Performs, in order: MAC address
 * discovery and validation, shared-code hardware init, relaxed-ordering
 * re-enable, interrupt vector setup, unicast/multicast address setup,
 * interrupt throttling programming, PHY state capture, and finally
 * takes driver control from firmware.
 *
 * Returns IXGBE_SUCCESS or IXGBE_FAILURE.
 */
static int
ixgbe_chip_start(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	int ret_val, i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Get the mac address
	 * This function should handle SPARC case correctly.
	 */
	if (!ixgbe_find_mac_address(ixgbe)) {
		ixgbe_error(ixgbe, "Failed to get the mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Validate the mac address
	 */
	(void) ixgbe_init_rx_addrs(hw);
	if (!is_valid_mac_addr(hw->mac.addr)) {
		ixgbe_error(ixgbe, "Invalid mac address");
		return (IXGBE_FAILURE);
	}

	/*
	 * Configure/Initialize hardware
	 */
	ret_val = ixgbe_init_hw(hw);
	if (ret_val != IXGBE_SUCCESS) {
		if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
			/*
			 * Pre-release firmware only earns a warning;
			 * initialization deliberately continues here
			 * (no return) rather than failing the start.
			 */
			ixgbe_error(ixgbe,
			    "This 82599 device is pre-release and contains"
			    " outdated firmware, please contact your hardware"
			    " vendor for a replacement.");
		} else {
			ixgbe_error(ixgbe, "Failed to initialize hardware");
			return (IXGBE_FAILURE);
		}
	}

	/*
	 * Re-enable relaxed ordering for performance.  It is disabled
	 * by default in the hardware init.
	 */
	if (ixgbe->relax_order_enable == B_TRUE)
		ixgbe_enable_relaxed_ordering(hw);

	/*
	 * Setup adapter interrupt vectors
	 */
	ixgbe_setup_adapter_vector(ixgbe);

	/*
	 * Initialize unicast addresses.
	 */
	ixgbe_init_unicst(ixgbe);

	/*
	 * Setup and initialize the mctable structures.
	 */
	ixgbe_setup_multicst(ixgbe);

	/*
	 * Set interrupt throttling rate: one EITR register per allocated
	 * interrupt vector.
	 */
	for (i = 0; i < ixgbe->intr_cnt; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
	}

	/*
	 * Save the state of the phy
	 */
	ixgbe_get_hw_state(ixgbe);

	/*
	 * Make sure driver has control
	 */
	ixgbe_get_driver_control(hw);

	return (IXGBE_SUCCESS);
}
1325 1373
1326 1374 /*
1327 1375 * ixgbe_chip_stop - Stop the chipset hardware
1328 1376 */
1329 1377 static void
1330 1378 ixgbe_chip_stop(ixgbe_t *ixgbe)
1331 1379 {
1332 1380 struct ixgbe_hw *hw = &ixgbe->hw;
1333 1381
1334 1382 ASSERT(mutex_owned(&ixgbe->gen_lock));
1335 1383
1336 1384 /*
1337 1385 * Tell firmware driver is no longer in control
1338 1386 */
1339 1387 ixgbe_release_driver_control(hw);
1340 1388
1341 1389 /*
1342 1390 * Reset the chipset
1343 1391 */
1344 1392 (void) ixgbe_reset_hw(hw);
1345 1393
1346 1394 /*
1347 1395 * Reset PHY
1348 1396 */
1349 1397 (void) ixgbe_reset_phy(hw);
1350 1398 }
1351 1399
1352 1400 /*
1353 1401 * ixgbe_reset - Reset the chipset and re-start the driver.
1354 1402 *
1355 1403 * It involves stopping and re-starting the chipset,
1356 1404 * and re-configuring the rx/tx rings.
1357 1405 */
1358 1406 static int
1359 1407 ixgbe_reset(ixgbe_t *ixgbe)
1360 1408 {
1361 1409 int i;
1362 1410
1363 1411 /*
1364 1412 * Disable and stop the watchdog timer
1365 1413 */
1366 1414 ixgbe_disable_watchdog_timer(ixgbe);
1367 1415
1368 1416 mutex_enter(&ixgbe->gen_lock);
1369 1417
1370 1418 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1371 1419 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
1372 1420
1373 1421 ixgbe_stop(ixgbe, B_FALSE);
1374 1422
1375 1423 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1376 1424 mutex_exit(&ixgbe->gen_lock);
1377 1425 return (IXGBE_FAILURE);
1378 1426 }
1379 1427
1380 1428 /*
1381 1429 * After resetting, need to recheck the link status.
1382 1430 */
1383 1431 ixgbe->link_check_complete = B_FALSE;
1384 1432 ixgbe->link_check_hrtime = gethrtime() +
1385 1433 (IXGBE_LINK_UP_TIME * 100000000ULL);
1386 1434
1387 1435 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);
1388 1436
1389 1437 if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
1390 1438 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1391 1439 mac_tx_ring_update(ixgbe->mac_hdl,
1392 1440 ixgbe->tx_rings[i].ring_handle);
1393 1441 }
1394 1442 }
1395 1443
1396 1444 mutex_exit(&ixgbe->gen_lock);
1397 1445
1398 1446 /*
1399 1447 * Enable and start the watchdog timer
1400 1448 */
1401 1449 ixgbe_enable_watchdog_timer(ixgbe);
1402 1450
1403 1451 return (IXGBE_SUCCESS);
1404 1452 }
1405 1453
/*
 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
 *
 * Walks every tx ring's work_list, releases each outstanding tx control
 * block (tcb), resets the ring's descriptor head/tail bookkeeping and
 * hardware TDH/TDT registers, and returns the tcbs to the ring's free
 * list.  The chipset must already be stopped (or the link down) when
 * this runs.
 */
static void
ixgbe_tx_clean(ixgbe_t *ixgbe)
{
	ixgbe_tx_ring_t *tx_ring;
	tx_control_block_t *tcb;
	link_list_t pending_list;
	uint32_t desc_num;
	int i, j;

	/*
	 * Single scratch list, reused per ring; ixgbe_put_free_list()
	 * presumably drains it each iteration — confirm in its definition.
	 */
	LINK_LIST_INIT(&pending_list);

	for (i = 0; i < ixgbe->num_tx_rings; i++) {
		tx_ring = &ixgbe->tx_rings[i];

		mutex_enter(&tx_ring->recycle_lock);

		/*
		 * Clean the pending tx data - the pending packets in the
		 * work_list that have no chances to be transmitted again.
		 *
		 * We must ensure the chipset is stopped or the link is down
		 * before cleaning the transmit packets.
		 */
		desc_num = 0;
		for (j = 0; j < tx_ring->ring_size; j++) {
			tcb = tx_ring->work_list[j];
			if (tcb != NULL) {
				desc_num += tcb->desc_num;

				tx_ring->work_list[j] = NULL;

				/*
				 * ixgbe_free_tcb() releases the packet
				 * resources the tcb holds; the tcb struct
				 * itself stays valid — it is queued on
				 * pending_list right after and recycled.
				 */
				ixgbe_free_tcb(tcb);

				LIST_PUSH_TAIL(&pending_list, &tcb->link);
			}
		}

		if (desc_num > 0) {
			/* Return the reclaimed descriptors to the ring */
			atomic_add_32(&tx_ring->tbd_free, desc_num);
			ASSERT(tx_ring->tbd_free == tx_ring->ring_size);

			/*
			 * Reset the head and tail pointers of the tbd ring;
			 * Reset the writeback head if it's enable.
			 */
			tx_ring->tbd_head = 0;
			tx_ring->tbd_tail = 0;
			if (ixgbe->tx_head_wb_enable)
				*tx_ring->tbd_head_wb = 0;

			/* Mirror the reset into the hardware registers */
			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDH(tx_ring->index), 0);
			IXGBE_WRITE_REG(&ixgbe->hw,
			    IXGBE_TDT(tx_ring->index), 0);
		}

		mutex_exit(&tx_ring->recycle_lock);

		/*
		 * Add the tx control blocks in the pending list to
		 * the free list.
		 */
		ixgbe_put_free_list(tx_ring, &pending_list);
	}
}
1474 1522
1475 1523 /*
1476 1524 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1477 1525 * transmitted.
1478 1526 */
1479 1527 static boolean_t
1480 1528 ixgbe_tx_drain(ixgbe_t *ixgbe)
1481 1529 {
1482 1530 ixgbe_tx_ring_t *tx_ring;
1483 1531 boolean_t done;
1484 1532 int i, j;
1485 1533
1486 1534 /*
1487 1535 * Wait for a specific time to allow pending tx packets
1488 1536 * to be transmitted.
1489 1537 *
1490 1538 * Check the counter tbd_free to see if transmission is done.
1491 1539 * No lock protection is needed here.
1492 1540 *
1493 1541 * Return B_TRUE if all pending packets have been transmitted;
1494 1542 * Otherwise return B_FALSE;
1495 1543 */
1496 1544 for (i = 0; i < TX_DRAIN_TIME; i++) {
1497 1545
1498 1546 done = B_TRUE;
1499 1547 for (j = 0; j < ixgbe->num_tx_rings; j++) {
1500 1548 tx_ring = &ixgbe->tx_rings[j];
1501 1549 done = done &&
1502 1550 (tx_ring->tbd_free == tx_ring->ring_size);
1503 1551 }
1504 1552
1505 1553 if (done)
1506 1554 break;
1507 1555
1508 1556 msec_delay(1);
1509 1557 }
1510 1558
1511 1559 return (done);
1512 1560 }
1513 1561
1514 1562 /*
1515 1563 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1516 1564 */
1517 1565 static boolean_t
1518 1566 ixgbe_rx_drain(ixgbe_t *ixgbe)
1519 1567 {
1520 1568 boolean_t done = B_TRUE;
1521 1569 int i;
1522 1570
1523 1571 /*
1524 1572 * Polling the rx free list to check if those rx buffers held by
1525 1573 * the upper layer are released.
1526 1574 *
1527 1575 * Check the counter rcb_free to see if all pending buffers are
1528 1576 * released. No lock protection is needed here.
1529 1577 *
1530 1578 * Return B_TRUE if all pending buffers have been released;
1531 1579 * Otherwise return B_FALSE;
1532 1580 */
1533 1581 for (i = 0; i < RX_DRAIN_TIME; i++) {
1534 1582 done = (ixgbe->rcb_pending == 0);
1535 1583
1536 1584 if (done)
1537 1585 break;
1538 1586
1539 1587 msec_delay(1);
1540 1588 }
1541 1589
1542 1590 return (done);
1543 1591 }
1544 1592
/*
 * ixgbe_start - Start the driver/chipset.
 *
 * Must be called with gen_lock held.  When alloc_buffer is B_TRUE the
 * software rx ring data and rx/tx DMA buffers are (re)allocated first.
 * Takes every rx lock then every tx lock (ascending ring order) around
 * chipset start and ring setup, releasing them in reverse order.
 *
 * Returns IXGBE_SUCCESS or IXGBE_FAILURE; on failure a DDI_SERVICE_LOST
 * FMA impact is reported.
 */
int
ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	if (alloc_buffer) {
		if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe,
			    "Failed to allocate software receive rings");
			return (IXGBE_FAILURE);
		}

		/* Allocate buffers for all the rx/tx rings */
		if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Failed to allocate DMA resource");
			return (IXGBE_FAILURE);
		}

		ixgbe->tx_ring_init = B_TRUE;
	} else {
		ixgbe->tx_ring_init = B_FALSE;
	}

	/*
	 * Lock order: all rx ring locks first, then all tx ring locks,
	 * each in ascending ring index order.
	 */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Start the chipset hardware
	 */
	if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
		ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
		goto start_failure;
	}

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		goto start_failure;
	}

	/*
	 * Setup the rx/tx rings
	 */
	ixgbe_setup_rings(ixgbe);

	/*
	 * ixgbe_start() will be called when resetting, however if reset
	 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
	 * before enabling the interrupts.
	 */
	atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
	    | IXGBE_STALL| IXGBE_OVERTEMP));

	/*
	 * Enable adapter interrupts
	 * The interrupts must be enabled after the driver state is START
	 */
	ixgbe_enable_adapter_interrupts(ixgbe);

	/* Release ring locks in reverse acquisition order */
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	return (IXGBE_SUCCESS);

start_failure:
	/* Release ring locks in reverse acquisition order */
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);

	return (IXGBE_FAILURE);
}
1626 1674
/*
 * ixgbe_stop - Stop the driver/chipset.
 *
 * Must be called with gen_lock held.  Disables interrupts, drains pending
 * transmits, stops the chipset under all ring locks (same lock order as
 * ixgbe_start: rx then tx, ascending), cleans residual tx resources, and
 * reports the link as unknown.  When free_buffer is B_TRUE the rx/tx DMA
 * and software rx ring resources are released as well.
 */
void
ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
{
	int i;

	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * Disable the adapter interrupts
	 */
	ixgbe_disable_adapter_interrupts(ixgbe);

	/*
	 * Drain the pending tx packets
	 */
	(void) ixgbe_tx_drain(ixgbe);

	/* Lock order mirrors ixgbe_start(): rx locks, then tx locks */
	for (i = 0; i < ixgbe->num_rx_rings; i++)
		mutex_enter(&ixgbe->rx_rings[i].rx_lock);
	for (i = 0; i < ixgbe->num_tx_rings; i++)
		mutex_enter(&ixgbe->tx_rings[i].tx_lock);

	/*
	 * Stop the chipset hardware
	 */
	ixgbe_chip_stop(ixgbe);

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
	}

	/*
	 * Clean the pending tx data/resources
	 */
	ixgbe_tx_clean(ixgbe);

	/* Release ring locks in reverse acquisition order */
	for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->tx_rings[i].tx_lock);
	for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
		mutex_exit(&ixgbe->rx_rings[i].rx_lock);

	/* Report link loss to the MAC layer */
	if (ixgbe->link_state == LINK_STATE_UP) {
		ixgbe->link_state = LINK_STATE_UNKNOWN;
		mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
	}

	if (free_buffer) {
		/*
		 * Release the DMA/memory resources of rx/tx rings
		 */
		ixgbe_free_dma(ixgbe);
		ixgbe_free_rx_data(ixgbe);
	}
}
1684 1732
1685 1733 /*
1686 1734 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
1687 1735 */
1688 1736 /* ARGSUSED */
1689 1737 static int
1690 1738 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
1691 1739 void *arg1, void *arg2)
1692 1740 {
1693 1741 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
1694 1742
1695 1743 switch (cbaction) {
1696 1744 /* IRM callback */
1697 1745 int count;
1698 1746 case DDI_CB_INTR_ADD:
1699 1747 case DDI_CB_INTR_REMOVE:
1700 1748 count = (int)(uintptr_t)cbarg;
1701 1749 ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
1702 1750 DTRACE_PROBE2(ixgbe__irm__callback, int, count,
1703 1751 int, ixgbe->intr_cnt);
1704 1752 if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
1705 1753 DDI_SUCCESS) {
1706 1754 ixgbe_error(ixgbe,
1707 1755 "IRM CB: Failed to adjust interrupts");
1708 1756 goto cb_fail;
1709 1757 }
1710 1758 break;
1711 1759 default:
1712 1760 IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
1713 1761 cbaction);
1714 1762 return (DDI_ENOTSUP);
1715 1763 }
1716 1764 return (DDI_SUCCESS);
1717 1765 cb_fail:
1718 1766 return (DDI_FAILURE);
1719 1767 }
1720 1768
1721 1769 /*
1722 1770 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
1723 1771 */
1724 1772 static int
1725 1773 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
1726 1774 {
1727 1775 int i, rc, actual;
1728 1776
1729 1777 if (count == 0)
1730 1778 return (DDI_SUCCESS);
1731 1779
1732 1780 if ((cbaction == DDI_CB_INTR_ADD &&
1733 1781 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
1734 1782 (cbaction == DDI_CB_INTR_REMOVE &&
1735 1783 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
1736 1784 return (DDI_FAILURE);
1737 1785
1738 1786 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1739 1787 return (DDI_FAILURE);
1740 1788 }
1741 1789
1742 1790 for (i = 0; i < ixgbe->num_rx_rings; i++)
1743 1791 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
1744 1792 for (i = 0; i < ixgbe->num_tx_rings; i++)
1745 1793 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
1746 1794
1747 1795 mutex_enter(&ixgbe->gen_lock);
1748 1796 ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1749 1797 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
1750 1798 ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
1751 1799 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
1752 1800
1753 1801 ixgbe_stop(ixgbe, B_FALSE);
1754 1802 /*
1755 1803 * Disable interrupts
1756 1804 */
1757 1805 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1758 1806 rc = ixgbe_disable_intrs(ixgbe);
1759 1807 ASSERT(rc == IXGBE_SUCCESS);
1760 1808 }
1761 1809 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
1762 1810
1763 1811 /*
1764 1812 * Remove interrupt handlers
1765 1813 */
1766 1814 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1767 1815 ixgbe_rem_intr_handlers(ixgbe);
1768 1816 }
1769 1817 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
1770 1818
1771 1819 /*
1772 1820 * Clear vect_map
1773 1821 */
1774 1822 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
1775 1823 switch (cbaction) {
1776 1824 case DDI_CB_INTR_ADD:
1777 1825 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
1778 1826 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
1779 1827 DDI_INTR_ALLOC_NORMAL);
1780 1828 if (rc != DDI_SUCCESS || actual != count) {
1781 1829 ixgbe_log(ixgbe, "Adjust interrupts failed."
1782 1830 "return: %d, irm cb size: %d, actual: %d",
1783 1831 rc, count, actual);
1784 1832 goto intr_adjust_fail;
1785 1833 }
1786 1834 ixgbe->intr_cnt += count;
1787 1835 break;
1788 1836
1789 1837 case DDI_CB_INTR_REMOVE:
1790 1838 for (i = ixgbe->intr_cnt - count;
1791 1839 i < ixgbe->intr_cnt; i ++) {
1792 1840 rc = ddi_intr_free(ixgbe->htable[i]);
1793 1841 ixgbe->htable[i] = NULL;
1794 1842 if (rc != DDI_SUCCESS) {
1795 1843 ixgbe_log(ixgbe, "Adjust interrupts failed."
1796 1844 "return: %d, irm cb size: %d, actual: %d",
1797 1845 rc, count, actual);
1798 1846 goto intr_adjust_fail;
1799 1847 }
1800 1848 }
1801 1849 ixgbe->intr_cnt -= count;
1802 1850 break;
1803 1851 }
1804 1852
1805 1853 /*
1806 1854 * Get priority for first vector, assume remaining are all the same
1807 1855 */
1808 1856 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
1809 1857 if (rc != DDI_SUCCESS) {
1810 1858 ixgbe_log(ixgbe,
1811 1859 "Get interrupt priority failed: %d", rc);
1812 1860 goto intr_adjust_fail;
1813 1861 }
1814 1862 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
1815 1863 if (rc != DDI_SUCCESS) {
1816 1864 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
1817 1865 goto intr_adjust_fail;
1818 1866 }
1819 1867 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
1820 1868
1821 1869 /*
1822 1870 * Map rings to interrupt vectors
1823 1871 */
1824 1872 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
1825 1873 ixgbe_error(ixgbe,
1826 1874 "IRM CB: Failed to map interrupts to vectors");
1827 1875 goto intr_adjust_fail;
1828 1876 }
1829 1877
1830 1878 /*
1831 1879 * Add interrupt handlers
1832 1880 */
1833 1881 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
1834 1882 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
1835 1883 goto intr_adjust_fail;
1836 1884 }
1837 1885 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
1838 1886
1839 1887 /*
1840 1888 * Now that mutex locks are initialized, and the chip is also
1841 1889 * initialized, enable interrupts.
1842 1890 */
1843 1891 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
1844 1892 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
1845 1893 goto intr_adjust_fail;
1846 1894 }
1847 1895 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
1848 1896 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1849 1897 ixgbe_error(ixgbe, "IRM CB: Failed to start");
1850 1898 goto intr_adjust_fail;
1851 1899 }
1852 1900 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
1853 1901 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
1854 1902 ixgbe->ixgbe_state |= IXGBE_STARTED;
1855 1903 mutex_exit(&ixgbe->gen_lock);
1856 1904
1857 1905 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1858 1906 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
1859 1907 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
1860 1908 }
1861 1909 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1862 1910 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
1863 1911 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
1864 1912 }
1865 1913
1866 1914 /* Wakeup all Tx rings */
1867 1915 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1868 1916 mac_tx_ring_update(ixgbe->mac_hdl,
1869 1917 ixgbe->tx_rings[i].ring_handle);
1870 1918 }
1871 1919
1872 1920 IXGBE_DEBUGLOG_3(ixgbe,
1873 1921 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
1874 1922 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
1875 1923 return (DDI_SUCCESS);
1876 1924
1877 1925 intr_adjust_fail:
1878 1926 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1879 1927 mutex_exit(&ixgbe->gen_lock);
1880 1928 return (DDI_FAILURE);
1881 1929 }
1882 1930
1883 1931 /*
1884 1932 * ixgbe_intr_cb_register - Register interrupt callback function.
1885 1933 */
1886 1934 static int
1887 1935 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
1888 1936 {
1889 1937 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
1890 1938 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
1891 1939 return (IXGBE_FAILURE);
1892 1940 }
1893 1941 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
1894 1942 return (IXGBE_SUCCESS);
1895 1943 }
1896 1944
1897 1945 /*
1898 1946 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1899 1947 */
1900 1948 static int
1901 1949 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1902 1950 {
1903 1951 /*
1904 1952 * Allocate memory space for rx rings
1905 1953 */
1906 1954 ixgbe->rx_rings = kmem_zalloc(
1907 1955 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1908 1956 KM_NOSLEEP);
1909 1957
1910 1958 if (ixgbe->rx_rings == NULL) {
1911 1959 return (IXGBE_FAILURE);
1912 1960 }
1913 1961
1914 1962 /*
1915 1963 * Allocate memory space for tx rings
1916 1964 */
1917 1965 ixgbe->tx_rings = kmem_zalloc(
1918 1966 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1919 1967 KM_NOSLEEP);
1920 1968
1921 1969 if (ixgbe->tx_rings == NULL) {
1922 1970 kmem_free(ixgbe->rx_rings,
1923 1971 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1924 1972 ixgbe->rx_rings = NULL;
1925 1973 return (IXGBE_FAILURE);
1926 1974 }
1927 1975
1928 1976 /*
1929 1977 * Allocate memory space for rx ring groups
1930 1978 */
1931 1979 ixgbe->rx_groups = kmem_zalloc(
1932 1980 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1933 1981 KM_NOSLEEP);
1934 1982
1935 1983 if (ixgbe->rx_groups == NULL) {
1936 1984 kmem_free(ixgbe->rx_rings,
1937 1985 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1938 1986 kmem_free(ixgbe->tx_rings,
1939 1987 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1940 1988 ixgbe->rx_rings = NULL;
1941 1989 ixgbe->tx_rings = NULL;
1942 1990 return (IXGBE_FAILURE);
1943 1991 }
1944 1992
1945 1993 return (IXGBE_SUCCESS);
1946 1994 }
1947 1995
1948 1996 /*
1949 1997 * ixgbe_free_rings - Free the memory space of rx/tx rings.
1950 1998 */
1951 1999 static void
1952 2000 ixgbe_free_rings(ixgbe_t *ixgbe)
1953 2001 {
1954 2002 if (ixgbe->rx_rings != NULL) {
1955 2003 kmem_free(ixgbe->rx_rings,
1956 2004 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1957 2005 ixgbe->rx_rings = NULL;
1958 2006 }
1959 2007
1960 2008 if (ixgbe->tx_rings != NULL) {
1961 2009 kmem_free(ixgbe->tx_rings,
1962 2010 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1963 2011 ixgbe->tx_rings = NULL;
1964 2012 }
1965 2013
1966 2014 if (ixgbe->rx_groups != NULL) {
1967 2015 kmem_free(ixgbe->rx_groups,
1968 2016 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
1969 2017 ixgbe->rx_groups = NULL;
1970 2018 }
1971 2019 }
1972 2020
1973 2021 static int
1974 2022 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
1975 2023 {
1976 2024 ixgbe_rx_ring_t *rx_ring;
1977 2025 int i;
1978 2026
1979 2027 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1980 2028 rx_ring = &ixgbe->rx_rings[i];
1981 2029 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
1982 2030 goto alloc_rx_rings_failure;
1983 2031 }
1984 2032 return (IXGBE_SUCCESS);
1985 2033
1986 2034 alloc_rx_rings_failure:
1987 2035 ixgbe_free_rx_data(ixgbe);
1988 2036 return (IXGBE_FAILURE);
1989 2037 }
1990 2038
1991 2039 static void
1992 2040 ixgbe_free_rx_data(ixgbe_t *ixgbe)
1993 2041 {
1994 2042 ixgbe_rx_ring_t *rx_ring;
1995 2043 ixgbe_rx_data_t *rx_data;
1996 2044 int i;
1997 2045
1998 2046 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1999 2047 rx_ring = &ixgbe->rx_rings[i];
2000 2048
2001 2049 mutex_enter(&ixgbe->rx_pending_lock);
2002 2050 rx_data = rx_ring->rx_data;
2003 2051
2004 2052 if (rx_data != NULL) {
2005 2053 rx_data->flag |= IXGBE_RX_STOPPED;
2006 2054
2007 2055 if (rx_data->rcb_pending == 0) {
2008 2056 ixgbe_free_rx_ring_data(rx_data);
2009 2057 rx_ring->rx_data = NULL;
2010 2058 }
2011 2059 }
2012 2060
2013 2061 mutex_exit(&ixgbe->rx_pending_lock);
2014 2062 }
2015 2063 }
2016 2064
2017 2065 /*
2018 2066 * ixgbe_setup_rings - Setup rx/tx rings.
2019 2067 */
2020 2068 static void
2021 2069 ixgbe_setup_rings(ixgbe_t *ixgbe)
2022 2070 {
2023 2071 /*
2024 2072 * Setup the rx/tx rings, including the following:
2025 2073 *
2026 2074 * 1. Setup the descriptor ring and the control block buffers;
2027 2075 * 2. Initialize necessary registers for receive/transmit;
2028 2076 * 3. Initialize software pointers/parameters for receive/transmit;
2029 2077 */
2030 2078 ixgbe_setup_rx(ixgbe);
2031 2079
2032 2080 ixgbe_setup_tx(ixgbe);
2033 2081 }
2034 2082
2035 2083 static void
2036 2084 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2037 2085 {
2038 2086 ixgbe_t *ixgbe = rx_ring->ixgbe;
2039 2087 ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2040 2088 struct ixgbe_hw *hw = &ixgbe->hw;
2041 2089 rx_control_block_t *rcb;
2042 2090 union ixgbe_adv_rx_desc *rbd;
2043 2091 uint32_t size;
2044 2092 uint32_t buf_low;
2045 2093 uint32_t buf_high;
2046 2094 uint32_t reg_val;
2047 2095 int i;
2048 2096
2049 2097 ASSERT(mutex_owned(&rx_ring->rx_lock));
2050 2098 ASSERT(mutex_owned(&ixgbe->gen_lock));
2051 2099
2052 2100 for (i = 0; i < ixgbe->rx_ring_size; i++) {
2053 2101 rcb = rx_data->work_list[i];
2054 2102 rbd = &rx_data->rbd_ring[i];
2055 2103
2056 2104 rbd->read.pkt_addr = rcb->rx_buf.dma_address;
2057 2105 rbd->read.hdr_addr = NULL;
2058 2106 }
2059 2107
2060 2108 /*
2061 2109 * Initialize the length register
2062 2110 */
2063 2111 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
2064 2112 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);
2065 2113
2066 2114 /*
2067 2115 * Initialize the base address registers
2068 2116 */
2069 2117 buf_low = (uint32_t)rx_data->rbd_area.dma_address;
2070 2118 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
2071 2119 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
2072 2120 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);
2073 2121
2074 2122 /*
2075 2123 * Setup head & tail pointers
2076 2124 */
2077 2125 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2078 2126 rx_data->ring_size - 1);
2079 2127 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2080 2128
2081 2129 rx_data->rbd_next = 0;
2082 2130 rx_data->lro_first = 0;
2083 2131
2084 2132 /*
2085 2133 * Setup the Receive Descriptor Control Register (RXDCTL)
2086 2134 * PTHRESH=32 descriptors (half the internal cache)
2087 2135 * HTHRESH=0 descriptors (to minimize latency on fetch)
2088 2136 * WTHRESH defaults to 1 (writeback each descriptor)
|
↓ open down ↓ |
874 lines elided |
↑ open up ↑ |
2089 2137 */
2090 2138 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2091 2139 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2092 2140
2093 2141 /* Not a valid value for 82599 */
2094 2142 if (hw->mac.type < ixgbe_mac_82599EB) {
2095 2143 reg_val |= 0x0020; /* pthresh */
2096 2144 }
2097 2145 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2098 2146
2099 - if (hw->mac.type == ixgbe_mac_82599EB) {
2147 + if (hw->mac.type >= ixgbe_mac_82599EB) {
2100 2148 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2101 2149 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2102 2150 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2103 2151 }
2104 2152
2105 2153 /*
2106 2154 * Setup the Split and Replication Receive Control Register.
2107 2155 * Set the rx buffer size and the advanced descriptor type.
2108 2156 */
2109 2157 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2110 2158 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2111 2159 reg_val |= IXGBE_SRRCTL_DROP_EN;
2112 2160 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2113 2161 }
2114 2162
2115 2163 static void
2116 2164 ixgbe_setup_rx(ixgbe_t *ixgbe)
2117 2165 {
2118 2166 ixgbe_rx_ring_t *rx_ring;
2119 2167 struct ixgbe_hw *hw = &ixgbe->hw;
2120 2168 uint32_t reg_val;
2121 2169 uint32_t ring_mapping;
2122 2170 uint32_t i, index;
2123 2171 uint32_t psrtype_rss_bit;
2124 2172
2125 2173 /* PSRTYPE must be configured for 82599 */
2126 2174 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2127 2175 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2128 2176 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2129 2177 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2130 2178 reg_val |= IXGBE_PSRTYPE_L2HDR;
2131 2179 reg_val |= 0x80000000;
2132 2180 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2133 2181 } else {
2134 2182 if (ixgbe->num_rx_groups > 32) {
2135 2183 psrtype_rss_bit = 0x20000000;
2136 2184 } else {
2137 2185 psrtype_rss_bit = 0x40000000;
2138 2186 }
2139 2187 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
2140 2188 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2141 2189 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2142 2190 reg_val |= IXGBE_PSRTYPE_L2HDR;
2143 2191 reg_val |= psrtype_rss_bit;
2144 2192 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
2145 2193 }
2146 2194 }
2147 2195
2148 2196 /*
2149 2197 * Set filter control in FCTRL to accept broadcast packets and do
2150 2198 * not pass pause frames to host. Flow control settings are already
2151 2199 * in this register, so preserve them.
2152 2200 */
2153 2201 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2154 2202 reg_val |= IXGBE_FCTRL_BAM; /* broadcast accept mode */
2155 2203 reg_val |= IXGBE_FCTRL_DPF; /* discard pause frames */
2156 2204 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
2157 2205
2158 2206 /*
2159 2207 * Hardware checksum settings
2160 2208 */
2161 2209 if (ixgbe->rx_hcksum_enable) {
2162 2210 reg_val = IXGBE_RXCSUM_IPPCSE; /* IP checksum */
2163 2211 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
2164 2212 }
2165 2213
2166 2214 /*
2167 2215 * Setup VMDq and RSS for multiple receive queues
2168 2216 */
2169 2217 switch (ixgbe->classify_mode) {
2170 2218 case IXGBE_CLASSIFY_RSS:
2171 2219 /*
2172 2220 * One group, only RSS is needed when more than
2173 2221 * one ring enabled.
2174 2222 */
2175 2223 ixgbe_setup_rss(ixgbe);
2176 2224 break;
2177 2225
2178 2226 case IXGBE_CLASSIFY_VMDQ:
2179 2227 /*
2180 2228 * Multiple groups, each group has one ring,
2181 2229 * only VMDq is needed.
2182 2230 */
2183 2231 ixgbe_setup_vmdq(ixgbe);
2184 2232 break;
2185 2233
2186 2234 case IXGBE_CLASSIFY_VMDQ_RSS:
2187 2235 /*
2188 2236 * Multiple groups and multiple rings, both
2189 2237 * VMDq and RSS are needed.
2190 2238 */
2191 2239 ixgbe_setup_vmdq_rss(ixgbe);
2192 2240 break;
2193 2241
2194 2242 default:
2195 2243 break;
2196 2244 }
2197 2245
2198 2246 /*
2199 2247 * Enable the receive unit. This must be done after filter
2200 2248 * control is set in FCTRL.
2201 2249 */
2202 2250 reg_val = (IXGBE_RXCTRL_RXEN /* Enable Receive Unit */
2203 2251 | IXGBE_RXCTRL_DMBYPS); /* descriptor monitor bypass */
2204 2252 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
2205 2253
2206 2254 /*
2207 2255 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2208 2256 */
2209 2257 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2210 2258 rx_ring = &ixgbe->rx_rings[i];
2211 2259 ixgbe_setup_rx_ring(rx_ring);
2212 2260 }
2213 2261
2214 2262 /*
2215 2263 * Setup the per-ring statistics mapping.
2216 2264 */
2217 2265 ring_mapping = 0;
2218 2266 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2219 2267 index = ixgbe->rx_rings[i].hw_index;
2220 2268 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2221 2269 ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2222 2270 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2223 2271 }
2224 2272
2225 2273 /*
2226 2274 * The Max Frame Size in MHADD/MAXFRS will be internally increased
2227 2275 * by four bytes if the packet has a VLAN field, so includes MTU,
2228 2276 * ethernet header and frame check sequence.
2229 2277 * Register is MAXFRS in 82599.
2230 2278 */
2231 2279 reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
2232 2280 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2233 2281 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
2234 2282
2235 2283 /*
2236 2284 * Setup Jumbo Frame enable bit
2237 2285 */
2238 2286 if (ixgbe->default_mtu > ETHERMTU) {
2239 2287 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2240 2288 reg_val |= IXGBE_HLREG0_JUMBOEN;
2241 2289 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2242 2290 }
2243 2291
2244 2292 /*
2245 2293 * Setup RSC for multiple receive queues.
2246 2294 */
2247 2295 if (ixgbe->lro_enable) {
2248 2296 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2249 2297 /*
2250 2298 * Make sure rx_buf_size * MAXDESC not greater
2251 2299 * than 65535.
2252 2300 * Intel recommends 4 for MAXDESC field value.
2253 2301 */
2254 2302 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2255 2303 reg_val |= IXGBE_RSCCTL_RSCEN;
2256 2304 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2257 2305 reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2258 2306 else
2259 2307 reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2260 2308 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
2261 2309 }
2262 2310
2263 2311 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2264 2312 reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2265 2313 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2266 2314
2267 2315 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2268 2316 reg_val |= IXGBE_RDRXCTL_RSCACKC;
2269 2317 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2270 2318 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2271 2319
2272 2320 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2273 2321 }
2274 2322 }
2275 2323
2276 2324 static void
2277 2325 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2278 2326 {
2279 2327 ixgbe_t *ixgbe = tx_ring->ixgbe;
2280 2328 struct ixgbe_hw *hw = &ixgbe->hw;
2281 2329 uint32_t size;
2282 2330 uint32_t buf_low;
2283 2331 uint32_t buf_high;
2284 2332 uint32_t reg_val;
2285 2333
2286 2334 ASSERT(mutex_owned(&tx_ring->tx_lock));
2287 2335 ASSERT(mutex_owned(&ixgbe->gen_lock));
2288 2336
2289 2337 /*
2290 2338 * Initialize the length register
2291 2339 */
2292 2340 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2293 2341 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
2294 2342
2295 2343 /*
2296 2344 * Initialize the base address registers
2297 2345 */
2298 2346 buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
2299 2347 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
2300 2348 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
2301 2349 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
2302 2350
2303 2351 /*
2304 2352 * Setup head & tail pointers
2305 2353 */
2306 2354 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
2307 2355 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
2308 2356
2309 2357 /*
2310 2358 * Setup head write-back
2311 2359 */
2312 2360 if (ixgbe->tx_head_wb_enable) {
2313 2361 /*
2314 2362 * The memory of the head write-back is allocated using
2315 2363 * the extra tbd beyond the tail of the tbd ring.
2316 2364 */
2317 2365 tx_ring->tbd_head_wb = (uint32_t *)
2318 2366 ((uintptr_t)tx_ring->tbd_area.address + size);
2319 2367 *tx_ring->tbd_head_wb = 0;
2320 2368
2321 2369 buf_low = (uint32_t)
2322 2370 (tx_ring->tbd_area.dma_address + size);
2323 2371 buf_high = (uint32_t)
2324 2372 ((tx_ring->tbd_area.dma_address + size) >> 32);
2325 2373
|
↓ open down ↓ |
216 lines elided |
↑ open up ↑ |
2326 2374 /* Set the head write-back enable bit */
2327 2375 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2328 2376
2329 2377 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2330 2378 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2331 2379
2332 2380 /*
2333 2381 * Turn off relaxed ordering for head write back or it will
2334 2382 * cause problems with the tx recycling
2335 2383 */
2384 +#if 0
2385 + /* XXX KEBE ASKS --> Should we do what FreeBSD does? */
2386 + reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2387 + IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2388 + IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2389 + reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2390 + if (hw->mac.type == ixgbe_mac_82598EB) {
2391 + IXGBE_WRITE_REG(hw,
2392 + IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2393 + } else {
2394 + IXGBE_WRITE_REG(hw,
2395 + IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2396 + }
2397 +#else
2398 + /* XXX KEBE ASKS --> Or should we do what we've always done? */
2336 2399 reg_val = IXGBE_READ_REG(hw,
2337 2400 IXGBE_DCA_TXCTRL(tx_ring->index));
2338 - reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2401 + reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2339 2402 IXGBE_WRITE_REG(hw,
2340 2403 IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2404 +#endif
2341 2405 } else {
2342 2406 tx_ring->tbd_head_wb = NULL;
2407 +#if 0
2408 + /*
2409 + * XXX KEBE ASKS --> Should we do what FreeBSD does and
2410 + * twiddle TXCTRL_DESC_WR0_EN off anyway?
2411 + */
2412 + reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2413 + IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2414 + IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2415 + reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2416 + if (hw->mac.type == ixgbe_mac_82598EB) {
2417 + IXGBE_WRITE_REG(hw,
2418 + IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2419 + } else {
2420 + IXGBE_WRITE_REG(hw,
2421 + IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2422 + }
2423 +#endif
2343 2424 }
2344 2425
2345 2426 tx_ring->tbd_head = 0;
2346 2427 tx_ring->tbd_tail = 0;
2347 2428 tx_ring->tbd_free = tx_ring->ring_size;
2348 2429
2349 2430 if (ixgbe->tx_ring_init == B_TRUE) {
2350 2431 tx_ring->tcb_head = 0;
2351 2432 tx_ring->tcb_tail = 0;
2352 2433 tx_ring->tcb_free = tx_ring->free_list_size;
2353 2434 }
2354 2435
2355 2436 /*
2356 2437 * Initialize the s/w context structure
2357 2438 */
2358 2439 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2359 2440 }
2360 2441
2361 2442 static void
2362 2443 ixgbe_setup_tx(ixgbe_t *ixgbe)
2363 2444 {
2364 2445 struct ixgbe_hw *hw = &ixgbe->hw;
2365 2446 ixgbe_tx_ring_t *tx_ring;
2366 2447 uint32_t reg_val;
2367 2448 uint32_t ring_mapping;
2368 2449 int i;
2369 2450
2370 2451 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2371 2452 tx_ring = &ixgbe->tx_rings[i];
2372 2453 ixgbe_setup_tx_ring(tx_ring);
2373 2454 }
2374 2455
2375 2456 /*
2376 2457 * Setup the per-ring statistics mapping.
2377 2458 */
2378 2459 ring_mapping = 0;
|
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
2379 2460 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2380 2461 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2381 2462 if ((i & 0x3) == 0x3) {
2382 2463 switch (hw->mac.type) {
2383 2464 case ixgbe_mac_82598EB:
2384 2465 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2385 2466 ring_mapping);
2386 2467 break;
2387 2468
2388 2469 case ixgbe_mac_82599EB:
2470 + case ixgbe_mac_X540:
2389 2471 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2390 2472 ring_mapping);
2391 2473 break;
2392 2474
2393 2475 default:
2394 2476 break;
2395 2477 }
2396 2478
2397 2479 ring_mapping = 0;
2398 2480 }
2399 2481 }
2400 2482 if (i & 0x3) {
2401 2483 switch (hw->mac.type) {
2402 2484 case ixgbe_mac_82598EB:
2403 2485 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2404 2486 break;
2405 2487
2406 2488 case ixgbe_mac_82599EB:
2489 + case ixgbe_mac_X540:
2407 2490 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2408 2491 break;
2409 2492
2410 2493 default:
2411 2494 break;
2412 2495 }
2413 2496 }
2414 2497
2415 2498 /*
2416 2499 * Enable CRC appending and TX padding (for short tx frames)
2417 2500 */
2418 2501 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2419 2502 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2420 2503 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2421 2504
2422 2505 /*
2423 - * enable DMA for 82599 parts
2506 + * enable DMA for 82599 and X540 parts
2424 2507 */
2425 - if (hw->mac.type == ixgbe_mac_82599EB) {
2508 + if (hw->mac.type >= ixgbe_mac_82599EB) {
2426 2509 /* DMATXCTL.TE must be set after all Tx config is complete */
2427 2510 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2428 2511 reg_val |= IXGBE_DMATXCTL_TE;
2429 2512 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2513 + /* XXX KEBE SAYS - FreeBSD sets up MTQC. Should we? */
2430 2514 }
2431 2515
2432 2516 /*
2433 2517 * Enabling tx queues ..
2434 2518 * For 82599 must be done after DMATXCTL.TE is set
2435 2519 */
2436 2520 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2437 2521 tx_ring = &ixgbe->tx_rings[i];
2438 2522 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2439 2523 reg_val |= IXGBE_TXDCTL_ENABLE;
2440 2524 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2441 2525 }
2442 2526 }
2443 2527
2444 2528 /*
2445 2529 * ixgbe_setup_rss - Setup receive-side scaling feature.
2446 2530 */
2447 2531 static void
2448 2532 ixgbe_setup_rss(ixgbe_t *ixgbe)
2449 2533 {
2450 2534 struct ixgbe_hw *hw = &ixgbe->hw;
2451 2535 uint32_t i, mrqc, rxcsum;
2452 2536 uint32_t random;
2453 2537 uint32_t reta;
2454 2538 uint32_t ring_per_group;
2455 2539
2456 2540 /*
2457 2541 * Fill out redirection table
2458 2542 */
2459 2543 reta = 0;
2460 2544 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2461 2545
2462 2546 for (i = 0; i < 128; i++) {
2463 2547 reta = (reta << 8) | (i % ring_per_group) |
2464 2548 ((i % ring_per_group) << 4);
2465 2549 if ((i & 3) == 3)
2466 2550 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2467 2551 }
2468 2552
2469 2553 /*
2470 2554 * Fill out hash function seeds with a random constant
2471 2555 */
2472 2556 for (i = 0; i < 10; i++) {
2473 2557 (void) random_get_pseudo_bytes((uint8_t *)&random,
2474 2558 sizeof (uint32_t));
2475 2559 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2476 2560 }
2477 2561
2478 2562 /*
2479 2563 * Enable RSS & perform hash on these packet types
2480 2564 */
2481 2565 mrqc = IXGBE_MRQC_RSSEN |
2482 2566 IXGBE_MRQC_RSS_FIELD_IPV4 |
2483 2567 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2484 2568 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2485 2569 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2486 2570 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2487 2571 IXGBE_MRQC_RSS_FIELD_IPV6 |
2488 2572 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2489 2573 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2490 2574 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2491 2575 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2492 2576
2493 2577 /*
2494 2578 * Disable Packet Checksum to enable RSS for multiple receive queues.
2495 2579 * It is an adapter hardware limitation that Packet Checksum is
2496 2580 * mutually exclusive with RSS.
2497 2581 */
2498 2582 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2499 2583 rxcsum |= IXGBE_RXCSUM_PCSD;
2500 2584 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2501 2585 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2502 2586 }
2503 2587
2504 2588 /*
2505 2589 * ixgbe_setup_vmdq - Setup MAC classification feature
2506 2590 */
2507 2591 static void
2508 2592 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2509 2593 {
2510 2594 struct ixgbe_hw *hw = &ixgbe->hw;
2511 2595 uint32_t vmdctl, i, vtctl;
2512 2596
2513 2597 /*
2514 2598 * Setup the VMDq Control register, enable VMDq based on
2515 2599 * packet destination MAC address:
2516 2600 */
2517 2601 switch (hw->mac.type) {
2518 2602 case ixgbe_mac_82598EB:
|
↓ open down ↓ |
79 lines elided |
↑ open up ↑ |
2519 2603 /*
2520 2604 * VMDq Enable = 1;
2521 2605 * VMDq Filter = 0; MAC filtering
2522 2606 * Default VMDq output index = 0;
2523 2607 */
2524 2608 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2525 2609 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2526 2610 break;
2527 2611
2528 2612 case ixgbe_mac_82599EB:
2613 + case ixgbe_mac_X540:
2529 2614 /*
2530 2615 * Enable VMDq-only.
2531 2616 */
2532 2617 vmdctl = IXGBE_MRQC_VMDQEN;
2533 2618 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2534 2619
2535 2620 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2536 2621 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2537 2622 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2538 2623 }
2539 2624
2540 2625 /*
2541 2626 * Enable Virtualization and Replication.
2542 2627 */
2543 2628 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2544 2629 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2545 2630
2546 2631 /*
2547 2632 * Enable receiving packets to all VFs
2548 2633 */
2549 2634 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2550 2635 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2551 2636 break;
2552 2637
2553 2638 default:
2554 2639 break;
2555 2640 }
2556 2641 }
2557 2642
2558 2643 /*
2559 2644 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2560 2645 */
2561 2646 static void
2562 2647 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2563 2648 {
2564 2649 struct ixgbe_hw *hw = &ixgbe->hw;
2565 2650 uint32_t i, mrqc, rxcsum;
2566 2651 uint32_t random;
2567 2652 uint32_t reta;
2568 2653 uint32_t ring_per_group;
2569 2654 uint32_t vmdctl, vtctl;
2570 2655
2571 2656 /*
2572 2657 * Fill out redirection table
2573 2658 */
2574 2659 reta = 0;
2575 2660 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2576 2661 for (i = 0; i < 128; i++) {
2577 2662 reta = (reta << 8) | (i % ring_per_group) |
2578 2663 ((i % ring_per_group) << 4);
2579 2664 if ((i & 3) == 3)
2580 2665 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2581 2666 }
2582 2667
2583 2668 /*
2584 2669 * Fill out hash function seeds with a random constant
2585 2670 */
2586 2671 for (i = 0; i < 10; i++) {
2587 2672 (void) random_get_pseudo_bytes((uint8_t *)&random,
2588 2673 sizeof (uint32_t));
2589 2674 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2590 2675 }
2591 2676
2592 2677 /*
2593 2678 * Enable and setup RSS and VMDq
2594 2679 */
2595 2680 switch (hw->mac.type) {
2596 2681 case ixgbe_mac_82598EB:
2597 2682 /*
2598 2683 * Enable RSS & Setup RSS Hash functions
2599 2684 */
2600 2685 mrqc = IXGBE_MRQC_RSSEN |
2601 2686 IXGBE_MRQC_RSS_FIELD_IPV4 |
2602 2687 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2603 2688 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2604 2689 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2605 2690 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2606 2691 IXGBE_MRQC_RSS_FIELD_IPV6 |
2607 2692 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2608 2693 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2609 2694 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2610 2695 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2611 2696
|
↓ open down ↓ |
73 lines elided |
↑ open up ↑ |
2612 2697 /*
2613 2698 * Enable and Setup VMDq
2614 2699 * VMDq Filter = 0; MAC filtering
2615 2700 * Default VMDq output index = 0;
2616 2701 */
2617 2702 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2618 2703 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2619 2704 break;
2620 2705
2621 2706 case ixgbe_mac_82599EB:
2707 + case ixgbe_mac_X540:
2622 2708 /*
2623 2709 * Enable RSS & Setup RSS Hash functions
2624 2710 */
2625 2711 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2626 2712 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2627 2713 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2628 2714 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2629 2715 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2630 2716 IXGBE_MRQC_RSS_FIELD_IPV6 |
2631 2717 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2632 2718 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2633 2719 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2634 2720
2635 2721 /*
2636 2722 * Enable VMDq+RSS.
2637 2723 */
2638 2724 if (ixgbe->num_rx_groups > 32) {
2639 2725 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2640 2726 } else {
2641 2727 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2642 2728 }
2643 2729
2644 2730 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2645 2731
2646 2732 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2647 2733 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2648 2734 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2649 2735 }
2650 2736 break;
2651 2737
2652 2738 default:
2653 2739 break;
2654 2740
2655 2741 }
2656 2742
|
↓ open down ↓ |
25 lines elided |
↑ open up ↑ |
2657 2743 /*
2658 2744 * Disable Packet Checksum to enable RSS for multiple receive queues.
2659 2745 * It is an adapter hardware limitation that Packet Checksum is
2660 2746 * mutually exclusive with RSS.
2661 2747 */
2662 2748 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2663 2749 rxcsum |= IXGBE_RXCSUM_PCSD;
2664 2750 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2665 2751 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2666 2752
2667 - if (hw->mac.type == ixgbe_mac_82599EB) {
2753 + if (hw->mac.type >= ixgbe_mac_82599EB) {
2668 2754 /*
2669 2755 * Enable Virtualization and Replication.
2670 2756 */
2671 2757 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2672 2758 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2673 2759
2674 2760 /*
2675 2761 * Enable receiving packets to all VFs
2676 2762 */
2677 2763 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2678 2764 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2679 2765 }
2680 2766 }
2681 2767
2682 2768 /*
2683 2769 * ixgbe_init_unicst - Initialize the unicast addresses.
2684 2770 */
2685 2771 static void
2686 2772 ixgbe_init_unicst(ixgbe_t *ixgbe)
2687 2773 {
2688 2774 struct ixgbe_hw *hw = &ixgbe->hw;
2689 2775 uint8_t *mac_addr;
2690 2776 int slot;
2691 2777 /*
2692 2778 * Here we should consider two situations:
2693 2779 *
2694 2780 * 1. Chipset is initialized at the first time,
2695 2781 * Clear all the multiple unicast addresses.
2696 2782 *
2697 2783 * 2. Chipset is reset
2698 2784 * Recover the multiple unicast addresses from the
2699 2785 * software data structure to the RAR registers.
2700 2786 */
2701 2787 if (!ixgbe->unicst_init) {
2702 2788 /*
2703 2789 * Initialize the multiple unicast addresses
2704 2790 */
2705 2791 ixgbe->unicst_total = hw->mac.num_rar_entries;
2706 2792 ixgbe->unicst_avail = ixgbe->unicst_total;
2707 2793 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2708 2794 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2709 2795 bzero(mac_addr, ETHERADDRL);
2710 2796 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
2711 2797 ixgbe->unicst_addr[slot].mac.set = 0;
2712 2798 }
2713 2799 ixgbe->unicst_init = B_TRUE;
2714 2800 } else {
2715 2801 /* Re-configure the RAR registers */
2716 2802 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2717 2803 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2718 2804 if (ixgbe->unicst_addr[slot].mac.set == 1) {
2719 2805 (void) ixgbe_set_rar(hw, slot, mac_addr,
2720 2806 ixgbe->unicst_addr[slot].mac.group_index,
2721 2807 IXGBE_RAH_AV);
2722 2808 } else {
2723 2809 bzero(mac_addr, ETHERADDRL);
2724 2810 (void) ixgbe_set_rar(hw, slot, mac_addr,
2725 2811 NULL, NULL);
2726 2812 }
2727 2813 }
2728 2814 }
2729 2815 }
2730 2816
2731 2817 /*
2732 2818 * ixgbe_unicst_find - Find the slot for the specified unicast address
2733 2819 */
2734 2820 int
2735 2821 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2736 2822 {
2737 2823 int slot;
2738 2824
2739 2825 ASSERT(mutex_owned(&ixgbe->gen_lock));
2740 2826
2741 2827 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2742 2828 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2743 2829 mac_addr, ETHERADDRL) == 0)
2744 2830 return (slot);
2745 2831 }
2746 2832
2747 2833 return (-1);
2748 2834 }
2749 2835
2750 2836 /*
2751 2837 * ixgbe_multicst_add - Add a multicst address.
2752 2838 */
2753 2839 int
2754 2840 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2755 2841 {
2756 2842 ASSERT(mutex_owned(&ixgbe->gen_lock));
2757 2843
2758 2844 if ((multiaddr[0] & 01) == 0) {
2759 2845 return (EINVAL);
2760 2846 }
2761 2847
2762 2848 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2763 2849 return (ENOENT);
2764 2850 }
2765 2851
2766 2852 bcopy(multiaddr,
2767 2853 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2768 2854 ixgbe->mcast_count++;
2769 2855
2770 2856 /*
2771 2857 * Update the multicast table in the hardware
2772 2858 */
2773 2859 ixgbe_setup_multicst(ixgbe);
2774 2860
2775 2861 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2776 2862 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2777 2863 return (EIO);
2778 2864 }
2779 2865
2780 2866 return (0);
2781 2867 }
2782 2868
2783 2869 /*
2784 2870 * ixgbe_multicst_remove - Remove a multicst address.
2785 2871 */
2786 2872 int
2787 2873 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2788 2874 {
2789 2875 int i;
2790 2876
2791 2877 ASSERT(mutex_owned(&ixgbe->gen_lock));
2792 2878
2793 2879 for (i = 0; i < ixgbe->mcast_count; i++) {
2794 2880 if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2795 2881 ETHERADDRL) == 0) {
2796 2882 for (i++; i < ixgbe->mcast_count; i++) {
2797 2883 ixgbe->mcast_table[i - 1] =
2798 2884 ixgbe->mcast_table[i];
2799 2885 }
2800 2886 ixgbe->mcast_count--;
2801 2887 break;
2802 2888 }
2803 2889 }
2804 2890
2805 2891 /*
2806 2892 * Update the multicast table in the hardware
2807 2893 */
2808 2894 ixgbe_setup_multicst(ixgbe);
2809 2895
2810 2896 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2811 2897 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2812 2898 return (EIO);
2813 2899 }
2814 2900
2815 2901 return (0);
2816 2902 }
2817 2903
2818 2904 /*
2819 2905 * ixgbe_setup_multicast - Setup multicast data structures.
2820 2906 *
2821 2907 * This routine initializes all of the multicast related structures
2822 2908 * and save them in the hardware registers.
2823 2909 */
2824 2910 static void
2825 2911 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2826 2912 {
2827 2913 uint8_t *mc_addr_list;
2828 2914 uint32_t mc_addr_count;
2829 2915 struct ixgbe_hw *hw = &ixgbe->hw;
2830 2916
2831 2917 ASSERT(mutex_owned(&ixgbe->gen_lock));
|
↓ open down ↓ |
154 lines elided |
↑ open up ↑ |
2832 2918
2833 2919 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2834 2920
2835 2921 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2836 2922 mc_addr_count = ixgbe->mcast_count;
2837 2923
2838 2924 /*
2839 2925 * Update the multicast addresses to the MTA registers
2840 2926 */
2841 2927 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2842 - ixgbe_mc_table_itr);
2928 + ixgbe_mc_table_itr, TRUE);
2843 2929 }
2844 2930
2845 2931 /*
2846 2932 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2847 2933 *
2848 2934 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2849 2935 * Different chipsets may have different allowed configuration of vmdq and rss.
2850 2936 */
2851 2937 static void
2852 2938 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2853 2939 {
2854 2940 struct ixgbe_hw *hw = &ixgbe->hw;
2855 2941 uint32_t ring_per_group;
2856 2942
2857 2943 switch (hw->mac.type) {
2858 2944 case ixgbe_mac_82598EB:
2859 2945 /*
2860 2946 * 82598 supports the following combination:
2861 2947 * vmdq no. x rss no.
2862 2948 * [5..16] x 1
2863 2949 * [1..4] x [1..16]
2864 2950 * However 8 rss queue per pool (vmdq) is sufficient for
2865 2951 * most cases.
2866 2952 */
2867 2953 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
|
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
2868 2954 if (ixgbe->num_rx_groups > 4) {
2869 2955 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2870 2956 } else {
2871 2957 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2872 2958 min(8, ring_per_group);
2873 2959 }
2874 2960
2875 2961 break;
2876 2962
2877 2963 case ixgbe_mac_82599EB:
2964 + case ixgbe_mac_X540:
2878 2965 /*
2879 2966 * 82599 supports the following combination:
2880 2967 * vmdq no. x rss no.
2881 2968 * [33..64] x [1..2]
2882 2969 * [2..32] x [1..4]
2883 2970 * 1 x [1..16]
2884 2971 * However 8 rss queue per pool (vmdq) is sufficient for
2885 2972 * most cases.
2973 + *
2974 + * For now, treat X540 like the 82599.
2886 2975 */
2887 2976 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2888 2977 if (ixgbe->num_rx_groups == 1) {
2889 2978 ixgbe->num_rx_rings = min(8, ring_per_group);
2890 2979 } else if (ixgbe->num_rx_groups <= 32) {
2891 2980 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2892 2981 min(4, ring_per_group);
2893 2982 } else if (ixgbe->num_rx_groups <= 64) {
2894 2983 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2895 2984 min(2, ring_per_group);
2896 2985 }
2897 2986 break;
2898 2987
2899 2988 default:
2900 2989 break;
2901 2990 }
2902 2991
2903 2992 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2904 2993
2905 2994 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
2906 2995 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
2907 2996 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
2908 2997 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
2909 2998 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
2910 2999 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
2911 3000 } else {
2912 3001 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
2913 3002 }
2914 3003
2915 3004 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
2916 3005 ixgbe->num_rx_groups, ixgbe->num_rx_rings);
2917 3006 }
2918 3007
2919 3008 /*
2920 3009 * ixgbe_get_conf - Get driver configurations set in driver.conf.
2921 3010 *
2922 3011 * This routine gets user-configured values out of the configuration
2923 3012 * file ixgbe.conf.
2924 3013 *
2925 3014 * For each configurable value, there is a minimum, a maximum, and a
2926 3015 * default.
2927 3016 * If user does not configure a value, use the default.
 2928 3017 	 * If user configures below the minimum, use the minimum.
 2929 3018 	 * If user configures above the maximum, use the maximum.
2930 3019 */
2931 3020 static void
2932 3021 ixgbe_get_conf(ixgbe_t *ixgbe)
2933 3022 {
2934 3023 struct ixgbe_hw *hw = &ixgbe->hw;
2935 3024 uint32_t flow_control;
2936 3025
2937 3026 /*
2938 3027 * ixgbe driver supports the following user configurations:
2939 3028 *
2940 3029 * Jumbo frame configuration:
2941 3030 * default_mtu
2942 3031 *
2943 3032 * Ethernet flow control configuration:
2944 3033 * flow_control
2945 3034 *
2946 3035 * Multiple rings configurations:
2947 3036 * tx_queue_number
2948 3037 * tx_ring_size
2949 3038 * rx_queue_number
2950 3039 * rx_ring_size
2951 3040 *
2952 3041 * Call ixgbe_get_prop() to get the value for a specific
2953 3042 * configuration parameter.
2954 3043 */
2955 3044
2956 3045 /*
2957 3046 * Jumbo frame configuration - max_frame_size controls host buffer
2958 3047 * allocation, so includes MTU, ethernet header, vlan tag and
2959 3048 * frame check sequence.
2960 3049 */
2961 3050 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
2962 3051 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
2963 3052
2964 3053 ixgbe->max_frame_size = ixgbe->default_mtu +
2965 3054 sizeof (struct ether_vlan_header) + ETHERFCSL;
2966 3055
2967 3056 /*
2968 3057 * Ethernet flow control configuration
2969 3058 */
2970 3059 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
2971 3060 ixgbe_fc_none, 3, ixgbe_fc_none);
2972 3061 if (flow_control == 3)
2973 3062 flow_control = ixgbe_fc_default;
2974 3063
2975 3064 /*
2976 3065 * fc.requested mode is what the user requests. After autoneg,
2977 3066 * fc.current_mode will be the flow_control mode that was negotiated.
2978 3067 */
2979 3068 hw->fc.requested_mode = flow_control;
2980 3069
2981 3070 /*
2982 3071 * Multiple rings configurations
2983 3072 */
2984 3073 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
2985 3074 ixgbe->capab->min_tx_que_num,
2986 3075 ixgbe->capab->max_tx_que_num,
2987 3076 ixgbe->capab->def_tx_que_num);
2988 3077 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
2989 3078 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
2990 3079
2991 3080 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
2992 3081 ixgbe->capab->min_rx_que_num,
2993 3082 ixgbe->capab->max_rx_que_num,
2994 3083 ixgbe->capab->def_rx_que_num);
2995 3084 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
2996 3085 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
2997 3086
2998 3087 /*
2999 3088 * Multiple groups configuration
3000 3089 */
3001 3090 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3002 3091 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3003 3092 ixgbe->capab->def_rx_grp_num);
3004 3093
3005 3094 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3006 3095 0, 1, DEFAULT_MR_ENABLE);
3007 3096
3008 3097 if (ixgbe->mr_enable == B_FALSE) {
3009 3098 ixgbe->num_tx_rings = 1;
3010 3099 ixgbe->num_rx_rings = 1;
3011 3100 ixgbe->num_rx_groups = 1;
3012 3101 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3013 3102 } else {
3014 3103 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3015 3104 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3016 3105 /*
3017 3106 * The combination of num_rx_rings and num_rx_groups
3018 3107 * may be not supported by h/w. We need to adjust
3019 3108 * them to appropriate values.
3020 3109 */
3021 3110 ixgbe_setup_vmdq_rss_conf(ixgbe);
3022 3111 }
3023 3112
3024 3113 /*
3025 3114 * Tunable used to force an interrupt type. The only use is
3026 3115 * for testing of the lesser interrupt types.
3027 3116 * 0 = don't force interrupt type
3028 3117 * 1 = force interrupt type MSI-X
3029 3118 * 2 = force interrupt type MSI
3030 3119 * 3 = force interrupt type Legacy
3031 3120 */
3032 3121 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3033 3122 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3034 3123
3035 3124 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3036 3125 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3037 3126 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
|
↓ open down ↓ |
142 lines elided |
↑ open up ↑ |
3038 3127 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3039 3128 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3040 3129 0, 1, DEFAULT_LSO_ENABLE);
3041 3130 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3042 3131 0, 1, DEFAULT_LRO_ENABLE);
3043 3132 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3044 3133 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3045 3134 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3046 3135 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3047 3136
3048 - /* Head Write Back not recommended for 82599 */
3137 + /* Head Write Back not recommended for 82599 and X540 */
3049 3138 if (hw->mac.type >= ixgbe_mac_82599EB) {
3050 3139 ixgbe->tx_head_wb_enable = B_FALSE;
3051 3140 }
3052 3141
3053 3142 /*
3054 3143 * ixgbe LSO needs the tx h/w checksum support.
3055 3144 * LSO will be disabled if tx h/w checksum is not
3056 3145 * enabled.
3057 3146 */
3058 3147 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3059 3148 ixgbe->lso_enable = B_FALSE;
3060 3149 }
3061 3150
|
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
3062 3151 /*
3063 3152 * ixgbe LRO needs the rx h/w checksum support.
3064 3153 * LRO will be disabled if rx h/w checksum is not
3065 3154 * enabled.
3066 3155 */
3067 3156 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3068 3157 ixgbe->lro_enable = B_FALSE;
3069 3158 }
3070 3159
3071 3160 /*
3072 - * ixgbe LRO only been supported by 82599 now
 3161 +	 * ixgbe LRO is only supported by 82599 and X540 now
3073 3162 */
3074 - if (hw->mac.type != ixgbe_mac_82599EB) {
3163 + if (hw->mac.type < ixgbe_mac_82599EB) {
3075 3164 ixgbe->lro_enable = B_FALSE;
3076 3165 }
3077 3166 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3078 3167 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3079 3168 DEFAULT_TX_COPY_THRESHOLD);
3080 3169 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3081 3170 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3082 3171 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3083 3172 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3084 3173 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3085 3174 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3086 3175 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3087 3176 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3088 3177 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3089 3178
3090 3179 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3091 3180 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
|
↓ open down ↓ |
7 lines elided |
↑ open up ↑ |
3092 3181 DEFAULT_RX_COPY_THRESHOLD);
3093 3182 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3094 3183 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3095 3184 DEFAULT_RX_LIMIT_PER_INTR);
3096 3185
3097 3186 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3098 3187 ixgbe->capab->min_intr_throttle,
3099 3188 ixgbe->capab->max_intr_throttle,
3100 3189 ixgbe->capab->def_intr_throttle);
3101 3190 /*
3102 - * 82599 requires the interupt throttling rate is
 3102 	3191 +	 * 82599 and X540 require the interrupt throttling rate is
 3103 3192 	 * a multiple of 8. This is enforced by the register
 3104 3193 	 * definition.
3105 3194 */
3106 - if (hw->mac.type == ixgbe_mac_82599EB)
3195 + if (hw->mac.type >= ixgbe_mac_82599EB)
3107 3196 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3108 3197 }
3109 3198
3110 3199 static void
3111 3200 ixgbe_init_params(ixgbe_t *ixgbe)
3112 3201 {
3113 3202 ixgbe->param_en_10000fdx_cap = 1;
3114 3203 ixgbe->param_en_1000fdx_cap = 1;
3115 3204 ixgbe->param_en_100fdx_cap = 1;
3116 3205 ixgbe->param_adv_10000fdx_cap = 1;
3117 3206 ixgbe->param_adv_1000fdx_cap = 1;
3118 3207 ixgbe->param_adv_100fdx_cap = 1;
3119 3208
3120 3209 ixgbe->param_pause_cap = 1;
3121 3210 ixgbe->param_asym_pause_cap = 1;
3122 3211 ixgbe->param_rem_fault = 0;
3123 3212
3124 3213 ixgbe->param_adv_autoneg_cap = 1;
3125 3214 ixgbe->param_adv_pause_cap = 1;
3126 3215 ixgbe->param_adv_asym_pause_cap = 1;
3127 3216 ixgbe->param_adv_rem_fault = 0;
3128 3217
3129 3218 ixgbe->param_lp_10000fdx_cap = 0;
3130 3219 ixgbe->param_lp_1000fdx_cap = 0;
3131 3220 ixgbe->param_lp_100fdx_cap = 0;
3132 3221 ixgbe->param_lp_autoneg_cap = 0;
3133 3222 ixgbe->param_lp_pause_cap = 0;
3134 3223 ixgbe->param_lp_asym_pause_cap = 0;
3135 3224 ixgbe->param_lp_rem_fault = 0;
3136 3225 }
3137 3226
3138 3227 /*
3139 3228 * ixgbe_get_prop - Get a property value out of the configuration file
3140 3229 * ixgbe.conf.
3141 3230 *
3142 3231 * Caller provides the name of the property, a default value, a minimum
3143 3232 * value, and a maximum value.
3144 3233 *
3145 3234 * Return configured value of the property, with default, minimum and
3146 3235 * maximum properly applied.
3147 3236 */
3148 3237 static int
3149 3238 ixgbe_get_prop(ixgbe_t *ixgbe,
3150 3239 char *propname, /* name of the property */
3151 3240 int minval, /* minimum acceptable value */
 3152 3241 	 int maxval, /* maximum acceptable value */
3153 3242 int defval) /* default value */
3154 3243 {
3155 3244 int value;
3156 3245
3157 3246 /*
3158 3247 * Call ddi_prop_get_int() to read the conf settings
3159 3248 */
3160 3249 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3161 3250 DDI_PROP_DONTPASS, propname, defval);
3162 3251 if (value > maxval)
3163 3252 value = maxval;
3164 3253
3165 3254 if (value < minval)
3166 3255 value = minval;
3167 3256
3168 3257 return (value);
3169 3258 }
3170 3259
3171 3260 /*
3172 3261 * ixgbe_driver_setup_link - Using the link properties to setup the link.
3173 3262 */
3174 3263 int
3175 3264 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3176 3265 {
3177 3266 u32 autoneg_advertised = 0;
3178 3267
3179 3268 /*
3180 3269 * No half duplex support with 10Gb parts
3181 3270 */
3182 3271 if (ixgbe->param_adv_10000fdx_cap == 1)
3183 3272 autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3184 3273
3185 3274 if (ixgbe->param_adv_1000fdx_cap == 1)
3186 3275 autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3187 3276
3188 3277 if (ixgbe->param_adv_100fdx_cap == 1)
3189 3278 autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
3190 3279
3191 3280 if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
3192 3281 ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
3193 3282 "to autonegotiation with full link capabilities.");
3194 3283
3195 3284 autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
3196 3285 IXGBE_LINK_SPEED_1GB_FULL |
3197 3286 IXGBE_LINK_SPEED_100_FULL;
3198 3287 }
3199 3288
3200 3289 if (setup_hw) {
3201 3290 if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
3202 3291 ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
3203 3292 ixgbe_notice(ixgbe, "Setup link failed on this "
3204 3293 "device.");
3205 3294 return (IXGBE_FAILURE);
3206 3295 }
3207 3296 }
3208 3297
3209 3298 return (IXGBE_SUCCESS);
3210 3299 }
3211 3300
3212 3301 /*
3213 3302 * ixgbe_driver_link_check - Link status processing.
3214 3303 *
3215 3304 * This function can be called in both kernel context and interrupt context
3216 3305 */
3217 3306 static void
3218 3307 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3219 3308 {
3220 3309 struct ixgbe_hw *hw = &ixgbe->hw;
3221 3310 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
|
↓ open down ↓ |
105 lines elided |
↑ open up ↑ |
3222 3311 boolean_t link_up = B_FALSE;
3223 3312 boolean_t link_changed = B_FALSE;
3224 3313
3225 3314 ASSERT(mutex_owned(&ixgbe->gen_lock));
3226 3315
3227 3316 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3228 3317 if (link_up) {
3229 3318 ixgbe->link_check_complete = B_TRUE;
3230 3319
3231 3320 /* Link is up, enable flow control settings */
3232 - (void) ixgbe_fc_enable(hw, 0);
3321 + (void) ixgbe_fc_enable(hw);
3233 3322
3234 3323 /*
3235 3324 * The Link is up, check whether it was marked as down earlier
3236 3325 */
3237 3326 if (ixgbe->link_state != LINK_STATE_UP) {
3238 3327 switch (speed) {
3239 3328 case IXGBE_LINK_SPEED_10GB_FULL:
3240 3329 ixgbe->link_speed = SPEED_10GB;
3241 3330 break;
3242 3331 case IXGBE_LINK_SPEED_1GB_FULL:
3243 3332 ixgbe->link_speed = SPEED_1GB;
3244 3333 break;
3245 3334 case IXGBE_LINK_SPEED_100_FULL:
3246 3335 ixgbe->link_speed = SPEED_100;
3247 3336 }
3248 3337 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3249 3338 ixgbe->link_state = LINK_STATE_UP;
3250 3339 link_changed = B_TRUE;
3251 3340 }
3252 3341 } else {
3253 3342 if (ixgbe->link_check_complete == B_TRUE ||
3254 3343 (ixgbe->link_check_complete == B_FALSE &&
3255 3344 gethrtime() >= ixgbe->link_check_hrtime)) {
3256 3345 /*
3257 3346 * The link is really down
3258 3347 */
3259 3348 ixgbe->link_check_complete = B_TRUE;
3260 3349
3261 3350 if (ixgbe->link_state != LINK_STATE_DOWN) {
3262 3351 ixgbe->link_speed = 0;
3263 3352 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
3264 3353 ixgbe->link_state = LINK_STATE_DOWN;
3265 3354 link_changed = B_TRUE;
3266 3355 }
3267 3356 }
3268 3357 }
3269 3358
3270 3359 /*
3271 3360 * If we are in an interrupt context, need to re-enable the
3272 3361 * interrupt, which was automasked
3273 3362 */
3274 3363 if (servicing_interrupt() != 0) {
3275 3364 ixgbe->eims |= IXGBE_EICR_LSC;
3276 3365 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3277 3366 }
3278 3367
3279 3368 if (link_changed) {
3280 3369 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3281 3370 }
3282 3371 }
3283 3372
3284 3373 /*
3285 3374 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
3286 3375 */
3287 3376 static void
3288 3377 ixgbe_sfp_check(void *arg)
3289 3378 {
3290 3379 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3291 3380 uint32_t eicr = ixgbe->eicr;
3292 3381 struct ixgbe_hw *hw = &ixgbe->hw;
3293 3382
3294 3383 mutex_enter(&ixgbe->gen_lock);
3295 3384 if (eicr & IXGBE_EICR_GPI_SDP1) {
3296 3385 /* clear the interrupt */
3297 3386 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
3298 3387
3299 3388 /* if link up, do multispeed fiber setup */
3300 3389 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3301 3390 B_TRUE, B_TRUE);
3302 3391 ixgbe_driver_link_check(ixgbe);
3303 3392 ixgbe_get_hw_state(ixgbe);
3304 3393 } else if (eicr & IXGBE_EICR_GPI_SDP2) {
3305 3394 /* clear the interrupt */
3306 3395 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
3307 3396
3308 3397 /* if link up, do sfp module setup */
3309 3398 (void) hw->mac.ops.setup_sfp(hw);
3310 3399
3311 3400 /* do multispeed fiber setup */
3312 3401 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3313 3402 B_TRUE, B_TRUE);
3314 3403 ixgbe_driver_link_check(ixgbe);
3315 3404 ixgbe_get_hw_state(ixgbe);
3316 3405 }
3317 3406 mutex_exit(&ixgbe->gen_lock);
3318 3407
3319 3408 /*
3320 3409 * We need to fully re-check the link later.
3321 3410 */
3322 3411 ixgbe->link_check_complete = B_FALSE;
3323 3412 ixgbe->link_check_hrtime = gethrtime() +
3324 3413 (IXGBE_LINK_UP_TIME * 100000000ULL);
3325 3414 }
3326 3415
3327 3416 /*
3328 3417 * ixgbe_overtemp_check - overtemp module processing done in taskq
3329 3418 *
3330 3419 * This routine will only be called on adapters with temperature sensor.
3331 3420 * The indication of over-temperature can be either SDP0 interrupt or the link
3332 3421 * status change interrupt.
3333 3422 */
3334 3423 static void
3335 3424 ixgbe_overtemp_check(void *arg)
3336 3425 {
3337 3426 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3338 3427 struct ixgbe_hw *hw = &ixgbe->hw;
3339 3428 uint32_t eicr = ixgbe->eicr;
3340 3429 ixgbe_link_speed speed;
3341 3430 boolean_t link_up;
3342 3431
3343 3432 mutex_enter(&ixgbe->gen_lock);
3344 3433
3345 3434 /* make sure we know current state of link */
3346 3435 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3347 3436
3348 3437 /* check over-temp condition */
3349 3438 if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
3350 3439 (eicr & IXGBE_EICR_LSC)) {
3351 3440 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3352 3441 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3353 3442
3354 3443 /*
3355 3444 * Disable the adapter interrupts
3356 3445 */
3357 3446 ixgbe_disable_adapter_interrupts(ixgbe);
3358 3447
3359 3448 /*
3360 3449 * Disable Rx/Tx units
3361 3450 */
3362 3451 (void) ixgbe_stop_adapter(hw);
3363 3452
3364 3453 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3365 3454 ixgbe_error(ixgbe,
3366 3455 "Problem: Network adapter has been stopped "
3367 3456 "because it has overheated");
3368 3457 ixgbe_error(ixgbe,
3369 3458 "Action: Restart the computer. "
3370 3459 "If the problem persists, power off the system "
3371 3460 "and replace the adapter");
3372 3461 }
3373 3462 }
3374 3463
3375 3464 /* write to clear the interrupt */
3376 3465 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3377 3466
3378 3467 mutex_exit(&ixgbe->gen_lock);
3379 3468 }
3380 3469
3381 3470 /*
3382 3471 * ixgbe_link_timer - timer for link status detection
3383 3472 */
3384 3473 static void
3385 3474 ixgbe_link_timer(void *arg)
3386 3475 {
3387 3476 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3388 3477
3389 3478 mutex_enter(&ixgbe->gen_lock);
3390 3479 ixgbe_driver_link_check(ixgbe);
3391 3480 mutex_exit(&ixgbe->gen_lock);
3392 3481 }
3393 3482
3394 3483 /*
3395 3484 * ixgbe_local_timer - Driver watchdog function.
3396 3485 *
3397 3486 * This function will handle the transmit stall check and other routines.
3398 3487 */
3399 3488 static void
3400 3489 ixgbe_local_timer(void *arg)
3401 3490 {
3402 3491 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3403 3492
3404 3493 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3405 3494 goto out;
3406 3495
3407 3496 if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3408 3497 ixgbe->reset_count++;
3409 3498 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3410 3499 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3411 3500 goto out;
3412 3501 }
3413 3502
3414 3503 if (ixgbe_stall_check(ixgbe)) {
3415 3504 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3416 3505 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3417 3506
3418 3507 ixgbe->reset_count++;
3419 3508 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3420 3509 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3421 3510 }
3422 3511
3423 3512 out:
3424 3513 ixgbe_restart_watchdog_timer(ixgbe);
3425 3514 }
3426 3515
3427 3516 /*
3428 3517 * ixgbe_stall_check - Check for transmit stall.
3429 3518 *
3430 3519 * This function checks if the adapter is stalled (in transmit).
3431 3520 *
3432 3521 * It is called each time the watchdog timeout is invoked.
3433 3522 * If the transmit descriptor reclaim continuously fails,
3434 3523 * the watchdog value will increment by 1. If the watchdog
3435 3524 * value exceeds the threshold, the ixgbe is assumed to
3436 3525 * have stalled and need to be reset.
3437 3526 */
3438 3527 static boolean_t
3439 3528 ixgbe_stall_check(ixgbe_t *ixgbe)
3440 3529 {
3441 3530 ixgbe_tx_ring_t *tx_ring;
3442 3531 boolean_t result;
3443 3532 int i;
3444 3533
3445 3534 if (ixgbe->link_state != LINK_STATE_UP)
3446 3535 return (B_FALSE);
3447 3536
3448 3537 /*
3449 3538 * If any tx ring is stalled, we'll reset the chipset
3450 3539 */
3451 3540 result = B_FALSE;
3452 3541 for (i = 0; i < ixgbe->num_tx_rings; i++) {
3453 3542 tx_ring = &ixgbe->tx_rings[i];
3454 3543 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
3455 3544 tx_ring->tx_recycle(tx_ring);
3456 3545 }
3457 3546
3458 3547 if (tx_ring->recycle_fail > 0)
3459 3548 tx_ring->stall_watchdog++;
3460 3549 else
3461 3550 tx_ring->stall_watchdog = 0;
3462 3551
3463 3552 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
3464 3553 result = B_TRUE;
3465 3554 break;
3466 3555 }
3467 3556 }
3468 3557
3469 3558 if (result) {
3470 3559 tx_ring->stall_watchdog = 0;
3471 3560 tx_ring->recycle_fail = 0;
3472 3561 }
3473 3562
3474 3563 return (result);
3475 3564 }
3476 3565
3477 3566
3478 3567 /*
3479 3568 * is_valid_mac_addr - Check if the mac address is valid.
3480 3569 */
3481 3570 static boolean_t
3482 3571 is_valid_mac_addr(uint8_t *mac_addr)
3483 3572 {
3484 3573 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
3485 3574 const uint8_t addr_test2[6] =
3486 3575 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3487 3576
3488 3577 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
3489 3578 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
3490 3579 return (B_FALSE);
3491 3580
3492 3581 return (B_TRUE);
3493 3582 }
3494 3583
3495 3584 static boolean_t
3496 3585 ixgbe_find_mac_address(ixgbe_t *ixgbe)
3497 3586 {
3498 3587 #ifdef __sparc
3499 3588 struct ixgbe_hw *hw = &ixgbe->hw;
3500 3589 uchar_t *bytes;
3501 3590 struct ether_addr sysaddr;
3502 3591 uint_t nelts;
3503 3592 int err;
3504 3593 boolean_t found = B_FALSE;
3505 3594
3506 3595 /*
3507 3596 * The "vendor's factory-set address" may already have
3508 3597 * been extracted from the chip, but if the property
3509 3598 * "local-mac-address" is set we use that instead.
3510 3599 *
3511 3600 * We check whether it looks like an array of 6
3512 3601 * bytes (which it should, if OBP set it). If we can't
3513 3602 * make sense of it this way, we'll ignore it.
3514 3603 */
3515 3604 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3516 3605 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
3517 3606 if (err == DDI_PROP_SUCCESS) {
3518 3607 if (nelts == ETHERADDRL) {
3519 3608 while (nelts--)
3520 3609 hw->mac.addr[nelts] = bytes[nelts];
3521 3610 found = B_TRUE;
3522 3611 }
3523 3612 ddi_prop_free(bytes);
3524 3613 }
3525 3614
3526 3615 /*
3527 3616 * Look up the OBP property "local-mac-address?". If the user has set
3528 3617 * 'local-mac-address? = false', use "the system address" instead.
3529 3618 */
3530 3619 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
3531 3620 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3532 3621 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
3533 3622 if (localetheraddr(NULL, &sysaddr) != 0) {
3534 3623 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
3535 3624 found = B_TRUE;
3536 3625 }
3537 3626 }
3538 3627 ddi_prop_free(bytes);
3539 3628 }
3540 3629
3541 3630 /*
3542 3631 * Finally(!), if there's a valid "mac-address" property (created
3543 3632 * if we netbooted from this interface), we must use this instead
3544 3633 * of any of the above to ensure that the NFS/install server doesn't
3545 3634 * get confused by the address changing as Solaris takes over!
3546 3635 */
3547 3636 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3548 3637 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
3549 3638 if (err == DDI_PROP_SUCCESS) {
3550 3639 if (nelts == ETHERADDRL) {
3551 3640 while (nelts--)
3552 3641 hw->mac.addr[nelts] = bytes[nelts];
3553 3642 found = B_TRUE;
3554 3643 }
3555 3644 ddi_prop_free(bytes);
3556 3645 }
3557 3646
3558 3647 if (found) {
3559 3648 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
3560 3649 return (B_TRUE);
3561 3650 }
3562 3651 #else
3563 3652 _NOTE(ARGUNUSED(ixgbe));
3564 3653 #endif
3565 3654
3566 3655 return (B_TRUE);
3567 3656 }
3568 3657
3569 3658 #pragma inline(ixgbe_arm_watchdog_timer)
3570 3659 static void
3571 3660 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
3572 3661 {
3573 3662 /*
3574 3663 * Fire a watchdog timer
3575 3664 */
3576 3665 ixgbe->watchdog_tid =
3577 3666 timeout(ixgbe_local_timer,
3578 3667 (void *)ixgbe, 1 * drv_usectohz(1000000));
3579 3668
3580 3669 }
3581 3670
3582 3671 /*
3583 3672 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
3584 3673 */
3585 3674 void
3586 3675 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
3587 3676 {
3588 3677 mutex_enter(&ixgbe->watchdog_lock);
3589 3678
3590 3679 if (!ixgbe->watchdog_enable) {
3591 3680 ixgbe->watchdog_enable = B_TRUE;
3592 3681 ixgbe->watchdog_start = B_TRUE;
3593 3682 ixgbe_arm_watchdog_timer(ixgbe);
3594 3683 }
3595 3684
3596 3685 mutex_exit(&ixgbe->watchdog_lock);
3597 3686 }
3598 3687
3599 3688 /*
3600 3689 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
3601 3690 */
3602 3691 void
3603 3692 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
3604 3693 {
3605 3694 timeout_id_t tid;
3606 3695
3607 3696 mutex_enter(&ixgbe->watchdog_lock);
3608 3697
3609 3698 ixgbe->watchdog_enable = B_FALSE;
3610 3699 ixgbe->watchdog_start = B_FALSE;
3611 3700 tid = ixgbe->watchdog_tid;
3612 3701 ixgbe->watchdog_tid = 0;
3613 3702
3614 3703 mutex_exit(&ixgbe->watchdog_lock);
3615 3704
3616 3705 if (tid != 0)
3617 3706 (void) untimeout(tid);
3618 3707 }
3619 3708
3620 3709 /*
3621 3710 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
3622 3711 */
3623 3712 void
3624 3713 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
3625 3714 {
3626 3715 mutex_enter(&ixgbe->watchdog_lock);
3627 3716
3628 3717 if (ixgbe->watchdog_enable) {
3629 3718 if (!ixgbe->watchdog_start) {
3630 3719 ixgbe->watchdog_start = B_TRUE;
3631 3720 ixgbe_arm_watchdog_timer(ixgbe);
3632 3721 }
3633 3722 }
3634 3723
3635 3724 mutex_exit(&ixgbe->watchdog_lock);
3636 3725 }
3637 3726
3638 3727 /*
3639 3728 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
3640 3729 */
3641 3730 static void
3642 3731 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
3643 3732 {
3644 3733 mutex_enter(&ixgbe->watchdog_lock);
3645 3734
3646 3735 if (ixgbe->watchdog_start)
3647 3736 ixgbe_arm_watchdog_timer(ixgbe);
3648 3737
3649 3738 mutex_exit(&ixgbe->watchdog_lock);
3650 3739 }
3651 3740
3652 3741 /*
3653 3742 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
3654 3743 */
3655 3744 void
3656 3745 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
3657 3746 {
3658 3747 timeout_id_t tid;
3659 3748
3660 3749 mutex_enter(&ixgbe->watchdog_lock);
3661 3750
3662 3751 ixgbe->watchdog_start = B_FALSE;
3663 3752 tid = ixgbe->watchdog_tid;
3664 3753 ixgbe->watchdog_tid = 0;
3665 3754
3666 3755 mutex_exit(&ixgbe->watchdog_lock);
3667 3756
3668 3757 if (tid != 0)
3669 3758 (void) untimeout(tid);
3670 3759 }
3671 3760
3672 3761 /*
3673 3762 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
3674 3763 */
3675 3764 static void
3676 3765 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
3677 3766 {
3678 3767 struct ixgbe_hw *hw = &ixgbe->hw;
3679 3768
3680 3769 /*
3681 3770 * mask all interrupts off
3682 3771 */
3683 3772 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
3684 3773
3685 3774 /*
3686 3775 * for MSI-X, also disable autoclear
3687 3776 */
3688 3777 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3689 3778 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
3690 3779 }
3691 3780
3692 3781 IXGBE_WRITE_FLUSH(hw);
3693 3782 }
3694 3783
3695 3784 /*
3696 3785 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
3697 3786 */
3698 3787 static void
3699 3788 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
3700 3789 {
3701 3790 struct ixgbe_hw *hw = &ixgbe->hw;
3702 3791 uint32_t eiac, eiam;
3703 3792 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3704 3793
3705 3794 /* interrupt types to enable */
3706 3795 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */
3707 3796 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */
3708 3797 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
3709 3798
3710 3799 /* enable automask on "other" causes that this adapter can generate */
3711 3800 eiam = ixgbe->capab->other_intr;
3712 3801
3713 3802 /*
3714 3803 * msi-x mode
3715 3804 */
3716 3805 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3717 3806 /* enable autoclear but not on bits 29:20 */
3718 3807 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3719 3808
3720 3809 /* general purpose interrupt enable */
3721 3810 gpie |= (IXGBE_GPIE_MSIX_MODE
3722 3811 | IXGBE_GPIE_PBA_SUPPORT
3723 3812 | IXGBE_GPIE_OCD
3724 3813 | IXGBE_GPIE_EIAME);
3725 3814 /*
3726 3815 * non-msi-x mode
3727 3816 */
3728 3817 } else {
3729 3818
3730 3819 /* disable autoclear, leave gpie at default */
3731 3820 eiac = 0;
3732 3821
3733 3822 /*
3734 3823 * General purpose interrupt enable.
3735 3824 * For 82599, extended interrupt automask enable
3736 3825 * only in MSI or MSI-X mode
3737 3826 */
3738 3827 if ((hw->mac.type < ixgbe_mac_82599EB) ||
3739 3828 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3740 3829 gpie |= IXGBE_GPIE_EIAME;
|
↓ open down ↓ |
498 lines elided |
↑ open up ↑ |
3741 3830 }
3742 3831 }
3743 3832
3744 3833 /* Enable specific "other" interrupt types */
3745 3834 switch (hw->mac.type) {
3746 3835 case ixgbe_mac_82598EB:
3747 3836 gpie |= ixgbe->capab->other_gpie;
3748 3837 break;
3749 3838
3750 3839 case ixgbe_mac_82599EB:
3840 + case ixgbe_mac_X540:
3751 3841 gpie |= ixgbe->capab->other_gpie;
3752 3842
3753 3843 /* Enable RSC Delay 8us when LRO enabled */
3754 3844 if (ixgbe->lro_enable) {
3755 3845 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3756 3846 }
3757 3847 break;
3758 3848
3759 3849 default:
3760 3850 break;
3761 3851 }
3762 3852
3763 3853 /* write to interrupt control registers */
3764 3854 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3765 3855 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3766 3856 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3767 3857 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3768 3858 IXGBE_WRITE_FLUSH(hw);
3769 3859 }
3770 3860
3771 3861 /*
3772 3862 * ixgbe_loopback_ioctl - Loopback support.
3773 3863 */
3774 3864 enum ioc_reply
3775 3865 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
3776 3866 {
3777 3867 lb_info_sz_t *lbsp;
3778 3868 lb_property_t *lbpp;
3779 3869 uint32_t *lbmp;
3780 3870 uint32_t size;
3781 3871 uint32_t value;
3782 3872
3783 3873 if (mp->b_cont == NULL)
3784 3874 return (IOC_INVAL);
3785 3875
3786 3876 switch (iocp->ioc_cmd) {
3787 3877 default:
3788 3878 return (IOC_INVAL);
3789 3879
3790 3880 case LB_GET_INFO_SIZE:
3791 3881 size = sizeof (lb_info_sz_t);
3792 3882 if (iocp->ioc_count != size)
3793 3883 return (IOC_INVAL);
3794 3884
3795 3885 value = sizeof (lb_normal);
3796 3886 value += sizeof (lb_mac);
3797 3887 value += sizeof (lb_external);
3798 3888
3799 3889 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
3800 3890 *lbsp = value;
3801 3891 break;
3802 3892
3803 3893 case LB_GET_INFO:
3804 3894 value = sizeof (lb_normal);
3805 3895 value += sizeof (lb_mac);
3806 3896 value += sizeof (lb_external);
3807 3897
3808 3898 size = value;
3809 3899 if (iocp->ioc_count != size)
3810 3900 return (IOC_INVAL);
3811 3901
3812 3902 value = 0;
3813 3903 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
3814 3904
3815 3905 lbpp[value++] = lb_normal;
3816 3906 lbpp[value++] = lb_mac;
3817 3907 lbpp[value++] = lb_external;
3818 3908 break;
3819 3909
3820 3910 case LB_GET_MODE:
3821 3911 size = sizeof (uint32_t);
3822 3912 if (iocp->ioc_count != size)
3823 3913 return (IOC_INVAL);
3824 3914
3825 3915 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3826 3916 *lbmp = ixgbe->loopback_mode;
3827 3917 break;
3828 3918
3829 3919 case LB_SET_MODE:
3830 3920 size = 0;
3831 3921 if (iocp->ioc_count != sizeof (uint32_t))
3832 3922 return (IOC_INVAL);
3833 3923
3834 3924 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3835 3925 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
3836 3926 return (IOC_INVAL);
3837 3927 break;
3838 3928 }
3839 3929
3840 3930 iocp->ioc_count = size;
3841 3931 iocp->ioc_error = 0;
3842 3932
3843 3933 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3844 3934 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3845 3935 return (IOC_INVAL);
3846 3936 }
3847 3937
3848 3938 return (IOC_REPLY);
3849 3939 }
3850 3940
3851 3941 /*
3852 3942 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3853 3943 */
3854 3944 static boolean_t
3855 3945 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3856 3946 {
3857 3947 if (mode == ixgbe->loopback_mode)
3858 3948 return (B_TRUE);
3859 3949
3860 3950 ixgbe->loopback_mode = mode;
3861 3951
3862 3952 if (mode == IXGBE_LB_NONE) {
3863 3953 /*
3864 3954 * Reset the chip
3865 3955 */
3866 3956 (void) ixgbe_reset(ixgbe);
3867 3957 return (B_TRUE);
3868 3958 }
3869 3959
3870 3960 mutex_enter(&ixgbe->gen_lock);
3871 3961
3872 3962 switch (mode) {
3873 3963 default:
3874 3964 mutex_exit(&ixgbe->gen_lock);
3875 3965 return (B_FALSE);
3876 3966
3877 3967 case IXGBE_LB_EXTERNAL:
3878 3968 break;
3879 3969
3880 3970 case IXGBE_LB_INTERNAL_MAC:
3881 3971 ixgbe_set_internal_mac_loopback(ixgbe);
3882 3972 break;
3883 3973 }
3884 3974
3885 3975 mutex_exit(&ixgbe->gen_lock);
3886 3976
3887 3977 return (B_TRUE);
3888 3978 }
3889 3979
/*
 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
 *
 * Puts the MAC into loopback (HLREG0.LPBK) and then performs the
 * MAC-type specific work needed to keep looped packets off the wire.
 * Called with ixgbe->gen_lock held (see ixgbe_set_loopback_mode).
 */
static void
ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw;
	uint32_t reg;
	uint8_t atlas;

	hw = &ixgbe->hw;

	/*
	 * Setup MAC loopback: set the loopback bit in HLREG0 and clear
	 * the link mode select field in AUTOC.
	 */
	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
	reg |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);

	reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
	reg &= ~IXGBE_AUTOC_LMS_MASK;
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);

	/*
	 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/*
		 * 82598: power down the Atlas analog Tx lanes
		 * (register, 10G, 1G and AN lanes) via read-modify-write
		 * of the analog registers.
		 */
		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
		    atlas);

		(void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
		    &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		(void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
		    atlas);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/*
		 * 82599/X540: force link up (FLU) in 10G KX4 mode and
		 * re-run link setup at 10G full duplex.
		 */
		reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
		reg |= (IXGBE_AUTOC_FLU |
		    IXGBE_AUTOC_10G_KX4);
		IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);

		(void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
		    B_FALSE, B_TRUE);
		break;

	default:
		/* no loopback-specific setup for other MAC types */
		break;
	}
}
3957 4048
3958 4049 #pragma inline(ixgbe_intr_rx_work)
3959 4050 /*
3960 4051 * ixgbe_intr_rx_work - RX processing of ISR.
3961 4052 */
3962 4053 static void
3963 4054 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
3964 4055 {
3965 4056 mblk_t *mp;
3966 4057
3967 4058 mutex_enter(&rx_ring->rx_lock);
3968 4059
3969 4060 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3970 4061 mutex_exit(&rx_ring->rx_lock);
3971 4062
3972 4063 if (mp != NULL)
3973 4064 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3974 4065 rx_ring->ring_gen_num);
3975 4066 }
3976 4067
3977 4068 #pragma inline(ixgbe_intr_tx_work)
3978 4069 /*
3979 4070 * ixgbe_intr_tx_work - TX processing of ISR.
3980 4071 */
3981 4072 static void
3982 4073 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
3983 4074 {
3984 4075 ixgbe_t *ixgbe = tx_ring->ixgbe;
3985 4076
3986 4077 /*
3987 4078 * Recycle the tx descriptors
3988 4079 */
3989 4080 tx_ring->tx_recycle(tx_ring);
3990 4081
3991 4082 /*
3992 4083 * Schedule the re-transmit
3993 4084 */
3994 4085 if (tx_ring->reschedule &&
3995 4086 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
3996 4087 tx_ring->reschedule = B_FALSE;
3997 4088 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
3998 4089 tx_ring->ring_handle);
3999 4090 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4000 4091 }
4001 4092 }
4002 4093
#pragma inline(ixgbe_intr_other_work)
/*
 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
 *
 * Handles link change, fan failure, SFP hot-plug and over-temperature
 * causes found in the given eicr snapshot.  Must be called with
 * ixgbe->gen_lock held.
 */
static void
ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
{
	ASSERT(mutex_owned(&ixgbe->gen_lock));

	/*
	 * handle link status change
	 */
	if (eicr & IXGBE_EICR_LSC) {
		ixgbe_driver_link_check(ixgbe);
		ixgbe_get_hw_state(ixgbe);
	}

	/*
	 * check for fan failure on adapters with fans
	 * (SDP1 doubles as the fan-fail signal on such adapters)
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
	    (eicr & IXGBE_EICR_GPI_SDP1)) {
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);

		/*
		 * Disable the adapter interrupts
		 */
		ixgbe_disable_adapter_interrupts(ixgbe);

		/*
		 * Disable Rx/Tx units
		 */
		(void) ixgbe_stop_adapter(&ixgbe->hw);

		/* the adapter is left stopped; only replacement recovers it */
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
		ixgbe_error(ixgbe,
		    "Problem: Network adapter has been stopped "
		    "because the fan has stopped.\n");
		ixgbe_error(ixgbe,
		    "Action: Replace the adapter.\n");

		/* re-enable the interrupt, which was automasked */
		ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
	}

	/*
	 * Do SFP check for adapters with hot-plug capability.
	 * The check itself runs asynchronously on sfp_taskq; the eicr
	 * snapshot is stashed in ixgbe->eicr for the taskq to inspect.
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
	    ((eicr & IXGBE_EICR_GPI_SDP1) || (eicr & IXGBE_EICR_GPI_SDP2))) {
		ixgbe->eicr = eicr;
		if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
		    ixgbe_sfp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for SFP check");
		}
	}

	/*
	 * Do over-temperature check for adapters with temp sensor,
	 * likewise dispatched asynchronously on overtemp_taskq.
	 */
	if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
	    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
		ixgbe->eicr = eicr;
		if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
		    ixgbe_overtemp_check, (void *)ixgbe,
		    DDI_NOSLEEP)) != DDI_SUCCESS) {
			ixgbe_log(ixgbe, "No memory available to dispatch "
			    "taskq for overtemp check");
		}
	}
}
4076 4167
/*
 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
 *
 * A single fixed interrupt covers rx ring 0 (EICR bit 0), tx ring 0
 * (EICR bit 1) and the "other" causes.  If no cause bit is set, the
 * interrupt belongs to another device sharing the line and is not
 * claimed.  Packet delivery and tx rescheduling are deferred until
 * after gen_lock is dropped.
 */
static uint_t
ixgbe_intr_legacy(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_tx_ring_t *tx_ring;
	ixgbe_rx_ring_t *rx_ring;
	uint32_t eicr;
	mblk_t *mp;
	boolean_t tx_reschedule;
	uint_t result;

	_NOTE(ARGUNUSED(arg2));

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (DDI_INTR_UNCLAIMED);
	}

	mp = NULL;
	tx_reschedule = B_FALSE;

	/*
	 * Any bit set in eicr: claim this interrupt
	 */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	/* a bad register access means the device is suspect; claim and bail */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		mutex_exit(&ixgbe->gen_lock);
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (DDI_INTR_CLAIMED);
	}

	if (eicr) {
		/*
		 * For legacy interrupt, we have only one interrupt,
		 * so we have only one rx ring and one tx ring enabled.
		 */
		ASSERT(ixgbe->num_rx_rings == 1);
		ASSERT(ixgbe->num_tx_rings == 1);

		/*
		 * For legacy interrupt, rx rings[0] will use RTxQ[0].
		 * Mask the queue cause (EIMC) while servicing it; the
		 * eims update below re-enables it on the way out.
		 */
		if (eicr & 0x1) {
			ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
			/*
			 * Clean the rx descriptors
			 */
			rx_ring = &ixgbe->rx_rings[0];
			mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
		}

		/*
		 * For legacy interrupt, tx rings[0] will use RTxQ[1].
		 */
		if (eicr & 0x2) {
			/*
			 * Recycle the tx descriptors
			 */
			tx_ring = &ixgbe->tx_rings[0];
			tx_ring->tx_recycle(tx_ring);

			/*
			 * Schedule the re-transmit (performed after the
			 * lock is dropped, see below)
			 */
			tx_reschedule = (tx_ring->reschedule &&
			    (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
		}

		/* any interrupt type other than tx/rx */
		if (eicr & ixgbe->capab->other_intr) {
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
				/* mask "other" causes while servicing them */
				ixgbe->eimc = IXGBE_82599_OTHER_INTR;
				IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
				break;

			default:
				break;
			}
			ixgbe_intr_other_work(ixgbe, eicr);
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		}

		mutex_exit(&ixgbe->gen_lock);

		result = DDI_INTR_CLAIMED;
	} else {
		mutex_exit(&ixgbe->gen_lock);

		/*
		 * No interrupt cause bits set: don't claim this interrupt.
		 */
		result = DDI_INTR_UNCLAIMED;
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	/*
	 * Do the following work outside of the gen_lock
	 */
	if (mp != NULL) {
		mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
		    rx_ring->ring_gen_num);
	}

	if (tx_reschedule) {
		tx_ring->reschedule = B_FALSE;
		mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
		IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
	}

	return (result);
}
4204 4296
/*
 * ixgbe_intr_msi - Interrupt handler for MSI.
 *
 * A single MSI vector covers rx ring 0 (EICR bit 0), tx ring 0
 * (EICR bit 1) and the "other" causes.  MSI is never shared, so the
 * interrupt is always claimed.
 */
static uint_t
ixgbe_intr_msi(void *arg1, void *arg2)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg1;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;

	_NOTE(ARGUNUSED(arg2));

	/* read-to-clear the cause register */
	eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

	/* a bad register access means the device is suspect; claim and bail */
	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
		atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
		return (DDI_INTR_CLAIMED);
	}

	/*
	 * For MSI interrupt, we have only one vector,
	 * so we have only one rx ring and one tx ring enabled.
	 */
	ASSERT(ixgbe->num_rx_rings == 1);
	ASSERT(ixgbe->num_tx_rings == 1);

	/*
	 * For MSI interrupt, rx rings[0] will use RTxQ[0].
	 */
	if (eicr & 0x1) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
	}

	/*
	 * For MSI interrupt, tx rings[0] will use RTxQ[1].
	 */
	if (eicr & 0x2) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
	}

	/* any interrupt type other than tx/rx */
	if (eicr & ixgbe->capab->other_intr) {
		mutex_enter(&ixgbe->gen_lock);
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
			break;

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			/* mask "other" causes while servicing them */
			ixgbe->eimc = IXGBE_82599_OTHER_INTR;
			IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
			break;

		default:
			break;
		}
		ixgbe_intr_other_work(ixgbe, eicr);
		ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
		mutex_exit(&ixgbe->gen_lock);
	}

	/* re-enable the interrupts which were automasked */
	IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);

	return (DDI_INTR_CLAIMED);
}
4272 4365
/*
 * ixgbe_intr_msix - Interrupt handler for MSI-X.
 *
 * Each vector carries a bitmap (in its ixgbe_intr_vector_t) of the rx
 * rings, tx rings and the "other" cause it services; this handler
 * walks those bitmaps and does the corresponding work.
 */
static uint_t
ixgbe_intr_msix(void *arg1, void *arg2)
{
	ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
	ixgbe_t *ixgbe = vect->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t eicr;
	int r_idx = 0;

	_NOTE(ARGUNUSED(arg2));

	/*
	 * Clean each rx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
		    (ixgbe->num_rx_rings - 1));
	}

	/*
	 * Clean each tx ring that has its bit set in the map
	 */
	r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
	while (r_idx >= 0) {
		ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
		r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
		    (ixgbe->num_tx_rings - 1));
	}


	/*
	 * Clean other interrupt (link change) that has its bit set in the map
	 */
	if (BT_TEST(vect->other_map, 0) == 1) {
		/* read-to-clear the cause register */
		eicr = IXGBE_READ_REG(hw, IXGBE_EICR);

		if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
		    DDI_FM_OK) {
			ddi_fm_service_impact(ixgbe->dip,
			    DDI_SERVICE_DEGRADED);
			atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
			return (DDI_INTR_CLAIMED);
		}

		/*
		 * Check "other" cause bits: any interrupt type other than tx/rx
		 */
		if (eicr & ixgbe->capab->other_intr) {
			mutex_enter(&ixgbe->gen_lock);
			switch (hw->mac.type) {
			case ixgbe_mac_82598EB:
				ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
				ixgbe_intr_other_work(ixgbe, eicr);
				break;

			case ixgbe_mac_82599EB:
			case ixgbe_mac_X540:
				/*
				 * NOTE(review): only the queue bits are
				 * re-armed in eims here; "other" causes rely
				 * on the EIMS write below -- confirm against
				 * the 82599/X540 automask behavior.
				 */
				ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
				ixgbe_intr_other_work(ixgbe, eicr);
				break;

			default:
				break;
			}
			mutex_exit(&ixgbe->gen_lock);
		}

		/* re-enable the interrupts which were automasked */
		IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
	}

	return (DDI_INTR_CLAIMED);
}
4350 4444
4351 4445 /*
4352 4446 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4353 4447 *
4354 4448 * Normal sequence is to try MSI-X; if not sucessful, try MSI;
4355 4449 * if not successful, try Legacy.
4356 4450 * ixgbe->intr_force can be used to force sequence to start with
4357 4451 * any of the 3 types.
4358 4452 * If MSI-X is not used, number of tx/rx rings is forced to 1.
4359 4453 */
4360 4454 static int
4361 4455 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4362 4456 {
4363 4457 dev_info_t *devinfo;
4364 4458 int intr_types;
4365 4459 int rc;
4366 4460
4367 4461 devinfo = ixgbe->dip;
4368 4462
4369 4463 /*
4370 4464 * Get supported interrupt types
4371 4465 */
4372 4466 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4373 4467
4374 4468 if (rc != DDI_SUCCESS) {
4375 4469 ixgbe_log(ixgbe,
4376 4470 "Get supported interrupt types failed: %d", rc);
4377 4471 return (IXGBE_FAILURE);
4378 4472 }
4379 4473 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
4380 4474
4381 4475 ixgbe->intr_type = 0;
4382 4476
4383 4477 /*
4384 4478 * Install MSI-X interrupts
4385 4479 */
4386 4480 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4387 4481 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
4388 4482 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
4389 4483 if (rc == IXGBE_SUCCESS)
4390 4484 return (IXGBE_SUCCESS);
4391 4485
4392 4486 ixgbe_log(ixgbe,
4393 4487 "Allocate MSI-X failed, trying MSI interrupts...");
4394 4488 }
4395 4489
4396 4490 /*
4397 4491 * MSI-X not used, force rings and groups to 1
4398 4492 */
4399 4493 ixgbe->num_rx_rings = 1;
4400 4494 ixgbe->num_rx_groups = 1;
4401 4495 ixgbe->num_tx_rings = 1;
4402 4496 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
4403 4497 ixgbe_log(ixgbe,
4404 4498 "MSI-X not used, force rings and groups number to 1");
4405 4499
4406 4500 /*
4407 4501 * Install MSI interrupts
4408 4502 */
4409 4503 if ((intr_types & DDI_INTR_TYPE_MSI) &&
4410 4504 (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
4411 4505 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
4412 4506 if (rc == IXGBE_SUCCESS)
4413 4507 return (IXGBE_SUCCESS);
4414 4508
4415 4509 ixgbe_log(ixgbe,
4416 4510 "Allocate MSI failed, trying Legacy interrupts...");
4417 4511 }
4418 4512
4419 4513 /*
4420 4514 * Install legacy interrupts
4421 4515 */
4422 4516 if (intr_types & DDI_INTR_TYPE_FIXED) {
4423 4517 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
4424 4518 if (rc == IXGBE_SUCCESS)
4425 4519 return (IXGBE_SUCCESS);
4426 4520
4427 4521 ixgbe_log(ixgbe,
4428 4522 "Allocate Legacy interrupts failed");
4429 4523 }
4430 4524
4431 4525 /*
4432 4526 * If none of the 3 types succeeded, return failure
4433 4527 */
4434 4528 return (IXGBE_FAILURE);
4435 4529 }
4436 4530
/*
 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
 *
 * For legacy and MSI, only 1 handle is needed.  For MSI-X,
 * if fewer than 2 handles are available, return failure.
 * Upon success, this maps the vectors to rx and tx rings for
 * interrupts.
 *
 * On any failure after the handle array is allocated, all state is
 * torn down via ixgbe_rem_intrs().  On success, ixgbe->htable,
 * intr_cnt/intr_cnt_max/intr_cnt_min, intr_pri, intr_cap and
 * intr_type are all filled in.
 */
static int
ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
{
	dev_info_t *devinfo;
	int request, count, actual;
	int minimum;
	int rc;
	uint32_t ring_per_group;

	devinfo = ixgbe->dip;

	/* decide how many handles to request for the given type */
	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
		request = 1;	/* Request 1 legacy interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
		break;

	case DDI_INTR_TYPE_MSI:
		request = 1;	/* Request 1 MSI interrupt handle */
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
		break;

	case DDI_INTR_TYPE_MSIX:
		/*
		 * Best number of vectors for the adapter is
		 * (# rx rings + # tx rings), however we will
		 * limit the request number.
		 */
		request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
		if (request > ixgbe->capab->max_ring_vect)
			request = ixgbe->capab->max_ring_vect;
		minimum = 1;
		IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
		break;

	default:
		ixgbe_log(ixgbe,
		    "invalid call to ixgbe_alloc_intr_handles(): %d\n",
		    intr_type);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d  minimum: %d",
	    request, minimum);

	/*
	 * Get number of supported interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if ((rc != DDI_SUCCESS) || (count < minimum)) {
		ixgbe_log(ixgbe,
		    "Get interrupt number failed. Return: %d, count: %d",
		    rc, count);
		return (IXGBE_FAILURE);
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);

	actual = 0;
	ixgbe->intr_cnt = 0;
	ixgbe->intr_cnt_max = 0;
	ixgbe->intr_cnt_min = 0;

	/*
	 * Allocate an array of interrupt handles
	 */
	ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
	ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);

	/* the DDI may grant fewer vectors than requested (actual) */
	rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
	    request, &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe, "Allocate interrupts failed. "
		    "return: %d, request: %d, actual: %d",
		    rc, request, actual);
		goto alloc_handle_fail;
	}
	IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);

	/*
	 * upper/lower limit of interrupts
	 */
	ixgbe->intr_cnt = actual;
	ixgbe->intr_cnt_max = request;
	ixgbe->intr_cnt_min = minimum;

	/*
	 * rss number per group should not exceed the rx interrupt number,
	 * else need to adjust rx ring number.
	 */
	ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
	ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
	if (actual < ring_per_group) {
		ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
		ixgbe_setup_vmdq_rss_conf(ixgbe);
	}

	/*
	 * Now we know the actual number of vectors. Here we map the vector
	 * to other, rx rings and tx ring.
	 */
	if (actual < minimum) {
		ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
		    actual);
		goto alloc_handle_fail;
	}

	/*
	 * Get priority for first vector, assume remaining are all the same
	 */
	rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt priority failed: %d", rc);
		goto alloc_handle_fail;
	}

	rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
	if (rc != DDI_SUCCESS) {
		ixgbe_log(ixgbe,
		    "Get interrupt cap failed: %d", rc);
		goto alloc_handle_fail;
	}

	ixgbe->intr_type = intr_type;

	return (IXGBE_SUCCESS);

alloc_handle_fail:
	/* frees the handle array and any allocated vectors */
	ixgbe_rem_intrs(ixgbe);

	return (IXGBE_FAILURE);
}
4578 4672
4579 4673 /*
4580 4674 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
4581 4675 *
4582 4676 * Before adding the interrupt handlers, the interrupt vectors have
4583 4677 * been allocated, and the rx/tx rings have also been allocated.
4584 4678 */
4585 4679 static int
4586 4680 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
4587 4681 {
4588 4682 int vector = 0;
4589 4683 int rc;
4590 4684
4591 4685 switch (ixgbe->intr_type) {
4592 4686 case DDI_INTR_TYPE_MSIX:
4593 4687 /*
4594 4688 * Add interrupt handler for all vectors
4595 4689 */
4596 4690 for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
4597 4691 /*
4598 4692 * install pointer to vect_map[vector]
4599 4693 */
4600 4694 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4601 4695 (ddi_intr_handler_t *)ixgbe_intr_msix,
4602 4696 (void *)&ixgbe->vect_map[vector], NULL);
4603 4697
4604 4698 if (rc != DDI_SUCCESS) {
4605 4699 ixgbe_log(ixgbe,
4606 4700 "Add interrupt handler failed. "
4607 4701 "return: %d, vector: %d", rc, vector);
4608 4702 for (vector--; vector >= 0; vector--) {
4609 4703 (void) ddi_intr_remove_handler(
4610 4704 ixgbe->htable[vector]);
4611 4705 }
4612 4706 return (IXGBE_FAILURE);
4613 4707 }
4614 4708 }
4615 4709
4616 4710 break;
4617 4711
4618 4712 case DDI_INTR_TYPE_MSI:
4619 4713 /*
4620 4714 * Add interrupt handlers for the only vector
4621 4715 */
4622 4716 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4623 4717 (ddi_intr_handler_t *)ixgbe_intr_msi,
4624 4718 (void *)ixgbe, NULL);
4625 4719
4626 4720 if (rc != DDI_SUCCESS) {
4627 4721 ixgbe_log(ixgbe,
4628 4722 "Add MSI interrupt handler failed: %d", rc);
4629 4723 return (IXGBE_FAILURE);
4630 4724 }
4631 4725
4632 4726 break;
4633 4727
4634 4728 case DDI_INTR_TYPE_FIXED:
4635 4729 /*
4636 4730 * Add interrupt handlers for the only vector
4637 4731 */
4638 4732 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4639 4733 (ddi_intr_handler_t *)ixgbe_intr_legacy,
4640 4734 (void *)ixgbe, NULL);
4641 4735
4642 4736 if (rc != DDI_SUCCESS) {
4643 4737 ixgbe_log(ixgbe,
4644 4738 "Add legacy interrupt handler failed: %d", rc);
4645 4739 return (IXGBE_FAILURE);
4646 4740 }
4647 4741
4648 4742 break;
4649 4743
4650 4744 default:
4651 4745 return (IXGBE_FAILURE);
4652 4746 }
4653 4747
4654 4748 return (IXGBE_SUCCESS);
4655 4749 }
4656 4750
4657 4751 #pragma inline(ixgbe_map_rxring_to_vector)
4658 4752 /*
4659 4753 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
4660 4754 */
4661 4755 static void
4662 4756 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
4663 4757 {
4664 4758 /*
4665 4759 * Set bit in map
4666 4760 */
4667 4761 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4668 4762
4669 4763 /*
4670 4764 * Count bits set
4671 4765 */
4672 4766 ixgbe->vect_map[v_idx].rxr_cnt++;
4673 4767
4674 4768 /*
4675 4769 * Remember bit position
4676 4770 */
4677 4771 ixgbe->rx_rings[r_idx].intr_vector = v_idx;
4678 4772 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
4679 4773 }
4680 4774
4681 4775 #pragma inline(ixgbe_map_txring_to_vector)
4682 4776 /*
4683 4777 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
4684 4778 */
4685 4779 static void
4686 4780 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
4687 4781 {
4688 4782 /*
4689 4783 * Set bit in map
4690 4784 */
4691 4785 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
4692 4786
4693 4787 /*
4694 4788 * Count bits set
4695 4789 */
4696 4790 ixgbe->vect_map[v_idx].txr_cnt++;
4697 4791
4698 4792 /*
4699 4793 * Remember bit position
4700 4794 */
4701 4795 ixgbe->tx_rings[t_idx].intr_vector = v_idx;
4702 4796 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
4703 4797 }
4704 4798
/*
 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
 * allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 *
 * The IVAR layout differs by MAC: on 82598 each 32-bit IVAR register
 * holds four 8-bit entries with rx and tx causes in separate ranges;
 * on 82599/X540 each IVAR register holds the rx and tx entries for a
 * pair of queues, and "other" causes live in IVAR_MISC.
 */
static void
ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
    int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		msix_vector |= IXGBE_IVAR_ALLOC_VAL;
		if (cause == -1) {
			/* "other" causes share the rx entry space */
			cause = 0;
		}
		/* four 8-bit entries per register; pick register and byte */
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
		ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (cause == -1) {
			/* other causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes */
			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
			/* two queues per register, rx/tx bytes interleaved */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar &= ~(0xFF << index);
			ivar |= (msix_vector << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;

	default:
		break;
	}
}
4759 4854
/*
 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
 * given interrupt vector allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 */
static void
ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 has no separate "other" table; fold it into rx */
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		/* set only the ALLOC_VAL bit within this entry's byte */
		ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
		    (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (cause == -1) {
			/* other causes: VAL bit lives in IVAR_MISC */
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes: two queues per IVAR register */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;

	default:
		break;
	}
}
4808 4904
/*
 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
 * given interrupt vector allocation register (IVAR).
 * cause:
 *   -1 : other cause
 *    0 : rx
 *    1 : tx
 */
static void
ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598 has no separate "other" table; fold it into rx */
		if (cause == -1) {
			cause = 0;
		}
		index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
		ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
		/* clear only the ALLOC_VAL bit within this entry's byte */
		ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
		    (intr_alloc_entry & 0x3)));
		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		if (cause == -1) {
			/* other causes: VAL bit lives in IVAR_MISC */
			index = (intr_alloc_entry & 1) * 8;
			ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
		} else {
			/* tx or rx causes: two queues per IVAR register */
			index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
			ivar = IXGBE_READ_REG(hw,
			    IXGBE_IVAR(intr_alloc_entry >> 1));
			ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
			    ivar);
		}
		break;

	default:
		break;
	}
}
4857 4954
/*
 * Convert the rx ring index driver maintained to the rx ring index
 * in h/w.
 */
static uint32_t
ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
{

	struct ixgbe_hw *hw = &ixgbe->hw;
	uint32_t rx_ring_per_group, hw_rx_index;

	if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
	    ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
		/* RSS or no classification: h/w rings used contiguously */
		return (sw_rx_index);
	} else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			return (sw_rx_index);

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			/*
			 * Pools are spaced two h/w rings apart here --
			 * presumably the 64-pool x 2-ring VMDq layout;
			 * confirm against the rx classification setup.
			 */
			return (sw_rx_index * 2);

		default:
			break;
		}
	} else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
		rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;

		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			/* groups are 16 h/w rings apart on 82598 */
			hw_rx_index = (sw_rx_index / rx_ring_per_group) *
			    16 + (sw_rx_index % rx_ring_per_group);
			return (hw_rx_index);

		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
			/*
			 * Group stride depends on the pool configuration:
			 * stride 2 when more than 32 groups are in use,
			 * stride 4 otherwise.
			 */
			if (ixgbe->num_rx_groups > 32) {
				hw_rx_index = (sw_rx_index /
				    rx_ring_per_group) * 2 +
				    (sw_rx_index % rx_ring_per_group);
			} else {
				hw_rx_index = (sw_rx_index /
				    rx_ring_per_group) * 4 +
				    (sw_rx_index % rx_ring_per_group);
			}
			return (hw_rx_index);

		default:
			break;
		}
	}

	/*
	 * Should never reach. Just to make compiler happy.
	 */
	return (sw_rx_index);
}
4914 5013
4915 5014 /*
4916 5015 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
4917 5016 *
4918 5017 * For MSI-X, here will map rx interrupt, tx interrupt and other interrupt
4919 5018 * to vector[0 - (intr_cnt -1)].
4920 5019 */
4921 5020 static int
4922 5021 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
4923 5022 {
4924 5023 int i, vector = 0;
4925 5024
4926 5025 /* initialize vector map */
4927 5026 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
4928 5027 for (i = 0; i < ixgbe->intr_cnt; i++) {
4929 5028 ixgbe->vect_map[i].ixgbe = ixgbe;
4930 5029 }
4931 5030
4932 5031 /*
4933 5032 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
4934 5033 * tx rings[0] on RTxQ[1].
4935 5034 */
4936 5035 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4937 5036 ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
4938 5037 ixgbe_map_txring_to_vector(ixgbe, 0, 1);
4939 5038 return (IXGBE_SUCCESS);
4940 5039 }
4941 5040
4942 5041 /*
4943 5042 * Interrupts/vectors mapping for MSI-X
4944 5043 */
4945 5044
4946 5045 /*
4947 5046 * Map other interrupt to vector 0,
4948 5047 * Set bit in map and count the bits set.
4949 5048 */
4950 5049 BT_SET(ixgbe->vect_map[vector].other_map, 0);
4951 5050 ixgbe->vect_map[vector].other_cnt++;
4952 5051
4953 5052 /*
4954 5053 * Map rx ring interrupts to vectors
4955 5054 */
4956 5055 for (i = 0; i < ixgbe->num_rx_rings; i++) {
4957 5056 ixgbe_map_rxring_to_vector(ixgbe, i, vector);
4958 5057 vector = (vector +1) % ixgbe->intr_cnt;
4959 5058 }
4960 5059
4961 5060 /*
4962 5061 * Map tx ring interrupts to vectors
4963 5062 */
4964 5063 for (i = 0; i < ixgbe->num_tx_rings; i++) {
4965 5064 ixgbe_map_txring_to_vector(ixgbe, i, vector);
4966 5065 vector = (vector +1) % ixgbe->intr_cnt;
4967 5066 }
4968 5067
4969 5068 return (IXGBE_SUCCESS);
4970 5069 }
4971 5070
/*
 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
 *
 * This relies on ring/vector mapping already set up in the
 * vect_map[] structures
 */
static void
ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_intr_vector_t *vect;	/* vector bitmap */
	int r_idx;	/* ring index */
	int v_idx;	/* vector index */
	uint32_t hw_index;

	/*
	 * Clear any previous entries
	 */
	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* 82598: 25 IVAR registers, no IVAR_MISC */
		for (v_idx = 0; v_idx < 25; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		break;

	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		/* 82599/X540: 64 IVAR registers plus IVAR_MISC */
		for (v_idx = 0; v_idx < 64; v_idx++)
			IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
		IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
		break;

	default:
		break;
	}

	/*
	 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
	 * tx rings[0] will use RTxQ[1].
	 */
	if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
		ixgbe_setup_ivar(ixgbe, 0, 0, 0);
		ixgbe_setup_ivar(ixgbe, 0, 1, 1);
		return;
	}

	/*
	 * For MSI-X interrupt, "Other" is always on vector[0].
	 */
	ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);

	/*
	 * For each interrupt vector, populate the IVAR table
	 */
	for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
		vect = &ixgbe->vect_map[v_idx];

		/*
		 * For each rx ring bit set in this vector's rx bitmap,
		 * program the ring's IVAR entry to point at this vector.
		 */
		r_idx = bt_getlowbit(vect->rx_map, 0,
		    (ixgbe->num_rx_rings - 1));

		while (r_idx >= 0) {
			/* rx IVAR entries are keyed by the h/w ring index */
			hw_index = ixgbe->rx_rings[r_idx].hw_index;
			ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
			r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
			    (ixgbe->num_rx_rings - 1));
		}

		/*
		 * For each tx ring bit set (tx uses the s/w index directly)
		 */
		r_idx = bt_getlowbit(vect->tx_map, 0,
		    (ixgbe->num_tx_rings - 1));

		while (r_idx >= 0) {
			ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
			r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
			    (ixgbe->num_tx_rings - 1));
		}
	}
}
5053 5153
5054 5154 /*
5055 5155 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
5056 5156 */
5057 5157 static void
5058 5158 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
5059 5159 {
5060 5160 int i;
5061 5161 int rc;
5062 5162
5063 5163 for (i = 0; i < ixgbe->intr_cnt; i++) {
5064 5164 rc = ddi_intr_remove_handler(ixgbe->htable[i]);
5065 5165 if (rc != DDI_SUCCESS) {
5066 5166 IXGBE_DEBUGLOG_1(ixgbe,
5067 5167 "Remove intr handler failed: %d", rc);
5068 5168 }
5069 5169 }
5070 5170 }
5071 5171
5072 5172 /*
5073 5173 * ixgbe_rem_intrs - Remove the allocated interrupts.
5074 5174 */
5075 5175 static void
5076 5176 ixgbe_rem_intrs(ixgbe_t *ixgbe)
5077 5177 {
5078 5178 int i;
5079 5179 int rc;
5080 5180
5081 5181 for (i = 0; i < ixgbe->intr_cnt; i++) {
5082 5182 rc = ddi_intr_free(ixgbe->htable[i]);
5083 5183 if (rc != DDI_SUCCESS) {
5084 5184 IXGBE_DEBUGLOG_1(ixgbe,
5085 5185 "Free intr failed: %d", rc);
5086 5186 }
5087 5187 }
5088 5188
5089 5189 kmem_free(ixgbe->htable, ixgbe->intr_size);
5090 5190 ixgbe->htable = NULL;
5091 5191 }
5092 5192
5093 5193 /*
5094 5194 * ixgbe_enable_intrs - Enable all the ddi interrupts.
5095 5195 */
5096 5196 static int
5097 5197 ixgbe_enable_intrs(ixgbe_t *ixgbe)
5098 5198 {
5099 5199 int i;
5100 5200 int rc;
5101 5201
5102 5202 /*
5103 5203 * Enable interrupts
5104 5204 */
5105 5205 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5106 5206 /*
5107 5207 * Call ddi_intr_block_enable() for MSI
5108 5208 */
5109 5209 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
5110 5210 if (rc != DDI_SUCCESS) {
5111 5211 ixgbe_log(ixgbe,
5112 5212 "Enable block intr failed: %d", rc);
5113 5213 return (IXGBE_FAILURE);
5114 5214 }
5115 5215 } else {
5116 5216 /*
5117 5217 * Call ddi_intr_enable() for Legacy/MSI non block enable
5118 5218 */
5119 5219 for (i = 0; i < ixgbe->intr_cnt; i++) {
5120 5220 rc = ddi_intr_enable(ixgbe->htable[i]);
5121 5221 if (rc != DDI_SUCCESS) {
5122 5222 ixgbe_log(ixgbe,
5123 5223 "Enable intr failed: %d", rc);
5124 5224 return (IXGBE_FAILURE);
5125 5225 }
5126 5226 }
5127 5227 }
5128 5228
5129 5229 return (IXGBE_SUCCESS);
5130 5230 }
5131 5231
5132 5232 /*
5133 5233 * ixgbe_disable_intrs - Disable all the interrupts.
5134 5234 */
5135 5235 static int
5136 5236 ixgbe_disable_intrs(ixgbe_t *ixgbe)
5137 5237 {
5138 5238 int i;
5139 5239 int rc;
5140 5240
5141 5241 /*
5142 5242 * Disable all interrupts
5143 5243 */
5144 5244 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5145 5245 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
5146 5246 if (rc != DDI_SUCCESS) {
5147 5247 ixgbe_log(ixgbe,
5148 5248 "Disable block intr failed: %d", rc);
5149 5249 return (IXGBE_FAILURE);
5150 5250 }
5151 5251 } else {
5152 5252 for (i = 0; i < ixgbe->intr_cnt; i++) {
5153 5253 rc = ddi_intr_disable(ixgbe->htable[i]);
5154 5254 if (rc != DDI_SUCCESS) {
5155 5255 ixgbe_log(ixgbe,
5156 5256 "Disable intr failed: %d", rc);
5157 5257 return (IXGBE_FAILURE);
5158 5258 }
5159 5259 }
5160 5260 }
5161 5261
5162 5262 return (IXGBE_SUCCESS);
5163 5263 }
5164 5264
/*
 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
 *
 * Refreshes the link-partner and advertised 1000fdx/100fdx capability
 * parameters from the PCS1G autonegotiation registers.  Caller must hold
 * gen_lock.
 */
static void
ixgbe_get_hw_state(ixgbe_t *ixgbe)
{
	struct ixgbe_hw *hw = &ixgbe->hw;
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
	boolean_t link_up = B_FALSE;
	uint32_t pcs1g_anlp = 0;
	uint32_t pcs1g_ana = 0;
	boolean_t autoneg = B_FALSE;

	ASSERT(mutex_owned(&ixgbe->gen_lock));
	ixgbe->param_lp_1000fdx_cap = 0;
	ixgbe->param_lp_100fdx_cap = 0;

	/* check for link, don't wait */
	(void) ixgbe_check_link(hw, &speed, &link_up, false);
	pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);

	if (link_up) {
		/* link-partner capabilities are only valid with link up */
		pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);

		/*
		 * NOTE(review): both LP capability params test the same
		 * LPFD (full-duplex) bit; there is no distinct 100fdx bit
		 * read here -- confirm this is intentional.
		 */
		ixgbe->param_lp_1000fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
		ixgbe->param_lp_100fdx_cap =
		    (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
	}

	(void) ixgbe_get_link_capabilities(hw, &speed, &autoneg);

	/* advertised caps require both PCS full-duplex and h/w speed support */
	ixgbe->param_adv_1000fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
	    (speed & IXGBE_LINK_SPEED_1GB_FULL)) ? 1 : 0;
	ixgbe->param_adv_100fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
	    (speed & IXGBE_LINK_SPEED_100_FULL)) ? 1 : 0;
}
5202 5302
5203 5303 /*
5204 5304 * ixgbe_get_driver_control - Notify that driver is in control of device.
5205 5305 */
5206 5306 static void
5207 5307 ixgbe_get_driver_control(struct ixgbe_hw *hw)
5208 5308 {
5209 5309 uint32_t ctrl_ext;
5210 5310
5211 5311 /*
5212 5312 * Notify firmware that driver is in control of device
5213 5313 */
5214 5314 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5215 5315 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
5216 5316 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5217 5317 }
5218 5318
5219 5319 /*
5220 5320 * ixgbe_release_driver_control - Notify that driver is no longer in control
5221 5321 * of device.
5222 5322 */
5223 5323 static void
5224 5324 ixgbe_release_driver_control(struct ixgbe_hw *hw)
5225 5325 {
5226 5326 uint32_t ctrl_ext;
5227 5327
5228 5328 /*
5229 5329 * Notify firmware that driver is no longer in control of device
5230 5330 */
5231 5331 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5232 5332 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
5233 5333 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5234 5334 }
5235 5335
5236 5336 /*
5237 5337 * ixgbe_atomic_reserve - Atomic decrease operation.
5238 5338 */
5239 5339 int
5240 5340 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
5241 5341 {
5242 5342 uint32_t oldval;
5243 5343 uint32_t newval;
5244 5344
5245 5345 /*
5246 5346 * ATOMICALLY
5247 5347 */
5248 5348 do {
5249 5349 oldval = *count_p;
5250 5350 if (oldval < n)
5251 5351 return (-1);
5252 5352 newval = oldval - n;
5253 5353 } while (atomic_cas_32(count_p, oldval, newval) != oldval);
5254 5354
5255 5355 return (newval);
5256 5356 }
5257 5357
5258 5358 /*
5259 5359 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5260 5360 */
5261 5361 static uint8_t *
5262 5362 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
5263 5363 {
5264 5364 uint8_t *addr = *upd_ptr;
5265 5365 uint8_t *new_ptr;
5266 5366
5267 5367 _NOTE(ARGUNUSED(hw));
5268 5368 _NOTE(ARGUNUSED(vmdq));
5269 5369
5270 5370 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
5271 5371 *upd_ptr = new_ptr;
5272 5372 return (addr);
5273 5373 }
5274 5374
/*
 * FMA support
 */

/*
 * ixgbe_check_acc_handle - Fetch and clear the FMA error status of a
 * register access handle; returns the fme_status value.
 */
int
ixgbe_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	/* clear the latched error so it is not reported again */
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}
5287 5387
/*
 * ixgbe_check_dma_handle - Fetch the FMA error status of a DMA handle;
 * returns the fme_status value.
 *
 * NOTE(review): unlike ixgbe_check_acc_handle() above, no err_clear is
 * done here -- confirm this asymmetry is intentional.
 */
int
ixgbe_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}
5296 5396
/*
 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
 */
static int
ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	_NOTE(ARGUNUSED(impl_data));
	/*
	 * as the driver can always deal with an error in any dma or
	 * access handle, we can just return the fme_status value.
	 */
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
5311 5411
/*
 * ixgbe_fm_init - Register the driver's FMA capabilities with the IO
 * fault services framework, based on the fm_capabilities read from the
 * driver configuration.
 */
static void
ixgbe_fm_init(ixgbe_t *ixgbe)
{
	ddi_iblock_cookie_t iblk;
	int fma_dma_flag;

	/*
	 * Only register with IO Fault Services if we have some capability
	 */
	if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		/* flag register accesses for error checking */
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		fma_dma_flag = 1;
	} else {
		fma_dma_flag = 0;
	}

	/* propagate the DMA error-checking choice to the DMA attributes */
	ixgbe_set_fma_flags(fma_dma_flag);

	if (ixgbe->fm_capabilities) {

		/*
		 * Register capabilities with IO Fault Services
		 */
		ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);

		/*
		 * Initialize pci ereport capabilities if ereport capable
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_setup(ixgbe->dip);

		/*
		 * Register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_register(ixgbe->dip,
			    ixgbe_fm_error_cb, (void*) ixgbe);
	}
}
5357 5457
/*
 * ixgbe_fm_fini - Tear down whatever ixgbe_fm_init() registered with
 * the IO fault services framework.
 */
static void
ixgbe_fm_fini(ixgbe_t *ixgbe)
{
	/*
	 * Only unregister FMA capabilities if they are registered
	 */
	if (ixgbe->fm_capabilities) {

		/*
		 * Release any resources allocated by pci_ereport_setup()
		 */
		if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			pci_ereport_teardown(ixgbe->dip);

		/*
		 * Un-register error callback if error callback capable
		 */
		if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
			ddi_fm_handler_unregister(ixgbe->dip);

		/*
		 * Unregister from IO Fault Service
		 */
		ddi_fm_fini(ixgbe->dip);
	}
}
5385 5485
5386 5486 void
5387 5487 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
5388 5488 {
5389 5489 uint64_t ena;
5390 5490 char buf[FM_MAX_CLASS];
5391 5491
5392 5492 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5393 5493 ena = fm_ena_generate(0, FM_ENA_FMT1);
5394 5494 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
5395 5495 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
5396 5496 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5397 5497 }
5398 5498 }
5399 5499
5400 5500 static int
5401 5501 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
5402 5502 {
5403 5503 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
5404 5504
5405 5505 mutex_enter(&rx_ring->rx_lock);
5406 5506 rx_ring->ring_gen_num = mr_gen_num;
5407 5507 mutex_exit(&rx_ring->rx_lock);
5408 5508 return (0);
5409 5509 }
5410 5510
5411 5511 /*
5412 5512 * Get the global ring index by a ring index within a group.
5413 5513 */
5414 5514 static int
5415 5515 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
5416 5516 {
5417 5517 ixgbe_rx_ring_t *rx_ring;
5418 5518 int i;
5419 5519
5420 5520 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5421 5521 rx_ring = &ixgbe->rx_rings[i];
5422 5522 if (rx_ring->group_index == gindex)
5423 5523 rindex--;
5424 5524 if (rindex < 0)
5425 5525 return (i);
5426 5526 }
5427 5527
5428 5528 return (-1);
5429 5529 }
5430 5530
/*
 * Callback function for MAC layer to register all rings.
 */
/* ARGSUSED */
void
ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
    const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
{
	ixgbe_t *ixgbe = (ixgbe_t *)arg;
	mac_intr_t *mintr = &infop->mri_intr;

	switch (rtype) {
	case MAC_RING_TYPE_RX: {
		/*
		 * 'index' is the ring index within the group.
		 * Need to get the global ring index by searching in groups.
		 */
		int global_ring_index = ixgbe_get_rx_ring_index(
		    ixgbe, group_index, ring_index);

		ASSERT(global_ring_index >= 0);

		ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
		rx_ring->ring_handle = rh;

		/* hand MAC the rx ring's driver handle and entry points */
		infop->mri_driver = (mac_ring_driver_t)rx_ring;
		infop->mri_start = ixgbe_ring_start;
		infop->mri_stop = NULL;
		infop->mri_poll = ixgbe_ring_rx_poll;
		infop->mri_stat = ixgbe_rx_ring_stat;

		/* per-ring interrupt control entry points */
		mintr->mi_handle = (mac_intr_handle_t)rx_ring;
		mintr->mi_enable = ixgbe_rx_ring_intr_enable;
		mintr->mi_disable = ixgbe_rx_ring_intr_disable;
		if (ixgbe->intr_type &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			/* expose the DDI handle of the ring's vector */
			mintr->mi_ddi_handle =
			    ixgbe->htable[rx_ring->intr_vector];
		}

		break;
	}
	case MAC_RING_TYPE_TX: {
		/* tx rings are not grouped; index is already global */
		ASSERT(group_index == -1);
		ASSERT(ring_index < ixgbe->num_tx_rings);

		ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
		tx_ring->ring_handle = rh;

		/* hand MAC the tx ring's driver handle and entry points */
		infop->mri_driver = (mac_ring_driver_t)tx_ring;
		infop->mri_start = NULL;
		infop->mri_stop = NULL;
		infop->mri_tx = ixgbe_ring_tx;
		infop->mri_stat = ixgbe_tx_ring_stat;
		if (ixgbe->intr_type &
		    (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
			mintr->mi_ddi_handle =
			    ixgbe->htable[tx_ring->intr_vector];
		}
		break;
	}
	default:
		break;
	}
}
5496 5596
5497 5597 /*
5498 5598 * Callback funtion for MAC layer to register all groups.
5499 5599 */
5500 5600 void
5501 5601 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
5502 5602 mac_group_info_t *infop, mac_group_handle_t gh)
5503 5603 {
5504 5604 ixgbe_t *ixgbe = (ixgbe_t *)arg;
5505 5605
5506 5606 switch (rtype) {
5507 5607 case MAC_RING_TYPE_RX: {
5508 5608 ixgbe_rx_group_t *rx_group;
5509 5609
5510 5610 rx_group = &ixgbe->rx_groups[index];
5511 5611 rx_group->group_handle = gh;
5512 5612
5513 5613 infop->mgi_driver = (mac_group_driver_t)rx_group;
5514 5614 infop->mgi_start = NULL;
5515 5615 infop->mgi_stop = NULL;
5516 5616 infop->mgi_addmac = ixgbe_addmac;
5517 5617 infop->mgi_remmac = ixgbe_remmac;
5518 5618 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
5519 5619
5520 5620 break;
5521 5621 }
5522 5622 case MAC_RING_TYPE_TX:
5523 5623 break;
5524 5624 default:
5525 5625 break;
5526 5626 }
5527 5627 }
5528 5628
/*
 * Enable interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int hw_r_idx = rx_ring->hw_index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
		mutex_exit(&ixgbe->gen_lock);
		/*
		 * Simply return 0.
		 * Interrupts are being adjusted. ixgbe_intr_adjust()
		 * will eventually re-enable the interrupt when it's
		 * done with the adjustment.
		 */
		return (0);
	}

	/*
	 * To enable interrupt by setting the VAL bit of given interrupt
	 * vector allocation register (IVAR).
	 */
	ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);

	/* record the ring as interrupt-driven in the vector's rx bitmap */
	BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);

	/*
	 * Trigger a Rx interrupt on this ring, so any packets that
	 * arrived while the interrupt was disabled get picked up.
	 */
	IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
	IXGBE_WRITE_FLUSH(&ixgbe->hw);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
5571 5671
/*
 * Disable interrupt on the specified rx ring.
 */
int
ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
{
	ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	int r_idx = rx_ring->index;
	int hw_r_idx = rx_ring->hw_index;
	int v_idx = rx_ring->intr_vector;

	mutex_enter(&ixgbe->gen_lock);
	if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
		mutex_exit(&ixgbe->gen_lock);
		/*
		 * Simply return 0.
		 * In the rare case where an interrupt is being
		 * disabled while interrupts are being adjusted,
		 * we don't fail the operation. No interrupts will
		 * be generated while they are adjusted, and
		 * ixgbe_intr_adjust() will cause the interrupts
		 * to be re-enabled once it completes. Note that
		 * in this case, packets may be delivered to the
		 * stack via interrupts before ixgbe_rx_ring_intr_enable()
		 * is called again. This is acceptable since interrupt
		 * adjustment is infrequent, and the stack will be
		 * able to handle these packets.
		 */
		return (0);
	}

	/*
	 * To disable interrupt by clearing the VAL bit of given interrupt
	 * vector allocation register (IVAR).
	 */
	ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);

	/* remove the ring from the vector's interrupt-driven rx bitmap */
	BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
5616 5716
/*
 * Add a mac address.
 *
 * MAC layer group callback: programs the given unicast address into a
 * free RAR slot bound to this rx group.  Returns 0, ECANCELED if the
 * device is suspended, or ENOSPC if no slot is available.
 */
static int
ixgbe_addmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot, i;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	if (ixgbe->unicst_avail == 0) {
		/* no slots available */
		mutex_exit(&ixgbe->gen_lock);
		return (ENOSPC);
	}

	/*
	 * The first ixgbe->num_rx_groups slots are reserved for each respective
	 * group. The rest slots are shared by all groups. While adding a
	 * MAC address, reserved slots are firstly checked then the shared
	 * slots are searched.
	 */
	slot = -1;
	if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
		/* reserved slot taken; search the shared slots */
		for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
			if (ixgbe->unicst_addr[i].mac.set == 0) {
				slot = i;
				break;
			}
		}
	} else {
		/* this group's reserved slot is still free */
		slot = rx_group->index;
	}

	if (slot == -1) {
		/* no slots available */
		mutex_exit(&ixgbe->gen_lock);
		return (ENOSPC);
	}

	/* program the RAR and account for the consumed slot */
	bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
	(void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
	    rx_group->index, IXGBE_RAH_AV);
	ixgbe->unicst_addr[slot].mac.set = 1;
	ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
	ixgbe->unicst_avail--;

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
5676 5776
/*
 * Remove a mac address.
 *
 * MAC layer group callback: clears the RAR slot holding the given
 * unicast address.  Returns 0, ECANCELED if the device is suspended,
 * or EINVAL if the address is not currently programmed.
 */
static int
ixgbe_remmac(void *arg, const uint8_t *mac_addr)
{
	ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
	ixgbe_t *ixgbe = rx_group->ixgbe;
	struct ixgbe_hw *hw = &ixgbe->hw;
	int slot;

	mutex_enter(&ixgbe->gen_lock);

	if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
		mutex_exit(&ixgbe->gen_lock);
		return (ECANCELED);
	}

	/* locate the slot holding this address */
	slot = ixgbe_unicst_find(ixgbe, mac_addr);
	if (slot == -1) {
		mutex_exit(&ixgbe->gen_lock);
		return (EINVAL);
	}

	if (ixgbe->unicst_addr[slot].mac.set == 0) {
		/* slot exists but was never programmed */
		mutex_exit(&ixgbe->gen_lock);
		return (EINVAL);
	}

	/* clear the RAR and return the slot to the free pool */
	bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
	(void) ixgbe_clear_rar(hw, slot);
	ixgbe->unicst_addr[slot].mac.set = 0;
	ixgbe->unicst_avail++;

	mutex_exit(&ixgbe->gen_lock);

	return (0);
}
|
↓ open down ↓ |
708 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX