XXXX Intel X540 support
--- old/usr/src/uts/common/io/ixgbe/ixgbe_main.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_main.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 + * Copyright (c) 2012, Joyent, Inc. All rights reserved.
29 + * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
28 30 */
29 31
30 32 #include "ixgbe_sw.h"
31 33
32 34 static char ixgbe_ident[] = "Intel 10Gb Ethernet";
33 35 static char ixgbe_version[] = "ixgbe 1.1.7";
34 36
35 37 /*
36 38 * Local function prototypes
37 39 */
38 40 static int ixgbe_register_mac(ixgbe_t *);
39 41 static int ixgbe_identify_hardware(ixgbe_t *);
40 42 static int ixgbe_regs_map(ixgbe_t *);
41 43 static void ixgbe_init_properties(ixgbe_t *);
42 44 static int ixgbe_init_driver_settings(ixgbe_t *);
43 45 static void ixgbe_init_locks(ixgbe_t *);
44 46 static void ixgbe_destroy_locks(ixgbe_t *);
45 47 static int ixgbe_init(ixgbe_t *);
46 48 static int ixgbe_chip_start(ixgbe_t *);
47 49 static void ixgbe_chip_stop(ixgbe_t *);
48 50 static int ixgbe_reset(ixgbe_t *);
49 51 static void ixgbe_tx_clean(ixgbe_t *);
50 52 static boolean_t ixgbe_tx_drain(ixgbe_t *);
51 53 static boolean_t ixgbe_rx_drain(ixgbe_t *);
52 54 static int ixgbe_alloc_rings(ixgbe_t *);
53 55 static void ixgbe_free_rings(ixgbe_t *);
54 56 static int ixgbe_alloc_rx_data(ixgbe_t *);
55 57 static void ixgbe_free_rx_data(ixgbe_t *);
56 58 static void ixgbe_setup_rings(ixgbe_t *);
57 59 static void ixgbe_setup_rx(ixgbe_t *);
58 60 static void ixgbe_setup_tx(ixgbe_t *);
59 61 static void ixgbe_setup_rx_ring(ixgbe_rx_ring_t *);
60 62 static void ixgbe_setup_tx_ring(ixgbe_tx_ring_t *);
61 63 static void ixgbe_setup_rss(ixgbe_t *);
62 64 static void ixgbe_setup_vmdq(ixgbe_t *);
63 65 static void ixgbe_setup_vmdq_rss(ixgbe_t *);
64 66 static void ixgbe_init_unicst(ixgbe_t *);
65 67 static int ixgbe_unicst_find(ixgbe_t *, const uint8_t *);
66 68 static void ixgbe_setup_multicst(ixgbe_t *);
67 69 static void ixgbe_get_hw_state(ixgbe_t *);
68 70 static void ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe);
69 71 static void ixgbe_get_conf(ixgbe_t *);
70 72 static void ixgbe_init_params(ixgbe_t *);
71 73 static int ixgbe_get_prop(ixgbe_t *, char *, int, int, int);
72 74 static void ixgbe_driver_link_check(ixgbe_t *);
73 75 static void ixgbe_sfp_check(void *);
74 76 static void ixgbe_overtemp_check(void *);
75 77 static void ixgbe_link_timer(void *);
76 78 static void ixgbe_local_timer(void *);
77 79 static void ixgbe_arm_watchdog_timer(ixgbe_t *);
78 80 static void ixgbe_restart_watchdog_timer(ixgbe_t *);
79 81 static void ixgbe_disable_adapter_interrupts(ixgbe_t *);
80 82 static void ixgbe_enable_adapter_interrupts(ixgbe_t *);
81 83 static boolean_t is_valid_mac_addr(uint8_t *);
82 84 static boolean_t ixgbe_stall_check(ixgbe_t *);
83 85 static boolean_t ixgbe_set_loopback_mode(ixgbe_t *, uint32_t);
84 86 static void ixgbe_set_internal_mac_loopback(ixgbe_t *);
85 87 static boolean_t ixgbe_find_mac_address(ixgbe_t *);
86 88 static int ixgbe_alloc_intrs(ixgbe_t *);
87 89 static int ixgbe_alloc_intr_handles(ixgbe_t *, int);
88 90 static int ixgbe_add_intr_handlers(ixgbe_t *);
89 91 static void ixgbe_map_rxring_to_vector(ixgbe_t *, int, int);
90 92 static void ixgbe_map_txring_to_vector(ixgbe_t *, int, int);
91 93 static void ixgbe_setup_ivar(ixgbe_t *, uint16_t, uint8_t, int8_t);
92 94 static void ixgbe_enable_ivar(ixgbe_t *, uint16_t, int8_t);
93 95 static void ixgbe_disable_ivar(ixgbe_t *, uint16_t, int8_t);
94 96 static uint32_t ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index);
95 97 static int ixgbe_map_intrs_to_vectors(ixgbe_t *);
96 98 static void ixgbe_setup_adapter_vector(ixgbe_t *);
97 99 static void ixgbe_rem_intr_handlers(ixgbe_t *);
98 100 static void ixgbe_rem_intrs(ixgbe_t *);
99 101 static int ixgbe_enable_intrs(ixgbe_t *);
100 102 static int ixgbe_disable_intrs(ixgbe_t *);
101 103 static uint_t ixgbe_intr_legacy(void *, void *);
102 104 static uint_t ixgbe_intr_msi(void *, void *);
103 105 static uint_t ixgbe_intr_msix(void *, void *);
104 106 static void ixgbe_intr_rx_work(ixgbe_rx_ring_t *);
105 107 static void ixgbe_intr_tx_work(ixgbe_tx_ring_t *);
106 108 static void ixgbe_intr_other_work(ixgbe_t *, uint32_t);
107 109 static void ixgbe_get_driver_control(struct ixgbe_hw *);
108 110 static int ixgbe_addmac(void *, const uint8_t *);
109 111 static int ixgbe_remmac(void *, const uint8_t *);
110 112 static void ixgbe_release_driver_control(struct ixgbe_hw *);
111 113
112 114 static int ixgbe_attach(dev_info_t *, ddi_attach_cmd_t);
113 115 static int ixgbe_detach(dev_info_t *, ddi_detach_cmd_t);
114 116 static int ixgbe_resume(dev_info_t *);
115 117 static int ixgbe_suspend(dev_info_t *);
116 118 static void ixgbe_unconfigure(dev_info_t *, ixgbe_t *);
117 119 static uint8_t *ixgbe_mc_table_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
118 120 static int ixgbe_cbfunc(dev_info_t *, ddi_cb_action_t, void *, void *, void *);
119 121 static int ixgbe_intr_cb_register(ixgbe_t *);
120 122 static int ixgbe_intr_adjust(ixgbe_t *, ddi_cb_action_t, int);
121 123
122 124 static int ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err,
123 125 const void *impl_data);
124 126 static void ixgbe_fm_init(ixgbe_t *);
125 127 static void ixgbe_fm_fini(ixgbe_t *);
126 128
127 129 char *ixgbe_priv_props[] = {
128 130 "_tx_copy_thresh",
129 131 "_tx_recycle_thresh",
130 132 "_tx_overload_thresh",
131 133 "_tx_resched_thresh",
132 134 "_rx_copy_thresh",
133 135 "_rx_limit_per_intr",
134 136 "_intr_throttling",
135 137 "_adv_pause_cap",
136 138 "_adv_asym_pause_cap",
137 139 NULL
138 140 };
139 141
140 142 #define IXGBE_MAX_PRIV_PROPS \
141 143 (sizeof (ixgbe_priv_props) / sizeof (mac_priv_prop_t))
142 144
143 145 static struct cb_ops ixgbe_cb_ops = {
144 146 nulldev, /* cb_open */
145 147 nulldev, /* cb_close */
146 148 nodev, /* cb_strategy */
147 149 nodev, /* cb_print */
148 150 nodev, /* cb_dump */
149 151 nodev, /* cb_read */
150 152 nodev, /* cb_write */
151 153 nodev, /* cb_ioctl */
152 154 nodev, /* cb_devmap */
153 155 nodev, /* cb_mmap */
154 156 nodev, /* cb_segmap */
155 157 nochpoll, /* cb_chpoll */
156 158 ddi_prop_op, /* cb_prop_op */
157 159 NULL, /* cb_stream */
158 160 D_MP | D_HOTPLUG, /* cb_flag */
159 161 CB_REV, /* cb_rev */
160 162 nodev, /* cb_aread */
161 163 nodev /* cb_awrite */
162 164 };
163 165
164 166 static struct dev_ops ixgbe_dev_ops = {
165 167 DEVO_REV, /* devo_rev */
166 168 0, /* devo_refcnt */
167 169 NULL, /* devo_getinfo */
168 170 nulldev, /* devo_identify */
169 171 nulldev, /* devo_probe */
170 172 ixgbe_attach, /* devo_attach */
171 173 ixgbe_detach, /* devo_detach */
172 174 nodev, /* devo_reset */
173 175 &ixgbe_cb_ops, /* devo_cb_ops */
174 176 NULL, /* devo_bus_ops */
175 177 ddi_power, /* devo_power */
176 178 ddi_quiesce_not_supported, /* devo_quiesce */
177 179 };
178 180
179 181 static struct modldrv ixgbe_modldrv = {
180 182 &mod_driverops, /* Type of module. This one is a driver */
181 183 	ixgbe_ident,		/* Description string */
182 184 &ixgbe_dev_ops /* driver ops */
183 185 };
184 186
185 187 static struct modlinkage ixgbe_modlinkage = {
186 188 MODREV_1, &ixgbe_modldrv, NULL
187 189 };
188 190
189 191 /*
190 192 * Access attributes for register mapping
191 193 */
192 194 ddi_device_acc_attr_t ixgbe_regs_acc_attr = {
193 195 DDI_DEVICE_ATTR_V1,
194 196 DDI_STRUCTURE_LE_ACC,
195 197 DDI_STRICTORDER_ACC,
196 198 DDI_FLAGERR_ACC
197 199 };
198 200
199 201 /*
200 202 * Loopback property
201 203 */
202 204 static lb_property_t lb_normal = {
203 205 normal, "normal", IXGBE_LB_NONE
204 206 };
205 207
206 208 static lb_property_t lb_mac = {
207 209 internal, "MAC", IXGBE_LB_INTERNAL_MAC
208 210 };
209 211
210 212 static lb_property_t lb_external = {
211 213 external, "External", IXGBE_LB_EXTERNAL
212 214 };
213 215
214 216 #define IXGBE_M_CALLBACK_FLAGS \
215 217 (MC_IOCTL | MC_GETCAPAB | MC_SETPROP | MC_GETPROP | MC_PROPINFO)
216 218
217 219 static mac_callbacks_t ixgbe_m_callbacks = {
218 220 IXGBE_M_CALLBACK_FLAGS,
219 221 ixgbe_m_stat,
220 222 ixgbe_m_start,
221 223 ixgbe_m_stop,
222 224 ixgbe_m_promisc,
223 225 ixgbe_m_multicst,
224 226 NULL,
225 227 NULL,
226 228 NULL,
227 229 ixgbe_m_ioctl,
228 230 ixgbe_m_getcapab,
229 231 NULL,
230 232 NULL,
231 233 ixgbe_m_setprop,
232 234 ixgbe_m_getprop,
233 235 ixgbe_m_propinfo
234 236 };
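The NULL slots above line up with mc_unicst, mc_tx, mc_reserved, mc_open and mc_close in the illumos mac_callbacks_t layout. Leaving mc_unicst and mc_tx NULL appears deliberate for a ring-capable driver: with m_v12n set to MAC_VIRT_LEVEL1 (see ixgbe_register_mac() below), the MAC layer obtains per-ring transmit and per-group address entry points through ixgbe_m_getcapab() instead.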
235 237
236 238 /*
237 239 * Initialize capabilities of each supported adapter type
238 240 */
239 241 static adapter_info_t ixgbe_82598eb_cap = {
240 242 64, /* maximum number of rx queues */
241 243 1, /* minimum number of rx queues */
242 244 64, /* default number of rx queues */
243 245 16, /* maximum number of rx groups */
244 246 1, /* minimum number of rx groups */
245 247 1, /* default number of rx groups */
246 248 32, /* maximum number of tx queues */
247 249 1, /* minimum number of tx queues */
248 250 8, /* default number of tx queues */
249 251 16366, /* maximum MTU size */
250 252 0xFFFF, /* maximum interrupt throttle rate */
251 253 0, /* minimum interrupt throttle rate */
252 254 200, /* default interrupt throttle rate */
253 255 18, /* maximum total msix vectors */
254 256 16, /* maximum number of ring vectors */
255 257 2, /* maximum number of other vectors */
256 258 IXGBE_EICR_LSC, /* "other" interrupt types handled */
257 259 0, /* "other" interrupt types enable mask */
258 260 (IXGBE_FLAG_DCA_CAPABLE /* capability flags */
259 261 | IXGBE_FLAG_RSS_CAPABLE
260 262 | IXGBE_FLAG_VMDQ_CAPABLE)
261 263 };
262 264
263 265 static adapter_info_t ixgbe_82599eb_cap = {
264 266 128, /* maximum number of rx queues */
265 267 1, /* minimum number of rx queues */
266 268 128, /* default number of rx queues */
267 269 64, /* maximum number of rx groups */
268 270 1, /* minimum number of rx groups */
269 271 1, /* default number of rx groups */
270 272 128, /* maximum number of tx queues */
271 273 1, /* minimum number of tx queues */
272 274 8, /* default number of tx queues */
273 275 15500, /* maximum MTU size */
274 276 0xFF8, /* maximum interrupt throttle rate */
275 277 0, /* minimum interrupt throttle rate */
276 278 200, /* default interrupt throttle rate */
277 279 64, /* maximum total msix vectors */
278 280 16, /* maximum number of ring vectors */
279 281 2, /* maximum number of other vectors */
280 282 (IXGBE_EICR_LSC
281 283 | IXGBE_EICR_GPI_SDP1
282 284 | IXGBE_EICR_GPI_SDP2), /* "other" interrupt types handled */
283 285
284 286 (IXGBE_SDP1_GPIEN
285 287 | IXGBE_SDP2_GPIEN), /* "other" interrupt types enable mask */
286 288
287 289 (IXGBE_FLAG_DCA_CAPABLE
288 290 | IXGBE_FLAG_RSS_CAPABLE
289 291 | IXGBE_FLAG_VMDQ_CAPABLE
290 292 | IXGBE_FLAG_RSC_CAPABLE
291 293 | IXGBE_FLAG_SFP_PLUG_CAPABLE) /* capability flags */
292 294 };
293 295
296 +static adapter_info_t ixgbe_X540_cap = {
297 + 128, /* maximum number of rx queues */
298 + 1, /* minimum number of rx queues */
299 + 128, /* default number of rx queues */
300 + 64, /* maximum number of rx groups */
301 + 1, /* minimum number of rx groups */
302 + 1, /* default number of rx groups */
303 + 128, /* maximum number of tx queues */
304 + 1, /* minimum number of tx queues */
305 + 8, /* default number of tx queues */
306 + 15500, /* maximum MTU size */
307 + 0xFF8, /* maximum interrupt throttle rate */
308 + 0, /* minimum interrupt throttle rate */
309 + 200, /* default interrupt throttle rate */
310 + 64, /* maximum total msix vectors */
311 + 16, /* maximum number of ring vectors */
312 + 2, /* maximum number of other vectors */
313 + /* XXX KEBE ASKS, Do we care about X540's SDP3? */
314 + (IXGBE_EICR_LSC
315 + | IXGBE_EICR_GPI_SDP0
316 + | IXGBE_EICR_GPI_SDP1
317 + | IXGBE_EICR_GPI_SDP2
318 + /* | IXGBE_EICR_GPI_SDP3 */), /* "other" interrupt types handled */
319 +
320 + (IXGBE_SDP1_GPIEN
321 + | IXGBE_SDP2_GPIEN
322 + /* | IXGBE_SDP3_GPIEN */), /* "other" interrupt types enable mask */
323 +
324 + /* XXX KEBE ASKS, SFP_PLUG capable?!? */
325 + (IXGBE_FLAG_DCA_CAPABLE
326 + | IXGBE_FLAG_RSS_CAPABLE
327 + | IXGBE_FLAG_VMDQ_CAPABLE
328 + | IXGBE_FLAG_RSC_CAPABLE
329 + /* | IXGBE_FLAG_SFP_PLUG_CAPABLE */) /* capability flags */
330 +};
331 +
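For orientation, a minimal sketch of how these per-adapter capability tables are consumed at run time, assuming the usual capab->flags test pattern used elsewhere in this driver (the taskq dispatch below is illustrative only, not part of the diff):

	if (ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) {
		/* Hand SFP hot-plug events off to the sfp taskq */
		(void) ddi_taskq_dispatch(ixgbe->sfp_taskq,
		    ixgbe_sfp_check, (void *)ixgbe, DDI_SLEEP);
	}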
294 332 /*
295 333 * Module Initialization Functions.
296 334 */
297 335
298 336 int
299 337 _init(void)
300 338 {
301 339 int status;
302 340
303 341 mac_init_ops(&ixgbe_dev_ops, MODULE_NAME);
304 342
305 343 status = mod_install(&ixgbe_modlinkage);
306 344
307 345 if (status != DDI_SUCCESS) {
308 346 mac_fini_ops(&ixgbe_dev_ops);
309 347 }
310 348
311 349 return (status);
312 350 }
313 351
314 352 int
315 353 _fini(void)
316 354 {
317 355 int status;
318 356
319 357 status = mod_remove(&ixgbe_modlinkage);
320 358
321 359 if (status == DDI_SUCCESS) {
322 360 mac_fini_ops(&ixgbe_dev_ops);
323 361 }
324 362
325 363 return (status);
326 364 }
327 365
328 366 int
329 367 _info(struct modinfo *modinfop)
330 368 {
331 369 int status;
332 370
333 371 status = mod_info(&ixgbe_modlinkage, modinfop);
334 372
335 373 return (status);
336 374 }
337 375
338 376 /*
339 377 * ixgbe_attach - Driver attach.
340 378 *
341 379 * This function is the device specific initialization entry
342 380 * point. This entry point is required and must be written.
343 381 * The DDI_ATTACH command must be provided in the attach entry
344 382 * point. When attach() is called with cmd set to DDI_ATTACH,
345 383 * all normal kernel services (such as kmem_alloc(9F)) are
346 384 * available for use by the driver.
347 385 *
348 386 * The attach() function will be called once for each instance
349 387 * of the device on the system with cmd set to DDI_ATTACH.
350 388 * Until attach() succeeds, the only driver entry points which
351 389 * may be called are open(9E) and getinfo(9E).
352 390 */
353 391 static int
354 392 ixgbe_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
355 393 {
356 394 ixgbe_t *ixgbe;
357 395 struct ixgbe_osdep *osdep;
358 396 struct ixgbe_hw *hw;
359 397 int instance;
360 398 char taskqname[32];
361 399
362 400 /*
363 401 * Check the command and perform corresponding operations
364 402 */
365 403 switch (cmd) {
366 404 default:
367 405 return (DDI_FAILURE);
368 406
369 407 case DDI_RESUME:
370 408 return (ixgbe_resume(devinfo));
371 409
372 410 case DDI_ATTACH:
373 411 break;
374 412 }
375 413
376 414 /* Get the device instance */
377 415 instance = ddi_get_instance(devinfo);
378 416
379 417 /* Allocate memory for the instance data structure */
380 418 ixgbe = kmem_zalloc(sizeof (ixgbe_t), KM_SLEEP);
381 419
382 420 ixgbe->dip = devinfo;
383 421 ixgbe->instance = instance;
384 422
385 423 hw = &ixgbe->hw;
386 424 osdep = &ixgbe->osdep;
387 425 hw->back = osdep;
388 426 osdep->ixgbe = ixgbe;
389 427
390 428 /* Attach the instance pointer to the dev_info data structure */
391 429 ddi_set_driver_private(devinfo, ixgbe);
392 430
393 431 /*
394 432 * Initialize for fma support
395 433 */
396 434 ixgbe->fm_capabilities = ixgbe_get_prop(ixgbe, PROP_FM_CAPABLE,
397 435 0, 0x0f, DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
398 436 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
399 437 ixgbe_fm_init(ixgbe);
400 438 ixgbe->attach_progress |= ATTACH_PROGRESS_FM_INIT;
401 439
402 440 /*
403 441 * Map PCI config space registers
404 442 */
405 443 if (pci_config_setup(devinfo, &osdep->cfg_handle) != DDI_SUCCESS) {
406 444 ixgbe_error(ixgbe, "Failed to map PCI configurations");
407 445 goto attach_fail;
408 446 }
409 447 ixgbe->attach_progress |= ATTACH_PROGRESS_PCI_CONFIG;
410 448
411 449 /*
412 450 * Identify the chipset family
413 451 */
414 452 if (ixgbe_identify_hardware(ixgbe) != IXGBE_SUCCESS) {
415 453 ixgbe_error(ixgbe, "Failed to identify hardware");
416 454 goto attach_fail;
417 455 }
418 456
419 457 /*
420 458 * Map device registers
421 459 */
422 460 if (ixgbe_regs_map(ixgbe) != IXGBE_SUCCESS) {
423 461 ixgbe_error(ixgbe, "Failed to map device registers");
424 462 goto attach_fail;
425 463 }
426 464 ixgbe->attach_progress |= ATTACH_PROGRESS_REGS_MAP;
427 465
428 466 /*
429 467 * Initialize driver parameters
430 468 */
431 469 ixgbe_init_properties(ixgbe);
432 470 ixgbe->attach_progress |= ATTACH_PROGRESS_PROPS;
433 471
434 472 /*
435 473 * Register interrupt callback
436 474 */
437 475 if (ixgbe_intr_cb_register(ixgbe) != IXGBE_SUCCESS) {
438 476 ixgbe_error(ixgbe, "Failed to register interrupt callback");
439 477 goto attach_fail;
440 478 }
441 479
442 480 /*
443 481 * Allocate interrupts
444 482 */
445 483 if (ixgbe_alloc_intrs(ixgbe) != IXGBE_SUCCESS) {
446 484 ixgbe_error(ixgbe, "Failed to allocate interrupts");
447 485 goto attach_fail;
448 486 }
449 487 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
450 488
451 489 /*
452 490 * Allocate rx/tx rings based on the ring numbers.
453 491 * The actual numbers of rx/tx rings are decided by the number of
454 492 * allocated interrupt vectors, so we should allocate the rings after
455 493 * interrupts are allocated.
456 494 */
457 495 if (ixgbe_alloc_rings(ixgbe) != IXGBE_SUCCESS) {
458 496 ixgbe_error(ixgbe, "Failed to allocate rx and tx rings");
459 497 goto attach_fail;
460 498 }
461 499 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_RINGS;
462 500
463 501 /*
464 502 * Map rings to interrupt vectors
465 503 */
466 504 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
467 505 ixgbe_error(ixgbe, "Failed to map interrupts to vectors");
468 506 goto attach_fail;
469 507 }
470 508
471 509 /*
472 510 * Add interrupt handlers
473 511 */
474 512 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
475 513 ixgbe_error(ixgbe, "Failed to add interrupt handlers");
476 514 goto attach_fail;
477 515 }
478 516 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
479 517
480 518 /*
481 519 * Create a taskq for sfp-change
482 520 */
483 521 (void) sprintf(taskqname, "ixgbe%d_sfp_taskq", instance);
484 522 if ((ixgbe->sfp_taskq = ddi_taskq_create(devinfo, taskqname,
485 523 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
486 524 ixgbe_error(ixgbe, "sfp_taskq create failed");
487 525 goto attach_fail;
488 526 }
489 527 ixgbe->attach_progress |= ATTACH_PROGRESS_SFP_TASKQ;
490 528
491 529 /*
492 530 * Create a taskq for over-temp
493 531 */
494 532 (void) sprintf(taskqname, "ixgbe%d_overtemp_taskq", instance);
495 533 if ((ixgbe->overtemp_taskq = ddi_taskq_create(devinfo, taskqname,
496 534 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
497 535 ixgbe_error(ixgbe, "overtemp_taskq create failed");
498 536 goto attach_fail;
499 537 }
500 538 ixgbe->attach_progress |= ATTACH_PROGRESS_OVERTEMP_TASKQ;
501 539
502 540 /*
503 541 * Initialize driver parameters
504 542 */
505 543 if (ixgbe_init_driver_settings(ixgbe) != IXGBE_SUCCESS) {
506 544 ixgbe_error(ixgbe, "Failed to initialize driver settings");
507 545 goto attach_fail;
508 546 }
509 547
510 548 /*
511 549 * Initialize mutexes for this device.
512 550 * Do this before enabling the interrupt handler and
513 551 	 * registering the softint, to avoid the condition where the
514 552 	 * interrupt handler can try to use an uninitialized mutex.
515 553 */
516 554 ixgbe_init_locks(ixgbe);
517 555 ixgbe->attach_progress |= ATTACH_PROGRESS_LOCKS;
518 556
519 557 /*
520 558 * Initialize chipset hardware
521 559 */
522 560 if (ixgbe_init(ixgbe) != IXGBE_SUCCESS) {
523 561 ixgbe_error(ixgbe, "Failed to initialize adapter");
524 562 goto attach_fail;
525 563 }
526 564 ixgbe->link_check_complete = B_FALSE;
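	/*
	 * Assumption on the 100000000ULL scaling below: the shared-code
	 * IXGBE_LINK_UP_TIME constant appears to be expressed in tenths
	 * of a second, so multiplying by 10^8 converts it into the
	 * gethrtime() nanosecond deadline used for link checking.
	 */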
527 565 ixgbe->link_check_hrtime = gethrtime() +
528 566 (IXGBE_LINK_UP_TIME * 100000000ULL);
529 567 ixgbe->attach_progress |= ATTACH_PROGRESS_INIT;
530 568
531 569 if (ixgbe_check_acc_handle(ixgbe->osdep.cfg_handle) != DDI_FM_OK) {
532 570 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
533 571 goto attach_fail;
534 572 }
535 573
536 574 /*
537 575 * Initialize statistics
538 576 */
539 577 if (ixgbe_init_stats(ixgbe) != IXGBE_SUCCESS) {
540 578 ixgbe_error(ixgbe, "Failed to initialize statistics");
541 579 goto attach_fail;
542 580 }
543 581 ixgbe->attach_progress |= ATTACH_PROGRESS_STATS;
544 582
545 583 /*
546 584 * Register the driver to the MAC
547 585 */
548 586 if (ixgbe_register_mac(ixgbe) != IXGBE_SUCCESS) {
549 587 ixgbe_error(ixgbe, "Failed to register MAC");
550 588 goto attach_fail;
551 589 }
552 590 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
553 591 ixgbe->attach_progress |= ATTACH_PROGRESS_MAC;
554 592
555 593 ixgbe->periodic_id = ddi_periodic_add(ixgbe_link_timer, ixgbe,
556 594 IXGBE_CYCLIC_PERIOD, DDI_IPL_0);
557 595 if (ixgbe->periodic_id == 0) {
558 596 ixgbe_error(ixgbe, "Failed to add the link check timer");
559 597 goto attach_fail;
560 598 }
561 599 ixgbe->attach_progress |= ATTACH_PROGRESS_LINK_TIMER;
562 600
563 601 /*
564 602 * Now that mutex locks are initialized, and the chip is also
565 603 * initialized, enable interrupts.
566 604 */
567 605 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
568 606 ixgbe_error(ixgbe, "Failed to enable DDI interrupts");
569 607 goto attach_fail;
570 608 }
571 609 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
572 610
573 611 ixgbe_log(ixgbe, "%s, %s", ixgbe_ident, ixgbe_version);
574 612 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_INITIALIZED);
575 613
576 614 return (DDI_SUCCESS);
577 615
578 616 attach_fail:
579 617 ixgbe_unconfigure(devinfo, ixgbe);
580 618 return (DDI_FAILURE);
581 619 }
582 620
583 621 /*
584 622 * ixgbe_detach - Driver detach.
585 623 *
586 624 * The detach() function is the complement of the attach routine.
587 625 * If cmd is set to DDI_DETACH, detach() is used to remove the
588 626 * state associated with a given instance of a device node
589 627 * prior to the removal of that instance from the system.
590 628 *
591 629 * The detach() function will be called once for each instance
592 630 * of the device for which there has been a successful attach()
593 631 * once there are no longer any opens on the device.
594 632 *
595 633 * Interrupt routines are disabled, and all memory allocated by
596 634 * this driver is freed.
597 635 */
598 636 static int
599 637 ixgbe_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
600 638 {
601 639 ixgbe_t *ixgbe;
602 640
603 641 /*
604 642 * Check detach command
605 643 */
606 644 switch (cmd) {
607 645 default:
608 646 return (DDI_FAILURE);
609 647
610 648 case DDI_SUSPEND:
611 649 return (ixgbe_suspend(devinfo));
612 650
613 651 case DDI_DETACH:
614 652 break;
615 653 }
616 654
617 655 /*
618 656 * Get the pointer to the driver private data structure
619 657 */
620 658 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
621 659 if (ixgbe == NULL)
622 660 return (DDI_FAILURE);
623 661
624 662 /*
625 663 * If the device is still running, it needs to be stopped first.
626 664 * This check is necessary because under some specific circumstances,
627 665 * the detach routine can be called without stopping the interface
628 666 * first.
629 667 */
630 668 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
631 669 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
632 670 mutex_enter(&ixgbe->gen_lock);
633 671 ixgbe_stop(ixgbe, B_TRUE);
634 672 mutex_exit(&ixgbe->gen_lock);
635 673 /* Disable and stop the watchdog timer */
636 674 ixgbe_disable_watchdog_timer(ixgbe);
637 675 }
638 676
639 677 /*
640 678 * Check if there are still rx buffers held by the upper layer.
641 679 * If so, fail the detach.
642 680 */
643 681 if (!ixgbe_rx_drain(ixgbe))
644 682 return (DDI_FAILURE);
645 683
646 684 /*
647 685 * Do the remaining unconfigure routines
648 686 */
649 687 ixgbe_unconfigure(devinfo, ixgbe);
650 688
651 689 return (DDI_SUCCESS);
652 690 }
653 691
654 692 static void
655 693 ixgbe_unconfigure(dev_info_t *devinfo, ixgbe_t *ixgbe)
656 694 {
657 695 /*
658 696 * Disable interrupt
659 697 */
660 698 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
661 699 (void) ixgbe_disable_intrs(ixgbe);
662 700 }
663 701
664 702 /*
665 703 * remove the link check timer
666 704 */
667 705 if (ixgbe->attach_progress & ATTACH_PROGRESS_LINK_TIMER) {
668 706 if (ixgbe->periodic_id != NULL) {
669 707 ddi_periodic_delete(ixgbe->periodic_id);
670 708 ixgbe->periodic_id = NULL;
671 709 }
672 710 }
673 711
674 712 /*
675 713 * Unregister MAC
676 714 */
677 715 if (ixgbe->attach_progress & ATTACH_PROGRESS_MAC) {
678 716 (void) mac_unregister(ixgbe->mac_hdl);
679 717 }
680 718
681 719 /*
682 720 * Free statistics
683 721 */
684 722 if (ixgbe->attach_progress & ATTACH_PROGRESS_STATS) {
685 723 kstat_delete((kstat_t *)ixgbe->ixgbe_ks);
686 724 }
687 725
688 726 /*
689 727 * Remove interrupt handlers
690 728 */
691 729 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
692 730 ixgbe_rem_intr_handlers(ixgbe);
693 731 }
694 732
695 733 /*
696 734 * Remove taskq for sfp-status-change
697 735 */
698 736 if (ixgbe->attach_progress & ATTACH_PROGRESS_SFP_TASKQ) {
699 737 ddi_taskq_destroy(ixgbe->sfp_taskq);
700 738 }
701 739
702 740 /*
703 741 * Remove taskq for over-temp
704 742 */
705 743 if (ixgbe->attach_progress & ATTACH_PROGRESS_OVERTEMP_TASKQ) {
706 744 ddi_taskq_destroy(ixgbe->overtemp_taskq);
707 745 }
708 746
709 747 /*
710 748 * Remove interrupts
711 749 */
712 750 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_INTR) {
713 751 ixgbe_rem_intrs(ixgbe);
714 752 }
715 753
716 754 /*
717 755 * Unregister interrupt callback handler
718 756 */
719 757 (void) ddi_cb_unregister(ixgbe->cb_hdl);
720 758
721 759 /*
722 760 * Remove driver properties
723 761 */
724 762 if (ixgbe->attach_progress & ATTACH_PROGRESS_PROPS) {
725 763 (void) ddi_prop_remove_all(devinfo);
726 764 }
727 765
728 766 /*
729 767 * Stop the chipset
730 768 */
731 769 if (ixgbe->attach_progress & ATTACH_PROGRESS_INIT) {
732 770 mutex_enter(&ixgbe->gen_lock);
733 771 ixgbe_chip_stop(ixgbe);
734 772 mutex_exit(&ixgbe->gen_lock);
735 773 }
736 774
737 775 /*
738 776 * Free register handle
739 777 */
740 778 if (ixgbe->attach_progress & ATTACH_PROGRESS_REGS_MAP) {
741 779 if (ixgbe->osdep.reg_handle != NULL)
742 780 ddi_regs_map_free(&ixgbe->osdep.reg_handle);
743 781 }
744 782
745 783 /*
746 784 * Free PCI config handle
747 785 */
748 786 if (ixgbe->attach_progress & ATTACH_PROGRESS_PCI_CONFIG) {
749 787 if (ixgbe->osdep.cfg_handle != NULL)
750 788 pci_config_teardown(&ixgbe->osdep.cfg_handle);
751 789 }
752 790
753 791 /*
754 792 * Free locks
755 793 */
756 794 if (ixgbe->attach_progress & ATTACH_PROGRESS_LOCKS) {
757 795 ixgbe_destroy_locks(ixgbe);
758 796 }
759 797
760 798 /*
761 799 * Free the rx/tx rings
762 800 */
763 801 if (ixgbe->attach_progress & ATTACH_PROGRESS_ALLOC_RINGS) {
764 802 ixgbe_free_rings(ixgbe);
765 803 }
766 804
767 805 /*
768 806 * Unregister FMA capabilities
769 807 */
770 808 if (ixgbe->attach_progress & ATTACH_PROGRESS_FM_INIT) {
771 809 ixgbe_fm_fini(ixgbe);
772 810 }
773 811
774 812 /*
775 813 * Free the driver data structure
776 814 */
777 815 kmem_free(ixgbe, sizeof (ixgbe_t));
778 816
779 817 ddi_set_driver_private(devinfo, NULL);
780 818 }
781 819
782 820 /*
783 821 * ixgbe_register_mac - Register the driver and its function pointers with
784 822 * the GLD interface.
785 823 */
786 824 static int
787 825 ixgbe_register_mac(ixgbe_t *ixgbe)
788 826 {
789 827 struct ixgbe_hw *hw = &ixgbe->hw;
790 828 mac_register_t *mac;
791 829 int status;
792 830
793 831 if ((mac = mac_alloc(MAC_VERSION)) == NULL)
794 832 return (IXGBE_FAILURE);
795 833
796 834 mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
797 835 mac->m_driver = ixgbe;
798 836 mac->m_dip = ixgbe->dip;
799 837 mac->m_src_addr = hw->mac.addr;
800 838 mac->m_callbacks = &ixgbe_m_callbacks;
801 839 mac->m_min_sdu = 0;
802 840 mac->m_max_sdu = ixgbe->default_mtu;
803 841 mac->m_margin = VLAN_TAGSZ;
804 842 mac->m_priv_props = ixgbe_priv_props;
805 843 mac->m_v12n = MAC_VIRT_LEVEL1;
806 844
807 845 status = mac_register(mac, &ixgbe->mac_hdl);
808 846
809 847 mac_free(mac);
810 848
811 849 return ((status == 0) ? IXGBE_SUCCESS : IXGBE_FAILURE);
812 850 }
813 851
814 852 /*
815 853 * ixgbe_identify_hardware - Identify the type of the chipset.
816 854 */
817 855 static int
818 856 ixgbe_identify_hardware(ixgbe_t *ixgbe)
819 857 {
820 858 struct ixgbe_hw *hw = &ixgbe->hw;
821 859 struct ixgbe_osdep *osdep = &ixgbe->osdep;
822 860
823 861 /*
824 862 * Get the device id
825 863 */
826 864 hw->vendor_id =
827 865 pci_config_get16(osdep->cfg_handle, PCI_CONF_VENID);
828 866 hw->device_id =
829 867 pci_config_get16(osdep->cfg_handle, PCI_CONF_DEVID);
830 868 hw->revision_id =
831 869 pci_config_get8(osdep->cfg_handle, PCI_CONF_REVID);
832 870 hw->subsystem_device_id =
833 871 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBSYSID);
834 872 hw->subsystem_vendor_id =
835 873 pci_config_get16(osdep->cfg_handle, PCI_CONF_SUBVENID);
836 874
837 875 /*
838 876 * Set the mac type of the adapter based on the device id
839 877 */
840 878 if (ixgbe_set_mac_type(hw) != IXGBE_SUCCESS) {
841 879 return (IXGBE_FAILURE);
842 880 }
843 881
844 882 /*
845 883 * Install adapter capabilities
846 884 */
847 885 switch (hw->mac.type) {
848 886 case ixgbe_mac_82598EB:
849 887 IXGBE_DEBUGLOG_0(ixgbe, "identify 82598 adapter\n");
850 888 ixgbe->capab = &ixgbe_82598eb_cap;
851 889
852 890 if (ixgbe_get_media_type(hw) == ixgbe_media_type_copper) {
853 891 ixgbe->capab->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
854 892 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP1;
855 893 ixgbe->capab->other_gpie |= IXGBE_SDP1_GPIEN;
856 894 }
857 895 break;
858 896
859 897 case ixgbe_mac_82599EB:
860 898 IXGBE_DEBUGLOG_0(ixgbe, "identify 82599 adapter\n");
861 899 ixgbe->capab = &ixgbe_82599eb_cap;
862 900
863 901 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) {
864 902 ixgbe->capab->flags |= IXGBE_FLAG_TEMP_SENSOR_CAPABLE;
865 903 ixgbe->capab->other_intr |= IXGBE_EICR_GPI_SDP0;
866 904 ixgbe->capab->other_gpie |= IXGBE_SDP0_GPIEN;
867 905 }
868 906 break;
869 907
908 + case ixgbe_mac_X540:
909 + IXGBE_DEBUGLOG_0(ixgbe, "identify X540 adapter\n");
910 + ixgbe->capab = &ixgbe_X540_cap;
911 + /*
912 + * For now, X540 is all set in its capab structure.
913 + * As other X540 variants show up, things can change here.
914 + */
915 + break;
916 +
870 917 default:
871 918 IXGBE_DEBUGLOG_1(ixgbe,
872 919 "adapter not supported in ixgbe_identify_hardware(): %d\n",
873 920 hw->mac.type);
874 921 return (IXGBE_FAILURE);
875 922 }
876 923
877 924 return (IXGBE_SUCCESS);
878 925 }
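ixgbe_set_mac_type() lives in the Intel shared code rather than in this file; a simplified sketch of the device-id dispatch it performs (two illustrative IDs only, the real table covers every supported device):

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP:
		hw->mac.type = ixgbe_mac_82599EB;
		break;
	case IXGBE_DEV_ID_X540T:
		hw->mac.type = ixgbe_mac_X540;
		break;
	default:
		return (IXGBE_ERR_DEVICE_NOT_SUPPORTED);
	}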
879 926
880 927 /*
881 928 * ixgbe_regs_map - Map the device registers.
882 929 *
883 930 */
884 931 static int
885 932 ixgbe_regs_map(ixgbe_t *ixgbe)
886 933 {
887 934 dev_info_t *devinfo = ixgbe->dip;
888 935 struct ixgbe_hw *hw = &ixgbe->hw;
889 936 struct ixgbe_osdep *osdep = &ixgbe->osdep;
890 937 off_t mem_size;
891 938
892 939 /*
893 940 * First get the size of device registers to be mapped.
894 941 */
895 942 if (ddi_dev_regsize(devinfo, IXGBE_ADAPTER_REGSET, &mem_size)
896 943 != DDI_SUCCESS) {
897 944 return (IXGBE_FAILURE);
898 945 }
899 946
900 947 /*
901 948 * Call ddi_regs_map_setup() to map registers
902 949 */
903 950 if ((ddi_regs_map_setup(devinfo, IXGBE_ADAPTER_REGSET,
904 951 (caddr_t *)&hw->hw_addr, 0,
905 952 mem_size, &ixgbe_regs_acc_attr,
906 953 &osdep->reg_handle)) != DDI_SUCCESS) {
907 954 return (IXGBE_FAILURE);
908 955 }
909 956
910 957 return (IXGBE_SUCCESS);
911 958 }
912 959
913 960 /*
914 961 * ixgbe_init_properties - Initialize driver properties.
915 962 */
916 963 static void
917 964 ixgbe_init_properties(ixgbe_t *ixgbe)
918 965 {
919 966 /*
920 967 * Get conf file properties, including link settings
921 968 * jumbo frames, ring number, descriptor number, etc.
922 969 */
923 970 ixgbe_get_conf(ixgbe);
924 971
925 972 ixgbe_init_params(ixgbe);
926 973 }
927 974
928 975 /*
929 976 * ixgbe_init_driver_settings - Initialize driver settings.
930 977 *
931 978 * The settings include hardware function pointers, bus information,
932 979 * rx/tx rings settings, link state, and any other parameters that
933 980 * need to be setup during driver initialization.
934 981 */
935 982 static int
936 983 ixgbe_init_driver_settings(ixgbe_t *ixgbe)
937 984 {
938 985 struct ixgbe_hw *hw = &ixgbe->hw;
939 986 dev_info_t *devinfo = ixgbe->dip;
940 987 ixgbe_rx_ring_t *rx_ring;
941 988 ixgbe_rx_group_t *rx_group;
942 989 ixgbe_tx_ring_t *tx_ring;
943 990 uint32_t rx_size;
944 991 uint32_t tx_size;
945 992 uint32_t ring_per_group;
946 993 int i;
947 994
948 995 /*
949 996 * Initialize chipset specific hardware function pointers
950 997 */
951 998 if (ixgbe_init_shared_code(hw) != IXGBE_SUCCESS) {
952 999 return (IXGBE_FAILURE);
953 1000 }
954 1001
955 1002 /*
956 1003 * Get the system page size
957 1004 */
958 1005 ixgbe->sys_page_size = ddi_ptob(devinfo, (ulong_t)1);
959 1006
960 1007 /*
961 1008 * Set rx buffer size
962 1009 *
963 1010 * The IP header alignment room is counted in the calculation.
964 1011 * The rx buffer size is in unit of 1K that is required by the
965 1012 * chipset hardware.
966 1013 */
967 1014 rx_size = ixgbe->max_frame_size + IPHDR_ALIGN_ROOM;
968 1015 ixgbe->rx_buf_size = ((rx_size >> 10) +
969 1016 ((rx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
970 1017
971 1018 /*
972 1019 * Set tx buffer size
973 1020 */
974 1021 tx_size = ixgbe->max_frame_size;
975 1022 ixgbe->tx_buf_size = ((tx_size >> 10) +
976 1023 ((tx_size & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
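	/*
	 * Worked example of the 1 KB round-up above, with illustrative
	 * numbers: the default 1500-byte MTU gives a max frame size of
	 * 1518; adding the IP header alignment room lands between 1 KB
	 * and 2 KB, so rx_buf_size (and tx_buf_size) become 2048.
	 */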
977 1024
978 1025 /*
979 1026 * Initialize rx/tx rings/groups parameters
980 1027 */
981 1028 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
982 1029 for (i = 0; i < ixgbe->num_rx_rings; i++) {
983 1030 rx_ring = &ixgbe->rx_rings[i];
984 1031 rx_ring->index = i;
985 1032 rx_ring->ixgbe = ixgbe;
986 1033 rx_ring->group_index = i / ring_per_group;
987 1034 rx_ring->hw_index = ixgbe_get_hw_rx_index(ixgbe, i);
988 1035 }
989 1036
990 1037 for (i = 0; i < ixgbe->num_rx_groups; i++) {
991 1038 rx_group = &ixgbe->rx_groups[i];
992 1039 rx_group->index = i;
993 1040 rx_group->ixgbe = ixgbe;
994 1041 }
995 1042
996 1043 for (i = 0; i < ixgbe->num_tx_rings; i++) {
997 1044 tx_ring = &ixgbe->tx_rings[i];
998 1045 tx_ring->index = i;
999 1046 tx_ring->ixgbe = ixgbe;
1000 1047 if (ixgbe->tx_head_wb_enable)
1001 1048 tx_ring->tx_recycle = ixgbe_tx_recycle_head_wb;
1002 1049 else
1003 1050 tx_ring->tx_recycle = ixgbe_tx_recycle_legacy;
1004 1051
1005 1052 tx_ring->ring_size = ixgbe->tx_ring_size;
1006 1053 tx_ring->free_list_size = ixgbe->tx_ring_size +
1007 1054 (ixgbe->tx_ring_size >> 1);
1008 1055 }
1009 1056
1010 1057 /*
1011 1058 * Initialize values of interrupt throttling rate
1012 1059 */
1013 1060 for (i = 1; i < MAX_INTR_VECTOR; i++)
1014 1061 ixgbe->intr_throttling[i] = ixgbe->intr_throttling[0];
1015 1062
1016 1063 /*
1017 1064 * The initial link state should be "unknown"
1018 1065 */
1019 1066 ixgbe->link_state = LINK_STATE_UNKNOWN;
1020 1067
1021 1068 return (IXGBE_SUCCESS);
1022 1069 }
1023 1070
1024 1071 /*
1025 1072 * ixgbe_init_locks - Initialize locks.
1026 1073 */
1027 1074 static void
1028 1075 ixgbe_init_locks(ixgbe_t *ixgbe)
1029 1076 {
1030 1077 ixgbe_rx_ring_t *rx_ring;
1031 1078 ixgbe_tx_ring_t *tx_ring;
1032 1079 int i;
1033 1080
1034 1081 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1035 1082 rx_ring = &ixgbe->rx_rings[i];
1036 1083 mutex_init(&rx_ring->rx_lock, NULL,
1037 1084 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1038 1085 }
1039 1086
1040 1087 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1041 1088 tx_ring = &ixgbe->tx_rings[i];
1042 1089 mutex_init(&tx_ring->tx_lock, NULL,
1043 1090 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1044 1091 mutex_init(&tx_ring->recycle_lock, NULL,
1045 1092 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1046 1093 mutex_init(&tx_ring->tcb_head_lock, NULL,
1047 1094 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1048 1095 mutex_init(&tx_ring->tcb_tail_lock, NULL,
1049 1096 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1050 1097 }
1051 1098
1052 1099 mutex_init(&ixgbe->gen_lock, NULL,
1053 1100 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1054 1101
1055 1102 mutex_init(&ixgbe->watchdog_lock, NULL,
1056 1103 MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));
1057 1104 }
1058 1105
1059 1106 /*
1060 1107 * ixgbe_destroy_locks - Destroy locks.
1061 1108 */
1062 1109 static void
1063 1110 ixgbe_destroy_locks(ixgbe_t *ixgbe)
1064 1111 {
1065 1112 ixgbe_rx_ring_t *rx_ring;
1066 1113 ixgbe_tx_ring_t *tx_ring;
1067 1114 int i;
1068 1115
1069 1116 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1070 1117 rx_ring = &ixgbe->rx_rings[i];
1071 1118 mutex_destroy(&rx_ring->rx_lock);
1072 1119 }
1073 1120
1074 1121 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1075 1122 tx_ring = &ixgbe->tx_rings[i];
1076 1123 mutex_destroy(&tx_ring->tx_lock);
1077 1124 mutex_destroy(&tx_ring->recycle_lock);
1078 1125 mutex_destroy(&tx_ring->tcb_head_lock);
1079 1126 mutex_destroy(&tx_ring->tcb_tail_lock);
1080 1127 }
1081 1128
1082 1129 mutex_destroy(&ixgbe->gen_lock);
1083 1130 mutex_destroy(&ixgbe->watchdog_lock);
1084 1131 }
1085 1132
1086 1133 static int
1087 1134 ixgbe_resume(dev_info_t *devinfo)
1088 1135 {
1089 1136 ixgbe_t *ixgbe;
1090 1137 int i;
1091 1138
1092 1139 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1093 1140 if (ixgbe == NULL)
1094 1141 return (DDI_FAILURE);
1095 1142
1096 1143 mutex_enter(&ixgbe->gen_lock);
1097 1144
1098 1145 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1099 1146 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1100 1147 mutex_exit(&ixgbe->gen_lock);
1101 1148 return (DDI_FAILURE);
1102 1149 }
1103 1150
1104 1151 /*
1105 1152 * Enable and start the watchdog timer
1106 1153 */
1107 1154 ixgbe_enable_watchdog_timer(ixgbe);
1108 1155 }
1109 1156
1110 1157 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_SUSPENDED);
1111 1158
1112 1159 if (ixgbe->ixgbe_state & IXGBE_STARTED) {
1113 1160 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1114 1161 mac_tx_ring_update(ixgbe->mac_hdl,
1115 1162 ixgbe->tx_rings[i].ring_handle);
1116 1163 }
1117 1164 }
1118 1165
1119 1166 mutex_exit(&ixgbe->gen_lock);
1120 1167
1121 1168 return (DDI_SUCCESS);
1122 1169 }
1123 1170
1124 1171 static int
1125 1172 ixgbe_suspend(dev_info_t *devinfo)
1126 1173 {
1127 1174 ixgbe_t *ixgbe;
1128 1175
1129 1176 ixgbe = (ixgbe_t *)ddi_get_driver_private(devinfo);
1130 1177 if (ixgbe == NULL)
1131 1178 return (DDI_FAILURE);
1132 1179
1133 1180 mutex_enter(&ixgbe->gen_lock);
1134 1181
1135 1182 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_SUSPENDED);
1136 1183 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1137 1184 mutex_exit(&ixgbe->gen_lock);
1138 1185 return (DDI_SUCCESS);
1139 1186 }
1140 1187 ixgbe_stop(ixgbe, B_FALSE);
1141 1188
1142 1189 mutex_exit(&ixgbe->gen_lock);
1143 1190
1144 1191 /*
1145 1192 * Disable and stop the watchdog timer
1146 1193 */
1147 1194 ixgbe_disable_watchdog_timer(ixgbe);
1148 1195
1149 1196 return (DDI_SUCCESS);
1150 1197 }
1151 1198
1152 1199 /*
1153 1200 * ixgbe_init - Initialize the device.
1154 1201 */
1155 1202 static int
1156 1203 ixgbe_init(ixgbe_t *ixgbe)
1157 1204 {
1158 1205 struct ixgbe_hw *hw = &ixgbe->hw;
1159 1206
1160 1207 mutex_enter(&ixgbe->gen_lock);
1161 1208
1162 1209 /*
1163 1210 * Reset chipset to put the hardware in a known state
1164 1211 * before we try to do anything with the eeprom.
1165 1212 */
1166 1213 if (ixgbe_reset_hw(hw) != IXGBE_SUCCESS) {
1167 1214 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1168 1215 goto init_fail;
1169 1216 }
1170 1217
1171 1218 /*
1172 1219 * Need to init eeprom before validating the checksum.
1173 1220 */
1174 1221 if (ixgbe_init_eeprom_params(hw) < 0) {
1175 1222 ixgbe_error(ixgbe,
1176 1223 		    "Unable to initialize the eeprom interface.");
1177 1224 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1178 1225 goto init_fail;
1179 1226 }
1180 1227
1181 1228 /*
1182 1229 * NVM validation
1183 1230 */
1184 1231 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1185 1232 /*
1186 1233 * Some PCI-E parts fail the first check due to
1187 1234 * the link being in sleep state. Call it again,
1188 1235 * if it fails a second time it's a real issue.
1189 1236 */
1190 1237 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
1191 1238 ixgbe_error(ixgbe,
1192 1239 "Invalid NVM checksum. Please contact "
1193 1240 "the vendor to update the NVM.");
1194 1241 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1195 1242 goto init_fail;
1196 1243 }
1197 1244 }
1198 1245
1199 1246 /*
1200 1247 * Setup default flow control thresholds - enable/disable
1201 1248 * & flow control type is controlled by ixgbe.conf
1202 1249 */
1203 - hw->fc.high_water = DEFAULT_FCRTH;
1204 - hw->fc.low_water = DEFAULT_FCRTL;
1250 + hw->fc.high_water[0] = DEFAULT_FCRTH;
1251 + hw->fc.low_water[0] = DEFAULT_FCRTL;
1205 1252 hw->fc.pause_time = DEFAULT_FCPAUSE;
1206 1253 hw->fc.send_xon = B_TRUE;
1207 1254
1208 1255 /*
1209 1256 * Initialize link settings
1210 1257 */
1211 1258 (void) ixgbe_driver_setup_link(ixgbe, B_FALSE);
1212 1259
1213 1260 /*
1214 1261 * Initialize the chipset hardware
1215 1262 */
1216 1263 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1217 1264 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1218 1265 goto init_fail;
1219 1266 }
1220 1267
1221 1268 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1222 1269 goto init_fail;
1223 1270 }
1224 1271
1225 1272 mutex_exit(&ixgbe->gen_lock);
1226 1273 return (IXGBE_SUCCESS);
1227 1274
1228 1275 init_fail:
1229 1276 /*
1230 1277 * Reset PHY
1231 1278 */
1232 1279 (void) ixgbe_reset_phy(hw);
1233 1280
1234 1281 mutex_exit(&ixgbe->gen_lock);
1235 1282 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1236 1283 return (IXGBE_FAILURE);
1237 1284 }
1238 1285
1239 1286 /*
1240 1287 * ixgbe_chip_start - Initialize and start the chipset hardware.
1241 1288 */
1242 1289 static int
1243 1290 ixgbe_chip_start(ixgbe_t *ixgbe)
1244 1291 {
1245 1292 struct ixgbe_hw *hw = &ixgbe->hw;
1246 1293 int ret_val, i;
1247 1294
1248 1295 ASSERT(mutex_owned(&ixgbe->gen_lock));
1249 1296
1250 1297 /*
1251 1298 * Get the mac address
1252 1299 * This function should handle SPARC case correctly.
1253 1300 */
1254 1301 if (!ixgbe_find_mac_address(ixgbe)) {
1255 1302 ixgbe_error(ixgbe, "Failed to get the mac address");
1256 1303 return (IXGBE_FAILURE);
1257 1304 }
1258 1305
1259 1306 /*
1260 1307 * Validate the mac address
1261 1308 */
1262 1309 (void) ixgbe_init_rx_addrs(hw);
1263 1310 if (!is_valid_mac_addr(hw->mac.addr)) {
1264 1311 ixgbe_error(ixgbe, "Invalid mac address");
1265 1312 return (IXGBE_FAILURE);
1266 1313 }
1267 1314
1268 1315 /*
1269 1316 * Configure/Initialize hardware
1270 1317 */
1271 1318 ret_val = ixgbe_init_hw(hw);
1272 1319 if (ret_val != IXGBE_SUCCESS) {
1273 1320 if (ret_val == IXGBE_ERR_EEPROM_VERSION) {
1274 1321 ixgbe_error(ixgbe,
1275 1322 "This 82599 device is pre-release and contains"
1276 1323 " outdated firmware, please contact your hardware"
1277 1324 " vendor for a replacement.");
1278 1325 } else {
1279 1326 ixgbe_error(ixgbe, "Failed to initialize hardware");
1280 1327 return (IXGBE_FAILURE);
1281 1328 }
1282 1329 }
1283 1330
1284 1331 /*
1285 1332 * Re-enable relaxed ordering for performance. It is disabled
1286 1333 * by default in the hardware init.
1287 1334 */
1288 1335 if (ixgbe->relax_order_enable == B_TRUE)
1289 1336 ixgbe_enable_relaxed_ordering(hw);
1290 1337
1291 1338 /*
1292 1339 * Setup adapter interrupt vectors
1293 1340 */
1294 1341 ixgbe_setup_adapter_vector(ixgbe);
1295 1342
1296 1343 /*
1297 1344 * Initialize unicast addresses.
1298 1345 */
1299 1346 ixgbe_init_unicst(ixgbe);
1300 1347
1301 1348 /*
1302 1349 * Setup and initialize the mctable structures.
1303 1350 */
1304 1351 ixgbe_setup_multicst(ixgbe);
1305 1352
1306 1353 /*
1307 1354 * Set interrupt throttling rate
1308 1355 */
1309 1356 for (i = 0; i < ixgbe->intr_cnt; i++) {
1310 1357 IXGBE_WRITE_REG(hw, IXGBE_EITR(i), ixgbe->intr_throttling[i]);
1311 1358 }
1312 1359
1313 1360 /*
1314 1361 * Save the state of the phy
1315 1362 */
1316 1363 ixgbe_get_hw_state(ixgbe);
1317 1364
1318 1365 /*
1319 1366 * Make sure driver has control
1320 1367 */
1321 1368 ixgbe_get_driver_control(hw);
1322 1369
1323 1370 return (IXGBE_SUCCESS);
1324 1371 }
1325 1372
1326 1373 /*
1327 1374 * ixgbe_chip_stop - Stop the chipset hardware
1328 1375 */
1329 1376 static void
1330 1377 ixgbe_chip_stop(ixgbe_t *ixgbe)
1331 1378 {
1332 1379 struct ixgbe_hw *hw = &ixgbe->hw;
1333 1380
1334 1381 ASSERT(mutex_owned(&ixgbe->gen_lock));
1335 1382
1336 1383 /*
1337 1384 * Tell firmware driver is no longer in control
1338 1385 */
1339 1386 ixgbe_release_driver_control(hw);
1340 1387
1341 1388 /*
1342 1389 * Reset the chipset
1343 1390 */
1344 1391 (void) ixgbe_reset_hw(hw);
1345 1392
1346 1393 /*
1347 1394 * Reset PHY
1348 1395 */
1349 1396 (void) ixgbe_reset_phy(hw);
1350 1397 }
1351 1398
1352 1399 /*
1353 1400 * ixgbe_reset - Reset the chipset and re-start the driver.
1354 1401 *
1355 1402 * It involves stopping and re-starting the chipset,
1356 1403 * and re-configuring the rx/tx rings.
1357 1404 */
1358 1405 static int
1359 1406 ixgbe_reset(ixgbe_t *ixgbe)
1360 1407 {
1361 1408 int i;
1362 1409
1363 1410 /*
1364 1411 * Disable and stop the watchdog timer
1365 1412 */
1366 1413 ixgbe_disable_watchdog_timer(ixgbe);
1367 1414
1368 1415 mutex_enter(&ixgbe->gen_lock);
1369 1416
1370 1417 ASSERT(ixgbe->ixgbe_state & IXGBE_STARTED);
1371 1418 atomic_and_32(&ixgbe->ixgbe_state, ~IXGBE_STARTED);
1372 1419
1373 1420 ixgbe_stop(ixgbe, B_FALSE);
1374 1421
1375 1422 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1376 1423 mutex_exit(&ixgbe->gen_lock);
1377 1424 return (IXGBE_FAILURE);
1378 1425 }
1379 1426
1380 1427 /*
1381 1428 * After resetting, need to recheck the link status.
1382 1429 */
1383 1430 ixgbe->link_check_complete = B_FALSE;
1384 1431 ixgbe->link_check_hrtime = gethrtime() +
1385 1432 (IXGBE_LINK_UP_TIME * 100000000ULL);
1386 1433
1387 1434 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STARTED);
1388 1435
1389 1436 if (!(ixgbe->ixgbe_state & IXGBE_SUSPENDED)) {
1390 1437 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1391 1438 mac_tx_ring_update(ixgbe->mac_hdl,
1392 1439 ixgbe->tx_rings[i].ring_handle);
1393 1440 }
1394 1441 }
1395 1442
1396 1443 mutex_exit(&ixgbe->gen_lock);
1397 1444
1398 1445 /*
1399 1446 * Enable and start the watchdog timer
1400 1447 */
1401 1448 ixgbe_enable_watchdog_timer(ixgbe);
1402 1449
1403 1450 return (IXGBE_SUCCESS);
1404 1451 }
1405 1452
1406 1453 /*
1407 1454 * ixgbe_tx_clean - Clean the pending transmit packets and DMA resources.
1408 1455 */
1409 1456 static void
1410 1457 ixgbe_tx_clean(ixgbe_t *ixgbe)
1411 1458 {
1412 1459 ixgbe_tx_ring_t *tx_ring;
1413 1460 tx_control_block_t *tcb;
1414 1461 link_list_t pending_list;
1415 1462 uint32_t desc_num;
1416 1463 int i, j;
1417 1464
1418 1465 LINK_LIST_INIT(&pending_list);
1419 1466
1420 1467 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1421 1468 tx_ring = &ixgbe->tx_rings[i];
1422 1469
1423 1470 mutex_enter(&tx_ring->recycle_lock);
1424 1471
1425 1472 /*
1426 1473 * Clean the pending tx data - the pending packets in the
1427 1474 		 * work_list that have no chance of being transmitted again.
1428 1475 *
1429 1476 * We must ensure the chipset is stopped or the link is down
1430 1477 * before cleaning the transmit packets.
1431 1478 */
1432 1479 desc_num = 0;
1433 1480 for (j = 0; j < tx_ring->ring_size; j++) {
1434 1481 tcb = tx_ring->work_list[j];
1435 1482 if (tcb != NULL) {
1436 1483 desc_num += tcb->desc_num;
1437 1484
1438 1485 tx_ring->work_list[j] = NULL;
1439 1486
1440 1487 ixgbe_free_tcb(tcb);
1441 1488
1442 1489 LIST_PUSH_TAIL(&pending_list, &tcb->link);
1443 1490 }
1444 1491 }
1445 1492
1446 1493 if (desc_num > 0) {
1447 1494 atomic_add_32(&tx_ring->tbd_free, desc_num);
1448 1495 ASSERT(tx_ring->tbd_free == tx_ring->ring_size);
1449 1496
1450 1497 /*
1451 1498 * Reset the head and tail pointers of the tbd ring;
1452 1499 			 * Reset the writeback head if it's enabled.
1453 1500 */
1454 1501 tx_ring->tbd_head = 0;
1455 1502 tx_ring->tbd_tail = 0;
1456 1503 if (ixgbe->tx_head_wb_enable)
1457 1504 *tx_ring->tbd_head_wb = 0;
1458 1505
1459 1506 IXGBE_WRITE_REG(&ixgbe->hw,
1460 1507 IXGBE_TDH(tx_ring->index), 0);
1461 1508 IXGBE_WRITE_REG(&ixgbe->hw,
1462 1509 IXGBE_TDT(tx_ring->index), 0);
1463 1510 }
1464 1511
1465 1512 mutex_exit(&tx_ring->recycle_lock);
1466 1513
1467 1514 /*
1468 1515 * Add the tx control blocks in the pending list to
1469 1516 * the free list.
1470 1517 */
1471 1518 ixgbe_put_free_list(tx_ring, &pending_list);
1472 1519 }
1473 1520 }
1474 1521
1475 1522 /*
1476 1523 * ixgbe_tx_drain - Drain the tx rings to allow pending packets to be
1477 1524 * transmitted.
1478 1525 */
1479 1526 static boolean_t
1480 1527 ixgbe_tx_drain(ixgbe_t *ixgbe)
1481 1528 {
1482 1529 ixgbe_tx_ring_t *tx_ring;
1483 1530 boolean_t done;
1484 1531 int i, j;
1485 1532
1486 1533 /*
1487 1534 * Wait for a specific time to allow pending tx packets
1488 1535 * to be transmitted.
1489 1536 *
1490 1537 * Check the counter tbd_free to see if transmission is done.
1491 1538 * No lock protection is needed here.
1492 1539 *
1493 1540 * Return B_TRUE if all pending packets have been transmitted;
1494 1541 	 * otherwise return B_FALSE.
1495 1542 */
1496 1543 for (i = 0; i < TX_DRAIN_TIME; i++) {
1497 1544
1498 1545 done = B_TRUE;
1499 1546 for (j = 0; j < ixgbe->num_tx_rings; j++) {
1500 1547 tx_ring = &ixgbe->tx_rings[j];
1501 1548 done = done &&
1502 1549 (tx_ring->tbd_free == tx_ring->ring_size);
1503 1550 }
1504 1551
1505 1552 if (done)
1506 1553 break;
1507 1554
1508 1555 msec_delay(1);
1509 1556 }
1510 1557
1511 1558 return (done);
1512 1559 }
1513 1560
1514 1561 /*
1515 1562 * ixgbe_rx_drain - Wait for all rx buffers to be released by upper layer.
1516 1563 */
1517 1564 static boolean_t
1518 1565 ixgbe_rx_drain(ixgbe_t *ixgbe)
1519 1566 {
1520 1567 boolean_t done = B_TRUE;
1521 1568 int i;
1522 1569
1523 1570 /*
1524 1571 	 * Poll the rx free list to check whether the rx buffers held by
1525 1572 	 * the upper layer have been released.
1526 1573 	 *
1527 1574 	 * Check the counter rcb_pending to see if all pending buffers
1528 1575 	 * have been released. No lock protection is needed here.
1529 1576 *
1530 1577 * Return B_TRUE if all pending buffers have been released;
1531 1578 	 * otherwise return B_FALSE.
1532 1579 */
1533 1580 for (i = 0; i < RX_DRAIN_TIME; i++) {
1534 1581 done = (ixgbe->rcb_pending == 0);
1535 1582
1536 1583 if (done)
1537 1584 break;
1538 1585
1539 1586 msec_delay(1);
1540 1587 }
1541 1588
1542 1589 return (done);
1543 1590 }
1544 1591
1545 1592 /*
1546 1593 * ixgbe_start - Start the driver/chipset.
1547 1594 */
1548 1595 int
1549 1596 ixgbe_start(ixgbe_t *ixgbe, boolean_t alloc_buffer)
1550 1597 {
1551 1598 int i;
1552 1599
1553 1600 ASSERT(mutex_owned(&ixgbe->gen_lock));
1554 1601
1555 1602 if (alloc_buffer) {
1556 1603 if (ixgbe_alloc_rx_data(ixgbe) != IXGBE_SUCCESS) {
1557 1604 ixgbe_error(ixgbe,
1558 1605 "Failed to allocate software receive rings");
1559 1606 return (IXGBE_FAILURE);
1560 1607 }
1561 1608
1562 1609 /* Allocate buffers for all the rx/tx rings */
1563 1610 if (ixgbe_alloc_dma(ixgbe) != IXGBE_SUCCESS) {
1564 1611 ixgbe_error(ixgbe, "Failed to allocate DMA resource");
1565 1612 return (IXGBE_FAILURE);
1566 1613 }
1567 1614
1568 1615 ixgbe->tx_ring_init = B_TRUE;
1569 1616 } else {
1570 1617 ixgbe->tx_ring_init = B_FALSE;
1571 1618 }
1572 1619
1573 1620 for (i = 0; i < ixgbe->num_rx_rings; i++)
1574 1621 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1575 1622 for (i = 0; i < ixgbe->num_tx_rings; i++)
1576 1623 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1577 1624
1578 1625 /*
1579 1626 * Start the chipset hardware
1580 1627 */
1581 1628 if (ixgbe_chip_start(ixgbe) != IXGBE_SUCCESS) {
1582 1629 ixgbe_fm_ereport(ixgbe, DDI_FM_DEVICE_INVAL_STATE);
1583 1630 goto start_failure;
1584 1631 }
1585 1632
1586 1633 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1587 1634 goto start_failure;
1588 1635 }
1589 1636
1590 1637 /*
1591 1638 * Setup the rx/tx rings
1592 1639 */
1593 1640 ixgbe_setup_rings(ixgbe);
1594 1641
1595 1642 /*
1596 1643 * ixgbe_start() will be called when resetting, however if reset
1597 1644 * happens, we need to clear the ERROR, STALL and OVERTEMP flags
1598 1645 * before enabling the interrupts.
1599 1646 */
1600 1647 atomic_and_32(&ixgbe->ixgbe_state, ~(IXGBE_ERROR
1601 1648 	    | IXGBE_STALL | IXGBE_OVERTEMP));
1602 1649
1603 1650 /*
1604 1651 * Enable adapter interrupts
1605 1652 * The interrupts must be enabled after the driver state is START
1606 1653 */
1607 1654 ixgbe_enable_adapter_interrupts(ixgbe);
1608 1655
1609 1656 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1610 1657 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1611 1658 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1612 1659 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1613 1660
1614 1661 return (IXGBE_SUCCESS);
1615 1662
1616 1663 start_failure:
1617 1664 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1618 1665 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1619 1666 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1620 1667 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1621 1668
1622 1669 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1623 1670
1624 1671 return (IXGBE_FAILURE);
1625 1672 }
1626 1673
1627 1674 /*
1628 1675 * ixgbe_stop - Stop the driver/chipset.
1629 1676 */
1630 1677 void
1631 1678 ixgbe_stop(ixgbe_t *ixgbe, boolean_t free_buffer)
1632 1679 {
1633 1680 int i;
1634 1681
1635 1682 ASSERT(mutex_owned(&ixgbe->gen_lock));
1636 1683
1637 1684 /*
1638 1685 * Disable the adapter interrupts
1639 1686 */
1640 1687 ixgbe_disable_adapter_interrupts(ixgbe);
1641 1688
1642 1689 /*
1643 1690 * Drain the pending tx packets
1644 1691 */
1645 1692 (void) ixgbe_tx_drain(ixgbe);
1646 1693
1647 1694 for (i = 0; i < ixgbe->num_rx_rings; i++)
1648 1695 mutex_enter(&ixgbe->rx_rings[i].rx_lock);
1649 1696 for (i = 0; i < ixgbe->num_tx_rings; i++)
1650 1697 mutex_enter(&ixgbe->tx_rings[i].tx_lock);
1651 1698
1652 1699 /*
1653 1700 * Stop the chipset hardware
1654 1701 */
1655 1702 ixgbe_chip_stop(ixgbe);
1656 1703
1657 1704 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
1658 1705 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1659 1706 }
1660 1707
1661 1708 /*
1662 1709 * Clean the pending tx data/resources
1663 1710 */
1664 1711 ixgbe_tx_clean(ixgbe);
1665 1712
1666 1713 for (i = ixgbe->num_tx_rings - 1; i >= 0; i--)
1667 1714 mutex_exit(&ixgbe->tx_rings[i].tx_lock);
1668 1715 for (i = ixgbe->num_rx_rings - 1; i >= 0; i--)
1669 1716 mutex_exit(&ixgbe->rx_rings[i].rx_lock);
1670 1717
1671 1718 if (ixgbe->link_state == LINK_STATE_UP) {
1672 1719 ixgbe->link_state = LINK_STATE_UNKNOWN;
1673 1720 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
1674 1721 }
1675 1722
1676 1723 if (free_buffer) {
1677 1724 /*
1678 1725 * Release the DMA/memory resources of rx/tx rings
1679 1726 */
1680 1727 ixgbe_free_dma(ixgbe);
1681 1728 ixgbe_free_rx_data(ixgbe);
1682 1729 }
1683 1730 }
1684 1731
1685 1732 /*
1686 1733 * ixgbe_cbfunc - Driver interface for generic DDI callbacks
1687 1734 */
1688 1735 /* ARGSUSED */
1689 1736 static int
1690 1737 ixgbe_cbfunc(dev_info_t *dip, ddi_cb_action_t cbaction, void *cbarg,
1691 1738 void *arg1, void *arg2)
1692 1739 {
1693 1740 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
1694 1741
1695 1742 switch (cbaction) {
1696 1743 /* IRM callback */
1697 1744 int count;
1698 1745 case DDI_CB_INTR_ADD:
1699 1746 case DDI_CB_INTR_REMOVE:
1700 1747 count = (int)(uintptr_t)cbarg;
1701 1748 ASSERT(ixgbe->intr_type == DDI_INTR_TYPE_MSIX);
1702 1749 DTRACE_PROBE2(ixgbe__irm__callback, int, count,
1703 1750 int, ixgbe->intr_cnt);
1704 1751 if (ixgbe_intr_adjust(ixgbe, cbaction, count) !=
1705 1752 DDI_SUCCESS) {
1706 1753 ixgbe_error(ixgbe,
1707 1754 "IRM CB: Failed to adjust interrupts");
1708 1755 goto cb_fail;
1709 1756 }
1710 1757 break;
1711 1758 default:
1712 1759 IXGBE_DEBUGLOG_1(ixgbe, "DDI CB: action 0x%x NOT supported",
1713 1760 cbaction);
1714 1761 return (DDI_ENOTSUP);
1715 1762 }
1716 1763 return (DDI_SUCCESS);
1717 1764 cb_fail:
1718 1765 return (DDI_FAILURE);
1719 1766 }
1720 1767
1721 1768 /*
1722 1769 * ixgbe_intr_adjust - Adjust interrupt to respond to IRM request.
1723 1770 */
1724 1771 static int
1725 1772 ixgbe_intr_adjust(ixgbe_t *ixgbe, ddi_cb_action_t cbaction, int count)
1726 1773 {
1727 1774 int i, rc, actual;
1728 1775
1729 1776 if (count == 0)
1730 1777 return (DDI_SUCCESS);
1731 1778
1732 1779 if ((cbaction == DDI_CB_INTR_ADD &&
1733 1780 ixgbe->intr_cnt + count > ixgbe->intr_cnt_max) ||
1734 1781 (cbaction == DDI_CB_INTR_REMOVE &&
1735 1782 ixgbe->intr_cnt - count < ixgbe->intr_cnt_min))
1736 1783 return (DDI_FAILURE);
1737 1784
1738 1785 if (!(ixgbe->ixgbe_state & IXGBE_STARTED)) {
1739 1786 return (DDI_FAILURE);
1740 1787 }
1741 1788
1742 1789 for (i = 0; i < ixgbe->num_rx_rings; i++)
1743 1790 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle, NULL);
1744 1791 for (i = 0; i < ixgbe->num_tx_rings; i++)
1745 1792 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle, NULL);
1746 1793
1747 1794 mutex_enter(&ixgbe->gen_lock);
1748 1795 ixgbe->ixgbe_state &= ~IXGBE_STARTED;
1749 1796 ixgbe->ixgbe_state |= IXGBE_INTR_ADJUST;
1750 1797 ixgbe->ixgbe_state |= IXGBE_SUSPENDED;
1751 1798 mac_link_update(ixgbe->mac_hdl, LINK_STATE_UNKNOWN);
1752 1799
1753 1800 ixgbe_stop(ixgbe, B_FALSE);
1754 1801 /*
1755 1802 * Disable interrupts
1756 1803 */
1757 1804 if (ixgbe->attach_progress & ATTACH_PROGRESS_ENABLE_INTR) {
1758 1805 rc = ixgbe_disable_intrs(ixgbe);
1759 1806 ASSERT(rc == IXGBE_SUCCESS);
1760 1807 }
1761 1808 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ENABLE_INTR;
1762 1809
1763 1810 /*
1764 1811 * Remove interrupt handlers
1765 1812 */
1766 1813 if (ixgbe->attach_progress & ATTACH_PROGRESS_ADD_INTR) {
1767 1814 ixgbe_rem_intr_handlers(ixgbe);
1768 1815 }
1769 1816 ixgbe->attach_progress &= ~ATTACH_PROGRESS_ADD_INTR;
1770 1817
1771 1818 /*
1772 1819 * Clear vect_map
1773 1820 */
1774 1821 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
1775 1822 switch (cbaction) {
1776 1823 case DDI_CB_INTR_ADD:
1777 1824 rc = ddi_intr_alloc(ixgbe->dip, ixgbe->htable,
1778 1825 DDI_INTR_TYPE_MSIX, ixgbe->intr_cnt, count, &actual,
1779 1826 DDI_INTR_ALLOC_NORMAL);
1780 1827 if (rc != DDI_SUCCESS || actual != count) {
1781 1828 			ixgbe_log(ixgbe, "Adjust interrupts failed. "
1782 1829 			    "return: %d, irm cb size: %d, actual: %d",
1783 1830 rc, count, actual);
1784 1831 goto intr_adjust_fail;
1785 1832 }
1786 1833 ixgbe->intr_cnt += count;
1787 1834 break;
1788 1835
1789 1836 case DDI_CB_INTR_REMOVE:
1790 1837 for (i = ixgbe->intr_cnt - count;
1791 1838 		    i < ixgbe->intr_cnt; i++) {
1792 1839 rc = ddi_intr_free(ixgbe->htable[i]);
1793 1840 ixgbe->htable[i] = NULL;
1794 1841 if (rc != DDI_SUCCESS) {
1795 1842 				ixgbe_log(ixgbe, "Adjust interrupts failed. "
1796 1843 				    "return: %d, irm cb size: %d",
1797 1844 				    rc, count);
1798 1845 goto intr_adjust_fail;
1799 1846 }
1800 1847 }
1801 1848 ixgbe->intr_cnt -= count;
1802 1849 break;
1803 1850 }
1804 1851
1805 1852 /*
1806 1853 * Get priority for first vector, assume remaining are all the same
1807 1854 */
1808 1855 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
1809 1856 if (rc != DDI_SUCCESS) {
1810 1857 ixgbe_log(ixgbe,
1811 1858 "Get interrupt priority failed: %d", rc);
1812 1859 goto intr_adjust_fail;
1813 1860 }
1814 1861 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
1815 1862 if (rc != DDI_SUCCESS) {
1816 1863 ixgbe_log(ixgbe, "Get interrupt cap failed: %d", rc);
1817 1864 goto intr_adjust_fail;
1818 1865 }
1819 1866 ixgbe->attach_progress |= ATTACH_PROGRESS_ALLOC_INTR;
1820 1867
1821 1868 /*
1822 1869 * Map rings to interrupt vectors
1823 1870 */
1824 1871 if (ixgbe_map_intrs_to_vectors(ixgbe) != IXGBE_SUCCESS) {
1825 1872 ixgbe_error(ixgbe,
1826 1873 "IRM CB: Failed to map interrupts to vectors");
1827 1874 goto intr_adjust_fail;
1828 1875 }
1829 1876
1830 1877 /*
1831 1878 * Add interrupt handlers
1832 1879 */
1833 1880 if (ixgbe_add_intr_handlers(ixgbe) != IXGBE_SUCCESS) {
1834 1881 ixgbe_error(ixgbe, "IRM CB: Failed to add interrupt handlers");
1835 1882 goto intr_adjust_fail;
1836 1883 }
1837 1884 ixgbe->attach_progress |= ATTACH_PROGRESS_ADD_INTR;
1838 1885
1839 1886 /*
1840 1887 * Now that mutex locks are initialized, and the chip is also
1841 1888 * initialized, enable interrupts.
1842 1889 */
1843 1890 if (ixgbe_enable_intrs(ixgbe) != IXGBE_SUCCESS) {
1844 1891 ixgbe_error(ixgbe, "IRM CB: Failed to enable DDI interrupts");
1845 1892 goto intr_adjust_fail;
1846 1893 }
1847 1894 ixgbe->attach_progress |= ATTACH_PROGRESS_ENABLE_INTR;
1848 1895 if (ixgbe_start(ixgbe, B_FALSE) != IXGBE_SUCCESS) {
1849 1896 ixgbe_error(ixgbe, "IRM CB: Failed to start");
1850 1897 goto intr_adjust_fail;
1851 1898 }
1852 1899 ixgbe->ixgbe_state &= ~IXGBE_INTR_ADJUST;
1853 1900 ixgbe->ixgbe_state &= ~IXGBE_SUSPENDED;
1854 1901 ixgbe->ixgbe_state |= IXGBE_STARTED;
1855 1902 mutex_exit(&ixgbe->gen_lock);
1856 1903
1857 1904 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1858 1905 mac_ring_intr_set(ixgbe->rx_rings[i].ring_handle,
1859 1906 ixgbe->htable[ixgbe->rx_rings[i].intr_vector]);
1860 1907 }
1861 1908 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1862 1909 mac_ring_intr_set(ixgbe->tx_rings[i].ring_handle,
1863 1910 ixgbe->htable[ixgbe->tx_rings[i].intr_vector]);
1864 1911 }
1865 1912
1866 1913 /* Wakeup all Tx rings */
1867 1914 for (i = 0; i < ixgbe->num_tx_rings; i++) {
1868 1915 mac_tx_ring_update(ixgbe->mac_hdl,
1869 1916 ixgbe->tx_rings[i].ring_handle);
1870 1917 }
1871 1918
1872 1919 IXGBE_DEBUGLOG_3(ixgbe,
1873 1920 "IRM CB: interrupts new value: 0x%x(0x%x:0x%x).",
1874 1921 ixgbe->intr_cnt, ixgbe->intr_cnt_min, ixgbe->intr_cnt_max);
1875 1922 return (DDI_SUCCESS);
1876 1923
1877 1924 intr_adjust_fail:
1878 1925 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
1879 1926 mutex_exit(&ixgbe->gen_lock);
1880 1927 return (DDI_FAILURE);
1881 1928 }
1882 1929
1883 1930 /*
1884 1931 * ixgbe_intr_cb_register - Register interrupt callback function.
1885 1932 */
1886 1933 static int
1887 1934 ixgbe_intr_cb_register(ixgbe_t *ixgbe)
1888 1935 {
1889 1936 if (ddi_cb_register(ixgbe->dip, DDI_CB_FLAG_INTR, ixgbe_cbfunc,
1890 1937 ixgbe, NULL, &ixgbe->cb_hdl) != DDI_SUCCESS) {
1891 1938 return (IXGBE_FAILURE);
1892 1939 }
1893 1940 IXGBE_DEBUGLOG_0(ixgbe, "Interrupt callback function registered.");
1894 1941 return (IXGBE_SUCCESS);
1895 1942 }
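
The registration above pairs with ddi_cb_unregister(9F) on the teardown side; a hedged sketch of the counterpart (its placement in the detach path is assumed, not shown in this diff):

	/* Assumed teardown counterpart, e.g. during detach: */
	if (ixgbe->cb_hdl != NULL)
		(void) ddi_cb_unregister(ixgbe->cb_hdl);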
1896 1943
1897 1944 /*
1898 1945 * ixgbe_alloc_rings - Allocate memory space for rx/tx rings.
1899 1946 */
1900 1947 static int
1901 1948 ixgbe_alloc_rings(ixgbe_t *ixgbe)
1902 1949 {
1903 1950 /*
1904 1951 * Allocate memory space for rx rings
1905 1952 */
1906 1953 ixgbe->rx_rings = kmem_zalloc(
1907 1954 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings,
1908 1955 KM_NOSLEEP);
1909 1956
1910 1957 if (ixgbe->rx_rings == NULL) {
1911 1958 return (IXGBE_FAILURE);
1912 1959 }
1913 1960
1914 1961 /*
1915 1962 * Allocate memory space for tx rings
1916 1963 */
1917 1964 ixgbe->tx_rings = kmem_zalloc(
1918 1965 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings,
1919 1966 KM_NOSLEEP);
1920 1967
1921 1968 if (ixgbe->tx_rings == NULL) {
1922 1969 kmem_free(ixgbe->rx_rings,
1923 1970 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1924 1971 ixgbe->rx_rings = NULL;
1925 1972 return (IXGBE_FAILURE);
1926 1973 }
1927 1974
1928 1975 /*
1929 1976 * Allocate memory space for rx ring groups
1930 1977 */
1931 1978 ixgbe->rx_groups = kmem_zalloc(
1932 1979 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups,
1933 1980 KM_NOSLEEP);
1934 1981
1935 1982 if (ixgbe->rx_groups == NULL) {
1936 1983 kmem_free(ixgbe->rx_rings,
1937 1984 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1938 1985 kmem_free(ixgbe->tx_rings,
1939 1986 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1940 1987 ixgbe->rx_rings = NULL;
1941 1988 ixgbe->tx_rings = NULL;
1942 1989 return (IXGBE_FAILURE);
1943 1990 }
1944 1991
1945 1992 return (IXGBE_SUCCESS);
1946 1993 }
1947 1994
1948 1995 /*
1949 1996 * ixgbe_free_rings - Free the memory space of rx/tx rings.
1950 1997 */
1951 1998 static void
1952 1999 ixgbe_free_rings(ixgbe_t *ixgbe)
1953 2000 {
1954 2001 if (ixgbe->rx_rings != NULL) {
1955 2002 kmem_free(ixgbe->rx_rings,
1956 2003 sizeof (ixgbe_rx_ring_t) * ixgbe->num_rx_rings);
1957 2004 ixgbe->rx_rings = NULL;
1958 2005 }
1959 2006
1960 2007 if (ixgbe->tx_rings != NULL) {
1961 2008 kmem_free(ixgbe->tx_rings,
1962 2009 sizeof (ixgbe_tx_ring_t) * ixgbe->num_tx_rings);
1963 2010 ixgbe->tx_rings = NULL;
1964 2011 }
1965 2012
1966 2013 if (ixgbe->rx_groups != NULL) {
1967 2014 kmem_free(ixgbe->rx_groups,
1968 2015 sizeof (ixgbe_rx_group_t) * ixgbe->num_rx_groups);
1969 2016 ixgbe->rx_groups = NULL;
1970 2017 }
1971 2018 }
1972 2019
1973 2020 static int
1974 2021 ixgbe_alloc_rx_data(ixgbe_t *ixgbe)
1975 2022 {
1976 2023 ixgbe_rx_ring_t *rx_ring;
1977 2024 int i;
1978 2025
1979 2026 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1980 2027 rx_ring = &ixgbe->rx_rings[i];
1981 2028 if (ixgbe_alloc_rx_ring_data(rx_ring) != IXGBE_SUCCESS)
1982 2029 goto alloc_rx_rings_failure;
1983 2030 }
1984 2031 return (IXGBE_SUCCESS);
1985 2032
1986 2033 alloc_rx_rings_failure:
1987 2034 ixgbe_free_rx_data(ixgbe);
1988 2035 return (IXGBE_FAILURE);
1989 2036 }
1990 2037
1991 2038 static void
1992 2039 ixgbe_free_rx_data(ixgbe_t *ixgbe)
1993 2040 {
1994 2041 ixgbe_rx_ring_t *rx_ring;
1995 2042 ixgbe_rx_data_t *rx_data;
1996 2043 int i;
1997 2044
1998 2045 for (i = 0; i < ixgbe->num_rx_rings; i++) {
1999 2046 rx_ring = &ixgbe->rx_rings[i];
2000 2047
2001 2048 mutex_enter(&ixgbe->rx_pending_lock);
2002 2049 rx_data = rx_ring->rx_data;
2003 2050
2004 2051 if (rx_data != NULL) {
2005 2052 rx_data->flag |= IXGBE_RX_STOPPED;
2006 2053
2007 2054 if (rx_data->rcb_pending == 0) {
2008 2055 ixgbe_free_rx_ring_data(rx_data);
2009 2056 rx_ring->rx_data = NULL;
2010 2057 }
2011 2058 }
2012 2059
2013 2060 mutex_exit(&ixgbe->rx_pending_lock);
2014 2061 }
2015 2062 }
2016 2063
2017 2064 /*
2018 2065 * ixgbe_setup_rings - Setup rx/tx rings.
2019 2066 */
2020 2067 static void
2021 2068 ixgbe_setup_rings(ixgbe_t *ixgbe)
2022 2069 {
2023 2070 /*
2024 2071 * Setup the rx/tx rings, including the following:
2025 2072 *
2026 2073 * 1. Setup the descriptor ring and the control block buffers;
2027 2074 * 2. Initialize necessary registers for receive/transmit;
2028 2075 * 3. Initialize software pointers/parameters for receive/transmit;
2029 2076 */
2030 2077 ixgbe_setup_rx(ixgbe);
2031 2078
2032 2079 ixgbe_setup_tx(ixgbe);
2033 2080 }
2034 2081
2035 2082 static void
2036 2083 ixgbe_setup_rx_ring(ixgbe_rx_ring_t *rx_ring)
2037 2084 {
2038 2085 ixgbe_t *ixgbe = rx_ring->ixgbe;
2039 2086 ixgbe_rx_data_t *rx_data = rx_ring->rx_data;
2040 2087 struct ixgbe_hw *hw = &ixgbe->hw;
2041 2088 rx_control_block_t *rcb;
2042 2089 union ixgbe_adv_rx_desc *rbd;
2043 2090 uint32_t size;
2044 2091 uint32_t buf_low;
2045 2092 uint32_t buf_high;
2046 2093 uint32_t reg_val;
2047 2094 int i;
2048 2095
2049 2096 ASSERT(mutex_owned(&rx_ring->rx_lock));
2050 2097 ASSERT(mutex_owned(&ixgbe->gen_lock));
2051 2098
2052 2099 for (i = 0; i < ixgbe->rx_ring_size; i++) {
2053 2100 rcb = rx_data->work_list[i];
2054 2101 rbd = &rx_data->rbd_ring[i];
2055 2102
2056 2103 rbd->read.pkt_addr = rcb->rx_buf.dma_address;
2057 2104 rbd->read.hdr_addr = NULL;
2058 2105 }
2059 2106
2060 2107 /*
2061 2108 * Initialize the length register
2062 2109 */
2063 2110 size = rx_data->ring_size * sizeof (union ixgbe_adv_rx_desc);
2064 2111 IXGBE_WRITE_REG(hw, IXGBE_RDLEN(rx_ring->hw_index), size);
2065 2112
2066 2113 /*
2067 2114 * Initialize the base address registers
2068 2115 */
2069 2116 buf_low = (uint32_t)rx_data->rbd_area.dma_address;
2070 2117 buf_high = (uint32_t)(rx_data->rbd_area.dma_address >> 32);
2071 2118 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(rx_ring->hw_index), buf_high);
2072 2119 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(rx_ring->hw_index), buf_low);
2073 2120
2074 2121 /*
2075 2122 * Setup head & tail pointers
2076 2123 */
2077 2124 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->hw_index),
2078 2125 rx_data->ring_size - 1);
2079 2126 IXGBE_WRITE_REG(hw, IXGBE_RDH(rx_ring->hw_index), 0);
2080 2127
2081 2128 rx_data->rbd_next = 0;
2082 2129 rx_data->lro_first = 0;
2083 2130
2084 2131 /*
2085 2132 * Setup the Receive Descriptor Control Register (RXDCTL)
2086 2133 * PTHRESH=32 descriptors (half the internal cache)
2087 2134 * HTHRESH=0 descriptors (to minimize latency on fetch)
2088 2135 * WTHRESH defaults to 1 (writeback each descriptor)
2089 2136 */
2090 2137 reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index));
2091 2138 reg_val |= IXGBE_RXDCTL_ENABLE; /* enable queue */
2092 2139
2093 2140 /* Not a valid value for 82599 */
2094 2141 if (hw->mac.type < ixgbe_mac_82599EB) {
2095 2142 reg_val |= 0x0020; /* pthresh */
2096 2143 }
2097 2144 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->hw_index), reg_val);
2098 2145
2099 - if (hw->mac.type == ixgbe_mac_82599EB) {
2146 + if (hw->mac.type >= ixgbe_mac_82599EB) {
2100 2147 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2101 2148 reg_val |= (IXGBE_RDRXCTL_CRCSTRIP | IXGBE_RDRXCTL_AGGDIS);
2102 2149 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2103 2150 }
2104 2151
2105 2152 /*
2106 2153 * Setup the Split and Replication Receive Control Register.
2107 2154 * Set the rx buffer size and the advanced descriptor type.
2108 2155 */
2109 2156 reg_val = (ixgbe->rx_buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) |
2110 2157 IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2111 2158 reg_val |= IXGBE_SRRCTL_DROP_EN;
2112 2159 IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(rx_ring->hw_index), reg_val);
2113 2160 }
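
IXGBE_SRRCTL_BSIZEPKT_SHIFT is 10, so the BSIZEPKT field written above is simply the receive buffer size expressed in 1 KB units. A worked example (the 2 KB buffer size is illustrative):

	/* Example only: a 2048-byte rx buffer yields BSIZEPKT == 2. */
	uint32_t ex_bsize = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
	ASSERT(ex_bsize == 2);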
2114 2161
2115 2162 static void
2116 2163 ixgbe_setup_rx(ixgbe_t *ixgbe)
2117 2164 {
2118 2165 ixgbe_rx_ring_t *rx_ring;
2119 2166 struct ixgbe_hw *hw = &ixgbe->hw;
2120 2167 uint32_t reg_val;
2121 2168 uint32_t ring_mapping;
2122 2169 uint32_t i, index;
2123 2170 uint32_t psrtype_rss_bit;
2124 2171
2125 2172 /* PSRTYPE must be configured for 82599 */
2126 2173 if (ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ &&
2127 2174 ixgbe->classify_mode != IXGBE_CLASSIFY_VMDQ_RSS) {
2128 2175 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2129 2176 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2130 2177 reg_val |= IXGBE_PSRTYPE_L2HDR;
2131 2178 reg_val |= 0x80000000;
2132 2179 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), reg_val);
2133 2180 } else {
2134 2181 if (ixgbe->num_rx_groups > 32) {
2135 2182 psrtype_rss_bit = 0x20000000;
2136 2183 } else {
2137 2184 psrtype_rss_bit = 0x40000000;
2138 2185 }
2139 2186 for (i = 0; i < ixgbe->capab->max_rx_grp_num; i++) {
2140 2187 reg_val = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR |
2141 2188 IXGBE_PSRTYPE_IPV4HDR | IXGBE_PSRTYPE_IPV6HDR;
2142 2189 reg_val |= IXGBE_PSRTYPE_L2HDR;
2143 2190 reg_val |= psrtype_rss_bit;
2144 2191 IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), reg_val);
2145 2192 }
2146 2193 }
2147 2194
2148 2195 /*
2149 2196 * Set filter control in FCTRL to accept broadcast packets and do
2150 2197 * not pass pause frames to host. Flow control settings are already
2151 2198 * in this register, so preserve them.
2152 2199 */
2153 2200 reg_val = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2154 2201 reg_val |= IXGBE_FCTRL_BAM; /* broadcast accept mode */
2155 2202 reg_val |= IXGBE_FCTRL_DPF; /* discard pause frames */
2156 2203 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_val);
2157 2204
2158 2205 /*
2159 2206 * Hardware checksum settings
2160 2207 */
2161 2208 if (ixgbe->rx_hcksum_enable) {
2162 2209 reg_val = IXGBE_RXCSUM_IPPCSE; /* IP checksum */
2163 2210 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, reg_val);
2164 2211 }
2165 2212
2166 2213 /*
2167 2214 * Setup VMDq and RSS for multiple receive queues
2168 2215 */
2169 2216 switch (ixgbe->classify_mode) {
2170 2217 case IXGBE_CLASSIFY_RSS:
2171 2218 /*
2172 2219 * One group, only RSS is needed when more than
2173 2220 * one ring enabled.
2174 2221 */
2175 2222 ixgbe_setup_rss(ixgbe);
2176 2223 break;
2177 2224
2178 2225 case IXGBE_CLASSIFY_VMDQ:
2179 2226 /*
2180 2227 * Multiple groups, each group has one ring,
2181 2228 * only VMDq is needed.
2182 2229 */
2183 2230 ixgbe_setup_vmdq(ixgbe);
2184 2231 break;
2185 2232
2186 2233 case IXGBE_CLASSIFY_VMDQ_RSS:
2187 2234 /*
2188 2235 * Multiple groups and multiple rings, both
2189 2236 * VMDq and RSS are needed.
2190 2237 */
2191 2238 ixgbe_setup_vmdq_rss(ixgbe);
2192 2239 break;
2193 2240
2194 2241 default:
2195 2242 break;
2196 2243 }
2197 2244
2198 2245 /*
2199 2246 * Enable the receive unit. This must be done after filter
2200 2247 * control is set in FCTRL.
2201 2248 */
2202 2249 reg_val = (IXGBE_RXCTRL_RXEN /* Enable Receive Unit */
2203 2250 | IXGBE_RXCTRL_DMBYPS); /* descriptor monitor bypass */
2204 2251 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
2205 2252
2206 2253 /*
2207 2254 * ixgbe_setup_rx_ring must be called after configuring RXCTRL
2208 2255 */
2209 2256 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2210 2257 rx_ring = &ixgbe->rx_rings[i];
2211 2258 ixgbe_setup_rx_ring(rx_ring);
2212 2259 }
2213 2260
2214 2261 /*
2215 2262 * Setup the per-ring statistics mapping.
2216 2263 */
2217 2264 ring_mapping = 0;
2218 2265 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2219 2266 index = ixgbe->rx_rings[i].hw_index;
2220 2267 ring_mapping = IXGBE_READ_REG(hw, IXGBE_RQSMR(index >> 2));
2221 2268 ring_mapping |= (i & 0xF) << (8 * (index & 0x3));
2222 2269 IXGBE_WRITE_REG(hw, IXGBE_RQSMR(index >> 2), ring_mapping);
2223 2270 }
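	/*
	 * Editor's illustration of the RQSMR packing above: each 32-bit
	 * RQSMR register holds four 8-bit queue-to-stat-counter mappings,
	 * so a ring with hw_index 5 lands in RQSMR(5 >> 2) == RQSMR(1),
	 * byte lane 5 & 0x3 == 1, i.e. bits 15:8 of that register:
	 *
	 *	(5 >> 2) == 1;	8 * (5 & 0x3) == 8
	 */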
2224 2271
2225 2272 /*
2226 2273 	 * The Max Frame Size in MHADD/MAXFRS is internally increased by
2227 2274 	 * four bytes if the packet has a VLAN field, so the value set here
2228 2275 	 * includes the MTU, ethernet header and frame check sequence.
2229 2276 	 * The register is named MAXFRS on 82599.
2230 2277 */
2231 2278 reg_val = (ixgbe->default_mtu + sizeof (struct ether_header)
2232 2279 + ETHERFCSL) << IXGBE_MHADD_MFS_SHIFT;
2233 2280 IXGBE_WRITE_REG(hw, IXGBE_MHADD, reg_val);
2234 2281
2235 2282 /*
2236 2283 * Setup Jumbo Frame enable bit
2237 2284 */
2238 2285 if (ixgbe->default_mtu > ETHERMTU) {
2239 2286 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2240 2287 reg_val |= IXGBE_HLREG0_JUMBOEN;
2241 2288 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2242 2289 }
2243 2290
2244 2291 /*
2245 2292 * Setup RSC for multiple receive queues.
2246 2293 */
2247 2294 if (ixgbe->lro_enable) {
2248 2295 for (i = 0; i < ixgbe->num_rx_rings; i++) {
2249 2296 /*
2250 2297 			 * Make sure rx_buf_size * MAXDESC is not greater
2251 2298 * than 65535.
2252 2299 * Intel recommends 4 for MAXDESC field value.
2253 2300 */
2254 2301 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCCTL(i));
2255 2302 reg_val |= IXGBE_RSCCTL_RSCEN;
2256 2303 if (ixgbe->rx_buf_size == IXGBE_PKG_BUF_16k)
2257 2304 reg_val |= IXGBE_RSCCTL_MAXDESC_1;
2258 2305 else
2259 2306 reg_val |= IXGBE_RSCCTL_MAXDESC_4;
2260 2307 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(i), reg_val);
2261 2308 }
2262 2309
2263 2310 reg_val = IXGBE_READ_REG(hw, IXGBE_RSCDBU);
2264 2311 reg_val |= IXGBE_RSCDBU_RSCACKDIS;
2265 2312 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, reg_val);
2266 2313
2267 2314 reg_val = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
2268 2315 reg_val |= IXGBE_RDRXCTL_RSCACKC;
2269 2316 reg_val |= IXGBE_RDRXCTL_FCOE_WRFIX;
2270 2317 reg_val &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
2271 2318
2272 2319 IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg_val);
2273 2320 }
2274 2321 }
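
The MFS field occupies the upper half of MHADD; a worked instance of the computation above, assuming the default 1500-byte MTU:

	/*
	 * Example only: 1500 (MTU) + 14 (ether header) + 4 (FCS) = 1518,
	 * shifted into MHADD[31:16] by IXGBE_MHADD_MFS_SHIFT (16).
	 */
	uint32_t ex_mfs = (1500 + sizeof (struct ether_header) + ETHERFCSL)
	    << IXGBE_MHADD_MFS_SHIFT;
	ASSERT(ex_mfs == (1518 << 16));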
2275 2322
2276 2323 static void
2277 2324 ixgbe_setup_tx_ring(ixgbe_tx_ring_t *tx_ring)
2278 2325 {
2279 2326 ixgbe_t *ixgbe = tx_ring->ixgbe;
2280 2327 struct ixgbe_hw *hw = &ixgbe->hw;
2281 2328 uint32_t size;
2282 2329 uint32_t buf_low;
2283 2330 uint32_t buf_high;
2284 2331 uint32_t reg_val;
2285 2332
2286 2333 ASSERT(mutex_owned(&tx_ring->tx_lock));
2287 2334 ASSERT(mutex_owned(&ixgbe->gen_lock));
2288 2335
2289 2336 /*
2290 2337 * Initialize the length register
2291 2338 */
2292 2339 size = tx_ring->ring_size * sizeof (union ixgbe_adv_tx_desc);
2293 2340 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(tx_ring->index), size);
2294 2341
2295 2342 /*
2296 2343 * Initialize the base address registers
2297 2344 */
2298 2345 buf_low = (uint32_t)tx_ring->tbd_area.dma_address;
2299 2346 buf_high = (uint32_t)(tx_ring->tbd_area.dma_address >> 32);
2300 2347 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(tx_ring->index), buf_low);
2301 2348 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(tx_ring->index), buf_high);
2302 2349
2303 2350 /*
2304 2351 * Setup head & tail pointers
2305 2352 */
2306 2353 IXGBE_WRITE_REG(hw, IXGBE_TDH(tx_ring->index), 0);
2307 2354 IXGBE_WRITE_REG(hw, IXGBE_TDT(tx_ring->index), 0);
2308 2355
2309 2356 /*
2310 2357 * Setup head write-back
2311 2358 */
2312 2359 if (ixgbe->tx_head_wb_enable) {
2313 2360 /*
2314 2361 * The memory of the head write-back is allocated using
2315 2362 * the extra tbd beyond the tail of the tbd ring.
2316 2363 */
2317 2364 tx_ring->tbd_head_wb = (uint32_t *)
2318 2365 ((uintptr_t)tx_ring->tbd_area.address + size);
2319 2366 *tx_ring->tbd_head_wb = 0;
2320 2367
2321 2368 buf_low = (uint32_t)
2322 2369 (tx_ring->tbd_area.dma_address + size);
2323 2370 buf_high = (uint32_t)
2324 2371 ((tx_ring->tbd_area.dma_address + size) >> 32);
2325 2372
2326 2373 /* Set the head write-back enable bit */
2327 2374 buf_low |= IXGBE_TDWBAL_HEAD_WB_ENABLE;
2328 2375
2329 2376 IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(tx_ring->index), buf_low);
2330 2377 IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(tx_ring->index), buf_high);
2331 2378
2332 2379 /*
2333 2380 * Turn off relaxed ordering for head write back or it will
2334 2381 * cause problems with the tx recycling
2335 2382 */
2336 - reg_val = IXGBE_READ_REG(hw,
2337 - IXGBE_DCA_TXCTRL(tx_ring->index));
2338 - reg_val &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
2339 - IXGBE_WRITE_REG(hw,
2340 - IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2383 +#if 1
2384 + /* XXX KEBE ASKS --> Should we do what FreeBSD does? */
2385 + reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2386 + IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2387 + IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2388 + reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2389 + if (hw->mac.type == ixgbe_mac_82598EB) {
2390 + IXGBE_WRITE_REG(hw,
2391 + IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2392 + } else {
2393 + IXGBE_WRITE_REG(hw,
2394 + IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2395 + }
2396 +#else
2397 + /* XXX KEBE ASKS --> Or should we do what we've always done? */
2398 + reg_val = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index));
2399 + reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2400 + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2401 +#endif
2341 2402 } else {
2342 2403 tx_ring->tbd_head_wb = NULL;
2404 +#if 1
2405 + /*
2406 + * XXX KEBE ASKS --> Should we do what FreeBSD does and
2407 +		 * twiddle TXCTRL_DESC_WRO_EN off anyway?
2408 + */
2409 + reg_val = (hw->mac.type == ixgbe_mac_82598EB) ?
2410 + IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(tx_ring->index)) :
2411 + IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(tx_ring->index));
2412 + reg_val &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
2413 + if (hw->mac.type == ixgbe_mac_82598EB) {
2414 + IXGBE_WRITE_REG(hw,
2415 + IXGBE_DCA_TXCTRL(tx_ring->index), reg_val);
2416 + } else {
2417 + IXGBE_WRITE_REG(hw,
2418 + IXGBE_DCA_TXCTRL_82599(tx_ring->index), reg_val);
2419 + }
2420 +#endif
2343 2421 }
2344 2422
2345 2423 tx_ring->tbd_head = 0;
2346 2424 tx_ring->tbd_tail = 0;
2347 2425 tx_ring->tbd_free = tx_ring->ring_size;
2348 2426
2349 2427 if (ixgbe->tx_ring_init == B_TRUE) {
2350 2428 tx_ring->tcb_head = 0;
2351 2429 tx_ring->tcb_tail = 0;
2352 2430 tx_ring->tcb_free = tx_ring->free_list_size;
2353 2431 }
2354 2432
2355 2433 /*
2356 2434 * Initialize the s/w context structure
2357 2435 */
2358 2436 bzero(&tx_ring->tx_context, sizeof (ixgbe_tx_context_t));
2359 2437 }
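
When head write-back is enabled, the hardware DMAs its consumed-descriptor index into the slot set up above, so the tx recycle path can poll host memory instead of issuing an expensive TDH register read. A hedged sketch of how tbd_head_wb is typically consumed (NEXT_INDEX() is assumed to be the driver's ring-arithmetic helper; the driver's actual recycle logic lives elsewhere):

	/* Sketch: descriptors in [tbd_head, *tbd_head_wb) are done. */
	uint32_t hw_head = *tx_ring->tbd_head_wb;	/* updated by DMA */
	while (tx_ring->tbd_head != hw_head) {
		/* ... reclaim the tx control block for this slot ... */
		tx_ring->tbd_head =
		    NEXT_INDEX(tx_ring->tbd_head, 1, tx_ring->ring_size);
		tx_ring->tbd_free++;
	}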
2360 2438
2361 2439 static void
2362 2440 ixgbe_setup_tx(ixgbe_t *ixgbe)
2363 2441 {
2364 2442 struct ixgbe_hw *hw = &ixgbe->hw;
2365 2443 ixgbe_tx_ring_t *tx_ring;
2366 2444 uint32_t reg_val;
2367 2445 uint32_t ring_mapping;
2368 2446 int i;
2369 2447
2370 2448 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2371 2449 tx_ring = &ixgbe->tx_rings[i];
2372 2450 ixgbe_setup_tx_ring(tx_ring);
2373 2451 }
2374 2452
2375 2453 /*
2376 2454 * Setup the per-ring statistics mapping.
2377 2455 */
2378 2456 ring_mapping = 0;
2379 2457 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2380 2458 ring_mapping |= (i & 0xF) << (8 * (i & 0x3));
2381 2459 if ((i & 0x3) == 0x3) {
2382 2460 switch (hw->mac.type) {
2383 2461 case ixgbe_mac_82598EB:
2384 2462 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2),
2385 2463 ring_mapping);
2386 2464 break;
2387 2465
2388 2466 case ixgbe_mac_82599EB:
2467 + case ixgbe_mac_X540:
2389 2468 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2),
2390 2469 ring_mapping);
2391 2470 break;
2392 2471
2393 2472 default:
2394 2473 break;
2395 2474 }
2396 2475
2397 2476 ring_mapping = 0;
2398 2477 }
2399 2478 }
2400 2479 if (i & 0x3) {
2401 2480 switch (hw->mac.type) {
2402 2481 case ixgbe_mac_82598EB:
2403 2482 IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i >> 2), ring_mapping);
2404 2483 break;
2405 2484
2406 2485 case ixgbe_mac_82599EB:
2486 + case ixgbe_mac_X540:
2407 2487 IXGBE_WRITE_REG(hw, IXGBE_TQSM(i >> 2), ring_mapping);
2408 2488 break;
2409 2489
2410 2490 default:
2411 2491 break;
2412 2492 }
2413 2493 }
2414 2494
2415 2495 /*
2416 2496 * Enable CRC appending and TX padding (for short tx frames)
2417 2497 */
2418 2498 reg_val = IXGBE_READ_REG(hw, IXGBE_HLREG0);
2419 2499 reg_val |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
2420 2500 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_val);
2421 2501
2422 2502 /*
2423 - * enable DMA for 82599 parts
2503 + * enable DMA for 82599 and X540 parts
2424 2504 */
2425 - if (hw->mac.type == ixgbe_mac_82599EB) {
2505 + if (hw->mac.type >= ixgbe_mac_82599EB) {
2426 2506 /* DMATXCTL.TE must be set after all Tx config is complete */
2427 2507 reg_val = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
2428 2508 reg_val |= IXGBE_DMATXCTL_TE;
2429 2509 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_val);
2510 +#if 0
2511 + /* XXX KEBE SAYS - FreeBSD sets up MTQC. Should we? */
2512 + /* Disable arbiter to set MTQC */
2513 + reg_val = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
2514 + reg_val |= IXGBE_RTTDCS_ARBDIS;
2515 + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2516 + IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
2517 + reg_val &= ~IXGBE_RTTDCS_ARBDIS;
2518 + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg_val);
2519 +#endif
2430 2520 }
2431 2521
2432 2522 /*
2433 2523 	 * Enable tx queues.
2434 2524 	 * For 82599 this must be done after DMATXCTL.TE is set.
2435 2525 */
2436 2526 for (i = 0; i < ixgbe->num_tx_rings; i++) {
2437 2527 tx_ring = &ixgbe->tx_rings[i];
2438 2528 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->index));
2439 2529 reg_val |= IXGBE_TXDCTL_ENABLE;
2440 2530 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->index), reg_val);
2441 2531 }
2442 2532 }
2443 2533
2444 2534 /*
2445 2535 * ixgbe_setup_rss - Setup receive-side scaling feature.
2446 2536 */
2447 2537 static void
2448 2538 ixgbe_setup_rss(ixgbe_t *ixgbe)
2449 2539 {
2450 2540 struct ixgbe_hw *hw = &ixgbe->hw;
2451 2541 uint32_t i, mrqc, rxcsum;
2452 2542 uint32_t random;
2453 2543 uint32_t reta;
2454 2544 uint32_t ring_per_group;
2455 2545
2456 2546 /*
2457 2547 * Fill out redirection table
2458 2548 */
2459 2549 reta = 0;
2460 2550 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2461 2551
2462 2552 for (i = 0; i < 128; i++) {
2463 2553 reta = (reta << 8) | (i % ring_per_group) |
2464 2554 ((i % ring_per_group) << 4);
2465 2555 if ((i & 3) == 3)
2466 2556 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2467 2557 }
2468 2558
2469 2559 /*
2470 2560 * Fill out hash function seeds with a random constant
2471 2561 */
2472 2562 for (i = 0; i < 10; i++) {
2473 2563 (void) random_get_pseudo_bytes((uint8_t *)&random,
2474 2564 sizeof (uint32_t));
2475 2565 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2476 2566 }
2477 2567
2478 2568 /*
2479 2569 * Enable RSS & perform hash on these packet types
2480 2570 */
2481 2571 mrqc = IXGBE_MRQC_RSSEN |
2482 2572 IXGBE_MRQC_RSS_FIELD_IPV4 |
2483 2573 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2484 2574 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2485 2575 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2486 2576 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2487 2577 IXGBE_MRQC_RSS_FIELD_IPV6 |
2488 2578 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2489 2579 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2490 2580 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2491 2581 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2492 2582
2493 2583 /*
2494 2584 * Disable Packet Checksum to enable RSS for multiple receive queues.
2495 2585 * It is an adapter hardware limitation that Packet Checksum is
2496 2586 * mutually exclusive with RSS.
2497 2587 */
2498 2588 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2499 2589 rxcsum |= IXGBE_RXCSUM_PCSD;
2500 2590 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2501 2591 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2502 2592 }
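
The RETA loop above packs one queue index per byte (duplicated into both nibbles) and flushes a register every fourth entry. A worked example, assuming ring_per_group == 4:

	/*
	 * Example only: entries 0..3 produce bytes 0x00, 0x11, 0x22,
	 * 0x33, so the first flush writes 0x00112233 to RETA(0).
	 */
	uint32_t ex_reta = 0, ex_i;
	for (ex_i = 0; ex_i < 4; ex_i++)
		ex_reta = (ex_reta << 8) | (ex_i % 4) | ((ex_i % 4) << 4);
	ASSERT(ex_reta == 0x00112233);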
2503 2593
2504 2594 /*
2505 2595 * ixgbe_setup_vmdq - Setup MAC classification feature
2506 2596 */
2507 2597 static void
2508 2598 ixgbe_setup_vmdq(ixgbe_t *ixgbe)
2509 2599 {
2510 2600 struct ixgbe_hw *hw = &ixgbe->hw;
2511 2601 uint32_t vmdctl, i, vtctl;
2512 2602
2513 2603 /*
2514 2604 * Setup the VMDq Control register, enable VMDq based on
2515 2605 * packet destination MAC address:
2516 2606 */
2517 2607 switch (hw->mac.type) {
2518 2608 case ixgbe_mac_82598EB:
2519 2609 /*
2520 2610 * VMDq Enable = 1;
2521 2611 * VMDq Filter = 0; MAC filtering
2522 2612 * Default VMDq output index = 0;
2523 2613 */
2524 2614 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2525 2615 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2526 2616 break;
2527 2617
2528 2618 case ixgbe_mac_82599EB:
2619 + case ixgbe_mac_X540:
2529 2620 /*
2530 2621 * Enable VMDq-only.
2531 2622 */
2532 2623 vmdctl = IXGBE_MRQC_VMDQEN;
2533 2624 IXGBE_WRITE_REG(hw, IXGBE_MRQC, vmdctl);
2534 2625
2535 2626 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2536 2627 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2537 2628 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2538 2629 }
2539 2630
2540 2631 /*
2541 2632 * Enable Virtualization and Replication.
2542 2633 */
2543 2634 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2544 2635 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2545 2636
2546 2637 /*
2547 2638 * Enable receiving packets to all VFs
2548 2639 */
2549 2640 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2550 2641 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2551 2642 break;
2552 2643
2553 2644 default:
2554 2645 break;
2555 2646 }
2556 2647 }
2557 2648
2558 2649 /*
2559 2650 * ixgbe_setup_vmdq_rss - Setup both vmdq feature and rss feature.
2560 2651 */
2561 2652 static void
2562 2653 ixgbe_setup_vmdq_rss(ixgbe_t *ixgbe)
2563 2654 {
2564 2655 struct ixgbe_hw *hw = &ixgbe->hw;
2565 2656 uint32_t i, mrqc, rxcsum;
2566 2657 uint32_t random;
2567 2658 uint32_t reta;
2568 2659 uint32_t ring_per_group;
2569 2660 uint32_t vmdctl, vtctl;
2570 2661
2571 2662 /*
2572 2663 * Fill out redirection table
2573 2664 */
2574 2665 reta = 0;
2575 2666 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2576 2667 for (i = 0; i < 128; i++) {
2577 2668 reta = (reta << 8) | (i % ring_per_group) |
2578 2669 ((i % ring_per_group) << 4);
2579 2670 if ((i & 3) == 3)
2580 2671 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
2581 2672 }
2582 2673
2583 2674 /*
2584 2675 * Fill out hash function seeds with a random constant
2585 2676 */
2586 2677 for (i = 0; i < 10; i++) {
2587 2678 (void) random_get_pseudo_bytes((uint8_t *)&random,
2588 2679 sizeof (uint32_t));
2589 2680 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random);
2590 2681 }
2591 2682
2592 2683 /*
2593 2684 * Enable and setup RSS and VMDq
2594 2685 */
2595 2686 switch (hw->mac.type) {
2596 2687 case ixgbe_mac_82598EB:
2597 2688 /*
2598 2689 * Enable RSS & Setup RSS Hash functions
2599 2690 */
2600 2691 mrqc = IXGBE_MRQC_RSSEN |
2601 2692 IXGBE_MRQC_RSS_FIELD_IPV4 |
2602 2693 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2603 2694 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2604 2695 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2605 2696 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2606 2697 IXGBE_MRQC_RSS_FIELD_IPV6 |
2607 2698 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2608 2699 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2609 2700 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2610 2701 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2611 2702
2612 2703 /*
2613 2704 * Enable and Setup VMDq
2614 2705 * VMDq Filter = 0; MAC filtering
2615 2706 * Default VMDq output index = 0;
2616 2707 */
2617 2708 vmdctl = IXGBE_VMD_CTL_VMDQ_EN;
2618 2709 IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl);
2619 2710 break;
2620 2711
2621 2712 case ixgbe_mac_82599EB:
2713 + case ixgbe_mac_X540:
2622 2714 /*
2623 2715 * Enable RSS & Setup RSS Hash functions
2624 2716 */
2625 2717 mrqc = IXGBE_MRQC_RSS_FIELD_IPV4 |
2626 2718 IXGBE_MRQC_RSS_FIELD_IPV4_TCP |
2627 2719 IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2628 2720 IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP |
2629 2721 IXGBE_MRQC_RSS_FIELD_IPV6_EX |
2630 2722 IXGBE_MRQC_RSS_FIELD_IPV6 |
2631 2723 IXGBE_MRQC_RSS_FIELD_IPV6_TCP |
2632 2724 IXGBE_MRQC_RSS_FIELD_IPV6_UDP |
2633 2725 IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
2634 2726
2635 2727 /*
2636 2728 * Enable VMDq+RSS.
2637 2729 */
2638 2730 if (ixgbe->num_rx_groups > 32) {
2639 2731 mrqc = mrqc | IXGBE_MRQC_VMDQRSS64EN;
2640 2732 } else {
2641 2733 mrqc = mrqc | IXGBE_MRQC_VMDQRSS32EN;
2642 2734 }
2643 2735
2644 2736 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
2645 2737
2646 2738 for (i = 0; i < hw->mac.num_rar_entries; i++) {
2647 2739 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(i), 0);
2648 2740 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(i), 0);
2649 2741 }
2650 2742 break;
2651 2743
2652 2744 default:
2653 2745 break;
2654 2746
2655 2747 }
2656 2748
2657 2749 /*
2658 2750 * Disable Packet Checksum to enable RSS for multiple receive queues.
2659 2751 * It is an adapter hardware limitation that Packet Checksum is
2660 2752 * mutually exclusive with RSS.
2661 2753 */
2662 2754 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
2663 2755 rxcsum |= IXGBE_RXCSUM_PCSD;
2664 2756 rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
2665 2757 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
2666 2758
2667 - if (hw->mac.type == ixgbe_mac_82599EB) {
2759 + if (hw->mac.type >= ixgbe_mac_82599EB) {
2668 2760 /*
2669 2761 * Enable Virtualization and Replication.
2670 2762 */
2671 2763 vtctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
2672 2764 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
2673 2765
2674 2766 /*
2675 2767 * Enable receiving packets to all VFs
2676 2768 */
2677 2769 IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), IXGBE_VFRE_ENABLE_ALL);
2678 2770 IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), IXGBE_VFRE_ENABLE_ALL);
2679 2771 }
2680 2772 }
2681 2773
2682 2774 /*
2683 2775 * ixgbe_init_unicst - Initialize the unicast addresses.
2684 2776 */
2685 2777 static void
2686 2778 ixgbe_init_unicst(ixgbe_t *ixgbe)
2687 2779 {
2688 2780 struct ixgbe_hw *hw = &ixgbe->hw;
2689 2781 uint8_t *mac_addr;
2690 2782 int slot;
2691 2783 /*
2692 2784 * Here we should consider two situations:
2693 2785 *
2694 2786 	 * 1. The chipset is initialized for the first time:
2695 2787 	 *    clear all the multiple unicast addresses.
2696 2788 	 *
2697 2789 	 * 2. The chipset is reset:
2698 2790 * Recover the multiple unicast addresses from the
2699 2791 * software data structure to the RAR registers.
2700 2792 */
2701 2793 if (!ixgbe->unicst_init) {
2702 2794 /*
2703 2795 * Initialize the multiple unicast addresses
2704 2796 */
2705 2797 ixgbe->unicst_total = hw->mac.num_rar_entries;
2706 2798 ixgbe->unicst_avail = ixgbe->unicst_total;
2707 2799 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2708 2800 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2709 2801 bzero(mac_addr, ETHERADDRL);
2710 2802 (void) ixgbe_set_rar(hw, slot, mac_addr, NULL, NULL);
2711 2803 ixgbe->unicst_addr[slot].mac.set = 0;
2712 2804 }
2713 2805 ixgbe->unicst_init = B_TRUE;
2714 2806 } else {
2715 2807 /* Re-configure the RAR registers */
2716 2808 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2717 2809 mac_addr = ixgbe->unicst_addr[slot].mac.addr;
2718 2810 if (ixgbe->unicst_addr[slot].mac.set == 1) {
2719 2811 (void) ixgbe_set_rar(hw, slot, mac_addr,
2720 2812 ixgbe->unicst_addr[slot].mac.group_index,
2721 2813 IXGBE_RAH_AV);
2722 2814 } else {
2723 2815 bzero(mac_addr, ETHERADDRL);
2724 2816 (void) ixgbe_set_rar(hw, slot, mac_addr,
2725 2817 NULL, NULL);
2726 2818 }
2727 2819 }
2728 2820 }
2729 2821 }
2730 2822
2731 2823 /*
2732 2824 * ixgbe_unicst_find - Find the slot for the specified unicast address
2733 2825 */
2734 2826 int
2735 2827 ixgbe_unicst_find(ixgbe_t *ixgbe, const uint8_t *mac_addr)
2736 2828 {
2737 2829 int slot;
2738 2830
2739 2831 ASSERT(mutex_owned(&ixgbe->gen_lock));
2740 2832
2741 2833 for (slot = 0; slot < ixgbe->unicst_total; slot++) {
2742 2834 if (bcmp(ixgbe->unicst_addr[slot].mac.addr,
2743 2835 mac_addr, ETHERADDRL) == 0)
2744 2836 return (slot);
2745 2837 }
2746 2838
2747 2839 return (-1);
2748 2840 }
2749 2841
2750 2842 /*
2751 2843 * ixgbe_multicst_add - Add a multicst address.
2752 2844 */
2753 2845 int
2754 2846 ixgbe_multicst_add(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2755 2847 {
2756 2848 ASSERT(mutex_owned(&ixgbe->gen_lock));
2757 2849
2758 2850 if ((multiaddr[0] & 01) == 0) {
2759 2851 return (EINVAL);
2760 2852 }
2761 2853
2762 2854 if (ixgbe->mcast_count >= MAX_NUM_MULTICAST_ADDRESSES) {
2763 2855 return (ENOENT);
2764 2856 }
2765 2857
2766 2858 bcopy(multiaddr,
2767 2859 &ixgbe->mcast_table[ixgbe->mcast_count], ETHERADDRL);
2768 2860 ixgbe->mcast_count++;
2769 2861
2770 2862 /*
2771 2863 * Update the multicast table in the hardware
2772 2864 */
2773 2865 ixgbe_setup_multicst(ixgbe);
2774 2866
2775 2867 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2776 2868 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2777 2869 return (EIO);
2778 2870 }
2779 2871
2780 2872 return (0);
2781 2873 }
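
The (multiaddr[0] & 01) test above checks the Ethernet group bit, the least-significant bit of the first octet, which every multicast address must have set. For example:

	/* Example only: an IPv4 multicast MAC passes the group-bit test. */
	const uint8_t ex_mc[ETHERADDRL] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	ASSERT((ex_mc[0] & 01) != 0);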
2782 2874
2783 2875 /*
2784 2876 * ixgbe_multicst_remove - Remove a multicst address.
2785 2877 */
2786 2878 int
2787 2879 ixgbe_multicst_remove(ixgbe_t *ixgbe, const uint8_t *multiaddr)
2788 2880 {
2789 2881 int i;
2790 2882
2791 2883 ASSERT(mutex_owned(&ixgbe->gen_lock));
2792 2884
2793 2885 for (i = 0; i < ixgbe->mcast_count; i++) {
2794 2886 if (bcmp(multiaddr, &ixgbe->mcast_table[i],
2795 2887 ETHERADDRL) == 0) {
2796 2888 for (i++; i < ixgbe->mcast_count; i++) {
2797 2889 ixgbe->mcast_table[i - 1] =
2798 2890 ixgbe->mcast_table[i];
2799 2891 }
2800 2892 ixgbe->mcast_count--;
2801 2893 break;
2802 2894 }
2803 2895 }
2804 2896
2805 2897 /*
2806 2898 * Update the multicast table in the hardware
2807 2899 */
2808 2900 ixgbe_setup_multicst(ixgbe);
2809 2901
2810 2902 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
2811 2903 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
2812 2904 return (EIO);
2813 2905 }
2814 2906
2815 2907 return (0);
2816 2908 }
2817 2909
2818 2910 /*
2819 2911  * ixgbe_setup_multicst - Setup multicast data structures.
2820 2912  *
2821 2913  * This routine initializes all of the multicast related structures
2822 2914  * and saves them in the hardware registers.
2823 2915 */
2824 2916 static void
2825 2917 ixgbe_setup_multicst(ixgbe_t *ixgbe)
2826 2918 {
2827 2919 uint8_t *mc_addr_list;
2828 2920 uint32_t mc_addr_count;
2829 2921 struct ixgbe_hw *hw = &ixgbe->hw;
2830 2922
2831 2923 ASSERT(mutex_owned(&ixgbe->gen_lock));
2832 2924
2833 2925 ASSERT(ixgbe->mcast_count <= MAX_NUM_MULTICAST_ADDRESSES);
2834 2926
2835 2927 mc_addr_list = (uint8_t *)ixgbe->mcast_table;
2836 2928 mc_addr_count = ixgbe->mcast_count;
2837 2929
2838 2930 /*
2839 2931 * Update the multicast addresses to the MTA registers
2840 2932 */
2841 2933 (void) ixgbe_update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
2842 - ixgbe_mc_table_itr);
2934 + ixgbe_mc_table_itr, TRUE);
2843 2935 }
2844 2936
2845 2937 /*
2846 2938 * ixgbe_setup_vmdq_rss_conf - Configure vmdq and rss (number and mode).
2847 2939 *
2848 2940 * Configure the rx classification mode (vmdq & rss) and vmdq & rss numbers.
2849 2941 * Different chipsets may have different allowed configuration of vmdq and rss.
2850 2942 */
2851 2943 static void
2852 2944 ixgbe_setup_vmdq_rss_conf(ixgbe_t *ixgbe)
2853 2945 {
2854 2946 struct ixgbe_hw *hw = &ixgbe->hw;
2855 2947 uint32_t ring_per_group;
2856 2948
2857 2949 switch (hw->mac.type) {
2858 2950 case ixgbe_mac_82598EB:
2859 2951 /*
2860 2952 * 82598 supports the following combination:
2861 2953 * vmdq no. x rss no.
2862 2954 * [5..16] x 1
2863 2955 * [1..4] x [1..16]
2864 2956 		 * However, 8 rss queues per pool (vmdq) are sufficient for
2865 2957 * most cases.
2866 2958 */
2867 2959 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2868 2960 if (ixgbe->num_rx_groups > 4) {
2869 2961 ixgbe->num_rx_rings = ixgbe->num_rx_groups;
2870 2962 } else {
2871 2963 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2872 2964 min(8, ring_per_group);
2873 2965 }
2874 2966
2875 2967 break;
2876 2968
2877 2969 case ixgbe_mac_82599EB:
2970 + case ixgbe_mac_X540:
2878 2971 /*
2879 2972 * 82599 supports the following combination:
2880 2973 * vmdq no. x rss no.
2881 2974 * [33..64] x [1..2]
2882 2975 * [2..32] x [1..4]
2883 2976 * 1 x [1..16]
2884 2977 		 * However, 8 rss queues per pool (vmdq) are sufficient for
2885 2978 * most cases.
2979 + *
2980 + * For now, treat X540 like the 82599.
2886 2981 */
2887 2982 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2888 2983 if (ixgbe->num_rx_groups == 1) {
2889 2984 ixgbe->num_rx_rings = min(8, ring_per_group);
2890 2985 } else if (ixgbe->num_rx_groups <= 32) {
2891 2986 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2892 2987 min(4, ring_per_group);
2893 2988 } else if (ixgbe->num_rx_groups <= 64) {
2894 2989 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
2895 2990 min(2, ring_per_group);
2896 2991 }
2897 2992 break;
2898 2993
2899 2994 default:
2900 2995 break;
2901 2996 }
2902 2997
2903 2998 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
2904 2999
2905 3000 if (ixgbe->num_rx_groups == 1 && ring_per_group == 1) {
2906 3001 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
2907 3002 } else if (ixgbe->num_rx_groups != 1 && ring_per_group == 1) {
2908 3003 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ;
2909 3004 } else if (ixgbe->num_rx_groups != 1 && ring_per_group != 1) {
2910 3005 ixgbe->classify_mode = IXGBE_CLASSIFY_VMDQ_RSS;
2911 3006 } else {
2912 3007 ixgbe->classify_mode = IXGBE_CLASSIFY_RSS;
2913 3008 }
2914 3009
2915 3010 IXGBE_DEBUGLOG_2(ixgbe, "rx group number:%d, rx ring number:%d",
2916 3011 ixgbe->num_rx_groups, ixgbe->num_rx_rings);
2917 3012 }
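
A worked instance of the adjustment above, with illustrative values: an 82599 configured with num_rx_groups == 8 and num_rx_rings == 32 starts with ring_per_group == 4; the 2..32-group branch keeps num_rx_rings at 8 * min(4, 4) == 32, ring_per_group stays 4, and since both the group count and ring_per_group exceed one, classify_mode resolves to IXGBE_CLASSIFY_VMDQ_RSS.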
2918 3013
2919 3014 /*
2920 3015 * ixgbe_get_conf - Get driver configurations set in driver.conf.
2921 3016 *
2922 3017 * This routine gets user-configured values out of the configuration
2923 3018 * file ixgbe.conf.
2924 3019 *
2925 3020 * For each configurable value, there is a minimum, a maximum, and a
2926 3021 * default.
2927 3022 * If user does not configure a value, use the default.
2928 3023  * If user configures below the minimum, use the minimum.
2929 3024  * If user configures above the maximum, use the maximum.
2930 3025 */
2931 3026 static void
2932 3027 ixgbe_get_conf(ixgbe_t *ixgbe)
2933 3028 {
2934 3029 struct ixgbe_hw *hw = &ixgbe->hw;
2935 3030 uint32_t flow_control;
2936 3031
2937 3032 /*
2938 3033 * ixgbe driver supports the following user configurations:
2939 3034 *
2940 3035 * Jumbo frame configuration:
2941 3036 * default_mtu
2942 3037 *
2943 3038 * Ethernet flow control configuration:
2944 3039 * flow_control
2945 3040 *
2946 3041 * Multiple rings configurations:
2947 3042 * tx_queue_number
2948 3043 * tx_ring_size
2949 3044 * rx_queue_number
2950 3045 * rx_ring_size
2951 3046 *
2952 3047 * Call ixgbe_get_prop() to get the value for a specific
2953 3048 * configuration parameter.
2954 3049 */
2955 3050
2956 3051 /*
2957 3052 * Jumbo frame configuration - max_frame_size controls host buffer
2958 3053 	 * allocation, so it includes the MTU, ethernet header, vlan tag and
2959 3054 * frame check sequence.
2960 3055 */
2961 3056 ixgbe->default_mtu = ixgbe_get_prop(ixgbe, PROP_DEFAULT_MTU,
2962 3057 MIN_MTU, ixgbe->capab->max_mtu, DEFAULT_MTU);
2963 3058
2964 3059 ixgbe->max_frame_size = ixgbe->default_mtu +
2965 3060 sizeof (struct ether_vlan_header) + ETHERFCSL;
2966 3061
2967 3062 /*
2968 3063 * Ethernet flow control configuration
2969 3064 */
2970 3065 flow_control = ixgbe_get_prop(ixgbe, PROP_FLOW_CONTROL,
2971 3066 ixgbe_fc_none, 3, ixgbe_fc_none);
2972 3067 if (flow_control == 3)
2973 3068 flow_control = ixgbe_fc_default;
2974 3069
2975 3070 /*
2976 3071 * fc.requested mode is what the user requests. After autoneg,
2977 3072 * fc.current_mode will be the flow_control mode that was negotiated.
2978 3073 */
2979 3074 hw->fc.requested_mode = flow_control;
2980 3075
2981 3076 /*
2982 3077 * Multiple rings configurations
2983 3078 */
2984 3079 ixgbe->num_tx_rings = ixgbe_get_prop(ixgbe, PROP_TX_QUEUE_NUM,
2985 3080 ixgbe->capab->min_tx_que_num,
2986 3081 ixgbe->capab->max_tx_que_num,
2987 3082 ixgbe->capab->def_tx_que_num);
2988 3083 ixgbe->tx_ring_size = ixgbe_get_prop(ixgbe, PROP_TX_RING_SIZE,
2989 3084 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE, DEFAULT_TX_RING_SIZE);
2990 3085
2991 3086 ixgbe->num_rx_rings = ixgbe_get_prop(ixgbe, PROP_RX_QUEUE_NUM,
2992 3087 ixgbe->capab->min_rx_que_num,
2993 3088 ixgbe->capab->max_rx_que_num,
2994 3089 ixgbe->capab->def_rx_que_num);
2995 3090 ixgbe->rx_ring_size = ixgbe_get_prop(ixgbe, PROP_RX_RING_SIZE,
2996 3091 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE, DEFAULT_RX_RING_SIZE);
2997 3092
2998 3093 /*
2999 3094 * Multiple groups configuration
3000 3095 */
3001 3096 ixgbe->num_rx_groups = ixgbe_get_prop(ixgbe, PROP_RX_GROUP_NUM,
3002 3097 ixgbe->capab->min_rx_grp_num, ixgbe->capab->max_rx_grp_num,
3003 3098 ixgbe->capab->def_rx_grp_num);
3004 3099
3005 3100 ixgbe->mr_enable = ixgbe_get_prop(ixgbe, PROP_MR_ENABLE,
3006 3101 0, 1, DEFAULT_MR_ENABLE);
3007 3102
3008 3103 if (ixgbe->mr_enable == B_FALSE) {
3009 3104 ixgbe->num_tx_rings = 1;
3010 3105 ixgbe->num_rx_rings = 1;
3011 3106 ixgbe->num_rx_groups = 1;
3012 3107 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
3013 3108 } else {
3014 3109 ixgbe->num_rx_rings = ixgbe->num_rx_groups *
3015 3110 max(ixgbe->num_rx_rings / ixgbe->num_rx_groups, 1);
3016 3111 /*
3017 3112 * The combination of num_rx_rings and num_rx_groups
3018 3113 * may be not supported by h/w. We need to adjust
3019 3114 * them to appropriate values.
3020 3115 */
3021 3116 ixgbe_setup_vmdq_rss_conf(ixgbe);
3022 3117 }
3023 3118
3024 3119 /*
3025 3120 * Tunable used to force an interrupt type. The only use is
3026 3121 * for testing of the lesser interrupt types.
3027 3122 * 0 = don't force interrupt type
3028 3123 * 1 = force interrupt type MSI-X
3029 3124 * 2 = force interrupt type MSI
3030 3125 * 3 = force interrupt type Legacy
3031 3126 */
3032 3127 ixgbe->intr_force = ixgbe_get_prop(ixgbe, PROP_INTR_FORCE,
3033 3128 IXGBE_INTR_NONE, IXGBE_INTR_LEGACY, IXGBE_INTR_NONE);
3034 3129
3035 3130 ixgbe->tx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_TX_HCKSUM_ENABLE,
3036 3131 0, 1, DEFAULT_TX_HCKSUM_ENABLE);
3037 3132 ixgbe->rx_hcksum_enable = ixgbe_get_prop(ixgbe, PROP_RX_HCKSUM_ENABLE,
3038 3133 0, 1, DEFAULT_RX_HCKSUM_ENABLE);
3039 3134 ixgbe->lso_enable = ixgbe_get_prop(ixgbe, PROP_LSO_ENABLE,
3040 3135 0, 1, DEFAULT_LSO_ENABLE);
3041 3136 ixgbe->lro_enable = ixgbe_get_prop(ixgbe, PROP_LRO_ENABLE,
3042 3137 0, 1, DEFAULT_LRO_ENABLE);
3043 3138 ixgbe->tx_head_wb_enable = ixgbe_get_prop(ixgbe, PROP_TX_HEAD_WB_ENABLE,
3044 3139 0, 1, DEFAULT_TX_HEAD_WB_ENABLE);
3045 3140 ixgbe->relax_order_enable = ixgbe_get_prop(ixgbe,
3046 3141 PROP_RELAX_ORDER_ENABLE, 0, 1, DEFAULT_RELAX_ORDER_ENABLE);
3047 3142
3048 - /* Head Write Back not recommended for 82599 */
3143 + /* Head Write Back not recommended for 82599 and X540 */
3049 3144 if (hw->mac.type >= ixgbe_mac_82599EB) {
3050 3145 ixgbe->tx_head_wb_enable = B_FALSE;
3051 3146 }
3052 3147
3053 3148 /*
3054 3149 * ixgbe LSO needs the tx h/w checksum support.
3055 3150 * LSO will be disabled if tx h/w checksum is not
3056 3151 * enabled.
3057 3152 */
3058 3153 if (ixgbe->tx_hcksum_enable == B_FALSE) {
3059 3154 ixgbe->lso_enable = B_FALSE;
3060 3155 }
3061 3156
3062 3157 /*
3063 3158 * ixgbe LRO needs the rx h/w checksum support.
3064 3159 * LRO will be disabled if rx h/w checksum is not
3065 3160 * enabled.
3066 3161 */
3067 3162 if (ixgbe->rx_hcksum_enable == B_FALSE) {
3068 3163 ixgbe->lro_enable = B_FALSE;
3069 3164 }
3070 3165
3071 3166 /*
3072 - * ixgbe LRO only been supported by 82599 now
3167 +	 * ixgbe LRO is only supported by 82599 and X540 for now
3073 3168 */
3074 - if (hw->mac.type != ixgbe_mac_82599EB) {
3169 + if (hw->mac.type < ixgbe_mac_82599EB) {
3075 3170 ixgbe->lro_enable = B_FALSE;
3076 3171 }
3077 3172 ixgbe->tx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_TX_COPY_THRESHOLD,
3078 3173 MIN_TX_COPY_THRESHOLD, MAX_TX_COPY_THRESHOLD,
3079 3174 DEFAULT_TX_COPY_THRESHOLD);
3080 3175 ixgbe->tx_recycle_thresh = ixgbe_get_prop(ixgbe,
3081 3176 PROP_TX_RECYCLE_THRESHOLD, MIN_TX_RECYCLE_THRESHOLD,
3082 3177 MAX_TX_RECYCLE_THRESHOLD, DEFAULT_TX_RECYCLE_THRESHOLD);
3083 3178 ixgbe->tx_overload_thresh = ixgbe_get_prop(ixgbe,
3084 3179 PROP_TX_OVERLOAD_THRESHOLD, MIN_TX_OVERLOAD_THRESHOLD,
3085 3180 MAX_TX_OVERLOAD_THRESHOLD, DEFAULT_TX_OVERLOAD_THRESHOLD);
3086 3181 ixgbe->tx_resched_thresh = ixgbe_get_prop(ixgbe,
3087 3182 PROP_TX_RESCHED_THRESHOLD, MIN_TX_RESCHED_THRESHOLD,
3088 3183 MAX_TX_RESCHED_THRESHOLD, DEFAULT_TX_RESCHED_THRESHOLD);
3089 3184
3090 3185 ixgbe->rx_copy_thresh = ixgbe_get_prop(ixgbe, PROP_RX_COPY_THRESHOLD,
3091 3186 MIN_RX_COPY_THRESHOLD, MAX_RX_COPY_THRESHOLD,
3092 3187 DEFAULT_RX_COPY_THRESHOLD);
3093 3188 ixgbe->rx_limit_per_intr = ixgbe_get_prop(ixgbe, PROP_RX_LIMIT_PER_INTR,
3094 3189 MIN_RX_LIMIT_PER_INTR, MAX_RX_LIMIT_PER_INTR,
3095 3190 DEFAULT_RX_LIMIT_PER_INTR);
3096 3191
3097 3192 ixgbe->intr_throttling[0] = ixgbe_get_prop(ixgbe, PROP_INTR_THROTTLING,
3098 3193 ixgbe->capab->min_intr_throttle,
3099 3194 ixgbe->capab->max_intr_throttle,
3100 3195 ixgbe->capab->def_intr_throttle);
3101 3196 /*
3102 - * 82599 requires the interupt throttling rate is
3197 +	 * 82599 and X540 require that the interrupt throttling rate be
3103 3198 	 * a multiple of 8. This is enforced by the register
3104 3199 	 * definition.
3105 3200 */
3106 - if (hw->mac.type == ixgbe_mac_82599EB)
3201 + if (hw->mac.type >= ixgbe_mac_82599EB)
3107 3202 ixgbe->intr_throttling[0] = ixgbe->intr_throttling[0] & 0xFF8;
3108 3203 }
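
The & 0xFF8 mask above rounds the configured throttle down to a multiple of 8, matching the EITR interval granularity on these parts. A worked example with an illustrative input:

	/* Example only: 205 & 0xFF8 == 200, the next lower multiple of 8. */
	ASSERT((205 & 0xFF8) == 200);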
3109 3204
3110 3205 static void
3111 3206 ixgbe_init_params(ixgbe_t *ixgbe)
3112 3207 {
3113 3208 ixgbe->param_en_10000fdx_cap = 1;
3114 3209 ixgbe->param_en_1000fdx_cap = 1;
3115 3210 ixgbe->param_en_100fdx_cap = 1;
3116 3211 ixgbe->param_adv_10000fdx_cap = 1;
3117 3212 ixgbe->param_adv_1000fdx_cap = 1;
3118 3213 ixgbe->param_adv_100fdx_cap = 1;
3119 3214
3120 3215 ixgbe->param_pause_cap = 1;
3121 3216 ixgbe->param_asym_pause_cap = 1;
3122 3217 ixgbe->param_rem_fault = 0;
3123 3218
3124 3219 ixgbe->param_adv_autoneg_cap = 1;
3125 3220 ixgbe->param_adv_pause_cap = 1;
3126 3221 ixgbe->param_adv_asym_pause_cap = 1;
3127 3222 ixgbe->param_adv_rem_fault = 0;
3128 3223
3129 3224 ixgbe->param_lp_10000fdx_cap = 0;
3130 3225 ixgbe->param_lp_1000fdx_cap = 0;
3131 3226 ixgbe->param_lp_100fdx_cap = 0;
3132 3227 ixgbe->param_lp_autoneg_cap = 0;
3133 3228 ixgbe->param_lp_pause_cap = 0;
3134 3229 ixgbe->param_lp_asym_pause_cap = 0;
3135 3230 ixgbe->param_lp_rem_fault = 0;
3136 3231 }
3137 3232
3138 3233 /*
3139 3234 * ixgbe_get_prop - Get a property value out of the configuration file
3140 3235 * ixgbe.conf.
3141 3236 *
3142 3237 * Caller provides the name of the property, a default value, a minimum
3143 3238 * value, and a maximum value.
3144 3239 *
3145 3240 * Return configured value of the property, with default, minimum and
3146 3241 * maximum properly applied.
3147 3242 */
3148 3243 static int
3149 3244 ixgbe_get_prop(ixgbe_t *ixgbe,
3150 3245 char *propname, /* name of the property */
3151 3246 int minval, /* minimum acceptable value */
3152 3247     int maxval,		/* maximum acceptable value */
3153 3248 int defval) /* default value */
3154 3249 {
3155 3250 int value;
3156 3251
3157 3252 /*
3158 3253 * Call ddi_prop_get_int() to read the conf settings
3159 3254 */
3160 3255 value = ddi_prop_get_int(DDI_DEV_T_ANY, ixgbe->dip,
3161 3256 DDI_PROP_DONTPASS, propname, defval);
3162 3257 if (value > maxval)
3163 3258 value = maxval;
3164 3259
3165 3260 if (value < minval)
3166 3261 value = minval;
3167 3262
3168 3263 return (value);
3169 3264 }
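
A hedged usage sketch of ixgbe_get_prop() (the property name here is hypothetical; real names such as default_mtu are listed in the ixgbe_get_conf() comment above and are set in ixgbe.conf as name = value; pairs):

	/* Read hypothetical "example_prop", clamped to [0, 16], default 4. */
	int ex_val = ixgbe_get_prop(ixgbe, "example_prop", 0, 16, 4);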
3170 3265
3171 3266 /*
3172 3267 * ixgbe_driver_setup_link - Using the link properties to setup the link.
3173 3268 */
3174 3269 int
3175 3270 ixgbe_driver_setup_link(ixgbe_t *ixgbe, boolean_t setup_hw)
3176 3271 {
3177 3272 u32 autoneg_advertised = 0;
3178 3273
3179 3274 /*
3180 3275 * No half duplex support with 10Gb parts
3181 3276 */
3182 3277 if (ixgbe->param_adv_10000fdx_cap == 1)
3183 3278 autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
3184 3279
3185 3280 if (ixgbe->param_adv_1000fdx_cap == 1)
3186 3281 autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
3187 3282
3188 3283 if (ixgbe->param_adv_100fdx_cap == 1)
3189 3284 autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
3190 3285
3191 3286 if (ixgbe->param_adv_autoneg_cap == 1 && autoneg_advertised == 0) {
3192 3287 ixgbe_notice(ixgbe, "Invalid link settings. Setup link "
3193 3288 "to autonegotiation with full link capabilities.");
3194 3289
3195 3290 autoneg_advertised = IXGBE_LINK_SPEED_10GB_FULL |
3196 3291 IXGBE_LINK_SPEED_1GB_FULL |
3197 3292 IXGBE_LINK_SPEED_100_FULL;
3198 3293 }
3199 3294
3200 3295 if (setup_hw) {
3201 3296 if (ixgbe_setup_link(&ixgbe->hw, autoneg_advertised,
3202 3297 ixgbe->param_adv_autoneg_cap, B_TRUE) != IXGBE_SUCCESS) {
3203 3298 ixgbe_notice(ixgbe, "Setup link failed on this "
3204 3299 "device.");
3205 3300 return (IXGBE_FAILURE);
3206 3301 }
3207 3302 }
3208 3303
3209 3304 return (IXGBE_SUCCESS);
3210 3305 }
3211 3306
3212 3307 /*
3213 3308 * ixgbe_driver_link_check - Link status processing.
3214 3309 *
3215 3310 * This function can be called in both kernel context and interrupt context
3216 3311 */
3217 3312 static void
3218 3313 ixgbe_driver_link_check(ixgbe_t *ixgbe)
3219 3314 {
3220 3315 struct ixgbe_hw *hw = &ixgbe->hw;
3221 3316 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
3222 3317 boolean_t link_up = B_FALSE;
3223 3318 boolean_t link_changed = B_FALSE;
3224 3319
3225 3320 ASSERT(mutex_owned(&ixgbe->gen_lock));
3226 3321
3227 3322 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3228 3323 if (link_up) {
3229 3324 ixgbe->link_check_complete = B_TRUE;
3230 3325
3231 3326 /* Link is up, enable flow control settings */
3232 - (void) ixgbe_fc_enable(hw, 0);
3327 + (void) ixgbe_fc_enable(hw);
3233 3328
3234 3329 /*
3235 3330 * The Link is up, check whether it was marked as down earlier
3236 3331 */
3237 3332 if (ixgbe->link_state != LINK_STATE_UP) {
3238 3333 switch (speed) {
3239 3334 case IXGBE_LINK_SPEED_10GB_FULL:
3240 3335 ixgbe->link_speed = SPEED_10GB;
3241 3336 break;
3242 3337 case IXGBE_LINK_SPEED_1GB_FULL:
3243 3338 ixgbe->link_speed = SPEED_1GB;
3244 3339 break;
3245 3340 case IXGBE_LINK_SPEED_100_FULL:
3246 3341 ixgbe->link_speed = SPEED_100;
3247 3342 }
3248 3343 ixgbe->link_duplex = LINK_DUPLEX_FULL;
3249 3344 ixgbe->link_state = LINK_STATE_UP;
3250 3345 link_changed = B_TRUE;
3251 3346 }
3252 3347 } else {
3253 3348 if (ixgbe->link_check_complete == B_TRUE ||
3254 3349 (ixgbe->link_check_complete == B_FALSE &&
3255 3350 gethrtime() >= ixgbe->link_check_hrtime)) {
3256 3351 /*
3257 3352 * The link is really down
3258 3353 */
3259 3354 ixgbe->link_check_complete = B_TRUE;
3260 3355
3261 3356 if (ixgbe->link_state != LINK_STATE_DOWN) {
3262 3357 ixgbe->link_speed = 0;
3263 3358 ixgbe->link_duplex = LINK_DUPLEX_UNKNOWN;
3264 3359 ixgbe->link_state = LINK_STATE_DOWN;
3265 3360 link_changed = B_TRUE;
3266 3361 }
3267 3362 }
3268 3363 }
3269 3364
3270 3365 /*
3271 3366 * If we are in an interrupt context, need to re-enable the
3272 3367 * interrupt, which was automasked
3273 3368 */
3274 3369 if (servicing_interrupt() != 0) {
3275 3370 ixgbe->eims |= IXGBE_EICR_LSC;
3276 3371 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3277 3372 }
3278 3373
3279 3374 if (link_changed) {
3280 3375 mac_link_update(ixgbe->mac_hdl, ixgbe->link_state);
3281 3376 }
3282 3377 }
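
Note the asymmetry in the down path above: LINK_STATE_DOWN is only reported once a prior link check has completed or the settling deadline (link_check_hrtime) has passed, so a transient "down" reading during link bring-up is ignored. A small user-level model of that decision; the times are hypothetical nanosecond values:

#include <stdio.h>
#include <stdint.h>

/* report "down" only after a completed check or a passed deadline */
static int
link_really_down(int check_complete, uint64_t now_ns, uint64_t deadline_ns)
{
        return (check_complete || now_ns >= deadline_ns);
}

int
main(void)
{
        uint64_t deadline = 9000000000ULL;      /* hypothetical, ~9 s */

        printf("%d\n", link_really_down(0, 5000000000ULL, deadline)); /* 0 */
        printf("%d\n", link_really_down(0, 9500000000ULL, deadline)); /* 1 */
        printf("%d\n", link_really_down(1, 0, deadline));             /* 1 */
        return (0);
}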
3283 3378
3284 3379 /*
3285 3380 * ixgbe_sfp_check - sfp module processing done in taskq only for 82599.
3286 3381 */
3287 3382 static void
3288 3383 ixgbe_sfp_check(void *arg)
3289 3384 {
3290 3385 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3291 3386 uint32_t eicr = ixgbe->eicr;
3292 3387 struct ixgbe_hw *hw = &ixgbe->hw;
3293 3388
3294 3389 mutex_enter(&ixgbe->gen_lock);
3295 3390 if (eicr & IXGBE_EICR_GPI_SDP1) {
3296 3391 /* clear the interrupt */
3297 3392 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
3298 3393
3299 3394 /* if link up, do multispeed fiber setup */
3300 3395 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3301 3396 B_TRUE, B_TRUE);
3302 3397 ixgbe_driver_link_check(ixgbe);
3303 3398 ixgbe_get_hw_state(ixgbe);
3304 3399 } else if (eicr & IXGBE_EICR_GPI_SDP2) {
3305 3400 /* clear the interrupt */
3306 3401 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
3307 3402
3308 3403 /* if link up, do sfp module setup */
3309 3404 (void) hw->mac.ops.setup_sfp(hw);
3310 3405
3311 3406 /* do multispeed fiber setup */
3312 3407 (void) ixgbe_setup_link(hw, IXGBE_LINK_SPEED_82599_AUTONEG,
3313 3408 B_TRUE, B_TRUE);
3314 3409 ixgbe_driver_link_check(ixgbe);
3315 3410 ixgbe_get_hw_state(ixgbe);
3316 3411 }
3317 3412 mutex_exit(&ixgbe->gen_lock);
3318 3413
3319 3414 /*
3320 3415 * We need to fully re-check the link later.
3321 3416 */
3322 3417 ixgbe->link_check_complete = B_FALSE;
3323 3418 ixgbe->link_check_hrtime = gethrtime() +
3324 3419 (IXGBE_LINK_UP_TIME * 100000000ULL);
3325 3420 }
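
The deadline computed above is gethrtime()-relative, i.e. in nanoseconds. Assuming IXGBE_LINK_UP_TIME is 90 in 0.1-second units, as in the Intel shared code, the settling window works out to 9 seconds; a one-line check:

#include <stdio.h>

#define LINK_UP_TIME    90      /* assumed IXGBE_LINK_UP_TIME, 0.1-s units */

int
main(void)
{
        unsigned long long wait_ns = LINK_UP_TIME * 100000000ULL;

        /* 90 * 1e8 ns = 9e9 ns, i.e. a 9-second settling window */
        printf("settling window: %llu ns (%.1f s)\n", wait_ns, wait_ns / 1e9);
        return (0);
}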
3326 3421
3327 3422 /*
3328 3423 * ixgbe_overtemp_check - overtemp module processing done in taskq
3329 3424 *
3330 3425 * This routine will only be called on adapters with a temperature sensor.
3331 3426 * The indication of over-temperature can be either an SDP0 interrupt or
3332 3427 * the link status change interrupt.
3333 3428 */
3334 3429 static void
3335 3430 ixgbe_overtemp_check(void *arg)
3336 3431 {
3337 3432 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3338 3433 struct ixgbe_hw *hw = &ixgbe->hw;
3339 3434 uint32_t eicr = ixgbe->eicr;
3340 3435 ixgbe_link_speed speed;
3341 3436 boolean_t link_up;
3342 3437
3343 3438 mutex_enter(&ixgbe->gen_lock);
3344 3439
3345 3440 /* make sure we know current state of link */
3346 3441 (void) ixgbe_check_link(hw, &speed, &link_up, false);
3347 3442
3348 3443 /* check over-temp condition */
3349 3444 if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
3350 3445 (eicr & IXGBE_EICR_LSC)) {
3351 3446 if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP) {
3352 3447 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
3353 3448
3354 3449 /*
3355 3450 * Disable the adapter interrupts
3356 3451 */
3357 3452 ixgbe_disable_adapter_interrupts(ixgbe);
3358 3453
3359 3454 /*
3360 3455 * Disable Rx/Tx units
3361 3456 */
3362 3457 (void) ixgbe_stop_adapter(hw);
3363 3458
3364 3459 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
3365 3460 ixgbe_error(ixgbe,
3366 3461 "Problem: Network adapter has been stopped "
3367 3462 "because it has overheated");
3368 3463 ixgbe_error(ixgbe,
3369 3464 "Action: Restart the computer. "
3370 3465 "If the problem persists, power off the system "
3371 3466 "and replace the adapter");
3372 3467 }
3373 3468 }
3374 3469
3375 3470 /* write to clear the interrupt */
3376 3471 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
3377 3472
3378 3473 mutex_exit(&ixgbe->gen_lock);
3379 3474 }
3380 3475
3381 3476 /*
3382 3477 * ixgbe_link_timer - timer for link status detection
3383 3478 */
3384 3479 static void
3385 3480 ixgbe_link_timer(void *arg)
3386 3481 {
3387 3482 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3388 3483
3389 3484 mutex_enter(&ixgbe->gen_lock);
3390 3485 ixgbe_driver_link_check(ixgbe);
3391 3486 mutex_exit(&ixgbe->gen_lock);
3392 3487 }
3393 3488
3394 3489 /*
3395 3490 * ixgbe_local_timer - Driver watchdog function.
3396 3491 *
3397 3492 * This function handles the transmit stall check and error recovery.
3398 3493 */
3399 3494 static void
3400 3495 ixgbe_local_timer(void *arg)
3401 3496 {
3402 3497 ixgbe_t *ixgbe = (ixgbe_t *)arg;
3403 3498
3404 3499 if (ixgbe->ixgbe_state & IXGBE_OVERTEMP)
3405 3500 goto out;
3406 3501
3407 3502 if (ixgbe->ixgbe_state & IXGBE_ERROR) {
3408 3503 ixgbe->reset_count++;
3409 3504 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3410 3505 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3411 3506 goto out;
3412 3507 }
3413 3508
3414 3509 if (ixgbe_stall_check(ixgbe)) {
3415 3510 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_STALL);
3416 3511 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3417 3512
3418 3513 ixgbe->reset_count++;
3419 3514 if (ixgbe_reset(ixgbe) == IXGBE_SUCCESS)
3420 3515 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_RESTORED);
3421 3516 }
3422 3517
3423 3518 out:
3424 3519 ixgbe_restart_watchdog_timer(ixgbe);
3425 3520 }
3426 3521
3427 3522 /*
3428 3523 * ixgbe_stall_check - Check for transmit stall.
3429 3524 *
3430 3525 * This function checks if the adapter is stalled (in transmit).
3431 3526 *
3432 3527 * It is called each time the watchdog timeout is invoked.
3433 3528 * If the transmit descriptor reclaim continuously fails,
3434 3529 * the watchdog value will increment by 1. If the watchdog
3435 3530 * value exceeds the threshold, the ixgbe is assumed to
3436 3531 * have stalled and needs to be reset.
3437 3532 */
3438 3533 static boolean_t
3439 3534 ixgbe_stall_check(ixgbe_t *ixgbe)
3440 3535 {
3441 3536 ixgbe_tx_ring_t *tx_ring;
3442 3537 boolean_t result;
3443 3538 int i;
3444 3539
3445 3540 if (ixgbe->link_state != LINK_STATE_UP)
3446 3541 return (B_FALSE);
3447 3542
3448 3543 /*
3449 3544 * If any tx ring is stalled, we'll reset the chipset
3450 3545 */
3451 3546 result = B_FALSE;
3452 3547 for (i = 0; i < ixgbe->num_tx_rings; i++) {
3453 3548 tx_ring = &ixgbe->tx_rings[i];
3454 3549 if (tx_ring->tbd_free <= ixgbe->tx_recycle_thresh) {
3455 3550 tx_ring->tx_recycle(tx_ring);
3456 3551 }
3457 3552
3458 3553 if (tx_ring->recycle_fail > 0)
3459 3554 tx_ring->stall_watchdog++;
3460 3555 else
3461 3556 tx_ring->stall_watchdog = 0;
3462 3557
3463 3558 if (tx_ring->stall_watchdog >= STALL_WATCHDOG_TIMEOUT) {
3464 3559 result = B_TRUE;
3465 3560 break;
3466 3561 }
3467 3562 }
3468 3563
3469 3564 if (result) {
3470 3565 tx_ring->stall_watchdog = 0;
3471 3566 tx_ring->recycle_fail = 0;
3472 3567 }
3473 3568
3474 3569 return (result);
3475 3570 }
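
In other words, the stall watchdog counts consecutive 1-second ticks in which descriptor recycling failed on a ring, and declares a stall at the threshold. A user-level model with a hypothetical per-tick recycle_fail trace; the threshold of 8 is an assumed STALL_WATCHDOG_TIMEOUT value, not confirmed here:

#include <stdio.h>

#define STALL_TIMEOUT   8       /* assumed STALL_WATCHDOG_TIMEOUT */

int
main(void)
{
        /* hypothetical recycle_fail value at each 1-second tick */
        int fail[] = { 0, 3, 1, 2, 1, 1, 4, 2, 5, 1, 2 };
        int n = (int)(sizeof (fail) / sizeof (fail[0]));
        int watchdog = 0, i;

        for (i = 0; i < n; i++) {
                watchdog = (fail[i] > 0) ? watchdog + 1 : 0;
                if (watchdog >= STALL_TIMEOUT) {
                        printf("tick %d: stall declared, reset chip\n", i);
                        return (0);
                }
        }
        printf("no stall detected\n");
        return (0);
}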
3476 3571
3477 3572
3478 3573 /*
3479 3574 * is_valid_mac_addr - Check if the mac address is valid.
3480 3575 */
3481 3576 static boolean_t
3482 3577 is_valid_mac_addr(uint8_t *mac_addr)
3483 3578 {
3484 3579 const uint8_t addr_test1[6] = { 0, 0, 0, 0, 0, 0 };
3485 3580 const uint8_t addr_test2[6] =
3486 3581 { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3487 3582
3488 3583 if (!(bcmp(addr_test1, mac_addr, ETHERADDRL)) ||
3489 3584 !(bcmp(addr_test2, mac_addr, ETHERADDRL)))
3490 3585 return (B_FALSE);
3491 3586
3492 3587 return (B_TRUE);
3493 3588 }
3494 3589
3495 3590 static boolean_t
3496 3591 ixgbe_find_mac_address(ixgbe_t *ixgbe)
3497 3592 {
3498 3593 #ifdef __sparc
3499 3594 struct ixgbe_hw *hw = &ixgbe->hw;
3500 3595 uchar_t *bytes;
3501 3596 struct ether_addr sysaddr;
3502 3597 uint_t nelts;
3503 3598 int err;
3504 3599 boolean_t found = B_FALSE;
3505 3600
3506 3601 /*
3507 3602 * The "vendor's factory-set address" may already have
3508 3603 * been extracted from the chip, but if the property
3509 3604 * "local-mac-address" is set we use that instead.
3510 3605 *
3511 3606 * We check whether it looks like an array of 6
3512 3607 * bytes (which it should, if OBP set it). If we can't
3513 3608 * make sense of it this way, we'll ignore it.
3514 3609 */
3515 3610 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3516 3611 DDI_PROP_DONTPASS, "local-mac-address", &bytes, &nelts);
3517 3612 if (err == DDI_PROP_SUCCESS) {
3518 3613 if (nelts == ETHERADDRL) {
3519 3614 while (nelts--)
3520 3615 hw->mac.addr[nelts] = bytes[nelts];
3521 3616 found = B_TRUE;
3522 3617 }
3523 3618 ddi_prop_free(bytes);
3524 3619 }
3525 3620
3526 3621 /*
3527 3622 * Look up the OBP property "local-mac-address?". If the user has set
3528 3623 * 'local-mac-address? = false', use "the system address" instead.
3529 3624 */
3530 3625 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip, 0,
3531 3626 "local-mac-address?", &bytes, &nelts) == DDI_PROP_SUCCESS) {
3532 3627 if (strncmp("false", (caddr_t)bytes, (size_t)nelts) == 0) {
3533 3628 if (localetheraddr(NULL, &sysaddr) != 0) {
3534 3629 bcopy(&sysaddr, hw->mac.addr, ETHERADDRL);
3535 3630 found = B_TRUE;
3536 3631 }
3537 3632 }
3538 3633 ddi_prop_free(bytes);
3539 3634 }
3540 3635
3541 3636 /*
3542 3637 * Finally(!), if there's a valid "mac-address" property (created
3543 3638 * if we netbooted from this interface), we must use this instead
3544 3639 * of any of the above to ensure that the NFS/install server doesn't
3545 3640 * get confused by the address changing as Solaris takes over!
3546 3641 */
3547 3642 err = ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, ixgbe->dip,
3548 3643 DDI_PROP_DONTPASS, "mac-address", &bytes, &nelts);
3549 3644 if (err == DDI_PROP_SUCCESS) {
3550 3645 if (nelts == ETHERADDRL) {
3551 3646 while (nelts--)
3552 3647 hw->mac.addr[nelts] = bytes[nelts];
3553 3648 found = B_TRUE;
3554 3649 }
3555 3650 ddi_prop_free(bytes);
3556 3651 }
3557 3652
3558 3653 if (found) {
3559 3654 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
3560 3655 return (B_TRUE);
3561 3656 }
3562 3657 #else
3563 3658 _NOTE(ARGUNUSED(ixgbe));
3564 3659 #endif
3565 3660
3566 3661 return (B_TRUE);
3567 3662 }
3568 3663
3569 3664 #pragma inline(ixgbe_arm_watchdog_timer)
3570 3665 static void
3571 3666 ixgbe_arm_watchdog_timer(ixgbe_t *ixgbe)
3572 3667 {
3573 3668 /*
3574 3669 * Fire a watchdog timer
3575 3670 */
3576 3671 ixgbe->watchdog_tid =
3577 3672 timeout(ixgbe_local_timer,
3578 3673 (void *)ixgbe, 1 * drv_usectohz(1000000));
3579 3674
3580 3675 }
3581 3676
3582 3677 /*
3583 3678 * ixgbe_enable_watchdog_timer - Enable and start the driver watchdog timer.
3584 3679 */
3585 3680 void
3586 3681 ixgbe_enable_watchdog_timer(ixgbe_t *ixgbe)
3587 3682 {
3588 3683 mutex_enter(&ixgbe->watchdog_lock);
3589 3684
3590 3685 if (!ixgbe->watchdog_enable) {
3591 3686 ixgbe->watchdog_enable = B_TRUE;
3592 3687 ixgbe->watchdog_start = B_TRUE;
3593 3688 ixgbe_arm_watchdog_timer(ixgbe);
3594 3689 }
3595 3690
3596 3691 mutex_exit(&ixgbe->watchdog_lock);
3597 3692 }
3598 3693
3599 3694 /*
3600 3695 * ixgbe_disable_watchdog_timer - Disable and stop the driver watchdog timer.
3601 3696 */
3602 3697 void
3603 3698 ixgbe_disable_watchdog_timer(ixgbe_t *ixgbe)
3604 3699 {
3605 3700 timeout_id_t tid;
3606 3701
3607 3702 mutex_enter(&ixgbe->watchdog_lock);
3608 3703
3609 3704 ixgbe->watchdog_enable = B_FALSE;
3610 3705 ixgbe->watchdog_start = B_FALSE;
3611 3706 tid = ixgbe->watchdog_tid;
3612 3707 ixgbe->watchdog_tid = 0;
3613 3708
3614 3709 mutex_exit(&ixgbe->watchdog_lock);
3615 3710
3616 3711 if (tid != 0)
3617 3712 (void) untimeout(tid);
3618 3713 }
3619 3714
3620 3715 /*
3621 3716 * ixgbe_start_watchdog_timer - Start the driver watchdog timer.
3622 3717 */
3623 3718 void
3624 3719 ixgbe_start_watchdog_timer(ixgbe_t *ixgbe)
3625 3720 {
3626 3721 mutex_enter(&ixgbe->watchdog_lock);
3627 3722
3628 3723 if (ixgbe->watchdog_enable) {
3629 3724 if (!ixgbe->watchdog_start) {
3630 3725 ixgbe->watchdog_start = B_TRUE;
3631 3726 ixgbe_arm_watchdog_timer(ixgbe);
3632 3727 }
3633 3728 }
3634 3729
3635 3730 mutex_exit(&ixgbe->watchdog_lock);
3636 3731 }
3637 3732
3638 3733 /*
3639 3734 * ixgbe_restart_watchdog_timer - Restart the driver watchdog timer.
3640 3735 */
3641 3736 static void
3642 3737 ixgbe_restart_watchdog_timer(ixgbe_t *ixgbe)
3643 3738 {
3644 3739 mutex_enter(&ixgbe->watchdog_lock);
3645 3740
3646 3741 if (ixgbe->watchdog_start)
3647 3742 ixgbe_arm_watchdog_timer(ixgbe);
3648 3743
3649 3744 mutex_exit(&ixgbe->watchdog_lock);
3650 3745 }
3651 3746
3652 3747 /*
3653 3748 * ixgbe_stop_watchdog_timer - Stop the driver watchdog timer.
3654 3749 */
3655 3750 void
3656 3751 ixgbe_stop_watchdog_timer(ixgbe_t *ixgbe)
3657 3752 {
3658 3753 timeout_id_t tid;
3659 3754
3660 3755 mutex_enter(&ixgbe->watchdog_lock);
3661 3756
3662 3757 ixgbe->watchdog_start = B_FALSE;
3663 3758 tid = ixgbe->watchdog_tid;
3664 3759 ixgbe->watchdog_tid = 0;
3665 3760
3666 3761 mutex_exit(&ixgbe->watchdog_lock);
3667 3762
3668 3763 if (tid != 0)
3669 3764 (void) untimeout(tid);
3670 3765 }
3671 3766
3672 3767 /*
3673 3768 * ixgbe_disable_adapter_interrupts - Disable all adapter interrupts.
3674 3769 */
3675 3770 static void
3676 3771 ixgbe_disable_adapter_interrupts(ixgbe_t *ixgbe)
3677 3772 {
3678 3773 struct ixgbe_hw *hw = &ixgbe->hw;
3679 3774
3680 3775 /*
3681 3776 * mask all interrupts off
3682 3777 */
3683 3778 IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xffffffff);
3684 3779
3685 3780 /*
3686 3781 * for MSI-X, also disable autoclear
3687 3782 */
3688 3783 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3689 3784 IXGBE_WRITE_REG(hw, IXGBE_EIAC, 0x0);
3690 3785 }
3691 3786
3692 3787 IXGBE_WRITE_FLUSH(hw);
3693 3788 }
3694 3789
3695 3790 /*
3696 3791 * ixgbe_enable_adapter_interrupts - Enable all hardware interrupts.
3697 3792 */
3698 3793 static void
3699 3794 ixgbe_enable_adapter_interrupts(ixgbe_t *ixgbe)
3700 3795 {
3701 3796 struct ixgbe_hw *hw = &ixgbe->hw;
3702 3797 uint32_t eiac, eiam;
3703 3798 uint32_t gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
3704 3799
3705 3800 /* interrupt types to enable */
3706 3801 ixgbe->eims = IXGBE_EIMS_ENABLE_MASK; /* shared code default */
3707 3802 ixgbe->eims &= ~IXGBE_EIMS_TCP_TIMER; /* minus tcp timer */
3708 3803 ixgbe->eims |= ixgbe->capab->other_intr; /* "other" interrupt types */
3709 3804
3710 3805 /* enable automask on "other" causes that this adapter can generate */
3711 3806 eiam = ixgbe->capab->other_intr;
3712 3807
3713 3808 /*
3714 3809 * msi-x mode
3715 3810 */
3716 3811 if (ixgbe->intr_type == DDI_INTR_TYPE_MSIX) {
3717 3812 /* enable autoclear but not on bits 29:20 */
3718 3813 eiac = (ixgbe->eims & ~IXGBE_OTHER_INTR);
3719 3814
3720 3815 /* general purpose interrupt enable */
3721 3816 gpie |= (IXGBE_GPIE_MSIX_MODE
3722 3817 | IXGBE_GPIE_PBA_SUPPORT
3723 3818 | IXGBE_GPIE_OCD
3724 3819 | IXGBE_GPIE_EIAME);
3725 3820 /*
3726 3821 * non-msi-x mode
3727 3822 */
3728 3823 } else {
3729 3824
3730 3825 /* disable autoclear, leave gpie at default */
3731 3826 eiac = 0;
3732 3827
3733 3828 /*
3734 3829 * General purpose interrupt enable.
3735 3830 * For 82599, extended interrupt automask enable
3736 3831 * only in MSI or MSI-X mode
3737 3832 */
3738 3833 if ((hw->mac.type < ixgbe_mac_82599EB) ||
3739 3834 (ixgbe->intr_type == DDI_INTR_TYPE_MSI)) {
3740 3835 gpie |= IXGBE_GPIE_EIAME;
3741 3836 }
3742 3837 }
3743 3838
3744 3839 /* Enable specific "other" interrupt types */
3745 3840 switch (hw->mac.type) {
3746 3841 case ixgbe_mac_82598EB:
3747 3842 gpie |= ixgbe->capab->other_gpie;
3748 3843 break;
3749 3844
3750 3845 case ixgbe_mac_82599EB:
3846 + case ixgbe_mac_X540:
3751 3847 gpie |= ixgbe->capab->other_gpie;
3752 3848
3753 3849 /* Enable RSC Delay 8us when LRO enabled */
3754 3850 if (ixgbe->lro_enable) {
3755 3851 gpie |= (1 << IXGBE_GPIE_RSC_DELAY_SHIFT);
3756 3852 }
3757 3853 break;
3758 3854
3759 3855 default:
3760 3856 break;
3761 3857 }
3762 3858
3763 3859 /* write to interrupt control registers */
3764 3860 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
3765 3861 IXGBE_WRITE_REG(hw, IXGBE_EIAC, eiac);
3766 3862 IXGBE_WRITE_REG(hw, IXGBE_EIAM, eiam);
3767 3863 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
3768 3864 IXGBE_WRITE_FLUSH(hw);
3769 3865 }
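
The EIAC computation above enables autoclear for the queue causes only: the "other" cause bits stay asserted until software handles and re-enables them. Assuming IXGBE_OTHER_INTR covers exactly bits 29:20 (0x3ff00000, as the comment above implies), the masking looks like:

#include <stdio.h>

#define OTHER_INTR      0x3ff00000u     /* bits 29:20, per the comment above */

int
main(void)
{
        unsigned int eims = 0xffffffffu;        /* hypothetical enable mask */
        unsigned int eiac = eims & ~OTHER_INTR;

        printf("eims = 0x%08x\n", eims);
        printf("eiac = 0x%08x (no autoclear on bits 29:20)\n", eiac);
        return (0);
}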
3770 3866
3771 3867 /*
3772 3868 * ixgbe_loopback_ioctl - Loopback support.
3773 3869 */
3774 3870 enum ioc_reply
3775 3871 ixgbe_loopback_ioctl(ixgbe_t *ixgbe, struct iocblk *iocp, mblk_t *mp)
3776 3872 {
3777 3873 lb_info_sz_t *lbsp;
3778 3874 lb_property_t *lbpp;
3779 3875 uint32_t *lbmp;
3780 3876 uint32_t size;
3781 3877 uint32_t value;
3782 3878
3783 3879 if (mp->b_cont == NULL)
3784 3880 return (IOC_INVAL);
3785 3881
3786 3882 switch (iocp->ioc_cmd) {
3787 3883 default:
3788 3884 return (IOC_INVAL);
3789 3885
3790 3886 case LB_GET_INFO_SIZE:
3791 3887 size = sizeof (lb_info_sz_t);
3792 3888 if (iocp->ioc_count != size)
3793 3889 return (IOC_INVAL);
3794 3890
3795 3891 value = sizeof (lb_normal);
3796 3892 value += sizeof (lb_mac);
3797 3893 value += sizeof (lb_external);
3798 3894
3799 3895 lbsp = (lb_info_sz_t *)(uintptr_t)mp->b_cont->b_rptr;
3800 3896 *lbsp = value;
3801 3897 break;
3802 3898
3803 3899 case LB_GET_INFO:
3804 3900 value = sizeof (lb_normal);
3805 3901 value += sizeof (lb_mac);
3806 3902 value += sizeof (lb_external);
3807 3903
3808 3904 size = value;
3809 3905 if (iocp->ioc_count != size)
3810 3906 return (IOC_INVAL);
3811 3907
3812 3908 value = 0;
3813 3909 lbpp = (lb_property_t *)(uintptr_t)mp->b_cont->b_rptr;
3814 3910
3815 3911 lbpp[value++] = lb_normal;
3816 3912 lbpp[value++] = lb_mac;
3817 3913 lbpp[value++] = lb_external;
3818 3914 break;
3819 3915
3820 3916 case LB_GET_MODE:
3821 3917 size = sizeof (uint32_t);
3822 3918 if (iocp->ioc_count != size)
3823 3919 return (IOC_INVAL);
3824 3920
3825 3921 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3826 3922 *lbmp = ixgbe->loopback_mode;
3827 3923 break;
3828 3924
3829 3925 case LB_SET_MODE:
3830 3926 size = 0;
3831 3927 if (iocp->ioc_count != sizeof (uint32_t))
3832 3928 return (IOC_INVAL);
3833 3929
3834 3930 lbmp = (uint32_t *)(uintptr_t)mp->b_cont->b_rptr;
3835 3931 if (!ixgbe_set_loopback_mode(ixgbe, *lbmp))
3836 3932 return (IOC_INVAL);
3837 3933 break;
3838 3934 }
3839 3935
3840 3936 iocp->ioc_count = size;
3841 3937 iocp->ioc_error = 0;
3842 3938
3843 3939 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
3844 3940 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
3845 3941 return (IOC_INVAL);
3846 3942 }
3847 3943
3848 3944 return (IOC_REPLY);
3849 3945 }
3850 3946
3851 3947 /*
3852 3948 * ixgbe_set_loopback_mode - Setup loopback based on the loopback mode.
3853 3949 */
3854 3950 static boolean_t
3855 3951 ixgbe_set_loopback_mode(ixgbe_t *ixgbe, uint32_t mode)
3856 3952 {
3857 3953 if (mode == ixgbe->loopback_mode)
3858 3954 return (B_TRUE);
3859 3955
3860 3956 ixgbe->loopback_mode = mode;
3861 3957
3862 3958 if (mode == IXGBE_LB_NONE) {
3863 3959 /*
3864 3960 * Reset the chip
3865 3961 */
3866 3962 (void) ixgbe_reset(ixgbe);
3867 3963 return (B_TRUE);
3868 3964 }
3869 3965
3870 3966 mutex_enter(&ixgbe->gen_lock);
3871 3967
3872 3968 switch (mode) {
3873 3969 default:
3874 3970 mutex_exit(&ixgbe->gen_lock);
3875 3971 return (B_FALSE);
3876 3972
3877 3973 case IXGBE_LB_EXTERNAL:
3878 3974 break;
3879 3975
3880 3976 case IXGBE_LB_INTERNAL_MAC:
3881 3977 ixgbe_set_internal_mac_loopback(ixgbe);
3882 3978 break;
3883 3979 }
3884 3980
3885 3981 mutex_exit(&ixgbe->gen_lock);
3886 3982
3887 3983 return (B_TRUE);
3888 3984 }
3889 3985
3890 3986 /*
3891 3987 * ixgbe_set_internal_mac_loopback - Set the internal MAC loopback mode.
3892 3988 */
3893 3989 static void
3894 3990 ixgbe_set_internal_mac_loopback(ixgbe_t *ixgbe)
3895 3991 {
3896 3992 struct ixgbe_hw *hw;
3897 3993 uint32_t reg;
3898 3994 uint8_t atlas;
3899 3995
3900 3996 hw = &ixgbe->hw;
3901 3997
3902 3998 /*
3903 3999 * Setup MAC loopback
3904 4000 */
3905 4001 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_HLREG0);
3906 4002 reg |= IXGBE_HLREG0_LPBK;
3907 4003 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_HLREG0, reg);
3908 4004
3909 4005 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3910 4006 reg &= ~IXGBE_AUTOC_LMS_MASK;
3911 4007 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3912 4008
3913 4009 /*
3914 4010 * Disable Atlas Tx lanes to keep packets in loopback and not on wire
3915 4011 */
3916 4012 switch (hw->mac.type) {
3917 4013 case ixgbe_mac_82598EB:
3918 4014 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3919 4015 &atlas);
3920 4016 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
3921 4017 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_LPBK,
3922 4018 atlas);
3923 4019
3924 4020 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3925 4021 &atlas);
3926 4022 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
3927 4023 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_10G,
3928 4024 atlas);
3929 4025
3930 4026 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3931 4027 &atlas);
3932 4028 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
3933 4029 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_1G,
3934 4030 atlas);
3935 4031
3936 4032 (void) ixgbe_read_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3937 4033 &atlas);
3938 4034 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
3939 4035 (void) ixgbe_write_analog_reg8(&ixgbe->hw, IXGBE_ATLAS_PDN_AN,
3940 4036 atlas);
3941 4037 break;
3942 4038
3943 4039 case ixgbe_mac_82599EB:
4040 + case ixgbe_mac_X540:
3944 4041 reg = IXGBE_READ_REG(&ixgbe->hw, IXGBE_AUTOC);
3945 4042 reg |= (IXGBE_AUTOC_FLU |
3946 4043 IXGBE_AUTOC_10G_KX4);
3947 4044 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_AUTOC, reg);
3948 4045
3949 4046 (void) ixgbe_setup_link(&ixgbe->hw, IXGBE_LINK_SPEED_10GB_FULL,
3950 4047 B_FALSE, B_TRUE);
3951 4048 break;
3952 4049
3953 4050 default:
3954 4051 break;
3955 4052 }
3956 4053 }
3957 4054
3958 4055 #pragma inline(ixgbe_intr_rx_work)
3959 4056 /*
3960 4057 * ixgbe_intr_rx_work - RX processing of ISR.
3961 4058 */
3962 4059 static void
3963 4060 ixgbe_intr_rx_work(ixgbe_rx_ring_t *rx_ring)
3964 4061 {
3965 4062 mblk_t *mp;
3966 4063
3967 4064 mutex_enter(&rx_ring->rx_lock);
3968 4065
3969 4066 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
3970 4067 mutex_exit(&rx_ring->rx_lock);
3971 4068
3972 4069 if (mp != NULL)
3973 4070 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
3974 4071 rx_ring->ring_gen_num);
3975 4072 }
3976 4073
3977 4074 #pragma inline(ixgbe_intr_tx_work)
3978 4075 /*
3979 4076 * ixgbe_intr_tx_work - TX processing of ISR.
3980 4077 */
3981 4078 static void
3982 4079 ixgbe_intr_tx_work(ixgbe_tx_ring_t *tx_ring)
3983 4080 {
3984 4081 ixgbe_t *ixgbe = tx_ring->ixgbe;
3985 4082
3986 4083 /*
3987 4084 * Recycle the tx descriptors
3988 4085 */
3989 4086 tx_ring->tx_recycle(tx_ring);
3990 4087
3991 4088 /*
3992 4089 * Schedule the re-transmit
3993 4090 */
3994 4091 if (tx_ring->reschedule &&
3995 4092 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh)) {
3996 4093 tx_ring->reschedule = B_FALSE;
3997 4094 mac_tx_ring_update(tx_ring->ixgbe->mac_hdl,
3998 4095 tx_ring->ring_handle);
3999 4096 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4000 4097 }
4001 4098 }
4002 4099
4003 4100 #pragma inline(ixgbe_intr_other_work)
4004 4101 /*
4005 4102 * ixgbe_intr_other_work - Process interrupt types other than tx/rx
4006 4103 */
4007 4104 static void
4008 4105 ixgbe_intr_other_work(ixgbe_t *ixgbe, uint32_t eicr)
4009 4106 {
4010 4107 ASSERT(mutex_owned(&ixgbe->gen_lock));
4011 4108
4012 4109 /*
4013 4110 * handle link status change
4014 4111 */
4015 4112 if (eicr & IXGBE_EICR_LSC) {
4016 4113 ixgbe_driver_link_check(ixgbe);
4017 4114 ixgbe_get_hw_state(ixgbe);
4018 4115 }
4019 4116
4020 4117 /*
4021 4118 * check for fan failure on adapters with fans
4022 4119 */
4023 4120 if ((ixgbe->capab->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
4024 4121 (eicr & IXGBE_EICR_GPI_SDP1)) {
4025 4122 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_OVERTEMP);
4026 4123
4027 4124 /*
4028 4125 * Disable the adapter interrupts
4029 4126 */
4030 4127 ixgbe_disable_adapter_interrupts(ixgbe);
4031 4128
4032 4129 /*
4033 4130 * Disable Rx/Tx units
4034 4131 */
4035 4132 (void) ixgbe_stop_adapter(&ixgbe->hw);
4036 4133
4037 4134 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_LOST);
4038 4135 ixgbe_error(ixgbe,
4039 4136 "Problem: Network adapter has been stopped "
4040 4137 "because the fan has stopped.\n");
4041 4138 ixgbe_error(ixgbe,
4042 4139 "Action: Replace the adapter.\n");
4043 4140
4044 4141 /* re-enable the interrupt, which was automasked */
4045 4142 ixgbe->eims |= IXGBE_EICR_GPI_SDP1;
4046 4143 }
4047 4144
4048 4145 /*
4049 4146 * Do SFP check for adapters with hot-plug capability
4050 4147 */
4051 4148 if ((ixgbe->capab->flags & IXGBE_FLAG_SFP_PLUG_CAPABLE) &&
4052 4149 ((eicr & IXGBE_EICR_GPI_SDP1) || (eicr & IXGBE_EICR_GPI_SDP2))) {
4053 4150 ixgbe->eicr = eicr;
4054 4151 if ((ddi_taskq_dispatch(ixgbe->sfp_taskq,
4055 4152 ixgbe_sfp_check, (void *)ixgbe,
4056 4153 DDI_NOSLEEP)) != DDI_SUCCESS) {
4057 4154 ixgbe_log(ixgbe, "No memory available to dispatch "
4058 4155 "taskq for SFP check");
4059 4156 }
4060 4157 }
4061 4158
4062 4159 /*
4063 4160 * Do over-temperature check for adapters with temp sensor
4064 4161 */
4065 4162 if ((ixgbe->capab->flags & IXGBE_FLAG_TEMP_SENSOR_CAPABLE) &&
4066 4163 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
4067 4164 ixgbe->eicr = eicr;
4068 4165 if ((ddi_taskq_dispatch(ixgbe->overtemp_taskq,
4069 4166 ixgbe_overtemp_check, (void *)ixgbe,
4070 4167 DDI_NOSLEEP)) != DDI_SUCCESS) {
4071 4168 ixgbe_log(ixgbe, "No memory available to dispatch "
4072 4169 "taskq for overtemp check");
4073 4170 }
4074 4171 }
4075 4172 }
4076 4173
4077 4174 /*
4078 4175 * ixgbe_intr_legacy - Interrupt handler for legacy interrupts.
4079 4176 */
4080 4177 static uint_t
4081 4178 ixgbe_intr_legacy(void *arg1, void *arg2)
4082 4179 {
4083 4180 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4084 4181 struct ixgbe_hw *hw = &ixgbe->hw;
4085 4182 ixgbe_tx_ring_t *tx_ring;
4086 4183 ixgbe_rx_ring_t *rx_ring;
4087 4184 uint32_t eicr;
4088 4185 mblk_t *mp;
4089 4186 boolean_t tx_reschedule;
4090 4187 uint_t result;
4091 4188
4092 4189 _NOTE(ARGUNUSED(arg2));
4093 4190
4094 4191 mutex_enter(&ixgbe->gen_lock);
4095 4192 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
4096 4193 mutex_exit(&ixgbe->gen_lock);
4097 4194 return (DDI_INTR_UNCLAIMED);
4098 4195 }
4099 4196
4100 4197 mp = NULL;
4101 4198 tx_reschedule = B_FALSE;
4102 4199
4103 4200 /*
4104 4201 * Any bit set in eicr: claim this interrupt
4105 4202 */
4106 4203 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4107 4204
4108 4205 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4109 4206 mutex_exit(&ixgbe->gen_lock);
4110 4207 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4111 4208 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4112 4209 return (DDI_INTR_CLAIMED);
4113 4210 }
4114 4211
4115 4212 if (eicr) {
4116 4213 /*
4117 4214 * For legacy interrupt, we have only one interrupt,
4118 4215 * so we have only one rx ring and one tx ring enabled.
4119 4216 */
4120 4217 ASSERT(ixgbe->num_rx_rings == 1);
4121 4218 ASSERT(ixgbe->num_tx_rings == 1);
4122 4219
4123 4220 /*
4124 4221 * For legacy interrupt, rx rings[0] will use RTxQ[0].
4125 4222 */
4126 4223 if (eicr & 0x1) {
4127 4224 ixgbe->eimc |= IXGBE_EICR_RTX_QUEUE;
4128 4225 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4129 4226 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4130 4227 /*
4131 4228 * Clean the rx descriptors
4132 4229 */
4133 4230 rx_ring = &ixgbe->rx_rings[0];
4134 4231 mp = ixgbe_ring_rx(rx_ring, IXGBE_POLL_NULL);
4135 4232 }
4136 4233
4137 4234 /*
4138 4235 * For legacy interrupt, tx rings[0] will use RTxQ[1].
4139 4236 */
4140 4237 if (eicr & 0x2) {
4141 4238 /*
4142 4239 * Recycle the tx descriptors
4143 4240 */
4144 4241 tx_ring = &ixgbe->tx_rings[0];
4145 4242 tx_ring->tx_recycle(tx_ring);
4146 4243
4147 4244 /*
4148 4245 * Schedule the re-transmit
4149 4246 */
4150 4247 tx_reschedule = (tx_ring->reschedule &&
4151 4248 (tx_ring->tbd_free >= ixgbe->tx_resched_thresh));
4152 4249 }
4153 4250
4154 4251 /* any interrupt type other than tx/rx */
4155 4252 if (eicr & ixgbe->capab->other_intr) {
4156 4253 switch (hw->mac.type) {
4157 4254 case ixgbe_mac_82598EB:
4158 4255 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4159 4256 break;
4160 4257
4161 4258 case ixgbe_mac_82599EB:
4259 + case ixgbe_mac_X540:
4162 4260 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4163 4261 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4164 4262 break;
4165 4263
4166 4264 default:
4167 4265 break;
4168 4266 }
4169 4267 ixgbe_intr_other_work(ixgbe, eicr);
4170 4268 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4171 4269 }
4172 4270
4173 4271 mutex_exit(&ixgbe->gen_lock);
4174 4272
4175 4273 result = DDI_INTR_CLAIMED;
4176 4274 } else {
4177 4275 mutex_exit(&ixgbe->gen_lock);
4178 4276
4179 4277 /*
4180 4278 * No interrupt cause bits set: don't claim this interrupt.
4181 4279 */
4182 4280 result = DDI_INTR_UNCLAIMED;
4183 4281 }
4184 4282
4185 4283 /* re-enable the interrupts which were automasked */
4186 4284 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4187 4285
4188 4286 /*
4189 4287 * Do the following work outside of the gen_lock
4190 4288 */
4191 4289 if (mp != NULL) {
4192 4290 mac_rx_ring(rx_ring->ixgbe->mac_hdl, rx_ring->ring_handle, mp,
4193 4291 rx_ring->ring_gen_num);
4194 4292 }
4195 4293
4196 4294 if (tx_reschedule) {
4197 4295 tx_ring->reschedule = B_FALSE;
4198 4296 mac_tx_ring_update(ixgbe->mac_hdl, tx_ring->ring_handle);
4199 4297 IXGBE_DEBUG_STAT(tx_ring->stat_reschedule);
4200 4298 }
4201 4299
4202 4300 return (result);
4203 4301 }
4204 4302
4205 4303 /*
4206 4304 * ixgbe_intr_msi - Interrupt handler for MSI.
4207 4305 */
4208 4306 static uint_t
4209 4307 ixgbe_intr_msi(void *arg1, void *arg2)
4210 4308 {
4211 4309 ixgbe_t *ixgbe = (ixgbe_t *)arg1;
4212 4310 struct ixgbe_hw *hw = &ixgbe->hw;
4213 4311 uint32_t eicr;
4214 4312
4215 4313 _NOTE(ARGUNUSED(arg2));
4216 4314
4217 4315 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4218 4316
4219 4317 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
4220 4318 ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
4221 4319 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4222 4320 return (DDI_INTR_CLAIMED);
4223 4321 }
4224 4322
4225 4323 /*
4226 4324 * For MSI interrupt, we have only one vector,
4227 4325 * so we have only one rx ring and one tx ring enabled.
4228 4326 */
4229 4327 ASSERT(ixgbe->num_rx_rings == 1);
4230 4328 ASSERT(ixgbe->num_tx_rings == 1);
4231 4329
4232 4330 /*
4233 4331 * For MSI interrupt, rx rings[0] will use RTxQ[0].
4234 4332 */
4235 4333 if (eicr & 0x1) {
4236 4334 ixgbe_intr_rx_work(&ixgbe->rx_rings[0]);
4237 4335 }
4238 4336
4239 4337 /*
4240 4338 * For MSI interrupt, tx rings[0] will use RTxQ[1].
4241 4339 */
4242 4340 if (eicr & 0x2) {
4243 4341 ixgbe_intr_tx_work(&ixgbe->tx_rings[0]);
4244 4342 }
4245 4343
4246 4344 /* any interrupt type other than tx/rx */
4247 4345 if (eicr & ixgbe->capab->other_intr) {
4248 4346 mutex_enter(&ixgbe->gen_lock);
4249 4347 switch (hw->mac.type) {
4250 4348 case ixgbe_mac_82598EB:
4251 4349 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4252 4350 break;
4253 4351
4254 4352 case ixgbe_mac_82599EB:
4353 + case ixgbe_mac_X540:
4255 4354 ixgbe->eimc = IXGBE_82599_OTHER_INTR;
4256 4355 IXGBE_WRITE_REG(hw, IXGBE_EIMC, ixgbe->eimc);
4257 4356 break;
4258 4357
4259 4358 default:
4260 4359 break;
4261 4360 }
4262 4361 ixgbe_intr_other_work(ixgbe, eicr);
4263 4362 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4264 4363 mutex_exit(&ixgbe->gen_lock);
4265 4364 }
4266 4365
4267 4366 /* re-enable the interrupts which were automasked */
4268 4367 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4269 4368
4270 4369 return (DDI_INTR_CLAIMED);
4271 4370 }
4272 4371
4273 4372 /*
4274 4373 * ixgbe_intr_msix - Interrupt handler for MSI-X.
4275 4374 */
4276 4375 static uint_t
4277 4376 ixgbe_intr_msix(void *arg1, void *arg2)
4278 4377 {
4279 4378 ixgbe_intr_vector_t *vect = (ixgbe_intr_vector_t *)arg1;
4280 4379 ixgbe_t *ixgbe = vect->ixgbe;
4281 4380 struct ixgbe_hw *hw = &ixgbe->hw;
4282 4381 uint32_t eicr;
4283 4382 int r_idx = 0;
4284 4383
4285 4384 _NOTE(ARGUNUSED(arg2));
4286 4385
4287 4386 /*
4288 4387 * Clean each rx ring that has its bit set in the map
4289 4388 */
4290 4389 r_idx = bt_getlowbit(vect->rx_map, 0, (ixgbe->num_rx_rings - 1));
4291 4390 while (r_idx >= 0) {
4292 4391 ixgbe_intr_rx_work(&ixgbe->rx_rings[r_idx]);
4293 4392 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
4294 4393 (ixgbe->num_rx_rings - 1));
4295 4394 }
4296 4395
4297 4396 /*
4298 4397 * Clean each tx ring that has its bit set in the map
4299 4398 */
4300 4399 r_idx = bt_getlowbit(vect->tx_map, 0, (ixgbe->num_tx_rings - 1));
4301 4400 while (r_idx >= 0) {
4302 4401 ixgbe_intr_tx_work(&ixgbe->tx_rings[r_idx]);
4303 4402 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
4304 4403 (ixgbe->num_tx_rings - 1));
4305 4404 }
4306 4405
4307 4406
4308 4407 /*
4309 4408 * Clean other interrupt (link change) that has its bit set in the map
4310 4409 */
4311 4410 if (BT_TEST(vect->other_map, 0) == 1) {
4312 4411 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
4313 4412
4314 4413 if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) !=
4315 4414 DDI_FM_OK) {
4316 4415 ddi_fm_service_impact(ixgbe->dip,
4317 4416 DDI_SERVICE_DEGRADED);
4318 4417 atomic_or_32(&ixgbe->ixgbe_state, IXGBE_ERROR);
4319 4418 return (DDI_INTR_CLAIMED);
4320 4419 }
4321 4420
4322 4421 /*
4323 4422 * Check "other" cause bits: any interrupt type other than tx/rx
4324 4423 */
4325 4424 if (eicr & ixgbe->capab->other_intr) {
4326 4425 mutex_enter(&ixgbe->gen_lock);
4327 4426 switch (hw->mac.type) {
4328 4427 case ixgbe_mac_82598EB:
4329 4428 ixgbe->eims &= ~(eicr & IXGBE_OTHER_INTR);
4330 4429 ixgbe_intr_other_work(ixgbe, eicr);
4331 4430 break;
4332 4431
4333 4432 case ixgbe_mac_82599EB:
4433 + case ixgbe_mac_X540:
4334 4434 ixgbe->eims |= IXGBE_EICR_RTX_QUEUE;
4335 4435 ixgbe_intr_other_work(ixgbe, eicr);
4336 4436 break;
4337 4437
4338 4438 default:
4339 4439 break;
4340 4440 }
4341 4441 mutex_exit(&ixgbe->gen_lock);
4342 4442 }
4343 4443
4344 4444 /* re-enable the interrupts which were automasked */
4345 4445 IXGBE_WRITE_REG(hw, IXGBE_EIMS, ixgbe->eims);
4346 4446 }
4347 4447
4348 4448 return (DDI_INTR_CLAIMED);
4349 4449 }
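
The rx and tx loops above walk the per-vector ring bitmaps with bt_getlowbit(), servicing every ring whose bit is set. A user-level analogue of that iteration pattern; getlowbit() here is a stand-in for the kernel's bt_getlowbit():

#include <stdio.h>

/* stand-in for bt_getlowbit(): lowest set bit in [lo, hi], or -1 */
static int
getlowbit(unsigned long map, int lo, int hi)
{
        int i;

        for (i = lo; i <= hi; i++) {
                if (map & (1UL << i))
                        return (i);
        }
        return (-1);
}

int
main(void)
{
        unsigned long rx_map = 0x15UL;  /* rings 0, 2 and 4 set */
        int num_rx = 6, r;

        for (r = getlowbit(rx_map, 0, num_rx - 1); r >= 0;
            r = getlowbit(rx_map, r + 1, num_rx - 1))
                printf("clean rx ring %d\n", r);
        return (0);
}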
4350 4450
4351 4451 /*
4352 4452 * ixgbe_alloc_intrs - Allocate interrupts for the driver.
4353 4453 *
4354 4454 * Normal sequence is to try MSI-X; if not successful, try MSI;
4355 4455 * if not successful, try Legacy.
4356 4456 * ixgbe->intr_force can be used to force sequence to start with
4357 4457 * any of the 3 types.
4358 4458 * If MSI-X is not used, number of tx/rx rings is forced to 1.
4359 4459 */
4360 4460 static int
4361 4461 ixgbe_alloc_intrs(ixgbe_t *ixgbe)
4362 4462 {
4363 4463 dev_info_t *devinfo;
4364 4464 int intr_types;
4365 4465 int rc;
4366 4466
4367 4467 devinfo = ixgbe->dip;
4368 4468
4369 4469 /*
4370 4470 * Get supported interrupt types
4371 4471 */
4372 4472 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
4373 4473
4374 4474 if (rc != DDI_SUCCESS) {
4375 4475 ixgbe_log(ixgbe,
4376 4476 "Get supported interrupt types failed: %d", rc);
4377 4477 return (IXGBE_FAILURE);
4378 4478 }
4379 4479 IXGBE_DEBUGLOG_1(ixgbe, "Supported interrupt types: %x", intr_types);
4380 4480
4381 4481 ixgbe->intr_type = 0;
4382 4482
4383 4483 /*
4384 4484 * Install MSI-X interrupts
4385 4485 */
4386 4486 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
4387 4487 (ixgbe->intr_force <= IXGBE_INTR_MSIX)) {
4388 4488 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSIX);
4389 4489 if (rc == IXGBE_SUCCESS)
4390 4490 return (IXGBE_SUCCESS);
4391 4491
4392 4492 ixgbe_log(ixgbe,
4393 4493 "Allocate MSI-X failed, trying MSI interrupts...");
4394 4494 }
4395 4495
4396 4496 /*
4397 4497 * MSI-X not used, force rings and groups to 1
4398 4498 */
4399 4499 ixgbe->num_rx_rings = 1;
4400 4500 ixgbe->num_rx_groups = 1;
4401 4501 ixgbe->num_tx_rings = 1;
4402 4502 ixgbe->classify_mode = IXGBE_CLASSIFY_NONE;
4403 4503 ixgbe_log(ixgbe,
4404 4504 "MSI-X not used, force rings and groups number to 1");
4405 4505
4406 4506 /*
4407 4507 * Install MSI interrupts
4408 4508 */
4409 4509 if ((intr_types & DDI_INTR_TYPE_MSI) &&
4410 4510 (ixgbe->intr_force <= IXGBE_INTR_MSI)) {
4411 4511 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_MSI);
4412 4512 if (rc == IXGBE_SUCCESS)
4413 4513 return (IXGBE_SUCCESS);
4414 4514
4415 4515 ixgbe_log(ixgbe,
4416 4516 "Allocate MSI failed, trying Legacy interrupts...");
4417 4517 }
4418 4518
4419 4519 /*
4420 4520 * Install legacy interrupts
4421 4521 */
4422 4522 if (intr_types & DDI_INTR_TYPE_FIXED) {
4423 4523 rc = ixgbe_alloc_intr_handles(ixgbe, DDI_INTR_TYPE_FIXED);
4424 4524 if (rc == IXGBE_SUCCESS)
4425 4525 return (IXGBE_SUCCESS);
4426 4526
4427 4527 ixgbe_log(ixgbe,
4428 4528 "Allocate Legacy interrupts failed");
4429 4529 }
4430 4530
4431 4531 /*
4432 4532 * If none of the 3 types succeeded, return failure
4433 4533 */
4434 4534 return (IXGBE_FAILURE);
4435 4535 }
4436 4536
4437 4537 /*
4438 4538 * ixgbe_alloc_intr_handles - Allocate interrupt handles.
4439 4539 *
4440 4540 * For legacy and MSI, only 1 handle is needed. For MSI-X,
4441 4541 * if fewer handles than the minimum are available, return failure.
4442 4542 * Upon success, this maps the vectors to rx and tx rings for
4443 4543 * interrupts.
4444 4544 */
4445 4545 static int
4446 4546 ixgbe_alloc_intr_handles(ixgbe_t *ixgbe, int intr_type)
4447 4547 {
4448 4548 dev_info_t *devinfo;
4449 4549 int request, count, actual;
4450 4550 int minimum;
4451 4551 int rc;
4452 4552 uint32_t ring_per_group;
4453 4553
4454 4554 devinfo = ixgbe->dip;
4455 4555
4456 4556 switch (intr_type) {
4457 4557 case DDI_INTR_TYPE_FIXED:
4458 4558 request = 1; /* Request 1 legacy interrupt handle */
4459 4559 minimum = 1;
4460 4560 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: legacy");
4461 4561 break;
4462 4562
4463 4563 case DDI_INTR_TYPE_MSI:
4464 4564 request = 1; /* Request 1 MSI interrupt handle */
4465 4565 minimum = 1;
4466 4566 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI");
4467 4567 break;
4468 4568
4469 4569 case DDI_INTR_TYPE_MSIX:
4470 4570 /*
4471 4571 * Best number of vectors for the adapter is
4472 4572 * (# rx rings + # tx rings), however we will
4473 4573 * limit the request number.
4474 4574 */
4475 4575 request = min(16, ixgbe->num_rx_rings + ixgbe->num_tx_rings);
4476 4576 if (request > ixgbe->capab->max_ring_vect)
4477 4577 request = ixgbe->capab->max_ring_vect;
4478 4578 minimum = 1;
4479 4579 IXGBE_DEBUGLOG_0(ixgbe, "interrupt type: MSI-X");
4480 4580 break;
4481 4581
4482 4582 default:
4483 4583 ixgbe_log(ixgbe,
4484 4584 "invalid call to ixgbe_alloc_intr_handles(): %d\n",
4485 4585 intr_type);
4486 4586 return (IXGBE_FAILURE);
4487 4587 }
4488 4588 IXGBE_DEBUGLOG_2(ixgbe, "interrupt handles requested: %d minimum: %d",
4489 4589 request, minimum);
4490 4590
4491 4591 /*
4492 4592 * Get number of supported interrupts
4493 4593 */
4494 4594 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
4495 4595 if ((rc != DDI_SUCCESS) || (count < minimum)) {
4496 4596 ixgbe_log(ixgbe,
4497 4597 "Get interrupt number failed. Return: %d, count: %d",
4498 4598 rc, count);
4499 4599 return (IXGBE_FAILURE);
4500 4600 }
4501 4601 IXGBE_DEBUGLOG_1(ixgbe, "interrupts supported: %d", count);
4502 4602
4503 4603 actual = 0;
4504 4604 ixgbe->intr_cnt = 0;
4505 4605 ixgbe->intr_cnt_max = 0;
4506 4606 ixgbe->intr_cnt_min = 0;
4507 4607
4508 4608 /*
4509 4609 * Allocate an array of interrupt handles
4510 4610 */
4511 4611 ixgbe->intr_size = request * sizeof (ddi_intr_handle_t);
4512 4612 ixgbe->htable = kmem_alloc(ixgbe->intr_size, KM_SLEEP);
4513 4613
4514 4614 rc = ddi_intr_alloc(devinfo, ixgbe->htable, intr_type, 0,
4515 4615 request, &actual, DDI_INTR_ALLOC_NORMAL);
4516 4616 if (rc != DDI_SUCCESS) {
4517 4617 ixgbe_log(ixgbe, "Allocate interrupts failed. "
4518 4618 "return: %d, request: %d, actual: %d",
4519 4619 rc, request, actual);
4520 4620 goto alloc_handle_fail;
4521 4621 }
4522 4622 IXGBE_DEBUGLOG_1(ixgbe, "interrupts actually allocated: %d", actual);
4523 4623
4524 4624 /*
4525 4625 * upper/lower limit of interrupts
4526 4626 */
4527 4627 ixgbe->intr_cnt = actual;
4528 4628 ixgbe->intr_cnt_max = request;
4529 4629 ixgbe->intr_cnt_min = minimum;
4530 4630
4531 4631 /*
4532 4632 * The RSS ring count per group should not exceed the number of rx
4533 4633 * interrupts; otherwise the rx ring count needs to be adjusted.
4534 4634 */
4535 4635 ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4536 4636 ASSERT((ixgbe->num_rx_rings % ixgbe->num_rx_groups) == 0);
4537 4637 if (actual < ring_per_group) {
4538 4638 ixgbe->num_rx_rings = ixgbe->num_rx_groups * actual;
4539 4639 ixgbe_setup_vmdq_rss_conf(ixgbe);
4540 4640 }
4541 4641
4542 4642 /*
4543 4643 * Now we know the actual number of vectors. Here we map the
4544 4644 * vectors to the other cause, the rx rings and the tx rings.
4545 4645 */
4546 4646 if (actual < minimum) {
4547 4647 ixgbe_log(ixgbe, "Insufficient interrupt handles available: %d",
4548 4648 actual);
4549 4649 goto alloc_handle_fail;
4550 4650 }
4551 4651
4552 4652 /*
4553 4653 * Get priority for first vector, assume remaining are all the same
4554 4654 */
4555 4655 rc = ddi_intr_get_pri(ixgbe->htable[0], &ixgbe->intr_pri);
4556 4656 if (rc != DDI_SUCCESS) {
4557 4657 ixgbe_log(ixgbe,
4558 4658 "Get interrupt priority failed: %d", rc);
4559 4659 goto alloc_handle_fail;
4560 4660 }
4561 4661
4562 4662 rc = ddi_intr_get_cap(ixgbe->htable[0], &ixgbe->intr_cap);
4563 4663 if (rc != DDI_SUCCESS) {
4564 4664 ixgbe_log(ixgbe,
4565 4665 "Get interrupt cap failed: %d", rc);
4566 4666 goto alloc_handle_fail;
4567 4667 }
4568 4668
4569 4669 ixgbe->intr_type = intr_type;
4570 4670
4571 4671 return (IXGBE_SUCCESS);
4572 4672
4573 4673 alloc_handle_fail:
4574 4674 ixgbe_rem_intrs(ixgbe);
4575 4675
4576 4676 return (IXGBE_FAILURE);
4577 4677 }
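
For MSI-X the request size above is (rx rings + tx rings), clamped first to 16 and then to the adapter's max_ring_vect capability. A small sketch of that sizing; the max_ring_vect value passed in is hypothetical:

#include <stdio.h>

static int
msix_request(int num_rx, int num_tx, int max_ring_vect)
{
        int request = num_rx + num_tx;

        if (request > 16)
                request = 16;
        if (request > max_ring_vect)
                request = max_ring_vect;
        return (request);
}

int
main(void)
{
        /* a max_ring_vect of 16 is a hypothetical adapter capability */
        printf("8 rx + 8 tx -> request %d\n", msix_request(8, 8, 16));
        printf("4 rx + 1 tx -> request %d\n", msix_request(4, 1, 16));
        return (0);
}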
4578 4678
4579 4679 /*
4580 4680 * ixgbe_add_intr_handlers - Add interrupt handlers based on the interrupt type.
4581 4681 *
4582 4682 * Before adding the interrupt handlers, the interrupt vectors have
4583 4683 * been allocated, and the rx/tx rings have also been allocated.
4584 4684 */
4585 4685 static int
4586 4686 ixgbe_add_intr_handlers(ixgbe_t *ixgbe)
4587 4687 {
4588 4688 int vector = 0;
4589 4689 int rc;
4590 4690
4591 4691 switch (ixgbe->intr_type) {
4592 4692 case DDI_INTR_TYPE_MSIX:
4593 4693 /*
4594 4694 * Add interrupt handler for all vectors
4595 4695 */
4596 4696 for (vector = 0; vector < ixgbe->intr_cnt; vector++) {
4597 4697 /*
4598 4698 * install pointer to vect_map[vector]
4599 4699 */
4600 4700 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4601 4701 (ddi_intr_handler_t *)ixgbe_intr_msix,
4602 4702 (void *)&ixgbe->vect_map[vector], NULL);
4603 4703
4604 4704 if (rc != DDI_SUCCESS) {
4605 4705 ixgbe_log(ixgbe,
4606 4706 "Add interrupt handler failed. "
4607 4707 "return: %d, vector: %d", rc, vector);
4608 4708 for (vector--; vector >= 0; vector--) {
4609 4709 (void) ddi_intr_remove_handler(
4610 4710 ixgbe->htable[vector]);
4611 4711 }
4612 4712 return (IXGBE_FAILURE);
4613 4713 }
4614 4714 }
4615 4715
4616 4716 break;
4617 4717
4618 4718 case DDI_INTR_TYPE_MSI:
4619 4719 /*
4620 4720 * Add interrupt handlers for the only vector
4621 4721 */
4622 4722 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4623 4723 (ddi_intr_handler_t *)ixgbe_intr_msi,
4624 4724 (void *)ixgbe, NULL);
4625 4725
4626 4726 if (rc != DDI_SUCCESS) {
4627 4727 ixgbe_log(ixgbe,
4628 4728 "Add MSI interrupt handler failed: %d", rc);
4629 4729 return (IXGBE_FAILURE);
4630 4730 }
4631 4731
4632 4732 break;
4633 4733
4634 4734 case DDI_INTR_TYPE_FIXED:
4635 4735 /*
4636 4736 * Add interrupt handlers for the only vector
4637 4737 */
4638 4738 rc = ddi_intr_add_handler(ixgbe->htable[vector],
4639 4739 (ddi_intr_handler_t *)ixgbe_intr_legacy,
4640 4740 (void *)ixgbe, NULL);
4641 4741
4642 4742 if (rc != DDI_SUCCESS) {
4643 4743 ixgbe_log(ixgbe,
4644 4744 "Add legacy interrupt handler failed: %d", rc);
4645 4745 return (IXGBE_FAILURE);
4646 4746 }
4647 4747
4648 4748 break;
4649 4749
4650 4750 default:
4651 4751 return (IXGBE_FAILURE);
4652 4752 }
4653 4753
4654 4754 return (IXGBE_SUCCESS);
4655 4755 }
4656 4756
4657 4757 #pragma inline(ixgbe_map_rxring_to_vector)
4658 4758 /*
4659 4759 * ixgbe_map_rxring_to_vector - Map given rx ring to given interrupt vector.
4660 4760 */
4661 4761 static void
4662 4762 ixgbe_map_rxring_to_vector(ixgbe_t *ixgbe, int r_idx, int v_idx)
4663 4763 {
4664 4764 /*
4665 4765 * Set bit in map
4666 4766 */
4667 4767 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
4668 4768
4669 4769 /*
4670 4770 * Count bits set
4671 4771 */
4672 4772 ixgbe->vect_map[v_idx].rxr_cnt++;
4673 4773
4674 4774 /*
4675 4775 * Remember bit position
4676 4776 */
4677 4777 ixgbe->rx_rings[r_idx].intr_vector = v_idx;
4678 4778 ixgbe->rx_rings[r_idx].vect_bit = 1 << v_idx;
4679 4779 }
4680 4780
4681 4781 #pragma inline(ixgbe_map_txring_to_vector)
4682 4782 /*
4683 4783 * ixgbe_map_txring_to_vector - Map given tx ring to given interrupt vector.
4684 4784 */
4685 4785 static void
4686 4786 ixgbe_map_txring_to_vector(ixgbe_t *ixgbe, int t_idx, int v_idx)
4687 4787 {
4688 4788 /*
4689 4789 * Set bit in map
4690 4790 */
4691 4791 BT_SET(ixgbe->vect_map[v_idx].tx_map, t_idx);
4692 4792
4693 4793 /*
4694 4794 * Count bits set
4695 4795 */
4696 4796 ixgbe->vect_map[v_idx].txr_cnt++;
4697 4797
4698 4798 /*
4699 4799 * Remember bit position
4700 4800 */
4701 4801 ixgbe->tx_rings[t_idx].intr_vector = v_idx;
4702 4802 ixgbe->tx_rings[t_idx].vect_bit = 1 << v_idx;
4703 4803 }
4704 4804
4705 4805 /*
4706 4806 * ixgbe_setup_ivar - Set the given entry in the given interrupt vector
4707 4807 * allocation register (IVAR).
4708 4808 * cause:
4709 4809 * -1 : other cause
4710 4810 * 0 : rx
4711 4811 * 1 : tx
4712 4812 */
4713 4813 static void
4714 4814 ixgbe_setup_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, uint8_t msix_vector,
4715 4815 int8_t cause)
4716 4816 {
4717 4817 struct ixgbe_hw *hw = &ixgbe->hw;
4718 4818 u32 ivar, index;
4719 4819
4720 4820 switch (hw->mac.type) {
4721 4821 case ixgbe_mac_82598EB:
4722 4822 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4723 4823 if (cause == -1) {
4724 4824 cause = 0;
4725 4825 }
4726 4826 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4727 4827 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4728 4828 ivar &= ~(0xFF << (8 * (intr_alloc_entry & 0x3)));
4729 4829 ivar |= (msix_vector << (8 * (intr_alloc_entry & 0x3)));
4730 4830 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4731 4831 break;
4732 4832
4733 4833 case ixgbe_mac_82599EB:
4834 + case ixgbe_mac_X540:
4734 4835 if (cause == -1) {
4735 4836 /* other causes */
4736 4837 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4737 4838 index = (intr_alloc_entry & 1) * 8;
4738 4839 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4739 4840 ivar &= ~(0xFF << index);
4740 4841 ivar |= (msix_vector << index);
4741 4842 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4742 4843 } else {
4743 4844 /* tx or rx causes */
4744 4845 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
4745 4846 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4746 4847 ivar = IXGBE_READ_REG(hw,
4747 4848 IXGBE_IVAR(intr_alloc_entry >> 1));
4748 4849 ivar &= ~(0xFF << index);
4749 4850 ivar |= (msix_vector << index);
4750 4851 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4751 4852 ivar);
4752 4853 }
4753 4854 break;
4754 4855
4755 4856 default:
4756 4857 break;
4757 4858 }
4758 4859 }
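
The index arithmetic above packs four 8-bit IVAR fields per register on 82598, while 82599/X540 pair two queue entries per register with the rx field in the low byte of each half and tx 8 bits above it. A worked example for a hypothetical entry:

#include <stdio.h>

int
main(void)
{
        unsigned int entry = 5; /* hypothetical interrupt-allocation entry */
        unsigned int cause = 0; /* 0 = rx */

        /* 82598: 32 IVAR registers, four 8-bit fields per register */
        unsigned int idx98 = (((cause * 64) + entry) >> 2) & 0x1F;
        unsigned int off98 = 8 * (entry & 0x3);

        /* 82599/X540: IVAR(entry >> 1); field selected by entry parity/cause */
        unsigned int idx99 = entry >> 1;
        unsigned int off99 = (16 * (entry & 1)) + (8 * cause);

        printf("82598: IVAR(%u), bit offset %u\n", idx98, off98);
        printf("82599: IVAR(%u), bit offset %u\n", idx99, off99);
        return (0);
}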
4759 4860
4760 4861 /*
4761 4862 * ixgbe_enable_ivar - Enable the given entry by setting the VAL bit of
4762 4863 * given interrupt vector allocation register (IVAR).
4763 4864 * cause:
4764 4865 * -1 : other cause
4765 4866 * 0 : rx
4766 4867 * 1 : tx
4767 4868 */
4768 4869 static void
4769 4870 ixgbe_enable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4770 4871 {
4771 4872 struct ixgbe_hw *hw = &ixgbe->hw;
4772 4873 u32 ivar, index;
4773 4874
4774 4875 switch (hw->mac.type) {
4775 4876 case ixgbe_mac_82598EB:
4776 4877 if (cause == -1) {
4777 4878 cause = 0;
4778 4879 }
4779 4880 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4780 4881 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4781 4882 ivar |= (IXGBE_IVAR_ALLOC_VAL << (8 *
4782 4883 (intr_alloc_entry & 0x3)));
4783 4884 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4784 4885 break;
4785 4886
4786 4887 case ixgbe_mac_82599EB:
4888 + case ixgbe_mac_X540:
4787 4889 if (cause == -1) {
4788 4890 /* other causes */
4789 4891 index = (intr_alloc_entry & 1) * 8;
4790 4892 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4791 4893 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4792 4894 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4793 4895 } else {
4794 4896 /* tx or rx causes */
4795 4897 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4796 4898 ivar = IXGBE_READ_REG(hw,
4797 4899 IXGBE_IVAR(intr_alloc_entry >> 1));
4798 4900 ivar |= (IXGBE_IVAR_ALLOC_VAL << index);
4799 4901 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4800 4902 ivar);
4801 4903 }
4802 4904 break;
4803 4905
4804 4906 default:
4805 4907 break;
4806 4908 }
4807 4909 }
4808 4910
4809 4911 /*
4810 4912 * ixgbe_disable_ivar - Disable the given entry by clearing the VAL bit of
4811 4913 * given interrupt vector allocation register (IVAR).
4812 4914 * cause:
4813 4915 * -1 : other cause
4814 4916 * 0 : rx
4815 4917 * 1 : tx
4816 4918 */
4817 4919 static void
4818 4920 ixgbe_disable_ivar(ixgbe_t *ixgbe, uint16_t intr_alloc_entry, int8_t cause)
4819 4921 {
4820 4922 struct ixgbe_hw *hw = &ixgbe->hw;
4821 4923 u32 ivar, index;
4822 4924
4823 4925 switch (hw->mac.type) {
4824 4926 case ixgbe_mac_82598EB:
4825 4927 if (cause == -1) {
4826 4928 cause = 0;
4827 4929 }
4828 4930 index = (((cause * 64) + intr_alloc_entry) >> 2) & 0x1F;
4829 4931 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
4830 4932 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << (8 *
4831 4933 (intr_alloc_entry & 0x3)));
4832 4934 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
4833 4935 break;
4834 4936
4835 4937 case ixgbe_mac_82599EB:
4938 + case ixgbe_mac_X540:
4836 4939 if (cause == -1) {
4837 4940 /* other causes */
4838 4941 index = (intr_alloc_entry & 1) * 8;
4839 4942 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
4840 4943 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4841 4944 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
4842 4945 } else {
4843 4946 /* tx or rx causes */
4844 4947 index = ((16 * (intr_alloc_entry & 1)) + (8 * cause));
4845 4948 ivar = IXGBE_READ_REG(hw,
4846 4949 IXGBE_IVAR(intr_alloc_entry >> 1));
4847 4950 ivar &= ~(IXGBE_IVAR_ALLOC_VAL << index);
4848 4951 IXGBE_WRITE_REG(hw, IXGBE_IVAR(intr_alloc_entry >> 1),
4849 4952 ivar);
4850 4953 }
4851 4954 break;
4852 4955
4853 4956 default:
4854 4957 break;
4855 4958 }
4856 4959 }
4857 4960
4858 4961 /*
4859 4962 * Convert the rx ring index maintained by the driver to the
4860 4963 * rx ring index used by the h/w.
4861 4964 */
4862 4965 static uint32_t
4863 4966 ixgbe_get_hw_rx_index(ixgbe_t *ixgbe, uint32_t sw_rx_index)
4864 4967 {
4865 4968
4866 4969 struct ixgbe_hw *hw = &ixgbe->hw;
4867 4970 uint32_t rx_ring_per_group, hw_rx_index;
4868 4971
4869 4972 if (ixgbe->classify_mode == IXGBE_CLASSIFY_RSS ||
4870 4973 ixgbe->classify_mode == IXGBE_CLASSIFY_NONE) {
4871 4974 return (sw_rx_index);
4872 4975 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ) {
4873 4976 switch (hw->mac.type) {
4874 4977 case ixgbe_mac_82598EB:
4875 4978 return (sw_rx_index);
4876 4979
4877 4980 case ixgbe_mac_82599EB:
4981 + case ixgbe_mac_X540:
4878 4982 return (sw_rx_index * 2);
4879 4983
4880 4984 default:
4881 4985 break;
4882 4986 }
4883 4987 } else if (ixgbe->classify_mode == IXGBE_CLASSIFY_VMDQ_RSS) {
4884 4988 rx_ring_per_group = ixgbe->num_rx_rings / ixgbe->num_rx_groups;
4885 4989
4886 4990 switch (hw->mac.type) {
4887 4991 case ixgbe_mac_82598EB:
4888 4992 hw_rx_index = (sw_rx_index / rx_ring_per_group) *
4889 4993 16 + (sw_rx_index % rx_ring_per_group);
4890 4994 return (hw_rx_index);
4891 4995
4892 4996 case ixgbe_mac_82599EB:
4997 + case ixgbe_mac_X540:
4893 4998 if (ixgbe->num_rx_groups > 32) {
4894 4999 hw_rx_index = (sw_rx_index /
4895 5000 rx_ring_per_group) * 2 +
4896 5001 (sw_rx_index % rx_ring_per_group);
4897 5002 } else {
4898 5003 hw_rx_index = (sw_rx_index /
4899 5004 rx_ring_per_group) * 4 +
4900 5005 (sw_rx_index % rx_ring_per_group);
4901 5006 }
4902 5007 return (hw_rx_index);
4903 5008
4904 5009 default:
4905 5010 break;
4906 5011 }
4907 5012 }
4908 5013
4909 5014 /*
4910 5015 * Should never reach. Just to make compiler happy.
4911 5016 */
4912 5017 return (sw_rx_index);
4913 5018 }
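
For VMDq+RSS on 82599/X540 with 32 or fewer groups, the mapping above strides the hardware rings in blocks of 4 per pool. A worked example, assuming 8 software rings split over 4 groups:

#include <stdio.h>

/* 82599/X540, VMDq+RSS, <= 32 groups: stride of 4 hw rings per group */
static unsigned int
hw_rx_index(unsigned int sw, unsigned int ring_per_group)
{
        return ((sw / ring_per_group) * 4 + (sw % ring_per_group));
}

int
main(void)
{
        unsigned int sw;

        /* 8 rx rings in 4 groups -> 2 rings per group */
        for (sw = 0; sw < 8; sw++)
                printf("sw ring %u -> hw ring %u\n", sw, hw_rx_index(sw, 2));
        return (0);
}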
4914 5019
4915 5020 /*
4916 5021 * ixgbe_map_intrs_to_vectors - Map different interrupts to MSI-X vectors.
4917 5022 *
4918 5023 * For MSI-X, this maps the rx, tx and other interrupts to
4919 5024 * vector[0 .. (intr_cnt - 1)].
4920 5025 */
4921 5026 static int
4922 5027 ixgbe_map_intrs_to_vectors(ixgbe_t *ixgbe)
4923 5028 {
4924 5029 int i, vector = 0;
4925 5030
4926 5031 /* initialize vector map */
4927 5032 bzero(&ixgbe->vect_map, sizeof (ixgbe->vect_map));
4928 5033 for (i = 0; i < ixgbe->intr_cnt; i++) {
4929 5034 ixgbe->vect_map[i].ixgbe = ixgbe;
4930 5035 }
4931 5036
4932 5037 /*
4933 5038 * non-MSI-X case is very simple: rx rings[0] on RTxQ[0],
4934 5039 * tx rings[0] on RTxQ[1].
4935 5040 */
4936 5041 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
4937 5042 ixgbe_map_rxring_to_vector(ixgbe, 0, 0);
4938 5043 ixgbe_map_txring_to_vector(ixgbe, 0, 1);
4939 5044 return (IXGBE_SUCCESS);
4940 5045 }
4941 5046
4942 5047 /*
4943 5048 * Interrupts/vectors mapping for MSI-X
4944 5049 */
4945 5050
4946 5051 /*
4947 5052 * Map other interrupt to vector 0,
4948 5053 * Set bit in map and count the bits set.
4949 5054 */
4950 5055 BT_SET(ixgbe->vect_map[vector].other_map, 0);
4951 5056 ixgbe->vect_map[vector].other_cnt++;
4952 5057
4953 5058 /*
4954 5059 * Map rx ring interrupts to vectors
4955 5060 */
4956 5061 for (i = 0; i < ixgbe->num_rx_rings; i++) {
4957 5062 ixgbe_map_rxring_to_vector(ixgbe, i, vector);
4958 5063 vector = (vector + 1) % ixgbe->intr_cnt;
4959 5064 }
4960 5065
4961 5066 /*
4962 5067 * Map tx ring interrupts to vectors
4963 5068 */
4964 5069 for (i = 0; i < ixgbe->num_tx_rings; i++) {
4965 5070 ixgbe_map_txring_to_vector(ixgbe, i, vector);
4966 5071 vector = (vector + 1) % ixgbe->intr_cnt;
4967 5072 }
4968 5073
4969 5074 return (IXGBE_SUCCESS);
4970 5075 }
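
The MSI-X mapping above is a simple round robin: the "other" cause is pinned to vector 0, rx rings are dealt out starting at vector 0, and tx rings continue from wherever the rx assignment left off. A sketch with hypothetical counts:

#include <stdio.h>

int
main(void)
{
        /* hypothetical configuration: 4 vectors, 6 rx and 6 tx rings */
        int intr_cnt = 4, num_rx = 6, num_tx = 6;
        int vector = 0, i;

        printf("other cause -> vector 0\n");
        for (i = 0; i < num_rx; i++) {
                printf("rx ring %d -> vector %d\n", i, vector);
                vector = (vector + 1) % intr_cnt;
        }
        for (i = 0; i < num_tx; i++) {
                printf("tx ring %d -> vector %d\n", i, vector);
                vector = (vector + 1) % intr_cnt;
        }
        return (0);
}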
4971 5076
4972 5077 /*
4973 5078 * ixgbe_setup_adapter_vector - Setup the adapter interrupt vector(s).
4974 5079 *
4975 5080 * This relies on ring/vector mapping already set up in the
4976 5081 * vect_map[] structures
4977 5082 */
4978 5083 static void
4979 5084 ixgbe_setup_adapter_vector(ixgbe_t *ixgbe)
4980 5085 {
4981 5086 struct ixgbe_hw *hw = &ixgbe->hw;
4982 5087 ixgbe_intr_vector_t *vect; /* vector bitmap */
4983 5088 int r_idx; /* ring index */
4984 5089 int v_idx; /* vector index */
4985 5090 uint32_t hw_index;
4986 5091
4987 5092 /*
4988 5093 * Clear any previous entries
4989 5094 */
4990 5095 switch (hw->mac.type) {
4991 5096 case ixgbe_mac_82598EB:
4992 5097 for (v_idx = 0; v_idx < 25; v_idx++)
4993 5098 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4994 5099 break;
4995 5100
4996 5101 case ixgbe_mac_82599EB:
5102 + case ixgbe_mac_X540:
4997 5103 for (v_idx = 0; v_idx < 64; v_idx++)
4998 5104 IXGBE_WRITE_REG(hw, IXGBE_IVAR(v_idx), 0);
4999 5105 IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, 0);
5000 5106 break;
5001 5107
5002 5108 default:
5003 5109 break;
5004 5110 }
5005 5111
5006 5112 /*
5007 5113 * For non MSI-X interrupt, rx rings[0] will use RTxQ[0], and
5008 5114 * tx rings[0] will use RTxQ[1].
5009 5115 */
5010 5116 if (ixgbe->intr_type != DDI_INTR_TYPE_MSIX) {
5011 5117 ixgbe_setup_ivar(ixgbe, 0, 0, 0);
5012 5118 ixgbe_setup_ivar(ixgbe, 0, 1, 1);
5013 5119 return;
5014 5120 }
5015 5121
5016 5122 /*
5017 5123 * For MSI-X interrupt, "Other" is always on vector[0].
5018 5124 */
5019 5125 ixgbe_setup_ivar(ixgbe, IXGBE_IVAR_OTHER_CAUSES_INDEX, 0, -1);
5020 5126
5021 5127 /*
5022 5128 * For each interrupt vector, populate the IVAR table
5023 5129 */
5024 5130 for (v_idx = 0; v_idx < ixgbe->intr_cnt; v_idx++) {
5025 5131 vect = &ixgbe->vect_map[v_idx];
5026 5132
5027 5133 /*
5028 5134 * For each rx ring bit set
5029 5135 */
5030 5136 r_idx = bt_getlowbit(vect->rx_map, 0,
5031 5137 (ixgbe->num_rx_rings - 1));
5032 5138
5033 5139 while (r_idx >= 0) {
5034 5140 hw_index = ixgbe->rx_rings[r_idx].hw_index;
5035 5141 ixgbe_setup_ivar(ixgbe, hw_index, v_idx, 0);
5036 5142 r_idx = bt_getlowbit(vect->rx_map, (r_idx + 1),
5037 5143 (ixgbe->num_rx_rings - 1));
5038 5144 }
5039 5145
5040 5146 /*
5041 5147 * For each tx ring bit set
5042 5148 */
5043 5149 r_idx = bt_getlowbit(vect->tx_map, 0,
5044 5150 (ixgbe->num_tx_rings - 1));
5045 5151
5046 5152 while (r_idx >= 0) {
5047 5153 ixgbe_setup_ivar(ixgbe, r_idx, v_idx, 1);
5048 5154 r_idx = bt_getlowbit(vect->tx_map, (r_idx + 1),
5049 5155 (ixgbe->num_tx_rings - 1));
5050 5156 }
5051 5157 }
5052 5158 }
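The IVAR population loop above walks each vector's rx_map/tx_map with bt_getlowbit(), which yields the lowest set bit within a range. A self-contained sketch of that scan pattern; getlowbit() here is a stand-in with assumed semantics, not the kernel's bt_getlowbit():

	#include <stdio.h>

	/* lowest set bit of map within [low, high], or -1 if none (assumed) */
	static int
	getlowbit(unsigned int map, int low, int high)
	{
		int i;

		for (i = low; i <= high; i++) {
			if (map & (1U << i))
				return (i);
		}
		return (-1);
	}

	int
	main(void)
	{
		unsigned int rx_map = 0x0A;	/* rings 1 and 3 on this vector */
		int r;

		for (r = getlowbit(rx_map, 0, 7); r >= 0;
		    r = getlowbit(rx_map, r + 1, 7))
			printf("program IVAR entry for rx ring %d\n", r);
		return (0);
	}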
5053 5159
5054 5160 /*
5055 5161 * ixgbe_rem_intr_handlers - Remove the interrupt handlers.
5056 5162 */
5057 5163 static void
5058 5164 ixgbe_rem_intr_handlers(ixgbe_t *ixgbe)
5059 5165 {
5060 5166 int i;
5061 5167 int rc;
5062 5168
5063 5169 for (i = 0; i < ixgbe->intr_cnt; i++) {
5064 5170 rc = ddi_intr_remove_handler(ixgbe->htable[i]);
5065 5171 if (rc != DDI_SUCCESS) {
5066 5172 IXGBE_DEBUGLOG_1(ixgbe,
5067 5173 "Remove intr handler failed: %d", rc);
5068 5174 }
5069 5175 }
5070 5176 }
5071 5177
5072 5178 /*
5073 5179 * ixgbe_rem_intrs - Remove the allocated interrupts.
5074 5180 */
5075 5181 static void
5076 5182 ixgbe_rem_intrs(ixgbe_t *ixgbe)
5077 5183 {
5078 5184 int i;
5079 5185 int rc;
5080 5186
5081 5187 for (i = 0; i < ixgbe->intr_cnt; i++) {
5082 5188 rc = ddi_intr_free(ixgbe->htable[i]);
5083 5189 if (rc != DDI_SUCCESS) {
5084 5190 IXGBE_DEBUGLOG_1(ixgbe,
5085 5191 "Free intr failed: %d", rc);
5086 5192 }
5087 5193 }
5088 5194
5089 5195 kmem_free(ixgbe->htable, ixgbe->intr_size);
5090 5196 ixgbe->htable = NULL;
5091 5197 }
5092 5198
5093 5199 /*
5094 5200 * ixgbe_enable_intrs - Enable all the ddi interrupts.
5095 5201 */
5096 5202 static int
5097 5203 ixgbe_enable_intrs(ixgbe_t *ixgbe)
5098 5204 {
5099 5205 int i;
5100 5206 int rc;
5101 5207
5102 5208 /*
5103 5209 * Enable interrupts
5104 5210 */
5105 5211 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5106 5212 /*
5107 5213 * Call ddi_intr_block_enable() for MSI
5108 5214 */
5109 5215 rc = ddi_intr_block_enable(ixgbe->htable, ixgbe->intr_cnt);
5110 5216 if (rc != DDI_SUCCESS) {
5111 5217 ixgbe_log(ixgbe,
5112 5218 "Enable block intr failed: %d", rc);
5113 5219 return (IXGBE_FAILURE);
5114 5220 }
5115 5221 } else {
5116 5222 /*
5117 5223 		 * Call ddi_intr_enable() for Legacy/MSI when block enable is not available
5118 5224 */
5119 5225 for (i = 0; i < ixgbe->intr_cnt; i++) {
5120 5226 rc = ddi_intr_enable(ixgbe->htable[i]);
5121 5227 if (rc != DDI_SUCCESS) {
5122 5228 ixgbe_log(ixgbe,
5123 5229 "Enable intr failed: %d", rc);
5124 5230 return (IXGBE_FAILURE);
5125 5231 }
5126 5232 }
5127 5233 }
5128 5234
5129 5235 return (IXGBE_SUCCESS);
5130 5236 }
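For reference, ixgbe->intr_cap is typically obtained earlier (outside this hunk) from the DDI. A hedged sketch of that query, assuming the handles in htable[] are already allocated:

	int cap = 0;

	if (ddi_intr_get_cap(ixgbe->htable[0], &cap) == DDI_SUCCESS)
		ixgbe->intr_cap = cap;	/* may include DDI_INTR_FLAG_BLOCK */

When DDI_INTR_FLAG_BLOCK is set, the framework can enable or disable all vectors in one call, which is why the block path above is taken when available.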
5131 5237
5132 5238 /*
5133 5239 * ixgbe_disable_intrs - Disable all the interrupts.
5134 5240 */
5135 5241 static int
5136 5242 ixgbe_disable_intrs(ixgbe_t *ixgbe)
5137 5243 {
5138 5244 int i;
5139 5245 int rc;
5140 5246
5141 5247 /*
5142 5248 * Disable all interrupts
5143 5249 */
5144 5250 if (ixgbe->intr_cap & DDI_INTR_FLAG_BLOCK) {
5145 5251 rc = ddi_intr_block_disable(ixgbe->htable, ixgbe->intr_cnt);
5146 5252 if (rc != DDI_SUCCESS) {
5147 5253 ixgbe_log(ixgbe,
5148 5254 "Disable block intr failed: %d", rc);
5149 5255 return (IXGBE_FAILURE);
5150 5256 }
5151 5257 } else {
5152 5258 for (i = 0; i < ixgbe->intr_cnt; i++) {
5153 5259 rc = ddi_intr_disable(ixgbe->htable[i]);
5154 5260 if (rc != DDI_SUCCESS) {
5155 5261 ixgbe_log(ixgbe,
5156 5262 "Disable intr failed: %d", rc);
5157 5263 return (IXGBE_FAILURE);
5158 5264 }
5159 5265 }
5160 5266 }
5161 5267
5162 5268 return (IXGBE_SUCCESS);
5163 5269 }
5164 5270
5165 5271 /*
5166 5272 * ixgbe_get_hw_state - Get and save parameters related to adapter hardware.
5167 5273 */
5168 5274 static void
5169 5275 ixgbe_get_hw_state(ixgbe_t *ixgbe)
5170 5276 {
5171 5277 struct ixgbe_hw *hw = &ixgbe->hw;
5172 5278 ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
5173 5279 boolean_t link_up = B_FALSE;
5174 5280 uint32_t pcs1g_anlp = 0;
5175 5281 uint32_t pcs1g_ana = 0;
5176 5282 boolean_t autoneg = B_FALSE;
5177 5283
5178 5284 ASSERT(mutex_owned(&ixgbe->gen_lock));
5179 5285 ixgbe->param_lp_1000fdx_cap = 0;
5180 5286 ixgbe->param_lp_100fdx_cap = 0;
5181 5287
5182 5288 /* check for link, don't wait */
5183 5289 (void) ixgbe_check_link(hw, &speed, &link_up, false);
5184 5290 pcs1g_ana = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
5185 5291
5186 5292 if (link_up) {
5187 5293 pcs1g_anlp = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
5188 5294
5189 5295 ixgbe->param_lp_1000fdx_cap =
5190 5296 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5191 5297 ixgbe->param_lp_100fdx_cap =
5192 5298 (pcs1g_anlp & IXGBE_PCS1GANLP_LPFD) ? 1 : 0;
5193 5299 }
5194 5300
5195 5301 (void) ixgbe_get_link_capabilities(hw, &speed, &autoneg);
5196 5302
5197 5303 ixgbe->param_adv_1000fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
5198 5304 (speed & IXGBE_LINK_SPEED_1GB_FULL)) ? 1 : 0;
5199 5305 ixgbe->param_adv_100fdx_cap = ((pcs1g_ana & IXGBE_PCS1GANA_FDC) &&
5200 5306 (speed & IXGBE_LINK_SPEED_100_FULL)) ? 1 : 0;
5201 5307 }
5202 5308
5203 5309 /*
5204 5310 * ixgbe_get_driver_control - Notify that driver is in control of device.
5205 5311 */
5206 5312 static void
5207 5313 ixgbe_get_driver_control(struct ixgbe_hw *hw)
5208 5314 {
5209 5315 uint32_t ctrl_ext;
5210 5316
5211 5317 /*
5212 5318 * Notify firmware that driver is in control of device
5213 5319 */
5214 5320 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5215 5321 ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
5216 5322 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5217 5323 }
5218 5324
5219 5325 /*
5220 5326 * ixgbe_release_driver_control - Notify that driver is no longer in control
5221 5327 * of device.
5222 5328 */
5223 5329 static void
5224 5330 ixgbe_release_driver_control(struct ixgbe_hw *hw)
5225 5331 {
5226 5332 uint32_t ctrl_ext;
5227 5333
5228 5334 /*
5229 5335 * Notify firmware that driver is no longer in control of device
5230 5336 */
5231 5337 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
5232 5338 ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
5233 5339 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
5234 5340 }
5235 5341
5236 5342 /*
5237 5343 * ixgbe_atomic_reserve - Atomic decrease operation.
5238 5344 */
5239 5345 int
5240 5346 ixgbe_atomic_reserve(uint32_t *count_p, uint32_t n)
5241 5347 {
5242 5348 uint32_t oldval;
5243 5349 uint32_t newval;
5244 5350
5245 5351 /*
5246 5352 * ATOMICALLY
5247 5353 */
5248 5354 do {
5249 5355 oldval = *count_p;
5250 5356 if (oldval < n)
5251 5357 return (-1);
5252 5358 newval = oldval - n;
5253 5359 } while (atomic_cas_32(count_p, oldval, newval) != oldval);
5254 5360
5255 5361 return (newval);
5256 5362 }
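ixgbe_atomic_reserve() is a lock-free take of n units from a shared counter: the compare-and-swap loop retries until the decrement applies to an unmodified value, and fails fast if fewer than n units remain. A hedged usage sketch; the tbd_free counter and the count of 4 are illustrative, not taken from this changeset:

	uint32_t tbd_free = 128;	/* e.g. free tx descriptors */

	if (ixgbe_atomic_reserve(&tbd_free, 4) < 0) {
		/* fewer than 4 units left; caller backs off and retries */
		return (B_FALSE);
	}
	/* ... if the operation is later aborted, return the units ... */
	atomic_add_32(&tbd_free, 4);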
5257 5363
5258 5364 /*
5259 5365 * ixgbe_mc_table_itr - Traverse the entries in the multicast table.
5260 5366 */
5261 5367 static uint8_t *
5262 5368 ixgbe_mc_table_itr(struct ixgbe_hw *hw, uint8_t **upd_ptr, uint32_t *vmdq)
5263 5369 {
5264 5370 uint8_t *addr = *upd_ptr;
5265 5371 uint8_t *new_ptr;
5266 5372
5267 5373 _NOTE(ARGUNUSED(hw));
5268 5374 _NOTE(ARGUNUSED(vmdq));
5269 5375
5270 5376 new_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
5271 5377 *upd_ptr = new_ptr;
5272 5378 return (addr);
5273 5379 }
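The iterator hands back the current six-byte address and advances the caller's cursor, so a caller can walk a packed table without knowing its layout. A hedged consumer sketch; mc_table and n are assumed locals, not names from this diff:

	uint8_t *p = mc_table;	/* packed array of n 6-byte addresses */
	uint32_t vmdq = 0;
	int i;

	for (i = 0; i < n; i++) {
		uint8_t *addr = ixgbe_mc_table_itr(hw, &p, &vmdq);
		/* program addr into the hardware multicast table here */
	}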
5274 5380
5275 5381 /*
5276 5382 * FMA support
5277 5383 */
5278 5384 int
5279 5385 ixgbe_check_acc_handle(ddi_acc_handle_t handle)
5280 5386 {
5281 5387 ddi_fm_error_t de;
5282 5388
5283 5389 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
5284 5390 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
5285 5391 return (de.fme_status);
5286 5392 }
5287 5393
5288 5394 int
5289 5395 ixgbe_check_dma_handle(ddi_dma_handle_t handle)
5290 5396 {
5291 5397 ddi_fm_error_t de;
5292 5398
5293 5399 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
5294 5400 return (de.fme_status);
5295 5401 }
5296 5402
5297 5403 /*
5298 5404 * ixgbe_fm_error_cb - The IO fault service error handling callback function.
5299 5405 */
5300 5406 static int
5301 5407 ixgbe_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
5302 5408 {
5303 5409 _NOTE(ARGUNUSED(impl_data));
5304 5410 /*
5305 5411 	 * As the driver can always deal with an error in any DMA or
5306 5412 	 * access handle, we can just return the fme_status value.
5307 5413 */
5308 5414 pci_ereport_post(dip, err, NULL);
5309 5415 return (err->fme_status);
5310 5416 }
5311 5417
5312 5418 static void
5313 5419 ixgbe_fm_init(ixgbe_t *ixgbe)
5314 5420 {
5315 5421 ddi_iblock_cookie_t iblk;
5316 5422 int fma_dma_flag;
5317 5423
5318 5424 /*
5319 5425 * Only register with IO Fault Services if we have some capability
5320 5426 */
5321 5427 if (ixgbe->fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
5322 5428 ixgbe_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
5323 5429 } else {
5324 5430 ixgbe_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
5325 5431 }
5326 5432
5327 5433 if (ixgbe->fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
5328 5434 fma_dma_flag = 1;
5329 5435 } else {
5330 5436 fma_dma_flag = 0;
5331 5437 }
5332 5438
5333 5439 ixgbe_set_fma_flags(fma_dma_flag);
5334 5440
5335 5441 if (ixgbe->fm_capabilities) {
5336 5442
5337 5443 /*
5338 5444 * Register capabilities with IO Fault Services
5339 5445 */
5340 5446 ddi_fm_init(ixgbe->dip, &ixgbe->fm_capabilities, &iblk);
5341 5447
5342 5448 /*
5343 5449 * Initialize pci ereport capabilities if ereport capable
5344 5450 */
5345 5451 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5346 5452 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5347 5453 pci_ereport_setup(ixgbe->dip);
5348 5454
5349 5455 /*
5350 5456 * Register error callback if error callback capable
5351 5457 */
5352 5458 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5353 5459 ddi_fm_handler_register(ixgbe->dip,
5354 5460 			    ixgbe_fm_error_cb, (void *)ixgbe);
5355 5461 }
5356 5462 }
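Once registered, the access/DMA checks above are meant to be called after risky I/O, with a service-impact report on failure. A hedged sketch of the usual consumer pattern; the reg_handle field name is an assumption here, not shown in this hunk:

	if (ixgbe_check_acc_handle(ixgbe->osdep.reg_handle) != DDI_FM_OK) {
		/* register access is suspect; report degraded service */
		ddi_fm_service_impact(ixgbe->dip, DDI_SERVICE_DEGRADED);
	}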
5357 5463
5358 5464 static void
5359 5465 ixgbe_fm_fini(ixgbe_t *ixgbe)
5360 5466 {
5361 5467 /*
5362 5468 * Only unregister FMA capabilities if they are registered
5363 5469 */
5364 5470 if (ixgbe->fm_capabilities) {
5365 5471
5366 5472 /*
5367 5473 * Release any resources allocated by pci_ereport_setup()
5368 5474 */
5369 5475 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities) ||
5370 5476 DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5371 5477 pci_ereport_teardown(ixgbe->dip);
5372 5478
5373 5479 /*
5374 5480 * Un-register error callback if error callback capable
5375 5481 */
5376 5482 if (DDI_FM_ERRCB_CAP(ixgbe->fm_capabilities))
5377 5483 ddi_fm_handler_unregister(ixgbe->dip);
5378 5484
5379 5485 /*
5380 5486 * Unregister from IO Fault Service
5381 5487 */
5382 5488 ddi_fm_fini(ixgbe->dip);
5383 5489 }
5384 5490 }
5385 5491
5386 5492 void
5387 5493 ixgbe_fm_ereport(ixgbe_t *ixgbe, char *detail)
5388 5494 {
5389 5495 uint64_t ena;
5390 5496 char buf[FM_MAX_CLASS];
5391 5497
5392 5498 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
5393 5499 ena = fm_ena_generate(0, FM_ENA_FMT1);
5394 5500 if (DDI_FM_EREPORT_CAP(ixgbe->fm_capabilities)) {
5395 5501 ddi_fm_ereport_post(ixgbe->dip, buf, ena, DDI_NOSLEEP,
5396 5502 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
5397 5503 }
5398 5504 }
5399 5505
5400 5506 static int
5401 5507 ixgbe_ring_start(mac_ring_driver_t rh, uint64_t mr_gen_num)
5402 5508 {
5403 5509 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)rh;
5404 5510
5405 5511 mutex_enter(&rx_ring->rx_lock);
5406 5512 rx_ring->ring_gen_num = mr_gen_num;
5407 5513 mutex_exit(&rx_ring->rx_lock);
5408 5514 return (0);
5409 5515 }
5410 5516
5411 5517 /*
5412 5518 * Get the global ring index by a ring index within a group.
5413 5519 */
5414 5520 static int
5415 5521 ixgbe_get_rx_ring_index(ixgbe_t *ixgbe, int gindex, int rindex)
5416 5522 {
5417 5523 ixgbe_rx_ring_t *rx_ring;
5418 5524 int i;
5419 5525
5420 5526 for (i = 0; i < ixgbe->num_rx_rings; i++) {
5421 5527 rx_ring = &ixgbe->rx_rings[i];
5422 5528 if (rx_ring->group_index == gindex)
5423 5529 rindex--;
5424 5530 if (rindex < 0)
5425 5531 return (i);
5426 5532 }
5427 5533
5428 5534 return (-1);
5429 5535 }
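The search decrements rindex each time a ring belonging to gindex is seen, returning the global index once the requested within-group ordinal is reached. A standalone sketch with a made-up ring-to-group layout:

	#include <stdio.h>

	typedef struct { int group_index; } ring_t;

	static int
	get_rx_ring_index(ring_t *rings, int nrings, int gindex, int rindex)
	{
		int i;

		for (i = 0; i < nrings; i++) {
			if (rings[i].group_index == gindex)
				rindex--;
			if (rindex < 0)
				return (i);
		}
		return (-1);
	}

	int
	main(void)
	{
		/* 4 rings, 2 groups: global rings 0,2 in group 0; 1,3 in group 1 */
		ring_t rings[4] = { {0}, {1}, {0}, {1} };

		/* second ring (rindex 1) of group 1 is global ring 3 */
		printf("%d\n", get_rx_ring_index(rings, 4, 1, 1));
		return (0);
	}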
5430 5536
5431 5537 /*
5432 5538  * Callback function for the MAC layer to register all rings.
5433 5539 */
5434 5540 /* ARGSUSED */
5435 5541 void
5436 5542 ixgbe_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
5437 5543 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
5438 5544 {
5439 5545 ixgbe_t *ixgbe = (ixgbe_t *)arg;
5440 5546 mac_intr_t *mintr = &infop->mri_intr;
5441 5547
5442 5548 switch (rtype) {
5443 5549 case MAC_RING_TYPE_RX: {
5444 5550 /*
5445 5551 		 * 'ring_index' is the ring index within the group.
5446 5552 		 * The global ring index is found by searching across groups.
5447 5553 */
5448 5554 int global_ring_index = ixgbe_get_rx_ring_index(
5449 5555 ixgbe, group_index, ring_index);
5450 5556
5451 5557 ASSERT(global_ring_index >= 0);
5452 5558
5453 5559 ixgbe_rx_ring_t *rx_ring = &ixgbe->rx_rings[global_ring_index];
5454 5560 rx_ring->ring_handle = rh;
5455 5561
5456 5562 infop->mri_driver = (mac_ring_driver_t)rx_ring;
5457 5563 infop->mri_start = ixgbe_ring_start;
5458 5564 infop->mri_stop = NULL;
5459 5565 infop->mri_poll = ixgbe_ring_rx_poll;
5460 5566 infop->mri_stat = ixgbe_rx_ring_stat;
5461 5567
5462 5568 mintr->mi_handle = (mac_intr_handle_t)rx_ring;
5463 5569 mintr->mi_enable = ixgbe_rx_ring_intr_enable;
5464 5570 mintr->mi_disable = ixgbe_rx_ring_intr_disable;
5465 5571 if (ixgbe->intr_type &
5466 5572 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5467 5573 mintr->mi_ddi_handle =
5468 5574 ixgbe->htable[rx_ring->intr_vector];
5469 5575 }
5470 5576
5471 5577 break;
5472 5578 }
5473 5579 case MAC_RING_TYPE_TX: {
5474 5580 ASSERT(group_index == -1);
5475 5581 ASSERT(ring_index < ixgbe->num_tx_rings);
5476 5582
5477 5583 ixgbe_tx_ring_t *tx_ring = &ixgbe->tx_rings[ring_index];
5478 5584 tx_ring->ring_handle = rh;
5479 5585
5480 5586 infop->mri_driver = (mac_ring_driver_t)tx_ring;
5481 5587 infop->mri_start = NULL;
5482 5588 infop->mri_stop = NULL;
5483 5589 infop->mri_tx = ixgbe_ring_tx;
5484 5590 infop->mri_stat = ixgbe_tx_ring_stat;
5485 5591 if (ixgbe->intr_type &
5486 5592 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
5487 5593 mintr->mi_ddi_handle =
5488 5594 ixgbe->htable[tx_ring->intr_vector];
5489 5595 }
5490 5596 break;
5491 5597 }
5492 5598 default:
5493 5599 break;
5494 5600 }
5495 5601 }
5496 5602
5497 5603 /*
5498 5604  * Callback function for the MAC layer to register all groups.
5499 5605 */
5500 5606 void
5501 5607 ixgbe_fill_group(void *arg, mac_ring_type_t rtype, const int index,
5502 5608 mac_group_info_t *infop, mac_group_handle_t gh)
5503 5609 {
5504 5610 ixgbe_t *ixgbe = (ixgbe_t *)arg;
5505 5611
5506 5612 switch (rtype) {
5507 5613 case MAC_RING_TYPE_RX: {
5508 5614 ixgbe_rx_group_t *rx_group;
5509 5615
5510 5616 rx_group = &ixgbe->rx_groups[index];
5511 5617 rx_group->group_handle = gh;
5512 5618
5513 5619 infop->mgi_driver = (mac_group_driver_t)rx_group;
5514 5620 infop->mgi_start = NULL;
5515 5621 infop->mgi_stop = NULL;
5516 5622 infop->mgi_addmac = ixgbe_addmac;
5517 5623 infop->mgi_remmac = ixgbe_remmac;
5518 5624 infop->mgi_count = (ixgbe->num_rx_rings / ixgbe->num_rx_groups);
5519 5625
5520 5626 break;
5521 5627 }
5522 5628 case MAC_RING_TYPE_TX:
5523 5629 break;
5524 5630 default:
5525 5631 break;
5526 5632 }
5527 5633 }
5528 5634
5529 5635 /*
5530 5636  * Enable the interrupt on the specified rx ring.
5531 5637 */
5532 5638 int
5533 5639 ixgbe_rx_ring_intr_enable(mac_intr_handle_t intrh)
5534 5640 {
5535 5641 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5536 5642 ixgbe_t *ixgbe = rx_ring->ixgbe;
5537 5643 int r_idx = rx_ring->index;
5538 5644 int hw_r_idx = rx_ring->hw_index;
5539 5645 int v_idx = rx_ring->intr_vector;
5540 5646
5541 5647 mutex_enter(&ixgbe->gen_lock);
5542 5648 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5543 5649 mutex_exit(&ixgbe->gen_lock);
5544 5650 /*
5545 5651 * Simply return 0.
5546 5652 * Interrupts are being adjusted. ixgbe_intr_adjust()
5547 5653 * will eventually re-enable the interrupt when it's
5548 5654 * done with the adjustment.
5549 5655 */
5550 5656 return (0);
5551 5657 }
5552 5658
5553 5659 /*
5554 5660 	 * Enable the interrupt by setting the VAL bit of the given
5555 5661 	 * interrupt vector allocation register (IVAR).
5556 5662 */
5557 5663 ixgbe_enable_ivar(ixgbe, hw_r_idx, 0);
5558 5664
5559 5665 BT_SET(ixgbe->vect_map[v_idx].rx_map, r_idx);
5560 5666
5561 5667 /*
5562 5668 * Trigger a Rx interrupt on this ring
5563 5669 */
5564 5670 IXGBE_WRITE_REG(&ixgbe->hw, IXGBE_EICS, (1 << v_idx));
5565 5671 IXGBE_WRITE_FLUSH(&ixgbe->hw);
5566 5672
5567 5673 mutex_exit(&ixgbe->gen_lock);
5568 5674
5569 5675 return (0);
5570 5676 }
5571 5677
5572 5678 /*
5573 5679  * Disable the interrupt on the specified rx ring.
5574 5680 */
5575 5681 int
5576 5682 ixgbe_rx_ring_intr_disable(mac_intr_handle_t intrh)
5577 5683 {
5578 5684 ixgbe_rx_ring_t *rx_ring = (ixgbe_rx_ring_t *)intrh;
5579 5685 ixgbe_t *ixgbe = rx_ring->ixgbe;
5580 5686 int r_idx = rx_ring->index;
5581 5687 int hw_r_idx = rx_ring->hw_index;
5582 5688 int v_idx = rx_ring->intr_vector;
5583 5689
5584 5690 mutex_enter(&ixgbe->gen_lock);
5585 5691 if (ixgbe->ixgbe_state & IXGBE_INTR_ADJUST) {
5586 5692 mutex_exit(&ixgbe->gen_lock);
5587 5693 /*
5588 5694 * Simply return 0.
5589 5695 * In the rare case where an interrupt is being
5590 5696 * disabled while interrupts are being adjusted,
5591 5697 * we don't fail the operation. No interrupts will
5592 5698 * be generated while they are adjusted, and
5593 5699 * ixgbe_intr_adjust() will cause the interrupts
5594 5700 * to be re-enabled once it completes. Note that
5595 5701 * in this case, packets may be delivered to the
5596 5702 		 * stack via interrupts before ixgbe_rx_ring_intr_enable()
5597 5703 * is called again. This is acceptable since interrupt
5598 5704 * adjustment is infrequent, and the stack will be
5599 5705 * able to handle these packets.
5600 5706 */
5601 5707 return (0);
5602 5708 }
5603 5709
5604 5710 /*
5605 5711 	 * Disable the interrupt by clearing the VAL bit of the given
5606 5712 	 * interrupt vector allocation register (IVAR).
5607 5713 */
5608 5714 ixgbe_disable_ivar(ixgbe, hw_r_idx, 0);
5609 5715
5610 5716 BT_CLEAR(ixgbe->vect_map[v_idx].rx_map, r_idx);
5611 5717
5612 5718 mutex_exit(&ixgbe->gen_lock);
5613 5719
5614 5720 return (0);
5615 5721 }
5616 5722
5617 5723 /*
5618 5724 * Add a mac address.
5619 5725 */
5620 5726 static int
5621 5727 ixgbe_addmac(void *arg, const uint8_t *mac_addr)
5622 5728 {
5623 5729 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5624 5730 ixgbe_t *ixgbe = rx_group->ixgbe;
5625 5731 struct ixgbe_hw *hw = &ixgbe->hw;
5626 5732 int slot, i;
5627 5733
5628 5734 mutex_enter(&ixgbe->gen_lock);
5629 5735
5630 5736 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5631 5737 mutex_exit(&ixgbe->gen_lock);
5632 5738 return (ECANCELED);
5633 5739 }
5634 5740
5635 5741 if (ixgbe->unicst_avail == 0) {
5636 5742 /* no slots available */
5637 5743 mutex_exit(&ixgbe->gen_lock);
5638 5744 return (ENOSPC);
5639 5745 }
5640 5746
5641 5747 /*
5642 5748 	 * The first ixgbe->num_rx_groups slots are reserved, one for each
5643 5749 	 * group. The remaining slots are shared by all groups. When adding
5644 5750 	 * a MAC address, the group's reserved slot is checked first, then
5645 5751 	 * the shared slots are searched.
5646 5752 */
5647 5753 slot = -1;
5648 5754 if (ixgbe->unicst_addr[rx_group->index].mac.set == 1) {
5649 5755 for (i = ixgbe->num_rx_groups; i < ixgbe->unicst_total; i++) {
5650 5756 if (ixgbe->unicst_addr[i].mac.set == 0) {
5651 5757 slot = i;
5652 5758 break;
5653 5759 }
5654 5760 }
5655 5761 } else {
5656 5762 slot = rx_group->index;
5657 5763 }
5658 5764
5659 5765 if (slot == -1) {
5660 5766 /* no slots available */
5661 5767 mutex_exit(&ixgbe->gen_lock);
5662 5768 return (ENOSPC);
5663 5769 }
5664 5770
5665 5771 bcopy(mac_addr, ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5666 5772 (void) ixgbe_set_rar(hw, slot, ixgbe->unicst_addr[slot].mac.addr,
5667 5773 rx_group->index, IXGBE_RAH_AV);
5668 5774 ixgbe->unicst_addr[slot].mac.set = 1;
5669 5775 ixgbe->unicst_addr[slot].mac.group_index = rx_group->index;
5670 5776 ixgbe->unicst_avail--;
5671 5777
5672 5778 mutex_exit(&ixgbe->gen_lock);
5673 5779
5674 5780 return (0);
5675 5781 }
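The slot policy gives each group one reserved RAR entry (slot == group index) and treats the remainder as a shared pool. A standalone sketch of just the slot selection, with made-up sizes:

	#include <stdio.h>

	#define	NGROUPS	2
	#define	NSLOTS	6

	static int set[NSLOTS];		/* 1 = slot in use */

	static int
	find_slot(int group)
	{
		int i;

		if (!set[group])	/* group's reserved slot still free? */
			return (group);
		for (i = NGROUPS; i < NSLOTS; i++) {	/* else shared pool */
			if (!set[i])
				return (i);
		}
		return (-1);
	}

	int
	main(void)
	{
		set[0] = 1;			/* group 0's reserved slot taken */
		printf("%d\n", find_slot(0));	/* 2: first shared slot */
		printf("%d\n", find_slot(1));	/* 1: group 1's reserved slot */
		return (0);
	}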
5676 5782
5677 5783 /*
5678 5784 * Remove a mac address.
5679 5785 */
5680 5786 static int
5681 5787 ixgbe_remmac(void *arg, const uint8_t *mac_addr)
5682 5788 {
5683 5789 ixgbe_rx_group_t *rx_group = (ixgbe_rx_group_t *)arg;
5684 5790 ixgbe_t *ixgbe = rx_group->ixgbe;
5685 5791 struct ixgbe_hw *hw = &ixgbe->hw;
5686 5792 int slot;
5687 5793
5688 5794 mutex_enter(&ixgbe->gen_lock);
5689 5795
5690 5796 if (ixgbe->ixgbe_state & IXGBE_SUSPENDED) {
5691 5797 mutex_exit(&ixgbe->gen_lock);
5692 5798 return (ECANCELED);
5693 5799 }
5694 5800
5695 5801 slot = ixgbe_unicst_find(ixgbe, mac_addr);
5696 5802 if (slot == -1) {
5697 5803 mutex_exit(&ixgbe->gen_lock);
5698 5804 return (EINVAL);
5699 5805 }
5700 5806
5701 5807 if (ixgbe->unicst_addr[slot].mac.set == 0) {
5702 5808 mutex_exit(&ixgbe->gen_lock);
5703 5809 return (EINVAL);
5704 5810 }
5705 5811
5706 5812 bzero(ixgbe->unicst_addr[slot].mac.addr, ETHERADDRL);
5707 5813 (void) ixgbe_clear_rar(hw, slot);
5708 5814 ixgbe->unicst_addr[slot].mac.set = 0;
5709 5815 ixgbe->unicst_avail++;
5710 5816
5711 5817 mutex_exit(&ixgbe->gen_lock);
5712 5818
5713 5819 return (0);
5714 5820 }