Print this page
3014 Intel X540 Support (fix lint)
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/ixgbe/ixgbe_82598.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_82598.c
1 1 /******************************************************************************
2 2
3 3 Copyright (c) 2001-2012, Intel Corporation
4 4 All rights reserved.
5 5
6 6 Redistribution and use in source and binary forms, with or without
7 7 modification, are permitted provided that the following conditions are met:
8 8
9 9 1. Redistributions of source code must retain the above copyright notice,
10 10 this list of conditions and the following disclaimer.
11 11
12 12 2. Redistributions in binary form must reproduce the above copyright
13 13 notice, this list of conditions and the following disclaimer in the
14 14 documentation and/or other materials provided with the distribution.
15 15
16 16 3. Neither the name of the Intel Corporation nor the names of its
17 17 contributors may be used to endorse or promote products derived from
18 18 this software without specific prior written permission.
19 19
20 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 30 POSSIBILITY OF SUCH DAMAGE.
31 31
32 32 ******************************************************************************/
33 33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c,v 1.13 2012/07/05 20:51:44 jfv Exp $*/
34 34
35 35 #include "ixgbe_type.h"
36 36 #include "ixgbe_82598.h"
37 37 #include "ixgbe_api.h"
38 38 #include "ixgbe_common.h"
39 39 #include "ixgbe_phy.h"
40 40
41 41 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
42 42 ixgbe_link_speed *speed,
43 43 bool *autoneg);
44 44 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
45 45 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
46 46 bool autoneg_wait_to_complete);
47 47 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
48 48 ixgbe_link_speed *speed, bool *link_up,
49 49 bool link_up_wait_to_complete);
50 50 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
51 51 ixgbe_link_speed speed,
52 52 bool autoneg,
53 53 bool autoneg_wait_to_complete);
54 54 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
55 55 ixgbe_link_speed speed,
56 56 bool autoneg,
57 57 bool autoneg_wait_to_complete);
58 58 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
59 59 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
60 60 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
61 61 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
62 62 u32 headroom, int strategy);
63 63
64 64 /**
65 65 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
66 66 * @hw: pointer to the HW structure
67 67 *
68 68 * The defaults for 82598 should be in the range of 50us to 50ms,
69 69 * however the hardware default for these parts is 500us to 1ms which is less
70 70 * than the 10ms recommended by the pci-e spec. To address this we need to
71 71 * increase the value to either 10ms to 250ms for capability version 1 config,
72 72 * or 16ms to 55ms for version 2.
73 73 **/
74 74 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
75 75 {
76 76 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
77 77 u16 pcie_devctl2;
78 78
79 79 /* only take action if timeout value is defaulted to 0 */
80 80 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
81 81 goto out;
82 82
83 83 /*
84 84 * if capababilities version is type 1 we can write the
85 85 * timeout of 10ms to 250ms through the GCR register
86 86 */
87 87 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
88 88 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
89 89 goto out;
90 90 }
91 91
92 92 /*
93 93 * for version 2 capabilities we need to write the config space
94 94 * directly in order to set the completion timeout value for
95 95 * 16ms to 55ms
96 96 */
97 97 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
98 98 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
99 99 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
100 100 out:
101 101 /* disable completion timeout resend */
102 102 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
103 103 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
104 104 }
105 105
106 106 /**
107 107 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
108 108 * @hw: pointer to hardware structure
109 109 *
110 110 * Initialize the function pointers and assign the MAC type for 82598.
111 111 * Does not touch the hardware.
112 112 **/
113 113 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
114 114 {
115 115 struct ixgbe_mac_info *mac = &hw->mac;
116 116 struct ixgbe_phy_info *phy = &hw->phy;
117 117 s32 ret_val;
118 118
119 119 DEBUGFUNC("ixgbe_init_ops_82598");
120 120
121 121 ret_val = ixgbe_init_phy_ops_generic(hw);
122 122 ret_val = ixgbe_init_ops_generic(hw);
123 123
124 124 /* PHY */
125 125 phy->ops.init = &ixgbe_init_phy_ops_82598;
126 126
127 127 /* MAC */
128 128 mac->ops.start_hw = &ixgbe_start_hw_82598;
129 129 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
130 130 mac->ops.reset_hw = &ixgbe_reset_hw_82598;
131 131 mac->ops.get_media_type = &ixgbe_get_media_type_82598;
132 132 mac->ops.get_supported_physical_layer =
133 133 &ixgbe_get_supported_physical_layer_82598;
134 134 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
135 135 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
136 136 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
137 137
138 138 /* RAR, Multicast, VLAN */
139 139 mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
140 140 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
141 141 mac->ops.set_vfta = &ixgbe_set_vfta_82598;
142 142 mac->ops.set_vlvf = NULL;
143 143 mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
144 144
145 145 /* Flow Control */
146 146 mac->ops.fc_enable = &ixgbe_fc_enable_82598;
147 147
148 148 mac->mcft_size = 128;
149 149 mac->vft_size = 128;
150 150 mac->num_rar_entries = 16;
151 151 mac->rx_pb_size = 512;
152 152 mac->max_tx_queues = 32;
153 153 mac->max_rx_queues = 64;
154 154 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
155 155
156 156 /* SFP+ Module */
157 157 phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
158 158
159 159 /* Link */
160 160 mac->ops.check_link = &ixgbe_check_mac_link_82598;
161 161 mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
162 162 mac->ops.flap_tx_laser = NULL;
163 163 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
164 164 mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
165 165
166 166 /* Manageability interface */
167 167 mac->ops.set_fw_drv_ver = NULL;
168 168
169 169 return ret_val;
170 170 }
171 171
172 172 /**
173 173 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
174 174 * @hw: pointer to hardware structure
175 175 *
176 176 * Initialize any function pointers that were not able to be
177 177 * set during init_shared_code because the PHY/SFP type was
178 178 * not known. Perform the SFP init if necessary.
179 179 *
180 180 **/
181 181 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
182 182 {
183 183 struct ixgbe_mac_info *mac = &hw->mac;
184 184 struct ixgbe_phy_info *phy = &hw->phy;
185 185 s32 ret_val = IXGBE_SUCCESS;
186 186 u16 list_offset, data_offset;
187 187
188 188 DEBUGFUNC("ixgbe_init_phy_ops_82598");
189 189
190 190 /* Identify the PHY */
191 191 phy->ops.identify(hw);
192 192
193 193 /* Overwrite the link function pointers if copper PHY */
194 194 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
195 195 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
196 196 mac->ops.get_link_capabilities =
197 197 &ixgbe_get_copper_link_capabilities_generic;
198 198 }
199 199
200 200 switch (hw->phy.type) {
201 201 case ixgbe_phy_tn:
202 202 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
203 203 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
204 204 phy->ops.get_firmware_version =
205 205 &ixgbe_get_phy_firmware_version_tnx;
206 206 break;
207 207 case ixgbe_phy_nl:
208 208 phy->ops.reset = &ixgbe_reset_phy_nl;
209 209
210 210 /* Call SFP+ identify routine to get the SFP+ module type */
211 211 ret_val = phy->ops.identify_sfp(hw);
212 212 if (ret_val != IXGBE_SUCCESS)
213 213 goto out;
214 214 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
215 215 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
216 216 goto out;
217 217 }
218 218
219 219 /* Check to see if SFP+ module is supported */
220 220 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
221 221 &list_offset,
222 222 &data_offset);
223 223 if (ret_val != IXGBE_SUCCESS) {
224 224 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
225 225 goto out;
226 226 }
227 227 break;
228 228 default:
229 229 break;
230 230 }
231 231
232 232 out:
233 233 return ret_val;
234 234 }
235 235
236 236 /**
237 237 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
238 238 * @hw: pointer to hardware structure
239 239 *
240 240 * Starts the hardware using the generic start_hw function.
241 241 	 * Disables relaxed ordering, then sets the PCIe completion timeout.
242 242 *
243 243 **/
244 244 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
245 245 {
246 246 u32 regval;
247 247 u32 i;
248 248 s32 ret_val = IXGBE_SUCCESS;
249 249
250 250 DEBUGFUNC("ixgbe_start_hw_82598");
251 251
252 252 ret_val = ixgbe_start_hw_generic(hw);
253 253
254 254 /* Disable relaxed ordering */
255 255 for (i = 0; ((i < hw->mac.max_tx_queues) &&
256 256 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
257 257 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
258 258 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
259 259 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
260 260 }
261 261
262 262 for (i = 0; ((i < hw->mac.max_rx_queues) &&
263 263 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
264 264 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
265 265 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
266 266 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
267 267 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
268 268 }
269 269
270 270 /* set the completion timeout for interface */
271 271 if (ret_val == IXGBE_SUCCESS)
272 272 ixgbe_set_pcie_completion_timeout(hw);
273 273
274 274 return ret_val;
275 275 }
276 276
277 277 /**
278 278 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
279 279 * @hw: pointer to hardware structure
280 280 * @speed: pointer to link speed
281 281 * @autoneg: boolean auto-negotiation value
282 282 *
283 283 * Determines the link capabilities by reading the AUTOC register.
284 284 **/
285 285 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
286 286 ixgbe_link_speed *speed,
287 287 bool *autoneg)
288 288 {
289 289 s32 status = IXGBE_SUCCESS;
290 290 u32 autoc = 0;
291 291
292 292 DEBUGFUNC("ixgbe_get_link_capabilities_82598");
293 293
294 294 /*
295 295 * Determine link capabilities based on the stored value of AUTOC,
296 296 * which represents EEPROM defaults. If AUTOC value has not been
297 297 * stored, use the current register value.
298 298 */
299 299 if (hw->mac.orig_link_settings_stored)
300 300 autoc = hw->mac.orig_autoc;
301 301 else
302 302 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
303 303
304 304 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
305 305 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
306 306 *speed = IXGBE_LINK_SPEED_1GB_FULL;
307 307 *autoneg = FALSE;
308 308 break;
309 309
310 310 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
311 311 *speed = IXGBE_LINK_SPEED_10GB_FULL;
312 312 *autoneg = FALSE;
313 313 break;
314 314
315 315 case IXGBE_AUTOC_LMS_1G_AN:
316 316 *speed = IXGBE_LINK_SPEED_1GB_FULL;
317 317 *autoneg = TRUE;
318 318 break;
319 319
320 320 case IXGBE_AUTOC_LMS_KX4_AN:
321 321 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
322 322 *speed = IXGBE_LINK_SPEED_UNKNOWN;
323 323 if (autoc & IXGBE_AUTOC_KX4_SUPP)
324 324 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
325 325 if (autoc & IXGBE_AUTOC_KX_SUPP)
326 326 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
327 327 *autoneg = TRUE;
328 328 break;
329 329
330 330 default:
331 331 status = IXGBE_ERR_LINK_SETUP;
332 332 break;
333 333 }
334 334
335 335 return status;
336 336 }
337 337
338 338 /**
339 339 * ixgbe_get_media_type_82598 - Determines media type
340 340 * @hw: pointer to hardware structure
341 341 *
342 342 * Returns the media type (fiber, copper, backplane)
343 343 **/
344 344 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
345 345 {
346 346 enum ixgbe_media_type media_type;
347 347
348 348 DEBUGFUNC("ixgbe_get_media_type_82598");
349 349
350 350 /* Detect if there is a copper PHY attached. */
351 351 switch (hw->phy.type) {
352 352 case ixgbe_phy_cu_unknown:
353 353 case ixgbe_phy_tn:
354 354 media_type = ixgbe_media_type_copper;
355 355 goto out;
356 356 default:
357 357 break;
358 358 }
359 359
360 360 /* Media type for I82598 is based on device ID */
361 361 switch (hw->device_id) {
362 362 case IXGBE_DEV_ID_82598:
363 363 case IXGBE_DEV_ID_82598_BX:
364 364 /* Default device ID is mezzanine card KX/KX4 */
365 365 media_type = ixgbe_media_type_backplane;
366 366 break;
367 367 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
368 368 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
369 369 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
370 370 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
371 371 case IXGBE_DEV_ID_82598EB_XF_LR:
372 372 case IXGBE_DEV_ID_82598EB_SFP_LOM:
373 373 media_type = ixgbe_media_type_fiber;
374 374 break;
375 375 case IXGBE_DEV_ID_82598EB_CX4:
376 376 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
377 377 media_type = ixgbe_media_type_cx4;
378 378 break;
379 379 case IXGBE_DEV_ID_82598AT:
380 380 case IXGBE_DEV_ID_82598AT2:
381 381 media_type = ixgbe_media_type_copper;
382 382 break;
383 383 default:
384 384 media_type = ixgbe_media_type_unknown;
385 385 break;
386 386 }
387 387 out:
388 388 return media_type;
389 389 }
390 390
391 391 /**
392 392 * ixgbe_fc_enable_82598 - Enable flow control
393 393 * @hw: pointer to hardware structure
394 394 *
395 395 * Enable flow control according to the current settings.
396 396 **/
397 397 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
398 398 {
399 399 s32 ret_val = IXGBE_SUCCESS;
400 400 u32 fctrl_reg;
401 401 u32 rmcs_reg;
402 402 u32 reg;
403 403 u32 fcrtl, fcrth;
404 404 u32 link_speed = 0;
405 405 int i;
406 406 bool link_up;
407 407
408 408 DEBUGFUNC("ixgbe_fc_enable_82598");
409 409
410 410 /* Validate the water mark configuration */
411 411 if (!hw->fc.pause_time) {
412 412 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
413 413 goto out;
414 414 }
415 415
416 416 /* Low water mark of zero causes XOFF floods */
417 417 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
418 418 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
419 419 hw->fc.high_water[i]) {
420 420 if (!hw->fc.low_water[i] ||
421 421 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
422 422 DEBUGOUT("Invalid water mark configuration\n");
423 423 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
424 424 goto out;
425 425 }
426 426 }
427 427 }
428 428
429 429 /*
430 430 * On 82598 having Rx FC on causes resets while doing 1G
431 431 * so if it's on turn it off once we know link_speed. For
432 432 * more details see 82598 Specification update.
433 433 */
434 434 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
435 435 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
436 436 switch (hw->fc.requested_mode) {
437 437 case ixgbe_fc_full:
438 438 hw->fc.requested_mode = ixgbe_fc_tx_pause;
439 439 break;
440 440 case ixgbe_fc_rx_pause:
441 441 hw->fc.requested_mode = ixgbe_fc_none;
442 442 break;
443 443 default:
444 444 /* no change */
445 445 break;
446 446 }
447 447 }
448 448
449 449 /* Negotiate the fc mode to use */
450 450 ixgbe_fc_autoneg(hw);
451 451
452 452 /* Disable any previous flow control settings */
453 453 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
454 454 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
455 455
456 456 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
457 457 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
458 458
459 459 /*
460 460 * The possible values of fc.current_mode are:
461 461 * 0: Flow control is completely disabled
462 462 * 1: Rx flow control is enabled (we can receive pause frames,
463 463 * but not send pause frames).
464 464 * 2: Tx flow control is enabled (we can send pause frames but
465 465 * we do not support receiving pause frames).
466 466 * 3: Both Rx and Tx flow control (symmetric) are enabled.
467 467 * other: Invalid.
468 468 */
469 469 switch (hw->fc.current_mode) {
470 470 case ixgbe_fc_none:
471 471 /*
472 472 * Flow control is disabled by software override or autoneg.
473 473 * The code below will actually disable it in the HW.
474 474 */
475 475 break;
476 476 case ixgbe_fc_rx_pause:
477 477 /*
478 478 * Rx Flow control is enabled and Tx Flow control is
479 479 * disabled by software override. Since there really
480 480 * isn't a way to advertise that we are capable of RX
481 481 * Pause ONLY, we will advertise that we support both
482 482 * symmetric and asymmetric Rx PAUSE. Later, we will
483 483 * disable the adapter's ability to send PAUSE frames.
484 484 */
485 485 fctrl_reg |= IXGBE_FCTRL_RFCE;
486 486 break;
487 487 case ixgbe_fc_tx_pause:
488 488 /*
489 489 * Tx Flow control is enabled, and Rx Flow control is
490 490 * disabled by software override.
491 491 */
492 492 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
493 493 break;
494 494 case ixgbe_fc_full:
495 495 /* Flow control (both Rx and Tx) is enabled by SW override. */
496 496 fctrl_reg |= IXGBE_FCTRL_RFCE;
497 497 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
498 498 break;
499 499 default:
500 500 DEBUGOUT("Flow control param set incorrectly\n");
501 501 ret_val = IXGBE_ERR_CONFIG;
502 502 goto out;
503 503 }
504 504
505 505 /* Set 802.3x based flow control settings. */
506 506 fctrl_reg |= IXGBE_FCTRL_DPF;
507 507 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
508 508 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
509 509
510 510 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
511 511 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
512 512 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
513 513 hw->fc.high_water[i]) {
514 514 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
515 515 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
516 516 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
517 517 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
518 518 } else {
519 519 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
520 520 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
521 521 }
522 522
523 523 }
524 524
525 525 /* Configure pause time (2 TCs per register) */
526 526 reg = hw->fc.pause_time * 0x00010001;
527 527 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
528 528 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
529 529
530 530 /* Configure flow control refresh threshold value */
531 531 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
532 532
533 533 out:
534 534 return ret_val;
535 535 }
536 536
537 537 /**
538 538 * ixgbe_start_mac_link_82598 - Configures MAC link settings
539 539 * @hw: pointer to hardware structure
540 540 *
541 541 * Configures link settings based on values in the ixgbe_hw struct.
542 542 * Restarts the link. Performs autonegotiation if needed.
543 543 **/
544 544 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
545 545 bool autoneg_wait_to_complete)
546 546 {
547 547 u32 autoc_reg;
548 548 u32 links_reg;
549 549 u32 i;
550 550 s32 status = IXGBE_SUCCESS;
551 551
552 552 DEBUGFUNC("ixgbe_start_mac_link_82598");
553 553
554 554 /* Restart link */
555 555 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
556 556 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
557 557 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
558 558
559 559 /* Only poll for autoneg to complete if specified to do so */
560 560 if (autoneg_wait_to_complete) {
561 561 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
562 562 IXGBE_AUTOC_LMS_KX4_AN ||
563 563 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
564 564 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
565 565 links_reg = 0; /* Just in case Autoneg time = 0 */
566 566 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
567 567 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
568 568 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
569 569 break;
570 570 msec_delay(100);
571 571 }
572 572 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
573 573 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
574 574 DEBUGOUT("Autonegotiation did not complete.\n");
575 575 }
576 576 }
577 577 }
578 578
579 579 /* Add delay to filter out noises during initial link setup */
580 580 msec_delay(50);
581 581
582 582 return status;
583 583 }
584 584
585 585 /**
586 586 * ixgbe_validate_link_ready - Function looks for phy link
587 587 * @hw: pointer to hardware structure
588 588 *
589 589 * Function indicates success when phy link is available. If phy is not ready
590 590 * within 5 seconds of MAC indicating link, the function returns error.
591 591 **/
592 592 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
593 593 {
594 594 u32 timeout;
595 595 u16 an_reg;
596 596
597 597 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
598 598 return IXGBE_SUCCESS;
599 599
600 600 for (timeout = 0;
601 601 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
602 602 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
603 603 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
604 604
605 605 if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
606 606 (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
607 607 break;
608 608
609 609 msec_delay(100);
610 610 }
611 611
612 612 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
613 613 DEBUGOUT("Link was indicated but link is down\n");
614 614 return IXGBE_ERR_LINK_SETUP;
615 615 }
616 616
617 617 return IXGBE_SUCCESS;
618 618 }
619 619
620 620 /**
621 621 * ixgbe_check_mac_link_82598 - Get link/speed status
622 622 * @hw: pointer to hardware structure
623 623 * @speed: pointer to link speed
624 624 * @link_up: TRUE is link is up, FALSE otherwise
625 625 * @link_up_wait_to_complete: bool used to wait for link up or not
626 626 *
627 627 * Reads the links register to determine if link is up and the current speed
628 628 **/
629 629 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
630 630 ixgbe_link_speed *speed, bool *link_up,
631 631 bool link_up_wait_to_complete)
632 632 {
633 633 u32 links_reg;
634 634 u32 i;
635 635 u16 link_reg, adapt_comp_reg;
636 636
637 637 DEBUGFUNC("ixgbe_check_mac_link_82598");
638 638
639 639 /*
640 640 * SERDES PHY requires us to read link status from undocumented
641 641 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
642 642 	 * indicates link down. 0xC00C is read to check that the XAUI lanes
643 643 * are active. Bit 0 clear indicates active; set indicates inactive.
644 644 */
645 645 if (hw->phy.type == ixgbe_phy_nl) {
646 646 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
647 647 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
648 648 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
649 649 &adapt_comp_reg);
650 650 if (link_up_wait_to_complete) {
651 651 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
652 652 if ((link_reg & 1) &&
653 653 ((adapt_comp_reg & 1) == 0)) {
654 654 *link_up = TRUE;
655 655 break;
656 656 } else {
657 657 *link_up = FALSE;
658 658 }
659 659 msec_delay(100);
660 660 hw->phy.ops.read_reg(hw, 0xC79F,
661 661 IXGBE_TWINAX_DEV,
662 662 &link_reg);
663 663 hw->phy.ops.read_reg(hw, 0xC00C,
664 664 IXGBE_TWINAX_DEV,
665 665 &adapt_comp_reg);
666 666 }
667 667 } else {
668 668 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
669 669 *link_up = TRUE;
670 670 else
671 671 *link_up = FALSE;
672 672 }
673 673
674 674 if (*link_up == FALSE)
675 675 goto out;
676 676 }
677 677
678 678 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
679 679 if (link_up_wait_to_complete) {
680 680 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
681 681 if (links_reg & IXGBE_LINKS_UP) {
682 682 *link_up = TRUE;
683 683 break;
684 684 } else {
685 685 *link_up = FALSE;
686 686 }
687 687 msec_delay(100);
688 688 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
689 689 }
690 690 } else {
691 691 if (links_reg & IXGBE_LINKS_UP)
692 692 *link_up = TRUE;
693 693 else
694 694 *link_up = FALSE;
695 695 }
696 696
697 697 if (links_reg & IXGBE_LINKS_SPEED)
698 698 *speed = IXGBE_LINK_SPEED_10GB_FULL;
699 699 else
700 700 *speed = IXGBE_LINK_SPEED_1GB_FULL;
701 701
702 702 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
703 703 (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
704 704 *link_up = FALSE;
705 705
706 706 out:
707 707 return IXGBE_SUCCESS;
708 708 }
709 709
710 710 /**
711 711 * ixgbe_setup_mac_link_82598 - Set MAC link speed
712 712 * @hw: pointer to hardware structure
|
↓ open down ↓ |
712 lines elided |
↑ open up ↑ |
713 713 * @speed: new link speed
714 714 * @autoneg: TRUE if autonegotiation enabled
715 715 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
716 716 *
717 717 * Set the link speed in the AUTOC register and restarts link.
718 718 **/
719 719 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
720 720 ixgbe_link_speed speed, bool autoneg,
721 721 bool autoneg_wait_to_complete)
722 722 {
723 - s32 status = IXGBE_SUCCESS;
723 + s32 status;
724 724 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
725 725 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
726 726 u32 autoc = curr_autoc;
727 727 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
728 728
729 729 DEBUGFUNC("ixgbe_setup_mac_link_82598");
730 730
731 731 /* Check to see if speed passed in is supported. */
732 - ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
732 + status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
733 + if (status != IXGBE_SUCCESS)
734 + return (status);
733 735 speed &= link_capabilities;
734 736
735 737 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
736 738 status = IXGBE_ERR_LINK_SETUP;
737 739
738 740 /* Set KX4/KX support according to speed requested */
739 741 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
740 742 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
741 743 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
742 744 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
743 745 autoc |= IXGBE_AUTOC_KX4_SUPP;
744 746 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
745 747 autoc |= IXGBE_AUTOC_KX_SUPP;
746 748 if (autoc != curr_autoc)
747 749 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
748 750 }
749 751
750 752 if (status == IXGBE_SUCCESS) {
751 753 /*
752 754 * Setup and restart the link based on the new values in
753 755 * ixgbe_hw This will write the AUTOC register based on the new
754 756 * stored values
755 757 */
756 758 status = ixgbe_start_mac_link_82598(hw,
757 759 autoneg_wait_to_complete);
758 760 }
759 761
760 762 return status;
761 763 }
762 764
763 765
764 766 /**
765 767 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
766 768 * @hw: pointer to hardware structure
767 769 * @speed: new link speed
768 770 * @autoneg: TRUE if autonegotiation enabled
769 771 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
770 772 *
771 773 * Sets the link speed in the AUTOC register in the MAC and restarts link.
772 774 **/
773 775 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
774 776 ixgbe_link_speed speed,
|
↓ open down ↓ |
32 lines elided |
↑ open up ↑ |
775 777 bool autoneg,
776 778 bool autoneg_wait_to_complete)
777 779 {
778 780 s32 status;
779 781
780 782 DEBUGFUNC("ixgbe_setup_copper_link_82598");
781 783
782 784 /* Setup the PHY according to input speed */
783 785 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
784 786 autoneg_wait_to_complete);
785 - /* Set up MAC */
786 - ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
787 + if (status == IXGBE_SUCCESS) {
788 + /* Set up MAC */
789 + status =
790 + ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
791 + }
787 792
788 793 return status;
789 794 }
790 795
791 796 /**
792 797 * ixgbe_reset_hw_82598 - Performs hardware reset
793 798 * @hw: pointer to hardware structure
794 799 *
795 800 * Resets the hardware by resetting the transmit and receive units, masks and
796 801 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
797 802 * reset.
798 803 **/
799 804 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
800 805 {
801 806 s32 status = IXGBE_SUCCESS;
802 807 s32 phy_status = IXGBE_SUCCESS;
803 808 u32 ctrl;
804 809 u32 gheccr;
805 810 u32 i;
806 811 u32 autoc;
807 812 u8 analog_val;
808 813
809 814 DEBUGFUNC("ixgbe_reset_hw_82598");
810 815
811 816 /* Call adapter stop to disable tx/rx and clear interrupts */
812 817 status = hw->mac.ops.stop_adapter(hw);
813 818 if (status != IXGBE_SUCCESS)
814 819 goto reset_hw_out;
815 820
816 821 /*
817 822 * Power up the Atlas Tx lanes if they are currently powered down.
818 823 * Atlas Tx lanes are powered down for MAC loopback tests, but
819 824 * they are not automatically restored on reset.
820 825 */
821 826 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
822 827 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
823 828 /* Enable Tx Atlas so packets can be transmitted again */
824 829 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
825 830 &analog_val);
826 831 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
827 832 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
828 833 analog_val);
829 834
830 835 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
831 836 &analog_val);
832 837 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
833 838 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
834 839 analog_val);
835 840
836 841 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
837 842 &analog_val);
838 843 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
839 844 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
840 845 analog_val);
841 846
842 847 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
843 848 &analog_val);
844 849 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
845 850 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
846 851 analog_val);
847 852 }
848 853
849 854 /* Reset PHY */
850 855 if (hw->phy.reset_disable == FALSE) {
851 856 /* PHY ops must be identified and initialized prior to reset */
852 857
853 858 /* Init PHY and function pointers, perform SFP setup */
854 859 phy_status = hw->phy.ops.init(hw);
855 860 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
856 861 goto reset_hw_out;
857 862 if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
858 863 goto mac_reset_top;
859 864
860 865 hw->phy.ops.reset(hw);
861 866 }
862 867
863 868 mac_reset_top:
864 869 /*
865 870 * Issue global reset to the MAC. This needs to be a SW reset.
866 871 * If link reset is used, it might reset the MAC when mng is using it
867 872 */
868 873 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
869 874 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
870 875 IXGBE_WRITE_FLUSH(hw);
871 876
872 877 /* Poll for reset bit to self-clear indicating reset is complete */
873 878 for (i = 0; i < 10; i++) {
874 879 usec_delay(1);
875 880 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
876 881 if (!(ctrl & IXGBE_CTRL_RST))
877 882 break;
878 883 }
879 884 if (ctrl & IXGBE_CTRL_RST) {
880 885 status = IXGBE_ERR_RESET_FAILED;
881 886 DEBUGOUT("Reset polling failed to complete.\n");
882 887 }
883 888
884 889 msec_delay(50);
885 890
886 891 /*
887 892 * Double resets are required for recovery from certain error
888 893 * conditions. Between resets, it is necessary to stall to allow time
889 894 * for any pending HW events to complete.
890 895 */
891 896 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
892 897 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
893 898 goto mac_reset_top;
894 899 }
895 900
896 901 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
897 902 gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
898 903 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
899 904
900 905 /*
901 906 * Store the original AUTOC value if it has not been
902 907 * stored off yet. Otherwise restore the stored original
903 908 * AUTOC value since the reset operation sets back to deaults.
904 909 */
905 910 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
906 911 if (hw->mac.orig_link_settings_stored == FALSE) {
907 912 hw->mac.orig_autoc = autoc;
908 913 hw->mac.orig_link_settings_stored = TRUE;
909 914 } else if (autoc != hw->mac.orig_autoc) {
910 915 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
911 916 }
912 917
913 918 /* Store the permanent mac address */
914 919 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
915 920
916 921 /*
917 922 * Store MAC address from RAR0, clear receive address registers, and
918 923 * clear the multicast table
919 924 */
920 925 hw->mac.ops.init_rx_addrs(hw);
921 926
922 927 reset_hw_out:
923 928 if (phy_status != IXGBE_SUCCESS)
924 929 status = phy_status;
925 930
926 931 return status;
927 932 }
928 933
929 934 /**
930 935 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
931 936 * @hw: pointer to hardware struct
932 937 * @rar: receive address register index to associate with a VMDq index
933 938 * @vmdq: VMDq set index
934 939 **/
935 940 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
936 941 {
937 942 u32 rar_high;
938 943 u32 rar_entries = hw->mac.num_rar_entries;
939 944
940 945 DEBUGFUNC("ixgbe_set_vmdq_82598");
941 946
942 947 /* Make sure we are using a valid rar index range */
943 948 if (rar >= rar_entries) {
944 949 DEBUGOUT1("RAR index %d is out of range.\n", rar);
945 950 return IXGBE_ERR_INVALID_ARGUMENT;
946 951 }
947 952
948 953 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
949 954 rar_high &= ~IXGBE_RAH_VIND_MASK;
950 955 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
951 956 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
952 957 return IXGBE_SUCCESS;
953 958 }
954 959
955 960 /**
956 961 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
957 962 * @hw: pointer to hardware struct
958 963 * @rar: receive address register index to associate with a VMDq index
959 964 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
960 965 **/
961 966 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
962 967 {
963 968 u32 rar_high;
964 969 u32 rar_entries = hw->mac.num_rar_entries;
965 970
966 971 UNREFERENCED_1PARAMETER(vmdq);
967 972
968 973 /* Make sure we are using a valid rar index range */
969 974 if (rar >= rar_entries) {
970 975 DEBUGOUT1("RAR index %d is out of range.\n", rar);
971 976 return IXGBE_ERR_INVALID_ARGUMENT;
972 977 }
973 978
974 979 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
975 980 if (rar_high & IXGBE_RAH_VIND_MASK) {
976 981 rar_high &= ~IXGBE_RAH_VIND_MASK;
977 982 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
978 983 }
979 984
980 985 return IXGBE_SUCCESS;
981 986 }
982 987
983 988 /**
984 989 * ixgbe_set_vfta_82598 - Set VLAN filter table
985 990 * @hw: pointer to hardware structure
986 991 * @vlan: VLAN id to write to VLAN filter
987 992 * @vind: VMDq output index that maps queue to VLAN id in VFTA
988 993 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
989 994 *
990 995 * Turn on/off specified VLAN in the VLAN filter table.
991 996 **/
992 997 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
993 998 bool vlan_on)
994 999 {
995 1000 u32 regindex;
996 1001 u32 bitindex;
997 1002 u32 bits;
998 1003 u32 vftabyte;
999 1004
1000 1005 DEBUGFUNC("ixgbe_set_vfta_82598");
1001 1006
1002 1007 if (vlan > 4095)
1003 1008 return IXGBE_ERR_PARAM;
1004 1009
1005 1010 /* Determine 32-bit word position in array */
1006 1011 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
1007 1012
1008 1013 /* Determine the location of the (VMD) queue index */
1009 1014 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1010 1015 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
1011 1016
1012 1017 /* Set the nibble for VMD queue index */
1013 1018 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1014 1019 bits &= (~(0x0F << bitindex));
1015 1020 bits |= (vind << bitindex);
1016 1021 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1017 1022
1018 1023 /* Determine the location of the bit for this VLAN id */
1019 1024 bitindex = vlan & 0x1F; /* lower five bits */
1020 1025
1021 1026 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1022 1027 if (vlan_on)
1023 1028 /* Turn on this VLAN id */
1024 1029 bits |= (1 << bitindex);
1025 1030 else
1026 1031 /* Turn off this VLAN id */
1027 1032 bits &= ~(1 << bitindex);
1028 1033 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1029 1034
1030 1035 return IXGBE_SUCCESS;
1031 1036 }
1032 1037
1033 1038 /**
1034 1039 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1035 1040 * @hw: pointer to hardware structure
1036 1041 *
1037 1042 * Clears the VLAN filer table, and the VMDq index associated with the filter
1038 1043 **/
1039 1044 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1040 1045 {
1041 1046 u32 offset;
1042 1047 u32 vlanbyte;
1043 1048
1044 1049 DEBUGFUNC("ixgbe_clear_vfta_82598");
1045 1050
1046 1051 for (offset = 0; offset < hw->mac.vft_size; offset++)
1047 1052 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1048 1053
1049 1054 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1050 1055 for (offset = 0; offset < hw->mac.vft_size; offset++)
1051 1056 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1052 1057 0);
1053 1058
1054 1059 return IXGBE_SUCCESS;
1055 1060 }
1056 1061
1057 1062 /**
1058 1063 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1059 1064 * @hw: pointer to hardware structure
1060 1065 * @reg: analog register to read
1061 1066 * @val: read value
1062 1067 *
1063 1068 * Performs read operation to Atlas analog register specified.
1064 1069 **/
1065 1070 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1066 1071 {
1067 1072 u32 atlas_ctl;
1068 1073
1069 1074 DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1070 1075
1071 1076 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1072 1077 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1073 1078 IXGBE_WRITE_FLUSH(hw);
1074 1079 usec_delay(10);
1075 1080 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1076 1081 *val = (u8)atlas_ctl;
1077 1082
1078 1083 return IXGBE_SUCCESS;
1079 1084 }
1080 1085
1081 1086 /**
1082 1087 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1083 1088 * @hw: pointer to hardware structure
1084 1089 * @reg: atlas register to write
1085 1090 * @val: value to write
1086 1091 *
1087 1092 * Performs write operation to Atlas analog register specified.
1088 1093 **/
1089 1094 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1090 1095 {
1091 1096 u32 atlas_ctl;
1092 1097
1093 1098 DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1094 1099
1095 1100 atlas_ctl = (reg << 8) | val;
1096 1101 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1097 1102 IXGBE_WRITE_FLUSH(hw);
1098 1103 usec_delay(10);
1099 1104
1100 1105 return IXGBE_SUCCESS;
1101 1106 }
1102 1107
1103 1108 /**
1104 1109 * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1105 1110 * @hw: pointer to hardware structure
1106 1111 * @byte_offset: EEPROM byte offset to read
1107 1112 * @eeprom_data: value read
1108 1113 *
1109 1114 * Performs 8 byte read operation to SFP module's EEPROM over I2C interface.
1110 1115 **/
1111 1116 s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1112 1117 u8 *eeprom_data)
1113 1118 {
1114 1119 s32 status = IXGBE_SUCCESS;
1115 1120 u16 sfp_addr = 0;
1116 1121 u16 sfp_data = 0;
1117 1122 u16 sfp_stat = 0;
1118 1123 u32 i;
1119 1124
1120 1125 DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
1121 1126
1122 1127 if (hw->phy.type == ixgbe_phy_nl) {
1123 1128 /*
1124 1129 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1125 1130 * 0xC30D. These registers are used to talk to the SFP+
1126 1131 * module's EEPROM through the SDA/SCL (I2C) interface.
1127 1132 */
1128 1133 sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1129 1134 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1130 1135 hw->phy.ops.write_reg(hw,
1131 1136 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1132 1137 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1133 1138 sfp_addr);
1134 1139
1135 1140 /* Poll status */
1136 1141 for (i = 0; i < 100; i++) {
1137 1142 hw->phy.ops.read_reg(hw,
1138 1143 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1139 1144 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1140 1145 &sfp_stat);
1141 1146 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1142 1147 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1143 1148 break;
1144 1149 msec_delay(10);
1145 1150 }
1146 1151
1147 1152 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1148 1153 DEBUGOUT("EEPROM read did not pass.\n");
1149 1154 status = IXGBE_ERR_SFP_NOT_PRESENT;
1150 1155 goto out;
1151 1156 }
1152 1157
1153 1158 /* Read data */
1154 1159 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1155 1160 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1156 1161
1157 1162 *eeprom_data = (u8)(sfp_data >> 8);
1158 1163 } else {
1159 1164 status = IXGBE_ERR_PHY;
1160 1165 goto out;
1161 1166 }
1162 1167
1163 1168 out:
1164 1169 return status;
1165 1170 }
1166 1171
1167 1172 /**
1168 1173 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1169 1174 * @hw: pointer to hardware structure
1170 1175 *
1171 1176 * Determines physical layer capabilities of the current configuration.
1172 1177 **/
1173 1178 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1174 1179 {
1175 1180 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1176 1181 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1177 1182 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1178 1183 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1179 1184 u16 ext_ability = 0;
1180 1185
1181 1186 DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1182 1187
1183 1188 hw->phy.ops.identify(hw);
1184 1189
1185 1190 /* Copper PHY must be checked before AUTOC LMS to determine correct
1186 1191 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1187 1192 switch (hw->phy.type) {
1188 1193 case ixgbe_phy_tn:
1189 1194 case ixgbe_phy_cu_unknown:
1190 1195 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1191 1196 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1192 1197 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1193 1198 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1194 1199 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1195 1200 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1196 1201 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1197 1202 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1198 1203 goto out;
1199 1204 default:
1200 1205 break;
1201 1206 }
1202 1207
1203 1208 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1204 1209 case IXGBE_AUTOC_LMS_1G_AN:
1205 1210 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1206 1211 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1207 1212 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1208 1213 else
1209 1214 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1210 1215 break;
1211 1216 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1212 1217 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1213 1218 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1214 1219 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1215 1220 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1216 1221 else /* XAUI */
1217 1222 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1218 1223 break;
1219 1224 case IXGBE_AUTOC_LMS_KX4_AN:
1220 1225 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1221 1226 if (autoc & IXGBE_AUTOC_KX_SUPP)
1222 1227 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1223 1228 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1224 1229 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1225 1230 break;
1226 1231 default:
1227 1232 break;
1228 1233 }
1229 1234
1230 1235 if (hw->phy.type == ixgbe_phy_nl) {
1231 1236 hw->phy.ops.identify_sfp(hw);
1232 1237
1233 1238 switch (hw->phy.sfp_type) {
1234 1239 case ixgbe_sfp_type_da_cu:
1235 1240 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1236 1241 break;
1237 1242 case ixgbe_sfp_type_sr:
1238 1243 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1239 1244 break;
1240 1245 case ixgbe_sfp_type_lr:
1241 1246 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1242 1247 break;
1243 1248 default:
1244 1249 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1245 1250 break;
1246 1251 }
1247 1252 }
1248 1253
1249 1254 switch (hw->device_id) {
1250 1255 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1251 1256 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1252 1257 break;
1253 1258 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1254 1259 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1255 1260 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1256 1261 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1257 1262 break;
1258 1263 case IXGBE_DEV_ID_82598EB_XF_LR:
1259 1264 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1260 1265 break;
1261 1266 default:
1262 1267 break;
1263 1268 }
1264 1269
1265 1270 out:
1266 1271 return physical_layer;
1267 1272 }
1268 1273
1269 1274 /**
1270 1275 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1271 1276 * port devices.
1272 1277 * @hw: pointer to the HW structure
1273 1278 *
1274 1279 * Calls common function and corrects issue with some single port devices
1275 1280 * that enable LAN1 but not LAN0.
1276 1281 **/
1277 1282 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1278 1283 {
1279 1284 struct ixgbe_bus_info *bus = &hw->bus;
1280 1285 u16 pci_gen = 0;
1281 1286 u16 pci_ctrl2 = 0;
1282 1287
1283 1288 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1284 1289
1285 1290 ixgbe_set_lan_id_multi_port_pcie(hw);
1286 1291
1287 1292 /* check if LAN0 is disabled */
1288 1293 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1289 1294 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1290 1295
1291 1296 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1292 1297
1293 1298 /* if LAN0 is completely disabled force function to 0 */
1294 1299 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1295 1300 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1296 1301 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1297 1302
1298 1303 bus->func = 0;
1299 1304 }
1300 1305 }
1301 1306 }
1302 1307
1303 1308 /**
1304 1309 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1305 1310 * @hw: pointer to hardware structure
1306 1311 *
1307 1312 **/
1308 1313 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1309 1314 {
1310 1315 u32 regval;
1311 1316 u32 i;
1312 1317
1313 1318 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1314 1319
1315 1320 /* Enable relaxed ordering */
1316 1321 for (i = 0; ((i < hw->mac.max_tx_queues) &&
1317 1322 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1318 1323 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1319 1324 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1320 1325 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1321 1326 }
1322 1327
1323 1328 for (i = 0; ((i < hw->mac.max_rx_queues) &&
1324 1329 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1325 1330 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1326 1331 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
1327 1332 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
1328 1333 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1329 1334 }
1330 1335
1331 1336 }
1332 1337
1333 1338 /**
1334 1339 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1335 1340 * @hw: pointer to hardware structure
1336 1341 * @num_pb: number of packet buffers to allocate
1337 1342 * @headroom: reserve n KB of headroom
1338 1343 * @strategy: packet buffer allocation strategy
1339 1344 **/
1340 1345 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1341 1346 u32 headroom, int strategy)
1342 1347 {
1343 1348 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1344 1349 u8 i = 0;
1345 1350 UNREFERENCED_1PARAMETER(headroom);
1346 1351
1347 1352 if (!num_pb)
1348 1353 return;
1349 1354
|
↓ open down ↓ |
553 lines elided |
↑ open up ↑ |
1350 1355 /* Setup Rx packet buffer sizes */
1351 1356 switch (strategy) {
1352 1357 case PBA_STRATEGY_WEIGHTED:
1353 1358 /* Setup the first four at 80KB */
1354 1359 rxpktsize = IXGBE_RXPBSIZE_80KB;
1355 1360 for (; i < 4; i++)
1356 1361 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1357 1362 /* Setup the last four at 48KB...don't re-init i */
1358 1363 rxpktsize = IXGBE_RXPBSIZE_48KB;
1359 1364 /* Fall Through */
1365 + /* FALLTHRU */
1360 1366 case PBA_STRATEGY_EQUAL:
1361 1367 default:
1362 1368 /* Divide the remaining Rx packet buffer evenly among the TCs */
1363 1369 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1364 1370 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1365 1371 break;
1366 1372 }
1367 1373
1368 1374 /* Setup Tx packet buffer sizes */
1369 1375 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1370 1376 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1371 -
1372 - return;
1373 1377 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX