Import some changes from FreeBSD (details later; this is quick-n-dirty for now).
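The functional core of the import: the bool autoneg argument is dropped from the 82598 setup_mac_link/setup_copper_link paths (autoneg is now derived internally from the link capabilities), and a new read_i2c_sff8472 PHY op is wired up for SFP+ SFF-8472 data. A minimal, hedged sketch of what the signature change means for a caller of the MAC setup_link op follows; the example_force_10g wrapper is hypothetical, and only the post-change op signature is taken from the diff below.

#include "ixgbe_type.h"
#include "ixgbe_api.h"

/*
 * Illustration only, not part of this webrev.  Before this import the op
 * was called as setup_link(hw, speed, autoneg, autoneg_wait_to_complete);
 * after it, autoneg is determined inside the 82598 code from
 * ixgbe_get_link_capabilities(), so callers pass only the speed and the
 * wait flag.
 */
static s32
example_force_10g(struct ixgbe_hw *hw)
{
	return hw->mac.ops.setup_link(hw, IXGBE_LINK_SPEED_10GB_FULL,
	    TRUE /* autoneg_wait_to_complete */);
}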
--- old/usr/src/uts/common/io/ixgbe/ixgbe_82598.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_82598.c
1 1 /******************************************************************************
2 2
3 - Copyright (c) 2001-2012, Intel Corporation
3 + Copyright (c) 2001-2013, Intel Corporation
4 4 All rights reserved.
5 5
6 6 Redistribution and use in source and binary forms, with or without
7 7 modification, are permitted provided that the following conditions are met:
8 8
9 9 1. Redistributions of source code must retain the above copyright notice,
10 10 this list of conditions and the following disclaimer.
11 11
12 12 2. Redistributions in binary form must reproduce the above copyright
13 13 notice, this list of conditions and the following disclaimer in the
14 14 documentation and/or other materials provided with the distribution.
15 15
16 16 3. Neither the name of the Intel Corporation nor the names of its
17 17 contributors may be used to endorse or promote products derived from
18 18 this software without specific prior written permission.
19 19
20 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 30 POSSIBILITY OF SUCH DAMAGE.
31 31
32 32 ******************************************************************************/
33 33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82598.c,v 1.13 2012/07/05 20:51:44 jfv Exp $*/
34 34
35 35 #include "ixgbe_type.h"
36 36 #include "ixgbe_82598.h"
37 37 #include "ixgbe_api.h"
38 38 #include "ixgbe_common.h"
39 39 #include "ixgbe_phy.h"
40 40
41 41 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
42 42 ixgbe_link_speed *speed,
43 43 bool *autoneg);
44 44 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
45 45 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
46 46 bool autoneg_wait_to_complete);
47 47 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
48 48 ixgbe_link_speed *speed, bool *link_up,
49 49 bool link_up_wait_to_complete);
50 50 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
51 51 ixgbe_link_speed speed,
52 - bool autoneg,
53 52 bool autoneg_wait_to_complete);
54 53 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
55 54 ixgbe_link_speed speed,
56 - bool autoneg,
57 55 bool autoneg_wait_to_complete);
58 56 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
59 57 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
60 58 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
61 59 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
62 60 u32 headroom, int strategy);
63 -
61 +static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
62 + u8 *sff8472_data);
64 63 /**
65 64 * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
66 65 * @hw: pointer to the HW structure
67 66 *
68 67 * The defaults for 82598 should be in the range of 50us to 50ms,
69 68 * however the hardware default for these parts is 500us to 1ms which is less
70 69 * than the 10ms recommended by the pci-e spec. To address this we need to
71 70 * increase the value to either 10ms to 250ms for capability version 1 config,
72 71 * or 16ms to 55ms for version 2.
73 72 **/
74 73 void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
75 74 {
76 75 u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
77 76 u16 pcie_devctl2;
78 77
79 78 /* only take action if timeout value is defaulted to 0 */
80 79 if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
81 80 goto out;
82 81
83 82 /*
  84   83  	 * if capabilities version is type 1 we can write the
85 84 * timeout of 10ms to 250ms through the GCR register
86 85 */
87 86 if (!(gcr & IXGBE_GCR_CAP_VER2)) {
88 87 gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
89 88 goto out;
90 89 }
91 90
92 91 /*
93 92 * for version 2 capabilities we need to write the config space
94 93 * directly in order to set the completion timeout value for
95 94 * 16ms to 55ms
96 95 */
97 96 pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
98 97 pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
99 98 IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
100 99 out:
101 100 /* disable completion timeout resend */
102 101 gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
103 102 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
104 103 }
105 104
106 105 /**
107 106 * ixgbe_init_ops_82598 - Inits func ptrs and MAC type
108 107 * @hw: pointer to hardware structure
109 108 *
110 109 * Initialize the function pointers and assign the MAC type for 82598.
111 110 * Does not touch the hardware.
112 111 **/
113 112 s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
114 113 {
115 114 struct ixgbe_mac_info *mac = &hw->mac;
116 115 struct ixgbe_phy_info *phy = &hw->phy;
117 116 s32 ret_val;
118 117
119 118 DEBUGFUNC("ixgbe_init_ops_82598");
120 119
121 120 ret_val = ixgbe_init_phy_ops_generic(hw);
122 121 ret_val = ixgbe_init_ops_generic(hw);
123 122
124 123 /* PHY */
125 124 phy->ops.init = &ixgbe_init_phy_ops_82598;
126 125
127 126 /* MAC */
128 127 mac->ops.start_hw = &ixgbe_start_hw_82598;
129 128 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
130 129 mac->ops.reset_hw = &ixgbe_reset_hw_82598;
131 130 mac->ops.get_media_type = &ixgbe_get_media_type_82598;
132 131 mac->ops.get_supported_physical_layer =
133 132 &ixgbe_get_supported_physical_layer_82598;
134 133 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
135 134 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
136 135 mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
137 136
138 137 /* RAR, Multicast, VLAN */
139 138 mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
140 139 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
141 140 mac->ops.set_vfta = &ixgbe_set_vfta_82598;
142 141 mac->ops.set_vlvf = NULL;
143 142 mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
144 143
145 144 /* Flow Control */
146 145 mac->ops.fc_enable = &ixgbe_fc_enable_82598;
147 146
148 147 mac->mcft_size = 128;
149 148 mac->vft_size = 128;
150 149 mac->num_rar_entries = 16;
151 150 mac->rx_pb_size = 512;
152 151 mac->max_tx_queues = 32;
153 152 mac->max_rx_queues = 64;
154 153 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
155 154
156 155 /* SFP+ Module */
157 156 phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
157 + phy->ops.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598;
158 158
159 159 /* Link */
160 160 mac->ops.check_link = &ixgbe_check_mac_link_82598;
161 161 mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
162 162 mac->ops.flap_tx_laser = NULL;
163 163 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82598;
164 164 mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
165 165
166 166 /* Manageability interface */
167 167 mac->ops.set_fw_drv_ver = NULL;
168 168
169 169 return ret_val;
170 170 }
171 171
172 172 /**
173 173 * ixgbe_init_phy_ops_82598 - PHY/SFP specific init
174 174 * @hw: pointer to hardware structure
175 175 *
176 176 * Initialize any function pointers that were not able to be
177 177 * set during init_shared_code because the PHY/SFP type was
178 178 * not known. Perform the SFP init if necessary.
179 179 *
180 180 **/
181 181 s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
182 182 {
183 183 struct ixgbe_mac_info *mac = &hw->mac;
184 184 struct ixgbe_phy_info *phy = &hw->phy;
185 185 s32 ret_val = IXGBE_SUCCESS;
186 186 u16 list_offset, data_offset;
187 187
188 188 DEBUGFUNC("ixgbe_init_phy_ops_82598");
189 189
190 190 /* Identify the PHY */
191 191 phy->ops.identify(hw);
192 192
193 193 /* Overwrite the link function pointers if copper PHY */
194 194 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
195 195 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
196 196 mac->ops.get_link_capabilities =
197 197 &ixgbe_get_copper_link_capabilities_generic;
198 198 }
199 199
200 200 switch (hw->phy.type) {
201 201 case ixgbe_phy_tn:
202 202 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
203 203 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
204 204 phy->ops.get_firmware_version =
205 205 &ixgbe_get_phy_firmware_version_tnx;
206 206 break;
207 207 case ixgbe_phy_nl:
208 208 phy->ops.reset = &ixgbe_reset_phy_nl;
209 209
210 210 /* Call SFP+ identify routine to get the SFP+ module type */
211 211 ret_val = phy->ops.identify_sfp(hw);
212 212 if (ret_val != IXGBE_SUCCESS)
213 213 goto out;
214 214 else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
215 215 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
216 216 goto out;
217 217 }
218 218
219 219 /* Check to see if SFP+ module is supported */
220 220 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
221 221 &list_offset,
222 222 &data_offset);
223 223 if (ret_val != IXGBE_SUCCESS) {
224 224 ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
225 225 goto out;
226 226 }
227 227 break;
228 228 default:
229 229 break;
230 230 }
231 231
232 232 out:
233 233 return ret_val;
234 234 }
235 235
236 236 /**
237 237 * ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
238 238 * @hw: pointer to hardware structure
239 239 *
240 240 * Starts the hardware using the generic start_hw function.
 241  241   * Disables relaxed ordering, then sets the PCIe completion timeout.
242 242 *
243 243 **/
244 244 s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
245 245 {
246 246 u32 regval;
247 247 u32 i;
248 248 s32 ret_val = IXGBE_SUCCESS;
249 249
250 250 DEBUGFUNC("ixgbe_start_hw_82598");
251 251
252 252 ret_val = ixgbe_start_hw_generic(hw);
253 253
254 254 /* Disable relaxed ordering */
255 255 for (i = 0; ((i < hw->mac.max_tx_queues) &&
256 256 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
257 257 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
258 258 regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
259 259 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
260 260 }
261 261
262 262 for (i = 0; ((i < hw->mac.max_rx_queues) &&
263 263 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
264 264 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
265 265 regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
266 266 IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
267 267 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
268 268 }
269 269
270 270 /* set the completion timeout for interface */
271 271 if (ret_val == IXGBE_SUCCESS)
272 272 ixgbe_set_pcie_completion_timeout(hw);
273 273
274 274 return ret_val;
275 275 }
276 276
277 277 /**
278 278 * ixgbe_get_link_capabilities_82598 - Determines link capabilities
279 279 * @hw: pointer to hardware structure
280 280 * @speed: pointer to link speed
281 281 * @autoneg: boolean auto-negotiation value
282 282 *
283 283 * Determines the link capabilities by reading the AUTOC register.
284 284 **/
285 285 static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
286 286 ixgbe_link_speed *speed,
287 287 bool *autoneg)
288 288 {
289 289 s32 status = IXGBE_SUCCESS;
290 290 u32 autoc = 0;
291 291
292 292 DEBUGFUNC("ixgbe_get_link_capabilities_82598");
293 293
294 294 /*
295 295 * Determine link capabilities based on the stored value of AUTOC,
296 296 * which represents EEPROM defaults. If AUTOC value has not been
297 297 * stored, use the current register value.
298 298 */
299 299 if (hw->mac.orig_link_settings_stored)
300 300 autoc = hw->mac.orig_autoc;
301 301 else
302 302 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
303 303
304 304 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
305 305 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
306 306 *speed = IXGBE_LINK_SPEED_1GB_FULL;
307 307 *autoneg = FALSE;
308 308 break;
309 309
310 310 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
311 311 *speed = IXGBE_LINK_SPEED_10GB_FULL;
312 312 *autoneg = FALSE;
313 313 break;
314 314
315 315 case IXGBE_AUTOC_LMS_1G_AN:
316 316 *speed = IXGBE_LINK_SPEED_1GB_FULL;
317 317 *autoneg = TRUE;
318 318 break;
319 319
320 320 case IXGBE_AUTOC_LMS_KX4_AN:
321 321 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
322 322 *speed = IXGBE_LINK_SPEED_UNKNOWN;
323 323 if (autoc & IXGBE_AUTOC_KX4_SUPP)
324 324 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
325 325 if (autoc & IXGBE_AUTOC_KX_SUPP)
326 326 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
327 327 *autoneg = TRUE;
328 328 break;
329 329
330 330 default:
331 331 status = IXGBE_ERR_LINK_SETUP;
332 332 break;
333 333 }
334 334
335 335 return status;
336 336 }
337 337
338 338 /**
339 339 * ixgbe_get_media_type_82598 - Determines media type
340 340 * @hw: pointer to hardware structure
341 341 *
342 342 * Returns the media type (fiber, copper, backplane)
343 343 **/
344 344 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
345 345 {
346 346 enum ixgbe_media_type media_type;
347 347
348 348 DEBUGFUNC("ixgbe_get_media_type_82598");
349 349
350 350 /* Detect if there is a copper PHY attached. */
351 351 switch (hw->phy.type) {
352 352 case ixgbe_phy_cu_unknown:
353 353 case ixgbe_phy_tn:
354 354 media_type = ixgbe_media_type_copper;
355 355 goto out;
356 356 default:
357 357 break;
358 358 }
359 359
360 360 /* Media type for I82598 is based on device ID */
361 361 switch (hw->device_id) {
362 362 case IXGBE_DEV_ID_82598:
363 363 case IXGBE_DEV_ID_82598_BX:
364 364 /* Default device ID is mezzanine card KX/KX4 */
365 365 media_type = ixgbe_media_type_backplane;
366 366 break;
367 367 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
368 368 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
369 369 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
370 370 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
371 371 case IXGBE_DEV_ID_82598EB_XF_LR:
372 372 case IXGBE_DEV_ID_82598EB_SFP_LOM:
373 373 media_type = ixgbe_media_type_fiber;
374 374 break;
375 375 case IXGBE_DEV_ID_82598EB_CX4:
376 376 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
377 377 media_type = ixgbe_media_type_cx4;
378 378 break;
379 379 case IXGBE_DEV_ID_82598AT:
380 380 case IXGBE_DEV_ID_82598AT2:
381 381 media_type = ixgbe_media_type_copper;
382 382 break;
383 383 default:
384 384 media_type = ixgbe_media_type_unknown;
385 385 break;
386 386 }
387 387 out:
388 388 return media_type;
389 389 }
390 390
391 391 /**
392 392 * ixgbe_fc_enable_82598 - Enable flow control
393 393 * @hw: pointer to hardware structure
394 394 *
395 395 * Enable flow control according to the current settings.
396 396 **/
397 397 s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
398 398 {
399 399 s32 ret_val = IXGBE_SUCCESS;
400 400 u32 fctrl_reg;
401 401 u32 rmcs_reg;
402 402 u32 reg;
403 403 u32 fcrtl, fcrth;
404 404 u32 link_speed = 0;
405 405 int i;
406 406 bool link_up;
407 407
408 408 DEBUGFUNC("ixgbe_fc_enable_82598");
409 409
410 410 /* Validate the water mark configuration */
411 411 if (!hw->fc.pause_time) {
412 412 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
413 413 goto out;
414 414 }
415 415
416 416 /* Low water mark of zero causes XOFF floods */
417 417 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
418 418 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
419 419 hw->fc.high_water[i]) {
420 420 if (!hw->fc.low_water[i] ||
421 421 hw->fc.low_water[i] >= hw->fc.high_water[i]) {
422 422 DEBUGOUT("Invalid water mark configuration\n");
423 423 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
424 424 goto out;
425 425 }
426 426 }
427 427 }
428 428
429 429 /*
430 430 * On 82598 having Rx FC on causes resets while doing 1G
431 431 * so if it's on turn it off once we know link_speed. For
432 432 * more details see 82598 Specification update.
433 433 */
434 434 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
435 435 if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
436 436 switch (hw->fc.requested_mode) {
437 437 case ixgbe_fc_full:
438 438 hw->fc.requested_mode = ixgbe_fc_tx_pause;
439 439 break;
440 440 case ixgbe_fc_rx_pause:
441 441 hw->fc.requested_mode = ixgbe_fc_none;
442 442 break;
443 443 default:
444 444 /* no change */
445 445 break;
446 446 }
447 447 }
448 448
449 449 /* Negotiate the fc mode to use */
450 450 ixgbe_fc_autoneg(hw);
451 451
452 452 /* Disable any previous flow control settings */
453 453 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
454 454 fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
455 455
456 456 rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
457 457 rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
458 458
459 459 /*
460 460 * The possible values of fc.current_mode are:
461 461 * 0: Flow control is completely disabled
462 462 * 1: Rx flow control is enabled (we can receive pause frames,
463 463 * but not send pause frames).
464 464 * 2: Tx flow control is enabled (we can send pause frames but
465 465 * we do not support receiving pause frames).
466 466 * 3: Both Rx and Tx flow control (symmetric) are enabled.
467 467 * other: Invalid.
468 468 */
469 469 switch (hw->fc.current_mode) {
470 470 case ixgbe_fc_none:
471 471 /*
472 472 * Flow control is disabled by software override or autoneg.
473 473 * The code below will actually disable it in the HW.
474 474 */
475 475 break;
476 476 case ixgbe_fc_rx_pause:
477 477 /*
478 478 * Rx Flow control is enabled and Tx Flow control is
479 479 * disabled by software override. Since there really
480 480 * isn't a way to advertise that we are capable of RX
481 481 * Pause ONLY, we will advertise that we support both
482 482 * symmetric and asymmetric Rx PAUSE. Later, we will
483 483 * disable the adapter's ability to send PAUSE frames.
484 484 */
485 485 fctrl_reg |= IXGBE_FCTRL_RFCE;
486 486 break;
487 487 case ixgbe_fc_tx_pause:
488 488 /*
489 489 * Tx Flow control is enabled, and Rx Flow control is
490 490 * disabled by software override.
491 491 */
492 492 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
493 493 break;
494 494 case ixgbe_fc_full:
495 495 /* Flow control (both Rx and Tx) is enabled by SW override. */
496 496 fctrl_reg |= IXGBE_FCTRL_RFCE;
497 497 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
498 498 break;
499 499 default:
500 500 DEBUGOUT("Flow control param set incorrectly\n");
501 501 ret_val = IXGBE_ERR_CONFIG;
502 502 goto out;
503 503 }
504 504
505 505 /* Set 802.3x based flow control settings. */
506 506 fctrl_reg |= IXGBE_FCTRL_DPF;
507 507 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
508 508 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
509 509
510 510 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
511 511 for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
512 512 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
513 513 hw->fc.high_water[i]) {
514 514 fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
515 515 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
516 516 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
517 517 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
518 518 } else {
519 519 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
520 520 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
521 521 }
522 522
523 523 }
524 524
525 525 /* Configure pause time (2 TCs per register) */
526 526 reg = hw->fc.pause_time * 0x00010001;
527 527 for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
528 528 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
529 529
530 530 /* Configure flow control refresh threshold value */
531 531 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
532 532
533 533 out:
534 534 return ret_val;
535 535 }
536 536
537 537 /**
538 538 * ixgbe_start_mac_link_82598 - Configures MAC link settings
539 539 * @hw: pointer to hardware structure
540 540 *
541 541 * Configures link settings based on values in the ixgbe_hw struct.
542 542 * Restarts the link. Performs autonegotiation if needed.
543 543 **/
544 544 static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
545 545 bool autoneg_wait_to_complete)
546 546 {
547 547 u32 autoc_reg;
548 548 u32 links_reg;
549 549 u32 i;
550 550 s32 status = IXGBE_SUCCESS;
551 551
552 552 DEBUGFUNC("ixgbe_start_mac_link_82598");
553 553
554 554 /* Restart link */
555 555 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
556 556 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
557 557 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
558 558
559 559 /* Only poll for autoneg to complete if specified to do so */
560 560 if (autoneg_wait_to_complete) {
561 561 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
562 562 IXGBE_AUTOC_LMS_KX4_AN ||
563 563 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
564 564 IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
565 565 links_reg = 0; /* Just in case Autoneg time = 0 */
566 566 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
567 567 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
568 568 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
569 569 break;
570 570 msec_delay(100);
571 571 }
572 572 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
573 573 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
574 574 DEBUGOUT("Autonegotiation did not complete.\n");
575 575 }
576 576 }
577 577 }
578 578
 579  579  	/* Add delay to filter out noise during initial link setup */
580 580 msec_delay(50);
581 581
582 582 return status;
583 583 }
584 584
585 585 /**
586 586 * ixgbe_validate_link_ready - Function looks for phy link
587 587 * @hw: pointer to hardware structure
588 588 *
589 589 * Function indicates success when phy link is available. If phy is not ready
590 590 * within 5 seconds of MAC indicating link, the function returns error.
591 591 **/
592 592 static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
593 593 {
594 594 u32 timeout;
595 595 u16 an_reg;
596 596
597 597 if (hw->device_id != IXGBE_DEV_ID_82598AT2)
598 598 return IXGBE_SUCCESS;
599 599
600 600 for (timeout = 0;
601 601 timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
602 602 hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
603 603 IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
604 604
605 605 if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
606 606 (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
607 607 break;
608 608
609 609 msec_delay(100);
610 610 }
611 611
612 612 if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
613 613 DEBUGOUT("Link was indicated but link is down\n");
614 614 return IXGBE_ERR_LINK_SETUP;
615 615 }
616 616
617 617 return IXGBE_SUCCESS;
618 618 }
619 619
620 620 /**
621 621 * ixgbe_check_mac_link_82598 - Get link/speed status
622 622 * @hw: pointer to hardware structure
623 623 * @speed: pointer to link speed
 624  624   * @link_up: TRUE if link is up, FALSE otherwise
625 625 * @link_up_wait_to_complete: bool used to wait for link up or not
626 626 *
627 627 * Reads the links register to determine if link is up and the current speed
628 628 **/
629 629 static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
630 630 ixgbe_link_speed *speed, bool *link_up,
631 631 bool link_up_wait_to_complete)
632 632 {
633 633 u32 links_reg;
634 634 u32 i;
635 635 u16 link_reg, adapt_comp_reg;
636 636
637 637 DEBUGFUNC("ixgbe_check_mac_link_82598");
638 638
639 639 /*
640 640 * SERDES PHY requires us to read link status from undocumented
641 641 * register 0xC79F. Bit 0 set indicates link is up/ready; clear
 642  642  	 * indicates link down. 0xC00C is read to check that the XAUI lanes
643 643 * are active. Bit 0 clear indicates active; set indicates inactive.
644 644 */
645 645 if (hw->phy.type == ixgbe_phy_nl) {
646 646 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
647 647 hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
648 648 hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
649 649 &adapt_comp_reg);
650 650 if (link_up_wait_to_complete) {
651 651 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
652 652 if ((link_reg & 1) &&
653 653 ((adapt_comp_reg & 1) == 0)) {
654 654 *link_up = TRUE;
655 655 break;
656 656 } else {
657 657 *link_up = FALSE;
658 658 }
659 659 msec_delay(100);
660 660 hw->phy.ops.read_reg(hw, 0xC79F,
661 661 IXGBE_TWINAX_DEV,
662 662 &link_reg);
663 663 hw->phy.ops.read_reg(hw, 0xC00C,
664 664 IXGBE_TWINAX_DEV,
665 665 &adapt_comp_reg);
666 666 }
667 667 } else {
668 668 if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
669 669 *link_up = TRUE;
670 670 else
671 671 *link_up = FALSE;
672 672 }
673 673
674 674 if (*link_up == FALSE)
675 675 goto out;
676 676 }
677 677
678 678 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
679 679 if (link_up_wait_to_complete) {
680 680 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
681 681 if (links_reg & IXGBE_LINKS_UP) {
682 682 *link_up = TRUE;
683 683 break;
684 684 } else {
685 685 *link_up = FALSE;
686 686 }
687 687 msec_delay(100);
688 688 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
689 689 }
690 690 } else {
691 691 if (links_reg & IXGBE_LINKS_UP)
692 692 *link_up = TRUE;
693 693 else
694 694 *link_up = FALSE;
695 695 }
696 696
697 697 if (links_reg & IXGBE_LINKS_SPEED)
698 698 *speed = IXGBE_LINK_SPEED_10GB_FULL;
699 699 else
700 700 *speed = IXGBE_LINK_SPEED_1GB_FULL;
701 701
702 702 if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
703 703 (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
704 704 *link_up = FALSE;
705 705
706 706 out:
707 707 return IXGBE_SUCCESS;
708 708 }
709 709
710 710 /**
711 711 * ixgbe_setup_mac_link_82598 - Set MAC link speed
712 712 * @hw: pointer to hardware structure
713 713 * @speed: new link speed
714 714 * @autoneg: TRUE if autonegotiation enabled
715 715 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
716 716 *
717 717 * Set the link speed in the AUTOC register and restarts link.
718 718 **/
719 719 static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
720 - ixgbe_link_speed speed, bool autoneg,
720 + ixgbe_link_speed speed,
721 721 bool autoneg_wait_to_complete)
722 722 {
723 + bool autoneg = FALSE;
723 724 s32 status;
724 725 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
725 726 u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
726 727 u32 autoc = curr_autoc;
727 728 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
728 729
729 730 DEBUGFUNC("ixgbe_setup_mac_link_82598");
730 731
731 732 /* Check to see if speed passed in is supported. */
732 733 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
733 734 if (status != IXGBE_SUCCESS)
734 735 return (status);
735 736 speed &= link_capabilities;
736 737
737 738 if (speed == IXGBE_LINK_SPEED_UNKNOWN)
738 739 status = IXGBE_ERR_LINK_SETUP;
739 740
740 741 /* Set KX4/KX support according to speed requested */
741 742 else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
742 743 link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
743 744 autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
744 745 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
745 746 autoc |= IXGBE_AUTOC_KX4_SUPP;
746 747 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
747 748 autoc |= IXGBE_AUTOC_KX_SUPP;
748 749 if (autoc != curr_autoc)
749 750 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
750 751 }
751 752
752 753 if (status == IXGBE_SUCCESS) {
753 754 /*
754 755 * Setup and restart the link based on the new values in
755 756 * ixgbe_hw This will write the AUTOC register based on the new
756 757 * stored values
757 758 */
758 759 status = ixgbe_start_mac_link_82598(hw,
759 760 autoneg_wait_to_complete);
760 761 }
761 762
762 763 return status;
763 764 }
764 765
765 766
766 767 /**
767 768 * ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
768 769 * @hw: pointer to hardware structure
769 770 * @speed: new link speed
770 - * @autoneg: TRUE if autonegotiation enabled
771 771 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
772 772 *
773 773 * Sets the link speed in the AUTOC register in the MAC and restarts link.
774 774 **/
775 775 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
776 776 ixgbe_link_speed speed,
777 - bool autoneg,
778 777 bool autoneg_wait_to_complete)
779 778 {
780 779 s32 status;
781 780
782 781 DEBUGFUNC("ixgbe_setup_copper_link_82598");
783 782
784 783 /* Setup the PHY according to input speed */
785 - status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
784 + status = hw->phy.ops.setup_link_speed(hw, speed,
786 785 autoneg_wait_to_complete);
787 786 if (status == IXGBE_SUCCESS) {
788 787 /* Set up MAC */
789 788 status =
790 789 ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
791 790 }
792 791
793 792 return status;
794 793 }
795 794
796 795 /**
797 796 * ixgbe_reset_hw_82598 - Performs hardware reset
798 797 * @hw: pointer to hardware structure
799 798 *
800 799 * Resets the hardware by resetting the transmit and receive units, masks and
801 800 * clears all interrupts, performing a PHY reset, and performing a link (MAC)
802 801 * reset.
803 802 **/
804 803 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
805 804 {
806 805 s32 status = IXGBE_SUCCESS;
807 806 s32 phy_status = IXGBE_SUCCESS;
808 807 u32 ctrl;
809 808 u32 gheccr;
810 809 u32 i;
811 810 u32 autoc;
812 811 u8 analog_val;
813 812
814 813 DEBUGFUNC("ixgbe_reset_hw_82598");
815 814
816 815 /* Call adapter stop to disable tx/rx and clear interrupts */
817 816 status = hw->mac.ops.stop_adapter(hw);
818 817 if (status != IXGBE_SUCCESS)
819 818 goto reset_hw_out;
820 819
821 820 /*
822 821 * Power up the Atlas Tx lanes if they are currently powered down.
823 822 * Atlas Tx lanes are powered down for MAC loopback tests, but
824 823 * they are not automatically restored on reset.
825 824 */
826 825 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
827 826 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
828 827 /* Enable Tx Atlas so packets can be transmitted again */
829 828 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
830 829 &analog_val);
831 830 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
832 831 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
833 832 analog_val);
834 833
835 834 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
836 835 &analog_val);
837 836 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
838 837 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
839 838 analog_val);
840 839
841 840 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
842 841 &analog_val);
843 842 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
844 843 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
845 844 analog_val);
846 845
847 846 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
848 847 &analog_val);
849 848 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
850 849 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
851 850 analog_val);
852 851 }
853 852
854 853 /* Reset PHY */
855 854 if (hw->phy.reset_disable == FALSE) {
856 855 /* PHY ops must be identified and initialized prior to reset */
857 856
858 857 /* Init PHY and function pointers, perform SFP setup */
859 858 phy_status = hw->phy.ops.init(hw);
860 859 if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
861 860 goto reset_hw_out;
862 861 if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
863 862 goto mac_reset_top;
864 863
865 864 hw->phy.ops.reset(hw);
866 865 }
867 866
868 867 mac_reset_top:
869 868 /*
870 869 * Issue global reset to the MAC. This needs to be a SW reset.
871 870 * If link reset is used, it might reset the MAC when mng is using it
872 871 */
873 872 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
874 873 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
875 874 IXGBE_WRITE_FLUSH(hw);
876 875
877 876 /* Poll for reset bit to self-clear indicating reset is complete */
878 877 for (i = 0; i < 10; i++) {
879 878 usec_delay(1);
880 879 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
881 880 if (!(ctrl & IXGBE_CTRL_RST))
882 881 break;
883 882 }
884 883 if (ctrl & IXGBE_CTRL_RST) {
885 884 status = IXGBE_ERR_RESET_FAILED;
886 885 DEBUGOUT("Reset polling failed to complete.\n");
887 886 }
888 887
889 888 msec_delay(50);
890 889
891 890 /*
892 891 * Double resets are required for recovery from certain error
893 892 * conditions. Between resets, it is necessary to stall to allow time
894 893 * for any pending HW events to complete.
895 894 */
896 895 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
897 896 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
898 897 goto mac_reset_top;
899 898 }
900 899
901 900 gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
902 901 gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
903 902 IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
904 903
905 904 /*
906 905 * Store the original AUTOC value if it has not been
907 906 * stored off yet. Otherwise restore the stored original
 908  907  	 * AUTOC value since the reset operation sets back to defaults.
909 908 */
910 909 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
911 910 if (hw->mac.orig_link_settings_stored == FALSE) {
912 911 hw->mac.orig_autoc = autoc;
913 912 hw->mac.orig_link_settings_stored = TRUE;
914 913 } else if (autoc != hw->mac.orig_autoc) {
915 914 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
916 915 }
917 916
918 917 /* Store the permanent mac address */
919 918 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
920 919
921 920 /*
922 921 * Store MAC address from RAR0, clear receive address registers, and
923 922 * clear the multicast table
924 923 */
925 924 hw->mac.ops.init_rx_addrs(hw);
926 925
927 926 reset_hw_out:
928 927 if (phy_status != IXGBE_SUCCESS)
929 928 status = phy_status;
930 929
931 930 return status;
932 931 }
933 932
934 933 /**
935 934 * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
936 935 * @hw: pointer to hardware struct
937 936 * @rar: receive address register index to associate with a VMDq index
938 937 * @vmdq: VMDq set index
939 938 **/
940 939 s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
941 940 {
942 941 u32 rar_high;
943 942 u32 rar_entries = hw->mac.num_rar_entries;
944 943
945 944 DEBUGFUNC("ixgbe_set_vmdq_82598");
946 945
947 946 /* Make sure we are using a valid rar index range */
948 947 if (rar >= rar_entries) {
949 948 DEBUGOUT1("RAR index %d is out of range.\n", rar);
950 949 return IXGBE_ERR_INVALID_ARGUMENT;
951 950 }
952 951
953 952 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
954 953 rar_high &= ~IXGBE_RAH_VIND_MASK;
955 954 rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
956 955 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
957 956 return IXGBE_SUCCESS;
958 957 }
959 958
960 959 /**
961 960 * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
962 961 * @hw: pointer to hardware struct
963 962 * @rar: receive address register index to associate with a VMDq index
964 963 * @vmdq: VMDq clear index (not used in 82598, but elsewhere)
965 964 **/
966 965 static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
967 966 {
968 967 u32 rar_high;
969 968 u32 rar_entries = hw->mac.num_rar_entries;
970 969
971 970 UNREFERENCED_1PARAMETER(vmdq);
972 971
973 972 /* Make sure we are using a valid rar index range */
974 973 if (rar >= rar_entries) {
975 974 DEBUGOUT1("RAR index %d is out of range.\n", rar);
976 975 return IXGBE_ERR_INVALID_ARGUMENT;
977 976 }
978 977
979 978 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
980 979 if (rar_high & IXGBE_RAH_VIND_MASK) {
981 980 rar_high &= ~IXGBE_RAH_VIND_MASK;
982 981 IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
983 982 }
984 983
985 984 return IXGBE_SUCCESS;
986 985 }
987 986
988 987 /**
989 988 * ixgbe_set_vfta_82598 - Set VLAN filter table
990 989 * @hw: pointer to hardware structure
991 990 * @vlan: VLAN id to write to VLAN filter
992 991 * @vind: VMDq output index that maps queue to VLAN id in VFTA
993 992 * @vlan_on: boolean flag to turn on/off VLAN in VFTA
994 993 *
995 994 * Turn on/off specified VLAN in the VLAN filter table.
996 995 **/
997 996 s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
998 997 bool vlan_on)
999 998 {
1000 999 u32 regindex;
1001 1000 u32 bitindex;
1002 1001 u32 bits;
1003 1002 u32 vftabyte;
1004 1003
1005 1004 DEBUGFUNC("ixgbe_set_vfta_82598");
1006 1005
1007 1006 if (vlan > 4095)
1008 1007 return IXGBE_ERR_PARAM;
1009 1008
1010 1009 /* Determine 32-bit word position in array */
1011 1010 regindex = (vlan >> 5) & 0x7F; /* upper seven bits */
1012 1011
1013 1012 /* Determine the location of the (VMD) queue index */
1014 1013 vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
1015 1014 bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */
1016 1015
1017 1016 /* Set the nibble for VMD queue index */
1018 1017 bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
1019 1018 bits &= (~(0x0F << bitindex));
1020 1019 bits |= (vind << bitindex);
1021 1020 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
1022 1021
1023 1022 /* Determine the location of the bit for this VLAN id */
1024 1023 bitindex = vlan & 0x1F; /* lower five bits */
1025 1024
1026 1025 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1027 1026 if (vlan_on)
1028 1027 /* Turn on this VLAN id */
1029 1028 bits |= (1 << bitindex);
1030 1029 else
1031 1030 /* Turn off this VLAN id */
1032 1031 bits &= ~(1 << bitindex);
1033 1032 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1034 1033
1035 1034 return IXGBE_SUCCESS;
1036 1035 }
1037 1036
1038 1037 /**
1039 1038 * ixgbe_clear_vfta_82598 - Clear VLAN filter table
1040 1039 * @hw: pointer to hardware structure
1041 1040 *
1042 1041 * Clears the VLAN filer table, and the VMDq index associated with the filter
1043 1042 **/
1044 1043 static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
1045 1044 {
1046 1045 u32 offset;
1047 1046 u32 vlanbyte;
1048 1047
1049 1048 DEBUGFUNC("ixgbe_clear_vfta_82598");
1050 1049
1051 1050 for (offset = 0; offset < hw->mac.vft_size; offset++)
1052 1051 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1053 1052
1054 1053 for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
1055 1054 for (offset = 0; offset < hw->mac.vft_size; offset++)
1056 1055 IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
1057 1056 0);
1058 1057
1059 1058 return IXGBE_SUCCESS;
1060 1059 }
1061 1060
1062 1061 /**
1063 1062 * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
1064 1063 * @hw: pointer to hardware structure
1065 1064 * @reg: analog register to read
1066 1065 * @val: read value
1067 1066 *
1068 1067 * Performs read operation to Atlas analog register specified.
1069 1068 **/
1070 1069 s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
1071 1070 {
1072 1071 u32 atlas_ctl;
1073 1072
1074 1073 DEBUGFUNC("ixgbe_read_analog_reg8_82598");
1075 1074
1076 1075 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
1077 1076 IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
1078 1077 IXGBE_WRITE_FLUSH(hw);
1079 1078 usec_delay(10);
1080 1079 atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
1081 1080 *val = (u8)atlas_ctl;
1082 1081
1083 1082 return IXGBE_SUCCESS;
1084 1083 }
1085 1084
1086 1085 /**
1087 1086 * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
1088 1087 * @hw: pointer to hardware structure
1089 1088 * @reg: atlas register to write
1090 1089 * @val: value to write
1091 1090 *
1092 1091 * Performs write operation to Atlas analog register specified.
1093 1092 **/
1094 1093 s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
1095 1094 {
1096 1095 u32 atlas_ctl;
1097 1096
1098 1097 DEBUGFUNC("ixgbe_write_analog_reg8_82598");
1099 1098
1100 1099 atlas_ctl = (reg << 8) | val;
1101 1100 IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl);
1102 1101 IXGBE_WRITE_FLUSH(hw);
1103 1102 usec_delay(10);
1104 1103
1105 1104 return IXGBE_SUCCESS;
1106 1105 }
1107 1106
1108 1107 /**
1109 - * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1108 + * ixgbe_read_i2c_phy_82598 - Reads 8 bit word over I2C interface.
1110 1109 * @hw: pointer to hardware structure
1111 - * @byte_offset: EEPROM byte offset to read
1110 + * @dev_addr: address to read from
1111 + * @byte_offset: byte offset to read from dev_addr
1112 1112 * @eeprom_data: value read
1113 1113 *
1114 1114   * Performs an 8-bit read operation to the SFP module's EEPROM over the I2C interface.
1115 1115 **/
1116 -s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1117 - u8 *eeprom_data)
1116 +static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
1117 + u8 byte_offset, u8 *eeprom_data)
1118 1118 {
1119 1119 s32 status = IXGBE_SUCCESS;
1120 1120 u16 sfp_addr = 0;
1121 1121 u16 sfp_data = 0;
1122 1122 u16 sfp_stat = 0;
1123 1123 u32 i;
1124 1124
1125 - DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
1125 + DEBUGFUNC("ixgbe_read_i2c_phy_82598");
1126 1126
1127 1127 if (hw->phy.type == ixgbe_phy_nl) {
1128 1128 /*
1129 1129 * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
1130 1130 * 0xC30D. These registers are used to talk to the SFP+
1131 1131 * module's EEPROM through the SDA/SCL (I2C) interface.
1132 1132 */
1133 - sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
1133 + sfp_addr = (dev_addr << 8) + byte_offset;
1134 1134 sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
1135 1135 hw->phy.ops.write_reg(hw,
1136 1136 IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
1137 1137 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1138 1138 sfp_addr);
1139 1139
1140 1140 /* Poll status */
1141 1141 for (i = 0; i < 100; i++) {
1142 1142 hw->phy.ops.read_reg(hw,
1143 1143 IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
1144 1144 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
1145 1145 &sfp_stat);
1146 1146 sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
1147 1147 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
1148 1148 break;
1149 1149 msec_delay(10);
1150 1150 }
1151 1151
1152 1152 if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
1153 1153 DEBUGOUT("EEPROM read did not pass.\n");
1154 1154 status = IXGBE_ERR_SFP_NOT_PRESENT;
1155 1155 goto out;
1156 1156 }
1157 1157
1158 1158 /* Read data */
1159 1159 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
1160 1160 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
1161 1161
1162 1162 *eeprom_data = (u8)(sfp_data >> 8);
1163 1163 } else {
1164 1164 status = IXGBE_ERR_PHY;
1165 - goto out;
1166 1165 }
1167 1166
1168 1167 out:
1169 1168 return status;
1170 1169 }
1171 1170
1171 +/**
1172 + * ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
1173 + * @hw: pointer to hardware structure
1174 + * @byte_offset: EEPROM byte offset to read
1175 + * @eeprom_data: value read
1176 + *
1177  + * Performs an 8-bit read operation to the SFP module's EEPROM over the I2C interface.
1178 + **/
1179 +s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
1180 + u8 *eeprom_data)
1181 +{
1182 + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR,
1183 + byte_offset, eeprom_data);
1184 +}
1185 +
1186 +/**
1187 + * ixgbe_read_i2c_sff8472_82598 - Reads 8 bit word over I2C interface.
1188 + * @hw: pointer to hardware structure
1189 + * @byte_offset: byte offset at address 0xA2
1190 + * @eeprom_data: value read
1191 + *
1192  + * Performs an 8-bit read operation to the SFP module's SFF-8472 data over I2C
1193 + **/
1194 +static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
1195 + u8 *sff8472_data)
1196 +{
1197 + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2,
1198 + byte_offset, sff8472_data);
1199 +}
1200 +
1172 1201 /**
1173 1202 * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
1174 1203 * @hw: pointer to hardware structure
1175 1204 *
1176 1205 * Determines physical layer capabilities of the current configuration.
1177 1206 **/
1178 1207 u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
1179 1208 {
1180 1209 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1181 1210 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1182 1211 u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1183 1212 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1184 1213 u16 ext_ability = 0;
1185 1214
1186 1215 DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
1187 1216
1188 1217 hw->phy.ops.identify(hw);
1189 1218
1190 1219 /* Copper PHY must be checked before AUTOC LMS to determine correct
1191 1220 * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
1192 1221 switch (hw->phy.type) {
1193 1222 case ixgbe_phy_tn:
1194 1223 case ixgbe_phy_cu_unknown:
1195 1224 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1196 1225 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1197 1226 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1198 1227 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1199 1228 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1200 1229 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1201 1230 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1202 1231 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1203 1232 goto out;
1204 1233 default:
1205 1234 break;
1206 1235 }
1207 1236
1208 1237 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1209 1238 case IXGBE_AUTOC_LMS_1G_AN:
1210 1239 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1211 1240 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
1212 1241 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1213 1242 else
1214 1243 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1215 1244 break;
1216 1245 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
1217 1246 if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
1218 1247 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
1219 1248 else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
1220 1249 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1221 1250 else /* XAUI */
1222 1251 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1223 1252 break;
1224 1253 case IXGBE_AUTOC_LMS_KX4_AN:
1225 1254 case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
1226 1255 if (autoc & IXGBE_AUTOC_KX_SUPP)
1227 1256 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
1228 1257 if (autoc & IXGBE_AUTOC_KX4_SUPP)
1229 1258 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
1230 1259 break;
1231 1260 default:
1232 1261 break;
1233 1262 }
1234 1263
1235 1264 if (hw->phy.type == ixgbe_phy_nl) {
1236 1265 hw->phy.ops.identify_sfp(hw);
1237 1266
1238 1267 switch (hw->phy.sfp_type) {
1239 1268 case ixgbe_sfp_type_da_cu:
1240 1269 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1241 1270 break;
1242 1271 case ixgbe_sfp_type_sr:
1243 1272 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1244 1273 break;
1245 1274 case ixgbe_sfp_type_lr:
1246 1275 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1247 1276 break;
1248 1277 default:
1249 1278 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1250 1279 break;
1251 1280 }
1252 1281 }
1253 1282
1254 1283 switch (hw->device_id) {
1255 1284 case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
1256 1285 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
1257 1286 break;
1258 1287 case IXGBE_DEV_ID_82598AF_DUAL_PORT:
1259 1288 case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
1260 1289 case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
1261 1290 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
1262 1291 break;
1263 1292 case IXGBE_DEV_ID_82598EB_XF_LR:
1264 1293 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
1265 1294 break;
1266 1295 default:
1267 1296 break;
1268 1297 }
1269 1298
1270 1299 out:
1271 1300 return physical_layer;
1272 1301 }
1273 1302
1274 1303 /**
1275 1304 * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
1276 1305 * port devices.
1277 1306 * @hw: pointer to the HW structure
1278 1307 *
1279 1308 * Calls common function and corrects issue with some single port devices
1280 1309 * that enable LAN1 but not LAN0.
1281 1310 **/
1282 1311 void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
1283 1312 {
1284 1313 struct ixgbe_bus_info *bus = &hw->bus;
1285 1314 u16 pci_gen = 0;
1286 1315 u16 pci_ctrl2 = 0;
1287 1316
1288 1317 DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
1289 1318
1290 1319 ixgbe_set_lan_id_multi_port_pcie(hw);
1291 1320
1292 1321 /* check if LAN0 is disabled */
1293 1322 hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
1294 1323 if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
1295 1324
1296 1325 hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
1297 1326
1298 1327 /* if LAN0 is completely disabled force function to 0 */
1299 1328 if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
1300 1329 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
1301 1330 !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
1302 1331
1303 1332 bus->func = 0;
1304 1333 }
1305 1334 }
1306 1335 }
1307 1336
1308 1337 /**
1309 1338 * ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
1310 1339 * @hw: pointer to hardware structure
1311 1340 *
1312 1341 **/
1313 1342 void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
1314 1343 {
1315 1344 u32 regval;
1316 1345 u32 i;
1317 1346
1318 1347 DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
1319 1348
1320 1349 /* Enable relaxed ordering */
1321 1350 for (i = 0; ((i < hw->mac.max_tx_queues) &&
1322 1351 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1323 1352 regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1324 1353 regval |= IXGBE_DCA_TXCTRL_DESC_WRO_EN;
1325 1354 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
1326 1355 }
1327 1356
1328 1357 for (i = 0; ((i < hw->mac.max_rx_queues) &&
1329 1358 (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
1330 1359 regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
1331 1360 regval |= IXGBE_DCA_RXCTRL_DATA_WRO_EN |
1332 1361 IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
1333 1362 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
1334 1363 }
1335 1364
1336 1365 }
1337 1366
1338 1367 /**
1339 1368 * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
1340 1369 * @hw: pointer to hardware structure
1341 1370 * @num_pb: number of packet buffers to allocate
1342 1371 * @headroom: reserve n KB of headroom
1343 1372 * @strategy: packet buffer allocation strategy
1344 1373 **/
1345 1374 static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
1346 1375 u32 headroom, int strategy)
1347 1376 {
1348 1377 u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
1349 1378 u8 i = 0;
1350 1379 UNREFERENCED_1PARAMETER(headroom);
1351 1380
1352 1381 if (!num_pb)
1353 1382 return;
1354 1383
1355 1384 /* Setup Rx packet buffer sizes */
1356 1385 switch (strategy) {
1357 1386 case PBA_STRATEGY_WEIGHTED:
1358 1387 /* Setup the first four at 80KB */
1359 1388 rxpktsize = IXGBE_RXPBSIZE_80KB;
1360 1389 for (; i < 4; i++)
1361 1390 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1362 1391 /* Setup the last four at 48KB...don't re-init i */
1363 1392 rxpktsize = IXGBE_RXPBSIZE_48KB;
1364 1393 /* Fall Through */
1365 1394 /* FALLTHRU */
1366 1395 case PBA_STRATEGY_EQUAL:
1367 1396 default:
1368 1397 /* Divide the remaining Rx packet buffer evenly among the TCs */
1369 1398 for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1370 1399 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
1371 1400 break;
1372 1401 }
1373 1402
1374 1403 /* Setup Tx packet buffer sizes */
1375 1404 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
1376 1405 IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
1377 1406 }
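For context on the new phy->ops.read_i2c_sff8472 op registered above, here is a hedged illustration of how a consumer might read SFP+ diagnostic data from the SFF-8472 (0xA2) page. The example_read_sfp_temperature helper and the offset 96 (temperature MSB per the SFF-8472 spec) are assumptions, not code from this webrev.

#include "ixgbe_type.h"

static s32
example_read_sfp_temperature(struct ixgbe_hw *hw, u8 *temp_msb)
{
	/* Not every MAC type wires this op up; 82598 does after this change. */
	if (hw->phy.ops.read_i2c_sff8472 == NULL)
		return IXGBE_ERR_PHY;

	/* Byte 96 of the 0xA2 page holds the module temperature MSB. */
	return hw->phy.ops.read_i2c_sff8472(hw, 96, temp_msb);
}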