1 /******************************************************************************
2
3 Copyright (c) 2001-2013, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
15
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ******************************************************************************/
33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82599.c,v 1.8 2012/07/05 20:51:44 jfv Exp $*/
34
35 #include "ixgbe_type.h"
36 #include "ixgbe_82599.h"
37 #include "ixgbe_api.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40
/* Forward declarations for file-scope (static) helpers defined later in
 * this file.
 */
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
				   u16 offset, u16 *data);
static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
					  u16 words, u16 *data);
49
50 static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
51 {
52 u32 fwsm, manc, factps;
53
54 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
55 if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
56 return FALSE;
57
58 manc = IXGBE_READ_REG(hw, IXGBE_MANC);
59 if (!(manc & IXGBE_MANC_RCV_TCO_EN))
60 return FALSE;
61
62 factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
63 if (factps & IXGBE_FACTPS_MNGCG)
64 return FALSE;
65
66 return TRUE;
67 }
68
69 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
70 {
71 struct ixgbe_mac_info *mac = &hw->mac;
72
73 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
74
75 /*
76 * enable the laser control functions for SFP+ fiber
77 * and MNG not enabled
78 */
79 if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
80 !(ixgbe_mng_enabled(hw))) {
81 mac->ops.disable_tx_laser =
82 &ixgbe_disable_tx_laser_multispeed_fiber;
83 mac->ops.enable_tx_laser =
84 &ixgbe_enable_tx_laser_multispeed_fiber;
85 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
86
87 } else {
88 mac->ops.disable_tx_laser = NULL;
89 mac->ops.enable_tx_laser = NULL;
90 mac->ops.flap_tx_laser = NULL;
91 }
92
93 if (hw->phy.multispeed_fiber) {
94 /* Set up dual speed SFP+ support */
95 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
96 } else {
97 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
98 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
99 hw->phy.smart_speed == ixgbe_smart_speed_on) &&
100 !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
101 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
102 } else {
103 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
104 }
105 }
106 }
107
108 /**
109 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
110 * @hw: pointer to hardware structure
111 *
112 * Initialize any function pointers that were not able to be
113 * set during init_shared_code because the PHY/SFP type was
114 * not known. Perform the SFP init if necessary.
115 *
116 **/
117 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
118 {
119 struct ixgbe_mac_info *mac = &hw->mac;
120 struct ixgbe_phy_info *phy = &hw->phy;
121 s32 ret_val = IXGBE_SUCCESS;
122
123 DEBUGFUNC("ixgbe_init_phy_ops_82599");
124
125 /* Identify the PHY or SFP module */
126 ret_val = phy->ops.identify(hw);
127 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
128 goto init_phy_ops_out;
129
130 /* Setup function pointers based on detected SFP module and speeds */
131 ixgbe_init_mac_link_ops_82599(hw);
132 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
133 hw->phy.ops.reset = NULL;
134
135 /* If copper media, overwrite with copper function pointers */
136 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
137 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
138 mac->ops.get_link_capabilities =
139 &ixgbe_get_copper_link_capabilities_generic;
140 }
141
142 /* Set necessary function pointers based on phy type */
143 switch (hw->phy.type) {
144 case ixgbe_phy_tn:
145 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
146 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
147 phy->ops.get_firmware_version =
148 &ixgbe_get_phy_firmware_version_tnx;
149 break;
150 default:
151 break;
152 }
153 init_phy_ops_out:
154 return ret_val;
155 }
156
157 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
158 {
159 s32 ret_val = IXGBE_SUCCESS;
160 u16 list_offset, data_offset, data_value;
161 bool got_lock = FALSE;
162
163 DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
164
165 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
166 ixgbe_init_mac_link_ops_82599(hw);
167
168 hw->phy.ops.reset = NULL;
169
170 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
171 &data_offset);
172 if (ret_val != IXGBE_SUCCESS)
173 goto setup_sfp_out;
174
175 /* PHY config will finish before releasing the semaphore */
176 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
177 IXGBE_GSSR_MAC_CSR_SM);
178 if (ret_val != IXGBE_SUCCESS) {
179 ret_val = IXGBE_ERR_SWFW_SYNC;
180 goto setup_sfp_out;
181 }
182
183 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
184 while (data_value != 0xffff) {
185 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
186 IXGBE_WRITE_FLUSH(hw);
187 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
188 }
189
190 /* Release the semaphore */
191 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
192 /* Delay obtaining semaphore again to allow FW access */
193 msec_delay(hw->eeprom.semaphore_delay);
194
195 /* Need SW/FW semaphore around AUTOC writes if LESM on,
196 * likewise reset_pipeline requires lock as it also writes
197 * AUTOC.
198 */
199 if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
200 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
201 IXGBE_GSSR_MAC_CSR_SM);
202 if (ret_val != IXGBE_SUCCESS) {
203 ret_val = IXGBE_ERR_SWFW_SYNC;
204 goto setup_sfp_out;
205 }
206
207 got_lock = TRUE;
208 }
209
210 /* Restart DSP and set SFI mode */
211 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
212 IXGBE_AUTOC_LMS_10G_SERIAL));
213 hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
214 ret_val = ixgbe_reset_pipeline_82599(hw);
215
216 if (got_lock) {
217 hw->mac.ops.release_swfw_sync(hw,
218 IXGBE_GSSR_MAC_CSR_SM);
219 got_lock = FALSE;
220 }
221
222 if (ret_val) {
223 DEBUGOUT("sfp module setup not complete\n");
224 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
225 goto setup_sfp_out;
226 }
227
228 }
229
230 setup_sfp_out:
231 return ret_val;
232 }
233
234 /**
235 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
236 * @hw: pointer to hardware structure
237 *
238 * Initialize the function pointers and assign the MAC type for 82599.
239 * Does not touch the hardware.
240 **/
241
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	s32 ret_val;

	DEBUGFUNC("ixgbe_init_ops_82599");

	/* Install the generic function tables first, then override the
	 * entries below with 82599-specific implementations.  The generic
	 * init's status is preserved and returned to the caller.
	 */
	(void) ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = &ixgbe_identify_phy_82599;
	phy->ops.init = &ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
	mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
				    &ixgbe_get_supported_physical_layer_82599;
	mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
	mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = &ixgbe_start_hw_82599;
	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
	mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
	mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
	mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
	mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
	mac->rar_highwater = 1;
	mac->ops.set_vfta = &ixgbe_set_vfta_generic;
	mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
	mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
	mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;

	/* Link */
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = &ixgbe_check_mac_link_generic;
	mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 table sizes and queue counts; values are the part's
	 * hardware limits (rx_pb_size units per shared-code convention).
	 */
	mac->mcft_size = 128;
	mac->vft_size = 128;
	mac->num_rar_entries = 128;
	mac->rx_pb_size = 512;
	mac->max_tx_queues = 128;
	mac->max_rx_queues = 128;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);

	/* ARC subsystem is present whenever FWSM reports any
	 * manageability firmware mode.
	 */
	mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
				   IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;

	hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;

	/* EEPROM */
	eeprom->ops.read = &ixgbe_read_eeprom_82599;
	eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;

	/* Manageability interface */
	mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;


	return ret_val;
}
319
320 /**
321 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
322 * @hw: pointer to hardware structure
323 * @speed: pointer to link speed
324 * @autoneg: TRUE when autoneg or autotry is enabled
325 *
326 * Determines the link capabilities by reading the AUTOC register.
327 **/
328 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
329 ixgbe_link_speed *speed,
330 bool *autoneg)
331 {
332 s32 status = IXGBE_SUCCESS;
333 u32 autoc = 0;
334
335 DEBUGFUNC("ixgbe_get_link_capabilities_82599");
336
337
338 /* Check if 1G SFP module. */
339 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
340 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
341 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
342 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
343 *speed = IXGBE_LINK_SPEED_1GB_FULL;
344 *autoneg = TRUE;
345 goto out;
346 }
347
348 /*
349 * Determine link capabilities based on the stored value of AUTOC,
350 * which represents EEPROM defaults. If AUTOC value has not
351 * been stored, use the current register values.
352 */
353 if (hw->mac.orig_link_settings_stored)
354 autoc = hw->mac.orig_autoc;
355 else
356 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
357
358 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
359 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
360 *speed = IXGBE_LINK_SPEED_1GB_FULL;
361 *autoneg = FALSE;
362 break;
363
364 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
365 *speed = IXGBE_LINK_SPEED_10GB_FULL;
366 *autoneg = FALSE;
367 break;
368
369 case IXGBE_AUTOC_LMS_1G_AN:
370 *speed = IXGBE_LINK_SPEED_1GB_FULL;
371 *autoneg = TRUE;
372 break;
373
374 case IXGBE_AUTOC_LMS_10G_SERIAL:
375 *speed = IXGBE_LINK_SPEED_10GB_FULL;
376 *autoneg = FALSE;
377 break;
378
379 case IXGBE_AUTOC_LMS_KX4_KX_KR:
380 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
381 *speed = IXGBE_LINK_SPEED_UNKNOWN;
382 if (autoc & IXGBE_AUTOC_KR_SUPP)
383 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
384 if (autoc & IXGBE_AUTOC_KX4_SUPP)
385 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
386 if (autoc & IXGBE_AUTOC_KX_SUPP)
387 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
388 *autoneg = TRUE;
389 break;
390
391 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
392 *speed = IXGBE_LINK_SPEED_100_FULL;
393 if (autoc & IXGBE_AUTOC_KR_SUPP)
394 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
395 if (autoc & IXGBE_AUTOC_KX4_SUPP)
396 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
397 if (autoc & IXGBE_AUTOC_KX_SUPP)
398 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
399 *autoneg = TRUE;
400 break;
401
402 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
403 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
404 *autoneg = FALSE;
405 break;
406
407 default:
408 status = IXGBE_ERR_LINK_SETUP;
409 goto out;
410 }
411
412 if (hw->phy.multispeed_fiber) {
413 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
414 IXGBE_LINK_SPEED_1GB_FULL;
415 *autoneg = TRUE;
416 }
417
418 out:
419 return status;
420 }
421
422 /**
423 * ixgbe_get_media_type_82599 - Get media type
424 * @hw: pointer to hardware structure
425 *
426 * Returns the media type (fiber, copper, backplane)
427 **/
428 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
429 {
430 enum ixgbe_media_type media_type;
431
432 DEBUGFUNC("ixgbe_get_media_type_82599");
433
434 /* Detect if there is a copper PHY attached. */
435 switch (hw->phy.type) {
436 case ixgbe_phy_cu_unknown:
437 case ixgbe_phy_tn:
438 media_type = ixgbe_media_type_copper;
439 goto out;
440 default:
441 break;
442 }
443
444 switch (hw->device_id) {
445 case IXGBE_DEV_ID_82599_KX4:
446 case IXGBE_DEV_ID_82599_KX4_MEZZ:
447 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
448 case IXGBE_DEV_ID_82599_KR:
449 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
450 case IXGBE_DEV_ID_82599_XAUI_LOM:
451 /* Default device ID is mezzanine card KX/KX4 */
452 media_type = ixgbe_media_type_backplane;
453 break;
454 case IXGBE_DEV_ID_82599_SFP:
455 case IXGBE_DEV_ID_82599_SFP_FCOE:
456 case IXGBE_DEV_ID_82599_SFP_EM:
457 case IXGBE_DEV_ID_82599_SFP_SF2:
458 case IXGBE_DEV_ID_82599_SFP_SF_QP:
459 case IXGBE_DEV_ID_82599EN_SFP:
460 media_type = ixgbe_media_type_fiber;
461 break;
462 case IXGBE_DEV_ID_82599_CX4:
463 media_type = ixgbe_media_type_cx4;
464 break;
465 case IXGBE_DEV_ID_82599_T3_LOM:
466 media_type = ixgbe_media_type_copper;
467 break;
468 case IXGBE_DEV_ID_82599_BYPASS:
469 media_type = ixgbe_media_type_fiber_fixed;
470 hw->phy.multispeed_fiber = TRUE;
471 break;
472 default:
473 media_type = ixgbe_media_type_unknown;
474 break;
475 }
476 out:
477 return media_type;
478 }
479
480 /**
481 * ixgbe_start_mac_link_82599 - Setup MAC link settings
482 * @hw: pointer to hardware structure
483 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
484 *
485 * Configures link settings based on values in the ixgbe_hw struct.
486 * Restarts the link. Performs autonegotiation if needed.
487 **/
s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
			       bool autoneg_wait_to_complete)
{
	u32 autoc_reg;
	u32 links_reg;
	u32 i;
	s32 status = IXGBE_SUCCESS;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_start_mac_link_82599");


	/* reset_pipeline requires us to hold this lock as it writes to
	 * AUTOC.  The lock is only needed when LESM firmware is active.
	 */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
						       IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			goto out;

		got_lock = TRUE;
	}

	/* Restart link (reset_pipeline failure is intentionally ignored) */
	(void) ixgbe_reset_pipeline_82599(hw);

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	/* Only poll for autoneg to complete if specified to do so.
	 * Completion is only reported for the KX/KX4/KR backplane modes.
	 */
	if (autoneg_wait_to_complete) {
		autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
		if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
		    (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
		    IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
			links_reg = 0; /* Just in case Autoneg time = 0 */
			/* Poll LINKS up to IXGBE_AUTO_NEG_TIME times,
			 * 100ms apart, for AN completion.
			 */
			for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
				links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
				if (links_reg & IXGBE_LINKS_KX_AN_COMP)
					break;
				msec_delay(100);
			}
			if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
				status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
				DEBUGOUT("Autoneg did not complete.\n");
			}
		}
	}

	/* Add delay to filter out noises during initial link setup */
	msec_delay(50);

out:
	return status;
}
547
548 /**
549 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
550 * @hw: pointer to hardware structure
551 *
552 * The base drivers may require better control over SFP+ module
553 * PHY states. This includes selectively shutting down the Tx
554 * laser on the PHY, effectively halting physical link.
555 **/
556 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
557 {
558 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
559
560 /* Disable tx laser; allow 100us to go dark per spec */
561 esdp_reg |= IXGBE_ESDP_SDP3;
562 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
563 IXGBE_WRITE_FLUSH(hw);
564 usec_delay(100);
565 }
566
567 /**
568 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
569 * @hw: pointer to hardware structure
570 *
571 * The base drivers may require better control over SFP+ module
572 * PHY states. This includes selectively turning on the Tx
573 * laser on the PHY, effectively starting physical link.
574 **/
575 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
576 {
577 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
578
579 /* Enable tx laser; allow 100ms to light up */
580 esdp_reg &= ~IXGBE_ESDP_SDP3;
581 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
582 IXGBE_WRITE_FLUSH(hw);
583 msec_delay(100);
584 }
585
586 /**
587 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
588 * @hw: pointer to hardware structure
589 *
590 * When the driver changes the link speeds that it can support,
591 * it sets autotry_restart to TRUE to indicate that we need to
592 * initiate a new autotry session with the link partner. To do
593 * so, we set the speed then disable and re-enable the tx laser, to
594 * alert the link partner that it also needs to restart autotry on its
595 * end. This is consistent with TRUE clause 37 autoneg, which also
596 * involves a loss of signal.
597 **/
598 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
599 {
600 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
601
602 if (hw->mac.autotry_restart) {
603 ixgbe_disable_tx_laser_multispeed_fiber(hw);
604 ixgbe_enable_tx_laser_multispeed_fiber(hw);
605 hw->mac.autotry_restart = FALSE;
606 }
607 }
608
609 /**
610 * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
611 * @hw: pointer to hardware structure
612 * @speed: link speed to set
613 *
614 * We set the module speed differently for fixed fiber. For other
615 * multi-speed devices we don't have an error value so here if we
616 * detect an error we just log it and exit.
617 */
618 static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
619 ixgbe_link_speed speed)
620 {
621 s32 status;
622 u8 rs, eeprom_data;
623
624 switch (speed) {
625 case IXGBE_LINK_SPEED_10GB_FULL:
626 /* one bit mask same as setting on */
627 rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
628 break;
629 case IXGBE_LINK_SPEED_1GB_FULL:
630 rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
631 break;
632 default:
633 DEBUGOUT("Invalid fixed module speed\n");
634 return;
635 }
636
637 /* Set RS0 */
638 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
639 IXGBE_I2C_EEPROM_DEV_ADDR2,
640 &eeprom_data);
641 if (status) {
642 DEBUGOUT("Failed to read Rx Rate Select RS0\n");
643 goto out;
644 }
645
646 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
647
648 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
649 IXGBE_I2C_EEPROM_DEV_ADDR2,
650 eeprom_data);
651 if (status) {
652 DEBUGOUT("Failed to write Rx Rate Select RS0\n");
653 goto out;
654 }
655
656 /* Set RS1 */
657 status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
658 IXGBE_I2C_EEPROM_DEV_ADDR2,
659 &eeprom_data);
660 if (status) {
661 DEBUGOUT("Failed to read Rx Rate Select RS1\n");
662 goto out;
663 }
664
665 eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
666
667 status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
668 IXGBE_I2C_EEPROM_DEV_ADDR2,
669 eeprom_data);
670 if (status) {
671 DEBUGOUT("Failed to write Rx Rate Select RS1\n");
672 goto out;
673 }
674 out:
675 return;
676 }
677
678 /**
679 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
680 * @hw: pointer to hardware structure
681 * @speed: new link speed
682 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
683 *
684 * Set the link speed in the AUTOC register and restarts link.
685 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
					  ixgbe_link_speed speed,
					  bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	u32 speedcnt = 0;
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	u32 i = 0;
	bool autoneg, link_up = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	speed &= link_speed;

	/*
	 * Try each speed one by one, highest priority first. We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set the module link speed.  Fixed fiber uses the SFF
		 * soft rate-select bytes; other modules use the SDP5 pin
		 * (NOTE(review): pin mapping is board-specific — confirm
		 * against the hardware design).
		 */
		if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
			ixgbe_set_fiber_fixed_speed(hw,
						    IXGBE_LINK_SPEED_10GB_FULL);
		} else {
			esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
			IXGBE_WRITE_FLUSH(hw);
		}

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_10GB_FULL,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/*
		 * Wait for the controller to acquire link. Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted. 82599 uses the same timing for 10g SFI.
		 */
		for (i = 0; i < 5; i++) {
			/* Wait for the link partner to also set speed */
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed,
						  &link_up, FALSE);
			if (status != IXGBE_SUCCESS)
				return status;

			if (link_up)
				goto out;
		}
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		/* 10G (if attempted) takes priority as the fallback speed */
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set the module link speed */
		if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
			ixgbe_set_fiber_fixed_speed(hw,
						    IXGBE_LINK_SPEED_1GB_FULL);
		} else {
			esdp_reg &= ~IXGBE_ESDP_SDP5;
			esdp_reg |= IXGBE_ESDP_SDP5_DIR;
			IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
			IXGBE_WRITE_FLUSH(hw);
		}

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_82599(hw,
						    IXGBE_LINK_SPEED_1GB_FULL,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			return status;

		/* Flap the tx laser if it has not already been done */
		ixgbe_flap_tx_laser(hw);

		/* Wait for the link partner to also set speed */
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			return status;

		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link. Configure back to the highest speed we tried,
	 * (if there was more than one). We call ourselves back with just the
	 * single highest speed that the user requested (recursion terminates
	 * because speedcnt is 1 on that second pass).
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_multispeed_fiber(hw,
			highest_link_speed, autoneg_wait_to_complete);

out:
	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	return status;
}
834
835 /**
836 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
837 * @hw: pointer to hardware structure
838 * @speed: new link speed
839 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
840 *
841 * Implements the Intel SmartSpeed algorithm.
842 **/
s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
				    ixgbe_link_speed speed,
				    bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	s32 i, j;
	bool link_up = FALSE;
	u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");

	/* Set autoneg_advertised value based on input link speed */
	hw->phy.autoneg_advertised = 0;

	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;

	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;

	if (speed & IXGBE_LINK_SPEED_100_FULL)
		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;

	/*
	 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
	 * autoneg advertisement if link is unable to be established at the
	 * highest negotiated rate. This can sometimes happen due to integrity
	 * issues with the physical media connection.
	 */

	/* First, try to get link with full advertisement */
	hw->phy.smart_speed_active = FALSE;
	for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
		status = ixgbe_setup_mac_link_82599(hw, speed,
						    autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		/*
		 * Wait for the controller to acquire link. Per IEEE 802.3ap,
		 * Section 73.10.2, we may have to wait up to 500ms if KR is
		 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
		 * Table 9 in the AN MAS.
		 */
		for (i = 0; i < 5; i++) {
			msec_delay(100);

			/* If we have link, just jump out */
			status = ixgbe_check_link(hw, &link_speed, &link_up,
						  FALSE);
			if (status != IXGBE_SUCCESS)
				goto out;

			if (link_up)
				goto out;
		}
	}

	/*
	 * We didn't get link. If we advertised KR plus one of KX4/KX
	 * (or BX4/BX), then disable KR and try again.  Otherwise there is
	 * no slower rate to fall back to, so give up here.
	 */
	if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
	    ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
		goto out;

	/* Turn SmartSpeed on to disable KR support */
	hw->phy.smart_speed_active = TRUE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);
	if (status != IXGBE_SUCCESS)
		goto out;

	/*
	 * Wait for the controller to acquire link. 600ms will allow for
	 * the AN link_fail_inhibit_timer as well for multiple cycles of
	 * parallel detect, both 10g and 1g. This allows for the maximum
	 * connect attempts as defined in the AN MAS table 73-7.
	 */
	for (i = 0; i < 6; i++) {
		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/* We didn't get link. Turn SmartSpeed back off and make one last
	 * attempt with the full advertisement.
	 */
	hw->phy.smart_speed_active = FALSE;
	status = ixgbe_setup_mac_link_82599(hw, speed,
					    autoneg_wait_to_complete);

out:
	if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
		DEBUGOUT("Smartspeed has downgraded the link speed "
		"from the maximum advertised\n");
	return status;
}
946
947 /**
948 * ixgbe_setup_mac_link_82599 - Set MAC link speed
949 * @hw: pointer to hardware structure
950 * @speed: new link speed
951 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
952 *
953 * Set the link speed in the AUTOC register and restarts link.
954 **/
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
			       ixgbe_link_speed speed,
			       bool autoneg_wait_to_complete)
{
	bool autoneg = FALSE;
	s32 status = IXGBE_SUCCESS;
	u32 autoc, pma_pmd_1g, link_mode, start_autoc;
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 orig_autoc = 0;
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
	bool got_lock = FALSE;

	DEBUGFUNC("ixgbe_setup_mac_link_82599");

	/* Check to see if speed passed in is supported. */
	status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
	if (status)
		goto out;

	/* Drop any requested speeds the hardware cannot provide */
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		autoc = hw->mac.orig_autoc;
	else
		autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);

	orig_autoc = autoc;
	/* cached_autoc holds the last value written to AUTOC; comparing
	 * against it below lets us skip the write when nothing changed */
	start_autoc = hw->mac.cached_autoc;
	link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
			/* KR stays disabled while SmartSpeed is active */
			if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
			    (hw->phy.smart_speed_active == FALSE))
				autoc |= IXGBE_AUTOC_KR_SUPP;
		}
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	/* Only touch the hardware when AUTOC actually needs to change */
	if (autoc != start_autoc) {
		/* Need SW/FW semaphore around AUTOC writes if LESM is on,
		 * likewise reset_pipeline requires us to hold this lock as
		 * it also writes to AUTOC.
		 */
		if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
			status = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
			if (status != IXGBE_SUCCESS) {
				status = IXGBE_ERR_SWFW_SYNC;
				goto out;
			}

			got_lock = TRUE;
		}

		/* Restart link */
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
		hw->mac.cached_autoc = autoc;
		(void) ixgbe_reset_pipeline_82599(hw);

		if (got_lock) {
			hw->mac.ops.release_swfw_sync(hw,
						      IXGBE_GSSR_MAC_CSR_SM);
			got_lock = FALSE;
		}

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msec_delay(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					DEBUGOUT("Autoneg did not complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msec_delay(50);
	}

out:
	return status;
}
1086
1087 /**
1088 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
1089 * @hw: pointer to hardware structure
1090 * @speed: new link speed
1091 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
1092 *
1093 * Restarts link on PHY and MAC based on settings passed in.
1094 **/
1095 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
1096 ixgbe_link_speed speed,
1097 bool autoneg_wait_to_complete)
1098 {
1099 s32 status;
1100
1101 DEBUGFUNC("ixgbe_setup_copper_link_82599");
1102
1103 /* Setup the PHY according to input speed */
1104 status = hw->phy.ops.setup_link_speed(hw, speed,
1105 autoneg_wait_to_complete);
1106 /* Set up MAC */
1107 (void) ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
1108
1109 return status;
1110 }
1111
1112 /**
1113 * ixgbe_reset_hw_82599 - Perform hardware reset
1114 * @hw: pointer to hardware structure
1115 *
1116 * Resets the hardware by resetting the transmit and receive units, masks
1117 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
1118 * reset.
1119 **/
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl, i, autoc, autoc2;
	bool link_up = FALSE;

	DEBUGFUNC("ixgbe_reset_hw_82599");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		/* clear the flag so SFP setup runs once per insertion */
		hw->phy.sfp_setup_needed = FALSE;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC. Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it. If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	/* reset bit still set after the poll window -> reset failed */
	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		DEBUGOUT("Reset polling failed to complete.\n");
	}

	msec_delay(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet. Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);

	/* Enable link if disabled in NVM */
	if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
		autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		IXGBE_WRITE_FLUSH(hw);
	}

	if (hw->mac.orig_link_settings_stored == FALSE) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.cached_autoc = autoc;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {
		if (autoc != hw->mac.orig_autoc) {
			/* Need SW/FW semaphore around AUTOC writes if LESM is
			 * on, likewise reset_pipeline requires us to hold
			 * this lock as it also writes to AUTOC.
			 */
			bool got_lock = FALSE;
			if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
				status = hw->mac.ops.acquire_swfw_sync(hw,
							IXGBE_GSSR_MAC_CSR_SM);
				if (status != IXGBE_SUCCESS) {
					status = IXGBE_ERR_SWFW_SYNC;
					goto reset_hw_out;
				}

				got_lock = TRUE;
			}

			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
			hw->mac.cached_autoc = hw->mac.orig_autoc;
			(void) ixgbe_reset_pipeline_82599(hw);

			if (got_lock)
				hw->mac.ops.release_swfw_sync(hw,
						      IXGBE_GSSR_MAC_CSR_SM);
		}

		/* restore only the upper (non-default) bits of AUTOC2 */
		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table. Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Save the SAN MAC RAR index */
		hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}
1290
1291 /**
1292 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1293 * @hw: pointer to hardware structure
1294 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	/* clear INIT_DONE so it can be polled again after the re-write */
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
		      IXGBE_FDIRCMD_CMD_MASK))
			break;
		usec_delay(10);
	}
	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
		DEBUGOUT("Flow Director previous command isn't complete, "
			 "aborting table re-initialization.\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* zero the filter free-list pointers before restarting init */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		DEBUGOUT("Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	(void) IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return IXGBE_SUCCESS;
}
1367
1368 /**
1369 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1370 * @hw: pointer to hardware structure
1371 * @fdirctrl: value to write to flow director control register
1372 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	DEBUGFUNC("ixgbe_fdir_enable_82599");

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register. Estimated times:
	 * 10G: PBALLOC = 11b, timing is 60us
	 * 1G: PBALLOC = 11b, timing is 600us
	 * 100M: PBALLOC = 11b, timing is 6ms
	 *
	 * Multiply these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time. If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		msec_delay(1);
	}

	/* init never completed; log it but continue (best effort) */
	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		DEBUGOUT("Flow Director poll time exceeded!\n");
}
1408
1409 /**
1410 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1411 * @hw: pointer to hardware structure
1412 * @fdirctrl: value to write to flow director control register, initially
1413 * contains just the value of the Rx packet buffer allocation
1414 **/
1415 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1416 {
1417 DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1418
1419 /*
1420 * Continue setup of fdirctrl register bits:
1421 * Move the flexible bytes to use the ethertype - shift 6 words
1422 * Set the maximum length per hash bucket to 0xA filters
1423 * Send interrupt when 64 filters are left
1424 */
1425 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1426 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1427 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1428
1429 /* write hashes and fdirctrl register, poll for completion */
1430 ixgbe_fdir_enable_82599(hw, fdirctrl);
1431
1432 return IXGBE_SUCCESS;
1433 }
1434
1435 /**
1436 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1437 * @hw: pointer to hardware structure
1438 * @fdirctrl: value to write to flow director control register, initially
1439 * contains just the value of the Rx packet buffer allocation
1440 **/
1441 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1442 {
1443 DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1444
1445 /*
1446 * Continue setup of fdirctrl register bits:
1447 * Turn perfect match filtering on
1448 * Report hash in RSS field of Rx wb descriptor
1449 * Initialize the drop queue
1450 * Move the flexible bytes to use the ethertype - shift 6 words
1451 * Set the maximum length per hash bucket to 0xA filters
1452 * Send interrupt when 64 (0x4 * 16) filters are left
1453 */
1454 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1455 IXGBE_FDIRCTRL_REPORT_STATUS |
1456 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1457 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1458 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1459 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1460
1461 /* write hashes and fdirctrl register, poll for completion */
1462 ixgbe_fdir_enable_82599(hw, fdirctrl);
1463
1464 return IXGBE_SUCCESS;
1465 }
1466
1467 /*
1468 * These defines allow us to quickly generate all of the necessary instructions
1469 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1470 * for values 0 through 15
1471 */
/* Key bits set in BOTH the bucket and signature keys feed common_hash */
#define IXGBE_ATR_COMMON_HASH_KEY \
	(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
#ifdef lint
/* lint cannot cope with the statement macro below; expand to nothing */
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n)
#else
/*
 * One round of the hash: key bit n selects whether lo_hash_dword
 * contributes to the common, bucket, or signature hash, and key bit
 * n+16 does the same for hi_hash_dword. Relies on common_hash,
 * bucket_hash, sig_hash, lo_hash_dword and hi_hash_dword being in
 * scope at the expansion site.
 */
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0);
#endif
1494
1495 /**
1496 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1497 * @stream: input bitstream to compute the hash on
1498 *
1499 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
1501 * compiler work out all of the conditional ifs since the keys are static
1502 * defines, and computing two keys at once since the hashed dword stream
1503 * will be the same for both keys.
1504 **/
u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
				     union ixgbe_atr_hash_dword common)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = IXGBE_NTOHL(input.dword);

	/* generate common hash dword */
	hi_hash_dword = IXGBE_NTOHL(common.dword);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
	IXGBE_COMPUTE_SIG_HASH_ITERATION(15);

	/* combine common_hash result with signature and bucket hashes */
	bucket_hash ^= common_hash;
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	/* the signature hash occupies the upper 16 bits of the result */
	sig_hash ^= common_hash << 16;
	sig_hash &= IXGBE_ATR_HASH_MASK << 16;

	/* return completed signature hash */
	return sig_hash ^ bucket_hash;
}
1560
1561 /**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
1563 * @hw: pointer to hardware structure
1564 * @input: unique input dword
1565 * @common: compressed common input dword
1566 * @queue: queue index to direct traffic to
1567 **/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_hash_dword input,
					  union ixgbe_atr_hash_dword common,
					  u8 queue)
{
	u64 fdirhashcmd;
	u32 fdircmd;

	DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");

	/*
	 * Get the flow_type in order to program FDIRCMD properly
	 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
	 */
	switch (input.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
	case IXGBE_ATR_FLOW_TYPE_TCPV6:
	case IXGBE_ATR_FLOW_TYPE_UDPV6:
	case IXGBE_ATR_FLOW_TYPE_SCTPV6:
		break;
	default:
		/* reject unknown flow types before touching the hardware */
		DEBUGOUT(" Error on flow type input\n");
		return IXGBE_ERR_CONFIG;
	}

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhashcmd = (u64)fdircmd << 32;
	fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);

	return IXGBE_SUCCESS;
}
1613
#ifdef lint
/* lint cannot cope with the statement macro below; expand to nothing */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n)
#else
/*
 * One round of the bucket hash: key bit n gates the contribution of
 * lo_hash_dword and key bit n+16 gates hi_hash_dword. Relies on
 * bucket_hash, lo_hash_dword and hi_hash_dword being in scope at the
 * expansion site.
 */
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
} while (0);
#endif
1626 /**
1627 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1628 * @atr_input: input bitstream to compute the hash on
1629 * @input_mask: mask for the input bitstream
1630 *
 * This function serves two main purposes. First it applies the input_mask
1632 * to the atr_input resulting in a cleaned up atr_input data stream.
1633 * Secondly it computes the hash and stores it in the bkt_hash field at
1634 * the end of the input byte stream. This way it will be available for
1635 * future use without needing to recompute the hash.
1636 **/
1637 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1638 union ixgbe_atr_input *input_mask)
1639 {
1640
1641 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1642 u32 bucket_hash = 0;
1643
1644 /* Apply masks to input data */
1645 input->dword_stream[0] &= input_mask->dword_stream[0];
1646 input->dword_stream[1] &= input_mask->dword_stream[1];
1647 input->dword_stream[2] &= input_mask->dword_stream[2];
1648 input->dword_stream[3] &= input_mask->dword_stream[3];
1649 input->dword_stream[4] &= input_mask->dword_stream[4];
1650 input->dword_stream[5] &= input_mask->dword_stream[5];
1651 input->dword_stream[6] &= input_mask->dword_stream[6];
1652 input->dword_stream[7] &= input_mask->dword_stream[7];
1653 input->dword_stream[8] &= input_mask->dword_stream[8];
1654 input->dword_stream[9] &= input_mask->dword_stream[9];
1655 input->dword_stream[10] &= input_mask->dword_stream[10];
1656
1657 /* record the flow_vm_vlan bits as they are a key part to the hash */
1658 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1659
1660 /* generate common hash dword */
1661 hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
1662 input->dword_stream[2] ^
1663 input->dword_stream[3] ^
1664 input->dword_stream[4] ^
1665 input->dword_stream[5] ^
1666 input->dword_stream[6] ^
1667 input->dword_stream[7] ^
1668 input->dword_stream[8] ^
1669 input->dword_stream[9] ^
1670 input->dword_stream[10]);
1671
1672 /* low dword is word swapped version of common */
1673 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1674
1675 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1676 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1677
1678 /* Process bits 0 and 16 */
1679 IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1680
1681 /*
1682 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1683 * delay this because bit 0 of the stream should not be processed
1684 * so we do not add the vlan until after bit 0 was processed
1685 */
1686 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1687
1688 /* Process remaining 30 bit of the key */
1689 IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
1690 IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
1691 IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1692 IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1693 IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1694 IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1695 IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1696 IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1697 IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1698 IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1699 IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1700 IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1701 IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1702 IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1703 IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1704
1705 /*
1706 * Limit hash to 13 bits since max bucket count is 8K.
1707 * Store result at the end of the input stream.
1708 */
1709 input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1710 }
1711
1712 /**
1713 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1714 * @input_mask: mask to be bit swapped
1715 *
1716 * The source and destination port masks for flow director are bit swapped
 * in that bit 15 affects bit 0, bit 14 affects bit 1, and so on. In order to
1718 * generate a correctly swapped value we need to bit swap the mask and that
1719 * is what is accomplished by this function.
1720 **/
1721 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1722 {
1723 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1724 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1725 mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1726 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1727 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1728 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1729 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1730 }
1731
1732 /*
1733 * These two macros are meant to address the fact that we have registers
1734 * that are either all or in part big-endian. As a result on big-endian
1735 * systems we will end up byte swapping the value to little-endian before
1736 * it is byte swapped again and written to the hardware in the original
1737 * big-endian format.
1738 */
/* reverse the byte order of a 32-bit value */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

/* write a network-order value to a register field the HW reads big-endian */
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))

/* swap the two bytes of a 16-bit value and convert to host order */
#define IXGBE_STORE_AS_BE16(_value) \
	IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1748
/**
 * ixgbe_fdir_set_input_mask_82599 - Program the Flow Director input masks
 * @hw: pointer to hardware structure
 * @input_mask: mask to program; a zero field means "mask that field out"
 *
 * Validates the partial-mask combinations the hardware supports and
 * programs FDIRM, FDIRTCPM/FDIRUDPM and the IPv4 address mask registers.
 * Returns IXGBE_ERR_CONFIG for unsupported mask combinations.
 **/
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;

	DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");

	/*
	 * Program the relevant mask registers. If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field. Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well. L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only. IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		DEBUGOUT(" bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* FALLTHRU */
	case 0x7F:
		break;
	default:
		/* the pool field is all-or-nothing; partial masks fail */
		DEBUGOUT(" Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		/* masking L4 type while matching ports is contradictory */
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			DEBUGOUT(" Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* FALLTHRU */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		DEBUGOUT(" Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID, fall through to mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* FALLTHRU */
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only, fall through */
		fdirm |= IXGBE_FDIRM_VLANID;
		/* FALLTHRU */
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		DEBUGOUT(" Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes, fall through */
		fdirm |= IXGBE_FDIRM_FLEX;
		/* FALLTHRU */
	case 0xFFFF:
		break;
	default:
		DEBUGOUT(" Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
			     ~input_mask->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
			     ~input_mask->formatted.dst_ip[0]);

	return IXGBE_SUCCESS;
}
1851
/**
 * ixgbe_fdir_write_perfect_filter_82599 - Program a perfect filter
 * @hw: pointer to hardware structure
 * @input: filter values to program (expected already masked and hashed)
 * @soft_id: software index stored with the filter for later lookup
 * @queue: Rx queue index to direct matching traffic to
 *
 * Writes the filter fields into the FDIR registers and issues the
 * ADD_FLOW command. Caller is expected to serialize against other
 * FDIR programming (see the note on the add_perfect_filter wrapper).
 **/
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;

	DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");

	/* currently IPv6 is not supported, must be programmed with 0 */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
			     input->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
			     input->formatted.src_ip[1]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
			     input->formatted.src_ip[2]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record source and destination port (little-endian)*/
	fdirport = IXGBE_NTOHS(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= IXGBE_NTOHS(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return IXGBE_SUCCESS;
}
1910
/**
 * ixgbe_fdir_erase_perfect_filter_82599 - remove a perfect filter from HW
 * @hw: pointer to hardware structure
 * @input: input bitstream; only formatted.bkt_hash is consumed here
 * @soft_id: software index that was used when the filter was programmed
 *
 * Programs FDIRHASH with the bucket hash and software index, queries the
 * hardware for a matching filter, and issues a remove-flow command if one
 * is present.  The register sequence (hash, flush, command) must stay in
 * this exact order so the command acts on the intended hash.
 *
 * Returns IXGBE_ERR_FDIR_REINIT_FAILED if the query command never
 * completes, IXGBE_SUCCESS otherwise.
 **/
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd = 0;
	u32 retry_count;
	s32 err = IXGBE_SUCCESS;

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	/* poll until the CMD field clears, i.e. the query has completed */
	for (retry_count = 10; retry_count; retry_count--) {
		/* allow 10us for query to process */
		usec_delay(10);
		/* verify query completed successfully */
		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			break;
	}

	/* timed out: report failure, but still attempt the removal below
	 * using whatever FDIRCMD last read back */
	if (!retry_count)
		err = IXGBE_ERR_FDIR_REINIT_FAILED;

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return err;
}
1953
1954 /**
1955 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1956 * @hw: pointer to hardware structure
1957 * @input: input bitstream
1958 * @input_mask: mask for the input bitstream
1959 * @soft_id: software index for the filters
1960 * @queue: queue index to direct traffic to
1961 *
1962 * Note that the caller to this function must lock before calling, since the
1963 * hardware writes must be protected from one another.
1964 **/
1965 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1966 union ixgbe_atr_input *input,
1967 union ixgbe_atr_input *input_mask,
1968 u16 soft_id, u8 queue)
1969 {
1970 s32 err = IXGBE_ERR_CONFIG;
1971
1972 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1973
1974 /*
1975 * Check flow_type formatting, and bail out before we touch the hardware
1976 * if there's a configuration issue
1977 */
1978 switch (input->formatted.flow_type) {
1979 case IXGBE_ATR_FLOW_TYPE_IPV4:
1980 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
1981 if (input->formatted.dst_port || input->formatted.src_port) {
1982 DEBUGOUT(" Error on src/dst port\n");
1983 return IXGBE_ERR_CONFIG;
1984 }
1985 break;
1986 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1987 if (input->formatted.dst_port || input->formatted.src_port) {
1988 DEBUGOUT(" Error on src/dst port\n");
1989 return IXGBE_ERR_CONFIG;
1990 }
1991 /* FALLTHRU */
1992 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1993 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1994 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
1995 IXGBE_ATR_L4TYPE_MASK;
1996 break;
1997 default:
1998 DEBUGOUT(" Error on flow type input\n");
1999 return err;
2000 }
2001
2002 /* program input mask into the HW */
2003 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
2004 if (err)
2005 return err;
2006
2007 /* apply mask and compute/store hash */
2008 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
2009
2010 /* program filters to filter memory */
2011 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
2012 soft_id, queue);
2013 }
2014
2015 /**
2016 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
2017 * @hw: pointer to hardware structure
2018 * @reg: analog register to read
2019 * @val: read value
2020 *
2021 * Performs read operation to Omer analog register specified.
2022 **/
2023 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
2024 {
2025 u32 core_ctl;
2026
2027 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
2028
2029 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
2030 (reg << 8));
2031 IXGBE_WRITE_FLUSH(hw);
2032 usec_delay(10);
2033 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
2034 *val = (u8)core_ctl;
2035
2036 return IXGBE_SUCCESS;
2037 }
2038
2039 /**
2040 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
2041 * @hw: pointer to hardware structure
2042 * @reg: atlas register to write
2043 * @val: value to write
2044 *
2045 * Performs write operation to Omer analog register specified.
2046 **/
2047 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
2048 {
2049 u32 core_ctl;
2050
2051 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
2052
2053 core_ctl = (reg << 8) | val;
2054 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
2055 IXGBE_WRITE_FLUSH(hw);
2056 usec_delay(10);
2057
2058 return IXGBE_SUCCESS;
2059 }
2060
2061 /**
2062 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
2063 * @hw: pointer to hardware structure
2064 *
2065 * Starts the hardware using the generic start_hw function
2066 * and the generation start_hw function.
2067 * Then performs revision-specific operations, if any.
2068 **/
2069 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
2070 {
2071 s32 ret_val = IXGBE_SUCCESS;
2072
2073 DEBUGFUNC("ixgbe_start_hw_82599");
2074
2075 ret_val = ixgbe_start_hw_generic(hw);
2076 if (ret_val != IXGBE_SUCCESS)
2077 goto out;
2078
2079 ret_val = ixgbe_start_hw_gen2(hw);
2080 if (ret_val != IXGBE_SUCCESS)
2081 goto out;
2082
2083 /* We need to run link autotry after the driver loads */
2084 hw->mac.autotry_restart = TRUE;
2085
2086 if (ret_val == IXGBE_SUCCESS)
2087 ret_val = ixgbe_verify_fw_version_82599(hw);
2088 out:
2089 return ret_val;
2090 }
2091
2092 /**
2093 * ixgbe_identify_phy_82599 - Get physical layer module
2094 * @hw: pointer to hardware structure
2095 *
2096 * Determines the physical layer module found on the current adapter.
2097 * If PHY already detected, maintains current PHY type in hw struct,
2098 * otherwise executes the PHY detection routine.
2099 **/
2100 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
2101 {
2102 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
2103
2104 DEBUGFUNC("ixgbe_identify_phy_82599");
2105
2106 /* Detect PHY if not unknown - returns success if already detected. */
2107 status = ixgbe_identify_phy_generic(hw);
2108 if (status != IXGBE_SUCCESS) {
2109 /* 82599 10GBASE-T requires an external PHY */
2110 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
2111 goto out;
2112 else
2113 status = ixgbe_identify_module_generic(hw);
2114 }
2115
2116 /* Set PHY type none if no PHY detected */
2117 if (hw->phy.type == ixgbe_phy_unknown) {
2118 hw->phy.type = ixgbe_phy_none;
2119 status = IXGBE_SUCCESS;
2120 }
2121
2122 /* Return error if SFP module has been detected but is not supported */
2123 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
2124 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
2125
2126 out:
2127 return status;
2128 }
2129
2130 /**
2131 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
2132 * @hw: pointer to hardware structure
2133 *
2134 * Determines physical layer capabilities of the current configuration.
2135 **/
2136 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
2137 {
2138 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
2139 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2140 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2141 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
2142 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
2143 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
2144 u16 ext_ability = 0;
2145 u8 comp_codes_10g = 0;
2146 u8 comp_codes_1g = 0;
2147
2148 DEBUGFUNC("ixgbe_get_support_physical_layer_82599");
2149
2150 hw->phy.ops.identify(hw);
2151
2152 switch (hw->phy.type) {
2153 case ixgbe_phy_tn:
2154 case ixgbe_phy_cu_unknown:
2155 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
2156 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
2157 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
2158 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
2159 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
2160 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
2161 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
2162 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
2163 goto out;
2164 default:
2165 break;
2166 }
2167
2168 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
2169 case IXGBE_AUTOC_LMS_1G_AN:
2170 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
2171 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
2172 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
2173 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
2174 goto out;
2175 }
2176 /* SFI mode so read SFP module */
2177 goto sfp_check;
2178 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2179 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2180 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2181 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2182 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2183 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2184 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2185 goto out;
2186 case IXGBE_AUTOC_LMS_10G_SERIAL:
2187 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2188 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2189 goto out;
2190 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2191 goto sfp_check;
2192 break;
2193 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2194 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2195 if (autoc & IXGBE_AUTOC_KX_SUPP)
2196 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2197 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2198 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2199 if (autoc & IXGBE_AUTOC_KR_SUPP)
2200 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2201 goto out;
2202 default:
2203 goto out;
2204 }
2205
2206 sfp_check:
2207 /* SFP check must be done last since DA modules are sometimes used to
2208 * test KR mode - we need to id KR mode correctly before SFP module.
2209 * Call identify_sfp because the pluggable module may have changed */
2210 hw->phy.ops.identify_sfp(hw);
2211 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2212 goto out;
2213
2214 switch (hw->phy.type) {
2215 case ixgbe_phy_sfp_passive_tyco:
2216 case ixgbe_phy_sfp_passive_unknown:
2217 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2218 break;
2219 case ixgbe_phy_sfp_ftl_active:
2220 case ixgbe_phy_sfp_active_unknown:
2221 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2222 break;
2223 case ixgbe_phy_sfp_avago:
2224 case ixgbe_phy_sfp_ftl:
2225 case ixgbe_phy_sfp_intel:
2226 case ixgbe_phy_sfp_unknown:
2227 hw->phy.ops.read_i2c_eeprom(hw,
2228 IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
2229 hw->phy.ops.read_i2c_eeprom(hw,
2230 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2231 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2232 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2233 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2234 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2235 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2236 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2237 else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
2238 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
2239 break;
2240 default:
2241 break;
2242 }
2243
2244 out:
2245 return physical_layer;
2246 }
2247
2248 /**
2249 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2250 * @hw: pointer to hardware structure
2251 * @regval: register value to write to RXCTRL
2252 *
2253 * Enables the Rx DMA unit for 82599
2254 **/
2255 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2256 {
2257
2258 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2259
2260 /*
2261 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2262 * If traffic is incoming before we enable the Rx unit, it could hang
2263 * the Rx DMA unit. Therefore, make sure the security engine is
2264 * completely disabled prior to enabling the Rx unit.
2265 */
2266
2267 hw->mac.ops.disable_sec_rx_path(hw);
2268
2269 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2270
2271 hw->mac.ops.enable_sec_rx_path(hw);
2272
2273 return IXGBE_SUCCESS;
2274 }
2275
2276 /**
2277 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
2278 * @hw: pointer to hardware structure
2279 *
2280 * Verifies that installed the firmware version is 0.6 or higher
2281 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2282 *
2283 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2284 * if the FW version is not supported.
2285 **/
2286 s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2287 {
2288 s32 status = IXGBE_ERR_EEPROM_VERSION;
2289 u16 fw_offset, fw_ptp_cfg_offset;
2290 u16 fw_version = 0;
2291
2292 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2293
2294 /* firmware check is only necessary for SFI devices */
2295 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2296 status = IXGBE_SUCCESS;
2297 goto fw_version_out;
2298 }
2299
2300 /* get the offset to the Firmware Module block */
2301 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2302
2303 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2304 goto fw_version_out;
2305
2306 /* get the offset to the Pass Through Patch Configuration block */
2307 hw->eeprom.ops.read(hw, (fw_offset +
2308 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2309 &fw_ptp_cfg_offset);
2310
2311 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2312 goto fw_version_out;
2313
2314 /* get the firmware version */
2315 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2316 IXGBE_FW_PATCH_VERSION_4), &fw_version);
2317
2318 if (fw_version > 0x5)
2319 status = IXGBE_SUCCESS;
2320
2321 fw_version_out:
2322 return status;
2323 }
2324
2325 /**
2326 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2327 * @hw: pointer to hardware structure
2328 *
2329 * Returns TRUE if the LESM FW module is present and enabled. Otherwise
2330 * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2331 **/
2332 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2333 {
2334 bool lesm_enabled = FALSE;
2335 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2336 s32 status;
2337
2338 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2339
2340 /* get the offset to the Firmware Module block */
2341 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2342
2343 if ((status != IXGBE_SUCCESS) ||
2344 (fw_offset == 0) || (fw_offset == 0xFFFF))
2345 goto out;
2346
2347 /* get the offset to the LESM Parameters block */
2348 status = hw->eeprom.ops.read(hw, (fw_offset +
2349 IXGBE_FW_LESM_PARAMETERS_PTR),
2350 &fw_lesm_param_offset);
2351
2352 if ((status != IXGBE_SUCCESS) ||
2353 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2354 goto out;
2355
2356 /* get the lesm state word */
2357 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2358 IXGBE_FW_LESM_STATE_1),
2359 &fw_lesm_state);
2360
2361 if ((status == IXGBE_SUCCESS) &&
2362 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2363 lesm_enabled = TRUE;
2364
2365 out:
2366 return lesm_enabled;
2367 }
2368
2369 /**
2370 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2371 * fastest available method
2372 *
2373 * @hw: pointer to hardware structure
2374 * @offset: offset of word in EEPROM to read
2375 * @words: number of words
2376 * @data: word(s) read from the EEPROM
2377 *
2378 * Retrieves 16 bit word(s) read from EEPROM
2379 **/
2380 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2381 u16 words, u16 *data)
2382 {
2383 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2384 s32 ret_val = IXGBE_ERR_CONFIG;
2385
2386 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2387
2388 /*
2389 * If EEPROM is detected and can be addressed using 14 bits,
2390 * use EERD otherwise use bit bang
2391 */
2392 if ((eeprom->type == ixgbe_eeprom_spi) &&
2393 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2394 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2395 data);
2396 else
2397 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2398 words,
2399 data);
2400
2401 return ret_val;
2402 }
2403
2404 /**
2405 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2406 * fastest available method
2407 *
2408 * @hw: pointer to hardware structure
2409 * @offset: offset of word in the EEPROM to read
2410 * @data: word read from the EEPROM
2411 *
2412 * Reads a 16 bit word from the EEPROM
2413 **/
2414 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2415 u16 offset, u16 *data)
2416 {
2417 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2418 s32 ret_val = IXGBE_ERR_CONFIG;
2419
2420 DEBUGFUNC("ixgbe_read_eeprom_82599");
2421
2422 /*
2423 * If EEPROM is detected and can be addressed using 14 bits,
2424 * use EERD otherwise use bit bang
2425 */
2426 if ((eeprom->type == ixgbe_eeprom_spi) &&
2427 (offset <= IXGBE_EERD_MAX_ADDR))
2428 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2429 else
2430 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2431
2432 return ret_val;
2433 }
2434
2435 /**
2436 * ixgbe_reset_pipeline_82599 - perform pipeline reset
2437 *
2438 * @hw: pointer to hardware structure
2439 *
2440 * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2441 * full pipeline reset
2442 **/
2443 s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2444 {
2445 s32 ret_val;
2446 u32 anlp1_reg = 0;
2447 u32 i, autoc_reg, autoc2_reg;
2448
2449 /* Enable link if disabled in NVM */
2450 autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2451 if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2452 autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2453 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2454 IXGBE_WRITE_FLUSH(hw);
2455 }
2456
2457 autoc_reg = hw->mac.cached_autoc;
2458 autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2459 /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2460 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
2461 /* Wait for AN to leave state 0 */
2462 for (i = 0; i < 10; i++) {
2463 msec_delay(4);
2464 anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2465 if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2466 break;
2467 }
2468
2469 if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2470 DEBUGOUT("auto negotiation not completed\n");
2471 ret_val = IXGBE_ERR_RESET_FAILED;
2472 goto reset_pipeline_out;
2473 }
2474
2475 ret_val = IXGBE_SUCCESS;
2476
2477 reset_pipeline_out:
2478 /* Write AUTOC register with original LMS field and Restart_AN */
2479 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2480 IXGBE_WRITE_FLUSH(hw);
2481
2482 return ret_val;
2483 }
2484
2485
2486