Import some changes from FreeBSD (details later, this is quick-n-dirty for now).
--- old/usr/src/uts/common/io/ixgbe/ixgbe_82599.c
+++ new/usr/src/uts/common/io/ixgbe/ixgbe_82599.c
1 1 /******************************************************************************
2 2
3 - Copyright (c) 2001-2012, Intel Corporation
3 + Copyright (c) 2001-2013, Intel Corporation
4 4 All rights reserved.
5 5
6 6 Redistribution and use in source and binary forms, with or without
7 7 modification, are permitted provided that the following conditions are met:
8 8
9 9 1. Redistributions of source code must retain the above copyright notice,
10 10 this list of conditions and the following disclaimer.
11 11
12 12 2. Redistributions in binary form must reproduce the above copyright
13 13 notice, this list of conditions and the following disclaimer in the
14 14 documentation and/or other materials provided with the distribution.
15 15
16 16 3. Neither the name of the Intel Corporation nor the names of its
17 17 contributors may be used to endorse or promote products derived from
18 18 this software without specific prior written permission.
19 19
20 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 30 POSSIBILITY OF SUCH DAMAGE.
31 31
32 32 ******************************************************************************/
33 33 /*$FreeBSD: src/sys/dev/ixgbe/ixgbe_82599.c,v 1.8 2012/07/05 20:51:44 jfv Exp $*/
34 34
35 35 #include "ixgbe_type.h"
36 36 #include "ixgbe_82599.h"
37 37 #include "ixgbe_api.h"
38 38 #include "ixgbe_common.h"
39 39 #include "ixgbe_phy.h"
40 40
41 41 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
42 42 ixgbe_link_speed speed,
43 - bool autoneg,
44 43 bool autoneg_wait_to_complete);
45 44 static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
46 45 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
47 46 u16 offset, u16 *data);
48 47 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
49 48 u16 words, u16 *data);
50 49
50 +static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
51 +{
52 + u32 fwsm, manc, factps;
53 +
54 + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
55 + if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT)
56 + return FALSE;
57 +
58 + manc = IXGBE_READ_REG(hw, IXGBE_MANC);
59 + if (!(manc & IXGBE_MANC_RCV_TCO_EN))
60 + return FALSE;
61 +
62 + factps = IXGBE_READ_REG(hw, IXGBE_FACTPS);
63 + if (factps & IXGBE_FACTPS_MNGCG)
64 + return FALSE;
65 +
66 + return TRUE;
67 +}
68 +
51 69 void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
52 70 {
53 71 struct ixgbe_mac_info *mac = &hw->mac;
54 72
55 73 DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
56 74
57 - /* enable the laser control functions for SFP+ fiber */
58 - if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
75 + /*
76 + * enable the laser control functions for SFP+ fiber
77 + * and MNG not enabled
78 + */
79 + if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
80 + !(ixgbe_mng_enabled(hw))) {
59 81 mac->ops.disable_tx_laser =
60 82 &ixgbe_disable_tx_laser_multispeed_fiber;
61 83 mac->ops.enable_tx_laser =
62 84 &ixgbe_enable_tx_laser_multispeed_fiber;
63 85 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
64 86
65 87 } else {
66 88 mac->ops.disable_tx_laser = NULL;
67 89 mac->ops.enable_tx_laser = NULL;
68 90 mac->ops.flap_tx_laser = NULL;
69 91 }
70 92
71 93 if (hw->phy.multispeed_fiber) {
72 94 /* Set up dual speed SFP+ support */
73 95 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
74 96 } else {
75 97 if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
76 98 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
77 99 hw->phy.smart_speed == ixgbe_smart_speed_on) &&
78 100 !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
79 101 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
80 102 } else {
81 103 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
82 104 }
83 105 }
84 106 }
85 107
86 108 /**
87 109 * ixgbe_init_phy_ops_82599 - PHY/SFP specific init
88 110 * @hw: pointer to hardware structure
89 111 *
90 112 * Initialize any function pointers that were not able to be
91 113 * set during init_shared_code because the PHY/SFP type was
92 114 * not known. Perform the SFP init if necessary.
93 115 *
94 116 **/
95 117 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
96 118 {
97 119 struct ixgbe_mac_info *mac = &hw->mac;
98 120 struct ixgbe_phy_info *phy = &hw->phy;
99 121 s32 ret_val = IXGBE_SUCCESS;
100 122
101 123 DEBUGFUNC("ixgbe_init_phy_ops_82599");
102 124
103 125 /* Identify the PHY or SFP module */
104 126 ret_val = phy->ops.identify(hw);
105 127 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
106 128 goto init_phy_ops_out;
107 129
108 130 /* Setup function pointers based on detected SFP module and speeds */
109 131 ixgbe_init_mac_link_ops_82599(hw);
110 132 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
111 133 hw->phy.ops.reset = NULL;
112 134
113 135 /* If copper media, overwrite with copper function pointers */
114 136 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
115 137 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
116 138 mac->ops.get_link_capabilities =
117 139 &ixgbe_get_copper_link_capabilities_generic;
118 140 }
119 141
120 142 /* Set necessary function pointers based on phy type */
121 143 switch (hw->phy.type) {
122 144 case ixgbe_phy_tn:
123 145 phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
124 146 phy->ops.check_link = &ixgbe_check_phy_link_tnx;
125 147 phy->ops.get_firmware_version =
126 148 &ixgbe_get_phy_firmware_version_tnx;
127 149 break;
128 150 default:
129 151 break;
130 152 }
131 153 init_phy_ops_out:
132 154 return ret_val;
133 155 }
134 156
135 157 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
136 158 {
137 159 s32 ret_val = IXGBE_SUCCESS;
138 - u32 reg_anlp1 = 0;
139 - u32 i = 0;
140 160 u16 list_offset, data_offset, data_value;
161 + bool got_lock = FALSE;
141 162
142 163 DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
143 164
144 165 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
145 166 ixgbe_init_mac_link_ops_82599(hw);
146 167
147 168 hw->phy.ops.reset = NULL;
148 169
149 170 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
150 171 &data_offset);
151 172 if (ret_val != IXGBE_SUCCESS)
152 173 goto setup_sfp_out;
153 174
154 175 /* PHY config will finish before releasing the semaphore */
155 176 ret_val = hw->mac.ops.acquire_swfw_sync(hw,
156 177 IXGBE_GSSR_MAC_CSR_SM);
157 178 if (ret_val != IXGBE_SUCCESS) {
158 179 ret_val = IXGBE_ERR_SWFW_SYNC;
159 180 goto setup_sfp_out;
160 181 }
161 182
162 183 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
163 184 while (data_value != 0xffff) {
164 185 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
165 186 IXGBE_WRITE_FLUSH(hw);
166 187 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
167 188 }
168 189
169 190 /* Release the semaphore */
170 191 hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
171 192 /* Delay obtaining semaphore again to allow FW access */
172 193 msec_delay(hw->eeprom.semaphore_delay);
173 194
174 - /* Now restart DSP by setting Restart_AN and clearing LMS */
175 - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
176 - IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
177 - IXGBE_AUTOC_AN_RESTART));
195 + /* Need SW/FW semaphore around AUTOC writes if LESM on,
196 + * likewise reset_pipeline requires lock as it also writes
197 + * AUTOC.
198 + */
199 + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
200 + ret_val = hw->mac.ops.acquire_swfw_sync(hw,
201 + IXGBE_GSSR_MAC_CSR_SM);
202 + if (ret_val != IXGBE_SUCCESS) {
203 + ret_val = IXGBE_ERR_SWFW_SYNC;
204 + goto setup_sfp_out;
205 + }
178 206
179 - /* Wait for AN to leave state 0 */
180 - for (i = 0; i < 10; i++) {
181 - msec_delay(4);
182 - reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
183 - if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
184 - break;
207 + got_lock = TRUE;
185 208 }
186 - if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
209 +
210 + /* Restart DSP and set SFI mode */
211 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
212 + IXGBE_AUTOC_LMS_10G_SERIAL));
213 + hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
214 + ret_val = ixgbe_reset_pipeline_82599(hw);
215 +
216 + if (got_lock) {
217 + hw->mac.ops.release_swfw_sync(hw,
218 + IXGBE_GSSR_MAC_CSR_SM);
219 + got_lock = FALSE;
220 + }
221 +
222 + if (ret_val) {
187 223 DEBUGOUT("sfp module setup not complete\n");
188 224 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
189 225 goto setup_sfp_out;
190 226 }
191 227
192 - /* Restart DSP by setting Restart_AN and return to SFI mode */
193 - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
194 - IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
195 - IXGBE_AUTOC_AN_RESTART));
196 228 }
197 229
198 230 setup_sfp_out:
199 231 return ret_val;
200 232 }
201 233
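
Reviewer note: the hunk above, together with the later changes to ixgbe_start_mac_link_82599(), ixgbe_setup_mac_link_82599() and ixgbe_reset_hw_82599(), applies the same rule imported from FreeBSD: any write to AUTOC, including ixgbe_reset_pipeline_82599() which also writes AUTOC, must be bracketed by the SW/FW MAC CSR semaphore whenever LESM firmware is enabled. A minimal sketch of that pattern, using only helpers visible in this diff (the wrapper name is hypothetical and is not part of the change):

static s32 ixgbe_autoc_write_locked(struct ixgbe_hw *hw, u32 autoc)
{
	bool got_lock = FALSE;
	s32 status = IXGBE_SUCCESS;

	/* Serialize with LESM firmware, which also owns AUTOC */
	if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
		status = hw->mac.ops.acquire_swfw_sync(hw,
		    IXGBE_GSSR_MAC_CSR_SM);
		if (status != IXGBE_SUCCESS)
			return IXGBE_ERR_SWFW_SYNC;
		got_lock = TRUE;
	}

	IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
	hw->mac.cached_autoc = autoc;
	status = ixgbe_reset_pipeline_82599(hw);	/* also touches AUTOC */

	if (got_lock)
		hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);

	return status;
}
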
202 234 /**
203 235 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type
204 236 * @hw: pointer to hardware structure
205 237 *
206 238 * Initialize the function pointers and assign the MAC type for 82599.
207 239 * Does not touch the hardware.
208 240 **/
209 241
210 242 s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
211 243 {
212 244 struct ixgbe_mac_info *mac = &hw->mac;
213 245 struct ixgbe_phy_info *phy = &hw->phy;
214 246 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
215 247 s32 ret_val;
216 248
217 249 DEBUGFUNC("ixgbe_init_ops_82599");
218 250
219 - ret_val = ixgbe_init_phy_ops_generic(hw);
251 + (void) ixgbe_init_phy_ops_generic(hw);
220 252 ret_val = ixgbe_init_ops_generic(hw);
221 253
222 254 /* PHY */
223 255 phy->ops.identify = &ixgbe_identify_phy_82599;
224 256 phy->ops.init = &ixgbe_init_phy_ops_82599;
225 257
226 258 /* MAC */
227 259 mac->ops.reset_hw = &ixgbe_reset_hw_82599;
228 260 mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
229 261 mac->ops.get_media_type = &ixgbe_get_media_type_82599;
230 262 mac->ops.get_supported_physical_layer =
231 263 &ixgbe_get_supported_physical_layer_82599;
232 264 mac->ops.disable_sec_rx_path = &ixgbe_disable_sec_rx_path_generic;
233 265 mac->ops.enable_sec_rx_path = &ixgbe_enable_sec_rx_path_generic;
234 266 mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
235 267 mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
236 268 mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
237 269 mac->ops.start_hw = &ixgbe_start_hw_82599;
238 270 mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
239 271 mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
240 272 mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
241 273 mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
242 274 mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
243 275
244 276 /* RAR, Multicast, VLAN */
245 277 mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
246 278 mac->ops.set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic;
247 279 mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
248 280 mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
249 281 mac->rar_highwater = 1;
250 282 mac->ops.set_vfta = &ixgbe_set_vfta_generic;
251 283 mac->ops.set_vlvf = &ixgbe_set_vlvf_generic;
252 284 mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
253 285 mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
254 286 mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
255 287 mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
256 288 mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
257 289
258 290 /* Link */
259 291 mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
260 292 mac->ops.check_link = &ixgbe_check_mac_link_generic;
261 293 mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
262 294 ixgbe_init_mac_link_ops_82599(hw);
263 295
264 296 mac->mcft_size = 128;
265 297 mac->vft_size = 128;
266 298 mac->num_rar_entries = 128;
267 299 mac->rx_pb_size = 512;
268 300 mac->max_tx_queues = 128;
269 301 mac->max_rx_queues = 128;
270 302 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
271 303
272 304 mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
273 305 IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
274 306
275 307 hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
276 308
277 309 /* EEPROM */
278 310 eeprom->ops.read = &ixgbe_read_eeprom_82599;
279 311 eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
280 312
281 313 /* Manageability interface */
282 314 mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
283 315
284 316
285 317 return ret_val;
286 318 }
287 319
288 320 /**
289 321 * ixgbe_get_link_capabilities_82599 - Determines link capabilities
290 322 * @hw: pointer to hardware structure
291 323 * @speed: pointer to link speed
292 - * @negotiation: TRUE when autoneg or autotry is enabled
324 + * @autoneg: TRUE when autoneg or autotry is enabled
293 325 *
294 326 * Determines the link capabilities by reading the AUTOC register.
295 327 **/
296 328 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
297 329 ixgbe_link_speed *speed,
298 - bool *negotiation)
330 + bool *autoneg)
299 331 {
300 332 s32 status = IXGBE_SUCCESS;
301 333 u32 autoc = 0;
302 334
303 335 DEBUGFUNC("ixgbe_get_link_capabilities_82599");
304 336
305 337
306 338 /* Check if 1G SFP module. */
307 339 if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
308 340 hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
309 341 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
310 342 hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
311 343 *speed = IXGBE_LINK_SPEED_1GB_FULL;
312 - *negotiation = TRUE;
344 + *autoneg = TRUE;
313 345 goto out;
314 346 }
315 347
316 348 /*
317 349 * Determine link capabilities based on the stored value of AUTOC,
318 350 * which represents EEPROM defaults. If AUTOC value has not
319 351 * been stored, use the current register values.
320 352 */
321 353 if (hw->mac.orig_link_settings_stored)
322 354 autoc = hw->mac.orig_autoc;
323 355 else
324 356 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
325 357
326 358 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
327 359 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
328 360 *speed = IXGBE_LINK_SPEED_1GB_FULL;
329 - *negotiation = FALSE;
361 + *autoneg = FALSE;
330 362 break;
331 363
332 364 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
333 365 *speed = IXGBE_LINK_SPEED_10GB_FULL;
334 - *negotiation = FALSE;
366 + *autoneg = FALSE;
335 367 break;
336 368
337 369 case IXGBE_AUTOC_LMS_1G_AN:
338 370 *speed = IXGBE_LINK_SPEED_1GB_FULL;
339 - *negotiation = TRUE;
371 + *autoneg = TRUE;
340 372 break;
341 373
342 374 case IXGBE_AUTOC_LMS_10G_SERIAL:
343 375 *speed = IXGBE_LINK_SPEED_10GB_FULL;
344 - *negotiation = FALSE;
376 + *autoneg = FALSE;
345 377 break;
346 378
347 379 case IXGBE_AUTOC_LMS_KX4_KX_KR:
348 380 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
349 381 *speed = IXGBE_LINK_SPEED_UNKNOWN;
350 382 if (autoc & IXGBE_AUTOC_KR_SUPP)
351 383 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
352 384 if (autoc & IXGBE_AUTOC_KX4_SUPP)
353 385 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
354 386 if (autoc & IXGBE_AUTOC_KX_SUPP)
355 387 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
356 - *negotiation = TRUE;
388 + *autoneg = TRUE;
357 389 break;
358 390
359 391 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
360 392 *speed = IXGBE_LINK_SPEED_100_FULL;
361 393 if (autoc & IXGBE_AUTOC_KR_SUPP)
362 394 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
363 395 if (autoc & IXGBE_AUTOC_KX4_SUPP)
364 396 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
365 397 if (autoc & IXGBE_AUTOC_KX_SUPP)
366 398 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
367 - *negotiation = TRUE;
399 + *autoneg = TRUE;
368 400 break;
369 401
370 402 case IXGBE_AUTOC_LMS_SGMII_1G_100M:
371 403 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
372 - *negotiation = FALSE;
404 + *autoneg = FALSE;
373 405 break;
374 406
375 407 default:
376 408 status = IXGBE_ERR_LINK_SETUP;
377 409 goto out;
378 410 }
379 411
380 412 if (hw->phy.multispeed_fiber) {
381 413 *speed |= IXGBE_LINK_SPEED_10GB_FULL |
382 414 IXGBE_LINK_SPEED_1GB_FULL;
383 - *negotiation = TRUE;
415 + *autoneg = TRUE;
384 416 }
385 417
386 418 out:
387 419 return status;
388 420 }
389 421
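
Reviewer note: the rename above (negotiation -> autoneg) accompanies the removal of the explicit autoneg argument from the setup_link paths throughout this file; callers now obtain the flag from the capabilities query, and the link-setup routines derive it internally. A hedged illustration of the new calling convention, built only from functions shown in this diff (the function name is hypothetical and is not part of the change):

static s32 example_force_10g(struct ixgbe_hw *hw)
{
	ixgbe_link_speed speed = IXGBE_LINK_SPEED_UNKNOWN;
	bool autoneg = FALSE;	/* now informational to the caller */
	s32 status;

	status = ixgbe_get_link_capabilities(hw, &speed, &autoneg);
	if (status != IXGBE_SUCCESS)
		return status;

	/* Request 10G only; no autoneg argument is passed any more */
	speed &= IXGBE_LINK_SPEED_10GB_FULL;
	return ixgbe_setup_mac_link_82599(hw, speed, FALSE);
}
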
390 422 /**
391 423 * ixgbe_get_media_type_82599 - Get media type
392 424 * @hw: pointer to hardware structure
393 425 *
394 426 * Returns the media type (fiber, copper, backplane)
395 427 **/
396 428 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
397 429 {
398 430 enum ixgbe_media_type media_type;
399 431
400 432 DEBUGFUNC("ixgbe_get_media_type_82599");
401 433
402 434 /* Detect if there is a copper PHY attached. */
403 435 switch (hw->phy.type) {
404 436 case ixgbe_phy_cu_unknown:
405 437 case ixgbe_phy_tn:
406 438 media_type = ixgbe_media_type_copper;
407 439 goto out;
408 440 default:
409 441 break;
410 442 }
411 443
412 444 switch (hw->device_id) {
413 445 case IXGBE_DEV_ID_82599_KX4:
414 446 case IXGBE_DEV_ID_82599_KX4_MEZZ:
415 447 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
416 448 case IXGBE_DEV_ID_82599_KR:
417 449 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
418 450 case IXGBE_DEV_ID_82599_XAUI_LOM:
419 451 /* Default device ID is mezzanine card KX/KX4 */
420 452 media_type = ixgbe_media_type_backplane;
421 453 break;
422 454 case IXGBE_DEV_ID_82599_SFP:
423 455 case IXGBE_DEV_ID_82599_SFP_FCOE:
424 456 case IXGBE_DEV_ID_82599_SFP_EM:
425 457 case IXGBE_DEV_ID_82599_SFP_SF2:
458 + case IXGBE_DEV_ID_82599_SFP_SF_QP:
426 459 case IXGBE_DEV_ID_82599EN_SFP:
427 460 media_type = ixgbe_media_type_fiber;
428 461 break;
429 462 case IXGBE_DEV_ID_82599_CX4:
430 463 media_type = ixgbe_media_type_cx4;
431 464 break;
432 465 case IXGBE_DEV_ID_82599_T3_LOM:
433 466 media_type = ixgbe_media_type_copper;
434 467 break;
468 + case IXGBE_DEV_ID_82599_BYPASS:
469 + media_type = ixgbe_media_type_fiber_fixed;
470 + hw->phy.multispeed_fiber = TRUE;
471 + break;
435 472 default:
436 473 media_type = ixgbe_media_type_unknown;
437 474 break;
438 475 }
439 476 out:
440 477 return media_type;
441 478 }
442 479
443 480 /**
444 481 * ixgbe_start_mac_link_82599 - Setup MAC link settings
445 482 * @hw: pointer to hardware structure
446 483 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
447 484 *
448 485 * Configures link settings based on values in the ixgbe_hw struct.
449 486 * Restarts the link. Performs autonegotiation if needed.
450 487 **/
451 488 s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
452 489 bool autoneg_wait_to_complete)
453 490 {
454 491 u32 autoc_reg;
455 492 u32 links_reg;
456 493 u32 i;
457 494 s32 status = IXGBE_SUCCESS;
495 + bool got_lock = FALSE;
458 496
459 497 DEBUGFUNC("ixgbe_start_mac_link_82599");
460 498
461 499
500 + /* reset_pipeline requires us to hold this lock as it writes to
501 + * AUTOC.
502 + */
503 + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
504 + status = hw->mac.ops.acquire_swfw_sync(hw,
505 + IXGBE_GSSR_MAC_CSR_SM);
506 + if (status != IXGBE_SUCCESS)
507 + goto out;
508 +
509 + got_lock = TRUE;
510 + }
511 +
462 512 /* Restart link */
463 - autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
464 - autoc_reg |= IXGBE_AUTOC_AN_RESTART;
465 - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
513 + (void) ixgbe_reset_pipeline_82599(hw);
466 514
515 + if (got_lock)
516 + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
517 +
467 518 /* Only poll for autoneg to complete if specified to do so */
468 519 if (autoneg_wait_to_complete) {
520 + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
469 521 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
470 522 IXGBE_AUTOC_LMS_KX4_KX_KR ||
471 523 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
472 524 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
473 525 (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
474 526 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
475 527 links_reg = 0; /* Just in case Autoneg time = 0 */
476 528 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
477 529 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
478 530 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
479 531 break;
480 532 msec_delay(100);
481 533 }
482 534 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
483 535 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
484 536 DEBUGOUT("Autoneg did not complete.\n");
485 537 }
486 538 }
487 539 }
488 540
489 541 /* Add delay to filter out noises during initial link setup */
490 542 msec_delay(50);
491 543
544 +out:
492 545 return status;
493 546 }
494 547
495 548 /**
496 549 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
497 550 * @hw: pointer to hardware structure
498 551 *
499 552 * The base drivers may require better control over SFP+ module
500 553 * PHY states. This includes selectively shutting down the Tx
501 554 * laser on the PHY, effectively halting physical link.
502 555 **/
503 556 void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
504 557 {
505 558 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
506 559
507 560 /* Disable tx laser; allow 100us to go dark per spec */
508 561 esdp_reg |= IXGBE_ESDP_SDP3;
509 562 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
510 563 IXGBE_WRITE_FLUSH(hw);
511 564 usec_delay(100);
512 565 }
513 566
514 567 /**
515 568 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
516 569 * @hw: pointer to hardware structure
517 570 *
518 571 * The base drivers may require better control over SFP+ module
519 572 * PHY states. This includes selectively turning on the Tx
520 573 * laser on the PHY, effectively starting physical link.
521 574 **/
522 575 void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
523 576 {
524 577 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
525 578
526 579 /* Enable tx laser; allow 100ms to light up */
527 580 esdp_reg &= ~IXGBE_ESDP_SDP3;
528 581 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
529 582 IXGBE_WRITE_FLUSH(hw);
530 583 msec_delay(100);
531 584 }
532 585
533 586 /**
534 587 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
535 588 * @hw: pointer to hardware structure
536 589 *
537 590 * When the driver changes the link speeds that it can support,
538 591 * it sets autotry_restart to TRUE to indicate that we need to
539 592 * initiate a new autotry session with the link partner. To do
540 593 * so, we set the speed then disable and re-enable the tx laser, to
541 594 * alert the link partner that it also needs to restart autotry on its
542 595 * end. This is consistent with TRUE clause 37 autoneg, which also
543 596 * involves a loss of signal.
544 597 **/
545 598 void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
546 599 {
547 600 DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
548 601
549 602 if (hw->mac.autotry_restart) {
550 603 ixgbe_disable_tx_laser_multispeed_fiber(hw);
551 604 ixgbe_enable_tx_laser_multispeed_fiber(hw);
552 605 hw->mac.autotry_restart = FALSE;
553 606 }
554 607 }
555 608
556 609 /**
610 + * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
611 + * @hw: pointer to hardware structure
612 + * @speed: link speed to set
613 + *
614 + * We set the module speed differently for fixed fiber. For other
615 + * multi-speed devices we don't have an error value so here if we
616 + * detect an error we just log it and exit.
617 + */
618 +static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
619 + ixgbe_link_speed speed)
620 +{
621 + s32 status;
622 + u8 rs, eeprom_data;
623 +
624 + switch (speed) {
625 + case IXGBE_LINK_SPEED_10GB_FULL:
626 + /* one bit mask same as setting on */
627 + rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
628 + break;
629 + case IXGBE_LINK_SPEED_1GB_FULL:
630 + rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
631 + break;
632 + default:
633 + DEBUGOUT("Invalid fixed module speed\n");
634 + return;
635 + }
636 +
637 + /* Set RS0 */
638 + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
639 + IXGBE_I2C_EEPROM_DEV_ADDR2,
640 + &eeprom_data);
641 + if (status) {
642 + DEBUGOUT("Failed to read Rx Rate Select RS0\n");
643 + goto out;
644 + }
645 +
646 + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
647 +
648 + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
649 + IXGBE_I2C_EEPROM_DEV_ADDR2,
650 + eeprom_data);
651 + if (status) {
652 + DEBUGOUT("Failed to write Rx Rate Select RS0\n");
653 + goto out;
654 + }
655 +
656 + /* Set RS1 */
657 + status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
658 + IXGBE_I2C_EEPROM_DEV_ADDR2,
659 + &eeprom_data);
660 + if (status) {
661 + DEBUGOUT("Failed to read Rx Rate Select RS1\n");
662 + goto out;
663 + }
664 +
665 + eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
666 +
667 + status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
668 + IXGBE_I2C_EEPROM_DEV_ADDR2,
669 + eeprom_data);
670 + if (status) {
671 + DEBUGOUT("Failed to write Rx Rate Select RS1\n");
672 + goto out;
673 + }
674 +out:
675 + return;
676 +}
677 +
678 +/**
557 679 * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
558 680 * @hw: pointer to hardware structure
559 681 * @speed: new link speed
560 - * @autoneg: TRUE if autonegotiation enabled
561 682 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
562 683 *
563 684 * Set the link speed in the AUTOC register and restarts link.
564 685 **/
565 686 s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
566 - ixgbe_link_speed speed, bool autoneg,
687 + ixgbe_link_speed speed,
567 688 bool autoneg_wait_to_complete)
568 689 {
569 690 s32 status = IXGBE_SUCCESS;
570 691 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
571 692 ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
572 693 u32 speedcnt = 0;
573 694 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
574 695 u32 i = 0;
575 - bool link_up = FALSE;
576 - bool negotiation;
696 + bool autoneg, link_up = FALSE;
577 697
578 698 DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
579 699
580 700 /* Mask off requested but non-supported speeds */
581 - status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
701 + status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg);
582 702 if (status != IXGBE_SUCCESS)
583 703 return status;
584 704
585 705 speed &= link_speed;
586 706
587 707 /*
588 708 * Try each speed one by one, highest priority first. We do this in
589 709 * software because 10gb fiber doesn't support speed autonegotiation.
590 710 */
591 711 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
592 712 speedcnt++;
593 713 highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
594 714
595 715 /* If we already have link at this speed, just jump out */
596 716 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
597 717 if (status != IXGBE_SUCCESS)
598 718 return status;
599 719
600 720 if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
601 721 goto out;
602 722
603 723 /* Set the module link speed */
604 - esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
605 - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
606 - IXGBE_WRITE_FLUSH(hw);
724 + if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
725 + ixgbe_set_fiber_fixed_speed(hw,
726 + IXGBE_LINK_SPEED_10GB_FULL);
727 + } else {
728 + esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
729 + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
730 + IXGBE_WRITE_FLUSH(hw);
731 + }
607 732
608 733 /* Allow module to change analog characteristics (1G->10G) */
609 734 msec_delay(40);
610 735
611 736 status = ixgbe_setup_mac_link_82599(hw,
612 737 IXGBE_LINK_SPEED_10GB_FULL,
613 - autoneg,
614 738 autoneg_wait_to_complete);
615 739 if (status != IXGBE_SUCCESS)
616 740 return status;
617 741
618 742 /* Flap the tx laser if it has not already been done */
619 743 ixgbe_flap_tx_laser(hw);
620 744
621 745 /*
622 746 * Wait for the controller to acquire link. Per IEEE 802.3ap,
623 747 * Section 73.10.2, we may have to wait up to 500ms if KR is
624 748 * attempted. 82599 uses the same timing for 10g SFI.
625 749 */
626 750 for (i = 0; i < 5; i++) {
627 751 /* Wait for the link partner to also set speed */
628 752 msec_delay(100);
629 753
630 754 /* If we have link, just jump out */
631 755 status = ixgbe_check_link(hw, &link_speed,
632 756 &link_up, FALSE);
633 757 if (status != IXGBE_SUCCESS)
634 758 return status;
635 759
636 760 if (link_up)
637 761 goto out;
638 762 }
639 763 }
640 764
641 765 if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
642 766 speedcnt++;
643 767 if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
644 768 highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
645 769
646 770 /* If we already have link at this speed, just jump out */
647 771 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
648 772 if (status != IXGBE_SUCCESS)
649 773 return status;
650 774
651 775 if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
652 776 goto out;
653 777
654 778 /* Set the module link speed */
655 - esdp_reg &= ~IXGBE_ESDP_SDP5;
656 - esdp_reg |= IXGBE_ESDP_SDP5_DIR;
657 - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
658 - IXGBE_WRITE_FLUSH(hw);
779 + if (hw->phy.media_type == ixgbe_media_type_fiber_fixed) {
780 + ixgbe_set_fiber_fixed_speed(hw,
781 + IXGBE_LINK_SPEED_1GB_FULL);
782 + } else {
783 + esdp_reg &= ~IXGBE_ESDP_SDP5;
784 + esdp_reg |= IXGBE_ESDP_SDP5_DIR;
785 + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
786 + IXGBE_WRITE_FLUSH(hw);
787 + }
659 788
660 789 /* Allow module to change analog characteristics (10G->1G) */
661 790 msec_delay(40);
662 791
663 792 status = ixgbe_setup_mac_link_82599(hw,
664 793 IXGBE_LINK_SPEED_1GB_FULL,
665 - autoneg,
666 794 autoneg_wait_to_complete);
667 795 if (status != IXGBE_SUCCESS)
668 796 return status;
669 797
670 798 /* Flap the tx laser if it has not already been done */
671 799 ixgbe_flap_tx_laser(hw);
672 800
673 801 /* Wait for the link partner to also set speed */
674 802 msec_delay(100);
675 803
676 804 /* If we have link, just jump out */
677 805 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
678 806 if (status != IXGBE_SUCCESS)
679 807 return status;
680 808
681 809 if (link_up)
682 810 goto out;
683 811 }
684 812
685 813 /*
686 814 * We didn't get link. Configure back to the highest speed we tried,
687 815 * (if there was more than one). We call ourselves back with just the
688 816 * single highest speed that the user requested.
689 817 */
690 818 if (speedcnt > 1)
691 819 status = ixgbe_setup_mac_link_multispeed_fiber(hw,
692 - highest_link_speed, autoneg, autoneg_wait_to_complete);
820 + highest_link_speed, autoneg_wait_to_complete);
693 821
694 822 out:
695 823 /* Set autoneg_advertised value based on input link speed */
696 824 hw->phy.autoneg_advertised = 0;
697 825
698 826 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
699 827 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
700 828
701 829 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
702 830 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
703 831
704 832 return status;
705 833 }
706 834
707 835 /**
708 836 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
709 837 * @hw: pointer to hardware structure
710 838 * @speed: new link speed
711 - * @autoneg: TRUE if autonegotiation enabled
712 839 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
713 840 *
714 841 * Implements the Intel SmartSpeed algorithm.
715 842 **/
716 843 s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
717 - ixgbe_link_speed speed, bool autoneg,
844 + ixgbe_link_speed speed,
718 845 bool autoneg_wait_to_complete)
719 846 {
720 847 s32 status = IXGBE_SUCCESS;
721 848 ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
722 849 s32 i, j;
723 850 bool link_up = FALSE;
724 851 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
725 852
726 853 DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
727 854
728 855 /* Set autoneg_advertised value based on input link speed */
729 856 hw->phy.autoneg_advertised = 0;
730 857
731 858 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
732 859 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
733 860
734 861 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
735 862 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
736 863
737 864 if (speed & IXGBE_LINK_SPEED_100_FULL)
738 865 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
739 866
740 867 /*
741 868 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
742 869 * autoneg advertisement if link is unable to be established at the
743 870 * highest negotiated rate. This can sometimes happen due to integrity
744 871 * issues with the physical media connection.
745 872 */
746 873
747 874 /* First, try to get link with full advertisement */
748 875 hw->phy.smart_speed_active = FALSE;
749 876 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
750 - status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
877 + status = ixgbe_setup_mac_link_82599(hw, speed,
751 878 autoneg_wait_to_complete);
752 879 if (status != IXGBE_SUCCESS)
753 880 goto out;
754 881
755 882 /*
756 883 * Wait for the controller to acquire link. Per IEEE 802.3ap,
757 884 * Section 73.10.2, we may have to wait up to 500ms if KR is
758 885 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
759 886 * Table 9 in the AN MAS.
760 887 */
761 888 for (i = 0; i < 5; i++) {
762 889 msec_delay(100);
763 890
764 891 /* If we have link, just jump out */
765 892 status = ixgbe_check_link(hw, &link_speed, &link_up,
766 893 FALSE);
767 894 if (status != IXGBE_SUCCESS)
768 895 goto out;
769 896
770 897 if (link_up)
771 898 goto out;
772 899 }
773 900 }
774 901
775 902 /*
776 903 * We didn't get link. If we advertised KR plus one of KX4/KX
777 904 * (or BX4/BX), then disable KR and try again.
778 905 */
779 906 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
780 907 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
781 908 goto out;
782 909
783 910 /* Turn SmartSpeed on to disable KR support */
784 911 hw->phy.smart_speed_active = TRUE;
785 - status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
912 + status = ixgbe_setup_mac_link_82599(hw, speed,
786 913 autoneg_wait_to_complete);
787 914 if (status != IXGBE_SUCCESS)
788 915 goto out;
789 916
790 917 /*
791 918 * Wait for the controller to acquire link. 600ms will allow for
792 919 * the AN link_fail_inhibit_timer as well for multiple cycles of
793 920 * parallel detect, both 10g and 1g. This allows for the maximum
794 921 * connect attempts as defined in the AN MAS table 73-7.
795 922 */
796 923 for (i = 0; i < 6; i++) {
797 924 msec_delay(100);
798 925
799 926 /* If we have link, just jump out */
800 927 status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
801 928 if (status != IXGBE_SUCCESS)
802 929 goto out;
803 930
804 931 if (link_up)
805 932 goto out;
806 933 }
807 934
808 935 /* We didn't get link. Turn SmartSpeed back off. */
809 936 hw->phy.smart_speed_active = FALSE;
810 - status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
937 + status = ixgbe_setup_mac_link_82599(hw, speed,
811 938 autoneg_wait_to_complete);
812 939
813 940 out:
814 941 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
815 942 DEBUGOUT("Smartspeed has downgraded the link speed "
816 943 "from the maximum advertised\n");
817 944 return status;
818 945 }
819 946
820 947 /**
821 948 * ixgbe_setup_mac_link_82599 - Set MAC link speed
822 949 * @hw: pointer to hardware structure
823 950 * @speed: new link speed
824 - * @autoneg: TRUE if autonegotiation enabled
825 951 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
826 952 *
827 953 * Set the link speed in the AUTOC register and restarts link.
828 954 **/
829 955 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
830 - ixgbe_link_speed speed, bool autoneg,
956 + ixgbe_link_speed speed,
831 957 bool autoneg_wait_to_complete)
832 958 {
959 + bool autoneg = FALSE;
833 960 s32 status = IXGBE_SUCCESS;
834 - u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
961 + u32 autoc, pma_pmd_1g, link_mode, start_autoc;
835 962 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
836 - u32 start_autoc = autoc;
837 963 u32 orig_autoc = 0;
838 - u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
839 - u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
840 964 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
841 965 u32 links_reg;
842 966 u32 i;
843 967 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
968 + bool got_lock = FALSE;
844 969
845 970 DEBUGFUNC("ixgbe_setup_mac_link_82599");
846 971
847 972 /* Check to see if speed passed in is supported. */
848 973 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
849 - if (status != IXGBE_SUCCESS)
974 + if (status)
850 975 goto out;
851 976
852 977 speed &= link_capabilities;
853 978
854 979 if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
855 980 status = IXGBE_ERR_LINK_SETUP;
856 981 goto out;
857 982 }
858 983
859 984 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
860 985 if (hw->mac.orig_link_settings_stored)
861 - orig_autoc = hw->mac.orig_autoc;
986 + autoc = hw->mac.orig_autoc;
862 987 else
863 - orig_autoc = autoc;
988 + autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
864 989
990 + orig_autoc = autoc;
991 + start_autoc = hw->mac.cached_autoc;
992 + link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
993 + pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
994 +
865 995 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
866 996 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
867 997 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
868 998 /* Set KX4/KX/KR support according to speed requested */
869 999 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
870 1000 if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
871 1001 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
872 1002 autoc |= IXGBE_AUTOC_KX4_SUPP;
873 1003 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
874 1004 (hw->phy.smart_speed_active == FALSE))
875 1005 autoc |= IXGBE_AUTOC_KR_SUPP;
876 1006 }
877 1007 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
878 1008 autoc |= IXGBE_AUTOC_KX_SUPP;
879 1009 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
880 1010 (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
881 1011 link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
882 1012 /* Switch from 1G SFI to 10G SFI if requested */
883 1013 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
884 1014 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
885 1015 autoc &= ~IXGBE_AUTOC_LMS_MASK;
886 1016 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
887 1017 }
888 1018 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
889 1019 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
890 1020 /* Switch from 10G SFI to 1G SFI if requested */
891 1021 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
892 1022 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
893 1023 autoc &= ~IXGBE_AUTOC_LMS_MASK;
894 1024 if (autoneg)
895 1025 autoc |= IXGBE_AUTOC_LMS_1G_AN;
896 1026 else
897 1027 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
898 1028 }
899 1029 }
900 1030
901 1031 if (autoc != start_autoc) {
1032 + /* Need SW/FW semaphore around AUTOC writes if LESM is on,
1033 + * likewise reset_pipeline requires us to hold this lock as
1034 + * it also writes to AUTOC.
1035 + */
1036 + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1037 + status = hw->mac.ops.acquire_swfw_sync(hw,
1038 + IXGBE_GSSR_MAC_CSR_SM);
1039 + if (status != IXGBE_SUCCESS) {
1040 + status = IXGBE_ERR_SWFW_SYNC;
1041 + goto out;
1042 + }
1043 +
1044 + got_lock = TRUE;
1045 + }
1046 +
902 1047 /* Restart link */
903 - autoc |= IXGBE_AUTOC_AN_RESTART;
904 1048 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
1049 + hw->mac.cached_autoc = autoc;
1050 + (void) ixgbe_reset_pipeline_82599(hw);
905 1051
1052 + if (got_lock) {
1053 + hw->mac.ops.release_swfw_sync(hw,
1054 + IXGBE_GSSR_MAC_CSR_SM);
1055 + got_lock = FALSE;
1056 + }
1057 +
906 1058 /* Only poll for autoneg to complete if specified to do so */
907 1059 if (autoneg_wait_to_complete) {
908 1060 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
909 1061 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
910 1062 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
911 1063 links_reg = 0; /*Just in case Autoneg time=0*/
912 1064 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
913 1065 links_reg =
914 1066 IXGBE_READ_REG(hw, IXGBE_LINKS);
915 1067 if (links_reg & IXGBE_LINKS_KX_AN_COMP)
916 1068 break;
917 1069 msec_delay(100);
918 1070 }
919 1071 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
920 1072 status =
921 1073 IXGBE_ERR_AUTONEG_NOT_COMPLETE;
922 1074 DEBUGOUT("Autoneg did not complete.\n");
923 1075 }
924 1076 }
925 1077 }
926 1078
927 1079 /* Add delay to filter out noises during initial link setup */
928 1080 msec_delay(50);
929 1081 }
930 1082
931 1083 out:
932 1084 return status;
933 1085 }
934 1086
935 1087 /**
936 1088 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
937 1089 * @hw: pointer to hardware structure
938 1090 * @speed: new link speed
939 - * @autoneg: TRUE if autonegotiation enabled
940 1091 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete
941 1092 *
942 1093 * Restarts link on PHY and MAC based on settings passed in.
943 1094 **/
944 1095 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
945 1096 ixgbe_link_speed speed,
946 - bool autoneg,
947 1097 bool autoneg_wait_to_complete)
948 1098 {
949 1099 s32 status;
950 1100
951 1101 DEBUGFUNC("ixgbe_setup_copper_link_82599");
952 1102
953 1103 /* Setup the PHY according to input speed */
954 - status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
1104 + status = hw->phy.ops.setup_link_speed(hw, speed,
955 1105 autoneg_wait_to_complete);
956 - if (status == IXGBE_SUCCESS) {
957 - /* Set up MAC */
958 - status =
959 - ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
960 - }
1106 + /* Set up MAC */
1107 + (void) ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
961 1108
962 1109 return status;
963 1110 }
964 1111
965 1112 /**
966 1113 * ixgbe_reset_hw_82599 - Perform hardware reset
967 1114 * @hw: pointer to hardware structure
968 1115 *
969 1116 * Resets the hardware by resetting the transmit and receive units, masks
970 1117 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
971 1118 * reset.
972 1119 **/
973 1120 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
974 1121 {
975 1122 ixgbe_link_speed link_speed;
976 1123 s32 status;
977 1124 u32 ctrl, i, autoc, autoc2;
978 1125 bool link_up = FALSE;
979 1126
980 1127 DEBUGFUNC("ixgbe_reset_hw_82599");
981 1128
982 1129 /* Call adapter stop to disable tx/rx and clear interrupts */
983 1130 status = hw->mac.ops.stop_adapter(hw);
984 1131 if (status != IXGBE_SUCCESS)
985 1132 goto reset_hw_out;
986 1133
987 1134 /* flush pending Tx transactions */
988 1135 ixgbe_clear_tx_pending(hw);
989 1136
990 1137 /* PHY ops must be identified and initialized prior to reset */
991 1138
992 1139 /* Identify PHY and related function pointers */
993 1140 status = hw->phy.ops.init(hw);
994 1141
995 1142 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
996 1143 goto reset_hw_out;
997 1144
998 1145 /* Setup SFP module if there is one present. */
999 1146 if (hw->phy.sfp_setup_needed) {
1000 1147 status = hw->mac.ops.setup_sfp(hw);
1001 1148 hw->phy.sfp_setup_needed = FALSE;
1002 1149 }
1003 1150
1004 1151 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
1005 1152 goto reset_hw_out;
1006 1153
1007 1154 /* Reset PHY */
1008 1155 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
1009 1156 hw->phy.ops.reset(hw);
1010 1157
1011 1158 mac_reset_top:
1012 1159 /*
1013 1160 * Issue global reset to the MAC. Needs to be SW reset if link is up.
1014 1161 * If link reset is used when link is up, it might reset the PHY when
1015 1162 * mng is using it. If link is down or the flag to force full link
1016 1163 * reset is set, then perform link reset.
1017 1164 */
1018 1165 ctrl = IXGBE_CTRL_LNK_RST;
1019 1166 if (!hw->force_full_reset) {
1020 1167 hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
1021 1168 if (link_up)
1022 1169 ctrl = IXGBE_CTRL_RST;
1023 1170 }
1024 1171
1025 1172 ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
1026 1173 IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
1027 1174 IXGBE_WRITE_FLUSH(hw);
1028 1175
1029 1176 /* Poll for reset bit to self-clear indicating reset is complete */
1030 1177 for (i = 0; i < 10; i++) {
1031 1178 usec_delay(1);
1032 1179 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
1033 1180 if (!(ctrl & IXGBE_CTRL_RST_MASK))
1034 1181 break;
1035 1182 }
1036 1183
1037 1184 if (ctrl & IXGBE_CTRL_RST_MASK) {
1038 1185 status = IXGBE_ERR_RESET_FAILED;
1039 1186 DEBUGOUT("Reset polling failed to complete.\n");
1040 1187 }
1041 1188
1042 1189 msec_delay(50);
1043 1190
1044 1191 /*
1045 1192 * Double resets are required for recovery from certain error
1046 1193 * conditions. Between resets, it is necessary to stall to allow time
1047 1194 * for any pending HW events to complete.
1048 1195 */
1049 1196 if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
1050 1197 hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
1051 1198 goto mac_reset_top;
1052 1199 }
1053 1200
1054 1201 /*
1055 1202 * Store the original AUTOC/AUTOC2 values if they have not been
1056 1203 * stored off yet. Otherwise restore the stored original
1057 1204 * values since the reset operation sets back to defaults.
1058 1205 */
1059 1206 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1060 1207 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1208 +
1209 + /* Enable link if disabled in NVM */
1210 + if (autoc2 & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
1211 + autoc2 &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
1212 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1213 + IXGBE_WRITE_FLUSH(hw);
1214 + }
1215 +
1061 1216 if (hw->mac.orig_link_settings_stored == FALSE) {
1062 1217 hw->mac.orig_autoc = autoc;
1063 1218 hw->mac.orig_autoc2 = autoc2;
1219 + hw->mac.cached_autoc = autoc;
1064 1220 hw->mac.orig_link_settings_stored = TRUE;
1065 1221 } else {
1066 - if (autoc != hw->mac.orig_autoc)
1067 - IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
1068 - IXGBE_AUTOC_AN_RESTART));
1222 + if (autoc != hw->mac.orig_autoc) {
1223 + /* Need SW/FW semaphore around AUTOC writes if LESM is
1224 + * on, likewise reset_pipeline requires us to hold
1225 + * this lock as it also writes to AUTOC.
1226 + */
1227 + bool got_lock = FALSE;
1228 + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
1229 + status = hw->mac.ops.acquire_swfw_sync(hw,
1230 + IXGBE_GSSR_MAC_CSR_SM);
1231 + if (status != IXGBE_SUCCESS) {
1232 + status = IXGBE_ERR_SWFW_SYNC;
1233 + goto reset_hw_out;
1234 + }
1069 1235
1236 + got_lock = TRUE;
1237 + }
1238 +
1239 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
1240 + hw->mac.cached_autoc = hw->mac.orig_autoc;
1241 + (void) ixgbe_reset_pipeline_82599(hw);
1242 +
1243 + if (got_lock)
1244 + hw->mac.ops.release_swfw_sync(hw,
1245 + IXGBE_GSSR_MAC_CSR_SM);
1246 + }
1247 +
1070 1248 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
1071 1249 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
1072 1250 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
1073 1251 autoc2 |= (hw->mac.orig_autoc2 &
1074 1252 IXGBE_AUTOC2_UPPER_MASK);
1075 1253 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
1076 1254 }
1077 1255 }
1078 1256
1079 1257 /* Store the permanent mac address */
1080 1258 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
1081 1259
1082 1260 /*
1083 1261 * Store MAC address from RAR0, clear receive address registers, and
1084 1262 * clear the multicast table. Also reset num_rar_entries to 128,
1085 1263 * since we modify this value when programming the SAN MAC address.
1086 1264 */
1087 1265 hw->mac.num_rar_entries = 128;
1088 1266 hw->mac.ops.init_rx_addrs(hw);
1089 1267
1090 1268 /* Store the permanent SAN mac address */
1091 1269 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
1092 1270
1093 1271 /* Add the SAN MAC address to the RAR only if it's a valid address */
1094 1272 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
1095 1273 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
1096 1274 hw->mac.san_addr, 0, IXGBE_RAH_AV);
1097 1275
1098 1276 /* Save the SAN MAC RAR index */
1099 1277 hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
1100 1278
1101 1279 /* Reserve the last RAR for the SAN MAC address */
1102 1280 hw->mac.num_rar_entries--;
1103 1281 }
1104 1282
1105 1283 /* Store the alternative WWNN/WWPN prefix */
1106 1284 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1107 1285 &hw->mac.wwpn_prefix);
1108 1286
1109 1287 reset_hw_out:
1110 1288 return status;
1111 1289 }
1112 1290
1113 1291 /**
1114 1292 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1115 1293 * @hw: pointer to hardware structure
1116 1294 **/
1117 1295 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1118 1296 {
1119 1297 int i;
1120 1298 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
1121 1299 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
1122 1300
1123 1301 DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
1124 1302
1125 1303 /*
1126 1304 * Before starting reinitialization process,
1127 1305 * FDIRCMD.CMD must be zero.
1128 1306 */
1129 1307 for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
1130 1308 if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1131 1309 IXGBE_FDIRCMD_CMD_MASK))
1132 1310 break;
1133 1311 usec_delay(10);
1134 1312 }
1135 1313 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1136 1314 DEBUGOUT("Flow Director previous command isn't complete, "
1137 1315 "aborting table re-initialization.\n");
1138 1316 return IXGBE_ERR_FDIR_REINIT_FAILED;
1139 1317 }
1140 1318
1141 1319 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
1142 1320 IXGBE_WRITE_FLUSH(hw);
1143 1321 /*
1144 1322 * 82599 adapters flow director init flow cannot be restarted,
1145 1323 * Workaround 82599 silicon errata by performing the following steps
1146 1324 * before re-writing the FDIRCTRL control register with the same value.
1147 1325 * - write 1 to bit 8 of FDIRCMD register &
1148 1326 * - write 0 to bit 8 of FDIRCMD register
1149 1327 */
1150 1328 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1151 1329 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
1152 1330 IXGBE_FDIRCMD_CLEARHT));
1153 1331 IXGBE_WRITE_FLUSH(hw);
1154 1332 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1155 1333 (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
1156 1334 ~IXGBE_FDIRCMD_CLEARHT));
1157 1335 IXGBE_WRITE_FLUSH(hw);
1158 1336 /*
1159 1337 * Clear FDIR Hash register to clear any leftover hashes
1160 1338 * waiting to be programmed.
1161 1339 */
1162 1340 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
1163 1341 IXGBE_WRITE_FLUSH(hw);
1164 1342
1165 1343 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1166 1344 IXGBE_WRITE_FLUSH(hw);
1167 1345
1168 1346 /* Poll init-done after we write FDIRCTRL register */
1169 1347 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1170 1348 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1171 1349 IXGBE_FDIRCTRL_INIT_DONE)
1172 1350 break;
1173 - usec_delay(10);
1351 + msec_delay(1);
1174 1352 }
1175 1353 if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
1176 1354 DEBUGOUT("Flow Director Signature poll time exceeded!\n");
1177 1355 return IXGBE_ERR_FDIR_REINIT_FAILED;
1178 1356 }
1179 1357
1180 1358 /* Clear FDIR statistics registers (read to clear) */
1181 1359 (void) IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
1182 1360 (void) IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
1183 1361 (void) IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
1184 1362 (void) IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
1185 1363 (void) IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
1186 1364
1187 1365 return IXGBE_SUCCESS;
1188 1366 }
1189 1367
1190 1368 /**
1191 1369 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
1192 1370 * @hw: pointer to hardware structure
1193 1371 * @fdirctrl: value to write to flow director control register
1194 1372 **/
1195 1373 static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1196 1374 {
1197 1375 int i;
1198 1376
1199 1377 DEBUGFUNC("ixgbe_fdir_enable_82599");
1200 1378
1201 1379 /* Prime the keys for hashing */
1202 1380 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1203 1381 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1204 1382
1205 1383 /*
1206 1384 * Poll init-done after we write the register. Estimated times:
1207 1385 * 10G: PBALLOC = 11b, timing is 60us
1208 1386 * 1G: PBALLOC = 11b, timing is 600us
1209 1387 * 100M: PBALLOC = 11b, timing is 6ms
1210 1388 *
 1211 1389 	 * Multiply these timings by 4 if under full Rx load
1212 1390 *
1213 1391 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
1214 1392 * 1 msec per poll time. If we're at line rate and drop to 100M, then
1215 1393 * this might not finish in our poll time, but we can live with that
1216 1394 * for now.
1217 1395 */
1218 1396 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
1219 1397 IXGBE_WRITE_FLUSH(hw);
1220 1398 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
1221 1399 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
1222 1400 IXGBE_FDIRCTRL_INIT_DONE)
1223 1401 break;
1224 1402 msec_delay(1);
1225 1403 }
1226 1404
1227 1405 if (i >= IXGBE_FDIR_INIT_DONE_POLL)
1228 1406 DEBUGOUT("Flow Director poll time exceeded!\n");
1229 1407 }
1230 1408
1231 1409 /**
1232 1410 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
1233 1411 * @hw: pointer to hardware structure
1234 1412 * @fdirctrl: value to write to flow director control register, initially
1235 1413 * contains just the value of the Rx packet buffer allocation
1236 1414 **/
1237 1415 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1238 1416 {
1239 1417 DEBUGFUNC("ixgbe_init_fdir_signature_82599");
1240 1418
1241 1419 /*
1242 1420 * Continue setup of fdirctrl register bits:
1243 1421 * Move the flexible bytes to use the ethertype - shift 6 words
1244 1422 * Set the maximum length per hash bucket to 0xA filters
1245 1423 * Send interrupt when 64 filters are left
1246 1424 */
1247 1425 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1248 1426 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1249 1427 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1250 1428
1251 1429 /* write hashes and fdirctrl register, poll for completion */
1252 1430 ixgbe_fdir_enable_82599(hw, fdirctrl);
1253 1431
1254 1432 return IXGBE_SUCCESS;
1255 1433 }
1256 1434
1257 1435 /**
1258 1436 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
1259 1437 * @hw: pointer to hardware structure
1260 1438 * @fdirctrl: value to write to flow director control register, initially
1261 1439 * contains just the value of the Rx packet buffer allocation
1262 1440 **/
1263 1441 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
1264 1442 {
1265 1443 DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
1266 1444
1267 1445 /*
1268 1446 * Continue setup of fdirctrl register bits:
1269 1447 * Turn perfect match filtering on
1270 1448 * Report hash in RSS field of Rx wb descriptor
1271 1449 * Initialize the drop queue
1272 1450 * Move the flexible bytes to use the ethertype - shift 6 words
1273 1451 * Set the maximum length per hash bucket to 0xA filters
1274 1452 * Send interrupt when 64 (0x4 * 16) filters are left
1275 1453 */
1276 1454 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
1277 1455 IXGBE_FDIRCTRL_REPORT_STATUS |
1278 1456 (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
1279 1457 (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
1280 1458 (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
1281 1459 (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
1282 1460
1283 1461 /* write hashes and fdirctrl register, poll for completion */
1284 1462 ixgbe_fdir_enable_82599(hw, fdirctrl);
1285 1463
1286 1464 return IXGBE_SUCCESS;
1287 1465 }
1288 1466
1289 1467 /*
1290 1468 * These defines allow us to quickly generate all of the necessary instructions
1291 1469 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1292 1470 * for values 0 through 15
1293 1471 */
1294 1472 #define IXGBE_ATR_COMMON_HASH_KEY \
1295 1473 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1296 1474 #ifdef lint
1297 1475 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n)
1298 1476 #else
1299 1477 #define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1300 1478 do { \
1301 1479 u32 n = (_n); \
1302 1480 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1303 1481 common_hash ^= lo_hash_dword >> n; \
1304 1482 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1305 1483 bucket_hash ^= lo_hash_dword >> n; \
1306 1484 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1307 1485 sig_hash ^= lo_hash_dword << (16 - n); \
1308 1486 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1309 1487 common_hash ^= hi_hash_dword >> n; \
1310 1488 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1311 1489 bucket_hash ^= hi_hash_dword >> n; \
1312 1490 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1313 1491 sig_hash ^= hi_hash_dword << (16 - n); \
1314 1492 } while (0);
1315 1493 #endif
1316 1494
1317 1495 /**
1318 1496 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1319 1497 	 * @input: unique input dword; @common: compressed common input dword
1320 1498 *
1321 1499 * This function is almost identical to the function above but contains
1322 1500 	 * several optimizations such as unwinding all of the loops, letting the
1323 1501 * compiler work out all of the conditional ifs since the keys are static
1324 1502 * defines, and computing two keys at once since the hashed dword stream
1325 1503 * will be the same for both keys.
1326 1504 **/
1327 1505 u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1328 1506 union ixgbe_atr_hash_dword common)
1329 1507 {
1330 1508 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1331 1509 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1332 1510
1333 1511 /* record the flow_vm_vlan bits as they are a key part to the hash */
1334 1512 flow_vm_vlan = IXGBE_NTOHL(input.dword);
1335 1513
1336 1514 /* generate common hash dword */
1337 1515 hi_hash_dword = IXGBE_NTOHL(common.dword);
1338 1516
1339 1517 /* low dword is word swapped version of common */
1340 1518 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1341 1519
1342 1520 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1343 1521 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1344 1522
1345 1523 /* Process bits 0 and 16 */
1346 1524 IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1347 1525
1348 1526 /*
1349 1527 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword; we had to
1350 1528 	 * delay this because bit 0 of the stream should not be processed,
1351 1529 	 * so we do not add the VLAN until after bit 0 has been processed
1352 1530 */
1353 1531 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1354 1532
1355 1533 	/* Process the remaining 30 bits of the key */
1356 1534 IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1357 1535 IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1358 1536 IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1359 1537 IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1360 1538 IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1361 1539 IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1362 1540 IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1363 1541 IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1364 1542 IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1365 1543 IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1366 1544 IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1367 1545 IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1368 1546 IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1369 1547 IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1370 1548 IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1371 1549
1372 1550 /* combine common_hash result with signature and bucket hashes */
1373 1551 bucket_hash ^= common_hash;
1374 1552 bucket_hash &= IXGBE_ATR_HASH_MASK;
1375 1553
1376 1554 sig_hash ^= common_hash << 16;
1377 1555 sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1378 1556
1379 1557 /* return completed signature hash */
1380 1558 return sig_hash ^ bucket_hash;
1381 1559 }
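
For readers following the unrolled IXGBE_COMPUTE_SIG_HASH_ITERATION calls above, the sketch below writes the same key-driven folding as a plain loop over bit positions 0 through 15, handling bit n and bit n + 16 in each pass. The key constants and input dwords are placeholders chosen for illustration, not the hardware's ATR keys, and the delayed VLAN mix-in of the real function is omitted.

#include <stdio.h>
#include <stdint.h>

#define EX_BUCKET_KEY		0x3D1F00A5u	/* placeholder key */
#define EX_SIGNATURE_KEY	0x17C4960Cu	/* placeholder key */
#define EX_COMMON_KEY		(EX_BUCKET_KEY & EX_SIGNATURE_KEY)

int main(void)
{
	uint32_t lo = 0x12345678u, hi = 0x9ABCDEF0u;	/* sample dwords */
	uint32_t sig = 0, bucket = 0, common = 0;
	int n;

	for (n = 0; n < 16; n++) {
		/* low-order key bit selects which accumulator folds lo */
		if (EX_COMMON_KEY & (1u << n))
			common ^= lo >> n;
		else if (EX_BUCKET_KEY & (1u << n))
			bucket ^= lo >> n;
		else if (EX_SIGNATURE_KEY & (1u << n))
			sig ^= lo << (16 - n);

		/* high-order key bit does the same with hi */
		if (EX_COMMON_KEY & (1u << (n + 16)))
			common ^= hi >> n;
		else if (EX_BUCKET_KEY & (1u << (n + 16)))
			bucket ^= hi >> n;
		else if (EX_SIGNATURE_KEY & (1u << (n + 16)))
			sig ^= hi << (16 - n);
	}

	printf("common=%08x bucket=%08x sig=%08x\n", common, bucket, sig);
	return 0;
}
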
1382 1560
1383 1561 /**
1384 1562 	 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
1385 1563 * @hw: pointer to hardware structure
1386 1564 * @input: unique input dword
1387 1565 * @common: compressed common input dword
1388 1566 * @queue: queue index to direct traffic to
1389 1567 **/
1390 1568 s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1391 1569 union ixgbe_atr_hash_dword input,
1392 1570 union ixgbe_atr_hash_dword common,
1393 1571 u8 queue)
1394 1572 {
1395 1573 u64 fdirhashcmd;
1396 1574 u32 fdircmd;
1397 1575
1398 1576 DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
1399 1577
1400 1578 /*
1401 1579 * Get the flow_type in order to program FDIRCMD properly
1402 1580 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1403 1581 */
1404 1582 switch (input.formatted.flow_type) {
1405 1583 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1406 1584 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1407 1585 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1408 1586 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1409 1587 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1410 1588 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1411 1589 break;
1412 1590 default:
1413 1591 DEBUGOUT(" Error on flow type input\n");
1414 1592 return IXGBE_ERR_CONFIG;
1415 1593 }
1416 1594
1417 1595 /* configure FDIRCMD register */
1418 1596 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1419 1597 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1420 1598 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1421 1599 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1422 1600
1423 1601 /*
1424 1602 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1425 1603 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1426 1604 */
1427 1605 fdirhashcmd = (u64)fdircmd << 32;
1428 1606 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1429 1607 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1430 1608
1431 1609 DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1432 1610
1433 1611 return IXGBE_SUCCESS;
1434 1612 }
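
The comment above notes that FDIRHASH occupies the low 32 bits and FDIRCMD the high 32 bits of a single 64-bit write. A tiny sketch of that packing with arbitrary sample values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cmd = 0x00018001u;	/* arbitrary command bits */
	uint32_t hash = 0x12345678u;	/* arbitrary sig/bucket hash */
	uint64_t hashcmd = ((uint64_t)cmd << 32) | hash;

	printf("packed=%016llx low=%08x high=%08x\n",
	    (unsigned long long)hashcmd,
	    (uint32_t)hashcmd, (uint32_t)(hashcmd >> 32));
	return 0;
}
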
1435 1613
1436 1614 #ifdef lint
1437 1615 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n)
1438 1616 #else
1439 1617 #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
1440 1618 do { \
1441 1619 u32 n = (_n); \
1442 1620 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1443 1621 bucket_hash ^= lo_hash_dword >> n; \
1444 1622 if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1445 1623 bucket_hash ^= hi_hash_dword >> n; \
1446 1624 } while (0);
1447 1625 #endif
1448 1626 /**
1449 1627 * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
1450 1628 * @atr_input: input bitstream to compute the hash on
1451 1629 * @input_mask: mask for the input bitstream
1452 1630 *
1453 1631 	 * This function serves two main purposes. First, it applies the input_mask
1454 1632 	 * to the atr_input, resulting in a cleaned-up atr_input data stream.
1455 1633 	 * Second, it computes the hash and stores it in the bkt_hash field at
1456 1634 * the end of the input byte stream. This way it will be available for
1457 1635 * future use without needing to recompute the hash.
1458 1636 **/
1459 1637 void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
1460 1638 union ixgbe_atr_input *input_mask)
1461 1639 {
1462 1640
1463 1641 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1464 1642 u32 bucket_hash = 0;
1465 1643
1466 1644 /* Apply masks to input data */
1467 1645 input->dword_stream[0] &= input_mask->dword_stream[0];
1468 1646 input->dword_stream[1] &= input_mask->dword_stream[1];
1469 1647 input->dword_stream[2] &= input_mask->dword_stream[2];
1470 1648 input->dword_stream[3] &= input_mask->dword_stream[3];
1471 1649 input->dword_stream[4] &= input_mask->dword_stream[4];
1472 1650 input->dword_stream[5] &= input_mask->dword_stream[5];
1473 1651 input->dword_stream[6] &= input_mask->dword_stream[6];
1474 1652 input->dword_stream[7] &= input_mask->dword_stream[7];
1475 1653 input->dword_stream[8] &= input_mask->dword_stream[8];
1476 1654 input->dword_stream[9] &= input_mask->dword_stream[9];
1477 1655 input->dword_stream[10] &= input_mask->dword_stream[10];
1478 1656
1479 1657 /* record the flow_vm_vlan bits as they are a key part to the hash */
1480 1658 flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
1481 1659
1482 1660 /* generate common hash dword */
1483 1661 hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
1484 1662 input->dword_stream[2] ^
1485 1663 input->dword_stream[3] ^
1486 1664 input->dword_stream[4] ^
1487 1665 input->dword_stream[5] ^
1488 1666 input->dword_stream[6] ^
1489 1667 input->dword_stream[7] ^
1490 1668 input->dword_stream[8] ^
1491 1669 input->dword_stream[9] ^
1492 1670 input->dword_stream[10]);
1493 1671
1494 1672 /* low dword is word swapped version of common */
1495 1673 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1496 1674
1497 1675 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1498 1676 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1499 1677
1500 1678 /* Process bits 0 and 16 */
1501 1679 IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
1502 1680
1503 1681 /*
1504 1682 	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword; we had to
1505 1683 	 * delay this because bit 0 of the stream should not be processed,
1506 1684 	 * so we do not add the VLAN until after bit 0 has been processed
1507 1685 */
1508 1686 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1509 1687
1510 1688 	/* Process the remaining 30 bits of the key */
1511 1689 IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
1512 1690 IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
1513 1691 IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
1514 1692 IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
1515 1693 IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
1516 1694 IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
1517 1695 IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
1518 1696 IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
1519 1697 IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
1520 1698 IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
1521 1699 IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
1522 1700 IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
1523 1701 IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
1524 1702 IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
1525 1703 IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
1526 1704
1527 1705 /*
1528 1706 * Limit hash to 13 bits since max bucket count is 8K.
1529 1707 * Store result at the end of the input stream.
1530 1708 */
1531 1709 input->formatted.bkt_hash = bucket_hash & 0x1FFF;
1532 1710 }
1533 1711
1534 1712 /**
1535 1713 	 * ixgbe_get_fdirtcpm_82599 - generate a TCP port mask from atr_input_masks
1536 1714 * @input_mask: mask to be bit swapped
1537 1715 *
1538 1716 * The source and destination port masks for flow director are bit swapped
1539 1717 	 * in that bit 15 affects bit 0, bit 14 affects bit 1, bit 13 affects bit 2,
1540 1718 	 * and so on. In order to generate a correctly swapped value we need to bit
1541 1719 	 * swap the mask, and that is what this function accomplishes.
1542 1720 **/
1543 1721 static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
1544 1722 {
1545 1723 u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port);
1546 1724 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1547 1725 mask |= IXGBE_NTOHS(input_mask->formatted.src_port);
1548 1726 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1549 1727 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1550 1728 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1551 1729 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1552 1730 }
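
The function above reverses bits by swapping progressively larger groups (1-, 2-, 4-, then 8-bit). The standalone sketch below applies the same steps to a single 16-bit port value so the bit-15-to-bit-0 mapping is easy to verify; bit_reverse16 is a name made up for this example.

#include <stdio.h>
#include <stdint.h>

static uint16_t bit_reverse16(uint16_t v)
{
	v = (uint16_t)(((v & 0x5555) << 1) | ((v & 0xAAAA) >> 1));
	v = (uint16_t)(((v & 0x3333) << 2) | ((v & 0xCCCC) >> 2));
	v = (uint16_t)(((v & 0x0F0F) << 4) | ((v & 0xF0F0) >> 4));
	return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
	/* bit 0 ends up in bit 15, so 0x0001 -> 0x8000, 0x00FF -> 0xFF00 */
	printf("%04x -> %04x\n", 0x0001, bit_reverse16(0x0001));
	printf("%04x -> %04x\n", 0x00FF, bit_reverse16(0x00FF));
	return 0;
}
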
1553 1731
1554 1732 /*
1555 1733 * These two macros are meant to address the fact that we have registers
1556 1734 * that are either all or in part big-endian. As a result on big-endian
1557 1735 * systems we will end up byte swapping the value to little-endian before
1558 1736 * it is byte swapped again and written to the hardware in the original
1559 1737 * big-endian format.
1560 1738 */
1561 1739 #define IXGBE_STORE_AS_BE32(_value) \
1562 1740 (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1563 1741 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1564 1742
1565 1743 #define IXGBE_WRITE_REG_BE32(a, reg, value) \
1566 1744 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
1567 1745
1568 1746 #define IXGBE_STORE_AS_BE16(_value) \
1569 1747 IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
1570 1748
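
The IXGBE_STORE_AS_BE32 macro above is a plain 32-bit byte swap. A minimal sketch of the same reordering, using a helper name made up for the example:

#include <stdio.h>
#include <stdint.h>

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v & 0x00FF0000u) >> 8) |
	    ((v & 0x0000FF00u) << 8) | (v << 24);
}

int main(void)
{
	/* 0x11223344 -> 0x44332211 */
	printf("%08x -> %08x\n", 0x11223344u, bswap32(0x11223344u));
	return 0;
}
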
1571 1749 s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
1572 1750 union ixgbe_atr_input *input_mask)
1573 1751 {
1574 1752 /* mask IPv6 since it is currently not supported */
1575 1753 u32 fdirm = IXGBE_FDIRM_DIPv6;
1576 1754 u32 fdirtcpm;
1577 1755
1578 1756 DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599");
1579 1757
1580 1758 /*
1581 1759 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1582 1760 * are zero, then assume a full mask for that field. Also assume that
1583 1761 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1584 1762 * cannot be masked out in this implementation.
1585 1763 *
1586 1764 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1587 1765 * point in time.
1588 1766 */
1589 1767
1590 1768 /* verify bucket hash is cleared on hash generation */
1591 1769 if (input_mask->formatted.bkt_hash)
1592 1770 DEBUGOUT(" bucket hash should always be 0 in mask\n");
1593 1771
1594 1772 /* Program FDIRM and verify partial masks */
1595 1773 switch (input_mask->formatted.vm_pool & 0x7F) {
1596 1774 case 0x0:
1597 1775 fdirm |= IXGBE_FDIRM_POOL;
1598 1776 /* FALLTHRU */
1599 1777 case 0x7F:
1600 1778 break;
1601 1779 default:
1602 1780 DEBUGOUT(" Error on vm pool mask\n");
1603 1781 return IXGBE_ERR_CONFIG;
1604 1782 }
1605 1783
1606 1784 switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
1607 1785 case 0x0:
1608 1786 fdirm |= IXGBE_FDIRM_L4P;
1609 1787 if (input_mask->formatted.dst_port ||
1610 1788 input_mask->formatted.src_port) {
1611 1789 DEBUGOUT(" Error on src/dst port mask\n");
1612 1790 return IXGBE_ERR_CONFIG;
1613 1791 }
1614 1792 /* FALLTHRU */
1615 1793 case IXGBE_ATR_L4TYPE_MASK:
1616 1794 break;
1617 1795 default:
1618 1796 DEBUGOUT(" Error on flow type mask\n");
1619 1797 return IXGBE_ERR_CONFIG;
1620 1798 }
1621 1799
1622 1800 switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
1623 1801 case 0x0000:
1624 1802 /* mask VLAN ID, fall through to mask VLAN priority */
1625 1803 fdirm |= IXGBE_FDIRM_VLANID;
1626 1804 /* FALLTHRU */
1627 1805 case 0x0FFF:
1628 1806 /* mask VLAN priority */
1629 1807 fdirm |= IXGBE_FDIRM_VLANP;
1630 1808 break;
1631 1809 case 0xE000:
1632 1810 /* mask VLAN ID only, fall through */
1633 1811 fdirm |= IXGBE_FDIRM_VLANID;
1634 1812 /* FALLTHRU */
1635 1813 case 0xEFFF:
1636 1814 /* no VLAN fields masked */
1637 1815 break;
1638 1816 default:
1639 1817 DEBUGOUT(" Error on VLAN mask\n");
1640 1818 return IXGBE_ERR_CONFIG;
1641 1819 }
1642 1820
1643 1821 switch (input_mask->formatted.flex_bytes & 0xFFFF) {
1644 1822 case 0x0000:
1645 1823 /* Mask Flex Bytes, fall through */
1646 1824 fdirm |= IXGBE_FDIRM_FLEX;
1647 1825 /* FALLTHRU */
1648 1826 case 0xFFFF:
1649 1827 break;
1650 1828 default:
1651 1829 DEBUGOUT(" Error on flexible byte mask\n");
1652 1830 return IXGBE_ERR_CONFIG;
1653 1831 }
1654 1832
1655 1833 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1656 1834 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1657 1835
1658 1836 /* store the TCP/UDP port masks, bit reversed from port layout */
1659 1837 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
1660 1838
1661 1839 /* write both the same so that UDP and TCP use the same mask */
1662 1840 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1663 1841 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1664 1842
1665 1843 	/* store source and destination IP masks (big-endian) */
1666 1844 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1667 1845 ~input_mask->formatted.src_ip[0]);
1668 1846 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1669 1847 ~input_mask->formatted.dst_ip[0]);
1670 1848
1671 1849 return IXGBE_SUCCESS;
1672 1850 }
1673 1851
1674 1852 s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
1675 1853 union ixgbe_atr_input *input,
1676 1854 u16 soft_id, u8 queue)
1677 1855 {
1678 1856 u32 fdirport, fdirvlan, fdirhash, fdircmd;
1679 1857
1680 1858 DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
1681 1859
1682 1860 /* currently IPv6 is not supported, must be programmed with 0 */
1683 1861 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
1684 1862 input->formatted.src_ip[0]);
1685 1863 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
1686 1864 input->formatted.src_ip[1]);
1687 1865 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
1688 1866 input->formatted.src_ip[2]);
1689 1867
1690 1868 /* record the source address (big-endian) */
1691 1869 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1692 1870
1693 1871 /* record the first 32 bits of the destination address (big-endian) */
1694 1872 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1695 1873
1696 1874 	/* record source and destination port (little-endian) */
1697 1875 fdirport = IXGBE_NTOHS(input->formatted.dst_port);
1698 1876 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1699 1877 fdirport |= IXGBE_NTOHS(input->formatted.src_port);
1700 1878 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1701 1879
1702 1880 	/* record vlan (little-endian) and flex_bytes (big-endian) */
1703 1881 fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
1704 1882 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1705 1883 fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
1706 1884 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1707 1885
1708 1886 /* configure FDIRHASH register */
1709 1887 fdirhash = input->formatted.bkt_hash;
1710 1888 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1711 1889 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1712 1890
1713 1891 /*
1714 1892 * flush all previous writes to make certain registers are
1715 1893 * programmed prior to issuing the command
1716 1894 */
1717 1895 IXGBE_WRITE_FLUSH(hw);
1718 1896
1719 1897 /* configure FDIRCMD register */
1720 1898 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1721 1899 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1722 1900 if (queue == IXGBE_FDIR_DROP_QUEUE)
1723 1901 fdircmd |= IXGBE_FDIRCMD_DROP;
1724 1902 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1725 1903 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1726 1904 fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
1727 1905
1728 1906 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1729 1907
1730 1908 return IXGBE_SUCCESS;
1731 1909 }
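
The FDIRPORT write above packs the destination port into the upper 16 bits and the source port into the lower 16. The sketch below shows that layout with arbitrary port numbers; the 16-bit shift is written out literally rather than using the driver's shift define.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t dst_port = 443, src_port = 51000;	/* arbitrary, host order */
	uint32_t fdirport = ((uint32_t)dst_port << 16) | src_port;

	printf("fdirport=%08x (dst=%u src=%u)\n",
	    fdirport, fdirport >> 16, fdirport & 0xFFFFu);
	return 0;
}
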
1732 1910
1733 1911 s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
1734 1912 union ixgbe_atr_input *input,
1735 1913 u16 soft_id)
1736 1914 {
1737 1915 u32 fdirhash;
1738 1916 u32 fdircmd = 0;
1739 1917 u32 retry_count;
1740 1918 s32 err = IXGBE_SUCCESS;
1741 1919
1742 1920 /* configure FDIRHASH register */
1743 1921 fdirhash = input->formatted.bkt_hash;
1744 1922 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1745 1923 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1746 1924
1747 1925 /* flush hash to HW */
1748 1926 IXGBE_WRITE_FLUSH(hw);
1749 1927
1750 1928 /* Query if filter is present */
1751 1929 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
1752 1930
1753 1931 for (retry_count = 10; retry_count; retry_count--) {
1754 1932 /* allow 10us for query to process */
1755 1933 usec_delay(10);
1756 1934 /* verify query completed successfully */
1757 1935 fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
1758 1936 if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
1759 1937 break;
1760 1938 }
1761 1939
1762 1940 if (!retry_count)
1763 1941 err = IXGBE_ERR_FDIR_REINIT_FAILED;
1764 1942
1765 1943 /* if filter exists in hardware then remove it */
1766 1944 if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
1767 1945 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1768 1946 IXGBE_WRITE_FLUSH(hw);
1769 1947 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
1770 1948 IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
1771 1949 }
1772 1950
1773 1951 return err;
1774 1952 }
1775 1953
1776 1954 /**
1777 1955 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1778 1956 * @hw: pointer to hardware structure
1779 1957 * @input: input bitstream
1780 1958 * @input_mask: mask for the input bitstream
1781 1959 * @soft_id: software index for the filters
1782 1960 * @queue: queue index to direct traffic to
1783 1961 *
1784 1962 	 * Note that the caller of this function must hold a lock before calling,
1785 1963 	 * since the hardware writes must be protected from one another.
1786 1964 **/
1787 1965 s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1788 1966 union ixgbe_atr_input *input,
1789 1967 union ixgbe_atr_input *input_mask,
1790 1968 u16 soft_id, u8 queue)
1791 1969 {
1792 1970 s32 err = IXGBE_ERR_CONFIG;
1793 1971
1794 1972 DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
1795 1973
1796 1974 /*
1797 1975 * Check flow_type formatting, and bail out before we touch the hardware
1798 1976 * if there's a configuration issue
1799 1977 */
1800 1978 switch (input->formatted.flow_type) {
1801 1979 case IXGBE_ATR_FLOW_TYPE_IPV4:
1802 1980 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
1803 1981 if (input->formatted.dst_port || input->formatted.src_port) {
1804 1982 DEBUGOUT(" Error on src/dst port\n");
1805 1983 return IXGBE_ERR_CONFIG;
1806 1984 }
1807 1985 break;
1808 1986 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1809 1987 if (input->formatted.dst_port || input->formatted.src_port) {
1810 1988 DEBUGOUT(" Error on src/dst port\n");
1811 1989 return IXGBE_ERR_CONFIG;
1812 1990 }
1813 1991 /* FALLTHRU */
1814 1992 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1815 1993 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1816 1994 input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
1817 1995 IXGBE_ATR_L4TYPE_MASK;
1818 1996 break;
1819 1997 default:
1820 1998 DEBUGOUT(" Error on flow type input\n");
1821 1999 return err;
1822 2000 }
1823 2001
1824 2002 /* program input mask into the HW */
1825 2003 err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
1826 2004 if (err)
1827 2005 return err;
1828 2006
1829 2007 /* apply mask and compute/store hash */
1830 2008 ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
1831 2009
1832 2010 /* program filters to filter memory */
1833 2011 return ixgbe_fdir_write_perfect_filter_82599(hw, input,
1834 2012 soft_id, queue);
1835 2013 }
1836 2014
1837 2015 /**
1838 2016 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1839 2017 * @hw: pointer to hardware structure
1840 2018 * @reg: analog register to read
1841 2019 * @val: read value
1842 2020 *
1843 2021 	 * Performs a read operation on the specified Omer analog register.
1844 2022 **/
1845 2023 s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
1846 2024 {
1847 2025 u32 core_ctl;
1848 2026
1849 2027 DEBUGFUNC("ixgbe_read_analog_reg8_82599");
1850 2028
1851 2029 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
1852 2030 (reg << 8));
1853 2031 IXGBE_WRITE_FLUSH(hw);
1854 2032 usec_delay(10);
1855 2033 core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
1856 2034 *val = (u8)core_ctl;
1857 2035
1858 2036 return IXGBE_SUCCESS;
1859 2037 }
1860 2038
1861 2039 /**
1862 2040 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
1863 2041 * @hw: pointer to hardware structure
1864 2042 	 * @reg: analog register to write
1865 2043 * @val: value to write
1866 2044 *
1867 2045 	 * Performs a write operation on the specified Omer analog register.
1868 2046 **/
1869 2047 s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
1870 2048 {
1871 2049 u32 core_ctl;
1872 2050
1873 2051 DEBUGFUNC("ixgbe_write_analog_reg8_82599");
1874 2052
1875 2053 core_ctl = (reg << 8) | val;
1876 2054 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
1877 2055 IXGBE_WRITE_FLUSH(hw);
1878 2056 usec_delay(10);
1879 2057
1880 2058 return IXGBE_SUCCESS;
1881 2059 }
1882 2060
1883 2061 /**
1884 2062 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
1885 2063 * @hw: pointer to hardware structure
1886 2064 *
1887 2065 	 * Starts the hardware using the generic start_hw function
1888 2066 	 * and the generation-2 start_hw function, then performs
1889 2067 	 * revision-specific operations, if any.
1890 2068 **/
1891 2069 s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
1892 2070 {
1893 2071 s32 ret_val = IXGBE_SUCCESS;
1894 2072
1895 2073 DEBUGFUNC("ixgbe_start_hw_82599");
1896 2074
1897 2075 ret_val = ixgbe_start_hw_generic(hw);
1898 2076 if (ret_val != IXGBE_SUCCESS)
1899 2077 goto out;
1900 2078
1901 2079 ret_val = ixgbe_start_hw_gen2(hw);
1902 2080 if (ret_val != IXGBE_SUCCESS)
1903 2081 goto out;
1904 2082
1905 2083 /* We need to run link autotry after the driver loads */
1906 2084 hw->mac.autotry_restart = TRUE;
1907 2085
1908 2086 if (ret_val == IXGBE_SUCCESS)
1909 2087 ret_val = ixgbe_verify_fw_version_82599(hw);
1910 2088 out:
1911 2089 return ret_val;
1912 2090 }
1913 2091
1914 2092 /**
1915 2093 * ixgbe_identify_phy_82599 - Get physical layer module
1916 2094 * @hw: pointer to hardware structure
1917 2095 *
1918 2096 * Determines the physical layer module found on the current adapter.
1919 2097 * If PHY already detected, maintains current PHY type in hw struct,
1920 2098 * otherwise executes the PHY detection routine.
1921 2099 **/
1922 2100 s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
1923 2101 {
1924 2102 s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
1925 2103
1926 2104 DEBUGFUNC("ixgbe_identify_phy_82599");
1927 2105
1928 2106 /* Detect PHY if not unknown - returns success if already detected. */
1929 2107 status = ixgbe_identify_phy_generic(hw);
1930 2108 if (status != IXGBE_SUCCESS) {
1931 2109 /* 82599 10GBASE-T requires an external PHY */
1932 2110 if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
1933 2111 goto out;
1934 2112 else
1935 2113 status = ixgbe_identify_module_generic(hw);
1936 2114 }
1937 2115
1938 2116 /* Set PHY type none if no PHY detected */
1939 2117 if (hw->phy.type == ixgbe_phy_unknown) {
1940 2118 hw->phy.type = ixgbe_phy_none;
1941 2119 status = IXGBE_SUCCESS;
1942 2120 }
1943 2121
1944 2122 /* Return error if SFP module has been detected but is not supported */
1945 2123 if (hw->phy.type == ixgbe_phy_sfp_unsupported)
1946 2124 status = IXGBE_ERR_SFP_NOT_SUPPORTED;
1947 2125
1948 2126 out:
1949 2127 return status;
1950 2128 }
1951 2129
1952 2130 /**
1953 2131 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
1954 2132 * @hw: pointer to hardware structure
1955 2133 *
1956 2134 * Determines physical layer capabilities of the current configuration.
1957 2135 **/
1958 2136 u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1959 2137 {
1960 2138 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
1961 2139 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1962 2140 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
1963 2141 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
1964 2142 u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
1965 2143 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
1966 2144 u16 ext_ability = 0;
1967 2145 u8 comp_codes_10g = 0;
1968 2146 u8 comp_codes_1g = 0;
1969 2147
1970 2148 	DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
1971 2149
1972 2150 hw->phy.ops.identify(hw);
1973 2151
1974 2152 switch (hw->phy.type) {
1975 2153 case ixgbe_phy_tn:
1976 2154 case ixgbe_phy_cu_unknown:
1977 2155 hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
1978 2156 IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
1979 2157 if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
1980 2158 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
1981 2159 if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
1982 2160 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
1983 2161 if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
1984 2162 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
1985 2163 goto out;
1986 2164 default:
1987 2165 break;
1988 2166 }
1989 2167
1990 2168 switch (autoc & IXGBE_AUTOC_LMS_MASK) {
1991 2169 case IXGBE_AUTOC_LMS_1G_AN:
1992 2170 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
1993 2171 if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
1994 2172 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
1995 2173 IXGBE_PHYSICAL_LAYER_1000BASE_BX;
1996 2174 goto out;
1997 2175 }
1998 2176 /* SFI mode so read SFP module */
1999 2177 goto sfp_check;
2000 2178 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
2001 2179 if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
2002 2180 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
2003 2181 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
2004 2182 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2005 2183 else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
2006 2184 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
2007 2185 goto out;
2008 2186 case IXGBE_AUTOC_LMS_10G_SERIAL:
2009 2187 if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
2010 2188 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2011 2189 goto out;
2012 2190 } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
2013 2191 goto sfp_check;
2014 2192 break;
2015 2193 case IXGBE_AUTOC_LMS_KX4_KX_KR:
2016 2194 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
2017 2195 if (autoc & IXGBE_AUTOC_KX_SUPP)
2018 2196 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
2019 2197 if (autoc & IXGBE_AUTOC_KX4_SUPP)
2020 2198 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
2021 2199 if (autoc & IXGBE_AUTOC_KR_SUPP)
2022 2200 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
2023 2201 goto out;
2024 2202 default:
2025 2203 goto out;
2026 2204 }
2027 2205
2028 2206 sfp_check:
2029 2207 	/* SFP check must be done last since DA modules are sometimes used to
2030 2208 	 * test KR mode - we need to identify KR mode correctly before checking the
2031 2209 	 * SFP module. Call identify_sfp because the pluggable module may have changed */
2032 2210 hw->phy.ops.identify_sfp(hw);
2033 2211 if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
2034 2212 goto out;
2035 2213
2036 2214 switch (hw->phy.type) {
2037 2215 case ixgbe_phy_sfp_passive_tyco:
2038 2216 case ixgbe_phy_sfp_passive_unknown:
2039 2217 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2040 2218 break;
2041 2219 case ixgbe_phy_sfp_ftl_active:
2042 2220 case ixgbe_phy_sfp_active_unknown:
2043 2221 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2044 2222 break;
2045 2223 case ixgbe_phy_sfp_avago:
2046 2224 case ixgbe_phy_sfp_ftl:
2047 2225 case ixgbe_phy_sfp_intel:
2048 2226 case ixgbe_phy_sfp_unknown:
2049 2227 hw->phy.ops.read_i2c_eeprom(hw,
2050 2228 IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
2051 2229 hw->phy.ops.read_i2c_eeprom(hw,
2052 2230 IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
2053 2231 if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
2054 2232 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
2055 2233 else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
2056 2234 physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
2057 2235 else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
2058 2236 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
2059 2237 else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
2060 2238 physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
2061 2239 break;
2062 2240 default:
2063 2241 break;
2064 2242 }
2065 2243
2066 2244 out:
2067 2245 return physical_layer;
2068 2246 }
2069 2247
2070 2248 /**
2071 2249 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
2072 2250 * @hw: pointer to hardware structure
2073 2251 * @regval: register value to write to RXCTRL
2074 2252 *
2075 2253 * Enables the Rx DMA unit for 82599
2076 2254 **/
2077 2255 s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
2078 2256 {
2079 2257
2080 2258 DEBUGFUNC("ixgbe_enable_rx_dma_82599");
2081 2259
2082 2260 /*
2083 2261 * Workaround for 82599 silicon errata when enabling the Rx datapath.
2084 2262 * If traffic is incoming before we enable the Rx unit, it could hang
2085 2263 * the Rx DMA unit. Therefore, make sure the security engine is
2086 2264 * completely disabled prior to enabling the Rx unit.
2087 2265 */
2088 2266
2089 2267 hw->mac.ops.disable_sec_rx_path(hw);
2090 2268
2091 2269 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
2092 2270
2093 2271 hw->mac.ops.enable_sec_rx_path(hw);
2094 2272
2095 2273 return IXGBE_SUCCESS;
2096 2274 }
2097 2275
2098 2276 /**
2099 2277 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
2100 2278 * @hw: pointer to hardware structure
2101 2279 *
2102 2280 	 * Verifies that the installed firmware version is 0.6 or higher
2103 2281 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
2104 2282 *
2105 2283 * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
2106 2284 * if the FW version is not supported.
2107 2285 **/
2108 -static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2286 +s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
2109 2287 {
2110 2288 s32 status = IXGBE_ERR_EEPROM_VERSION;
2111 2289 u16 fw_offset, fw_ptp_cfg_offset;
2112 2290 u16 fw_version = 0;
2113 2291
2114 2292 DEBUGFUNC("ixgbe_verify_fw_version_82599");
2115 2293
2116 2294 /* firmware check is only necessary for SFI devices */
2117 2295 if (hw->phy.media_type != ixgbe_media_type_fiber) {
2118 2296 status = IXGBE_SUCCESS;
2119 2297 goto fw_version_out;
2120 2298 }
2121 2299
2122 2300 /* get the offset to the Firmware Module block */
2123 2301 hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2124 2302
2125 2303 if ((fw_offset == 0) || (fw_offset == 0xFFFF))
2126 2304 goto fw_version_out;
2127 2305
2128 2306 /* get the offset to the Pass Through Patch Configuration block */
2129 2307 hw->eeprom.ops.read(hw, (fw_offset +
2130 2308 IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
2131 2309 &fw_ptp_cfg_offset);
2132 2310
2133 2311 if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
2134 2312 goto fw_version_out;
2135 2313
2136 2314 /* get the firmware version */
2137 2315 hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
2138 2316 IXGBE_FW_PATCH_VERSION_4), &fw_version);
2139 2317
2140 2318 if (fw_version > 0x5)
2141 2319 status = IXGBE_SUCCESS;
2142 2320
2143 2321 fw_version_out:
2144 2322 return status;
2145 2323 }
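
The version check above walks a small pointer chain in the EEPROM: the firmware module pointer leads to the pass-through patch configuration block, which holds the version word. The sketch below mimics that walk with an in-memory word array standing in for the EEPROM; all offsets and contents are made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define FW_PTR		0x0F	/* made-up location of the module pointer */
#define PATCH_CFG_PTR	0x04	/* made-up offset within the module block */
#define PATCH_VERSION	0x07	/* made-up offset within the config block */

static const uint16_t eeprom[64] = {
	[FW_PTR]		= 0x0020,	/* firmware module block at 0x20 */
	[0x20 + PATCH_CFG_PTR]	= 0x0030,	/* patch config block at 0x30 */
	[0x30 + PATCH_VERSION]	= 0x0006,	/* "firmware version" word */
};

int main(void)
{
	uint16_t fw_offset = eeprom[FW_PTR];
	uint16_t cfg_offset, version;

	if (fw_offset == 0 || fw_offset == 0xFFFF)
		return 1;		/* module block not present */
	cfg_offset = eeprom[fw_offset + PATCH_CFG_PTR];
	if (cfg_offset == 0 || cfg_offset == 0xFFFF)
		return 1;		/* config block not present */
	version = eeprom[cfg_offset + PATCH_VERSION];
	printf("fw version word: 0x%x (%s)\n", version,
	    version > 0x5 ? "supported" : "unsupported");
	return 0;
}
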
2146 2324
2147 2325 /**
2148 2326 * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
2149 2327 * @hw: pointer to hardware structure
2150 2328 *
2151 2329 * Returns TRUE if the LESM FW module is present and enabled. Otherwise
2152 2330 * returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
2153 2331 **/
2154 2332 bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
2155 2333 {
2156 2334 bool lesm_enabled = FALSE;
2157 2335 u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
2158 2336 s32 status;
2159 2337
2160 2338 DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
2161 2339
2162 2340 /* get the offset to the Firmware Module block */
2163 2341 status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
2164 2342
2165 2343 if ((status != IXGBE_SUCCESS) ||
2166 2344 (fw_offset == 0) || (fw_offset == 0xFFFF))
2167 2345 goto out;
2168 2346
2169 2347 /* get the offset to the LESM Parameters block */
2170 2348 status = hw->eeprom.ops.read(hw, (fw_offset +
2171 2349 IXGBE_FW_LESM_PARAMETERS_PTR),
2172 2350 &fw_lesm_param_offset);
2173 2351
2174 2352 if ((status != IXGBE_SUCCESS) ||
2175 2353 (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
2176 2354 goto out;
2177 2355
2178 2356 /* get the lesm state word */
2179 2357 status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
2180 2358 IXGBE_FW_LESM_STATE_1),
2181 2359 &fw_lesm_state);
2182 2360
2183 2361 if ((status == IXGBE_SUCCESS) &&
2184 2362 (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
2185 2363 lesm_enabled = TRUE;
2186 2364
2187 2365 out:
2188 2366 return lesm_enabled;
2189 2367 }
2190 2368
2191 2369 /**
2192 2370 * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
2193 2371 * fastest available method
2194 2372 *
2195 2373 * @hw: pointer to hardware structure
2196 2374 * @offset: offset of word in EEPROM to read
2197 2375 * @words: number of words
2198 2376 * @data: word(s) read from the EEPROM
2199 2377 *
2200 2378 * Retrieves 16 bit word(s) read from EEPROM
2201 2379 **/
2202 2380 static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
2203 2381 u16 words, u16 *data)
2204 2382 {
2205 2383 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2206 2384 s32 ret_val = IXGBE_ERR_CONFIG;
2207 2385
2208 2386 DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
2209 2387
2210 2388 /*
2211 2389 * If EEPROM is detected and can be addressed using 14 bits,
2212 2390 * use EERD otherwise use bit bang
2213 2391 */
2214 2392 if ((eeprom->type == ixgbe_eeprom_spi) &&
2215 2393 (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
2216 2394 ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
2217 2395 data);
2218 2396 else
2219 2397 ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
2220 2398 words,
2221 2399 data);
2222 2400
2223 2401 return ret_val;
2224 2402 }
2225 2403
2226 2404 /**
2227 2405 * ixgbe_read_eeprom_82599 - Read EEPROM word using
2228 2406 * fastest available method
2229 2407 *
2230 2408 * @hw: pointer to hardware structure
2231 2409 * @offset: offset of word in the EEPROM to read
2232 2410 * @data: word read from the EEPROM
2233 2411 *
2234 2412 * Reads a 16 bit word from the EEPROM
2235 2413 **/
2236 2414 static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
2237 2415 u16 offset, u16 *data)
2238 2416 {
2239 2417 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2240 2418 s32 ret_val = IXGBE_ERR_CONFIG;
2241 2419
2242 2420 DEBUGFUNC("ixgbe_read_eeprom_82599");
2243 2421
2244 2422 /*
2245 2423 * If EEPROM is detected and can be addressed using 14 bits,
2246 2424 	 * use EERD; otherwise use bit-bang
2247 2425 */
2248 2426 if ((eeprom->type == ixgbe_eeprom_spi) &&
2249 2427 (offset <= IXGBE_EERD_MAX_ADDR))
2250 2428 ret_val = ixgbe_read_eerd_generic(hw, offset, data);
2251 2429 else
2252 2430 ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
2253 2431
2254 2432 return ret_val;
2255 2433 }
2256 2434
2435 +/**
2436 + * ixgbe_reset_pipeline_82599 - perform pipeline reset
2437 + *
2438 + * @hw: pointer to hardware structure
2439 + *
2440 + * Reset pipeline by asserting Restart_AN together with LMS change to ensure
2441 + * full pipeline reset
2442 + **/
2443 +s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
2444 +{
2445 + s32 ret_val;
2446 + u32 anlp1_reg = 0;
2447 + u32 i, autoc_reg, autoc2_reg;
2257 2448
2449 + /* Enable link if disabled in NVM */
2450 + autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
2451 + if (autoc2_reg & IXGBE_AUTOC2_LINK_DISABLE_MASK) {
2452 + autoc2_reg &= ~IXGBE_AUTOC2_LINK_DISABLE_MASK;
2453 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
2454 + IXGBE_WRITE_FLUSH(hw);
2455 + }
2456 +
2457 + autoc_reg = hw->mac.cached_autoc;
2458 + autoc_reg |= IXGBE_AUTOC_AN_RESTART;
2459 + /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
2460 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
2461 + /* Wait for AN to leave state 0 */
2462 + for (i = 0; i < 10; i++) {
2463 + msec_delay(4);
2464 + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
2465 + if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
2466 + break;
2467 + }
2468 +
2469 + if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
2470 + DEBUGOUT("auto negotiation not completed\n");
2471 + ret_val = IXGBE_ERR_RESET_FAILED;
2472 + goto reset_pipeline_out;
2473 + }
2474 +
2475 + ret_val = IXGBE_SUCCESS;
2476 +
2477 +reset_pipeline_out:
2478 + /* Write AUTOC register with original LMS field and Restart_AN */
2479 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
2480 + IXGBE_WRITE_FLUSH(hw);
2481 +
2482 + return ret_val;
2483 +}
2484 +
2485 +
2486 +