Print this page
2038 Add in I350 and ET2 support into igb
Reviewed by: Dan McDonald <danmcd@nexenta.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/igb/igb_82575.c
+++ new/usr/src/uts/common/io/igb/igb_82575.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
|
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 - * Copyright(c) 2007-2010 Intel Corporation. All rights reserved.
23 + * Copyright (c) 2007-2012 Intel Corporation. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 28 */
29 29
30 30 /* IntelVersion: 1.146.2.2 v3_3_14_3_BHSW1 */
31 31
32 32 /*
33 33 * 82575EB Gigabit Network Connection
34 34 * 82575EB Gigabit Backplane Connection
35 35 * 82575GB Gigabit Network Connection
36 36 * 82576 Gigabit Network Connection
37 37 * 82576 Quad Port Gigabit Mezzanine Adapter
38 38 */
39 39
40 40 #include "igb_api.h"
41 41
42 42 static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
43 43 static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
44 44 static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
45 45 static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
46 46 static void e1000_release_phy_82575(struct e1000_hw *hw);
47 47 static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
48 48 static void e1000_release_nvm_82575(struct e1000_hw *hw);
49 49 static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
50 50 static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
51 51 static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
52 52 u16 *duplex);
53 53 static s32 e1000_init_hw_82575(struct e1000_hw *hw);
54 54 static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
55 55 static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
56 56 u16 *data);
57 57 static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
58 58 static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
59 59 static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset,
60 60 u16 *data);
61 61 static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset,
62 62 u16 data);
63 63 static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
64 64 bool active);
65 65 static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
66 66 static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
67 67 static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
68 68 static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
69 69 u32 offset, u16 data);
70 70 static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
71 71 static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
72 72 static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
|
↓ open down ↓ |
39 lines elided |
↑ open up ↑ |
73 73 u16 *speed, u16 *duplex);
74 74 static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
75 75 static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
76 76 static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
77 77 static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
78 78 static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
79 79 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
80 80 static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
81 81 static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
82 82
83 +static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
84 + u16 offset);
85 +static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
86 + u16 offset);
87 +static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
88 +static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
89 +static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
90 +static void e1000_clear_vfta_i350(struct e1000_hw *hw);
91 +
83 92 static const u16 e1000_82580_rxpbs_table[] =
84 93 {36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140};
85 94 #define E1000_82580_RXPBS_TABLE_SIZE \
86 95 (sizeof (e1000_82580_rxpbs_table)/sizeof (u16))
87 96
88 97 /*
89 98 * e1000_init_phy_params_82575 - Init PHY func ptrs.
90 99 * @hw: pointer to the HW structure
91 100 */
92 101 static s32
93 102 e1000_init_phy_params_82575(struct e1000_hw *hw)
94 103 {
95 104 struct e1000_phy_info *phy = &hw->phy;
96 105 s32 ret_val = E1000_SUCCESS;
97 106
98 107 DEBUGFUNC("e1000_init_phy_params_82575");
99 108
100 109 if (hw->phy.media_type != e1000_media_type_copper) {
101 110 phy->type = e1000_phy_none;
102 111 goto out;
103 112 }
104 113
105 114 phy->ops.power_up = e1000_power_up_phy_copper;
106 115 phy->ops.power_down = e1000_power_down_phy_copper_82575;
107 116
108 117 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
109 118 phy->reset_delay_us = 100;
110 119
111 120 phy->ops.acquire = e1000_acquire_phy_82575;
112 121 phy->ops.check_reset_block = e1000_check_reset_block_generic;
113 122 phy->ops.commit = e1000_phy_sw_reset_generic;
114 123 phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
115 124 phy->ops.release = e1000_release_phy_82575;
116 125
117 126 if (e1000_sgmii_active_82575(hw)) {
118 127 phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
119 128 phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
120 129 phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
121 130 } else if (hw->mac.type == e1000_82580) {
122 131 phy->ops.reset = e1000_phy_hw_reset_generic;
123 132 phy->ops.read_reg = e1000_read_phy_reg_82580;
124 133 phy->ops.write_reg = e1000_write_phy_reg_82580;
125 134 } else {
126 135 phy->ops.reset = e1000_phy_hw_reset_generic;
127 136 phy->ops.read_reg = e1000_read_phy_reg_igp;
128 137 phy->ops.write_reg = e1000_write_phy_reg_igp;
129 138 }
130 139
131 140 /* Set phy->phy_addr and phy->id. */
132 141 ret_val = e1000_get_phy_id_82575(hw);
133 142
134 143 /* Verify phy id and set remaining function pointers */
135 144 switch (phy->id) {
136 145 case M88E1111_I_PHY_ID:
137 146 phy->type = e1000_phy_m88;
138 147 phy->ops.check_polarity = e1000_check_polarity_m88;
139 148 phy->ops.get_info = e1000_get_phy_info_m88;
140 149 phy->ops.get_cable_length = e1000_get_cable_length_m88;
141 150 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
142 151 break;
143 152 case IGP03E1000_E_PHY_ID:
|
↓ open down ↓ |
51 lines elided |
↑ open up ↑ |
144 153 case IGP04E1000_E_PHY_ID:
145 154 phy->type = e1000_phy_igp_3;
146 155 phy->ops.check_polarity = e1000_check_polarity_igp;
147 156 phy->ops.get_info = e1000_get_phy_info_igp;
148 157 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
149 158 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
150 159 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
151 160 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
152 161 break;
153 162 case I82580_I_PHY_ID:
163 + case I350_I_PHY_ID:
154 164 phy->type = e1000_phy_82580;
155 165 phy->ops.check_polarity = e1000_check_polarity_82577;
156 166 phy->ops.force_speed_duplex =
157 167 e1000_phy_force_speed_duplex_82577;
158 168 phy->ops.get_cable_length = e1000_get_cable_length_82577;
159 169 phy->ops.get_info = e1000_get_phy_info_82577;
160 170 break;
161 171 default:
162 172 ret_val = -E1000_ERR_PHY;
163 173 goto out;
164 174 }
165 175
166 176 out:
167 177 return (ret_val);
168 178 }
169 179
170 180 /*
171 181 * e1000_init_nvm_params_82575 - Init NVM func ptrs.
172 182 * @hw: pointer to the HW structure
173 183 */
174 184 static s32
175 185 e1000_init_nvm_params_82575(struct e1000_hw *hw)
176 186 {
177 187 struct e1000_nvm_info *nvm = &hw->nvm;
178 188 u32 eecd = E1000_READ_REG(hw, E1000_EECD);
179 189 u16 size;
180 190
181 191 DEBUGFUNC("e1000_init_nvm_params_82575");
182 192
183 193 nvm->opcode_bits = 8;
184 194 nvm->delay_usec = 1;
185 195 switch (nvm->override) {
186 196 case e1000_nvm_override_spi_large:
187 197 nvm->page_size = 32;
188 198 nvm->address_bits = 16;
189 199 break;
190 200 case e1000_nvm_override_spi_small:
191 201 nvm->page_size = 8;
192 202 nvm->address_bits = 8;
193 203 break;
194 204 default:
195 205 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
196 206 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
197 207 break;
198 208 }
199 209
200 210 nvm->type = e1000_nvm_eeprom_spi;
201 211
202 212 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
203 213 E1000_EECD_SIZE_EX_SHIFT);
204 214
205 215 /*
206 216 * Added to a constant, "size" becomes the left-shift value
207 217 * for setting word_size.
208 218 */
209 219 size += NVM_WORD_SIZE_BASE_SHIFT;
210 220
211 221 /* EEPROM access above 16k is unsupported */
212 222 if (size > 14)
213 223 size = 14;
214 224 nvm->word_size = 1 << size;
|
↓ open down ↓ |
51 lines elided |
↑ open up ↑ |
215 225
216 226 /* Function Pointers */
217 227 nvm->ops.acquire = e1000_acquire_nvm_82575;
218 228 nvm->ops.read = e1000_read_nvm_eerd;
219 229 nvm->ops.release = e1000_release_nvm_82575;
220 230 nvm->ops.update = e1000_update_nvm_checksum_generic;
221 231 nvm->ops.valid_led_default = e1000_valid_led_default_82575;
222 232 nvm->ops.validate = e1000_validate_nvm_checksum_generic;
223 233 nvm->ops.write = e1000_write_nvm_spi;
224 234
 235 + /* override generic family function pointers for specific descendants */
236 + switch (hw->mac.type) {
237 + case e1000_i350:
238 + nvm->ops.validate = e1000_validate_nvm_checksum_i350;
239 + nvm->ops.update = e1000_update_nvm_checksum_i350;
240 + break;
241 + default:
242 + break;
243 + }
244 +
245 +
225 246 return (E1000_SUCCESS);
226 247 }
227 248
228 249 /*
229 250 * e1000_init_mac_params_82575 - Init MAC func ptrs.
230 251 * @hw: pointer to the HW structure
231 252 */
232 253 static s32
233 254 e1000_init_mac_params_82575(struct e1000_hw *hw)
234 255 {
235 256 struct e1000_mac_info *mac = &hw->mac;
236 257 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
237 258 u32 ctrl_ext = 0;
238 259
239 260 DEBUGFUNC("e1000_init_mac_params_82575");
240 261
241 262 /* Set media type */
242 263 /*
243 264 * The 82575 uses bits 22:23 for link mode. The mode can be changed
244 265 * based on the EEPROM. We cannot rely upon device ID. There
245 266 * is no distinguishable difference between fiber and internal
246 267 * SerDes mode on the 82575. There can be an external PHY attached
247 268 * on the SGMII interface. For this, we'll set sgmii_active to true.
248 269 */
249 270 hw->phy.media_type = e1000_media_type_copper;
250 271 dev_spec->sgmii_active = false;
251 272
252 273 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
253 274 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
254 275 case E1000_CTRL_EXT_LINK_MODE_SGMII:
255 276 dev_spec->sgmii_active = true;
256 277 ctrl_ext |= E1000_CTRL_I2C_ENA;
257 278 break;
258 279 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
259 280 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
260 281 hw->phy.media_type = e1000_media_type_internal_serdes;
261 282 ctrl_ext |= E1000_CTRL_I2C_ENA;
262 283 break;
263 284 default:
264 285 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
265 286 break;
266 287 }
267 288
268 289 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
269 290
270 291 /*
271 292 * if using i2c make certain the MDICNFG register is cleared to prevent
272 293 * communications from being misrouted to the mdic registers
273 294 */
274 295 if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
275 296 E1000_WRITE_REG(hw, E1000_MDICNFG, 0);
276 297
|
↓ open down ↓ |
42 lines elided |
↑ open up ↑ |
277 298 /* Set mta register count */
278 299 mac->mta_reg_count = 128;
279 300 /* Set uta register count */
280 301 mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
281 302 /* Set rar entry count */
282 303 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
283 304 if (mac->type == e1000_82576)
284 305 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
285 306 if (mac->type == e1000_82580)
286 307 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
308 + if (mac->type == e1000_i350) {
309 + mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
310 + /* Enable EEE default settings for i350 */
311 + dev_spec->eee_disable = B_FALSE;
312 + }
287 313 /* Set if part includes ASF firmware */
288 314 mac->asf_firmware_present = true;
289 315 /* Set if manageability features are enabled. */
290 316 mac->arc_subsystem_valid =
291 317 (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
292 318 ? true : false;
293 319
294 320 /* Function pointers */
295 321
296 322 /* bus type/speed/width */
297 323 mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
298 324 /* reset */
299 325 if (mac->type == e1000_82580)
300 326 mac->ops.reset_hw = e1000_reset_hw_82580;
301 327 else
302 328 mac->ops.reset_hw = e1000_reset_hw_82575;
303 329 /* hw initialization */
304 330 mac->ops.init_hw = e1000_init_hw_82575;
305 331 /* link setup */
306 332 mac->ops.setup_link = e1000_setup_link_generic;
307 333 /* physical interface link setup */
308 334 mac->ops.setup_physical_interface =
309 335 (hw->phy.media_type == e1000_media_type_copper)
310 336 ? e1000_setup_copper_link_82575
311 337 : e1000_setup_serdes_link_82575;
|
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
312 338 /* physical interface shutdown */
313 339 mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
314 340 /* check for link */
315 341 mac->ops.check_for_link = e1000_check_for_link_82575;
316 342 /* receive address register setting */
317 343 mac->ops.rar_set = e1000_rar_set_generic;
318 344 /* read mac address */
319 345 mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
320 346 /* multicast address update */
321 347 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
322 - /* writing VFTA */
323 - mac->ops.write_vfta = e1000_write_vfta_generic;
324 - /* clearing VFTA */
325 - mac->ops.clear_vfta = e1000_clear_vfta_generic;
348 +
349 + if (hw->mac.type == e1000_i350) {
350 + /* writing VFTA */
351 + mac->ops.write_vfta = e1000_write_vfta_i350;
352 + /* clearing VFTA */
353 + mac->ops.clear_vfta = e1000_clear_vfta_i350;
354 + } else {
355 + /* writing VFTA */
356 + mac->ops.write_vfta = e1000_write_vfta_generic;
357 + /* clearing VFTA */
358 + mac->ops.clear_vfta = e1000_clear_vfta_generic;
359 + }
326 360 /* setting MTA */
327 361 mac->ops.mta_set = e1000_mta_set_generic;
328 362 /* ID LED init */
329 363 mac->ops.id_led_init = e1000_id_led_init_generic;
330 364 /* blink LED */
331 365 mac->ops.blink_led = e1000_blink_led_generic;
332 366 /* setup LED */
333 367 mac->ops.setup_led = e1000_setup_led_generic;
334 368 /* cleanup LED */
335 369 mac->ops.cleanup_led = e1000_cleanup_led_generic;
336 370 /* turn on/off LED */
337 371 mac->ops.led_on = e1000_led_on_generic;
338 372 mac->ops.led_off = e1000_led_off_generic;
339 373 /* clear hardware counters */
340 374 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
341 375 /* link info */
342 376 mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
343 377
344 378 /* set lan id for port to determine which phy lock to use */
345 379 hw->mac.ops.set_lan_id(hw);
346 380
347 381 return (E1000_SUCCESS);
348 382 }
349 383
350 384 /*
351 385 * e1000_init_function_pointers_82575 - Init func ptrs.
352 386 * @hw: pointer to the HW structure
353 387 *
354 388 * Called to initialize all function pointers and parameters.
355 389 */
356 390 void
357 391 e1000_init_function_pointers_82575(struct e1000_hw *hw)
358 392 {
359 393 DEBUGFUNC("e1000_init_function_pointers_82575");
360 394
361 395 hw->mac.ops.init_params = e1000_init_mac_params_82575;
362 396 hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
363 397 hw->phy.ops.init_params = e1000_init_phy_params_82575;
364 398 }
365 399
366 400 /*
367 401 * e1000_acquire_phy_82575 - Acquire rights to access PHY
368 402 * @hw: pointer to the HW structure
369 403 *
370 404 * Acquire access rights to the correct PHY.
371 405 */
372 406 static s32
373 407 e1000_acquire_phy_82575(struct e1000_hw *hw)
374 408 {
375 409 u16 mask = E1000_SWFW_PHY0_SM;
376 410
377 411 DEBUGFUNC("e1000_acquire_phy_82575");
378 412
379 413 if (hw->bus.func == E1000_FUNC_1)
380 414 mask = E1000_SWFW_PHY1_SM;
381 415 else if (hw->bus.func == E1000_FUNC_2)
382 416 mask = E1000_SWFW_PHY2_SM;
383 417 else if (hw->bus.func == E1000_FUNC_3)
384 418 mask = E1000_SWFW_PHY3_SM;
385 419
386 420 return (e1000_acquire_swfw_sync_82575(hw, mask));
387 421 }
388 422
389 423 /*
390 424 * e1000_release_phy_82575 - Release rights to access PHY
391 425 * @hw: pointer to the HW structure
392 426 *
393 427 * A wrapper to release access rights to the correct PHY.
394 428 */
395 429 static void
396 430 e1000_release_phy_82575(struct e1000_hw *hw)
397 431 {
398 432 u16 mask = E1000_SWFW_PHY0_SM;
399 433
400 434 DEBUGFUNC("e1000_release_phy_82575");
401 435
402 436 if (hw->bus.func == E1000_FUNC_1)
403 437 mask = E1000_SWFW_PHY1_SM;
404 438 else if (hw->bus.func == E1000_FUNC_2)
405 439 mask = E1000_SWFW_PHY2_SM;
406 440 else if (hw->bus.func == E1000_FUNC_3)
407 441 mask = E1000_SWFW_PHY3_SM;
408 442
409 443 e1000_release_swfw_sync_82575(hw, mask);
410 444 }
411 445
412 446 /*
413 447 * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
414 448 * @hw: pointer to the HW structure
415 449 * @offset: register offset to be read
416 450 * @data: pointer to the read data
417 451 *
418 452 * Reads the PHY register at offset using the serial gigabit media independent
419 453 * interface and stores the retrieved information in data.
420 454 */
421 455 static s32
422 456 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 *data)
423 457 {
424 458 s32 ret_val = -E1000_ERR_PARAM;
425 459
426 460 DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
427 461
428 462 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
429 463 DEBUGOUT1("PHY Address %u is out of range\n", offset);
430 464 goto out;
431 465 }
432 466
433 467 ret_val = hw->phy.ops.acquire(hw);
434 468 if (ret_val)
435 469 goto out;
436 470
437 471 ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
438 472
439 473 hw->phy.ops.release(hw);
440 474
441 475 out:
442 476 return (ret_val);
443 477 }
444 478
445 479 /*
446 480 * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
447 481 * @hw: pointer to the HW structure
448 482 * @offset: register offset to write to
449 483 * @data: data to write at register offset
450 484 *
451 485 * Writes the data to PHY register at the offset using the serial gigabit
452 486 * media independent interface.
453 487 */
454 488 static s32
455 489 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 data)
456 490 {
457 491 s32 ret_val = -E1000_ERR_PARAM;
458 492
459 493 DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
460 494
461 495 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
462 496 DEBUGOUT1("PHY Address %d is out of range\n", offset);
463 497 goto out;
464 498 }
465 499
466 500 ret_val = hw->phy.ops.acquire(hw);
467 501 if (ret_val)
468 502 goto out;
469 503
470 504 ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
471 505
472 506 hw->phy.ops.release(hw);
473 507
474 508 out:
475 509 return (ret_val);
476 510 }
477 511
478 512 /*
479 513 * e1000_get_phy_id_82575 - Retrieve PHY addr and id
480 514 * @hw: pointer to the HW structure
481 515 *
482 516 * Retrieves the PHY address and ID for both PHY's which do and do not use
 483 517 * sgmii interface.
484 518 */
485 519 static s32
486 520 e1000_get_phy_id_82575(struct e1000_hw *hw)
487 521 {
488 522 struct e1000_phy_info *phy = &hw->phy;
489 523 s32 ret_val = E1000_SUCCESS;
490 524 u16 phy_id;
491 525 u32 ctrl_ext;
492 526
493 527 DEBUGFUNC("e1000_get_phy_id_82575");
494 528
495 529 /*
496 530 * For SGMII PHYs, we try the list of possible addresses until
497 531 * we find one that works. For non-SGMII PHYs
498 532 * (e.g. integrated copper PHYs), an address of 1 should
499 533 * work. The result of this function should mean phy->phy_addr
500 534 * and phy->id are set correctly.
501 535 */
502 536 if (!e1000_sgmii_active_82575(hw)) {
503 537 phy->addr = 1;
504 538 ret_val = e1000_get_phy_id(hw);
505 539 goto out;
506 540 }
507 541
508 542 /* Power on sgmii phy if it is disabled */
509 543 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
510 544 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
511 545 ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
512 546 E1000_WRITE_FLUSH(hw);
513 547 msec_delay(300);
514 548
515 549 /*
516 550 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
517 551 * Therefore, we need to test 1-7
518 552 */
519 553 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
520 554 ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
521 555 if (ret_val == E1000_SUCCESS) {
522 556 DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
523 557 phy_id,
524 558 phy->addr);
525 559 /*
526 560 * At the time of this writing, The M88 part is
527 561 * the only supported SGMII PHY product.
528 562 */
529 563 if (phy_id == M88_VENDOR)
530 564 break;
531 565 } else {
532 566 DEBUGOUT1("PHY address %u was unreadable\n",
533 567 phy->addr);
534 568 }
535 569 }
536 570
537 571 /* A valid PHY type couldn't be found. */
538 572 if (phy->addr == 8) {
539 573 phy->addr = 0;
540 574 ret_val = -E1000_ERR_PHY;
541 575 } else {
542 576 ret_val = e1000_get_phy_id(hw);
543 577 }
544 578
545 579 /* restore previous sfp cage power state */
546 580 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
547 581
548 582 out:
549 583 return (ret_val);
550 584 }
551 585
552 586 /*
553 587 * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
554 588 * @hw: pointer to the HW structure
555 589 *
556 590 * Resets the PHY using the serial gigabit media independent interface.
557 591 */
558 592 static s32
559 593 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
560 594 {
561 595 s32 ret_val = E1000_SUCCESS;
562 596
563 597 DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
564 598
565 599 /*
566 600 * This isn't a true "hard" reset, but is the only reset
567 601 * available to us at this time.
568 602 */
569 603
570 604 DEBUGOUT("Soft resetting SGMII attached PHY...\n");
571 605
572 606 if (!(hw->phy.ops.write_reg))
573 607 goto out;
574 608
575 609 /*
 576 610 * SFP documentation requires the following to configure the SFP module
577 611 * to work on SGMII. No further documentation is given.
578 612 */
579 613 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
580 614 if (ret_val)
581 615 goto out;
582 616
583 617 ret_val = hw->phy.ops.commit(hw);
584 618
585 619 out:
586 620 return (ret_val);
587 621 }
588 622
589 623 /*
590 624 * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
591 625 * @hw: pointer to the HW structure
592 626 * @active: true to enable LPLU, false to disable
593 627 *
594 628 * Sets the LPLU D0 state according to the active flag. When
595 629 * activating LPLU this function also disables smart speed
596 630 * and vice versa. LPLU will not be activated unless the
597 631 * device autonegotiation advertisement meets standards of
598 632 * either 10 or 10/100 or 10/100/1000 at all duplexes.
599 633 * This is a function pointer entry point only called by
600 634 * PHY setup routines.
601 635 */
602 636 static s32
603 637 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
604 638 {
605 639 struct e1000_phy_info *phy = &hw->phy;
606 640 s32 ret_val = E1000_SUCCESS;
607 641 u16 data;
608 642
609 643 DEBUGFUNC("e1000_set_d0_lplu_state_82575");
610 644
611 645 if (!(hw->phy.ops.read_reg))
612 646 goto out;
613 647
614 648 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
615 649 if (ret_val)
616 650 goto out;
617 651
618 652 if (active) {
619 653 data |= IGP02E1000_PM_D0_LPLU;
620 654 ret_val = phy->ops.write_reg(hw,
621 655 IGP02E1000_PHY_POWER_MGMT,
622 656 data);
623 657 if (ret_val)
624 658 goto out;
625 659
626 660 /* When LPLU is enabled, we should disable SmartSpeed */
627 661 ret_val = phy->ops.read_reg(hw,
628 662 IGP01E1000_PHY_PORT_CONFIG,
629 663 &data);
630 664 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
631 665 ret_val = phy->ops.write_reg(hw,
632 666 IGP01E1000_PHY_PORT_CONFIG,
633 667 data);
634 668 if (ret_val)
635 669 goto out;
636 670 } else {
637 671 data &= ~IGP02E1000_PM_D0_LPLU;
638 672 ret_val = phy->ops.write_reg(hw,
639 673 IGP02E1000_PHY_POWER_MGMT,
640 674 data);
641 675 /*
642 676 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
643 677 * during Dx states where the power conservation is most
644 678 * important. During driver activity we should enable
645 679 * SmartSpeed, so performance is maintained.
646 680 */
647 681 if (phy->smart_speed == e1000_smart_speed_on) {
648 682 ret_val = phy->ops.read_reg(hw,
649 683 IGP01E1000_PHY_PORT_CONFIG,
650 684 &data);
651 685 if (ret_val)
652 686 goto out;
653 687
654 688 data |= IGP01E1000_PSCFR_SMART_SPEED;
655 689 ret_val = phy->ops.write_reg(hw,
656 690 IGP01E1000_PHY_PORT_CONFIG,
657 691 data);
658 692 if (ret_val)
659 693 goto out;
660 694 } else if (phy->smart_speed == e1000_smart_speed_off) {
661 695 ret_val = phy->ops.read_reg(hw,
662 696 IGP01E1000_PHY_PORT_CONFIG,
663 697 &data);
664 698 if (ret_val)
665 699 goto out;
666 700
667 701 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
668 702 ret_val = phy->ops.write_reg(hw,
669 703 IGP01E1000_PHY_PORT_CONFIG,
670 704 data);
671 705 if (ret_val)
672 706 goto out;
673 707 }
674 708 }
675 709
676 710 out:
677 711 return (ret_val);
678 712 }
679 713
680 714 /*
681 715 * e1000_acquire_nvm_82575 - Request for access to EEPROM
682 716 * @hw: pointer to the HW structure
683 717 *
684 718 * Acquire the necessary semaphores for exclusive access to the EEPROM.
685 719 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
686 720 * Return successful if access grant bit set, else clear the request for
687 721 * EEPROM access and return -E1000_ERR_NVM (-1).
688 722 */
689 723 static s32
|
↓ open down ↓ |
354 lines elided |
↑ open up ↑ |
690 724 e1000_acquire_nvm_82575(struct e1000_hw *hw)
691 725 {
692 726 s32 ret_val;
693 727
694 728 DEBUGFUNC("e1000_acquire_nvm_82575");
695 729
696 730 ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
697 731 if (ret_val)
698 732 goto out;
699 733
 734 + /*
 735 + * Check whether a previous access left a latched
 736 + * error flag that could interfere with this access
 737 + */
738 + if (hw->mac.type == e1000_i350) {
739 + u32 eecd = E1000_READ_REG(hw, E1000_EECD);
740 + if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
741 + E1000_EECD_TIMEOUT)) {
742 + /* Clear all access error flags */
743 + E1000_WRITE_REG(hw, E1000_EECD, eecd |
744 + E1000_EECD_ERROR_CLR);
745 + DEBUGOUT("Nvm bit banging access error "
746 + "detected and cleared.\n");
747 + }
748 + }
749 +
700 750 ret_val = e1000_acquire_nvm_generic(hw);
701 751
702 752 if (ret_val)
703 753 e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
704 754
705 755 out:
706 756 return (ret_val);
707 757 }
708 758
709 759 /*
710 760 * e1000_release_nvm_82575 - Release exclusive access to EEPROM
711 761 * @hw: pointer to the HW structure
712 762 *
713 763 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
714 764 * then release the semaphores acquired.
715 765 */
716 766 static void
717 767 e1000_release_nvm_82575(struct e1000_hw *hw)
718 768 {
719 769 DEBUGFUNC("e1000_release_nvm_82575");
720 770
721 771 e1000_release_nvm_generic(hw);
722 772 e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
723 773 }
724 774
725 775 /*
726 776 * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
727 777 * @hw: pointer to the HW structure
728 778 * @mask: specifies which semaphore to acquire
729 779 *
730 780 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
731 781 * will also specify which port we're acquiring the lock for.
732 782 */
733 783 static s32
734 784 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
735 785 {
736 786 u32 swfw_sync;
737 787 u32 swmask = mask;
738 788 u32 fwmask = mask << 16;
739 789 s32 ret_val = E1000_SUCCESS;
740 790 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
741 791
742 792 DEBUGFUNC("e1000_acquire_swfw_sync_82575");
743 793
744 794 while (i < timeout) {
745 795 if (e1000_get_hw_semaphore_generic(hw)) {
746 796 ret_val = -E1000_ERR_SWFW_SYNC;
747 797 goto out;
748 798 }
749 799
750 800 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
751 801 if (!(swfw_sync & (fwmask | swmask)))
752 802 break;
753 803
754 804 /*
755 805 * Firmware currently using resource (fwmask)
756 806 * or other software thread using resource (swmask)
757 807 */
758 808 e1000_put_hw_semaphore_generic(hw);
759 809 msec_delay_irq(5);
760 810 i++;
761 811 }
762 812
763 813 if (i == timeout) {
764 814 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
765 815 ret_val = -E1000_ERR_SWFW_SYNC;
766 816 goto out;
767 817 }
768 818
769 819 swfw_sync |= swmask;
770 820 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
771 821
772 822 e1000_put_hw_semaphore_generic(hw);
773 823
774 824 out:
775 825 return (ret_val);
776 826 }
777 827
778 828 /*
779 829 * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
780 830 * @hw: pointer to the HW structure
781 831 * @mask: specifies which semaphore to acquire
782 832 *
783 833 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
784 834 * will also specify which port we're releasing the lock for.
785 835 */
786 836 static void
787 837 e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
788 838 {
789 839 u32 swfw_sync;
790 840
791 841 DEBUGFUNC("e1000_release_swfw_sync_82575");
792 842
793 843 while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) {
794 844 /* Empty */
795 845 }
796 846
797 847 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
798 848 swfw_sync &= ~mask;
799 849 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
800 850
801 851 e1000_put_hw_semaphore_generic(hw);
802 852 }
803 853
804 854 /*
805 855 * e1000_get_cfg_done_82575 - Read config done bit
806 856 * @hw: pointer to the HW structure
807 857 *
808 858 * Read the management control register for the config done bit for
809 859 * completion status. NOTE: silicon which is EEPROM-less will fail trying
810 860 * to read the config done bit, so an error is *ONLY* logged and returns
811 861 * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
812 862 * would not be able to be reset or change link.
813 863 */
814 864 static s32
815 865 e1000_get_cfg_done_82575(struct e1000_hw *hw)
816 866 {
817 867 s32 timeout = PHY_CFG_TIMEOUT;
818 868 s32 ret_val = E1000_SUCCESS;
819 869 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
820 870
821 871 DEBUGFUNC("e1000_get_cfg_done_82575");
822 872
823 873 if (hw->bus.func == E1000_FUNC_1)
824 874 mask = E1000_NVM_CFG_DONE_PORT_1;
825 875 else if (hw->bus.func == E1000_FUNC_2)
826 876 mask = E1000_NVM_CFG_DONE_PORT_2;
827 877 else if (hw->bus.func == E1000_FUNC_3)
828 878 mask = E1000_NVM_CFG_DONE_PORT_3;
829 879
830 880 while (timeout) {
831 881 if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
832 882 break;
833 883 msec_delay(1);
834 884 timeout--;
835 885 }
836 886 if (!timeout)
837 887 DEBUGOUT("MNG configuration cycle has not completed.\n");
838 888
839 889 /* If EEPROM is not marked present, init the PHY manually */
840 890 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
841 891 (hw->phy.type == e1000_phy_igp_3))
842 892 (void) e1000_phy_init_script_igp3(hw);
843 893
844 894 return (ret_val);
845 895 }
846 896
847 897 /*
848 898 * e1000_get_link_up_info_82575 - Get link speed/duplex info
849 899 * @hw: pointer to the HW structure
850 900 * @speed: stores the current speed
851 901 * @duplex: stores the current duplex
852 902 *
853 903 * This is a wrapper function, if using the serial gigabit media independent
854 904 * interface, use PCS to retrieve the link speed and duplex information.
855 905 * Otherwise, use the generic function to get the link speed and duplex info.
856 906 */
857 907 static s32
858 908 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, u16 *duplex)
859 909 {
860 910 s32 ret_val;
861 911
862 912 DEBUGFUNC("e1000_get_link_up_info_82575");
863 913
864 914 if (hw->phy.media_type != e1000_media_type_copper)
865 915 ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
866 916 duplex);
867 917 else
868 918 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
869 919 duplex);
870 920
871 921 return (ret_val);
872 922 }
873 923
874 924 /*
875 925 * e1000_check_for_link_82575 - Check for link
876 926 * @hw: pointer to the HW structure
877 927 *
878 928 * If sgmii is enabled, then use the pcs register to determine link, otherwise
879 929 * use the generic interface for determining link.
880 930 */
881 931 static s32
882 932 e1000_check_for_link_82575(struct e1000_hw *hw)
883 933 {
884 934 s32 ret_val;
885 935 u16 speed, duplex;
886 936
887 937 DEBUGFUNC("e1000_check_for_link_82575");
888 938
889 939 /* SGMII link check is done through the PCS register. */
890 940 if (hw->phy.media_type != e1000_media_type_copper) {
891 941 ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
892 942 &duplex);
893 943 /*
894 944 * Use this flag to determine if link needs to be checked or
895 945 * not. If we have link clear the flag so that we do not
896 946 * continue to check for link.
897 947 */
898 948 hw->mac.get_link_status = !hw->mac.serdes_has_link;
899 949 } else {
900 950 ret_val = e1000_check_for_copper_link_generic(hw);
901 951 }
902 952
903 953 return (ret_val);
904 954 }
905 955
906 956 /*
907 957 * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
908 958 * @hw: pointer to the HW structure
909 959 * @speed: stores the current speed
910 960 * @duplex: stores the current duplex
911 961 *
912 962 * Using the physical coding sub-layer (PCS), retrieve the current speed and
913 963 * duplex, then store the values in the pointers provided.
914 964 */
915 965 static s32
916 966 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
917 967 u16 *speed, u16 *duplex)
918 968 {
919 969 struct e1000_mac_info *mac = &hw->mac;
920 970 u32 pcs;
921 971
922 972 DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
923 973
924 974 /* Set up defaults for the return values of this function */
925 975 mac->serdes_has_link = false;
926 976 *speed = 0;
927 977 *duplex = 0;
928 978
929 979 /*
930 980 * Read the PCS Status register for link state. For non-copper mode,
931 981 * the status register is not accurate. The PCS status register is
932 982 * used instead.
933 983 */
934 984 pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
935 985
936 986 /*
937 987 * The link up bit determines when link is up on autoneg. The sync ok
938 988 * gets set once both sides sync up and agree upon link. Stable link
939 989 * can be determined by checking for both link up and link sync ok
940 990 */
941 991 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
942 992 mac->serdes_has_link = true;
943 993
944 994 /* Detect and store PCS speed */
945 995 if (pcs & E1000_PCS_LSTS_SPEED_1000) {
946 996 *speed = SPEED_1000;
947 997 } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
948 998 *speed = SPEED_100;
949 999 } else {
950 1000 *speed = SPEED_10;
951 1001 }
952 1002
953 1003 /* Detect and store PCS duplex */
954 1004 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
955 1005 *duplex = FULL_DUPLEX;
956 1006 } else {
957 1007 *duplex = HALF_DUPLEX;
958 1008 }
959 1009 }
960 1010
961 1011 return (E1000_SUCCESS);
962 1012 }
963 1013
/*
 * e1000_shutdown_serdes_link_82575 - Remove link during power down
 * @hw: pointer to the HW structure
 *
 * In the case of serdes shut down sfp and PCS on driver unload
 * when management pass thru is not enabled.  No-op for media types
 * other than serdes/sgmii.
 */
void
e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;
	u16 eeprom_data = 0;

	/* only applicable to serdes and sgmii links */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !e1000_sgmii_active_82575(hw))
		return;

	/*
	 * Read the per-port APM word from the NVM.  Read failures are
	 * deliberately ignored: eeprom_data then stays 0, which the test
	 * below treats as "APM disabled".  On the 82580 the word is
	 * located by a per-function offset (function 0 matches PORT_A).
	 */
	if (hw->bus.func == E1000_FUNC_0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->mac.type == e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
		    NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
		    &eeprom_data);
	else if (hw->bus.func == E1000_FUNC_1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	/*
	 * If APM is not enabled in the EEPROM and management interface is
	 * not enabled, then power down.
	 */
	if (!(eeprom_data & E1000_NVM_APME_82575) &&
	    !e1000_enable_mng_pass_thru(hw)) {
		/* Disable PCS to turn off link */
		reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
		reg &= ~E1000_PCS_CFG_PCS_EN;
		E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);

		/* shutdown the laser */
		reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
		reg |= E1000_CTRL_EXT_SDP3_DATA;
		E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);

		/* flush the write to verify completion */
		E1000_WRITE_FLUSH(hw);
		msec_delay(1);
	}
}
1011 1061
/*
 * e1000_reset_hw_82575 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets the hardware into a known state: quiesce PCI-E and DMA,
 * mask interrupts, issue the global MAC reset, then re-run EEPROM-less
 * init scripts and re-install any alternate MAC address.  Returns the
 * status of the alternate-MAC-address check.
 */
static s32
e1000_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_reset_hw_82575");

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val) {
		/* non-fatal: log and continue with the reset */
		DEBUGOUT("PCI-E Master disable polling has failed.\n");
	}

	/* set the completion timeout for interface */
	ret_val = e1000_set_pcie_completion_timeout(hw);
	if (ret_val) {
		/* non-fatal: log and continue with the reset */
		DEBUGOUT("PCI-E Set completion timeout has failed.\n");
	}

	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);

	/* stop RX entirely and halt TX (pad short packets only) */
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	/* settle time before issuing the reset */
	msec_delay(10);

	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	DEBUGOUT("Issuing a global reset to MAC\n");
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = e1000_get_auto_rd_done_generic(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		DEBUGOUT("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
		(void) e1000_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	(void) E1000_READ_REG(hw, E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = e1000_check_alt_mac_addr_generic(hw);

	return (ret_val);
}
1078 1128
/*
 * e1000_init_hw_82575 - Initialize hardware
 * @hw: pointer to the HW structure
 *
 * This inits the hardware readying it for operation: identification
 * LED, VLAN filter table, receive addresses, multicast/unicast hash
 * tables, then link/flow control and finally the statistics counters.
 * Returns the status of the link setup; an LED init failure is
 * deliberately non-fatal.
 */
static s32
e1000_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	DEBUGFUNC("e1000_init_hw_82575");

	/* Initialize identification LED */
	ret_val = mac->ops.id_led_init(hw);
	if (ret_val) {
		DEBUGOUT("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	DEBUGOUT("Initializing the IEEE VLAN\n");
	mac->ops.clear_vfta(hw);

	/* Setup the receive address */
	e1000_init_rx_addrs_generic(hw, rar_count);
	/* Zero out the Multicast HASH table */
	DEBUGOUT("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	DEBUGOUT("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = mac->ops.setup_link(hw);

	/*
	 * Clear all of the statistics registers (clear on read). It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	e1000_clear_hw_cntrs_82575(hw);

	return (ret_val);
}
1130 1180
/*
 * e1000_setup_copper_link_82575 - Configure copper link settings
 * @hw: pointer to the HW structure
 *
 * Configures the link for auto-neg or forced speed and duplex. Then we check
 * for link, once link is established calls to configure collision distance
 * and flow control are called.  Returns -E1000_ERR_PHY for an
 * unrecognized PHY type.
 */
static s32
e1000_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	DEBUGFUNC("e1000_setup_copper_link_82575");

	/* Set link up and clear force-speed/duplex so the PHY decides */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* the serdes/PCS path must be configured first (sgmii included) */
	ret_val = e1000_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for SFP cage time to power up phy */
		msec_delay(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			DEBUGOUT("Error resetting the PHY.\n");
			goto out;
		}
	}
	/* PHY-model-specific link setup */
	switch (hw->phy.type) {
	case e1000_phy_m88:
		ret_val = e1000_copper_link_setup_m88(hw);
		break;
	case e1000_phy_igp_3:
		ret_val = e1000_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = e1000_copper_link_setup_82577(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	ret_val = e1000_setup_copper_link_generic(hw);
out:
	return (ret_val);
}
1188 1238
/*
 * e1000_setup_serdes_link_82575 - Setup link for serdes
 * @hw: pointer to the HW structure
 *
 * Configure the physical coding sub-layer (PCS) link. The PCS link is
 * used on copper connections where the serialized gigabit media independent
 * interface (sgmii), or serdes fiber is being used. Configures the link
 * for auto-negotiation or forces speed/duplex.  No-op (returns
 * E1000_SUCCESS) for any other media type.
 */
static s32
e1000_setup_serdes_link_82575(struct e1000_hw *hw)
{
	u32 ctrl_ext, ctrl_reg, reg;
	bool pcs_autoneg;

	DEBUGFUNC("e1000_setup_serdes_link_82575");

	/* only applicable to serdes and sgmii links */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !e1000_sgmii_active_82575(hw))
		return (E1000_SUCCESS);

	/*
	 * On the 82575, SerDes loopback mode persists until it is
	 * explicitly turned off or a power cycle is performed. A read to
	 * the register does not indicate its status. Therefore, we ensure
	 * loopback mode is disabled during initialization.
	 */
	E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);

	/* power on the sfp cage if present */
	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);

	ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
	ctrl_reg |= E1000_CTRL_SLU;

	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
		/* set both sw defined pins */
		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;

		/* Set switch control to serdes energy detect */
		reg = E1000_READ_REG(hw, E1000_CONNSW);
		reg |= E1000_CONNSW_ENRGSRC;
		E1000_WRITE_REG(hw, E1000_CONNSW, reg);
	}

	reg = E1000_READ_REG(hw, E1000_PCS_LCTL);

	/* default pcs_autoneg to the same setting as mac autoneg */
	pcs_autoneg = hw->mac.autoneg;

	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* sgmii mode lets the phy handle forcing speed/duplex */
		pcs_autoneg = true;
		/* autoneg time out should be disabled for SGMII mode */
		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		/* disable PCS autoneg and support parallel detect only */
		pcs_autoneg = false;
		/* FALLTHROUGH -- KX also takes the forced 1000/Full path */
	default:
		/*
		 * non-SGMII modes only supports a speed of 1000/Full for the
		 * link so it is best to just force the MAC and let the pcs
		 * link either autoneg or be forced to 1000/Full
		 */
		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
		    E1000_CTRL_FD | E1000_CTRL_FRCDPX;

		/* set speed of 1000/Full if speed/duplex is forced */
		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
		break;
	}

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);

	/*
	 * New SerDes mode allows for forcing speed or autonegotiating speed
	 * at 1gb. Autoneg should be default set by most drivers. This is the
	 * mode that will be compatible with older link partners and switches.
	 * However, both are supported by the hardware and some drivers/tools.
	 */

	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
	    E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

	/*
	 * We force flow control to prevent the CTRL register values from being
	 * overwritten by the autonegotiated flow control values
	 */
	reg |= E1000_PCS_LCTL_FORCE_FCTRL;

	if (pcs_autoneg) {
		/* Set PCS register for autoneg */
		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
		    E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
		DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
	} else {
		/* Set PCS register for forced link */
		reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
		DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
	}

	E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);

	if (!e1000_sgmii_active_82575(hw))
		(void) e1000_force_mac_fc_generic(hw);

	return (E1000_SUCCESS);
}
1301 1351
1302 1352 /*
1303 1353 * e1000_valid_led_default_82575 - Verify a valid default LED config
1304 1354 * @hw: pointer to the HW structure
1305 1355 * @data: pointer to the NVM (EEPROM)
1306 1356 *
1307 1357 * Read the EEPROM for the current default LED configuration. If the
1308 1358 * LED configuration is not valid, set to a valid LED configuration.
1309 1359 */
1310 1360 static s32
1311 1361 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
1312 1362 {
1313 1363 s32 ret_val;
1314 1364
1315 1365 DEBUGFUNC("e1000_valid_led_default_82575");
1316 1366
1317 1367 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1318 1368 if (ret_val) {
1319 1369 DEBUGOUT("NVM Read Error\n");
1320 1370 goto out;
1321 1371 }
1322 1372
1323 1373 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1324 1374 switch (hw->phy.media_type) {
1325 1375 case e1000_media_type_internal_serdes:
1326 1376 *data = ID_LED_DEFAULT_82575_SERDES;
1327 1377 break;
1328 1378 case e1000_media_type_copper:
1329 1379 default:
1330 1380 *data = ID_LED_DEFAULT;
1331 1381 break;
1332 1382 }
1333 1383 }
1334 1384 out:
1335 1385 return (ret_val);
1336 1386 }
1337 1387
1338 1388 /*
1339 1389 * e1000_sgmii_active_82575 - Return sgmii state
1340 1390 * @hw: pointer to the HW structure
1341 1391 *
1342 1392 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
1343 1393 * which can be enabled for use in the embedded applications. Simply
1344 1394 * return the current state of the sgmii interface.
1345 1395 */
1346 1396 static bool
1347 1397 e1000_sgmii_active_82575(struct e1000_hw *hw)
1348 1398 {
1349 1399 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1350 1400 return (dev_spec->sgmii_active);
1351 1401 }
1352 1402
/*
 * e1000_reset_init_script_82575 - Inits HW defaults after reset
 * @hw: pointer to the HW structure
 *
 * Inits recommended HW defaults after a reset when there is no EEPROM
 * detected. This is only for the 82575; any other mac type returns
 * E1000_SUCCESS without touching the hardware.  The offset/value pairs
 * below are the recommended defaults for EEPROM-less operation; write
 * failures are deliberately ignored ((void) casts).
 */
static s32
e1000_reset_init_script_82575(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_reset_init_script_82575");

	if (hw->mac.type == e1000_82575) {
		DEBUGOUT("Running reset init script for 82575\n");
		/* SerDes configuration via SERDESCTRL */
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
		    0x00, 0x0C);
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
		    0x01, 0x78);
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
		    0x1B, 0x23);
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
		    0x23, 0x15);

		/* CCM configuration via CCMCTL register */
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL,
		    0x14, 0x00);
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL,
		    0x10, 0x00);

		/* PCIe lanes configuration */
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
		    0x00, 0xEC);
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
		    0x61, 0xDF);
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
		    0x34, 0x05);
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
		    0x2F, 0x81);

		/* PCIe PLL Configuration */
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL,
		    0x02, 0x47);
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL,
		    0x14, 0x00);
		(void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL,
		    0x10, 0x00);
	}

	return (E1000_SUCCESS);
}
1404 1454
1405 1455 /*
1406 1456 * e1000_read_mac_addr_82575 - Read device MAC address
1407 1457 * @hw: pointer to the HW structure
1408 1458 */
1409 1459 static s32
1410 1460 e1000_read_mac_addr_82575(struct e1000_hw *hw)
1411 1461 {
1412 1462 s32 ret_val = E1000_SUCCESS;
1413 1463
1414 1464 DEBUGFUNC("e1000_read_mac_addr_82575");
1415 1465
1416 1466 /*
1417 1467 * If there's an alternate MAC address place it in RAR0
1418 1468 * so that it will override the Si installed default perm
1419 1469 * address.
1420 1470 */
1421 1471 ret_val = e1000_check_alt_mac_addr_generic(hw);
1422 1472 if (ret_val)
1423 1473 goto out;
1424 1474
1425 1475 ret_val = e1000_read_mac_addr_generic(hw);
1426 1476
1427 1477 out:
1428 1478 return (ret_val);
1429 1479 }
1430 1480
1431 1481 /*
1432 1482 * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
1433 1483 * @hw: pointer to the HW structure
1434 1484 *
1435 1485 * In the case of a PHY power down to save power, or to turn off link during a
1436 1486 * driver unload, or wake on lan is not enabled, remove the link.
1437 1487 */
1438 1488 static void
1439 1489 e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
1440 1490 {
1441 1491 struct e1000_phy_info *phy = &hw->phy;
1442 1492 struct e1000_mac_info *mac = &hw->mac;
1443 1493
1444 1494 if (!(phy->ops.check_reset_block))
1445 1495 return;
1446 1496
1447 1497 /* If the management interface is not enabled, then power down */
1448 1498 if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
1449 1499 e1000_power_down_phy_copper(hw);
1450 1500 }
1451 1501
/*
 * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
 * @hw: pointer to the HW structure
 *
 * Clears the hardware counters by reading the counter registers; the
 * counters are clear-on-read, so each value is read and discarded.
 */
static void
e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	DEBUGFUNC("e1000_clear_hw_cntrs_82575");

	/* first clear the counters shared with other e1000 parts */
	e1000_clear_hw_cntrs_base_generic(hw);

	/* packet-size bucket counters, RX (PRC*) then TX (PTC*) */
	(void) E1000_READ_REG(hw, E1000_PRC64);
	(void) E1000_READ_REG(hw, E1000_PRC127);
	(void) E1000_READ_REG(hw, E1000_PRC255);
	(void) E1000_READ_REG(hw, E1000_PRC511);
	(void) E1000_READ_REG(hw, E1000_PRC1023);
	(void) E1000_READ_REG(hw, E1000_PRC1522);
	(void) E1000_READ_REG(hw, E1000_PTC64);
	(void) E1000_READ_REG(hw, E1000_PTC127);
	(void) E1000_READ_REG(hw, E1000_PTC255);
	(void) E1000_READ_REG(hw, E1000_PTC511);
	(void) E1000_READ_REG(hw, E1000_PTC1023);
	(void) E1000_READ_REG(hw, E1000_PTC1522);

	(void) E1000_READ_REG(hw, E1000_ALGNERRC);
	(void) E1000_READ_REG(hw, E1000_RXERRC);
	(void) E1000_READ_REG(hw, E1000_TNCRS);
	(void) E1000_READ_REG(hw, E1000_CEXTERR);
	(void) E1000_READ_REG(hw, E1000_TSCTC);
	(void) E1000_READ_REG(hw, E1000_TSCTFC);

	(void) E1000_READ_REG(hw, E1000_MGTPRC);
	(void) E1000_READ_REG(hw, E1000_MGTPDC);
	(void) E1000_READ_REG(hw, E1000_MGTPTC);

	(void) E1000_READ_REG(hw, E1000_IAC);
	(void) E1000_READ_REG(hw, E1000_ICRXOC);

	/* interrupt cause counters */
	(void) E1000_READ_REG(hw, E1000_ICRXPTC);
	(void) E1000_READ_REG(hw, E1000_ICRXATC);
	(void) E1000_READ_REG(hw, E1000_ICTXPTC);
	(void) E1000_READ_REG(hw, E1000_ICTXATC);
	(void) E1000_READ_REG(hw, E1000_ICTXQEC);
	(void) E1000_READ_REG(hw, E1000_ICTXQMTC);
	(void) E1000_READ_REG(hw, E1000_ICRXDMTC);

	(void) E1000_READ_REG(hw, E1000_CBTMPC);
	(void) E1000_READ_REG(hw, E1000_HTDPMC);
	(void) E1000_READ_REG(hw, E1000_CBRMPC);
	(void) E1000_READ_REG(hw, E1000_RPTHC);
	(void) E1000_READ_REG(hw, E1000_HGPTC);
	(void) E1000_READ_REG(hw, E1000_HTCBDPC);
	(void) E1000_READ_REG(hw, E1000_HGORCL);
	(void) E1000_READ_REG(hw, E1000_HGORCH);
	(void) E1000_READ_REG(hw, E1000_HGOTCL);
	(void) E1000_READ_REG(hw, E1000_HGOTCH);
	(void) E1000_READ_REG(hw, E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
	    e1000_sgmii_active_82575(hw))
		(void) E1000_READ_REG(hw, E1000_SCVPC);
}
1517 1567
/*
 * e1000_rx_fifo_flush_82575 - Clean rx fifo after RX enable
 * @hw: pointer to the HW structure
 *
 * After rx enable if managability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo. This
 * function clears the fifos and flushes any packets that came in as rx was
 * being enabled.  Only applies to the 82575 when the manageability
 * receive path (MANC.RCV_TCO_EN) is active.
 */
void
e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	DEBUGFUNC("e1000_rx_fifo_workaround_82575");
	if (hw->mac.type != e1000_82575 ||
	    !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues (saving their state for restore below) */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
		E1000_WRITE_REG(hw, E1000_RXDCTL(i),
		    rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		msec_delay(1);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		DEBUGOUT("Queue disable timed out after 10ms\n");

	/*
	 * Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected. Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	rfctl = E1000_READ_REG(hw, E1000_RFCTL);
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = E1000_READ_REG(hw, E1000_RLPML);
	E1000_WRITE_REG(hw, E1000_RLPML, 0);

	rctl = E1000_READ_REG(hw, E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
	E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	E1000_WRITE_FLUSH(hw);
	msec_delay(2);

	/*
	 * Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
	E1000_WRITE_FLUSH(hw);

	/* restore packet-length limit and filter-control settings */
	E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	(void) E1000_READ_REG(hw, E1000_ROC);
	(void) E1000_READ_REG(hw, E1000_RNBC);
	(void) E1000_READ_REG(hw, E1000_MPC);
}
1594 1644
/*
 * e1000_set_pcie_completion_timeout - set pci-e completion timeout
 * @hw: pointer to the HW structure
 *
 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
 * however the hardware default for these parts is 500us to 1ms which is less
 * than the 10ms recommended by the pci-e spec. To address this we need to
 * increase the value to either 10ms to 200ms for capability version 1 config,
 * or 16ms to 55ms for version 2.
 *
 * Note: GCR is written on every exit path, so the completion-timeout
 * resend bit is always cleared even when the timeout itself is left
 * alone (the "out" label is reached from every branch).
 */
static s32
e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = E1000_READ_REG(hw, E1000_GCR);
	s32 ret_val = E1000_SUCCESS;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/*
	 * if capababilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/*
	 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
	    &pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
	    &pcie_devctl2);
out:
	/* disable completion timeout resend */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	E1000_WRITE_REG(hw, E1000_GCR, gcr);
	return (ret_val);
}
1646 1696
1647 1697 /*
1648 1698 * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
1649 1699 * @hw: pointer to the hardware struct
1650 1700 * @enable: state to enter, either enabled or disabled
1651 1701 *
1652 1702 * enables/disables L2 switch loopback functionality.
1653 1703 */
1654 1704 void
1655 1705 e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1656 1706 {
1657 1707 u32 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
1658 1708
1659 1709 if (enable)
1660 1710 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1661 1711 else
1662 1712 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1663 1713
1664 1714 E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
1665 1715 }
1666 1716
1667 1717 /*
1668 1718 * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
1669 1719 * @hw: pointer to the hardware struct
1670 1720 * @enable: state to enter, either enabled or disabled
1671 1721 *
1672 1722 * enables/disables replication of packets across multiple pools.
1673 1723 */
1674 1724 void
1675 1725 e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1676 1726 {
1677 1727 u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1678 1728
1679 1729 if (enable)
1680 1730 vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1681 1731 else
1682 1732 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1683 1733
1684 1734 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1685 1735 }
1686 1736
/*
 * e1000_read_phy_reg_82580 - Read 82580 MDI control register
 * @hw: pointer to the HW structure
 * @offset: register offset to be read
 * @data: pointer to the read data
 *
 * Reads the MDI control register in the PHY at offset and stores the
 * information read to data.  The PHY bus is acquired for the duration
 * of the access and released afterwards.
 */
static s32
e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
{
	u32 mdicnfg = 0;
	s32 ret_val;

	DEBUGFUNC("e1000_read_phy_reg_82580");

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		goto out;

	/*
	 * We config the phy address in MDICNFG register now. Same bits
	 * as before. The values in MDIC can be written but will be
	 * ignored. This allows us to call the old function after
	 * configuring the PHY address in the new register
	 */
	/*
	 * NOTE(review): the address is placed with E1000_MDIC_PHY_SHIFT
	 * (the MDIC register's field position) -- confirm the MDICNFG
	 * PHY-address field sits at the same bit offset in this silicon.
	 */
	mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
	E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);

	ret_val = e1000_read_phy_reg_mdic(hw, offset, data);

	hw->phy.ops.release(hw);

out:
	return (ret_val);
}
1724 1774
1725 1775 /*
1726 1776 * e1000_write_phy_reg_82580 - Write 82580 MDI control register
1727 1777 * @hw: pointer to the HW structure
1728 1778 * @offset: register offset to write to
1729 1779 * @data: data to write to register at offset
1730 1780 *
1731 1781 * Writes data to MDI control register in the PHY at offset.
1732 1782 */
1733 1783 static s32
1734 1784 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1735 1785 {
1736 1786 u32 mdicnfg = 0;
1737 1787 s32 ret_val;
1738 1788
1739 1789 DEBUGFUNC("e1000_write_phy_reg_82580");
1740 1790
1741 1791 ret_val = hw->phy.ops.acquire(hw);
1742 1792 if (ret_val)
1743 1793 goto out;
1744 1794
1745 1795 /*
1746 1796 * We config the phy address in MDICNFG register now. Same bits
1747 1797 * as before. The values in MDIC can be written but will be
1748 1798 * ignored. This allows us to call the old function after
1749 1799 * configuring the PHY address in the new register
1750 1800 */
1751 1801 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1752 1802 E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
1753 1803
1754 1804 ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
1755 1805
1756 1806 hw->phy.ops.release(hw);
1757 1807
1758 1808 out:
1759 1809 return (ret_val);
1760 1810 }
1761 1811
/*
 * e1000_reset_hw_82580 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets function or entire device (all ports, etc.)
 * to a known state.  Which of the two is performed is selected by the
 * dev_spec._82575.global_device_reset flag (a one-shot request that is
 * consumed here); a device-level reset additionally requires the
 * SW_FW_SYNC mailbox semaphore and a clear DEV_RST status bit.
 *
 * Returns E1000_SUCCESS on success, or the error from the alternate
 * MAC address check (an auto-read-done timeout is deliberately not
 * treated as fatal — see comment below).
 */
static s32
e1000_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = E1000_SUCCESS;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	DEBUGFUNC("e1000_reset_hw_82580");

	/* One-shot: clear the request so a later call defaults to a
	 * port-level reset unless the caller re-arms the flag. */
	hw->dev_spec._82575.global_device_reset = false;

	/* Get current control state. */
	ctrl = E1000_READ_REG(hw, E1000_CTRL);

	/*
	 * Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = e1000_disable_pcie_master_generic(hw);
	if (ret_val)
		DEBUGOUT("PCI-E Master disable polling has failed.\n");

	/* Quiesce the device: mask interrupts, stop RX, let TX drain. */
	DEBUGOUT("Masking off all interrupts\n");
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	E1000_WRITE_REG(hw, E1000_RCTL, 0);
	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
	E1000_WRITE_FLUSH(hw);

	msec_delay(10);

	/* Determine whether or not a global dev reset is requested */
	/* A nonzero acquire result means the semaphore is unavailable,
	 * so fall back to a port-level reset. */
	if (global_device_reset &&
	    e1000_acquire_swfw_sync_82575(hw, swmbsw_mask))
		global_device_reset = false;

	/* Only issue DEV_RST if no device reset is already in flight
	 * (DEV_RST_SET still pending in STATUS); otherwise port reset. */
	if (global_device_reset &&
	    !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);

	/* Add delay to insure DEV_RST has time to complete */
	if (global_device_reset)
		msec_delay(5);

	ret_val = e1000_get_auto_rd_done_generic(hw);
	if (ret_val) {
		/*
		 * When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		DEBUGOUT("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
		(void) e1000_reset_init_script_82575(hw);

	/* clear global device reset status bit */
	E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
	(void) E1000_READ_REG(hw, E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = e1000_check_alt_mac_addr_generic(hw);

	/* Release semaphore */
	if (global_device_reset)
		e1000_release_swfw_sync_82575(hw, swmbsw_mask);

	return (ret_val);
}
1848 1898
1849 1899 /*
1850 1900 * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
1851 1901 * @data: data received by reading RXPBS register
1852 1902 *
1853 1903 * The 82580 uses a table based approach for packet buffer allocation sizes.
1854 1904 * This function converts the retrieved value into the correct table value
1855 1905 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
1856 1906 * 0x0 36 72 144 1 2 4 8 16
1857 1907 * 0x8 35 70 140 rsv rsv rsv rsv rsv
|
↓ open down ↓ |
1148 lines elided |
↑ open up ↑ |
1858 1908 */
1859 1909 u16
1860 1910 e1000_rxpbs_adjust_82580(u32 data)
1861 1911 {
1862 1912 u16 ret_val = 0;
1863 1913
1864 1914 if (data < E1000_82580_RXPBS_TABLE_SIZE)
1865 1915 ret_val = e1000_82580_rxpbs_table[data];
1866 1916
1867 1917 return (ret_val);
1918 +}
1919 +
1920 +/*
1921 + * Due to a hw errata, if the host tries to configure the VFTA register
1922 + * while performing queries from the BMC or DMA, then the VFTA in some
1923 + * cases won't be written.
1924 + */
1925 +
1926 +/*
1927 + * e1000_clear_vfta_i350 - Clear VLAN filter table
1928 + * @hw: pointer to the HW structure
1929 + *
1930 + * Clears the register array which contains the VLAN filter table by
1931 + * setting all the values to 0.
1932 + */
1933 +void
1934 +e1000_clear_vfta_i350(struct e1000_hw *hw)
1935 +{
1936 + u32 offset;
1937 + int i;
1938 +
1939 + DEBUGFUNC("e1000_clear_vfta_350");
1940 +
1941 + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
1942 + for (i = 0; i < 10; i++)
1943 + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
1944 +
1945 + E1000_WRITE_FLUSH(hw);
1946 + }
1947 +}
1948 +
1949 +/*
1950 + * e1000_write_vfta_i350 - Write value to VLAN filter table
1951 + * @hw: pointer to the HW structure
1952 + * @offset: register offset in VLAN filter table
1953 + * @value: register value written to VLAN filter table
1954 + *
1955 + * Writes value at the given offset in the register array which stores
1956 + * the VLAN filter table.
1957 + */
1958 +void
1959 +e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
1960 +{
1961 + int i;
1962 +
1963 + DEBUGFUNC("e1000_write_vfta_350");
1964 +
1965 + for (i = 0; i < 10; i++)
1966 + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
1967 +
1968 + E1000_WRITE_FLUSH(hw);
1969 +}
1970 +
1971 +/*
1972 + * e1000_validate_nvm_checksum_with_offset - Validate EEPROM
1973 + * checksum
1974 + * @hw: pointer to the HW structure
1975 + * @offset: offset in words of the checksum protected region
1976 + *
1977 + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
1978 + * and then verifies that the sum of the EEPROM is equal to 0xBABA.
1979 + */
1980 +s32
1981 +e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
1982 +{
1983 + s32 ret_val = E1000_SUCCESS;
1984 + u16 checksum = 0;
1985 + u16 i, nvm_data;
1986 +
1987 + DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
1988 +
1989 + for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
1990 + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
1991 + if (ret_val) {
1992 + DEBUGOUT("NVM Read Error\n");
1993 + goto out;
1994 + }
1995 + checksum += nvm_data;
1996 + }
1997 +
1998 + if (checksum != (u16) NVM_SUM) {
1999 + DEBUGOUT("NVM Checksum Invalid\n");
2000 + ret_val = -E1000_ERR_NVM;
2001 + goto out;
2002 + }
2003 +
2004 +out:
2005 + return (ret_val);
2006 +}
2007 +
2008 +/*
2009 + * e1000_update_nvm_checksum_with_offset - Update EEPROM
2010 + * checksum
2011 + * @hw: pointer to the HW structure
2012 + * @offset: offset in words of the checksum protected region
2013 + *
2014 + * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2015 + * up to the checksum. Then calculates the EEPROM checksum and writes the
2016 + * value to the EEPROM.
2017 + */
2018 +s32
2019 +e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2020 +{
2021 + s32 ret_val;
2022 + u16 checksum = 0;
2023 + u16 i, nvm_data;
2024 +
2025 + DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
2026 +
2027 + for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2028 + ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2029 + if (ret_val) {
2030 + DEBUGOUT("NVM Read Error while updating checksum.\n");
2031 + goto out;
2032 + }
2033 + checksum += nvm_data;
2034 + }
2035 + checksum = (u16) NVM_SUM - checksum;
2036 + ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2037 + &checksum);
2038 + if (ret_val)
2039 + DEBUGOUT("NVM Write Error while updating checksum.\n");
2040 +
2041 +out:
2042 + return (ret_val);
2043 +}
2044 +
2045 +/*
2046 + * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
2047 + * @hw: pointer to the HW structure
2048 + *
2049 + * Calculates the EEPROM section checksum by reading/adding each word of
2050 + * the EEPROM and then verifies that the sum of the EEPROM is
2051 + * equal to 0xBABA.
2052 + */
2053 +static s32
2054 +e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
2055 +{
2056 + s32 ret_val = E1000_SUCCESS;
2057 + u16 j;
2058 + u16 nvm_offset;
2059 +
2060 + DEBUGFUNC("e1000_validate_nvm_checksum_i350");
2061 +
2062 + for (j = 0; j < 4; j++) {
2063 + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2064 + ret_val = e1000_validate_nvm_checksum_with_offset(hw,
2065 + nvm_offset);
2066 + if (ret_val != E1000_SUCCESS)
2067 + goto out;
2068 + }
2069 +
2070 +out:
2071 + return (ret_val);
2072 +}
2073 +
2074 +/*
2075 + * e1000_update_nvm_checksum_i350 - Update EEPROM checksum
2076 + * @hw: pointer to the HW structure
2077 + *
2078 + * Updates the EEPROM section checksums for all 4 ports by reading/adding
2079 + * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2080 + * checksum and writes the value to the EEPROM.
2081 + */
2082 +static s32
2083 +e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
2084 +{
2085 + s32 ret_val = E1000_SUCCESS;
2086 + u16 j;
2087 + u16 nvm_offset;
2088 +
2089 + DEBUGFUNC("e1000_update_nvm_checksum_i350");
2090 +
2091 + for (j = 0; j < 4; j++) {
2092 + nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2093 + ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
2094 + if (ret_val != E1000_SUCCESS)
2095 + goto out;
2096 + }
2097 +
2098 +out:
2099 + return (ret_val);
2100 +}
2101 +
2102 +
2103 +
2104 +/*
2105 + * e1000_set_eee_i350 - Enable/disable EEE support
2106 + * @hw: pointer to the HW structure
2107 + *
2108 + * Enable/disable EEE based on setting in dev_spec structure.
2109 + *
2110 + */
2111 +s32
2112 +e1000_set_eee_i350(struct e1000_hw *hw)
2113 +{
2114 +
2115 + s32 ret_val = E1000_SUCCESS;
2116 + u32 ipcnfg, eeer;
2117 +
2118 + DEBUGFUNC("e1000_set_eee_i350");
2119 +
2120 + if ((hw->mac.type < e1000_i350) ||
2121 + (hw->phy.media_type != e1000_media_type_copper))
2122 + goto out;
2123 + ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
2124 + eeer = E1000_READ_REG(hw, E1000_EEER);
2125 +
2126 + /* enable or disable per user setting */
2127 + if (!(hw->dev_spec._82575.eee_disable)) {
2128 + ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2129 + eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2130 + E1000_EEER_LPI_FC);
2131 +
2132 + } else {
2133 + ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2134 + eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2135 + E1000_EEER_LPI_FC);
2136 + }
2137 + E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
2138 + E1000_WRITE_REG(hw, E1000_EEER, eeer);
2139 + ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
2140 + eeer = E1000_READ_REG(hw, E1000_EEER);
2141 +out:
2142 +
2143 + return (ret_val);
1868 2144 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX