3534 Disable EEE support in igb for I350
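This webrev changes the I350 default for Energy Efficient Ethernet (EEE): in e1000_init_mac_params_82575(), dev_spec->eee_disable is now initialized to B_TRUE instead of B_FALSE, so EEE stays off unless it is explicitly re-enabled. The flag itself is only a default; it is consumed later when the EEE registers are programmed. As a rough illustration only, a helper along the lines of the Intel shared code's e1000_set_eee_i350() might look like the sketch below; the function name and the E1000_IPCNFG/E1000_EEER register and bit definitions are assumptions taken from other shared-code revisions and are not part of the diff shown here.

	/*
	 * Illustrative sketch (not part of this webrev): how a shared-code
	 * helper typically consumes dev_spec->eee_disable when programming
	 * the I350 EEE registers.  The e1000_set_eee_i350() name and the
	 * E1000_IPCNFG/E1000_EEER register and bit definitions are assumed
	 * from other versions of the Intel shared code.
	 */
	s32
	e1000_set_eee_i350(struct e1000_hw *hw)
	{
		u32 ipcnfg, eeer;

		DEBUGFUNC("e1000_set_eee_i350");

		/* EEE only applies to i350 copper ports */
		if ((hw->mac.type < e1000_i350) ||
		    (hw->phy.media_type != e1000_media_type_copper))
			return (E1000_SUCCESS);

		ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
		eeer = E1000_READ_REG(hw, E1000_EEER);

		if (!hw->dev_spec._82575.eee_disable) {
			/* Advertise EEE at 100M/1G and enable LPI */
			ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
			    E1000_IPCNFG_EEE_100M_AN);
			eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
			    E1000_EEER_LPI_FC);
		} else {
			/* With the new B_TRUE default this branch is taken */
			ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
			    E1000_IPCNFG_EEE_100M_AN);
			eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
			    E1000_EEER_LPI_FC);
		}

		E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
		E1000_WRITE_REG(hw, E1000_EEER, eeer);

		return (E1000_SUCCESS);
	}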
--- old/usr/src/uts/common/io/igb/igb_82575.c
+++ new/usr/src/uts/common/io/igb/igb_82575.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2007-2012 Intel Corporation. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 + * Copyright 2013, Nexenta Systems, Inc. All rights reserved.
28 29 */
29 30
30 31 /* IntelVersion: 1.146.2.2 v3_3_14_3_BHSW1 */
31 32
32 33 /*
33 34 * 82575EB Gigabit Network Connection
34 35 * 82575EB Gigabit Backplane Connection
35 36 * 82575GB Gigabit Network Connection
36 37 * 82576 Gigabit Network Connection
37 38 * 82576 Quad Port Gigabit Mezzanine Adapter
38 39 */
39 40
40 41 #include "igb_api.h"
41 42
42 43 static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
43 44 static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
44 45 static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
45 46 static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
46 47 static void e1000_release_phy_82575(struct e1000_hw *hw);
47 48 static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
48 49 static void e1000_release_nvm_82575(struct e1000_hw *hw);
49 50 static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
50 51 static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
51 52 static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
52 53 u16 *duplex);
53 54 static s32 e1000_init_hw_82575(struct e1000_hw *hw);
54 55 static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
55 56 static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
56 57 u16 *data);
57 58 static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
58 59 static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
59 60 static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset,
60 61 u16 *data);
61 62 static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset,
62 63 u16 data);
63 64 static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
64 65 bool active);
65 66 static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
66 67 static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
67 68 static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
68 69 static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
69 70 u32 offset, u16 data);
70 71 static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
71 72 static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
72 73 static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
73 74 u16 *speed, u16 *duplex);
74 75 static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
75 76 static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
76 77 static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
77 78 static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
78 79 static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
79 80 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
80 81 static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
81 82 static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
82 83
83 84 static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
84 85 u16 offset);
85 86 static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
86 87 u16 offset);
87 88 static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
88 89 static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
89 90 static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
90 91 static void e1000_clear_vfta_i350(struct e1000_hw *hw);
91 92
92 93 static const u16 e1000_82580_rxpbs_table[] =
93 94 {36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140};
94 95 #define E1000_82580_RXPBS_TABLE_SIZE \
95 96 (sizeof (e1000_82580_rxpbs_table)/sizeof (u16))
96 97
97 98 /*
98 99 * e1000_init_phy_params_82575 - Init PHY func ptrs.
99 100 * @hw: pointer to the HW structure
100 101 */
101 102 static s32
102 103 e1000_init_phy_params_82575(struct e1000_hw *hw)
103 104 {
104 105 struct e1000_phy_info *phy = &hw->phy;
105 106 s32 ret_val = E1000_SUCCESS;
106 107
107 108 DEBUGFUNC("e1000_init_phy_params_82575");
108 109
109 110 if (hw->phy.media_type != e1000_media_type_copper) {
110 111 phy->type = e1000_phy_none;
111 112 goto out;
112 113 }
113 114
114 115 phy->ops.power_up = e1000_power_up_phy_copper;
115 116 phy->ops.power_down = e1000_power_down_phy_copper_82575;
116 117
117 118 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
118 119 phy->reset_delay_us = 100;
119 120
120 121 phy->ops.acquire = e1000_acquire_phy_82575;
121 122 phy->ops.check_reset_block = e1000_check_reset_block_generic;
122 123 phy->ops.commit = e1000_phy_sw_reset_generic;
123 124 phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
124 125 phy->ops.release = e1000_release_phy_82575;
125 126
126 127 if (e1000_sgmii_active_82575(hw)) {
127 128 phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
128 129 phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
129 130 phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
130 131 } else if (hw->mac.type == e1000_82580) {
131 132 phy->ops.reset = e1000_phy_hw_reset_generic;
132 133 phy->ops.read_reg = e1000_read_phy_reg_82580;
133 134 phy->ops.write_reg = e1000_write_phy_reg_82580;
134 135 } else {
135 136 phy->ops.reset = e1000_phy_hw_reset_generic;
136 137 phy->ops.read_reg = e1000_read_phy_reg_igp;
137 138 phy->ops.write_reg = e1000_write_phy_reg_igp;
138 139 }
139 140
140 141 /* Set phy->phy_addr and phy->id. */
141 142 ret_val = e1000_get_phy_id_82575(hw);
142 143
143 144 /* Verify phy id and set remaining function pointers */
144 145 switch (phy->id) {
145 146 case M88E1111_I_PHY_ID:
146 147 phy->type = e1000_phy_m88;
147 148 phy->ops.check_polarity = e1000_check_polarity_m88;
148 149 phy->ops.get_info = e1000_get_phy_info_m88;
149 150 phy->ops.get_cable_length = e1000_get_cable_length_m88;
150 151 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
151 152 break;
152 153 case IGP03E1000_E_PHY_ID:
153 154 case IGP04E1000_E_PHY_ID:
154 155 phy->type = e1000_phy_igp_3;
155 156 phy->ops.check_polarity = e1000_check_polarity_igp;
156 157 phy->ops.get_info = e1000_get_phy_info_igp;
157 158 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
158 159 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
159 160 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
160 161 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
161 162 break;
162 163 case I82580_I_PHY_ID:
163 164 case I350_I_PHY_ID:
164 165 phy->type = e1000_phy_82580;
165 166 phy->ops.check_polarity = e1000_check_polarity_82577;
166 167 phy->ops.force_speed_duplex =
167 168 e1000_phy_force_speed_duplex_82577;
168 169 phy->ops.get_cable_length = e1000_get_cable_length_82577;
169 170 phy->ops.get_info = e1000_get_phy_info_82577;
170 171 break;
171 172 default:
172 173 ret_val = -E1000_ERR_PHY;
173 174 goto out;
174 175 }
175 176
176 177 out:
177 178 return (ret_val);
178 179 }
179 180
180 181 /*
181 182 * e1000_init_nvm_params_82575 - Init NVM func ptrs.
182 183 * @hw: pointer to the HW structure
183 184 */
184 185 static s32
185 186 e1000_init_nvm_params_82575(struct e1000_hw *hw)
186 187 {
187 188 struct e1000_nvm_info *nvm = &hw->nvm;
188 189 u32 eecd = E1000_READ_REG(hw, E1000_EECD);
189 190 u16 size;
190 191
191 192 DEBUGFUNC("e1000_init_nvm_params_82575");
192 193
193 194 nvm->opcode_bits = 8;
194 195 nvm->delay_usec = 1;
195 196 switch (nvm->override) {
196 197 case e1000_nvm_override_spi_large:
197 198 nvm->page_size = 32;
198 199 nvm->address_bits = 16;
199 200 break;
200 201 case e1000_nvm_override_spi_small:
201 202 nvm->page_size = 8;
202 203 nvm->address_bits = 8;
203 204 break;
204 205 default:
205 206 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
206 207 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
207 208 break;
208 209 }
209 210
210 211 nvm->type = e1000_nvm_eeprom_spi;
211 212
212 213 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
213 214 E1000_EECD_SIZE_EX_SHIFT);
214 215
215 216 /*
216 217 * Added to a constant, "size" becomes the left-shift value
217 218 * for setting word_size.
218 219 */
219 220 size += NVM_WORD_SIZE_BASE_SHIFT;
220 221
221 222 /* EEPROM access above 16k is unsupported */
222 223 if (size > 14)
223 224 size = 14;
224 225 nvm->word_size = 1 << size;
225 226
226 227 /* Function Pointers */
227 228 nvm->ops.acquire = e1000_acquire_nvm_82575;
228 229 nvm->ops.read = e1000_read_nvm_eerd;
229 230 nvm->ops.release = e1000_release_nvm_82575;
230 231 nvm->ops.update = e1000_update_nvm_checksum_generic;
231 232 nvm->ops.valid_led_default = e1000_valid_led_default_82575;
232 233 nvm->ops.validate = e1000_validate_nvm_checksum_generic;
233 234 nvm->ops.write = e1000_write_nvm_spi;
234 235
235 236 	/* override generic family function pointers for specific descendants */
236 237 switch (hw->mac.type) {
237 238 case e1000_i350:
238 239 nvm->ops.validate = e1000_validate_nvm_checksum_i350;
239 240 nvm->ops.update = e1000_update_nvm_checksum_i350;
240 241 break;
241 242 default:
242 243 break;
243 244 }
244 245
245 246
246 247 return (E1000_SUCCESS);
247 248 }
248 249
249 250 /*
250 251 * e1000_init_mac_params_82575 - Init MAC func ptrs.
251 252 * @hw: pointer to the HW structure
252 253 */
253 254 static s32
254 255 e1000_init_mac_params_82575(struct e1000_hw *hw)
255 256 {
256 257 struct e1000_mac_info *mac = &hw->mac;
257 258 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
258 259 u32 ctrl_ext = 0;
259 260
260 261 DEBUGFUNC("e1000_init_mac_params_82575");
261 262
262 263 /* Set media type */
263 264 /*
264 265 * The 82575 uses bits 22:23 for link mode. The mode can be changed
265 266 * based on the EEPROM. We cannot rely upon device ID. There
266 267 * is no distinguishable difference between fiber and internal
267 268 * SerDes mode on the 82575. There can be an external PHY attached
268 269 * on the SGMII interface. For this, we'll set sgmii_active to true.
269 270 */
270 271 hw->phy.media_type = e1000_media_type_copper;
271 272 dev_spec->sgmii_active = false;
272 273
273 274 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
274 275 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
275 276 case E1000_CTRL_EXT_LINK_MODE_SGMII:
276 277 dev_spec->sgmii_active = true;
277 278 ctrl_ext |= E1000_CTRL_I2C_ENA;
278 279 break;
279 280 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
280 281 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
281 282 hw->phy.media_type = e1000_media_type_internal_serdes;
282 283 ctrl_ext |= E1000_CTRL_I2C_ENA;
283 284 break;
284 285 default:
285 286 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
286 287 break;
287 288 }
288 289
289 290 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
290 291
291 292 /*
292 293 * if using i2c make certain the MDICNFG register is cleared to prevent
293 294 * communications from being misrouted to the mdic registers
294 295 */
295 296 if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
296 297 E1000_WRITE_REG(hw, E1000_MDICNFG, 0);
297 298
298 299 /* Set mta register count */
299 300 mac->mta_reg_count = 128;
300 301 /* Set uta register count */
301 302 mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
302 303 /* Set rar entry count */
303 304 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
304 305 if (mac->type == e1000_82576)
305 306 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
306 307 if (mac->type == e1000_82580)
307 308 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
308 309 if (mac->type == e1000_i350) {
309 310 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
310 - /* Enable EEE default settings for i350 */
311 - dev_spec->eee_disable = B_FALSE;
311 + /* Disable EEE default settings for i350 */
312 + dev_spec->eee_disable = B_TRUE;
312 313 }
313 314 /* Set if part includes ASF firmware */
314 315 mac->asf_firmware_present = true;
315 316 /* Set if manageability features are enabled. */
316 317 mac->arc_subsystem_valid =
317 318 (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
318 319 ? true : false;
319 320
320 321 /* Function pointers */
321 322
322 323 /* bus type/speed/width */
323 324 mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
324 325 /* reset */
325 326 if (mac->type == e1000_82580)
326 327 mac->ops.reset_hw = e1000_reset_hw_82580;
327 328 else
328 329 mac->ops.reset_hw = e1000_reset_hw_82575;
329 330 /* hw initialization */
330 331 mac->ops.init_hw = e1000_init_hw_82575;
331 332 /* link setup */
332 333 mac->ops.setup_link = e1000_setup_link_generic;
333 334 /* physical interface link setup */
334 335 mac->ops.setup_physical_interface =
335 336 (hw->phy.media_type == e1000_media_type_copper)
336 337 ? e1000_setup_copper_link_82575
337 338 : e1000_setup_serdes_link_82575;
338 339 /* physical interface shutdown */
339 340 mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
340 341 /* check for link */
341 342 mac->ops.check_for_link = e1000_check_for_link_82575;
342 343 /* receive address register setting */
343 344 mac->ops.rar_set = e1000_rar_set_generic;
344 345 /* read mac address */
345 346 mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
346 347 /* multicast address update */
347 348 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
348 349
349 350 if (hw->mac.type == e1000_i350) {
350 351 /* writing VFTA */
351 352 mac->ops.write_vfta = e1000_write_vfta_i350;
352 353 /* clearing VFTA */
353 354 mac->ops.clear_vfta = e1000_clear_vfta_i350;
354 355 } else {
355 356 /* writing VFTA */
356 357 mac->ops.write_vfta = e1000_write_vfta_generic;
357 358 /* clearing VFTA */
358 359 mac->ops.clear_vfta = e1000_clear_vfta_generic;
359 360 }
360 361 /* setting MTA */
361 362 mac->ops.mta_set = e1000_mta_set_generic;
362 363 /* ID LED init */
363 364 mac->ops.id_led_init = e1000_id_led_init_generic;
364 365 /* blink LED */
365 366 mac->ops.blink_led = e1000_blink_led_generic;
366 367 /* setup LED */
367 368 mac->ops.setup_led = e1000_setup_led_generic;
368 369 /* cleanup LED */
369 370 mac->ops.cleanup_led = e1000_cleanup_led_generic;
370 371 /* turn on/off LED */
371 372 mac->ops.led_on = e1000_led_on_generic;
372 373 mac->ops.led_off = e1000_led_off_generic;
373 374 /* clear hardware counters */
374 375 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
375 376 /* link info */
376 377 mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
377 378
378 379 /* set lan id for port to determine which phy lock to use */
379 380 hw->mac.ops.set_lan_id(hw);
380 381
381 382 return (E1000_SUCCESS);
382 383 }
383 384
384 385 /*
385 386 * e1000_init_function_pointers_82575 - Init func ptrs.
386 387 * @hw: pointer to the HW structure
387 388 *
388 389 * Called to initialize all function pointers and parameters.
389 390 */
390 391 void
391 392 e1000_init_function_pointers_82575(struct e1000_hw *hw)
392 393 {
393 394 DEBUGFUNC("e1000_init_function_pointers_82575");
394 395
395 396 hw->mac.ops.init_params = e1000_init_mac_params_82575;
396 397 hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
397 398 hw->phy.ops.init_params = e1000_init_phy_params_82575;
398 399 }
399 400
400 401 /*
401 402 * e1000_acquire_phy_82575 - Acquire rights to access PHY
402 403 * @hw: pointer to the HW structure
403 404 *
404 405 * Acquire access rights to the correct PHY.
405 406 */
406 407 static s32
407 408 e1000_acquire_phy_82575(struct e1000_hw *hw)
408 409 {
409 410 u16 mask = E1000_SWFW_PHY0_SM;
410 411
411 412 DEBUGFUNC("e1000_acquire_phy_82575");
412 413
413 414 if (hw->bus.func == E1000_FUNC_1)
414 415 mask = E1000_SWFW_PHY1_SM;
415 416 else if (hw->bus.func == E1000_FUNC_2)
416 417 mask = E1000_SWFW_PHY2_SM;
417 418 else if (hw->bus.func == E1000_FUNC_3)
418 419 mask = E1000_SWFW_PHY3_SM;
419 420
420 421 return (e1000_acquire_swfw_sync_82575(hw, mask));
421 422 }
422 423
423 424 /*
424 425 * e1000_release_phy_82575 - Release rights to access PHY
425 426 * @hw: pointer to the HW structure
426 427 *
427 428 * A wrapper to release access rights to the correct PHY.
428 429 */
429 430 static void
430 431 e1000_release_phy_82575(struct e1000_hw *hw)
431 432 {
432 433 u16 mask = E1000_SWFW_PHY0_SM;
433 434
434 435 DEBUGFUNC("e1000_release_phy_82575");
435 436
436 437 if (hw->bus.func == E1000_FUNC_1)
437 438 mask = E1000_SWFW_PHY1_SM;
438 439 else if (hw->bus.func == E1000_FUNC_2)
439 440 mask = E1000_SWFW_PHY2_SM;
440 441 else if (hw->bus.func == E1000_FUNC_3)
441 442 mask = E1000_SWFW_PHY3_SM;
442 443
443 444 e1000_release_swfw_sync_82575(hw, mask);
444 445 }
445 446
446 447 /*
447 448 * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
448 449 * @hw: pointer to the HW structure
449 450 * @offset: register offset to be read
450 451 * @data: pointer to the read data
451 452 *
452 453 * Reads the PHY register at offset using the serial gigabit media independent
453 454 * interface and stores the retrieved information in data.
454 455 */
455 456 static s32
456 457 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 *data)
457 458 {
458 459 s32 ret_val = -E1000_ERR_PARAM;
459 460
460 461 DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
461 462
462 463 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
463 464 DEBUGOUT1("PHY Address %u is out of range\n", offset);
464 465 goto out;
465 466 }
466 467
467 468 ret_val = hw->phy.ops.acquire(hw);
468 469 if (ret_val)
469 470 goto out;
470 471
471 472 ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
472 473
473 474 hw->phy.ops.release(hw);
474 475
475 476 out:
476 477 return (ret_val);
477 478 }
478 479
479 480 /*
480 481 * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
481 482 * @hw: pointer to the HW structure
482 483 * @offset: register offset to write to
483 484 * @data: data to write at register offset
484 485 *
485 486 * Writes the data to PHY register at the offset using the serial gigabit
486 487 * media independent interface.
487 488 */
488 489 static s32
489 490 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 data)
490 491 {
491 492 s32 ret_val = -E1000_ERR_PARAM;
492 493
493 494 DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
494 495
495 496 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
496 497 DEBUGOUT1("PHY Address %d is out of range\n", offset);
497 498 goto out;
498 499 }
499 500
500 501 ret_val = hw->phy.ops.acquire(hw);
501 502 if (ret_val)
502 503 goto out;
503 504
504 505 ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
505 506
506 507 hw->phy.ops.release(hw);
507 508
508 509 out:
509 510 return (ret_val);
510 511 }
511 512
512 513 /*
513 514 * e1000_get_phy_id_82575 - Retrieve PHY addr and id
514 515 * @hw: pointer to the HW structure
515 516 *
516 517 * Retrieves the PHY address and ID for both PHY's which do and do not use
517 518  * sgmii interface.
518 519 */
519 520 static s32
520 521 e1000_get_phy_id_82575(struct e1000_hw *hw)
521 522 {
522 523 struct e1000_phy_info *phy = &hw->phy;
523 524 s32 ret_val = E1000_SUCCESS;
524 525 u16 phy_id;
525 526 u32 ctrl_ext;
526 527
527 528 DEBUGFUNC("e1000_get_phy_id_82575");
528 529
529 530 /*
530 531 * For SGMII PHYs, we try the list of possible addresses until
531 532 * we find one that works. For non-SGMII PHYs
532 533 * (e.g. integrated copper PHYs), an address of 1 should
533 534 * work. The result of this function should mean phy->phy_addr
534 535 * and phy->id are set correctly.
535 536 */
536 537 if (!e1000_sgmii_active_82575(hw)) {
537 538 phy->addr = 1;
538 539 ret_val = e1000_get_phy_id(hw);
539 540 goto out;
540 541 }
541 542
542 543 /* Power on sgmii phy if it is disabled */
543 544 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
544 545 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
545 546 ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
546 547 E1000_WRITE_FLUSH(hw);
547 548 msec_delay(300);
548 549
549 550 /*
550 551 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
551 552 * Therefore, we need to test 1-7
552 553 */
553 554 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
554 555 ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
555 556 if (ret_val == E1000_SUCCESS) {
556 557 DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
557 558 phy_id,
558 559 phy->addr);
559 560 /*
560 561 * At the time of this writing, The M88 part is
561 562 * the only supported SGMII PHY product.
562 563 */
563 564 if (phy_id == M88_VENDOR)
564 565 break;
565 566 } else {
566 567 DEBUGOUT1("PHY address %u was unreadable\n",
567 568 phy->addr);
568 569 }
569 570 }
570 571
571 572 /* A valid PHY type couldn't be found. */
572 573 if (phy->addr == 8) {
573 574 phy->addr = 0;
574 575 ret_val = -E1000_ERR_PHY;
575 576 } else {
576 577 ret_val = e1000_get_phy_id(hw);
577 578 }
578 579
579 580 /* restore previous sfp cage power state */
580 581 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
581 582
582 583 out:
583 584 return (ret_val);
584 585 }
585 586
586 587 /*
587 588 * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
588 589 * @hw: pointer to the HW structure
589 590 *
590 591 * Resets the PHY using the serial gigabit media independent interface.
591 592 */
592 593 static s32
593 594 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
594 595 {
595 596 s32 ret_val = E1000_SUCCESS;
596 597
597 598 DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
598 599
599 600 /*
600 601 * This isn't a true "hard" reset, but is the only reset
601 602 * available to us at this time.
602 603 */
603 604
604 605 DEBUGOUT("Soft resetting SGMII attached PHY...\n");
605 606
606 607 if (!(hw->phy.ops.write_reg))
607 608 goto out;
608 609
609 610 /*
610 611 	 * SFP documentation requires the following to configure the SFP module
611 612 * to work on SGMII. No further documentation is given.
612 613 */
613 614 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
614 615 if (ret_val)
615 616 goto out;
616 617
617 618 ret_val = hw->phy.ops.commit(hw);
618 619
619 620 out:
620 621 return (ret_val);
621 622 }
622 623
623 624 /*
624 625 * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
625 626 * @hw: pointer to the HW structure
626 627 * @active: true to enable LPLU, false to disable
627 628 *
628 629 * Sets the LPLU D0 state according to the active flag. When
629 630 * activating LPLU this function also disables smart speed
630 631 * and vice versa. LPLU will not be activated unless the
631 632 * device autonegotiation advertisement meets standards of
632 633 * either 10 or 10/100 or 10/100/1000 at all duplexes.
633 634 * This is a function pointer entry point only called by
634 635 * PHY setup routines.
635 636 */
636 637 static s32
637 638 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
638 639 {
639 640 struct e1000_phy_info *phy = &hw->phy;
640 641 s32 ret_val = E1000_SUCCESS;
641 642 u16 data;
642 643
643 644 DEBUGFUNC("e1000_set_d0_lplu_state_82575");
644 645
645 646 if (!(hw->phy.ops.read_reg))
646 647 goto out;
647 648
648 649 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
649 650 if (ret_val)
650 651 goto out;
651 652
652 653 if (active) {
653 654 data |= IGP02E1000_PM_D0_LPLU;
654 655 ret_val = phy->ops.write_reg(hw,
655 656 IGP02E1000_PHY_POWER_MGMT,
656 657 data);
657 658 if (ret_val)
658 659 goto out;
659 660
660 661 /* When LPLU is enabled, we should disable SmartSpeed */
661 662 ret_val = phy->ops.read_reg(hw,
662 663 IGP01E1000_PHY_PORT_CONFIG,
663 664 &data);
664 665 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
665 666 ret_val = phy->ops.write_reg(hw,
666 667 IGP01E1000_PHY_PORT_CONFIG,
667 668 data);
668 669 if (ret_val)
669 670 goto out;
670 671 } else {
671 672 data &= ~IGP02E1000_PM_D0_LPLU;
672 673 ret_val = phy->ops.write_reg(hw,
673 674 IGP02E1000_PHY_POWER_MGMT,
674 675 data);
675 676 /*
676 677 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
677 678 * during Dx states where the power conservation is most
678 679 * important. During driver activity we should enable
679 680 * SmartSpeed, so performance is maintained.
680 681 */
681 682 if (phy->smart_speed == e1000_smart_speed_on) {
682 683 ret_val = phy->ops.read_reg(hw,
683 684 IGP01E1000_PHY_PORT_CONFIG,
684 685 &data);
685 686 if (ret_val)
686 687 goto out;
687 688
688 689 data |= IGP01E1000_PSCFR_SMART_SPEED;
689 690 ret_val = phy->ops.write_reg(hw,
690 691 IGP01E1000_PHY_PORT_CONFIG,
691 692 data);
692 693 if (ret_val)
693 694 goto out;
694 695 } else if (phy->smart_speed == e1000_smart_speed_off) {
695 696 ret_val = phy->ops.read_reg(hw,
696 697 IGP01E1000_PHY_PORT_CONFIG,
697 698 &data);
698 699 if (ret_val)
699 700 goto out;
700 701
701 702 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
702 703 ret_val = phy->ops.write_reg(hw,
703 704 IGP01E1000_PHY_PORT_CONFIG,
704 705 data);
705 706 if (ret_val)
706 707 goto out;
707 708 }
708 709 }
709 710
710 711 out:
711 712 return (ret_val);
712 713 }
713 714
714 715 /*
715 716 * e1000_acquire_nvm_82575 - Request for access to EEPROM
716 717 * @hw: pointer to the HW structure
717 718 *
718 719 * Acquire the necessary semaphores for exclusive access to the EEPROM.
719 720 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
720 721 * Return successful if access grant bit set, else clear the request for
721 722 * EEPROM access and return -E1000_ERR_NVM (-1).
722 723 */
723 724 static s32
724 725 e1000_acquire_nvm_82575(struct e1000_hw *hw)
725 726 {
726 727 s32 ret_val;
727 728
728 729 DEBUGFUNC("e1000_acquire_nvm_82575");
729 730
730 731 ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
731 732 if (ret_val)
732 733 goto out;
733 734
734 735 /*
735 736 * Check if there is some access
736 737 * error this access may hook on
737 738 */
738 739 if (hw->mac.type == e1000_i350) {
739 740 u32 eecd = E1000_READ_REG(hw, E1000_EECD);
740 741 if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
741 742 E1000_EECD_TIMEOUT)) {
742 743 /* Clear all access error flags */
743 744 E1000_WRITE_REG(hw, E1000_EECD, eecd |
744 745 E1000_EECD_ERROR_CLR);
745 746 DEBUGOUT("Nvm bit banging access error "
746 747 "detected and cleared.\n");
747 748 }
748 749 }
749 750
750 751 ret_val = e1000_acquire_nvm_generic(hw);
751 752
752 753 if (ret_val)
753 754 e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
754 755
755 756 out:
756 757 return (ret_val);
757 758 }
758 759
759 760 /*
760 761 * e1000_release_nvm_82575 - Release exclusive access to EEPROM
761 762 * @hw: pointer to the HW structure
762 763 *
763 764 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
764 765 * then release the semaphores acquired.
765 766 */
766 767 static void
767 768 e1000_release_nvm_82575(struct e1000_hw *hw)
768 769 {
769 770 DEBUGFUNC("e1000_release_nvm_82575");
770 771
771 772 e1000_release_nvm_generic(hw);
772 773 e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
773 774 }
774 775
775 776 /*
776 777 * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
777 778 * @hw: pointer to the HW structure
778 779 * @mask: specifies which semaphore to acquire
779 780 *
780 781 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
781 782 * will also specify which port we're acquiring the lock for.
782 783 */
783 784 static s32
784 785 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
785 786 {
786 787 u32 swfw_sync;
787 788 u32 swmask = mask;
788 789 u32 fwmask = mask << 16;
789 790 s32 ret_val = E1000_SUCCESS;
790 791 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
791 792
792 793 DEBUGFUNC("e1000_acquire_swfw_sync_82575");
793 794
794 795 while (i < timeout) {
795 796 if (e1000_get_hw_semaphore_generic(hw)) {
796 797 ret_val = -E1000_ERR_SWFW_SYNC;
797 798 goto out;
798 799 }
799 800
800 801 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
801 802 if (!(swfw_sync & (fwmask | swmask)))
802 803 break;
803 804
804 805 /*
805 806 * Firmware currently using resource (fwmask)
806 807 * or other software thread using resource (swmask)
807 808 */
808 809 e1000_put_hw_semaphore_generic(hw);
809 810 msec_delay_irq(5);
810 811 i++;
811 812 }
812 813
813 814 if (i == timeout) {
814 815 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
815 816 ret_val = -E1000_ERR_SWFW_SYNC;
816 817 goto out;
817 818 }
818 819
819 820 swfw_sync |= swmask;
820 821 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
821 822
822 823 e1000_put_hw_semaphore_generic(hw);
823 824
824 825 out:
825 826 return (ret_val);
826 827 }
827 828
828 829 /*
829 830 * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
830 831 * @hw: pointer to the HW structure
831 832 * @mask: specifies which semaphore to acquire
832 833 *
833 834 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
834 835 * will also specify which port we're releasing the lock for.
835 836 */
836 837 static void
837 838 e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
838 839 {
839 840 u32 swfw_sync;
840 841
841 842 DEBUGFUNC("e1000_release_swfw_sync_82575");
842 843
843 844 while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) {
844 845 /* Empty */
845 846 }
846 847
847 848 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
848 849 swfw_sync &= ~mask;
849 850 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
850 851
851 852 e1000_put_hw_semaphore_generic(hw);
852 853 }
853 854
854 855 /*
855 856 * e1000_get_cfg_done_82575 - Read config done bit
856 857 * @hw: pointer to the HW structure
857 858 *
858 859 * Read the management control register for the config done bit for
859 860 * completion status. NOTE: silicon which is EEPROM-less will fail trying
860 861 * to read the config done bit, so an error is *ONLY* logged and returns
861 862 * E1000_SUCCESS. If we were to return with error, EEPROM-less silicon
862 863 * would not be able to be reset or change link.
863 864 */
864 865 static s32
865 866 e1000_get_cfg_done_82575(struct e1000_hw *hw)
866 867 {
867 868 s32 timeout = PHY_CFG_TIMEOUT;
868 869 s32 ret_val = E1000_SUCCESS;
869 870 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
870 871
871 872 DEBUGFUNC("e1000_get_cfg_done_82575");
872 873
873 874 if (hw->bus.func == E1000_FUNC_1)
874 875 mask = E1000_NVM_CFG_DONE_PORT_1;
875 876 else if (hw->bus.func == E1000_FUNC_2)
876 877 mask = E1000_NVM_CFG_DONE_PORT_2;
877 878 else if (hw->bus.func == E1000_FUNC_3)
878 879 mask = E1000_NVM_CFG_DONE_PORT_3;
879 880
880 881 while (timeout) {
881 882 if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
882 883 break;
883 884 msec_delay(1);
884 885 timeout--;
885 886 }
886 887 if (!timeout)
887 888 DEBUGOUT("MNG configuration cycle has not completed.\n");
888 889
889 890 /* If EEPROM is not marked present, init the PHY manually */
890 891 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
891 892 (hw->phy.type == e1000_phy_igp_3))
892 893 (void) e1000_phy_init_script_igp3(hw);
893 894
894 895 return (ret_val);
895 896 }
896 897
897 898 /*
898 899 * e1000_get_link_up_info_82575 - Get link speed/duplex info
899 900 * @hw: pointer to the HW structure
900 901 * @speed: stores the current speed
901 902 * @duplex: stores the current duplex
902 903 *
903 904 * This is a wrapper function, if using the serial gigabit media independent
904 905 * interface, use PCS to retrieve the link speed and duplex information.
905 906 * Otherwise, use the generic function to get the link speed and duplex info.
906 907 */
907 908 static s32
908 909 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, u16 *duplex)
909 910 {
910 911 s32 ret_val;
911 912
912 913 DEBUGFUNC("e1000_get_link_up_info_82575");
913 914
914 915 if (hw->phy.media_type != e1000_media_type_copper)
915 916 ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
916 917 duplex);
917 918 else
918 919 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
919 920 duplex);
920 921
921 922 return (ret_val);
922 923 }
923 924
924 925 /*
925 926 * e1000_check_for_link_82575 - Check for link
926 927 * @hw: pointer to the HW structure
927 928 *
928 929 * If sgmii is enabled, then use the pcs register to determine link, otherwise
929 930 * use the generic interface for determining link.
930 931 */
931 932 static s32
932 933 e1000_check_for_link_82575(struct e1000_hw *hw)
933 934 {
934 935 s32 ret_val;
935 936 u16 speed, duplex;
936 937
937 938 DEBUGFUNC("e1000_check_for_link_82575");
938 939
939 940 /* SGMII link check is done through the PCS register. */
940 941 if (hw->phy.media_type != e1000_media_type_copper) {
941 942 ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
942 943 &duplex);
943 944 /*
944 945 * Use this flag to determine if link needs to be checked or
945 946 * not. If we have link clear the flag so that we do not
946 947 * continue to check for link.
947 948 */
948 949 hw->mac.get_link_status = !hw->mac.serdes_has_link;
949 950 } else {
950 951 ret_val = e1000_check_for_copper_link_generic(hw);
951 952 }
952 953
953 954 return (ret_val);
954 955 }
955 956
956 957 /*
957 958 * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
958 959 * @hw: pointer to the HW structure
959 960 * @speed: stores the current speed
960 961 * @duplex: stores the current duplex
961 962 *
962 963 * Using the physical coding sub-layer (PCS), retrieve the current speed and
963 964 * duplex, then store the values in the pointers provided.
964 965 */
965 966 static s32
966 967 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
967 968 u16 *speed, u16 *duplex)
968 969 {
969 970 struct e1000_mac_info *mac = &hw->mac;
970 971 u32 pcs;
971 972
972 973 DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
973 974
974 975 /* Set up defaults for the return values of this function */
975 976 mac->serdes_has_link = false;
976 977 *speed = 0;
977 978 *duplex = 0;
978 979
979 980 /*
980 981 * Read the PCS Status register for link state. For non-copper mode,
981 982 * the status register is not accurate. The PCS status register is
982 983 * used instead.
983 984 */
984 985 pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
985 986
986 987 /*
987 988 * The link up bit determines when link is up on autoneg. The sync ok
988 989 * gets set once both sides sync up and agree upon link. Stable link
989 990 * can be determined by checking for both link up and link sync ok
990 991 */
991 992 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
992 993 mac->serdes_has_link = true;
993 994
994 995 /* Detect and store PCS speed */
995 996 if (pcs & E1000_PCS_LSTS_SPEED_1000) {
996 997 *speed = SPEED_1000;
997 998 } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
998 999 *speed = SPEED_100;
999 1000 } else {
1000 1001 *speed = SPEED_10;
1001 1002 }
1002 1003
1003 1004 /* Detect and store PCS duplex */
1004 1005 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
1005 1006 *duplex = FULL_DUPLEX;
1006 1007 } else {
1007 1008 *duplex = HALF_DUPLEX;
1008 1009 }
1009 1010 }
1010 1011
1011 1012 return (E1000_SUCCESS);
1012 1013 }
1013 1014
1014 1015 /*
1015 1016 * e1000_shutdown_serdes_link_82575 - Remove link during power down
1016 1017 * @hw: pointer to the HW structure
1017 1018 *
1018 1019 * In the case of serdes shut down sfp and PCS on driver unload
1019 1020 * when management pass thru is not enabled.
1020 1021 */
1021 1022 void
1022 1023 e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
1023 1024 {
1024 1025 u32 reg;
1025 1026 u16 eeprom_data = 0;
1026 1027
1027 1028 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1028 1029 !e1000_sgmii_active_82575(hw))
1029 1030 return;
1030 1031
1031 1032 if (hw->bus.func == E1000_FUNC_0)
1032 1033 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1033 1034 else if (hw->mac.type == e1000_82580)
1034 1035 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1035 1036 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1036 1037 &eeprom_data);
1037 1038 else if (hw->bus.func == E1000_FUNC_1)
1038 1039 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1039 1040
1040 1041 /*
1041 1042 * If APM is not enabled in the EEPROM and management interface is
1042 1043 * not enabled, then power down.
1043 1044 */
1044 1045 if (!(eeprom_data & E1000_NVM_APME_82575) &&
1045 1046 !e1000_enable_mng_pass_thru(hw)) {
1046 1047 /* Disable PCS to turn off link */
1047 1048 reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
1048 1049 reg &= ~E1000_PCS_CFG_PCS_EN;
1049 1050 E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
1050 1051
1051 1052 /* shutdown the laser */
1052 1053 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1053 1054 reg |= E1000_CTRL_EXT_SDP3_DATA;
1054 1055 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1055 1056
1056 1057 /* flush the write to verify completion */
1057 1058 E1000_WRITE_FLUSH(hw);
1058 1059 msec_delay(1);
1059 1060 }
1060 1061 }
1061 1062
1062 1063 /*
1063 1064 * e1000_reset_hw_82575 - Reset hardware
1064 1065 * @hw: pointer to the HW structure
1065 1066 *
1066 1067 * This resets the hardware into a known state.
1067 1068 */
1068 1069 static s32
1069 1070 e1000_reset_hw_82575(struct e1000_hw *hw)
1070 1071 {
1071 1072 u32 ctrl;
1072 1073 s32 ret_val;
1073 1074
1074 1075 DEBUGFUNC("e1000_reset_hw_82575");
1075 1076
1076 1077 /*
1077 1078 * Prevent the PCI-E bus from sticking if there is no TLP connection
1078 1079 * on the last TLP read/write transaction when MAC is reset.
1079 1080 */
1080 1081 ret_val = e1000_disable_pcie_master_generic(hw);
1081 1082 if (ret_val) {
1082 1083 DEBUGOUT("PCI-E Master disable polling has failed.\n");
1083 1084 }
1084 1085
1085 1086 /* set the completion timeout for interface */
1086 1087 ret_val = e1000_set_pcie_completion_timeout(hw);
1087 1088 if (ret_val) {
1088 1089 DEBUGOUT("PCI-E Set completion timeout has failed.\n");
1089 1090 }
1090 1091
1091 1092 DEBUGOUT("Masking off all interrupts\n");
1092 1093 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1093 1094
1094 1095 E1000_WRITE_REG(hw, E1000_RCTL, 0);
1095 1096 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
1096 1097 E1000_WRITE_FLUSH(hw);
1097 1098
1098 1099 msec_delay(10);
1099 1100
1100 1101 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1101 1102
1102 1103 DEBUGOUT("Issuing a global reset to MAC\n");
1103 1104 E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
1104 1105
1105 1106 ret_val = e1000_get_auto_rd_done_generic(hw);
1106 1107 if (ret_val) {
1107 1108 /*
1108 1109 * When auto config read does not complete, do not
1109 1110 * return with an error. This can happen in situations
1110 1111 * where there is no eeprom and prevents getting link.
1111 1112 */
1112 1113 DEBUGOUT("Auto Read Done did not complete\n");
1113 1114 }
1114 1115
1115 1116 /* If EEPROM is not present, run manual init scripts */
1116 1117 if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
1117 1118 (void) e1000_reset_init_script_82575(hw);
1118 1119
1119 1120 /* Clear any pending interrupt events. */
1120 1121 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1121 1122 (void) E1000_READ_REG(hw, E1000_ICR);
1122 1123
1123 1124 /* Install any alternate MAC address into RAR0 */
1124 1125 ret_val = e1000_check_alt_mac_addr_generic(hw);
1125 1126
1126 1127 return (ret_val);
1127 1128 }
1128 1129
1129 1130 /*
1130 1131 * e1000_init_hw_82575 - Initialize hardware
1131 1132 * @hw: pointer to the HW structure
1132 1133 *
1133 1134 * This inits the hardware readying it for operation.
1134 1135 */
1135 1136 static s32
1136 1137 e1000_init_hw_82575(struct e1000_hw *hw)
1137 1138 {
1138 1139 struct e1000_mac_info *mac = &hw->mac;
1139 1140 s32 ret_val;
1140 1141 u16 i, rar_count = mac->rar_entry_count;
1141 1142
1142 1143 DEBUGFUNC("e1000_init_hw_82575");
1143 1144
1144 1145 /* Initialize identification LED */
1145 1146 ret_val = mac->ops.id_led_init(hw);
1146 1147 if (ret_val) {
1147 1148 DEBUGOUT("Error initializing identification LED\n");
1148 1149 /* This is not fatal and we should not stop init due to this */
1149 1150 }
1150 1151
1151 1152 /* Disabling VLAN filtering */
1152 1153 DEBUGOUT("Initializing the IEEE VLAN\n");
1153 1154 mac->ops.clear_vfta(hw);
1154 1155
1155 1156 /* Setup the receive address */
1156 1157 e1000_init_rx_addrs_generic(hw, rar_count);
1157 1158 /* Zero out the Multicast HASH table */
1158 1159 DEBUGOUT("Zeroing the MTA\n");
1159 1160 for (i = 0; i < mac->mta_reg_count; i++)
1160 1161 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
1161 1162
1162 1163 /* Zero out the Unicast HASH table */
1163 1164 DEBUGOUT("Zeroing the UTA\n");
1164 1165 for (i = 0; i < mac->uta_reg_count; i++)
1165 1166 E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
1166 1167
1167 1168 /* Setup link and flow control */
1168 1169 ret_val = mac->ops.setup_link(hw);
1169 1170
1170 1171 /*
1171 1172 * Clear all of the statistics registers (clear on read). It is
1172 1173 * important that we do this after we have tried to establish link
1173 1174 * because the symbol error count will increment wildly if there
1174 1175 * is no link.
1175 1176 */
1176 1177 e1000_clear_hw_cntrs_82575(hw);
1177 1178
1178 1179 return (ret_val);
1179 1180 }
1180 1181
1181 1182 /*
1182 1183 * e1000_setup_copper_link_82575 - Configure copper link settings
1183 1184 * @hw: pointer to the HW structure
1184 1185 *
1185 1186 * Configures the link for auto-neg or forced speed and duplex. Then we check
1186 1187 * for link, once link is established calls to configure collision distance
1187 1188 * and flow control are called.
1188 1189 */
1189 1190 static s32
1190 1191 e1000_setup_copper_link_82575(struct e1000_hw *hw)
1191 1192 {
1192 1193 u32 ctrl;
1193 1194 s32 ret_val;
1194 1195
1195 1196 DEBUGFUNC("e1000_setup_copper_link_82575");
1196 1197
1197 1198 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1198 1199 ctrl |= E1000_CTRL_SLU;
1199 1200 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1200 1201 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1201 1202
1202 1203 ret_val = e1000_setup_serdes_link_82575(hw);
1203 1204 if (ret_val)
1204 1205 goto out;
1205 1206
1206 1207 if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
1207 1208 /* allow time for SFP cage time to power up phy */
1208 1209 msec_delay(300);
1209 1210
1210 1211 ret_val = hw->phy.ops.reset(hw);
1211 1212 if (ret_val) {
1212 1213 DEBUGOUT("Error resetting the PHY.\n");
1213 1214 goto out;
1214 1215 }
1215 1216 }
1216 1217 switch (hw->phy.type) {
1217 1218 case e1000_phy_m88:
1218 1219 ret_val = e1000_copper_link_setup_m88(hw);
1219 1220 break;
1220 1221 case e1000_phy_igp_3:
1221 1222 ret_val = e1000_copper_link_setup_igp(hw);
1222 1223 break;
1223 1224 case e1000_phy_82580:
1224 1225 ret_val = e1000_copper_link_setup_82577(hw);
1225 1226 break;
1226 1227 default:
1227 1228 ret_val = -E1000_ERR_PHY;
1228 1229 break;
1229 1230 }
1230 1231
1231 1232 if (ret_val)
1232 1233 goto out;
1233 1234
1234 1235 ret_val = e1000_setup_copper_link_generic(hw);
1235 1236 out:
1236 1237 return (ret_val);
1237 1238 }
1238 1239
1239 1240 /*
1240 1241 * e1000_setup_serdes_link_82575 - Setup link for serdes
1241 1242 * @hw: pointer to the HW structure
1242 1243 *
1243 1244 * Configure the physical coding sub-layer (PCS) link. The PCS link is
1244 1245 * used on copper connections where the serialized gigabit media independent
1245 1246 * interface (sgmii), or serdes fiber is being used. Configures the link
1246 1247 * for auto-negotiation or forces speed/duplex.
1247 1248 */
1248 1249 static s32
1249 1250 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
1250 1251 {
1251 1252 u32 ctrl_ext, ctrl_reg, reg;
1252 1253 bool pcs_autoneg;
1253 1254
1254 1255 DEBUGFUNC("e1000_setup_serdes_link_82575");
1255 1256
1256 1257 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1257 1258 !e1000_sgmii_active_82575(hw))
1258 1259 return (E1000_SUCCESS);
1259 1260
1260 1261 /*
1261 1262 * On the 82575, SerDes loopback mode persists until it is
1262 1263 * explicitly turned off or a power cycle is performed. A read to
1263 1264 * the register does not indicate its status. Therefore, we ensure
1264 1265 * loopback mode is disabled during initialization.
1265 1266 */
1266 1267 E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1267 1268
1268 1269 /* power on the sfp cage if present */
1269 1270 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1270 1271 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1271 1272 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1272 1273
1273 1274 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1274 1275 ctrl_reg |= E1000_CTRL_SLU;
1275 1276
1276 1277 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1277 1278 /* set both sw defined pins */
1278 1279 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1279 1280
1280 1281 /* Set switch control to serdes energy detect */
1281 1282 reg = E1000_READ_REG(hw, E1000_CONNSW);
1282 1283 reg |= E1000_CONNSW_ENRGSRC;
1283 1284 E1000_WRITE_REG(hw, E1000_CONNSW, reg);
1284 1285 }
1285 1286
1286 1287 reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
1287 1288
1288 1289 /* default pcs_autoneg to the same setting as mac autoneg */
1289 1290 pcs_autoneg = hw->mac.autoneg;
1290 1291
1291 1292 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1292 1293 case E1000_CTRL_EXT_LINK_MODE_SGMII:
1293 1294 /* sgmii mode lets the phy handle forcing speed/duplex */
1294 1295 pcs_autoneg = true;
1295 1296 /* autoneg time out should be disabled for SGMII mode */
1296 1297 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1297 1298 break;
1298 1299 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1299 1300 /* disable PCS autoneg and support parallel detect only */
1300 1301 pcs_autoneg = false;
1301 1302 default:
1302 1303 /*
1303 1304 * non-SGMII modes only supports a speed of 1000/Full for the
1304 1305 * link so it is best to just force the MAC and let the pcs
1305 1306 * link either autoneg or be forced to 1000/Full
1306 1307 */
1307 1308 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1308 1309 E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1309 1310
1310 1311 /* set speed of 1000/Full if speed/duplex is forced */
1311 1312 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1312 1313 break;
1313 1314 }
1314 1315
1315 1316 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1316 1317
1317 1318 /*
1318 1319 * New SerDes mode allows for forcing speed or autonegotiating speed
1319 1320 * at 1gb. Autoneg should be default set by most drivers. This is the
1320 1321 * mode that will be compatible with older link partners and switches.
1321 1322 * However, both are supported by the hardware and some drivers/tools.
1322 1323 */
1323 1324
1324 1325 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1325 1326 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1326 1327
1327 1328 /*
1328 1329 * We force flow control to prevent the CTRL register values from being
1329 1330 * overwritten by the autonegotiated flow control values
1330 1331 */
1331 1332 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1332 1333
1333 1334 if (pcs_autoneg) {
1334 1335 /* Set PCS register for autoneg */
1335 1336 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1336 1337 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1337 1338 DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1338 1339 } else {
1339 1340 /* Set PCS register for forced link */
1340 1341 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
1341 1342 DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1342 1343 }
1343 1344
1344 1345 E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
1345 1346
1346 1347 if (!e1000_sgmii_active_82575(hw))
1347 1348 (void) e1000_force_mac_fc_generic(hw);
1348 1349
1349 1350 return (E1000_SUCCESS);
1350 1351 }
1351 1352
1352 1353 /*
1353 1354 * e1000_valid_led_default_82575 - Verify a valid default LED config
1354 1355 * @hw: pointer to the HW structure
1355 1356 * @data: pointer to the NVM (EEPROM)
1356 1357 *
1357 1358 * Read the EEPROM for the current default LED configuration. If the
1358 1359 * LED configuration is not valid, set to a valid LED configuration.
1359 1360 */
1360 1361 static s32
1361 1362 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
1362 1363 {
1363 1364 s32 ret_val;
1364 1365
1365 1366 DEBUGFUNC("e1000_valid_led_default_82575");
1366 1367
1367 1368 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1368 1369 if (ret_val) {
1369 1370 DEBUGOUT("NVM Read Error\n");
1370 1371 goto out;
1371 1372 }
1372 1373
1373 1374 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1374 1375 switch (hw->phy.media_type) {
1375 1376 case e1000_media_type_internal_serdes:
1376 1377 *data = ID_LED_DEFAULT_82575_SERDES;
1377 1378 break;
1378 1379 case e1000_media_type_copper:
1379 1380 default:
1380 1381 *data = ID_LED_DEFAULT;
1381 1382 break;
1382 1383 }
1383 1384 }
1384 1385 out:
1385 1386 return (ret_val);
1386 1387 }
1387 1388
1388 1389 /*
1389 1390 * e1000_sgmii_active_82575 - Return sgmii state
1390 1391 * @hw: pointer to the HW structure
1391 1392 *
1392 1393 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
1393 1394 * which can be enabled for use in the embedded applications. Simply
1394 1395 * return the current state of the sgmii interface.
1395 1396 */
1396 1397 static bool
1397 1398 e1000_sgmii_active_82575(struct e1000_hw *hw)
1398 1399 {
1399 1400 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1400 1401 return (dev_spec->sgmii_active);
1401 1402 }
1402 1403
1403 1404 /*
1404 1405 * e1000_reset_init_script_82575 - Inits HW defaults after reset
1405 1406 * @hw: pointer to the HW structure
1406 1407 *
1407 1408 * Inits recommended HW defaults after a reset when there is no EEPROM
1408 1409 * detected. This is only for the 82575.
1409 1410 */
1410 1411 static s32
1411 1412 e1000_reset_init_script_82575(struct e1000_hw *hw)
1412 1413 {
1413 1414 DEBUGFUNC("e1000_reset_init_script_82575");
1414 1415
1415 1416 if (hw->mac.type == e1000_82575) {
1416 1417 DEBUGOUT("Running reset init script for 82575\n");
1417 1418 /* SerDes configuration via SERDESCTRL */
1418 1419 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
1419 1420 0x00, 0x0C);
1420 1421 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
1421 1422 0x01, 0x78);
1422 1423 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
1423 1424 0x1B, 0x23);
1424 1425 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
1425 1426 0x23, 0x15);
1426 1427
1427 1428 /* CCM configuration via CCMCTL register */
1428 1429 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL,
1429 1430 0x14, 0x00);
1430 1431 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL,
1431 1432 0x10, 0x00);
1432 1433
1433 1434 /* PCIe lanes configuration */
1434 1435 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
1435 1436 0x00, 0xEC);
1436 1437 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
1437 1438 0x61, 0xDF);
1438 1439 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
1439 1440 0x34, 0x05);
1440 1441 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
1441 1442 0x2F, 0x81);
1442 1443
1443 1444 /* PCIe PLL Configuration */
1444 1445 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL,
1445 1446 0x02, 0x47);
1446 1447 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL,
1447 1448 0x14, 0x00);
1448 1449 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL,
1449 1450 0x10, 0x00);
1450 1451 }
1451 1452
1452 1453 return (E1000_SUCCESS);
1453 1454 }
1454 1455
1455 1456 /*
1456 1457 * e1000_read_mac_addr_82575 - Read device MAC address
1457 1458 * @hw: pointer to the HW structure
1458 1459 */
1459 1460 static s32
1460 1461 e1000_read_mac_addr_82575(struct e1000_hw *hw)
1461 1462 {
1462 1463 s32 ret_val = E1000_SUCCESS;
1463 1464
1464 1465 DEBUGFUNC("e1000_read_mac_addr_82575");
1465 1466
1466 1467 /*
1467 1468 * If there's an alternate MAC address place it in RAR0
1468 1469 * so that it will override the Si installed default perm
1469 1470 * address.
1470 1471 */
1471 1472 ret_val = e1000_check_alt_mac_addr_generic(hw);
1472 1473 if (ret_val)
1473 1474 goto out;
1474 1475
1475 1476 ret_val = e1000_read_mac_addr_generic(hw);
1476 1477
1477 1478 out:
1478 1479 return (ret_val);
1479 1480 }
1480 1481
1481 1482 /*
1482 1483 * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
1483 1484 * @hw: pointer to the HW structure
1484 1485 *
1485 1486 * In the case of a PHY power down to save power, or to turn off link during a
1486 1487 * driver unload, or wake on lan is not enabled, remove the link.
1487 1488 */
1488 1489 static void
1489 1490 e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
1490 1491 {
1491 1492 struct e1000_phy_info *phy = &hw->phy;
1492 1493 struct e1000_mac_info *mac = &hw->mac;
1493 1494
1494 1495 if (!(phy->ops.check_reset_block))
1495 1496 return;
1496 1497
1497 1498 /* If the management interface is not enabled, then power down */
1498 1499 if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
1499 1500 e1000_power_down_phy_copper(hw);
1500 1501 }
1501 1502
1502 1503 /*
1503 1504 * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
1504 1505 * @hw: pointer to the HW structure
1505 1506 *
1506 1507 * Clears the hardware counters by reading the counter registers.
1507 1508 */
1508 1509 static void
1509 1510 e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
1510 1511 {
1511 1512 DEBUGFUNC("e1000_clear_hw_cntrs_82575");
1512 1513
1513 1514 e1000_clear_hw_cntrs_base_generic(hw);
1514 1515
1515 1516 (void) E1000_READ_REG(hw, E1000_PRC64);
1516 1517 (void) E1000_READ_REG(hw, E1000_PRC127);
1517 1518 (void) E1000_READ_REG(hw, E1000_PRC255);
1518 1519 (void) E1000_READ_REG(hw, E1000_PRC511);
1519 1520 (void) E1000_READ_REG(hw, E1000_PRC1023);
1520 1521 (void) E1000_READ_REG(hw, E1000_PRC1522);
1521 1522 (void) E1000_READ_REG(hw, E1000_PTC64);
1522 1523 (void) E1000_READ_REG(hw, E1000_PTC127);
1523 1524 (void) E1000_READ_REG(hw, E1000_PTC255);
1524 1525 (void) E1000_READ_REG(hw, E1000_PTC511);
1525 1526 (void) E1000_READ_REG(hw, E1000_PTC1023);
1526 1527 (void) E1000_READ_REG(hw, E1000_PTC1522);
1527 1528
1528 1529 (void) E1000_READ_REG(hw, E1000_ALGNERRC);
1529 1530 (void) E1000_READ_REG(hw, E1000_RXERRC);
1530 1531 (void) E1000_READ_REG(hw, E1000_TNCRS);
1531 1532 (void) E1000_READ_REG(hw, E1000_CEXTERR);
1532 1533 (void) E1000_READ_REG(hw, E1000_TSCTC);
1533 1534 (void) E1000_READ_REG(hw, E1000_TSCTFC);
1534 1535
1535 1536 (void) E1000_READ_REG(hw, E1000_MGTPRC);
1536 1537 (void) E1000_READ_REG(hw, E1000_MGTPDC);
1537 1538 (void) E1000_READ_REG(hw, E1000_MGTPTC);
1538 1539
1539 1540 (void) E1000_READ_REG(hw, E1000_IAC);
1540 1541 (void) E1000_READ_REG(hw, E1000_ICRXOC);
1541 1542
1542 1543 (void) E1000_READ_REG(hw, E1000_ICRXPTC);
1543 1544 (void) E1000_READ_REG(hw, E1000_ICRXATC);
1544 1545 (void) E1000_READ_REG(hw, E1000_ICTXPTC);
1545 1546 (void) E1000_READ_REG(hw, E1000_ICTXATC);
1546 1547 (void) E1000_READ_REG(hw, E1000_ICTXQEC);
1547 1548 (void) E1000_READ_REG(hw, E1000_ICTXQMTC);
1548 1549 (void) E1000_READ_REG(hw, E1000_ICRXDMTC);
1549 1550
1550 1551 (void) E1000_READ_REG(hw, E1000_CBTMPC);
1551 1552 (void) E1000_READ_REG(hw, E1000_HTDPMC);
1552 1553 (void) E1000_READ_REG(hw, E1000_CBRMPC);
1553 1554 (void) E1000_READ_REG(hw, E1000_RPTHC);
1554 1555 (void) E1000_READ_REG(hw, E1000_HGPTC);
1555 1556 (void) E1000_READ_REG(hw, E1000_HTCBDPC);
1556 1557 (void) E1000_READ_REG(hw, E1000_HGORCL);
1557 1558 (void) E1000_READ_REG(hw, E1000_HGORCH);
1558 1559 (void) E1000_READ_REG(hw, E1000_HGOTCL);
1559 1560 (void) E1000_READ_REG(hw, E1000_HGOTCH);
1560 1561 (void) E1000_READ_REG(hw, E1000_LENERRS);
1561 1562
1562 1563 /* This register should not be read in copper configurations */
1563 1564 if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
1564 1565 e1000_sgmii_active_82575(hw))
1565 1566 (void) E1000_READ_REG(hw, E1000_SCVPC);
1566 1567 }
1567 1568
1568 1569 /*
1569 1570 * e1000_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1570 1571 * @hw: pointer to the HW structure
1571 1572 *
1572 1573  * After rx enable, if manageability is enabled, then there is likely some
1573 1574 * bad data at the start of the fifo and possibly in the DMA fifo. This
1574 1575 * function clears the fifos and flushes any packets that came in as rx was
1575 1576 * being enabled.
1576 1577 */
1577 1578 void
1578 1579 e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
1579 1580 {
1580 1581 u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1581 1582 int i, ms_wait;
1582 1583
1583 1584 DEBUGFUNC("e1000_rx_fifo_workaround_82575");
1584 1585 if (hw->mac.type != e1000_82575 ||
1585 1586 !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
1586 1587 return;
1587 1588
1588 1589 /* Disable all RX queues */
1589 1590 for (i = 0; i < 4; i++) {
1590 1591 rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
1591 1592 E1000_WRITE_REG(hw, E1000_RXDCTL(i),
1592 1593 rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1593 1594 }
1594 1595 /* Poll all queues to verify they have shut down */
1595 1596 for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1596 1597 msec_delay(1);
1597 1598 rx_enabled = 0;
1598 1599 for (i = 0; i < 4; i++)
1599 1600 rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
1600 1601 if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
1601 1602 break;
1602 1603 }
1603 1604
1604 1605 if (ms_wait == 10)
1605 1606 DEBUGOUT("Queue disable timed out after 10ms\n");
1606 1607
1607 1608 /*
1608 1609 * Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
1609 1610 * incoming packets are rejected. Set enable and wait 2ms so that
1610 1611 * any packet that was coming in as RCTL.EN was set is flushed
1611 1612 */
1612 1613 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
1613 1614 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
1614 1615
1615 1616 rlpml = E1000_READ_REG(hw, E1000_RLPML);
1616 1617 E1000_WRITE_REG(hw, E1000_RLPML, 0);
1617 1618
1618 1619 rctl = E1000_READ_REG(hw, E1000_RCTL);
1619 1620 temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
1620 1621 temp_rctl |= E1000_RCTL_LPE;
1621 1622
1622 1623 E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
1623 1624 E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1624 1625 E1000_WRITE_FLUSH(hw);
1625 1626 msec_delay(2);
1626 1627
1627 1628 /*
1628 1629 * Enable RX queues that were previously enabled and restore our
1629 1630 * previous state
1630 1631 */
1631 1632 for (i = 0; i < 4; i++)
1632 1633 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
1633 1634 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1634 1635 E1000_WRITE_FLUSH(hw);
1635 1636
1636 1637 E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
1637 1638 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1638 1639
1639 1640 /* Flush receive errors generated by workaround */
1640 1641 (void) E1000_READ_REG(hw, E1000_ROC);
1641 1642 (void) E1000_READ_REG(hw, E1000_RNBC);
1642 1643 (void) E1000_READ_REG(hw, E1000_MPC);
1643 1644 }
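
For context, a minimal sketch of how a receive-start path might apply this workaround right after raising RCTL.EN; the igb_rx_start wrapper is hypothetical, and only the e1000_* calls and registers are from this file:

	/*
	 * Illustrative only: enable receives, then flush any stale management
	 * (BMC/TCO) traffic that landed in the FIFO while RCTL.EN was being
	 * set.  The flush is a no-op except on 82575 parts with TCO receive
	 * enabled, per the check at the top of e1000_rx_fifo_flush_82575().
	 */
	static void
	igb_rx_start(struct e1000_hw *hw)
	{
		u32 rctl = E1000_READ_REG(hw, E1000_RCTL);

		E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
		E1000_WRITE_FLUSH(hw);

		e1000_rx_fifo_flush_82575(hw);
	}
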
1644 1645
1645 1646 /*
1646 1647 * e1000_set_pcie_completion_timeout - set pci-e completion timeout
1647 1648 * @hw: pointer to the HW structure
1648 1649 *
1649 1650 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
1650 1651 * however the hardware default for these parts is 500us to 1ms, which is less
1651 1652 * than the 10ms recommended by the PCI-e spec. To address this, we need to
1652 1653 * increase the value to either 10ms to 200ms for capability version 1 config,
1653 1654 * or 16ms to 55ms for version 2.
1654 1655 */
1655 1656 static s32
1656 1657 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
1657 1658 {
1658 1659 u32 gcr = E1000_READ_REG(hw, E1000_GCR);
1659 1660 s32 ret_val = E1000_SUCCESS;
1660 1661 u16 pcie_devctl2;
1661 1662
1662 1663 /* only take action if timeout value is defaulted to 0 */
1663 1664 if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1664 1665 goto out;
1665 1666
1666 1667 /*
1667 1668 * if the capabilities version is type 1, we can write the
1668 1669 * timeout of 10ms to 200ms through the GCR register
1669 1670 */
1670 1671 if (!(gcr & E1000_GCR_CAP_VER2)) {
1671 1672 gcr |= E1000_GCR_CMPL_TMOUT_10ms;
1672 1673 goto out;
1673 1674 }
1674 1675
1675 1676 /*
1676 1677 * for version 2 capabilities we need to write the config space
1677 1678 * directly in order to set the completion timeout value for
1678 1679 * 16ms to 55ms
1679 1680 */
1680 1681 ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1681 1682 &pcie_devctl2);
1682 1683 if (ret_val)
1683 1684 goto out;
1684 1685
1685 1686 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
1686 1687
1687 1688 ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1688 1689 &pcie_devctl2);
1689 1690 out:
1690 1691 /* disable completion timeout resend */
1691 1692 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
1692 1693
1693 1694 E1000_WRITE_REG(hw, E1000_GCR, gcr);
1694 1695 return (ret_val);
1695 1696 }
1696 1697
1697 1698 /*
1698 1699 * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
1699 1700 * @hw: pointer to the hardware struct
1700 1701 * @enable: state to enter, either enabled or disabled
1701 1702 *
1702 1703 * enables/disables L2 switch loopback functionality.
1703 1704 */
1704 1705 void
1705 1706 e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1706 1707 {
1707 1708 u32 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
1708 1709
1709 1710 if (enable)
1710 1711 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1711 1712 else
1712 1713 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1713 1714
1714 1715 E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
1715 1716 }
1716 1717
1717 1718 /*
1718 1719 * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
1719 1720 * @hw: pointer to the hardware struct
1720 1721 * @enable: state to enter, either enabled or disabled
1721 1722 *
1722 1723 * enables/disables replication of packets across multiple pools.
1723 1724 */
1724 1725 void
1725 1726 e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1726 1727 {
1727 1728 u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1728 1729
1729 1730 if (enable)
1730 1731 vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1731 1732 else
1732 1733 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1733 1734
1734 1735 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1735 1736 }
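
Taken together with the loopback helper above, a hedged sketch of how a PF driver bringing up VMDq pools might use the pair; the call site itself is illustrative:

	/*
	 * Illustrative only: let VM-to-VM traffic hairpin through the internal
	 * L2 switch and replicate broadcast/multicast frames to all pools.
	 */
	e1000_vmdq_set_loopback_pf(hw, true);
	e1000_vmdq_set_replication_pf(hw, true);
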
1736 1737
1737 1738 /*
1738 1739 * e1000_read_phy_reg_82580 - Read 82580 MDI control register
1739 1740 * @hw: pointer to the HW structure
1740 1741 * @offset: register offset to be read
1741 1742 * @data: pointer to the read data
1742 1743 *
1743 1744 * Reads the MDI control register in the PHY at offset and stores the
1744 1745 * information read to data.
1745 1746 */
1746 1747 static s32
1747 1748 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1748 1749 {
1749 1750 u32 mdicnfg = 0;
1750 1751 s32 ret_val;
1751 1752
1752 1753 DEBUGFUNC("e1000_read_phy_reg_82580");
1753 1754
1754 1755 ret_val = hw->phy.ops.acquire(hw);
1755 1756 if (ret_val)
1756 1757 goto out;
1757 1758
1758 1759 /*
1759 1760 * We now configure the PHY address in the MDICNFG register. Same bits
1760 1761 * as before. The values in MDIC can be written but will be
1761 1762 * ignored. This allows us to call the old function after
1762 1763 * configuring the PHY address in the new register
1763 1764 */
1764 1765 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1765 1766 E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
1766 1767
1767 1768 ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
1768 1769
1769 1770 hw->phy.ops.release(hw);
1770 1771
1771 1772 out:
1772 1773 return (ret_val);
1773 1774 }
1774 1775
1775 1776 /*
1776 1777 * e1000_write_phy_reg_82580 - Write 82580 MDI control register
1777 1778 * @hw: pointer to the HW structure
1778 1779 * @offset: register offset to write to
1779 1780 * @data: data to write to register at offset
1780 1781 *
1781 1782 * Writes data to MDI control register in the PHY at offset.
1782 1783 */
1783 1784 static s32
1784 1785 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1785 1786 {
1786 1787 u32 mdicnfg = 0;
1787 1788 s32 ret_val;
1788 1789
1789 1790 DEBUGFUNC("e1000_write_phy_reg_82580");
1790 1791
1791 1792 ret_val = hw->phy.ops.acquire(hw);
1792 1793 if (ret_val)
1793 1794 goto out;
1794 1795
1795 1796 /*
1796 1797 * We config the phy address in MDICNFG register now. Same bits
1797 1798 * as before. The values in MDIC can be written but will be
1798 1799 * ignored. This allows us to call the old function after
1799 1800 * configuring the PHY address in the new register
1800 1801 */
1801 1802 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1802 1803 E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
1803 1804
1804 1805 ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
1805 1806
1806 1807 hw->phy.ops.release(hw);
1807 1808
1808 1809 out:
1809 1810 return (ret_val);
1810 1811 }
1811 1812
1812 1813 /*
1813 1814 * e1000_reset_hw_82580 - Reset hardware
1814 1815 * @hw: pointer to the HW structure
1815 1816 *
1816 1817 * This resets the function or the entire device (all ports, etc.)
1817 1818 * to a known state.
1818 1819 */
1819 1820 static s32
1820 1821 e1000_reset_hw_82580(struct e1000_hw *hw)
1821 1822 {
1822 1823 s32 ret_val = E1000_SUCCESS;
1823 1824 /* BH SW mailbox bit in SW_FW_SYNC */
1824 1825 u16 swmbsw_mask = E1000_SW_SYNCH_MB;
1825 1826 u32 ctrl;
1826 1827 bool global_device_reset = hw->dev_spec._82575.global_device_reset;
1827 1828
1828 1829 DEBUGFUNC("e1000_reset_hw_82580");
1829 1830
1830 1831 hw->dev_spec._82575.global_device_reset = false;
1831 1832
1832 1833 /* Get current control state. */
1833 1834 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1834 1835
1835 1836 /*
1836 1837 * Prevent the PCI-E bus from sticking if there is no TLP connection
1837 1838 * on the last TLP read/write transaction when MAC is reset.
1838 1839 */
1839 1840 ret_val = e1000_disable_pcie_master_generic(hw);
1840 1841 if (ret_val)
1841 1842 DEBUGOUT("PCI-E Master disable polling has failed.\n");
1842 1843
1843 1844 DEBUGOUT("Masking off all interrupts\n");
1844 1845 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1845 1846 E1000_WRITE_REG(hw, E1000_RCTL, 0);
1846 1847 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
1847 1848 E1000_WRITE_FLUSH(hw);
1848 1849
1849 1850 msec_delay(10);
1850 1851
1851 1852 /* Determine whether or not a global dev reset is requested */
1852 1853 if (global_device_reset &&
1853 1854 e1000_acquire_swfw_sync_82575(hw, swmbsw_mask))
1854 1855 global_device_reset = false;
1855 1856
1856 1857 if (global_device_reset &&
1857 1858 !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET))
1858 1859 ctrl |= E1000_CTRL_DEV_RST;
1859 1860 else
1860 1861 ctrl |= E1000_CTRL_RST;
1861 1862
1862 1863 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1863 1864
1864 1865 /* Add delay to ensure DEV_RST has time to complete */
1865 1866 if (global_device_reset)
1866 1867 msec_delay(5);
1867 1868
1868 1869 ret_val = e1000_get_auto_rd_done_generic(hw);
1869 1870 if (ret_val) {
1870 1871 /*
1871 1872 * When auto config read does not complete, do not
1872 1873 * return with an error. This can happen in situations
1873 1874 * where there is no EEPROM; returning an error here would prevent getting link.
1874 1875 */
1875 1876 DEBUGOUT("Auto Read Done did not complete\n");
1876 1877 }
1877 1878
1878 1879 /* If EEPROM is not present, run manual init scripts */
1879 1880 if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
1880 1881 (void) e1000_reset_init_script_82575(hw);
1881 1882
1882 1883 /* clear global device reset status bit */
1883 1884 E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
1884 1885
1885 1886 /* Clear any pending interrupt events. */
1886 1887 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1887 1888 (void) E1000_READ_REG(hw, E1000_ICR);
1888 1889
1889 1890 /* Install any alternate MAC address into RAR0 */
1890 1891 ret_val = e1000_check_alt_mac_addr_generic(hw);
1891 1892
1892 1893 /* Release semaphore */
1893 1894 if (global_device_reset)
1894 1895 e1000_release_swfw_sync_82575(hw, swmbsw_mask);
1895 1896
1896 1897 return (ret_val);
1897 1898 }
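
A hedged sketch of how a caller requests the global (all-port) DEV_RST variant instead of the default per-function reset; only the dev_spec flag and the mac.ops.reset_hw entry point are assumed from the surrounding shared code:

	/*
	 * Illustrative only: ask for a global device reset on an 82580-class
	 * part.  The routine above clears the flag, and quietly falls back to
	 * a port reset if the SW_FW_SYNC mailbox cannot be taken or DEV_RST
	 * was already latched by another port.
	 */
	hw->dev_spec._82575.global_device_reset = true;
	ret_val = hw->mac.ops.reset_hw(hw);
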
1898 1899
1899 1900 /*
1900 1901 * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
1901 1902 * @data: data received by reading RXPBS register
1902 1903 *
1903 1904 * The 82580 uses a table-based approach for packet buffer allocation sizes.
1904 1905 * This function converts the retrieved value into the correct table value:
1905 1906 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
1906 1907 * 0x0 36 72 144 1 2 4 8 16
1907 1908 * 0x8 35 70 140 rsv rsv rsv rsv rsv
1908 1909 */
1909 1910 u16
1910 1911 e1000_rxpbs_adjust_82580(u32 data)
1911 1912 {
1912 1913 u16 ret_val = 0;
1913 1914
1914 1915 if (data < E1000_82580_RXPBS_TABLE_SIZE)
1915 1916 ret_val = e1000_82580_rxpbs_table[data];
1916 1917
1917 1918 return (ret_val);
1918 1919 }
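
The table referenced above is defined elsewhere in the shared code; a sketch of what it amounts to, with the values taken from the comment (presumably KB of packet buffer) and the definition itself shown only for illustration:

	/* Illustrative only: lookup table implied by the comment above. */
	static const u16 e1000_82580_rxpbs_table[] = {
		36, 72, 144, 1, 2, 4, 8, 16,	/* encodings 0x0 - 0x7 */
		35, 70, 140			/* encodings 0x8 - 0xA */
	};
	#define	E1000_82580_RXPBS_TABLE_SIZE	\
		(sizeof (e1000_82580_rxpbs_table) / \
		sizeof (e1000_82580_rxpbs_table[0]))
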
1919 1920
1920 1921 /*
1921 1922 * Due to a hw erratum, if the host tries to configure the VFTA register
1922 1923 * while performing queries from the BMC or DMA, then the VFTA in some
1923 1924 * cases won't be written.
1924 1925 */
1925 1926
1926 1927 /*
1927 1928 * e1000_clear_vfta_i350 - Clear VLAN filter table
1928 1929 * @hw: pointer to the HW structure
1929 1930 *
1930 1931 * Clears the register array which contains the VLAN filter table by
1931 1932 * setting all the values to 0.
1932 1933 */
1933 1934 void
1934 1935 e1000_clear_vfta_i350(struct e1000_hw *hw)
1935 1936 {
1936 1937 u32 offset;
1937 1938 int i;
1938 1939
1939 1940 DEBUGFUNC("e1000_clear_vfta_i350");
1940 1941
1941 1942 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
1942 1943 for (i = 0; i < 10; i++)
1943 1944 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
1944 1945
1945 1946 E1000_WRITE_FLUSH(hw);
1946 1947 }
1947 1948 }
1948 1949
1949 1950 /*
1950 1951 * e1000_write_vfta_i350 - Write value to VLAN filter table
1951 1952 * @hw: pointer to the HW structure
1952 1953 * @offset: register offset in VLAN filter table
1953 1954 * @value: register value written to VLAN filter table
1954 1955 *
1955 1956 * Writes value at the given offset in the register array which stores
1956 1957 * the VLAN filter table.
1957 1958 */
1958 1959 void
1959 1960 e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
1960 1961 {
1961 1962 int i;
1962 1963
1963 1964 DEBUGFUNC("e1000_write_vfta_i350");
1964 1965
1965 1966 for (i = 0; i < 10; i++)
1966 1967 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
1967 1968
1968 1969 E1000_WRITE_FLUSH(hw);
1969 1970 }
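
For context, a hedged sketch of how a caller maps a VLAN ID onto this table before using the i350-safe writer; the 32-IDs-per-entry layout is the usual e1000 VFTA layout, and the local names (plus the assumption that an E1000_READ_REG_ARRAY accessor is available) are illustrative:

	/* Illustrative only: set the filter bit for vlan_id. */
	u32 offset = (vlan_id >> 5) & (E1000_VLAN_FILTER_TBL_SIZE - 1);
	u32 value = E1000_READ_REG_ARRAY(hw, E1000_VFTA, offset);

	value |= (u32)1 << (vlan_id & 0x1f);
	e1000_write_vfta_i350(hw, offset, value);
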
1970 1971
1971 1972 /*
1972 1973 * e1000_validate_nvm_checksum_with_offset - Validate EEPROM
1973 1974 * checksum
1974 1975 * @hw: pointer to the HW structure
1975 1976 * @offset: offset in words of the checksum protected region
1976 1977 *
1977 1978 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
1978 1979 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
1979 1980 */
1980 1981 s32
1981 1982 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
1982 1983 {
1983 1984 s32 ret_val = E1000_SUCCESS;
1984 1985 u16 checksum = 0;
1985 1986 u16 i, nvm_data;
1986 1987
1987 1988 DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
1988 1989
1989 1990 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
1990 1991 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
1991 1992 if (ret_val) {
1992 1993 DEBUGOUT("NVM Read Error\n");
1993 1994 goto out;
1994 1995 }
1995 1996 checksum += nvm_data;
1996 1997 }
1997 1998
1998 1999 if (checksum != (u16) NVM_SUM) {
1999 2000 DEBUGOUT("NVM Checksum Invalid\n");
2000 2001 ret_val = -E1000_ERR_NVM;
2001 2002 goto out;
2002 2003 }
2003 2004
2004 2005 out:
2005 2006 return (ret_val);
2006 2007 }
2007 2008
2008 2009 /*
2009 2010 * e1000_update_nvm_checksum_with_offset - Update EEPROM
2010 2011 * checksum
2011 2012 * @hw: pointer to the HW structure
2012 2013 * @offset: offset in words of the checksum protected region
2013 2014 *
2014 2015 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2015 2016 * up to the checksum. Then calculates the EEPROM checksum and writes the
2016 2017 * value to the EEPROM.
2017 2018 */
2018 2019 s32
2019 2020 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2020 2021 {
2021 2022 s32 ret_val;
2022 2023 u16 checksum = 0;
2023 2024 u16 i, nvm_data;
2024 2025
2025 2026 DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
2026 2027
2027 2028 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2028 2029 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2029 2030 if (ret_val) {
2030 2031 DEBUGOUT("NVM Read Error while updating checksum.\n");
2031 2032 goto out;
2032 2033 }
2033 2034 checksum += nvm_data;
2034 2035 }
2035 2036 checksum = (u16) NVM_SUM - checksum;
2036 2037 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2037 2038 &checksum);
2038 2039 if (ret_val)
2039 2040 DEBUGOUT("NVM Write Error while updating checksum.\n");
2040 2041
2041 2042 out:
2042 2043 return (ret_val);
2043 2044 }
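
As a concrete illustration of the two routines above: if the words ahead of the checksum sum to 0x1234, the update path stores 0xBABA - 0x1234 = 0xA886 in the checksum word, and the validate path then sums the whole region back to NVM_SUM (0xBABA). A minimal sketch, with partial_sum being a hypothetical running total:

	/*
	 * Illustrative only: the stored checksum word is whatever makes the
	 * 16-bit sum of the protected region equal NVM_SUM (0xBABA).
	 */
	u16 stored = (u16)NVM_SUM - partial_sum;	/* 0xBABA - 0x1234 = 0xA886 */
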
2044 2045
2045 2046 /*
2046 2047 * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
2047 2048 * @hw: pointer to the HW structure
2048 2049 *
2049 2050 * Calculates the EEPROM section checksum by reading/adding each word of
2050 2051 * the EEPROM and then verifies that the sum of the EEPROM is
2051 2052 * equal to 0xBABA.
2052 2053 */
2053 2054 static s32
2054 2055 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
2055 2056 {
2056 2057 s32 ret_val = E1000_SUCCESS;
2057 2058 u16 j;
2058 2059 u16 nvm_offset;
2059 2060
2060 2061 DEBUGFUNC("e1000_validate_nvm_checksum_i350");
2061 2062
2062 2063 for (j = 0; j < 4; j++) {
2063 2064 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2064 2065 ret_val = e1000_validate_nvm_checksum_with_offset(hw,
2065 2066 nvm_offset);
2066 2067 if (ret_val != E1000_SUCCESS)
2067 2068 goto out;
2068 2069 }
2069 2070
2070 2071 out:
2071 2072 return (ret_val);
2072 2073 }
2073 2074
2074 2075 /*
2075 2076 * e1000_update_nvm_checksum_i350 - Update EEPROM checksum
2076 2077 * @hw: pointer to the HW structure
2077 2078 *
2078 2079 * Updates the EEPROM section checksums for all 4 ports by reading/adding
2079 2080 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2080 2081 * checksum and writes the value to the EEPROM.
2081 2082 */
2082 2083 static s32
2083 2084 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
2084 2085 {
2085 2086 s32 ret_val = E1000_SUCCESS;
2086 2087 u16 j;
2087 2088 u16 nvm_offset;
2088 2089
2089 2090 DEBUGFUNC("e1000_update_nvm_checksum_i350");
2090 2091
2091 2092 for (j = 0; j < 4; j++) {
2092 2093 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2093 2094 ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
2094 2095 if (ret_val != E1000_SUCCESS)
2095 2096 goto out;
2096 2097 }
2097 2098
2098 2099 out:
2099 2100 return (ret_val);
2100 2101 }
2101 2102
2102 2103
2103 2104
2104 2105 /*
2105 2106 * e1000_set_eee_i350 - Enable/disable EEE support
2106 2107 * @hw: pointer to the HW structure
2107 2108 *
2108 2109 * Enable/disable EEE based on the setting in the dev_spec structure.
2109 2110 *
2110 2111 */
2111 2112 s32
2112 2113 e1000_set_eee_i350(struct e1000_hw *hw)
2113 2114 {
2114 2115
2115 2116 s32 ret_val = E1000_SUCCESS;
2116 2117 u32 ipcnfg, eeer;
2117 2118
2118 2119 DEBUGFUNC("e1000_set_eee_i350");
2119 2120
2120 2121 if ((hw->mac.type < e1000_i350) ||
2121 2122 (hw->phy.media_type != e1000_media_type_copper))
2122 2123 goto out;
2123 2124 ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
2124 2125 eeer = E1000_READ_REG(hw, E1000_EEER);
2125 2126
2126 2127 /* enable or disable per user setting */
2127 2128 if (!(hw->dev_spec._82575.eee_disable)) {
2128 2129 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2129 2130 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2130 2131 E1000_EEER_LPI_FC);
2131 2132
2132 2133 } else {
2133 2134 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2134 2135 eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2135 2136 E1000_EEER_LPI_FC);
2136 2137 }
2137 2138 E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
2138 2139 E1000_WRITE_REG(hw, E1000_EEER, eeer);
2139 2140 ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
2140 2141 eeer = E1000_READ_REG(hw, E1000_EEER);
2141 2142 out:
2142 2143
2143 2144 return (ret_val);
2144 2145 }
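
Finally, since this change is about disabling EEE for I350, a hedged sketch of the driver-side call that the routine above expects; where the flag actually gets set (for example from a driver configuration property) is illustrative and not part of this file:

	/*
	 * Illustrative only: disable EEE on i350 copper ports by setting the
	 * dev_spec flag before (re)programming IPCNFG/EEER.
	 */
	hw->dev_spec._82575.eee_disable = true;
	(void) e1000_set_eee_i350(hw);
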
|
↓ open down ↓ |
1823 lines elided |
↑ open up ↑ |