1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2007-2012 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Copyright 2013, Nexenta Systems, Inc. All rights reserved.
29 */
30
31 /* IntelVersion: 1.146.2.2 v3_3_14_3_BHSW1 */
32
33 /*
34 * 82575EB Gigabit Network Connection
35 * 82575EB Gigabit Backplane Connection
36 * 82575GB Gigabit Network Connection
37 * 82576 Gigabit Network Connection
38 * 82576 Quad Port Gigabit Mezzanine Adapter
39 */
40
41 #include "igb_api.h"
42
43 static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
44 static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
45 static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
46 static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
47 static void e1000_release_phy_82575(struct e1000_hw *hw);
48 static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
49 static void e1000_release_nvm_82575(struct e1000_hw *hw);
50 static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
51 static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
52 static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
53 u16 *duplex);
54 static s32 e1000_init_hw_82575(struct e1000_hw *hw);
55 static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
56 static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
57 u16 *data);
58 static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
59 static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
60 static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset,
61 u16 *data);
62 static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset,
63 u16 data);
64 static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
65 bool active);
66 static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
67 static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
68 static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
69 static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
70 u32 offset, u16 data);
71 static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
72 static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
73 static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
74 u16 *speed, u16 *duplex);
75 static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
76 static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
77 static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
78 static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
79 static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
80 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
81 static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
82 static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
83
84 static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
85 u16 offset);
86 static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
87 u16 offset);
88 static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
89 static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
90 static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
91 static void e1000_clear_vfta_i350(struct e1000_hw *hw);
92
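/*
 * Packet buffer allocation sizes for the 82580, indexed by the value read
 * from the RXPBS register; see e1000_rxpbs_adjust_82580().
 */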
93 static const u16 e1000_82580_rxpbs_table[] =
94 {36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140};
95 #define E1000_82580_RXPBS_TABLE_SIZE \
96 (sizeof (e1000_82580_rxpbs_table)/sizeof (u16))
97
98 /*
99 * e1000_init_phy_params_82575 - Init PHY func ptrs.
100 * @hw: pointer to the HW structure
101 */
102 static s32
103 e1000_init_phy_params_82575(struct e1000_hw *hw)
104 {
105 struct e1000_phy_info *phy = &hw->phy;
106 s32 ret_val = E1000_SUCCESS;
107
108 DEBUGFUNC("e1000_init_phy_params_82575");
109
110 if (hw->phy.media_type != e1000_media_type_copper) {
111 phy->type = e1000_phy_none;
112 goto out;
113 }
114
115 phy->ops.power_up = e1000_power_up_phy_copper;
116 phy->ops.power_down = e1000_power_down_phy_copper_82575;
117
118 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
119 phy->reset_delay_us = 100;
120
121 phy->ops.acquire = e1000_acquire_phy_82575;
122 phy->ops.check_reset_block = e1000_check_reset_block_generic;
123 phy->ops.commit = e1000_phy_sw_reset_generic;
124 phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
125 phy->ops.release = e1000_release_phy_82575;
126
127 if (e1000_sgmii_active_82575(hw)) {
128 phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
129 phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
130 phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
131 } else if (hw->mac.type == e1000_82580) {
132 phy->ops.reset = e1000_phy_hw_reset_generic;
133 phy->ops.read_reg = e1000_read_phy_reg_82580;
134 phy->ops.write_reg = e1000_write_phy_reg_82580;
135 } else {
136 phy->ops.reset = e1000_phy_hw_reset_generic;
137 phy->ops.read_reg = e1000_read_phy_reg_igp;
138 phy->ops.write_reg = e1000_write_phy_reg_igp;
139 }
140
	/* Set phy->addr and phy->id. */
142 ret_val = e1000_get_phy_id_82575(hw);
143
144 /* Verify phy id and set remaining function pointers */
145 switch (phy->id) {
146 case M88E1111_I_PHY_ID:
147 phy->type = e1000_phy_m88;
148 phy->ops.check_polarity = e1000_check_polarity_m88;
149 phy->ops.get_info = e1000_get_phy_info_m88;
150 phy->ops.get_cable_length = e1000_get_cable_length_m88;
151 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
152 break;
153 case IGP03E1000_E_PHY_ID:
154 case IGP04E1000_E_PHY_ID:
155 phy->type = e1000_phy_igp_3;
156 phy->ops.check_polarity = e1000_check_polarity_igp;
157 phy->ops.get_info = e1000_get_phy_info_igp;
158 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
159 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
160 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
161 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
162 break;
163 case I82580_I_PHY_ID:
164 case I350_I_PHY_ID:
165 phy->type = e1000_phy_82580;
166 phy->ops.check_polarity = e1000_check_polarity_82577;
167 phy->ops.force_speed_duplex =
168 e1000_phy_force_speed_duplex_82577;
169 phy->ops.get_cable_length = e1000_get_cable_length_82577;
170 phy->ops.get_info = e1000_get_phy_info_82577;
171 break;
172 default:
173 ret_val = -E1000_ERR_PHY;
174 goto out;
175 }
176
177 out:
178 return (ret_val);
179 }
180
181 /*
182 * e1000_init_nvm_params_82575 - Init NVM func ptrs.
183 * @hw: pointer to the HW structure
184 */
185 static s32
186 e1000_init_nvm_params_82575(struct e1000_hw *hw)
187 {
188 struct e1000_nvm_info *nvm = &hw->nvm;
189 u32 eecd = E1000_READ_REG(hw, E1000_EECD);
190 u16 size;
191
192 DEBUGFUNC("e1000_init_nvm_params_82575");
193
194 nvm->opcode_bits = 8;
195 nvm->delay_usec = 1;
196 switch (nvm->override) {
197 case e1000_nvm_override_spi_large:
198 nvm->page_size = 32;
199 nvm->address_bits = 16;
200 break;
201 case e1000_nvm_override_spi_small:
202 nvm->page_size = 8;
203 nvm->address_bits = 8;
204 break;
205 default:
206 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
207 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
208 break;
209 }
210
211 nvm->type = e1000_nvm_eeprom_spi;
212
213 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
214 E1000_EECD_SIZE_EX_SHIFT);
215
216 /*
217 * Added to a constant, "size" becomes the left-shift value
218 * for setting word_size.
219 */
220 size += NVM_WORD_SIZE_BASE_SHIFT;
221
222 /* EEPROM access above 16k is unsupported */
223 if (size > 14)
224 size = 14;
225 nvm->word_size = 1 << size;
226
227 /* Function Pointers */
228 nvm->ops.acquire = e1000_acquire_nvm_82575;
229 nvm->ops.read = e1000_read_nvm_eerd;
230 nvm->ops.release = e1000_release_nvm_82575;
231 nvm->ops.update = e1000_update_nvm_checksum_generic;
232 nvm->ops.valid_led_default = e1000_valid_led_default_82575;
233 nvm->ops.validate = e1000_validate_nvm_checksum_generic;
234 nvm->ops.write = e1000_write_nvm_spi;
235
	/* override generic family function pointers for specific descendants */
237 switch (hw->mac.type) {
238 case e1000_i350:
239 nvm->ops.validate = e1000_validate_nvm_checksum_i350;
240 nvm->ops.update = e1000_update_nvm_checksum_i350;
241 break;
242 default:
243 break;
244 }
245
247 return (E1000_SUCCESS);
248 }
249
250 /*
251 * e1000_init_mac_params_82575 - Init MAC func ptrs.
252 * @hw: pointer to the HW structure
253 */
254 static s32
255 e1000_init_mac_params_82575(struct e1000_hw *hw)
256 {
257 struct e1000_mac_info *mac = &hw->mac;
258 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
259 u32 ctrl_ext = 0;
260
261 DEBUGFUNC("e1000_init_mac_params_82575");
262
263 /* Set media type */
264 /*
265 * The 82575 uses bits 22:23 for link mode. The mode can be changed
266 * based on the EEPROM. We cannot rely upon device ID. There
267 * is no distinguishable difference between fiber and internal
268 * SerDes mode on the 82575. There can be an external PHY attached
269 * on the SGMII interface. For this, we'll set sgmii_active to true.
270 */
271 hw->phy.media_type = e1000_media_type_copper;
272 dev_spec->sgmii_active = false;
273
274 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
275 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
276 case E1000_CTRL_EXT_LINK_MODE_SGMII:
277 dev_spec->sgmii_active = true;
278 ctrl_ext |= E1000_CTRL_I2C_ENA;
279 break;
280 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
281 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
282 hw->phy.media_type = e1000_media_type_internal_serdes;
283 ctrl_ext |= E1000_CTRL_I2C_ENA;
284 break;
285 default:
286 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
287 break;
288 }
289
290 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
291
292 /*
	 * If using I2C, make certain the MDICNFG register is cleared to prevent
	 * communications from being misrouted to the MDIC registers.
295 */
296 if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
297 E1000_WRITE_REG(hw, E1000_MDICNFG, 0);
298
299 /* Set mta register count */
300 mac->mta_reg_count = 128;
301 /* Set uta register count */
302 mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
303 /* Set rar entry count */
304 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
305 if (mac->type == e1000_82576)
306 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
307 if (mac->type == e1000_82580)
308 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
309 if (mac->type == e1000_i350) {
310 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
311 /* Disable EEE default settings for i350 */
312 dev_spec->eee_disable = B_TRUE;
313 }
314 /* Set if part includes ASF firmware */
315 mac->asf_firmware_present = true;
316 /* Set if manageability features are enabled. */
317 mac->arc_subsystem_valid =
318 (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
319 ? true : false;
320
321 /* Function pointers */
322
323 /* bus type/speed/width */
324 mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
325 /* reset */
326 if (mac->type == e1000_82580)
327 mac->ops.reset_hw = e1000_reset_hw_82580;
328 else
329 mac->ops.reset_hw = e1000_reset_hw_82575;
330 /* hw initialization */
331 mac->ops.init_hw = e1000_init_hw_82575;
332 /* link setup */
333 mac->ops.setup_link = e1000_setup_link_generic;
334 /* physical interface link setup */
335 mac->ops.setup_physical_interface =
336 (hw->phy.media_type == e1000_media_type_copper)
337 ? e1000_setup_copper_link_82575
338 : e1000_setup_serdes_link_82575;
339 /* physical interface shutdown */
340 mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
341 /* check for link */
342 mac->ops.check_for_link = e1000_check_for_link_82575;
343 /* receive address register setting */
344 mac->ops.rar_set = e1000_rar_set_generic;
345 /* read mac address */
346 mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
347 /* multicast address update */
348 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
349
350 if (hw->mac.type == e1000_i350) {
351 /* writing VFTA */
352 mac->ops.write_vfta = e1000_write_vfta_i350;
353 /* clearing VFTA */
354 mac->ops.clear_vfta = e1000_clear_vfta_i350;
355 } else {
356 /* writing VFTA */
357 mac->ops.write_vfta = e1000_write_vfta_generic;
358 /* clearing VFTA */
359 mac->ops.clear_vfta = e1000_clear_vfta_generic;
360 }
361 /* setting MTA */
362 mac->ops.mta_set = e1000_mta_set_generic;
363 /* ID LED init */
364 mac->ops.id_led_init = e1000_id_led_init_generic;
365 /* blink LED */
366 mac->ops.blink_led = e1000_blink_led_generic;
367 /* setup LED */
368 mac->ops.setup_led = e1000_setup_led_generic;
369 /* cleanup LED */
370 mac->ops.cleanup_led = e1000_cleanup_led_generic;
371 /* turn on/off LED */
372 mac->ops.led_on = e1000_led_on_generic;
373 mac->ops.led_off = e1000_led_off_generic;
374 /* clear hardware counters */
375 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
376 /* link info */
377 mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
378
379 /* set lan id for port to determine which phy lock to use */
380 hw->mac.ops.set_lan_id(hw);
381
382 return (E1000_SUCCESS);
383 }
384
385 /*
386 * e1000_init_function_pointers_82575 - Init func ptrs.
387 * @hw: pointer to the HW structure
388 *
389 * Called to initialize all function pointers and parameters.
390 */
391 void
392 e1000_init_function_pointers_82575(struct e1000_hw *hw)
393 {
394 DEBUGFUNC("e1000_init_function_pointers_82575");
395
396 hw->mac.ops.init_params = e1000_init_mac_params_82575;
397 hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
398 hw->phy.ops.init_params = e1000_init_phy_params_82575;
399 }
400
401 /*
402 * e1000_acquire_phy_82575 - Acquire rights to access PHY
403 * @hw: pointer to the HW structure
404 *
405 * Acquire access rights to the correct PHY.
406 */
407 static s32
408 e1000_acquire_phy_82575(struct e1000_hw *hw)
409 {
410 u16 mask = E1000_SWFW_PHY0_SM;
411
412 DEBUGFUNC("e1000_acquire_phy_82575");
413
414 if (hw->bus.func == E1000_FUNC_1)
415 mask = E1000_SWFW_PHY1_SM;
416 else if (hw->bus.func == E1000_FUNC_2)
417 mask = E1000_SWFW_PHY2_SM;
418 else if (hw->bus.func == E1000_FUNC_3)
419 mask = E1000_SWFW_PHY3_SM;
420
421 return (e1000_acquire_swfw_sync_82575(hw, mask));
422 }
423
424 /*
425 * e1000_release_phy_82575 - Release rights to access PHY
426 * @hw: pointer to the HW structure
427 *
428 * A wrapper to release access rights to the correct PHY.
429 */
430 static void
431 e1000_release_phy_82575(struct e1000_hw *hw)
432 {
433 u16 mask = E1000_SWFW_PHY0_SM;
434
435 DEBUGFUNC("e1000_release_phy_82575");
436
437 if (hw->bus.func == E1000_FUNC_1)
438 mask = E1000_SWFW_PHY1_SM;
439 else if (hw->bus.func == E1000_FUNC_2)
440 mask = E1000_SWFW_PHY2_SM;
441 else if (hw->bus.func == E1000_FUNC_3)
442 mask = E1000_SWFW_PHY3_SM;
443
444 e1000_release_swfw_sync_82575(hw, mask);
445 }
446
447 /*
448 * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
449 * @hw: pointer to the HW structure
450 * @offset: register offset to be read
451 * @data: pointer to the read data
452 *
453 * Reads the PHY register at offset using the serial gigabit media independent
454 * interface and stores the retrieved information in data.
455 */
456 static s32
457 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 *data)
458 {
459 s32 ret_val = -E1000_ERR_PARAM;
460
461 DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
462
463 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
464 DEBUGOUT1("PHY Address %u is out of range\n", offset);
465 goto out;
466 }
467
468 ret_val = hw->phy.ops.acquire(hw);
469 if (ret_val)
470 goto out;
471
472 ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
473
474 hw->phy.ops.release(hw);
475
476 out:
477 return (ret_val);
478 }
479
480 /*
481 * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
482 * @hw: pointer to the HW structure
483 * @offset: register offset to write to
484 * @data: data to write at register offset
485 *
486 * Writes the data to PHY register at the offset using the serial gigabit
487 * media independent interface.
488 */
489 static s32
490 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 data)
491 {
492 s32 ret_val = -E1000_ERR_PARAM;
493
494 DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
495
496 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
		DEBUGOUT1("PHY Address %u is out of range\n", offset);
498 goto out;
499 }
500
501 ret_val = hw->phy.ops.acquire(hw);
502 if (ret_val)
503 goto out;
504
505 ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
506
507 hw->phy.ops.release(hw);
508
509 out:
510 return (ret_val);
511 }
512
513 /*
514 * e1000_get_phy_id_82575 - Retrieve PHY addr and id
515 * @hw: pointer to the HW structure
516 *
 * Retrieves the PHY address and ID for both PHYs that do and do not use the
 * sgmii interface.
519 */
520 static s32
521 e1000_get_phy_id_82575(struct e1000_hw *hw)
522 {
523 struct e1000_phy_info *phy = &hw->phy;
524 s32 ret_val = E1000_SUCCESS;
525 u16 phy_id;
526 u32 ctrl_ext;
527
528 DEBUGFUNC("e1000_get_phy_id_82575");
529
530 /*
531 * For SGMII PHYs, we try the list of possible addresses until
532 * we find one that works. For non-SGMII PHYs
533 * (e.g. integrated copper PHYs), an address of 1 should
	 * work. The result of this function should mean phy->addr
535 * and phy->id are set correctly.
536 */
537 if (!e1000_sgmii_active_82575(hw)) {
538 phy->addr = 1;
539 ret_val = e1000_get_phy_id(hw);
540 goto out;
541 }
542
543 /* Power on sgmii phy if it is disabled */
544 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
545 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
546 ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
547 E1000_WRITE_FLUSH(hw);
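	/* allow time for the SFP cage to power up the PHY */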
548 msec_delay(300);
549
550 /*
551 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
552 * Therefore, we need to test 1-7
553 */
554 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
555 ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
556 if (ret_val == E1000_SUCCESS) {
557 DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
558 phy_id,
559 phy->addr);
560 /*
			 * At the time of this writing, the M88 part is
			 * the only supported SGMII PHY product.
563 */
564 if (phy_id == M88_VENDOR)
565 break;
566 } else {
567 DEBUGOUT1("PHY address %u was unreadable\n",
568 phy->addr);
569 }
570 }
571
572 /* A valid PHY type couldn't be found. */
573 if (phy->addr == 8) {
574 phy->addr = 0;
575 ret_val = -E1000_ERR_PHY;
576 } else {
577 ret_val = e1000_get_phy_id(hw);
578 }
579
580 /* restore previous sfp cage power state */
581 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
582
583 out:
584 return (ret_val);
585 }
586
587 /*
588 * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
589 * @hw: pointer to the HW structure
590 *
591 * Resets the PHY using the serial gigabit media independent interface.
592 */
593 static s32
594 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
595 {
596 s32 ret_val = E1000_SUCCESS;
597
598 DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
599
600 /*
601 * This isn't a true "hard" reset, but is the only reset
602 * available to us at this time.
603 */
604
605 DEBUGOUT("Soft resetting SGMII attached PHY...\n");
606
607 if (!(hw->phy.ops.write_reg))
608 goto out;
609
610 /*
	 * SFP documentation requires the following to configure the SFP module
612 * to work on SGMII. No further documentation is given.
613 */
614 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
615 if (ret_val)
616 goto out;
617
618 ret_val = hw->phy.ops.commit(hw);
619
620 out:
621 return (ret_val);
622 }
623
624 /*
625 * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
626 * @hw: pointer to the HW structure
627 * @active: true to enable LPLU, false to disable
628 *
629 * Sets the LPLU D0 state according to the active flag. When
630 * activating LPLU this function also disables smart speed
631 * and vice versa. LPLU will not be activated unless the
632 * device autonegotiation advertisement meets standards of
633 * either 10 or 10/100 or 10/100/1000 at all duplexes.
634 * This is a function pointer entry point only called by
635 * PHY setup routines.
636 */
637 static s32
638 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
639 {
640 struct e1000_phy_info *phy = &hw->phy;
641 s32 ret_val = E1000_SUCCESS;
642 u16 data;
643
644 DEBUGFUNC("e1000_set_d0_lplu_state_82575");
645
646 if (!(hw->phy.ops.read_reg))
647 goto out;
648
649 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
650 if (ret_val)
651 goto out;
652
653 if (active) {
654 data |= IGP02E1000_PM_D0_LPLU;
655 ret_val = phy->ops.write_reg(hw,
656 IGP02E1000_PHY_POWER_MGMT,
657 data);
658 if (ret_val)
659 goto out;
660
661 /* When LPLU is enabled, we should disable SmartSpeed */
662 ret_val = phy->ops.read_reg(hw,
663 IGP01E1000_PHY_PORT_CONFIG,
664 &data);
665 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
666 ret_val = phy->ops.write_reg(hw,
667 IGP01E1000_PHY_PORT_CONFIG,
668 data);
669 if (ret_val)
670 goto out;
671 } else {
672 data &= ~IGP02E1000_PM_D0_LPLU;
673 ret_val = phy->ops.write_reg(hw,
674 IGP02E1000_PHY_POWER_MGMT,
675 data);
676 /*
677 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
678 * during Dx states where the power conservation is most
679 * important. During driver activity we should enable
680 * SmartSpeed, so performance is maintained.
681 */
682 if (phy->smart_speed == e1000_smart_speed_on) {
683 ret_val = phy->ops.read_reg(hw,
684 IGP01E1000_PHY_PORT_CONFIG,
685 &data);
686 if (ret_val)
687 goto out;
688
689 data |= IGP01E1000_PSCFR_SMART_SPEED;
690 ret_val = phy->ops.write_reg(hw,
691 IGP01E1000_PHY_PORT_CONFIG,
692 data);
693 if (ret_val)
694 goto out;
695 } else if (phy->smart_speed == e1000_smart_speed_off) {
696 ret_val = phy->ops.read_reg(hw,
697 IGP01E1000_PHY_PORT_CONFIG,
698 &data);
699 if (ret_val)
700 goto out;
701
702 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
703 ret_val = phy->ops.write_reg(hw,
704 IGP01E1000_PHY_PORT_CONFIG,
705 data);
706 if (ret_val)
707 goto out;
708 }
709 }
710
711 out:
712 return (ret_val);
713 }
714
715 /*
716 * e1000_acquire_nvm_82575 - Request for access to EEPROM
717 * @hw: pointer to the HW structure
718 *
719 * Acquire the necessary semaphores for exclusive access to the EEPROM.
720 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
721 * Return successful if access grant bit set, else clear the request for
722 * EEPROM access and return -E1000_ERR_NVM (-1).
723 */
724 static s32
725 e1000_acquire_nvm_82575(struct e1000_hw *hw)
726 {
727 s32 ret_val;
728
729 DEBUGFUNC("e1000_acquire_nvm_82575");
730
731 ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
732 if (ret_val)
733 goto out;
734
735 /*
	 * Check for any access errors left over from a previous NVM
	 * access and clear them before proceeding.
738 */
739 if (hw->mac.type == e1000_i350) {
740 u32 eecd = E1000_READ_REG(hw, E1000_EECD);
741 if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
742 E1000_EECD_TIMEOUT)) {
743 /* Clear all access error flags */
744 E1000_WRITE_REG(hw, E1000_EECD, eecd |
745 E1000_EECD_ERROR_CLR);
746 DEBUGOUT("Nvm bit banging access error "
747 "detected and cleared.\n");
748 }
749 }
750
751 ret_val = e1000_acquire_nvm_generic(hw);
752
753 if (ret_val)
754 e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
755
756 out:
757 return (ret_val);
758 }
759
760 /*
761 * e1000_release_nvm_82575 - Release exclusive access to EEPROM
762 * @hw: pointer to the HW structure
763 *
764 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
765 * then release the semaphores acquired.
766 */
767 static void
768 e1000_release_nvm_82575(struct e1000_hw *hw)
769 {
770 DEBUGFUNC("e1000_release_nvm_82575");
771
772 e1000_release_nvm_generic(hw);
773 e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
774 }
775
776 /*
777 * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
778 * @hw: pointer to the HW structure
779 * @mask: specifies which semaphore to acquire
780 *
781 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
782 * will also specify which port we're acquiring the lock for.
783 */
784 static s32
785 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
786 {
787 u32 swfw_sync;
788 u32 swmask = mask;
789 u32 fwmask = mask << 16;
790 s32 ret_val = E1000_SUCCESS;
791 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
792
793 DEBUGFUNC("e1000_acquire_swfw_sync_82575");
794
795 while (i < timeout) {
796 if (e1000_get_hw_semaphore_generic(hw)) {
797 ret_val = -E1000_ERR_SWFW_SYNC;
798 goto out;
799 }
800
801 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
802 if (!(swfw_sync & (fwmask | swmask)))
803 break;
804
805 /*
806 * Firmware currently using resource (fwmask)
807 * or other software thread using resource (swmask)
808 */
809 e1000_put_hw_semaphore_generic(hw);
810 msec_delay_irq(5);
811 i++;
812 }
813
814 if (i == timeout) {
815 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
816 ret_val = -E1000_ERR_SWFW_SYNC;
817 goto out;
818 }
819
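	/* mark the resource as in use by software */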
820 swfw_sync |= swmask;
821 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
822
823 e1000_put_hw_semaphore_generic(hw);
824
825 out:
826 return (ret_val);
827 }
828
829 /*
830 * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
831 * @hw: pointer to the HW structure
832 * @mask: specifies which semaphore to acquire
833 *
834 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
835 * will also specify which port we're releasing the lock for.
836 */
837 static void
838 e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
839 {
840 u32 swfw_sync;
841
842 DEBUGFUNC("e1000_release_swfw_sync_82575");
843
844 while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) {
845 /* Empty */
846 }
847
848 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
849 swfw_sync &= ~mask;
850 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
851
852 e1000_put_hw_semaphore_generic(hw);
853 }
854
855 /*
856 * e1000_get_cfg_done_82575 - Read config done bit
857 * @hw: pointer to the HW structure
858 *
859 * Read the management control register for the config done bit for
 * completion status. NOTE: EEPROM-less silicon will fail trying to read
 * the config done bit, so the error is *ONLY* logged and E1000_SUCCESS is
 * returned. If we were to return an error, EEPROM-less silicon would not
 * be able to be reset or change link.
864 */
865 static s32
866 e1000_get_cfg_done_82575(struct e1000_hw *hw)
867 {
868 s32 timeout = PHY_CFG_TIMEOUT;
869 s32 ret_val = E1000_SUCCESS;
870 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
871
872 DEBUGFUNC("e1000_get_cfg_done_82575");
873
874 if (hw->bus.func == E1000_FUNC_1)
875 mask = E1000_NVM_CFG_DONE_PORT_1;
876 else if (hw->bus.func == E1000_FUNC_2)
877 mask = E1000_NVM_CFG_DONE_PORT_2;
878 else if (hw->bus.func == E1000_FUNC_3)
879 mask = E1000_NVM_CFG_DONE_PORT_3;
880
881 while (timeout) {
882 if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
883 break;
884 msec_delay(1);
885 timeout--;
886 }
887 if (!timeout)
888 DEBUGOUT("MNG configuration cycle has not completed.\n");
889
890 /* If EEPROM is not marked present, init the PHY manually */
891 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
892 (hw->phy.type == e1000_phy_igp_3))
893 (void) e1000_phy_init_script_igp3(hw);
894
895 return (ret_val);
896 }
897
898 /*
899 * e1000_get_link_up_info_82575 - Get link speed/duplex info
900 * @hw: pointer to the HW structure
901 * @speed: stores the current speed
902 * @duplex: stores the current duplex
903 *
 * This is a wrapper function. If using the serial gigabit media independent
905 * interface, use PCS to retrieve the link speed and duplex information.
906 * Otherwise, use the generic function to get the link speed and duplex info.
907 */
908 static s32
909 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, u16 *duplex)
910 {
911 s32 ret_val;
912
913 DEBUGFUNC("e1000_get_link_up_info_82575");
914
915 if (hw->phy.media_type != e1000_media_type_copper)
916 ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
917 duplex);
918 else
919 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
920 duplex);
921
922 return (ret_val);
923 }
924
925 /*
926 * e1000_check_for_link_82575 - Check for link
927 * @hw: pointer to the HW structure
928 *
929 * If sgmii is enabled, then use the pcs register to determine link, otherwise
930 * use the generic interface for determining link.
931 */
932 static s32
933 e1000_check_for_link_82575(struct e1000_hw *hw)
934 {
935 s32 ret_val;
936 u16 speed, duplex;
937
938 DEBUGFUNC("e1000_check_for_link_82575");
939
940 /* SGMII link check is done through the PCS register. */
941 if (hw->phy.media_type != e1000_media_type_copper) {
942 ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
943 &duplex);
944 /*
945 * Use this flag to determine if link needs to be checked or
		 * not. If we have link, clear the flag so that we do not
947 * continue to check for link.
948 */
949 hw->mac.get_link_status = !hw->mac.serdes_has_link;
950 } else {
951 ret_val = e1000_check_for_copper_link_generic(hw);
952 }
953
954 return (ret_val);
955 }
956
957 /*
958 * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
959 * @hw: pointer to the HW structure
960 * @speed: stores the current speed
961 * @duplex: stores the current duplex
962 *
963 * Using the physical coding sub-layer (PCS), retrieve the current speed and
964 * duplex, then store the values in the pointers provided.
965 */
966 static s32
967 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
968 u16 *speed, u16 *duplex)
969 {
970 struct e1000_mac_info *mac = &hw->mac;
971 u32 pcs;
972
973 DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
974
975 /* Set up defaults for the return values of this function */
976 mac->serdes_has_link = false;
977 *speed = 0;
978 *duplex = 0;
979
980 /*
	 * Read the PCS Status register for link state. For non-copper mode,
	 * the MAC status register is not accurate, so the PCS status
	 * register is used instead.
984 */
985 pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
986
987 /*
988 * The link up bit determines when link is up on autoneg. The sync ok
989 * gets set once both sides sync up and agree upon link. Stable link
990 * can be determined by checking for both link up and link sync ok
991 */
992 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
993 mac->serdes_has_link = true;
994
995 /* Detect and store PCS speed */
996 if (pcs & E1000_PCS_LSTS_SPEED_1000) {
997 *speed = SPEED_1000;
998 } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
999 *speed = SPEED_100;
1000 } else {
1001 *speed = SPEED_10;
1002 }
1003
1004 /* Detect and store PCS duplex */
1005 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
1006 *duplex = FULL_DUPLEX;
1007 } else {
1008 *duplex = HALF_DUPLEX;
1009 }
1010 }
1011
1012 return (E1000_SUCCESS);
1013 }
1014
1015 /*
1016 * e1000_shutdown_serdes_link_82575 - Remove link during power down
1017 * @hw: pointer to the HW structure
1018 *
 * In the case of serdes, shut down the SFP module and the PCS on driver
 * unload when management pass through is not enabled.
1021 */
1022 void
1023 e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
1024 {
1025 u32 reg;
1026 u16 eeprom_data = 0;
1027
1028 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1029 !e1000_sgmii_active_82575(hw))
1030 return;
1031
1032 if (hw->bus.func == E1000_FUNC_0)
1033 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1034 else if (hw->mac.type == e1000_82580)
1035 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1036 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1037 &eeprom_data);
1038 else if (hw->bus.func == E1000_FUNC_1)
1039 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1040
1041 /*
1042 * If APM is not enabled in the EEPROM and management interface is
1043 * not enabled, then power down.
1044 */
1045 if (!(eeprom_data & E1000_NVM_APME_82575) &&
1046 !e1000_enable_mng_pass_thru(hw)) {
1047 /* Disable PCS to turn off link */
1048 reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
1049 reg &= ~E1000_PCS_CFG_PCS_EN;
1050 E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
1051
1052 /* shutdown the laser */
1053 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1054 reg |= E1000_CTRL_EXT_SDP3_DATA;
1055 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1056
1057 /* flush the write to verify completion */
1058 E1000_WRITE_FLUSH(hw);
1059 msec_delay(1);
1060 }
1061 }
1062
1063 /*
1064 * e1000_reset_hw_82575 - Reset hardware
1065 * @hw: pointer to the HW structure
1066 *
1067 * This resets the hardware into a known state.
1068 */
1069 static s32
1070 e1000_reset_hw_82575(struct e1000_hw *hw)
1071 {
1072 u32 ctrl;
1073 s32 ret_val;
1074
1075 DEBUGFUNC("e1000_reset_hw_82575");
1076
1077 /*
1078 * Prevent the PCI-E bus from sticking if there is no TLP connection
1079 * on the last TLP read/write transaction when MAC is reset.
1080 */
1081 ret_val = e1000_disable_pcie_master_generic(hw);
1082 if (ret_val) {
1083 DEBUGOUT("PCI-E Master disable polling has failed.\n");
1084 }
1085
1086 /* set the completion timeout for interface */
1087 ret_val = e1000_set_pcie_completion_timeout(hw);
1088 if (ret_val) {
1089 DEBUGOUT("PCI-E Set completion timeout has failed.\n");
1090 }
1091
1092 DEBUGOUT("Masking off all interrupts\n");
1093 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1094
1095 E1000_WRITE_REG(hw, E1000_RCTL, 0);
1096 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
1097 E1000_WRITE_FLUSH(hw);
1098
1099 msec_delay(10);
1100
1101 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1102
1103 DEBUGOUT("Issuing a global reset to MAC\n");
1104 E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
1105
1106 ret_val = e1000_get_auto_rd_done_generic(hw);
1107 if (ret_val) {
1108 /*
		 * When auto config read does not complete, do not return
		 * with an error. This can happen when there is no EEPROM;
		 * returning an error here would prevent getting link.
1112 */
1113 DEBUGOUT("Auto Read Done did not complete\n");
1114 }
1115
1116 /* If EEPROM is not present, run manual init scripts */
1117 if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
1118 (void) e1000_reset_init_script_82575(hw);
1119
1120 /* Clear any pending interrupt events. */
1121 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1122 (void) E1000_READ_REG(hw, E1000_ICR);
1123
1124 /* Install any alternate MAC address into RAR0 */
1125 ret_val = e1000_check_alt_mac_addr_generic(hw);
1126
1127 return (ret_val);
1128 }
1129
1130 /*
1131 * e1000_init_hw_82575 - Initialize hardware
1132 * @hw: pointer to the HW structure
1133 *
1134 * This inits the hardware readying it for operation.
1135 */
1136 static s32
1137 e1000_init_hw_82575(struct e1000_hw *hw)
1138 {
1139 struct e1000_mac_info *mac = &hw->mac;
1140 s32 ret_val;
1141 u16 i, rar_count = mac->rar_entry_count;
1142
1143 DEBUGFUNC("e1000_init_hw_82575");
1144
1145 /* Initialize identification LED */
1146 ret_val = mac->ops.id_led_init(hw);
1147 if (ret_val) {
1148 DEBUGOUT("Error initializing identification LED\n");
1149 /* This is not fatal and we should not stop init due to this */
1150 }
1151
1152 /* Disabling VLAN filtering */
1153 DEBUGOUT("Initializing the IEEE VLAN\n");
1154 mac->ops.clear_vfta(hw);
1155
1156 /* Setup the receive address */
1157 e1000_init_rx_addrs_generic(hw, rar_count);
1158 /* Zero out the Multicast HASH table */
1159 DEBUGOUT("Zeroing the MTA\n");
1160 for (i = 0; i < mac->mta_reg_count; i++)
1161 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
1162
1163 /* Zero out the Unicast HASH table */
1164 DEBUGOUT("Zeroing the UTA\n");
1165 for (i = 0; i < mac->uta_reg_count; i++)
1166 E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
1167
1168 /* Setup link and flow control */
1169 ret_val = mac->ops.setup_link(hw);
1170
1171 /*
1172 * Clear all of the statistics registers (clear on read). It is
1173 * important that we do this after we have tried to establish link
1174 * because the symbol error count will increment wildly if there
1175 * is no link.
1176 */
1177 e1000_clear_hw_cntrs_82575(hw);
1178
1179 return (ret_val);
1180 }
1181
1182 /*
1183 * e1000_setup_copper_link_82575 - Configure copper link settings
1184 * @hw: pointer to the HW structure
1185 *
 * Configures the link for auto-neg or forced speed and duplex. We then check
 * for link; once link is established, the collision distance and flow control
 * are configured.
1189 */
1190 static s32
1191 e1000_setup_copper_link_82575(struct e1000_hw *hw)
1192 {
1193 u32 ctrl;
1194 s32 ret_val;
1195
1196 DEBUGFUNC("e1000_setup_copper_link_82575");
1197
1198 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1199 ctrl |= E1000_CTRL_SLU;
1200 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1201 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1202
1203 ret_val = e1000_setup_serdes_link_82575(hw);
1204 if (ret_val)
1205 goto out;
1206
1207 if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for the SFP cage to power up the PHY */
1209 msec_delay(300);
1210
1211 ret_val = hw->phy.ops.reset(hw);
1212 if (ret_val) {
1213 DEBUGOUT("Error resetting the PHY.\n");
1214 goto out;
1215 }
1216 }
1217 switch (hw->phy.type) {
1218 case e1000_phy_m88:
1219 ret_val = e1000_copper_link_setup_m88(hw);
1220 break;
1221 case e1000_phy_igp_3:
1222 ret_val = e1000_copper_link_setup_igp(hw);
1223 break;
1224 case e1000_phy_82580:
1225 ret_val = e1000_copper_link_setup_82577(hw);
1226 break;
1227 default:
1228 ret_val = -E1000_ERR_PHY;
1229 break;
1230 }
1231
1232 if (ret_val)
1233 goto out;
1234
1235 ret_val = e1000_setup_copper_link_generic(hw);
1236 out:
1237 return (ret_val);
1238 }
1239
1240 /*
1241 * e1000_setup_serdes_link_82575 - Setup link for serdes
1242 * @hw: pointer to the HW structure
1243 *
1244 * Configure the physical coding sub-layer (PCS) link. The PCS link is
 * used on copper connections where the serial gigabit media independent
 * interface (sgmii) or serdes fiber is being used. Configures the link
1247 * for auto-negotiation or forces speed/duplex.
1248 */
1249 static s32
1250 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
1251 {
1252 u32 ctrl_ext, ctrl_reg, reg;
1253 bool pcs_autoneg;
1254
1255 DEBUGFUNC("e1000_setup_serdes_link_82575");
1256
1257 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1258 !e1000_sgmii_active_82575(hw))
1259 return (E1000_SUCCESS);
1260
1261 /*
1262 * On the 82575, SerDes loopback mode persists until it is
1263 * explicitly turned off or a power cycle is performed. A read to
1264 * the register does not indicate its status. Therefore, we ensure
1265 * loopback mode is disabled during initialization.
1266 */
1267 E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1268
1269 /* power on the sfp cage if present */
1270 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1271 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1272 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1273
1274 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1275 ctrl_reg |= E1000_CTRL_SLU;
1276
1277 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1278 /* set both sw defined pins */
1279 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1280
1281 /* Set switch control to serdes energy detect */
1282 reg = E1000_READ_REG(hw, E1000_CONNSW);
1283 reg |= E1000_CONNSW_ENRGSRC;
1284 E1000_WRITE_REG(hw, E1000_CONNSW, reg);
1285 }
1286
1287 reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
1288
1289 /* default pcs_autoneg to the same setting as mac autoneg */
1290 pcs_autoneg = hw->mac.autoneg;
1291
1292 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1293 case E1000_CTRL_EXT_LINK_MODE_SGMII:
1294 /* sgmii mode lets the phy handle forcing speed/duplex */
1295 pcs_autoneg = true;
1296 /* autoneg time out should be disabled for SGMII mode */
1297 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1298 break;
1299 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1300 /* disable PCS autoneg and support parallel detect only */
1301 pcs_autoneg = false;
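		/* FALLTHROUGH */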
1302 default:
1303 /*
		 * non-SGMII modes only support a speed of 1000/Full for the
		 * link, so it is best to just force the MAC and let the pcs
		 * link either autoneg or be forced to 1000/Full.
1307 */
1308 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1309 E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1310
1311 /* set speed of 1000/Full if speed/duplex is forced */
1312 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1313 break;
1314 }
1315
1316 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1317
1318 /*
	 * New SerDes mode allows for forcing speed or autonegotiating speed
	 * at 1gb. Autoneg should be the default set by most drivers, since it
	 * is the mode compatible with older link partners and switches.
	 * However, both modes are supported by the hardware and some drivers/tools.
1323 */
1324
1325 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1326 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1327
1328 /*
1329 * We force flow control to prevent the CTRL register values from being
1330 * overwritten by the autonegotiated flow control values
1331 */
1332 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1333
1334 if (pcs_autoneg) {
1335 /* Set PCS register for autoneg */
1336 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1337 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1338 DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1339 } else {
1340 /* Set PCS register for forced link */
1341 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
1342 DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1343 }
1344
1345 E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
1346
1347 if (!e1000_sgmii_active_82575(hw))
1348 (void) e1000_force_mac_fc_generic(hw);
1349
1350 return (E1000_SUCCESS);
1351 }
1352
1353 /*
1354 * e1000_valid_led_default_82575 - Verify a valid default LED config
1355 * @hw: pointer to the HW structure
1356 * @data: pointer to the NVM (EEPROM)
1357 *
1358 * Read the EEPROM for the current default LED configuration. If the
1359 * LED configuration is not valid, set to a valid LED configuration.
1360 */
1361 static s32
1362 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
1363 {
1364 s32 ret_val;
1365
1366 DEBUGFUNC("e1000_valid_led_default_82575");
1367
1368 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1369 if (ret_val) {
1370 DEBUGOUT("NVM Read Error\n");
1371 goto out;
1372 }
1373
1374 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1375 switch (hw->phy.media_type) {
1376 case e1000_media_type_internal_serdes:
1377 *data = ID_LED_DEFAULT_82575_SERDES;
1378 break;
1379 case e1000_media_type_copper:
1380 default:
1381 *data = ID_LED_DEFAULT;
1382 break;
1383 }
1384 }
1385 out:
1386 return (ret_val);
1387 }
1388
1389 /*
1390 * e1000_sgmii_active_82575 - Return sgmii state
1391 * @hw: pointer to the HW structure
1392 *
 * 82575 silicon has a serial gigabit media independent interface (sgmii)
 * which can be enabled for use in embedded applications. Simply
1395 * return the current state of the sgmii interface.
1396 */
1397 static bool
1398 e1000_sgmii_active_82575(struct e1000_hw *hw)
1399 {
1400 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1401 return (dev_spec->sgmii_active);
1402 }
1403
1404 /*
1405 * e1000_reset_init_script_82575 - Inits HW defaults after reset
1406 * @hw: pointer to the HW structure
1407 *
1408 * Inits recommended HW defaults after a reset when there is no EEPROM
1409 * detected. This is only for the 82575.
1410 */
1411 static s32
1412 e1000_reset_init_script_82575(struct e1000_hw *hw)
1413 {
1414 DEBUGFUNC("e1000_reset_init_script_82575");
1415
1416 if (hw->mac.type == e1000_82575) {
1417 DEBUGOUT("Running reset init script for 82575\n");
1418 /* SerDes configuration via SERDESCTRL */
1419 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
1420 0x00, 0x0C);
1421 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
1422 0x01, 0x78);
1423 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
1424 0x1B, 0x23);
1425 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
1426 0x23, 0x15);
1427
1428 /* CCM configuration via CCMCTL register */
1429 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL,
1430 0x14, 0x00);
1431 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL,
1432 0x10, 0x00);
1433
1434 /* PCIe lanes configuration */
1435 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
1436 0x00, 0xEC);
1437 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
1438 0x61, 0xDF);
1439 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
1440 0x34, 0x05);
1441 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
1442 0x2F, 0x81);
1443
1444 /* PCIe PLL Configuration */
1445 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL,
1446 0x02, 0x47);
1447 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL,
1448 0x14, 0x00);
1449 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL,
1450 0x10, 0x00);
1451 }
1452
1453 return (E1000_SUCCESS);
1454 }
1455
1456 /*
1457 * e1000_read_mac_addr_82575 - Read device MAC address
1458 * @hw: pointer to the HW structure
1459 */
1460 static s32
1461 e1000_read_mac_addr_82575(struct e1000_hw *hw)
1462 {
1463 s32 ret_val = E1000_SUCCESS;
1464
1465 DEBUGFUNC("e1000_read_mac_addr_82575");
1466
1467 /*
1468 * If there's an alternate MAC address place it in RAR0
1469 * so that it will override the Si installed default perm
1470 * address.
1471 */
1472 ret_val = e1000_check_alt_mac_addr_generic(hw);
1473 if (ret_val)
1474 goto out;
1475
1476 ret_val = e1000_read_mac_addr_generic(hw);
1477
1478 out:
1479 return (ret_val);
1480 }
1481
1482 /*
1483 * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
1484 * @hw: pointer to the HW structure
1485 *
 * In the case of a PHY power down to save power, to turn off link during a
 * driver unload, or when wake on LAN is not enabled, remove the link.
1488 */
1489 static void
1490 e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
1491 {
1492 struct e1000_phy_info *phy = &hw->phy;
1493 struct e1000_mac_info *mac = &hw->mac;
1494
1495 if (!(phy->ops.check_reset_block))
1496 return;
1497
1498 /* If the management interface is not enabled, then power down */
1499 if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
1500 e1000_power_down_phy_copper(hw);
1501 }
1502
1503 /*
1504 * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
1505 * @hw: pointer to the HW structure
1506 *
1507 * Clears the hardware counters by reading the counter registers.
1508 */
1509 static void
1510 e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
1511 {
1512 DEBUGFUNC("e1000_clear_hw_cntrs_82575");
1513
1514 e1000_clear_hw_cntrs_base_generic(hw);
1515
1516 (void) E1000_READ_REG(hw, E1000_PRC64);
1517 (void) E1000_READ_REG(hw, E1000_PRC127);
1518 (void) E1000_READ_REG(hw, E1000_PRC255);
1519 (void) E1000_READ_REG(hw, E1000_PRC511);
1520 (void) E1000_READ_REG(hw, E1000_PRC1023);
1521 (void) E1000_READ_REG(hw, E1000_PRC1522);
1522 (void) E1000_READ_REG(hw, E1000_PTC64);
1523 (void) E1000_READ_REG(hw, E1000_PTC127);
1524 (void) E1000_READ_REG(hw, E1000_PTC255);
1525 (void) E1000_READ_REG(hw, E1000_PTC511);
1526 (void) E1000_READ_REG(hw, E1000_PTC1023);
1527 (void) E1000_READ_REG(hw, E1000_PTC1522);
1528
1529 (void) E1000_READ_REG(hw, E1000_ALGNERRC);
1530 (void) E1000_READ_REG(hw, E1000_RXERRC);
1531 (void) E1000_READ_REG(hw, E1000_TNCRS);
1532 (void) E1000_READ_REG(hw, E1000_CEXTERR);
1533 (void) E1000_READ_REG(hw, E1000_TSCTC);
1534 (void) E1000_READ_REG(hw, E1000_TSCTFC);
1535
1536 (void) E1000_READ_REG(hw, E1000_MGTPRC);
1537 (void) E1000_READ_REG(hw, E1000_MGTPDC);
1538 (void) E1000_READ_REG(hw, E1000_MGTPTC);
1539
1540 (void) E1000_READ_REG(hw, E1000_IAC);
1541 (void) E1000_READ_REG(hw, E1000_ICRXOC);
1542
1543 (void) E1000_READ_REG(hw, E1000_ICRXPTC);
1544 (void) E1000_READ_REG(hw, E1000_ICRXATC);
1545 (void) E1000_READ_REG(hw, E1000_ICTXPTC);
1546 (void) E1000_READ_REG(hw, E1000_ICTXATC);
1547 (void) E1000_READ_REG(hw, E1000_ICTXQEC);
1548 (void) E1000_READ_REG(hw, E1000_ICTXQMTC);
1549 (void) E1000_READ_REG(hw, E1000_ICRXDMTC);
1550
1551 (void) E1000_READ_REG(hw, E1000_CBTMPC);
1552 (void) E1000_READ_REG(hw, E1000_HTDPMC);
1553 (void) E1000_READ_REG(hw, E1000_CBRMPC);
1554 (void) E1000_READ_REG(hw, E1000_RPTHC);
1555 (void) E1000_READ_REG(hw, E1000_HGPTC);
1556 (void) E1000_READ_REG(hw, E1000_HTCBDPC);
1557 (void) E1000_READ_REG(hw, E1000_HGORCL);
1558 (void) E1000_READ_REG(hw, E1000_HGORCH);
1559 (void) E1000_READ_REG(hw, E1000_HGOTCL);
1560 (void) E1000_READ_REG(hw, E1000_HGOTCH);
1561 (void) E1000_READ_REG(hw, E1000_LENERRS);
1562
1563 /* This register should not be read in copper configurations */
1564 if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
1565 e1000_sgmii_active_82575(hw))
1566 (void) E1000_READ_REG(hw, E1000_SCVPC);
1567 }
1568
1569 /*
1570 * e1000_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1571 * @hw: pointer to the HW structure
1572 *
 * After rx enable, if manageability is enabled then there is likely some
 * bad data at the start of the fifo and possibly in the DMA fifo. This
 * function clears the fifos and flushes any packets that came in while rx
 * was being enabled.
1577 */
1578 void
1579 e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
1580 {
1581 u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1582 int i, ms_wait;
1583
	DEBUGFUNC("e1000_rx_fifo_flush_82575");
1585 if (hw->mac.type != e1000_82575 ||
1586 !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
1587 return;
1588
1589 /* Disable all RX queues */
1590 for (i = 0; i < 4; i++) {
1591 rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
1592 E1000_WRITE_REG(hw, E1000_RXDCTL(i),
1593 rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1594 }
1595 /* Poll all queues to verify they have shut down */
1596 for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1597 msec_delay(1);
1598 rx_enabled = 0;
1599 for (i = 0; i < 4; i++)
1600 rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
1601 if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
1602 break;
1603 }
1604
1605 if (ms_wait == 10)
1606 DEBUGOUT("Queue disable timed out after 10ms\n");
1607
1608 /*
	 * Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected. Set enable and wait 2ms so that
	 * any packets that were arriving while RCTL.EN was being set are flushed.
1612 */
1613 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
1614 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
1615
1616 rlpml = E1000_READ_REG(hw, E1000_RLPML);
1617 E1000_WRITE_REG(hw, E1000_RLPML, 0);
1618
1619 rctl = E1000_READ_REG(hw, E1000_RCTL);
1620 temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
1621 temp_rctl |= E1000_RCTL_LPE;
1622
1623 E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
1624 E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1625 E1000_WRITE_FLUSH(hw);
1626 msec_delay(2);
1627
1628 /*
1629 * Enable RX queues that were previously enabled and restore our
1630 * previous state
1631 */
1632 for (i = 0; i < 4; i++)
1633 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
1634 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1635 E1000_WRITE_FLUSH(hw);
1636
1637 E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
1638 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1639
1640 /* Flush receive errors generated by workaround */
1641 (void) E1000_READ_REG(hw, E1000_ROC);
1642 (void) E1000_READ_REG(hw, E1000_RNBC);
1643 (void) E1000_READ_REG(hw, E1000_MPC);
1644 }
1645
1646 /*
1647 * e1000_set_pcie_completion_timeout - set pci-e completion timeout
1648 * @hw: pointer to the HW structure
1649 *
 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms;
 * however, the hardware default for these parts is 500us to 1ms, which is less
 * than the 10ms recommended by the pci-e spec. To address this we need to
1653 * increase the value to either 10ms to 200ms for capability version 1 config,
1654 * or 16ms to 55ms for version 2.
1655 */
1656 static s32
1657 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
1658 {
1659 u32 gcr = E1000_READ_REG(hw, E1000_GCR);
1660 s32 ret_val = E1000_SUCCESS;
1661 u16 pcie_devctl2;
1662
1663 /* only take action if timeout value is defaulted to 0 */
1664 if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1665 goto out;
1666
1667 /*
	 * if capabilities version is type 1 we can write the
1669 * timeout of 10ms to 200ms through the GCR register
1670 */
1671 if (!(gcr & E1000_GCR_CAP_VER2)) {
1672 gcr |= E1000_GCR_CMPL_TMOUT_10ms;
1673 goto out;
1674 }
1675
1676 /*
1677 * for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value in the
	 * 16ms to 55ms range.
1680 */
1681 ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1682 &pcie_devctl2);
1683 if (ret_val)
1684 goto out;
1685
1686 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
1687
1688 ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1689 &pcie_devctl2);
1690 out:
1691 /* disable completion timeout resend */
1692 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
1693
1694 E1000_WRITE_REG(hw, E1000_GCR, gcr);
1695 return (ret_val);
1696 }
1697
1698 /*
1699 * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
1700 * @hw: pointer to the hardware struct
1701 * @enable: state to enter, either enabled or disabled
1702 *
1703 * enables/disables L2 switch loopback functionality.
1704 */
1705 void
1706 e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1707 {
1708 u32 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
1709
1710 if (enable)
1711 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1712 else
1713 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1714
1715 E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
1716 }
1717
1718 /*
1719 * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
1720 * @hw: pointer to the hardware struct
1721 * @enable: state to enter, either enabled or disabled
1722 *
1723 * enables/disables replication of packets across multiple pools.
1724 */
1725 void
1726 e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1727 {
1728 u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1729
1730 if (enable)
1731 vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1732 else
1733 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1734
1735 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1736 }
1737
1738 /*
1739 * e1000_read_phy_reg_82580 - Read 82580 MDI control register
1740 * @hw: pointer to the HW structure
1741 * @offset: register offset to be read
1742 * @data: pointer to the read data
1743 *
1744 * Reads the MDI control register in the PHY at offset and stores the
1745 * information read to data.
1746 */
1747 static s32
1748 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1749 {
1750 u32 mdicnfg = 0;
1751 s32 ret_val;
1752
1753 DEBUGFUNC("e1000_read_phy_reg_82580");
1754
1755 ret_val = hw->phy.ops.acquire(hw);
1756 if (ret_val)
1757 goto out;
1758
1759 /*
	 * We configure the PHY address in the MDICNFG register now. Same
	 * bits as before. The values in MDIC can be written but will be
	 * ignored. This allows us to call the old function after
	 * configuring the PHY address in the new register.
1764 */
1765 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1766 E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
1767
1768 ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
1769
1770 hw->phy.ops.release(hw);
1771
1772 out:
1773 return (ret_val);
1774 }
1775
1776 /*
1777 * e1000_write_phy_reg_82580 - Write 82580 MDI control register
1778 * @hw: pointer to the HW structure
1779 * @offset: register offset to write to
1780 * @data: data to write to register at offset
1781 *
1782 * Writes data to MDI control register in the PHY at offset.
1783 */
1784 static s32
1785 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1786 {
1787 u32 mdicnfg = 0;
1788 s32 ret_val;
1789
1790 DEBUGFUNC("e1000_write_phy_reg_82580");
1791
1792 ret_val = hw->phy.ops.acquire(hw);
1793 if (ret_val)
1794 goto out;
1795
1796 /*
	 * We configure the PHY address in the MDICNFG register now. Same
	 * bits as before. The values in MDIC can be written but will be
	 * ignored. This allows us to call the old function after
	 * configuring the PHY address in the new register.
1801 */
1802 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1803 E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
1804
1805 ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
1806
1807 hw->phy.ops.release(hw);
1808
1809 out:
1810 return (ret_val);
1811 }
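
/*
 * Illustrative sketch: on 82580-class parts these static accessors are
 * normally reached through hw->phy.ops.read_reg / write_reg, so a typical
 * caller performs a read-modify-write of a PHY register without touching
 * MDICNFG directly.  PHY_CONTROL and MII_CR_AUTO_NEG_EN are assumed to be
 * the standard MII control register defines from the shared headers.
 *
 *	u16 phy_data;
 *
 *	ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_data);
 *	if (ret_val == E1000_SUCCESS) {
 *		phy_data |= MII_CR_AUTO_NEG_EN;
 *		ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_data);
 *	}
 */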
1812
1813 /*
1814 * e1000_reset_hw_82580 - Reset hardware
1815 * @hw: pointer to the HW structure
1816 *
 * Resets either the current function or the entire device (all ports,
 * etc.) to a known state, depending on the global_device_reset flag in
 * the device-specific structure.
1819 */
1820 static s32
1821 e1000_reset_hw_82580(struct e1000_hw *hw)
1822 {
1823 s32 ret_val = E1000_SUCCESS;
1824 /* BH SW mailbox bit in SW_FW_SYNC */
1825 u16 swmbsw_mask = E1000_SW_SYNCH_MB;
1826 u32 ctrl;
1827 bool global_device_reset = hw->dev_spec._82575.global_device_reset;
1828
1829 DEBUGFUNC("e1000_reset_hw_82580");
1830
1831 hw->dev_spec._82575.global_device_reset = false;
1832
1833 /* Get current control state. */
1834 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1835
	/*
	 * Prevent the PCI-E bus from sticking: make sure no TLP read/write
	 * transaction is still outstanding when the MAC is reset.
	 */
1840 ret_val = e1000_disable_pcie_master_generic(hw);
1841 if (ret_val)
1842 DEBUGOUT("PCI-E Master disable polling has failed.\n");
1843
1844 DEBUGOUT("Masking off all interrupts\n");
1845 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1846 E1000_WRITE_REG(hw, E1000_RCTL, 0);
1847 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
1848 E1000_WRITE_FLUSH(hw);
1849
1850 msec_delay(10);
1851
	/*
	 * If a global device reset was requested, try to take the SW
	 * mailbox semaphore; fall back to a port-level reset if that fails.
	 */
1853 if (global_device_reset &&
1854 e1000_acquire_swfw_sync_82575(hw, swmbsw_mask))
1855 global_device_reset = false;
1856
1857 if (global_device_reset &&
1858 !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET))
1859 ctrl |= E1000_CTRL_DEV_RST;
1860 else
1861 ctrl |= E1000_CTRL_RST;
1862
1863 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1864
	/* Add delay to ensure DEV_RST has time to complete */
1866 if (global_device_reset)
1867 msec_delay(5);
1868
1869 ret_val = e1000_get_auto_rd_done_generic(hw);
1870 if (ret_val) {
		/*
		 * When the auto config read does not complete, do not
		 * return an error.  This can happen when no EEPROM is
		 * present, and returning an error here would prevent
		 * link from being established.
		 */
1876 DEBUGOUT("Auto Read Done did not complete\n");
1877 }
1878
1879 /* If EEPROM is not present, run manual init scripts */
1880 if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
1881 (void) e1000_reset_init_script_82575(hw);
1882
1883 /* clear global device reset status bit */
1884 E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
1885
1886 /* Clear any pending interrupt events. */
1887 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1888 (void) E1000_READ_REG(hw, E1000_ICR);
1889
1890 /* Install any alternate MAC address into RAR0 */
1891 ret_val = e1000_check_alt_mac_addr_generic(hw);
1892
1893 /* Release semaphore */
1894 if (global_device_reset)
1895 e1000_release_swfw_sync_82575(hw, swmbsw_mask);
1896
1897 return (ret_val);
1898 }
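
/*
 * Illustrative sketch: the choice between a function-level reset and a
 * global device reset is driven by the global_device_reset flag consumed
 * above.  A caller requesting a full-device reset might look roughly like
 * this (reset_hw is assumed to be wired to this routine via mac.ops):
 *
 *	hw->dev_spec._82575.global_device_reset = true;
 *	ret_val = hw->mac.ops.reset_hw(hw);
 */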
1899
/*
 * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
 * @data: data received by reading the RXPBS register
 *
 * The 82580 uses a table-based approach for packet buffer allocation sizes.
 * This function converts the retrieved value into the corresponding table
 * entry:
 *
 *	      0x0  0x1  0x2  0x3  0x4  0x5  0x6  0x7
 *	0x0    36   72  144    1    2    4    8   16
 *	0x8    35   70  140  rsv  rsv  rsv  rsv  rsv
 */
1910 u16
1911 e1000_rxpbs_adjust_82580(u32 data)
1912 {
1913 u16 ret_val = 0;
1914
1915 if (data < E1000_82580_RXPBS_TABLE_SIZE)
1916 ret_val = e1000_82580_rxpbs_table[data];
1917
1918 return (ret_val);
1919 }
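
/*
 * Illustrative sketch: a caller typically reads the RXPBS register and
 * feeds the value through this helper to learn the actual RX packet buffer
 * allocation.  The E1000_RXPBS register macro is assumed to be defined in
 * the register headers; any additional masking of the raw register value
 * is left to the caller.
 *
 *	u32 rxpbs = E1000_READ_REG(hw, E1000_RXPBS);
 *	u16 rx_pba = e1000_rxpbs_adjust_82580(rxpbs);
 */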
1920
/*
 * Due to a hardware erratum, if the host configures a VFTA register while
 * the BMC or a DMA engine is issuing queries, the VFTA write may in some
 * cases be dropped.  As a workaround, the i350 VFTA accessors below write
 * each table entry multiple times.
 */
1926
1927 /*
1928 * e1000_clear_vfta_i350 - Clear VLAN filter table
1929 * @hw: pointer to the HW structure
1930 *
1931 * Clears the register array which contains the VLAN filter table by
1932 * setting all the values to 0.
1933 */
1934 void
1935 e1000_clear_vfta_i350(struct e1000_hw *hw)
1936 {
1937 u32 offset;
1938 int i;
1939
	DEBUGFUNC("e1000_clear_vfta_i350");
1941
1942 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
1943 for (i = 0; i < 10; i++)
1944 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
1945
1946 E1000_WRITE_FLUSH(hw);
1947 }
1948 }
1949
1950 /*
1951 * e1000_write_vfta_i350 - Write value to VLAN filter table
1952 * @hw: pointer to the HW structure
1953 * @offset: register offset in VLAN filter table
1954 * @value: register value written to VLAN filter table
1955 *
1956 * Writes value at the given offset in the register array which stores
1957 * the VLAN filter table.
1958 */
1959 void
1960 e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
1961 {
1962 int i;
1963
	DEBUGFUNC("e1000_write_vfta_i350");
1965
1966 for (i = 0; i < 10; i++)
1967 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
1968
1969 E1000_WRITE_FLUSH(hw);
1970 }
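
/*
 * Illustrative sketch: the VFTA is an array of 32-bit registers
 * (E1000_VLAN_FILTER_TBL_SIZE entries), so admitting a given VLAN ID means
 * setting one bit in one entry.  A caller using the i350-safe writer above
 * might do roughly the following (vlan_id is a hypothetical 12-bit VLAN
 * identifier):
 *
 *	u32 index = (vlan_id >> 5) & 0x7F;
 *	u32 bit = 1 << (vlan_id & 0x1F);
 *	u32 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
 *
 *	e1000_write_vfta_i350(hw, index, vfta | bit);
 */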
1971
/*
 * e1000_validate_nvm_checksum_with_offset - Validate EEPROM checksum
 * @hw: pointer to the HW structure
 * @offset: offset in words of the checksum protected region
 *
 * Calculates the EEPROM checksum by reading and adding each word of the
 * checksum protected region, including the checksum word itself, and then
 * verifies that the sum equals 0xBABA (NVM_SUM).
 */
1981 s32
1982 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
1983 {
1984 s32 ret_val = E1000_SUCCESS;
1985 u16 checksum = 0;
1986 u16 i, nvm_data;
1987
1988 DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
1989
1990 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
1991 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
1992 if (ret_val) {
1993 DEBUGOUT("NVM Read Error\n");
1994 goto out;
1995 }
1996 checksum += nvm_data;
1997 }
1998
1999 if (checksum != (u16) NVM_SUM) {
2000 DEBUGOUT("NVM Checksum Invalid\n");
2001 ret_val = -E1000_ERR_NVM;
2002 goto out;
2003 }
2004
2005 out:
2006 return (ret_val);
2007 }
2008
/*
 * e1000_update_nvm_checksum_with_offset - Update EEPROM checksum
 * @hw: pointer to the HW structure
 * @offset: offset in words of the checksum protected region
 *
 * Updates the EEPROM checksum by reading and adding each word of the
 * checksum protected region up to, but not including, the checksum word,
 * then writes the value that makes the overall sum equal 0xBABA (NVM_SUM)
 * to the checksum word.
 */
2019 s32
2020 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2021 {
2022 s32 ret_val;
2023 u16 checksum = 0;
2024 u16 i, nvm_data;
2025
2026 DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
2027
2028 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2029 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2030 if (ret_val) {
2031 DEBUGOUT("NVM Read Error while updating checksum.\n");
2032 goto out;
2033 }
2034 checksum += nvm_data;
2035 }
2036 checksum = (u16) NVM_SUM - checksum;
2037 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2038 &checksum);
2039 if (ret_val)
2040 DEBUGOUT("NVM Write Error while updating checksum.\n");
2041
2042 out:
2043 return (ret_val);
2044 }
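
/*
 * Worked example of the checksum rule used by the two routines above: if
 * the words of a section (excluding the checksum word) sum to 0xB000, the
 * update routine writes 0xBABA - 0xB000 = 0x0ABA into the checksum word,
 * so that the validation routine's sum over the full section,
 * 0xB000 + 0x0ABA, comes out to NVM_SUM (0xBABA).  All arithmetic is
 * modulo 2^16, matching the u16 accumulator.
 */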
2045
2046 /*
2047 * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
2048 * @hw: pointer to the HW structure
2049 *
 * Validates the per-port EEPROM section checksum for each of the four LAN
 * functions by reading and adding each word of the section and verifying
 * that the sum equals 0xBABA (NVM_SUM).
2053 */
2054 static s32
2055 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
2056 {
2057 s32 ret_val = E1000_SUCCESS;
2058 u16 j;
2059 u16 nvm_offset;
2060
2061 DEBUGFUNC("e1000_validate_nvm_checksum_i350");
2062
2063 for (j = 0; j < 4; j++) {
2064 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2065 ret_val = e1000_validate_nvm_checksum_with_offset(hw,
2066 nvm_offset);
2067 if (ret_val != E1000_SUCCESS)
2068 goto out;
2069 }
2070
2071 out:
2072 return (ret_val);
2073 }
2074
2075 /*
2076 * e1000_update_nvm_checksum_i350 - Update EEPROM checksum
2077 * @hw: pointer to the HW structure
2078 *
 * Updates the EEPROM section checksums for all four LAN functions by
 * reading and adding each word of each section up to its checksum word,
 * then calculating and writing the checksum value for that section.
2082 */
2083 static s32
2084 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
2085 {
2086 s32 ret_val = E1000_SUCCESS;
2087 u16 j;
2088 u16 nvm_offset;
2089
2090 DEBUGFUNC("e1000_update_nvm_checksum_i350");
2091
2092 for (j = 0; j < 4; j++) {
2093 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2094 ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
2095 if (ret_val != E1000_SUCCESS)
2096 goto out;
2097 }
2098
2099 out:
2100 return (ret_val);
2101 }
2102
/*
 * e1000_set_eee_i350 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 *
 * Enables or disables EEE based on the eee_disable setting in the
 * device-specific structure.
 */
2112 s32
2113 e1000_set_eee_i350(struct e1000_hw *hw)
2114 {
2115
2116 s32 ret_val = E1000_SUCCESS;
2117 u32 ipcnfg, eeer;
2118
2119 DEBUGFUNC("e1000_set_eee_i350");
2120
2121 if ((hw->mac.type < e1000_i350) ||
2122 (hw->phy.media_type != e1000_media_type_copper))
2123 goto out;
2124 ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
2125 eeer = E1000_READ_REG(hw, E1000_EEER);
2126
2127 /* enable or disable per user setting */
2128 if (!(hw->dev_spec._82575.eee_disable)) {
2129 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2130 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2131 E1000_EEER_LPI_FC);
2132
2133 } else {
2134 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2135 eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2136 E1000_EEER_LPI_FC);
2137 }
2138 E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
2139 E1000_WRITE_REG(hw, E1000_EEER, eeer);
2140 ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
2141 eeer = E1000_READ_REG(hw, E1000_EEER);
2142 out:
2143
2144 return (ret_val);
2145 }
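
/*
 * Illustrative sketch: a driver honoring an administrative "disable EEE"
 * setting on an i350 copper port would record the request in the
 * device-specific structure and then apply it through the routine above
 * (the surrounding configuration logic is hypothetical):
 *
 *	hw->dev_spec._82575.eee_disable = true;
 *	(void) e1000_set_eee_i350(hw);
 */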