1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2007-2012 Intel Corporation. All rights reserved.
24 */
25
26 /*
27 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 */
29
30 /* IntelVersion: 1.146.2.2 v3_3_14_3_BHSW1 */
31
32 /*
33 * 82575EB Gigabit Network Connection
34 * 82575EB Gigabit Backplane Connection
35 * 82575GB Gigabit Network Connection
36 * 82576 Gigabit Network Connection
37 * 82576 Quad Port Gigabit Mezzanine Adapter
38 */
39
40 #include "igb_api.h"
41
42 static s32 e1000_init_phy_params_82575(struct e1000_hw *hw);
43 static s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
44 static s32 e1000_init_mac_params_82575(struct e1000_hw *hw);
45 static s32 e1000_acquire_phy_82575(struct e1000_hw *hw);
46 static void e1000_release_phy_82575(struct e1000_hw *hw);
47 static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw);
48 static void e1000_release_nvm_82575(struct e1000_hw *hw);
49 static s32 e1000_check_for_link_82575(struct e1000_hw *hw);
50 static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw);
51 static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
52 u16 *duplex);
53 static s32 e1000_init_hw_82575(struct e1000_hw *hw);
54 static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
55 static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
56 u16 *data);
57 static s32 e1000_reset_hw_82575(struct e1000_hw *hw);
58 static s32 e1000_reset_hw_82580(struct e1000_hw *hw);
59 static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset,
60 u16 *data);
61 static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset,
62 u16 data);
63 static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
64 bool active);
65 static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw);
66 static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw);
67 static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
68 static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
69 u32 offset, u16 data);
70 static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
71 static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
72 static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
73 u16 *speed, u16 *duplex);
74 static s32 e1000_get_phy_id_82575(struct e1000_hw *hw);
75 static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
76 static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
77 static s32 e1000_reset_init_script_82575(struct e1000_hw *hw);
78 static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw);
79 static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
80 static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
81 static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
82
83 static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
84 u16 offset);
85 static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
86 u16 offset);
87 static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
88 static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
89 static void e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value);
90 static void e1000_clear_vfta_i350(struct e1000_hw *hw);
91
92 static const u16 e1000_82580_rxpbs_table[] =
93 {36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140};
94 #define E1000_82580_RXPBS_TABLE_SIZE \
95 (sizeof (e1000_82580_rxpbs_table)/sizeof (u16))
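/*
 * The table above is indexed by the size value read from the RXPBS register;
 * see e1000_rxpbs_adjust_82580() for the lookup and the full index map.
 */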
96
97 /*
98 * e1000_init_phy_params_82575 - Init PHY func ptrs.
99 * @hw: pointer to the HW structure
100 */
101 static s32
102 e1000_init_phy_params_82575(struct e1000_hw *hw)
103 {
104 struct e1000_phy_info *phy = &hw->phy;
105 s32 ret_val = E1000_SUCCESS;
106
107 DEBUGFUNC("e1000_init_phy_params_82575");
108
109 if (hw->phy.media_type != e1000_media_type_copper) {
110 phy->type = e1000_phy_none;
111 goto out;
112 }
113
114 phy->ops.power_up = e1000_power_up_phy_copper;
115 phy->ops.power_down = e1000_power_down_phy_copper_82575;
116
117 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
118 phy->reset_delay_us = 100;
119
120 phy->ops.acquire = e1000_acquire_phy_82575;
121 phy->ops.check_reset_block = e1000_check_reset_block_generic;
122 phy->ops.commit = e1000_phy_sw_reset_generic;
123 phy->ops.get_cfg_done = e1000_get_cfg_done_82575;
124 phy->ops.release = e1000_release_phy_82575;
125
126 if (e1000_sgmii_active_82575(hw)) {
127 phy->ops.reset = e1000_phy_hw_reset_sgmii_82575;
128 phy->ops.read_reg = e1000_read_phy_reg_sgmii_82575;
129 phy->ops.write_reg = e1000_write_phy_reg_sgmii_82575;
130 } else if (hw->mac.type == e1000_82580) {
131 phy->ops.reset = e1000_phy_hw_reset_generic;
132 phy->ops.read_reg = e1000_read_phy_reg_82580;
133 phy->ops.write_reg = e1000_write_phy_reg_82580;
134 } else {
135 phy->ops.reset = e1000_phy_hw_reset_generic;
136 phy->ops.read_reg = e1000_read_phy_reg_igp;
137 phy->ops.write_reg = e1000_write_phy_reg_igp;
138 }
139
140	/* Set phy->addr and phy->id. */
141 ret_val = e1000_get_phy_id_82575(hw);
142
143 /* Verify phy id and set remaining function pointers */
144 switch (phy->id) {
145 case M88E1111_I_PHY_ID:
146 phy->type = e1000_phy_m88;
147 phy->ops.check_polarity = e1000_check_polarity_m88;
148 phy->ops.get_info = e1000_get_phy_info_m88;
149 phy->ops.get_cable_length = e1000_get_cable_length_m88;
150 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
151 break;
152 case IGP03E1000_E_PHY_ID:
153 case IGP04E1000_E_PHY_ID:
154 phy->type = e1000_phy_igp_3;
155 phy->ops.check_polarity = e1000_check_polarity_igp;
156 phy->ops.get_info = e1000_get_phy_info_igp;
157 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
158 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
159 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82575;
160 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_generic;
161 break;
162 case I82580_I_PHY_ID:
163 case I350_I_PHY_ID:
164 phy->type = e1000_phy_82580;
165 phy->ops.check_polarity = e1000_check_polarity_82577;
166 phy->ops.force_speed_duplex =
167 e1000_phy_force_speed_duplex_82577;
168 phy->ops.get_cable_length = e1000_get_cable_length_82577;
169 phy->ops.get_info = e1000_get_phy_info_82577;
170 break;
171 default:
172 ret_val = -E1000_ERR_PHY;
173 goto out;
174 }
175
176 out:
177 return (ret_val);
178 }
179
180 /*
181 * e1000_init_nvm_params_82575 - Init NVM func ptrs.
182 * @hw: pointer to the HW structure
183 */
184 static s32
185 e1000_init_nvm_params_82575(struct e1000_hw *hw)
186 {
187 struct e1000_nvm_info *nvm = &hw->nvm;
188 u32 eecd = E1000_READ_REG(hw, E1000_EECD);
189 u16 size;
190
191 DEBUGFUNC("e1000_init_nvm_params_82575");
192
193 nvm->opcode_bits = 8;
194 nvm->delay_usec = 1;
195 switch (nvm->override) {
196 case e1000_nvm_override_spi_large:
197 nvm->page_size = 32;
198 nvm->address_bits = 16;
199 break;
200 case e1000_nvm_override_spi_small:
201 nvm->page_size = 8;
202 nvm->address_bits = 8;
203 break;
204 default:
205 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
206 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
207 break;
208 }
209
210 nvm->type = e1000_nvm_eeprom_spi;
211
212 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
213 E1000_EECD_SIZE_EX_SHIFT);
214
215 /*
216 * Added to a constant, "size" becomes the left-shift value
217 * for setting word_size.
218 */
219 size += NVM_WORD_SIZE_BASE_SHIFT;
220
221 /* EEPROM access above 16k is unsupported */
222 if (size > 14)
223 size = 14;
224 nvm->word_size = 1 << size;
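	/*
	 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6): an EECD
	 * size field of 2 yields a shift of 8, i.e. a 256-word EEPROM.
	 */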
225
226 /* Function Pointers */
227 nvm->ops.acquire = e1000_acquire_nvm_82575;
228 nvm->ops.read = e1000_read_nvm_eerd;
229 nvm->ops.release = e1000_release_nvm_82575;
230 nvm->ops.update = e1000_update_nvm_checksum_generic;
231 nvm->ops.valid_led_default = e1000_valid_led_default_82575;
232 nvm->ops.validate = e1000_validate_nvm_checksum_generic;
233 nvm->ops.write = e1000_write_nvm_spi;
234
235	/* override generic family function pointers for specific descendants */
236 switch (hw->mac.type) {
237 case e1000_i350:
238 nvm->ops.validate = e1000_validate_nvm_checksum_i350;
239 nvm->ops.update = e1000_update_nvm_checksum_i350;
240 break;
241 default:
242 break;
243 }
244
245
246 return (E1000_SUCCESS);
247 }
248
249 /*
250 * e1000_init_mac_params_82575 - Init MAC func ptrs.
251 * @hw: pointer to the HW structure
252 */
253 static s32
254 e1000_init_mac_params_82575(struct e1000_hw *hw)
255 {
256 struct e1000_mac_info *mac = &hw->mac;
257 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
258 u32 ctrl_ext = 0;
259
260 DEBUGFUNC("e1000_init_mac_params_82575");
261
262 /* Set media type */
263 /*
264 * The 82575 uses bits 22:23 for link mode. The mode can be changed
265 * based on the EEPROM. We cannot rely upon device ID. There
266 * is no distinguishable difference between fiber and internal
267 * SerDes mode on the 82575. There can be an external PHY attached
268 * on the SGMII interface. For this, we'll set sgmii_active to true.
269 */
270 hw->phy.media_type = e1000_media_type_copper;
271 dev_spec->sgmii_active = false;
272
273 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
274 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
275 case E1000_CTRL_EXT_LINK_MODE_SGMII:
276 dev_spec->sgmii_active = true;
277 ctrl_ext |= E1000_CTRL_I2C_ENA;
278 break;
279 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
280 case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
281 hw->phy.media_type = e1000_media_type_internal_serdes;
282 ctrl_ext |= E1000_CTRL_I2C_ENA;
283 break;
284 default:
285 ctrl_ext &= ~E1000_CTRL_I2C_ENA;
286 break;
287 }
288
289 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
290
291 /*
292	 * if using I2C, make certain the MDICNFG register is cleared to prevent
293	 * communications from being misrouted to the MDIC registers
294 */
295 if ((ctrl_ext & E1000_CTRL_I2C_ENA) && (hw->mac.type == e1000_82580))
296 E1000_WRITE_REG(hw, E1000_MDICNFG, 0);
297
298 /* Set mta register count */
299 mac->mta_reg_count = 128;
300 /* Set uta register count */
301 mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
302 /* Set rar entry count */
303 mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
304 if (mac->type == e1000_82576)
305 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
306 if (mac->type == e1000_82580)
307 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
308 if (mac->type == e1000_i350) {
309 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
310 /* Enable EEE default settings for i350 */
311 dev_spec->eee_disable = B_FALSE;
312 }
313 /* Set if part includes ASF firmware */
314 mac->asf_firmware_present = true;
315 /* Set if manageability features are enabled. */
316 mac->arc_subsystem_valid =
317 (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
318 ? true : false;
319
320 /* Function pointers */
321
322 /* bus type/speed/width */
323 mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
324 /* reset */
325 if (mac->type == e1000_82580)
326 mac->ops.reset_hw = e1000_reset_hw_82580;
327 else
328 mac->ops.reset_hw = e1000_reset_hw_82575;
329 /* hw initialization */
330 mac->ops.init_hw = e1000_init_hw_82575;
331 /* link setup */
332 mac->ops.setup_link = e1000_setup_link_generic;
333 /* physical interface link setup */
334 mac->ops.setup_physical_interface =
335 (hw->phy.media_type == e1000_media_type_copper)
336 ? e1000_setup_copper_link_82575
337 : e1000_setup_serdes_link_82575;
338 /* physical interface shutdown */
339 mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
340 /* check for link */
341 mac->ops.check_for_link = e1000_check_for_link_82575;
342 /* receive address register setting */
343 mac->ops.rar_set = e1000_rar_set_generic;
344 /* read mac address */
345 mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
346 /* multicast address update */
347 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
348
349 if (hw->mac.type == e1000_i350) {
350 /* writing VFTA */
351 mac->ops.write_vfta = e1000_write_vfta_i350;
352 /* clearing VFTA */
353 mac->ops.clear_vfta = e1000_clear_vfta_i350;
354 } else {
355 /* writing VFTA */
356 mac->ops.write_vfta = e1000_write_vfta_generic;
357 /* clearing VFTA */
358 mac->ops.clear_vfta = e1000_clear_vfta_generic;
359 }
360 /* setting MTA */
361 mac->ops.mta_set = e1000_mta_set_generic;
362 /* ID LED init */
363 mac->ops.id_led_init = e1000_id_led_init_generic;
364 /* blink LED */
365 mac->ops.blink_led = e1000_blink_led_generic;
366 /* setup LED */
367 mac->ops.setup_led = e1000_setup_led_generic;
368 /* cleanup LED */
369 mac->ops.cleanup_led = e1000_cleanup_led_generic;
370 /* turn on/off LED */
371 mac->ops.led_on = e1000_led_on_generic;
372 mac->ops.led_off = e1000_led_off_generic;
373 /* clear hardware counters */
374 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
375 /* link info */
376 mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
377
378 /* set lan id for port to determine which phy lock to use */
379 hw->mac.ops.set_lan_id(hw);
380
381 return (E1000_SUCCESS);
382 }
383
384 /*
385 * e1000_init_function_pointers_82575 - Init func ptrs.
386 * @hw: pointer to the HW structure
387 *
388 * Called to initialize all function pointers and parameters.
389 */
390 void
391 e1000_init_function_pointers_82575(struct e1000_hw *hw)
392 {
393 DEBUGFUNC("e1000_init_function_pointers_82575");
394
395 hw->mac.ops.init_params = e1000_init_mac_params_82575;
396 hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
397 hw->phy.ops.init_params = e1000_init_phy_params_82575;
398 }
399
400 /*
401 * e1000_acquire_phy_82575 - Acquire rights to access PHY
402 * @hw: pointer to the HW structure
403 *
404 * Acquire access rights to the correct PHY.
405 */
406 static s32
407 e1000_acquire_phy_82575(struct e1000_hw *hw)
408 {
409 u16 mask = E1000_SWFW_PHY0_SM;
410
411 DEBUGFUNC("e1000_acquire_phy_82575");
412
413 if (hw->bus.func == E1000_FUNC_1)
414 mask = E1000_SWFW_PHY1_SM;
415 else if (hw->bus.func == E1000_FUNC_2)
416 mask = E1000_SWFW_PHY2_SM;
417 else if (hw->bus.func == E1000_FUNC_3)
418 mask = E1000_SWFW_PHY3_SM;
419
420 return (e1000_acquire_swfw_sync_82575(hw, mask));
421 }
422
423 /*
424 * e1000_release_phy_82575 - Release rights to access PHY
425 * @hw: pointer to the HW structure
426 *
427 * A wrapper to release access rights to the correct PHY.
428 */
429 static void
430 e1000_release_phy_82575(struct e1000_hw *hw)
431 {
432 u16 mask = E1000_SWFW_PHY0_SM;
433
434 DEBUGFUNC("e1000_release_phy_82575");
435
436 if (hw->bus.func == E1000_FUNC_1)
437 mask = E1000_SWFW_PHY1_SM;
438 else if (hw->bus.func == E1000_FUNC_2)
439 mask = E1000_SWFW_PHY2_SM;
440 else if (hw->bus.func == E1000_FUNC_3)
441 mask = E1000_SWFW_PHY3_SM;
442
443 e1000_release_swfw_sync_82575(hw, mask);
444 }
445
446 /*
447 * e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
448 * @hw: pointer to the HW structure
449 * @offset: register offset to be read
450 * @data: pointer to the read data
451 *
452 * Reads the PHY register at offset using the serial gigabit media independent
453 * interface and stores the retrieved information in data.
454 */
455 static s32
456 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 *data)
457 {
458 s32 ret_val = -E1000_ERR_PARAM;
459
460 DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
461
462 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
463 DEBUGOUT1("PHY Address %u is out of range\n", offset);
464 goto out;
465 }
466
467 ret_val = hw->phy.ops.acquire(hw);
468 if (ret_val)
469 goto out;
470
471 ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
472
473 hw->phy.ops.release(hw);
474
475 out:
476 return (ret_val);
477 }
478
479 /*
480 * e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
481 * @hw: pointer to the HW structure
482 * @offset: register offset to write to
483 * @data: data to write at register offset
484 *
485 * Writes the data to PHY register at the offset using the serial gigabit
486 * media independent interface.
487 */
488 static s32
489 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, u16 data)
490 {
491 s32 ret_val = -E1000_ERR_PARAM;
492
493 DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
494
495 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
496		DEBUGOUT1("PHY Address %u is out of range\n", offset);
497 goto out;
498 }
499
500 ret_val = hw->phy.ops.acquire(hw);
501 if (ret_val)
502 goto out;
503
504 ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
505
506 hw->phy.ops.release(hw);
507
508 out:
509 return (ret_val);
510 }
511
512 /*
513 * e1000_get_phy_id_82575 - Retrieve PHY addr and id
514 * @hw: pointer to the HW structure
515 *
516	 * Retrieves the PHY address and ID for PHYs that do and do not use the
517	 * sgmii interface.
518 */
519 static s32
520 e1000_get_phy_id_82575(struct e1000_hw *hw)
521 {
522 struct e1000_phy_info *phy = &hw->phy;
523 s32 ret_val = E1000_SUCCESS;
524 u16 phy_id;
525 u32 ctrl_ext;
526
527 DEBUGFUNC("e1000_get_phy_id_82575");
528
529 /*
530 * For SGMII PHYs, we try the list of possible addresses until
531 * we find one that works. For non-SGMII PHYs
532 * (e.g. integrated copper PHYs), an address of 1 should
533	 * work. On success, phy->addr and phy->id are set
534	 * correctly.
535 */
536 if (!e1000_sgmii_active_82575(hw)) {
537 phy->addr = 1;
538 ret_val = e1000_get_phy_id(hw);
539 goto out;
540 }
541
542 /* Power on sgmii phy if it is disabled */
543 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
544 E1000_WRITE_REG(hw, E1000_CTRL_EXT,
545 ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
546 E1000_WRITE_FLUSH(hw);
547 msec_delay(300);
548
549 /*
550 * The address field in the I2CCMD register is 3 bits and 0 is invalid.
551 * Therefore, we need to test 1-7
552 */
553 for (phy->addr = 1; phy->addr < 8; phy->addr++) {
554 ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
555 if (ret_val == E1000_SUCCESS) {
556 DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
557 phy_id,
558 phy->addr);
559 /*
560			 * At the time of this writing, the M88 part is
561 * the only supported SGMII PHY product.
562 */
563 if (phy_id == M88_VENDOR)
564 break;
565 } else {
566 DEBUGOUT1("PHY address %u was unreadable\n",
567 phy->addr);
568 }
569 }
570
571 /* A valid PHY type couldn't be found. */
572 if (phy->addr == 8) {
573 phy->addr = 0;
574 ret_val = -E1000_ERR_PHY;
575 } else {
576 ret_val = e1000_get_phy_id(hw);
577 }
578
579 /* restore previous sfp cage power state */
580 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
581
582 out:
583 return (ret_val);
584 }
585
586 /*
587 * e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
588 * @hw: pointer to the HW structure
589 *
590 * Resets the PHY using the serial gigabit media independent interface.
591 */
592 static s32
593 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
594 {
595 s32 ret_val = E1000_SUCCESS;
596
597 DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
598
599 /*
600 * This isn't a true "hard" reset, but is the only reset
601 * available to us at this time.
602 */
603
604 DEBUGOUT("Soft resetting SGMII attached PHY...\n");
605
606 if (!(hw->phy.ops.write_reg))
607 goto out;
608
609 /*
610	 * SFP documentation requires the following to configure the SFP module
611 * to work on SGMII. No further documentation is given.
612 */
613 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
614 if (ret_val)
615 goto out;
616
617 ret_val = hw->phy.ops.commit(hw);
618
619 out:
620 return (ret_val);
621 }
622
623 /*
624 * e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
625 * @hw: pointer to the HW structure
626 * @active: true to enable LPLU, false to disable
627 *
628 * Sets the LPLU D0 state according to the active flag. When
629 * activating LPLU this function also disables smart speed
630 * and vice versa. LPLU will not be activated unless the
631	 * device advertises 10, 10/100, or 10/100/1000 Mb/s at all
632	 * duplexes during autonegotiation.
633 * This is a function pointer entry point only called by
634 * PHY setup routines.
635 */
636 static s32
637 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
638 {
639 struct e1000_phy_info *phy = &hw->phy;
640 s32 ret_val = E1000_SUCCESS;
641 u16 data;
642
643 DEBUGFUNC("e1000_set_d0_lplu_state_82575");
644
645 if (!(hw->phy.ops.read_reg))
646 goto out;
647
648 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
649 if (ret_val)
650 goto out;
651
652 if (active) {
653 data |= IGP02E1000_PM_D0_LPLU;
654 ret_val = phy->ops.write_reg(hw,
655 IGP02E1000_PHY_POWER_MGMT,
656 data);
657 if (ret_val)
658 goto out;
659
660 /* When LPLU is enabled, we should disable SmartSpeed */
661 ret_val = phy->ops.read_reg(hw,
662 IGP01E1000_PHY_PORT_CONFIG,
663 &data);
664 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
665 ret_val = phy->ops.write_reg(hw,
666 IGP01E1000_PHY_PORT_CONFIG,
667 data);
668 if (ret_val)
669 goto out;
670 } else {
671 data &= ~IGP02E1000_PM_D0_LPLU;
672 ret_val = phy->ops.write_reg(hw,
673 IGP02E1000_PHY_POWER_MGMT,
674 data);
675 /*
676 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
677 * during Dx states where the power conservation is most
678 * important. During driver activity we should enable
679 * SmartSpeed, so performance is maintained.
680 */
681 if (phy->smart_speed == e1000_smart_speed_on) {
682 ret_val = phy->ops.read_reg(hw,
683 IGP01E1000_PHY_PORT_CONFIG,
684 &data);
685 if (ret_val)
686 goto out;
687
688 data |= IGP01E1000_PSCFR_SMART_SPEED;
689 ret_val = phy->ops.write_reg(hw,
690 IGP01E1000_PHY_PORT_CONFIG,
691 data);
692 if (ret_val)
693 goto out;
694 } else if (phy->smart_speed == e1000_smart_speed_off) {
695 ret_val = phy->ops.read_reg(hw,
696 IGP01E1000_PHY_PORT_CONFIG,
697 &data);
698 if (ret_val)
699 goto out;
700
701 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
702 ret_val = phy->ops.write_reg(hw,
703 IGP01E1000_PHY_PORT_CONFIG,
704 data);
705 if (ret_val)
706 goto out;
707 }
708 }
709
710 out:
711 return (ret_val);
712 }
713
714 /*
715 * e1000_acquire_nvm_82575 - Request for access to EEPROM
716 * @hw: pointer to the HW structure
717 *
718 * Acquire the necessary semaphores for exclusive access to the EEPROM.
719 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
720 * Return successful if access grant bit set, else clear the request for
721 * EEPROM access and return -E1000_ERR_NVM (-1).
722 */
723 static s32
724 e1000_acquire_nvm_82575(struct e1000_hw *hw)
725 {
726 s32 ret_val;
727
728 DEBUGFUNC("e1000_acquire_nvm_82575");
729
730 ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
731 if (ret_val)
732 goto out;
733
734 /*
735	 * Check for a prior NVM access error that
736	 * could interfere with this access
737 */
738 if (hw->mac.type == e1000_i350) {
739 u32 eecd = E1000_READ_REG(hw, E1000_EECD);
740 if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
741 E1000_EECD_TIMEOUT)) {
742 /* Clear all access error flags */
743 E1000_WRITE_REG(hw, E1000_EECD, eecd |
744 E1000_EECD_ERROR_CLR);
745 DEBUGOUT("Nvm bit banging access error "
746 "detected and cleared.\n");
747 }
748 }
749
750 ret_val = e1000_acquire_nvm_generic(hw);
751
752 if (ret_val)
753 e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
754
755 out:
756 return (ret_val);
757 }
758
759 /*
760 * e1000_release_nvm_82575 - Release exclusive access to EEPROM
761 * @hw: pointer to the HW structure
762 *
763 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
764 * then release the semaphores acquired.
765 */
766 static void
767 e1000_release_nvm_82575(struct e1000_hw *hw)
768 {
769 DEBUGFUNC("e1000_release_nvm_82575");
770
771 e1000_release_nvm_generic(hw);
772 e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
773 }
774
775 /*
776 * e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
777 * @hw: pointer to the HW structure
778 * @mask: specifies which semaphore to acquire
779 *
780 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
781 * will also specify which port we're acquiring the lock for.
782 */
783 static s32
784 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
785 {
786 u32 swfw_sync;
787 u32 swmask = mask;
788 u32 fwmask = mask << 16;
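	/*
	 * Software flags occupy the low 16 bits of SW_FW_SYNC and the
	 * corresponding firmware flags the high 16 bits, hence the shift.
	 */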
789 s32 ret_val = E1000_SUCCESS;
790 s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
791
792 DEBUGFUNC("e1000_acquire_swfw_sync_82575");
793
794 while (i < timeout) {
795 if (e1000_get_hw_semaphore_generic(hw)) {
796 ret_val = -E1000_ERR_SWFW_SYNC;
797 goto out;
798 }
799
800 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
801 if (!(swfw_sync & (fwmask | swmask)))
802 break;
803
804 /*
805 * Firmware currently using resource (fwmask)
806 * or other software thread using resource (swmask)
807 */
808 e1000_put_hw_semaphore_generic(hw);
809 msec_delay_irq(5);
810 i++;
811 }
812
813 if (i == timeout) {
814 DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
815 ret_val = -E1000_ERR_SWFW_SYNC;
816 goto out;
817 }
818
819 swfw_sync |= swmask;
820 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
821
822 e1000_put_hw_semaphore_generic(hw);
823
824 out:
825 return (ret_val);
826 }
827
828 /*
829 * e1000_release_swfw_sync_82575 - Release SW/FW semaphore
830 * @hw: pointer to the HW structure
831	 * @mask: specifies which semaphore to release
832 *
833 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
834 * will also specify which port we're releasing the lock for.
835 */
836 static void
837 e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
838 {
839 u32 swfw_sync;
840
841 DEBUGFUNC("e1000_release_swfw_sync_82575");
842
843 while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS) {
844 /* Empty */
845 }
846
847 swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
848 swfw_sync &= ~mask;
849 E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
850
851 e1000_put_hw_semaphore_generic(hw);
852 }
853
854 /*
855 * e1000_get_cfg_done_82575 - Read config done bit
856 * @hw: pointer to the HW structure
857 *
858 * Read the management control register for the config done bit for
859 * completion status. NOTE: silicon which is EEPROM-less will fail trying
860	 * to read the config done bit, so the error is *ONLY* logged and
861	 * E1000_SUCCESS is returned.  If we were to return an error, EEPROM-less
862	 * silicon would not be able to be reset or change link.
863 */
864 static s32
865 e1000_get_cfg_done_82575(struct e1000_hw *hw)
866 {
867 s32 timeout = PHY_CFG_TIMEOUT;
868 s32 ret_val = E1000_SUCCESS;
869 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
870
871 DEBUGFUNC("e1000_get_cfg_done_82575");
872
873 if (hw->bus.func == E1000_FUNC_1)
874 mask = E1000_NVM_CFG_DONE_PORT_1;
875 else if (hw->bus.func == E1000_FUNC_2)
876 mask = E1000_NVM_CFG_DONE_PORT_2;
877 else if (hw->bus.func == E1000_FUNC_3)
878 mask = E1000_NVM_CFG_DONE_PORT_3;
879
880 while (timeout) {
881 if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
882 break;
883 msec_delay(1);
884 timeout--;
885 }
886 if (!timeout)
887 DEBUGOUT("MNG configuration cycle has not completed.\n");
888
889 /* If EEPROM is not marked present, init the PHY manually */
890 if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
891 (hw->phy.type == e1000_phy_igp_3))
892 (void) e1000_phy_init_script_igp3(hw);
893
894 return (ret_val);
895 }
896
897 /*
898 * e1000_get_link_up_info_82575 - Get link speed/duplex info
899 * @hw: pointer to the HW structure
900 * @speed: stores the current speed
901 * @duplex: stores the current duplex
902 *
903	 * This is a wrapper function: if using the serial gigabit media independent
904 * interface, use PCS to retrieve the link speed and duplex information.
905 * Otherwise, use the generic function to get the link speed and duplex info.
906 */
907 static s32
908 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed, u16 *duplex)
909 {
910 s32 ret_val;
911
912 DEBUGFUNC("e1000_get_link_up_info_82575");
913
914 if (hw->phy.media_type != e1000_media_type_copper)
915 ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
916 duplex);
917 else
918 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
919 duplex);
920
921 return (ret_val);
922 }
923
924 /*
925 * e1000_check_for_link_82575 - Check for link
926 * @hw: pointer to the HW structure
927 *
928	 * If sgmii is enabled, then use the pcs register to determine link; otherwise
929 * use the generic interface for determining link.
930 */
931 static s32
932 e1000_check_for_link_82575(struct e1000_hw *hw)
933 {
934 s32 ret_val;
935 u16 speed, duplex;
936
937 DEBUGFUNC("e1000_check_for_link_82575");
938
939 /* SGMII link check is done through the PCS register. */
940 if (hw->phy.media_type != e1000_media_type_copper) {
941 ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
942 &duplex);
943 /*
944 * Use this flag to determine if link needs to be checked or
945 * not. If we have link clear the flag so that we do not
946 * continue to check for link.
947 */
948 hw->mac.get_link_status = !hw->mac.serdes_has_link;
949 } else {
950 ret_val = e1000_check_for_copper_link_generic(hw);
951 }
952
953 return (ret_val);
954 }
955
956 /*
957 * e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
958 * @hw: pointer to the HW structure
959 * @speed: stores the current speed
960 * @duplex: stores the current duplex
961 *
962 * Using the physical coding sub-layer (PCS), retrieve the current speed and
963 * duplex, then store the values in the pointers provided.
964 */
965 static s32
966 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
967 u16 *speed, u16 *duplex)
968 {
969 struct e1000_mac_info *mac = &hw->mac;
970 u32 pcs;
971
972 DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
973
974 /* Set up defaults for the return values of this function */
975 mac->serdes_has_link = false;
976 *speed = 0;
977 *duplex = 0;
978
979 /*
980	 * Read the PCS Status register for link state. In non-copper mode the
981	 * generic status register is not accurate, so the PCS status register
982	 * is used instead.
983 */
984 pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
985
986 /*
987 * The link up bit determines when link is up on autoneg. The sync ok
988 * gets set once both sides sync up and agree upon link. Stable link
989 * can be determined by checking for both link up and link sync ok
990 */
991 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
992 mac->serdes_has_link = true;
993
994 /* Detect and store PCS speed */
995 if (pcs & E1000_PCS_LSTS_SPEED_1000) {
996 *speed = SPEED_1000;
997 } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
998 *speed = SPEED_100;
999 } else {
1000 *speed = SPEED_10;
1001 }
1002
1003 /* Detect and store PCS duplex */
1004 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
1005 *duplex = FULL_DUPLEX;
1006 } else {
1007 *duplex = HALF_DUPLEX;
1008 }
1009 }
1010
1011 return (E1000_SUCCESS);
1012 }
1013
1014 /*
1015 * e1000_shutdown_serdes_link_82575 - Remove link during power down
1016 * @hw: pointer to the HW structure
1017 *
1018	 * In the case of serdes, shut down the SFP cage and the PCS on driver
1019	 * unload when management pass-through is not enabled.
1020 */
1021	static void
1022 e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
1023 {
1024 u32 reg;
1025 u16 eeprom_data = 0;
1026
1027 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1028 !e1000_sgmii_active_82575(hw))
1029 return;
1030
1031 if (hw->bus.func == E1000_FUNC_0)
1032 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1033 else if (hw->mac.type == e1000_82580)
1034 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
1035 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
1036 &eeprom_data);
1037 else if (hw->bus.func == E1000_FUNC_1)
1038 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1039
1040 /*
1041 * If APM is not enabled in the EEPROM and management interface is
1042 * not enabled, then power down.
1043 */
1044 if (!(eeprom_data & E1000_NVM_APME_82575) &&
1045 !e1000_enable_mng_pass_thru(hw)) {
1046 /* Disable PCS to turn off link */
1047 reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
1048 reg &= ~E1000_PCS_CFG_PCS_EN;
1049 E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
1050
1051 /* shutdown the laser */
1052 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1053 reg |= E1000_CTRL_EXT_SDP3_DATA;
1054 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
1055
1056 /* flush the write to verify completion */
1057 E1000_WRITE_FLUSH(hw);
1058 msec_delay(1);
1059 }
1060 }
1061
1062 /*
1063 * e1000_reset_hw_82575 - Reset hardware
1064 * @hw: pointer to the HW structure
1065 *
1066 * This resets the hardware into a known state.
1067 */
1068 static s32
1069 e1000_reset_hw_82575(struct e1000_hw *hw)
1070 {
1071 u32 ctrl;
1072 s32 ret_val;
1073
1074 DEBUGFUNC("e1000_reset_hw_82575");
1075
1076 /*
1077 * Prevent the PCI-E bus from sticking if there is no TLP connection
1078 * on the last TLP read/write transaction when MAC is reset.
1079 */
1080 ret_val = e1000_disable_pcie_master_generic(hw);
1081 if (ret_val) {
1082 DEBUGOUT("PCI-E Master disable polling has failed.\n");
1083 }
1084
1085 /* set the completion timeout for interface */
1086 ret_val = e1000_set_pcie_completion_timeout(hw);
1087 if (ret_val) {
1088 DEBUGOUT("PCI-E Set completion timeout has failed.\n");
1089 }
1090
1091 DEBUGOUT("Masking off all interrupts\n");
1092 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1093
1094 E1000_WRITE_REG(hw, E1000_RCTL, 0);
1095 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
1096 E1000_WRITE_FLUSH(hw);
1097
1098 msec_delay(10);
1099
1100 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1101
1102 DEBUGOUT("Issuing a global reset to MAC\n");
1103 E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
1104
1105 ret_val = e1000_get_auto_rd_done_generic(hw);
1106 if (ret_val) {
1107 /*
1108		 * When the auto config read does not complete, do not
1109		 * return an error. This can happen when there is no EEPROM,
1110		 * and returning an error would prevent getting link.
1111 */
1112 DEBUGOUT("Auto Read Done did not complete\n");
1113 }
1114
1115 /* If EEPROM is not present, run manual init scripts */
1116 if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
1117 (void) e1000_reset_init_script_82575(hw);
1118
1119 /* Clear any pending interrupt events. */
1120 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1121 (void) E1000_READ_REG(hw, E1000_ICR);
1122
1123 /* Install any alternate MAC address into RAR0 */
1124 ret_val = e1000_check_alt_mac_addr_generic(hw);
1125
1126 return (ret_val);
1127 }
1128
1129 /*
1130 * e1000_init_hw_82575 - Initialize hardware
1131 * @hw: pointer to the HW structure
1132 *
1133 * This inits the hardware readying it for operation.
1134 */
1135 static s32
1136 e1000_init_hw_82575(struct e1000_hw *hw)
1137 {
1138 struct e1000_mac_info *mac = &hw->mac;
1139 s32 ret_val;
1140 u16 i, rar_count = mac->rar_entry_count;
1141
1142 DEBUGFUNC("e1000_init_hw_82575");
1143
1144 /* Initialize identification LED */
1145 ret_val = mac->ops.id_led_init(hw);
1146 if (ret_val) {
1147 DEBUGOUT("Error initializing identification LED\n");
1148 /* This is not fatal and we should not stop init due to this */
1149 }
1150
1151 /* Disabling VLAN filtering */
1152 DEBUGOUT("Initializing the IEEE VLAN\n");
1153 mac->ops.clear_vfta(hw);
1154
1155 /* Setup the receive address */
1156 e1000_init_rx_addrs_generic(hw, rar_count);
1157 /* Zero out the Multicast HASH table */
1158 DEBUGOUT("Zeroing the MTA\n");
1159 for (i = 0; i < mac->mta_reg_count; i++)
1160 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
1161
1162 /* Zero out the Unicast HASH table */
1163 DEBUGOUT("Zeroing the UTA\n");
1164 for (i = 0; i < mac->uta_reg_count; i++)
1165 E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
1166
1167 /* Setup link and flow control */
1168 ret_val = mac->ops.setup_link(hw);
1169
1170 /*
1171 * Clear all of the statistics registers (clear on read). It is
1172 * important that we do this after we have tried to establish link
1173 * because the symbol error count will increment wildly if there
1174 * is no link.
1175 */
1176 e1000_clear_hw_cntrs_82575(hw);
1177
1178 return (ret_val);
1179 }
1180
1181 /*
1182 * e1000_setup_copper_link_82575 - Configure copper link settings
1183 * @hw: pointer to the HW structure
1184 *
1185 * Configures the link for auto-neg or forced speed and duplex. Then we check
1186	 * for link; once link is established, collision distance and flow control
1187	 * are configured.
1188 */
1189 static s32
1190 e1000_setup_copper_link_82575(struct e1000_hw *hw)
1191 {
1192 u32 ctrl;
1193 s32 ret_val;
1194
1195 DEBUGFUNC("e1000_setup_copper_link_82575");
1196
1197 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1198 ctrl |= E1000_CTRL_SLU;
1199 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1200 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1201
1202 ret_val = e1000_setup_serdes_link_82575(hw);
1203 if (ret_val)
1204 goto out;
1205
1206 if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
1207		/* allow time for the SFP cage to power up the PHY */
1208 msec_delay(300);
1209
1210 ret_val = hw->phy.ops.reset(hw);
1211 if (ret_val) {
1212 DEBUGOUT("Error resetting the PHY.\n");
1213 goto out;
1214 }
1215 }
1216 switch (hw->phy.type) {
1217 case e1000_phy_m88:
1218 ret_val = e1000_copper_link_setup_m88(hw);
1219 break;
1220 case e1000_phy_igp_3:
1221 ret_val = e1000_copper_link_setup_igp(hw);
1222 break;
1223 case e1000_phy_82580:
1224 ret_val = e1000_copper_link_setup_82577(hw);
1225 break;
1226 default:
1227 ret_val = -E1000_ERR_PHY;
1228 break;
1229 }
1230
1231 if (ret_val)
1232 goto out;
1233
1234 ret_val = e1000_setup_copper_link_generic(hw);
1235 out:
1236 return (ret_val);
1237 }
1238
1239 /*
1240 * e1000_setup_serdes_link_82575 - Setup link for serdes
1241 * @hw: pointer to the HW structure
1242 *
1243 * Configure the physical coding sub-layer (PCS) link. The PCS link is
1244 * used on copper connections where the serialized gigabit media independent
1245	 * interface (sgmii) is in use, or on serdes fiber links.  Configures the link
1246 * for auto-negotiation or forces speed/duplex.
1247 */
1248 static s32
1249 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
1250 {
1251 u32 ctrl_ext, ctrl_reg, reg;
1252 bool pcs_autoneg;
1253
1254 DEBUGFUNC("e1000_setup_serdes_link_82575");
1255
1256 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1257 !e1000_sgmii_active_82575(hw))
1258 return (E1000_SUCCESS);
1259
1260 /*
1261 * On the 82575, SerDes loopback mode persists until it is
1262 * explicitly turned off or a power cycle is performed. A read to
1263 * the register does not indicate its status. Therefore, we ensure
1264 * loopback mode is disabled during initialization.
1265 */
1266 E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1267
1268 /* power on the sfp cage if present */
1269 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
1270 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
1271 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
1272
1273 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
1274 ctrl_reg |= E1000_CTRL_SLU;
1275
1276 if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
1277 /* set both sw defined pins */
1278 ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
1279
1280 /* Set switch control to serdes energy detect */
1281 reg = E1000_READ_REG(hw, E1000_CONNSW);
1282 reg |= E1000_CONNSW_ENRGSRC;
1283 E1000_WRITE_REG(hw, E1000_CONNSW, reg);
1284 }
1285
1286 reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
1287
1288 /* default pcs_autoneg to the same setting as mac autoneg */
1289 pcs_autoneg = hw->mac.autoneg;
1290
1291 switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
1292 case E1000_CTRL_EXT_LINK_MODE_SGMII:
1293 /* sgmii mode lets the phy handle forcing speed/duplex */
1294 pcs_autoneg = true;
1295 /* autoneg time out should be disabled for SGMII mode */
1296 reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
1297 break;
1298 case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
1299 /* disable PCS autoneg and support parallel detect only */
1300 pcs_autoneg = false;
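		/* fall through */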
1301 default:
1302 /*
1303		 * non-SGMII modes only support a speed of 1000/Full for the
1304 * link so it is best to just force the MAC and let the pcs
1305 * link either autoneg or be forced to 1000/Full
1306 */
1307 ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
1308 E1000_CTRL_FD | E1000_CTRL_FRCDPX;
1309
1310 /* set speed of 1000/Full if speed/duplex is forced */
1311 reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
1312 break;
1313 }
1314
1315 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
1316
1317 /*
1318 * New SerDes mode allows for forcing speed or autonegotiating speed
1319	 * at 1gb. Autoneg should be the default set by most drivers. This is the
1320 * mode that will be compatible with older link partners and switches.
1321 * However, both are supported by the hardware and some drivers/tools.
1322 */
1323
1324 reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
1325 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
1326
1327 /*
1328 * We force flow control to prevent the CTRL register values from being
1329 * overwritten by the autonegotiated flow control values
1330 */
1331 reg |= E1000_PCS_LCTL_FORCE_FCTRL;
1332
1333 if (pcs_autoneg) {
1334 /* Set PCS register for autoneg */
1335 reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1336 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1337 DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
1338 } else {
1339 /* Set PCS register for forced link */
1340 reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
1341 DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
1342 }
1343
1344 E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
1345
1346 if (!e1000_sgmii_active_82575(hw))
1347 (void) e1000_force_mac_fc_generic(hw);
1348
1349 return (E1000_SUCCESS);
1350 }
1351
1352 /*
1353 * e1000_valid_led_default_82575 - Verify a valid default LED config
1354 * @hw: pointer to the HW structure
1355 * @data: pointer to the NVM (EEPROM)
1356 *
1357 * Read the EEPROM for the current default LED configuration. If the
1358 * LED configuration is not valid, set to a valid LED configuration.
1359 */
1360 static s32
1361 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
1362 {
1363 s32 ret_val;
1364
1365 DEBUGFUNC("e1000_valid_led_default_82575");
1366
1367 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
1368 if (ret_val) {
1369 DEBUGOUT("NVM Read Error\n");
1370 goto out;
1371 }
1372
1373 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
1374 switch (hw->phy.media_type) {
1375 case e1000_media_type_internal_serdes:
1376 *data = ID_LED_DEFAULT_82575_SERDES;
1377 break;
1378 case e1000_media_type_copper:
1379 default:
1380 *data = ID_LED_DEFAULT;
1381 break;
1382 }
1383 }
1384 out:
1385 return (ret_val);
1386 }
1387
1388 /*
1389 * e1000_sgmii_active_82575 - Return sgmii state
1390 * @hw: pointer to the HW structure
1391 *
1392 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
1393	 * which can be enabled for use in embedded applications. Simply
1394 * return the current state of the sgmii interface.
1395 */
1396 static bool
1397 e1000_sgmii_active_82575(struct e1000_hw *hw)
1398 {
1399 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1400 return (dev_spec->sgmii_active);
1401 }
1402
1403 /*
1404 * e1000_reset_init_script_82575 - Inits HW defaults after reset
1405 * @hw: pointer to the HW structure
1406 *
1407 * Inits recommended HW defaults after a reset when there is no EEPROM
1408 * detected. This is only for the 82575.
1409 */
1410 static s32
1411 e1000_reset_init_script_82575(struct e1000_hw *hw)
1412 {
1413 DEBUGFUNC("e1000_reset_init_script_82575");
1414
1415 if (hw->mac.type == e1000_82575) {
1416 DEBUGOUT("Running reset init script for 82575\n");
1417 /* SerDes configuration via SERDESCTRL */
1418 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
1419 0x00, 0x0C);
1420 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
1421 0x01, 0x78);
1422 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
1423 0x1B, 0x23);
1424 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL,
1425 0x23, 0x15);
1426
1427 /* CCM configuration via CCMCTL register */
1428 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL,
1429 0x14, 0x00);
1430 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL,
1431 0x10, 0x00);
1432
1433 /* PCIe lanes configuration */
1434 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
1435 0x00, 0xEC);
1436 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
1437 0x61, 0xDF);
1438 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
1439 0x34, 0x05);
1440 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL,
1441 0x2F, 0x81);
1442
1443 /* PCIe PLL Configuration */
1444 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL,
1445 0x02, 0x47);
1446 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL,
1447 0x14, 0x00);
1448 (void) e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL,
1449 0x10, 0x00);
1450 }
1451
1452 return (E1000_SUCCESS);
1453 }
1454
1455 /*
1456 * e1000_read_mac_addr_82575 - Read device MAC address
1457 * @hw: pointer to the HW structure
1458 */
1459 static s32
1460 e1000_read_mac_addr_82575(struct e1000_hw *hw)
1461 {
1462 s32 ret_val = E1000_SUCCESS;
1463
1464 DEBUGFUNC("e1000_read_mac_addr_82575");
1465
1466 /*
1467 * If there's an alternate MAC address place it in RAR0
1468 * so that it will override the Si installed default perm
1469 * address.
1470 */
1471 ret_val = e1000_check_alt_mac_addr_generic(hw);
1472 if (ret_val)
1473 goto out;
1474
1475 ret_val = e1000_read_mac_addr_generic(hw);
1476
1477 out:
1478 return (ret_val);
1479 }
1480
1481 /*
1482 * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
1483 * @hw: pointer to the HW structure
1484 *
1485	 * Remove the link in the case of a PHY power down to save power or to turn
1486	 * off link during a driver unload, when wake on LAN is not enabled.
1487 */
1488 static void
1489 e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
1490 {
1491 struct e1000_phy_info *phy = &hw->phy;
1492 struct e1000_mac_info *mac = &hw->mac;
1493
1494 if (!(phy->ops.check_reset_block))
1495 return;
1496
1497 /* If the management interface is not enabled, then power down */
1498 if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
1499 e1000_power_down_phy_copper(hw);
1500 }
1501
1502 /*
1503 * e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
1504 * @hw: pointer to the HW structure
1505 *
1506 * Clears the hardware counters by reading the counter registers.
1507 */
1508 static void
1509 e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
1510 {
1511 DEBUGFUNC("e1000_clear_hw_cntrs_82575");
1512
1513 e1000_clear_hw_cntrs_base_generic(hw);
1514
1515 (void) E1000_READ_REG(hw, E1000_PRC64);
1516 (void) E1000_READ_REG(hw, E1000_PRC127);
1517 (void) E1000_READ_REG(hw, E1000_PRC255);
1518 (void) E1000_READ_REG(hw, E1000_PRC511);
1519 (void) E1000_READ_REG(hw, E1000_PRC1023);
1520 (void) E1000_READ_REG(hw, E1000_PRC1522);
1521 (void) E1000_READ_REG(hw, E1000_PTC64);
1522 (void) E1000_READ_REG(hw, E1000_PTC127);
1523 (void) E1000_READ_REG(hw, E1000_PTC255);
1524 (void) E1000_READ_REG(hw, E1000_PTC511);
1525 (void) E1000_READ_REG(hw, E1000_PTC1023);
1526 (void) E1000_READ_REG(hw, E1000_PTC1522);
1527
1528 (void) E1000_READ_REG(hw, E1000_ALGNERRC);
1529 (void) E1000_READ_REG(hw, E1000_RXERRC);
1530 (void) E1000_READ_REG(hw, E1000_TNCRS);
1531 (void) E1000_READ_REG(hw, E1000_CEXTERR);
1532 (void) E1000_READ_REG(hw, E1000_TSCTC);
1533 (void) E1000_READ_REG(hw, E1000_TSCTFC);
1534
1535 (void) E1000_READ_REG(hw, E1000_MGTPRC);
1536 (void) E1000_READ_REG(hw, E1000_MGTPDC);
1537 (void) E1000_READ_REG(hw, E1000_MGTPTC);
1538
1539 (void) E1000_READ_REG(hw, E1000_IAC);
1540 (void) E1000_READ_REG(hw, E1000_ICRXOC);
1541
1542 (void) E1000_READ_REG(hw, E1000_ICRXPTC);
1543 (void) E1000_READ_REG(hw, E1000_ICRXATC);
1544 (void) E1000_READ_REG(hw, E1000_ICTXPTC);
1545 (void) E1000_READ_REG(hw, E1000_ICTXATC);
1546 (void) E1000_READ_REG(hw, E1000_ICTXQEC);
1547 (void) E1000_READ_REG(hw, E1000_ICTXQMTC);
1548 (void) E1000_READ_REG(hw, E1000_ICRXDMTC);
1549
1550 (void) E1000_READ_REG(hw, E1000_CBTMPC);
1551 (void) E1000_READ_REG(hw, E1000_HTDPMC);
1552 (void) E1000_READ_REG(hw, E1000_CBRMPC);
1553 (void) E1000_READ_REG(hw, E1000_RPTHC);
1554 (void) E1000_READ_REG(hw, E1000_HGPTC);
1555 (void) E1000_READ_REG(hw, E1000_HTCBDPC);
1556 (void) E1000_READ_REG(hw, E1000_HGORCL);
1557 (void) E1000_READ_REG(hw, E1000_HGORCH);
1558 (void) E1000_READ_REG(hw, E1000_HGOTCL);
1559 (void) E1000_READ_REG(hw, E1000_HGOTCH);
1560 (void) E1000_READ_REG(hw, E1000_LENERRS);
1561
1562 /* This register should not be read in copper configurations */
1563 if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
1564 e1000_sgmii_active_82575(hw))
1565 (void) E1000_READ_REG(hw, E1000_SCVPC);
1566 }
1567
1568 /*
1569 * e1000_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1570 * @hw: pointer to the HW structure
1571 *
1572	 * After rx enable, if manageability is enabled then there is likely some
1573	 * bad data at the start of the fifo and possibly in the DMA fifo. This
1574	 * function clears the fifos and flushes any packets that came in while rx was
1575 * being enabled.
1576 */
1577 void
1578 e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
1579 {
1580 u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
1581 int i, ms_wait;
1582
1583	DEBUGFUNC("e1000_rx_fifo_flush_82575");
1584 if (hw->mac.type != e1000_82575 ||
1585 !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
1586 return;
1587
1588 /* Disable all RX queues */
1589 for (i = 0; i < 4; i++) {
1590 rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
1591 E1000_WRITE_REG(hw, E1000_RXDCTL(i),
1592 rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
1593 }
1594 /* Poll all queues to verify they have shut down */
1595 for (ms_wait = 0; ms_wait < 10; ms_wait++) {
1596 msec_delay(1);
1597 rx_enabled = 0;
1598 for (i = 0; i < 4; i++)
1599 rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
1600 if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
1601 break;
1602 }
1603
1604 if (ms_wait == 10)
1605 DEBUGOUT("Queue disable timed out after 10ms\n");
1606
1607 /*
1608 * Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
1609	 * incoming packets are rejected.  Then set RCTL.EN and wait 2ms so that
1610	 * any packet that arrived while the receiver was being enabled is flushed.
1611 */
1612 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
1613 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
1614
1615 rlpml = E1000_READ_REG(hw, E1000_RLPML);
1616 E1000_WRITE_REG(hw, E1000_RLPML, 0);
1617
1618 rctl = E1000_READ_REG(hw, E1000_RCTL);
1619 temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
1620 temp_rctl |= E1000_RCTL_LPE;
1621
1622 E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
1623 E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
1624 E1000_WRITE_FLUSH(hw);
1625 msec_delay(2);
1626
1627 /*
1628 * Enable RX queues that were previously enabled and restore our
1629 * previous state
1630 */
1631 for (i = 0; i < 4; i++)
1632 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
1633 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1634 E1000_WRITE_FLUSH(hw);
1635
1636 E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
1637 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1638
1639 /* Flush receive errors generated by workaround */
1640 (void) E1000_READ_REG(hw, E1000_ROC);
1641 (void) E1000_READ_REG(hw, E1000_RNBC);
1642 (void) E1000_READ_REG(hw, E1000_MPC);
1643 }
1644
1645 /*
1646 * e1000_set_pcie_completion_timeout - set pci-e completion timeout
1647 * @hw: pointer to the HW structure
1648 *
1649 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
1650	 * however the hardware default for these parts is 500us to 1ms, which is less
1651	 * than the 10ms recommended by the PCI-E spec.  To address this we need to
1652 * increase the value to either 10ms to 200ms for capability version 1 config,
1653 * or 16ms to 55ms for version 2.
1654 */
1655 static s32
1656 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
1657 {
1658 u32 gcr = E1000_READ_REG(hw, E1000_GCR);
1659 s32 ret_val = E1000_SUCCESS;
1660 u16 pcie_devctl2;
1661
1662 /* only take action if timeout value is defaulted to 0 */
1663 if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
1664 goto out;
1665
1666 /*
1667	 * if the capability version is 1 we can write the
1668 * timeout of 10ms to 200ms through the GCR register
1669 */
1670 if (!(gcr & E1000_GCR_CAP_VER2)) {
1671 gcr |= E1000_GCR_CMPL_TMOUT_10ms;
1672 goto out;
1673 }
1674
1675 /*
1676 * for version 2 capabilities we need to write the config space
1677	 * directly in order to set the completion timeout value to
1678 * 16ms to 55ms
1679 */
1680 ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1681 &pcie_devctl2);
1682 if (ret_val)
1683 goto out;
1684
1685 pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
1686
1687 ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
1688 &pcie_devctl2);
1689 out:
1690 /* disable completion timeout resend */
1691 gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
1692
1693 E1000_WRITE_REG(hw, E1000_GCR, gcr);
1694 return (ret_val);
1695 }
1696
1697 /*
1698 * e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
1699 * @hw: pointer to the hardware struct
1700 * @enable: state to enter, either enabled or disabled
1701 *
1702 * enables/disables L2 switch loopback functionality.
1703 */
1704 void
1705 e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
1706 {
1707 u32 dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
1708
1709 if (enable)
1710 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1711 else
1712 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
1713
1714 E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
1715 }
1716
1717 /*
1718 * e1000_vmdq_set_replication_pf - enable or disable vmdq replication
1719 * @hw: pointer to the hardware struct
1720 * @enable: state to enter, either enabled or disabled
1721 *
1722 * enables/disables replication of packets across multiple pools.
1723 */
1724 void
1725 e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1726 {
1727 u32 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
1728
1729 if (enable)
1730 vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
1731 else
1732 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
1733
1734 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
1735 }
1736
1737 /*
1738 * e1000_read_phy_reg_82580 - Read 82580 MDI control register
1739 * @hw: pointer to the HW structure
1740 * @offset: register offset to be read
1741 * @data: pointer to the read data
1742 *
1743 * Reads the MDI control register in the PHY at offset and stores the
1744 * information read to data.
1745 */
1746 static s32
1747 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1748 {
1749 u32 mdicnfg = 0;
1750 s32 ret_val;
1751
1752 DEBUGFUNC("e1000_read_phy_reg_82580");
1753
1754 ret_val = hw->phy.ops.acquire(hw);
1755 if (ret_val)
1756 goto out;
1757
1758 /*
1759	 * We now configure the PHY address in the MDICNFG register. Same bits
1760	 * as before. The address in MDIC can still be written but will be
1761	 * ignored. This allows us to call the old function after
1762	 * configuring the PHY address in the new register.
1763 */
1764 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1765 E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
1766
1767 ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
1768
1769 hw->phy.ops.release(hw);
1770
1771 out:
1772 return (ret_val);
1773 }
1774
1775 /*
1776 * e1000_write_phy_reg_82580 - Write 82580 MDI control register
1777 * @hw: pointer to the HW structure
1778 * @offset: register offset to write to
1779 * @data: data to write to register at offset
1780 *
1781 * Writes data to MDI control register in the PHY at offset.
1782 */
1783 static s32
1784 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1785 {
1786 u32 mdicnfg = 0;
1787 s32 ret_val;
1788
1789 DEBUGFUNC("e1000_write_phy_reg_82580");
1790
1791 ret_val = hw->phy.ops.acquire(hw);
1792 if (ret_val)
1793 goto out;
1794
1795 /*
1796	 * We now configure the PHY address in the MDICNFG register. Same bits
1797	 * as before. The address in MDIC can still be written but will be
1798	 * ignored. This allows us to call the old function after
1799	 * configuring the PHY address in the new register.
1800 */
1801 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1802 E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
1803
1804 ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
1805
1806 hw->phy.ops.release(hw);
1807
1808 out:
1809 return (ret_val);
1810 }
1811
1812 /*
1813 * e1000_reset_hw_82580 - Reset hardware
1814 * @hw: pointer to the HW structure
1815 *
 * This resets the function or the entire device (all ports, etc.)
 * to a known state.
1818 */
1819 static s32
1820 e1000_reset_hw_82580(struct e1000_hw *hw)
1821 {
1822 s32 ret_val = E1000_SUCCESS;
1823 /* BH SW mailbox bit in SW_FW_SYNC */
1824 u16 swmbsw_mask = E1000_SW_SYNCH_MB;
1825 u32 ctrl;
1826 bool global_device_reset = hw->dev_spec._82575.global_device_reset;
1827
1828 DEBUGFUNC("e1000_reset_hw_82580");
1829
1830 hw->dev_spec._82575.global_device_reset = false;
1831
1832 /* Get current control state. */
1833 ctrl = E1000_READ_REG(hw, E1000_CTRL);
1834
1835 /*
1836 * Prevent the PCI-E bus from sticking if there is no TLP connection
1837 * on the last TLP read/write transaction when MAC is reset.
1838 */
1839 ret_val = e1000_disable_pcie_master_generic(hw);
1840 if (ret_val)
1841 DEBUGOUT("PCI-E Master disable polling has failed.\n");
1842
1843 DEBUGOUT("Masking off all interrupts\n");
1844 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1845 E1000_WRITE_REG(hw, E1000_RCTL, 0);
1846 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
1847 E1000_WRITE_FLUSH(hw);
1848
1849 msec_delay(10);
1850
1851 /* Determine whether or not a global dev reset is requested */
1852 if (global_device_reset &&
1853 e1000_acquire_swfw_sync_82575(hw, swmbsw_mask))
1854 global_device_reset = false;
1855
1856 if (global_device_reset &&
1857 !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET))
1858 ctrl |= E1000_CTRL_DEV_RST;
1859 else
1860 ctrl |= E1000_CTRL_RST;
1861
1862 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
1863
	/* Add delay to ensure DEV_RST has time to complete */
1865 if (global_device_reset)
1866 msec_delay(5);
1867
1868 ret_val = e1000_get_auto_rd_done_generic(hw);
1869 if (ret_val) {
1870 /*
1871 * When auto config read does not complete, do not
1872 * return with an error. This can happen in situations
1873 * where there is no eeprom and prevents getting link.
1874 */
1875 DEBUGOUT("Auto Read Done did not complete\n");
1876 }
1877
1878 /* If EEPROM is not present, run manual init scripts */
1879 if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
1880 (void) e1000_reset_init_script_82575(hw);
1881
1882 /* clear global device reset status bit */
1883 E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
1884
1885 /* Clear any pending interrupt events. */
1886 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
1887 (void) E1000_READ_REG(hw, E1000_ICR);
1888
1889 /* Install any alternate MAC address into RAR0 */
1890 ret_val = e1000_check_alt_mac_addr_generic(hw);
1891
1892 /* Release semaphore */
1893 if (global_device_reset)
1894 e1000_release_swfw_sync_82575(hw, swmbsw_mask);
1895
1896 return (ret_val);
1897 }
1898
1899 /*
1900 * e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
1901 * @data: data received by reading RXPBS register
1902 *
 * The 82580 uses a table-based approach for packet buffer allocation sizes.
 * This function converts the retrieved value into the correct table value:
 *
 *        0x0  0x1  0x2  0x3  0x4  0x5  0x6  0x7
 *  0x0    36   72  144    1    2    4    8   16
 *  0x8    35   70  140  rsv  rsv  rsv  rsv  rsv
1908 */
1909 u16
1910 e1000_rxpbs_adjust_82580(u32 data)
1911 {
1912 u16 ret_val = 0;
1913
1914 if (data < E1000_82580_RXPBS_TABLE_SIZE)
1915 ret_val = e1000_82580_rxpbs_table[data];
1916
1917 return (ret_val);
1918 }
1919
1920 /*
1921 * Due to a hw errata, if the host tries to configure the VFTA register
1922 * while performing queries from the BMC or DMA, then the VFTA in some
1923 * cases won't be written.
1924 */
1925
1926 /*
1927 * e1000_clear_vfta_i350 - Clear VLAN filter table
1928 * @hw: pointer to the HW structure
1929 *
1930 * Clears the register array which contains the VLAN filter table by
1931 * setting all the values to 0.
1932 */
1933 void
1934 e1000_clear_vfta_i350(struct e1000_hw *hw)
1935 {
1936 u32 offset;
1937 int i;
1938
	DEBUGFUNC("e1000_clear_vfta_i350");
1940
1941 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
1942 for (i = 0; i < 10; i++)
1943 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
1944
1945 E1000_WRITE_FLUSH(hw);
1946 }
1947 }
1948
1949 /*
1950 * e1000_write_vfta_i350 - Write value to VLAN filter table
1951 * @hw: pointer to the HW structure
1952 * @offset: register offset in VLAN filter table
1953 * @value: register value written to VLAN filter table
1954 *
1955 * Writes value at the given offset in the register array which stores
1956 * the VLAN filter table.
1957 */
1958 void
1959 e1000_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
1960 {
1961 int i;
1962
	DEBUGFUNC("e1000_write_vfta_i350");
1964
1965 for (i = 0; i < 10; i++)
1966 E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
1967
1968 E1000_WRITE_FLUSH(hw);
1969 }
1970
1971 /*
 * e1000_validate_nvm_checksum_with_offset - Validate EEPROM checksum
1974 * @hw: pointer to the HW structure
1975 * @offset: offset in words of the checksum protected region
1976 *
1977 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
1978 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
1979 */
1980 s32
1981 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
1982 {
1983 s32 ret_val = E1000_SUCCESS;
1984 u16 checksum = 0;
1985 u16 i, nvm_data;
1986
1987 DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
1988
1989 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
1990 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
1991 if (ret_val) {
1992 DEBUGOUT("NVM Read Error\n");
1993 goto out;
1994 }
1995 checksum += nvm_data;
1996 }
1997
1998 if (checksum != (u16) NVM_SUM) {
1999 DEBUGOUT("NVM Checksum Invalid\n");
2000 ret_val = -E1000_ERR_NVM;
2001 goto out;
2002 }
2003
2004 out:
2005 return (ret_val);
2006 }
2007
2008 /*
 * e1000_update_nvm_checksum_with_offset - Update EEPROM checksum
2011 * @hw: pointer to the HW structure
2012 * @offset: offset in words of the checksum protected region
2013 *
2014 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2015 * up to the checksum. Then calculates the EEPROM checksum and writes the
2016 * value to the EEPROM.
2017 */
2018 s32
2019 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2020 {
2021 s32 ret_val;
2022 u16 checksum = 0;
2023 u16 i, nvm_data;
2024
2025 DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
2026
2027 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2028 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2029 if (ret_val) {
2030 DEBUGOUT("NVM Read Error while updating checksum.\n");
2031 goto out;
2032 }
2033 checksum += nvm_data;
2034 }
2035 checksum = (u16) NVM_SUM - checksum;
2036 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2037 &checksum);
2038 if (ret_val)
2039 DEBUGOUT("NVM Write Error while updating checksum.\n");
2040
2041 out:
2042 return (ret_val);
2043 }
2044
2045 /*
2046 * e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
2047 * @hw: pointer to the HW structure
2048 *
2049 * Calculates the EEPROM section checksum by reading/adding each word of
2050 * the EEPROM and then verifies that the sum of the EEPROM is
2051 * equal to 0xBABA.
2052 */
2053 static s32
2054 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
2055 {
2056 s32 ret_val = E1000_SUCCESS;
2057 u16 j;
2058 u16 nvm_offset;
2059
2060 DEBUGFUNC("e1000_validate_nvm_checksum_i350");
2061
2062 for (j = 0; j < 4; j++) {
2063 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2064 ret_val = e1000_validate_nvm_checksum_with_offset(hw,
2065 nvm_offset);
2066 if (ret_val != E1000_SUCCESS)
2067 goto out;
2068 }
2069
2070 out:
2071 return (ret_val);
2072 }
2073
2074 /*
2075 * e1000_update_nvm_checksum_i350 - Update EEPROM checksum
2076 * @hw: pointer to the HW structure
2077 *
2078 * Updates the EEPROM section checksums for all 4 ports by reading/adding
2079 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2080 * checksum and writes the value to the EEPROM.
2081 */
2082 static s32
2083 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
2084 {
2085 s32 ret_val = E1000_SUCCESS;
2086 u16 j;
2087 u16 nvm_offset;
2088
2089 DEBUGFUNC("e1000_update_nvm_checksum_i350");
2090
2091 for (j = 0; j < 4; j++) {
2092 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2093 ret_val = e1000_update_nvm_checksum_with_offset(hw, nvm_offset);
2094 if (ret_val != E1000_SUCCESS)
2095 goto out;
2096 }
2097
2098 out:
2099 return (ret_val);
2100 }
2101
2102
2103
2104 /*
2105 * e1000_set_eee_i350 - Enable/disable EEE support
2106 * @hw: pointer to the HW structure
2107 *
 * Enable/disable EEE based on the setting in the dev_spec structure.
 */
2111 s32
2112 e1000_set_eee_i350(struct e1000_hw *hw)
2113 {
2114
2115 s32 ret_val = E1000_SUCCESS;
2116 u32 ipcnfg, eeer;
2117
2118 DEBUGFUNC("e1000_set_eee_i350");
2119
2120 if ((hw->mac.type < e1000_i350) ||
2121 (hw->phy.media_type != e1000_media_type_copper))
2122 goto out;
2123 ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
2124 eeer = E1000_READ_REG(hw, E1000_EEER);
2125
2126 /* enable or disable per user setting */
2127 if (!(hw->dev_spec._82575.eee_disable)) {
2128 ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2129 eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2130 E1000_EEER_LPI_FC);
2131
2132 } else {
2133 ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
2134 eeer &= ~(E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
2135 E1000_EEER_LPI_FC);
2136 }
2137 E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
2138 E1000_WRITE_REG(hw, E1000_EEER, eeer);
	(void) E1000_READ_REG(hw, E1000_IPCNFG);
	(void) E1000_READ_REG(hw, E1000_EEER);
out:
2143 return (ret_val);
2144 }