MFV: illumos-gate@5bb0bdfe588c5df0f63ff8ac292cd608a5f4492a
9950 Need support for Intel I219 v6-v9
Reviewed by: Jason King <jason.king@joyent.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Approved by: Garrett D'Amore <garrett@damore.org>
Author: Robert Mustacchi <rm@joyent.com>
--- old/usr/src/uts/common/io/e1000api/e1000_ich8lan.c
+++ new/usr/src/uts/common/io/e1000api/e1000_ich8lan.c
1 1 /******************************************************************************
2 2
3 3 Copyright (c) 2001-2015, Intel Corporation
4 4 All rights reserved.
5 5
6 6 Redistribution and use in source and binary forms, with or without
7 7 modification, are permitted provided that the following conditions are met:
8 8
9 9 1. Redistributions of source code must retain the above copyright notice,
10 10 this list of conditions and the following disclaimer.
11 11
12 12 2. Redistributions in binary form must reproduce the above copyright
13 13 notice, this list of conditions and the following disclaimer in the
14 14 documentation and/or other materials provided with the distribution.
15 15
16 16 3. Neither the name of the Intel Corporation nor the names of its
17 17 contributors may be used to endorse or promote products derived from
18 18 this software without specific prior written permission.
19 19
20 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 30 POSSIBILITY OF SUCH DAMAGE.
31 31
32 32 ******************************************************************************/
33 33 /*$FreeBSD$*/
34 34
35 35 /* 82562G 10/100 Network Connection
36 36 * 82562G-2 10/100 Network Connection
37 37 * 82562GT 10/100 Network Connection
38 38 * 82562GT-2 10/100 Network Connection
39 39 * 82562V 10/100 Network Connection
40 40 * 82562V-2 10/100 Network Connection
41 41 * 82566DC-2 Gigabit Network Connection
42 42 * 82566DC Gigabit Network Connection
43 43 * 82566DM-2 Gigabit Network Connection
44 44 * 82566DM Gigabit Network Connection
45 45 * 82566MC Gigabit Network Connection
46 46 * 82566MM Gigabit Network Connection
47 47 * 82567LM Gigabit Network Connection
48 48 * 82567LF Gigabit Network Connection
49 49 * 82567V Gigabit Network Connection
50 50 * 82567LM-2 Gigabit Network Connection
51 51 * 82567LF-2 Gigabit Network Connection
52 52 * 82567V-2 Gigabit Network Connection
53 53 * 82567LF-3 Gigabit Network Connection
54 54 * 82567LM-3 Gigabit Network Connection
55 55 * 82567LM-4 Gigabit Network Connection
56 56 * 82577LM Gigabit Network Connection
57 57 * 82577LC Gigabit Network Connection
58 58 * 82578DM Gigabit Network Connection
59 59 * 82578DC Gigabit Network Connection
60 60 * 82579LM Gigabit Network Connection
61 61 * 82579V Gigabit Network Connection
62 62 * Ethernet Connection I217-LM
63 63 * Ethernet Connection I217-V
64 64 * Ethernet Connection I218-V
65 65 * Ethernet Connection I218-LM
66 66 * Ethernet Connection (2) I218-LM
67 67 * Ethernet Connection (2) I218-V
68 68 * Ethernet Connection (3) I218-LM
69 69 * Ethernet Connection (3) I218-V
70 70 */
71 71
72 72 #include "e1000_api.h"
73 73
74 74 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
75 75 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
76 76 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw);
77 77 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw);
78 78 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
79 79 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
80 80 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
81 81 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
82 82 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw);
83 83 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
84 84 u8 *mc_addr_list,
85 85 u32 mc_addr_count);
86 86 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
87 87 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
88 88 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
89 89 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
90 90 bool active);
91 91 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
92 92 bool active);
93 93 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
94 94 u16 words, u16 *data);
95 95 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
96 96 u16 *data);
97 97 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
98 98 u16 words, u16 *data);
99 99 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
100 100 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
101 101 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw);
102 102 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
103 103 u16 *data);
104 104 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
105 105 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
106 106 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw);
107 107 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw);
108 108 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
109 109 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
110 110 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
111 111 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
112 112 u16 *speed, u16 *duplex);
113 113 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
114 114 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
115 115 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
116 116 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
117 117 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
118 118 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
119 119 static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
120 120 static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
121 121 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
122 122 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
123 123 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
124 124 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
125 125 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw,
126 126 u32 offset, u8 *data);
127 127 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
128 128 u8 size, u16 *data);
129 129 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
130 130 u32 *data);
131 131 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
132 132 u32 offset, u32 *data);
133 133 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
134 134 u32 offset, u32 data);
135 135 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
136 136 u32 offset, u32 dword);
137 137 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
138 138 u32 offset, u16 *data);
139 139 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
140 140 u32 offset, u8 byte);
141 141 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
142 142 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
143 143 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw);
144 144 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
145 145 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
146 146 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
147 147 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr);
148 148
149 149 /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
150 150 /* Offset 04h HSFSTS */
151 151 union ich8_hws_flash_status {
152 152 struct ich8_hsfsts {
153 153 u16 flcdone:1; /* bit 0 Flash Cycle Done */
154 154 u16 flcerr:1; /* bit 1 Flash Cycle Error */
155 155 u16 dael:1; /* bit 2 Direct Access error Log */
156 156 u16 berasesz:2; /* bit 4:3 Sector Erase Size */
157 157 u16 flcinprog:1; /* bit 5 flash cycle in Progress */
 158 158 		u16 reserved1:2; /* bit 7:6 Reserved */
 159 159 		u16 reserved2:6; /* bit 13:8 Reserved */
160 160 u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
161 161 u16 flockdn:1; /* bit 15 Flash Config Lock-Down */
162 162 } hsf_status;
163 163 u16 regval;
164 164 };
165 165
166 166 /* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
167 167 /* Offset 06h FLCTL */
168 168 union ich8_hws_flash_ctrl {
169 169 struct ich8_hsflctl {
170 170 u16 flcgo:1; /* 0 Flash Cycle Go */
171 171 u16 flcycle:2; /* 2:1 Flash Cycle */
172 172 u16 reserved:5; /* 7:3 Reserved */
173 173 u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
174 174 u16 flockdn:6; /* 15:10 Reserved */
175 175 } hsf_ctrl;
176 176 u16 regval;
177 177 };
178 178
179 179 /* ICH Flash Region Access Permissions */
180 180 union ich8_hws_flash_regacc {
181 181 struct ich8_flracc {
182 182 u32 grra:8; /* 0:7 GbE region Read Access */
183 183 u32 grwa:8; /* 8:15 GbE region Write Access */
184 184 u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
185 185 u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
186 186 } hsf_flregacc;
187 187 u16 regval;
188 188 };
189 189
190 190 /**
191 191 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
192 192 * @hw: pointer to the HW structure
193 193 *
194 194 * Test access to the PHY registers by reading the PHY ID registers. If
195 195 * the PHY ID is already known (e.g. resume path) compare it with known ID,
196 196 * otherwise assume the read PHY ID is correct if it is valid.
197 197 *
198 198 * Assumes the sw/fw/hw semaphore is already acquired.
199 199 **/
200 200 static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
201 201 {
202 202 u16 phy_reg = 0;
203 203 u32 phy_id = 0;
204 204 s32 ret_val = 0;
205 205 u16 retry_count;
206 206 u32 mac_reg = 0;
207 207
208 208 for (retry_count = 0; retry_count < 2; retry_count++) {
209 209 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_reg);
210 210 if (ret_val || (phy_reg == 0xFFFF))
211 211 continue;
212 212 phy_id = (u32)(phy_reg << 16);
213 213
214 214 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_reg);
215 215 if (ret_val || (phy_reg == 0xFFFF)) {
216 216 phy_id = 0;
217 217 continue;
218 218 }
219 219 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
220 220 break;
221 221 }
222 222
223 223 if (hw->phy.id) {
224 224 if (hw->phy.id == phy_id)
225 225 goto out;
226 226 } else if (phy_id) {
227 227 hw->phy.id = phy_id;
228 228 hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
229 229 goto out;
230 230 }
231 231
232 232 /* In case the PHY needs to be in mdio slow mode,
233 233 * set slow mode and try to get the PHY id again.
234 234 */
235 235 if (hw->mac.type < e1000_pch_lpt) {
236 236 hw->phy.ops.release(hw);
237 237 ret_val = e1000_set_mdio_slow_mode_hv(hw);
238 238 if (!ret_val)
239 239 ret_val = e1000_get_phy_id(hw);
240 240 hw->phy.ops.acquire(hw);
241 241 }
242 242
243 243 if (ret_val)
244 244 return FALSE;
245 245 out:
246 - if ((hw->mac.type == e1000_pch_lpt) ||
247 - (hw->mac.type == e1000_pch_spt)) {
246 + if (hw->mac.type >= e1000_pch_lpt) {
248 247 /* Only unforce SMBus if ME is not active */
249 248 if (!(E1000_READ_REG(hw, E1000_FWSM) &
250 249 E1000_ICH_FWSM_FW_VALID)) {
251 250 /* Unforce SMBus mode in PHY */
252 251 hw->phy.ops.read_reg_locked(hw, CV_SMB_CTRL, &phy_reg);
253 252 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
254 253 hw->phy.ops.write_reg_locked(hw, CV_SMB_CTRL, phy_reg);
255 254
256 255 /* Unforce SMBus mode in MAC */
257 256 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
258 257 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
259 258 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
260 259 }
261 260 }
262 261
263 262 return TRUE;
264 263 }
265 264
266 265 /**
267 266 * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
268 267 * @hw: pointer to the HW structure
269 268 *
270 269 * Toggling the LANPHYPC pin value fully power-cycles the PHY and is
271 270 * used to reset the PHY to a quiescent state when necessary.
272 271 **/
273 272 static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
274 273 {
275 274 u32 mac_reg;
276 275
277 276 DEBUGFUNC("e1000_toggle_lanphypc_pch_lpt");
278 277
279 278 /* Set Phy Config Counter to 50msec */
280 279 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
281 280 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
282 281 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
283 282 E1000_WRITE_REG(hw, E1000_FEXTNVM3, mac_reg);
284 283
285 284 /* Toggle LANPHYPC Value bit */
286 285 mac_reg = E1000_READ_REG(hw, E1000_CTRL);
287 286 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
288 287 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
289 288 E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
290 289 E1000_WRITE_FLUSH(hw);
291 290 msec_delay(1);
292 291 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
293 292 E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
294 293 E1000_WRITE_FLUSH(hw);
295 294
296 295 if (hw->mac.type < e1000_pch_lpt) {
297 296 msec_delay(50);
298 297 } else {
299 298 u16 count = 20;
300 299
301 300 do {
302 301 msec_delay(5);
303 302 } while (!(E1000_READ_REG(hw, E1000_CTRL_EXT) &
304 303 E1000_CTRL_EXT_LPCD) && count--);
305 304
306 305 msec_delay(30);
307 306 }
308 307 }
309 308
310 309 /**
311 310 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
312 311 * @hw: pointer to the HW structure
313 312 *
314 313 * Workarounds/flow necessary for PHY initialization during driver load
315 314 * and resume paths.
316 315 **/
317 316 static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
318 317 {
319 318 u32 mac_reg, fwsm = E1000_READ_REG(hw, E1000_FWSM);
320 319 s32 ret_val;
321 320
322 321 DEBUGFUNC("e1000_init_phy_workarounds_pchlan");
323 322
324 323 /* Gate automatic PHY configuration by hardware on managed and
325 324 * non-managed 82579 and newer adapters.
326 325 */
327 326 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
328 327
329 328 /* It is not possible to be certain of the current state of ULP
330 329 * so forcibly disable it.
331 330 */
332 331 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
333 332 e1000_disable_ulp_lpt_lp(hw, TRUE);
334 333
335 334 ret_val = hw->phy.ops.acquire(hw);
336 335 if (ret_val) {
337 336 DEBUGOUT("Failed to initialize PHY flow\n");
338 337 goto out;
339 338 }
340 339
341 340 /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
342 341 * inaccessible and resetting the PHY is not blocked, toggle the
343 342 * LANPHYPC Value bit to force the interconnect to PCIe mode.
344 343 */
345 344 switch (hw->mac.type) {
346 345 case e1000_pch_lpt:
347 346 case e1000_pch_spt:
347 + case e1000_pch_cnp:
348 348 if (e1000_phy_is_accessible_pchlan(hw))
349 349 break;
350 350
351 351 /* Before toggling LANPHYPC, see if PHY is accessible by
352 352 * forcing MAC to SMBus mode first.
353 353 */
354 354 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
355 355 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
356 356 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
357 357
358 358 /* Wait 50 milliseconds for MAC to finish any retries
359 359 * that it might be trying to perform from previous
360 360 * attempts to acknowledge any phy read requests.
361 361 */
362 362 msec_delay(50);
363 363
364 364 /* fall-through */
365 365 case e1000_pch2lan:
366 366 if (e1000_phy_is_accessible_pchlan(hw))
367 367 break;
368 368
369 369 /* fall-through */
370 370 case e1000_pchlan:
371 371 if ((hw->mac.type == e1000_pchlan) &&
372 372 (fwsm & E1000_ICH_FWSM_FW_VALID))
373 373 break;
374 374
375 375 if (hw->phy.ops.check_reset_block(hw)) {
376 376 DEBUGOUT("Required LANPHYPC toggle blocked by ME\n");
377 377 ret_val = -E1000_ERR_PHY;
378 378 break;
379 379 }
380 380
381 381 /* Toggle LANPHYPC Value bit */
382 382 e1000_toggle_lanphypc_pch_lpt(hw);
383 383 if (hw->mac.type >= e1000_pch_lpt) {
384 384 if (e1000_phy_is_accessible_pchlan(hw))
385 385 break;
386 386
387 387 /* Toggling LANPHYPC brings the PHY out of SMBus mode
388 388 * so ensure that the MAC is also out of SMBus mode
389 389 */
390 390 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
391 391 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
392 392 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
393 393
394 394 if (e1000_phy_is_accessible_pchlan(hw))
395 395 break;
396 396
397 397 ret_val = -E1000_ERR_PHY;
398 398 }
399 399 break;
400 400 default:
401 401 break;
402 402 }
403 403
404 404 hw->phy.ops.release(hw);
405 405 if (!ret_val) {
406 406
407 407 /* Check to see if able to reset PHY. Print error if not */
408 408 if (hw->phy.ops.check_reset_block(hw)) {
409 409 ERROR_REPORT("Reset blocked by ME\n");
410 410 goto out;
411 411 }
412 412
 413 413 /* Reset the PHY before any access to it. Doing so ensures
414 414 * that the PHY is in a known good state before we read/write
415 415 * PHY registers. The generic reset is sufficient here,
416 416 * because we haven't determined the PHY type yet.
417 417 */
418 418 ret_val = e1000_phy_hw_reset_generic(hw);
419 419 if (ret_val)
420 420 goto out;
421 421
422 422 /* On a successful reset, possibly need to wait for the PHY
423 423 * to quiesce to an accessible state before returning control
424 424 * to the calling function. If the PHY does not quiesce, then
425 425 * return E1000E_BLK_PHY_RESET, as this is the condition that
426 426 * the PHY is in.
427 427 */
428 428 ret_val = hw->phy.ops.check_reset_block(hw);
429 429 if (ret_val)
430 430 ERROR_REPORT("ME blocked access to PHY after reset\n");
431 431 }
432 432
433 433 out:
434 434 /* Ungate automatic PHY configuration on non-managed 82579 */
435 435 if ((hw->mac.type == e1000_pch2lan) &&
436 436 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
437 437 msec_delay(10);
438 438 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
439 439 }
440 440
441 441 return ret_val;
442 442 }
443 443
444 444 /**
445 445 * e1000_init_phy_params_pchlan - Initialize PHY function pointers
446 446 * @hw: pointer to the HW structure
447 447 *
448 448 * Initialize family-specific PHY parameters and function pointers.
449 449 **/
450 450 static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
451 451 {
452 452 struct e1000_phy_info *phy = &hw->phy;
453 453 s32 ret_val;
454 454
455 455 DEBUGFUNC("e1000_init_phy_params_pchlan");
456 456
457 457 phy->addr = 1;
458 458 phy->reset_delay_us = 100;
459 459
460 460 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
461 461 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
462 462 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
463 463 phy->ops.set_page = e1000_set_page_igp;
464 464 phy->ops.read_reg = e1000_read_phy_reg_hv;
465 465 phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
466 466 phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
467 467 phy->ops.release = e1000_release_swflag_ich8lan;
468 468 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
469 469 phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
470 470 phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
471 471 phy->ops.write_reg = e1000_write_phy_reg_hv;
472 472 phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
473 473 phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
474 474 phy->ops.power_up = e1000_power_up_phy_copper;
475 475 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
476 476 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
477 477
478 478 phy->id = e1000_phy_unknown;
479 479
480 480 ret_val = e1000_init_phy_workarounds_pchlan(hw);
481 481 if (ret_val)
482 482 return ret_val;
483 483
484 484 if (phy->id == e1000_phy_unknown)
485 485 switch (hw->mac.type) {
486 486 default:
487 487 ret_val = e1000_get_phy_id(hw);
488 488 if (ret_val)
489 489 return ret_val;
490 490 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
491 491 break;
492 492 /* fall-through */
493 493 case e1000_pch2lan:
494 494 case e1000_pch_lpt:
495 495 case e1000_pch_spt:
496 + case e1000_pch_cnp:
496 497 /* In case the PHY needs to be in mdio slow mode,
497 498 * set slow mode and try to get the PHY id again.
498 499 */
499 500 ret_val = e1000_set_mdio_slow_mode_hv(hw);
500 501 if (ret_val)
501 502 return ret_val;
502 503 ret_val = e1000_get_phy_id(hw);
503 504 if (ret_val)
504 505 return ret_val;
505 506 break;
506 507 }
507 508 phy->type = e1000_get_phy_type_from_id(phy->id);
508 509
509 510 switch (phy->type) {
510 511 case e1000_phy_82577:
511 512 case e1000_phy_82579:
512 513 case e1000_phy_i217:
513 514 phy->ops.check_polarity = e1000_check_polarity_82577;
514 515 phy->ops.force_speed_duplex =
515 516 e1000_phy_force_speed_duplex_82577;
516 517 phy->ops.get_cable_length = e1000_get_cable_length_82577;
517 518 phy->ops.get_info = e1000_get_phy_info_82577;
518 519 phy->ops.commit = e1000_phy_sw_reset_generic;
519 520 break;
520 521 case e1000_phy_82578:
521 522 phy->ops.check_polarity = e1000_check_polarity_m88;
522 523 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
523 524 phy->ops.get_cable_length = e1000_get_cable_length_m88;
524 525 phy->ops.get_info = e1000_get_phy_info_m88;
525 526 break;
526 527 default:
527 528 ret_val = -E1000_ERR_PHY;
528 529 break;
529 530 }
530 531
531 532 return ret_val;
532 533 }
533 534
534 535 /**
535 536 * e1000_init_phy_params_ich8lan - Initialize PHY function pointers
536 537 * @hw: pointer to the HW structure
537 538 *
538 539 * Initialize family-specific PHY parameters and function pointers.
539 540 **/
540 541 static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
541 542 {
542 543 struct e1000_phy_info *phy = &hw->phy;
543 544 s32 ret_val;
544 545 u16 i = 0;
545 546
546 547 DEBUGFUNC("e1000_init_phy_params_ich8lan");
547 548
548 549 phy->addr = 1;
549 550 phy->reset_delay_us = 100;
550 551
551 552 phy->ops.acquire = e1000_acquire_swflag_ich8lan;
552 553 phy->ops.check_reset_block = e1000_check_reset_block_ich8lan;
553 554 phy->ops.get_cable_length = e1000_get_cable_length_igp_2;
554 555 phy->ops.get_cfg_done = e1000_get_cfg_done_ich8lan;
555 556 phy->ops.read_reg = e1000_read_phy_reg_igp;
556 557 phy->ops.release = e1000_release_swflag_ich8lan;
557 558 phy->ops.reset = e1000_phy_hw_reset_ich8lan;
558 559 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan;
559 560 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan;
560 561 phy->ops.write_reg = e1000_write_phy_reg_igp;
561 562 phy->ops.power_up = e1000_power_up_phy_copper;
562 563 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
563 564
564 565 /* We may need to do this twice - once for IGP and if that fails,
565 566 * we'll set BM func pointers and try again
566 567 */
567 568 ret_val = e1000_determine_phy_address(hw);
568 569 if (ret_val) {
569 570 phy->ops.write_reg = e1000_write_phy_reg_bm;
570 571 phy->ops.read_reg = e1000_read_phy_reg_bm;
571 572 ret_val = e1000_determine_phy_address(hw);
572 573 if (ret_val) {
573 574 DEBUGOUT("Cannot determine PHY addr. Erroring out\n");
574 575 return ret_val;
575 576 }
576 577 }
577 578
578 579 phy->id = 0;
579 580 while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
580 581 (i++ < 100)) {
581 582 msec_delay(1);
582 583 ret_val = e1000_get_phy_id(hw);
583 584 if (ret_val)
584 585 return ret_val;
585 586 }
586 587
587 588 /* Verify phy id */
588 589 switch (phy->id) {
589 590 case IGP03E1000_E_PHY_ID:
590 591 phy->type = e1000_phy_igp_3;
591 592 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
592 593 phy->ops.read_reg_locked = e1000_read_phy_reg_igp_locked;
593 594 phy->ops.write_reg_locked = e1000_write_phy_reg_igp_locked;
594 595 phy->ops.get_info = e1000_get_phy_info_igp;
595 596 phy->ops.check_polarity = e1000_check_polarity_igp;
596 597 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
597 598 break;
598 599 case IFE_E_PHY_ID:
599 600 case IFE_PLUS_E_PHY_ID:
600 601 case IFE_C_E_PHY_ID:
601 602 phy->type = e1000_phy_ife;
602 603 phy->autoneg_mask = E1000_ALL_NOT_GIG;
603 604 phy->ops.get_info = e1000_get_phy_info_ife;
604 605 phy->ops.check_polarity = e1000_check_polarity_ife;
605 606 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
606 607 break;
607 608 case BME1000_E_PHY_ID:
608 609 phy->type = e1000_phy_bm;
609 610 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
610 611 phy->ops.read_reg = e1000_read_phy_reg_bm;
611 612 phy->ops.write_reg = e1000_write_phy_reg_bm;
612 613 phy->ops.commit = e1000_phy_sw_reset_generic;
613 614 phy->ops.get_info = e1000_get_phy_info_m88;
614 615 phy->ops.check_polarity = e1000_check_polarity_m88;
615 616 phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
616 617 break;
617 618 default:
618 619 return -E1000_ERR_PHY;
619 620 break;
620 621 }
621 622
622 623 return E1000_SUCCESS;
623 624 }
624 625
625 626 /**
626 627 * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
627 628 * @hw: pointer to the HW structure
628 629 *
629 630 * Initialize family-specific NVM parameters and function
630 631 * pointers.
631 632 **/
632 633 static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
633 634 {
634 635 struct e1000_nvm_info *nvm = &hw->nvm;
635 636 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
636 637 u32 gfpreg, sector_base_addr, sector_end_addr;
637 638 u16 i;
638 639 u32 nvm_size;
639 640
640 641 DEBUGFUNC("e1000_init_nvm_params_ich8lan");
641 642
642 643 nvm->type = e1000_nvm_flash_sw;
643 644
644 - if (hw->mac.type == e1000_pch_spt) {
645 + if (hw->mac.type >= e1000_pch_spt) {
645 646 /* in SPT, gfpreg doesn't exist. NVM size is taken from the
646 647 * STRAP register. This is because in SPT the GbE Flash region
647 648 * is no longer accessed through the flash registers. Instead,
648 649 * the mechanism has changed, and the Flash region access
649 650 * registers are now implemented in GbE memory space.
650 651 */
651 652 nvm->flash_base_addr = 0;
652 653 nvm_size =
653 654 (((E1000_READ_REG(hw, E1000_STRAP) >> 1) & 0x1F) + 1)
654 655 * NVM_SIZE_MULTIPLIER;
655 656 nvm->flash_bank_size = nvm_size / 2;
656 657 /* Adjust to word count */
657 658 nvm->flash_bank_size /= sizeof(u16);
658 659 /* Set the base address for flash register access */
659 660 hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
660 661 } else {
661 662 /* Can't read flash registers if register set isn't mapped. */
662 663 if (!hw->flash_address) {
663 664 DEBUGOUT("ERROR: Flash registers not mapped\n");
664 665 return -E1000_ERR_CONFIG;
665 666 }
666 667
667 668 gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
668 669
669 670 /* sector_X_addr is a "sector"-aligned address (4096 bytes)
670 671 * Add 1 to sector_end_addr since this sector is included in
671 672 * the overall size.
672 673 */
673 674 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
674 675 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
675 676
676 677 /* flash_base_addr is byte-aligned */
677 678 nvm->flash_base_addr = sector_base_addr
678 679 << FLASH_SECTOR_ADDR_SHIFT;
679 680
680 681 /* find total size of the NVM, then cut in half since the total
681 682 * size represents two separate NVM banks.
682 683 */
683 684 nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
684 685 << FLASH_SECTOR_ADDR_SHIFT);
685 686 nvm->flash_bank_size /= 2;
686 687 /* Adjust to word count */
687 688 nvm->flash_bank_size /= sizeof(u16);
688 689 }
689 690
690 691 nvm->word_size = E1000_SHADOW_RAM_WORDS;
691 692
692 693 /* Clear shadow ram */
693 694 for (i = 0; i < nvm->word_size; i++) {
694 695 dev_spec->shadow_ram[i].modified = FALSE;
695 696 dev_spec->shadow_ram[i].value = 0xFFFF;
696 697 }
697 698
698 699 E1000_MUTEX_INIT(&dev_spec->nvm_mutex);
699 700 E1000_MUTEX_INIT(&dev_spec->swflag_mutex);
700 701
701 702 /* Function Pointers */
702 703 nvm->ops.acquire = e1000_acquire_nvm_ich8lan;
703 704 nvm->ops.release = e1000_release_nvm_ich8lan;
704 - if (hw->mac.type == e1000_pch_spt) {
705 + if (hw->mac.type >= e1000_pch_spt) {
705 706 nvm->ops.read = e1000_read_nvm_spt;
706 707 nvm->ops.update = e1000_update_nvm_checksum_spt;
707 708 } else {
708 709 nvm->ops.read = e1000_read_nvm_ich8lan;
709 710 nvm->ops.update = e1000_update_nvm_checksum_ich8lan;
710 711 }
711 712 nvm->ops.valid_led_default = e1000_valid_led_default_ich8lan;
712 713 nvm->ops.validate = e1000_validate_nvm_checksum_ich8lan;
713 714 nvm->ops.write = e1000_write_nvm_ich8lan;
714 715
715 716 return E1000_SUCCESS;
716 717 }
717 718
718 719 /**
719 720 * e1000_init_mac_params_ich8lan - Initialize MAC function pointers
720 721 * @hw: pointer to the HW structure
721 722 *
722 723 * Initialize family-specific MAC parameters and function
723 724 * pointers.
724 725 **/
725 726 static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
726 727 {
727 728 struct e1000_mac_info *mac = &hw->mac;
728 729
729 730 DEBUGFUNC("e1000_init_mac_params_ich8lan");
730 731
731 732 /* Set media type function pointer */
732 733 hw->phy.media_type = e1000_media_type_copper;
733 734
734 735 /* Set mta register count */
735 736 mac->mta_reg_count = 32;
736 737 /* Set rar entry count */
737 738 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
738 739 if (mac->type == e1000_ich8lan)
739 740 mac->rar_entry_count--;
740 741 /* Set if part includes ASF firmware */
741 742 mac->asf_firmware_present = TRUE;
742 743 /* FWSM register */
743 744 mac->has_fwsm = TRUE;
744 745 /* ARC subsystem not supported */
745 746 mac->arc_subsystem_valid = FALSE;
746 747 /* Adaptive IFS supported */
747 748 mac->adaptive_ifs = TRUE;
748 749
749 750 /* Function pointers */
750 751
751 752 /* bus type/speed/width */
752 753 mac->ops.get_bus_info = e1000_get_bus_info_ich8lan;
753 754 /* function id */
754 755 mac->ops.set_lan_id = e1000_set_lan_id_single_port;
755 756 /* reset */
756 757 mac->ops.reset_hw = e1000_reset_hw_ich8lan;
757 758 /* hw initialization */
758 759 mac->ops.init_hw = e1000_init_hw_ich8lan;
759 760 /* link setup */
760 761 mac->ops.setup_link = e1000_setup_link_ich8lan;
761 762 /* physical interface setup */
762 763 mac->ops.setup_physical_interface = e1000_setup_copper_link_ich8lan;
763 764 /* check for link */
764 765 mac->ops.check_for_link = e1000_check_for_copper_link_ich8lan;
765 766 /* link info */
766 767 mac->ops.get_link_up_info = e1000_get_link_up_info_ich8lan;
767 768 /* multicast address update */
768 769 mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
769 770 /* clear hardware counters */
770 771 mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
771 772
772 773 /* LED and other operations */
773 774 switch (mac->type) {
774 775 case e1000_ich8lan:
775 776 case e1000_ich9lan:
776 777 case e1000_ich10lan:
777 778 /* check management mode */
778 779 mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
779 780 /* ID LED init */
780 781 mac->ops.id_led_init = e1000_id_led_init_generic;
781 782 /* blink LED */
782 783 mac->ops.blink_led = e1000_blink_led_generic;
783 784 /* setup LED */
784 785 mac->ops.setup_led = e1000_setup_led_generic;
785 786 /* cleanup LED */
786 787 mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
787 788 /* turn on/off LED */
788 789 mac->ops.led_on = e1000_led_on_ich8lan;
789 790 mac->ops.led_off = e1000_led_off_ich8lan;
790 791 break;
791 792 case e1000_pch2lan:
792 793 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
793 794 mac->ops.rar_set = e1000_rar_set_pch2lan;
794 795 /* fall-through */
795 796 case e1000_pch_lpt:
796 797 case e1000_pch_spt:
798 + case e1000_pch_cnp:
797 799 /* multicast address update for pch2 */
798 800 mac->ops.update_mc_addr_list =
799 801 e1000_update_mc_addr_list_pch2lan;
800 802 /* fall-through */
801 803 case e1000_pchlan:
802 804 /* check management mode */
803 805 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
804 806 /* ID LED init */
805 807 mac->ops.id_led_init = e1000_id_led_init_pchlan;
806 808 /* setup LED */
807 809 mac->ops.setup_led = e1000_setup_led_pchlan;
808 810 /* cleanup LED */
809 811 mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
810 812 /* turn on/off LED */
811 813 mac->ops.led_on = e1000_led_on_pchlan;
812 814 mac->ops.led_off = e1000_led_off_pchlan;
813 815 break;
814 816 default:
815 817 break;
816 818 }
817 819
818 - if ((mac->type == e1000_pch_lpt) ||
819 - (mac->type == e1000_pch_spt)) {
820 + if (mac->type >= e1000_pch_lpt) {
820 821 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
821 822 mac->ops.rar_set = e1000_rar_set_pch_lpt;
822 823 mac->ops.setup_physical_interface = e1000_setup_copper_link_pch_lpt;
823 824 mac->ops.set_obff_timer = e1000_set_obff_timer_pch_lpt;
824 825 }
825 826
826 827 /* Enable PCS Lock-loss workaround for ICH8 */
827 828 if (mac->type == e1000_ich8lan)
828 829 e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
829 830
830 831 return E1000_SUCCESS;
831 832 }
832 833
833 834 /**
834 835 * __e1000_access_emi_reg_locked - Read/write EMI register
835 836 * @hw: pointer to the HW structure
 836 837 * @address: EMI address to program
837 838 * @data: pointer to value to read/write from/to the EMI address
838 839 * @read: boolean flag to indicate read or write
839 840 *
840 841 * This helper function assumes the SW/FW/HW Semaphore is already acquired.
841 842 **/
842 843 static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
843 844 u16 *data, bool read)
844 845 {
845 846 s32 ret_val;
846 847
847 848 DEBUGFUNC("__e1000_access_emi_reg_locked");
848 849
849 850 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, address);
850 851 if (ret_val)
851 852 return ret_val;
852 853
853 854 if (read)
854 855 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_EMI_DATA,
855 856 data);
856 857 else
857 858 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
858 859 *data);
859 860
860 861 return ret_val;
861 862 }
862 863
863 864 /**
864 865 * e1000_read_emi_reg_locked - Read Extended Management Interface register
865 866 * @hw: pointer to the HW structure
866 867 * @addr: EMI address to program
867 868 * @data: value to be read from the EMI address
868 869 *
869 870 * Assumes the SW/FW/HW Semaphore is already acquired.
870 871 **/
871 872 s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
872 873 {
873 874 DEBUGFUNC("e1000_read_emi_reg_locked");
874 875
875 876 return __e1000_access_emi_reg_locked(hw, addr, data, TRUE);
876 877 }
877 878
878 879 /**
879 880 * e1000_write_emi_reg_locked - Write Extended Management Interface register
880 881 * @hw: pointer to the HW structure
881 882 * @addr: EMI address to program
882 883 * @data: value to be written to the EMI address
883 884 *
884 885 * Assumes the SW/FW/HW Semaphore is already acquired.
885 886 **/
886 887 s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
887 888 {
 888 889 	DEBUGFUNC("e1000_write_emi_reg_locked");
889 890
890 891 return __e1000_access_emi_reg_locked(hw, addr, &data, FALSE);
891 892 }
892 893
893 894 /**
894 895 * e1000_set_eee_pchlan - Enable/disable EEE support
895 896 * @hw: pointer to the HW structure
896 897 *
897 898 * Enable/disable EEE based on setting in dev_spec structure, the duplex of
898 899 * the link and the EEE capabilities of the link partner. The LPI Control
899 900 * register bits will remain set only if/when link is up.
900 901 *
901 902 * EEE LPI must not be asserted earlier than one second after link is up.
902 903 * On 82579, EEE LPI should not be enabled until such time otherwise there
903 904 * can be link issues with some switches. Other devices can have EEE LPI
904 905 * enabled immediately upon link up since they have a timer in hardware which
905 906 * prevents LPI from being asserted too early.
906 907 **/
907 908 s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
908 909 {
909 910 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
910 911 s32 ret_val;
911 912 u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
912 913
913 914 DEBUGFUNC("e1000_set_eee_pchlan");
914 915
915 916 switch (hw->phy.type) {
916 917 case e1000_phy_82579:
917 918 lpa = I82579_EEE_LP_ABILITY;
918 919 pcs_status = I82579_EEE_PCS_STATUS;
919 920 adv_addr = I82579_EEE_ADVERTISEMENT;
920 921 break;
921 922 case e1000_phy_i217:
922 923 lpa = I217_EEE_LP_ABILITY;
923 924 pcs_status = I217_EEE_PCS_STATUS;
924 925 adv_addr = I217_EEE_ADVERTISEMENT;
925 926 break;
926 927 default:
927 928 return E1000_SUCCESS;
928 929 }
929 930
930 931 ret_val = hw->phy.ops.acquire(hw);
931 932 if (ret_val)
932 933 return ret_val;
933 934
934 935 ret_val = hw->phy.ops.read_reg_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
935 936 if (ret_val)
936 937 goto release;
937 938
938 939 /* Clear bits that enable EEE in various speeds */
939 940 lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;
940 941
941 942 /* Enable EEE if not disabled by user */
942 943 if (!dev_spec->eee_disable) {
943 944 /* Save off link partner's EEE ability */
944 945 ret_val = e1000_read_emi_reg_locked(hw, lpa,
945 946 &dev_spec->eee_lp_ability);
946 947 if (ret_val)
947 948 goto release;
948 949
949 950 /* Read EEE advertisement */
950 951 ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
951 952 if (ret_val)
952 953 goto release;
953 954
954 955 /* Enable EEE only for speeds in which the link partner is
955 956 * EEE capable and for which we advertise EEE.
956 957 */
957 958 if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
958 959 lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
959 960
960 961 if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
961 962 hw->phy.ops.read_reg_locked(hw, PHY_LP_ABILITY, &data);
962 963 if (data & NWAY_LPAR_100TX_FD_CAPS)
963 964 lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
964 965 else
965 966 /* EEE is not supported in 100Half, so ignore
966 967 * partner's EEE in 100 ability if full-duplex
967 968 * is not advertised.
968 969 */
969 970 dev_spec->eee_lp_ability &=
970 971 ~I82579_EEE_100_SUPPORTED;
971 972 }
972 973 }
973 974
974 975 if (hw->phy.type == e1000_phy_82579) {
975 976 ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
976 977 &data);
977 978 if (ret_val)
978 979 goto release;
979 980
980 981 data &= ~I82579_LPI_100_PLL_SHUT;
981 982 ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
982 983 data);
983 984 }
984 985
985 986 /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
986 987 ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
987 988 if (ret_val)
988 989 goto release;
989 990
990 991 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
991 992 release:
992 993 hw->phy.ops.release(hw);
993 994
994 995 return ret_val;
995 996 }
996 997
997 998 /**
998 999 * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
999 1000 * @hw: pointer to the HW structure
1000 1001 * @link: link up bool flag
1001 1002 *
1002 1003 * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
1003 1004 * preventing further DMA write requests. Workaround the issue by disabling
 1004 1005 * the de-assertion of the clock request when in 1Gbps mode.
1005 1006 * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
1006 1007 * speeds in order to avoid Tx hangs.
1007 1008 **/
1008 1009 static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
1009 1010 {
1010 1011 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1011 1012 u32 status = E1000_READ_REG(hw, E1000_STATUS);
1012 1013 s32 ret_val = E1000_SUCCESS;
1013 1014 u16 reg;
1014 1015
1015 1016 if (link && (status & E1000_STATUS_SPEED_1000)) {
1016 1017 ret_val = hw->phy.ops.acquire(hw);
1017 1018 if (ret_val)
1018 1019 return ret_val;
1019 1020
1020 1021 ret_val =
1021 1022 e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
 1022 1023 					   &reg);
1023 1024 if (ret_val)
1024 1025 goto release;
1025 1026
1026 1027 ret_val =
1027 1028 e1000_write_kmrn_reg_locked(hw,
1028 1029 E1000_KMRNCTRLSTA_K1_CONFIG,
1029 1030 reg &
1030 1031 ~E1000_KMRNCTRLSTA_K1_ENABLE);
1031 1032 if (ret_val)
1032 1033 goto release;
1033 1034
1034 1035 usec_delay(10);
1035 1036
1036 1037 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
1037 1038 fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);
1038 1039
1039 1040 ret_val =
1040 1041 e1000_write_kmrn_reg_locked(hw,
1041 1042 E1000_KMRNCTRLSTA_K1_CONFIG,
1042 1043 reg);
1043 1044 release:
1044 1045 hw->phy.ops.release(hw);
1045 1046 } else {
1046 1047 /* clear FEXTNVM6 bit 8 on link down or 10/100 */
1047 1048 fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
1048 1049
1049 1050 if ((hw->phy.revision > 5) || !link ||
1050 1051 ((status & E1000_STATUS_SPEED_100) &&
1051 1052 (status & E1000_STATUS_FD)))
1052 1053 goto update_fextnvm6;
1053 1054
 1054 1055 		ret_val = hw->phy.ops.read_reg(hw, I217_INBAND_CTRL, &reg);
1055 1056 if (ret_val)
1056 1057 return ret_val;
1057 1058
1058 1059 /* Clear link status transmit timeout */
1059 1060 reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
1060 1061
1061 1062 if (status & E1000_STATUS_SPEED_100) {
1062 1063 /* Set inband Tx timeout to 5x10us for 100Half */
1063 1064 reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1064 1065
1065 1066 /* Do not extend the K1 entry latency for 100Half */
1066 1067 fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1067 1068 } else {
1068 1069 /* Set inband Tx timeout to 50x10us for 10Full/Half */
1069 1070 reg |= 50 <<
1070 1071 I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
1071 1072
1072 1073 /* Extend the K1 entry latency for 10 Mbps */
1073 1074 fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
1074 1075 }
1075 1076
1076 1077 ret_val = hw->phy.ops.write_reg(hw, I217_INBAND_CTRL, reg);
1077 1078 if (ret_val)
1078 1079 return ret_val;
1079 1080
1080 1081 update_fextnvm6:
1081 1082 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1082 1083 }
1083 1084
1084 1085 return ret_val;
1085 1086 }
1086 1087
1087 1088 static u64 e1000_ltr2ns(u16 ltr)
1088 1089 {
1089 1090 u32 value, scale;
1090 1091
1091 1092 /* Determine the latency in nsec based on the LTR value & scale */
1092 1093 value = ltr & E1000_LTRV_VALUE_MASK;
1093 1094 scale = (ltr & E1000_LTRV_SCALE_MASK) >> E1000_LTRV_SCALE_SHIFT;
1094 1095
1095 1096 return value * (1 << (scale * E1000_LTRV_SCALE_FACTOR));
1096 1097 }
1097 1098
1098 1099 /**
1099 1100 * e1000_platform_pm_pch_lpt - Set platform power management values
1100 1101 * @hw: pointer to the HW structure
1101 1102 * @link: bool indicating link status
1102 1103 *
1103 1104 * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
1104 1105 * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
1105 1106 * when link is up (which must not exceed the maximum latency supported
1106 1107 * by the platform), otherwise specify there is no LTR requirement.
1107 1108 * Unlike TRUE-PCIe devices which set the LTR maximum snoop/no-snoop
1108 1109 * latencies in the LTR Extended Capability Structure in the PCIe Extended
1109 1110 * Capability register set, on this device LTR is set by writing the
1110 1111 * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1111 1112 * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
1112 1113 * message to the PMC.
1113 1114 *
1114 1115 * Use the LTR value to calculate the Optimized Buffer Flush/Fill (OBFF)
1115 1116 * high-water mark.
1116 1117 **/
1117 1118 static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1118 1119 {
1119 1120 u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1120 1121 link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1121 1122 u16 lat_enc = 0; /* latency encoded */
1122 1123 s32 obff_hwm = 0;
1123 1124
1124 1125 DEBUGFUNC("e1000_platform_pm_pch_lpt");
1125 1126
1126 1127 if (link) {
1127 1128 u16 speed, duplex, scale = 0;
1128 1129 u16 max_snoop, max_nosnoop;
1129 1130 u16 max_ltr_enc; /* max LTR latency encoded */
1130 1131 s64 lat_ns;
1131 1132 s64 value;
1132 1133 u32 rxa;
1133 1134
1134 1135 if (!hw->mac.max_frame_size) {
1135 1136 DEBUGOUT("max_frame_size not set.\n");
1136 1137 return -E1000_ERR_CONFIG;
1137 1138 }
1138 1139
1139 1140 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1140 1141 if (!speed) {
1141 1142 DEBUGOUT("Speed not set.\n");
1142 1143 return -E1000_ERR_CONFIG;
1143 1144 }
1144 1145
1145 1146 /* Rx Packet Buffer Allocation size (KB) */
1146 1147 rxa = E1000_READ_REG(hw, E1000_PBA) & E1000_PBA_RXA_MASK;
1147 1148
1148 1149 /* Determine the maximum latency tolerated by the device.
1149 1150 *
1150 1151 * Per the PCIe spec, the tolerated latencies are encoded as
1151 1152 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1152 1153 * a 10-bit value (0-1023) to provide a range from 1 ns to
1153 1154 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
1154 1155 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1155 1156 */
1156 1157 lat_ns = ((s64)rxa * 1024 -
1157 1158 (2 * (s64)hw->mac.max_frame_size)) * 8 * 1000;
1158 1159 if (lat_ns < 0)
1159 1160 lat_ns = 0;
1160 1161 else
1161 1162 lat_ns /= speed;
1162 1163 value = lat_ns;
1163 1164
1164 1165 while (value > E1000_LTRV_VALUE_MASK) {
1165 1166 scale++;
1166 1167 value = E1000_DIVIDE_ROUND_UP(value, (1 << 5));
1167 1168 }
1168 1169 if (scale > E1000_LTRV_SCALE_MAX) {
1169 1170 DEBUGOUT1("Invalid LTR latency scale %d\n", scale);
1170 1171 return -E1000_ERR_CONFIG;
1171 1172 }
1172 1173 lat_enc = (u16)((scale << E1000_LTRV_SCALE_SHIFT) | value);
1173 1174
1174 1175 /* Determine the maximum latency tolerated by the platform */
1175 1176 e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT, &max_snoop);
1176 1177 e1000_read_pci_cfg(hw, E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1177 1178 max_ltr_enc = E1000_MAX(max_snoop, max_nosnoop);
1178 1179
1179 1180 if (lat_enc > max_ltr_enc) {
1180 1181 lat_enc = max_ltr_enc;
1181 1182 lat_ns = e1000_ltr2ns(max_ltr_enc);
1182 1183 }
1183 1184
1184 1185 if (lat_ns) {
1185 1186 lat_ns *= speed * 1000;
1186 1187 lat_ns /= 8;
1187 1188 lat_ns /= 1000000000;
1188 1189 obff_hwm = (s32)(rxa - lat_ns);
1189 1190 }
1190 1191 if ((obff_hwm < 0) || (obff_hwm > E1000_SVT_OFF_HWM_MASK)) {
1191 1192 DEBUGOUT1("Invalid high water mark %d\n", obff_hwm);
1192 1193 return -E1000_ERR_CONFIG;
1193 1194 }
1194 1195 }
1195 1196
1196 1197 /* Set Snoop and No-Snoop latencies the same */
1197 1198 reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1198 1199 E1000_WRITE_REG(hw, E1000_LTRV, reg);
1199 1200
1200 1201 /* Set OBFF high water mark */
1201 1202 reg = E1000_READ_REG(hw, E1000_SVT) & ~E1000_SVT_OFF_HWM_MASK;
1202 1203 reg |= obff_hwm;
1203 1204 E1000_WRITE_REG(hw, E1000_SVT, reg);
1204 1205
1205 1206 /* Enable OBFF */
1206 1207 reg = E1000_READ_REG(hw, E1000_SVCR);
1207 1208 reg |= E1000_SVCR_OFF_EN;
1208 1209 /* Always unblock interrupts to the CPU even when the system is
1209 1210 * in OBFF mode. This ensures that small round-robin traffic
1210 1211 * (like ping) does not get dropped or experience long latency.
1211 1212 */
1212 1213 reg |= E1000_SVCR_OFF_MASKINT;
1213 1214 E1000_WRITE_REG(hw, E1000_SVCR, reg);
1214 1215
1215 1216 return E1000_SUCCESS;
1216 1217 }
1217 1218
1218 1219 /**
1219 1220 * e1000_set_obff_timer_pch_lpt - Update Optimized Buffer Flush/Fill timer
1220 1221 * @hw: pointer to the HW structure
1221 1222 * @itr: interrupt throttling rate
1222 1223 *
1223 1224 * Configure OBFF with the updated interrupt rate.
1224 1225 **/
1225 1226 static s32 e1000_set_obff_timer_pch_lpt(struct e1000_hw *hw, u32 itr)
1226 1227 {
1227 1228 u32 svcr;
1228 1229 s32 timer;
1229 1230
1230 1231 DEBUGFUNC("e1000_set_obff_timer_pch_lpt");
1231 1232
1232 1233 /* Convert ITR value into microseconds for OBFF timer */
1233 1234 timer = itr & E1000_ITR_MASK;
1234 1235 timer = (timer * E1000_ITR_MULT) / 1000;
1235 1236
1236 1237 if ((timer < 0) || (timer > E1000_ITR_MASK)) {
1237 1238 DEBUGOUT1("Invalid OBFF timer %d\n", timer);
1238 1239 return -E1000_ERR_CONFIG;
1239 1240 }
1240 1241
1241 1242 svcr = E1000_READ_REG(hw, E1000_SVCR);
1242 1243 svcr &= ~E1000_SVCR_OFF_TIMER_MASK;
1243 1244 svcr |= timer << E1000_SVCR_OFF_TIMER_SHIFT;
1244 1245 E1000_WRITE_REG(hw, E1000_SVCR, svcr);
1245 1246
1246 1247 return E1000_SUCCESS;
1247 1248 }
1248 1249
1249 1250 /**
1250 1251 * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1251 1252 * @hw: pointer to the HW structure
1252 1253 * @to_sx: boolean indicating a system power state transition to Sx
1253 1254 *
1254 1255 * When link is down, configure ULP mode to significantly reduce the power
1255 1256 * to the PHY. If on a Manageability Engine (ME) enabled system, tell the
1256 1257 * ME firmware to start the ULP configuration. If not on an ME enabled
1257 1258 * system, configure the ULP mode by software.
1258 1259 */
1259 1260 s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1260 1261 {
1261 1262 u32 mac_reg;
1262 1263 s32 ret_val = E1000_SUCCESS;
1263 1264 u16 phy_reg;
1264 1265 u16 oem_reg = 0;
1265 1266
1266 1267 if ((hw->mac.type < e1000_pch_lpt) ||
1267 1268 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1268 1269 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1269 1270 (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1270 1271 (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1271 1272 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1272 1273 return 0;
1273 1274
1274 1275 if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1275 1276 /* Request ME configure ULP mode in the PHY */
1276 1277 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1277 1278 mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1278 1279 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1279 1280
1280 1281 goto out;
1281 1282 }
1282 1283
1283 1284 if (!to_sx) {
1284 1285 int i = 0;
1285 1286
1286 1287 /* Poll up to 5 seconds for Cable Disconnected indication */
1287 1288 while (!(E1000_READ_REG(hw, E1000_FEXT) &
1288 1289 E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1289 1290 /* Bail if link is re-acquired */
1290 1291 if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)
1291 1292 return -E1000_ERR_PHY;
1292 1293
1293 1294 if (i++ == 100)
1294 1295 break;
1295 1296
1296 1297 msec_delay(50);
1297 1298 }
1298 1299 DEBUGOUT2("CABLE_DISCONNECTED %s set after %dmsec\n",
1299 1300 (E1000_READ_REG(hw, E1000_FEXT) &
1300 1301 E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not",
1301 1302 i * 50);
1302 1303 }
1303 1304
1304 1305 ret_val = hw->phy.ops.acquire(hw);
1305 1306 if (ret_val)
1306 1307 goto out;
1307 1308
1308 1309 /* Force SMBus mode in PHY */
1309 1310 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1310 1311 if (ret_val)
1311 1312 goto release;
1312 1313 phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1313 1314 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1314 1315
1315 1316 /* Force SMBus mode in MAC */
1316 1317 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1317 1318 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1318 1319 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1319 1320
 1320 1321 	/* Si workaround for ULP entry flow on i217/rev6 h/w. Enable
1321 1322 * LPLU and disable Gig speed when entering ULP
1322 1323 */
1323 1324 if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1324 1325 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1325 1326 &oem_reg);
1326 1327 if (ret_val)
1327 1328 goto release;
1328 1329
1329 1330 phy_reg = oem_reg;
1330 1331 phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1331 1332
1332 1333 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1333 1334 phy_reg);
1334 1335
1335 1336 if (ret_val)
1336 1337 goto release;
1337 1338 }
1338 1339
1339 1340 /* Set Inband ULP Exit, Reset to SMBus mode and
1340 1341 * Disable SMBus Release on PERST# in PHY
1341 1342 */
1342 1343 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1343 1344 if (ret_val)
1344 1345 goto release;
1345 1346 phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1346 1347 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1347 1348 if (to_sx) {
1348 1349 if (E1000_READ_REG(hw, E1000_WUFC) & E1000_WUFC_LNKC)
1349 1350 phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1350 1351 else
1351 1352 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1352 1353
1353 1354 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1354 1355 phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT;
1355 1356 } else {
1356 1357 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1357 1358 phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP;
1358 1359 phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST;
1359 1360 }
1360 1361 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1361 1362
1362 1363 /* Set Disable SMBus Release on PERST# in MAC */
1363 1364 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1364 1365 mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1365 1366 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1366 1367
1367 1368 /* Commit ULP changes in PHY by starting auto ULP configuration */
1368 1369 phy_reg |= I218_ULP_CONFIG1_START;
1369 1370 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1370 1371
1371 1372 if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) &&
1372 1373 to_sx && (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
1373 1374 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1374 1375 oem_reg);
1375 1376 if (ret_val)
1376 1377 goto release;
1377 1378 }
1378 1379
1379 1380 release:
1380 1381 hw->phy.ops.release(hw);
1381 1382 out:
1382 1383 if (ret_val)
1383 1384 DEBUGOUT1("Error in ULP enable flow: %d\n", ret_val);
1384 1385 else
1385 1386 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1386 1387
1387 1388 return ret_val;
1388 1389 }
1389 1390
1390 1391 /**
1391 1392 * e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1392 1393 * @hw: pointer to the HW structure
1393 1394 * @force: boolean indicating whether or not to force disabling ULP
1394 1395 *
1395 1396 * Un-configure ULP mode when link is up, the system is transitioned from
1396 1397 * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled
1397 1398 * system, poll for an indication from ME that ULP has been un-configured.
1398 1399 * If not on an ME enabled system, un-configure the ULP mode by software.
1399 1400 *
1400 1401 * During nominal operation, this function is called when link is acquired
1401 1402 * to disable ULP mode (force=FALSE); otherwise, for example when unloading
1402 1403 * the driver or during Sx->S0 transitions, this is called with force=TRUE
1403 1404 * to forcibly disable ULP.
1404 1405 */
1405 1406 s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1406 1407 {
1407 1408 s32 ret_val = E1000_SUCCESS;
1408 1409 u32 mac_reg;
1409 1410 u16 phy_reg;
1410 1411 int i = 0;
1411 1412
1412 1413 if ((hw->mac.type < e1000_pch_lpt) ||
1413 1414 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1414 1415 (hw->device_id == E1000_DEV_ID_PCH_LPT_I217_V) ||
1415 1416 (hw->device_id == E1000_DEV_ID_PCH_I218_LM2) ||
1416 1417 (hw->device_id == E1000_DEV_ID_PCH_I218_V2) ||
1417 1418 (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1418 1419 return 0;
1419 1420
1420 1421 if (E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID) {
1421 1422 if (force) {
1422 1423 /* Request ME un-configure ULP mode in the PHY */
1423 1424 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1424 1425 mac_reg &= ~E1000_H2ME_ULP;
1425 1426 mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1426 1427 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1427 1428 }
1428 1429
1429 1430 /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
1430 1431 while (E1000_READ_REG(hw, E1000_FWSM) &
1431 1432 E1000_FWSM_ULP_CFG_DONE) {
1432 1433 if (i++ == 30) {
1433 1434 ret_val = -E1000_ERR_PHY;
1434 1435 goto out;
1435 1436 }
1436 1437
1437 1438 msec_delay(10);
1438 1439 }
1439 1440 DEBUGOUT1("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1440 1441
1441 1442 if (force) {
1442 1443 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1443 1444 mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1444 1445 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1445 1446 } else {
1446 1447 /* Clear H2ME.ULP after ME ULP configuration */
1447 1448 mac_reg = E1000_READ_REG(hw, E1000_H2ME);
1448 1449 mac_reg &= ~E1000_H2ME_ULP;
1449 1450 E1000_WRITE_REG(hw, E1000_H2ME, mac_reg);
1450 1451 }
1451 1452
1452 1453 goto out;
1453 1454 }
1454 1455
1455 1456 ret_val = hw->phy.ops.acquire(hw);
1456 1457 if (ret_val)
1457 1458 goto out;
1458 1459
1459 1460 if (force)
1460 1461 /* Toggle LANPHYPC Value bit */
1461 1462 e1000_toggle_lanphypc_pch_lpt(hw);
1462 1463
1463 1464 /* Unforce SMBus mode in PHY */
1464 1465 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1465 1466 if (ret_val) {
1466 1467 /* The MAC might be in PCIe mode, so temporarily force to
1467 1468 * SMBus mode in order to access the PHY.
1468 1469 */
1469 1470 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1470 1471 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1471 1472 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1472 1473
1473 1474 msec_delay(50);
1474 1475
1475 1476 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1476 1477 &phy_reg);
1477 1478 if (ret_val)
1478 1479 goto release;
1479 1480 }
1480 1481 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1481 1482 e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1482 1483
1483 1484 /* Unforce SMBus mode in MAC */
1484 1485 mac_reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
1485 1486 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1486 1487 E1000_WRITE_REG(hw, E1000_CTRL_EXT, mac_reg);
1487 1488
1488 1489 /* When ULP mode was previously entered, K1 was disabled by the
1489 1490 	 * hardware. Re-enable K1 in the PHY when exiting ULP.
1490 1491 */
1491 1492 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1492 1493 if (ret_val)
1493 1494 goto release;
1494 1495 phy_reg |= HV_PM_CTRL_K1_ENABLE;
1495 1496 e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1496 1497
1497 1498 /* Clear ULP enabled configuration */
1498 1499 ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1499 1500 if (ret_val)
1500 1501 goto release;
1501 1502 phy_reg &= ~(I218_ULP_CONFIG1_IND |
1502 1503 I218_ULP_CONFIG1_STICKY_ULP |
1503 1504 I218_ULP_CONFIG1_RESET_TO_SMBUS |
1504 1505 I218_ULP_CONFIG1_WOL_HOST |
1505 1506 I218_ULP_CONFIG1_INBAND_EXIT |
1506 1507 I218_ULP_CONFIG1_EN_ULP_LANPHYPC |
1507 1508 I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST |
1508 1509 I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1509 1510 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1510 1511
1511 1512 /* Commit ULP changes by starting auto ULP configuration */
1512 1513 phy_reg |= I218_ULP_CONFIG1_START;
1513 1514 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1514 1515
1515 1516 /* Clear Disable SMBus Release on PERST# in MAC */
1516 1517 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM7);
1517 1518 mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1518 1519 E1000_WRITE_REG(hw, E1000_FEXTNVM7, mac_reg);
1519 1520
1520 1521 release:
1521 1522 hw->phy.ops.release(hw);
1522 1523 if (force) {
1523 1524 hw->phy.ops.reset(hw);
1524 1525 msec_delay(50);
1525 1526 }
1526 1527 out:
1527 1528 if (ret_val)
1528 1529 DEBUGOUT1("Error in ULP disable flow: %d\n", ret_val);
1529 1530 else
1530 1531 hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1531 1532
1532 1533 return ret_val;
1533 1534 }
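/*
 * Editor's illustrative sketch, not part of this change: how a driver
 * resume path would typically use the routine above. The wrapper name
 * is hypothetical; e1000_disable_ulp_lpt_lp() and DEBUGOUT1 come from
 * this file.
 */
static s32 example_resume_from_sx(struct e1000_hw *hw)
{
	/* Link cannot be up yet after Sx, so force the ULP exit. */
	s32 ret_val = e1000_disable_ulp_lpt_lp(hw, TRUE);

	if (ret_val)
		DEBUGOUT1("Forced ULP exit failed: %d\n", ret_val);
	return ret_val;
}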
1534 1535
1535 1536 /**
1536 1537 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1537 1538 * @hw: pointer to the HW structure
1538 1539 *
1539 1540  * Checks to see if the link status of the hardware has changed. If a
1540 1541 * change in link status has been detected, then we read the PHY registers
1541 1542 * to get the current speed/duplex if link exists.
1542 1543 **/
1543 1544 static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1544 1545 {
1545 1546 struct e1000_mac_info *mac = &hw->mac;
1546 1547 s32 ret_val, tipg_reg = 0;
1547 1548 u16 emi_addr, emi_val = 0;
1548 1549 bool link;
1549 1550 u16 phy_reg;
1550 1551
1551 1552 DEBUGFUNC("e1000_check_for_copper_link_ich8lan");
1552 1553
1553 1554 /* We only want to go out to the PHY registers to see if Auto-Neg
1554 1555 * has completed and/or if our link status has changed. The
1555 1556 * get_link_status flag is set upon receiving a Link Status
1556 1557 * Change or Rx Sequence Error interrupt.
1557 1558 */
1558 1559 if (!mac->get_link_status)
1559 1560 return E1000_SUCCESS;
1560 1561
1561 1562 /* First we want to see if the MII Status Register reports
1562 1563 * link. If so, then we want to get the current speed/duplex
1563 1564 * of the PHY.
1564 1565 */
1565 1566 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
1566 1567 if (ret_val)
1567 1568 return ret_val;
1568 1569
1569 1570 if (hw->mac.type == e1000_pchlan) {
1570 1571 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1571 1572 if (ret_val)
1572 1573 return ret_val;
1573 1574 }
1574 1575
1575 1576 /* When connected at 10Mbps half-duplex, some parts are excessively
1576 1577 	 * aggressive, resulting in many collisions. To avoid this, increase
1577 1578 * the IPG and reduce Rx latency in the PHY.
1578 1579 */
1579 - if (((hw->mac.type == e1000_pch2lan) ||
1580 - (hw->mac.type == e1000_pch_lpt) ||
1581 - (hw->mac.type == e1000_pch_spt)) && link) {
1580 + if ((hw->mac.type >= e1000_pch2lan) && link) {
1582 1581 u16 speed, duplex;
1583 1582
1584 1583 e1000_get_speed_and_duplex_copper_generic(hw, &speed, &duplex);
1585 1584 tipg_reg = E1000_READ_REG(hw, E1000_TIPG);
1586 1585 tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1587 1586
1588 1587 if (duplex == HALF_DUPLEX && speed == SPEED_10) {
1589 1588 tipg_reg |= 0xFF;
1590 1589 /* Reduce Rx latency in analog PHY */
1591 1590 emi_val = 0;
1592 - } else if (hw->mac.type == e1000_pch_spt &&
1591 + } else if (hw->mac.type >= e1000_pch_spt &&
1593 1592 duplex == FULL_DUPLEX && speed != SPEED_1000) {
1594 1593 tipg_reg |= 0xC;
1595 1594 emi_val = 1;
1596 1595 } else {
1597 1596 /* Roll back the default values */
1598 1597 tipg_reg |= 0x08;
1599 1598 emi_val = 1;
1600 1599 }
1601 1600
1602 1601 E1000_WRITE_REG(hw, E1000_TIPG, tipg_reg);
1603 1602
1604 1603 ret_val = hw->phy.ops.acquire(hw);
1605 1604 if (ret_val)
1606 1605 return ret_val;
1607 1606
1608 1607 if (hw->mac.type == e1000_pch2lan)
1609 1608 emi_addr = I82579_RX_CONFIG;
1610 1609 else
1611 1610 emi_addr = I217_RX_CONFIG;
1612 1611 ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1613 1612
1614 - if (hw->mac.type == e1000_pch_lpt ||
1615 - hw->mac.type == e1000_pch_spt) {
1613 + if (hw->mac.type >= e1000_pch_lpt) {
1616 1614 u16 phy_reg;
1617 1615
1618 1616 hw->phy.ops.read_reg_locked(hw, I217_PLL_CLOCK_GATE_REG,
1619 1617 &phy_reg);
1620 1618 phy_reg &= ~I217_PLL_CLOCK_GATE_MASK;
1621 1619 if (speed == SPEED_100 || speed == SPEED_10)
1622 1620 phy_reg |= 0x3E8;
1623 1621 else
1624 1622 phy_reg |= 0xFA;
1625 1623 hw->phy.ops.write_reg_locked(hw,
1626 1624 I217_PLL_CLOCK_GATE_REG,
1627 1625 phy_reg);
1628 1626
1629 1627 if (speed == SPEED_1000) {
1630 1628 hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
1631 1629 &phy_reg);
1632 1630
1633 1631 phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
1634 1632
1635 1633 hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
1636 1634 phy_reg);
1637 1635 }
1638 1636 }
1639 1637 hw->phy.ops.release(hw);
1640 1638
1641 1639 if (ret_val)
1642 1640 return ret_val;
1643 1641
1644 - if (hw->mac.type == e1000_pch_spt) {
1642 + if (hw->mac.type >= e1000_pch_spt) {
1645 1643 u16 data;
1646 1644 u16 ptr_gap;
1647 1645
1648 1646 if (speed == SPEED_1000) {
1649 1647 ret_val = hw->phy.ops.acquire(hw);
1650 1648 if (ret_val)
1651 1649 return ret_val;
1652 1650
1653 1651 ret_val = hw->phy.ops.read_reg_locked(hw,
1654 1652 PHY_REG(776, 20),
1655 1653 &data);
1656 1654 if (ret_val) {
1657 1655 hw->phy.ops.release(hw);
1658 1656 return ret_val;
1659 1657 }
1660 1658
1661 1659 ptr_gap = (data & (0x3FF << 2)) >> 2;
1662 1660 if (ptr_gap < 0x18) {
1663 1661 data &= ~(0x3FF << 2);
1664 1662 data |= (0x18 << 2);
1665 1663 ret_val =
1666 1664 hw->phy.ops.write_reg_locked(hw,
1667 1665 PHY_REG(776, 20), data);
1668 1666 }
1669 1667 hw->phy.ops.release(hw);
1670 1668 if (ret_val)
1671 1669 return ret_val;
1672 1670 } else {
1673 1671 ret_val = hw->phy.ops.acquire(hw);
1674 1672 if (ret_val)
1675 1673 return ret_val;
1676 1674
1677 1675 ret_val = hw->phy.ops.write_reg_locked(hw,
1678 1676 PHY_REG(776, 20),
1679 1677 0xC023);
1680 1678 hw->phy.ops.release(hw);
1681 1679 if (ret_val)
1682 1680 return ret_val;
1683 1681
1684 1682 }
1685 1683 }
1686 1684 }
1687 1685
1688 1686 /* I217 Packet Loss issue:
1689 1687 * ensure that FEXTNVM4 Beacon Duration is set correctly
1690 1688 * on power up.
1691 1689 * Set the Beacon Duration for I217 to 8 usec
1692 1690 */
1693 - if ((hw->mac.type == e1000_pch_lpt) ||
1694 - (hw->mac.type == e1000_pch_spt)) {
1691 + if (hw->mac.type >= e1000_pch_lpt) {
1695 1692 u32 mac_reg;
1696 1693
1697 1694 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
1698 1695 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
1699 1696 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1700 1697 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
1701 1698 }
1702 1699
1703 1700 /* Work-around I218 hang issue */
1704 1701 if ((hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1705 1702 (hw->device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1706 1703 (hw->device_id == E1000_DEV_ID_PCH_I218_LM3) ||
1707 1704 (hw->device_id == E1000_DEV_ID_PCH_I218_V3)) {
1708 1705 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1709 1706 if (ret_val)
1710 1707 return ret_val;
1711 1708 }
1712 - if ((hw->mac.type == e1000_pch_lpt) ||
1713 - (hw->mac.type == e1000_pch_spt)) {
1709 + if (hw->mac.type >= e1000_pch_lpt) {
1714 1710 /* Set platform power management values for
1715 1711 * Latency Tolerance Reporting (LTR)
1716 1712 * Optimized Buffer Flush/Fill (OBFF)
1717 1713 */
1718 1714 ret_val = e1000_platform_pm_pch_lpt(hw, link);
1719 1715 if (ret_val)
1720 1716 return ret_val;
1721 1717 }
1722 1718
1723 1719 /* Clear link partner's EEE ability */
1724 1720 hw->dev_spec.ich8lan.eee_lp_ability = 0;
1725 1721
1726 - /* FEXTNVM6 K1-off workaround */
1722 + /* FEXTNVM6 K1-off workaround - for SPT only */
1727 1723 if (hw->mac.type == e1000_pch_spt) {
1728 1724 u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
1729 1725 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
1730 1726
1731 1727 if ((pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) &&
1732 1728 (hw->dev_spec.ich8lan.disable_k1_off == FALSE))
1733 1729 fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1734 1730 else
1735 1731 fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1736 1732
1737 1733 E1000_WRITE_REG(hw, E1000_FEXTNVM6, fextnvm6);
1738 1734 }
1739 1735
1740 1736 if (!link)
1741 1737 return E1000_SUCCESS; /* No link detected */
1742 1738
1743 1739 mac->get_link_status = FALSE;
1744 1740
1745 1741 switch (hw->mac.type) {
1746 1742 case e1000_pch2lan:
1747 1743 ret_val = e1000_k1_workaround_lv(hw);
1748 1744 if (ret_val)
1749 1745 return ret_val;
1750 1746 /* fall-thru */
1751 1747 case e1000_pchlan:
1752 1748 if (hw->phy.type == e1000_phy_82578) {
1753 1749 ret_val = e1000_link_stall_workaround_hv(hw);
1754 1750 if (ret_val)
1755 1751 return ret_val;
1756 1752 }
1757 1753
1758 1754 /* Workaround for PCHx parts in half-duplex:
1759 1755 * Set the number of preambles removed from the packet
1760 1756 * when it is passed from the PHY to the MAC to prevent
1761 1757 * the MAC from misinterpreting the packet type.
1762 1758 */
1763 1759 hw->phy.ops.read_reg(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1764 1760 phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1765 1761
1766 1762 if ((E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FD) !=
1767 1763 E1000_STATUS_FD)
1768 1764 phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1769 1765
1770 1766 hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1771 1767 break;
1772 1768 default:
1773 1769 break;
1774 1770 }
1775 1771
1776 1772 /* Check if there was DownShift, must be checked
1777 1773 * immediately after link-up
1778 1774 */
1779 1775 e1000_check_downshift_generic(hw);
1780 1776
1781 1777 /* Enable/Disable EEE after link up */
1782 1778 if (hw->phy.type > e1000_phy_82579) {
1783 1779 ret_val = e1000_set_eee_pchlan(hw);
1784 1780 if (ret_val)
1785 1781 return ret_val;
1786 1782 }
1787 1783
1788 1784 /* If we are forcing speed/duplex, then we simply return since
1789 1785 * we have already determined whether we have link or not.
1790 1786 */
1791 1787 if (!mac->autoneg)
1792 1788 return -E1000_ERR_CONFIG;
1793 1789
1794 1790 /* Auto-Neg is enabled. Auto Speed Detection takes care
1795 1791 * of MAC speed/duplex configuration. So we only need to
1796 1792 * configure Collision Distance in the MAC.
1797 1793 */
1798 1794 mac->ops.config_collision_dist(hw);
1799 1795
1800 1796 /* Configure Flow Control now that Auto-Neg has completed.
1801 1797 * First, we need to restore the desired flow control
1802 1798 * settings because we may have had to re-autoneg with a
1803 1799 * different link partner.
1804 1800 */
1805 1801 ret_val = e1000_config_fc_after_link_up_generic(hw);
1806 1802 if (ret_val)
1807 1803 DEBUGOUT("Error configuring flow control\n");
1808 1804
1809 1805 return ret_val;
1810 1806 }
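/*
 * Editor's illustrative sketch, not part of this change: the usual
 * entry into the routine above from a link-state-change interrupt.
 * The handler name is hypothetical, and mac->ops.check_for_link is
 * assumed to be wired to e1000_check_for_copper_link_ich8lan() by the
 * MAC init code, as elsewhere in the shared e1000 API.
 */
static void example_lsc_handler(struct e1000_hw *hw)
{
	/* The ISR sets get_link_status on LSC/Rx Sequence Error. */
	hw->mac.get_link_status = TRUE;

	if ((hw->mac.ops.check_for_link(hw) == E1000_SUCCESS) &&
	    !hw->mac.get_link_status) {
		/* Link is up; speed, duplex and flow control are valid. */
	}
}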
1811 1807
1812 1808 /**
1813 1809 * e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
1814 1810 * @hw: pointer to the HW structure
1815 1811 *
1816 1812 * Initialize family-specific function pointers for PHY, MAC, and NVM.
1817 1813 **/
1818 1814 void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
1819 1815 {
1820 1816 DEBUGFUNC("e1000_init_function_pointers_ich8lan");
1821 1817
1822 1818 hw->mac.ops.init_params = e1000_init_mac_params_ich8lan;
1823 1819 hw->nvm.ops.init_params = e1000_init_nvm_params_ich8lan;
1824 1820 switch (hw->mac.type) {
1825 1821 case e1000_ich8lan:
1826 1822 case e1000_ich9lan:
1827 1823 case e1000_ich10lan:
1828 1824 hw->phy.ops.init_params = e1000_init_phy_params_ich8lan;
1829 1825 break;
1830 1826 case e1000_pchlan:
1831 1827 case e1000_pch2lan:
1832 1828 case e1000_pch_lpt:
1833 1829 case e1000_pch_spt:
1830 + case e1000_pch_cnp:
1834 1831 hw->phy.ops.init_params = e1000_init_phy_params_pchlan;
1835 1832 break;
1836 1833 default:
1837 1834 break;
1838 1835 }
1839 1836 }
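/*
 * Editor's illustrative sketch, not part of this change: the expected
 * bring-up order. A caller installs the init_params hooks through the
 * routine above and then invokes them; the wrapper name is hypothetical.
 */
static s32 example_ich8lan_bringup(struct e1000_hw *hw)
{
	s32 ret_val;

	e1000_init_function_pointers_ich8lan(hw);
	ret_val = hw->mac.ops.init_params(hw);
	if (ret_val)
		return ret_val;
	ret_val = hw->nvm.ops.init_params(hw);
	if (ret_val)
		return ret_val;
	return hw->phy.ops.init_params(hw);
}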
1840 1837
1841 1838 /**
1842 1839 * e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1843 1840 * @hw: pointer to the HW structure
1844 1841 *
1845 1842 * Acquires the mutex for performing NVM operations.
1846 1843 **/
1847 1844 static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
1848 1845 {
1849 1846 DEBUGFUNC("e1000_acquire_nvm_ich8lan");
1850 1847
1851 1848 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1852 1849
1853 1850 return E1000_SUCCESS;
1854 1851 }
1855 1852
1856 1853 /**
1857 1854 * e1000_release_nvm_ich8lan - Release NVM mutex
1858 1855 * @hw: pointer to the HW structure
1859 1856 *
1860 1857 * Releases the mutex used while performing NVM operations.
1861 1858 **/
1862 1859 static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
1863 1860 {
1864 1861 DEBUGFUNC("e1000_release_nvm_ich8lan");
1865 1862
1866 1863 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.nvm_mutex);
1867 1864
1868 1865 return;
1869 1866 }
1870 1867
1871 1868 /**
1872 1869 * e1000_acquire_swflag_ich8lan - Acquire software control flag
1873 1870 * @hw: pointer to the HW structure
1874 1871 *
1875 1872 * Acquires the software control flag for performing PHY and select
1876 1873 * MAC CSR accesses.
1877 1874 **/
1878 1875 static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1879 1876 {
1880 1877 u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1881 1878 s32 ret_val = E1000_SUCCESS;
1882 1879
1883 1880 DEBUGFUNC("e1000_acquire_swflag_ich8lan");
1884 1881
1885 1882 E1000_MUTEX_LOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1886 1883
1887 1884 while (timeout) {
1888 1885 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1889 1886 if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1890 1887 break;
1891 1888
1892 1889 msec_delay_irq(1);
1893 1890 timeout--;
1894 1891 }
1895 1892
1896 1893 if (!timeout) {
1897 1894 DEBUGOUT("SW has already locked the resource.\n");
1898 1895 ret_val = -E1000_ERR_CONFIG;
1899 1896 goto out;
1900 1897 }
1901 1898
1902 1899 timeout = SW_FLAG_TIMEOUT;
1903 1900
1904 1901 extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1905 1902 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1906 1903
1907 1904 while (timeout) {
1908 1905 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1909 1906 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1910 1907 break;
1911 1908
1912 1909 msec_delay_irq(1);
1913 1910 timeout--;
1914 1911 }
1915 1912
1916 1913 if (!timeout) {
1917 1914 DEBUGOUT2("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1918 1915 E1000_READ_REG(hw, E1000_FWSM), extcnf_ctrl);
1919 1916 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1920 1917 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1921 1918 ret_val = -E1000_ERR_CONFIG;
1922 1919 goto out;
1923 1920 }
1924 1921
1925 1922 out:
1926 1923 if (ret_val)
1927 1924 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1928 1925
1929 1926 return ret_val;
1930 1927 }
1931 1928
1932 1929 /**
1933 1930 * e1000_release_swflag_ich8lan - Release software control flag
1934 1931 * @hw: pointer to the HW structure
1935 1932 *
1936 1933 * Releases the software control flag for performing PHY and select
1937 1934 * MAC CSR accesses.
1938 1935 **/
1939 1936 static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1940 1937 {
1941 1938 u32 extcnf_ctrl;
1942 1939
1943 1940 DEBUGFUNC("e1000_release_swflag_ich8lan");
1944 1941
1945 1942 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
1946 1943
1947 1944 if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1948 1945 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1949 1946 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
1950 1947 } else {
1951 1948 DEBUGOUT("Semaphore unexpectedly released by sw/fw/hw\n");
1952 1949 }
1953 1950
1954 1951 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
1955 1952
1956 1953 return;
1957 1954 }
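/*
 * Editor's illustrative sketch, not part of this change: the
 * acquire/release pairing expected by the two routines above. The CSR
 * access in the middle is a placeholder.
 */
static s32 example_csr_access_under_swflag(struct e1000_hw *hw)
{
	s32 ret_val = e1000_acquire_swflag_ich8lan(hw);

	if (ret_val)
		return ret_val;	/* FW/HW holds the flag; leave CSRs alone */

	/* ... PHY or select MAC CSR accesses go here ... */

	e1000_release_swflag_ich8lan(hw);
	return E1000_SUCCESS;
}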
1958 1955
1959 1956 /**
1960 1957 * e1000_check_mng_mode_ich8lan - Checks management mode
1961 1958 * @hw: pointer to the HW structure
1962 1959 *
1963 1960 * This checks if the adapter has any manageability enabled.
1964 1961 * This is a function pointer entry point only called by read/write
1965 1962 * routines for the PHY and NVM parts.
1966 1963 **/
1967 1964 static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1968 1965 {
1969 1966 u32 fwsm;
1970 1967
1971 1968 DEBUGFUNC("e1000_check_mng_mode_ich8lan");
1972 1969
1973 1970 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1974 1971
1975 1972 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1976 1973 ((fwsm & E1000_FWSM_MODE_MASK) ==
1977 1974 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1978 1975 }
1979 1976
1980 1977 /**
1981 1978 * e1000_check_mng_mode_pchlan - Checks management mode
1982 1979 * @hw: pointer to the HW structure
1983 1980 *
1984 1981 * This checks if the adapter has iAMT enabled.
1985 1982 * This is a function pointer entry point only called by read/write
1986 1983 * routines for the PHY and NVM parts.
1987 1984 **/
1988 1985 static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1989 1986 {
1990 1987 u32 fwsm;
1991 1988
1992 1989 DEBUGFUNC("e1000_check_mng_mode_pchlan");
1993 1990
1994 1991 fwsm = E1000_READ_REG(hw, E1000_FWSM);
1995 1992
1996 1993 return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1997 1994 (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1998 1995 }
1999 1996
2000 1997 /**
2001 1998 * e1000_rar_set_pch2lan - Set receive address register
2002 1999 * @hw: pointer to the HW structure
2003 2000 * @addr: pointer to the receive address
2004 2001 * @index: receive address array register
2005 2002 *
2006 2003 * Sets the receive address array register at index to the address passed
2007 2004 * in by addr. For 82579, RAR[0] is the base address register that is to
2008 2005  * contain the MAC address, but RAR[1-6] are reserved for manageability (ME).
2009 2006 * Use SHRA[0-3] in place of those reserved for ME.
2010 2007 **/
2011 2008 static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
2012 2009 {
2013 2010 u32 rar_low, rar_high;
2014 2011
2015 2012 DEBUGFUNC("e1000_rar_set_pch2lan");
2016 2013
2017 2014 /* HW expects these in little endian so we reverse the byte order
2018 2015 * from network order (big endian) to little endian
2019 2016 */
2020 2017 rar_low = ((u32) addr[0] |
2021 2018 ((u32) addr[1] << 8) |
2022 2019 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2023 2020
2024 2021 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2025 2022
2026 2023 /* If MAC address zero, no need to set the AV bit */
2027 2024 if (rar_low || rar_high)
2028 2025 rar_high |= E1000_RAH_AV;
2029 2026
2030 2027 if (index == 0) {
2031 2028 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2032 2029 E1000_WRITE_FLUSH(hw);
2033 2030 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2034 2031 E1000_WRITE_FLUSH(hw);
2035 2032 return E1000_SUCCESS;
2036 2033 }
2037 2034
2038 2035 /* RAR[1-6] are owned by manageability. Skip those and program the
2039 2036 * next address into the SHRA register array.
2040 2037 */
2041 2038 if (index < (u32) (hw->mac.rar_entry_count)) {
2042 2039 s32 ret_val;
2043 2040
2044 2041 ret_val = e1000_acquire_swflag_ich8lan(hw);
2045 2042 if (ret_val)
2046 2043 goto out;
2047 2044
2048 2045 E1000_WRITE_REG(hw, E1000_SHRAL(index - 1), rar_low);
2049 2046 E1000_WRITE_FLUSH(hw);
2050 2047 E1000_WRITE_REG(hw, E1000_SHRAH(index - 1), rar_high);
2051 2048 E1000_WRITE_FLUSH(hw);
2052 2049
2053 2050 e1000_release_swflag_ich8lan(hw);
2054 2051
2055 2052 /* verify the register updates */
2056 2053 if ((E1000_READ_REG(hw, E1000_SHRAL(index - 1)) == rar_low) &&
2057 2054 (E1000_READ_REG(hw, E1000_SHRAH(index - 1)) == rar_high))
2058 2055 return E1000_SUCCESS;
2059 2056
2060 2057 DEBUGOUT2("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
2061 2058 (index - 1), E1000_READ_REG(hw, E1000_FWSM));
2062 2059 }
2063 2060
2064 2061 out:
2065 2062 DEBUGOUT1("Failed to write receive address at index %d\n", index);
2066 2063 return -E1000_ERR_CONFIG;
2067 2064 }
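/*
 * Editor's illustrative sketch, not part of this change: the RAL/RAH
 * byte order used above, for an arbitrary address 00:1b:21:3c:4d:5e.
 * addr[0] is the first byte on the wire and lands in the low byte of
 * RAL.
 */
static void example_rar_byte_order(void)
{
	u8 addr[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };
	u32 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
		       ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
	u32 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* rar_low == 0x3c211b00, rar_high == 0x00005e4d */
	(void) rar_low;
	(void) rar_high;
}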
2068 2065
2069 2066 /**
2070 2067 * e1000_rar_set_pch_lpt - Set receive address registers
2071 2068 * @hw: pointer to the HW structure
2072 2069 * @addr: pointer to the receive address
2073 2070 * @index: receive address array register
2074 2071 *
2075 2072 * Sets the receive address register array at index to the address passed
2076 2073 * in by addr. For LPT, RAR[0] is the base address register that is to
2077 2074 * contain the MAC address. SHRA[0-10] are the shared receive address
2078 2075 * registers that are shared between the Host and manageability engine (ME).
2079 2076 **/
2080 2077 static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
2081 2078 {
2082 2079 u32 rar_low, rar_high;
2083 2080 u32 wlock_mac;
2084 2081
2085 2082 DEBUGFUNC("e1000_rar_set_pch_lpt");
2086 2083
2087 2084 /* HW expects these in little endian so we reverse the byte order
2088 2085 * from network order (big endian) to little endian
2089 2086 */
2090 2087 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
2091 2088 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
2092 2089
2093 2090 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
2094 2091
2095 2092 /* If MAC address zero, no need to set the AV bit */
2096 2093 if (rar_low || rar_high)
2097 2094 rar_high |= E1000_RAH_AV;
2098 2095
2099 2096 if (index == 0) {
2100 2097 E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
2101 2098 E1000_WRITE_FLUSH(hw);
2102 2099 E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
2103 2100 E1000_WRITE_FLUSH(hw);
2104 2101 return E1000_SUCCESS;
2105 2102 }
2106 2103
2107 2104 /* The manageability engine (ME) can lock certain SHRAR registers that
2108 2105 * it is using - those registers are unavailable for use.
2109 2106 */
2110 2107 if (index < hw->mac.rar_entry_count) {
2111 2108 wlock_mac = E1000_READ_REG(hw, E1000_FWSM) &
2112 2109 E1000_FWSM_WLOCK_MAC_MASK;
2113 2110 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
2114 2111
2115 2112 /* Check if all SHRAR registers are locked */
2116 2113 if (wlock_mac == 1)
2117 2114 goto out;
2118 2115
2119 2116 if ((wlock_mac == 0) || (index <= wlock_mac)) {
2120 2117 s32 ret_val;
2121 2118
2122 2119 ret_val = e1000_acquire_swflag_ich8lan(hw);
2123 2120
2124 2121 if (ret_val)
2125 2122 goto out;
2126 2123
2127 2124 E1000_WRITE_REG(hw, E1000_SHRAL_PCH_LPT(index - 1),
2128 2125 rar_low);
2129 2126 E1000_WRITE_FLUSH(hw);
2130 2127 E1000_WRITE_REG(hw, E1000_SHRAH_PCH_LPT(index - 1),
2131 2128 rar_high);
2132 2129 E1000_WRITE_FLUSH(hw);
2133 2130
2134 2131 e1000_release_swflag_ich8lan(hw);
2135 2132
2136 2133 /* verify the register updates */
2137 2134 if ((E1000_READ_REG(hw, E1000_SHRAL_PCH_LPT(index - 1)) == rar_low) &&
2138 2135 (E1000_READ_REG(hw, E1000_SHRAH_PCH_LPT(index - 1)) == rar_high))
2139 2136 return E1000_SUCCESS;
2140 2137 }
2141 2138 }
2142 2139
2143 2140 out:
2144 2141 DEBUGOUT1("Failed to write receive address at index %d\n", index);
2145 2142 return -E1000_ERR_CONFIG;
2146 2143 }
2147 2144
2148 2145 /**
2149 2146 * e1000_update_mc_addr_list_pch2lan - Update Multicast addresses
2150 2147 * @hw: pointer to the HW structure
2151 2148 * @mc_addr_list: array of multicast addresses to program
2152 2149 * @mc_addr_count: number of multicast addresses to program
2153 2150 *
2154 2151 * Updates entire Multicast Table Array of the PCH2 MAC and PHY.
2155 2152 * The caller must have a packed mc_addr_list of multicast addresses.
2156 2153 **/
2157 2154 static void e1000_update_mc_addr_list_pch2lan(struct e1000_hw *hw,
2158 2155 u8 *mc_addr_list,
2159 2156 u32 mc_addr_count)
2160 2157 {
2161 2158 u16 phy_reg = 0;
2162 2159 int i;
2163 2160 s32 ret_val;
2164 2161
2165 2162 DEBUGFUNC("e1000_update_mc_addr_list_pch2lan");
2166 2163
2167 2164 e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count);
2168 2165
2169 2166 ret_val = hw->phy.ops.acquire(hw);
2170 2167 if (ret_val)
2171 2168 return;
2172 2169
2173 2170 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2174 2171 if (ret_val)
2175 2172 goto release;
2176 2173
2177 2174 for (i = 0; i < hw->mac.mta_reg_count; i++) {
2178 2175 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
2179 2176 (u16)(hw->mac.mta_shadow[i] &
2180 2177 0xFFFF));
2181 2178 hw->phy.ops.write_reg_page(hw, (BM_MTA(i) + 1),
2182 2179 (u16)((hw->mac.mta_shadow[i] >> 16) &
2183 2180 0xFFFF));
2184 2181 }
2185 2182
2186 2183 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2187 2184
2188 2185 release:
2189 2186 hw->phy.ops.release(hw);
2190 2187 }
2191 2188
2192 2189 /**
2193 2190 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
2194 2191 * @hw: pointer to the HW structure
2195 2192 *
2196 2193 * Checks if firmware is blocking the reset of the PHY.
2197 2194 * This is a function pointer entry point only called by
2198 2195 * reset routines.
2199 2196 **/
2200 2197 static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
2201 2198 {
2202 2199 u32 fwsm;
2203 2200 bool blocked = FALSE;
2204 2201 int i = 0;
2205 2202
2206 2203 DEBUGFUNC("e1000_check_reset_block_ich8lan");
2207 2204
2208 2205 do {
2209 2206 fwsm = E1000_READ_REG(hw, E1000_FWSM);
2210 2207 if (!(fwsm & E1000_ICH_FWSM_RSPCIPHY)) {
2211 2208 blocked = TRUE;
2212 2209 msec_delay(10);
2213 2210 continue;
2214 2211 }
2215 2212 blocked = FALSE;
2216 2213 } while (blocked && (i++ < 30));
2217 2214 return blocked ? E1000_BLK_PHY_RESET : E1000_SUCCESS;
2218 2215 }
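/*
 * Editor's illustrative sketch, not part of this change: reset paths
 * consult the check above before touching the PHY; the wrapper name is
 * hypothetical.
 */
static s32 example_guarded_phy_reset(struct e1000_hw *hw)
{
	if (e1000_check_reset_block_ich8lan(hw) == E1000_BLK_PHY_RESET)
		return E1000_SUCCESS;	/* FW owns the PHY; skip the reset */

	return hw->phy.ops.reset(hw);
}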
2219 2216
2220 2217 /**
2221 2218 * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
2222 2219 * @hw: pointer to the HW structure
2223 2220 *
2224 2221 * Assumes semaphore already acquired.
2225 2222 *
2226 2223 **/
2227 2224 static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
2228 2225 {
2229 2226 u16 phy_data;
2230 2227 u32 strap = E1000_READ_REG(hw, E1000_STRAP);
2231 2228 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
2232 2229 E1000_STRAP_SMT_FREQ_SHIFT;
2233 2230 s32 ret_val;
2234 2231
2235 2232 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
2236 2233
2237 2234 ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
2238 2235 if (ret_val)
2239 2236 return ret_val;
2240 2237
2241 2238 phy_data &= ~HV_SMB_ADDR_MASK;
2242 2239 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
2243 2240 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
2244 2241
2245 2242 if (hw->phy.type == e1000_phy_i217) {
2246 2243 /* Restore SMBus frequency */
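		/* Editor's note (inferred from the code below): a strap
		 * field of 0 means the frequency is unsupported; values
		 * 1..3 map to the PHY's two frequency bits as the strap
		 * value minus one, hence the post-decrement.
		 */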
2247 2244 if (freq--) {
2248 2245 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
2249 2246 phy_data |= (freq & (1 << 0)) <<
2250 2247 HV_SMB_ADDR_FREQ_LOW_SHIFT;
2251 2248 phy_data |= (freq & (1 << 1)) <<
2252 2249 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
2253 2250 } else {
2254 2251 DEBUGOUT("Unsupported SMB frequency in PHY\n");
2255 2252 }
2256 2253 }
2257 2254
2258 2255 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
2259 2256 }
2260 2257
2261 2258 /**
2262 2259 * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
2263 2260 * @hw: pointer to the HW structure
2264 2261 *
2265 2262 * SW should configure the LCD from the NVM extended configuration region
2266 2263 * as a workaround for certain parts.
2267 2264 **/
2268 2265 static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
2269 2266 {
2270 2267 struct e1000_phy_info *phy = &hw->phy;
2271 2268 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
2272 2269 s32 ret_val = E1000_SUCCESS;
2273 2270 u16 word_addr, reg_data, reg_addr, phy_page = 0;
2274 2271
2275 2272 DEBUGFUNC("e1000_sw_lcd_config_ich8lan");
2276 2273
2277 2274 /* Initialize the PHY from the NVM on ICH platforms. This
2278 2275 * is needed due to an issue where the NVM configuration is
2279 2276 * not properly autoloaded after power transitions.
2280 2277 * Therefore, after each PHY reset, we will load the
2281 2278 * configuration data out of the NVM manually.
2282 2279 */
2283 2280 switch (hw->mac.type) {
2284 2281 case e1000_ich8lan:
2285 2282 if (phy->type != e1000_phy_igp_3)
2286 2283 return ret_val;
2287 2284
2288 2285 if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
2289 2286 (hw->device_id == E1000_DEV_ID_ICH8_IGP_C)) {
2290 2287 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
2291 2288 break;
2292 2289 }
2293 2290 /* Fall-thru */
2294 2291 case e1000_pchlan:
2295 2292 case e1000_pch2lan:
2296 2293 case e1000_pch_lpt:
2297 2294 case e1000_pch_spt:
2295 + case e1000_pch_cnp:
2298 2296 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
2299 2297 break;
2300 2298 default:
2301 2299 return ret_val;
2302 2300 }
2303 2301
2304 2302 ret_val = hw->phy.ops.acquire(hw);
2305 2303 if (ret_val)
2306 2304 return ret_val;
2307 2305
2308 2306 data = E1000_READ_REG(hw, E1000_FEXTNVM);
2309 2307 if (!(data & sw_cfg_mask))
2310 2308 goto release;
2311 2309
2312 2310 /* Make sure HW does not configure LCD from PHY
2313 2311 * extended configuration before SW configuration
2314 2312 */
2315 2313 data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2316 2314 if ((hw->mac.type < e1000_pch2lan) &&
2317 2315 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
2318 2316 goto release;
2319 2317
2320 2318 cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
2321 2319 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
2322 2320 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
2323 2321 if (!cnf_size)
2324 2322 goto release;
2325 2323
2326 2324 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
2327 2325 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
2328 2326
2329 2327 if (((hw->mac.type == e1000_pchlan) &&
2330 2328 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
2331 2329 (hw->mac.type > e1000_pchlan)) {
2332 2330 /* HW configures the SMBus address and LEDs when the
2333 2331 * OEM and LCD Write Enable bits are set in the NVM.
2334 2332 * When both NVM bits are cleared, SW will configure
2335 2333 * them instead.
2336 2334 */
2337 2335 ret_val = e1000_write_smbus_addr(hw);
2338 2336 if (ret_val)
2339 2337 goto release;
2340 2338
2341 2339 data = E1000_READ_REG(hw, E1000_LEDCTL);
2342 2340 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
2343 2341 (u16)data);
2344 2342 if (ret_val)
2345 2343 goto release;
2346 2344 }
2347 2345
2348 2346 /* Configure LCD from extended configuration region. */
2349 2347
2350 2348 /* cnf_base_addr is in DWORD */
2351 2349 word_addr = (u16)(cnf_base_addr << 1);
2352 2350
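	/* Editor's note: each record in the region is a pair of NVM words,
	 * value first and target PHY register second; page-select records
	 * only update phy_page for subsequent writes.
	 */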
2353 2351 for (i = 0; i < cnf_size; i++) {
2354 2352 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2), 1,
2355 2353 					   &reg_data);
2356 2354 if (ret_val)
2357 2355 goto release;
2358 2356
2359 2357 ret_val = hw->nvm.ops.read(hw, (word_addr + i * 2 + 1),
2360 2358 					   1, &reg_addr);
2361 2359 if (ret_val)
2362 2360 goto release;
2363 2361
2364 2362 /* Save off the PHY page for future writes. */
2365 2363 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
2366 2364 phy_page = reg_data;
2367 2365 continue;
2368 2366 }
2369 2367
2370 2368 reg_addr &= PHY_REG_MASK;
2371 2369 reg_addr |= phy_page;
2372 2370
2373 2371 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
2374 2372 reg_data);
2375 2373 if (ret_val)
2376 2374 goto release;
2377 2375 }
2378 2376
2379 2377 release:
2380 2378 hw->phy.ops.release(hw);
2381 2379 return ret_val;
2382 2380 }
2383 2381
2384 2382 /**
2385 2383 * e1000_k1_gig_workaround_hv - K1 Si workaround
2386 2384 * @hw: pointer to the HW structure
2387 2385 * @link: link up bool flag
2388 2386 *
2389 2387 * If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2390 2388  * from a lower speed. This workaround disables K1 whenever link is at 1Gig.
2391 2389 * If link is down, the function will restore the default K1 setting located
2392 2390 * in the NVM.
2393 2391 **/
2394 2392 static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
2395 2393 {
2396 2394 s32 ret_val = E1000_SUCCESS;
2397 2395 u16 status_reg = 0;
2398 2396 bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
2399 2397
2400 2398 DEBUGFUNC("e1000_k1_gig_workaround_hv");
2401 2399
2402 2400 if (hw->mac.type != e1000_pchlan)
2403 2401 return E1000_SUCCESS;
2404 2402
2405 2403 /* Wrap the whole flow with the sw flag */
2406 2404 ret_val = hw->phy.ops.acquire(hw);
2407 2405 if (ret_val)
2408 2406 return ret_val;
2409 2407
2410 2408 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
2411 2409 if (link) {
2412 2410 if (hw->phy.type == e1000_phy_82578) {
2413 2411 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
2414 2412 &status_reg);
2415 2413 if (ret_val)
2416 2414 goto release;
2417 2415
2418 2416 status_reg &= (BM_CS_STATUS_LINK_UP |
2419 2417 BM_CS_STATUS_RESOLVED |
2420 2418 BM_CS_STATUS_SPEED_MASK);
2421 2419
2422 2420 if (status_reg == (BM_CS_STATUS_LINK_UP |
2423 2421 BM_CS_STATUS_RESOLVED |
2424 2422 BM_CS_STATUS_SPEED_1000))
2425 2423 k1_enable = FALSE;
2426 2424 }
2427 2425
2428 2426 if (hw->phy.type == e1000_phy_82577) {
2429 2427 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
2430 2428 &status_reg);
2431 2429 if (ret_val)
2432 2430 goto release;
2433 2431
2434 2432 status_reg &= (HV_M_STATUS_LINK_UP |
2435 2433 HV_M_STATUS_AUTONEG_COMPLETE |
2436 2434 HV_M_STATUS_SPEED_MASK);
2437 2435
2438 2436 if (status_reg == (HV_M_STATUS_LINK_UP |
2439 2437 HV_M_STATUS_AUTONEG_COMPLETE |
2440 2438 HV_M_STATUS_SPEED_1000))
2441 2439 k1_enable = FALSE;
2442 2440 }
2443 2441
2444 2442 /* Link stall fix for link up */
2445 2443 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2446 2444 0x0100);
2447 2445 if (ret_val)
2448 2446 goto release;
2449 2447
2450 2448 } else {
2451 2449 /* Link stall fix for link down */
2452 2450 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
2453 2451 0x4100);
2454 2452 if (ret_val)
2455 2453 goto release;
2456 2454 }
2457 2455
2458 2456 ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
2459 2457
2460 2458 release:
2461 2459 hw->phy.ops.release(hw);
2462 2460
2463 2461 return ret_val;
2464 2462 }
2465 2463
2466 2464 /**
2467 2465 * e1000_configure_k1_ich8lan - Configure K1 power state
2468 2466 * @hw: pointer to the HW structure
2469 2467  * @k1_enable: K1 state to configure
2470 2468 *
2471 2469 * Configure the K1 power state based on the provided parameter.
2472 2470 * Assumes semaphore already acquired.
2473 2471 *
2474 2472 * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2475 2473 **/
2476 2474 s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
2477 2475 {
2478 2476 s32 ret_val;
2479 2477 u32 ctrl_reg = 0;
2480 2478 u32 ctrl_ext = 0;
2481 2479 u32 reg = 0;
2482 2480 u16 kmrn_reg = 0;
2483 2481
2484 2482 DEBUGFUNC("e1000_configure_k1_ich8lan");
2485 2483
2486 2484 ret_val = e1000_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2487 2485 &kmrn_reg);
2488 2486 if (ret_val)
2489 2487 return ret_val;
2490 2488
2491 2489 if (k1_enable)
2492 2490 kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
2493 2491 else
2494 2492 kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
2495 2493
2496 2494 ret_val = e1000_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
2497 2495 kmrn_reg);
2498 2496 if (ret_val)
2499 2497 return ret_val;
2500 2498
2501 2499 usec_delay(20);
2502 2500 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
2503 2501 ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
2504 2502
2505 2503 reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
2506 2504 reg |= E1000_CTRL_FRCSPD;
2507 2505 E1000_WRITE_REG(hw, E1000_CTRL, reg);
2508 2506
2509 2507 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
2510 2508 E1000_WRITE_FLUSH(hw);
2511 2509 usec_delay(20);
2512 2510 E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
2513 2511 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
2514 2512 E1000_WRITE_FLUSH(hw);
2515 2513 usec_delay(20);
2516 2514
2517 2515 return E1000_SUCCESS;
2518 2516 }
2519 2517
2520 2518 /**
2521 2519 * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2522 2520 * @hw: pointer to the HW structure
2523 2521 * @d0_state: boolean if entering d0 or d3 device state
2524 2522 *
2525 2523 * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2526 2524 * collectively called OEM bits. The OEM Write Enable bit and SW Config bit
2527 2525  * in NVM determine whether HW should configure LPLU and Gbe Disable.
2528 2526 **/
2529 2527 static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
2530 2528 {
2531 2529 s32 ret_val = 0;
2532 2530 u32 mac_reg;
2533 2531 u16 oem_reg;
2534 2532
2535 2533 DEBUGFUNC("e1000_oem_bits_config_ich8lan");
2536 2534
2537 2535 if (hw->mac.type < e1000_pchlan)
2538 2536 return ret_val;
2539 2537
2540 2538 ret_val = hw->phy.ops.acquire(hw);
2541 2539 if (ret_val)
2542 2540 return ret_val;
2543 2541
2544 2542 if (hw->mac.type == e1000_pchlan) {
2545 2543 mac_reg = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
2546 2544 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
2547 2545 goto release;
2548 2546 }
2549 2547
2550 2548 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM);
2551 2549 if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
2552 2550 goto release;
2553 2551
2554 2552 mac_reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
2555 2553
2556 2554 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
2557 2555 if (ret_val)
2558 2556 goto release;
2559 2557
2560 2558 oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
2561 2559
2562 2560 if (d0_state) {
2563 2561 if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
2564 2562 oem_reg |= HV_OEM_BITS_GBE_DIS;
2565 2563
2566 2564 if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
2567 2565 oem_reg |= HV_OEM_BITS_LPLU;
2568 2566 } else {
2569 2567 if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
2570 2568 E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
2571 2569 oem_reg |= HV_OEM_BITS_GBE_DIS;
2572 2570
2573 2571 if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
2574 2572 E1000_PHY_CTRL_NOND0A_LPLU))
2575 2573 oem_reg |= HV_OEM_BITS_LPLU;
2576 2574 }
2577 2575
2578 2576 /* Set Restart auto-neg to activate the bits */
2579 2577 if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
2580 2578 !hw->phy.ops.check_reset_block(hw))
2581 2579 oem_reg |= HV_OEM_BITS_RESTART_AN;
2582 2580
2583 2581 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
2584 2582
2585 2583 release:
2586 2584 hw->phy.ops.release(hw);
2587 2585
2588 2586 return ret_val;
2589 2587 }
2590 2588
2591 2589
2592 2590 /**
2593 2591 * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2594 2592 * @hw: pointer to the HW structure
2595 2593 **/
2596 2594 static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2597 2595 {
2598 2596 s32 ret_val;
2599 2597 u16 data;
2600 2598
2601 2599 DEBUGFUNC("e1000_set_mdio_slow_mode_hv");
2602 2600
2603 2601 ret_val = hw->phy.ops.read_reg(hw, HV_KMRN_MODE_CTRL, &data);
2604 2602 if (ret_val)
2605 2603 return ret_val;
2606 2604
2607 2605 data |= HV_KMRN_MDIO_SLOW;
2608 2606
2609 2607 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_MODE_CTRL, data);
2610 2608
2611 2609 return ret_val;
2612 2610 }
2613 2611
2614 2612 /**
2615 2613  * e1000_hv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2616 2614 * done after every PHY reset.
2617 2615 **/
2618 2616 static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2619 2617 {
2620 2618 s32 ret_val = E1000_SUCCESS;
2621 2619 u16 phy_data;
2622 2620
2623 2621 DEBUGFUNC("e1000_hv_phy_workarounds_ich8lan");
2624 2622
2625 2623 if (hw->mac.type != e1000_pchlan)
2626 2624 return E1000_SUCCESS;
2627 2625
2628 2626 /* Set MDIO slow mode before any other MDIO access */
2629 2627 if (hw->phy.type == e1000_phy_82577) {
2630 2628 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2631 2629 if (ret_val)
2632 2630 return ret_val;
2633 2631 }
2634 2632
2635 2633 if (((hw->phy.type == e1000_phy_82577) &&
2636 2634 ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2637 2635 ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2638 2636 /* Disable generation of early preamble */
2639 2637 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 25), 0x4431);
2640 2638 if (ret_val)
2641 2639 return ret_val;
2642 2640
2643 2641 /* Preamble tuning for SSC */
2644 2642 ret_val = hw->phy.ops.write_reg(hw, HV_KMRN_FIFO_CTRLSTA,
2645 2643 0xA204);
2646 2644 if (ret_val)
2647 2645 return ret_val;
2648 2646 }
2649 2647
2650 2648 if (hw->phy.type == e1000_phy_82578) {
2651 2649 /* Return registers to default by doing a soft reset then
2652 2650 * writing 0x3140 to the control register.
2653 2651 */
2654 2652 if (hw->phy.revision < 2) {
2655 2653 e1000_phy_sw_reset_generic(hw);
2656 2654 ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL,
2657 2655 0x3140);
2658 2656 }
2659 2657 }
2660 2658
2661 2659 /* Select page 0 */
2662 2660 ret_val = hw->phy.ops.acquire(hw);
2663 2661 if (ret_val)
2664 2662 return ret_val;
2665 2663
2666 2664 hw->phy.addr = 1;
2667 2665 ret_val = e1000_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2668 2666 hw->phy.ops.release(hw);
2669 2667 if (ret_val)
2670 2668 return ret_val;
2671 2669
2672 2670 /* Configure the K1 Si workaround during phy reset assuming there is
2673 2671 * link so that it disables K1 if link is in 1Gbps.
2674 2672 */
2675 2673 ret_val = e1000_k1_gig_workaround_hv(hw, TRUE);
2676 2674 if (ret_val)
2677 2675 return ret_val;
2678 2676
2679 2677 /* Workaround for link disconnects on a busy hub in half duplex */
2680 2678 ret_val = hw->phy.ops.acquire(hw);
2681 2679 if (ret_val)
2682 2680 return ret_val;
2683 2681 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2684 2682 if (ret_val)
2685 2683 goto release;
2686 2684 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
2687 2685 phy_data & 0x00FF);
2688 2686 if (ret_val)
2689 2687 goto release;
2690 2688
2691 2689 /* set MSE higher to enable link to stay up when noise is high */
2692 2690 ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2693 2691 release:
2694 2692 hw->phy.ops.release(hw);
2695 2693
2696 2694 return ret_val;
2697 2695 }
2698 2696
2699 2697 /**
2700 2698 * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2701 2699 * @hw: pointer to the HW structure
2702 2700 **/
2703 2701 void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
2704 2702 {
2705 2703 u32 mac_reg;
2706 2704 u16 i, phy_reg = 0;
2707 2705 s32 ret_val;
2708 2706
2709 2707 DEBUGFUNC("e1000_copy_rx_addrs_to_phy_ich8lan");
2710 2708
2711 2709 ret_val = hw->phy.ops.acquire(hw);
2712 2710 if (ret_val)
2713 2711 return;
2714 2712 ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2715 2713 if (ret_val)
2716 2714 goto release;
2717 2715
2718 2716 /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
2719 2717 for (i = 0; i < (hw->mac.rar_entry_count); i++) {
2720 2718 mac_reg = E1000_READ_REG(hw, E1000_RAL(i));
2721 2719 hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
2722 2720 (u16)(mac_reg & 0xFFFF));
2723 2721 hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
2724 2722 (u16)((mac_reg >> 16) & 0xFFFF));
2725 2723
2726 2724 mac_reg = E1000_READ_REG(hw, E1000_RAH(i));
2727 2725 hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
2728 2726 (u16)(mac_reg & 0xFFFF));
2729 2727 hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
2730 2728 (u16)((mac_reg & E1000_RAH_AV)
2731 2729 >> 16));
2732 2730 }
2733 2731
2734 2732 e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
2735 2733
2736 2734 release:
2737 2735 hw->phy.ops.release(hw);
2738 2736 }
2739 2737
2740 2738 static u32 e1000_calc_rx_da_crc(u8 mac[])
2741 2739 {
2742 2740 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
2743 2741 u32 i, j, mask, crc;
2744 2742
2745 2743 DEBUGFUNC("e1000_calc_rx_da_crc");
2746 2744
2747 2745 crc = 0xffffffff;
2748 2746 for (i = 0; i < 6; i++) {
2749 2747 crc = crc ^ mac[i];
2750 2748 for (j = 8; j > 0; j--) {
2751 2749 mask = (crc & 1) * (-1);
2752 2750 crc = (crc >> 1) ^ (poly & mask);
2753 2751 }
2754 2752 }
2755 2753 return ~crc;
2756 2754 }
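/*
 * Editor's illustrative sketch, not part of this change: feeding the
 * CRC above with an address recovered from RAL/RAH, as the jumbo
 * workaround below does before writing E1000_PCH_RAICC(i). The sample
 * address is arbitrary.
 */
static u32 example_rx_da_crc_usage(void)
{
	u8 mac_addr[6] = { 0x00, 0x1b, 0x21, 0x3c, 0x4d, 0x5e };

	/* Reflected CRC-32 (poly 0xEDB88320) over the six address bytes */
	return e1000_calc_rx_da_crc(mac_addr);
}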
2757 2755
2758 2756 /**
2759 2757 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2760 2758 * with 82579 PHY
2761 2759 * @hw: pointer to the HW structure
2762 2760 * @enable: flag to enable/disable workaround when enabling/disabling jumbos
2763 2761 **/
2764 2762 s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
2765 2763 {
2766 2764 s32 ret_val = E1000_SUCCESS;
2767 2765 u16 phy_reg, data;
2768 2766 u32 mac_reg;
2769 2767 u16 i;
2770 2768
2771 2769 DEBUGFUNC("e1000_lv_jumbo_workaround_ich8lan");
2772 2770
2773 2771 if (hw->mac.type < e1000_pch2lan)
2774 2772 return E1000_SUCCESS;
2775 2773
2776 2774 /* disable Rx path while enabling/disabling workaround */
2777 2775 hw->phy.ops.read_reg(hw, PHY_REG(769, 20), &phy_reg);
2778 2776 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 20),
2779 2777 phy_reg | (1 << 14));
2780 2778 if (ret_val)
2781 2779 return ret_val;
2782 2780
2783 2781 if (enable) {
2784 2782 /* Write Rx addresses (rar_entry_count for RAL/H, and
2785 2783 * SHRAL/H) and initial CRC values to the MAC
2786 2784 */
2787 2785 for (i = 0; i < hw->mac.rar_entry_count; i++) {
2788 2786 u8 mac_addr[ETH_ADDR_LEN] = {0};
2789 2787 u32 addr_high, addr_low;
2790 2788
2791 2789 addr_high = E1000_READ_REG(hw, E1000_RAH(i));
2792 2790 if (!(addr_high & E1000_RAH_AV))
2793 2791 continue;
2794 2792 addr_low = E1000_READ_REG(hw, E1000_RAL(i));
2795 2793 mac_addr[0] = (addr_low & 0xFF);
2796 2794 mac_addr[1] = ((addr_low >> 8) & 0xFF);
2797 2795 mac_addr[2] = ((addr_low >> 16) & 0xFF);
2798 2796 mac_addr[3] = ((addr_low >> 24) & 0xFF);
2799 2797 mac_addr[4] = (addr_high & 0xFF);
2800 2798 mac_addr[5] = ((addr_high >> 8) & 0xFF);
2801 2799
2802 2800 E1000_WRITE_REG(hw, E1000_PCH_RAICC(i),
2803 2801 e1000_calc_rx_da_crc(mac_addr));
2804 2802 }
2805 2803
2806 2804 /* Write Rx addresses to the PHY */
2807 2805 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
2808 2806
2809 2807 /* Enable jumbo frame workaround in the MAC */
2810 2808 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2811 2809 mac_reg &= ~(1 << 14);
2812 2810 mac_reg |= (7 << 15);
2813 2811 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2814 2812
2815 2813 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2816 2814 mac_reg |= E1000_RCTL_SECRC;
2817 2815 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2818 2816
2819 2817 ret_val = e1000_read_kmrn_reg_generic(hw,
2820 2818 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2821 2819 &data);
2822 2820 if (ret_val)
2823 2821 return ret_val;
2824 2822 ret_val = e1000_write_kmrn_reg_generic(hw,
2825 2823 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2826 2824 data | (1 << 0));
2827 2825 if (ret_val)
2828 2826 return ret_val;
2829 2827 ret_val = e1000_read_kmrn_reg_generic(hw,
2830 2828 E1000_KMRNCTRLSTA_HD_CTRL,
2831 2829 &data);
2832 2830 if (ret_val)
2833 2831 return ret_val;
2834 2832 data &= ~(0xF << 8);
2835 2833 data |= (0xB << 8);
2836 2834 ret_val = e1000_write_kmrn_reg_generic(hw,
2837 2835 E1000_KMRNCTRLSTA_HD_CTRL,
2838 2836 data);
2839 2837 if (ret_val)
2840 2838 return ret_val;
2841 2839
2842 2840 /* Enable jumbo frame workaround in the PHY */
2843 2841 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2844 2842 data &= ~(0x7F << 5);
2845 2843 data |= (0x37 << 5);
2846 2844 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2847 2845 if (ret_val)
2848 2846 return ret_val;
2849 2847 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2850 2848 data &= ~(1 << 13);
2851 2849 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2852 2850 if (ret_val)
2853 2851 return ret_val;
2854 2852 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2855 2853 data &= ~(0x3FF << 2);
2856 2854 data |= (E1000_TX_PTR_GAP << 2);
2857 2855 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2858 2856 if (ret_val)
2859 2857 return ret_val;
2860 2858 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0xF100);
2861 2859 if (ret_val)
2862 2860 return ret_val;
2863 2861 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2864 2862 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data |
2865 2863 (1 << 10));
2866 2864 if (ret_val)
2867 2865 return ret_val;
2868 2866 } else {
2869 2867 /* Write MAC register values back to h/w defaults */
2870 2868 mac_reg = E1000_READ_REG(hw, E1000_FFLT_DBG);
2871 2869 mac_reg &= ~(0xF << 14);
2872 2870 E1000_WRITE_REG(hw, E1000_FFLT_DBG, mac_reg);
2873 2871
2874 2872 mac_reg = E1000_READ_REG(hw, E1000_RCTL);
2875 2873 mac_reg &= ~E1000_RCTL_SECRC;
2876 2874 E1000_WRITE_REG(hw, E1000_RCTL, mac_reg);
2877 2875
2878 2876 ret_val = e1000_read_kmrn_reg_generic(hw,
2879 2877 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2880 2878 &data);
2881 2879 if (ret_val)
2882 2880 return ret_val;
2883 2881 ret_val = e1000_write_kmrn_reg_generic(hw,
2884 2882 E1000_KMRNCTRLSTA_CTRL_OFFSET,
2885 2883 data & ~(1 << 0));
2886 2884 if (ret_val)
2887 2885 return ret_val;
2888 2886 ret_val = e1000_read_kmrn_reg_generic(hw,
2889 2887 E1000_KMRNCTRLSTA_HD_CTRL,
2890 2888 &data);
2891 2889 if (ret_val)
2892 2890 return ret_val;
2893 2891 data &= ~(0xF << 8);
2894 2892 data |= (0xB << 8);
2895 2893 ret_val = e1000_write_kmrn_reg_generic(hw,
2896 2894 E1000_KMRNCTRLSTA_HD_CTRL,
2897 2895 data);
2898 2896 if (ret_val)
2899 2897 return ret_val;
2900 2898
2901 2899 /* Write PHY register values back to h/w defaults */
2902 2900 hw->phy.ops.read_reg(hw, PHY_REG(769, 23), &data);
2903 2901 data &= ~(0x7F << 5);
2904 2902 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 23), data);
2905 2903 if (ret_val)
2906 2904 return ret_val;
2907 2905 hw->phy.ops.read_reg(hw, PHY_REG(769, 16), &data);
2908 2906 data |= (1 << 13);
2909 2907 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(769, 16), data);
2910 2908 if (ret_val)
2911 2909 return ret_val;
2912 2910 hw->phy.ops.read_reg(hw, PHY_REG(776, 20), &data);
2913 2911 data &= ~(0x3FF << 2);
2914 2912 data |= (0x8 << 2);
2915 2913 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 20), data);
2916 2914 if (ret_val)
2917 2915 return ret_val;
2918 2916 ret_val = hw->phy.ops.write_reg(hw, PHY_REG(776, 23), 0x7E00);
2919 2917 if (ret_val)
2920 2918 return ret_val;
2921 2919 hw->phy.ops.read_reg(hw, HV_PM_CTRL, &data);
2922 2920 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL, data &
2923 2921 ~(1 << 10));
2924 2922 if (ret_val)
2925 2923 return ret_val;
2926 2924 }
2927 2925
2928 2926 /* re-enable Rx path after enabling/disabling workaround */
2929 2927 return hw->phy.ops.write_reg(hw, PHY_REG(769, 20), phy_reg &
2930 2928 ~(1 << 14));
2931 2929 }
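/*
 * Editor's illustrative sketch, not part of this change: a driver's
 * MTU-change path would toggle the workaround above around jumbo
 * enable/disable. The function name and the 1522-byte standard-frame
 * limit used for the check are assumptions.
 */
static s32 example_set_jumbo(struct e1000_hw *hw, u32 frame_size)
{
	bool jumbo = (frame_size > 1522);

	return e1000_lv_jumbo_workaround_ich8lan(hw, jumbo);
}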
2932 2930
2933 2931 /**
2934 2932  * e1000_lv_phy_workarounds_ich8lan - A series of PHY workarounds to be
2935 2933 * done after every PHY reset.
2936 2934 **/
2937 2935 static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2938 2936 {
2939 2937 s32 ret_val = E1000_SUCCESS;
2940 2938
2941 2939 DEBUGFUNC("e1000_lv_phy_workarounds_ich8lan");
2942 2940
2943 2941 if (hw->mac.type != e1000_pch2lan)
2944 2942 return E1000_SUCCESS;
2945 2943
2946 2944 /* Set MDIO slow mode before any other MDIO access */
2947 2945 ret_val = e1000_set_mdio_slow_mode_hv(hw);
2948 2946 if (ret_val)
2949 2947 return ret_val;
2950 2948
2951 2949 ret_val = hw->phy.ops.acquire(hw);
2952 2950 if (ret_val)
2953 2951 return ret_val;
2954 2952 /* set MSE higher to enable link to stay up when noise is high */
2955 2953 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2956 2954 if (ret_val)
2957 2955 goto release;
2958 2956 /* drop link after 5 times MSE threshold was reached */
2959 2957 ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2960 2958 release:
2961 2959 hw->phy.ops.release(hw);
2962 2960
2963 2961 return ret_val;
2964 2962 }
2965 2963
2966 2964 /**
2967 2965  * e1000_k1_workaround_lv - K1 Si workaround
2968 2966 * @hw: pointer to the HW structure
2969 2967 *
2970 2968  * Workaround to set the K1 beacon duration for 82579 parts at 10Mbps.
2971 2969  * Disable K1 for 1000 and 100 speeds.
2972 2970 **/
2973 2971 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
2974 2972 {
2975 2973 s32 ret_val = E1000_SUCCESS;
2976 2974 u16 status_reg = 0;
2977 2975
2978 2976 DEBUGFUNC("e1000_k1_workaround_lv");
2979 2977
2980 2978 if (hw->mac.type != e1000_pch2lan)
2981 2979 return E1000_SUCCESS;
2982 2980
2983 2981 	/* Set K1 beacon duration based on 10Mbps speed */
2984 2982 ret_val = hw->phy.ops.read_reg(hw, HV_M_STATUS, &status_reg);
2985 2983 if (ret_val)
2986 2984 return ret_val;
2987 2985
2988 2986 if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
2989 2987 == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
2990 2988 if (status_reg &
2991 2989 (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
2992 2990 u16 pm_phy_reg;
2993 2991
2994 2992 			/* LV 1G/100 packet drop issue workaround */
2995 2993 ret_val = hw->phy.ops.read_reg(hw, HV_PM_CTRL,
2996 2994 &pm_phy_reg);
2997 2995 if (ret_val)
2998 2996 return ret_val;
2999 2997 pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
3000 2998 ret_val = hw->phy.ops.write_reg(hw, HV_PM_CTRL,
3001 2999 pm_phy_reg);
3002 3000 if (ret_val)
3003 3001 return ret_val;
3004 3002 } else {
3005 3003 u32 mac_reg;
3006 3004 mac_reg = E1000_READ_REG(hw, E1000_FEXTNVM4);
3007 3005 mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
3008 3006 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
3009 3007 E1000_WRITE_REG(hw, E1000_FEXTNVM4, mac_reg);
3010 3008 }
3011 3009 }
3012 3010
3013 3011 return ret_val;
3014 3012 }
3015 3013
3016 3014 /**
3017 3015 * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
3018 3016 * @hw: pointer to the HW structure
3019 3017 * @gate: boolean set to TRUE to gate, FALSE to ungate
3020 3018 *
3021 3019 * Gate/ungate the automatic PHY configuration via hardware; perform
3022 3020 * the configuration via software instead.
3023 3021 **/
3024 3022 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
3025 3023 {
3026 3024 u32 extcnf_ctrl;
3027 3025
3028 3026 DEBUGFUNC("e1000_gate_hw_phy_config_ich8lan");
3029 3027
3030 3028 if (hw->mac.type < e1000_pch2lan)
3031 3029 return;
3032 3030
3033 3031 extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
3034 3032
3035 3033 if (gate)
3036 3034 extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3037 3035 else
3038 3036 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
3039 3037
3040 3038 E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
3041 3039 }
3042 3040
3043 3041 /**
3044 3042 * e1000_lan_init_done_ich8lan - Check for PHY config completion
3045 3043 * @hw: pointer to the HW structure
3046 3044 *
3047 3045  * Check for the appropriate indication that the MAC has finished
3048 3046  * configuring the PHY after a software reset.
3049 3047 **/
3050 3048 static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
3051 3049 {
3052 3050 u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
3053 3051
3054 3052 DEBUGFUNC("e1000_lan_init_done_ich8lan");
3055 3053
3056 3054 	/* Wait for basic configuration to complete before proceeding */
3057 3055 do {
3058 3056 data = E1000_READ_REG(hw, E1000_STATUS);
3059 3057 data &= E1000_STATUS_LAN_INIT_DONE;
3060 3058 usec_delay(100);
3061 3059 } while ((!data) && --loop);
3062 3060
3063 3061 	/* If basic configuration is still incomplete when the above loop
3064 3062 	 * count reaches 0, loading the configuration from NVM will
3065 3063 	 * leave the PHY in a bad state, possibly resulting in no link.
3066 3064 */
3067 3065 if (loop == 0)
3068 3066 DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
3069 3067
3070 3068 /* Clear the Init Done bit for the next init event */
3071 3069 data = E1000_READ_REG(hw, E1000_STATUS);
3072 3070 data &= ~E1000_STATUS_LAN_INIT_DONE;
3073 3071 E1000_WRITE_REG(hw, E1000_STATUS, data);
3074 3072 }
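
The wait above is a bounded poll: read STATUS, mask LAN_INIT_DONE, delay 100 usec, and count down. A generic sketch of the same pattern, with the register read and delay supplied as callbacks (the names here are illustrative, not driver API):

	#include <stdbool.h>
	#include <stdint.h>

	/* Poll until (read_reg() & mask) is nonzero or max_iters expires. */
	static bool poll_bit_set(uint32_t (*read_reg)(void *), void *ctx,
	    uint32_t mask, uint32_t max_iters, void (*delay_us)(unsigned))
	{
		while (max_iters--) {
			if (read_reg(ctx) & mask)
				return true;
			delay_us(100);	/* matches the 100 usec step above */
		}
		return false;	/* timed out; the caller just logs and continues */
	}
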
3075 3073
3076 3074 /**
3077 3075 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
3078 3076 * @hw: pointer to the HW structure
3079 3077 **/
3080 3078 static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
3081 3079 {
3082 3080 s32 ret_val = E1000_SUCCESS;
3083 3081 u16 reg;
3084 3082
3085 3083 DEBUGFUNC("e1000_post_phy_reset_ich8lan");
3086 3084
3087 3085 if (hw->phy.ops.check_reset_block(hw))
3088 3086 return E1000_SUCCESS;
3089 3087
3090 3088 /* Allow time for h/w to get to quiescent state after reset */
3091 3089 msec_delay(10);
3092 3090
3093 3091 /* Perform any necessary post-reset workarounds */
3094 3092 switch (hw->mac.type) {
3095 3093 case e1000_pchlan:
3096 3094 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
3097 3095 if (ret_val)
3098 3096 return ret_val;
3099 3097 break;
3100 3098 case e1000_pch2lan:
3101 3099 ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
3102 3100 if (ret_val)
3103 3101 return ret_val;
3104 3102 break;
3105 3103 default:
3106 3104 break;
3107 3105 }
3108 3106
3109 3107 	/* Clear the host wakeup bit after LCD reset */
3110 3108 if (hw->mac.type >= e1000_pchlan) {
3111 3109 		hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &reg);
3112 3110 reg &= ~BM_WUC_HOST_WU_BIT;
3113 3111 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, reg);
3114 3112 }
3115 3113
3116 3114 /* Configure the LCD with the extended configuration region in NVM */
3117 3115 ret_val = e1000_sw_lcd_config_ich8lan(hw);
3118 3116 if (ret_val)
3119 3117 return ret_val;
3120 3118
3121 3119 /* Configure the LCD with the OEM bits in NVM */
3122 3120 ret_val = e1000_oem_bits_config_ich8lan(hw, TRUE);
3123 3121
3124 3122 if (hw->mac.type == e1000_pch2lan) {
3125 3123 /* Ungate automatic PHY configuration on non-managed 82579 */
3126 3124 if (!(E1000_READ_REG(hw, E1000_FWSM) &
3127 3125 E1000_ICH_FWSM_FW_VALID)) {
3128 3126 msec_delay(10);
3129 3127 e1000_gate_hw_phy_config_ich8lan(hw, FALSE);
3130 3128 }
3131 3129
3132 3130 /* Set EEE LPI Update Timer to 200usec */
3133 3131 ret_val = hw->phy.ops.acquire(hw);
3134 3132 if (ret_val)
3135 3133 return ret_val;
3136 3134 ret_val = e1000_write_emi_reg_locked(hw,
3137 3135 I82579_LPI_UPDATE_TIMER,
3138 3136 0x1387);
3139 3137 hw->phy.ops.release(hw);
3140 3138 }
3141 3139
3142 3140 return ret_val;
3143 3141 }
3144 3142
3145 3143 /**
3146 3144 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
3147 3145 * @hw: pointer to the HW structure
3148 3146 *
3149 3147 * Resets the PHY
3150 3148 * This is a function pointer entry point called by drivers
3151 3149 * or other shared routines.
3152 3150 **/
3153 3151 static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
3154 3152 {
3155 3153 s32 ret_val = E1000_SUCCESS;
3156 3154
3157 3155 DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
3158 3156
3159 3157 /* Gate automatic PHY configuration by hardware on non-managed 82579 */
3160 3158 if ((hw->mac.type == e1000_pch2lan) &&
3161 3159 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
3162 3160 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
3163 3161
3164 3162 ret_val = e1000_phy_hw_reset_generic(hw);
3165 3163 if (ret_val)
3166 3164 return ret_val;
3167 3165
3168 3166 return e1000_post_phy_reset_ich8lan(hw);
3169 3167 }
3170 3168
3171 3169 /**
3172 3170 * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
3173 3171 * @hw: pointer to the HW structure
3174 3172 * @active: TRUE to enable LPLU, FALSE to disable
3175 3173 *
3176 3174  * Sets the LPLU state according to the active flag. For PCH, if the OEM
3177 3175  * write bits are disabled in the NVM, writing the LPLU bits in the MAC will
3178 3176  * not set the PHY speed. This function manually sets the LPLU bit and
3179 3177  * restarts auto-neg as the hardware would. D3 and D0 LPLU call the same
3180 3178  * function since they configure the same bit.
3181 3179 **/
3182 3180 static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
3183 3181 {
3184 3182 s32 ret_val;
3185 3183 u16 oem_reg;
3186 3184
3187 3185 DEBUGFUNC("e1000_set_lplu_state_pchlan");
3188 3186 ret_val = hw->phy.ops.read_reg(hw, HV_OEM_BITS, &oem_reg);
3189 3187 if (ret_val)
3190 3188 return ret_val;
3191 3189
3192 3190 if (active)
3193 3191 oem_reg |= HV_OEM_BITS_LPLU;
3194 3192 else
3195 3193 oem_reg &= ~HV_OEM_BITS_LPLU;
3196 3194
3197 3195 if (!hw->phy.ops.check_reset_block(hw))
3198 3196 oem_reg |= HV_OEM_BITS_RESTART_AN;
3199 3197
3200 3198 return hw->phy.ops.write_reg(hw, HV_OEM_BITS, oem_reg);
3201 3199 }
3202 3200
3203 3201 /**
3204 3202 * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
3205 3203 * @hw: pointer to the HW structure
3206 3204 * @active: TRUE to enable LPLU, FALSE to disable
3207 3205 *
3208 3206 * Sets the LPLU D0 state according to the active flag. When
3209 3207 * activating LPLU this function also disables smart speed
3210 3208  * and vice versa. LPLU will not be activated unless the
3211 3209  * device autonegotiation advertisement is limited to
3212 3210  * 10, 10/100, or 10/100/1000 at all duplexes.
3213 3211 * This is a function pointer entry point only called by
3214 3212 * PHY setup routines.
3215 3213 **/
3216 3214 static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3217 3215 {
3218 3216 struct e1000_phy_info *phy = &hw->phy;
3219 3217 u32 phy_ctrl;
3220 3218 s32 ret_val = E1000_SUCCESS;
3221 3219 u16 data;
3222 3220
3223 3221 DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
3224 3222
3225 3223 if (phy->type == e1000_phy_ife)
3226 3224 return E1000_SUCCESS;
3227 3225
3228 3226 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3229 3227
3230 3228 if (active) {
3231 3229 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
3232 3230 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3233 3231
3234 3232 if (phy->type != e1000_phy_igp_3)
3235 3233 return E1000_SUCCESS;
3236 3234
3237 3235 /* Call gig speed drop workaround on LPLU before accessing
3238 3236 * any PHY registers
3239 3237 */
3240 3238 if (hw->mac.type == e1000_ich8lan)
3241 3239 e1000_gig_downshift_workaround_ich8lan(hw);
3242 3240
3243 3241 /* When LPLU is enabled, we should disable SmartSpeed */
3244 3242 ret_val = phy->ops.read_reg(hw,
3245 3243 IGP01E1000_PHY_PORT_CONFIG,
3246 3244 &data);
3247 3245 if (ret_val)
3248 3246 return ret_val;
3249 3247 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3250 3248 ret_val = phy->ops.write_reg(hw,
3251 3249 IGP01E1000_PHY_PORT_CONFIG,
3252 3250 data);
3253 3251 if (ret_val)
3254 3252 return ret_val;
3255 3253 } else {
3256 3254 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
3257 3255 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3258 3256
3259 3257 if (phy->type != e1000_phy_igp_3)
3260 3258 return E1000_SUCCESS;
3261 3259
3262 3260 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3263 3261 		 * during Dx states, where power conservation is most
3264 3262 * important. During driver activity we should enable
3265 3263 * SmartSpeed, so performance is maintained.
3266 3264 */
3267 3265 if (phy->smart_speed == e1000_smart_speed_on) {
3268 3266 ret_val = phy->ops.read_reg(hw,
3269 3267 IGP01E1000_PHY_PORT_CONFIG,
3270 3268 &data);
3271 3269 if (ret_val)
3272 3270 return ret_val;
3273 3271
3274 3272 data |= IGP01E1000_PSCFR_SMART_SPEED;
3275 3273 ret_val = phy->ops.write_reg(hw,
3276 3274 IGP01E1000_PHY_PORT_CONFIG,
3277 3275 data);
3278 3276 if (ret_val)
3279 3277 return ret_val;
3280 3278 } else if (phy->smart_speed == e1000_smart_speed_off) {
3281 3279 ret_val = phy->ops.read_reg(hw,
3282 3280 IGP01E1000_PHY_PORT_CONFIG,
3283 3281 &data);
3284 3282 if (ret_val)
3285 3283 return ret_val;
3286 3284
3287 3285 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3288 3286 ret_val = phy->ops.write_reg(hw,
3289 3287 IGP01E1000_PHY_PORT_CONFIG,
3290 3288 data);
3291 3289 if (ret_val)
3292 3290 return ret_val;
3293 3291 }
3294 3292 }
3295 3293
3296 3294 return E1000_SUCCESS;
3297 3295 }
3298 3296
3299 3297 /**
3300 3298 * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
3301 3299 * @hw: pointer to the HW structure
3302 3300 * @active: TRUE to enable LPLU, FALSE to disable
3303 3301 *
3304 3302 * Sets the LPLU D3 state according to the active flag. When
3305 3303 * activating LPLU this function also disables smart speed
3306 3304  * and vice versa. LPLU will not be activated unless the
3307 3305  * device autonegotiation advertisement is limited to
3308 3306  * 10, 10/100, or 10/100/1000 at all duplexes.
3309 3307 * This is a function pointer entry point only called by
3310 3308 * PHY setup routines.
3311 3309 **/
3312 3310 static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
3313 3311 {
3314 3312 struct e1000_phy_info *phy = &hw->phy;
3315 3313 u32 phy_ctrl;
3316 3314 s32 ret_val = E1000_SUCCESS;
3317 3315 u16 data;
3318 3316
3319 3317 DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
3320 3318
3321 3319 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
3322 3320
3323 3321 if (!active) {
3324 3322 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
3325 3323 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3326 3324
3327 3325 if (phy->type != e1000_phy_igp_3)
3328 3326 return E1000_SUCCESS;
3329 3327
3330 3328 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
3331 3329 		 * during Dx states, where power conservation is most
3332 3330 * important. During driver activity we should enable
3333 3331 * SmartSpeed, so performance is maintained.
3334 3332 */
3335 3333 if (phy->smart_speed == e1000_smart_speed_on) {
3336 3334 ret_val = phy->ops.read_reg(hw,
3337 3335 IGP01E1000_PHY_PORT_CONFIG,
3338 3336 &data);
3339 3337 if (ret_val)
3340 3338 return ret_val;
3341 3339
3342 3340 data |= IGP01E1000_PSCFR_SMART_SPEED;
3343 3341 ret_val = phy->ops.write_reg(hw,
3344 3342 IGP01E1000_PHY_PORT_CONFIG,
3345 3343 data);
3346 3344 if (ret_val)
3347 3345 return ret_val;
3348 3346 } else if (phy->smart_speed == e1000_smart_speed_off) {
3349 3347 ret_val = phy->ops.read_reg(hw,
3350 3348 IGP01E1000_PHY_PORT_CONFIG,
3351 3349 &data);
3352 3350 if (ret_val)
3353 3351 return ret_val;
3354 3352
3355 3353 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3356 3354 ret_val = phy->ops.write_reg(hw,
3357 3355 IGP01E1000_PHY_PORT_CONFIG,
3358 3356 data);
3359 3357 if (ret_val)
3360 3358 return ret_val;
3361 3359 }
3362 3360 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
3363 3361 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
3364 3362 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
3365 3363 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
3366 3364 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
3367 3365
3368 3366 if (phy->type != e1000_phy_igp_3)
3369 3367 return E1000_SUCCESS;
3370 3368
3371 3369 /* Call gig speed drop workaround on LPLU before accessing
3372 3370 * any PHY registers
3373 3371 */
3374 3372 if (hw->mac.type == e1000_ich8lan)
3375 3373 e1000_gig_downshift_workaround_ich8lan(hw);
3376 3374
3377 3375 /* When LPLU is enabled, we should disable SmartSpeed */
3378 3376 ret_val = phy->ops.read_reg(hw,
3379 3377 IGP01E1000_PHY_PORT_CONFIG,
3380 3378 &data);
3381 3379 if (ret_val)
3382 3380 return ret_val;
3383 3381
3384 3382 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
3385 3383 ret_val = phy->ops.write_reg(hw,
3386 3384 IGP01E1000_PHY_PORT_CONFIG,
3387 3385 data);
3388 3386 }
3389 3387
3390 3388 return ret_val;
3391 3389 }
3392 3390
3393 3391 /**
3394 3392  * e1000_valid_nvm_bank_detect_ich8lan - detect which NVM bank (0 or 1) is valid
3395 3393 * @hw: pointer to the HW structure
3396 3394 * @bank: pointer to the variable that returns the active bank
3397 3395 *
3398 3396  * Reads the signature byte from the NVM using the flash access registers.
3399 3397  * Word 0x13 bits 15:14 = 10b indicates a valid signature for that bank.
3400 3398 **/
3401 3399 static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
3402 3400 {
3403 3401 u32 eecd;
3404 3402 struct e1000_nvm_info *nvm = &hw->nvm;
3405 3403 u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
3406 3404 u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
3407 3405 u32 nvm_dword = 0;
3408 3406 u8 sig_byte = 0;
3409 3407 s32 ret_val;
3410 3408
3411 3409 DEBUGFUNC("e1000_valid_nvm_bank_detect_ich8lan");
3412 3410
3413 3411 switch (hw->mac.type) {
3414 3412 case e1000_pch_spt:
3413 + case e1000_pch_cnp:
3415 3414 bank1_offset = nvm->flash_bank_size;
3416 3415 act_offset = E1000_ICH_NVM_SIG_WORD;
3417 3416
3418 3417 /* set bank to 0 in case flash read fails */
3419 3418 *bank = 0;
3420 3419
3421 3420 /* Check bank 0 */
3422 3421 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset,
3423 3422 &nvm_dword);
3424 3423 if (ret_val)
3425 3424 return ret_val;
3426 3425 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3427 3426 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3428 3427 E1000_ICH_NVM_SIG_VALUE) {
3429 3428 *bank = 0;
3430 3429 return E1000_SUCCESS;
3431 3430 }
3432 3431
3433 3432 /* Check bank 1 */
3434 3433 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset +
3435 3434 bank1_offset,
3436 3435 &nvm_dword);
3437 3436 if (ret_val)
3438 3437 return ret_val;
3439 3438 sig_byte = (u8)((nvm_dword & 0xFF00) >> 8);
3440 3439 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3441 3440 E1000_ICH_NVM_SIG_VALUE) {
3442 3441 *bank = 1;
3443 3442 return E1000_SUCCESS;
3444 3443 }
3445 3444
3446 3445 DEBUGOUT("ERROR: No valid NVM bank present\n");
3447 3446 return -E1000_ERR_NVM;
3448 3447 case e1000_ich8lan:
3449 3448 case e1000_ich9lan:
3450 3449 eecd = E1000_READ_REG(hw, E1000_EECD);
3451 3450 if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
3452 3451 E1000_EECD_SEC1VAL_VALID_MASK) {
3453 3452 if (eecd & E1000_EECD_SEC1VAL)
3454 3453 *bank = 1;
3455 3454 else
3456 3455 *bank = 0;
3457 3456
3458 3457 return E1000_SUCCESS;
3459 3458 }
3460 3459 DEBUGOUT("Unable to determine valid NVM bank via EEC - reading flash signature\n");
3461 3460 /* fall-thru */
3462 3461 default:
3463 3462 /* set bank to 0 in case flash read fails */
3464 3463 *bank = 0;
3465 3464
3466 3465 /* Check bank 0 */
3467 3466 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
3468 3467 &sig_byte);
3469 3468 if (ret_val)
3470 3469 return ret_val;
3471 3470 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3472 3471 E1000_ICH_NVM_SIG_VALUE) {
3473 3472 *bank = 0;
3474 3473 return E1000_SUCCESS;
3475 3474 }
3476 3475
3477 3476 /* Check bank 1 */
3478 3477 ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
3479 3478 bank1_offset,
3480 3479 &sig_byte);
3481 3480 if (ret_val)
3482 3481 return ret_val;
3483 3482 if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
3484 3483 E1000_ICH_NVM_SIG_VALUE) {
3485 3484 *bank = 1;
3486 3485 return E1000_SUCCESS;
3487 3486 }
3488 3487
3489 3488 DEBUGOUT("ERROR: No valid NVM bank present\n");
3490 3489 return -E1000_ERR_NVM;
3491 3490 }
3492 3491 }
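
On SPT/CNP the signature is fetched as part of a dword and the byte of interest is bits 15:8; bits 7:6 of that byte must read 10b. A standalone sketch of the test, with the constants restated as assumptions mirroring E1000_ICH_NVM_VALID_SIG_MASK and E1000_ICH_NVM_SIG_VALUE:

	#include <stdbool.h>
	#include <stdint.h>

	#define NVM_VALID_SIG_MASK 0xC0u  /* bits 7:6 of the signature byte */
	#define NVM_SIG_VALUE      0x80u  /* 10b marks the bank as valid */

	/* Extract the signature byte from the flash dword and validate it. */
	static bool bank_signature_valid(uint32_t nvm_dword)
	{
		uint8_t sig_byte = (uint8_t)((nvm_dword & 0xFF00u) >> 8);

		return (sig_byte & NVM_VALID_SIG_MASK) == NVM_SIG_VALUE;
	}
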
3493 3492
3494 3493 /**
3495 3494 * e1000_read_nvm_spt - NVM access for SPT
3496 3495 * @hw: pointer to the HW structure
3497 3496  * @offset: The offset (in words) of the word(s) to read.
3498 3497  * @words: Size of data to read in words.
3499 3498  * @data: Pointer to the word(s) to read at offset.
3500 3499  *
3501 3500  * Reads one or more words from the NVM.
3502 3501 **/
3503 3502 static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3504 3503 u16 *data)
3505 3504 {
3506 3505 struct e1000_nvm_info *nvm = &hw->nvm;
3507 3506 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3508 3507 u32 act_offset;
3509 3508 s32 ret_val = E1000_SUCCESS;
3510 3509 u32 bank = 0;
3511 3510 u32 dword = 0;
3512 3511 u16 offset_to_read;
3513 3512 u16 i;
3514 3513
3515 3514 DEBUGFUNC("e1000_read_nvm_spt");
3516 3515
3517 3516 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3518 3517 (words == 0)) {
3519 3518 DEBUGOUT("nvm parameter(s) out of bounds\n");
3520 3519 ret_val = -E1000_ERR_NVM;
3521 3520 goto out;
3522 3521 }
3523 3522
3524 3523 nvm->ops.acquire(hw);
3525 3524
3526 3525 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3527 3526 if (ret_val != E1000_SUCCESS) {
3528 3527 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3529 3528 bank = 0;
3530 3529 }
3531 3530
3532 3531 act_offset = (bank) ? nvm->flash_bank_size : 0;
3533 3532 act_offset += offset;
3534 3533
3535 3534 ret_val = E1000_SUCCESS;
3536 3535
3537 3536 for (i = 0; i < words; i += 2) {
3538 3537 if (words - i == 1) {
3539 3538 if (dev_spec->shadow_ram[offset+i].modified) {
3540 3539 data[i] = dev_spec->shadow_ram[offset+i].value;
3541 3540 } else {
3542 3541 offset_to_read = act_offset + i -
3543 3542 ((act_offset + i) % 2);
3544 3543 ret_val =
3545 3544 e1000_read_flash_dword_ich8lan(hw,
3546 3545 offset_to_read,
3547 3546 &dword);
3548 3547 if (ret_val)
3549 3548 break;
3550 3549 if ((act_offset + i) % 2 == 0)
3551 3550 data[i] = (u16)(dword & 0xFFFF);
3552 3551 else
3553 3552 data[i] = (u16)((dword >> 16) & 0xFFFF);
3554 3553 }
3555 3554 } else {
3556 3555 offset_to_read = act_offset + i;
3557 3556 if (!(dev_spec->shadow_ram[offset+i].modified) ||
3558 3557 !(dev_spec->shadow_ram[offset+i+1].modified)) {
3559 3558 ret_val =
3560 3559 e1000_read_flash_dword_ich8lan(hw,
3561 3560 offset_to_read,
3562 3561 &dword);
3563 3562 if (ret_val)
3564 3563 break;
3565 3564 }
3566 3565 if (dev_spec->shadow_ram[offset+i].modified)
3567 3566 data[i] = dev_spec->shadow_ram[offset+i].value;
3568 3567 else
3569 3568 data[i] = (u16) (dword & 0xFFFF);
3570 3569 			if (dev_spec->shadow_ram[offset+i+1].modified)
3571 3570 data[i+1] =
3572 3571 dev_spec->shadow_ram[offset+i+1].value;
3573 3572 else
3574 3573 data[i+1] = (u16) (dword >> 16 & 0xFFFF);
3575 3574 }
3576 3575 }
3577 3576
3578 3577 nvm->ops.release(hw);
3579 3578
3580 3579 out:
3581 3580 if (ret_val)
3582 3581 DEBUGOUT1("NVM read error: %d\n", ret_val);
3583 3582
3584 3583 return ret_val;
3585 3584 }
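
Because SPT-class flash is only dword-addressable, the loop above rounds the word address down to a dword boundary and then selects the low or high half by offset parity. A sketch of that selection:

	#include <stdint.h>

	/* Return the NVM word at word_offset from its containing flash dword. */
	static uint16_t word_from_dword(uint32_t dword, uint32_t word_offset)
	{
		if ((word_offset % 2) == 0)
			return (uint16_t)(dword & 0xFFFFu);	/* even: bits 15:0 */
		return (uint16_t)((dword >> 16) & 0xFFFFu);	/* odd: bits 31:16 */
	}
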
3586 3585
3587 3586 /**
3588 3587 * e1000_read_nvm_ich8lan - Read word(s) from the NVM
3589 3588 * @hw: pointer to the HW structure
3590 3589  * @offset: The offset (in words) of the word(s) to read.
3591 3590  * @words: Size of data to read in words
3592 3591  * @data: Pointer to the word(s) to read at offset.
3593 3592  *
3594 3593  * Reads one or more words from the NVM using the flash access registers.
3595 3594 **/
3596 3595 static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3597 3596 u16 *data)
3598 3597 {
3599 3598 struct e1000_nvm_info *nvm = &hw->nvm;
3600 3599 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3601 3600 u32 act_offset;
3602 3601 s32 ret_val = E1000_SUCCESS;
3603 3602 u32 bank = 0;
3604 3603 u16 i, word;
3605 3604
3606 3605 DEBUGFUNC("e1000_read_nvm_ich8lan");
3607 3606
3608 3607 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3609 3608 (words == 0)) {
3610 3609 DEBUGOUT("nvm parameter(s) out of bounds\n");
3611 3610 ret_val = -E1000_ERR_NVM;
3612 3611 goto out;
3613 3612 }
3614 3613
3615 3614 nvm->ops.acquire(hw);
3616 3615
3617 3616 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3618 3617 if (ret_val != E1000_SUCCESS) {
3619 3618 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
3620 3619 bank = 0;
3621 3620 }
3622 3621
3623 3622 act_offset = (bank) ? nvm->flash_bank_size : 0;
3624 3623 act_offset += offset;
3625 3624
3626 3625 ret_val = E1000_SUCCESS;
3627 3626 for (i = 0; i < words; i++) {
3628 3627 if (dev_spec->shadow_ram[offset+i].modified) {
3629 3628 data[i] = dev_spec->shadow_ram[offset+i].value;
3630 3629 } else {
3631 3630 ret_val = e1000_read_flash_word_ich8lan(hw,
3632 3631 act_offset + i,
3633 3632 &word);
3634 3633 if (ret_val)
3635 3634 break;
3636 3635 data[i] = word;
3637 3636 }
3638 3637 }
3639 3638
3640 3639 nvm->ops.release(hw);
3641 3640
3642 3641 out:
3643 3642 if (ret_val)
3644 3643 DEBUGOUT1("NVM read error: %d\n", ret_val);
3645 3644
3646 3645 return ret_val;
3647 3646 }
3648 3647
3649 3648 /**
3650 3649 * e1000_flash_cycle_init_ich8lan - Initialize flash
3651 3650 * @hw: pointer to the HW structure
3652 3651 *
3653 3652 * This function does initial flash setup so that a new read/write/erase cycle
3654 3653 * can be started.
3655 3654 **/
3656 3655 static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
3657 3656 {
3658 3657 union ich8_hws_flash_status hsfsts;
3659 3658 s32 ret_val = -E1000_ERR_NVM;
3660 3659
3661 3660 DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
3662 3661
3663 3662 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3664 3663
3665 3664 /* Check if the flash descriptor is valid */
3666 3665 if (!hsfsts.hsf_status.fldesvalid) {
3667 3666 DEBUGOUT("Flash descriptor invalid. SW Sequencing must be used.\n");
3668 3667 return -E1000_ERR_NVM;
3669 3668 }
3670 3669
3671 3670 /* Clear FCERR and DAEL in hw status by writing 1 */
3672 3671 hsfsts.hsf_status.flcerr = 1;
3673 3672 hsfsts.hsf_status.dael = 1;
3674 - if (hw->mac.type == e1000_pch_spt)
3673 + if (hw->mac.type >= e1000_pch_spt)
3675 3674 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3676 3675 hsfsts.regval & 0xFFFF);
3677 3676 else
3678 3677 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
3679 3678
3680 3679 	/* Either we should have a hardware SPI cycle-in-progress
3681 3680 	 * bit to check against in order to start a new cycle, or
3682 3681 	 * the FDONE bit should be set to 1 by hardware reset, so
3683 3682 	 * that it can then be used as an indication of whether a
3684 3683 	 * cycle is in progress or has been completed.
3686 3685 */
3687 3686
3688 3687 if (!hsfsts.hsf_status.flcinprog) {
3689 3688 /* There is no cycle running at present,
3690 3689 * so we can start a cycle.
3691 3690 * Begin by setting Flash Cycle Done.
3692 3691 */
3693 3692 hsfsts.hsf_status.flcdone = 1;
3694 - if (hw->mac.type == e1000_pch_spt)
3693 + if (hw->mac.type >= e1000_pch_spt)
3695 3694 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3696 3695 hsfsts.regval & 0xFFFF);
3697 3696 else
3698 3697 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3699 3698 hsfsts.regval);
3700 3699 ret_val = E1000_SUCCESS;
3701 3700 } else {
3702 3701 s32 i;
3703 3702
3704 3703 		/* Otherwise poll for some time so the current
3705 3704 * cycle has a chance to end before giving up.
3706 3705 */
3707 3706 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
3708 3707 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3709 3708 ICH_FLASH_HSFSTS);
3710 3709 if (!hsfsts.hsf_status.flcinprog) {
3711 3710 ret_val = E1000_SUCCESS;
3712 3711 break;
3713 3712 }
3714 3713 usec_delay(1);
3715 3714 }
3716 3715 if (ret_val == E1000_SUCCESS) {
3717 3716 			/* Successfully waited for the previous cycle to finish,
3718 3717 * now set the Flash Cycle Done.
3719 3718 */
3720 3719 hsfsts.hsf_status.flcdone = 1;
3721 - if (hw->mac.type == e1000_pch_spt)
3720 + if (hw->mac.type >= e1000_pch_spt)
3722 3721 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3723 3722 hsfsts.regval & 0xFFFF);
3724 3723 else
3725 3724 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS,
3726 3725 hsfsts.regval);
3727 3726 } else {
3728 3727 DEBUGOUT("Flash controller busy, cannot get access\n");
3729 3728 }
3730 3729 }
3731 3730
3732 3731 return ret_val;
3733 3732 }
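
FCERR and DAEL are write-one-to-clear status bits: setting them in the value written back clears them in hardware while leaving the other fields alone. A sketch of the idea on a plain integer (the real code goes through a bitfield union and, on SPT and newer, a 32-bit register write; the bit positions below are assumptions for illustration):

	#include <stdint.h>

	#define HSFSTS_FCERR (1u << 1)	/* assumed bit positions, */
	#define HSFSTS_DAEL  (1u << 5)	/* for illustration only  */

	/* Returns the value to write back so both W1C error bits clear. */
	static uint16_t ack_flash_errors(uint16_t hsfsts)
	{
		return hsfsts | HSFSTS_FCERR | HSFSTS_DAEL;
	}
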
3734 3733
3735 3734 /**
3736 3735 * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
3737 3736 * @hw: pointer to the HW structure
3738 3737 * @timeout: maximum time to wait for completion
3739 3738 *
3740 3739 * This function starts a flash cycle and waits for its completion.
3741 3740 **/
3742 3741 static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
3743 3742 {
3744 3743 union ich8_hws_flash_ctrl hsflctl;
3745 3744 union ich8_hws_flash_status hsfsts;
3746 3745 u32 i = 0;
3747 3746
3748 3747 DEBUGFUNC("e1000_flash_cycle_ich8lan");
3749 3748
3750 3749 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
3751 - if (hw->mac.type == e1000_pch_spt)
3750 + if (hw->mac.type >= e1000_pch_spt)
3752 3751 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3753 3752 else
3754 3753 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3755 3754 hsflctl.hsf_ctrl.flcgo = 1;
3756 3755
3757 - if (hw->mac.type == e1000_pch_spt)
3756 + if (hw->mac.type >= e1000_pch_spt)
3758 3757 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3759 3758 hsflctl.regval << 16);
3760 3759 else
3761 3760 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3762 3761
3763 3762 	/* wait until the FDONE bit is set to 1 */
3764 3763 do {
3765 3764 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
3766 3765 if (hsfsts.hsf_status.flcdone)
3767 3766 break;
3768 3767 usec_delay(1);
3769 3768 } while (i++ < timeout);
3770 3769
3771 3770 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
3772 3771 return E1000_SUCCESS;
3773 3772
3774 3773 return -E1000_ERR_NVM;
3775 3774 }
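
A cycle only counts as successful when FDONE is set and FCERR is clear; a timeout leaves FDONE unset and falls through to the NVM error return. Sketch of that terminal check with an illustrative bitfield:

	#include <stdbool.h>

	struct flash_status {		/* stand-in for hsf_status */
		unsigned flcdone : 1;
		unsigned flcerr  : 1;
	};

	static bool flash_cycle_succeeded(struct flash_status st)
	{
		return st.flcdone && !st.flcerr;
	}
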
3776 3775
3777 3776 /**
3778 3777 * e1000_read_flash_dword_ich8lan - Read dword from flash
3779 3778 * @hw: pointer to the HW structure
3780 3779 * @offset: offset to data location
3781 3780 * @data: pointer to the location for storing the data
3782 3781 *
3783 3782 * Reads the flash dword at offset into data. Offset is converted
3784 3783 * to bytes before read.
3785 3784 **/
3786 3785 static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3787 3786 u32 *data)
3788 3787 {
3789 3788 DEBUGFUNC("e1000_read_flash_dword_ich8lan");
3790 3789
3791 3790 if (!data)
3792 3791 return -E1000_ERR_NVM;
3793 3792
3794 3793 /* Must convert word offset into bytes. */
3795 3794 offset <<= 1;
3796 3795
3797 3796 return e1000_read_flash_data32_ich8lan(hw, offset, data);
3798 3797 }
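
Both read wrappers only translate the word offset into a byte address before delegating: NVM offsets count 16-bit words while the flash linear address counts bytes, hence the shift. Sketch:

	#include <stdint.h>

	/* 2 bytes per NVM word, so the byte address is the word offset doubled. */
	static uint32_t nvm_word_to_byte_offset(uint32_t word_offset)
	{
		return word_offset << 1;
	}
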
3799 3798
3800 3799 /**
3801 3800 * e1000_read_flash_word_ich8lan - Read word from flash
3802 3801 * @hw: pointer to the HW structure
3803 3802 * @offset: offset to data location
3804 3803 * @data: pointer to the location for storing the data
3805 3804 *
3806 3805 * Reads the flash word at offset into data. Offset is converted
3807 3806 * to bytes before read.
3808 3807 **/
3809 3808 static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3810 3809 u16 *data)
3811 3810 {
3812 3811 DEBUGFUNC("e1000_read_flash_word_ich8lan");
3813 3812
3814 3813 if (!data)
3815 3814 return -E1000_ERR_NVM;
3816 3815
3817 3816 /* Must convert offset into bytes. */
3818 3817 offset <<= 1;
3819 3818
3820 3819 return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3821 3820 }
3822 3821
3823 3822 /**
3824 3823 * e1000_read_flash_byte_ich8lan - Read byte from flash
3825 3824 * @hw: pointer to the HW structure
3826 3825 * @offset: The offset of the byte to read.
3827 3826 * @data: Pointer to a byte to store the value read.
3828 3827 *
3829 3828 * Reads a single byte from the NVM using the flash access registers.
3830 3829 **/
3831 3830 static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3832 3831 u8 *data)
3833 3832 {
3834 3833 s32 ret_val;
3835 3834 u16 word = 0;
3836 3835
3837 3836 	/* On SPT and newer, only 32-bit access is supported,
3838 3837 	 * so this function should not be called.
3839 3838 */
3840 - if (hw->mac.type == e1000_pch_spt)
3839 + if (hw->mac.type >= e1000_pch_spt)
3841 3840 return -E1000_ERR_NVM;
3842 3841 else
3843 3842 ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3844 3843
3845 3844 if (ret_val)
3846 3845 return ret_val;
3847 3846
3848 3847 *data = (u8)word;
3849 3848
3850 3849 return E1000_SUCCESS;
3851 3850 }
3852 3851
3853 3852 /**
3854 3853 * e1000_read_flash_data_ich8lan - Read byte or word from NVM
3855 3854 * @hw: pointer to the HW structure
3856 3855 * @offset: The offset (in bytes) of the byte or word to read.
3857 3856 * @size: Size of data to read, 1=byte 2=word
3858 3857 * @data: Pointer to the word to store the value read.
3859 3858 *
3860 3859 * Reads a byte or word from the NVM using the flash access registers.
3861 3860 **/
3862 3861 static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
3863 3862 u8 size, u16 *data)
3864 3863 {
3865 3864 union ich8_hws_flash_status hsfsts;
3866 3865 union ich8_hws_flash_ctrl hsflctl;
3867 3866 u32 flash_linear_addr;
3868 3867 u32 flash_data = 0;
3869 3868 s32 ret_val = -E1000_ERR_NVM;
3870 3869 u8 count = 0;
3871 3870
3872 3871 DEBUGFUNC("e1000_read_flash_data_ich8lan");
3873 3872
3874 3873 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
3875 3874 return -E1000_ERR_NVM;
3876 3875 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3877 3876 hw->nvm.flash_base_addr);
3878 3877
3879 3878 do {
3880 3879 usec_delay(1);
3881 3880 /* Steps */
3882 3881 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3883 3882 if (ret_val != E1000_SUCCESS)
3884 3883 break;
3885 3884 hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
3886 3885
3887 3886 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
3888 3887 hsflctl.hsf_ctrl.fldbcount = size - 1;
3889 3888 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3890 3889 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
3891 3890 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3892 3891
3893 3892 ret_val = e1000_flash_cycle_ich8lan(hw,
3894 3893 ICH_FLASH_READ_COMMAND_TIMEOUT);
3895 3894
3896 3895 		/* If FCERR is set, clear it and retry the whole
3897 3896 		 * sequence a few more times; otherwise read out
3898 3897 		 * the Flash Data0 register, least significant
3899 3898 		 * byte first.
3900 3899 */
3901 3900 if (ret_val == E1000_SUCCESS) {
3902 3901 flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3903 3902 if (size == 1)
3904 3903 *data = (u8)(flash_data & 0x000000FF);
3905 3904 else if (size == 2)
3906 3905 *data = (u16)(flash_data & 0x0000FFFF);
3907 3906 break;
3908 3907 } else {
3909 3908 /* If we've gotten here, then things are probably
3910 3909 * completely hosed, but if the error condition is
3911 3910 * detected, it won't hurt to give it another try...
3912 3911 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3913 3912 */
3914 3913 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3915 3914 ICH_FLASH_HSFSTS);
3916 3915 if (hsfsts.hsf_status.flcerr) {
3917 3916 /* Repeat for some time before giving up. */
3918 3917 continue;
3919 3918 } else if (!hsfsts.hsf_status.flcdone) {
3920 3919 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3921 3920 break;
3922 3921 }
3923 3922 }
3924 3923 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
3925 3924
3926 3925 return ret_val;
3927 3926 }
3928 3927
3929 3928 /**
3930 3929 * e1000_read_flash_data32_ich8lan - Read dword from NVM
3931 3930 * @hw: pointer to the HW structure
3932 3931 * @offset: The offset (in bytes) of the dword to read.
3933 3932 * @data: Pointer to the dword to store the value read.
3934 3933 *
3935 3934  * Reads a dword from the NVM using the flash access registers.
3936 3935 **/
3937 3936 static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
3938 3937 u32 *data)
3939 3938 {
3940 3939 union ich8_hws_flash_status hsfsts;
3941 3940 union ich8_hws_flash_ctrl hsflctl;
3942 3941 u32 flash_linear_addr;
3943 3942 s32 ret_val = -E1000_ERR_NVM;
3944 3943 u8 count = 0;
3945 3944
3946 3945 	DEBUGFUNC("e1000_read_flash_data32_ich8lan");
3947 3946
3948 - if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
3949 - hw->mac.type != e1000_pch_spt)
3950 - return -E1000_ERR_NVM;
3947 + if (offset > ICH_FLASH_LINEAR_ADDR_MASK && hw->mac.type < e1000_pch_spt)
3948 + return -E1000_ERR_NVM;
3951 3949 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
3952 3950 hw->nvm.flash_base_addr);
3953 3951
3954 3952 do {
3955 3953 usec_delay(1);
3956 3954 /* Steps */
3957 3955 ret_val = e1000_flash_cycle_init_ich8lan(hw);
3958 3956 if (ret_val != E1000_SUCCESS)
3959 3957 break;
3960 3958 		/* In SPT, this register is in LAN memory space, not flash.
3961 3959 		 * Therefore, only 32-bit access is supported.
3962 3960 */
3963 3961 hsflctl.regval = E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
3964 3962
3965 3963 		/* fldbcount is size - 1, so 3 selects a 4-byte (dword) access */
3966 3964 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
3967 3965 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
3968 3966 		/* In SPT, this register is in LAN memory space, not flash.
3969 3967 		 * Therefore, only 32-bit access is supported.
3970 3968 */
3971 3969 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
3972 3970 (u32)hsflctl.regval << 16);
3973 3971 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
3974 3972
3975 3973 ret_val = e1000_flash_cycle_ich8lan(hw,
3976 3974 ICH_FLASH_READ_COMMAND_TIMEOUT);
3977 3975
3978 3976 		/* If FCERR is set, clear it and retry the whole
3979 3977 		 * sequence a few more times; otherwise read out
3980 3978 		 * the Flash Data0 register, least significant
3981 3979 		 * byte first.
3982 3980 */
3983 3981 if (ret_val == E1000_SUCCESS) {
3984 3982 *data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
3985 3983 break;
3986 3984 } else {
3987 3985 /* If we've gotten here, then things are probably
3988 3986 * completely hosed, but if the error condition is
3989 3987 * detected, it won't hurt to give it another try...
3990 3988 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
3991 3989 */
3992 3990 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
3993 3991 ICH_FLASH_HSFSTS);
3994 3992 if (hsfsts.hsf_status.flcerr) {
3995 3993 /* Repeat for some time before giving up. */
3996 3994 continue;
3997 3995 } else if (!hsfsts.hsf_status.flcdone) {
3998 3996 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
3999 3997 break;
4000 3998 }
4001 3999 }
4002 4000 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4003 4001
4004 4002 return ret_val;
4005 4003 }
4006 4004
4007 4005 /**
4008 4006 * e1000_write_nvm_ich8lan - Write word(s) to the NVM
4009 4007 * @hw: pointer to the HW structure
4010 4008  * @offset: The offset (in words) of the word(s) to write.
4011 4009  * @words: Size of data to write in words
4012 4010  * @data: Pointer to the word(s) to write at offset.
4013 4011  *
4014 4012  * Stages the word(s) in the shadow RAM; the next checksum update commits them.
4015 4013 **/
4016 4014 static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
4017 4015 u16 *data)
4018 4016 {
4019 4017 struct e1000_nvm_info *nvm = &hw->nvm;
4020 4018 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4021 4019 u16 i;
4022 4020
4023 4021 DEBUGFUNC("e1000_write_nvm_ich8lan");
4024 4022
4025 4023 if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
4026 4024 (words == 0)) {
4027 4025 DEBUGOUT("nvm parameter(s) out of bounds\n");
4028 4026 return -E1000_ERR_NVM;
4029 4027 }
4030 4028
4031 4029 nvm->ops.acquire(hw);
4032 4030
4033 4031 for (i = 0; i < words; i++) {
4034 4032 dev_spec->shadow_ram[offset+i].modified = TRUE;
4035 4033 dev_spec->shadow_ram[offset+i].value = data[i];
4036 4034 }
4037 4035
4038 4036 nvm->ops.release(hw);
4039 4037
4040 4038 return E1000_SUCCESS;
4041 4039 }
4042 4040
4043 4041 /**
4044 4042 * e1000_update_nvm_checksum_spt - Update the checksum for NVM
4045 4043 * @hw: pointer to the HW structure
4046 4044 *
4047 4045 * The NVM checksum is updated by calling the generic update_nvm_checksum,
4048 4046 * which writes the checksum to the shadow ram. The changes in the shadow
4049 4047 * ram are then committed to the EEPROM by processing each bank at a time
4050 4048 * checking for the modified bit and writing only the pending changes.
4051 4049 * After a successful commit, the shadow ram is cleared and is ready for
4052 4050 * future writes.
4053 4051 **/
4054 4052 static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
4055 4053 {
4056 4054 struct e1000_nvm_info *nvm = &hw->nvm;
4057 4055 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4058 4056 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4059 4057 s32 ret_val;
4060 4058 u32 dword = 0;
4061 4059
4062 4060 DEBUGFUNC("e1000_update_nvm_checksum_spt");
4063 4061
4064 4062 ret_val = e1000_update_nvm_checksum_generic(hw);
4065 4063 if (ret_val)
4066 4064 goto out;
4067 4065
4068 4066 if (nvm->type != e1000_nvm_flash_sw)
4069 4067 goto out;
4070 4068
4071 4069 nvm->ops.acquire(hw);
4072 4070
4073 4071 /* We're writing to the opposite bank so if we're on bank 1,
4074 4072 * write to bank 0 etc. We also need to erase the segment that
4075 4073 * is going to be written
4076 4074 */
4077 4075 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4078 4076 if (ret_val != E1000_SUCCESS) {
4079 4077 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4080 4078 bank = 0;
4081 4079 }
4082 4080
4083 4081 if (bank == 0) {
4084 4082 new_bank_offset = nvm->flash_bank_size;
4085 4083 old_bank_offset = 0;
4086 4084 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4087 4085 if (ret_val)
4088 4086 goto release;
4089 4087 } else {
4090 4088 old_bank_offset = nvm->flash_bank_size;
4091 4089 new_bank_offset = 0;
4092 4090 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4093 4091 if (ret_val)
4094 4092 goto release;
4095 4093 }
4096 4094 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i += 2) {
4097 4095 /* Determine whether to write the value stored
4098 4096 * in the other NVM bank or a modified value stored
4099 4097 * in the shadow RAM
4100 4098 */
4101 4099 ret_val = e1000_read_flash_dword_ich8lan(hw,
4102 4100 i + old_bank_offset,
4103 4101 &dword);
4104 4102
4105 4103 if (dev_spec->shadow_ram[i].modified) {
4106 4104 dword &= 0xffff0000;
4107 4105 dword |= (dev_spec->shadow_ram[i].value & 0xffff);
4108 4106 }
4109 4107 if (dev_spec->shadow_ram[i + 1].modified) {
4110 4108 dword &= 0x0000ffff;
4111 4109 dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
4112 4110 << 16);
4113 4111 }
4114 4112 if (ret_val)
4115 4113 break;
4116 4114
4117 4115 /* If the word is 0x13, then make sure the signature bits
4118 4116 * (15:14) are 11b until the commit has completed.
4119 4117 * This will allow us to write 10b which indicates the
4120 4118 * signature is valid. We want to do this after the write
4121 4119 * has completed so that we don't mark the segment valid
4122 4120 * while the write is still in progress
4123 4121 */
4124 4122 if (i == E1000_ICH_NVM_SIG_WORD - 1)
4125 4123 dword |= E1000_ICH_NVM_SIG_MASK << 16;
4126 4124
4127 4125 /* Convert offset to bytes. */
4128 4126 act_offset = (i + new_bank_offset) << 1;
4129 4127
4130 4128 usec_delay(100);
4131 4129
4132 4130 		/* Write the data to the new bank. Offset in words. */
4133 4131 act_offset = i + new_bank_offset;
4134 4132 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
4135 4133 dword);
4136 4134 if (ret_val)
4137 4135 break;
4138 4136 }
4139 4137
4140 4138 /* Don't bother writing the segment valid bits if sector
4141 4139 * programming failed.
4142 4140 */
4143 4141 if (ret_val) {
4144 4142 DEBUGOUT("Flash commit failed.\n");
4145 4143 goto release;
4146 4144 }
4147 4145
4148 4146 	/* Finally, validate the new segment by setting bits 15:14
4149 4147 	 * to 10b in word 0x13. This can be done without an
4150 4148 	 * erase since these bits are 11b to start with and
4151 4149 	 * we only need to change bit 14 to 0b.
4152 4150 */
4153 4151 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4154 4152
4155 4153 	/* Offset is in words, but we read a dword. */
4156 4154 --act_offset;
4157 4155 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4158 4156
4159 4157 if (ret_val)
4160 4158 goto release;
4161 4159
4162 4160 dword &= 0xBFFFFFFF;
4163 4161 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4164 4162
4165 4163 if (ret_val)
4166 4164 goto release;
4167 4165
4168 4166 	/* And invalidate the previously valid segment by setting
4169 4167 	 * its signature word (0x13) high byte to 0b. This can be
4170 4168 	 * done without an erase because a flash erase sets all bits
4171 4169 	 * to 1's, and bits can be written from 1 to 0 without one.
4172 4170 */
4173 4171 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4174 4172
4175 4173 	/* Offset is in words, but we read a dword. */
4176 4174 act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
4177 4175 ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
4178 4176
4179 4177 if (ret_val)
4180 4178 goto release;
4181 4179
4182 4180 dword &= 0x00FFFFFF;
4183 4181 ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
4184 4182
4185 4183 if (ret_val)
4186 4184 goto release;
4187 4185
4188 4186 /* Great! Everything worked, we can now clear the cached entries. */
4189 4187 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4190 4188 dev_spec->shadow_ram[i].modified = FALSE;
4191 4189 dev_spec->shadow_ram[i].value = 0xFFFF;
4192 4190 }
4193 4191
4194 4192 release:
4195 4193 nvm->ops.release(hw);
4196 4194
4197 4195 /* Reload the EEPROM, or else modifications will not appear
4198 4196 * until after the next adapter reset.
4199 4197 */
4200 4198 if (!ret_val) {
4201 4199 nvm->ops.reload(hw);
4202 4200 msec_delay(10);
4203 4201 }
4204 4202
4205 4203 out:
4206 4204 if (ret_val)
4207 4205 DEBUGOUT1("NVM update error: %d\n", ret_val);
4208 4206
4209 4207 return ret_val;
4210 4208 }
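
The commit path always writes the bank opposite the one currently marked valid, then flips the signature words so the new bank validates and the old one does not. A sketch of the bank selection step (offsets in words, as above):

	#include <stdint.h>

	/* Given the currently valid bank, pick source and destination offsets. */
	static void pick_bank_offsets(uint32_t valid_bank, uint32_t bank_size,
	    uint32_t *old_off, uint32_t *new_off)
	{
		if (valid_bank == 0) {
			*old_off = 0;		/* read current data from bank 0 */
			*new_off = bank_size;	/* commit into bank 1 */
		} else {
			*old_off = bank_size;
			*new_off = 0;
		}
	}
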
4211 4209
4212 4210 /**
4213 4211 * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
4214 4212 * @hw: pointer to the HW structure
4215 4213 *
4216 4214 * The NVM checksum is updated by calling the generic update_nvm_checksum,
4217 4215 * which writes the checksum to the shadow ram. The changes in the shadow
4218 4216 * ram are then committed to the EEPROM by processing each bank at a time
4219 4217 * checking for the modified bit and writing only the pending changes.
4220 4218 * After a successful commit, the shadow ram is cleared and is ready for
4221 4219 * future writes.
4222 4220 **/
4223 4221 static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
4224 4222 {
4225 4223 struct e1000_nvm_info *nvm = &hw->nvm;
4226 4224 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4227 4225 u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
4228 4226 s32 ret_val;
4229 4227 u16 data = 0;
4230 4228
4231 4229 DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
4232 4230
4233 4231 ret_val = e1000_update_nvm_checksum_generic(hw);
4234 4232 if (ret_val)
4235 4233 goto out;
4236 4234
4237 4235 if (nvm->type != e1000_nvm_flash_sw)
4238 4236 goto out;
4239 4237
4240 4238 nvm->ops.acquire(hw);
4241 4239
4242 4240 /* We're writing to the opposite bank so if we're on bank 1,
4243 4241 * write to bank 0 etc. We also need to erase the segment that
4244 4242 * is going to be written
4245 4243 */
4246 4244 ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
4247 4245 if (ret_val != E1000_SUCCESS) {
4248 4246 DEBUGOUT("Could not detect valid bank, assuming bank 0\n");
4249 4247 bank = 0;
4250 4248 }
4251 4249
4252 4250 if (bank == 0) {
4253 4251 new_bank_offset = nvm->flash_bank_size;
4254 4252 old_bank_offset = 0;
4255 4253 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
4256 4254 if (ret_val)
4257 4255 goto release;
4258 4256 } else {
4259 4257 old_bank_offset = nvm->flash_bank_size;
4260 4258 new_bank_offset = 0;
4261 4259 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
4262 4260 if (ret_val)
4263 4261 goto release;
4264 4262 }
4265 4263 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4266 4264 if (dev_spec->shadow_ram[i].modified) {
4267 4265 data = dev_spec->shadow_ram[i].value;
4268 4266 } else {
4269 4267 ret_val = e1000_read_flash_word_ich8lan(hw, i +
4270 4268 old_bank_offset,
4271 4269 &data);
4272 4270 if (ret_val)
4273 4271 break;
4274 4272 }
4275 4273 /* If the word is 0x13, then make sure the signature bits
4276 4274 * (15:14) are 11b until the commit has completed.
4277 4275 * This will allow us to write 10b which indicates the
4278 4276 * signature is valid. We want to do this after the write
4279 4277 * has completed so that we don't mark the segment valid
4280 4278 * while the write is still in progress
4281 4279 */
4282 4280 if (i == E1000_ICH_NVM_SIG_WORD)
4283 4281 data |= E1000_ICH_NVM_SIG_MASK;
4284 4282
4285 4283 /* Convert offset to bytes. */
4286 4284 act_offset = (i + new_bank_offset) << 1;
4287 4285
4288 4286 usec_delay(100);
4289 4287
4290 4288 /* Write the bytes to the new bank. */
4291 4289 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4292 4290 act_offset,
4293 4291 (u8)data);
4294 4292 if (ret_val)
4295 4293 break;
4296 4294
4297 4295 usec_delay(100);
4298 4296 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
4299 4297 act_offset + 1,
4300 4298 (u8)(data >> 8));
4301 4299 if (ret_val)
4302 4300 break;
4303 4301 }
4304 4302
4305 4303 /* Don't bother writing the segment valid bits if sector
4306 4304 * programming failed.
4307 4305 */
4308 4306 if (ret_val) {
4309 4307 DEBUGOUT("Flash commit failed.\n");
4310 4308 goto release;
4311 4309 }
4312 4310
4313 4311 	/* Finally, validate the new segment by setting bits 15:14
4314 4312 	 * to 10b in word 0x13. This can be done without an
4315 4313 	 * erase since these bits are 11b to start with and
4316 4314 	 * we only need to change bit 14 to 0b.
4317 4315 */
4318 4316 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
4319 4317 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
4320 4318 if (ret_val)
4321 4319 goto release;
4322 4320
4323 4321 data &= 0xBFFF;
4324 4322 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset * 2 + 1,
4325 4323 (u8)(data >> 8));
4326 4324 if (ret_val)
4327 4325 goto release;
4328 4326
4329 4327 	/* And invalidate the previously valid segment by setting
4330 4328 	 * its signature word (0x13) high byte to 0b. This can be
4331 4329 	 * done without an erase because a flash erase sets all bits
4332 4330 	 * to 1's, and bits can be written from 1 to 0 without one.
4333 4331 */
4334 4332 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
4335 4333
4336 4334 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
4337 4335
4338 4336 if (ret_val)
4339 4337 goto release;
4340 4338
4341 4339 /* Great! Everything worked, we can now clear the cached entries. */
4342 4340 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4343 4341 dev_spec->shadow_ram[i].modified = FALSE;
4344 4342 dev_spec->shadow_ram[i].value = 0xFFFF;
4345 4343 }
4346 4344
4347 4345 release:
4348 4346 nvm->ops.release(hw);
4349 4347
4350 4348 /* Reload the EEPROM, or else modifications will not appear
4351 4349 * until after the next adapter reset.
4352 4350 */
4353 4351 if (!ret_val) {
4354 4352 nvm->ops.reload(hw);
4355 4353 msec_delay(10);
4356 4354 }
4357 4355
4358 4356 out:
4359 4357 if (ret_val)
4360 4358 DEBUGOUT1("NVM update error: %d\n", ret_val);
4361 4359
4362 4360 return ret_val;
4363 4361 }
4364 4362
4365 4363 /**
4366 4364 * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
4367 4365 * @hw: pointer to the HW structure
4368 4366 *
4369 4367  * Check to see if the checksum needs to be fixed by reading bit 6 in word
4370 4368  * 0x19. If the bit is 0, the EEPROM has been modified but the checksum was
4371 4369  * not calculated; in that case calculate the checksum and set bit 6.
4372 4370 **/
4373 4371 static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
4374 4372 {
4375 4373 s32 ret_val;
4376 4374 u16 data;
4377 4375 u16 word;
4378 4376 u16 valid_csum_mask;
4379 4377
4380 4378 DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
4381 4379
4382 4380 /* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
4383 4381 * the checksum needs to be fixed. This bit is an indication that
4384 4382 * the NVM was prepared by OEM software and did not calculate
4385 4383 * the checksum...a likely scenario.
4386 4384 */
4387 4385 switch (hw->mac.type) {
4388 4386 case e1000_pch_lpt:
4389 4387 case e1000_pch_spt:
4388 + case e1000_pch_cnp:
4390 4389 word = NVM_COMPAT;
4391 4390 valid_csum_mask = NVM_COMPAT_VALID_CSUM;
4392 4391 break;
4393 4392 default:
4394 4393 word = NVM_FUTURE_INIT_WORD1;
4395 4394 valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
4396 4395 break;
4397 4396 }
4398 4397
4399 4398 ret_val = hw->nvm.ops.read(hw, word, 1, &data);
4400 4399 if (ret_val)
4401 4400 return ret_val;
4402 4401
4403 4402 if (!(data & valid_csum_mask)) {
4404 4403 data |= valid_csum_mask;
4405 4404 ret_val = hw->nvm.ops.write(hw, word, 1, &data);
4406 4405 if (ret_val)
4407 4406 return ret_val;
4408 4407 ret_val = hw->nvm.ops.update(hw);
4409 4408 if (ret_val)
4410 4409 return ret_val;
4411 4410 }
4412 4411
4413 4412 return e1000_validate_nvm_checksum_generic(hw);
4414 4413 }
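
The fixup is a test-and-set: if the valid-checksum bit is clear, set it, write the word back, and regenerate the checksum before the generic validation runs. A sketch of the decision (the mask value varies by MAC type, as the switch above shows):

	#include <stdbool.h>
	#include <stdint.h>

	/* True when the OEM image left the valid-checksum bit unset. */
	static bool csum_fixup_needed(uint16_t word, uint16_t valid_csum_mask)
	{
		return (word & valid_csum_mask) == 0;
	}
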
4415 4414
4416 4415 /**
4417 4416 * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4418 4417 * @hw: pointer to the HW structure
4419 4418  * @offset: The offset (in bytes) of the byte/word to write.
4420 4419  * @size: Size of data to write, 1=byte 2=word
4421 4420 * @data: The byte(s) to write to the NVM.
4422 4421 *
4423 4422 * Writes one/two bytes to the NVM using the flash access registers.
4424 4423 **/
4425 4424 static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4426 4425 u8 size, u16 data)
4427 4426 {
4428 4427 union ich8_hws_flash_status hsfsts;
4429 4428 union ich8_hws_flash_ctrl hsflctl;
4430 4429 u32 flash_linear_addr;
4431 4430 u32 flash_data = 0;
4432 4431 s32 ret_val;
4433 4432 u8 count = 0;
4434 4433
4435 4434 	DEBUGFUNC("e1000_write_flash_data_ich8lan");
4436 4435
4437 - if (hw->mac.type == e1000_pch_spt) {
4436 + if (hw->mac.type >= e1000_pch_spt) {
4438 4437 if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4439 4438 return -E1000_ERR_NVM;
4440 4439 } else {
4441 4440 if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4442 4441 return -E1000_ERR_NVM;
4443 4442 }
4444 4443
4445 4444 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4446 4445 hw->nvm.flash_base_addr);
4447 4446
4448 4447 do {
4449 4448 usec_delay(1);
4450 4449 /* Steps */
4451 4450 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4452 4451 if (ret_val != E1000_SUCCESS)
4453 4452 break;
4454 4453 		/* In SPT, this register is in LAN memory space, not
4455 4454 		 * flash. Therefore, only 32-bit access is supported.
4456 4455 */
4457 - if (hw->mac.type == e1000_pch_spt)
4456 + if (hw->mac.type >= e1000_pch_spt)
4458 4457 hsflctl.regval =
4459 - E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS)>>16;
4458 + E1000_READ_FLASH_REG(hw, ICH_FLASH_HSFSTS) >> 16;
4460 4459 else
4461 4460 hsflctl.regval =
4462 4461 E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
4463 4462
4464 4463 /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4465 4464 hsflctl.hsf_ctrl.fldbcount = size - 1;
4466 4465 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4467 4466 		/* In SPT, this register is in LAN memory space,
4468 4467 		 * not flash. Therefore, only 32-bit access is
4469 4468 		 * supported.
4470 4469 */
4471 - if (hw->mac.type == e1000_pch_spt)
4470 + if (hw->mac.type >= e1000_pch_spt)
4472 4471 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4473 4472 hsflctl.regval << 16);
4474 4473 else
4475 4474 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4476 4475 hsflctl.regval);
4477 4476
4478 4477 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4479 4478
4480 4479 if (size == 1)
4481 4480 flash_data = (u32)data & 0x00FF;
4482 4481 else
4483 4482 flash_data = (u32)data;
4484 4483
4485 4484 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
4486 4485
4487 4486 		/* If FCERR is set, clear it and retry the whole
4488 4487 		 * sequence a few more times; otherwise we are done.
4489 4488 */
4490 4489 ret_val =
4491 4490 e1000_flash_cycle_ich8lan(hw,
4492 4491 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4493 4492 if (ret_val == E1000_SUCCESS)
4494 4493 break;
4495 4494
4496 4495 /* If we're here, then things are most likely
4497 4496 * completely hosed, but if the error condition
4498 4497 * is detected, it won't hurt to give it another
4499 4498 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4500 4499 */
4501 4500 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4502 4501 if (hsfsts.hsf_status.flcerr)
4503 4502 /* Repeat for some time before giving up. */
4504 4503 continue;
4505 4504 if (!hsfsts.hsf_status.flcdone) {
4506 4505 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4507 4506 break;
4508 4507 }
4509 4508 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4510 4509
4511 4510 return ret_val;
4512 4511 }
4513 4512
4514 4513 /**
4515 4514 * e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4516 4515 * @hw: pointer to the HW structure
4517 4516  * @offset: The offset (in bytes) of the dword to write.
4518 4517  * @data: The 4 bytes to write to the NVM.
4519 4518  *
4520 4519  * Writes a dword to the NVM using the flash access registers.
4521 4520 **/
4522 4521 static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4523 4522 u32 data)
4524 4523 {
4525 4524 union ich8_hws_flash_status hsfsts;
4526 4525 union ich8_hws_flash_ctrl hsflctl;
4527 4526 u32 flash_linear_addr;
4528 4527 s32 ret_val;
4529 4528 u8 count = 0;
4530 4529
4531 4530 DEBUGFUNC("e1000_write_flash_data32_ich8lan");
4532 4531
4533 - if (hw->mac.type == e1000_pch_spt) {
4532 + if (hw->mac.type >= e1000_pch_spt) {
4534 4533 if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4535 4534 return -E1000_ERR_NVM;
4536 4535 }
4537 4536 flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4538 4537 hw->nvm.flash_base_addr);
4539 4538 do {
4540 4539 usec_delay(1);
4541 4540 /* Steps */
4542 4541 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4543 4542 if (ret_val != E1000_SUCCESS)
4544 4543 break;
4545 4544
4546 4545 		/* In SPT, this register is in LAN memory space, not
4547 4546 		 * flash. Therefore, only 32-bit access is supported.
4548 4547 */
4549 - if (hw->mac.type == e1000_pch_spt)
4548 + if (hw->mac.type >= e1000_pch_spt)
4550 4549 hsflctl.regval = E1000_READ_FLASH_REG(hw,
4551 4550 ICH_FLASH_HSFSTS)
4552 4551 >> 16;
4553 4552 else
4554 4553 hsflctl.regval = E1000_READ_FLASH_REG16(hw,
4555 4554 ICH_FLASH_HSFCTL);
4556 4555
4557 4556 hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4558 4557 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4559 4558
4560 4559 		/* In SPT, this register is in LAN memory space,
4561 4560 		 * not flash. Therefore, only 32-bit access is
4562 4561 		 * supported.
4563 4562 */
4564 - if (hw->mac.type == e1000_pch_spt)
4563 + if (hw->mac.type >= e1000_pch_spt)
4565 4564 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4566 4565 hsflctl.regval << 16);
4567 4566 else
4568 4567 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4569 4568 hsflctl.regval);
4570 4569
4571 4570 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
4572 4571
4573 4572 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, data);
4574 4573
4575 4574 		/* If FCERR is set, clear it and retry the whole
4576 4575 		 * sequence a few more times; otherwise we are done.
4577 4576 */
4578 4577 ret_val = e1000_flash_cycle_ich8lan(hw,
4579 4578 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4580 4579
4581 4580 if (ret_val == E1000_SUCCESS)
4582 4581 break;
4583 4582
4584 4583 /* If we're here, then things are most likely
4585 4584 * completely hosed, but if the error condition
4586 4585 * is detected, it won't hurt to give it another
4587 4586 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4588 4587 */
4589 4588 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4590 4589
4591 4590 if (hsfsts.hsf_status.flcerr)
4592 4591 /* Repeat for some time before giving up. */
4593 4592 continue;
4594 4593 if (!hsfsts.hsf_status.flcdone) {
4595 4594 DEBUGOUT("Timeout error - flash cycle did not complete.\n");
4596 4595 break;
4597 4596 }
4598 4597 } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4599 4598
4600 4599 return ret_val;
4601 4600 }
4602 4601
4603 4602 /**
4604 4603 * e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4605 4604 * @hw: pointer to the HW structure
4606 4605  * @offset: The offset of the byte to write.
4607 4606 * @data: The byte to write to the NVM.
4608 4607 *
4609 4608 * Writes a single byte to the NVM using the flash access registers.
4610 4609 **/
4611 4610 static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4612 4611 u8 data)
4613 4612 {
4614 4613 u16 word = (u16)data;
4615 4614
4616 4615 DEBUGFUNC("e1000_write_flash_byte_ich8lan");
4617 4616
4618 4617 return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4619 4618 }
4620 4619
4621 4620 /**
4622 4621 * e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4623 4622 * @hw: pointer to the HW structure
4624 4623 * @offset: The offset of the word to write.
4625 4624 * @dword: The dword to write to the NVM.
4626 4625 *
4627 4626 * Writes a single dword to the NVM using the flash access registers.
4628 4627 * Goes through a retry algorithm before giving up.
4629 4628 **/
4630 4629 static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4631 4630 u32 offset, u32 dword)
4632 4631 {
4633 4632 s32 ret_val;
4634 4633 u16 program_retries;
4635 4634
4636 4635 DEBUGFUNC("e1000_retry_write_flash_dword_ich8lan");
4637 4636
4638 4637 /* Must convert word offset into bytes. */
4639 4638 offset <<= 1;
4640 4639
4641 4640 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4642 4641
4643 4642 if (!ret_val)
4644 4643 return ret_val;
4645 4644 for (program_retries = 0; program_retries < 100; program_retries++) {
4646 4645 		DEBUGOUT2("Retrying Dword %8.8X at offset %u\n", dword, offset);
4647 4646 usec_delay(100);
4648 4647 ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4649 4648 if (ret_val == E1000_SUCCESS)
4650 4649 break;
4651 4650 }
4652 4651 if (program_retries == 100)
4653 4652 return -E1000_ERR_NVM;
4654 4653
4655 4654 return E1000_SUCCESS;
4656 4655 }
4657 4656
4658 4657 /**
4659 4658 * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4660 4659 * @hw: pointer to the HW structure
4661 4660 * @offset: The offset of the byte to write.
4662 4661 * @byte: The byte to write to the NVM.
4663 4662 *
4664 4663 * Writes a single byte to the NVM using the flash access registers.
4665 4664 * Goes through a retry algorithm before giving up.
4666 4665 **/
4667 4666 static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4668 4667 u32 offset, u8 byte)
4669 4668 {
4670 4669 s32 ret_val;
4671 4670 u16 program_retries;
4672 4671
4673 4672 DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
4674 4673
4675 4674 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4676 4675 if (!ret_val)
4677 4676 return ret_val;
4678 4677
4679 4678 for (program_retries = 0; program_retries < 100; program_retries++) {
4680 4679 DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
4681 4680 usec_delay(100);
4682 4681 ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4683 4682 if (ret_val == E1000_SUCCESS)
4684 4683 break;
4685 4684 }
4686 4685 if (program_retries == 100)
4687 4686 return -E1000_ERR_NVM;
4688 4687
4689 4688 return E1000_SUCCESS;
4690 4689 }
4691 4690
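Both retry helpers above share one policy: a first attempt, then up to 100 retries spaced 100 microseconds apart, failing only once the loop exhausts. A condensed standalone sketch of that pattern; try_write() is a hypothetical single-attempt operation and usleep() stands in for usec_delay():

	#include <unistd.h>

	/* Hypothetical single attempt; returns 0 on success. */
	extern int try_write(unsigned offset, unsigned value);

	static int retry_write(unsigned offset, unsigned value)
	{
		int i;

		if (try_write(offset, value) == 0)	/* first attempt */
			return 0;

		for (i = 0; i < 100; i++) {
			usleep(100);	/* driver uses usec_delay(100) */
			if (try_write(offset, value) == 0)
				return 0;
		}
		return -1;	/* analogous to -E1000_ERR_NVM */
	}
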
4692 4691 /**
4693 4692 * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4694 4693 * @hw: pointer to the HW structure
4695 4694 * @bank: 0 for first bank, 1 for second bank, etc.
4696 4695 *
4697 4696 * Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4698 4697 * bank N is 4096 * N + flash_reg_addr.
4699 4698 **/
4700 4699 static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4701 4700 {
4702 4701 struct e1000_nvm_info *nvm = &hw->nvm;
4703 4702 union ich8_hws_flash_status hsfsts;
4704 4703 union ich8_hws_flash_ctrl hsflctl;
4705 4704 u32 flash_linear_addr;
4706 4705 /* bank size is in 16bit words - adjust to bytes */
4707 4706 u32 flash_bank_size = nvm->flash_bank_size * 2;
4708 4707 s32 ret_val;
4709 4708 s32 count = 0;
4710 4709 s32 j, iteration, sector_size;
4711 4710
4712 4711 DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
4713 4712
4714 4713 hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
4715 4714
4716 4715 /* Determine HW Sector size: Read BERASE bits of hw flash status
4717 4716 * register
4718 4717 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4719 4718 * consecutive sectors. The start index for the nth Hw sector
4720 4719 * can be calculated as = bank * 4096 + n * 256
4721 4720 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4722 4721 * The start index for the nth Hw sector can be calculated
4723 4722 * as = bank * 4096
4724 4723 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4725 4724 * (ich9 only, otherwise error condition)
4726 4725 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4727 4726 */
4728 4727 switch (hsfsts.hsf_status.berasesz) {
4729 4728 case 0:
4730 4729 /* Hw sector size 256 */
4731 4730 sector_size = ICH_FLASH_SEG_SIZE_256;
4732 4731 iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4733 4732 break;
4734 4733 case 1:
4735 4734 sector_size = ICH_FLASH_SEG_SIZE_4K;
4736 4735 iteration = 1;
4737 4736 break;
4738 4737 case 2:
4739 4738 sector_size = ICH_FLASH_SEG_SIZE_8K;
4740 4739 iteration = 1;
4741 4740 break;
4742 4741 case 3:
4743 4742 sector_size = ICH_FLASH_SEG_SIZE_64K;
4744 4743 iteration = 1;
4745 4744 break;
4746 4745 default:
4747 4746 return -E1000_ERR_NVM;
4748 4747 }
4749 4748
4750 4749 /* Start with the base address, then add the sector offset. */
4751 4750 flash_linear_addr = hw->nvm.flash_base_addr;
4752 4751 flash_linear_addr += (bank) ? flash_bank_size : 0;
4753 4752
4754 4753 for (j = 0; j < iteration; j++) {
4755 4754 do {
4756 4755 u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4757 4756
4758 4757 /* Steps */
4759 4758 ret_val = e1000_flash_cycle_init_ich8lan(hw);
4760 4759 if (ret_val)
4761 4760 return ret_val;
4762 4761
4763 4762 /* Write a value 11 (block Erase) in Flash
4764 4763 * Cycle field in hw flash control
4765 4764 */
4766 - if (hw->mac.type == e1000_pch_spt)
4765 + if (hw->mac.type >= e1000_pch_spt)
4767 4766 hsflctl.regval =
4768 4767 E1000_READ_FLASH_REG(hw,
4769 4768 ICH_FLASH_HSFSTS)>>16;
4770 4769 else
4771 4770 hsflctl.regval =
4772 4771 E1000_READ_FLASH_REG16(hw,
4773 4772 ICH_FLASH_HSFCTL);
4774 4773
4775 4774 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4776 - if (hw->mac.type == e1000_pch_spt)
4775 + if (hw->mac.type >= e1000_pch_spt)
4777 4776 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_HSFSTS,
4778 4777 hsflctl.regval << 16);
4779 4778 else
4780 4779 E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL,
4781 4780 hsflctl.regval);
4782 4781
4783 4782 /* Write the last 24 bits of an index within the
4784 4783 * block into Flash Linear address field in Flash
4785 4784 * Address.
4786 4785 */
4787 4786 flash_linear_addr += (j * sector_size);
4788 4787 E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR,
4789 4788 flash_linear_addr);
4790 4789
4791 4790 ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4792 4791 if (ret_val == E1000_SUCCESS)
4793 4792 break;
4794 4793
4795 4794 /* Check if FCERR is set to 1. If 1,
4796 4795 * clear it and try the whole sequence
4797 4796 			 * a few more times, else we are done.
4798 4797 */
4799 4798 hsfsts.regval = E1000_READ_FLASH_REG16(hw,
4800 4799 ICH_FLASH_HSFSTS);
4801 4800 if (hsfsts.hsf_status.flcerr)
4802 4801 /* repeat for some time before giving up */
4803 4802 continue;
4804 4803 else if (!hsfsts.hsf_status.flcdone)
4805 4804 return ret_val;
4806 4805 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4807 4806 }
4808 4807
4809 4808 return E1000_SUCCESS;
4810 4809 }
4811 4810
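The berasesz switch above turns a 2-bit status field into a (sector size, iteration count) pair so that the erase loop covers the whole bank. A standalone sketch of that decode, assuming the 4 KiB bank layout described in the comment block; the unreachable default mirrors the driver's -E1000_ERR_NVM bail-out:

	#include <stdint.h>

	static int decode_berase(uint8_t berasesz, uint32_t bank_bytes,
	    uint32_t *sector_size, uint32_t *iterations)
	{
		switch (berasesz) {
		case 0:		/* 256-byte sectors: bank_bytes/256 erases */
			*sector_size = 256;
			*iterations = bank_bytes / 256;
			break;
		case 1:		/* 4 KiB sectors: one erase per bank */
			*sector_size = 4096;
			*iterations = 1;
			break;
		case 2:		/* 8 KiB sectors (ICH9 only) */
			*sector_size = 8192;
			*iterations = 1;
			break;
		case 3:		/* 64 KiB sectors */
			*sector_size = 65536;
			*iterations = 1;
			break;
		default:	/* reserved; kept for parity with the driver */
			return -1;
		}
		return 0;
	}
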
4812 4811 /**
4813 4812 * e1000_valid_led_default_ich8lan - Set the default LED settings
4814 4813 * @hw: pointer to the HW structure
4815 4814 * @data: Pointer to the LED settings
4816 4815 *
4817 4816 * Reads the LED default settings from the NVM to data. If the NVM LED
4818 4817   * setting is all 0's or F's, set the LED default to a valid LED default
4819 4818 * setting.
4820 4819 **/
4821 4820 static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4822 4821 {
4823 4822 s32 ret_val;
4824 4823
4825 4824 DEBUGFUNC("e1000_valid_led_default_ich8lan");
4826 4825
4827 4826 ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
4828 4827 if (ret_val) {
4829 4828 DEBUGOUT("NVM Read Error\n");
4830 4829 return ret_val;
4831 4830 }
4832 4831
4833 4832 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4834 4833 *data = ID_LED_DEFAULT_ICH8LAN;
4835 4834
4836 4835 return E1000_SUCCESS;
4837 4836 }
4838 4837
4839 4838 /**
4840 4839 * e1000_id_led_init_pchlan - store LED configurations
4841 4840 * @hw: pointer to the HW structure
4842 4841 *
4843 4842 * PCH does not control LEDs via the LEDCTL register, rather it uses
4844 4843 * the PHY LED configuration register.
4845 4844 *
4846 4845 * PCH also does not have an "always on" or "always off" mode which
4847 4846 * complicates the ID feature. Instead of using the "on" mode to indicate
4848 4847 * in ledctl_mode2 the LEDs to use for ID (see e1000_id_led_init_generic()),
4849 4848 * use "link_up" mode. The LEDs will still ID on request if there is no
4850 4849 * link based on logic in e1000_led_[on|off]_pchlan().
4851 4850 **/
4852 4851 static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4853 4852 {
4854 4853 struct e1000_mac_info *mac = &hw->mac;
4855 4854 s32 ret_val;
4856 4855 const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4857 4856 const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4858 4857 u16 data, i, temp, shift;
4859 4858
4860 4859 DEBUGFUNC("e1000_id_led_init_pchlan");
4861 4860
4862 4861 /* Get default ID LED modes */
4863 4862 ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4864 4863 if (ret_val)
4865 4864 return ret_val;
4866 4865
4867 4866 mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
4868 4867 mac->ledctl_mode1 = mac->ledctl_default;
4869 4868 mac->ledctl_mode2 = mac->ledctl_default;
4870 4869
4871 4870 for (i = 0; i < 4; i++) {
4872 4871 temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4873 4872 shift = (i * 5);
4874 4873 switch (temp) {
4875 4874 case ID_LED_ON1_DEF2:
4876 4875 case ID_LED_ON1_ON2:
4877 4876 case ID_LED_ON1_OFF2:
4878 4877 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4879 4878 mac->ledctl_mode1 |= (ledctl_on << shift);
4880 4879 break;
4881 4880 case ID_LED_OFF1_DEF2:
4882 4881 case ID_LED_OFF1_ON2:
4883 4882 case ID_LED_OFF1_OFF2:
4884 4883 mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4885 4884 mac->ledctl_mode1 |= (ledctl_off << shift);
4886 4885 break;
4887 4886 default:
4888 4887 /* Do nothing */
4889 4888 break;
4890 4889 }
4891 4890 switch (temp) {
4892 4891 case ID_LED_DEF1_ON2:
4893 4892 case ID_LED_ON1_ON2:
4894 4893 case ID_LED_OFF1_ON2:
4895 4894 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4896 4895 mac->ledctl_mode2 |= (ledctl_on << shift);
4897 4896 break;
4898 4897 case ID_LED_DEF1_OFF2:
4899 4898 case ID_LED_ON1_OFF2:
4900 4899 case ID_LED_OFF1_OFF2:
4901 4900 mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4902 4901 mac->ledctl_mode2 |= (ledctl_off << shift);
4903 4902 break;
4904 4903 default:
4905 4904 /* Do nothing */
4906 4905 break;
4907 4906 }
4908 4907 }
4909 4908
4910 4909 return E1000_SUCCESS;
4911 4910 }
4912 4911
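The loop above crosses two different field widths: each LED's default mode is a 4-bit nibble in the NVM word (hence data >> (i << 2)), while the PHY LED configuration packs a 5-bit field per LED (hence shift = i * 5). A standalone sketch of the repack; the masks are illustrative, not the real E1000_PHY_LED0_* values:

	#include <stdint.h>

	/* 4-bit default mode for LED i, from the NVM word. */
	#define NVM_LED_MODE(word, i)	(((word) >> ((i) << 2)) & 0xFu)

	#define LED_FIELD_MASK	0x1Fu	/* 5 bits per LED in the PHY config */

	/* Install a 5-bit encoding (e.g. the ledctl_on/ledctl_off values
	 * above) into LED i's field without touching the other LEDs. */
	static uint32_t set_led_field(uint32_t ledctl, unsigned i, uint32_t mode)
	{
		unsigned shift = i * 5;

		ledctl &= ~(LED_FIELD_MASK << shift);
		ledctl |= (mode & LED_FIELD_MASK) << shift;
		return ledctl;
	}
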
4913 4912 /**
4914 4913 * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4915 4914 * @hw: pointer to the HW structure
4916 4915 *
4917 4916   * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability
4918 4917 * register, so the bus width is hard coded.
4919 4918 **/
4920 4919 static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4921 4920 {
4922 4921 struct e1000_bus_info *bus = &hw->bus;
4923 4922 s32 ret_val;
4924 4923
4925 4924 DEBUGFUNC("e1000_get_bus_info_ich8lan");
4926 4925
4927 4926 ret_val = e1000_get_bus_info_pcie_generic(hw);
4928 4927
4929 4928 /* ICH devices are "PCI Express"-ish. They have
4930 4929 * a configuration space, but do not contain
4931 4930 * PCI Express Capability registers, so bus width
4932 4931 * must be hardcoded.
4933 4932 */
4934 4933 if (bus->width == e1000_bus_width_unknown)
4935 4934 bus->width = e1000_bus_width_pcie_x1;
4936 4935
4937 4936 return ret_val;
4938 4937 }
4939 4938
4940 4939 /**
4941 4940 * e1000_reset_hw_ich8lan - Reset the hardware
4942 4941 * @hw: pointer to the HW structure
4943 4942 *
4944 4943 * Does a full reset of the hardware which includes a reset of the PHY and
4945 4944 * MAC.
4946 4945 **/
4947 4946 static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4948 4947 {
4949 4948 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4950 4949 u16 kum_cfg;
4951 4950 u32 ctrl, reg;
4952 4951 s32 ret_val;
4953 4952
4954 4953 DEBUGFUNC("e1000_reset_hw_ich8lan");
4955 4954
4956 4955 /* Prevent the PCI-E bus from sticking if there is no TLP connection
4957 4956 * on the last TLP read/write transaction when MAC is reset.
4958 4957 */
4959 4958 ret_val = e1000_disable_pcie_master_generic(hw);
4960 4959 if (ret_val)
4961 4960 DEBUGOUT("PCI-E Master disable polling has failed.\n");
4962 4961
4963 4962 DEBUGOUT("Masking off all interrupts\n");
4964 4963 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
4965 4964
4966 4965 /* Disable the Transmit and Receive units. Then delay to allow
4967 4966 * any pending transactions to complete before we hit the MAC
4968 4967 * with the global reset.
4969 4968 */
4970 4969 E1000_WRITE_REG(hw, E1000_RCTL, 0);
4971 4970 E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
4972 4971 E1000_WRITE_FLUSH(hw);
4973 4972
4974 4973 msec_delay(10);
4975 4974
4976 4975 /* Workaround for ICH8 bit corruption issue in FIFO memory */
4977 4976 if (hw->mac.type == e1000_ich8lan) {
4978 4977 /* Set Tx and Rx buffer allocation to 8k apiece. */
4979 4978 E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
4980 4979 /* Set Packet Buffer Size to 16k. */
4981 4980 E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
4982 4981 }
4983 4982
4984 4983 if (hw->mac.type == e1000_pchlan) {
4985 4984 		/* Save the NVM K1 bit setting */
4986 4985 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4987 4986 if (ret_val)
4988 4987 return ret_val;
4989 4988
4990 4989 if (kum_cfg & E1000_NVM_K1_ENABLE)
4991 4990 dev_spec->nvm_k1_enabled = TRUE;
4992 4991 else
4993 4992 dev_spec->nvm_k1_enabled = FALSE;
4994 4993 }
4995 4994
4996 4995 ctrl = E1000_READ_REG(hw, E1000_CTRL);
4997 4996
4998 4997 if (!hw->phy.ops.check_reset_block(hw)) {
4999 4998 /* Full-chip reset requires MAC and PHY reset at the same
5000 4999 * time to make sure the interface between MAC and the
5001 5000 * external PHY is reset.
5002 5001 */
5003 5002 ctrl |= E1000_CTRL_PHY_RST;
5004 5003
5005 5004 /* Gate automatic PHY configuration by hardware on
5006 5005 * non-managed 82579
5007 5006 */
5008 5007 if ((hw->mac.type == e1000_pch2lan) &&
5009 5008 !(E1000_READ_REG(hw, E1000_FWSM) & E1000_ICH_FWSM_FW_VALID))
5010 5009 e1000_gate_hw_phy_config_ich8lan(hw, TRUE);
5011 5010 }
5012 5011 ret_val = e1000_acquire_swflag_ich8lan(hw);
5013 5012 DEBUGOUT("Issuing a global reset to ich8lan\n");
5014 5013 E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
5015 5014 /* cannot issue a flush here because it hangs the hardware */
5016 5015 msec_delay(20);
5017 5016
5018 5017 /* Set Phy Config Counter to 50msec */
5019 5018 if (hw->mac.type == e1000_pch2lan) {
5020 5019 reg = E1000_READ_REG(hw, E1000_FEXTNVM3);
5021 5020 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
5022 5021 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
5023 5022 E1000_WRITE_REG(hw, E1000_FEXTNVM3, reg);
5024 5023 }
5025 5024
5026 5025 if (!ret_val)
5027 5026 E1000_MUTEX_UNLOCK(&hw->dev_spec.ich8lan.swflag_mutex);
5028 5027
5029 5028 if (ctrl & E1000_CTRL_PHY_RST) {
5030 5029 ret_val = hw->phy.ops.get_cfg_done(hw);
5031 5030 if (ret_val)
5032 5031 return ret_val;
5033 5032
5034 5033 ret_val = e1000_post_phy_reset_ich8lan(hw);
5035 5034 if (ret_val)
5036 5035 return ret_val;
5037 5036 }
5038 5037
5039 5038 /* For PCH, this write will make sure that any noise
5040 5039 * will be detected as a CRC error and be dropped rather than show up
5041 5040 * as a bad packet to the DMA engine.
5042 5041 */
5043 5042 if (hw->mac.type == e1000_pchlan)
5044 5043 E1000_WRITE_REG(hw, E1000_CRC_OFFSET, 0x65656565);
5045 5044
5046 5045 E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
5047 5046 E1000_READ_REG(hw, E1000_ICR);
5048 5047
5049 5048 reg = E1000_READ_REG(hw, E1000_KABGTXD);
5050 5049 reg |= E1000_KABGTXD_BGSQLBIAS;
5051 5050 E1000_WRITE_REG(hw, E1000_KABGTXD, reg);
5052 5051
5053 5052 return E1000_SUCCESS;
5054 5053 }
5055 5054
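The reset path above is strictly ordered: stop PCIe bus mastering, mask interrupts, quiesce Rx/Tx, let in-flight traffic drain, then pulse the global reset, folding the PHY reset bit into the same CTRL write when a full-chip reset is permitted. A condensed standalone sketch of that ordering; register offsets and bit values are illustrative only:

	#include <stdint.h>

	extern uint32_t rd(uint32_t reg);	/* hypothetical MMIO */
	extern void wr(uint32_t reg, uint32_t val);
	extern void sleep_ms(unsigned ms);

	enum { IMC, RCTL, TCTL, CTRL };		/* illustrative offsets */
	#define TCTL_PSP	0x00000008u
	#define CTRL_RST	0x04000000u
	#define CTRL_PHY_RST	0x80000000u

	static void reset_sketch(int full_chip)
	{
		uint32_t ctrl;

		wr(IMC, 0xFFFFFFFFu);	/* mask all interrupts */
		wr(RCTL, 0);		/* stop receive */
		wr(TCTL, TCTL_PSP);	/* stop transmit */
		sleep_ms(10);		/* drain pending transactions */

		ctrl = rd(CTRL) | CTRL_RST;
		if (full_chip)
			ctrl |= CTRL_PHY_RST;	/* MAC and PHY together */
		wr(CTRL, ctrl);
		sleep_ms(20);	/* no flush here: it hangs the hardware */
	}
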
5056 5055 /**
5057 5056 * e1000_init_hw_ich8lan - Initialize the hardware
5058 5057 * @hw: pointer to the HW structure
5059 5058 *
5060 5059 * Prepares the hardware for transmit and receive by doing the following:
5061 5060 * - initialize hardware bits
5062 5061 * - initialize LED identification
5063 5062 * - setup receive address registers
5064 5063 * - setup flow control
5065 5064 * - setup transmit descriptors
5066 5065 * - clear statistics
5067 5066 **/
5068 5067 static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
5069 5068 {
5070 5069 struct e1000_mac_info *mac = &hw->mac;
5071 5070 u32 ctrl_ext, txdctl, snoop;
5072 5071 s32 ret_val;
5073 5072 u16 i;
5074 5073
5075 5074 DEBUGFUNC("e1000_init_hw_ich8lan");
5076 5075
5077 5076 e1000_initialize_hw_bits_ich8lan(hw);
5078 5077
5079 5078 /* Initialize identification LED */
5080 5079 ret_val = mac->ops.id_led_init(hw);
5081 5080 /* An error is not fatal and we should not stop init due to this */
5082 5081 if (ret_val)
5083 5082 DEBUGOUT("Error initializing identification LED\n");
5084 5083
5085 5084 /* Setup the receive address. */
5086 5085 e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
5087 5086
5088 5087 /* Zero out the Multicast HASH table */
5089 5088 DEBUGOUT("Zeroing the MTA\n");
5090 5089 for (i = 0; i < mac->mta_reg_count; i++)
5091 5090 E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
5092 5091
5093 5092 /* The 82578 Rx buffer will stall if wakeup is enabled in host and
5094 5093 * the ME. Disable wakeup by clearing the host wakeup bit.
5095 5094 * Reset the phy after disabling host wakeup to reset the Rx buffer.
5096 5095 */
5097 5096 if (hw->phy.type == e1000_phy_82578) {
5098 5097 hw->phy.ops.read_reg(hw, BM_PORT_GEN_CFG, &i);
5099 5098 i &= ~BM_WUC_HOST_WU_BIT;
5100 5099 hw->phy.ops.write_reg(hw, BM_PORT_GEN_CFG, i);
5101 5100 ret_val = e1000_phy_hw_reset_ich8lan(hw);
5102 5101 if (ret_val)
5103 5102 return ret_val;
5104 5103 }
5105 5104
5106 5105 /* Setup link and flow control */
5107 5106 ret_val = mac->ops.setup_link(hw);
5108 5107
5109 5108 /* Set the transmit descriptor write-back policy for both queues */
5110 5109 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
5111 5110 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5112 5111 E1000_TXDCTL_FULL_TX_DESC_WB);
5113 5112 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5114 5113 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5115 5114 E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
5116 5115 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
5117 5116 txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
5118 5117 E1000_TXDCTL_FULL_TX_DESC_WB);
5119 5118 txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
5120 5119 E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
5121 5120 E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
5122 5121
5123 5122 /* ICH8 has opposite polarity of no_snoop bits.
5124 5123 * By default, we should use snoop behavior.
5125 5124 */
5126 5125 if (mac->type == e1000_ich8lan)
5127 5126 snoop = PCIE_ICH8_SNOOP_ALL;
5128 5127 else
5129 5128 snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
5130 5129 e1000_set_pcie_no_snoop_generic(hw, snoop);
5131 5130
5132 5131 ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
5133 5132 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
5134 5133 E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
5135 5134
5136 5135 /* Clear all of the statistics registers (clear on read). It is
5137 5136 * important that we do this after we have tried to establish link
5138 5137 * because the symbol error count will increment wildly if there
5139 5138 * is no link.
5140 5139 */
5141 5140 e1000_clear_hw_cntrs_ich8lan(hw);
5142 5141
5143 5142 return ret_val;
5144 5143 }
5145 5144
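The TXDCTL programming above is a textbook read-modify-write: clear one multi-bit threshold field, then OR in the desired policy, leaving every other bit intact. A generic standalone sketch of the idiom; the masks are illustrative, not the real E1000_TXDCTL_* constants:

	#include <stdint.h>

	#define TXDCTL_WTHRESH_MASK	0x003F0000u	/* illustrative */
	#define TXDCTL_FULL_WB		0x00010000u	/* illustrative */

	/* Replace one field of a register image, preserving the rest. */
	static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t val)
	{
		return (reg & ~mask) | (val & mask);
	}

	/* Usage, mirroring the TXDCTL writes above:
	 *	txdctl = set_field(txdctl, TXDCTL_WTHRESH_MASK,
	 *	    TXDCTL_FULL_WB);
	 */
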
5146 5145 /**
5147 5146 * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
5148 5147 * @hw: pointer to the HW structure
5149 5148 *
5150 5149 * Sets/Clears required hardware bits necessary for correctly setting up the
5151 5150 * hardware for transmit and receive.
5152 5151 **/
5153 5152 static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
5154 5153 {
5155 5154 u32 reg;
5156 5155
5157 5156 DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
5158 5157
5159 5158 /* Extended Device Control */
5160 5159 reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
5161 5160 reg |= (1 << 22);
5162 5161 /* Enable PHY low-power state when MAC is at D3 w/o WoL */
5163 5162 if (hw->mac.type >= e1000_pchlan)
5164 5163 reg |= E1000_CTRL_EXT_PHYPDEN;
5165 5164 E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
5166 5165
5167 5166 /* Transmit Descriptor Control 0 */
5168 5167 reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
5169 5168 reg |= (1 << 22);
5170 5169 E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
5171 5170
5172 5171 /* Transmit Descriptor Control 1 */
5173 5172 reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
5174 5173 reg |= (1 << 22);
5175 5174 E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
5176 5175
5177 5176 /* Transmit Arbitration Control 0 */
5178 5177 reg = E1000_READ_REG(hw, E1000_TARC(0));
5179 5178 if (hw->mac.type == e1000_ich8lan)
5180 5179 reg |= (1 << 28) | (1 << 29);
5181 5180 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
5182 5181 E1000_WRITE_REG(hw, E1000_TARC(0), reg);
5183 5182
5184 5183 /* Transmit Arbitration Control 1 */
5185 5184 reg = E1000_READ_REG(hw, E1000_TARC(1));
5186 5185 if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
5187 5186 reg &= ~(1 << 28);
5188 5187 else
5189 5188 reg |= (1 << 28);
5190 5189 reg |= (1 << 24) | (1 << 26) | (1 << 30);
5191 5190 E1000_WRITE_REG(hw, E1000_TARC(1), reg);
5192 5191
5193 5192 /* Device Status */
5194 5193 if (hw->mac.type == e1000_ich8lan) {
5195 5194 reg = E1000_READ_REG(hw, E1000_STATUS);
5196 5195 reg &= ~(1UL << 31);
5197 5196 E1000_WRITE_REG(hw, E1000_STATUS, reg);
5198 5197 }
5199 5198
5200 5199 	/* Work around a descriptor data corruption issue during NFSv2 UDP
5201 5200 	 * traffic by disabling the NFS filtering capability.
5202 5201 */
5203 5202 reg = E1000_READ_REG(hw, E1000_RFCTL);
5204 5203 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
5205 5204
5206 5205 /* Disable IPv6 extension header parsing because some malformed
5207 5206 * IPv6 headers can hang the Rx.
5208 5207 */
5209 5208 if (hw->mac.type == e1000_ich8lan)
5210 5209 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
5211 5210 E1000_WRITE_REG(hw, E1000_RFCTL, reg);
5212 5211
5213 5212 	/* Enable ECC on Lynxpoint and newer PCHs */
5214 - if ((hw->mac.type == e1000_pch_lpt) ||
5215 - (hw->mac.type == e1000_pch_spt)) {
5213 + if (hw->mac.type >= e1000_pch_lpt) {
5216 5214 reg = E1000_READ_REG(hw, E1000_PBECCSTS);
5217 5215 reg |= E1000_PBECCSTS_ECC_ENABLE;
5218 5216 E1000_WRITE_REG(hw, E1000_PBECCSTS, reg);
5219 5217
5220 5218 reg = E1000_READ_REG(hw, E1000_CTRL);
5221 5219 reg |= E1000_CTRL_MEHE;
5222 5220 E1000_WRITE_REG(hw, E1000_CTRL, reg);
5223 5221 }
5224 5222
5225 5223 return;
5226 5224 }
5227 5225
5228 5226 /**
5229 5227 * e1000_setup_link_ich8lan - Setup flow control and link settings
5230 5228 * @hw: pointer to the HW structure
5231 5229 *
5232 5230 * Determines which flow control settings to use, then configures flow
5233 5231 * control. Calls the appropriate media-specific link configuration
5234 5232 * function. Assuming the adapter has a valid link partner, a valid link
5235 5233 * should be established. Assumes the hardware has previously been reset
5236 5234 * and the transmitter and receiver are not enabled.
5237 5235 **/
5238 5236 static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
5239 5237 {
5240 5238 s32 ret_val;
5241 5239
5242 5240 DEBUGFUNC("e1000_setup_link_ich8lan");
5243 5241
5244 5242 if (hw->phy.ops.check_reset_block(hw))
5245 5243 return E1000_SUCCESS;
5246 5244
5247 5245 /* ICH parts do not have a word in the NVM to determine
5248 5246 * the default flow control setting, so we explicitly
5249 5247 * set it to full.
5250 5248 */
5251 5249 if (hw->fc.requested_mode == e1000_fc_default)
5252 5250 hw->fc.requested_mode = e1000_fc_full;
5253 5251
5254 5252 /* Save off the requested flow control mode for use later. Depending
5255 5253 * on the link partner's capabilities, we may or may not use this mode.
5256 5254 */
5257 5255 hw->fc.current_mode = hw->fc.requested_mode;
5258 5256
5259 5257 DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
5260 5258 hw->fc.current_mode);
5261 5259
5262 5260 /* Continue to configure the copper link. */
5263 5261 ret_val = hw->mac.ops.setup_physical_interface(hw);
5264 5262 if (ret_val)
5265 5263 return ret_val;
5266 5264
5267 5265 E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
5268 5266 if ((hw->phy.type == e1000_phy_82578) ||
5269 5267 (hw->phy.type == e1000_phy_82579) ||
5270 5268 (hw->phy.type == e1000_phy_i217) ||
5271 5269 (hw->phy.type == e1000_phy_82577)) {
5272 5270 E1000_WRITE_REG(hw, E1000_FCRTV_PCH, hw->fc.refresh_time);
5273 5271
5274 5272 ret_val = hw->phy.ops.write_reg(hw,
5275 5273 PHY_REG(BM_PORT_CTRL_PAGE, 27),
5276 5274 hw->fc.pause_time);
5277 5275 if (ret_val)
5278 5276 return ret_val;
5279 5277 }
5280 5278
5281 5279 return e1000_set_fc_watermarks_generic(hw);
5282 5280 }
5283 5281
5284 5282 /**
5285 5283 * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
5286 5284 * @hw: pointer to the HW structure
5287 5285 *
5288 5286 * Configures the kumeran interface to the PHY to wait the appropriate time
5289 5287 * when polling the PHY, then call the generic setup_copper_link to finish
5290 5288 * configuring the copper link.
5291 5289 **/
5292 5290 static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
5293 5291 {
5294 5292 u32 ctrl;
5295 5293 s32 ret_val;
5296 5294 u16 reg_data;
5297 5295
5298 5296 DEBUGFUNC("e1000_setup_copper_link_ich8lan");
5299 5297
5300 5298 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5301 5299 ctrl |= E1000_CTRL_SLU;
5302 5300 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5303 5301 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5304 5302
5305 5303 /* Set the mac to wait the maximum time between each iteration
5306 5304 * and increase the max iterations when polling the phy;
5307 5305 * this fixes erroneous timeouts at 10Mbps.
5308 5306 */
5309 5307 ret_val = e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_TIMEOUTS,
5310 5308 0xFFFF);
5311 5309 if (ret_val)
5312 5310 return ret_val;
5313 5311 ret_val = e1000_read_kmrn_reg_generic(hw,
5314 5312 E1000_KMRNCTRLSTA_INBAND_PARAM,
5315 5313 					       &reg_data);
5316 5314 if (ret_val)
5317 5315 return ret_val;
5318 5316 reg_data |= 0x3F;
5319 5317 ret_val = e1000_write_kmrn_reg_generic(hw,
5320 5318 E1000_KMRNCTRLSTA_INBAND_PARAM,
5321 5319 reg_data);
5322 5320 if (ret_val)
5323 5321 return ret_val;
5324 5322
5325 5323 switch (hw->phy.type) {
5326 5324 case e1000_phy_igp_3:
5327 5325 ret_val = e1000_copper_link_setup_igp(hw);
5328 5326 if (ret_val)
5329 5327 return ret_val;
5330 5328 break;
5331 5329 case e1000_phy_bm:
5332 5330 case e1000_phy_82578:
5333 5331 ret_val = e1000_copper_link_setup_m88(hw);
5334 5332 if (ret_val)
5335 5333 return ret_val;
5336 5334 break;
5337 5335 case e1000_phy_82577:
5338 5336 case e1000_phy_82579:
5339 5337 ret_val = e1000_copper_link_setup_82577(hw);
5340 5338 if (ret_val)
5341 5339 return ret_val;
5342 5340 break;
5343 5341 case e1000_phy_ife:
5344 5342 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL,
5345 5343 						       &reg_data);
5346 5344 if (ret_val)
5347 5345 return ret_val;
5348 5346
5349 5347 reg_data &= ~IFE_PMC_AUTO_MDIX;
5350 5348
5351 5349 switch (hw->phy.mdix) {
5352 5350 case 1:
5353 5351 reg_data &= ~IFE_PMC_FORCE_MDIX;
5354 5352 break;
5355 5353 case 2:
5356 5354 reg_data |= IFE_PMC_FORCE_MDIX;
5357 5355 break;
5358 5356 case 0:
5359 5357 default:
5360 5358 reg_data |= IFE_PMC_AUTO_MDIX;
5361 5359 break;
5362 5360 }
5363 5361 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL,
5364 5362 reg_data);
5365 5363 if (ret_val)
5366 5364 return ret_val;
5367 5365 break;
5368 5366 default:
5369 5367 break;
5370 5368 }
5371 5369
5372 5370 return e1000_setup_copper_link_generic(hw);
5373 5371 }
5374 5372
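For the IFE PHY, the mdix policy above collapses onto two control bits: an auto-crossover enable and a forced-crossover select. A standalone sketch of the mapping (1 forces MDI, 2 forces MDI-X, anything else restores auto-detection); the bit positions are hypothetical:

	#include <stdint.h>

	#define PMC_AUTO_MDIX	0x0080u		/* hypothetical bits */
	#define PMC_FORCE_MDIX	0x0040u

	static uint16_t ife_mdix_bits(uint16_t reg, uint8_t mdix)
	{
		reg &= ~PMC_AUTO_MDIX;		/* manual unless chosen below */
		switch (mdix) {
		case 1:				/* force straight MDI */
			reg &= ~PMC_FORCE_MDIX;
			break;
		case 2:				/* force crossover MDI-X */
			reg |= PMC_FORCE_MDIX;
			break;
		default:			/* 0 or unknown: auto */
			reg |= PMC_AUTO_MDIX;
			break;
		}
		return reg;
	}
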
5375 5373 /**
5376 5374 * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
5377 5375 * @hw: pointer to the HW structure
5378 5376 *
5379 5377 * Calls the PHY specific link setup function and then calls the
5380 5378 * generic setup_copper_link to finish configuring the link for
5381 5379 * Lynxpoint PCH devices
5382 5380 **/
5383 5381 static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
5384 5382 {
5385 5383 u32 ctrl;
5386 5384 s32 ret_val;
5387 5385
5388 5386 DEBUGFUNC("e1000_setup_copper_link_pch_lpt");
5389 5387
5390 5388 ctrl = E1000_READ_REG(hw, E1000_CTRL);
5391 5389 ctrl |= E1000_CTRL_SLU;
5392 5390 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
5393 5391 E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
5394 5392
5395 5393 ret_val = e1000_copper_link_setup_82577(hw);
5396 5394 if (ret_val)
5397 5395 return ret_val;
5398 5396
5399 5397 return e1000_setup_copper_link_generic(hw);
5400 5398 }
5401 5399
5402 5400 /**
5403 5401 * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
5404 5402 * @hw: pointer to the HW structure
5405 5403 * @speed: pointer to store current link speed
5406 5404 * @duplex: pointer to store the current link duplex
5407 5405 *
5408 5406 * Calls the generic get_speed_and_duplex to retrieve the current link
5409 5407 * information and then calls the Kumeran lock loss workaround for links at
5410 5408 * gigabit speeds.
5411 5409 **/
5412 5410 static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
5413 5411 u16 *duplex)
5414 5412 {
5415 5413 s32 ret_val;
5416 5414
5417 5415 DEBUGFUNC("e1000_get_link_up_info_ich8lan");
5418 5416
5419 5417 ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
5420 5418 if (ret_val)
5421 5419 return ret_val;
5422 5420
5423 5421 if ((hw->mac.type == e1000_ich8lan) &&
5424 5422 (hw->phy.type == e1000_phy_igp_3) &&
5425 5423 (*speed == SPEED_1000)) {
5426 5424 ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
5427 5425 }
5428 5426
5429 5427 return ret_val;
5430 5428 }
5431 5429
5432 5430 /**
5433 5431 * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
5434 5432 * @hw: pointer to the HW structure
5435 5433 *
5436 5434 * Work-around for 82566 Kumeran PCS lock loss:
5437 5435 * On link status change (i.e. PCI reset, speed change) and link is up and
5438 5436 * speed is gigabit-
5439 5437 * 0) if workaround is optionally disabled do nothing
5440 5438 * 1) wait 1ms for Kumeran link to come up
5441 5439 * 2) check Kumeran Diagnostic register PCS lock loss bit
5442 5440 * 3) if not set the link is locked (all is good), otherwise...
5443 5441 * 4) reset the PHY
5444 5442 * 5) repeat up to 10 times
5445 5443 * Note: this is only called for IGP3 copper when speed is 1gb.
5446 5444 **/
5447 5445 static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5448 5446 {
5449 5447 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5450 5448 u32 phy_ctrl;
5451 5449 s32 ret_val;
5452 5450 u16 i, data;
5453 5451 bool link;
5454 5452
5455 5453 DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
5456 5454
5457 5455 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5458 5456 return E1000_SUCCESS;
5459 5457
5460 5458 	/* Make sure link is up before proceeding. If not, just return.
5461 5459 	 * Attempting this while the link is negotiating fouled up link
5462 5460 * stability
5463 5461 */
5464 5462 ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
5465 5463 if (!link)
5466 5464 return E1000_SUCCESS;
5467 5465
5468 5466 for (i = 0; i < 10; i++) {
5469 5467 /* read once to clear */
5470 5468 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5471 5469 if (ret_val)
5472 5470 return ret_val;
5473 5471 /* and again to get new status */
5474 5472 ret_val = hw->phy.ops.read_reg(hw, IGP3_KMRN_DIAG, &data);
5475 5473 if (ret_val)
5476 5474 return ret_val;
5477 5475
5478 5476 /* check for PCS lock */
5479 5477 if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5480 5478 return E1000_SUCCESS;
5481 5479
5482 5480 /* Issue PHY reset */
5483 5481 hw->phy.ops.reset(hw);
5484 5482 msec_delay_irq(5);
5485 5483 }
5486 5484 /* Disable GigE link negotiation */
5487 5485 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5488 5486 phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5489 5487 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5490 5488 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5491 5489
5492 5490 /* Call gig speed drop workaround on Gig disable before accessing
5493 5491 * any PHY registers
5494 5492 */
5495 5493 e1000_gig_downshift_workaround_ich8lan(hw);
5496 5494
5497 5495 /* unable to acquire PCS lock */
5498 5496 return -E1000_ERR_PHY;
5499 5497 }
5500 5498
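The workaround loop reads IGP3_KMRN_DIAG twice on purpose: the PCS lock-loss bit is latched, so the first read flushes the stale latch and the second samples live status. A standalone sketch of that latched-register idiom; phy_read() and both constants are hypothetical:

	#include <stdint.h>

	extern int phy_read(unsigned reg, uint16_t *val);	/* 0 on success */

	#define KMRN_DIAG	31		/* illustrative register */
	#define PCS_LOCK_LOSS	0x0001u		/* illustrative bit */

	/* Returns 1 if lock loss is currently asserted, 0 if clear,
	 * -1 on a read failure. */
	static int pcs_lock_lost(void)
	{
		uint16_t data;

		if (phy_read(KMRN_DIAG, &data) != 0)	/* clear the latch */
			return -1;
		if (phy_read(KMRN_DIAG, &data) != 0)	/* live status */
			return -1;
		return (data & PCS_LOCK_LOSS) ? 1 : 0;
	}
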
5501 5499 /**
5502 5500 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5503 5501 * @hw: pointer to the HW structure
5504 5502 * @state: boolean value used to set the current Kumeran workaround state
5505 5503 *
5506 5504 * If ICH8, set the current Kumeran workaround state (enabled - TRUE
5507 5505 * /disabled - FALSE).
5508 5506 **/
5509 5507 void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5510 5508 bool state)
5511 5509 {
5512 5510 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5513 5511
5514 5512 DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
5515 5513
5516 5514 if (hw->mac.type != e1000_ich8lan) {
5517 5515 DEBUGOUT("Workaround applies to ICH8 only.\n");
5518 5516 return;
5519 5517 }
5520 5518
5521 5519 dev_spec->kmrn_lock_loss_workaround_enabled = state;
5522 5520
5523 5521 return;
5524 5522 }
5525 5523
5526 5524 /**
5527 5525   * e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5528 5526 * @hw: pointer to the HW structure
5529 5527 *
5530 5528 * Workaround for 82566 power-down on D3 entry:
5531 5529 * 1) disable gigabit link
5532 5530 * 2) write VR power-down enable
5533 5531 * 3) read it back
5534 5532 * Continue if successful, else issue LCD reset and repeat
5535 5533 **/
5536 5534 void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5537 5535 {
5538 5536 u32 reg;
5539 5537 u16 data;
5540 5538 u8 retry = 0;
5541 5539
5542 5540 DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
5543 5541
5544 5542 if (hw->phy.type != e1000_phy_igp_3)
5545 5543 return;
5546 5544
5547 5545 /* Try the workaround twice (if needed) */
5548 5546 do {
5549 5547 /* Disable link */
5550 5548 reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
5551 5549 reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5552 5550 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5553 5551 E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
5554 5552
5555 5553 /* Call gig speed drop workaround on Gig disable before
5556 5554 * accessing any PHY registers
5557 5555 */
5558 5556 if (hw->mac.type == e1000_ich8lan)
5559 5557 e1000_gig_downshift_workaround_ich8lan(hw);
5560 5558
5561 5559 /* Write VR power-down enable */
5562 5560 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5563 5561 data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5564 5562 hw->phy.ops.write_reg(hw, IGP3_VR_CTRL,
5565 5563 data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5566 5564
5567 5565 /* Read it back and test */
5568 5566 hw->phy.ops.read_reg(hw, IGP3_VR_CTRL, &data);
5569 5567 data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5570 5568 if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5571 5569 break;
5572 5570
5573 5571 /* Issue PHY reset and repeat at most one more time */
5574 5572 reg = E1000_READ_REG(hw, E1000_CTRL);
5575 5573 E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
5576 5574 retry++;
5577 5575 } while (retry);
5578 5576 }
5579 5577
5580 5578 /**
5581 5579 * e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5582 5580 * @hw: pointer to the HW structure
5583 5581 *
5584 5582   * Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
5585 5583 * LPLU, Gig disable, MDIC PHY reset):
5586 5584 * 1) Set Kumeran Near-end loopback
5587 5585 * 2) Clear Kumeran Near-end loopback
5588 5586 * Should only be called for ICH8[m] devices with any 1G Phy.
5589 5587 **/
5590 5588 void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5591 5589 {
5592 5590 s32 ret_val;
5593 5591 u16 reg_data;
5594 5592
5595 5593 DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
5596 5594
5597 5595 if ((hw->mac.type != e1000_ich8lan) ||
5598 5596 (hw->phy.type == e1000_phy_ife))
5599 5597 return;
5600 5598
5601 5599 ret_val = e1000_read_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5602 5600 					     &reg_data);
5603 5601 if (ret_val)
5604 5602 return;
5605 5603 reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5606 5604 ret_val = e1000_write_kmrn_reg_generic(hw,
5607 5605 E1000_KMRNCTRLSTA_DIAG_OFFSET,
5608 5606 reg_data);
5609 5607 if (ret_val)
5610 5608 return;
5611 5609 reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5612 5610 e1000_write_kmrn_reg_generic(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5613 5611 reg_data);
5614 5612 }
5615 5613
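The downshift workaround is a pure set-then-clear pulse of the Kumeran near-end-loopback bit, leaving the rest of the diagnostic register as read. A standalone sketch of the pulse; the accessors and values are hypothetical stand-ins for the generic Kumeran helpers:

	#include <stdint.h>

	extern int kmrn_read(unsigned reg, uint16_t *val);	/* hypothetical */
	extern int kmrn_write(unsigned reg, uint16_t val);	/* hypothetical */

	#define DIAG_OFFSET	0x0118u		/* illustrative */
	#define NELPBK		0x1000u		/* illustrative loopback bit */

	static void pulse_nelpbk(void)
	{
		uint16_t v;

		if (kmrn_read(DIAG_OFFSET, &v) != 0)
			return;
		if (kmrn_write(DIAG_OFFSET, v | NELPBK) != 0)
			return;				/* set... */
		kmrn_write(DIAG_OFFSET, v & ~NELPBK);	/* ...then clear */
	}
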
5616 5614 /**
5617 5615 * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5618 5616 * @hw: pointer to the HW structure
5619 5617 *
5620 5618 * During S0 to Sx transition, it is possible the link remains at gig
5621 5619 * instead of negotiating to a lower speed. Before going to Sx, set
5622 5620 * 'Gig Disable' to force link speed negotiation to a lower speed based on
5623 5621 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
5624 5622 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5625 5623 * needs to be written.
5626 5624 * Parts that support (and are linked to a partner which support) EEE in
5627 5625 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5628 5626 * than 10Mbps w/o EEE.
5629 5627 **/
5630 5628 void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5631 5629 {
5632 5630 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5633 5631 u32 phy_ctrl;
5634 5632 s32 ret_val;
5635 5633
5636 5634 DEBUGFUNC("e1000_suspend_workarounds_ich8lan");
5637 5635
5638 5636 phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
5639 5637 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5640 5638
5641 5639 if (hw->phy.type == e1000_phy_i217) {
5642 5640 u16 phy_reg, device_id = hw->device_id;
5643 5641
5644 5642 if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5645 5643 (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5646 5644 (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5647 5645 (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5648 - (hw->mac.type == e1000_pch_spt)) {
5646 + (hw->mac.type >= e1000_pch_spt)) {
5649 5647 u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
5650 5648
5651 5649 E1000_WRITE_REG(hw, E1000_FEXTNVM6,
5652 5650 fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5653 5651 }
5654 5652
5655 5653 ret_val = hw->phy.ops.acquire(hw);
5656 5654 if (ret_val)
5657 5655 goto out;
5658 5656
5659 5657 if (!dev_spec->eee_disable) {
5660 5658 u16 eee_advert;
5661 5659
5662 5660 ret_val =
5663 5661 e1000_read_emi_reg_locked(hw,
5664 5662 I217_EEE_ADVERTISEMENT,
5665 5663 &eee_advert);
5666 5664 if (ret_val)
5667 5665 goto release;
5668 5666
5669 5667 /* Disable LPLU if both link partners support 100BaseT
5670 5668 * EEE and 100Full is advertised on both ends of the
5671 5669 * link, and enable Auto Enable LPI since there will
5672 5670 * be no driver to enable LPI while in Sx.
5673 5671 */
5674 5672 if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5675 5673 (dev_spec->eee_lp_ability &
5676 5674 I82579_EEE_100_SUPPORTED) &&
5677 5675 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5678 5676 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5679 5677 E1000_PHY_CTRL_NOND0A_LPLU);
5680 5678
5681 5679 /* Set Auto Enable LPI after link up */
5682 5680 hw->phy.ops.read_reg_locked(hw,
5683 5681 I217_LPI_GPIO_CTRL,
5684 5682 &phy_reg);
5685 5683 phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5686 5684 hw->phy.ops.write_reg_locked(hw,
5687 5685 I217_LPI_GPIO_CTRL,
5688 5686 phy_reg);
5689 5687 }
5690 5688 }
5691 5689
5692 5690 /* For i217 Intel Rapid Start Technology support,
5693 5691 * when the system is going into Sx and no manageability engine
5694 5692 * is present, the driver must configure proxy to reset only on
5695 5693 * power good. LPI (Low Power Idle) state must also reset only
5696 5694 * on power good, as well as the MTA (Multicast table array).
5697 5695 * The SMBus release must also be disabled on LCD reset.
5698 5696 */
5699 5697 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5700 5698 E1000_ICH_FWSM_FW_VALID)) {
5701 5699 /* Enable proxy to reset only on power good. */
5702 5700 hw->phy.ops.read_reg_locked(hw, I217_PROXY_CTRL,
5703 5701 &phy_reg);
5704 5702 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5705 5703 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL,
5706 5704 phy_reg);
5707 5705
5708 5706 /* Set bit enable LPI (EEE) to reset only on
5709 5707 * power good.
5710 5708 */
5711 5709 hw->phy.ops.read_reg_locked(hw, I217_SxCTRL, &phy_reg);
5712 5710 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5713 5711 hw->phy.ops.write_reg_locked(hw, I217_SxCTRL, phy_reg);
5714 5712
5715 5713 /* Disable the SMB release on LCD reset. */
5716 5714 hw->phy.ops.read_reg_locked(hw, I217_MEMPWR, &phy_reg);
5717 5715 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5718 5716 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5719 5717 }
5720 5718
5721 5719 /* Enable MTA to reset for Intel Rapid Start Technology
5722 5720 * Support
5723 5721 */
5724 5722 hw->phy.ops.read_reg_locked(hw, I217_CGFREG, &phy_reg);
5725 5723 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5726 5724 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5727 5725
5728 5726 release:
5729 5727 hw->phy.ops.release(hw);
5730 5728 }
5731 5729 out:
5732 5730 E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
5733 5731
5734 5732 if (hw->mac.type == e1000_ich8lan)
5735 5733 e1000_gig_downshift_workaround_ich8lan(hw);
5736 5734
5737 5735 if (hw->mac.type >= e1000_pchlan) {
5738 5736 e1000_oem_bits_config_ich8lan(hw, FALSE);
5739 5737
5740 5738 /* Reset PHY to activate OEM bits on 82577/8 */
5741 5739 if (hw->mac.type == e1000_pchlan)
5742 5740 e1000_phy_hw_reset_generic(hw);
5743 5741
5744 5742 ret_val = hw->phy.ops.acquire(hw);
5745 5743 if (ret_val)
5746 5744 return;
5747 5745 e1000_write_smbus_addr(hw);
5748 5746 hw->phy.ops.release(hw);
5749 5747 }
5750 5748
5751 5749 return;
5752 5750 }
5753 5751
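The LPLU decision buried in the suspend path reduces to a three-way AND: the local EEE advertisement, the link partner's EEE ability, and the autoneg settings must all include 100 Mb/s support before LPLU is dropped. A boolean sketch of that test; the flag values are hypothetical:

	#include <stdbool.h>
	#include <stdint.h>

	#define EEE_100_SUPPORTED	0x0002u	/* hypothetical flags */
	#define ADV_100_FULL		0x0008u

	static bool can_drop_lplu(uint16_t eee_advert, uint16_t lp_ability,
	    uint16_t autoneg_advertised)
	{
		return (eee_advert & EEE_100_SUPPORTED) &&
		    (lp_ability & EEE_100_SUPPORTED) &&
		    (autoneg_advertised & ADV_100_FULL);
	}
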
5754 5752 /**
5755 5753 * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5756 5754 * @hw: pointer to the HW structure
5757 5755 *
5758 5756 * During Sx to S0 transitions on non-managed devices or managed devices
5759 5757 * on which PHY resets are not blocked, if the PHY registers cannot be
5760 5758   * accessed properly by the s/w, toggle the LANPHYPC value to power cycle
5761 5759 * the PHY.
5762 5760 * On i217, setup Intel Rapid Start Technology.
5763 5761 **/
5764 5762 u32 e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5765 5763 {
5766 5764 s32 ret_val;
5767 5765
5768 5766 DEBUGFUNC("e1000_resume_workarounds_pchlan");
5769 5767 if (hw->mac.type < e1000_pch2lan)
5770 5768 return E1000_SUCCESS;
5771 5769
5772 5770 ret_val = e1000_init_phy_workarounds_pchlan(hw);
5773 5771 if (ret_val) {
5774 5772 DEBUGOUT1("Failed to init PHY flow ret_val=%d\n", ret_val);
5775 5773 return ret_val;
5776 5774 }
5777 5775
5778 5776 /* For i217 Intel Rapid Start Technology support when the system
5779 5777 * is transitioning from Sx and no manageability engine is present
5780 5778 * configure SMBus to restore on reset, disable proxy, and enable
5781 5779 * the reset on MTA (Multicast table array).
5782 5780 */
5783 5781 if (hw->phy.type == e1000_phy_i217) {
5784 5782 u16 phy_reg;
5785 5783
5786 5784 ret_val = hw->phy.ops.acquire(hw);
5787 5785 if (ret_val) {
5788 5786 DEBUGOUT("Failed to setup iRST\n");
5789 5787 return ret_val;
5790 5788 }
5791 5789
5792 5790 /* Clear Auto Enable LPI after link up */
5793 5791 hw->phy.ops.read_reg_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5794 5792 phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5795 5793 hw->phy.ops.write_reg_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5796 5794
5797 5795 if (!(E1000_READ_REG(hw, E1000_FWSM) &
5798 5796 E1000_ICH_FWSM_FW_VALID)) {
5799 5797 /* Restore clear on SMB if no manageability engine
5800 5798 * is present
5801 5799 */
5802 5800 ret_val = hw->phy.ops.read_reg_locked(hw, I217_MEMPWR,
5803 5801 &phy_reg);
5804 5802 if (ret_val)
5805 5803 goto release;
5806 5804 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5807 5805 hw->phy.ops.write_reg_locked(hw, I217_MEMPWR, phy_reg);
5808 5806
5809 5807 /* Disable Proxy */
5810 5808 hw->phy.ops.write_reg_locked(hw, I217_PROXY_CTRL, 0);
5811 5809 }
5812 5810 /* Enable reset on MTA */
5813 5811 ret_val = hw->phy.ops.read_reg_locked(hw, I217_CGFREG,
5814 5812 &phy_reg);
5815 5813 if (ret_val)
5816 5814 goto release;
5817 5815 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5818 5816 hw->phy.ops.write_reg_locked(hw, I217_CGFREG, phy_reg);
5819 5817 release:
5820 5818 if (ret_val)
5821 5819 DEBUGOUT1("Error %d in resume workarounds\n", ret_val);
5822 5820 hw->phy.ops.release(hw);
5823 5821 return ret_val;
5824 5822 }
5825 5823 return E1000_SUCCESS;
5826 5824 }
5827 5825
5828 5826 /**
5829 5827 * e1000_cleanup_led_ich8lan - Restore the default LED operation
5830 5828 * @hw: pointer to the HW structure
5831 5829 *
5832 5830 * Return the LED back to the default configuration.
5833 5831 **/
5834 5832 static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5835 5833 {
5836 5834 DEBUGFUNC("e1000_cleanup_led_ich8lan");
5837 5835
5838 5836 if (hw->phy.type == e1000_phy_ife)
5839 5837 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5840 5838 0);
5841 5839
5842 5840 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
5843 5841 return E1000_SUCCESS;
5844 5842 }
5845 5843
5846 5844 /**
5847 5845 * e1000_led_on_ich8lan - Turn LEDs on
5848 5846 * @hw: pointer to the HW structure
5849 5847 *
5850 5848 * Turn on the LEDs.
5851 5849 **/
5852 5850 static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5853 5851 {
5854 5852 DEBUGFUNC("e1000_led_on_ich8lan");
5855 5853
5856 5854 if (hw->phy.type == e1000_phy_ife)
5857 5855 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5858 5856 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5859 5857
5860 5858 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
5861 5859 return E1000_SUCCESS;
5862 5860 }
5863 5861
5864 5862 /**
5865 5863 * e1000_led_off_ich8lan - Turn LEDs off
5866 5864 * @hw: pointer to the HW structure
5867 5865 *
5868 5866 * Turn off the LEDs.
5869 5867 **/
5870 5868 static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5871 5869 {
5872 5870 DEBUGFUNC("e1000_led_off_ich8lan");
5873 5871
5874 5872 if (hw->phy.type == e1000_phy_ife)
5875 5873 return hw->phy.ops.write_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5876 5874 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
5877 5875
5878 5876 E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
5879 5877 return E1000_SUCCESS;
5880 5878 }
5881 5879
5882 5880 /**
5883 5881 * e1000_setup_led_pchlan - Configures SW controllable LED
5884 5882 * @hw: pointer to the HW structure
5885 5883 *
5886 5884 * This prepares the SW controllable LED for use.
5887 5885 **/
5888 5886 static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5889 5887 {
5890 5888 DEBUGFUNC("e1000_setup_led_pchlan");
5891 5889
5892 5890 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5893 5891 (u16)hw->mac.ledctl_mode1);
5894 5892 }
5895 5893
5896 5894 /**
5897 5895 * e1000_cleanup_led_pchlan - Restore the default LED operation
5898 5896 * @hw: pointer to the HW structure
5899 5897 *
5900 5898 * Return the LED back to the default configuration.
5901 5899 **/
5902 5900 static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5903 5901 {
5904 5902 DEBUGFUNC("e1000_cleanup_led_pchlan");
5905 5903
5906 5904 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG,
5907 5905 (u16)hw->mac.ledctl_default);
5908 5906 }
5909 5907
5910 5908 /**
5911 5909 * e1000_led_on_pchlan - Turn LEDs on
5912 5910 * @hw: pointer to the HW structure
5913 5911 *
5914 5912 * Turn on the LEDs.
5915 5913 **/
5916 5914 static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5917 5915 {
5918 5916 u16 data = (u16)hw->mac.ledctl_mode2;
5919 5917 u32 i, led;
5920 5918
5921 5919 DEBUGFUNC("e1000_led_on_pchlan");
5922 5920
5923 5921 /* If no link, then turn LED on by setting the invert bit
5924 5922 	 * for each LED whose mode is "link_up" in ledctl_mode2.
5925 5923 */
5926 5924 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5927 5925 for (i = 0; i < 3; i++) {
5928 5926 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5929 5927 if ((led & E1000_PHY_LED0_MODE_MASK) !=
5930 5928 E1000_LEDCTL_MODE_LINK_UP)
5931 5929 continue;
5932 5930 if (led & E1000_PHY_LED0_IVRT)
5933 5931 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5934 5932 else
5935 5933 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5936 5934 }
5937 5935 }
5938 5936
5939 5937 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5940 5938 }
5941 5939
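e1000_led_on_pchlan above (and e1000_led_off_pchlan below) implement the workaround promised in e1000_id_led_init_pchlan: with no link, a link_up-mode LED is driven to the requested state by flipping its invert bit, and the driver's if/else is exactly an XOR written out longhand. A standalone sketch of the per-LED walk; the field values are illustrative:

	#include <stdint.h>

	#define LED_MODE_MASK	 0x07u	/* hypothetical sub-fields of */
	#define LED_MODE_LINK_UP 0x03u	/* each 5-bit per-LED config  */
	#define LED_IVRT	 0x10u

	static uint16_t flip_link_up_inverts(uint16_t cfg)
	{
		unsigned i;

		for (i = 0; i < 3; i++) {
			unsigned shift = i * 5;
			uint16_t led = (cfg >> shift) & 0x1Fu;

			if ((led & LED_MODE_MASK) != LED_MODE_LINK_UP)
				continue;
			/* Inverting an off LED turns it on, and vice
			 * versa, emulating always-on/always-off. */
			cfg ^= (uint16_t)(LED_IVRT << shift);
		}
		return cfg;
	}
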
5942 5940 /**
5943 5941 * e1000_led_off_pchlan - Turn LEDs off
5944 5942 * @hw: pointer to the HW structure
5945 5943 *
5946 5944 * Turn off the LEDs.
5947 5945 **/
5948 5946 static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5949 5947 {
5950 5948 u16 data = (u16)hw->mac.ledctl_mode1;
5951 5949 u32 i, led;
5952 5950
5953 5951 DEBUGFUNC("e1000_led_off_pchlan");
5954 5952
5955 5953 /* If no link, then turn LED off by clearing the invert bit
5956 5954 	 * for each LED whose mode is "link_up" in ledctl_mode1.
5957 5955 */
5958 5956 if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
5959 5957 for (i = 0; i < 3; i++) {
5960 5958 led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5961 5959 if ((led & E1000_PHY_LED0_MODE_MASK) !=
5962 5960 E1000_LEDCTL_MODE_LINK_UP)
5963 5961 continue;
5964 5962 if (led & E1000_PHY_LED0_IVRT)
5965 5963 data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5966 5964 else
5967 5965 data |= (E1000_PHY_LED0_IVRT << (i * 5));
5968 5966 }
5969 5967 }
5970 5968
5971 5969 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data);
5972 5970 }
5973 5971
5974 5972 /**
5975 5973 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5976 5974 * @hw: pointer to the HW structure
5977 5975 *
5978 5976 * Read appropriate register for the config done bit for completion status
5979 5977 * and configure the PHY through s/w for EEPROM-less parts.
5980 5978 *
5981 5979 * NOTE: some silicon which is EEPROM-less will fail trying to read the
5982 5980   * config done bit, so only an error is logged and execution continues. If we
5983 5981   * were to return an error, EEPROM-less silicon would not be able to be reset
5984 5982 * or change link.
5985 5983 **/
5986 5984 static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5987 5985 {
5988 5986 s32 ret_val = E1000_SUCCESS;
5989 5987 u32 bank = 0;
5990 5988 u32 status;
5991 5989
5992 5990 DEBUGFUNC("e1000_get_cfg_done_ich8lan");
5993 5991
5994 5992 e1000_get_cfg_done_generic(hw);
5995 5993
5996 5994 /* Wait for indication from h/w that it has completed basic config */
5997 5995 if (hw->mac.type >= e1000_ich10lan) {
5998 5996 e1000_lan_init_done_ich8lan(hw);
5999 5997 } else {
6000 5998 ret_val = e1000_get_auto_rd_done_generic(hw);
6001 5999 if (ret_val) {
6002 6000 /* When auto config read does not complete, do not
6003 6001 * return with an error. This can happen in situations
6004 6002 * where there is no eeprom and prevents getting link.
6005 6003 */
6006 6004 DEBUGOUT("Auto Read Done did not complete\n");
6007 6005 ret_val = E1000_SUCCESS;
6008 6006 }
6009 6007 }
6010 6008
6011 6009 /* Clear PHY Reset Asserted bit */
6012 6010 status = E1000_READ_REG(hw, E1000_STATUS);
6013 6011 if (status & E1000_STATUS_PHYRA)
6014 6012 E1000_WRITE_REG(hw, E1000_STATUS, status & ~E1000_STATUS_PHYRA);
6015 6013 else
6016 6014 DEBUGOUT("PHY Reset Asserted not set - needs delay\n");
6017 6015
6018 6016 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
6019 6017 if (hw->mac.type <= e1000_ich9lan) {
6020 6018 if (!(E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) &&
6021 6019 (hw->phy.type == e1000_phy_igp_3)) {
6022 6020 e1000_phy_init_script_igp3(hw);
6023 6021 }
6024 6022 } else {
6025 6023 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
6026 6024 /* Maybe we should do a basic PHY config */
6027 6025 DEBUGOUT("EEPROM not present\n");
6028 6026 ret_val = -E1000_ERR_CONFIG;
6029 6027 }
6030 6028 }
6031 6029
6032 6030 return ret_val;
6033 6031 }
6034 6032
6035 6033 /**
6036 6034 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
6037 6035 * @hw: pointer to the HW structure
6038 6036 *
6039 6037 * In the case of a PHY power down to save power, or to turn off link during a
6040 6038 * driver unload, or wake on lan is not enabled, remove the link.
6041 6039 **/
6042 6040 static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
6043 6041 {
6044 6042 /* If the management interface is not enabled, then power down */
6045 6043 if (!(hw->mac.ops.check_mng_mode(hw) ||
6046 6044 hw->phy.ops.check_reset_block(hw)))
6047 6045 e1000_power_down_phy_copper(hw);
6048 6046
6049 6047 return;
6050 6048 }
6051 6049
6052 6050 /**
6053 6051 * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
6054 6052 * @hw: pointer to the HW structure
6055 6053 *
6056 6054 * Clears hardware counters specific to the silicon family and calls
6057 6055 * clear_hw_cntrs_generic to clear all general purpose counters.
6058 6056 **/
6059 6057 static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
6060 6058 {
6061 6059 u16 phy_data;
6062 6060 s32 ret_val;
6063 6061
6064 6062 DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
6065 6063
6066 6064 e1000_clear_hw_cntrs_base_generic(hw);
6067 6065
6068 6066 E1000_READ_REG(hw, E1000_ALGNERRC);
6069 6067 E1000_READ_REG(hw, E1000_RXERRC);
6070 6068 E1000_READ_REG(hw, E1000_TNCRS);
6071 6069 E1000_READ_REG(hw, E1000_CEXTERR);
6072 6070 E1000_READ_REG(hw, E1000_TSCTC);
6073 6071 E1000_READ_REG(hw, E1000_TSCTFC);
6074 6072
6075 6073 E1000_READ_REG(hw, E1000_MGTPRC);
6076 6074 E1000_READ_REG(hw, E1000_MGTPDC);
6077 6075 E1000_READ_REG(hw, E1000_MGTPTC);
6078 6076
6079 6077 E1000_READ_REG(hw, E1000_IAC);
6080 6078 E1000_READ_REG(hw, E1000_ICRXOC);
6081 6079
6082 6080 /* Clear PHY statistics registers */
6083 6081 if ((hw->phy.type == e1000_phy_82578) ||
6084 6082 (hw->phy.type == e1000_phy_82579) ||
6085 6083 (hw->phy.type == e1000_phy_i217) ||
6086 6084 (hw->phy.type == e1000_phy_82577)) {
6087 6085 ret_val = hw->phy.ops.acquire(hw);
6088 6086 if (ret_val)
6089 6087 return;
6090 6088 ret_val = hw->phy.ops.set_page(hw,
6091 6089 HV_STATS_PAGE << IGP_PAGE_SHIFT);
6092 6090 if (ret_val)
6093 6091 goto release;
6094 6092 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
6095 6093 hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
6096 6094 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
6097 6095 hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
6098 6096 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
6099 6097 hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
6100 6098 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
6101 6099 hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
6102 6100 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
6103 6101 hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
6104 6102 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
6105 6103 hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
6106 6104 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
6107 6105 hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
6108 6106 release:
6109 6107 hw->phy.ops.release(hw);
6110 6108 }
6111 6109 }
6112 6110